Diffstat (limited to 'drivers/net/wireless/ath/ath11k/dp_rx.c')
-rw-r--r-- | drivers/net/wireless/ath/ath11k/dp_rx.c | 141
1 file changed, 68 insertions, 73 deletions
diff --git a/drivers/net/wireless/ath/ath11k/dp_rx.c b/drivers/net/wireless/ath/ath11k/dp_rx.c
index 029ecf51c9ef..218ab41c0f3c 100644
--- a/drivers/net/wireless/ath/ath11k/dp_rx.c
+++ b/drivers/net/wireless/ath/ath11k/dp_rx.c
@@ -1,7 +1,7 @@
 // SPDX-License-Identifier: BSD-3-Clause-Clear
 /*
  * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
  */

 #include <linux/ieee80211.h>
@@ -675,11 +675,11 @@ void ath11k_dp_reo_cmd_list_cleanup(struct ath11k_base *ab)
 	list_for_each_entry_safe(cmd, tmp, &dp->reo_cmd_list, list) {
 		list_del(&cmd->list);
 		rx_tid = &cmd->data;
-		if (rx_tid->vaddr) {
-			dma_unmap_single(ab->dev, rx_tid->paddr,
-					 rx_tid->size, DMA_BIDIRECTIONAL);
-			kfree(rx_tid->vaddr);
-			rx_tid->vaddr = NULL;
+		if (rx_tid->vaddr_unaligned) {
+			dma_free_noncoherent(ab->dev, rx_tid->unaligned_size,
+					     rx_tid->vaddr_unaligned,
+					     rx_tid->paddr_unaligned, DMA_BIDIRECTIONAL);
+			rx_tid->vaddr_unaligned = NULL;
 		}
 		kfree(cmd);
 	}
@@ -689,11 +689,11 @@ void ath11k_dp_reo_cmd_list_cleanup(struct ath11k_base *ab)
 		list_del(&cmd_cache->list);
 		dp->reo_cmd_cache_flush_count--;
 		rx_tid = &cmd_cache->data;
-		if (rx_tid->vaddr) {
-			dma_unmap_single(ab->dev, rx_tid->paddr,
-					 rx_tid->size, DMA_BIDIRECTIONAL);
-			kfree(rx_tid->vaddr);
-			rx_tid->vaddr = NULL;
+		if (rx_tid->vaddr_unaligned) {
+			dma_free_noncoherent(ab->dev, rx_tid->unaligned_size,
+					     rx_tid->vaddr_unaligned,
+					     rx_tid->paddr_unaligned, DMA_BIDIRECTIONAL);
+			rx_tid->vaddr_unaligned = NULL;
 		}
 		kfree(cmd_cache);
 	}
@@ -708,11 +708,11 @@ static void ath11k_dp_reo_cmd_free(struct ath11k_dp *dp, void *ctx,
 	if (status != HAL_REO_CMD_SUCCESS)
 		ath11k_warn(dp->ab, "failed to flush rx tid hw desc, tid %d status %d\n",
 			    rx_tid->tid, status);
-	if (rx_tid->vaddr) {
-		dma_unmap_single(dp->ab->dev, rx_tid->paddr, rx_tid->size,
-				 DMA_BIDIRECTIONAL);
-		kfree(rx_tid->vaddr);
-		rx_tid->vaddr = NULL;
+	if (rx_tid->vaddr_unaligned) {
+		dma_free_noncoherent(dp->ab->dev, rx_tid->unaligned_size,
+				     rx_tid->vaddr_unaligned,
+				     rx_tid->paddr_unaligned, DMA_BIDIRECTIONAL);
+		rx_tid->vaddr_unaligned = NULL;
 	}
 }

@@ -749,10 +749,10 @@ static void ath11k_dp_reo_cache_flush(struct ath11k_base *ab,
 	if (ret) {
 		ath11k_err(ab, "failed to send HAL_REO_CMD_FLUSH_CACHE cmd, tid %d (%d)\n",
 			   rx_tid->tid, ret);
-		dma_unmap_single(ab->dev, rx_tid->paddr, rx_tid->size,
-				 DMA_BIDIRECTIONAL);
-		kfree(rx_tid->vaddr);
-		rx_tid->vaddr = NULL;
+		dma_free_noncoherent(ab->dev, rx_tid->unaligned_size,
+				     rx_tid->vaddr_unaligned,
+				     rx_tid->paddr_unaligned, DMA_BIDIRECTIONAL);
+		rx_tid->vaddr_unaligned = NULL;
 	}
 }

@@ -802,10 +802,10 @@ static void ath11k_dp_rx_tid_del_func(struct ath11k_dp *dp, void *ctx,
 	return;

 free_desc:
-	dma_unmap_single(ab->dev, rx_tid->paddr, rx_tid->size,
-			 DMA_BIDIRECTIONAL);
-	kfree(rx_tid->vaddr);
-	rx_tid->vaddr = NULL;
+	dma_free_noncoherent(ab->dev, rx_tid->unaligned_size,
+			     rx_tid->vaddr_unaligned,
+			     rx_tid->paddr_unaligned, DMA_BIDIRECTIONAL);
+	rx_tid->vaddr_unaligned = NULL;
 }

 void ath11k_peer_rx_tid_delete(struct ath11k *ar,
@@ -831,14 +831,16 @@ void ath11k_peer_rx_tid_delete(struct ath11k *ar,
 		if (ret != -ESHUTDOWN)
 			ath11k_err(ar->ab, "failed to send HAL_REO_CMD_UPDATE_RX_QUEUE cmd, tid %d (%d)\n",
 				   tid, ret);
-		dma_unmap_single(ar->ab->dev, rx_tid->paddr, rx_tid->size,
-				 DMA_BIDIRECTIONAL);
-		kfree(rx_tid->vaddr);
-		rx_tid->vaddr = NULL;
+		dma_free_noncoherent(ar->ab->dev, rx_tid->unaligned_size,
+				     rx_tid->vaddr_unaligned,
+				     rx_tid->paddr_unaligned, DMA_BIDIRECTIONAL);
+		rx_tid->vaddr_unaligned = NULL;
 	}

 	rx_tid->paddr = 0;
+	rx_tid->paddr_unaligned = 0;
 	rx_tid->size = 0;
+	rx_tid->unaligned_size = 0;
 }

 static int ath11k_dp_rx_link_desc_return(struct ath11k_base *ab,
@@ -904,7 +906,7 @@ void ath11k_peer_frags_flush(struct ath11k *ar, struct ath11k_peer *peer)
 		rx_tid = &peer->rx_tid[i];

 		spin_unlock_bh(&ar->ab->base_lock);
-		del_timer_sync(&rx_tid->frag_timer);
+		timer_delete_sync(&rx_tid->frag_timer);
 		spin_lock_bh(&ar->ab->base_lock);

 		ath11k_dp_rx_frags_cleanup(rx_tid, true);
@@ -925,7 +927,7 @@ void ath11k_peer_rx_tid_cleanup(struct ath11k *ar, struct ath11k_peer *peer)
 		ath11k_dp_rx_frags_cleanup(rx_tid, true);

 		spin_unlock_bh(&ar->ab->base_lock);
-		del_timer_sync(&rx_tid->frag_timer);
+		timer_delete_sync(&rx_tid->frag_timer);
 		spin_lock_bh(&ar->ab->base_lock);
 	}
 }
@@ -982,10 +984,9 @@ static void ath11k_dp_rx_tid_mem_free(struct ath11k_base *ab,
 	if (!rx_tid->active)
 		goto unlock_exit;

-	dma_unmap_single(ab->dev, rx_tid->paddr, rx_tid->size,
-			 DMA_BIDIRECTIONAL);
-	kfree(rx_tid->vaddr);
-	rx_tid->vaddr = NULL;
+	dma_free_noncoherent(ab->dev, rx_tid->unaligned_size, rx_tid->vaddr_unaligned,
+			     rx_tid->paddr_unaligned, DMA_BIDIRECTIONAL);
+	rx_tid->vaddr_unaligned = NULL;

 	rx_tid->active = false;

@@ -1000,9 +1001,8 @@ int ath11k_peer_rx_tid_setup(struct ath11k *ar, const u8 *peer_mac, int vdev_id,
 	struct ath11k_base *ab = ar->ab;
 	struct ath11k_peer *peer;
 	struct dp_rx_tid *rx_tid;
-	u32 hw_desc_sz;
-	u32 *addr_aligned;
-	void *vaddr;
+	u32 hw_desc_sz, *vaddr;
+	void *vaddr_unaligned;
 	dma_addr_t paddr;
 	int ret;

@@ -1050,37 +1050,34 @@ int ath11k_peer_rx_tid_setup(struct ath11k *ar, const u8 *peer_mac, int vdev_id,
 	else
 		hw_desc_sz = ath11k_hal_reo_qdesc_size(DP_BA_WIN_SZ_MAX, tid);

-	vaddr = kzalloc(hw_desc_sz + HAL_LINK_DESC_ALIGN - 1, GFP_ATOMIC);
-	if (!vaddr) {
+	rx_tid->unaligned_size = hw_desc_sz + HAL_LINK_DESC_ALIGN - 1;
+	vaddr_unaligned = dma_alloc_noncoherent(ab->dev, rx_tid->unaligned_size, &paddr,
+						DMA_BIDIRECTIONAL, GFP_ATOMIC);
+	if (!vaddr_unaligned) {
 		spin_unlock_bh(&ab->base_lock);
 		return -ENOMEM;
 	}

-	addr_aligned = PTR_ALIGN(vaddr, HAL_LINK_DESC_ALIGN);
-
-	ath11k_hal_reo_qdesc_setup(addr_aligned, tid, ba_win_sz,
-				   ssn, pn_type);
-
-	paddr = dma_map_single(ab->dev, addr_aligned, hw_desc_sz,
-			       DMA_BIDIRECTIONAL);
-
-	ret = dma_mapping_error(ab->dev, paddr);
-	if (ret) {
-		spin_unlock_bh(&ab->base_lock);
-		ath11k_warn(ab, "failed to setup dma map for peer %pM rx tid %d: %d\n",
-			    peer_mac, tid, ret);
-		goto err_mem_free;
-	}
-
-	rx_tid->vaddr = vaddr;
-	rx_tid->paddr = paddr;
+	rx_tid->vaddr_unaligned = vaddr_unaligned;
+	vaddr = PTR_ALIGN(vaddr_unaligned, HAL_LINK_DESC_ALIGN);
+	rx_tid->paddr_unaligned = paddr;
+	rx_tid->paddr = rx_tid->paddr_unaligned + ((unsigned long)vaddr -
+			(unsigned long)rx_tid->vaddr_unaligned);
+	ath11k_hal_reo_qdesc_setup(vaddr, tid, ba_win_sz, ssn, pn_type);
 	rx_tid->size = hw_desc_sz;
 	rx_tid->active = true;

+	/* After dma_alloc_noncoherent, vaddr is being modified for reo qdesc setup.
+	 * Since these changes are not reflected in the device, driver now needs to
+	 * explicitly call dma_sync_single_for_device.
+	 */
+	dma_sync_single_for_device(ab->dev, rx_tid->paddr,
+				   rx_tid->size,
+				   DMA_TO_DEVICE);
 	spin_unlock_bh(&ab->base_lock);

-	ret = ath11k_wmi_peer_rx_reorder_queue_setup(ar, vdev_id, peer_mac,
-						     paddr, tid, 1, ba_win_sz);
+	ret = ath11k_wmi_peer_rx_reorder_queue_setup(ar, vdev_id, peer_mac, rx_tid->paddr,
+						     tid, 1, ba_win_sz);
 	if (ret) {
 		ath11k_warn(ar->ab, "failed to setup rx reorder queue for peer %pM tid %d: %d\n",
 			    peer_mac, tid, ret);
@@ -1088,12 +1085,6 @@ int ath11k_peer_rx_tid_setup(struct ath11k *ar, const u8 *peer_mac, int vdev_id,
 	}

 	return ret;
-
-err_mem_free:
-	kfree(rx_tid->vaddr);
-	rx_tid->vaddr = NULL;
-
-	return ret;
 }

 int ath11k_dp_rx_ampdu_start(struct ath11k *ar,
@@ -2831,8 +2822,6 @@ static void ath11k_dp_rx_update_peer_stats(struct ath11k_sta *arsta,
 	rx_stats->dcm_count += ppdu_info->dcm;
 	rx_stats->ru_alloc_cnt[ppdu_info->ru_alloc] += num_msdu;

-	arsta->rssi_comb = ppdu_info->rssi_comb;
-
 	BUILD_BUG_ON(ARRAY_SIZE(arsta->chain_signal) >
		     ARRAY_SIZE(ppdu_info->rssi_chain_pri20));

@@ -3721,7 +3710,7 @@ static int ath11k_dp_rx_frag_h_mpdu(struct ath11k *ar,
 	}
 	spin_unlock_bh(&ab->base_lock);

-	del_timer_sync(&rx_tid->frag_timer);
+	timer_delete_sync(&rx_tid->frag_timer);
 	spin_lock_bh(&ab->base_lock);

 	peer = ath11k_peer_find_by_id(ab, peer_id);
@@ -4783,7 +4772,7 @@ u32 ath11k_dp_rx_mon_mpdu_pop(struct ath11k *ar, int mac_id,
 			if (!msdu) {
 				ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
 					   "msdu_pop: invalid buf_id %d\n", buf_id);
-				break;
+				goto next_msdu;
 			}
 			rxcb = ATH11K_SKB_RXCB(msdu);
 			if (!rxcb->unmapped) {
@@ -5148,7 +5137,7 @@ static void ath11k_dp_rx_mon_dest_process(struct ath11k *ar, int mac_id,
 	struct ath11k_mon_data *pmon = (struct ath11k_mon_data *)&dp->mon_data;
 	const struct ath11k_hw_hal_params *hal_params;
 	void *ring_entry;
-	void *mon_dst_srng;
+	struct hal_srng *mon_dst_srng;
 	u32 ppdu_id;
 	u32 rx_bufs_used;
 	u32 ring_id;
@@ -5165,6 +5154,7 @@ static void ath11k_dp_rx_mon_dest_process(struct ath11k *ar, int mac_id,

 	spin_lock_bh(&pmon->mon_lock);

+	spin_lock_bh(&mon_dst_srng->lock);
 	ath11k_hal_srng_access_begin(ar->ab, mon_dst_srng);

 	ppdu_id = pmon->mon_ppdu_info.ppdu_id;
@@ -5223,6 +5213,7 @@ static void ath11k_dp_rx_mon_dest_process(struct ath11k *ar, int mac_id,
 						mon_dst_srng);
 	}
 	ath11k_hal_srng_access_end(ar->ab, mon_dst_srng);
+	spin_unlock_bh(&mon_dst_srng->lock);

 	spin_unlock_bh(&pmon->mon_lock);

@@ -5410,7 +5401,7 @@ ath11k_dp_rx_full_mon_mpdu_pop(struct ath11k *ar,
 					   "full mon msdu_pop: invalid buf_id %d\n",
 					   buf_id);
 				spin_unlock_bh(&rx_ring->idr_lock);
-				break;
+				goto next_msdu;
 			}
 			idr_remove(&rx_ring->bufs_idr, buf_id);
 			spin_unlock_bh(&rx_ring->idr_lock);
@@ -5612,7 +5603,7 @@ static int ath11k_dp_full_mon_process_rx(struct ath11k_base *ab, int mac_id,
 	struct hal_sw_mon_ring_entries *sw_mon_entries;
 	struct ath11k_pdev_mon_stats *rx_mon_stats;
 	struct sk_buff *head_msdu, *tail_msdu;
-	void *mon_dst_srng = &ar->ab->hal.srng_list[dp->rxdma_mon_dst_ring.ring_id];
+	struct hal_srng *mon_dst_srng;
 	void *ring_entry;
 	u32 rx_bufs_used = 0, mpdu_rx_bufs_used;
 	int quota = 0, ret;
@@ -5628,6 +5619,9 @@ static int ath11k_dp_full_mon_process_rx(struct ath11k_base *ab, int mac_id,
 		goto reap_status_ring;
 	}

+	mon_dst_srng = &ar->ab->hal.srng_list[dp->rxdma_mon_dst_ring.ring_id];
+	spin_lock_bh(&mon_dst_srng->lock);
+
 	ath11k_hal_srng_access_begin(ar->ab, mon_dst_srng);
 	while ((ring_entry = ath11k_hal_srng_dst_peek(ar->ab, mon_dst_srng))) {
 		head_msdu = NULL;
@@ -5671,6 +5665,7 @@ next_entry:
 	}

 	ath11k_hal_srng_access_end(ar->ab, mon_dst_srng);
+	spin_unlock_bh(&mon_dst_srng->lock);
 	spin_unlock_bh(&pmon->mon_lock);

 	if (rx_bufs_used) {
@@ -5786,7 +5781,7 @@ int ath11k_dp_rx_pktlog_stop(struct ath11k_base *ab, bool stop_timer)
 	int ret;

 	if (stop_timer)
-		del_timer_sync(&ab->mon_reap_timer);
+		timer_delete_sync(&ab->mon_reap_timer);

 	/* reap all the monitor related rings */
 	ret = ath11k_dp_purge_mon_ring(ab);
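Note: the core of this patch is the switch in ath11k_peer_rx_tid_setup() from kzalloc() + dma_map_single() to dma_alloc_noncoherent(), with the aligned DMA address derived from the unaligned allocation and an explicit dma_sync_single_for_device() after the CPU writes the REO queue descriptor. The sketch below is not ath11k code; it is a minimal illustration of that allocation pattern using only generic DMA API calls, with hypothetical names (demo_qdesc, demo_qdesc_alloc, demo_qdesc_free) standing in for dp_rx_tid and the driver's helpers.

#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/kernel.h>

/* Hypothetical stand-in for dp_rx_tid: tracks both the raw (unaligned)
 * allocation and the aligned address actually handed to the device.
 */
struct demo_qdesc {
	void *vaddr_unaligned;
	dma_addr_t paddr_unaligned;
	dma_addr_t paddr;		/* aligned DMA address for the device */
	size_t unaligned_size;
	size_t size;
};

static int demo_qdesc_alloc(struct device *dev, struct demo_qdesc *q,
			    size_t desc_sz, unsigned long align)
{
	void *aligned;

	/* Over-allocate by align - 1 bytes so an aligned descriptor always
	 * fits inside the buffer, mirroring the HAL_LINK_DESC_ALIGN slack
	 * in the patch.
	 */
	q->unaligned_size = desc_sz + align - 1;
	q->vaddr_unaligned = dma_alloc_noncoherent(dev, q->unaligned_size,
						   &q->paddr_unaligned,
						   DMA_BIDIRECTIONAL, GFP_ATOMIC);
	if (!q->vaddr_unaligned)
		return -ENOMEM;

	/* Derive the aligned CPU pointer and apply the same offset to the
	 * DMA address so both views point at the same descriptor.
	 */
	aligned = PTR_ALIGN(q->vaddr_unaligned, align);
	q->paddr = q->paddr_unaligned +
		   ((unsigned long)aligned - (unsigned long)q->vaddr_unaligned);
	q->size = desc_sz;

	/* The CPU would fill the descriptor through 'aligned' here (the REO
	 * qdesc setup in the real driver), then sync it so a device behind a
	 * non-coherent mapping observes the writes.
	 */
	dma_sync_single_for_device(dev, q->paddr, q->size, DMA_TO_DEVICE);

	return 0;
}

static void demo_qdesc_free(struct device *dev, struct demo_qdesc *q)
{
	if (!q->vaddr_unaligned)
		return;

	/* Free with the original unaligned address and size, as the patch
	 * does in every teardown path.
	 */
	dma_free_noncoherent(dev, q->unaligned_size, q->vaddr_unaligned,
			     q->paddr_unaligned, DMA_BIDIRECTIONAL);
	q->vaddr_unaligned = NULL;
}

Compared with the old kzalloc() + dma_map_single() sequence, this removes the separate mapping step (and its dma_mapping_error() handling), which is why the err_mem_free label disappears from ath11k_peer_rx_tid_setup() in the diff above.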