author    Rajkumar Manoharan <rmanohar@qti.qualcomm.com>  2016-03-22 17:22:18 +0530
committer Kalle Valo <kvalo@qca.qualcomm.com>  2016-04-04 17:03:21 +0300
commit    128abd09134a5b415fef4373841ea6d3fb7b680f (patch)
tree      f044f94436a670bcd38d9aa22dd9c12258fc872d /drivers/net/wireless/ath/ath10k/ce.c
parent    24d9ef5eff5057bb6339ed1cf852a2b2a7be324d (diff)
ath10k: reuse copy engine 5 (htt rx) descriptors
Whenever an HTT rx indication, i.e. a target-to-host message, is received on the rx copy engine (CE5), the message is freed after the response has been processed, and CE5 is then refilled with newly allocated descriptors during post-rx processing. These memory alloc and free operations can be avoided by reusing the same descriptors. During CE pipe allocation the full ring is not initialized, i.e. only n-1 entries are filled, so for CE5 the full ring has to be filled to reuse descriptors. Moreover, the CE5 write index is now updated in a single shot instead of being incremented per buffer, which avoids multiple pci_write and ce_ring accesses. In experiments this improves CPU usage by ~3% on the IPQ4019 platform.

Signed-off-by: Rajkumar Manoharan <rmanohar@qti.qualcomm.com>
Signed-off-by: Kalle Valo <kvalo@qca.qualcomm.com>
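To see why a completely full CE5 ring and a one-shot write-index update are safe, consider the mask-based ring arithmetic the patch relies on. Below is a minimal standalone sketch; the two macros mirror ath10k's CE_RING_DELTA and CE_RING_IDX_ADD definitions in ce.h (ring sizes are powers of two, so a mask implements wrap-around), while the ring size and batch count in main() are illustrative assumptions:

/* Standalone sketch of the ring-index arithmetic. The macros mirror
 * the ath10k definitions in ce.h; the scenario in main() is an
 * illustrative assumption, not driver code.
 */
#include <stdio.h>

#define CE_RING_DELTA(nentries_mask, fromidx, toidx) \
	(((int)(toidx) - (int)(fromidx)) & (nentries_mask))

#define CE_RING_IDX_ADD(nentries_mask, idx, num) \
	(((idx) + (num)) & (nentries_mask))

int main(void)
{
	unsigned int nentries_mask = 512 - 1;	/* 512-entry dest ring */
	unsigned int sw_index = 0;
	unsigned int write_index = 511;		/* n-1 entries posted */

	/* The usual "keep one slot empty" rule: with n-1 entries posted,
	 * CE_RING_DELTA reports zero free slots. This is exactly the
	 * check CE5 must bypass to keep its ring completely full.
	 */
	printf("free slots: %d\n",
	       CE_RING_DELTA(nentries_mask, write_index, sw_index - 1));

	/* Single-shot advance: fold a whole batch of reused descriptors
	 * into one index update instead of one update per buffer.
	 */
	write_index = CE_RING_IDX_ADD(nentries_mask, write_index, 32);
	printf("write index after batch of 32: %u\n", write_index);

	return 0;
}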
Diffstat (limited to 'drivers/net/wireless/ath/ath10k/ce.c')
-rw-r--r--  drivers/net/wireless/ath/ath10k/ce.c  23
1 file changed, 20 insertions(+), 3 deletions(-)
diff --git a/drivers/net/wireless/ath/ath10k/ce.c b/drivers/net/wireless/ath/ath10k/ce.c
index d6da404c9fa7d..7212802eb3274 100644
--- a/drivers/net/wireless/ath/ath10k/ce.c
+++ b/drivers/net/wireless/ath/ath10k/ce.c
@@ -411,7 +411,8 @@ int __ath10k_ce_rx_post_buf(struct ath10k_ce_pipe *pipe, void *ctx, u32 paddr)
lockdep_assert_held(&ar_pci->ce_lock);
- if (CE_RING_DELTA(nentries_mask, write_index, sw_index - 1) == 0)
+ if ((pipe->id != 5) &&
+ CE_RING_DELTA(nentries_mask, write_index, sw_index - 1) == 0)
return -ENOSPC;
desc->addr = __cpu_to_le32(paddr);
@@ -425,6 +426,19 @@ int __ath10k_ce_rx_post_buf(struct ath10k_ce_pipe *pipe, void *ctx, u32 paddr)
return 0;
}
+void ath10k_ce_rx_update_write_idx(struct ath10k_ce_pipe *pipe, u32 nentries)
+{
+ struct ath10k *ar = pipe->ar;
+ struct ath10k_ce_ring *dest_ring = pipe->dest_ring;
+ unsigned int nentries_mask = dest_ring->nentries_mask;
+ unsigned int write_index = dest_ring->write_index;
+ u32 ctrl_addr = pipe->ctrl_addr;
+
+ write_index = CE_RING_IDX_ADD(nentries_mask, write_index, nentries);
+ ath10k_ce_dest_ring_write_index_set(ar, ctrl_addr, write_index);
+ dest_ring->write_index = write_index;
+}
+
int ath10k_ce_rx_post_buf(struct ath10k_ce_pipe *pipe, void *ctx, u32 paddr)
{
struct ath10k *ar = pipe->ar;
@@ -478,8 +492,11 @@ int ath10k_ce_completed_recv_next_nolock(struct ath10k_ce_pipe *ce_state,
*per_transfer_contextp =
dest_ring->per_transfer_context[sw_index];
- /* sanity */
- dest_ring->per_transfer_context[sw_index] = NULL;
+ /* Copy engine 5 (HTT Rx) will reuse the same transfer context.
+ * So update the transfer context for all CEs except CE5.
+ */
+ if (ce_state->id != 5)
+ dest_ring->per_transfer_context[sw_index] = NULL;
/* Update sw_index */
sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
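The caller-side half of this scheme lands outside ce.c (the diffstat above is limited to ce.c), so it is not shown here. As a rough, hedged sketch of how a CE5 receive path could exploit the reused descriptors, with the completion-drain loop elided because its exact signature is not part of this diff:

/* Hypothetical CE5 recv-completion flow (the real caller lives outside
 * this diffstat). Because descriptors and transfer contexts stay in
 * the ring, re-posting a processed batch reduces to a single
 * write-index update for the whole batch.
 */
static void ath10k_ce5_reuse_sketch(struct ath10k_ce_pipe *ce_pipe)
{
	unsigned int nentries = 0;

	/* Drain completed CE5 buffers, handing each HTT rx message to
	 * the upper layer. Nothing is freed and nothing is re-posted
	 * per buffer; the loop only counts consumed entries.
	 * (completion-drain loop elided)
	 */

	/* Publish the whole batch of reused descriptors back to the
	 * firmware with one register write instead of one per buffer.
	 */
	if (nentries)
		ath10k_ce_rx_update_write_idx(ce_pipe, nentries);
}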