author    Sunil Goutham <sgoutham@cavium.com>    2017-05-02 18:36:50 +0530
committer David S. Miller <davem@davemloft.net>  2017-05-02 15:41:20 -0400
commit    5836b4429777bf57ca8fc02b154263aa54d97508
tree      2a3878ca7264b93ecbfbd64d82dfd660d5c4055c
parent    ee0d8d8482345ff97a75a7d747efc309f13b0d80
net: thunderx: Support for page recycling
Adds support for page recycling when allocating receive buffers, to reduce the cost of refilling the RBDR ring. Also gets rid of compound pages when the page size is 4K; only order-0 pages are used now.

Only the page is recycled; a DMA mapping still needs to be done for every receive buffer allocated, due to the following constraints:
- Cannot have just one receive buffer per 64KB page.
- There is just one buffer ring shared across the 8 Rx queues, so buffers from the same page can go to any Rx queue.
- HW gives the buffer address where the packet has been DMA'ed, not the index into the buffer ring, which makes it impossible to reuse the DMA mapping info.
So, unfortunately, every buffer has to go through the costly mapping route.

Signed-off-by: Sunil Goutham <sgoutham@cavium.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
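To make the recycling scheme above concrete, here is a minimal sketch of a buffer-allocation helper. The function name is hypothetical and page reference-count management is elided; it only illustrates the constraints described in the commit message: the order-0 page may come from the per-RBDR cache, but dma_map_page() still runs for every buffer.

/* Illustrative sketch only: nicvf_alloc_recycled_page() is a
 * hypothetical helper modeled on the commit description, not the
 * driver's actual code. Assumes pgcnt is a power of two and elides
 * page reference-count management for brevity.
 */
static struct page *nicvf_alloc_recycled_page(struct device *dev,
					      struct rbdr *rbdr,
					      dma_addr_t *dma_addr)
{
	struct pgcache *pgcache = &rbdr->pgcache[rbdr->pgidx];
	struct page *page = pgcache->page;

	/* Reuse the cached order-0 page if nobody else still holds
	 * a reference to it; otherwise allocate a fresh one.
	 */
	if (!page || page_ref_count(page) != 1) {
		page = alloc_page(GFP_ATOMIC);
		if (!page)
			return NULL;
		pgcache->page = page;
		rbdr->pgalloc++;
	}

	/* Advance the recycling index, wrapping over the cache */
	rbdr->pgidx = (rbdr->pgidx + 1) & (rbdr->pgcnt - 1);

	/* Per the constraints above, the mapping itself cannot be
	 * recycled: HW reports a bus address, not a ring index, so
	 * every buffer is mapped individually.
	 */
	*dma_addr = dma_map_page(dev, page, 0, PAGE_SIZE,
				 DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, *dma_addr))
		return NULL;

	return page;
}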
Diffstat (limited to 'drivers/net/ethernet/cavium/thunder/nicvf_queues.h')
-rw-r--r--  drivers/net/ethernet/cavium/thunder/nicvf_queues.h | 11 +++++++++++
1 file changed, 11 insertions(+), 0 deletions(-)
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.h b/drivers/net/ethernet/cavium/thunder/nicvf_queues.h
index 10cb4b84625b..da4836601d8c 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.h
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.h
@@ -213,6 +213,11 @@ struct q_desc_mem {
 	void		*unalign_base;
 };
 
+struct pgcache {
+	struct page	*page;
+	u64		dma_addr;
+};
+
 struct rbdr {
 	bool		enable;
 	u32		dma_size;
@@ -222,6 +227,12 @@ struct rbdr {
 	u32		head;
 	u32		tail;
 	struct q_desc_mem   dmem;
+
+	/* For page recycling */
+	int		pgidx;
+	int		pgcnt;
+	int		pgalloc;
+	struct pgcache	*pgcache;
 } ____cacheline_aligned_in_smp;
 
 struct rcv_queue {
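Taken together, the new fields give each RBDR a small ring of cached pages: pgcache holds pgcnt page/DMA-address pairs, pgidx is the next slot to try, and pgalloc counts pages actually taken from the allocator. A hedged sketch of how such a cache might be sized and set up follows; the sizing heuristic and helper name are assumptions for illustration, not the driver's code.

/* Hypothetical sizing sketch: with 4K order-0 pages and ~2KB receive
 * buffers, one page backs two buffers, so half the ring length worth
 * of cache entries suffices; round up to a power of two so pgidx can
 * wrap with a simple mask.
 */
static int nicvf_init_pgcache(struct rbdr *rbdr, int ring_len)
{
	rbdr->pgcnt = roundup_pow_of_two(ring_len / 2);
	rbdr->pgcache = kcalloc(rbdr->pgcnt, sizeof(*rbdr->pgcache),
				GFP_KERNEL);
	if (!rbdr->pgcache)
		return -ENOMEM;
	rbdr->pgidx = 0;
	rbdr->pgalloc = 0;
	return 0;
}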