summary refs log tree commit diff stats
path: root/include/linux/skb_array.h
diff options
context:
space:
mode:
authorJason Wang <jasowang@redhat.com>2017-05-17 12:14:40 +0800
committerDavid S. Miller <davem@davemloft.net>2017-05-18 10:07:40 -0400
commit3528c1a52e7af001e0e387fcb6bac2bdb3775d3e (patch)
treec603fe3f356d56b740cbd027d506c665310e3fdc /include/linux/skb_array.h
parent728fc8d5532b956f9c4b48dff0577fb722251343 (diff)
downloadlinux-3528c1a52e7af001e0e387fcb6bac2bdb3775d3e.tar.gz
linux-3528c1a52e7af001e0e387fcb6bac2bdb3775d3e.tar.bz2
linux-3528c1a52e7af001e0e387fcb6bac2bdb3775d3e.zip
skb_array: introduce batch dequeuing
Signed-off-by: Jason Wang <jasowang@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'include/linux/skb_array.h')
-rw-r--r--include/linux/skb_array.h25
1 file changed, 25 insertions, 0 deletions
diff --git a/include/linux/skb_array.h b/include/linux/skb_array.h
index 79850b638bf2..35226cd4efb0 100644
--- a/include/linux/skb_array.h
+++ b/include/linux/skb_array.h
@@ -97,21 +97,46 @@ static inline struct sk_buff *skb_array_consume(struct skb_array *a)
return ptr_ring_consume(&a->ring);
}
+static inline int skb_array_consume_batched(struct skb_array *a,
+ struct sk_buff **array, int n)
+{
+ return ptr_ring_consume_batched(&a->ring, (void **)array, n);
+}
+
static inline struct sk_buff *skb_array_consume_irq(struct skb_array *a)
{
return ptr_ring_consume_irq(&a->ring);
}
+static inline int skb_array_consume_batched_irq(struct skb_array *a,
+ struct sk_buff **array, int n)
+{
+ return ptr_ring_consume_batched_irq(&a->ring, (void **)array, n);
+}
+
static inline struct sk_buff *skb_array_consume_any(struct skb_array *a)
{
return ptr_ring_consume_any(&a->ring);
}
+static inline int skb_array_consume_batched_any(struct skb_array *a,
+ struct sk_buff **array, int n)
+{
+ return ptr_ring_consume_batched_any(&a->ring, (void **)array, n);
+}
+
+
static inline struct sk_buff *skb_array_consume_bh(struct skb_array *a)
{
return ptr_ring_consume_bh(&a->ring);
}
+static inline int skb_array_consume_batched_bh(struct skb_array *a,
+ struct sk_buff **array, int n)
+{
+ return ptr_ring_consume_batched_bh(&a->ring, (void **)array, n);
+}
+
static inline int __skb_array_len_with_tag(struct sk_buff *skb)
{
if (likely(skb)) {