Diffstat (limited to 'include/linux/skbuff.h')
 include/linux/skbuff.h | 21 +++++++++++++++------
 1 file changed, 15 insertions(+), 6 deletions(-)
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 38501d20650c..c9525bce80f6 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -187,7 +187,6 @@ union skb_shared_tx {
  * the end of the header data, ie. at skb->end.
  */
 struct skb_shared_info {
-	atomic_t	dataref;
 	unsigned short	nr_frags;
 	unsigned short	gso_size;
 	/* Warning: this field is not always filled in (UFO)! */
@@ -197,6 +196,12 @@ struct skb_shared_info {
 	union skb_shared_tx tx_flags;
 	struct sk_buff	*frag_list;
 	struct skb_shared_hwtstamps hwtstamps;
+
+	/*
+	 * Warning : all fields before dataref are cleared in __alloc_skb()
+	 */
+	atomic_t	dataref;
+
 	skb_frag_t	frags[MAX_SKB_FRAGS];
 	/* Intermediate layers must ensure that destructor_arg
 	 * remains valid until skb destructor */
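
The reorder above moves every field that must start out zeroed ahead of
dataref, so the allocator can clear the whole prefix with a single
memset() instead of assigning members one by one. A minimal sketch of
that pattern (simplified, not necessarily the full __alloc_skb() body):

	struct skb_shared_info *shinfo = skb_shinfo(skb);

	/* One write clears every field laid out before dataref,
	 * which is exactly what the layout warning above promises.
	 */
	memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
	atomic_set(&shinfo->dataref, 1);
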
@@ -470,10 +475,6 @@ extern int skb_cow_data(struct sk_buff *skb, int tailbits,
 			    struct sk_buff **trailer);
 extern int	skb_pad(struct sk_buff *skb, int pad);
 #define dev_kfree_skb(a)	consume_skb(a)
-extern void	skb_over_panic(struct sk_buff *skb, int len,
-			       void *here);
-extern void	skb_under_panic(struct sk_buff *skb, int len,
-			       void *here);
 extern int skb_append_datato_frags(struct sock *sk, struct sk_buff *skb,
 			int getfrag(void *from, char *to, int offset,
 					int len, int odd, struct sk_buff *skb),
@@ -1132,6 +1133,11 @@ static inline unsigned char *__skb_pull(struct sk_buff *skb, unsigned int len)
 	return skb->data += len;
 }
 
+static inline unsigned char *skb_pull_inline(struct sk_buff *skb, unsigned int len)
+{
+	return unlikely(len > skb->len) ? NULL : __skb_pull(skb, len);
+}
+
 extern unsigned char *__pskb_pull_tail(struct sk_buff *skb, int delta);
 
 static inline unsigned char *__pskb_pull(struct sk_buff *skb, unsigned int len)
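
skb_pull_inline() gives callers an inlinable, length-checked pull that
returns NULL rather than advancing past the end of the data. The
out-of-line skb_pull() in net/core/skbuff.c can then be reduced to a
thin wrapper (a sketch; the exported symbol keeps its signature while
hot paths get the inlined check):

unsigned char *skb_pull(struct sk_buff *skb, unsigned int len)
{
	return skb_pull_inline(skb, len);
}

A hypothetical fast-path caller would use the inline form directly,
e.g. "if (!skb_pull_inline(skb, sizeof(struct udphdr))) goto drop;",
paying only one unlikely()-annotated compare on the common path.
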
@@ -1355,9 +1361,12 @@ static inline int skb_network_offset(const struct sk_buff *skb)
  *
  * Various parts of the networking layer expect at least 32 bytes of
  * headroom, you should not reduce this.
+ * With RPS, we raised NET_SKB_PAD to 64 so that get_rps_cpus() fetches
+ * span a single 64-byte aligned block, matching modern (>= 64 byte)
+ * cache line sizes: NET_IP_ALIGN(2) + ethernet_header(14) + IP_header(20/40) + ports(8)
  */
 #ifndef NET_SKB_PAD
-#define NET_SKB_PAD	32
+#define NET_SKB_PAD	64
 #endif
 
 extern int ___pskb_trim(struct sk_buff *skb, unsigned int len);
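
The arithmetic behind the comment: NET_IP_ALIGN(2) + ethernet_header(14)
+ IP_header(40, the IPv6 worst case) + ports(8) = 64 bytes, so everything
get_rps_cpus() reads fits in one 64-byte aligned block once drivers
reserve the standard headroom. That reservation is roughly what
__dev_alloc_skb() does (simplified sketch):

	struct sk_buff *skb = alloc_skb(length + NET_SKB_PAD, GFP_ATOMIC);

	if (likely(skb))
		skb_reserve(skb, NET_SKB_PAD);	/* 64 bytes of headroom */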