author     Malcolm Crossley <malcolm.crossley@citrix.com>  2014-11-05 10:50:22 +0000
committer  David S. Miller <davem@davemloft.net>           2014-11-06 14:40:18 -0500
commit     7e5d7753956b374516530e156c5e8aa19652398d
tree       536746bc7043795210b9dd9fd4588dfcf2c37f5f /drivers/net/xen-netback
parent     17007aa53a1d870d57fdc6a316a3315eabb10f2a
xen-netback: remove unconditional __pskb_pull_tail() in guest Tx path
Unconditionally pulling 128 bytes into the linear area is not required
for:
- security: Every protocol demux starts with pskb_may_pull() to pull
  frag data into the linear area, if necessary, before looking at
  headers (see the sketch after this list).
- performance: Netback has already grant copied up to 128 bytes from
  the first slot of a packet into the linear area. The first slot
  normally contains all the IPv4/IPv6 and TCP/UDP headers.
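
As context for the security point, here is a minimal sketch (not part of
this patch) of the pskb_may_pull() pattern that protocol demux code already
follows before reading headers. example_parse_headers() is a hypothetical
name; the calls mirror the standard kernel idiom (cf. ip_rcv()), and the
sketch assumes the network header offset has been set, as netback does via
skb_reset_network_header():

#include <linux/skbuff.h>
#include <linux/ip.h>

static int example_parse_headers(struct sk_buff *skb)
{
	const struct iphdr *iph;

	/* Make sure the basic IPv4 header is in the linear area,
	 * copying from the frags only if it is not already there. */
	if (!pskb_may_pull(skb, sizeof(struct iphdr)))
		return -EINVAL;

	iph = ip_hdr(skb);

	/* Pull the full header, including IP options, if any ... */
	if (!pskb_may_pull(skb, iph->ihl * 4))
		return -EINVAL;

	/* ... and reload the pointer: the pull may have reallocated
	 * the skb head. */
	iph = ip_hdr(skb);

	return iph->protocol;
}

Because the demux pulls exactly what it needs, on demand, netback gains
nothing by pulling 128 bytes up front for every packet.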
The unconditional pull would often copy frag data unnecessarily. This
is a performance problem when running on a version of Xen where grant
unmap avoids TLB flushes for pages which are not accessed. TLB
flushes can now be avoided for > 99% of unmaps (it was 0% before).
Grant unmap TLB flush avoidance will be available in a future version
of Xen (probably 4.6).
Signed-off-by: Malcolm Crossley <malcolm.crossley@citrix.com>
Signed-off-by: David Vrabel <david.vrabel@citrix.com>
Acked-by: Ian Campbell <ian.campbell@citrix.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/net/xen-netback')
-rw-r--r--  drivers/net/xen-netback/netback.c  26
1 file changed, 12 insertions(+), 14 deletions(-)
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index 45755f9aa3f9..4a509f715fe8 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -82,6 +82,16 @@ MODULE_PARM_DESC(max_queues,
 static unsigned int fatal_skb_slots = FATAL_SKB_SLOTS_DEFAULT;
 module_param(fatal_skb_slots, uint, 0444);
 
+/* The amount to copy out of the first guest Tx slot into the skb's
+ * linear area. If the first slot has more data, it will be mapped
+ * and put into the first frag.
+ *
+ * This is sized to avoid pulling headers from the frags for most
+ * TCP/IP packets.
+ */
+#define XEN_NETBACK_TX_COPY_LEN 128
+
+
 static void xenvif_idx_release(struct xenvif_queue *queue, u16 pending_idx,
 			       u8 status);
 
@@ -125,13 +135,6 @@ static inline struct xenvif_queue *ubuf_to_queue(const struct ubuf_info *ubuf)
 			    pending_tx_info[0]);
 }
 
-/* This is a miniumum size for the linear area to avoid lots of
- * calls to __pskb_pull_tail() as we set up checksum offsets. The
- * value 128 was chosen as it covers all IPv4 and most likely
- * IPv6 headers.
- */
-#define PKT_PROT_LEN 128
-
 static u16 frag_get_pending_idx(skb_frag_t *frag)
 {
 	return (u16)frag->page_offset;
@@ -1446,9 +1449,9 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
 		index = pending_index(queue->pending_cons);
 		pending_idx = queue->pending_ring[index];
 
-		data_len = (txreq.size > PKT_PROT_LEN &&
+		data_len = (txreq.size > XEN_NETBACK_TX_COPY_LEN &&
 			    ret < XEN_NETBK_LEGACY_SLOTS_MAX) ?
-			PKT_PROT_LEN : txreq.size;
+			XEN_NETBACK_TX_COPY_LEN : txreq.size;
 
 		skb = xenvif_alloc_skb(data_len);
 		if (unlikely(skb == NULL)) {
@@ -1653,11 +1656,6 @@ static int xenvif_tx_submit(struct xenvif_queue *queue)
 			}
 		}
 
-		if (skb_is_nonlinear(skb) && skb_headlen(skb) < PKT_PROT_LEN) {
-			int target = min_t(int, skb->len, PKT_PROT_LEN);
-			__pskb_pull_tail(skb, target - skb_headlen(skb));
-		}
-
 		skb->dev      = queue->vif->dev;
 		skb->protocol = eth_type_trans(skb, skb->dev);
 		skb_reset_network_header(skb);
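
For illustration only, the new data_len computation above can be read in
isolation as the following userspace sketch. tx_copy_len(), txreq_size,
and nr_slots are hypothetical stand-ins for the driver's txreq.size and
ret; the constants mirror those used in the patch:

#include <stddef.h>

#define XEN_NETBACK_TX_COPY_LEN    128 /* bytes copied to the linear area */
#define XEN_NETBK_LEGACY_SLOTS_MAX  18 /* XEN_NETIF_NR_SLOTS_MIN in the
					* Xen interface headers */

/* How much of the first guest Tx slot netback grant-copies into the
 * skb's linear area.  Anything beyond this stays in a grant-mapped
 * frag, so no __pskb_pull_tail() is needed afterwards. */
static size_t tx_copy_len(size_t txreq_size, unsigned int nr_slots)
{
	if (txreq_size > XEN_NETBACK_TX_COPY_LEN &&
	    nr_slots < XEN_NETBK_LEGACY_SLOTS_MAX)
		return XEN_NETBACK_TX_COPY_LEN;

	/* Small first slot (headers only), or too many slots to map
	 * the remainder: copy the whole slot. */
	return txreq_size;
}

The effect is that the linear area holds at most the protocol headers,
while any payload in a large first slot stays in a grant-mapped frag
rather than being pulled into the linear area later.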