summaryrefslogtreecommitdiffstats
path: root/net/ipv4/ip_tunnel.c
diff options
context:
space:
mode:
authorTom Herbert <therbert@google.com>2014-01-02 11:48:33 -0800
committerDavid S. Miller <davem@davemloft.net>2014-01-03 19:40:57 -0500
commit9a4aa9af447f784f0a47313c8dcb79ac63442cf7 (patch)
treee5e6d38f79704d282679a06d6bf00cf819a85d7a /net/ipv4/ip_tunnel.c
parent7d442fab0a6777fd7612cfcada32ea859553d370 (diff)
downloadlinux-9a4aa9af447f784f0a47313c8dcb79ac63442cf7.tar.gz
linux-9a4aa9af447f784f0a47313c8dcb79ac63442cf7.tar.bz2
linux-9a4aa9af447f784f0a47313c8dcb79ac63442cf7.zip
ipv4: Use percpu Cache route in IP tunnels
The percpu route cache eliminates sharing of the dst refcount between CPUs. Signed-off-by: Tom Herbert <therbert@google.com> Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/ipv4/ip_tunnel.c')
-rw-r--r-- net/ipv4/ip_tunnel.c | 44
1 files changed, 32 insertions, 12 deletions
diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c
index 27d756f8f870..e2c9cff26eb5 100644
--- a/net/ipv4/ip_tunnel.c
+++ b/net/ipv4/ip_tunnel.c
@@ -68,23 +68,24 @@ static unsigned int ip_tunnel_hash(struct ip_tunnel_net *itn,
IP_TNL_HASH_BITS);
}
-static inline void __tunnel_dst_set(struct ip_tunnel *t, struct dst_entry *dst)
+static inline void __tunnel_dst_set(struct ip_tunnel_dst *idst,
+ struct dst_entry *dst)
{
struct dst_entry *old_dst;
if (dst && (dst->flags & DST_NOCACHE))
dst = NULL;
- spin_lock_bh(&t->dst_lock);
- old_dst = rcu_dereference_raw(t->dst_cache);
- rcu_assign_pointer(t->dst_cache, dst);
+ spin_lock_bh(&idst->lock);
+ old_dst = rcu_dereference(idst->dst);
+ rcu_assign_pointer(idst->dst, dst);
dst_release(old_dst);
- spin_unlock_bh(&t->dst_lock);
+ spin_unlock_bh(&idst->lock);
}
static inline void tunnel_dst_set(struct ip_tunnel *t, struct dst_entry *dst)
{
- __tunnel_dst_set(t, dst);
+ __tunnel_dst_set(this_cpu_ptr(t->dst_cache), dst);
}
static inline void tunnel_dst_reset(struct ip_tunnel *t)
@@ -92,12 +93,20 @@ static inline void tunnel_dst_reset(struct ip_tunnel *t)
tunnel_dst_set(t, NULL);
}
+static void tunnel_dst_reset_all(struct ip_tunnel *t)
+{
+ int i;
+
+ for_each_possible_cpu(i)
+ __tunnel_dst_set(per_cpu_ptr(t->dst_cache, i), NULL);
+}
+
static inline struct dst_entry *tunnel_dst_get(struct ip_tunnel *t)
{
struct dst_entry *dst;
rcu_read_lock();
- dst = rcu_dereference(t->dst_cache);
+ dst = rcu_dereference(this_cpu_ptr(t->dst_cache)->dst);
if (dst)
dst_hold(dst);
rcu_read_unlock();
@@ -755,7 +764,7 @@ static void ip_tunnel_update(struct ip_tunnel_net *itn,
if (set_mtu)
dev->mtu = mtu;
}
- tunnel_dst_reset(t);
+ tunnel_dst_reset_all(t);
netdev_state_change(dev);
}
@@ -871,6 +880,7 @@ static void ip_tunnel_dev_free(struct net_device *dev)
struct ip_tunnel *tunnel = netdev_priv(dev);
gro_cells_destroy(&tunnel->gro_cells);
+ free_percpu(tunnel->dst_cache);
free_percpu(dev->tstats);
free_netdev(dev);
}
@@ -1049,8 +1059,21 @@ int ip_tunnel_init(struct net_device *dev)
u64_stats_init(&ipt_stats->syncp);
}
+ tunnel->dst_cache = alloc_percpu(struct ip_tunnel_dst);
+ if (!tunnel->dst_cache) {
+ free_percpu(dev->tstats);
+ return -ENOMEM;
+ }
+
+ for_each_possible_cpu(i) {
+ struct ip_tunnel_dst *idst = per_cpu_ptr(tunnel->dst_cache, i);
+ idst->dst = NULL;
+ spin_lock_init(&idst->lock);
+ }
+
err = gro_cells_init(&tunnel->gro_cells, dev);
if (err) {
+ free_percpu(tunnel->dst_cache);
free_percpu(dev->tstats);
return err;
}
@@ -1061,9 +1084,6 @@ int ip_tunnel_init(struct net_device *dev)
iph->version = 4;
iph->ihl = 5;
- tunnel->dst_cache = NULL;
- spin_lock_init(&tunnel->dst_lock);
-
return 0;
}
EXPORT_SYMBOL_GPL(ip_tunnel_init);
@@ -1079,7 +1099,7 @@ void ip_tunnel_uninit(struct net_device *dev)
if (itn->fb_tunnel_dev != dev)
ip_tunnel_del(netdev_priv(dev));
- tunnel_dst_reset(tunnel);
+ tunnel_dst_reset_all(tunnel);
}
EXPORT_SYMBOL_GPL(ip_tunnel_uninit);