| field | value | date |
|---|---|---|
| author | Pravin B Shelar <pshelar@nicira.com> | 2013-10-29 17:22:21 -0700 |
| committer | Jesse Gross <jesse@nicira.com> | 2014-01-06 15:52:24 -0800 |
| commit | e298e505700604c97e6a9edb21cebb080bdb91f6 (patch) | |
| tree | d2f4452ed7a4ec42a212690ac2633b006c1e10c0 /net/openvswitch/flow_table.c | |
| parent | 795449d8b846a42d11d47d6ff2f51ab2967411c3 (diff) | |
openvswitch: Per cpu flow stats.
With the megaflow implementation, an OVS flow can be shared between
multiple CPUs, which makes stats updates a highly contended operation.
This patch uses per-CPU stats in cases where a flow is likely to be
shared (if there is a wildcard in the 5-tuple, the flow is likely to
be spread across CPUs by RSS). In other situations it keeps the
current strategy, saving memory and allocation time.
Signed-off-by: Pravin B Shelar <pshelar@nicira.com>
Signed-off-by: Jesse Gross <jesse@nicira.com>
Diffstat (limited to 'net/openvswitch/flow_table.c')
-rw-r--r-- net/openvswitch/flow_table.c | 31
1 file changed, 29 insertions, 2 deletions
```diff
diff --git a/net/openvswitch/flow_table.c b/net/openvswitch/flow_table.c
index 7b9cf2c43813..299ea8bb400c 100644
--- a/net/openvswitch/flow_table.c
+++ b/net/openvswitch/flow_table.c
@@ -72,19 +72,42 @@ void ovs_flow_mask_key(struct sw_flow_key *dst, const struct sw_flow_key *src,
 		*d++ = *s++ & *m++;
 }
 
-struct sw_flow *ovs_flow_alloc(void)
+struct sw_flow *ovs_flow_alloc(bool percpu_stats)
 {
 	struct sw_flow *flow;
+	int cpu;
 
 	flow = kmem_cache_alloc(flow_cache, GFP_KERNEL);
 	if (!flow)
 		return ERR_PTR(-ENOMEM);
 
-	spin_lock_init(&flow->lock);
 	flow->sf_acts = NULL;
 	flow->mask = NULL;
+	flow->stats.is_percpu = percpu_stats;
+
+	if (!percpu_stats) {
+		flow->stats.stat = kzalloc(sizeof(*flow->stats.stat), GFP_KERNEL);
+		if (!flow->stats.stat)
+			goto err;
+
+		spin_lock_init(&flow->stats.stat->lock);
+	} else {
+		flow->stats.cpu_stats = alloc_percpu(struct flow_stats);
+		if (!flow->stats.cpu_stats)
+			goto err;
+
+		for_each_possible_cpu(cpu) {
+			struct flow_stats *cpu_stats;
+
+			cpu_stats = per_cpu_ptr(flow->stats.cpu_stats, cpu);
+			spin_lock_init(&cpu_stats->lock);
+		}
+	}
 
 	return flow;
+err:
+	kfree(flow);
+	return ERR_PTR(-ENOMEM);
 }
 
 int ovs_flow_tbl_count(struct flow_table *table)
@@ -118,6 +141,10 @@ static struct flex_array *alloc_buckets(unsigned int n_buckets)
 static void flow_free(struct sw_flow *flow)
 {
 	kfree((struct sf_flow_acts __force *)flow->sf_acts);
+	if (flow->stats.is_percpu)
+		free_percpu(flow->stats.cpu_stats);
+	else
+		kfree(flow->stats.stat);
 	kmem_cache_free(flow_cache, flow);
 }
 
```
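To make the allocation split above concrete, here is a small user-space sketch of the same idea, not kernel code: the struct names mirror the diff, but `NR_MODEL_CPUS`, `model_flow_alloc_stats()` and the pthread mutexes are hypothetical stand-ins for the possible-CPU mask, the stats setup in `ovs_flow_alloc()`, and the per-stat spinlocks. Per the commit message, the caller (not part of this file) would pass `percpu_stats = true` when the 5-tuple is wildcarded, since RSS will then spread packets for the flow across CPUs.

```c
/*
 * Hedged user-space model of the per-CPU flow stats allocation, not the
 * kernel implementation: kzalloc/alloc_percpu are replaced by calloc and
 * the "CPUs" are just a fixed-size array of stat blocks.
 */
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <pthread.h>

#define NR_MODEL_CPUS 4			/* stand-in for the possible-CPU mask */

struct flow_stats {
	unsigned long packet_count;
	unsigned long byte_count;
	pthread_mutex_t lock;		/* models the per-stat spinlock */
};

struct sw_flow_stats {
	bool is_percpu;
	union {
		struct flow_stats *stat;	/* single, shared stat block */
		struct flow_stats *cpu_stats;	/* one block per "CPU" */
	};
};

/* Mirrors the shape of ovs_flow_alloc(): one stat block for an exact-match
 * flow, one block per CPU when the flow is expected to be hit from many. */
static int model_flow_alloc_stats(struct sw_flow_stats *s, bool percpu_stats)
{
	int n = percpu_stats ? NR_MODEL_CPUS : 1;
	struct flow_stats *blocks = calloc(n, sizeof(*blocks));

	if (!blocks)
		return -1;

	for (int i = 0; i < n; i++)
		pthread_mutex_init(&blocks[i].lock, NULL);

	s->is_percpu = percpu_stats;
	if (percpu_stats)
		s->cpu_stats = blocks;
	else
		s->stat = blocks;
	return 0;
}

int main(void)
{
	struct sw_flow_stats exact = { 0 }, wildcarded = { 0 };

	/* An exact 5-tuple match is steered to one CPU by RSS, so a single
	 * stat block suffices; a wildcarded flow gets per-CPU blocks. */
	model_flow_alloc_stats(&exact, false);
	model_flow_alloc_stats(&wildcarded, true);

	printf("exact: percpu=%d, wildcarded: percpu=%d\n",
	       exact.is_percpu, wildcarded.is_percpu);

	/* Mirrors flow_free(): release whichever representation was chosen. */
	free(exact.stat);
	free(wildcarded.cpu_stats);
	return 0;
}
```

The trade-off the patch describes falls out of the model: the per-CPU variant removes lock contention on shared flows at the cost of `NR_MODEL_CPUS` times the memory, which is why exact-match flows keep a single block.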