author     Pablo Neira Ayuso <pablo@netfilter.org>   2019-07-19 18:20:13 +0200
committer  David S. Miller <davem@davemloft.net>     2019-07-19 21:27:45 -0700
commit     aef833c58d321f09ae4ce4467723542842ba9faf
tree       2d2567ee29b74b37e128ce44bb8c7147e175db56
parent     9a2f97bb8ddddbf655ce1fcdf688dcec19deb59f
net: openvswitch: rename flow_stats to sw_flow_stats
There is a flow_stats structure defined in include/net/flow_offload.h, and a
follow-up patch adds #include <net/flow_offload.h> to net/sch_generic.h.

This breaks compilation, since the OVS codebase includes net/sock.h, which
pulls in linux/filter.h, which in turn includes net/sch_generic.h:

    In file included from ./include/net/sch_generic.h:18:0,
                     from ./include/linux/filter.h:25,
                     from ./include/net/sock.h:59,
                     from ./include/linux/tcp.h:19,
                     from net/openvswitch/datapath.c:24

The core definition takes precedence over the OVS one since it lives in the
networking core, so rename flow_stats in OVS to sw_flow_stats, since this
structure is contained in struct sw_flow.

Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
Acked-by: Jiri Pirko <jiri@mellanox.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
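For context, a small standalone sketch of the tag clash this rename resolves is
shown below. It is not taken from the kernel headers, and the field names of the
core struct are an assumption based on the flow_offload.h of that era. Before the
rename, both definitions could land in the same translation unit and the compiler
rejected the second one as a redefinition of 'struct flow_stats'; with the OVS
copy under its own tag, the two coexist:

/* Hypothetical standalone sketch -- not the real kernel headers. */

typedef unsigned long long u64;         /* stand-in for the kernel's u64 */

/* Core offload statistics, roughly as defined in include/net/flow_offload.h
 * at the time (field names assumed).
 */
struct flow_stats {
	u64 pkts;
	u64 bytes;
	u64 lastused;
};

/* OVS per-CPU flow statistics after this patch: a distinct tag, so this
 * definition no longer collides with the one above when both headers are
 * pulled into one translation unit.
 */
struct sw_flow_stats {
	u64 packet_count;               /* Number of packets matched. */
	u64 byte_count;                 /* Number of bytes matched. */
	unsigned long used;             /* Last used time (in jiffies). */
};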
-rw-r--r--  net/openvswitch/flow.c       | 8
-rw-r--r--  net/openvswitch/flow.h       | 4
-rw-r--r--  net/openvswitch/flow_table.c | 8
3 files changed, 10 insertions(+), 10 deletions(-)
diff --git a/net/openvswitch/flow.c b/net/openvswitch/flow.c
index dca3b1e2acf0..bc89e16e0505 100644
--- a/net/openvswitch/flow.c
+++ b/net/openvswitch/flow.c
@@ -59,7 +59,7 @@ u64 ovs_flow_used_time(unsigned long flow_jiffies)
void ovs_flow_stats_update(struct sw_flow *flow, __be16 tcp_flags,
const struct sk_buff *skb)
{
- struct flow_stats *stats;
+ struct sw_flow_stats *stats;
unsigned int cpu = smp_processor_id();
int len = skb->len + (skb_vlan_tag_present(skb) ? VLAN_HLEN : 0);
@@ -87,7 +87,7 @@ void ovs_flow_stats_update(struct sw_flow *flow, __be16 tcp_flags,
if (likely(flow->stats_last_writer != -1) &&
likely(!rcu_access_pointer(flow->stats[cpu]))) {
/* Try to allocate CPU-specific stats. */
- struct flow_stats *new_stats;
+ struct sw_flow_stats *new_stats;
new_stats =
kmem_cache_alloc_node(flow_stats_cache,
@@ -134,7 +134,7 @@ void ovs_flow_stats_get(const struct sw_flow *flow,
/* We open code this to make sure cpu 0 is always considered */
for (cpu = 0; cpu < nr_cpu_ids; cpu = cpumask_next(cpu, &flow->cpu_used_mask)) {
- struct flow_stats *stats = rcu_dereference_ovsl(flow->stats[cpu]);
+ struct sw_flow_stats *stats = rcu_dereference_ovsl(flow->stats[cpu]);
if (stats) {
/* Local CPU may write on non-local stats, so we must
@@ -158,7 +158,7 @@ void ovs_flow_stats_clear(struct sw_flow *flow)
/* We open code this to make sure cpu 0 is always considered */
for (cpu = 0; cpu < nr_cpu_ids; cpu = cpumask_next(cpu, &flow->cpu_used_mask)) {
- struct flow_stats *stats = ovsl_dereference(flow->stats[cpu]);
+ struct sw_flow_stats *stats = ovsl_dereference(flow->stats[cpu]);
if (stats) {
spin_lock_bh(&stats->lock);
diff --git a/net/openvswitch/flow.h b/net/openvswitch/flow.h
index 3e2cc2202d66..a5506e2d4b7a 100644
--- a/net/openvswitch/flow.h
+++ b/net/openvswitch/flow.h
@@ -194,7 +194,7 @@ struct sw_flow_actions {
struct nlattr actions[];
};
-struct flow_stats {
+struct sw_flow_stats {
u64 packet_count; /* Number of packets matched. */
u64 byte_count; /* Number of bytes matched. */
unsigned long used; /* Last used time (in jiffies). */
@@ -216,7 +216,7 @@ struct sw_flow {
struct cpumask cpu_used_mask;
struct sw_flow_mask *mask;
struct sw_flow_actions __rcu *sf_acts;
- struct flow_stats __rcu *stats[]; /* One for each CPU. First one
+ struct sw_flow_stats __rcu *stats[]; /* One for each CPU. First one
* is allocated at flow creation time,
* the rest are allocated on demand
* while holding the 'stats[0].lock'.
diff --git a/net/openvswitch/flow_table.c b/net/openvswitch/flow_table.c
index 988fd8a94e43..cf3582c5ed70 100644
--- a/net/openvswitch/flow_table.c
+++ b/net/openvswitch/flow_table.c
@@ -66,7 +66,7 @@ void ovs_flow_mask_key(struct sw_flow_key *dst, const struct sw_flow_key *src,
struct sw_flow *ovs_flow_alloc(void)
{
struct sw_flow *flow;
- struct flow_stats *stats;
+ struct sw_flow_stats *stats;
flow = kmem_cache_zalloc(flow_cache, GFP_KERNEL);
if (!flow)
@@ -110,7 +110,7 @@ static void flow_free(struct sw_flow *flow)
for (cpu = 0; cpu < nr_cpu_ids; cpu = cpumask_next(cpu, &flow->cpu_used_mask))
if (flow->stats[cpu])
kmem_cache_free(flow_stats_cache,
- (struct flow_stats __force *)flow->stats[cpu]);
+ (struct sw_flow_stats __force *)flow->stats[cpu]);
kmem_cache_free(flow_cache, flow);
}
@@ -712,13 +712,13 @@ int ovs_flow_init(void)
flow_cache = kmem_cache_create("sw_flow", sizeof(struct sw_flow)
+ (nr_cpu_ids
- * sizeof(struct flow_stats *)),
+ * sizeof(struct sw_flow_stats *)),
0, 0, NULL);
if (flow_cache == NULL)
return -ENOMEM;
flow_stats_cache
- = kmem_cache_create("sw_flow_stats", sizeof(struct flow_stats),
+ = kmem_cache_create("sw_flow_stats", sizeof(struct sw_flow_stats),
0, SLAB_HWCACHE_ALIGN, NULL);
if (flow_stats_cache == NULL) {
kmem_cache_destroy(flow_cache);