author		David S. Miller <davem@davemloft.net>	2010-04-27 15:05:31 -0700
committer	David S. Miller <davem@davemloft.net>	2010-04-27 15:11:48 -0700
commit		c58dc01babfd58ec9e71a6ce080150dc27755d88
tree		065c58b5236ea23ff0868a6bbf3c5233b990f0be /include/net/sock.h
parent		c4ee6a5348102b9cea49fb9adf88307445407911
net: Make RFS socket operations not be inet specific.
Idea from Eric Dumazet.
As for placement inside of struct sock, I tried to choose a place
that otherwise has a 32-bit hole on 64-bit systems.
Signed-off-by: David S. Miller <davem@davemloft.net>
Acked-by: Eric Dumazet <eric.dumazet@gmail.com>
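
For readers unfamiliar with structure padding, the following is a minimal userspace sketch (not from this commit; the structures and field names are invented for illustration) of the "32-bit hole on 64-bit systems" the commit message refers to:

#include <stdint.h>
#include <stdio.h>

/*
 * Illustrative only -- these are not kernel structures.  On an LP64
 * target, pointers are 8 bytes and int is 4, so the compiler inserts
 * 4 bytes of padding after "rcvlowat" to align "ptr_b".  A 32-bit
 * field dropped into that hole (as sk_rxhash is, after sk_rcvlowat)
 * costs no additional space.
 */
struct with_hole {
	void	*ptr_a;		/* offset 0,  size 8 */
	int	rcvlowat;	/* offset 8,  size 4; 4 padding bytes follow */
	void	*ptr_b;		/* offset 16, size 8 */
};

struct hole_filled {
	void	*ptr_a;		/* offset 0,  size 8 */
	int	rcvlowat;	/* offset 8,  size 4 */
	uint32_t rxhash;	/* offset 12, size 4 -- fills the hole */
	void	*ptr_b;		/* offset 16, size 8 */
};

int main(void)
{
	/* Both typically print 24: the extra 32-bit field is free. */
	printf("%zu %zu\n", sizeof(struct with_hole),
	       sizeof(struct hole_filled));
	return 0;
}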
Diffstat (limited to 'include/net/sock.h')
-rw-r--r--	include/net/sock.h	38
1 file changed, 38 insertions(+), 0 deletions(-)
diff --git a/include/net/sock.h b/include/net/sock.h
index 4081db86a352..07822280d953 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -198,6 +198,7 @@ struct sock_common {
  *	@sk_rcvlowat: %SO_RCVLOWAT setting
  *	@sk_rcvtimeo: %SO_RCVTIMEO setting
  *	@sk_sndtimeo: %SO_SNDTIMEO setting
+ *	@sk_rxhash: flow hash received from netif layer
  *	@sk_filter: socket filtering instructions
  *	@sk_protinfo: private area, net family specific, when not using slab
  *	@sk_timer: sock cleanup timer
@@ -279,6 +280,9 @@ struct sock {
 	int			sk_gso_type;
 	unsigned int		sk_gso_max_size;
 	int			sk_rcvlowat;
+#ifdef CONFIG_RPS
+	__u32			sk_rxhash;
+#endif
 	unsigned long		sk_flags;
 	unsigned long		sk_lingertime;
 	struct sk_buff_head	sk_error_queue;
@@ -620,6 +624,40 @@ static inline int sk_backlog_rcv(struct sock *sk, struct sk_buff *skb)
 	return sk->sk_backlog_rcv(sk, skb);
 }
 
+static inline void sock_rps_record_flow(const struct sock *sk)
+{
+#ifdef CONFIG_RPS
+	struct rps_sock_flow_table *sock_flow_table;
+
+	rcu_read_lock();
+	sock_flow_table = rcu_dereference(rps_sock_flow_table);
+	rps_record_sock_flow(sock_flow_table, sk->sk_rxhash);
+	rcu_read_unlock();
+#endif
+}
+
+static inline void sock_rps_reset_flow(const struct sock *sk)
+{
+#ifdef CONFIG_RPS
+	struct rps_sock_flow_table *sock_flow_table;
+
+	rcu_read_lock();
+	sock_flow_table = rcu_dereference(rps_sock_flow_table);
+	rps_reset_sock_flow(sock_flow_table, sk->sk_rxhash);
+	rcu_read_unlock();
+#endif
+}
+
+static inline void sock_rps_save_rxhash(struct sock *sk, u32 rxhash)
+{
+#ifdef CONFIG_RPS
+	if (unlikely(sk->sk_rxhash != rxhash)) {
+		sock_rps_reset_flow(sk);
+		sk->sk_rxhash = rxhash;
+	}
+#endif
+}
+
 #define sk_wait_event(__sk, __timeo, __condition)		\
 	({	int __rc;					\
 		release_sock(__sk);				\
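
For orientation, here is a hedged sketch of how a protocol might consume these helpers. The example_* functions below are hypothetical and not part of this diff; they only mirror the general pattern of protocol receive paths:

#include <net/sock.h>

/*
 * Hypothetical callers, invented for illustration.  A protocol's
 * packet input path saves the flow hash the netif layer computed for
 * the skb, and its recvmsg path records the flow so Receive Flow
 * Steering can steer later packets of this flow toward the CPU the
 * application last consumed data on.
 */
static int example_do_rcv(struct sock *sk, struct sk_buff *skb)
{
	sock_rps_save_rxhash(sk, skb->rxhash);	/* keep sk_rxhash current */
	return sk_backlog_rcv(sk, skb);
}

static int example_recvmsg(struct sock *sk)
{
	sock_rps_record_flow(sk);	/* note where this flow is consumed */
	/* ... normal receive processing would follow ... */
	return 0;
}

Because the helpers take struct sock rather than struct inet_sock, any protocol family can use them, which is the point of the change.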