path: root/net/ipv4/inet_hashtables.c
author:    Lorenz Bauer <lmb@isovalent.com>  2023-07-20 17:30:08 +0200
committer: Martin KaFai Lau <martin.lau@kernel.org>  2023-07-25 13:51:44 -0700
commit:    0f495f7617229772403e683033abc473f0f0553c (patch)
tree:      14162556d8b75af77a81d29a147babdbbf9c8735 /net/ipv4/inet_hashtables.c
parent:    ce796e60b3b196b61fcc565df195443cbb846ef0 (diff)
net: remove duplicate reuseport_lookup functions
There are currently four copies of reuseport_lookup: one each for (TCP, UDP)x(IPv4, IPv6). This forces us to duplicate all callers of those functions as well. This is already the case for sk_lookup helpers (inet,inet6,udp4,udp6)_lookup_run_bpf.

There are two differences between the reuseport_lookup helpers:

1. They call different hash functions depending on protocol
2. UDP reuseport_lookup checks that sk_state != TCP_ESTABLISHED

Move the check for sk_state into the caller and use the INDIRECT_CALL infrastructure to cut down the helpers to one per IP version.

Reviewed-by: Kuniyuki Iwashima <kuniyu@amazon.com>
Signed-off-by: Lorenz Bauer <lmb@isovalent.com>
Link: https://lore.kernel.org/r/20230720-so-reuseport-v6-4-7021b683cdae@isovalent.com
Signed-off-by: Martin KaFai Lau <martin.lau@kernel.org>
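For context, a UDP caller of the consolidated helper is expected to look roughly like the sketch below. This is illustrative only and not part of this diff (the name example_udp_lookup is hypothetical): the sk_state != TCP_ESTABLISHED check that used to live in UDP's reuseport_lookup moves into the caller, and udp_ehashfn is passed so the shared helper hashes with the UDP secret.

/* Illustrative sketch only -- not part of this patch. The function name
 * example_udp_lookup is hypothetical; it shows how a UDP caller can use
 * the consolidated inet_lookup_reuseport(): the caller skips reuseport
 * selection for connected sockets and supplies udp_ehashfn as the hash.
 */
static struct sock *example_udp_lookup(struct net *net, struct sock *sk,
				       struct sk_buff *skb,
				       __be32 saddr, __be16 sport,
				       __be32 daddr, unsigned short hnum)
{
	struct sock *reuse_sk;

	/* A connected UDP socket keeps its 4-tuple match; do not rebalance
	 * it across the reuseport group.
	 */
	if (sk->sk_state == TCP_ESTABLISHED)
		return sk;

	reuse_sk = inet_lookup_reuseport(net, sk, skb, sizeof(struct udphdr),
					 saddr, sport, daddr, hnum, udp_ehashfn);
	return reuse_sk ? : sk;
}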
Diffstat (limited to 'net/ipv4/inet_hashtables.c')
-rw-r--r--  net/ipv4/inet_hashtables.c  20
1 file changed, 13 insertions(+), 7 deletions(-)
diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
index ecb838460629..0a7a726464d9 100644
--- a/net/ipv4/inet_hashtables.c
+++ b/net/ipv4/inet_hashtables.c
@@ -28,9 +28,9 @@
#include <net/tcp.h>
#include <net/sock_reuseport.h>
-static u32 inet_ehashfn(const struct net *net, const __be32 laddr,
- const __u16 lport, const __be32 faddr,
- const __be16 fport)
+u32 inet_ehashfn(const struct net *net, const __be32 laddr,
+ const __u16 lport, const __be32 faddr,
+ const __be16 fport)
{
static u32 inet_ehash_secret __read_mostly;
@@ -39,6 +39,7 @@ static u32 inet_ehashfn(const struct net *net, const __be32 laddr,
return __inet_ehashfn(laddr, lport, faddr, fport,
inet_ehash_secret + net_hash_mix(net));
}
+EXPORT_SYMBOL_GPL(inet_ehashfn);
/* This function handles inet_sock, but also timewait and request sockets
* for IPv4/IPv6.
@@ -332,16 +333,20 @@ static inline int compute_score(struct sock *sk, struct net *net,
return score;
}
+INDIRECT_CALLABLE_DECLARE(inet_ehashfn_t udp_ehashfn);
+
struct sock *inet_lookup_reuseport(struct net *net, struct sock *sk,
struct sk_buff *skb, int doff,
__be32 saddr, __be16 sport,
- __be32 daddr, unsigned short hnum)
+ __be32 daddr, unsigned short hnum,
+ inet_ehashfn_t *ehashfn)
{
struct sock *reuse_sk = NULL;
u32 phash;
if (sk->sk_reuseport) {
- phash = inet_ehashfn(net, daddr, hnum, saddr, sport);
+ phash = INDIRECT_CALL_2(ehashfn, udp_ehashfn, inet_ehashfn,
+ net, daddr, hnum, saddr, sport);
reuse_sk = reuseport_select_sock(sk, phash, skb, doff);
}
return reuse_sk;
@@ -371,7 +376,7 @@ static struct sock *inet_lhash2_lookup(struct net *net,
score = compute_score(sk, net, hnum, daddr, dif, sdif);
if (score > hiscore) {
result = inet_lookup_reuseport(net, sk, skb, doff,
- saddr, sport, daddr, hnum);
+ saddr, sport, daddr, hnum, inet_ehashfn);
if (result)
return result;
@@ -400,7 +405,8 @@ static inline struct sock *inet_lookup_run_bpf(struct net *net,
if (no_reuseport || IS_ERR_OR_NULL(sk))
return sk;
- reuse_sk = inet_lookup_reuseport(net, sk, skb, doff, saddr, sport, daddr, hnum);
+ reuse_sk = inet_lookup_reuseport(net, sk, skb, doff, saddr, sport, daddr, hnum,
+ inet_ehashfn);
if (reuse_sk)
sk = reuse_sk;
return sk;
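The INDIRECT_CALL_2() used in inet_lookup_reuseport() comes from include/linux/indirect_call_wrapper.h: on CONFIG_RETPOLINE builds it compares the function pointer against the listed candidates and emits direct calls on a match, avoiding the cost of a retpolined indirect call. A simplified sketch of the idea (the EXAMPLE_ macro below is illustrative, not the kernel's definition):

/* Simplified, illustrative version of the INDIRECT_CALL_* pattern; see
 * include/linux/indirect_call_wrapper.h for the real macros. If the
 * pointer matches a known candidate, call it directly; otherwise fall
 * back to the plain indirect call.
 */
#define EXAMPLE_INDIRECT_CALL_2(f, f2, f1, ...)				\
	(likely((f) == (f2)) ? (f2)(__VA_ARGS__) :			\
	 likely((f) == (f1)) ? (f1)(__VA_ARGS__) : (f)(__VA_ARGS__))

With that, the hunk above resolves ehashfn to a direct call to udp_ehashfn or inet_ehashfn in the common cases, so making the hash function a parameter does not add an indirect call on the hot lookup paths.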