author | Eric Dumazet <edumazet@google.com> | 2024-04-21 17:52:48 +0000 |
---|---|---|
committer | Jakub Kicinski <kuba@kernel.org> | 2024-04-23 19:02:24 -0700 |
commit | 3584718cf2ec7e79b6814f2596dcf398c5fb2eca (patch) | |
tree | fb22eed338d4551788705bea0b54eb2dbcde456e /include | |
parent | a44f2eb106a46f2275a79de54ce0ea63e4f3d8c8 (diff) | |
download | linux-stable-3584718cf2ec7e79b6814f2596dcf398c5fb2eca.tar.gz linux-stable-3584718cf2ec7e79b6814f2596dcf398c5fb2eca.tar.bz2 linux-stable-3584718cf2ec7e79b6814f2596dcf398c5fb2eca.zip |
net: fix sk_memory_allocated_{add|sub} vs softirqs
Jonathan Heathcote reported a regression on the aarch64 architecture
caused by the blamed commit.
x86 happens to have irq-safe __this_cpu_add_return()
and __this_cpu_sub(), but this is not guaranteed on other architectures.
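To make the hazard concrete, here is a minimal userspace sketch (illustrative only, not kernel code; the variable name is a stand-in) of the plain read-modify-write that a non-irq-safe __this_cpu_add_return() boils down to on arm64:

```c
/* Illustrative userspace analogue, NOT kernel code: on arm64,
 * __this_cpu_add_return() compiles to a plain load/add/store, so a
 * softirq arriving between the load and the store loses its update.
 */
static long per_cpu_fw_alloc;   /* stand-in for *sk->sk_prot->per_cpu_fw_alloc */

static long unsafe_add_return(long amt)
{
        long v = per_cpu_fw_alloc;  /* load                                    */
        v += amt;                   /* modify                                  */
                                    /* <-- a softirq running here does its own */
                                    /*     load/add/store on the same counter  */
        per_cpu_fw_alloc = v;       /* store: overwrites the softirq's update  */
        return v;
}
```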
I think my confusion came from the "struct sock" argument,
because these helpers are called with a locked socket.
But the memory accounting is per-proto (and per-cpu after
the blamed commit). We might clean up these helpers later
to directly accept a "struct proto *proto" argument, as sketched below.
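A sketch of what that later cleanup might look like (hypothetical; proto_memory_allocated_add() does not exist in this patch), built from the helpers the diff below introduces:

```c
/* Hypothetical follow-up shape, not part of this patch: take the
 * proto directly instead of digging it out of the socket.
 */
static inline void proto_memory_allocated_add(struct proto *proto, int val)
{
        val = this_cpu_add_return(*proto->per_cpu_fw_alloc, val);

        if (unlikely(val >= READ_ONCE(sysctl_mem_pcpu_rsv)))
                proto_memory_pcpu_drain(proto);
}
```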
Switch to this_cpu_add_return() and this_cpu_xchg()
operations, and get rid of the preempt_disable()/preempt_enable() pairs.
The fast path becomes a bit faster as a result :)
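The key to the fix is the grab-and-reset semantics of this_cpu_xchg(): the old value is taken and the per-cpu counter zeroed in one irq-safe step, so no update is lost even if a softirq slips in around the drain. A rough userspace analogue (a sketch; C11 atomics stand in for the irq-safe per-cpu op, and both variable names are illustrative):

```c
#include <stdatomic.h>

static _Atomic long local_reserve;     /* stand-in for *per_cpu_fw_alloc  */
static _Atomic long memory_allocated;  /* stand-in for the global counter */

static void drain(void)
{
        /* Atomically take the current reserve and reset it to zero; a
         * concurrent increment lands either before the exchange (and is
         * drained now) or after it (and is drained next time).
         */
        long val = atomic_exchange(&local_reserve, 0);

        if (val)
                atomic_fetch_add(&memory_allocated, val);
}
```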
Many thanks to Jonathan Heathcote for his awesome report and
investigations.
Fixes: 3cd3399dd7a8 ("net: implement per-cpu reserves for memory_allocated")
Reported-by: Jonathan Heathcote <jonathan.heathcote@bbc.co.uk>
Closes: https://lore.kernel.org/netdev/VI1PR01MB42407D7947B2EA448F1E04EFD10D2@VI1PR01MB4240.eurprd01.prod.exchangelabs.com/
Signed-off-by: Eric Dumazet <edumazet@google.com>
Acked-by: Soheil Hassas Yeganeh <soheil@google.com>
Reviewed-by: Shakeel Butt <shakeel.butt@linux.dev>
Link: https://lore.kernel.org/r/20240421175248.1692552-1-edumazet@google.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Diffstat (limited to 'include')
-rw-r--r-- | include/net/sock.h | 38 |
1 file changed, 20 insertions(+), 18 deletions(-)
```diff
diff --git a/include/net/sock.h b/include/net/sock.h
index f57bfd8a2ad2..b4b553df7870 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -1410,32 +1410,34 @@ sk_memory_allocated(const struct sock *sk)
 #define SK_MEMORY_PCPU_RESERVE (1 << (20 - PAGE_SHIFT))
 extern int sysctl_mem_pcpu_rsv;
 
+static inline void proto_memory_pcpu_drain(struct proto *proto)
+{
+        int val = this_cpu_xchg(*proto->per_cpu_fw_alloc, 0);
+
+        if (val)
+                atomic_long_add(val, proto->memory_allocated);
+}
+
 static inline void
-sk_memory_allocated_add(struct sock *sk, int amt)
+sk_memory_allocated_add(const struct sock *sk, int val)
 {
-        int local_reserve;
+        struct proto *proto = sk->sk_prot;
 
-        preempt_disable();
-        local_reserve = __this_cpu_add_return(*sk->sk_prot->per_cpu_fw_alloc, amt);
-        if (local_reserve >= READ_ONCE(sysctl_mem_pcpu_rsv)) {
-                __this_cpu_sub(*sk->sk_prot->per_cpu_fw_alloc, local_reserve);
-                atomic_long_add(local_reserve, sk->sk_prot->memory_allocated);
-        }
-        preempt_enable();
+        val = this_cpu_add_return(*proto->per_cpu_fw_alloc, val);
+
+        if (unlikely(val >= READ_ONCE(sysctl_mem_pcpu_rsv)))
+                proto_memory_pcpu_drain(proto);
 }
 
 static inline void
-sk_memory_allocated_sub(struct sock *sk, int amt)
+sk_memory_allocated_sub(const struct sock *sk, int val)
 {
-        int local_reserve;
+        struct proto *proto = sk->sk_prot;
 
-        preempt_disable();
-        local_reserve = __this_cpu_sub_return(*sk->sk_prot->per_cpu_fw_alloc, amt);
-        if (local_reserve <= -READ_ONCE(sysctl_mem_pcpu_rsv)) {
-                __this_cpu_sub(*sk->sk_prot->per_cpu_fw_alloc, local_reserve);
-                atomic_long_add(local_reserve, sk->sk_prot->memory_allocated);
-        }
-        preempt_enable();
+        val = this_cpu_sub_return(*proto->per_cpu_fw_alloc, val);
+
+        if (unlikely(val <= -READ_ONCE(sysctl_mem_pcpu_rsv)))
+                proto_memory_pcpu_drain(proto);
 }
 
 #define SK_ALLOC_PERCPU_COUNTER_BATCH 16
```
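For scale: SK_MEMORY_PCPU_RESERVE is 1 << (20 - PAGE_SHIFT) pages, i.e. 1 MiB worth of pages, and each CPU's reserve is drained once it crosses sysctl_mem_pcpu_rsv in either direction, so the global memory_allocated counter can lag the true total by at most roughly nr_cpus times that threshold. A back-of-envelope check (a sketch, assuming 4 KiB pages, the default threshold, and an example 64-CPU machine):

```c
#include <stdio.h>

int main(void)
{
        const int page_shift = 12;                       /* 4 KiB pages        */
        const long rsv_pages = 1L << (20 - page_shift);  /* 256 pages          */
        const long rsv_bytes = rsv_pages << page_shift;  /* 1 MiB              */
        const int nr_cpus = 64;                          /* example machine    */

        /* Each CPU keeps up to ~rsv_pages unflushed in either direction,
         * so the global counter can deviate from the true sum by at most
         * about nr_cpus * rsv_pages.
         */
        printf("per-cpu reserve: %ld pages (%ld bytes)\n", rsv_pages, rsv_bytes);
        printf("worst-case global drift: %ld pages\n", nr_cpus * rsv_pages);
        return 0;
}
```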