author    Judy Hsiao <judyhsiao@chromium.org>  2023-12-06 03:38:33 +0000
committer David S. Miller <davem@davemloft.net>  2023-12-08 10:37:43 +0000
commit    e5dc5afff62f3e97e86c3643ec9fcad23de4f2d3 (patch)
tree      e4c14162082802b8f862a108210a038b3aa69b99 /net/core
parent    179a8b515e4b8971ae4ad2db36a44f0691fc6756 (diff)
neighbour: Don't let neigh_forced_gc() disable preemption for long
We are seeing cases where neigh_cleanup_and_release() is called by
neigh_forced_gc() many times in a row with preemption turned off. When
running on a low powered CPU at a low CPU frequency, this has been
measured to keep preemption off for ~10 ms. That's not great on a
system with HZ=1000, which expects tasks to be able to schedule in
with ~1 ms latency.

Suggested-by: Douglas Anderson <dianders@chromium.org>
Signed-off-by: Judy Hsiao <judyhsiao@chromium.org>
Reviewed-by: David Ahern <dsahern@kernel.org>
Reviewed-by: Eric Dumazet <edumazet@google.com>
Reviewed-by: Douglas Anderson <dianders@chromium.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/core')
-rw-r--r--  net/core/neighbour.c | 9
1 file changed, 8 insertions(+), 1 deletion(-)
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index df81c1f0a570..552719c3bbc3 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -253,9 +253,11 @@ static int neigh_forced_gc(struct neigh_table *tbl)
 {
 	int max_clean = atomic_read(&tbl->gc_entries) -
 			READ_ONCE(tbl->gc_thresh2);
+	u64 tmax = ktime_get_ns() + NSEC_PER_MSEC;
 	unsigned long tref = jiffies - 5 * HZ;
 	struct neighbour *n, *tmp;
 	int shrunk = 0;
+	int loop = 0;
 
 	NEIGH_CACHE_STAT_INC(tbl, forced_gc_runs);
 
@@ -278,11 +280,16 @@ static int neigh_forced_gc(struct neigh_table *tbl)
 				shrunk++;
 			if (shrunk >= max_clean)
 				break;
+			if (++loop == 16) {
+				if (ktime_get_ns() > tmax)
+					goto unlock;
+				loop = 0;
+			}
 		}
 	}
 
 	WRITE_ONCE(tbl->last_flush, jiffies);
-
+unlock:
 	write_unlock_bh(&tbl->lock);
 
 	return shrunk;
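
For readers who want to experiment with the pattern outside the kernel, below is a
minimal userspace C sketch of the same idea: do the cleanup work in a loop, but
sample a monotonic clock only every 16 iterations and stop once a 1 ms budget is
exhausted. The names process_entry() and NUM_ENTRIES are hypothetical stand-ins
for the per-neighbour work, and clock_gettime(CLOCK_MONOTONIC) plays the role
that ktime_get_ns() plays in the patch; this is an illustration of the technique,
not kernel code.

/*
 * Userspace sketch of a time-budgeted loop: process entries, but check a
 * monotonic clock every 16 iterations and bail out once 1 ms has elapsed.
 */
#include <stdint.h>
#include <stdio.h>
#include <time.h>

#define NSEC_PER_MSEC 1000000ULL
#define NUM_ENTRIES   100000          /* hypothetical amount of work */

static uint64_t monotonic_ns(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000000000ULL + (uint64_t)ts.tv_nsec;
}

/* Hypothetical per-entry work standing in for neigh_cleanup_and_release(). */
static void process_entry(int i)
{
	(void)i;
}

int main(void)
{
	uint64_t tmax = monotonic_ns() + NSEC_PER_MSEC;  /* 1 ms budget */
	int loop = 0;
	int done = 0;

	for (int i = 0; i < NUM_ENTRIES; i++) {
		process_entry(i);
		done++;

		/* Read the clock only every 16 iterations to keep it cheap. */
		if (++loop == 16) {
			if (monotonic_ns() > tmax)
				break;
			loop = 0;
		}
	}

	printf("processed %d entries within the budget\n", done);
	return 0;
}

The design choice mirrors the patch: reading the clock on every iteration would
itself add overhead, so the check is amortised over 16 iterations, which keeps
the worst-case overrun small while bounding how long the work runs uninterrupted.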