author	Kuniyuki Iwashima <kuniyu@amazon.com>	2024-03-25 13:24:21 -0700
committer	Jakub Kicinski <kuba@kernel.org>	2024-03-29 08:28:29 -0700
commit	ad081928a8b0f57f269df999a28087fce6f2b6ce (patch)
tree	a4e69083a76da9a10a6a2a0de1a13c19f773911c /net/unix
parent	77e5593aebba823bcbcf2c4b58b07efcd63933b8 (diff)
download	linux-stable-ad081928a8b0f57f269df999a28087fce6f2b6ce.tar.gz
	linux-stable-ad081928a8b0f57f269df999a28087fce6f2b6ce.tar.bz2
	linux-stable-ad081928a8b0f57f269df999a28087fce6f2b6ce.zip
af_unix: Avoid Tarjan's algorithm if unnecessary.
Once a cyclic reference is formed, we need to run GC to check if there is a dead SCC.

However, we do not need to run Tarjan's algorithm if we know that the shape of the inflight graph has not changed.

If an edge is added/updated/deleted and the edge's successor is inflight, we set unix_graph_grouped to false, which means we need to re-classify the SCCs. Once we finalise the SCCs, we set unix_graph_grouped to true.

While unix_graph_grouped is true, we can iterate the grouped SCCs using vertex->scc_entry in unix_walk_scc_fast().

The list_add() and list_for_each_entry_reverse() uses may look odd, but they keep the vertex order consistent and make writing tests easier.

Signed-off-by: Kuniyuki Iwashima <kuniyu@amazon.com>
Acked-by: Paolo Abeni <pabeni@redhat.com>
Link: https://lore.kernel.org/r/20240325202425.60930-12-kuniyu@amazon.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
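
As a rough illustration of the idea behind unix_walk_scc_fast(), here is a minimal userspace sketch; it is not the in-tree code. The names (struct vertex, scc_next, walk_scc_fast) are hypothetical simplifications, and a plain pointer stands in for the kernel's list_head scc_entry ring. It assumes a prior full pass has already linked each SCC's members into a circular ring, so the fast path can enumerate every SCC by following the rings instead of re-running Tarjan's algorithm.

/*
 * Simplified userspace sketch: each vertex carries a pointer that a
 * previous full (Tarjan) pass left as a circular ring tying together
 * all members of its SCC.  Once the rings exist, the "fast" walk only
 * follows them.
 */
#include <stdio.h>

struct vertex {
	int id;
	struct vertex *scc_next;	/* circular ring over the vertex's SCC */
};

/* Pretend a previous Tarjan pass grouped {0,1,2} and {3,4} into SCCs. */
static void group_sccs(struct vertex v[])
{
	v[0].scc_next = &v[1];
	v[1].scc_next = &v[2];
	v[2].scc_next = &v[0];
	v[3].scc_next = &v[4];
	v[4].scc_next = &v[3];
}

/* Fast walk: enumerate each already-grouped SCC without re-running Tarjan. */
static void walk_scc_fast(struct vertex v[], int nr)
{
	int visited[16] = {0};

	for (int i = 0; i < nr; i++) {
		struct vertex *cur = &v[i];

		if (visited[i])
			continue;

		printf("SCC:");
		do {
			visited[cur->id] = 1;
			printf(" %d", cur->id);
			cur = cur->scc_next;
		} while (cur != &v[i]);
		printf("\n");
	}
}

int main(void)
{
	struct vertex v[5];

	for (int i = 0; i < 5; i++)
		v[i].id = i;

	group_sccs(v);
	walk_scc_fast(v, 5);	/* prints "SCC: 0 1 2" and "SCC: 3 4" */
	return 0;
}

In the actual patch, the ring is the list_head scc_entry that unix_walk_scc() leaves behind, and __unix_gc() only falls back to the full Tarjan walk when unix_update_graph() has cleared unix_graph_grouped.
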
Diffstat (limited to 'net/unix')
-rw-r--r--	net/unix/garbage.c	28
1 file changed, 27 insertions(+), 1 deletion(-)
diff --git a/net/unix/garbage.c b/net/unix/garbage.c
index 5a1fae78d6dc..654aa8e30a8b 100644
--- a/net/unix/garbage.c
+++ b/net/unix/garbage.c
@@ -113,6 +113,7 @@ static struct unix_vertex *unix_edge_successor(struct unix_edge *edge)
}
static bool unix_graph_maybe_cyclic;
+static bool unix_graph_grouped;
static void unix_update_graph(struct unix_vertex *vertex)
{
@@ -123,6 +124,7 @@ static void unix_update_graph(struct unix_vertex *vertex)
return;
unix_graph_maybe_cyclic = true;
+ unix_graph_grouped = false;
}
static LIST_HEAD(unix_unvisited_vertices);
@@ -144,6 +146,7 @@ static void unix_add_edge(struct scm_fp_list *fpl, struct unix_edge *edge)
vertex->index = unix_vertex_unvisited_index;
vertex->out_degree = 0;
INIT_LIST_HEAD(&vertex->edges);
+ INIT_LIST_HEAD(&vertex->scc_entry);
list_move_tail(&vertex->entry, &unix_unvisited_vertices);
edge->predecessor->vertex = vertex;
@@ -418,6 +421,26 @@ static void unix_walk_scc(void)
list_replace_init(&unix_visited_vertices, &unix_unvisited_vertices);
swap(unix_vertex_unvisited_index, unix_vertex_grouped_index);
+
+ unix_graph_grouped = true;
+}
+
+static void unix_walk_scc_fast(void)
+{
+ while (!list_empty(&unix_unvisited_vertices)) {
+ struct unix_vertex *vertex;
+ struct list_head scc;
+
+ vertex = list_first_entry(&unix_unvisited_vertices, typeof(*vertex), entry);
+ list_add(&scc, &vertex->scc_entry);
+
+ list_for_each_entry_reverse(vertex, &scc, scc_entry)
+ list_move_tail(&vertex->entry, &unix_visited_vertices);
+
+ list_del(&scc);
+ }
+
+ list_replace_init(&unix_visited_vertices, &unix_unvisited_vertices);
}
static LIST_HEAD(gc_candidates);
@@ -570,7 +593,10 @@ static void __unix_gc(struct work_struct *work)
if (!unix_graph_maybe_cyclic)
goto skip_gc;
- unix_walk_scc();
+ if (unix_graph_grouped)
+ unix_walk_scc_fast();
+ else
+ unix_walk_scc();
/* First, select candidates for garbage collection. Only
* in-flight sockets are considered, and from those only ones