1

af_unix: Add dead flag to struct scm_fp_list.

Commit 1af2dface5 ("af_unix: Don't access successor in unix_del_edges()
during GC.") fixed a use-after-free by avoiding access to edge->successor
while GC is in progress.

However, there could be a small race window where another process could
call unix_del_edges() while gc_in_progress is true and __skb_queue_purge()
is on the way.

So, we need another marker for struct scm_fp_list which indicates if the
skb is garbage-collected.

This patch adds a dead flag to struct scm_fp_list and sets it to true
before calling __skb_queue_purge().

Fixes: 1af2dface5 ("af_unix: Don't access successor in unix_del_edges() during GC.")
Signed-off-by: Kuniyuki Iwashima <kuniyu@amazon.com>
Acked-by: Paolo Abeni <pabeni@redhat.com>
Link: https://lore.kernel.org/r/20240508171150.50601-1-kuniyu@amazon.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
This commit is contained in:
Kuniyuki Iwashima 2024-05-08 10:11:50 -07:00 committed by Jakub Kicinski
parent 84c8b7ad5e
commit 7172dc93d6
3 changed files with 12 additions and 4 deletions

View File

@@ -33,6 +33,7 @@ struct scm_fp_list {
 	short			max;
 #ifdef CONFIG_UNIX
 	bool			inflight;
+	bool			dead;
 	struct list_head	vertices;
 	struct unix_edge	*edges;
 #endif

View File

@@ -91,6 +91,7 @@ static int scm_fp_copy(struct cmsghdr *cmsg, struct scm_fp_list **fplp)
 	fpl->user = NULL;
 #if IS_ENABLED(CONFIG_UNIX)
 	fpl->inflight = false;
+	fpl->dead = false;
 	fpl->edges = NULL;
 	INIT_LIST_HEAD(&fpl->vertices);
 #endif

View File

@@ -158,13 +158,11 @@ static void unix_add_edge(struct scm_fp_list *fpl, struct unix_edge *edge)
 	unix_update_graph(unix_edge_successor(edge));
 }
 
-static bool gc_in_progress;
-
 static void unix_del_edge(struct scm_fp_list *fpl, struct unix_edge *edge)
 {
 	struct unix_vertex *vertex = edge->predecessor->vertex;
 
-	if (!gc_in_progress)
+	if (!fpl->dead)
 		unix_update_graph(unix_edge_successor(edge));
 
 	list_del(&edge->vertex_entry);
@@ -240,7 +238,7 @@ void unix_del_edges(struct scm_fp_list *fpl)
 		unix_del_edge(fpl, edge);
 	} while (i < fpl->count_unix);
 
-	if (!gc_in_progress) {
+	if (!fpl->dead) {
 		receiver = fpl->edges[0].successor;
 		receiver->scm_stat.nr_unix_fds -= fpl->count_unix;
 	}
@@ -559,9 +557,12 @@ static void unix_walk_scc_fast(struct sk_buff_head *hitlist)
 	list_replace_init(&unix_visited_vertices, &unix_unvisited_vertices);
 }
 
+static bool gc_in_progress;
+
 static void __unix_gc(struct work_struct *work)
 {
 	struct sk_buff_head hitlist;
+	struct sk_buff *skb;
 
 	spin_lock(&unix_gc_lock);
@@ -579,6 +580,11 @@ static void __unix_gc(struct work_struct *work)
 
 	spin_unlock(&unix_gc_lock);
 
+	skb_queue_walk(&hitlist, skb) {
+		if (UNIXCB(skb).fp)
+			UNIXCB(skb).fp->dead = true;
+	}
+
 	__skb_queue_purge(&hitlist);
 skip_gc:
 	WRITE_ONCE(gc_in_progress, false);