From bc212465326e8587325f520a052346f0b57360e6 Mon Sep 17 00:00:00 2001
From: David Howells
Date: Tue, 1 Oct 2024 14:26:58 +0100
Subject: [PATCH] rxrpc: Fix a race between socket set up and I/O thread creation

In rxrpc_open_socket(), it sets up the socket and then sets up the I/O
thread that will handle it.  This is a problem, however, as there's a gap
between the two phases in which a packet may come into rxrpc_encap_rcv()
from the UDP socket, but we may oops when trying to wake the not-yet
created I/O thread.

As a quick fix, just make rxrpc_encap_rcv() discard the packet if there's
no I/O thread yet.

A better, but more intrusive, fix would perhaps be to rearrange things
such that the socket creation is done by the I/O thread.

Fixes: a275da62e8c1 ("rxrpc: Create a per-local endpoint receive queue and I/O thread")
Signed-off-by: David Howells
cc: yuxuanzhe@outlook.com
cc: Marc Dionne
cc: Simon Horman
cc: linux-afs@lists.infradead.org
Reviewed-by: Eric Dumazet
Link: https://patch.msgid.link/20241001132702.3122709-2-dhowells@redhat.com
Signed-off-by: Jakub Kicinski
---
 net/rxrpc/ar-internal.h  |  2 +-
 net/rxrpc/io_thread.c    | 10 ++++++++--
 net/rxrpc/local_object.c |  2 +-
 3 files changed, 10 insertions(+), 4 deletions(-)

diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h
index 80d682f89b23..d0fd37bdcfe9 100644
--- a/net/rxrpc/ar-internal.h
+++ b/net/rxrpc/ar-internal.h
@@ -1056,7 +1056,7 @@ bool rxrpc_direct_abort(struct sk_buff *skb, enum rxrpc_abort_reason why,
 int rxrpc_io_thread(void *data);
 static inline void rxrpc_wake_up_io_thread(struct rxrpc_local *local)
 {
-	wake_up_process(local->io_thread);
+	wake_up_process(READ_ONCE(local->io_thread));
 }
 
 static inline bool rxrpc_protocol_error(struct sk_buff *skb,
 					enum rxrpc_abort_reason why)
diff --git a/net/rxrpc/io_thread.c b/net/rxrpc/io_thread.c
index 0300baa9afcd..07c74c77d802 100644
--- a/net/rxrpc/io_thread.c
+++ b/net/rxrpc/io_thread.c
@@ -27,11 +27,17 @@ int rxrpc_encap_rcv(struct sock *udp_sk, struct sk_buff *skb)
 {
 	struct sk_buff_head *rx_queue;
 	struct rxrpc_local *local = rcu_dereference_sk_user_data(udp_sk);
+	struct task_struct *io_thread;
 
 	if (unlikely(!local)) {
 		kfree_skb(skb);
 		return 0;
 	}
+	io_thread = READ_ONCE(local->io_thread);
+	if (!io_thread) {
+		kfree_skb(skb);
+		return 0;
+	}
 	if (skb->tstamp == 0)
 		skb->tstamp = ktime_get_real();
 
@@ -47,7 +53,7 @@ int rxrpc_encap_rcv(struct sock *udp_sk, struct sk_buff *skb)
 #endif
 
 	skb_queue_tail(rx_queue, skb);
-	rxrpc_wake_up_io_thread(local);
+	wake_up_process(io_thread);
 	return 0;
 }
 
@@ -565,7 +571,7 @@ int rxrpc_io_thread(void *data)
 	__set_current_state(TASK_RUNNING);
 	rxrpc_see_local(local, rxrpc_local_stop);
 	rxrpc_destroy_local(local);
-	local->io_thread = NULL;
+	WRITE_ONCE(local->io_thread, NULL);
 	rxrpc_see_local(local, rxrpc_local_stopped);
 	return 0;
 }
diff --git a/net/rxrpc/local_object.c b/net/rxrpc/local_object.c
index 504453c688d7..f9623ace2201 100644
--- a/net/rxrpc/local_object.c
+++ b/net/rxrpc/local_object.c
@@ -232,7 +232,7 @@ static int rxrpc_open_socket(struct rxrpc_local *local, struct net *net)
 	}
 
 	wait_for_completion(&local->io_thread_ready);
-	local->io_thread = io_thread;
+	WRITE_ONCE(local->io_thread, io_thread);
 	_leave(" = 0");
 	return 0;