2019-05-20 10:08:01 -07:00
|
|
|
// SPDX-License-Identifier: GPL-2.0-or-later
|
2016-09-02 14:39:45 -07:00
|
|
|
/* AF_RXRPC sendmsg() implementation.
|
|
|
|
*
|
|
|
|
* Copyright (C) 2007, 2016 Red Hat, Inc. All Rights Reserved.
|
|
|
|
* Written by David Howells (dhowells@redhat.com)
|
|
|
|
*/
|
|
|
|
|
|
|
|
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
|
|
|
|
|
|
|
|
#include <linux/net.h>
|
|
|
|
#include <linux/gfp.h>
|
|
|
|
#include <linux/skbuff.h>
|
|
|
|
#include <linux/export.h>
|
2017-02-02 11:15:33 -07:00
|
|
|
#include <linux/sched/signal.h>
|
|
|
|
|
2016-09-02 14:39:45 -07:00
|
|
|
#include <net/sock.h>
|
|
|
|
#include <net/af_rxrpc.h>
|
|
|
|
#include "ar-internal.h"
|
|
|
|
|
2022-10-12 14:17:56 -07:00
|
|
|
/*
 * Propose an abort to be made in the I/O thread.
 *
 * Records the reason, error and abort code on the call and pokes the I/O
 * thread to actually effect the abort.  Returns true if the abort was newly
 * proposed; false if an abort is already pending on this call or the call is
 * already complete.
 */
bool rxrpc_propose_abort(struct rxrpc_call *call, s32 abort_code, int error,
			 enum rxrpc_abort_reason why)
{
	_enter("{%d},%d,%d,%u", call->debug_id, abort_code, error, why);

	if (!call->send_abort && !rxrpc_call_is_complete(call)) {
		/* Fill in the auxiliary fields first; the store-release on
		 * ->send_abort below publishes them to the I/O thread.
		 */
		call->send_abort_why = why;
		call->send_abort_err = error;
		call->send_abort_seq = 0;
		/* Request abort locklessly vs rxrpc_input_call_event(). */
		smp_store_release(&call->send_abort, abort_code);
		rxrpc_poke_call(call, rxrpc_call_poke_abort);
		return true;
	}

	return false;
}
|
|
|
|
|
2022-10-19 01:45:43 -07:00
|
|
|
/*
 * Wait for a call to become connected.  Interruption here doesn't cause the
 * call to be aborted.
 *
 * Sleeps (interruptibly or not, according to call->interruptibility) until
 * the call leaves the CLIENT_AWAIT_CONN state, a signal arrives (for
 * interruptible modes) or the timeout in *timeo expires.  *timeo is updated
 * with the remaining time.  Returns 0 on success, a sock_intr_errno() value
 * if interrupted, or call->error if the call completed with an error.
 */
static int rxrpc_wait_to_be_connected(struct rxrpc_call *call, long *timeo)
{
	DECLARE_WAITQUEUE(myself, current);
	int ret = 0;

	_enter("%d", call->debug_id);

	if (rxrpc_call_state(call) != RXRPC_CALL_CLIENT_AWAIT_CONN)
		goto no_wait;

	add_wait_queue_exclusive(&call->waitq, &myself);

	for (;;) {
		/* Pick the sleep state first, then re-check the condition so
		 * a wakeup between the check and the sleep isn't lost.
		 */
		switch (call->interruptibility) {
		case RXRPC_INTERRUPTIBLE:
		case RXRPC_PREINTERRUPTIBLE:
			set_current_state(TASK_INTERRUPTIBLE);
			break;
		case RXRPC_UNINTERRUPTIBLE:
		default:
			set_current_state(TASK_UNINTERRUPTIBLE);
			break;
		}

		if (rxrpc_call_state(call) != RXRPC_CALL_CLIENT_AWAIT_CONN)
			break;
		if ((call->interruptibility == RXRPC_INTERRUPTIBLE ||
		     call->interruptibility == RXRPC_PREINTERRUPTIBLE) &&
		    signal_pending(current)) {
			ret = sock_intr_errno(*timeo);
			break;
		}
		*timeo = schedule_timeout(*timeo);
	}

	remove_wait_queue(&call->waitq, &myself);
	__set_current_state(TASK_RUNNING);

no_wait:
	/* The call may have completed whilst we waited (or before we even
	 * started); report its error if so.
	 */
	if (ret == 0 && rxrpc_call_is_complete(call))
		ret = call->error;

	_leave(" = %d", ret);
	return ret;
}
|
|
|
|
|
2020-03-13 02:05:38 -07:00
|
|
|
/*
|
|
|
|
* Return true if there's sufficient Tx queue space.
|
|
|
|
*/
|
|
|
|
static bool rxrpc_check_tx_space(struct rxrpc_call *call, rxrpc_seq_t *_tx_win)
|
|
|
|
{
|
|
|
|
if (_tx_win)
|
2022-03-31 15:55:08 -07:00
|
|
|
*_tx_win = call->tx_bottom;
|
|
|
|
return call->tx_prepared - call->tx_bottom < 256;
|
2020-03-13 02:05:38 -07:00
|
|
|
}
|
|
|
|
|
2017-10-18 03:07:31 -07:00
|
|
|
/*
 * Wait for space to appear in the Tx queue or a signal to occur.
 *
 * Interruptible variant: loops sleeping until transmit space appears
 * (returns 0), the call completes (returns call->error) or a signal is
 * pending (returns a sock_intr_errno() value).  *timeo is decremented by
 * the time spent asleep.  The caller has already added us to call->waitq
 * and restores TASK_RUNNING afterwards.
 */
static int rxrpc_wait_for_tx_window_intr(struct rxrpc_sock *rx,
					 struct rxrpc_call *call,
					 long *timeo)
{
	for (;;) {
		/* Set the sleep state before checking the conditions so that
		 * a concurrent wakeup can't slip through unnoticed.
		 */
		set_current_state(TASK_INTERRUPTIBLE);
		if (rxrpc_check_tx_space(call, NULL))
			return 0;

		if (rxrpc_call_is_complete(call))
			return call->error;

		if (signal_pending(current))
			return sock_intr_errno(*timeo);

		trace_rxrpc_txqueue(call, rxrpc_txqueue_wait);
		*timeo = schedule_timeout(*timeo);
	}
}
|
|
|
|
|
|
|
|
/*
 * Wait for space to appear in the Tx queue uninterruptibly, but with
 * a timeout of 2*RTT if no progress was made and a signal occurred.
 *
 * Progress is detected by movement of the window base reported by
 * rxrpc_check_tx_space(); whilst progress is being made, the 2*RTT timeout
 * is continually restarted.  Returns 0 when space appears, call->error if
 * the call completed, or -EINTR if the timeout elapsed with no progress
 * and a signal is pending.
 */
static int rxrpc_wait_for_tx_window_waitall(struct rxrpc_sock *rx,
					    struct rxrpc_call *call)
{
	rxrpc_seq_t tx_start, tx_win;
	signed long rtt, timeout;

	/* Derive a timeout of twice the smoothed RTT, floored at 2 jiffies
	 * (srtt_us is kept scaled by 8, hence the shift).
	 */
	rtt = READ_ONCE(call->peer->srtt_us) >> 3;
	rtt = usecs_to_jiffies(rtt) * 2;
	if (rtt < 2)
		rtt = 2;

	timeout = rtt;
	tx_start = smp_load_acquire(&call->acks_hard_ack);

	for (;;) {
		set_current_state(TASK_UNINTERRUPTIBLE);

		if (rxrpc_check_tx_space(call, &tx_win))
			return 0;

		if (rxrpc_call_is_complete(call))
			return call->error;

		/* Give up only if the timer ran out, no ACK progress was made
		 * and the user is trying to interrupt us.
		 */
		if (timeout == 0 &&
		    tx_win == tx_start && signal_pending(current))
			return -EINTR;

		if (tx_win != tx_start) {
			/* The window moved: restart the timeout. */
			timeout = rtt;
			tx_start = tx_win;
		}

		trace_rxrpc_txqueue(call, rxrpc_txqueue_wait);
		timeout = schedule_timeout(timeout);
	}
}
|
|
|
|
|
2020-03-13 02:22:09 -07:00
|
|
|
/*
 * Wait for space to appear in the Tx queue uninterruptibly.
 *
 * Sleeps without regard to signals until space appears (returns 0) or the
 * call completes (returns call->error).  *timeo is decremented by the time
 * spent asleep.  The caller has already added us to call->waitq and
 * restores TASK_RUNNING afterwards.
 */
static int rxrpc_wait_for_tx_window_nonintr(struct rxrpc_sock *rx,
					    struct rxrpc_call *call,
					    long *timeo)
{
	for (;;) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (rxrpc_check_tx_space(call, NULL))
			return 0;

		if (rxrpc_call_is_complete(call))
			return call->error;

		trace_rxrpc_txqueue(call, rxrpc_txqueue_wait);
		*timeo = schedule_timeout(*timeo);
	}
}
|
|
|
|
|
2016-09-02 14:39:45 -07:00
|
|
|
/*
 * wait for space to appear in the transmit/ACK window
 * - caller holds the socket locked
 *
 * Adds the task to the call's waitqueue, then dispatches to the
 * interruptible, wait-all or non-interruptible waiter according to the
 * call's interruptibility mode and the @waitall flag, finally removing
 * itself from the queue again.  Returns 0 or a negative error from the
 * chosen waiter.
 */
static int rxrpc_wait_for_tx_window(struct rxrpc_sock *rx,
				    struct rxrpc_call *call,
				    long *timeo,
				    bool waitall)
{
	DECLARE_WAITQUEUE(myself, current);
	int ret;

	_enter(",{%u,%u,%u,%u}",
	       call->tx_bottom, call->acks_hard_ack, call->tx_top, call->tx_winsize);

	add_wait_queue(&call->waitq, &myself);

	switch (call->interruptibility) {
	case RXRPC_INTERRUPTIBLE:
		if (waitall)
			ret = rxrpc_wait_for_tx_window_waitall(rx, call);
		else
			ret = rxrpc_wait_for_tx_window_intr(rx, call, timeo);
		break;
	case RXRPC_PREINTERRUPTIBLE:
	case RXRPC_UNINTERRUPTIBLE:
	default:
		ret = rxrpc_wait_for_tx_window_nonintr(rx, call, timeo);
		break;
	}

	/* The waiters above may return with the task still in a sleeping
	 * state, so restore TASK_RUNNING here.
	 */
	remove_wait_queue(&call->waitq, &myself);
	set_current_state(TASK_RUNNING);
	_leave(" = %d", ret);
	return ret;
}
|
|
|
|
|
2017-08-29 02:18:56 -07:00
|
|
|
/*
|
|
|
|
* Notify the owner of the call that the transmit phase is ended and the last
|
|
|
|
* packet has been queued.
|
|
|
|
*/
|
|
|
|
static void rxrpc_notify_end_tx(struct rxrpc_sock *rx, struct rxrpc_call *call,
|
|
|
|
rxrpc_notify_end_tx_t notify_end_tx)
|
|
|
|
{
|
|
|
|
if (notify_end_tx)
|
|
|
|
notify_end_tx(&rx->sk, call, call->user_call_ID);
|
|
|
|
}
|
|
|
|
|
2016-09-02 14:39:45 -07:00
|
|
|
/*
 * Queue a DATA packet for transmission, set the resend timeout and send
 * the packet immediately.  Returns the error from rxrpc_send_data_packet()
 * in case the caller wants to do something with it.
 *
 * The txbuf is appended to the call's tx_sendmsg list under tx_lock; if it
 * carries RXRPC_LAST_PACKET the end-of-tx notification is delivered whilst
 * still under the lock.  If the list was previously empty, the I/O thread
 * is poked to start transmission.
 */
static void rxrpc_queue_packet(struct rxrpc_sock *rx, struct rxrpc_call *call,
			       struct rxrpc_txbuf *txb,
			       rxrpc_notify_end_tx_t notify_end_tx)
{
	rxrpc_seq_t seq = txb->seq;
	bool poke, last = txb->flags & RXRPC_LAST_PACKET;

	rxrpc_inc_stat(call->rxnet, stat_tx_data);

	/* Packets must be queued in strict sequence order. */
	ASSERTCMP(txb->seq, ==, call->tx_prepared + 1);

	/* We have to set the timestamp before queueing as the retransmit
	 * algorithm can see the packet as soon as we queue it.
	 */
	txb->last_sent = ktime_get_real();

	if (last)
		trace_rxrpc_txqueue(call, rxrpc_txqueue_queue_last);
	else
		trace_rxrpc_txqueue(call, rxrpc_txqueue_queue);

	/* Add the packet to the call's output buffer */
	spin_lock(&call->tx_lock);
	poke = list_empty(&call->tx_sendmsg);
	list_add_tail(&txb->call_link, &call->tx_sendmsg);
	call->tx_prepared = seq;
	if (last)
		rxrpc_notify_end_tx(rx, call, notify_end_tx);
	spin_unlock(&call->tx_lock);

	/* Only poke if the queue transitioned from empty; otherwise the I/O
	 * thread is already aware of pending data.
	 */
	if (poke)
		rxrpc_poke_call(call, rxrpc_call_poke_start);
}
|
|
|
|
|
2016-09-02 14:39:45 -07:00
|
|
|
/*
 * send data through a socket
 * - must be called in process context
 * - The caller holds the call user access mutex, but not the socket lock.
 *
 * Copies data from @msg into txbufs, securing and queueing each one as it
 * fills (or at end-of-message when MSG_MORE is not set).  May drop and
 * retake call->user_mutex whilst waiting for transmit window space;
 * *_dropped_lock reports whether the mutex is currently held on return.
 * Returns the number of bytes copied, or a negative error if nothing was
 * copied before the error occurred.
 */
static int rxrpc_send_data(struct rxrpc_sock *rx,
			   struct rxrpc_call *call,
			   struct msghdr *msg, size_t len,
			   rxrpc_notify_end_tx_t notify_end_tx,
			   bool *_dropped_lock)
{
	struct rxrpc_txbuf *txb;
	struct sock *sk = &rx->sk;
	enum rxrpc_call_state state;
	long timeo;
	bool more = msg->msg_flags & MSG_MORE;
	int ret, copied = 0;

	timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);

	ret = rxrpc_wait_to_be_connected(call, &timeo);
	if (ret < 0)
		return ret;

	/* Perform deferred security initialisation on the connection if it
	 * hasn't been done yet.
	 */
	if (call->conn->state == RXRPC_CONN_CLIENT_UNSECURED) {
		ret = rxrpc_init_client_conn_security(call->conn);
		if (ret < 0)
			return ret;
	}

	/* this should be in poll */
	sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);

reload:
	/* Resume a partially-filled txbuf left over from a previous
	 * sendmsg() on this call, if there is one.
	 */
	txb = call->tx_pending;
	call->tx_pending = NULL;
	if (txb)
		rxrpc_see_txbuf(txb, rxrpc_txbuf_see_send_more);

	ret = -EPIPE;
	if (sk->sk_shutdown & SEND_SHUTDOWN)
		goto maybe_error;
	state = rxrpc_call_state(call);
	ret = -ESHUTDOWN;
	if (state >= RXRPC_CALL_COMPLETE)
		goto maybe_error;
	ret = -EPROTO;
	if (state != RXRPC_CALL_CLIENT_SEND_REQUEST &&
	    state != RXRPC_CALL_SERVER_ACK_REQUEST &&
	    state != RXRPC_CALL_SERVER_SEND_REPLY) {
		/* Request phase complete for this client call */
		trace_rxrpc_abort(call->debug_id, rxrpc_sendmsg_late_send,
				  call->cid, call->call_id, call->rx_consumed,
				  0, -EPROTO);
		goto maybe_error;
	}

	/* If the user preset a total transmission length, the data supplied
	 * must not overrun it, and must exactly meet it on the final send.
	 */
	ret = -EMSGSIZE;
	if (call->tx_total_len != -1) {
		if (len - copied > call->tx_total_len)
			goto maybe_error;
		if (!more && len - copied != call->tx_total_len)
			goto maybe_error;
	}

	do {
		if (!txb) {
			size_t remain;

			_debug("alloc");

			if (!rxrpc_check_tx_space(call, NULL))
				goto wait_for_space;

			/* Work out the maximum size of a packet.  Assume that
			 * the security header is going to be in the padded
			 * region (enc blocksize), but the trailer is not.
			 */
			remain = more ? INT_MAX : msg_data_left(msg);
			txb = call->conn->security->alloc_txbuf(call, remain, sk->sk_allocation);
			if (!txb) {
				ret = -ENOMEM;
				goto maybe_error;
			}
		}

		_debug("append");

		/* append next segment of data to the current buffer */
		if (msg_data_left(msg) > 0) {
			size_t copy = min_t(size_t, txb->space, msg_data_left(msg));

			_debug("add %zu", copy);
			if (!copy_from_iter_full(txb->kvec[0].iov_base + txb->offset,
						 copy, &msg->msg_iter))
				goto efault;
			_debug("added");
			txb->space -= copy;
			txb->len += copy;
			txb->offset += copy;
			copied += copy;
			if (call->tx_total_len != -1)
				call->tx_total_len -= copy;
		}

		/* check for the far side aborting the call or a network error
		 * occurring */
		if (rxrpc_call_is_complete(call))
			goto call_terminated;

		/* add the packet to the send queue if it's now full */
		if (!txb->space ||
		    (msg_data_left(msg) == 0 && !more)) {
			if (msg_data_left(msg) == 0 && !more)
				txb->flags |= RXRPC_LAST_PACKET;
			else if (call->tx_top - call->acks_hard_ack <
				 call->tx_winsize)
				txb->flags |= RXRPC_MORE_PACKETS;

			ret = call->security->secure_packet(call, txb);
			if (ret < 0)
				goto out;

			/* Fold the payload length into the kvec now that the
			 * security layer may have adjusted things.
			 */
			txb->kvec[0].iov_len += txb->len;
			txb->len = txb->kvec[0].iov_len;
			rxrpc_queue_packet(rx, call, txb, notify_end_tx);
			txb = NULL;
		}
	} while (msg_data_left(msg) > 0);

success:
	ret = copied;
	if (rxrpc_call_is_complete(call) &&
	    call->error < 0)
		ret = call->error;
out:
	/* Stash any partially-filled buffer for the next sendmsg(). */
	call->tx_pending = txb;
	_leave(" = %d", ret);
	return ret;

call_terminated:
	rxrpc_put_txbuf(txb, rxrpc_txbuf_put_send_aborted);
	_leave(" = %d", call->error);
	return call->error;

maybe_error:
	/* Report a partial copy in preference to an error. */
	if (copied)
		goto success;
	goto out;

efault:
	ret = -EFAULT;
	goto out;

wait_for_space:
	ret = -EAGAIN;
	if (msg->msg_flags & MSG_DONTWAIT)
		goto maybe_error;
	/* Drop the user mutex whilst sleeping so that other users of the
	 * call aren't blocked; the caller is told via *_dropped_lock.
	 */
	mutex_unlock(&call->user_mutex);
	*_dropped_lock = true;
	ret = rxrpc_wait_for_tx_window(rx, call, &timeo,
				       msg->msg_flags & MSG_WAITALL);
	if (ret < 0)
		goto maybe_error;
	if (call->interruptibility == RXRPC_INTERRUPTIBLE) {
		if (mutex_lock_interruptible(&call->user_mutex) < 0) {
			ret = sock_intr_errno(timeo);
			goto maybe_error;
		}
	} else {
		mutex_lock(&call->user_mutex);
	}
	*_dropped_lock = false;
	goto reload;
}
|
|
|
|
|
|
|
|
/*
 * extract control messages from the sendmsg() control buffer
 *
 * Parses each SOL_RXRPC cmsg into *p, validating the payload length of
 * every option.  A RXRPC_USER_CALL_ID cmsg is mandatory.  Returns 0 on
 * success, -EINVAL on a malformed or inconsistent control buffer, or
 * -ERANGE for an out-of-range timeout value.
 */
static int rxrpc_sendmsg_cmsg(struct msghdr *msg, struct rxrpc_send_params *p)
{
	struct cmsghdr *cmsg;
	bool got_user_ID = false;
	int len;

	if (msg->msg_controllen == 0)
		return -EINVAL;

	for_each_cmsghdr(cmsg, msg) {
		if (!CMSG_OK(msg, cmsg))
			return -EINVAL;

		len = cmsg->cmsg_len - sizeof(struct cmsghdr);
		_debug("CMSG %d, %d, %d",
		       cmsg->cmsg_level, cmsg->cmsg_type, len);

		/* Control messages for other levels are not ours to judge. */
		if (cmsg->cmsg_level != SOL_RXRPC)
			continue;

		switch (cmsg->cmsg_type) {
		case RXRPC_USER_CALL_ID:
			/* Compat tasks pass a 32-bit ID; native tasks pass an
			 * unsigned long.
			 */
			if (msg->msg_flags & MSG_CMSG_COMPAT) {
				if (len != sizeof(u32))
					return -EINVAL;
				p->call.user_call_ID = *(u32 *)CMSG_DATA(cmsg);
			} else {
				if (len != sizeof(unsigned long))
					return -EINVAL;
				p->call.user_call_ID = *(unsigned long *)
					CMSG_DATA(cmsg);
			}
			got_user_ID = true;
			break;

		case RXRPC_ABORT:
			/* Abort and data/accept commands are mutually
			 * exclusive; a zero abort code is not permitted.
			 */
			if (p->command != RXRPC_CMD_SEND_DATA)
				return -EINVAL;
			p->command = RXRPC_CMD_SEND_ABORT;
			if (len != sizeof(p->abort_code))
				return -EINVAL;
			p->abort_code = *(unsigned int *)CMSG_DATA(cmsg);
			if (p->abort_code == 0)
				return -EINVAL;
			break;

		case RXRPC_CHARGE_ACCEPT:
			if (p->command != RXRPC_CMD_SEND_DATA)
				return -EINVAL;
			p->command = RXRPC_CMD_CHARGE_ACCEPT;
			if (len != 0)
				return -EINVAL;
			break;

		case RXRPC_EXCLUSIVE_CALL:
			p->exclusive = true;
			if (len != 0)
				return -EINVAL;
			break;

		case RXRPC_UPGRADE_SERVICE:
			p->upgrade = true;
			if (len != 0)
				return -EINVAL;
			break;

		case RXRPC_TX_LENGTH:
			/* Only one total-length cmsg may be given. */
			if (p->call.tx_total_len != -1 || len != sizeof(__s64))
				return -EINVAL;
			p->call.tx_total_len = *(__s64 *)CMSG_DATA(cmsg);
			if (p->call.tx_total_len < 0)
				return -EINVAL;
			break;

		case RXRPC_SET_CALL_TIMEOUT:
			/* One to three 32-bit timeout values: hard, idle,
			 * normal - in that order.
			 */
			if (len & 3 || len < 4 || len > 12)
				return -EINVAL;
			memcpy(&p->call.timeouts, CMSG_DATA(cmsg), len);
			p->call.nr_timeouts = len / 4;
			if (p->call.timeouts.hard > INT_MAX / HZ)
				return -ERANGE;
			if (p->call.nr_timeouts >= 2 && p->call.timeouts.idle > 60 * 60 * 1000)
				return -ERANGE;
			if (p->call.nr_timeouts >= 3 && p->call.timeouts.normal > 60 * 60 * 1000)
				return -ERANGE;
			break;

		default:
			return -EINVAL;
		}
	}

	if (!got_user_ID)
		return -EINVAL;
	/* A total length only makes sense for a data-sending command. */
	if (p->call.tx_total_len != -1 && p->command != RXRPC_CMD_SEND_DATA)
		return -EINVAL;
	_leave(" = 0");
	return 0;
}
|
2016-09-02 14:39:45 -07:00
|
|
|
|
2016-09-02 14:39:45 -07:00
|
|
|
/*
|
|
|
|
* Create a new client call for sendmsg().
|
rxrpc: Fix deadlock between call creation and sendmsg/recvmsg
All the routines by which rxrpc is accessed from the outside are serialised
by means of the socket lock (sendmsg, recvmsg, bind,
rxrpc_kernel_begin_call(), ...) and this presents a problem:
(1) If a number of calls on the same socket are in the process of
connection to the same peer, a maximum of four concurrent live calls
are permitted before further calls need to wait for a slot.
(2) If a call is waiting for a slot, it is deep inside sendmsg() or
rxrpc_kernel_begin_call() and the entry function is holding the socket
lock.
(3) sendmsg() and recvmsg() or the in-kernel equivalents are prevented
from servicing the other calls as they need to take the socket lock to
do so.
(4) The socket is stuck until a call is aborted and makes its slot
available to the waiter.
Fix this by:
(1) Provide each call with a mutex ('user_mutex') that arbitrates access
by the users of rxrpc separately for each specific call.
(2) Make rxrpc_sendmsg() and rxrpc_recvmsg() unlock the socket as soon as
they've got a call and taken its mutex.
Note that I'm returning EWOULDBLOCK from recvmsg() if MSG_DONTWAIT is
set but someone else has the lock. Should I instead only return
EWOULDBLOCK if there's nothing currently to be done on a socket, and
sleep in this particular instance because there is something to be
done, but we appear to be blocked by the interrupt handler doing its
ping?
(3) Make rxrpc_new_client_call() unlock the socket after allocating a new
call, locking its user mutex and adding it to the socket's call tree.
The call is returned locked so that sendmsg() can add data to it
immediately.
From the moment the call is in the socket tree, it is subject to
access by sendmsg() and recvmsg() - even if it isn't connected yet.
(4) Lock new service calls in the UDP data_ready handler (in
rxrpc_new_incoming_call()) because they may already be in the socket's
tree and the data_ready handler makes them live immediately if a user
ID has already been preassigned.
Note that the new call is locked before any notifications are sent
that it is live, so doing mutex_trylock() *ought* to always succeed.
Userspace is prevented from doing sendmsg() on calls that are in a
too-early state in rxrpc_do_sendmsg().
(5) Make rxrpc_new_incoming_call() return the call with the user mutex
held so that a ping can be scheduled immediately under it.
Note that it might be worth moving the ping call into
rxrpc_new_incoming_call() and then we can drop the mutex there.
(6) Make rxrpc_accept_call() take the lock on the call it is accepting and
release the socket after adding the call to the socket's tree. This
is slightly tricky as we've dequeued the call by that point and have
to requeue it.
Note that requeuing emits a trace event.
(7) Make rxrpc_kernel_send_data() and rxrpc_kernel_recv_data() take the
new mutex immediately and don't bother with the socket mutex at all.
This patch has the nice bonus that calls on the same socket are now to some
extent parallelisable.
Note that we might want to move rxrpc_service_prealloc() calls out from the
socket lock and give it its own lock, so that we don't hang progress in
other calls because we're waiting for the allocator.
We probably also want to avoid calling rxrpc_notify_socket() from within
the socket lock (rxrpc_accept_call()).
Signed-off-by: David Howells <dhowells@redhat.com>
Tested-by: Marc Dionne <marc.c.dionne@auristor.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2017-02-27 08:43:06 -07:00
|
|
|
* - Called with the socket lock held, which it must release.
|
|
|
|
* - If it returns a call, the call's lock will need releasing by the caller.
|
2016-09-02 14:39:45 -07:00
|
|
|
*/
|
|
|
|
static struct rxrpc_call *
|
|
|
|
rxrpc_new_client_call_for_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg,
|
2017-06-07 06:41:52 -07:00
|
|
|
struct rxrpc_send_params *p)
|
rxrpc: Fix deadlock between call creation and sendmsg/recvmsg
All the routines by which rxrpc is accessed from the outside are serialised
by means of the socket lock (sendmsg, recvmsg, bind,
rxrpc_kernel_begin_call(), ...) and this presents a problem:
(1) If a number of calls on the same socket are in the process of
connection to the same peer, a maximum of four concurrent live calls
are permitted before further calls need to wait for a slot.
(2) If a call is waiting for a slot, it is deep inside sendmsg() or
rxrpc_kernel_begin_call() and the entry function is holding the socket
lock.
(3) sendmsg() and recvmsg() or the in-kernel equivalents are prevented
from servicing the other calls as they need to take the socket lock to
do so.
(4) The socket is stuck until a call is aborted and makes its slot
available to the waiter.
Fix this by:
(1) Provide each call with a mutex ('user_mutex') that arbitrates access
by the users of rxrpc separately for each specific call.
(2) Make rxrpc_sendmsg() and rxrpc_recvmsg() unlock the socket as soon as
they've got a call and taken its mutex.
Note that I'm returning EWOULDBLOCK from recvmsg() if MSG_DONTWAIT is
set but someone else has the lock. Should I instead only return
EWOULDBLOCK if there's nothing currently to be done on a socket, and
sleep in this particular instance because there is something to be
done, but we appear to be blocked by the interrupt handler doing its
ping?
(3) Make rxrpc_new_client_call() unlock the socket after allocating a new
call, locking its user mutex and adding it to the socket's call tree.
The call is returned locked so that sendmsg() can add data to it
immediately.
From the moment the call is in the socket tree, it is subject to
access by sendmsg() and recvmsg() - even if it isn't connected yet.
(4) Lock new service calls in the UDP data_ready handler (in
rxrpc_new_incoming_call()) because they may already be in the socket's
tree and the data_ready handler makes them live immediately if a user
ID has already been preassigned.
Note that the new call is locked before any notifications are sent
that it is live, so doing mutex_trylock() *ought* to always succeed.
Userspace is prevented from doing sendmsg() on calls that are in a
too-early state in rxrpc_do_sendmsg().
(5) Make rxrpc_new_incoming_call() return the call with the user mutex
held so that a ping can be scheduled immediately under it.
Note that it might be worth moving the ping call into
rxrpc_new_incoming_call() and then we can drop the mutex there.
(6) Make rxrpc_accept_call() take the lock on the call it is accepting and
release the socket after adding the call to the socket's tree. This
is slightly tricky as we've dequeued the call by that point and have
to requeue it.
Note that requeuing emits a trace event.
(7) Make rxrpc_kernel_send_data() and rxrpc_kernel_recv_data() take the
new mutex immediately and don't bother with the socket mutex at all.
This patch has the nice bonus that calls on the same socket are now to some
extent parallelisable.
Note that we might want to move rxrpc_service_prealloc() calls out from the
socket lock and give it its own lock, so that we don't hang progress in
other calls because we're waiting for the allocator.
We probably also want to avoid calling rxrpc_notify_socket() from within
the socket lock (rxrpc_accept_call()).
Signed-off-by: David Howells <dhowells@redhat.com>
Tested-by: Marc Dionne <marc.c.dionne@auristor.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2017-02-27 08:43:06 -07:00
|
|
|
__releases(&rx->sk.sk_lock.slock)
|
2018-03-30 13:05:17 -07:00
|
|
|
__acquires(&call->user_mutex)
|
2016-09-02 14:39:45 -07:00
|
|
|
{
|
|
|
|
struct rxrpc_conn_parameters cp;
|
rxrpc, afs: Allow afs to pin rxrpc_peer objects
Change rxrpc's API such that:
(1) A new function, rxrpc_kernel_lookup_peer(), is provided to look up an
rxrpc_peer record for a remote address and a corresponding function,
rxrpc_kernel_put_peer(), is provided to dispose of it again.
(2) When setting up a call, the rxrpc_peer object used during a call is
now passed in rather than being set up by rxrpc_connect_call(). For
afs, this meenat passing it to rxrpc_kernel_begin_call() rather than
the full address (the service ID then has to be passed in as a
separate parameter).
(3) A new function, rxrpc_kernel_remote_addr(), is added so that afs can
get a pointer to the transport address for display purposed, and
another, rxrpc_kernel_remote_srx(), to gain a pointer to the full
rxrpc address.
(4) The function to retrieve the RTT from a call, rxrpc_kernel_get_srtt(),
is then altered to take a peer. This now returns the RTT or -1 if
there are insufficient samples.
(5) Rename rxrpc_kernel_get_peer() to rxrpc_kernel_call_get_peer().
(6) Provide a new function, rxrpc_kernel_get_peer(), to get a ref on a
peer the caller already has.
This allows the afs filesystem to pin the rxrpc_peer records that it is
using, allowing faster lookups and pointer comparisons rather than
comparing sockaddr_rxrpc contents. It also makes it easier to get hold of
the RTT. The following changes are made to afs:
(1) The addr_list struct's addrs[] elements now hold a peer struct pointer
and a service ID rather than a sockaddr_rxrpc.
(2) When displaying the transport address, rxrpc_kernel_remote_addr() is
used.
(3) The port arg is removed from afs_alloc_addrlist() since it's always
overridden.
(4) afs_merge_fs_addr4() and afs_merge_fs_addr6() do peer lookup and may
now return an error that must be handled.
(5) afs_find_server() now takes a peer pointer to specify the address.
(6) afs_find_server(), afs_compare_fs_alists() and afs_merge_fs_addr[46]{}
now do peer pointer comparison rather than address comparison.
Signed-off-by: David Howells <dhowells@redhat.com>
cc: Marc Dionne <marc.dionne@auristor.com>
cc: linux-afs@lists.infradead.org
2023-10-19 04:55:11 -07:00
|
|
|
struct rxrpc_peer *peer;
|
2016-09-02 14:39:45 -07:00
|
|
|
struct rxrpc_call *call;
|
|
|
|
struct key *key;
|
2016-09-02 14:39:45 -07:00
|
|
|
|
2016-09-02 14:39:45 -07:00
|
|
|
DECLARE_SOCKADDR(struct sockaddr_rxrpc *, srx, msg->msg_name);
|
2016-09-02 14:39:45 -07:00
|
|
|
|
2016-09-02 14:39:45 -07:00
|
|
|
_enter("");
|
2016-09-02 14:39:45 -07:00
|
|
|
|
rxrpc: Fix deadlock between call creation and sendmsg/recvmsg
All the routines by which rxrpc is accessed from the outside are serialised
by means of the socket lock (sendmsg, recvmsg, bind,
rxrpc_kernel_begin_call(), ...) and this presents a problem:
(1) If a number of calls on the same socket are in the process of
connection to the same peer, a maximum of four concurrent live calls
are permitted before further calls need to wait for a slot.
(2) If a call is waiting for a slot, it is deep inside sendmsg() or
rxrpc_kernel_begin_call() and the entry function is holding the socket
lock.
(3) sendmsg() and recvmsg() or the in-kernel equivalents are prevented
from servicing the other calls as they need to take the socket lock to
do so.
(4) The socket is stuck until a call is aborted and makes its slot
available to the waiter.
Fix this by:
(1) Provide each call with a mutex ('user_mutex') that arbitrates access
by the users of rxrpc separately for each specific call.
(2) Make rxrpc_sendmsg() and rxrpc_recvmsg() unlock the socket as soon as
they've got a call and taken its mutex.
Note that I'm returning EWOULDBLOCK from recvmsg() if MSG_DONTWAIT is
set but someone else has the lock. Should I instead only return
EWOULDBLOCK if there's nothing currently to be done on a socket, and
sleep in this particular instance because there is something to be
done, but we appear to be blocked by the interrupt handler doing its
ping?
(3) Make rxrpc_new_client_call() unlock the socket after allocating a new
call, locking its user mutex and adding it to the socket's call tree.
The call is returned locked so that sendmsg() can add data to it
immediately.
From the moment the call is in the socket tree, it is subject to
access by sendmsg() and recvmsg() - even if it isn't connected yet.
(4) Lock new service calls in the UDP data_ready handler (in
rxrpc_new_incoming_call()) because they may already be in the socket's
tree and the data_ready handler makes them live immediately if a user
ID has already been preassigned.
Note that the new call is locked before any notifications are sent
that it is live, so doing mutex_trylock() *ought* to always succeed.
Userspace is prevented from doing sendmsg() on calls that are in a
too-early state in rxrpc_do_sendmsg().
(5) Make rxrpc_new_incoming_call() return the call with the user mutex
held so that a ping can be scheduled immediately under it.
Note that it might be worth moving the ping call into
rxrpc_new_incoming_call() and then we can drop the mutex there.
(6) Make rxrpc_accept_call() take the lock on the call it is accepting and
release the socket after adding the call to the socket's tree. This
is slightly tricky as we've dequeued the call by that point and have
to requeue it.
Note that requeuing emits a trace event.
(7) Make rxrpc_kernel_send_data() and rxrpc_kernel_recv_data() take the
new mutex immediately and don't bother with the socket mutex at all.
This patch has the nice bonus that calls on the same socket are now to some
extent parallelisable.
Note that we might want to move rxrpc_service_prealloc() calls out from the
socket lock and give it its own lock, so that we don't hang progress in
other calls because we're waiting for the allocator.
We probably also want to avoid calling rxrpc_notify_socket() from within
the socket lock (rxrpc_accept_call()).
Signed-off-by: David Howells <dhowells@redhat.com>
Tested-by: Marc Dionne <marc.c.dionne@auristor.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2017-02-27 08:43:06 -07:00
|
|
|
if (!msg->msg_name) {
|
|
|
|
release_sock(&rx->sk);
|
2016-09-02 14:39:45 -07:00
|
|
|
return ERR_PTR(-EDESTADDRREQ);
|
rxrpc: Fix deadlock between call creation and sendmsg/recvmsg
All the routines by which rxrpc is accessed from the outside are serialised
by means of the socket lock (sendmsg, recvmsg, bind,
rxrpc_kernel_begin_call(), ...) and this presents a problem:
(1) If a number of calls on the same socket are in the process of
connection to the same peer, a maximum of four concurrent live calls
are permitted before further calls need to wait for a slot.
(2) If a call is waiting for a slot, it is deep inside sendmsg() or
rxrpc_kernel_begin_call() and the entry function is holding the socket
lock.
(3) sendmsg() and recvmsg() or the in-kernel equivalents are prevented
from servicing the other calls as they need to take the socket lock to
do so.
(4) The socket is stuck until a call is aborted and makes its slot
available to the waiter.
Fix this by:
(1) Provide each call with a mutex ('user_mutex') that arbitrates access
by the users of rxrpc separately for each specific call.
(2) Make rxrpc_sendmsg() and rxrpc_recvmsg() unlock the socket as soon as
they've got a call and taken its mutex.
Note that I'm returning EWOULDBLOCK from recvmsg() if MSG_DONTWAIT is
set but someone else has the lock. Should I instead only return
EWOULDBLOCK if there's nothing currently to be done on a socket, and
sleep in this particular instance because there is something to be
done, but we appear to be blocked by the interrupt handler doing its
ping?
(3) Make rxrpc_new_client_call() unlock the socket after allocating a new
call, locking its user mutex and adding it to the socket's call tree.
The call is returned locked so that sendmsg() can add data to it
immediately.
From the moment the call is in the socket tree, it is subject to
access by sendmsg() and recvmsg() - even if it isn't connected yet.
(4) Lock new service calls in the UDP data_ready handler (in
rxrpc_new_incoming_call()) because they may already be in the socket's
tree and the data_ready handler makes them live immediately if a user
ID has already been preassigned.
Note that the new call is locked before any notifications are sent
that it is live, so doing mutex_trylock() *ought* to always succeed.
Userspace is prevented from doing sendmsg() on calls that are in a
too-early state in rxrpc_do_sendmsg().
(5) Make rxrpc_new_incoming_call() return the call with the user mutex
held so that a ping can be scheduled immediately under it.
Note that it might be worth moving the ping call into
rxrpc_new_incoming_call() and then we can drop the mutex there.
(6) Make rxrpc_accept_call() take the lock on the call it is accepting and
release the socket after adding the call to the socket's tree. This
is slightly tricky as we've dequeued the call by that point and have
to requeue it.
Note that requeuing emits a trace event.
(7) Make rxrpc_kernel_send_data() and rxrpc_kernel_recv_data() take the
new mutex immediately and don't bother with the socket mutex at all.
This patch has the nice bonus that calls on the same socket are now to some
extent parallelisable.
Note that we might want to move rxrpc_service_prealloc() calls out from the
socket lock and give it its own lock, so that we don't hang progress in
other calls because we're waiting for the allocator.
We probably also want to avoid calling rxrpc_notify_socket() from within
the socket lock (rxrpc_accept_call()).
Signed-off-by: David Howells <dhowells@redhat.com>
Tested-by: Marc Dionne <marc.c.dionne@auristor.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2017-02-27 08:43:06 -07:00
|
|
|
}
|
2016-09-02 14:39:45 -07:00
|
|
|
|
rxrpc, afs: Allow afs to pin rxrpc_peer objects
Change rxrpc's API such that:
(1) A new function, rxrpc_kernel_lookup_peer(), is provided to look up an
rxrpc_peer record for a remote address and a corresponding function,
rxrpc_kernel_put_peer(), is provided to dispose of it again.
(2) When setting up a call, the rxrpc_peer object used during a call is
now passed in rather than being set up by rxrpc_connect_call(). For
afs, this meenat passing it to rxrpc_kernel_begin_call() rather than
the full address (the service ID then has to be passed in as a
separate parameter).
(3) A new function, rxrpc_kernel_remote_addr(), is added so that afs can
get a pointer to the transport address for display purposed, and
another, rxrpc_kernel_remote_srx(), to gain a pointer to the full
rxrpc address.
(4) The function to retrieve the RTT from a call, rxrpc_kernel_get_srtt(),
is then altered to take a peer. This now returns the RTT or -1 if
there are insufficient samples.
(5) Rename rxrpc_kernel_get_peer() to rxrpc_kernel_call_get_peer().
(6) Provide a new function, rxrpc_kernel_get_peer(), to get a ref on a
peer the caller already has.
This allows the afs filesystem to pin the rxrpc_peer records that it is
using, allowing faster lookups and pointer comparisons rather than
comparing sockaddr_rxrpc contents. It also makes it easier to get hold of
the RTT. The following changes are made to afs:
(1) The addr_list struct's addrs[] elements now hold a peer struct pointer
and a service ID rather than a sockaddr_rxrpc.
(2) When displaying the transport address, rxrpc_kernel_remote_addr() is
used.
(3) The port arg is removed from afs_alloc_addrlist() since it's always
overridden.
(4) afs_merge_fs_addr4() and afs_merge_fs_addr6() do peer lookup and may
now return an error that must be handled.
(5) afs_find_server() now takes a peer pointer to specify the address.
(6) afs_find_server(), afs_compare_fs_alists() and afs_merge_fs_addr[46]{}
now do peer pointer comparison rather than address comparison.
Signed-off-by: David Howells <dhowells@redhat.com>
cc: Marc Dionne <marc.dionne@auristor.com>
cc: linux-afs@lists.infradead.org
2023-10-19 04:55:11 -07:00
|
|
|
peer = rxrpc_lookup_peer(rx->local, srx, GFP_KERNEL);
|
|
|
|
if (!peer) {
|
|
|
|
release_sock(&rx->sk);
|
|
|
|
return ERR_PTR(-ENOMEM);
|
|
|
|
}
|
|
|
|
|
2016-09-02 14:39:45 -07:00
|
|
|
key = rx->key;
|
|
|
|
if (key && !rx->key->payload.data[0])
|
|
|
|
key = NULL;
|
2016-09-02 14:39:45 -07:00
|
|
|
|
2016-09-02 14:39:45 -07:00
|
|
|
memset(&cp, 0, sizeof(cp));
|
|
|
|
cp.local = rx->local;
|
rxrpc, afs: Allow afs to pin rxrpc_peer objects
Change rxrpc's API such that:
(1) A new function, rxrpc_kernel_lookup_peer(), is provided to look up an
rxrpc_peer record for a remote address and a corresponding function,
rxrpc_kernel_put_peer(), is provided to dispose of it again.
(2) When setting up a call, the rxrpc_peer object used during a call is
now passed in rather than being set up by rxrpc_connect_call(). For
afs, this meenat passing it to rxrpc_kernel_begin_call() rather than
the full address (the service ID then has to be passed in as a
separate parameter).
(3) A new function, rxrpc_kernel_remote_addr(), is added so that afs can
get a pointer to the transport address for display purposed, and
another, rxrpc_kernel_remote_srx(), to gain a pointer to the full
rxrpc address.
(4) The function to retrieve the RTT from a call, rxrpc_kernel_get_srtt(),
is then altered to take a peer. This now returns the RTT or -1 if
there are insufficient samples.
(5) Rename rxrpc_kernel_get_peer() to rxrpc_kernel_call_get_peer().
(6) Provide a new function, rxrpc_kernel_get_peer(), to get a ref on a
peer the caller already has.
This allows the afs filesystem to pin the rxrpc_peer records that it is
using, allowing faster lookups and pointer comparisons rather than
comparing sockaddr_rxrpc contents. It also makes it easier to get hold of
the RTT. The following changes are made to afs:
(1) The addr_list struct's addrs[] elements now hold a peer struct pointer
and a service ID rather than a sockaddr_rxrpc.
(2) When displaying the transport address, rxrpc_kernel_remote_addr() is
used.
(3) The port arg is removed from afs_alloc_addrlist() since it's always
overridden.
(4) afs_merge_fs_addr4() and afs_merge_fs_addr6() do peer lookup and may
now return an error that must be handled.
(5) afs_find_server() now takes a peer pointer to specify the address.
(6) afs_find_server(), afs_compare_fs_alists() and afs_merge_fs_addr[46]{}
now do peer pointer comparison rather than address comparison.
Signed-off-by: David Howells <dhowells@redhat.com>
cc: Marc Dionne <marc.dionne@auristor.com>
cc: linux-afs@lists.infradead.org
2023-10-19 04:55:11 -07:00
|
|
|
cp.peer = peer;
|
2016-09-02 14:39:45 -07:00
|
|
|
cp.key = rx->key;
|
|
|
|
cp.security_level = rx->min_sec_level;
|
2017-06-07 06:41:52 -07:00
|
|
|
cp.exclusive = rx->exclusive | p->exclusive;
|
|
|
|
cp.upgrade = p->upgrade;
|
2016-09-02 14:39:45 -07:00
|
|
|
cp.service_id = srx->srx_service;
|
rxrpc, afs: Allow afs to pin rxrpc_peer objects
Change rxrpc's API such that:
(1) A new function, rxrpc_kernel_lookup_peer(), is provided to look up an
rxrpc_peer record for a remote address and a corresponding function,
rxrpc_kernel_put_peer(), is provided to dispose of it again.
(2) When setting up a call, the rxrpc_peer object used during a call is
now passed in rather than being set up by rxrpc_connect_call(). For
afs, this meenat passing it to rxrpc_kernel_begin_call() rather than
the full address (the service ID then has to be passed in as a
separate parameter).
(3) A new function, rxrpc_kernel_remote_addr(), is added so that afs can
get a pointer to the transport address for display purposed, and
another, rxrpc_kernel_remote_srx(), to gain a pointer to the full
rxrpc address.
(4) The function to retrieve the RTT from a call, rxrpc_kernel_get_srtt(),
is then altered to take a peer. This now returns the RTT or -1 if
there are insufficient samples.
(5) Rename rxrpc_kernel_get_peer() to rxrpc_kernel_call_get_peer().
(6) Provide a new function, rxrpc_kernel_get_peer(), to get a ref on a
peer the caller already has.
This allows the afs filesystem to pin the rxrpc_peer records that it is
using, allowing faster lookups and pointer comparisons rather than
comparing sockaddr_rxrpc contents. It also makes it easier to get hold of
the RTT. The following changes are made to afs:
(1) The addr_list struct's addrs[] elements now hold a peer struct pointer
and a service ID rather than a sockaddr_rxrpc.
(2) When displaying the transport address, rxrpc_kernel_remote_addr() is
used.
(3) The port arg is removed from afs_alloc_addrlist() since it's always
overridden.
(4) afs_merge_fs_addr4() and afs_merge_fs_addr6() do peer lookup and may
now return an error that must be handled.
(5) afs_find_server() now takes a peer pointer to specify the address.
(6) afs_find_server(), afs_compare_fs_alists() and afs_merge_fs_addr[46]{}
now do peer pointer comparison rather than address comparison.
Signed-off-by: David Howells <dhowells@redhat.com>
cc: Marc Dionne <marc.dionne@auristor.com>
cc: linux-afs@lists.infradead.org
2023-10-19 04:55:11 -07:00
|
|
|
call = rxrpc_new_client_call(rx, &cp, &p->call, GFP_KERNEL,
|
2018-03-27 15:03:00 -07:00
|
|
|
atomic_inc_return(&rxrpc_debug_id));
|
rxrpc: Fix deadlock between call creation and sendmsg/recvmsg
All the routines by which rxrpc is accessed from the outside are serialised
by means of the socket lock (sendmsg, recvmsg, bind,
rxrpc_kernel_begin_call(), ...) and this presents a problem:
(1) If a number of calls on the same socket are in the process of
connection to the same peer, a maximum of four concurrent live calls
are permitted before further calls need to wait for a slot.
(2) If a call is waiting for a slot, it is deep inside sendmsg() or
rxrpc_kernel_begin_call() and the entry function is holding the socket
lock.
(3) sendmsg() and recvmsg() or the in-kernel equivalents are prevented
from servicing the other calls as they need to take the socket lock to
do so.
(4) The socket is stuck until a call is aborted and makes its slot
available to the waiter.
Fix this by:
(1) Provide each call with a mutex ('user_mutex') that arbitrates access
by the users of rxrpc separately for each specific call.
(2) Make rxrpc_sendmsg() and rxrpc_recvmsg() unlock the socket as soon as
they've got a call and taken its mutex.
Note that I'm returning EWOULDBLOCK from recvmsg() if MSG_DONTWAIT is
set but someone else has the lock. Should I instead only return
EWOULDBLOCK if there's nothing currently to be done on a socket, and
sleep in this particular instance because there is something to be
done, but we appear to be blocked by the interrupt handler doing its
ping?
(3) Make rxrpc_new_client_call() unlock the socket after allocating a new
call, locking its user mutex and adding it to the socket's call tree.
The call is returned locked so that sendmsg() can add data to it
immediately.
From the moment the call is in the socket tree, it is subject to
access by sendmsg() and recvmsg() - even if it isn't connected yet.
(4) Lock new service calls in the UDP data_ready handler (in
rxrpc_new_incoming_call()) because they may already be in the socket's
tree and the data_ready handler makes them live immediately if a user
ID has already been preassigned.
Note that the new call is locked before any notifications are sent
that it is live, so doing mutex_trylock() *ought* to always succeed.
Userspace is prevented from doing sendmsg() on calls that are in a
too-early state in rxrpc_do_sendmsg().
(5) Make rxrpc_new_incoming_call() return the call with the user mutex
held so that a ping can be scheduled immediately under it.
Note that it might be worth moving the ping call into
rxrpc_new_incoming_call() and then we can drop the mutex there.
(6) Make rxrpc_accept_call() take the lock on the call it is accepting and
release the socket after adding the call to the socket's tree. This
is slightly tricky as we've dequeued the call by that point and have
to requeue it.
Note that requeuing emits a trace event.
(7) Make rxrpc_kernel_send_data() and rxrpc_kernel_recv_data() take the
new mutex immediately and don't bother with the socket mutex at all.
This patch has the nice bonus that calls on the same socket are now to some
extent parallelisable.
Note that we might want to move rxrpc_service_prealloc() calls out from the
socket lock and give it its own lock, so that we don't hang progress in
other calls because we're waiting for the allocator.
We probably also want to avoid calling rxrpc_notify_socket() from within
the socket lock (rxrpc_accept_call()).
Signed-off-by: David Howells <dhowells@redhat.com>
Tested-by: Marc Dionne <marc.c.dionne@auristor.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2017-02-27 08:43:06 -07:00
|
|
|
/* The socket is now unlocked */
|
2016-09-02 14:39:45 -07:00
|
|
|
|
rxrpc, afs: Allow afs to pin rxrpc_peer objects
Change rxrpc's API such that:
(1) A new function, rxrpc_kernel_lookup_peer(), is provided to look up an
rxrpc_peer record for a remote address and a corresponding function,
rxrpc_kernel_put_peer(), is provided to dispose of it again.
(2) When setting up a call, the rxrpc_peer object used during a call is
now passed in rather than being set up by rxrpc_connect_call(). For
afs, this meenat passing it to rxrpc_kernel_begin_call() rather than
the full address (the service ID then has to be passed in as a
separate parameter).
(3) A new function, rxrpc_kernel_remote_addr(), is added so that afs can
get a pointer to the transport address for display purposed, and
another, rxrpc_kernel_remote_srx(), to gain a pointer to the full
rxrpc address.
(4) The function to retrieve the RTT from a call, rxrpc_kernel_get_srtt(),
is then altered to take a peer. This now returns the RTT or -1 if
there are insufficient samples.
(5) Rename rxrpc_kernel_get_peer() to rxrpc_kernel_call_get_peer().
(6) Provide a new function, rxrpc_kernel_get_peer(), to get a ref on a
peer the caller already has.
This allows the afs filesystem to pin the rxrpc_peer records that it is
using, allowing faster lookups and pointer comparisons rather than
comparing sockaddr_rxrpc contents. It also makes it easier to get hold of
the RTT. The following changes are made to afs:
(1) The addr_list struct's addrs[] elements now hold a peer struct pointer
and a service ID rather than a sockaddr_rxrpc.
(2) When displaying the transport address, rxrpc_kernel_remote_addr() is
used.
(3) The port arg is removed from afs_alloc_addrlist() since it's always
overridden.
(4) afs_merge_fs_addr4() and afs_merge_fs_addr6() do peer lookup and may
now return an error that must be handled.
(5) afs_find_server() now takes a peer pointer to specify the address.
(6) afs_find_server(), afs_compare_fs_alists() and afs_merge_fs_addr[46]{}
now do peer pointer comparison rather than address comparison.
Signed-off-by: David Howells <dhowells@redhat.com>
cc: Marc Dionne <marc.dionne@auristor.com>
cc: linux-afs@lists.infradead.org
2023-10-19 04:55:11 -07:00
|
|
|
rxrpc_put_peer(peer, rxrpc_peer_put_application);
|
2016-09-02 14:39:45 -07:00
|
|
|
_leave(" = %p\n", call);
|
|
|
|
return call;
|
|
|
|
}
|
2016-09-02 14:39:45 -07:00
|
|
|
|
2016-09-02 14:39:45 -07:00
|
|
|
/*
|
|
|
|
* send a message forming part of a client call through an RxRPC socket
|
|
|
|
* - caller holds the socket locked
|
|
|
|
* - the socket may be either a client socket or a server socket
|
|
|
|
*/
|
|
|
|
int rxrpc_do_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg, size_t len)
|
rxrpc: Fix deadlock between call creation and sendmsg/recvmsg
All the routines by which rxrpc is accessed from the outside are serialised
by means of the socket lock (sendmsg, recvmsg, bind,
rxrpc_kernel_begin_call(), ...) and this presents a problem:
(1) If a number of calls on the same socket are in the process of
connection to the same peer, a maximum of four concurrent live calls
are permitted before further calls need to wait for a slot.
(2) If a call is waiting for a slot, it is deep inside sendmsg() or
rxrpc_kernel_begin_call() and the entry function is holding the socket
lock.
(3) sendmsg() and recvmsg() or the in-kernel equivalents are prevented
from servicing the other calls as they need to take the socket lock to
do so.
(4) The socket is stuck until a call is aborted and makes its slot
available to the waiter.
Fix this by:
(1) Provide each call with a mutex ('user_mutex') that arbitrates access
by the users of rxrpc separately for each specific call.
(2) Make rxrpc_sendmsg() and rxrpc_recvmsg() unlock the socket as soon as
they've got a call and taken its mutex.
Note that I'm returning EWOULDBLOCK from recvmsg() if MSG_DONTWAIT is
set but someone else has the lock. Should I instead only return
EWOULDBLOCK if there's nothing currently to be done on a socket, and
sleep in this particular instance because there is something to be
done, but we appear to be blocked by the interrupt handler doing its
ping?
(3) Make rxrpc_new_client_call() unlock the socket after allocating a new
call, locking its user mutex and adding it to the socket's call tree.
The call is returned locked so that sendmsg() can add data to it
immediately.
From the moment the call is in the socket tree, it is subject to
access by sendmsg() and recvmsg() - even if it isn't connected yet.
(4) Lock new service calls in the UDP data_ready handler (in
rxrpc_new_incoming_call()) because they may already be in the socket's
tree and the data_ready handler makes them live immediately if a user
ID has already been preassigned.
Note that the new call is locked before any notifications are sent
that it is live, so doing mutex_trylock() *ought* to always succeed.
Userspace is prevented from doing sendmsg() on calls that are in a
too-early state in rxrpc_do_sendmsg().
(5) Make rxrpc_new_incoming_call() return the call with the user mutex
held so that a ping can be scheduled immediately under it.
Note that it might be worth moving the ping call into
rxrpc_new_incoming_call() and then we can drop the mutex there.
(6) Make rxrpc_accept_call() take the lock on the call it is accepting and
release the socket after adding the call to the socket's tree. This
is slightly tricky as we've dequeued the call by that point and have
to requeue it.
Note that requeuing emits a trace event.
(7) Make rxrpc_kernel_send_data() and rxrpc_kernel_recv_data() take the
new mutex immediately and don't bother with the socket mutex at all.
This patch has the nice bonus that calls on the same socket are now to some
extent parallelisable.
Note that we might want to move rxrpc_service_prealloc() calls out from the
socket lock and give it its own lock, so that we don't hang progress in
other calls because we're waiting for the allocator.
We probably also want to avoid calling rxrpc_notify_socket() from within
the socket lock (rxrpc_accept_call()).
Signed-off-by: David Howells <dhowells@redhat.com>
Tested-by: Marc Dionne <marc.c.dionne@auristor.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2017-02-27 08:43:06 -07:00
|
|
|
__releases(&rx->sk.sk_lock.slock)
|
2016-09-02 14:39:45 -07:00
|
|
|
{
|
|
|
|
struct rxrpc_call *call;
|
2022-08-24 09:35:45 -07:00
|
|
|
bool dropped_lock = false;
|
2016-09-02 14:39:45 -07:00
|
|
|
int ret;
|
2016-09-02 14:39:45 -07:00
|
|
|
|
2017-06-07 06:41:52 -07:00
|
|
|
struct rxrpc_send_params p = {
|
2017-11-24 03:18:41 -07:00
|
|
|
.call.tx_total_len = -1,
|
|
|
|
.call.user_call_ID = 0,
|
2017-11-24 03:18:41 -07:00
|
|
|
.call.nr_timeouts = 0,
|
2020-03-13 02:22:09 -07:00
|
|
|
.call.interruptibility = RXRPC_INTERRUPTIBLE,
|
2017-11-24 03:18:41 -07:00
|
|
|
.abort_code = 0,
|
|
|
|
.command = RXRPC_CMD_SEND_DATA,
|
|
|
|
.exclusive = false,
|
|
|
|
.upgrade = false,
|
2017-06-07 06:41:52 -07:00
|
|
|
};
|
|
|
|
|
2016-09-02 14:39:45 -07:00
|
|
|
_enter("");
|
2016-09-02 14:39:45 -07:00
|
|
|
|
2017-06-07 06:41:52 -07:00
|
|
|
ret = rxrpc_sendmsg_cmsg(msg, &p);
|
2016-09-02 14:39:45 -07:00
|
|
|
if (ret < 0)
|
rxrpc: Fix deadlock between call creation and sendmsg/recvmsg
All the routines by which rxrpc is accessed from the outside are serialised
by means of the socket lock (sendmsg, recvmsg, bind,
rxrpc_kernel_begin_call(), ...) and this presents a problem:
(1) If a number of calls on the same socket are in the process of
connection to the same peer, a maximum of four concurrent live calls
are permitted before further calls need to wait for a slot.
(2) If a call is waiting for a slot, it is deep inside sendmsg() or
rxrpc_kernel_begin_call() and the entry function is holding the socket
lock.
(3) sendmsg() and recvmsg() or the in-kernel equivalents are prevented
from servicing the other calls as they need to take the socket lock to
do so.
(4) The socket is stuck until a call is aborted and makes its slot
available to the waiter.
Fix this by:
(1) Provide each call with a mutex ('user_mutex') that arbitrates access
by the users of rxrpc separately for each specific call.
(2) Make rxrpc_sendmsg() and rxrpc_recvmsg() unlock the socket as soon as
they've got a call and taken its mutex.
Note that I'm returning EWOULDBLOCK from recvmsg() if MSG_DONTWAIT is
set but someone else has the lock. Should I instead only return
EWOULDBLOCK if there's nothing currently to be done on a socket, and
sleep in this particular instance because there is something to be
done, but we appear to be blocked by the interrupt handler doing its
ping?
(3) Make rxrpc_new_client_call() unlock the socket after allocating a new
call, locking its user mutex and adding it to the socket's call tree.
The call is returned locked so that sendmsg() can add data to it
immediately.
From the moment the call is in the socket tree, it is subject to
access by sendmsg() and recvmsg() - even if it isn't connected yet.
(4) Lock new service calls in the UDP data_ready handler (in
rxrpc_new_incoming_call()) because they may already be in the socket's
tree and the data_ready handler makes them live immediately if a user
ID has already been preassigned.
Note that the new call is locked before any notifications are sent
that it is live, so doing mutex_trylock() *ought* to always succeed.
Userspace is prevented from doing sendmsg() on calls that are in a
too-early state in rxrpc_do_sendmsg().
(5) Make rxrpc_new_incoming_call() return the call with the user mutex
held so that a ping can be scheduled immediately under it.
Note that it might be worth moving the ping call into
rxrpc_new_incoming_call() and then we can drop the mutex there.
(6) Make rxrpc_accept_call() take the lock on the call it is accepting and
release the socket after adding the call to the socket's tree. This
is slightly tricky as we've dequeued the call by that point and have
to requeue it.
Note that requeuing emits a trace event.
(7) Make rxrpc_kernel_send_data() and rxrpc_kernel_recv_data() take the
new mutex immediately and don't bother with the socket mutex at all.
This patch has the nice bonus that calls on the same socket are now to some
extent parallelisable.
Note that we might want to move rxrpc_service_prealloc() calls out from the
socket lock and give it its own lock, so that we don't hang progress in
other calls because we're waiting for the allocator.
We probably also want to avoid calling rxrpc_notify_socket() from within
the socket lock (rxrpc_accept_call()).
Signed-off-by: David Howells <dhowells@redhat.com>
Tested-by: Marc Dionne <marc.c.dionne@auristor.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2017-02-27 08:43:06 -07:00
|
|
|
goto error_release_sock;
|
2016-09-02 14:39:45 -07:00
|
|
|
|
2020-09-30 13:27:18 -07:00
|
|
|
if (p.command == RXRPC_CMD_CHARGE_ACCEPT) {
|
rxrpc: Fix deadlock between call creation and sendmsg/recvmsg
All the routines by which rxrpc is accessed from the outside are serialised
by means of the socket lock (sendmsg, recvmsg, bind,
rxrpc_kernel_begin_call(), ...) and this presents a problem:
(1) If a number of calls on the same socket are in the process of
connection to the same peer, a maximum of four concurrent live calls
are permitted before further calls need to wait for a slot.
(2) If a call is waiting for a slot, it is deep inside sendmsg() or
rxrpc_kernel_begin_call() and the entry function is holding the socket
lock.
(3) sendmsg() and recvmsg() or the in-kernel equivalents are prevented
from servicing the other calls as they need to take the socket lock to
do so.
(4) The socket is stuck until a call is aborted and makes its slot
available to the waiter.
Fix this by:
(1) Provide each call with a mutex ('user_mutex') that arbitrates access
by the users of rxrpc separately for each specific call.
(2) Make rxrpc_sendmsg() and rxrpc_recvmsg() unlock the socket as soon as
they've got a call and taken its mutex.
Note that I'm returning EWOULDBLOCK from recvmsg() if MSG_DONTWAIT is
set but someone else has the lock. Should I instead only return
EWOULDBLOCK if there's nothing currently to be done on a socket, and
sleep in this particular instance because there is something to be
done, but we appear to be blocked by the interrupt handler doing its
ping?
(3) Make rxrpc_new_client_call() unlock the socket after allocating a new
call, locking its user mutex and adding it to the socket's call tree.
The call is returned locked so that sendmsg() can add data to it
immediately.
From the moment the call is in the socket tree, it is subject to
access by sendmsg() and recvmsg() - even if it isn't connected yet.
(4) Lock new service calls in the UDP data_ready handler (in
rxrpc_new_incoming_call()) because they may already be in the socket's
tree and the data_ready handler makes them live immediately if a user
ID has already been preassigned.
Note that the new call is locked before any notifications are sent
that it is live, so doing mutex_trylock() *ought* to always succeed.
Userspace is prevented from doing sendmsg() on calls that are in a
too-early state in rxrpc_do_sendmsg().
(5) Make rxrpc_new_incoming_call() return the call with the user mutex
held so that a ping can be scheduled immediately under it.
Note that it might be worth moving the ping call into
rxrpc_new_incoming_call() and then we can drop the mutex there.
(6) Make rxrpc_accept_call() take the lock on the call it is accepting and
release the socket after adding the call to the socket's tree. This
is slightly tricky as we've dequeued the call by that point and have
to requeue it.
Note that requeuing emits a trace event.
(7) Make rxrpc_kernel_send_data() and rxrpc_kernel_recv_data() take the
new mutex immediately and don't bother with the socket mutex at all.
This patch has the nice bonus that calls on the same socket are now to some
extent parallelisable.
Note that we might want to move rxrpc_service_prealloc() calls out from the
socket lock and give it its own lock, so that we don't hang progress in
other calls because we're waiting for the allocator.
We probably also want to avoid calling rxrpc_notify_socket() from within
the socket lock (rxrpc_accept_call()).
Signed-off-by: David Howells <dhowells@redhat.com>
Tested-by: Marc Dionne <marc.c.dionne@auristor.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2017-02-27 08:43:06 -07:00
|
|
|
ret = -EINVAL;
|
2016-09-02 14:39:45 -07:00
|
|
|
if (rx->sk.sk_state != RXRPC_SERVER_LISTENING)
|
rxrpc: Fix deadlock between call creation and sendmsg/recvmsg
All the routines by which rxrpc is accessed from the outside are serialised
by means of the socket lock (sendmsg, recvmsg, bind,
rxrpc_kernel_begin_call(), ...) and this presents a problem:
(1) If a number of calls on the same socket are in the process of
connection to the same peer, a maximum of four concurrent live calls
are permitted before further calls need to wait for a slot.
(2) If a call is waiting for a slot, it is deep inside sendmsg() or
rxrpc_kernel_begin_call() and the entry function is holding the socket
lock.
(3) sendmsg() and recvmsg() or the in-kernel equivalents are prevented
from servicing the other calls as they need to take the socket lock to
do so.
(4) The socket is stuck until a call is aborted and makes its slot
available to the waiter.
Fix this by:
(1) Provide each call with a mutex ('user_mutex') that arbitrates access
by the users of rxrpc separately for each specific call.
(2) Make rxrpc_sendmsg() and rxrpc_recvmsg() unlock the socket as soon as
they've got a call and taken its mutex.
Note that I'm returning EWOULDBLOCK from recvmsg() if MSG_DONTWAIT is
set but someone else has the lock. Should I instead only return
EWOULDBLOCK if there's nothing currently to be done on a socket, and
sleep in this particular instance because there is something to be
done, but we appear to be blocked by the interrupt handler doing its
ping?
(3) Make rxrpc_new_client_call() unlock the socket after allocating a new
call, locking its user mutex and adding it to the socket's call tree.
The call is returned locked so that sendmsg() can add data to it
immediately.
From the moment the call is in the socket tree, it is subject to
access by sendmsg() and recvmsg() - even if it isn't connected yet.
(4) Lock new service calls in the UDP data_ready handler (in
rxrpc_new_incoming_call()) because they may already be in the socket's
tree and the data_ready handler makes them live immediately if a user
ID has already been preassigned.
Note that the new call is locked before any notifications are sent
that it is live, so doing mutex_trylock() *ought* to always succeed.
Userspace is prevented from doing sendmsg() on calls that are in a
too-early state in rxrpc_do_sendmsg().
(5) Make rxrpc_new_incoming_call() return the call with the user mutex
held so that a ping can be scheduled immediately under it.
Note that it might be worth moving the ping call into
rxrpc_new_incoming_call() and then we can drop the mutex there.
(6) Make rxrpc_accept_call() take the lock on the call it is accepting and
release the socket after adding the call to the socket's tree. This
is slightly tricky as we've dequeued the call by that point and have
to requeue it.
Note that requeuing emits a trace event.
(7) Make rxrpc_kernel_send_data() and rxrpc_kernel_recv_data() take the
new mutex immediately and don't bother with the socket mutex at all.
This patch has the nice bonus that calls on the same socket are now to some
extent parallelisable.
Note that we might want to move rxrpc_service_prealloc() calls out from the
socket lock and give it its own lock, so that we don't hang progress in
other calls because we're waiting for the allocator.
We probably also want to avoid calling rxrpc_notify_socket() from within
the socket lock (rxrpc_accept_call()).
Signed-off-by: David Howells <dhowells@redhat.com>
Tested-by: Marc Dionne <marc.c.dionne@auristor.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2017-02-27 08:43:06 -07:00
|
|
|
goto error_release_sock;
|
2020-09-30 13:27:18 -07:00
|
|
|
ret = rxrpc_user_charge_accept(rx, p.call.user_call_ID);
|
|
|
|
goto error_release_sock;
|
2016-09-02 14:39:45 -07:00
|
|
|
}
|
2016-09-02 14:39:45 -07:00
|
|
|
|
2017-11-24 03:18:41 -07:00
|
|
|
call = rxrpc_find_call_by_user_ID(rx, p.call.user_call_ID);
|
2016-09-02 14:39:45 -07:00
|
|
|
if (!call) {
|
rxrpc: Fix deadlock between call creation and sendmsg/recvmsg
All the routines by which rxrpc is accessed from the outside are serialised
by means of the socket lock (sendmsg, recvmsg, bind,
rxrpc_kernel_begin_call(), ...) and this presents a problem:
(1) If a number of calls on the same socket are in the process of
connection to the same peer, a maximum of four concurrent live calls
are permitted before further calls need to wait for a slot.
(2) If a call is waiting for a slot, it is deep inside sendmsg() or
rxrpc_kernel_begin_call() and the entry function is holding the socket
lock.
(3) sendmsg() and recvmsg() or the in-kernel equivalents are prevented
from servicing the other calls as they need to take the socket lock to
do so.
(4) The socket is stuck until a call is aborted and makes its slot
available to the waiter.
Fix this by:
(1) Provide each call with a mutex ('user_mutex') that arbitrates access
by the users of rxrpc separately for each specific call.
(2) Make rxrpc_sendmsg() and rxrpc_recvmsg() unlock the socket as soon as
they've got a call and taken its mutex.
Note that I'm returning EWOULDBLOCK from recvmsg() if MSG_DONTWAIT is
set but someone else has the lock. Should I instead only return
EWOULDBLOCK if there's nothing currently to be done on a socket, and
sleep in this particular instance because there is something to be
done, but we appear to be blocked by the interrupt handler doing its
ping?
(3) Make rxrpc_new_client_call() unlock the socket after allocating a new
call, locking its user mutex and adding it to the socket's call tree.
The call is returned locked so that sendmsg() can add data to it
immediately.
From the moment the call is in the socket tree, it is subject to
access by sendmsg() and recvmsg() - even if it isn't connected yet.
(4) Lock new service calls in the UDP data_ready handler (in
rxrpc_new_incoming_call()) because they may already be in the socket's
tree and the data_ready handler makes them live immediately if a user
ID has already been preassigned.
Note that the new call is locked before any notifications are sent
that it is live, so doing mutex_trylock() *ought* to always succeed.
Userspace is prevented from doing sendmsg() on calls that are in a
too-early state in rxrpc_do_sendmsg().
(5) Make rxrpc_new_incoming_call() return the call with the user mutex
held so that a ping can be scheduled immediately under it.
Note that it might be worth moving the ping call into
rxrpc_new_incoming_call() and then we can drop the mutex there.
(6) Make rxrpc_accept_call() take the lock on the call it is accepting and
release the socket after adding the call to the socket's tree. This
is slightly tricky as we've dequeued the call by that point and have
to requeue it.
Note that requeuing emits a trace event.
(7) Make rxrpc_kernel_send_data() and rxrpc_kernel_recv_data() take the
new mutex immediately and don't bother with the socket mutex at all.
This patch has the nice bonus that calls on the same socket are now to some
extent parallelisable.
Note that we might want to move rxrpc_service_prealloc() calls out from the
socket lock and give it its own lock, so that we don't hang progress in
other calls because we're waiting for the allocator.
We probably also want to avoid calling rxrpc_notify_socket() from within
the socket lock (rxrpc_accept_call()).
Signed-off-by: David Howells <dhowells@redhat.com>
Tested-by: Marc Dionne <marc.c.dionne@auristor.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2017-02-27 08:43:06 -07:00
|
|
|
ret = -EBADSLT;
|
2017-06-07 06:41:52 -07:00
|
|
|
if (p.command != RXRPC_CMD_SEND_DATA)
|
rxrpc: Fix deadlock between call creation and sendmsg/recvmsg
All the routines by which rxrpc is accessed from the outside are serialised
by means of the socket lock (sendmsg, recvmsg, bind,
rxrpc_kernel_begin_call(), ...) and this presents a problem:
(1) If a number of calls on the same socket are in the process of
connection to the same peer, a maximum of four concurrent live calls
are permitted before further calls need to wait for a slot.
(2) If a call is waiting for a slot, it is deep inside sendmsg() or
rxrpc_kernel_begin_call() and the entry function is holding the socket
lock.
(3) sendmsg() and recvmsg() or the in-kernel equivalents are prevented
from servicing the other calls as they need to take the socket lock to
do so.
(4) The socket is stuck until a call is aborted and makes its slot
available to the waiter.
Fix this by:
(1) Provide each call with a mutex ('user_mutex') that arbitrates access
by the users of rxrpc separately for each specific call.
(2) Make rxrpc_sendmsg() and rxrpc_recvmsg() unlock the socket as soon as
they've got a call and taken its mutex.
Note that I'm returning EWOULDBLOCK from recvmsg() if MSG_DONTWAIT is
set but someone else has the lock. Should I instead only return
EWOULDBLOCK if there's nothing currently to be done on a socket, and
sleep in this particular instance because there is something to be
done, but we appear to be blocked by the interrupt handler doing its
ping?
(3) Make rxrpc_new_client_call() unlock the socket after allocating a new
call, locking its user mutex and adding it to the socket's call tree.
The call is returned locked so that sendmsg() can add data to it
immediately.
From the moment the call is in the socket tree, it is subject to
access by sendmsg() and recvmsg() - even if it isn't connected yet.
(4) Lock new service calls in the UDP data_ready handler (in
rxrpc_new_incoming_call()) because they may already be in the socket's
tree and the data_ready handler makes them live immediately if a user
ID has already been preassigned.
Note that the new call is locked before any notifications are sent
that it is live, so doing mutex_trylock() *ought* to always succeed.
Userspace is prevented from doing sendmsg() on calls that are in a
too-early state in rxrpc_do_sendmsg().
(5) Make rxrpc_new_incoming_call() return the call with the user mutex
held so that a ping can be scheduled immediately under it.
Note that it might be worth moving the ping call into
rxrpc_new_incoming_call() and then we can drop the mutex there.
(6) Make rxrpc_accept_call() take the lock on the call it is accepting and
release the socket after adding the call to the socket's tree. This
is slightly tricky as we've dequeued the call by that point and have
to requeue it.
Note that requeuing emits a trace event.
(7) Make rxrpc_kernel_send_data() and rxrpc_kernel_recv_data() take the
new mutex immediately and don't bother with the socket mutex at all.
This patch has the nice bonus that calls on the same socket are now to some
extent parallelisable.
Note that we might want to move rxrpc_service_prealloc() calls out from the
socket lock and give it its own lock, so that we don't hang progress in
other calls because we're waiting for the allocator.
We probably also want to avoid calling rxrpc_notify_socket() from within
the socket lock (rxrpc_accept_call()).
Signed-off-by: David Howells <dhowells@redhat.com>
Tested-by: Marc Dionne <marc.c.dionne@auristor.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2017-02-27 08:43:06 -07:00
|
|
|
goto error_release_sock;
|
2017-06-07 06:41:52 -07:00
|
|
|
call = rxrpc_new_client_call_for_sendmsg(rx, msg, &p);
|
rxrpc: Fix deadlock between call creation and sendmsg/recvmsg
All the routines by which rxrpc is accessed from the outside are serialised
by means of the socket lock (sendmsg, recvmsg, bind,
rxrpc_kernel_begin_call(), ...) and this presents a problem:
(1) If a number of calls on the same socket are in the process of
connection to the same peer, a maximum of four concurrent live calls
are permitted before further calls need to wait for a slot.
(2) If a call is waiting for a slot, it is deep inside sendmsg() or
rxrpc_kernel_begin_call() and the entry function is holding the socket
lock.
(3) sendmsg() and recvmsg() or the in-kernel equivalents are prevented
from servicing the other calls as they need to take the socket lock to
do so.
(4) The socket is stuck until a call is aborted and makes its slot
available to the waiter.
Fix this by:
(1) Provide each call with a mutex ('user_mutex') that arbitrates access
by the users of rxrpc separately for each specific call.
(2) Make rxrpc_sendmsg() and rxrpc_recvmsg() unlock the socket as soon as
they've got a call and taken its mutex.
Note that I'm returning EWOULDBLOCK from recvmsg() if MSG_DONTWAIT is
set but someone else has the lock. Should I instead only return
EWOULDBLOCK if there's nothing currently to be done on a socket, and
sleep in this particular instance because there is something to be
done, but we appear to be blocked by the interrupt handler doing its
ping?
(3) Make rxrpc_new_client_call() unlock the socket after allocating a new
call, locking its user mutex and adding it to the socket's call tree.
The call is returned locked so that sendmsg() can add data to it
immediately.
From the moment the call is in the socket tree, it is subject to
access by sendmsg() and recvmsg() - even if it isn't connected yet.
(4) Lock new service calls in the UDP data_ready handler (in
rxrpc_new_incoming_call()) because they may already be in the socket's
tree and the data_ready handler makes them live immediately if a user
ID has already been preassigned.
Note that the new call is locked before any notifications are sent
that it is live, so doing mutex_trylock() *ought* to always succeed.
Userspace is prevented from doing sendmsg() on calls that are in a
too-early state in rxrpc_do_sendmsg().
(5) Make rxrpc_new_incoming_call() return the call with the user mutex
held so that a ping can be scheduled immediately under it.
Note that it might be worth moving the ping call into
rxrpc_new_incoming_call() and then we can drop the mutex there.
(6) Make rxrpc_accept_call() take the lock on the call it is accepting and
release the socket after adding the call to the socket's tree. This
is slightly tricky as we've dequeued the call by that point and have
to requeue it.
Note that requeuing emits a trace event.
(7) Make rxrpc_kernel_send_data() and rxrpc_kernel_recv_data() take the
new mutex immediately and don't bother with the socket mutex at all.
This patch has the nice bonus that calls on the same socket are now to some
extent parallelisable.
Note that we might want to move rxrpc_service_prealloc() calls out from the
socket lock and give it its own lock, so that we don't hang progress in
other calls because we're waiting for the allocator.
We probably also want to avoid calling rxrpc_notify_socket() from within
the socket lock (rxrpc_accept_call()).
Signed-off-by: David Howells <dhowells@redhat.com>
Tested-by: Marc Dionne <marc.c.dionne@auristor.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2017-02-27 08:43:06 -07:00
|
|
|
/* The socket is now unlocked... */
|
2016-09-02 14:39:45 -07:00
|
|
|
if (IS_ERR(call))
|
|
|
|
return PTR_ERR(call);
|
rxrpc: Fix deadlock between call creation and sendmsg/recvmsg
All the routines by which rxrpc is accessed from the outside are serialised
by means of the socket lock (sendmsg, recvmsg, bind,
rxrpc_kernel_begin_call(), ...) and this presents a problem:
(1) If a number of calls on the same socket are in the process of
connection to the same peer, a maximum of four concurrent live calls
are permitted before further calls need to wait for a slot.
(2) If a call is waiting for a slot, it is deep inside sendmsg() or
rxrpc_kernel_begin_call() and the entry function is holding the socket
lock.
(3) sendmsg() and recvmsg() or the in-kernel equivalents are prevented
from servicing the other calls as they need to take the socket lock to
do so.
(4) The socket is stuck until a call is aborted and makes its slot
available to the waiter.
Fix this by:
(1) Provide each call with a mutex ('user_mutex') that arbitrates access
by the users of rxrpc separately for each specific call.
(2) Make rxrpc_sendmsg() and rxrpc_recvmsg() unlock the socket as soon as
they've got a call and taken its mutex.
Note that I'm returning EWOULDBLOCK from recvmsg() if MSG_DONTWAIT is
set but someone else has the lock. Should I instead only return
EWOULDBLOCK if there's nothing currently to be done on a socket, and
sleep in this particular instance because there is something to be
done, but we appear to be blocked by the interrupt handler doing its
ping?
(3) Make rxrpc_new_client_call() unlock the socket after allocating a new
call, locking its user mutex and adding it to the socket's call tree.
The call is returned locked so that sendmsg() can add data to it
immediately.
From the moment the call is in the socket tree, it is subject to
access by sendmsg() and recvmsg() - even if it isn't connected yet.
(4) Lock new service calls in the UDP data_ready handler (in
rxrpc_new_incoming_call()) because they may already be in the socket's
tree and the data_ready handler makes them live immediately if a user
ID has already been preassigned.
Note that the new call is locked before any notifications are sent
that it is live, so doing mutex_trylock() *ought* to always succeed.
Userspace is prevented from doing sendmsg() on calls that are in a
too-early state in rxrpc_do_sendmsg().
(5) Make rxrpc_new_incoming_call() return the call with the user mutex
held so that a ping can be scheduled immediately under it.
Note that it might be worth moving the ping call into
rxrpc_new_incoming_call() and then we can drop the mutex there.
(6) Make rxrpc_accept_call() take the lock on the call it is accepting and
release the socket after adding the call to the socket's tree. This
is slightly tricky as we've dequeued the call by that point and have
to requeue it.
Note that requeuing emits a trace event.
(7) Make rxrpc_kernel_send_data() and rxrpc_kernel_recv_data() take the
new mutex immediately and don't bother with the socket mutex at all.
This patch has the nice bonus that calls on the same socket are now to some
extent parallelisable.
Note that we might want to move rxrpc_service_prealloc() calls out from the
socket lock and give it its own lock, so that we don't hang progress in
other calls because we're waiting for the allocator.
We probably also want to avoid calling rxrpc_notify_socket() from within
the socket lock (rxrpc_accept_call()).
Signed-off-by: David Howells <dhowells@redhat.com>
Tested-by: Marc Dionne <marc.c.dionne@auristor.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2017-02-27 08:43:06 -07:00
|
|
|
/* ... and we have the call lock. */
|
2023-04-28 13:27:56 -07:00
|
|
|
p.call.nr_timeouts = 0;
|
2020-07-28 16:03:56 -07:00
|
|
|
ret = 0;
|
2022-12-19 08:32:32 -07:00
|
|
|
if (rxrpc_call_is_complete(call))
|
2020-07-28 16:03:56 -07:00
|
|
|
goto out_put_unlock;
|
rxrpc: Fix deadlock between call creation and sendmsg/recvmsg
All the routines by which rxrpc is accessed from the outside are serialised
by means of the socket lock (sendmsg, recvmsg, bind,
rxrpc_kernel_begin_call(), ...) and this presents a problem:
(1) If a number of calls on the same socket are in the process of
connection to the same peer, a maximum of four concurrent live calls
are permitted before further calls need to wait for a slot.
(2) If a call is waiting for a slot, it is deep inside sendmsg() or
rxrpc_kernel_begin_call() and the entry function is holding the socket
lock.
(3) sendmsg() and recvmsg() or the in-kernel equivalents are prevented
from servicing the other calls as they need to take the socket lock to
do so.
(4) The socket is stuck until a call is aborted and makes its slot
available to the waiter.
Fix this by:
(1) Provide each call with a mutex ('user_mutex') that arbitrates access
by the users of rxrpc separately for each specific call.
(2) Make rxrpc_sendmsg() and rxrpc_recvmsg() unlock the socket as soon as
they've got a call and taken its mutex.
Note that I'm returning EWOULDBLOCK from recvmsg() if MSG_DONTWAIT is
set but someone else has the lock. Should I instead only return
EWOULDBLOCK if there's nothing currently to be done on a socket, and
sleep in this particular instance because there is something to be
done, but we appear to be blocked by the interrupt handler doing its
ping?
(3) Make rxrpc_new_client_call() unlock the socket after allocating a new
call, locking its user mutex and adding it to the socket's call tree.
The call is returned locked so that sendmsg() can add data to it
immediately.
From the moment the call is in the socket tree, it is subject to
access by sendmsg() and recvmsg() - even if it isn't connected yet.
(4) Lock new service calls in the UDP data_ready handler (in
rxrpc_new_incoming_call()) because they may already be in the socket's
tree and the data_ready handler makes them live immediately if a user
ID has already been preassigned.
Note that the new call is locked before any notifications are sent
that it is live, so doing mutex_trylock() *ought* to always succeed.
Userspace is prevented from doing sendmsg() on calls that are in a
too-early state in rxrpc_do_sendmsg().
(5) Make rxrpc_new_incoming_call() return the call with the user mutex
held so that a ping can be scheduled immediately under it.
Note that it might be worth moving the ping call into
rxrpc_new_incoming_call() and then we can drop the mutex there.
(6) Make rxrpc_accept_call() take the lock on the call it is accepting and
release the socket after adding the call to the socket's tree. This
is slightly tricky as we've dequeued the call by that point and have
to requeue it.
Note that requeuing emits a trace event.
(7) Make rxrpc_kernel_send_data() and rxrpc_kernel_recv_data() take the
new mutex immediately and don't bother with the socket mutex at all.
This patch has the nice bonus that calls on the same socket are now to some
extent parallelisable.
Note that we might want to move rxrpc_service_prealloc() calls out from the
socket lock and give it its own lock, so that we don't hang progress in
other calls because we're waiting for the allocator.
We probably also want to avoid calling rxrpc_notify_socket() from within
the socket lock (rxrpc_accept_call()).
Signed-off-by: David Howells <dhowells@redhat.com>
Tested-by: Marc Dionne <marc.c.dionne@auristor.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2017-02-27 08:43:06 -07:00
|
|
|
} else {
|
2022-12-19 08:32:32 -07:00
|
|
|
switch (rxrpc_call_state(call)) {
|
2017-03-03 17:01:41 -07:00
|
|
|
case RXRPC_CALL_CLIENT_AWAIT_CONN:
|
|
|
|
case RXRPC_CALL_SERVER_SECURING:
|
2023-04-28 13:27:55 -07:00
|
|
|
if (p.command == RXRPC_CMD_SEND_ABORT)
|
|
|
|
break;
|
|
|
|
fallthrough;
|
|
|
|
case RXRPC_CALL_UNINITIALISED:
|
|
|
|
case RXRPC_CALL_SERVER_PREALLOC:
|
2022-10-21 06:39:26 -07:00
|
|
|
rxrpc_put_call(call, rxrpc_call_put_sendmsg);
|
2017-03-03 17:01:41 -07:00
|
|
|
ret = -EBUSY;
|
2017-03-02 16:48:52 -07:00
|
|
|
goto error_release_sock;
|
2017-03-03 17:01:41 -07:00
|
|
|
default:
|
|
|
|
break;
|
|
|
|
}
|
2017-03-02 16:48:52 -07:00
|
|
|
|
rxrpc: Fix deadlock between call creation and sendmsg/recvmsg
All the routines by which rxrpc is accessed from the outside are serialised
by means of the socket lock (sendmsg, recvmsg, bind,
rxrpc_kernel_begin_call(), ...) and this presents a problem:
(1) If a number of calls on the same socket are in the process of
connection to the same peer, a maximum of four concurrent live calls
are permitted before further calls need to wait for a slot.
(2) If a call is waiting for a slot, it is deep inside sendmsg() or
rxrpc_kernel_begin_call() and the entry function is holding the socket
lock.
(3) sendmsg() and recvmsg() or the in-kernel equivalents are prevented
from servicing the other calls as they need to take the socket lock to
do so.
(4) The socket is stuck until a call is aborted and makes its slot
available to the waiter.
Fix this by:
(1) Provide each call with a mutex ('user_mutex') that arbitrates access
by the users of rxrpc separately for each specific call.
(2) Make rxrpc_sendmsg() and rxrpc_recvmsg() unlock the socket as soon as
they've got a call and taken its mutex.
Note that I'm returning EWOULDBLOCK from recvmsg() if MSG_DONTWAIT is
set but someone else has the lock. Should I instead only return
EWOULDBLOCK if there's nothing currently to be done on a socket, and
sleep in this particular instance because there is something to be
done, but we appear to be blocked by the interrupt handler doing its
ping?
(3) Make rxrpc_new_client_call() unlock the socket after allocating a new
call, locking its user mutex and adding it to the socket's call tree.
The call is returned locked so that sendmsg() can add data to it
immediately.
From the moment the call is in the socket tree, it is subject to
access by sendmsg() and recvmsg() - even if it isn't connected yet.
(4) Lock new service calls in the UDP data_ready handler (in
rxrpc_new_incoming_call()) because they may already be in the socket's
tree and the data_ready handler makes them live immediately if a user
ID has already been preassigned.
Note that the new call is locked before any notifications are sent
that it is live, so doing mutex_trylock() *ought* to always succeed.
Userspace is prevented from doing sendmsg() on calls that are in a
too-early state in rxrpc_do_sendmsg().
(5) Make rxrpc_new_incoming_call() return the call with the user mutex
held so that a ping can be scheduled immediately under it.
Note that it might be worth moving the ping call into
rxrpc_new_incoming_call() and then we can drop the mutex there.
(6) Make rxrpc_accept_call() take the lock on the call it is accepting and
release the socket after adding the call to the socket's tree. This
is slightly tricky as we've dequeued the call by that point and have
to requeue it.
Note that requeuing emits a trace event.
(7) Make rxrpc_kernel_send_data() and rxrpc_kernel_recv_data() take the
new mutex immediately and don't bother with the socket mutex at all.
This patch has the nice bonus that calls on the same socket are now to some
extent parallelisable.
Note that we might want to move rxrpc_service_prealloc() calls out from the
socket lock and give it its own lock, so that we don't hang progress in
other calls because we're waiting for the allocator.
We probably also want to avoid calling rxrpc_notify_socket() from within
the socket lock (rxrpc_accept_call()).
Signed-off-by: David Howells <dhowells@redhat.com>
Tested-by: Marc Dionne <marc.c.dionne@auristor.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2017-02-27 08:43:06 -07:00
|
|
|
ret = mutex_lock_interruptible(&call->user_mutex);
|
|
|
|
release_sock(&rx->sk);
|
|
|
|
if (ret < 0) {
|
|
|
|
ret = -ERESTARTSYS;
|
|
|
|
goto error_put;
|
|
|
|
}
|
2017-06-07 04:40:03 -07:00
|
|
|
|
2017-11-24 03:18:41 -07:00
|
|
|
if (p.call.tx_total_len != -1) {
|
2017-06-07 04:40:03 -07:00
|
|
|
ret = -EINVAL;
|
|
|
|
if (call->tx_total_len != -1 ||
|
|
|
|
call->tx_pending ||
|
|
|
|
call->tx_top != 0)
|
2022-12-15 09:19:47 -07:00
|
|
|
goto out_put_unlock;
|
2017-11-24 03:18:41 -07:00
|
|
|
call->tx_total_len = p.call.tx_total_len;
|
2017-06-07 04:40:03 -07:00
|
|
|
}
|
2016-09-02 14:39:45 -07:00
|
|
|
}
|
2016-09-02 14:39:45 -07:00
|
|
|
|
2017-11-24 03:18:41 -07:00
|
|
|
switch (p.call.nr_timeouts) {
|
|
|
|
case 3:
|
2024-01-30 14:37:16 -07:00
|
|
|
WRITE_ONCE(call->next_rx_timo, p.call.timeouts.normal);
|
2020-08-23 15:36:59 -07:00
|
|
|
fallthrough;
|
2017-11-24 03:18:41 -07:00
|
|
|
case 2:
|
2024-01-30 14:37:16 -07:00
|
|
|
WRITE_ONCE(call->next_req_timo, p.call.timeouts.idle);
|
2020-08-23 15:36:59 -07:00
|
|
|
fallthrough;
|
2017-11-24 03:18:41 -07:00
|
|
|
case 1:
|
|
|
|
if (p.call.timeouts.hard > 0) {
|
2024-01-30 14:37:16 -07:00
|
|
|
ktime_t delay = ms_to_ktime(p.call.timeouts.hard * MSEC_PER_SEC);
|
|
|
|
|
|
|
|
WRITE_ONCE(call->expect_term_by,
|
|
|
|
ktime_add(ktime_get_real(),
|
|
|
|
delay));
|
|
|
|
trace_rxrpc_timer_set(call, delay, rxrpc_timer_trace_hard);
|
|
|
|
rxrpc_poke_call(call, rxrpc_call_poke_set_timeout);
|
|
|
|
|
2017-11-24 03:18:41 -07:00
|
|
|
}
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
2022-11-11 01:35:36 -07:00
|
|
|
if (rxrpc_call_is_complete(call)) {
|
2016-09-02 14:39:45 -07:00
|
|
|
/* it's too late for this call */
|
|
|
|
ret = -ESHUTDOWN;
|
2017-06-07 06:41:52 -07:00
|
|
|
} else if (p.command == RXRPC_CMD_SEND_ABORT) {
|
2022-10-06 13:45:42 -07:00
|
|
|
rxrpc_propose_abort(call, p.abort_code, -ECONNABORTED,
|
|
|
|
rxrpc_abort_call_sendmsg);
|
2016-09-02 14:39:45 -07:00
|
|
|
ret = 0;
|
2017-06-07 06:41:52 -07:00
|
|
|
} else if (p.command != RXRPC_CMD_SEND_DATA) {
|
2016-09-02 14:39:45 -07:00
|
|
|
ret = -EINVAL;
|
|
|
|
} else {
|
2022-08-24 09:35:45 -07:00
|
|
|
ret = rxrpc_send_data(rx, call, msg, len, NULL, &dropped_lock);
|
2016-09-02 14:39:45 -07:00
|
|
|
}
|
2016-09-02 14:39:45 -07:00
|
|
|
|
2017-11-24 03:18:40 -07:00
|
|
|
out_put_unlock:
|
2022-08-24 09:35:45 -07:00
|
|
|
if (!dropped_lock)
|
|
|
|
mutex_unlock(&call->user_mutex);
|
rxrpc: Fix deadlock between call creation and sendmsg/recvmsg
All the routines by which rxrpc is accessed from the outside are serialised
by means of the socket lock (sendmsg, recvmsg, bind,
rxrpc_kernel_begin_call(), ...) and this presents a problem:
(1) If a number of calls on the same socket are in the process of
connection to the same peer, a maximum of four concurrent live calls
are permitted before further calls need to wait for a slot.
(2) If a call is waiting for a slot, it is deep inside sendmsg() or
rxrpc_kernel_begin_call() and the entry function is holding the socket
lock.
(3) sendmsg() and recvmsg() or the in-kernel equivalents are prevented
from servicing the other calls as they need to take the socket lock to
do so.
(4) The socket is stuck until a call is aborted and makes its slot
available to the waiter.
Fix this by:
(1) Provide each call with a mutex ('user_mutex') that arbitrates access
by the users of rxrpc separately for each specific call.
(2) Make rxrpc_sendmsg() and rxrpc_recvmsg() unlock the socket as soon as
they've got a call and taken its mutex.
Note that I'm returning EWOULDBLOCK from recvmsg() if MSG_DONTWAIT is
set but someone else has the lock. Should I instead only return
EWOULDBLOCK if there's nothing currently to be done on a socket, and
sleep in this particular instance because there is something to be
done, but we appear to be blocked by the interrupt handler doing its
ping?
(3) Make rxrpc_new_client_call() unlock the socket after allocating a new
call, locking its user mutex and adding it to the socket's call tree.
The call is returned locked so that sendmsg() can add data to it
immediately.
From the moment the call is in the socket tree, it is subject to
access by sendmsg() and recvmsg() - even if it isn't connected yet.
(4) Lock new service calls in the UDP data_ready handler (in
rxrpc_new_incoming_call()) because they may already be in the socket's
tree and the data_ready handler makes them live immediately if a user
ID has already been preassigned.
Note that the new call is locked before any notifications are sent
that it is live, so doing mutex_trylock() *ought* to always succeed.
Userspace is prevented from doing sendmsg() on calls that are in a
too-early state in rxrpc_do_sendmsg().
(5) Make rxrpc_new_incoming_call() return the call with the user mutex
held so that a ping can be scheduled immediately under it.
Note that it might be worth moving the ping call into
rxrpc_new_incoming_call() and then we can drop the mutex there.
(6) Make rxrpc_accept_call() take the lock on the call it is accepting and
release the socket after adding the call to the socket's tree. This
is slightly tricky as we've dequeued the call by that point and have
to requeue it.
Note that requeuing emits a trace event.
(7) Make rxrpc_kernel_send_data() and rxrpc_kernel_recv_data() take the
new mutex immediately and don't bother with the socket mutex at all.
This patch has the nice bonus that calls on the same socket are now to some
extent parallelisable.
Note that we might want to move rxrpc_service_prealloc() calls out from the
socket lock and give it its own lock, so that we don't hang progress in
other calls because we're waiting for the allocator.
We probably also want to avoid calling rxrpc_notify_socket() from within
the socket lock (rxrpc_accept_call()).
Signed-off-by: David Howells <dhowells@redhat.com>
Tested-by: Marc Dionne <marc.c.dionne@auristor.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2017-02-27 08:43:06 -07:00
|
|
|
error_put:
|
2022-10-21 06:39:26 -07:00
|
|
|
rxrpc_put_call(call, rxrpc_call_put_sendmsg);
|
2016-09-02 14:39:45 -07:00
|
|
|
_leave(" = %d", ret);
|
|
|
|
return ret;
|
rxrpc: Fix deadlock between call creation and sendmsg/recvmsg
All the routines by which rxrpc is accessed from the outside are serialised
by means of the socket lock (sendmsg, recvmsg, bind,
rxrpc_kernel_begin_call(), ...) and this presents a problem:
(1) If a number of calls on the same socket are in the process of
connection to the same peer, a maximum of four concurrent live calls
are permitted before further calls need to wait for a slot.
(2) If a call is waiting for a slot, it is deep inside sendmsg() or
rxrpc_kernel_begin_call() and the entry function is holding the socket
lock.
(3) sendmsg() and recvmsg() or the in-kernel equivalents are prevented
from servicing the other calls as they need to take the socket lock to
do so.
(4) The socket is stuck until a call is aborted and makes its slot
available to the waiter.
Fix this by:
(1) Provide each call with a mutex ('user_mutex') that arbitrates access
by the users of rxrpc separately for each specific call.
(2) Make rxrpc_sendmsg() and rxrpc_recvmsg() unlock the socket as soon as
they've got a call and taken its mutex.
Note that I'm returning EWOULDBLOCK from recvmsg() if MSG_DONTWAIT is
set but someone else has the lock. Should I instead only return
EWOULDBLOCK if there's nothing currently to be done on a socket, and
sleep in this particular instance because there is something to be
done, but we appear to be blocked by the interrupt handler doing its
ping?
(3) Make rxrpc_new_client_call() unlock the socket after allocating a new
call, locking its user mutex and adding it to the socket's call tree.
The call is returned locked so that sendmsg() can add data to it
immediately.
From the moment the call is in the socket tree, it is subject to
access by sendmsg() and recvmsg() - even if it isn't connected yet.
(4) Lock new service calls in the UDP data_ready handler (in
rxrpc_new_incoming_call()) because they may already be in the socket's
tree and the data_ready handler makes them live immediately if a user
ID has already been preassigned.
Note that the new call is locked before any notifications are sent
that it is live, so doing mutex_trylock() *ought* to always succeed.
Userspace is prevented from doing sendmsg() on calls that are in a
too-early state in rxrpc_do_sendmsg().
(5) Make rxrpc_new_incoming_call() return the call with the user mutex
held so that a ping can be scheduled immediately under it.
Note that it might be worth moving the ping call into
rxrpc_new_incoming_call() and then we can drop the mutex there.
(6) Make rxrpc_accept_call() take the lock on the call it is accepting and
release the socket after adding the call to the socket's tree. This
is slightly tricky as we've dequeued the call by that point and have
to requeue it.
Note that requeuing emits a trace event.
(7) Make rxrpc_kernel_send_data() and rxrpc_kernel_recv_data() take the
new mutex immediately and don't bother with the socket mutex at all.
This patch has the nice bonus that calls on the same socket are now to some
extent parallelisable.
Note that we might want to move rxrpc_service_prealloc() calls out from the
socket lock and give it its own lock, so that we don't hang progress in
other calls because we're waiting for the allocator.
We probably also want to avoid calling rxrpc_notify_socket() from within
the socket lock (rxrpc_accept_call()).
Signed-off-by: David Howells <dhowells@redhat.com>
Tested-by: Marc Dionne <marc.c.dionne@auristor.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2017-02-27 08:43:06 -07:00
|
|
|
|
|
|
|
error_release_sock:
|
|
|
|
release_sock(&rx->sk);
|
|
|
|
return ret;
|
2016-09-02 14:39:45 -07:00
|
|
|
}
|
2016-09-02 14:39:45 -07:00
|
|
|
|
2016-09-02 14:39:45 -07:00
|
|
|
/**
|
|
|
|
* rxrpc_kernel_send_data - Allow a kernel service to send data on a call
|
|
|
|
* @sock: The socket the call is on
|
|
|
|
* @call: The call to send data through
|
|
|
|
* @msg: The data to send
|
|
|
|
* @len: The amount of data to send
|
2017-08-29 02:18:56 -07:00
|
|
|
* @notify_end_tx: Notification that the last packet is queued.
|
2016-09-02 14:39:45 -07:00
|
|
|
*
|
|
|
|
* Allow a kernel service to send data on a call. The call must be in an state
|
|
|
|
* appropriate to sending data. No control data should be supplied in @msg,
|
|
|
|
* nor should an address be supplied. MSG_MORE should be flagged if there's
|
|
|
|
* more data to come, otherwise this data will end the transmission phase.
|
|
|
|
*/
|
|
|
|
int rxrpc_kernel_send_data(struct socket *sock, struct rxrpc_call *call,
|
2017-08-29 02:18:56 -07:00
|
|
|
struct msghdr *msg, size_t len,
|
|
|
|
rxrpc_notify_end_tx_t notify_end_tx)
|
2016-09-02 14:39:45 -07:00
|
|
|
{
|
2022-08-24 09:35:45 -07:00
|
|
|
bool dropped_lock = false;
|
2016-09-02 14:39:45 -07:00
|
|
|
int ret;
|
2016-09-02 14:39:45 -07:00
|
|
|
|
2022-11-11 01:35:36 -07:00
|
|
|
_enter("{%d},", call->debug_id);
|
2016-09-02 14:39:45 -07:00
|
|
|
|
2016-09-02 14:39:45 -07:00
|
|
|
ASSERTCMP(msg->msg_name, ==, NULL);
|
|
|
|
ASSERTCMP(msg->msg_control, ==, NULL);
|
2016-09-02 14:39:45 -07:00
|
|
|
|
rxrpc: Fix deadlock between call creation and sendmsg/recvmsg
All the routines by which rxrpc is accessed from the outside are serialised
by means of the socket lock (sendmsg, recvmsg, bind,
rxrpc_kernel_begin_call(), ...) and this presents a problem:
(1) If a number of calls on the same socket are in the process of
connection to the same peer, a maximum of four concurrent live calls
are permitted before further calls need to wait for a slot.
(2) If a call is waiting for a slot, it is deep inside sendmsg() or
rxrpc_kernel_begin_call() and the entry function is holding the socket
lock.
(3) sendmsg() and recvmsg() or the in-kernel equivalents are prevented
from servicing the other calls as they need to take the socket lock to
do so.
(4) The socket is stuck until a call is aborted and makes its slot
available to the waiter.
Fix this by:
(1) Provide each call with a mutex ('user_mutex') that arbitrates access
by the users of rxrpc separately for each specific call.
(2) Make rxrpc_sendmsg() and rxrpc_recvmsg() unlock the socket as soon as
they've got a call and taken its mutex.
Note that I'm returning EWOULDBLOCK from recvmsg() if MSG_DONTWAIT is
set but someone else has the lock. Should I instead only return
EWOULDBLOCK if there's nothing currently to be done on a socket, and
sleep in this particular instance because there is something to be
done, but we appear to be blocked by the interrupt handler doing its
ping?
(3) Make rxrpc_new_client_call() unlock the socket after allocating a new
call, locking its user mutex and adding it to the socket's call tree.
The call is returned locked so that sendmsg() can add data to it
immediately.
From the moment the call is in the socket tree, it is subject to
access by sendmsg() and recvmsg() - even if it isn't connected yet.
(4) Lock new service calls in the UDP data_ready handler (in
rxrpc_new_incoming_call()) because they may already be in the socket's
tree and the data_ready handler makes them live immediately if a user
ID has already been preassigned.
Note that the new call is locked before any notifications are sent
that it is live, so doing mutex_trylock() *ought* to always succeed.
Userspace is prevented from doing sendmsg() on calls that are in a
too-early state in rxrpc_do_sendmsg().
(5) Make rxrpc_new_incoming_call() return the call with the user mutex
held so that a ping can be scheduled immediately under it.
Note that it might be worth moving the ping call into
rxrpc_new_incoming_call() and then we can drop the mutex there.
(6) Make rxrpc_accept_call() take the lock on the call it is accepting and
release the socket after adding the call to the socket's tree. This
is slightly tricky as we've dequeued the call by that point and have
to requeue it.
Note that requeuing emits a trace event.
(7) Make rxrpc_kernel_send_data() and rxrpc_kernel_recv_data() take the
new mutex immediately and don't bother with the socket mutex at all.
This patch has the nice bonus that calls on the same socket are now to some
extent parallelisable.
Note that we might want to move rxrpc_service_prealloc() calls out from the
socket lock and give it its own lock, so that we don't hang progress in
other calls because we're waiting for the allocator.
We probably also want to avoid calling rxrpc_notify_socket() from within
the socket lock (rxrpc_accept_call()).
Signed-off-by: David Howells <dhowells@redhat.com>
Tested-by: Marc Dionne <marc.c.dionne@auristor.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2017-02-27 08:43:06 -07:00
|
|
|
mutex_lock(&call->user_mutex);
|
2016-09-02 14:39:45 -07:00
|
|
|
|
2022-11-11 01:35:36 -07:00
|
|
|
ret = rxrpc_send_data(rxrpc_sk(sock->sk), call, msg, len,
|
|
|
|
notify_end_tx, &dropped_lock);
|
|
|
|
if (ret == -ESHUTDOWN)
|
2017-08-29 02:18:43 -07:00
|
|
|
ret = call->error;
|
2016-09-02 14:39:45 -07:00
|
|
|
|
2022-08-24 09:35:45 -07:00
|
|
|
if (!dropped_lock)
|
|
|
|
mutex_unlock(&call->user_mutex);
|
2016-09-02 14:39:45 -07:00
|
|
|
_leave(" = %d", ret);
|
|
|
|
return ret;
|
2016-09-02 14:39:45 -07:00
|
|
|
}
|
|
|
|
EXPORT_SYMBOL(rxrpc_kernel_send_data);
|
2016-09-02 14:39:45 -07:00
|
|
|
|
2016-09-02 14:39:45 -07:00
|
|
|
/**
|
|
|
|
* rxrpc_kernel_abort_call - Allow a kernel service to abort a call
|
|
|
|
* @sock: The socket the call is on
|
|
|
|
* @call: The call to be aborted
|
|
|
|
* @abort_code: The abort code to stick into the ABORT packet
|
2016-09-06 14:19:51 -07:00
|
|
|
* @error: Local error value
|
2022-10-06 13:45:42 -07:00
|
|
|
* @why: Indication as to why.
|
2016-09-02 14:39:45 -07:00
|
|
|
*
|
2017-04-06 02:11:59 -07:00
|
|
|
* Allow a kernel service to abort a call, if it's still in an abortable state
|
|
|
|
* and return true if the call was aborted, false if it was already complete.
|
2016-09-02 14:39:45 -07:00
|
|
|
*/
|
2017-04-06 02:11:59 -07:00
|
|
|
bool rxrpc_kernel_abort_call(struct socket *sock, struct rxrpc_call *call,
|
2022-10-06 13:45:42 -07:00
|
|
|
u32 abort_code, int error, enum rxrpc_abort_reason why)
|
2016-09-02 14:39:45 -07:00
|
|
|
{
|
2017-04-06 02:11:59 -07:00
|
|
|
bool aborted;
|
|
|
|
|
2022-10-06 13:45:42 -07:00
|
|
|
_enter("{%d},%d,%d,%u", call->debug_id, abort_code, error, why);
|
2016-09-02 14:39:45 -07:00
|
|
|
|
rxrpc: Fix deadlock between call creation and sendmsg/recvmsg
All the routines by which rxrpc is accessed from the outside are serialised
by means of the socket lock (sendmsg, recvmsg, bind,
rxrpc_kernel_begin_call(), ...) and this presents a problem:
(1) If a number of calls on the same socket are in the process of
connection to the same peer, a maximum of four concurrent live calls
are permitted before further calls need to wait for a slot.
(2) If a call is waiting for a slot, it is deep inside sendmsg() or
rxrpc_kernel_begin_call() and the entry function is holding the socket
lock.
(3) sendmsg() and recvmsg() or the in-kernel equivalents are prevented
from servicing the other calls as they need to take the socket lock to
do so.
(4) The socket is stuck until a call is aborted and makes its slot
available to the waiter.
Fix this by:
(1) Provide each call with a mutex ('user_mutex') that arbitrates access
by the users of rxrpc separately for each specific call.
(2) Make rxrpc_sendmsg() and rxrpc_recvmsg() unlock the socket as soon as
they've got a call and taken its mutex.
Note that I'm returning EWOULDBLOCK from recvmsg() if MSG_DONTWAIT is
set but someone else has the lock. Should I instead only return
EWOULDBLOCK if there's nothing currently to be done on a socket, and
sleep in this particular instance because there is something to be
done, but we appear to be blocked by the interrupt handler doing its
ping?
(3) Make rxrpc_new_client_call() unlock the socket after allocating a new
call, locking its user mutex and adding it to the socket's call tree.
The call is returned locked so that sendmsg() can add data to it
immediately.
From the moment the call is in the socket tree, it is subject to
access by sendmsg() and recvmsg() - even if it isn't connected yet.
(4) Lock new service calls in the UDP data_ready handler (in
rxrpc_new_incoming_call()) because they may already be in the socket's
tree and the data_ready handler makes them live immediately if a user
ID has already been preassigned.
Note that the new call is locked before any notifications are sent
that it is live, so doing mutex_trylock() *ought* to always succeed.
Userspace is prevented from doing sendmsg() on calls that are in a
too-early state in rxrpc_do_sendmsg().
(5) Make rxrpc_new_incoming_call() return the call with the user mutex
held so that a ping can be scheduled immediately under it.
Note that it might be worth moving the ping call into
rxrpc_new_incoming_call() and then we can drop the mutex there.
(6) Make rxrpc_accept_call() take the lock on the call it is accepting and
release the socket after adding the call to the socket's tree. This
is slightly tricky as we've dequeued the call by that point and have
to requeue it.
Note that requeuing emits a trace event.
(7) Make rxrpc_kernel_send_data() and rxrpc_kernel_recv_data() take the
new mutex immediately and don't bother with the socket mutex at all.
This patch has the nice bonus that calls on the same socket are now to some
extent parallelisable.
Note that we might want to move rxrpc_service_prealloc() calls out from the
socket lock and give it its own lock, so that we don't hang progress in
other calls because we're waiting for the allocator.
We probably also want to avoid calling rxrpc_notify_socket() from within
the socket lock (rxrpc_accept_call()).
Signed-off-by: David Howells <dhowells@redhat.com>
Tested-by: Marc Dionne <marc.c.dionne@auristor.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2017-02-27 08:43:06 -07:00
|
|
|
mutex_lock(&call->user_mutex);
|
2022-10-12 14:17:56 -07:00
|
|
|
aborted = rxrpc_propose_abort(call, abort_code, error, why);
|
rxrpc: Fix deadlock between call creation and sendmsg/recvmsg
All the routines by which rxrpc is accessed from the outside are serialised
by means of the socket lock (sendmsg, recvmsg, bind,
rxrpc_kernel_begin_call(), ...) and this presents a problem:
(1) If a number of calls on the same socket are in the process of
connection to the same peer, a maximum of four concurrent live calls
are permitted before further calls need to wait for a slot.
(2) If a call is waiting for a slot, it is deep inside sendmsg() or
rxrpc_kernel_begin_call() and the entry function is holding the socket
lock.
(3) sendmsg() and recvmsg() or the in-kernel equivalents are prevented
from servicing the other calls as they need to take the socket lock to
do so.
(4) The socket is stuck until a call is aborted and makes its slot
available to the waiter.
Fix this by:
(1) Provide each call with a mutex ('user_mutex') that arbitrates access
by the users of rxrpc separately for each specific call.
(2) Make rxrpc_sendmsg() and rxrpc_recvmsg() unlock the socket as soon as
they've got a call and taken its mutex.
Note that I'm returning EWOULDBLOCK from recvmsg() if MSG_DONTWAIT is
set but someone else has the lock. Should I instead only return
EWOULDBLOCK if there's nothing currently to be done on a socket, and
sleep in this particular instance because there is something to be
done, but we appear to be blocked by the interrupt handler doing its
ping?
(3) Make rxrpc_new_client_call() unlock the socket after allocating a new
call, locking its user mutex and adding it to the socket's call tree.
The call is returned locked so that sendmsg() can add data to it
immediately.
From the moment the call is in the socket tree, it is subject to
access by sendmsg() and recvmsg() - even if it isn't connected yet.
(4) Lock new service calls in the UDP data_ready handler (in
rxrpc_new_incoming_call()) because they may already be in the socket's
tree and the data_ready handler makes them live immediately if a user
ID has already been preassigned.
Note that the new call is locked before any notifications are sent
that it is live, so doing mutex_trylock() *ought* to always succeed.
Userspace is prevented from doing sendmsg() on calls that are in a
too-early state in rxrpc_do_sendmsg().
(5) Make rxrpc_new_incoming_call() return the call with the user mutex
held so that a ping can be scheduled immediately under it.
Note that it might be worth moving the ping call into
rxrpc_new_incoming_call() and then we can drop the mutex there.
(6) Make rxrpc_accept_call() take the lock on the call it is accepting and
release the socket after adding the call to the socket's tree. This
is slightly tricky as we've dequeued the call by that point and have
to requeue it.
Note that requeuing emits a trace event.
(7) Make rxrpc_kernel_send_data() and rxrpc_kernel_recv_data() take the
new mutex immediately and don't bother with the socket mutex at all.
This patch has the nice bonus that calls on the same socket are now to some
extent parallelisable.
Note that we might want to move rxrpc_service_prealloc() calls out from the
socket lock and give it its own lock, so that we don't hang progress in
other calls because we're waiting for the allocator.
We probably also want to avoid calling rxrpc_notify_socket() from within
the socket lock (rxrpc_accept_call()).
Signed-off-by: David Howells <dhowells@redhat.com>
Tested-by: Marc Dionne <marc.c.dionne@auristor.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2017-02-27 08:43:06 -07:00
|
|
|
mutex_unlock(&call->user_mutex);
|
2017-04-06 02:11:59 -07:00
|
|
|
return aborted;
|
2016-09-02 14:39:45 -07:00
|
|
|
}
|
2016-09-02 14:39:45 -07:00
|
|
|
EXPORT_SYMBOL(rxrpc_kernel_abort_call);
|
2017-06-07 04:40:03 -07:00
|
|
|
|
|
|
|
/**
|
|
|
|
* rxrpc_kernel_set_tx_length - Set the total Tx length on a call
|
|
|
|
* @sock: The socket the call is on
|
|
|
|
* @call: The call to be informed
|
|
|
|
* @tx_total_len: The amount of data to be transmitted for this call
|
|
|
|
*
|
|
|
|
* Allow a kernel service to set the total transmit length on a call. This
|
|
|
|
* allows buffer-to-packet encrypt-and-copy to be performed.
|
|
|
|
*
|
|
|
|
* This function is primarily for use for setting the reply length since the
|
|
|
|
* request length can be set when beginning the call.
|
|
|
|
*/
|
|
|
|
void rxrpc_kernel_set_tx_length(struct socket *sock, struct rxrpc_call *call,
|
|
|
|
s64 tx_total_len)
|
|
|
|
{
|
|
|
|
WARN_ON(call->tx_total_len != -1);
|
|
|
|
call->tx_total_len = tx_total_len;
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL(rxrpc_kernel_set_tx_length);
|