cb41b195e6
pr_debug() calls have been added in various places in the MPTCP code to
help developers debug some situations. With the dynamic debug feature,
it is easy to enable all or some of them, and to ask users to reproduce
issues with extra debug output.
Many of these pr_debug() calls do not end with a new line, yet no
pr_cont() is used in the MPTCP code. The goal was therefore never to
display multiple debug messages on one line: the missing '\n' was not
intentional.
Without the trailing new line, these messages are only printed later,
when something else needs to be printed. The issue is not visible when
many messages are printed, but it is annoying and confusing when only
specific messages are expected, e.g.
# echo "func mptcp_pm_add_addr_echoed +fmp" \
> /sys/kernel/debug/dynamic_debug/control
# ./mptcp_join.sh "signal address"; \
echo "$(awk '{print $1}' /proc/uptime) - end"; \
sleep 5s; \
echo "$(awk '{print $1}' /proc/uptime) - restart"; \
./mptcp_join.sh "signal address"
013 signal address
(...)
10.75 - end
15.76 - restart
013 signal address
[ 10.367935] mptcp:mptcp_pm_add_addr_echoed: MPTCP: msk=(...)
(...)
=> a delay of 5 seconds: the message carries a 10.36 timestamp, but it
is only printed after 'restart', which was printed at the 15.76
timestamp.
The 'Fixes' tag below points to the first pr_debug() used without '\n'
in net/mptcp. This patch could be split into many small ones, each with
a different Fixes tag, but that does not seem worth it, because the
whole patch can easily be re-generated with this simple 'sed' command:
git grep -l pr_debug -- net/mptcp |
xargs sed -i "s/\(pr_debug(\".*[^n]\)\(\"[,)]\)/\1\\\n\2/g"
So in case of conflicts, simply drop the modifications and run this
command again.
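As a purely illustrative example (the format string below is made up,
not a specific call site), the transformation performed by this command
turns a call such as

	pr_debug("msk=%p ssk=%p", msk, ssk);

into

	pr_debug("msk=%p ssk=%p\n", msk, ssk);

i.e. it only appends '\n' inside the format string of each pr_debug()
that does not already end with one.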
Fixes: f870fa0b57 ("mptcp: Add MPTCP socket stubs")
Cc: stable@vger.kernel.org
Reviewed-by: Geliang Tang <geliang@kernel.org>
Signed-off-by: Matthieu Baerts (NGI0) <matttbe@kernel.org>
Link: https://patch.msgid.link/20240826-net-mptcp-close-extra-sf-fin-v1-4-905199fe1172@kernel.org
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
196 lines · 4.3 KiB · C
// SPDX-License-Identifier: GPL-2.0
/* Multipath TCP
 *
 * Copyright (c) 2022, SUSE.
 */

#define pr_fmt(fmt) "MPTCP: " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/rculist.h>
#include <linux/spinlock.h>
#include "protocol.h"

static DEFINE_SPINLOCK(mptcp_sched_list_lock);
static LIST_HEAD(mptcp_sched_list);

static int mptcp_sched_default_get_subflow(struct mptcp_sock *msk,
					    struct mptcp_sched_data *data)
{
	struct sock *ssk;

	ssk = data->reinject ? mptcp_subflow_get_retrans(msk) :
			       mptcp_subflow_get_send(msk);
	if (!ssk)
		return -EINVAL;

	mptcp_subflow_set_scheduled(mptcp_subflow_ctx(ssk), true);
	return 0;
}

static struct mptcp_sched_ops mptcp_sched_default = {
	.get_subflow	= mptcp_sched_default_get_subflow,
	.name		= "default",
	.owner		= THIS_MODULE,
};

/* Must be called with rcu read lock held */
struct mptcp_sched_ops *mptcp_sched_find(const char *name)
{
	struct mptcp_sched_ops *sched, *ret = NULL;

	list_for_each_entry_rcu(sched, &mptcp_sched_list, list) {
		if (!strcmp(sched->name, name)) {
			ret = sched;
			break;
		}
	}

	return ret;
}

/* Build string with list of available scheduler values.
 * Similar to tcp_get_available_congestion_control()
 */
void mptcp_get_available_schedulers(char *buf, size_t maxlen)
{
	struct mptcp_sched_ops *sched;
	size_t offs = 0;

	rcu_read_lock();
	spin_lock(&mptcp_sched_list_lock);
	list_for_each_entry_rcu(sched, &mptcp_sched_list, list) {
		offs += snprintf(buf + offs, maxlen - offs,
				 "%s%s",
				 offs == 0 ? "" : " ", sched->name);

		if (WARN_ON_ONCE(offs >= maxlen))
			break;
	}
	spin_unlock(&mptcp_sched_list_lock);
	rcu_read_unlock();
}

int mptcp_register_scheduler(struct mptcp_sched_ops *sched)
{
	if (!sched->get_subflow)
		return -EINVAL;

	spin_lock(&mptcp_sched_list_lock);
	if (mptcp_sched_find(sched->name)) {
		spin_unlock(&mptcp_sched_list_lock);
		return -EEXIST;
	}
	list_add_tail_rcu(&sched->list, &mptcp_sched_list);
	spin_unlock(&mptcp_sched_list_lock);

	pr_debug("%s registered\n", sched->name);
	return 0;
}

void mptcp_unregister_scheduler(struct mptcp_sched_ops *sched)
{
	if (sched == &mptcp_sched_default)
		return;

	spin_lock(&mptcp_sched_list_lock);
	list_del_rcu(&sched->list);
	spin_unlock(&mptcp_sched_list_lock);
}

void mptcp_sched_init(void)
{
	mptcp_register_scheduler(&mptcp_sched_default);
}

int mptcp_init_sched(struct mptcp_sock *msk,
		     struct mptcp_sched_ops *sched)
{
	if (!sched)
		sched = &mptcp_sched_default;

	if (!bpf_try_module_get(sched, sched->owner))
		return -EBUSY;

	msk->sched = sched;
	if (msk->sched->init)
		msk->sched->init(msk);

	pr_debug("sched=%s\n", msk->sched->name);

	return 0;
}

void mptcp_release_sched(struct mptcp_sock *msk)
{
	struct mptcp_sched_ops *sched = msk->sched;

	if (!sched)
		return;

	msk->sched = NULL;
	if (sched->release)
		sched->release(msk);

	bpf_module_put(sched, sched->owner);
}

void mptcp_subflow_set_scheduled(struct mptcp_subflow_context *subflow,
				 bool scheduled)
{
	WRITE_ONCE(subflow->scheduled, scheduled);
}

int mptcp_sched_get_send(struct mptcp_sock *msk)
{
	struct mptcp_subflow_context *subflow;
	struct mptcp_sched_data data;

	msk_owned_by_me(msk);

	/* the following check is moved out of mptcp_subflow_get_send */
	if (__mptcp_check_fallback(msk)) {
		if (msk->first &&
		    __tcp_can_send(msk->first) &&
		    sk_stream_memory_free(msk->first)) {
			mptcp_subflow_set_scheduled(mptcp_subflow_ctx(msk->first), true);
			return 0;
		}
		return -EINVAL;
	}

	mptcp_for_each_subflow(msk, subflow) {
		if (READ_ONCE(subflow->scheduled))
			return 0;
	}

	data.reinject = false;
	if (msk->sched == &mptcp_sched_default || !msk->sched)
		return mptcp_sched_default_get_subflow(msk, &data);
	return msk->sched->get_subflow(msk, &data);
}

int mptcp_sched_get_retrans(struct mptcp_sock *msk)
{
	struct mptcp_subflow_context *subflow;
	struct mptcp_sched_data data;

	msk_owned_by_me(msk);

	/* the following check is moved out of mptcp_subflow_get_retrans */
	if (__mptcp_check_fallback(msk))
		return -EINVAL;

	mptcp_for_each_subflow(msk, subflow) {
		if (READ_ONCE(subflow->scheduled))
			return 0;
	}

	data.reinject = true;
	if (msk->sched == &mptcp_sched_default || !msk->sched)
		return mptcp_sched_default_get_subflow(msk, &data);
	return msk->sched->get_subflow(msk, &data);
}