1

printk: nbcon: Rely on kthreads for normal operation

Once the kthread is running and available
(i.e. @printk_kthreads_running is set), the kthread becomes
responsible for flushing any pending messages which are added
in NBCON_PRIO_NORMAL context. Namely the legacy
console_flush_all() and device_release() no longer flush the
console. And nbcon_atomic_flush_pending() used by
nbcon_cpu_emergency_exit() no longer flushes messages added
after the emergency messages.

The console context is safe when used by the kthread only when
one of the following conditions is true:

  1. Another caller acquires the console context with
     NBCON_PRIO_NORMAL with preemption disabled. It will
     release the context before rescheduling.

  2. Another caller acquires the console context with
     NBCON_PRIO_NORMAL under the device_lock.

  3. The kthread is the only context which acquires the console
     with NBCON_PRIO_NORMAL.

This is satisfied for all atomic printing call sites:

nbcon_legacy_emit_next_record() (#1)

nbcon_atomic_flush_pending_con() (#1)

nbcon_device_release() (#2)

It is even double guaranteed when @printk_kthreads_running
is set because then _only_ the kthread will print for
NBCON_PRIO_NORMAL. (#3)

Signed-off-by: John Ogness <john.ogness@linutronix.de>
Reviewed-by: Petr Mladek <pmladek@suse.com>
Link: https://lore.kernel.org/r/20240904120536.115780-10-john.ogness@linutronix.de
Signed-off-by: Petr Mladek <pmladek@suse.com>
This commit is contained in:
John Ogness 2024-09-04 14:11:28 +02:00 committed by Petr Mladek
parent 5c586baa60
commit 13189fa73a
3 changed files with 84 additions and 7 deletions

View File

@ -113,6 +113,13 @@ static inline bool console_is_usable(struct console *con, short flags, bool use_
/* The write_atomic() callback is optional. */ /* The write_atomic() callback is optional. */
if (use_atomic && !con->write_atomic) if (use_atomic && !con->write_atomic)
return false; return false;
/*
* For the !use_atomic case, @printk_kthreads_running is not
* checked because the write_thread() callback is also used
* via the legacy loop when the printer threads are not
* available.
*/
} else { } else {
if (!con->write) if (!con->write)
return false; return false;
@ -176,6 +183,7 @@ static inline void nbcon_atomic_flush_pending(void) { }
static inline bool nbcon_legacy_emit_next_record(struct console *con, bool *handover, static inline bool nbcon_legacy_emit_next_record(struct console *con, bool *handover,
int cookie, bool use_atomic) { return false; } int cookie, bool use_atomic) { return false; }
static inline void nbcon_kthread_wake(struct console *con) { } static inline void nbcon_kthread_wake(struct console *con) { }
static inline void nbcon_kthreads_wake(void) { }
static inline bool console_is_usable(struct console *con, short flags, static inline bool console_is_usable(struct console *con, short flags,
bool use_atomic) { return false; } bool use_atomic) { return false; }
@ -190,6 +198,7 @@ extern bool legacy_allow_panic_sync;
/** /**
* struct console_flush_type - Define available console flush methods * struct console_flush_type - Define available console flush methods
* @nbcon_atomic: Flush directly using nbcon_atomic() callback * @nbcon_atomic: Flush directly using nbcon_atomic() callback
* @nbcon_offload: Offload flush to printer thread
* @legacy_direct: Call the legacy loop in this context * @legacy_direct: Call the legacy loop in this context
* @legacy_offload: Offload the legacy loop into IRQ * @legacy_offload: Offload the legacy loop into IRQ
* *
@ -197,6 +206,7 @@ extern bool legacy_allow_panic_sync;
*/ */
struct console_flush_type { struct console_flush_type {
bool nbcon_atomic; bool nbcon_atomic;
bool nbcon_offload;
bool legacy_direct; bool legacy_direct;
bool legacy_offload; bool legacy_offload;
}; };
@ -211,6 +221,22 @@ static inline void printk_get_console_flush_type(struct console_flush_type *ft)
switch (nbcon_get_default_prio()) { switch (nbcon_get_default_prio()) {
case NBCON_PRIO_NORMAL: case NBCON_PRIO_NORMAL:
if (have_nbcon_console && !have_boot_console) {
if (printk_kthreads_running)
ft->nbcon_offload = true;
else
ft->nbcon_atomic = true;
}
/* Legacy consoles are flushed directly when possible. */
if (have_legacy_console || have_boot_console) {
if (!is_printk_legacy_deferred())
ft->legacy_direct = true;
else
ft->legacy_offload = true;
}
break;
case NBCON_PRIO_EMERGENCY: case NBCON_PRIO_EMERGENCY:
if (have_nbcon_console && !have_boot_console) if (have_nbcon_console && !have_boot_console)
ft->nbcon_atomic = true; ft->nbcon_atomic = true;

View File

@ -1494,6 +1494,7 @@ static int __nbcon_atomic_flush_pending_con(struct console *con, u64 stop_seq,
static void nbcon_atomic_flush_pending_con(struct console *con, u64 stop_seq, static void nbcon_atomic_flush_pending_con(struct console *con, u64 stop_seq,
bool allow_unsafe_takeover) bool allow_unsafe_takeover)
{ {
struct console_flush_type ft;
unsigned long flags; unsigned long flags;
int err; int err;
@ -1523,10 +1524,12 @@ again:
/* /*
* If flushing was successful but more records are available, this * If flushing was successful but more records are available, this
* context must flush those remaining records because there is no * context must flush those remaining records if the printer thread
* other context that will do it. * is not available do it.
*/ */
if (prb_read_valid(prb, nbcon_seq_read(con), NULL)) { printk_get_console_flush_type(&ft);
if (!ft.nbcon_offload &&
prb_read_valid(prb, nbcon_seq_read(con), NULL)) {
stop_seq = prb_next_reserve_seq(prb); stop_seq = prb_next_reserve_seq(prb);
goto again; goto again;
} }
@ -1754,17 +1757,19 @@ void nbcon_device_release(struct console *con)
/* /*
* This context must flush any new records added while the console * This context must flush any new records added while the console
* was locked. The console_srcu_read_lock must be taken to ensure * was locked if the printer thread is not available to do it. The
* the console is usable throughout flushing. * console_srcu_read_lock must be taken to ensure the console is
* usable throughout flushing.
*/ */
cookie = console_srcu_read_lock(); cookie = console_srcu_read_lock();
printk_get_console_flush_type(&ft);
if (console_is_usable(con, console_srcu_read_flags(con), true) && if (console_is_usable(con, console_srcu_read_flags(con), true) &&
!ft.nbcon_offload &&
prb_read_valid(prb, nbcon_seq_read(con), NULL)) { prb_read_valid(prb, nbcon_seq_read(con), NULL)) {
/* /*
* If nbcon_atomic flushing is not available, fallback to * If nbcon_atomic flushing is not available, fallback to
* using the legacy loop. * using the legacy loop.
*/ */
printk_get_console_flush_type(&ft);
if (ft.nbcon_atomic) { if (ft.nbcon_atomic) {
__nbcon_atomic_flush_pending_con(con, prb_next_reserve_seq(prb), false); __nbcon_atomic_flush_pending_con(con, prb_next_reserve_seq(prb), false);
} else if (ft.legacy_direct) { } else if (ft.legacy_direct) {

View File

@ -2384,6 +2384,9 @@ asmlinkage int vprintk_emit(int facility, int level,
if (ft.nbcon_atomic) if (ft.nbcon_atomic)
nbcon_atomic_flush_pending(); nbcon_atomic_flush_pending();
if (ft.nbcon_offload)
nbcon_kthreads_wake();
if (ft.legacy_direct) { if (ft.legacy_direct) {
/* /*
* The caller may be holding system-critical or * The caller may be holding system-critical or
@ -2732,6 +2735,7 @@ void suspend_console(void)
void resume_console(void) void resume_console(void)
{ {
struct console_flush_type ft;
struct console *con; struct console *con;
if (!console_suspend_enabled) if (!console_suspend_enabled)
@ -2749,6 +2753,10 @@ void resume_console(void)
*/ */
synchronize_srcu(&console_srcu); synchronize_srcu(&console_srcu);
printk_get_console_flush_type(&ft);
if (ft.nbcon_offload)
nbcon_kthreads_wake();
pr_flush(1000, true); pr_flush(1000, true);
} }
@ -3060,6 +3068,7 @@ static inline void printk_kthreads_check_locked(void) { }
*/ */
static bool console_flush_all(bool do_cond_resched, u64 *next_seq, bool *handover) static bool console_flush_all(bool do_cond_resched, u64 *next_seq, bool *handover)
{ {
struct console_flush_type ft;
bool any_usable = false; bool any_usable = false;
struct console *con; struct console *con;
bool any_progress; bool any_progress;
@ -3071,12 +3080,22 @@ static bool console_flush_all(bool do_cond_resched, u64 *next_seq, bool *handove
do { do {
any_progress = false; any_progress = false;
printk_get_console_flush_type(&ft);
cookie = console_srcu_read_lock(); cookie = console_srcu_read_lock();
for_each_console_srcu(con) { for_each_console_srcu(con) {
short flags = console_srcu_read_flags(con); short flags = console_srcu_read_flags(con);
u64 printk_seq; u64 printk_seq;
bool progress; bool progress;
/*
* console_flush_all() is only responsible for nbcon
* consoles when the nbcon consoles cannot print via
* their atomic or threaded flushing.
*/
if ((flags & CON_NBCON) && (ft.nbcon_atomic || ft.nbcon_offload))
continue;
if (!console_is_usable(con, flags, !do_cond_resched)) if (!console_is_usable(con, flags, !do_cond_resched))
continue; continue;
any_usable = true; any_usable = true;
@ -3387,9 +3406,25 @@ EXPORT_SYMBOL(console_stop);
void console_start(struct console *console) void console_start(struct console *console)
{ {
struct console_flush_type ft;
bool is_nbcon;
console_list_lock(); console_list_lock();
console_srcu_write_flags(console, console->flags | CON_ENABLED); console_srcu_write_flags(console, console->flags | CON_ENABLED);
is_nbcon = console->flags & CON_NBCON;
console_list_unlock(); console_list_unlock();
/*
* Ensure that all SRCU list walks have completed. The related
* printing context must be able to see it is enabled so that
* it is guaranteed to wake up and resume printing.
*/
synchronize_srcu(&console_srcu);
printk_get_console_flush_type(&ft);
if (is_nbcon && ft.nbcon_offload)
nbcon_kthread_wake(console);
__pr_flush(console, 1000, true); __pr_flush(console, 1000, true);
} }
EXPORT_SYMBOL(console_start); EXPORT_SYMBOL(console_start);
@ -4115,6 +4150,8 @@ static bool __pr_flush(struct console *con, int timeout_ms, bool reset_on_progre
/* Flush the consoles so that records up to @seq are printed. */ /* Flush the consoles so that records up to @seq are printed. */
printk_get_console_flush_type(&ft); printk_get_console_flush_type(&ft);
if (ft.nbcon_atomic)
nbcon_atomic_flush_pending();
if (ft.legacy_direct) { if (ft.legacy_direct) {
console_lock(); console_lock();
console_unlock(); console_unlock();
@ -4152,8 +4189,10 @@ static bool __pr_flush(struct console *con, int timeout_ms, bool reset_on_progre
* that they make forward progress, so only increment * that they make forward progress, so only increment
* @diff for usable consoles. * @diff for usable consoles.
*/ */
if (!console_is_usable(c, flags, true)) if (!console_is_usable(c, flags, true) &&
!console_is_usable(c, flags, false)) {
continue; continue;
}
if (flags & CON_NBCON) { if (flags & CON_NBCON) {
printk_seq = nbcon_seq_read(c); printk_seq = nbcon_seq_read(c);
@ -4629,8 +4668,15 @@ EXPORT_SYMBOL_GPL(kmsg_dump_rewind);
*/ */
void console_try_replay_all(void) void console_try_replay_all(void)
{ {
struct console_flush_type ft;
printk_get_console_flush_type(&ft);
if (console_trylock()) { if (console_trylock()) {
__console_rewind_all(); __console_rewind_all();
if (ft.nbcon_atomic)
nbcon_atomic_flush_pending();
if (ft.nbcon_offload)
nbcon_kthreads_wake();
/* Consoles are flushed as part of console_unlock(). */ /* Consoles are flushed as part of console_unlock(). */
console_unlock(); console_unlock();
} }