
printk: nbcon: Add unsafe flushing on panic

Add nbcon_atomic_flush_unsafe() to flush all nbcon consoles
using the write_atomic() callback and allowing unsafe hostile
takeovers. Call this at the end of panic() as a final attempt
to flush any pending messages.

Note that legacy consoles use unsafe methods for flushing
from the beginning of panic (see bust_spinlocks()). Therefore,
systems using both legacy and nbcon consoles may still fail to
see panic messages due to unsafe legacy console usage.

Signed-off-by: John Ogness <john.ogness@linutronix.de>
Reviewed-by: Petr Mladek <pmladek@suse.com>
Link: https://lore.kernel.org/r/20240820063001.36405-27-john.ogness@linutronix.de
Signed-off-by: Petr Mladek <pmladek@suse.com>
commit 5dde3b7354 (parent d2e85ca7a7)
Author:    John Ogness <john.ogness@linutronix.de>
Date:      2024-08-20 08:35:52 +02:06
Committer: Petr Mladek <pmladek@suse.com>

3 changed files with 31 additions and 7 deletions
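
For reference, the "unsafe" regions mentioned above are the ones a driver's write_atomic() callback brackets with the existing nbcon_enter_unsafe()/nbcon_exit_unsafe() helpers. Below is a minimal sketch of such a callback; foo_write_atomic() and foo_emit_chars() are illustrative names only, not part of this patch:

/* Illustrative only: how a driver marks hardware access as unsafe. */
static void foo_write_atomic(struct console *con,
			     struct nbcon_write_context *wctxt)
{
	/* Returns false if ownership was lost; do not touch hardware. */
	if (!nbcon_enter_unsafe(wctxt))
		return;

	/*
	 * Device registers are accessed here. A takeover in this region
	 * could leave the hardware in an inconsistent state, which is
	 * why only a context with allow_unsafe_takeover set (the final
	 * panic flush added by this patch) may seize ownership here.
	 */
	foo_emit_chars(con, wctxt->outbuf, wctxt->len);

	/* Returns false if ownership was (unsafely) taken over. */
	nbcon_exit_unsafe(wctxt);
}

An unsafe hostile takeover ignores this bracketing and seizes ownership anyway, which is acceptable only because the system is about to halt.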

include/linux/printk.h

@@ -202,6 +202,7 @@ void printk_trigger_flush(void);
 void console_try_replay_all(void);
 extern bool nbcon_device_try_acquire(struct console *con);
 extern void nbcon_device_release(struct console *con);
+void nbcon_atomic_flush_unsafe(void);
 #else
 static inline __printf(1, 0)
 int vprintk(const char *s, va_list args)
@@ -294,6 +295,10 @@ static inline void nbcon_device_release(struct console *con)
 {
 }
 
+static inline void nbcon_atomic_flush_unsafe(void)
+{
+}
+
 #endif
 
 bool this_cpu_in_panic(void);

kernel/panic.c

@@ -463,6 +463,7 @@ void panic(const char *fmt, ...)
 	 * Explicitly flush the kernel log buffer one last time.
 	 */
 	console_flush_on_panic(CONSOLE_FLUSH_PENDING);
+	nbcon_atomic_flush_unsafe();
 
 	local_irq_enable();
 	for (i = 0; ; i += PANIC_TIMER_STEP) {

kernel/printk/nbcon.c

@@ -1083,6 +1083,7 @@ bool nbcon_legacy_emit_next_record(struct console *con, bool *handover,
  *					write_atomic() callback
  * @con:			The nbcon console to flush
  * @stop_seq:			Flush up until this record
+ * @allow_unsafe_takeover:	True, to allow unsafe hostile takeovers
  *
  * Return:	0 if @con was flushed up to @stop_seq. Otherwise, error code on
  *		failure.
@ -1101,7 +1102,8 @@ bool nbcon_legacy_emit_next_record(struct console *con, bool *handover,
* returned, it cannot be expected that the unfinalized record will become
* available.
*/
static int __nbcon_atomic_flush_pending_con(struct console *con, u64 stop_seq)
static int __nbcon_atomic_flush_pending_con(struct console *con, u64 stop_seq,
bool allow_unsafe_takeover)
{
struct nbcon_write_context wctxt = { };
struct nbcon_context *ctxt = &ACCESS_PRIVATE(&wctxt, ctxt);
@@ -1110,6 +1112,7 @@ static int __nbcon_atomic_flush_pending_con(struct console *con, u64 stop_seq)
 	ctxt->console			= con;
 	ctxt->spinwait_max_us		= 2000;
 	ctxt->prio			= nbcon_get_default_prio();
+	ctxt->allow_unsafe_takeover	= allow_unsafe_takeover;
 
 	if (!nbcon_context_try_acquire(ctxt))
 		return -EPERM;
@@ -1140,13 +1143,15 @@ static int __nbcon_atomic_flush_pending_con(struct console *con, u64 stop_seq)
  *					write_atomic() callback
  * @con:			The nbcon console to flush
  * @stop_seq:			Flush up until this record
+ * @allow_unsafe_takeover:	True, to allow unsafe hostile takeovers
  *
  * This will stop flushing before @stop_seq if another context has ownership.
  * That context is then responsible for the flushing. Likewise, if new records
  * are added while this context was flushing and there is no other context
  * to handle the printing, this context must also flush those records.
  */
-static void nbcon_atomic_flush_pending_con(struct console *con, u64 stop_seq)
+static void nbcon_atomic_flush_pending_con(struct console *con, u64 stop_seq,
+					   bool allow_unsafe_takeover)
 {
 	unsigned long flags;
 	int err;
@@ -1160,7 +1165,7 @@ again:
 	 */
 	local_irq_save(flags);
 
-	err = __nbcon_atomic_flush_pending_con(con, stop_seq);
+	err = __nbcon_atomic_flush_pending_con(con, stop_seq, allow_unsafe_takeover);
 
 	local_irq_restore(flags);
 
@@ -1190,8 +1195,9 @@ again:
  * __nbcon_atomic_flush_pending - Flush all nbcon consoles using their
  *					write_atomic() callback
  * @stop_seq:			Flush up until this record
+ * @allow_unsafe_takeover:	True, to allow unsafe hostile takeovers
  */
-static void __nbcon_atomic_flush_pending(u64 stop_seq)
+static void __nbcon_atomic_flush_pending(u64 stop_seq, bool allow_unsafe_takeover)
 {
 	struct console *con;
 	int cookie;
@@ -1209,7 +1215,7 @@ static void __nbcon_atomic_flush_pending(u64 stop_seq)
 		if (nbcon_seq_read(con) >= stop_seq)
 			continue;
 
-		nbcon_atomic_flush_pending_con(con, stop_seq);
+		nbcon_atomic_flush_pending_con(con, stop_seq, allow_unsafe_takeover);
 	}
 	console_srcu_read_unlock(cookie);
 }
@@ -1225,7 +1231,19 @@ static void __nbcon_atomic_flush_pending(u64 stop_seq)
  */
 void nbcon_atomic_flush_pending(void)
 {
-	__nbcon_atomic_flush_pending(prb_next_reserve_seq(prb));
+	__nbcon_atomic_flush_pending(prb_next_reserve_seq(prb), false);
+}
+
+/**
+ * nbcon_atomic_flush_unsafe - Flush all nbcon consoles using their
+ *	write_atomic() callback and allowing unsafe hostile takeovers
+ *
+ * Flush the backlog up through the currently newest record. Unsafe hostile
+ * takeovers will be performed, if necessary.
+ */
+void nbcon_atomic_flush_unsafe(void)
+{
+	__nbcon_atomic_flush_pending(prb_next_reserve_seq(prb), true);
 }
 
 /**
@@ -1342,7 +1360,7 @@ void nbcon_device_release(struct console *con)
 	if (console_is_usable(con, console_srcu_read_flags(con)) &&
 	    prb_read_valid(prb, nbcon_seq_read(con), NULL)) {
 		if (!have_boot_console) {
-			__nbcon_atomic_flush_pending_con(con, prb_next_reserve_seq(prb));
+			__nbcon_atomic_flush_pending_con(con, prb_next_reserve_seq(prb), false);
 		} else if (!is_printk_legacy_deferred()) {
 			if (console_trylock())
 				console_unlock();
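
Taken together with the kernel/panic.c hunk above, the end-of-panic flush ordering comes out roughly as follows. This is a sketch, not the literal panic() code; example_panic_flush_tail() is an illustrative name:

/* Sketch of the end-of-panic flush ordering after this patch. */
static void example_panic_flush_tail(void)
{
	/*
	 * First attempt: flush legacy and nbcon consoles while still
	 * honoring console ownership and driver unsafe regions.
	 */
	console_flush_on_panic(CONSOLE_FLUSH_PENDING);

	/*
	 * Last resort: flush nbcon consoles again, this time seizing
	 * ownership even inside unsafe regions. The console state may
	 * be corrupted afterwards, but nothing else will print anyway.
	 */
	nbcon_atomic_flush_unsafe();
}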