// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/kernel/printk.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 * Modified to make sys_syslog() more flexible: added commands to
 * return the last 4k of kernel messages, regardless of whether
 * they've been read or not.  Added option to suppress kernel printk's
 * to the console.  Added hook for sending the console messages
 * elsewhere, in preparation for a serial line console (someday).
 * Ted Ts'o, 2/11/93.
 * Modified for sysctl support, 1/8/97, Chris Horn.
 * Fixed SMP synchronization, 08/08/99, Manfred Spraul
 *     manfred@colorfullife.com
 * Rewrote bits to get rid of console_lock
 *	01Mar01 Andrew Morton
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/tty.h>
#include <linux/tty_driver.h>
#include <linux/console.h>
#include <linux/init.h>
#include <linux/jiffies.h>
#include <linux/nmi.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/delay.h>
#include <linux/smp.h>
#include <linux/security.h>
#include <linux/memblock.h>
#include <linux/syscalls.h>
#include <linux/syscore_ops.h>
#include <linux/vmcore_info.h>
#include <linux/ratelimit.h>
#include <linux/kmsg_dump.h>
#include <linux/syslog.h>
#include <linux/cpu.h>
#include <linux/rculist.h>
#include <linux/poll.h>
#include <linux/irq_work.h>
#include <linux/ctype.h>
#include <linux/uio.h>
#include <linux/sched/clock.h>
#include <linux/sched/debug.h>
#include <linux/sched/task_stack.h>

#include <linux/uaccess.h>
#include <asm/sections.h>

#include <trace/events/initcall.h>
#define CREATE_TRACE_POINTS
#include <trace/events/printk.h>
#undef CREATE_TRACE_POINTS
#include <trace/hooks/printk.h>
#include <trace/hooks/logbuf.h>

#include "printk_ringbuffer.h"
#include "console_cmdline.h"
#include "braille.h"
#include "internal.h"

int console_printk[4] = {
	CONSOLE_LOGLEVEL_DEFAULT,	/* console_loglevel */
	MESSAGE_LOGLEVEL_DEFAULT,	/* default_message_loglevel */
	CONSOLE_LOGLEVEL_MIN,		/* minimum_console_loglevel */
	CONSOLE_LOGLEVEL_DEFAULT,	/* default_console_loglevel */
};
EXPORT_SYMBOL_GPL(console_printk);

atomic_t ignore_console_lock_warning __read_mostly = ATOMIC_INIT(0);
EXPORT_SYMBOL(ignore_console_lock_warning);

EXPORT_TRACEPOINT_SYMBOL_GPL(console);

/*
 * Low level drivers may need that to know if they can schedule in
 * their unblank() callback or not. So let's export it.
 */
int oops_in_progress;
EXPORT_SYMBOL(oops_in_progress);

/*
 * console_mutex protects console_list updates and console->flags updates.
 * The flags are synchronized only for consoles that are registered, i.e.
 * accessible via the console list.
 */
static DEFINE_MUTEX(console_mutex);

/*
 * console_sem protects updates to console->seq
 * and also provides serialization for console printing.
 */
static DEFINE_SEMAPHORE(console_sem, 1);
HLIST_HEAD(console_list);
EXPORT_SYMBOL_GPL(console_list);
DEFINE_STATIC_SRCU(console_srcu);

/*
 * System may need to suppress printk message under certain
 * circumstances, like after kernel panic happens.
 */
int __read_mostly suppress_printk;

#ifdef CONFIG_LOCKDEP
static struct lockdep_map console_lock_dep_map = {
	.name = "console_lock"
};

void lockdep_assert_console_list_lock_held(void)
{
	lockdep_assert_held(&console_mutex);
}
EXPORT_SYMBOL(lockdep_assert_console_list_lock_held);
#endif

#ifdef CONFIG_DEBUG_LOCK_ALLOC
bool console_srcu_read_lock_is_held(void)
{
	return srcu_read_lock_held(&console_srcu);
}
EXPORT_SYMBOL(console_srcu_read_lock_is_held);
#endif

enum devkmsg_log_bits {
	__DEVKMSG_LOG_BIT_ON = 0,
	__DEVKMSG_LOG_BIT_OFF,
	__DEVKMSG_LOG_BIT_LOCK,
};

enum devkmsg_log_masks {
	DEVKMSG_LOG_MASK_ON	= BIT(__DEVKMSG_LOG_BIT_ON),
	DEVKMSG_LOG_MASK_OFF	= BIT(__DEVKMSG_LOG_BIT_OFF),
	DEVKMSG_LOG_MASK_LOCK	= BIT(__DEVKMSG_LOG_BIT_LOCK),
};

/* Keep both the 'on' and 'off' bits clear, i.e. ratelimit by default: */
#define DEVKMSG_LOG_MASK_DEFAULT	0

static unsigned int __read_mostly devkmsg_log = DEVKMSG_LOG_MASK_DEFAULT;

static int __control_devkmsg(char *str)
{
	size_t len;

	if (!str)
		return -EINVAL;

	len = str_has_prefix(str, "on");
	if (len) {
		devkmsg_log = DEVKMSG_LOG_MASK_ON;
		return len;
	}

	len = str_has_prefix(str, "off");
	if (len) {
		devkmsg_log = DEVKMSG_LOG_MASK_OFF;
		return len;
	}

	len = str_has_prefix(str, "ratelimit");
	if (len) {
		devkmsg_log = DEVKMSG_LOG_MASK_DEFAULT;
		return len;
	}

	return -EINVAL;
}

static int __init control_devkmsg(char *str)
{
	if (__control_devkmsg(str) < 0) {
		pr_warn("printk.devkmsg: bad option string '%s'\n", str);
		return 1;
	}

	/*
	 * Set sysctl string accordingly:
	 */
	if (devkmsg_log == DEVKMSG_LOG_MASK_ON)
		strscpy(devkmsg_log_str, "on");
	else if (devkmsg_log == DEVKMSG_LOG_MASK_OFF)
		strscpy(devkmsg_log_str, "off");
	/* else "ratelimit" which is set by default. */

	/*
	 * Sysctl cannot change it anymore. The kernel command line setting of
	 * this parameter is to force the setting to be permanent throughout the
	 * runtime of the system. This is a precaution measure against userspace
	 * trying to be a smarta** and attempting to change it up on us.
	 */
	devkmsg_log |= DEVKMSG_LOG_MASK_LOCK;

	return 1;
}
__setup("printk.devkmsg=", control_devkmsg);
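
/*
 * Usage sketch (illustrative, not compiled): the policy above is selected
 * on the kernel command line, e.g.:
 *
 *	printk.devkmsg=on		never ratelimit /dev/kmsg writers
 *	printk.devkmsg=off		silently drop userspace injections
 *	printk.devkmsg=ratelimit	the default behavior
 *
 * Once set on the command line, DEVKMSG_LOG_MASK_LOCK prevents any later
 * change through the printk_devkmsg sysctl handled below.
 */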

char devkmsg_log_str[DEVKMSG_STR_MAX_SIZE] = "ratelimit";
#if defined(CONFIG_PRINTK) && defined(CONFIG_SYSCTL)
int devkmsg_sysctl_set_loglvl(const struct ctl_table *table, int write,
			      void *buffer, size_t *lenp, loff_t *ppos)
{
	char old_str[DEVKMSG_STR_MAX_SIZE];
	unsigned int old;
	int err;

	if (write) {
		if (devkmsg_log & DEVKMSG_LOG_MASK_LOCK)
			return -EINVAL;

		old = devkmsg_log;
		strscpy(old_str, devkmsg_log_str);
	}

	err = proc_dostring(table, write, buffer, lenp, ppos);
	if (err)
		return err;

	if (write) {
		err = __control_devkmsg(devkmsg_log_str);

		/*
		 * Do not accept an unknown string OR a known string with
		 * trailing crap...
		 */
		if (err < 0 || (err + 1 != *lenp)) {

			/* ... and restore old setting. */
			devkmsg_log = old;
			strscpy(devkmsg_log_str, old_str);

			return -EINVAL;
		}
	}

	return 0;
}
#endif /* CONFIG_PRINTK && CONFIG_SYSCTL */

/**
 * console_list_lock - Lock the console list
 *
 * For console list or console->flags updates
 */
void console_list_lock(void)
{
	/*
	 * In unregister_console() and console_force_preferred_locked(),
	 * synchronize_srcu() is called with the console_list_lock held.
	 * Therefore the console_list_lock must not be taken with the
	 * srcu_lock held.
	 *
	 * Detecting if this context is really in the read-side critical
	 * section is only possible if the appropriate debug options are
	 * enabled.
	 */
	WARN_ON_ONCE(debug_lockdep_rcu_enabled() &&
		     srcu_read_lock_held(&console_srcu));

	mutex_lock(&console_mutex);
}
EXPORT_SYMBOL(console_list_lock);

/**
 * console_list_unlock - Unlock the console list
 *
 * Counterpart to console_list_lock()
 */
void console_list_unlock(void)
{
	mutex_unlock(&console_mutex);
}
EXPORT_SYMBOL(console_list_unlock);

/**
 * console_srcu_read_lock - Register a new reader for the
 *	SRCU-protected console list
 *
 * Use for_each_console_srcu() to iterate the console list
 *
 * Context: Any context.
 * Return: A cookie to pass to console_srcu_read_unlock().
 */
int console_srcu_read_lock(void)
	__acquires(&console_srcu)
{
	return srcu_read_lock_nmisafe(&console_srcu);
}
EXPORT_SYMBOL(console_srcu_read_lock);

/**
 * console_srcu_read_unlock - Unregister an old reader from
 *	the SRCU-protected console list
 * @cookie: cookie returned from console_srcu_read_lock()
 *
 * Counterpart to console_srcu_read_lock()
 */
void console_srcu_read_unlock(int cookie)
	__releases(&console_srcu)
{
	srcu_read_unlock_nmisafe(&console_srcu, cookie);
}
EXPORT_SYMBOL(console_srcu_read_unlock);
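
/*
 * Illustrative reader pattern (a sketch, not part of this file's logic):
 * the cookie-based API above is meant to bracket console list walks, e.g.:
 *
 *	int cookie;
 *	struct console *con;
 *
 *	cookie = console_srcu_read_lock();
 *	for_each_console_srcu(con) {
 *		if (console_srcu_read_flags(con) & CON_ENABLED)
 *			...;
 *	}
 *	console_srcu_read_unlock(cookie);
 *
 * Being NMI-safe SRCU, this works from any context, while writers
 * serialize on console_list_lock().
 */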

/*
 * Helper macros to handle lockdep when locking/unlocking console_sem. We use
 * macros instead of functions so that _RET_IP_ contains useful information.
 */
#define down_console_sem() do { \
	down(&console_sem);\
	mutex_acquire(&console_lock_dep_map, 0, 0, _RET_IP_);\
} while (0)

static int __down_trylock_console_sem(unsigned long ip)
{
	int lock_failed;
	unsigned long flags;

	/*
	 * Here and in __up_console_sem() we need to be in safe mode,
	 * because spindump/WARN/etc from under console ->lock will
	 * deadlock in printk()->down_trylock_console_sem() otherwise.
	 */
	printk_safe_enter_irqsave(flags);
	lock_failed = down_trylock(&console_sem);
	printk_safe_exit_irqrestore(flags);

	if (lock_failed)
		return 1;
	mutex_acquire(&console_lock_dep_map, 0, 1, ip);
	return 0;
}
#define down_trylock_console_sem() __down_trylock_console_sem(_RET_IP_)

static void __up_console_sem(unsigned long ip)
{
	unsigned long flags;

	mutex_release(&console_lock_dep_map, ip);

	printk_safe_enter_irqsave(flags);
	up(&console_sem);
	printk_safe_exit_irqrestore(flags);
}
#define up_console_sem() __up_console_sem(_RET_IP_)

static bool panic_in_progress(void)
{
	return unlikely(atomic_read(&panic_cpu) != PANIC_CPU_INVALID);
}

/* Return true if a panic is in progress on the current CPU. */
bool this_cpu_in_panic(void)
{
	/*
	 * We can use raw_smp_processor_id() here because it is impossible for
	 * the task to be migrated to the panic_cpu, or away from it. If
	 * panic_cpu has already been set, and we're not currently executing on
	 * that CPU, then we never will be.
	 */
	return unlikely(atomic_read(&panic_cpu) == raw_smp_processor_id());
}

/*
 * Return true if a panic is in progress on a remote CPU.
 *
 * On true, the local CPU should immediately release any printing resources
 * that may be needed by the panic CPU.
 */
bool other_cpu_in_panic(void)
{
	return (panic_in_progress() && !this_cpu_in_panic());
}

/*
 * This is used for debugging the mess that is the VT code by
 * keeping track if we have the console semaphore held. It's
 * definitely not the perfect debug tool (we don't know if _WE_
 * hold it and are racing, but it helps tracking those weird code
 * paths in the console code where we end up in places I want
 * locked without the console semaphore held).
 */
static int console_locked;

/*
 * Array of consoles built from command line options (console=)
 */

#define MAX_CMDLINECONSOLES 8

static struct console_cmdline console_cmdline[MAX_CMDLINECONSOLES];

static int preferred_console = -1;
int console_set_on_cmdline;
EXPORT_SYMBOL(console_set_on_cmdline);

/* Flag: console code may call schedule() */
static int console_may_schedule;

enum con_msg_format_flags {
	MSG_FORMAT_DEFAULT	= 0,
	MSG_FORMAT_SYSLOG	= (1 << 0),
};

static int console_msg_format = MSG_FORMAT_DEFAULT;

/*
 * The printk log buffer consists of a sequenced collection of records, each
 * containing variable length message text. Every record also contains its
 * own meta-data (@info).
 *
 * Every record meta-data carries the timestamp in microseconds, as well as
 * the standard userspace syslog level and syslog facility. The usual kernel
 * messages use LOG_KERN; userspace-injected messages always carry a matching
 * syslog facility, by default LOG_USER. The origin of every message can be
 * reliably determined that way.
 *
 * The human readable log message of a record is available in @text, the
 * length of the message text in @text_len. The stored message is not
 * terminated.
 *
 * Optionally, a record can carry a dictionary of properties (key/value
 * pairs), to provide userspace with a machine-readable message context.
 *
 * Examples for well-defined, commonly used property names are:
 *   DEVICE=b12:8               device identifier
 *                                b12:8         block dev_t
 *                                c127:3        char dev_t
 *                                n8            netdev ifindex
 *                                +sound:card0  subsystem:devname
 *   SUBSYSTEM=pci              driver-core subsystem name
 *
 * Valid characters in property names are [a-zA-Z0-9.-_]. Property names
 * and values are terminated by a '\0' character.
 *
 * Example of record values:
 *   record.text_buf                = "it's a line" (unterminated)
 *   record.info.seq                = 56
 *   record.info.ts_nsec            = 36863
 *   record.info.text_len           = 11
 *   record.info.facility           = 0 (LOG_KERN)
 *   record.info.flags              = 0
 *   record.info.level              = 3 (LOG_ERR)
 *   record.info.caller_id          = 299 (task 299)
 *   record.info.dev_info.subsystem = "pci" (terminated)
 *   record.info.dev_info.device    = "+pci:0000:00:01.0" (terminated)
 *
 * The 'struct printk_info' buffer must never be directly exported to
 * userspace, it is a kernel-private implementation detail that might
 * need to be changed in the future, when the requirements change.
 *
 * /dev/kmsg exports the structured data in the following line format:
 *   "<level>,<seqnum>,<timestamp>,<contflag>[,additional_values, ... ];<message text>\n"
 *
 * Users of the export format should ignore possible additional values
 * separated by ',', and find the message after the ';' character.
 *
 * The optional key/value pairs are attached as continuation lines starting
 * with a space character and terminated by a newline. All possible
 * non-printable characters are escaped in the "\xff" notation.
 */
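
/*
 * An illustrative exported record (values made up) could thus look like:
 *
 *   6,339,5140900,-;NET: Registered protocol family 10
 *    SUBSYSTEM=net
 *
 * i.e. level 6 (KERN_INFO) with facility 0 (LOG_KERN), sequence number
 * 339, a timestamp of 5.1409 seconds, no continuation flag, and one
 * key/value pair on a continuation line.
 */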

/* syslog_lock protects syslog_* variables and write access to clear_seq. */
static DEFINE_MUTEX(syslog_lock);

/*
 * Specifies if a legacy console is registered. If legacy consoles are
 * present, it is necessary to perform the console lock/unlock dance
 * whenever console flushing should occur.
 */
bool have_legacy_console;

/*
 * Specifies if an nbcon console is registered. If nbcon consoles are present,
 * synchronous printing of legacy consoles will not occur during panic until
 * the backtrace has been stored to the ringbuffer.
 */
bool have_nbcon_console;

/*
 * Specifies if a boot console is registered. If boot consoles are present,
 * nbcon consoles cannot print simultaneously and must be synchronized by
 * the console lock. This is because boot consoles and nbcon consoles may
 * have mapped the same hardware.
 */
bool have_boot_console;

/* See printk_legacy_allow_panic_sync() for details. */
bool legacy_allow_panic_sync;

#ifdef CONFIG_PRINTK
DECLARE_WAIT_QUEUE_HEAD(log_wait);
static DECLARE_WAIT_QUEUE_HEAD(legacy_wait);
/* All 3 protected by @syslog_lock. */
/* the next printk record to read by syslog(READ) or /proc/kmsg */
static u64 syslog_seq;
static size_t syslog_partial;
static bool syslog_time;

/* True when _all_ printer threads are available for printing. */
bool printk_kthreads_running;

struct latched_seq {
	seqcount_latch_t	latch;
	u64			val[2];
};

/*
 * The next printk record to read after the last 'clear' command. There are
 * two copies (updated with seqcount_latch) so that reads can locklessly
 * access a valid value. Writers are synchronized by @syslog_lock.
 */
static struct latched_seq clear_seq = {
	.latch		= SEQCNT_LATCH_ZERO(clear_seq.latch),
	.val[0]		= 0,
	.val[1]		= 0,
};

#define LOG_LEVEL(v)		((v) & 0x07)
#define LOG_FACILITY(v)		((v) >> 3 & 0xff)

/* record buffer */
#define LOG_ALIGN __alignof__(unsigned long)
#define __LOG_BUF_LEN (1 << CONFIG_LOG_BUF_SHIFT)
#define LOG_BUF_LEN_MAX ((u32)1 << 31)
static char __log_buf[__LOG_BUF_LEN] __aligned(LOG_ALIGN);
static char *log_buf = __log_buf;
static u32 log_buf_len = __LOG_BUF_LEN;

/*
 * Define the average message size. This only affects the number of
 * descriptors that will be available. Underestimating is better than
 * overestimating (too many available descriptors is better than not enough).
 */
#define PRB_AVGBITS 5	/* 32 character average length */

#if CONFIG_LOG_BUF_SHIFT <= PRB_AVGBITS
#error CONFIG_LOG_BUF_SHIFT value too small.
#endif
_DEFINE_PRINTKRB(printk_rb_static, CONFIG_LOG_BUF_SHIFT - PRB_AVGBITS,
		 PRB_AVGBITS, &__log_buf[0]);

static struct printk_ringbuffer printk_rb_dynamic;

struct printk_ringbuffer *prb = &printk_rb_static;

/*
 * We cannot access per-CPU data (e.g. per-CPU flush irq_work) before
 * per_cpu_areas are initialised. This variable is set to true when
 * it's safe to access per-CPU data.
 */
static bool __printk_percpu_data_ready __ro_after_init;

bool printk_percpu_data_ready(void)
{
	return __printk_percpu_data_ready;
}

/* Must be called under syslog_lock. */
static void latched_seq_write(struct latched_seq *ls, u64 val)
{
	raw_write_seqcount_latch(&ls->latch);
	ls->val[0] = val;
	raw_write_seqcount_latch(&ls->latch);
	ls->val[1] = val;
}

/* Can be called from any context. */
static u64 latched_seq_read_nolock(struct latched_seq *ls)
{
	unsigned int seq;
	unsigned int idx;
	u64 val;

	do {
		seq = raw_read_seqcount_latch(&ls->latch);
		idx = seq & 0x1;
		val = ls->val[idx];
	} while (raw_read_seqcount_latch_retry(&ls->latch, seq));

	return val;
}
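
/*
 * Usage sketch (illustrative): the latch keeps two copies so a reader can
 * never observe a value mid-update. A writer updates under @syslog_lock,
 * mirroring syslog_clear() below:
 *
 *	mutex_lock(&syslog_lock);
 *	latched_seq_write(&clear_seq, prb_next_seq(prb));
 *	mutex_unlock(&syslog_lock);
 *
 * while any context, including NMI, may read locklessly:
 *
 *	u64 seq = latched_seq_read_nolock(&clear_seq);
 */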

/* Return log buffer address */
char *log_buf_addr_get(void)
{
	return log_buf;
}

/* Return log buffer size */
u32 log_buf_len_get(void)
{
	return log_buf_len;
}

/*
 * Define how much of the log buffer we could take at maximum. The value
 * must be greater than two. Note that only half of the buffer is available
 * when the index points to the middle.
 */
#define MAX_LOG_TAKE_PART 4
static const char trunc_msg[] = "<truncated>";

static void truncate_msg(u16 *text_len, u16 *trunc_msg_len)
{
	/*
	 * The message should not take the whole buffer. Otherwise, it might
	 * get removed too soon.
	 */
	u32 max_text_len = log_buf_len / MAX_LOG_TAKE_PART;

	if (*text_len > max_text_len)
		*text_len = max_text_len;

	/* enable the warning message (if there is room) */
	*trunc_msg_len = strlen(trunc_msg);
	if (*text_len >= *trunc_msg_len)
		*text_len -= *trunc_msg_len;
	else
		*trunc_msg_len = 0;
}

int dmesg_restrict = IS_ENABLED(CONFIG_SECURITY_DMESG_RESTRICT);

static int syslog_action_restricted(int type)
{
	if (dmesg_restrict)
		return 1;
	/*
	 * Unless restricted, we allow "read all" and "get buffer size"
	 * for everybody.
	 */
	return type != SYSLOG_ACTION_READ_ALL &&
	       type != SYSLOG_ACTION_SIZE_BUFFER;
}

static int check_syslog_permissions(int type, int source)
{
	/*
	 * If this is from /proc/kmsg and we've already opened it, then we've
	 * already done the capabilities checks at open time.
	 */
	if (source == SYSLOG_FROM_PROC && type != SYSLOG_ACTION_OPEN)
		goto ok;

	if (syslog_action_restricted(type)) {
		if (capable(CAP_SYSLOG))
			goto ok;
		return -EPERM;
	}
ok:
	return security_syslog(type);
}

static void append_char(char **pp, char *e, char c)
{
	if (*pp < e)
		*(*pp)++ = c;
}

static ssize_t info_print_ext_header(char *buf, size_t size,
				     struct printk_info *info)
{
	u64 ts_usec = info->ts_nsec;
	char caller[20];
#ifdef CONFIG_PRINTK_CALLER
	int vh_ret = 0;
	u32 id = info->caller_id;

	trace_android_vh_printk_ext_header(caller, sizeof(caller), id, &vh_ret);

	if (!vh_ret)
		snprintf(caller, sizeof(caller), ",caller=%c%u",
			 id & 0x80000000 ? 'C' : 'T', id & ~0x80000000);
#else
	caller[0] = '\0';
#endif

	do_div(ts_usec, 1000);

	return scnprintf(buf, size, "%u,%llu,%llu,%c%s;",
			 (info->facility << 3) | info->level, info->seq,
			 ts_usec, info->flags & LOG_CONT ? 'c' : '-', caller);
}

static ssize_t msg_add_ext_text(char *buf, size_t size,
				const char *text, size_t text_len,
				unsigned char endc)
{
	char *p = buf, *e = buf + size;
	size_t i;

	/* escape non-printable characters */
	for (i = 0; i < text_len; i++) {
		unsigned char c = text[i];

		if (c < ' ' || c >= 127 || c == '\\')
			p += scnprintf(p, e - p, "\\x%02x", c);
		else
			append_char(&p, e, c);
	}
	append_char(&p, e, endc);

	return p - buf;
}
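
/*
 * For example (illustrative), a record body containing a tab and a
 * backslash gets both bytes escaped on the way out:
 *
 *	msg_add_ext_text(buf, size, "a\tb\\c", 5, '\n');
 *
 * produces "a\x09b\x5cc" followed by the '\n' terminator, which is what
 * lets /dev/kmsg readers rely on exactly one record per line.
 */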

static ssize_t msg_add_dict_text(char *buf, size_t size,
				 const char *key, const char *val)
{
	size_t val_len = strlen(val);
	ssize_t len;

	if (!val_len)
		return 0;

	len = msg_add_ext_text(buf, size, "", 0, ' ');	/* dict prefix */
	len += msg_add_ext_text(buf + len, size - len, key, strlen(key), '=');
	len += msg_add_ext_text(buf + len, size - len, val, val_len, '\n');

	return len;
}

static ssize_t msg_print_ext_body(char *buf, size_t size,
				  char *text, size_t text_len,
				  struct dev_printk_info *dev_info)
{
	ssize_t len;

	len = msg_add_ext_text(buf, size, text, text_len, '\n');

	if (!dev_info)
		goto out;

	len += msg_add_dict_text(buf + len, size - len, "SUBSYSTEM",
				 dev_info->subsystem);
	len += msg_add_dict_text(buf + len, size - len, "DEVICE",
				 dev_info->device);
out:
	return len;
}

/* /dev/kmsg - userspace message inject/listen interface */
struct devkmsg_user {
	atomic64_t seq;
	struct ratelimit_state rs;
	struct mutex lock;
	struct printk_buffers pbufs;
};

static __printf(3, 4) __cold
int devkmsg_emit(int facility, int level, const char *fmt, ...)
{
	va_list args;
	int r;

	va_start(args, fmt);
	r = vprintk_emit(facility, level, NULL, fmt, args);
	va_end(args);

	return r;
}

static ssize_t devkmsg_write(struct kiocb *iocb, struct iov_iter *from)
{
	char *buf, *line;
	int level = default_message_loglevel;
	int facility = 1;	/* LOG_USER */
	struct file *file = iocb->ki_filp;
	struct devkmsg_user *user = file->private_data;
	size_t len = iov_iter_count(from);
	ssize_t ret = len;

	if (len > PRINTKRB_RECORD_MAX)
		return -EINVAL;

	/* Ignore when user logging is disabled. */
	if (devkmsg_log & DEVKMSG_LOG_MASK_OFF)
		return len;

	/* Ratelimit when not explicitly enabled. */
	if (!(devkmsg_log & DEVKMSG_LOG_MASK_ON)) {
		if (!___ratelimit(&user->rs, current->comm))
			return ret;
	}

	buf = kmalloc(len+1, GFP_KERNEL);
	if (buf == NULL)
		return -ENOMEM;

	buf[len] = '\0';
	if (!copy_from_iter_full(buf, len, from)) {
		kfree(buf);
		return -EFAULT;
	}

	/*
	 * Extract and skip the syslog prefix <[0-9]*>. Coming from userspace
	 * the decimal value represents a 32bit value, the lower 3 bits are
	 * the log level, the rest is the log facility.
	 *
	 * If no prefix or no userspace facility is specified, we
	 * enforce LOG_USER, to be able to reliably distinguish
	 * kernel-generated messages from userspace-injected ones.
	 */
	line = buf;
	if (line[0] == '<') {
		char *endp = NULL;
		unsigned int u;

		u = simple_strtoul(line + 1, &endp, 10);
		if (endp && endp[0] == '>') {
			level = LOG_LEVEL(u);
			if (LOG_FACILITY(u) != 0)
				facility = LOG_FACILITY(u);
			endp++;
			line = endp;
		}
	}

	devkmsg_emit(facility, level, "%s", line);
	kfree(buf);
	return ret;
}
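
/*
 * Usage sketch from userspace (illustrative): the prefix parsed above is
 * the standard syslog "<prio>" encoding, prio = facility * 8 + level:
 *
 *	echo "<3>my daemon: something failed" > /dev/kmsg
 *
 * logs at LOG_ERR (level 3). Because the facility bits of the prefix are
 * zero, the record is still tagged LOG_USER, keeping userspace injections
 * distinguishable from kernel output.
 */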

static ssize_t devkmsg_read(struct file *file, char __user *buf,
			    size_t count, loff_t *ppos)
{
	struct devkmsg_user *user = file->private_data;
	char *outbuf = &user->pbufs.outbuf[0];
	struct printk_message pmsg = {
		.pbufs = &user->pbufs,
	};
	ssize_t ret;

	ret = mutex_lock_interruptible(&user->lock);
	if (ret)
		return ret;

	if (!printk_get_next_message(&pmsg, atomic64_read(&user->seq), true, false)) {
		if (file->f_flags & O_NONBLOCK) {
			ret = -EAGAIN;
			goto out;
		}

		/*
		 * Guarantee this task is visible on the waitqueue before
		 * checking the wake condition.
		 *
		 * The full memory barrier within set_current_state() of
		 * prepare_to_wait_event() pairs with the full memory barrier
		 * within wq_has_sleeper().
		 *
		 * This pairs with __wake_up_klogd:A.
		 */
		ret = wait_event_interruptible(log_wait,
				printk_get_next_message(&pmsg, atomic64_read(&user->seq), true,
							false)); /* LMM(devkmsg_read:A) */
		if (ret)
			goto out;
	}

	if (pmsg.dropped) {
		/* our last seen message is gone, return error and reset */
		atomic64_set(&user->seq, pmsg.seq);
		ret = -EPIPE;
		goto out;
	}

	atomic64_set(&user->seq, pmsg.seq + 1);

	if (pmsg.outbuf_len > count) {
		ret = -EINVAL;
		goto out;
	}

	if (copy_to_user(buf, outbuf, pmsg.outbuf_len)) {
		ret = -EFAULT;
		goto out;
	}
	ret = pmsg.outbuf_len;
out:
	mutex_unlock(&user->lock);
	return ret;
}

/*
 * Be careful when modifying this function!!!
 *
 * Only a few operations are supported because the device works only with
 * entire variable-length messages (records). Non-standard values are
 * returned in the other cases and it has been this way for quite some time.
 * User space applications might depend on this behavior.
 */
static loff_t devkmsg_llseek(struct file *file, loff_t offset, int whence)
{
	struct devkmsg_user *user = file->private_data;
	loff_t ret = 0;

	if (offset)
		return -ESPIPE;

	switch (whence) {
	case SEEK_SET:
		/* the first record */
		atomic64_set(&user->seq, prb_first_valid_seq(prb));
		break;
	case SEEK_DATA:
		/*
		 * The first record after the last SYSLOG_ACTION_CLEAR,
		 * like issued by 'dmesg -c'. Reading /dev/kmsg itself
		 * changes no global state, and does not clear anything.
		 */
		atomic64_set(&user->seq, latched_seq_read_nolock(&clear_seq));
		break;
	case SEEK_END:
		/* after the last record */
		atomic64_set(&user->seq, prb_next_seq(prb));
		break;
	default:
		ret = -EINVAL;
	}
	return ret;
}
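
/*
 * Illustrative userspace view of the seek semantics above (the offset must
 * be 0, anything else yields -ESPIPE):
 *
 *	lseek(fd, 0, SEEK_SET);		read from the oldest record
 *	lseek(fd, 0, SEEK_DATA);	read from the last "clear" point
 *	lseek(fd, 0, SEEK_END);		see only records logged from now on
 */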

static __poll_t devkmsg_poll(struct file *file, poll_table *wait)
{
	struct devkmsg_user *user = file->private_data;
	struct printk_info info;
	__poll_t ret = 0;

	poll_wait(file, &log_wait, wait);

	if (prb_read_valid_info(prb, atomic64_read(&user->seq), &info, NULL)) {
		/* return error when data has vanished underneath us */
		if (info.seq != atomic64_read(&user->seq))
			ret = EPOLLIN|EPOLLRDNORM|EPOLLERR|EPOLLPRI;
		else
			ret = EPOLLIN|EPOLLRDNORM;
	}

	return ret;
}

static int devkmsg_open(struct inode *inode, struct file *file)
{
	struct devkmsg_user *user;
	int err;

	if (devkmsg_log & DEVKMSG_LOG_MASK_OFF)
		return -EPERM;

	/* write-only does not need any file context */
	if ((file->f_flags & O_ACCMODE) != O_WRONLY) {
		err = check_syslog_permissions(SYSLOG_ACTION_READ_ALL,
					       SYSLOG_FROM_READER);
		if (err)
			return err;
	}

	user = kvmalloc(sizeof(struct devkmsg_user), GFP_KERNEL);
	if (!user)
		return -ENOMEM;

	ratelimit_default_init(&user->rs);
	ratelimit_set_flags(&user->rs, RATELIMIT_MSG_ON_RELEASE);

	mutex_init(&user->lock);

	atomic64_set(&user->seq, prb_first_valid_seq(prb));

	file->private_data = user;
	return 0;
}

static int devkmsg_release(struct inode *inode, struct file *file)
{
	struct devkmsg_user *user = file->private_data;

	ratelimit_state_exit(&user->rs);

	mutex_destroy(&user->lock);
	kvfree(user);
	return 0;
}

const struct file_operations kmsg_fops = {
	.open = devkmsg_open,
	.read = devkmsg_read,
	.write_iter = devkmsg_write,
	.llseek = devkmsg_llseek,
	.poll = devkmsg_poll,
	.release = devkmsg_release,
};

#ifdef CONFIG_VMCORE_INFO
/*
 * This appends the listed symbols to /proc/vmcore
 *
 * /proc/vmcore is used by various utilities, like crash and makedumpfile to
 * obtain access to symbols that are otherwise very difficult to locate. These
 * symbols are specifically used so that utilities can access and extract the
 * dmesg log from a vmcore file after a crash.
 */
void log_buf_vmcoreinfo_setup(void)
{
	struct dev_printk_info *dev_info = NULL;

	VMCOREINFO_SYMBOL(prb);
	VMCOREINFO_SYMBOL(printk_rb_static);
	VMCOREINFO_SYMBOL(clear_seq);

	/*
	 * Export struct size and field offsets. User space tools can
	 * parse it and detect any changes to structure down the line.
	 */

	VMCOREINFO_STRUCT_SIZE(printk_ringbuffer);
	VMCOREINFO_OFFSET(printk_ringbuffer, desc_ring);
	VMCOREINFO_OFFSET(printk_ringbuffer, text_data_ring);
	VMCOREINFO_OFFSET(printk_ringbuffer, fail);

	VMCOREINFO_STRUCT_SIZE(prb_desc_ring);
	VMCOREINFO_OFFSET(prb_desc_ring, count_bits);
	VMCOREINFO_OFFSET(prb_desc_ring, descs);
	VMCOREINFO_OFFSET(prb_desc_ring, infos);
	VMCOREINFO_OFFSET(prb_desc_ring, head_id);
	VMCOREINFO_OFFSET(prb_desc_ring, tail_id);

	VMCOREINFO_STRUCT_SIZE(prb_desc);
	VMCOREINFO_OFFSET(prb_desc, state_var);
	VMCOREINFO_OFFSET(prb_desc, text_blk_lpos);

	VMCOREINFO_STRUCT_SIZE(prb_data_blk_lpos);
	VMCOREINFO_OFFSET(prb_data_blk_lpos, begin);
	VMCOREINFO_OFFSET(prb_data_blk_lpos, next);

	VMCOREINFO_STRUCT_SIZE(printk_info);
	VMCOREINFO_OFFSET(printk_info, seq);
	VMCOREINFO_OFFSET(printk_info, ts_nsec);
	VMCOREINFO_OFFSET(printk_info, text_len);
	VMCOREINFO_OFFSET(printk_info, caller_id);
	VMCOREINFO_OFFSET(printk_info, dev_info);

	VMCOREINFO_STRUCT_SIZE(dev_printk_info);
	VMCOREINFO_OFFSET(dev_printk_info, subsystem);
	VMCOREINFO_LENGTH(printk_info_subsystem, sizeof(dev_info->subsystem));
	VMCOREINFO_OFFSET(dev_printk_info, device);
	VMCOREINFO_LENGTH(printk_info_device, sizeof(dev_info->device));

	VMCOREINFO_STRUCT_SIZE(prb_data_ring);
	VMCOREINFO_OFFSET(prb_data_ring, size_bits);
	VMCOREINFO_OFFSET(prb_data_ring, data);
	VMCOREINFO_OFFSET(prb_data_ring, head_lpos);
	VMCOREINFO_OFFSET(prb_data_ring, tail_lpos);

	VMCOREINFO_SIZE(atomic_long_t);
	VMCOREINFO_TYPE_OFFSET(atomic_long_t, counter);

	VMCOREINFO_STRUCT_SIZE(latched_seq);
	VMCOREINFO_OFFSET(latched_seq, val);
}
#endif

/* requested log_buf_len from kernel cmdline */
static unsigned long __initdata new_log_buf_len;

/* we practice scaling the ring buffer by powers of 2 */
static void __init log_buf_len_update(u64 size)
{
	if (size > (u64)LOG_BUF_LEN_MAX) {
		size = (u64)LOG_BUF_LEN_MAX;
		pr_err("log_buf over 2G is not supported.\n");
	}

	if (size)
		size = roundup_pow_of_two(size);
	if (size > log_buf_len)
		new_log_buf_len = (unsigned long)size;
}

/* save requested log_buf_len since it's too early to process it */
static int __init log_buf_len_setup(char *str)
{
	u64 size;

	if (!str)
		return -EINVAL;

	size = memparse(str, &str);

	log_buf_len_update(size);

	return 0;
}
early_param("log_buf_len", log_buf_len_setup);
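
/*
 * Usage sketch (illustrative): because the parameter goes through
 * memparse() and is rounded up to a power of two, all of these request a
 * 4 MiB ring buffer on the kernel command line:
 *
 *	log_buf_len=4194304
 *	log_buf_len=4096K
 *	log_buf_len=4M
 *
 * Requests not larger than the built-in __LOG_BUF_LEN are ignored.
 */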

#ifdef CONFIG_SMP
#define __LOG_CPU_MAX_BUF_LEN (1 << CONFIG_LOG_CPU_MAX_BUF_SHIFT)

static void __init log_buf_add_cpu(void)
{
	unsigned int cpu_extra;

	/*
	 * archs should set up cpu_possible_bits properly with
	 * set_cpu_possible() after setup_arch() but just in
	 * case let's ensure this is valid.
	 */
	if (num_possible_cpus() == 1)
		return;

	cpu_extra = (num_possible_cpus() - 1) * __LOG_CPU_MAX_BUF_LEN;

	/* by default this will only continue through for large > 64 CPUs */
	if (cpu_extra <= __LOG_BUF_LEN / 2)
		return;

	pr_info("log_buf_len individual max cpu contribution: %d bytes\n",
		__LOG_CPU_MAX_BUF_LEN);
	pr_info("log_buf_len total cpu_extra contributions: %d bytes\n",
		cpu_extra);
	pr_info("log_buf_len min size: %d bytes\n", __LOG_BUF_LEN);

	log_buf_len_update(cpu_extra + __LOG_BUF_LEN);
}
#else /* !CONFIG_SMP */
static inline void log_buf_add_cpu(void) {}
#endif /* CONFIG_SMP */

static void __init set_percpu_data_ready(void)
{
	__printk_percpu_data_ready = true;
}

static unsigned int __init add_to_rb(struct printk_ringbuffer *rb,
				     struct printk_record *r)
{
	struct prb_reserved_entry e;
	struct printk_record dest_r;

	prb_rec_init_wr(&dest_r, r->info->text_len);

	if (!prb_reserve(&e, rb, &dest_r))
		return 0;

	memcpy(&dest_r.text_buf[0], &r->text_buf[0], r->info->text_len);
	dest_r.info->text_len = r->info->text_len;
	dest_r.info->facility = r->info->facility;
	dest_r.info->level = r->info->level;
	dest_r.info->flags = r->info->flags;
	dest_r.info->ts_nsec = r->info->ts_nsec;
	dest_r.info->caller_id = r->info->caller_id;
	memcpy(&dest_r.info->dev_info, &r->info->dev_info, sizeof(dest_r.info->dev_info));

	prb_final_commit(&e);

	return prb_record_text_space(&e);
}

static char setup_text_buf[PRINTKRB_RECORD_MAX] __initdata;

static void print_log_buf_usage_stats(void)
{
	unsigned int descs_count = log_buf_len >> PRB_AVGBITS;
	size_t meta_data_size;

	meta_data_size = descs_count * (sizeof(struct prb_desc) + sizeof(struct printk_info));

	pr_info("log buffer data + meta data: %u + %zu = %zu bytes\n",
		log_buf_len, meta_data_size, log_buf_len + meta_data_size);
}

void __init setup_log_buf(int early)
{
	struct printk_info *new_infos;
	unsigned int new_descs_count;
	struct prb_desc *new_descs;
	struct printk_info info;
	struct printk_record r;
	unsigned int text_size;
	size_t new_descs_size;
	size_t new_infos_size;
	unsigned long flags;
	char *new_log_buf;
	unsigned int free;
	u64 seq;

	/*
	 * Some archs call setup_log_buf() multiple times - first is very
	 * early, e.g. from setup_arch(), and second - when percpu_areas
	 * are initialised.
	 */
	if (!early)
		set_percpu_data_ready();

	if (log_buf != __log_buf)
		return;

	if (!early && !new_log_buf_len)
		log_buf_add_cpu();

	if (!new_log_buf_len) {
		/* Show the memory stats only once. */
		if (!early)
			goto out;

		return;
	}

	new_descs_count = new_log_buf_len >> PRB_AVGBITS;
	if (new_descs_count == 0) {
		pr_err("new_log_buf_len: %lu too small\n", new_log_buf_len);
		goto out;
	}

	new_log_buf = memblock_alloc(new_log_buf_len, LOG_ALIGN);
	if (unlikely(!new_log_buf)) {
		pr_err("log_buf_len: %lu text bytes not available\n",
		       new_log_buf_len);
		goto out;
	}

	new_descs_size = new_descs_count * sizeof(struct prb_desc);
	new_descs = memblock_alloc(new_descs_size, LOG_ALIGN);
	if (unlikely(!new_descs)) {
		pr_err("log_buf_len: %zu desc bytes not available\n",
		       new_descs_size);
		goto err_free_log_buf;
	}

	new_infos_size = new_descs_count * sizeof(struct printk_info);
	new_infos = memblock_alloc(new_infos_size, LOG_ALIGN);
	if (unlikely(!new_infos)) {
		pr_err("log_buf_len: %zu info bytes not available\n",
		       new_infos_size);
		goto err_free_descs;
	}

	prb_rec_init_rd(&r, &info, &setup_text_buf[0], sizeof(setup_text_buf));

	prb_init(&printk_rb_dynamic,
		 new_log_buf, ilog2(new_log_buf_len),
		 new_descs, ilog2(new_descs_count),
		 new_infos);

	local_irq_save(flags);

	log_buf_len = new_log_buf_len;
	log_buf = new_log_buf;
	new_log_buf_len = 0;

	free = __LOG_BUF_LEN;
	prb_for_each_record(0, &printk_rb_static, seq, &r) {
		text_size = add_to_rb(&printk_rb_dynamic, &r);
		if (text_size > free)
			free = 0;
		else
			free -= text_size;
	}

	prb = &printk_rb_dynamic;

	local_irq_restore(flags);

	/*
	 * Copy any remaining messages that might have appeared from
	 * NMI context after copying but before switching to the
	 * dynamic buffer.
	 */
	prb_for_each_record(seq, &printk_rb_static, seq, &r) {
		text_size = add_to_rb(&printk_rb_dynamic, &r);
		if (text_size > free)
			free = 0;
		else
			free -= text_size;
	}

	if (seq != prb_next_seq(&printk_rb_static)) {
		pr_err("dropped %llu messages\n",
		       prb_next_seq(&printk_rb_static) - seq);
	}

	print_log_buf_usage_stats();
	pr_info("early log buf free: %u(%u%%)\n",
		free, (free * 100) / __LOG_BUF_LEN);
	return;

err_free_descs:
	memblock_free(new_descs, new_descs_size);
err_free_log_buf:
	memblock_free(new_log_buf, new_log_buf_len);
out:
	print_log_buf_usage_stats();
}

static bool __read_mostly ignore_loglevel;

static int __init ignore_loglevel_setup(char *str)
{
	ignore_loglevel = true;
	pr_info("debug: ignoring loglevel setting.\n");

	return 0;
}

early_param("ignore_loglevel", ignore_loglevel_setup);
module_param(ignore_loglevel, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(ignore_loglevel,
		 "ignore loglevel setting (prints all kernel messages to the console)");

static bool suppress_message_printing(int level)
{
	return (level >= console_loglevel && !ignore_loglevel);
}

#ifdef CONFIG_BOOT_PRINTK_DELAY

static int boot_delay; /* msecs delay after each printk during bootup */
static unsigned long long loops_per_msec;	/* based on boot_delay */

static int __init boot_delay_setup(char *str)
{
	unsigned long lpj;

	lpj = preset_lpj ? preset_lpj : 1000000;	/* some guess */
	loops_per_msec = (unsigned long long)lpj / 1000 * HZ;

	get_option(&str, &boot_delay);
	if (boot_delay > 10 * 1000)
		boot_delay = 0;

	pr_debug("boot_delay: %u, preset_lpj: %ld, lpj: %lu, "
		 "HZ: %d, loops_per_msec: %llu\n",
		 boot_delay, preset_lpj, lpj, HZ, loops_per_msec);
	return 0;
}
early_param("boot_delay", boot_delay_setup);
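
/*
 * Usage sketch (illustrative): "boot_delay=100" on the kernel command line
 * stalls each printk() for roughly 100 ms during boot, which can slow a
 * scrolling oops down enough to read on a video console. Values above
 * 10000 (10 seconds per message) are treated as mistakes and disable the
 * feature.
 */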

static void boot_delay_msec(int level)
{
	unsigned long long k;
	unsigned long timeout;

	if ((boot_delay == 0 || system_state >= SYSTEM_RUNNING)
		|| suppress_message_printing(level)) {
		return;
	}

	k = (unsigned long long)loops_per_msec * boot_delay;

	timeout = jiffies + msecs_to_jiffies(boot_delay);
	while (k) {
		k--;
		cpu_relax();
		/*
		 * use (volatile) jiffies to prevent
		 * compiler reduction; loop termination via jiffies
		 * is secondary and may or may not happen.
		 */
		if (time_after(jiffies, timeout))
			break;
		touch_nmi_watchdog();
	}
}
#else
static inline void boot_delay_msec(int level)
{
}
#endif

static bool printk_time = IS_ENABLED(CONFIG_PRINTK_TIME);
module_param_named(time, printk_time, bool, S_IRUGO | S_IWUSR);

static size_t print_syslog(unsigned int level, char *buf)
{
	return sprintf(buf, "<%u>", level);
}

static size_t print_time(u64 ts, char *buf)
{
	unsigned long rem_nsec = do_div(ts, 1000000000);

	return sprintf(buf, "[%5lu.%06lu]",
		       (unsigned long)ts, rem_nsec / 1000);
}
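
/*
 * For example (illustrative), a record stamped ts_nsec = 5123456789 is
 * rendered by print_time() as:
 *
 *	"[    5.123456]"
 *
 * i.e. seconds right-aligned to five places and microseconds zero-padded
 * to six - the familiar dmesg timestamp prefix.
 */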

#ifdef CONFIG_PRINTK_CALLER
static size_t print_caller(u32 id, char *buf)
{
	char caller[12];
	int vh_ret = 0;

	trace_android_vh_printk_caller(caller, sizeof(caller), id, &vh_ret);
	if (!vh_ret)
		snprintf(caller, sizeof(caller), "%c%u",
			 id & 0x80000000 ? 'C' : 'T', id & ~0x80000000);
	return sprintf(buf, "[%6s]", caller);
}
#else
#define print_caller(id, buf) 0
#endif

static size_t info_print_prefix(const struct printk_info *info, bool syslog,
				bool time, char *buf)
{
	size_t len = 0;

	if (syslog)
		len = print_syslog((info->facility << 3) | info->level, buf);

	if (time)
		len += print_time(info->ts_nsec, buf + len);

	len += print_caller(info->caller_id, buf + len);

	if (IS_ENABLED(CONFIG_PRINTK_CALLER) || time) {
		buf[len++] = ' ';
		buf[len] = '\0';
	}

	return len;
}

/*
 * Prepare the record for printing. The text is shifted within the given
 * buffer to avoid a need for another one. The following operations are
 * done:
 *
 *   - Add prefix for each line.
 *   - Drop truncated lines that no longer fit into the buffer.
 *   - Add the trailing newline that has been removed in vprintk_store().
 *   - Add a string terminator.
 *
 * Since the produced string is always terminated, the maximum possible
 * return value is @r->text_buf_size - 1.
 *
 * Return: The length of the updated/prepared text, including the added
 * prefixes and the newline. The terminator is not counted. The dropped
 * line(s) are not counted.
 */
static size_t record_print_text(struct printk_record *r, bool syslog,
				bool time)
{
	size_t text_len = r->info->text_len;
	size_t buf_size = r->text_buf_size;
	char *text = r->text_buf;
	char prefix[PRINTK_PREFIX_MAX];
	bool truncated = false;
	size_t prefix_len;
	size_t line_len;
	size_t len = 0;
	char *next;

	/*
	 * If the message was truncated because the buffer was not large
	 * enough, treat the available text as if it were the full text.
	 */
	if (text_len > buf_size)
		text_len = buf_size;

	prefix_len = info_print_prefix(r->info, syslog, time, prefix);

	/*
	 * @text_len: bytes of unprocessed text
	 * @line_len: bytes of current line _without_ newline
	 * @text:     pointer to beginning of current line
	 * @len:      number of bytes prepared in r->text_buf
	 */
	for (;;) {
		next = memchr(text, '\n', text_len);
		if (next) {
			line_len = next - text;
		} else {
			/* Drop truncated line(s). */
			if (truncated)
				break;
			line_len = text_len;
		}

		/*
		 * Truncate the text if there is not enough space to add the
		 * prefix and a trailing newline and a terminator.
		 */
		if (len + prefix_len + text_len + 1 + 1 > buf_size) {
			/* Drop even the current line if no space. */
			if (len + prefix_len + line_len + 1 + 1 > buf_size)
				break;

			text_len = buf_size - len - prefix_len - 1 - 1;
			truncated = true;
		}

		memmove(text + prefix_len, text, text_len);
		memcpy(text, prefix, prefix_len);

		/*
		 * Increment the prepared length to include the text and
		 * prefix that were just moved+copied. Also increment for the
		 * newline at the end of this line. If this is the last line,
		 * there is no newline, but it will be added immediately below.
		 */
		len += prefix_len + line_len + 1;
		if (text_len == line_len) {
			/*
			 * This is the last line. Add the trailing newline
			 * removed in vprintk_store().
			 */
			text[prefix_len + line_len] = '\n';
			break;
		}

		/*
		 * Advance beyond the added prefix and the related line with
		 * its newline.
		 */
		text += prefix_len + line_len + 1;

		/*
		 * The remaining text has only decreased by the line with its
		 * newline.
		 *
		 * Note that @text_len can become zero. It happens when @text
		 * ended with a newline (either due to truncation or the
		 * original string ending with "\n\n"). The loop is correctly
		 * repeated and (if not truncated) an empty line with a prefix
		 * will be prepared.
		 */
		text_len -= line_len + 1;
	}

	/*
	 * If a buffer was provided, it will be terminated. Space for the
	 * string terminator is guaranteed to be available. The terminator is
	 * not counted in the return value.
	 */
	if (buf_size > 0)
		r->text_buf[len] = 0;

	return len;
}
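
/*
 * For example (illustrative), with @time enabled a two-line record whose
 * text_buf holds "first\nsecond" is rewritten in place as:
 *
 *	"[    1.234567] first\n[    1.234567] second\n"
 *
 * and that string's length is returned; the in-buffer shift is what makes
 * a second scratch buffer unnecessary.
 */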
1545
get_record_print_text_size(struct printk_info * info,unsigned int line_count,bool syslog,bool time)1546 static size_t get_record_print_text_size(struct printk_info *info,
1547 unsigned int line_count,
1548 bool syslog, bool time)
1549 {
1550 char prefix[PRINTK_PREFIX_MAX];
1551 size_t prefix_len;
1552
1553 prefix_len = info_print_prefix(info, syslog, time, prefix);
1554
1555 /*
1556 * Each line will be preceded with a prefix. The intermediate
1557 * newlines are already within the text, but a final trailing
1558 * newline will be added.
1559 */
1560 return ((prefix_len * line_count) + info->text_len + 1);
1561 }
1562
1563 /*
1564 * Beginning with @start_seq, find the first record where it and all following
1565 * records up to (but not including) @max_seq fit into @size.
1566 *
1567 * @max_seq is simply an upper bound and does not need to exist. If the caller
1568 * does not require an upper bound, -1 can be used for @max_seq.
1569 */
find_first_fitting_seq(u64 start_seq,u64 max_seq,size_t size,bool syslog,bool time)1570 static u64 find_first_fitting_seq(u64 start_seq, u64 max_seq, size_t size,
1571 bool syslog, bool time)
1572 {
1573 struct printk_info info;
1574 unsigned int line_count;
1575 size_t len = 0;
1576 u64 seq;
1577
1578 /* Determine the size of the records up to @max_seq. */
1579 prb_for_each_info(start_seq, prb, seq, &info, &line_count) {
1580 if (info.seq >= max_seq)
1581 break;
1582 len += get_record_print_text_size(&info, line_count, syslog, time);
1583 }
1584
1585 /*
1586 * Adjust the upper bound for the next loop to avoid subtracting
1587 * lengths that were never added.
1588 */
1589 if (seq < max_seq)
1590 max_seq = seq;
1591
1592 /*
1593 * Move first record forward until length fits into the buffer. Ignore
1594 * newest messages that were not counted in the above cycle. Messages
1595 * might appear and get lost in the meantime. This is a best effort
1596 * that prevents an infinite loop that could occur with a retry.
1597 */
1598 prb_for_each_info(start_seq, prb, seq, &info, &line_count) {
1599 if (len <= size || info.seq >= max_seq)
1600 break;
1601 len -= get_record_print_text_size(&info, line_count, syslog, time);
1602 }
1603
1604 return seq;
1605 }
1606
1607 /* The caller is responsible for making sure @size is greater than 0. */
syslog_print(char __user * buf,int size)1608 static int syslog_print(char __user *buf, int size)
1609 {
1610 struct printk_info info;
1611 struct printk_record r;
1612 char *text;
1613 int len = 0;
1614 u64 seq;
1615
1616 text = kmalloc(PRINTK_MESSAGE_MAX, GFP_KERNEL);
1617 if (!text)
1618 return -ENOMEM;
1619
1620 prb_rec_init_rd(&r, &info, text, PRINTK_MESSAGE_MAX);
1621
1622 mutex_lock(&syslog_lock);
1623
1624 /*
1625 * Wait for the @syslog_seq record to be available. @syslog_seq may
1626 * change while waiting.
1627 */
1628 do {
1629 seq = syslog_seq;
1630
1631 mutex_unlock(&syslog_lock);
1632 /*
1633 * Guarantee this task is visible on the waitqueue before
1634 * checking the wake condition.
1635 *
1636 * The full memory barrier within set_current_state() of
1637 * prepare_to_wait_event() pairs with the full memory barrier
1638 * within wq_has_sleeper().
1639 *
1640 * This pairs with __wake_up_klogd:A.
1641 */
1642 len = wait_event_interruptible(log_wait,
1643 prb_read_valid(prb, seq, NULL)); /* LMM(syslog_print:A) */
1644 mutex_lock(&syslog_lock);
1645
1646 if (len)
1647 goto out;
1648 } while (syslog_seq != seq);
1649
1650 /*
1651 * Copy records that fit into the buffer. The above cycle makes sure
1652 * that the first record is always available.
1653 */
1654 do {
1655 size_t n;
1656 size_t skip;
1657 int err;
1658
1659 if (!prb_read_valid(prb, syslog_seq, &r))
1660 break;
1661
1662 if (r.info->seq != syslog_seq) {
1663 /* message is gone, move to next valid one */
1664 syslog_seq = r.info->seq;
1665 syslog_partial = 0;
1666 }
1667
1668 /*
1669 * To keep reading/counting partial line consistent,
1670 * use printk_time value as of the beginning of a line.
1671 */
1672 if (!syslog_partial)
1673 syslog_time = printk_time;
1674
1675 skip = syslog_partial;
1676 n = record_print_text(&r, true, syslog_time);
1677 if (n - syslog_partial <= size) {
1678 /* message fits into buffer, move forward */
1679 syslog_seq = r.info->seq + 1;
1680 n -= syslog_partial;
1681 syslog_partial = 0;
1682 } else if (!len){
1683 /* partial read(), remember position */
1684 n = size;
1685 syslog_partial += n;
1686 } else
1687 n = 0;
1688
1689 if (!n)
1690 break;
1691
1692 mutex_unlock(&syslog_lock);
1693 err = copy_to_user(buf, text + skip, n);
1694 mutex_lock(&syslog_lock);
1695
1696 if (err) {
1697 if (!len)
1698 len = -EFAULT;
1699 break;
1700 }
1701
1702 len += n;
1703 size -= n;
1704 buf += n;
1705 } while (size);
1706 out:
1707 mutex_unlock(&syslog_lock);
1708 kfree(text);
1709 return len;
1710 }
1711
syslog_print_all(char __user * buf,int size,bool clear)1712 static int syslog_print_all(char __user *buf, int size, bool clear)
1713 {
1714 struct printk_info info;
1715 struct printk_record r;
1716 char *text;
1717 int len = 0;
1718 u64 seq;
1719 bool time;
1720
1721 text = kmalloc(PRINTK_MESSAGE_MAX, GFP_KERNEL);
1722 if (!text)
1723 return -ENOMEM;
1724
1725 time = printk_time;
1726 /*
1727 * Find first record that fits, including all following records,
1728 * into the user-provided buffer for this dump.
1729 */
1730 seq = find_first_fitting_seq(latched_seq_read_nolock(&clear_seq), -1,
1731 size, true, time);
1732
1733 prb_rec_init_rd(&r, &info, text, PRINTK_MESSAGE_MAX);
1734
1735 prb_for_each_record(seq, prb, seq, &r) {
1736 int textlen;
1737
1738 textlen = record_print_text(&r, true, time);
1739
1740 if (len + textlen > size) {
1741 seq--;
1742 break;
1743 }
1744
1745 if (copy_to_user(buf + len, text, textlen))
1746 len = -EFAULT;
1747 else
1748 len += textlen;
1749
1750 if (len < 0)
1751 break;
1752 }
1753
1754 if (clear) {
1755 mutex_lock(&syslog_lock);
1756 latched_seq_write(&clear_seq, seq);
1757 mutex_unlock(&syslog_lock);
1758 }
1759
1760 kfree(text);
1761 return len;
1762 }
1763
syslog_clear(void)1764 static void syslog_clear(void)
1765 {
1766 mutex_lock(&syslog_lock);
1767 latched_seq_write(&clear_seq, prb_next_seq(prb));
1768 mutex_unlock(&syslog_lock);
1769 }
1770
do_syslog(int type,char __user * buf,int len,int source)1771 int do_syslog(int type, char __user *buf, int len, int source)
1772 {
1773 struct printk_info info;
1774 bool clear = false;
1775 static int saved_console_loglevel = LOGLEVEL_DEFAULT;
1776 int error;
1777
1778 error = check_syslog_permissions(type, source);
1779 if (error)
1780 return error;
1781
1782 switch (type) {
1783 case SYSLOG_ACTION_CLOSE: /* Close log */
1784 break;
1785 case SYSLOG_ACTION_OPEN: /* Open log */
1786 break;
1787 case SYSLOG_ACTION_READ: /* Read from log */
1788 if (!buf || len < 0)
1789 return -EINVAL;
1790 if (!len)
1791 return 0;
1792 if (!access_ok(buf, len))
1793 return -EFAULT;
1794 error = syslog_print(buf, len);
1795 break;
1796 /* Read/clear last kernel messages */
1797 case SYSLOG_ACTION_READ_CLEAR:
1798 clear = true;
1799 fallthrough;
1800 /* Read last kernel messages */
1801 case SYSLOG_ACTION_READ_ALL:
1802 if (!buf || len < 0)
1803 return -EINVAL;
1804 if (!len)
1805 return 0;
1806 if (!access_ok(buf, len))
1807 return -EFAULT;
1808 error = syslog_print_all(buf, len, clear);
1809 break;
1810 /* Clear ring buffer */
1811 case SYSLOG_ACTION_CLEAR:
1812 syslog_clear();
1813 break;
1814 /* Disable logging to console */
1815 case SYSLOG_ACTION_CONSOLE_OFF:
1816 if (saved_console_loglevel == LOGLEVEL_DEFAULT)
1817 saved_console_loglevel = console_loglevel;
1818 console_loglevel = minimum_console_loglevel;
1819 break;
1820 /* Enable logging to console */
1821 case SYSLOG_ACTION_CONSOLE_ON:
1822 if (saved_console_loglevel != LOGLEVEL_DEFAULT) {
1823 console_loglevel = saved_console_loglevel;
1824 saved_console_loglevel = LOGLEVEL_DEFAULT;
1825 }
1826 break;
1827 /* Set level of messages printed to console */
1828 case SYSLOG_ACTION_CONSOLE_LEVEL:
1829 if (len < 1 || len > 8)
1830 return -EINVAL;
1831 if (len < minimum_console_loglevel)
1832 len = minimum_console_loglevel;
1833 console_loglevel = len;
1834 /* Implicitly re-enable logging to console */
1835 saved_console_loglevel = LOGLEVEL_DEFAULT;
1836 break;
1837 /* Number of chars in the log buffer */
1838 case SYSLOG_ACTION_SIZE_UNREAD:
1839 mutex_lock(&syslog_lock);
1840 if (!prb_read_valid_info(prb, syslog_seq, &info, NULL)) {
1841 /* No unread messages. */
1842 mutex_unlock(&syslog_lock);
1843 return 0;
1844 }
1845 if (info.seq != syslog_seq) {
1846 /* messages are gone, move to first one */
1847 syslog_seq = info.seq;
1848 syslog_partial = 0;
1849 }
1850 if (source == SYSLOG_FROM_PROC) {
1851 /*
1852 * Short-cut for poll(/"proc/kmsg") which simply checks
1853 * for pending data, not the size; return the count of
1854 * records, not the length.
1855 */
1856 error = prb_next_seq(prb) - syslog_seq;
1857 } else {
1858 bool time = syslog_partial ? syslog_time : printk_time;
1859 unsigned int line_count;
1860 u64 seq;
1861
1862 prb_for_each_info(syslog_seq, prb, seq, &info,
1863 &line_count) {
1864 error += get_record_print_text_size(&info, line_count,
1865 true, time);
1866 time = printk_time;
1867 }
1868 error -= syslog_partial;
1869 }
1870 mutex_unlock(&syslog_lock);
1871 break;
1872 /* Size of the log buffer */
1873 case SYSLOG_ACTION_SIZE_BUFFER:
1874 error = log_buf_len;
1875 break;
1876 default:
1877 error = -EINVAL;
1878 break;
1879 }
1880
1881 return error;
1882 }
1883
1884 SYSCALL_DEFINE3(syslog, int, type, char __user *, buf, int, len)
1885 {
1886 return do_syslog(type, buf, len, SYSLOG_FROM_READER);
1887 }
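/*
 * Illustrative sketch (not kernel code): userspace reaches do_syslog()
 * through the syslog(2) syscall, typically via glibc's klogctl()
 * wrapper. The action numbers correspond to SYSLOG_ACTION_* above.
 *
 *	#include <stdio.h>
 *	#include <stdlib.h>
 *	#include <sys/klog.h>
 *
 *	int main(void)
 *	{
 *		int size = klogctl(10, NULL, 0);   // SYSLOG_ACTION_SIZE_BUFFER
 *		char *buf = malloc(size);
 *		int len = klogctl(3, buf, size);   // SYSLOG_ACTION_READ_ALL
 *
 *		if (len > 0)
 *			fwrite(buf, 1, len, stdout);
 *		free(buf);
 *		return 0;
 *	}
 */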
1888
1889 /*
1890 * Special console_lock variants that help to reduce the risk of soft-lockups.
1891 * They allow passing console_lock to another printk() call using a busy wait.
1892 */
1893
1894 #ifdef CONFIG_LOCKDEP
1895 static struct lockdep_map console_owner_dep_map = {
1896 .name = "console_owner"
1897 };
1898 #endif
1899
1900 static DEFINE_RAW_SPINLOCK(console_owner_lock);
1901 static struct task_struct *console_owner;
1902 static bool console_waiter;
1903
1904 /**
1905 * console_lock_spinning_enable - mark beginning of code where another
1906 * thread might safely busy wait
1907 *
1908 * This basically converts console_lock into a spinlock. This marks
1909 * the section where the console_lock owner cannot sleep, because
1910 * there may be a waiter spinning (like a spinlock). Also it must be
1911 * ready to hand over the lock at the end of the section.
1912 */
1913 void console_lock_spinning_enable(void)
1914 {
1915 /*
1916 * Do not use spinning in panic(). The panic CPU wants to keep the lock.
1917 * Non-panic CPUs abandon the flush anyway.
1918 *
1919 * Just keep the lockdep annotation. The panic-CPU should avoid
1920 * taking console_owner_lock because it might cause a deadlock.
1921 * This looks like the easiest way to prevent false lockdep
1922 * reports without handling the races in a lockless way.
1923 */
1924 if (panic_in_progress())
1925 goto lockdep;
1926
1927 raw_spin_lock(&console_owner_lock);
1928 console_owner = current;
1929 raw_spin_unlock(&console_owner_lock);
1930
1931 lockdep:
1932 /* The waiter may spin on us after setting console_owner */
1933 spin_acquire(&console_owner_dep_map, 0, 0, _THIS_IP_);
1934 }
1935
1936 /**
1937 * console_lock_spinning_disable_and_check - mark end of code where another
1938 * thread was able to busy wait and check if there is a waiter
1939 * @cookie: cookie returned from console_srcu_read_lock()
1940 *
1941 * This is called at the end of the section where spinning is allowed.
1942 * It has two functions. First, it is a signal that it is no longer
1943 * safe to start busy waiting for the lock. Second, it checks if
1944 * there is a busy waiter and passes the lock rights to it.
1945 *
1946 * Important: Callers lose both the console_lock and the SRCU read lock if
1947 * there was a busy waiter. They must not touch items synchronized by
1948 * console_lock or SRCU read lock in this case.
1949 *
1950 * Return: 1 if the lock rights were passed, 0 otherwise.
1951 */
1952 int console_lock_spinning_disable_and_check(int cookie)
1953 {
1954 int waiter;
1955
1956 /*
1957 * Ignore spinning waiters during panic() because they might get stopped
1958 * or blocked at any time.
1959 *
1960 * It is safe because nobody is allowed to start spinning during panic
1961 * in the first place. If there has been a waiter then non-panic CPUs
1962 * might stay spinning. They would get stopped anyway. The panic context
1963 * will never start spinning and an interrupted spin on panic CPU will
1964 * never continue.
1965 */
1966 if (panic_in_progress()) {
1967 /* Keep lockdep happy. */
1968 spin_release(&console_owner_dep_map, _THIS_IP_);
1969 return 0;
1970 }
1971
1972 raw_spin_lock(&console_owner_lock);
1973 waiter = READ_ONCE(console_waiter);
1974 console_owner = NULL;
1975 raw_spin_unlock(&console_owner_lock);
1976
1977 if (!waiter) {
1978 spin_release(&console_owner_dep_map, _THIS_IP_);
1979 return 0;
1980 }
1981
1982 /* The waiter is now free to continue */
1983 WRITE_ONCE(console_waiter, false);
1984
1985 spin_release(&console_owner_dep_map, _THIS_IP_);
1986
1987 /*
1988 * Preserve lockdep lock ordering. Release the SRCU read lock before
1989 * releasing the console_lock.
1990 */
1991 console_srcu_read_unlock(cookie);
1992
1993 /*
1994 * Hand off console_lock to waiter. The waiter will perform
1995 * the up(). After this, the waiter is the console_lock owner.
1996 */
1997 mutex_release(&console_lock_dep_map, _THIS_IP_);
1998 return 1;
1999 }
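/*
 * Illustrative sketch, simplified from console_emit_next_record() below:
 * the console_lock owner brackets its write() call so that a spinning
 * waiter can take over. If a handover happened, the former owner must
 * not touch anything protected by console_lock afterwards.
 *
 *	printk_safe_enter_irqsave(flags);
 *	console_lock_spinning_enable();
 *	con->write(con, outbuf, len);	// the non-sleeping section
 *	handover = console_lock_spinning_disable_and_check(cookie);
 *	printk_safe_exit_irqrestore(flags);
 *	if (handover)
 *		return;			// the waiter now owns console_lock
 */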
2000
2001 /**
2002 * console_trylock_spinning - try to get console_lock by busy waiting
2003 *
2004 * This allows busy waiting for the console_lock when the current
2005 * owner is running in specially marked sections. It means that
2006 * the current owner is running and cannot reschedule until it
2007 * is ready to lose the lock.
2008 *
2009 * Return: 1 if we got the lock, 0 otherwise
2010 */
2011 static int console_trylock_spinning(void)
2012 {
2013 struct task_struct *owner = NULL;
2014 bool waiter;
2015 bool spin = false;
2016 unsigned long flags;
2017
2018 if (console_trylock())
2019 return 1;
2020
2021 /*
2022 * It's unsafe to spin once a panic has begun. If we are the
2023 * panic CPU, we may have already halted the owner of the
2024 * console_sem. If we are not the panic CPU, then we should
2025 * avoid taking console_sem, so the panic CPU has a better
2026 * chance of cleanly acquiring it later.
2027 */
2028 if (panic_in_progress())
2029 return 0;
2030
2031 printk_safe_enter_irqsave(flags);
2032
2033 raw_spin_lock(&console_owner_lock);
2034 owner = READ_ONCE(console_owner);
2035 waiter = READ_ONCE(console_waiter);
2036 if (!waiter && owner && owner != current) {
2037 WRITE_ONCE(console_waiter, true);
2038 spin = true;
2039 }
2040 raw_spin_unlock(&console_owner_lock);
2041
2042 /*
2043 * If there is an active printk() writing to the
2044 * consoles, instead of having it write our data too,
2045 * see if we can offload that load from the active
2046 * printer, and do some printing ourselves.
2047 * Go into a spin only if there isn't already a waiter
2048 * spinning, and there is an active printer, and
2049 * that active printer isn't us (recursive printk?).
2050 */
2051 if (!spin) {
2052 printk_safe_exit_irqrestore(flags);
2053 return 0;
2054 }
2055
2056 /* We spin waiting for the owner to release us */
2057 spin_acquire(&console_owner_dep_map, 0, 0, _THIS_IP_);
2058 /* Owner will clear console_waiter on hand off */
2059 while (READ_ONCE(console_waiter))
2060 cpu_relax();
2061 spin_release(&console_owner_dep_map, _THIS_IP_);
2062
2063 printk_safe_exit_irqrestore(flags);
2064 /*
2065 * The owner passed the console lock to us.
2066 * Since we did not spin on console lock, annotate
2067 * this as a trylock. Otherwise lockdep will
2068 * complain.
2069 */
2070 mutex_acquire(&console_lock_dep_map, 0, 1, _THIS_IP_);
2071
2072 /*
2073 * Update @console_may_schedule for trylock because the previous
2074 * owner may have been schedulable.
2075 */
2076 console_may_schedule = 0;
2077
2078 return 1;
2079 }
2080
2081 /*
2082 * Recursion is tracked separately on each CPU. If NMIs are supported, an
2083 * additional NMI context per CPU is also separately tracked. Until per-CPU
2084 * data is available, a separate "early tracking" is performed.
2085 */
2086 static DEFINE_PER_CPU(u8, printk_count);
2087 static u8 printk_count_early;
2088 #ifdef CONFIG_HAVE_NMI
2089 static DEFINE_PER_CPU(u8, printk_count_nmi);
2090 static u8 printk_count_nmi_early;
2091 #endif
2092
2093 /*
2094 * Recursion is limited to keep the output sane. printk() should not require
2095 * more than 1 level of recursion (allowing, for example, printk() to trigger
2096 * a WARN), but a higher value is used in case some printk-internal errors
2097 * exist, such as the ringbuffer validation checks failing.
2098 */
2099 #define PRINTK_MAX_RECURSION 3
2100
2101 /*
2102 * Return a pointer to the dedicated counter for the CPU+context of the
2103 * caller.
2104 */
2105 static u8 *__printk_recursion_counter(void)
2106 {
2107 #ifdef CONFIG_HAVE_NMI
2108 if (in_nmi()) {
2109 if (printk_percpu_data_ready())
2110 return this_cpu_ptr(&printk_count_nmi);
2111 return &printk_count_nmi_early;
2112 }
2113 #endif
2114 if (printk_percpu_data_ready())
2115 return this_cpu_ptr(&printk_count);
2116 return &printk_count_early;
2117 }
2118
2119 /*
2120 * Enter recursion tracking. Interrupts are disabled to simplify tracking.
2121 * The caller must check the boolean return value to see if the recursion is
2122 * allowed. On failure, interrupts are not disabled.
2123 *
2124 * @recursion_ptr must be a variable of type (u8 *) and is the same variable
2125 * that is passed to printk_exit_irqrestore().
2126 */
2127 #define printk_enter_irqsave(recursion_ptr, flags) \
2128 ({ \
2129 bool success = true; \
2130 \
2131 typecheck(u8 *, recursion_ptr); \
2132 local_irq_save(flags); \
2133 (recursion_ptr) = __printk_recursion_counter(); \
2134 if (*(recursion_ptr) > PRINTK_MAX_RECURSION) { \
2135 local_irq_restore(flags); \
2136 success = false; \
2137 } else { \
2138 (*(recursion_ptr))++; \
2139 } \
2140 success; \
2141 })
2142
2143 /* Exit recursion tracking, restoring interrupts. */
2144 #define printk_exit_irqrestore(recursion_ptr, flags) \
2145 do { \
2146 typecheck(u8 *, recursion_ptr); \
2147 (*(recursion_ptr))--; \
2148 local_irq_restore(flags); \
2149 } while (0)
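/*
 * Illustrative usage sketch, mirroring vprintk_store() below: the pair
 * brackets the section that stores one message, with interrupts off.
 *
 *	u8 *recursion_ptr;
 *	unsigned long flags;
 *
 *	if (!printk_enter_irqsave(recursion_ptr, flags))
 *		return 0;	// recursion limit exceeded, drop the message
 *	// ... format and commit the record ...
 *	printk_exit_irqrestore(recursion_ptr, flags);
 */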
2150
2151 int printk_delay_msec __read_mostly;
2152
2153 static inline void printk_delay(int level)
2154 {
2155 boot_delay_msec(level);
2156
2157 if (unlikely(printk_delay_msec)) {
2158 int m = printk_delay_msec;
2159
2160 while (m--) {
2161 mdelay(1);
2162 touch_nmi_watchdog();
2163 }
2164 }
2165 }
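/*
 * Illustrative sketch (not kernel code): printk_delay_msec is tunable
 * from userspace through the kernel.printk_delay sysctl, e.g.:
 *
 *	int fd = open("/proc/sys/kernel/printk_delay", O_WRONLY);
 *	write(fd, "100", 3);	// delay each message by 100 ms
 *	close(fd);
 */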
2166
2167 static inline u32 printk_caller_id(void)
2168 {
2169 u32 caller_id = 0;
2170
2171 trace_android_vh_printk_caller_id(&caller_id);
2172 if (caller_id)
2173 return caller_id;
2174
2175 return in_task() ? task_pid_nr(current) :
2176 0x80000000 + smp_processor_id();
2177 }
2178
2179 /**
2180 * printk_parse_prefix - Parse level and control flags.
2181 *
2182 * @text: The terminated text message.
2183 * @level: A pointer to the current level value, will be updated.
2184 * @flags: A pointer to the current printk_info flags, will be updated.
2185 *
2186 * @level may be NULL if the caller is not interested in the parsed value.
2187 * Otherwise the variable pointed to by @level must be set to
2188 * LOGLEVEL_DEFAULT in order to be updated with the parsed value.
2189 *
2190 * @flags may be NULL if the caller is not interested in the parsed value.
2191 * Otherwise the variable pointed to by @flags will be OR'd with the parsed
2192 * value.
2193 *
2194 * Return: The length of the parsed level and control flags.
2195 */
2196 u16 printk_parse_prefix(const char *text, int *level,
2197 enum printk_info_flags *flags)
2198 {
2199 u16 prefix_len = 0;
2200 int kern_level;
2201
2202 while (*text) {
2203 kern_level = printk_get_level(text);
2204 if (!kern_level)
2205 break;
2206
2207 switch (kern_level) {
2208 case '0' ... '7':
2209 if (level && *level == LOGLEVEL_DEFAULT)
2210 *level = kern_level - '0';
2211 break;
2212 case 'c': /* KERN_CONT */
2213 if (flags)
2214 *flags |= LOG_CONT;
2215 }
2216
2217 prefix_len += 2;
2218 text += 2;
2219 }
2220
2221 return prefix_len;
2222 }
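/*
 * Illustrative example: each KERN_SOH-based prefix item is two bytes.
 *
 *	int level = LOGLEVEL_DEFAULT;
 *	enum printk_info_flags flags = 0;
 *	u16 n = printk_parse_prefix(KERN_ERR "oops", &level, &flags);
 *	// n == 2, level == 3 (KERN_ERR), flags unchanged
 */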
2223
2224 __printf(5, 0)
2225 static u16 printk_sprint(char *text, u16 size, int facility,
2226 enum printk_info_flags *flags, const char *fmt,
2227 va_list args)
2228 {
2229 u16 text_len;
2230
2231 text_len = vscnprintf(text, size, fmt, args);
2232
2233 /* Mark and strip a trailing newline. */
2234 if (text_len && text[text_len - 1] == '\n') {
2235 text_len--;
2236 *flags |= LOG_NEWLINE;
2237 }
2238
2239 /* Strip log level and control flags. */
2240 if (facility == 0) {
2241 u16 prefix_len;
2242
2243 prefix_len = printk_parse_prefix(text, NULL, NULL);
2244 if (prefix_len) {
2245 text_len -= prefix_len;
2246 memmove(text, text + prefix_len, text_len);
2247 }
2248 }
2249
2250 trace_console(text, text_len);
2251
2252 return text_len;
2253 }
2254
2255 __printf(4, 0)
2256 int vprintk_store(int facility, int level,
2257 const struct dev_printk_info *dev_info,
2258 const char *fmt, va_list args)
2259 {
2260 struct prb_reserved_entry e;
2261 enum printk_info_flags flags = 0;
2262 struct printk_record r;
2263 unsigned long irqflags;
2264 u16 trunc_msg_len = 0;
2265 char prefix_buf[8];
2266 u8 *recursion_ptr;
2267 u16 reserve_size;
2268 va_list args2;
2269 u32 caller_id;
2270 u16 text_len;
2271 int ret = 0;
2272 u64 ts_nsec;
2273
2274 if (!printk_enter_irqsave(recursion_ptr, irqflags))
2275 return 0;
2276
2277 /*
2278 * Since the duration of printk() can vary depending on the message
2279 * and state of the ringbuffer, grab the timestamp now so that it is
2280 * close to the call of printk(). This provides a more deterministic
2281 * timestamp with respect to the caller.
2282 */
2283 ts_nsec = local_clock();
2284
2285 caller_id = printk_caller_id();
2286 trace_android_vh_printk_save_irq(&caller_id, irqflags);
2287
2288 /*
2289 * The sprintf needs to come first since the syslog prefix might be
2290 * passed in as a parameter. An extra byte must be reserved so that
2291 * later the vscnprintf() into the reserved buffer has room for the
2292 * terminating '\0', which is not counted by vsnprintf().
2293 */
2294 va_copy(args2, args);
2295 reserve_size = vsnprintf(&prefix_buf[0], sizeof(prefix_buf), fmt, args2) + 1;
2296 va_end(args2);
2297
2298 if (reserve_size > PRINTKRB_RECORD_MAX)
2299 reserve_size = PRINTKRB_RECORD_MAX;
2300
2301 /* Extract log level or control flags. */
2302 if (facility == 0)
2303 printk_parse_prefix(&prefix_buf[0], &level, &flags);
2304
2305 if (level == LOGLEVEL_DEFAULT)
2306 level = default_message_loglevel;
2307
2308 if (dev_info)
2309 flags |= LOG_NEWLINE;
2310
2311 if (flags & LOG_CONT) {
2312 prb_rec_init_wr(&r, reserve_size);
2313 if (prb_reserve_in_last(&e, prb, &r, caller_id, PRINTKRB_RECORD_MAX)) {
2314 text_len = printk_sprint(&r.text_buf[r.info->text_len], reserve_size,
2315 facility, &flags, fmt, args);
2316 r.info->text_len += text_len;
2317
2318 if (flags & LOG_NEWLINE) {
2319 r.info->flags |= LOG_NEWLINE;
2320 prb_final_commit(&e);
2321 } else {
2322 prb_commit(&e);
2323 }
2324
2325 trace_android_vh_logbuf_pr_cont(&r, text_len);
2326 ret = text_len;
2327 goto out;
2328 }
2329 }
2330
2331 /*
2332 * Explicitly initialize the record before every prb_reserve() call.
2333 * prb_reserve_in_last() and prb_reserve() purposely invalidate the
2334 * structure when they fail.
2335 */
2336 prb_rec_init_wr(&r, reserve_size);
2337 if (!prb_reserve(&e, prb, &r)) {
2338 /* truncate the message if it is too long for empty buffer */
2339 truncate_msg(&reserve_size, &trunc_msg_len);
2340
2341 prb_rec_init_wr(&r, reserve_size + trunc_msg_len);
2342 if (!prb_reserve(&e, prb, &r))
2343 goto out;
2344 }
2345
2346 /* fill message */
2347 text_len = printk_sprint(&r.text_buf[0], reserve_size, facility, &flags, fmt, args);
2348 if (trunc_msg_len)
2349 memcpy(&r.text_buf[text_len], trunc_msg, trunc_msg_len);
2350 r.info->text_len = text_len + trunc_msg_len;
2351 r.info->facility = facility;
2352 r.info->level = level & 7;
2353 r.info->flags = flags & 0x1f;
2354 r.info->ts_nsec = ts_nsec;
2355 r.info->caller_id = caller_id;
2356 if (dev_info)
2357 memcpy(&r.info->dev_info, dev_info, sizeof(r.info->dev_info));
2358
2359 /* A message without a trailing newline can be continued. */
2360 if (!(flags & LOG_NEWLINE))
2361 prb_commit(&e);
2362 else
2363 prb_final_commit(&e);
2364
2365 trace_android_rvh_logbuf(prb, &r);
2366 trace_android_vh_logbuf(prb, &r);
2367 ret = text_len + trunc_msg_len;
2368 out:
2369 printk_exit_irqrestore(recursion_ptr, irqflags);
2370 return ret;
2371 }
2372
2373 /*
2374 * This acts as a one-way switch to allow legacy consoles to print from
2375 * the printk() caller context on a panic CPU. It also attempts to flush
2376 * the legacy consoles in this context.
2377 */
2378 void printk_legacy_allow_panic_sync(void)
2379 {
2380 struct console_flush_type ft;
2381
2382 legacy_allow_panic_sync = true;
2383
2384 printk_get_console_flush_type(&ft);
2385 if (ft.legacy_direct) {
2386 if (console_trylock())
2387 console_unlock();
2388 }
2389 }
2390
2391 bool __read_mostly debug_non_panic_cpus;
2392
2393 #ifdef CONFIG_PRINTK_CALLER
2394 static int __init debug_non_panic_cpus_setup(char *str)
2395 {
2396 debug_non_panic_cpus = true;
2397 pr_info("allow messages from non-panic CPUs in panic()\n");
2398
2399 return 0;
2400 }
2401 early_param("debug_non_panic_cpus", debug_non_panic_cpus_setup);
2402 module_param(debug_non_panic_cpus, bool, 0644);
2403 MODULE_PARM_DESC(debug_non_panic_cpus,
2404 "allow messages from non-panic CPUs in panic()");
2405 #endif
2406
2407 asmlinkage int vprintk_emit(int facility, int level,
2408 const struct dev_printk_info *dev_info,
2409 const char *fmt, va_list args)
2410 {
2411 struct console_flush_type ft;
2412 int printed_len;
2413
2414 /* Suppress unimportant messages after panic happens */
2415 if (unlikely(suppress_printk))
2416 return 0;
2417
2418 /*
2419 * The messages on the panic CPU are the most important. If
2420 * non-panic CPUs are generating any messages, they will be
2421 * silently dropped.
2422 */
2423 if (other_cpu_in_panic() &&
2424 !debug_non_panic_cpus &&
2425 !panic_triggering_all_cpu_backtrace)
2426 return 0;
2427
2428 printk_get_console_flush_type(&ft);
2429
2430 /* If called from the scheduler, we can not call up(). */
2431 if (level == LOGLEVEL_SCHED) {
2432 level = LOGLEVEL_DEFAULT;
2433 ft.legacy_offload |= ft.legacy_direct;
2434 ft.legacy_direct = false;
2435 }
2436
2437 printk_delay(level);
2438
2439 printed_len = vprintk_store(facility, level, dev_info, fmt, args);
2440
2441 if (ft.nbcon_atomic)
2442 nbcon_atomic_flush_pending();
2443
2444 if (ft.nbcon_offload)
2445 nbcon_kthreads_wake();
2446
2447 if (ft.legacy_direct) {
2448 /*
2449 * The caller may be holding system-critical or
2450 * timing-sensitive locks. Disable preemption during
2451 * printing of all remaining records to all consoles so that
2452 * this context can return as soon as possible. Hopefully
2453 * another printk() caller will take over the printing.
2454 */
2455 preempt_disable();
2456 /*
2457 * Try to acquire and then immediately release the console
2458 * semaphore. The release will print out buffers. With the
2459 * spinning variant, this context tries to take over the
2460 * printing from another printing context.
2461 */
2462 if (console_trylock_spinning())
2463 console_unlock();
2464 preempt_enable();
2465 }
2466
2467 if (ft.legacy_offload)
2468 defer_console_output();
2469 else
2470 wake_up_klogd();
2471
2472 return printed_len;
2473 }
2474 EXPORT_SYMBOL(vprintk_emit);
2475
2476 int vprintk_default(const char *fmt, va_list args)
2477 {
2478 return vprintk_emit(0, LOGLEVEL_DEFAULT, NULL, fmt, args);
2479 }
2480 EXPORT_SYMBOL_GPL(vprintk_default);
2481
2482 asmlinkage __visible int _printk(const char *fmt, ...)
2483 {
2484 va_list args;
2485 int r;
2486
2487 va_start(args, fmt);
2488 r = vprintk(fmt, args);
2489 va_end(args);
2490
2491 return r;
2492 }
2493 EXPORT_SYMBOL(_printk);
2494
2495 static bool __pr_flush(struct console *con, int timeout_ms, bool reset_on_progress);
2496
2497 #else /* CONFIG_PRINTK */
2498
2499 #define printk_time false
2500
2501 #define prb_read_valid(rb, seq, r) false
2502 #define prb_first_valid_seq(rb) 0
2503 #define prb_next_seq(rb) 0
2504
2505 static u64 syslog_seq;
2506
2507 static bool __pr_flush(struct console *con, int timeout_ms, bool reset_on_progress) { return true; }
2508
2509 #endif /* CONFIG_PRINTK */
2510
2511 #ifdef CONFIG_EARLY_PRINTK
2512 struct console *early_console;
2513
2514 asmlinkage __visible void early_printk(const char *fmt, ...)
2515 {
2516 va_list ap;
2517 char buf[512];
2518 int n;
2519
2520 if (!early_console)
2521 return;
2522
2523 va_start(ap, fmt);
2524 n = vscnprintf(buf, sizeof(buf), fmt, ap);
2525 va_end(ap);
2526
2527 early_console->write(early_console, buf, n);
2528 }
2529 #endif
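/*
 * Illustrative sketch (assumption: arch early-setup code, with a
 * hypothetical "my_boot_console"): an arch points @early_console at a
 * minimal write-only console and may then use early_printk() before a
 * real console registers:
 *
 *	early_console = &my_boot_console;
 *	early_printk("early boot: cpu%d up\n", cpu);
 */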
2530
2531 static void set_user_specified(struct console_cmdline *c, bool user_specified)
2532 {
2533 if (!user_specified)
2534 return;
2535
2536 /*
2537 * @c console was defined by the user on the command line.
2538 * Do not clear it when the console is also added by SPCR or the device tree.
2539 */
2540 c->user_specified = true;
2541 /* At least one console defined by the user on the command line. */
2542 console_set_on_cmdline = 1;
2543 }
2544
2545 static int __add_preferred_console(const char *name, const short idx,
2546 const char *devname, char *options,
2547 char *brl_options, bool user_specified)
2548 {
2549 struct console_cmdline *c;
2550 int i;
2551
2552 if (!name && !devname)
2553 return -EINVAL;
2554
2555 /*
2556 * We use a signed short index for struct console for device drivers to
2557 * indicate a not yet assigned index or port. However, a negative index
2558 * value is not valid when the console name and index are defined on
2559 * the command line.
2560 */
2561 if (name && idx < 0)
2562 return -EINVAL;
2563
2564 /*
2565 * See if this tty is not yet registered, and
2566 * if we have a slot free.
2567 */
2568 for (i = 0, c = console_cmdline;
2569 i < MAX_CMDLINECONSOLES && (c->name[0] || c->devname[0]);
2570 i++, c++) {
2571 if ((name && strcmp(c->name, name) == 0 && c->index == idx) ||
2572 (devname && strcmp(c->devname, devname) == 0)) {
2573 if (!brl_options)
2574 preferred_console = i;
2575 set_user_specified(c, user_specified);
2576 return 0;
2577 }
2578 }
2579 if (i == MAX_CMDLINECONSOLES)
2580 return -E2BIG;
2581 if (!brl_options)
2582 preferred_console = i;
2583 if (name)
2584 strscpy(c->name, name);
2585 if (devname)
2586 strscpy(c->devname, devname);
2587 c->options = options;
2588 set_user_specified(c, user_specified);
2589 braille_set_options(c, brl_options);
2590
2591 c->index = idx;
2592 return 0;
2593 }
2594
2595 static int __init console_msg_format_setup(char *str)
2596 {
2597 if (!strcmp(str, "syslog"))
2598 console_msg_format = MSG_FORMAT_SYSLOG;
2599 if (!strcmp(str, "default"))
2600 console_msg_format = MSG_FORMAT_DEFAULT;
2601 return 1;
2602 }
2603 __setup("console_msg_format=", console_msg_format_setup);
2604
2605 /*
2606 * Set up a console. Called via do_early_param() in init/main.c
2607 * for each "console=" parameter in the boot command line.
2608 */
2609 static int __init console_setup(char *str)
2610 {
2611 static_assert(sizeof(console_cmdline[0].devname) >= sizeof(console_cmdline[0].name) + 4);
2612 char buf[sizeof(console_cmdline[0].devname)];
2613 char *brl_options = NULL;
2614 char *ttyname = NULL;
2615 char *devname = NULL;
2616 char *options;
2617 char *s;
2618 int idx;
2619
2620 /*
2621 * console="" or console=null have been suggested as a way to
2622 * disable console output. Use ttynull that has been created
2623 * for exactly this purpose.
2624 */
2625 if (str[0] == 0 || strcmp(str, "null") == 0) {
2626 __add_preferred_console("ttynull", 0, NULL, NULL, NULL, true);
2627 return 1;
2628 }
2629
2630 if (_braille_console_setup(&str, &brl_options))
2631 return 1;
2632
2633 /* For a DEVNAME:0.0 style console the character device is unknown early */
2634 if (strchr(str, ':'))
2635 devname = buf;
2636 else
2637 ttyname = buf;
2638
2639 /*
2640 * Decode str into name, index, options.
2641 */
2642 if (ttyname && isdigit(str[0]))
2643 scnprintf(buf, sizeof(buf), "ttyS%s", str);
2644 else
2645 strscpy(buf, str);
2646
2647 options = strchr(str, ',');
2648 if (options)
2649 *(options++) = 0;
2650
2651 #ifdef __sparc__
2652 if (!strcmp(str, "ttya"))
2653 strscpy(buf, "ttyS0");
2654 if (!strcmp(str, "ttyb"))
2655 strscpy(buf, "ttyS1");
2656 #endif
2657
2658 for (s = buf; *s; s++)
2659 if ((ttyname && isdigit(*s)) || *s == ',')
2660 break;
2661
2662 /* @idx will get defined when devname matches. */
2663 if (devname)
2664 idx = -1;
2665 else
2666 idx = simple_strtoul(s, NULL, 10);
2667
2668 *s = 0;
2669
2670 __add_preferred_console(ttyname, idx, devname, options, brl_options, true);
2671 return 1;
2672 }
2673 __setup("console=", console_setup);
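/*
 * Illustrative examples of "console=" parameters parsed above:
 *
 *	console=ttyS0,115200n8	// driver name + index + options
 *	console=DEVNAME:0.0	// character device name, resolved later
 *	console=null		// route all output to ttynull
 */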
2674
2675 /**
2676 * add_preferred_console - add a device to the list of preferred consoles.
2677 * @name: device name
2678 * @idx: device index
2679 * @options: options for this console
2680 *
2681 * The last preferred console added will be used for kernel messages
2682 * and stdin/out/err for init. Normally this is used by console_setup
2683 * above to handle user-supplied console arguments; however it can also
2684 * be used by arch-specific code either to override the user or more
2685 * commonly to provide a default console (ie from PROM variables) when
2686 * the user has not supplied one.
2687 */
2688 int add_preferred_console(const char *name, const short idx, char *options)
2689 {
2690 return __add_preferred_console(name, idx, NULL, options, NULL, false);
2691 }
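/*
 * Illustrative sketch (assumption: arch/firmware setup code): provide a
 * default console only when the user supplied none on the command line:
 *
 *	if (!console_set_on_cmdline)
 *		add_preferred_console("ttyS", 0, "115200n8");
 */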
2692
2693 /**
2694 * match_devname_and_update_preferred_console - Update a preferred console
2695 * when matching devname is found.
2696 * @devname: DEVNAME:0.0 style device name
2697 * @name: Name of the corresponding console driver, e.g. "ttyS"
2698 * @idx: Console index, e.g. port number.
2699 *
2700 * The function checks whether a device with the given @devname is
2701 * preferred via the console=DEVNAME:0.0 command line option.
2702 * It fills the missing console driver name and console index
2703 * so that a later register_console() call could find (match)
2704 * and enable this device.
2705 *
2706 * It might be used when a driver subsystem initializes particular
2707 * devices with already known DEVNAME:0.0 style names and can
2708 * predict which console driver name and index such a device
2709 * would later get associated with.
2710 *
2711 * Return: 0 on success, negative error code on failure.
2712 */
2713 int match_devname_and_update_preferred_console(const char *devname,
2714 const char *name,
2715 const short idx)
2716 {
2717 struct console_cmdline *c = console_cmdline;
2718 int i;
2719
2720 if (!devname || !strlen(devname) || !name || !strlen(name) || idx < 0)
2721 return -EINVAL;
2722
2723 for (i = 0; i < MAX_CMDLINECONSOLES && (c->name[0] || c->devname[0]);
2724 i++, c++) {
2725 if (!strcmp(devname, c->devname)) {
2726 pr_info("associate the preferred console \"%s\" with \"%s%d\"\n",
2727 devname, name, idx);
2728 strscpy(c->name, name);
2729 c->index = idx;
2730 return 0;
2731 }
2732 }
2733
2734 return -ENOENT;
2735 }
2736 EXPORT_SYMBOL_GPL(match_devname_and_update_preferred_console);
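/*
 * Illustrative sketch (hypothetical driver-subsystem code; the @port
 * fields are made up): once a probed device's future tty name is known,
 * fill in the missing console name and index:
 *
 *	match_devname_and_update_preferred_console(dev_name(port->dev),
 *						   "ttyS", port->line);
 */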
2737
2738 bool console_suspend_enabled = true;
2739 EXPORT_SYMBOL(console_suspend_enabled);
2740
2741 static int __init console_suspend_disable(char *str)
2742 {
2743 console_suspend_enabled = false;
2744 return 1;
2745 }
2746 __setup("no_console_suspend", console_suspend_disable);
2747 module_param_named(console_suspend, console_suspend_enabled,
2748 bool, S_IRUGO | S_IWUSR);
2749 MODULE_PARM_DESC(console_suspend, "suspend console during suspend"
2750 " and hibernate operations");
2751
2752 static bool printk_console_no_auto_verbose;
2753
2754 void console_verbose(void)
2755 {
2756 if (console_loglevel && !printk_console_no_auto_verbose)
2757 console_loglevel = CONSOLE_LOGLEVEL_MOTORMOUTH;
2758 }
2759 EXPORT_SYMBOL_GPL(console_verbose);
2760
2761 module_param_named(console_no_auto_verbose, printk_console_no_auto_verbose, bool, 0644);
2762 MODULE_PARM_DESC(console_no_auto_verbose, "Disable console loglevel raise to highest on oops/panic/etc");
2763
2764 /**
2765 * suspend_console - suspend the console subsystem
2766 *
2767 * This disables printk() while we go into suspend states
2768 */
2769 void suspend_console(void)
2770 {
2771 struct console *con;
2772
2773 if (!console_suspend_enabled)
2774 return;
2775 pr_info("Suspending console(s) (use no_console_suspend to debug)\n");
2776 pr_flush(1000, true);
2777
2778 console_list_lock();
2779 for_each_console(con)
2780 console_srcu_write_flags(con, con->flags | CON_SUSPENDED);
2781 console_list_unlock();
2782
2783 /*
2784 * Ensure that all SRCU list walks have completed. All printing
2785 * contexts must be able to see that they are suspended so that it
2786 * is guaranteed that all printing has stopped when this function
2787 * completes.
2788 */
2789 synchronize_srcu(&console_srcu);
2790 }
2791
2792 void resume_console(void)
2793 {
2794 struct console_flush_type ft;
2795 struct console *con;
2796
2797 if (!console_suspend_enabled)
2798 return;
2799
2800 console_list_lock();
2801 for_each_console(con)
2802 console_srcu_write_flags(con, con->flags & ~CON_SUSPENDED);
2803 console_list_unlock();
2804
2805 /*
2806 * Ensure that all SRCU list walks have completed. All printing
2807 * contexts must be able to see they are no longer suspended so
2808 * that they are guaranteed to wake up and resume printing.
2809 */
2810 synchronize_srcu(&console_srcu);
2811
2812 printk_get_console_flush_type(&ft);
2813 if (ft.nbcon_offload)
2814 nbcon_kthreads_wake();
2815 if (ft.legacy_offload)
2816 defer_console_output();
2817
2818 pr_flush(1000, true);
2819 }
2820
2821 /**
2822 * console_cpu_notify - print deferred console messages after CPU hotplug
2823 * @cpu: unused
2824 *
2825 * If printk() is called from a CPU that is not online yet, the messages
2826 * will be printed on the console only if there are CON_ANYTIME consoles.
2827 * This function is called when a new CPU comes online (or fails to come
2828 * up) or goes offline.
2829 */
2830 static int console_cpu_notify(unsigned int cpu)
2831 {
2832 int flag = 0;
2833
2834 trace_android_vh_printk_hotplug(&flag);
2835 if (flag)
2836 return 0;
2837
2838 struct console_flush_type ft;
2839
2840 if (!cpuhp_tasks_frozen) {
2841 printk_get_console_flush_type(&ft);
2842 if (ft.nbcon_atomic)
2843 nbcon_atomic_flush_pending();
2844 if (ft.legacy_direct) {
2845 if (console_trylock())
2846 console_unlock();
2847 }
2848 }
2849 return 0;
2850 }
2851
2852 /**
2853 * console_lock - block the console subsystem from printing
2854 *
2855 * Acquires a lock which guarantees that no consoles will
2856 * be in or enter their write() callback.
2857 *
2858 * Can sleep, returns nothing.
2859 */
2860 void console_lock(void)
2861 {
2862 might_sleep();
2863
2864 /* On panic, the console_lock must be left to the panic cpu. */
2865 while (other_cpu_in_panic())
2866 msleep(1000);
2867
2868 down_console_sem();
2869 console_locked = 1;
2870 console_may_schedule = 1;
2871 }
2872 EXPORT_SYMBOL(console_lock);
2873
2874 /**
2875 * console_trylock - try to block the console subsystem from printing
2876 *
2877 * Try to acquire a lock which guarantees that no consoles will
2878 * be in or enter their write() callback.
2879 *
2880 * returns 1 on success, and 0 on failure to acquire the lock.
2881 */
2882 int console_trylock(void)
2883 {
2884 /* On panic, the console_lock must be left to the panic cpu. */
2885 if (other_cpu_in_panic())
2886 return 0;
2887 if (down_trylock_console_sem())
2888 return 0;
2889 console_locked = 1;
2890 console_may_schedule = 0;
2891 return 1;
2892 }
2893 EXPORT_SYMBOL(console_trylock);
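/*
 * Illustrative usage sketch: code that must keep all consoles out of
 * their write() callbacks brackets the section with the console lock.
 * Atomic contexts can only use the trylock variant:
 *
 *	if (console_trylock()) {
 *		// no console is inside its write() callback here
 *		console_unlock();	// also flushes pending records
 *	}
 */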
2894
2895 int is_console_locked(void)
2896 {
2897 return console_locked;
2898 }
2899 EXPORT_SYMBOL(is_console_locked);
2900
2901 static void __console_unlock(void)
2902 {
2903 console_locked = 0;
2904 up_console_sem();
2905 }
2906
2907 #ifdef CONFIG_PRINTK
2908
2909 /*
2910 * Prepend the message in @pmsg->pbufs->outbuf. This is achieved by shifting
2911 * the existing message over and inserting the scratchbuf message.
2912 *
2913 * @pmsg is the original printk message.
2914 * @fmt is the printf format of the message which will prepend the existing one.
2915 *
2916 * If there is not enough space in @pmsg->pbufs->outbuf, the existing
2917 * message text will be sufficiently truncated.
2918 *
2919 * If @pmsg->pbufs->outbuf is modified, @pmsg->outbuf_len is updated.
2920 */
2921 __printf(2, 3)
2922 static void console_prepend_message(struct printk_message *pmsg, const char *fmt, ...)
2923 {
2924 struct printk_buffers *pbufs = pmsg->pbufs;
2925 const size_t scratchbuf_sz = sizeof(pbufs->scratchbuf);
2926 const size_t outbuf_sz = sizeof(pbufs->outbuf);
2927 char *scratchbuf = &pbufs->scratchbuf[0];
2928 char *outbuf = &pbufs->outbuf[0];
2929 va_list args;
2930 size_t len;
2931
2932 va_start(args, fmt);
2933 len = vscnprintf(scratchbuf, scratchbuf_sz, fmt, args);
2934 va_end(args);
2935
2936 /*
2937 * Make sure outbuf is sufficiently large before prepending.
2938 * Keep at least the prefix when the message must be truncated.
2939 * It is a rather theoretical problem when someone tries to
2940 * use a minimalist buffer.
2941 */
2942 if (WARN_ON_ONCE(len + PRINTK_PREFIX_MAX >= outbuf_sz))
2943 return;
2944
2945 if (pmsg->outbuf_len + len >= outbuf_sz) {
2946 /* Truncate the message, but keep it terminated. */
2947 pmsg->outbuf_len = outbuf_sz - (len + 1);
2948 outbuf[pmsg->outbuf_len] = 0;
2949 }
2950
2951 memmove(outbuf + len, outbuf, pmsg->outbuf_len + 1);
2952 memcpy(outbuf, scratchbuf, len);
2953 pmsg->outbuf_len += len;
2954 }
2955
2956 /*
2957 * Prepend the message in @pmsg->pbufs->outbuf with a "dropped message".
2958 * @pmsg->outbuf_len is updated appropriately.
2959 *
2960 * @pmsg is the printk message to prepend.
2961 *
2962 * @dropped is the dropped count to report in the dropped message.
2963 */
2964 void console_prepend_dropped(struct printk_message *pmsg, unsigned long dropped)
2965 {
2966 console_prepend_message(pmsg, "** %lu printk messages dropped **\n", dropped);
2967 }
2968
2969 /*
2970 * Prepend the message in @pmsg->pbufs->outbuf with a "replay message".
2971 * @pmsg->outbuf_len is updated appropriately.
2972 *
2973 * @pmsg is the printk message to prepend.
2974 */
2975 void console_prepend_replay(struct printk_message *pmsg)
2976 {
2977 console_prepend_message(pmsg, "** replaying previous printk message **\n");
2978 }
2979
2980 /*
2981 * Read and format the specified record (or a later record if the specified
2982 * record is not available).
2983 *
2984 * @pmsg will contain the formatted result. @pmsg->pbufs must point to a
2985 * struct printk_buffers.
2986 *
2987 * @seq is the record to read and format. If it is not available, the next
2988 * valid record is read.
2989 *
2990 * @is_extended specifies if the message should be formatted for extended
2991 * console output.
2992 *
2993 * @may_suppress specifies if records may be skipped based on loglevel.
2994 *
2995 * Returns false if no record is available. Otherwise true and all fields
2996 * of @pmsg are valid. (See the documentation of struct printk_message
2997 * for information about the @pmsg fields.)
2998 */
2999 bool printk_get_next_message(struct printk_message *pmsg, u64 seq,
3000 bool is_extended, bool may_suppress)
3001 {
3002 struct printk_buffers *pbufs = pmsg->pbufs;
3003 const size_t scratchbuf_sz = sizeof(pbufs->scratchbuf);
3004 const size_t outbuf_sz = sizeof(pbufs->outbuf);
3005 char *scratchbuf = &pbufs->scratchbuf[0];
3006 char *outbuf = &pbufs->outbuf[0];
3007 struct printk_info info;
3008 struct printk_record r;
3009 size_t len = 0;
3010
3011 /*
3012 * Formatting extended messages requires a separate buffer, so use the
3013 * scratch buffer to read in the ringbuffer text.
3014 *
3015 * Formatting normal messages is done in-place, so read the ringbuffer
3016 * text directly into the output buffer.
3017 */
3018 if (is_extended)
3019 prb_rec_init_rd(&r, &info, scratchbuf, scratchbuf_sz);
3020 else
3021 prb_rec_init_rd(&r, &info, outbuf, outbuf_sz);
3022
3023 if (!prb_read_valid(prb, seq, &r))
3024 return false;
3025
3026 pmsg->seq = r.info->seq;
3027 pmsg->dropped = r.info->seq - seq;
3028
3029 /* Skip record that has level above the console loglevel. */
3030 if (may_suppress && suppress_message_printing(r.info->level))
3031 goto out;
3032
3033 if (is_extended) {
3034 len = info_print_ext_header(outbuf, outbuf_sz, r.info);
3035 len += msg_print_ext_body(outbuf + len, outbuf_sz - len,
3036 &r.text_buf[0], r.info->text_len, &r.info->dev_info);
3037 } else {
3038 len = record_print_text(&r, console_msg_format & MSG_FORMAT_SYSLOG, printk_time);
3039 }
3040 out:
3041 pmsg->outbuf_len = len;
3042 return true;
3043 }
3044
3045 /*
3046 * Legacy console printing from printk() caller context does not respect
3047 * raw_spinlock/spinlock nesting. For !PREEMPT_RT the lockdep warning is a
3048 * false positive. For PREEMPT_RT the false positive condition does not
3049 * occur.
3050 *
3051 * This map is used to temporarily establish LD_WAIT_SLEEP context for the
3052 * console write() callback when legacy printing to avoid false positive
3053 * lockdep complaints, thus allowing lockdep to continue to function for
3054 * real issues.
3055 */
3056 #ifdef CONFIG_PREEMPT_RT
3057 static inline void printk_legacy_allow_spinlock_enter(void) { }
3058 static inline void printk_legacy_allow_spinlock_exit(void) { }
3059 #else
3060 static DEFINE_WAIT_OVERRIDE_MAP(printk_legacy_map, LD_WAIT_SLEEP);
3061
3062 static inline void printk_legacy_allow_spinlock_enter(void)
3063 {
3064 lock_map_acquire_try(&printk_legacy_map);
3065 }
3066
3067 static inline void printk_legacy_allow_spinlock_exit(void)
3068 {
3069 lock_map_release(&printk_legacy_map);
3070 }
3071 #endif /* CONFIG_PREEMPT_RT */
3072
3073 /*
3074 * Used as the printk buffers for non-panic, serialized console printing.
3075 * This is for legacy (!CON_NBCON) as well as all boot (CON_BOOT) consoles.
3076 * Its usage requires the console_lock held.
3077 */
3078 struct printk_buffers printk_shared_pbufs;
3079
3080 /*
3081 * Print one record for the given console. The record printed is whatever
3082 * record is the next available record for the given console.
3083 *
3084 * @handover will be set to true if a printk waiter has taken over the
3085 * console_lock, in which case the caller is no longer holding both the
3086 * console_lock and the SRCU read lock. Otherwise it is set to false.
3087 *
3088 * @cookie is the cookie from the SRCU read lock.
3089 *
3090 * Returns false if the given console has no next record to print, otherwise
3091 * true.
3092 *
3093 * Requires the console_lock and the SRCU read lock.
3094 */
3095 static bool console_emit_next_record(struct console *con, bool *handover, int cookie)
3096 {
3097 bool is_extended = console_srcu_read_flags(con) & CON_EXTENDED;
3098 char *outbuf = &printk_shared_pbufs.outbuf[0];
3099 struct printk_message pmsg = {
3100 .pbufs = &printk_shared_pbufs,
3101 };
3102 unsigned long flags;
3103
3104 *handover = false;
3105
3106 if (!printk_get_next_message(&pmsg, con->seq, is_extended, true))
3107 return false;
3108
3109 con->dropped += pmsg.dropped;
3110
3111 /* Skip messages of formatted length 0. */
3112 if (pmsg.outbuf_len == 0) {
3113 con->seq = pmsg.seq + 1;
3114 goto skip;
3115 }
3116
3117 if (con->dropped && !is_extended) {
3118 console_prepend_dropped(&pmsg, con->dropped);
3119 con->dropped = 0;
3120 }
3121
3122 /* Write everything out to the hardware. */
3123
3124 if (force_legacy_kthread() && !panic_in_progress()) {
3125 /*
3126 * With forced threading this function is in a task context
3127 * (either legacy kthread or get_init_console_seq()). There
3128 * is no need for concern about printk reentrance, handovers,
3129 * or lockdep complaints.
3130 */
3131
3132 con->write(con, outbuf, pmsg.outbuf_len);
3133 con->seq = pmsg.seq + 1;
3134 } else {
3135 /*
3136 * While actively printing out messages, if another printk()
3137 * were to occur on another CPU, it may wait for this one to
3138 * finish. This task can not be preempted if there is a
3139 * waiter waiting to take over.
3140 *
3141 * Interrupts are disabled because the hand over to a waiter
3142 * must not be interrupted until the hand over is completed
3143 * (@console_waiter is cleared).
3144 */
3145 printk_safe_enter_irqsave(flags);
3146 console_lock_spinning_enable();
3147
3148 /* Do not trace print latency. */
3149 stop_critical_timings();
3150
3151 printk_legacy_allow_spinlock_enter();
3152 con->write(con, outbuf, pmsg.outbuf_len);
3153 printk_legacy_allow_spinlock_exit();
3154
3155 start_critical_timings();
3156
3157 con->seq = pmsg.seq + 1;
3158
3159 *handover = console_lock_spinning_disable_and_check(cookie);
3160 printk_safe_exit_irqrestore(flags);
3161 }
3162 skip:
3163 return true;
3164 }
3165
3166 #else
3167
3168 static bool console_emit_next_record(struct console *con, bool *handover, int cookie)
3169 {
3170 *handover = false;
3171 return false;
3172 }
3173
3174 static inline void printk_kthreads_check_locked(void) { }
3175
3176 #endif /* CONFIG_PRINTK */
3177
3178 /*
3179 * Print out all remaining records to all consoles.
3180 *
3181 * @do_cond_resched is set by the caller. It can be true only in schedulable
3182 * context.
3183 *
3184 * @next_seq is set to the sequence number after the last available record.
3185 * The value is valid only when this function returns true. It means that all
3186 * usable consoles are completely flushed.
3187 *
3188 * @handover will be set to true if a printk waiter has taken over the
3189 * console_lock, in which case the caller is no longer holding the
3190 * console_lock. Otherwise it is set to false.
3191 *
3192 * Returns true when there was at least one usable console and all messages
3193 * were flushed to all usable consoles. A returned false informs the caller
3194 * that not everything was flushed (either there were no usable consoles or
3195 * another context has taken over printing, or it is a panic situation and this
3196 * is not the panic CPU). Regardless of the reason, the caller should assume it
3197 * is not useful to immediately try again.
3198 *
3199 * Requires the console_lock.
3200 */
3201 static bool console_flush_all(bool do_cond_resched, u64 *next_seq, bool *handover)
3202 {
3203 struct console_flush_type ft;
3204 bool any_usable = false;
3205 struct console *con;
3206 bool any_progress;
3207 int cookie;
3208
3209 *next_seq = 0;
3210 *handover = false;
3211
3212 do {
3213 any_progress = false;
3214
3215 printk_get_console_flush_type(&ft);
3216
3217 cookie = console_srcu_read_lock();
3218 for_each_console_srcu(con) {
3219 short flags = console_srcu_read_flags(con);
3220 u64 printk_seq;
3221 bool progress;
3222
3223 /*
3224 * console_flush_all() is only responsible for nbcon
3225 * consoles when the nbcon consoles cannot print via
3226 * their atomic or threaded flushing.
3227 */
3228 if ((flags & CON_NBCON) && (ft.nbcon_atomic || ft.nbcon_offload))
3229 continue;
3230
3231 if (!console_is_usable(con, flags, !do_cond_resched))
3232 continue;
3233 any_usable = true;
3234
3235 if (flags & CON_NBCON) {
3236 progress = nbcon_legacy_emit_next_record(con, handover, cookie,
3237 !do_cond_resched);
3238 printk_seq = nbcon_seq_read(con);
3239 } else {
3240 progress = console_emit_next_record(con, handover, cookie);
3241 printk_seq = con->seq;
3242 }
3243
3244 /*
3245 * If a handover has occurred, the SRCU read lock
3246 * is already released.
3247 */
3248 if (*handover)
3249 return false;
3250
3251 /* Track the next of the highest seq flushed. */
3252 if (printk_seq > *next_seq)
3253 *next_seq = printk_seq;
3254
3255 if (!progress)
3256 continue;
3257 any_progress = true;
3258
3259 /* Allow panic_cpu to take over the consoles safely. */
3260 if (other_cpu_in_panic())
3261 goto abandon;
3262
3263 if (do_cond_resched)
3264 cond_resched();
3265 }
3266 console_srcu_read_unlock(cookie);
3267 } while (any_progress);
3268
3269 return any_usable;
3270
3271 abandon:
3272 console_srcu_read_unlock(cookie);
3273 return false;
3274 }
3275
3276 static void __console_flush_and_unlock(void)
3277 {
3278 bool do_cond_resched;
3279 bool handover;
3280 bool flushed;
3281 u64 next_seq;
3282
3283 /*
3284 * Console drivers are called with interrupts disabled, so
3285 * @console_may_schedule should be cleared before; however, we may
3286 * end up dumping a lot of lines, for example, if called from
3287 * console registration path, and should invoke cond_resched()
3288 * between lines if allowable. Not doing so can cause a very long
3289 * scheduling stall on a slow console leading to RCU stall and
3290 * softlockup warnings which exacerbate the issue with more
3291 * messages practically incapacitating the system. Therefore, create
3292 * a local to use for the printing loop.
3293 */
3294 do_cond_resched = console_may_schedule;
3295
3296 do {
3297 console_may_schedule = 0;
3298
3299 flushed = console_flush_all(do_cond_resched, &next_seq, &handover);
3300 if (!handover)
3301 __console_unlock();
3302
3303 /*
3304 * Abort if there was a failure to flush all messages to all
3305 * usable consoles. Either it is not possible to flush (in
3306 * which case it would be an infinite loop of retrying) or
3307 * another context has taken over printing.
3308 */
3309 if (!flushed)
3310 break;
3311
3312 /*
3313 * Some context may have added new records after
3314 * console_flush_all() but before unlocking the console.
3315 * Re-check if there is a new record to flush. If the trylock
3316 * fails, another context is already handling the printing.
3317 */
3318 } while (prb_read_valid(prb, next_seq, NULL) && console_trylock());
3319 }
3320
3321 /**
3322 * console_unlock - unblock the legacy console subsystem from printing
3323 *
3324 * Releases the console_lock which the caller holds to block printing of
3325 * the legacy console subsystem.
3326 *
3327 * While the console_lock was held, console output may have been buffered
3328 * by printk(). If this is the case, console_unlock() emits the output on
3329 * legacy consoles prior to releasing the lock.
3330 *
3331 * console_unlock() may be called from any context.
3332 */
3333 void console_unlock(void)
3334 {
3335 struct console_flush_type ft;
3336
3337 printk_get_console_flush_type(&ft);
3338 if (ft.legacy_direct)
3339 __console_flush_and_unlock();
3340 else
3341 __console_unlock();
3342 }
3343 EXPORT_SYMBOL(console_unlock);
3344
3345 /**
3346 * console_conditional_schedule - yield the CPU if required
3347 *
3348 * If the console code is currently allowed to sleep, and
3349 * if this CPU should yield the CPU to another task, do
3350 * so here.
3351 *
3352 * Must be called while holding console_lock().
3353 */
3354 void __sched console_conditional_schedule(void)
3355 {
3356 if (console_may_schedule)
3357 cond_resched();
3358 }
3359 EXPORT_SYMBOL(console_conditional_schedule);
3360
3361 void console_unblank(void)
3362 {
3363 bool found_unblank = false;
3364 struct console *c;
3365 int cookie;
3366
3367 /*
3368 * First check if there are any consoles implementing the unblank()
3369 * callback. If not, there is no reason to continue and take the
3370 * console lock, which in particular can be dangerous if
3371 * @oops_in_progress is set.
3372 */
3373 cookie = console_srcu_read_lock();
3374 for_each_console_srcu(c) {
3375 short flags = console_srcu_read_flags(c);
3376
3377 if (flags & CON_SUSPENDED)
3378 continue;
3379
3380 if ((flags & CON_ENABLED) && c->unblank) {
3381 found_unblank = true;
3382 break;
3383 }
3384 }
3385 console_srcu_read_unlock(cookie);
3386 if (!found_unblank)
3387 return;
3388
3389 /*
3390 * Stop console printing because the unblank() callback may
3391 * assume the console is not within its write() callback.
3392 *
3393 * If @oops_in_progress is set, this may be an atomic context.
3394 * In that case, attempt a trylock as best-effort.
3395 */
3396 if (oops_in_progress) {
3397 /* Semaphores are not NMI-safe. */
3398 if (in_nmi())
3399 return;
3400
3401 /*
3402 * Attempting to trylock the console lock can deadlock
3403 * if another CPU was stopped while modifying the
3404 * semaphore. "Hope and pray" that this is not the
3405 * current situation.
3406 */
3407 if (down_trylock_console_sem() != 0)
3408 return;
3409 } else
3410 console_lock();
3411
3412 console_locked = 1;
3413 console_may_schedule = 0;
3414
3415 cookie = console_srcu_read_lock();
3416 for_each_console_srcu(c) {
3417 short flags = console_srcu_read_flags(c);
3418
3419 if (flags & CON_SUSPENDED)
3420 continue;
3421
3422 if ((flags & CON_ENABLED) && c->unblank)
3423 c->unblank();
3424 }
3425 console_srcu_read_unlock(cookie);
3426
3427 console_unlock();
3428
3429 if (!oops_in_progress)
3430 pr_flush(1000, true);
3431 }
3432
3433 /*
3434 * Rewind all consoles to the oldest available record.
3435 *
3436 * IMPORTANT: The function is safe only when called under
3437 * console_lock(). It is not enforced because
3438 * it is used as a best effort in panic().
3439 */
3440 static void __console_rewind_all(void)
3441 {
3442 struct console *c;
3443 short flags;
3444 int cookie;
3445 u64 seq;
3446
3447 seq = prb_first_valid_seq(prb);
3448
3449 cookie = console_srcu_read_lock();
3450 for_each_console_srcu(c) {
3451 flags = console_srcu_read_flags(c);
3452
3453 if (flags & CON_NBCON) {
3454 nbcon_seq_force(c, seq);
3455 } else {
3456 /*
3457 * This assignment is safe only when called under
3458 * console_lock(). On panic, legacy consoles are
3459 * only best effort.
3460 */
3461 c->seq = seq;
3462 }
3463 }
3464 console_srcu_read_unlock(cookie);
3465 }
3466
3467 /**
3468 * console_flush_on_panic - flush console content on panic
3469 * @mode: flush all messages in buffer or just the pending ones
3470 *
3471 * Immediately output all pending messages no matter what.
3472 */
3473 void console_flush_on_panic(enum con_flush_mode mode)
3474 {
3475 struct console_flush_type ft;
3476 bool handover;
3477 u64 next_seq;
3478
3479 /*
3480 * Ignore the console lock and flush out the messages. Attempting a
3481 * trylock would not be useful because:
3482 *
3483 * - if it is contended, it must be ignored anyway
3484 * - console_lock() and console_trylock() block and fail
3485 * respectively in panic for non-panic CPUs
3486 * - semaphores are not NMI-safe
3487 */
3488
3489 /*
3490 * If another context is holding the console lock,
3491 * @console_may_schedule might be set. Clear it so that
3492 * this context does not call cond_resched() while flushing.
3493 */
3494 console_may_schedule = 0;
3495
3496 if (mode == CONSOLE_REPLAY_ALL)
3497 __console_rewind_all();
3498
3499 printk_get_console_flush_type(&ft);
3500 if (ft.nbcon_atomic)
3501 nbcon_atomic_flush_pending();
3502
3503 /* Flush legacy consoles once allowed, even when dangerous. */
3504 if (legacy_allow_panic_sync)
3505 console_flush_all(false, &next_seq, &handover);
3506 }
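/*
 * Illustrative sketch, simplified from the panic() path: after storing
 * the final messages, force everything out:
 *
 *	console_flush_on_panic(CONSOLE_FLUSH_PENDING);
 *	// or, to replay the whole ring buffer from its oldest record:
 *	console_flush_on_panic(CONSOLE_REPLAY_ALL);
 */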
3507
3508 /*
3509 * Return the console tty driver structure and its associated index
3510 */
3511 struct tty_driver *console_device(int *index)
3512 {
3513 struct console *c;
3514 struct tty_driver *driver = NULL;
3515 int cookie;
3516
3517 /*
3518 * Take console_lock to serialize device() callback with
3519 * other console operations. For example, fg_console is
3520 * modified under console_lock when switching vt.
3521 */
3522 console_lock();
3523
3524 cookie = console_srcu_read_lock();
3525 for_each_console_srcu(c) {
3526 if (!c->device)
3527 continue;
3528 driver = c->device(c, index);
3529 if (driver)
3530 break;
3531 }
3532 console_srcu_read_unlock(cookie);
3533
3534 console_unlock();
3535 return driver;
3536 }
3537
3538 /*
3539 * Prevent further output on the passed console device so that (for example)
3540 * serial drivers can disable console output before suspending a port, and can
3541 * re-enable output afterwards.
3542 */
3543 void console_stop(struct console *console)
3544 {
3545 __pr_flush(console, 1000, true);
3546 console_list_lock();
3547 console_srcu_write_flags(console, console->flags & ~CON_ENABLED);
3548 console_list_unlock();
3549
3550 /*
3551 * Ensure that all SRCU list walks have completed. All contexts must
3552 * be able to see that this console is disabled so that (for example)
3553 * the caller can suspend the port without risk of another context
3554 * using the port.
3555 */
3556 synchronize_srcu(&console_srcu);
3557 }
3558 EXPORT_SYMBOL(console_stop);
3559
3560 void console_start(struct console *console)
3561 {
3562 struct console_flush_type ft;
3563 bool is_nbcon;
3564
3565 console_list_lock();
3566 console_srcu_write_flags(console, console->flags | CON_ENABLED);
3567 is_nbcon = console->flags & CON_NBCON;
3568 console_list_unlock();
3569
3570 /*
3571 * Ensure that all SRCU list walks have completed. The related
3572 * printing context must be able to see it is enabled so that
3573 * it is guaranteed to wake up and resume printing.
3574 */
3575 synchronize_srcu(&console_srcu);
3576
3577 printk_get_console_flush_type(&ft);
3578 if (is_nbcon && ft.nbcon_offload)
3579 nbcon_kthread_wake(console);
3580 else if (ft.legacy_offload)
3581 defer_console_output();
3582
3583 __pr_flush(console, 1000, true);
3584 }
3585 EXPORT_SYMBOL(console_start);
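/*
 * Illustrative sketch (hypothetical serial driver; @port->cons is made
 * up): pause console output around a port suspend, as described above:
 *
 *	console_stop(port->cons);	// no further write() calls
 *	// ... power down or reconfigure the port hardware ...
 *	console_start(port->cons);	// re-enable and flush
 */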
3586
3587 #ifdef CONFIG_PRINTK
3588 static int unregister_console_locked(struct console *console);
3589
3590 /* True when system boot is far enough to create printer threads. */
3591 static bool printk_kthreads_ready __ro_after_init;
3592
3593 static struct task_struct *printk_legacy_kthread;
3594
3595 static bool legacy_kthread_should_wakeup(void)
3596 {
3597 struct console_flush_type ft;
3598 struct console *con;
3599 bool ret = false;
3600 int cookie;
3601
3602 if (kthread_should_stop())
3603 return true;
3604
3605 printk_get_console_flush_type(&ft);
3606
3607 cookie = console_srcu_read_lock();
3608 for_each_console_srcu(con) {
3609 short flags = console_srcu_read_flags(con);
3610 u64 printk_seq;
3611
3612 /*
3613 * The legacy printer thread is only responsible for nbcon
3614 * consoles when the nbcon consoles cannot print via their
3615 * atomic or threaded flushing.
3616 */
3617 if ((flags & CON_NBCON) && (ft.nbcon_atomic || ft.nbcon_offload))
3618 continue;
3619
3620 if (!console_is_usable(con, flags, false))
3621 continue;
3622
3623 if (flags & CON_NBCON) {
3624 printk_seq = nbcon_seq_read(con);
3625 } else {
3626 /*
3627 * It is safe to read @seq because only this
3628 * thread context updates @seq.
3629 */
3630 printk_seq = con->seq;
3631 }
3632
3633 if (prb_read_valid(prb, printk_seq, NULL)) {
3634 ret = true;
3635 break;
3636 }
3637 }
3638 console_srcu_read_unlock(cookie);
3639
3640 return ret;
3641 }
3642
3643 static int legacy_kthread_func(void *unused)
3644 {
3645 for (;;) {
3646 wait_event_interruptible(legacy_wait, legacy_kthread_should_wakeup());
3647
3648 if (kthread_should_stop())
3649 break;
3650
3651 console_lock();
3652 __console_flush_and_unlock();
3653 }
3654
3655 return 0;
3656 }
3657
3658 static bool legacy_kthread_create(void)
3659 {
3660 struct task_struct *kt;
3661
3662 lockdep_assert_console_list_lock_held();
3663
3664 kt = kthread_run(legacy_kthread_func, NULL, "pr/legacy");
3665 if (WARN_ON(IS_ERR(kt))) {
3666 pr_err("failed to start legacy printing thread\n");
3667 return false;
3668 }
3669
3670 printk_legacy_kthread = kt;
3671
3672 /*
3673 * It is important that console printing threads are scheduled
3674 * shortly after a printk call and with generous runtime budgets.
3675 */
3676 sched_set_normal(printk_legacy_kthread, -20);
3677
3678 return true;
3679 }
3680
3681 /**
3682 * printk_kthreads_shutdown - shutdown all threaded printers
3683 *
3684 * On system shutdown all threaded printers are stopped. This allows printk
3685 * to transition back to atomic printing, thus providing a robust mechanism
3686 * for the final shutdown/reboot messages to be output.
3687 */
printk_kthreads_shutdown(void)3688 static void printk_kthreads_shutdown(void)
3689 {
3690 struct console *con;
3691
3692 console_list_lock();
3693 if (printk_kthreads_running) {
3694 printk_kthreads_running = false;
3695
3696 for_each_console(con) {
3697 if (con->flags & CON_NBCON)
3698 nbcon_kthread_stop(con);
3699 }
3700
3701 /*
3702 * The threads may have been stopped while printing a
3703 * backlog. Flush any records left over.
3704 */
3705 nbcon_atomic_flush_pending();
3706 }
3707 console_list_unlock();
3708 }
3709
3710 static struct syscore_ops printk_syscore_ops = {
3711 .shutdown = printk_kthreads_shutdown,
3712 };

/*
 * If appropriate, start nbcon kthreads and set @printk_kthreads_running.
 * If any kthreads fail to start, those consoles are unregistered.
 *
 * Must be called under console_list_lock().
 */
static void printk_kthreads_check_locked(void)
{
        struct hlist_node *tmp;
        struct console *con;

        lockdep_assert_console_list_lock_held();

        if (!printk_kthreads_ready)
                return;

        if (have_legacy_console || have_boot_console) {
                if (!printk_legacy_kthread &&
                    force_legacy_kthread() &&
                    !legacy_kthread_create()) {
                        /*
                         * All legacy consoles must be unregistered. If there
                         * are any nbcon consoles, they will set up their own
                         * kthread.
                         */
                        hlist_for_each_entry_safe(con, tmp, &console_list, node) {
                                if (con->flags & CON_NBCON)
                                        continue;

                                unregister_console_locked(con);
                        }
                }
        } else if (printk_legacy_kthread) {
                kthread_stop(printk_legacy_kthread);
                printk_legacy_kthread = NULL;
        }

        /*
         * Printer threads cannot be started as long as any boot console is
         * registered because there is no way to synchronize the hardware
         * registers between boot console code and regular console code.
         * It can only be known that there will be no new boot consoles when
         * an nbcon console is registered.
         */
        if (have_boot_console || !have_nbcon_console) {
                /* Clear flag in case all nbcon consoles unregistered. */
                printk_kthreads_running = false;
                return;
        }

        if (printk_kthreads_running)
                return;

        hlist_for_each_entry_safe(con, tmp, &console_list, node) {
                if (!(con->flags & CON_NBCON))
                        continue;

                if (!nbcon_kthread_create(con))
                        unregister_console_locked(con);
        }

        printk_kthreads_running = true;
}

static int __init printk_set_kthreads_ready(void)
{
        register_syscore_ops(&printk_syscore_ops);

        console_list_lock();
        printk_kthreads_ready = true;
        printk_kthreads_check_locked();
        console_list_unlock();

        return 0;
}
early_initcall(printk_set_kthreads_ready);
#endif /* CONFIG_PRINTK */

static int __read_mostly keep_bootcon;

static int __init keep_bootcon_setup(char *str)
{
        keep_bootcon = 1;
        pr_info("debug: skip boot console de-registration.\n");

        return 0;
}

early_param("keep_bootcon", keep_bootcon_setup);
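
/*
 * Example (kernel command line): booting with "keep_bootcon" keeps the
 * boot consoles registered alongside the real console, which can help
 * when the real console dies early. The earlycon value below is only
 * an illustration:
 *
 *      earlycon=uart8250,io,0x3f8 keep_bootcon
 */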

static int console_call_setup(struct console *newcon, char *options)
{
        int err;

        if (!newcon->setup)
                return 0;

        /* Synchronize with possible boot console. */
        console_lock();
        err = newcon->setup(newcon, options);
        console_unlock();

        return err;
}

/*
 * This is called by register_console() to try to match
 * the newly registered console with any of the ones selected
 * by either the command line or add_preferred_console() and
 * setup/enable it.
 *
 * Care needs to be taken with consoles that are statically
 * enabled, such as netconsole.
 */
static int try_enable_preferred_console(struct console *newcon,
                                        bool user_specified)
{
        struct console_cmdline *c;
        int i, err;

        for (i = 0, c = console_cmdline;
             i < MAX_CMDLINECONSOLES && (c->name[0] || c->devname[0]);
             i++, c++) {
                /* Console not yet initialized? */
                if (!c->name[0])
                        continue;
                if (c->user_specified != user_specified)
                        continue;
                if (!newcon->match ||
                    newcon->match(newcon, c->name, c->index, c->options) != 0) {
                        /* default matching */
                        BUILD_BUG_ON(sizeof(c->name) != sizeof(newcon->name));
                        if (strcmp(c->name, newcon->name) != 0)
                                continue;
                        if (newcon->index >= 0 &&
                            newcon->index != c->index)
                                continue;
                        if (newcon->index < 0)
                                newcon->index = c->index;

                        if (_braille_register_console(newcon, c))
                                return 0;

                        err = console_call_setup(newcon, c->options);
                        if (err)
                                return err;
                }
                newcon->flags |= CON_ENABLED;
                if (i == preferred_console)
                        newcon->flags |= CON_CONSDEV;
                return 0;
        }

        /*
         * Some consoles, such as pstore and netconsole, can be enabled even
         * without matching. Accept the pre-enabled consoles only when match()
         * and setup() have had a chance to be called.
         */
        if (newcon->flags & CON_ENABLED && c->user_specified == user_specified)
                return 0;

        return -ENOENT;
}

/* Try to enable the console unconditionally */
static void try_enable_default_console(struct console *newcon)
{
        if (newcon->index < 0)
                newcon->index = 0;

        if (console_call_setup(newcon, NULL) != 0)
                return;

        newcon->flags |= CON_ENABLED;

        if (newcon->device)
                newcon->flags |= CON_CONSDEV;
}

/* Return the starting sequence number for a newly registered console. */
static u64 get_init_console_seq(struct console *newcon, bool bootcon_registered)
{
        struct console *con;
        bool handover;
        u64 init_seq;

        if (newcon->flags & (CON_PRINTBUFFER | CON_BOOT)) {
                /* Get a consistent copy of @syslog_seq. */
                mutex_lock(&syslog_lock);
                init_seq = syslog_seq;
                mutex_unlock(&syslog_lock);
        } else {
                /* Begin with next message added to ringbuffer. */
                init_seq = prb_next_seq(prb);

                /*
                 * If any enabled boot consoles are due to be unregistered
                 * shortly, some may not be caught up and may be the same
                 * device as @newcon. Since it is not known which boot console
                 * is the same device, flush all consoles and, if necessary,
                 * start with the message of the enabled boot console that is
                 * the furthest behind.
                 */
                if (bootcon_registered && !keep_bootcon) {
                        /*
                         * Hold the console_lock to stop console printing and
                         * guarantee safe access to console->seq.
                         */
                        console_lock();

                        /*
                         * Flush all consoles and set the console to start at
                         * the next unprinted sequence number.
                         */
                        if (!console_flush_all(true, &init_seq, &handover)) {
                                /*
                                 * Flushing failed. Just choose the lowest
                                 * sequence of the enabled boot consoles.
                                 */

                                /*
                                 * If there was a handover, this context no
                                 * longer holds the console_lock.
                                 */
                                if (handover)
                                        console_lock();

                                init_seq = prb_next_seq(prb);
                                for_each_console(con) {
                                        u64 seq;

                                        if (!(con->flags & CON_BOOT) ||
                                            !(con->flags & CON_ENABLED)) {
                                                continue;
                                        }

                                        if (con->flags & CON_NBCON)
                                                seq = nbcon_seq_read(con);
                                        else
                                                seq = con->seq;

                                        if (seq < init_seq)
                                                init_seq = seq;
                                }
                        }

                        console_unlock();
                }
        }

        return init_seq;
}

#define console_first() \
        hlist_entry(console_list.first, struct console, node)

static int unregister_console_locked(struct console *console);

/*
 * The console driver calls this routine during kernel initialization
 * to register the console printing procedure with printk() and to
 * print any messages that were printed by the kernel before the
 * console driver was initialized.
 *
 * This can happen pretty early during the boot process (because of
 * early_printk) - sometimes before setup_arch() completes - be careful
 * of what kernel features are used - they may not be initialised yet.
 *
 * There are two types of consoles - bootconsoles (early_printk) and
 * "real" consoles (everything which is not a bootconsole) which are
 * handled differently.
 * - Any number of bootconsoles can be registered at any time.
 * - As soon as a "real" console is registered, all bootconsoles
 *   will be unregistered automatically.
 * - Once a "real" console is registered, any attempt to register a
 *   bootconsole will be rejected.
 */
void register_console(struct console *newcon)
{
        bool use_device_lock = (newcon->flags & CON_NBCON) && newcon->write_atomic;
        bool bootcon_registered = false;
        bool realcon_registered = false;
        struct console *con;
        unsigned long flags;
        u64 init_seq;
        int err;

        console_list_lock();

        for_each_console(con) {
                if (WARN(con == newcon, "console '%s%d' already registered\n",
                         con->name, con->index)) {
                        goto unlock;
                }

                if (con->flags & CON_BOOT)
                        bootcon_registered = true;
                else
                        realcon_registered = true;
        }

        /* Do not register boot consoles when there already is a real one. */
        if ((newcon->flags & CON_BOOT) && realcon_registered) {
                pr_info("Too late to register bootconsole %s%d\n",
                        newcon->name, newcon->index);
                goto unlock;
        }

        if (newcon->flags & CON_NBCON) {
                /*
                 * Ensure the nbcon console buffers can be allocated
                 * before modifying any global data.
                 */
                if (!nbcon_alloc(newcon))
                        goto unlock;
        }

        /*
         * See if we want to enable this console driver by default.
         *
         * Not when a console is preferred by the command line, device
         * tree, or SPCR.
         *
         * The first real console with tty binding (driver) wins. More
         * consoles might get enabled before the right one is found.
         *
         * Note that a console with tty binding will have CON_CONSDEV
         * flag set and will be first in the list.
         */
        if (preferred_console < 0) {
                if (hlist_empty(&console_list) || !console_first()->device ||
                    console_first()->flags & CON_BOOT) {
                        try_enable_default_console(newcon);
                }
        }

        /* See if this console matches one we selected on the command line */
        err = try_enable_preferred_console(newcon, true);

        /* If not, try to match against the platform default(s) */
        if (err == -ENOENT)
                err = try_enable_preferred_console(newcon, false);

        /* printk() messages are not printed to the Braille console. */
        if (err || newcon->flags & CON_BRL) {
                if (newcon->flags & CON_NBCON)
                        nbcon_free(newcon);
                goto unlock;
        }

        /*
         * If we have a bootconsole, and are switching to a real console,
         * don't print everything out again, since when the boot console
         * and the real console are the same physical device, it's annoying
         * to see the beginning boot messages twice.
         */
        if (bootcon_registered &&
            ((newcon->flags & (CON_CONSDEV | CON_BOOT)) == CON_CONSDEV)) {
                newcon->flags &= ~CON_PRINTBUFFER;
        }

        newcon->dropped = 0;
        init_seq = get_init_console_seq(newcon, bootcon_registered);

        if (newcon->flags & CON_NBCON) {
                have_nbcon_console = true;
                nbcon_seq_force(newcon, init_seq);
        } else {
                have_legacy_console = true;
                newcon->seq = init_seq;
        }

        if (newcon->flags & CON_BOOT)
                have_boot_console = true;

        /*
         * If another context is actively using the hardware of this new
         * console, it will not be aware of the nbcon synchronization. There
         * is a risk that two contexts could access the hardware
         * simultaneously if this new console is used for atomic printing
         * and the other context is still using the hardware.
         *
         * Use the driver synchronization to ensure that the hardware is not
         * in use while this new console transitions to being registered.
         */
        if (use_device_lock)
                newcon->device_lock(newcon, &flags);

        /*
         * Put this console in the list - keep the
         * preferred driver at the head of the list.
         */
        if (hlist_empty(&console_list)) {
                /* Ensure CON_CONSDEV is always set for the head. */
                newcon->flags |= CON_CONSDEV;
                hlist_add_head_rcu(&newcon->node, &console_list);

        } else if (newcon->flags & CON_CONSDEV) {
                /* Only the new head can have CON_CONSDEV set. */
                console_srcu_write_flags(console_first(), console_first()->flags & ~CON_CONSDEV);
                hlist_add_head_rcu(&newcon->node, &console_list);

        } else {
                hlist_add_behind_rcu(&newcon->node, console_list.first);
        }

        /*
         * No need to synchronize SRCU here! The caller does not rely
         * on all contexts being able to see the new console before
         * register_console() completes.
         */

        /* This new console is now registered. */
        if (use_device_lock)
                newcon->device_unlock(newcon, flags);

        console_sysfs_notify();

        /*
         * By unregistering the bootconsoles after we enable the real console
         * we get the "console xxx enabled" message on all the consoles -
         * boot consoles, real consoles, etc - this is to ensure that end
         * users know there might be something in the kernel's log buffer that
         * went to the bootconsole (that they do not see on the real console).
         */
        con_printk(KERN_INFO, newcon, "enabled\n");
        if (bootcon_registered &&
            ((newcon->flags & (CON_CONSDEV | CON_BOOT)) == CON_CONSDEV) &&
            !keep_bootcon) {
                struct hlist_node *tmp;

                hlist_for_each_entry_safe(con, tmp, &console_list, node) {
                        if (con->flags & CON_BOOT)
                                unregister_console_locked(con);
                }
        }

        /* Changed console list, may require printer threads to start/stop. */
        printk_kthreads_check_locked();
unlock:
        console_list_unlock();
}
EXPORT_SYMBOL(register_console);
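
/*
 * Minimal registration sketch (hypothetical driver, illustration only):
 * a legacy console supplies at least a name, a write() callback and an
 * index, and typically requests a replay of the log buffer:
 *
 *      static void my_console_write(struct console *con, const char *s,
 *                                   unsigned int count)
 *      {
 *              // push @count bytes of @s to the hardware
 *      }
 *
 *      static struct console my_console = {
 *              .name   = "mycon",
 *              .write  = my_console_write,
 *              .flags  = CON_PRINTBUFFER,
 *              .index  = -1,
 *      };
 *
 *      register_console(&my_console);
 */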

/* Must be called under console_list_lock(). */
static int unregister_console_locked(struct console *console)
{
        bool use_device_lock = (console->flags & CON_NBCON) && console->write_atomic;
        bool found_legacy_con = false;
        bool found_nbcon_con = false;
        bool found_boot_con = false;
        unsigned long flags;
        struct console *c;
        int res;

        lockdep_assert_console_list_lock_held();

        con_printk(KERN_INFO, console, "disabled\n");

        res = _braille_unregister_console(console);
        if (res < 0)
                return res;
        if (res > 0)
                return 0;

        if (!console_is_registered_locked(console))
                res = -ENODEV;
        else if (console_is_usable(console, console->flags, true))
                __pr_flush(console, 1000, true);

        /* Disable it unconditionally */
        console_srcu_write_flags(console, console->flags & ~CON_ENABLED);

        if (res < 0)
                return res;

        /*
         * Use the driver synchronization to ensure that the hardware is not
         * in use while this console transitions to being unregistered.
         */
        if (use_device_lock)
                console->device_lock(console, &flags);

        hlist_del_init_rcu(&console->node);

        if (use_device_lock)
                console->device_unlock(console, flags);

        /*
         * <HISTORICAL>
         * If this isn't the last console and it has CON_CONSDEV set, we
         * need to set it on the next preferred console.
         * </HISTORICAL>
         *
         * The above makes no sense as there is no guarantee that the next
         * console has any device attached. Oh well....
         */
        if (!hlist_empty(&console_list) && console->flags & CON_CONSDEV)
                console_srcu_write_flags(console_first(), console_first()->flags | CON_CONSDEV);

        /*
         * Ensure that all SRCU list walks have completed. No context may
         * still see this console in the list, so that any exit/cleanup
         * routines can be performed safely.
         */
        synchronize_srcu(&console_srcu);

        if (console->flags & CON_NBCON)
                nbcon_free(console);

        console_sysfs_notify();

        if (console->exit)
                res = console->exit(console);

        /*
         * With this console gone, the global flags tracking registered
         * console types may have changed. Update them.
         */
        for_each_console(c) {
                if (c->flags & CON_BOOT)
                        found_boot_con = true;

                if (c->flags & CON_NBCON)
                        found_nbcon_con = true;
                else
                        found_legacy_con = true;
        }
        if (!found_boot_con)
                have_boot_console = found_boot_con;
        if (!found_legacy_con)
                have_legacy_console = found_legacy_con;
        if (!found_nbcon_con)
                have_nbcon_console = found_nbcon_con;

        /* Changed console list, may require printer threads to start/stop. */
        printk_kthreads_check_locked();

        return res;
}

int unregister_console(struct console *console)
{
        int res;

        console_list_lock();
        res = unregister_console_locked(console);
        console_list_unlock();
        return res;
}
EXPORT_SYMBOL(unregister_console);

/**
 * console_force_preferred_locked - force a registered console preferred
 * @con: The registered console to force preferred.
 *
 * Must be called under console_list_lock().
 */
void console_force_preferred_locked(struct console *con)
{
        struct console *cur_pref_con;

        if (!console_is_registered_locked(con))
                return;

        cur_pref_con = console_first();

        /* Already preferred? */
        if (cur_pref_con == con)
                return;

        /*
         * Delete, but do not re-initialize the entry. This allows the console
         * to continue to appear registered (via any hlist_unhashed_lockless()
         * checks), even though it was briefly removed from the console list.
         */
        hlist_del_rcu(&con->node);

        /*
         * Ensure that all SRCU list walks have completed so that the console
         * can be added to the beginning of the console list and its forward
         * list pointer can be re-initialized.
         */
        synchronize_srcu(&console_srcu);

        con->flags |= CON_CONSDEV;
        WARN_ON(!con->device);

        /* Only the new head can have CON_CONSDEV set. */
        console_srcu_write_flags(cur_pref_con, cur_pref_con->flags & ~CON_CONSDEV);
        hlist_add_head_rcu(&con->node, &console_list);
}
EXPORT_SYMBOL(console_force_preferred_locked);

/*
 * Initialize the console device. This is called *early*, so
 * we can't necessarily depend on lots of kernel help here.
 * Just do some early initializations, and do the complex setup
 * later.
 */
void __init console_init(void)
{
        int ret;
        initcall_t call;
        initcall_entry_t *ce;

        /* Setup the default TTY line discipline. */
        n_tty_init();

        /*
         * set up the console device so that later boot sequences can
         * inform about problems etc..
         */
        ce = __con_initcall_start;
        trace_initcall_level("console");
        while (ce < __con_initcall_end) {
                call = initcall_from_entry(ce);
                trace_initcall_start(call);
                ret = call();
                trace_initcall_finish(call, ret);
                ce++;
        }
}

/*
 * Some boot consoles access data that is in the init section and which will
 * be discarded after the initcalls have been run. To make sure that no code
 * will access this data, unregister the boot consoles in a late initcall.
 *
 * If for some reason, such as deferred probe or the driver being a loadable
 * module, the real console hasn't registered yet at this point, there will
 * be a brief interval in which no messages are logged to the console, which
 * makes it difficult to diagnose problems that occur during this time.
 *
 * To mitigate this problem somewhat, only unregister consoles whose memory
 * intersects with the init section. Note that all other boot consoles will
 * get unregistered when the real preferred console is registered.
 */
static int __init printk_late_init(void)
{
        struct hlist_node *tmp;
        struct console *con;
        int ret;

        console_list_lock();
        hlist_for_each_entry_safe(con, tmp, &console_list, node) {
                if (!(con->flags & CON_BOOT))
                        continue;

                /* Check addresses that might be used for enabled consoles. */
                if (init_section_intersects(con, sizeof(*con)) ||
                    init_section_contains(con->write, 0) ||
                    init_section_contains(con->read, 0) ||
                    init_section_contains(con->device, 0) ||
                    init_section_contains(con->unblank, 0) ||
                    init_section_contains(con->data, 0)) {
                        /*
                         * Please, consider moving the reported consoles out
                         * of the init section.
                         */
                        pr_warn("bootconsole [%s%d] uses init memory and must be disabled even before the real one is ready\n",
                                con->name, con->index);
                        unregister_console_locked(con);
                }
        }
        console_list_unlock();

        ret = cpuhp_setup_state_nocalls(CPUHP_PRINTK_DEAD, "printk:dead", NULL,
                                        console_cpu_notify);
        WARN_ON(ret < 0);
        ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "printk:online",
                                        console_cpu_notify, NULL);
        WARN_ON(ret < 0);
        printk_sysctl_init();
        return 0;
}
late_initcall(printk_late_init);

#if defined CONFIG_PRINTK
/* If @con is specified, only wait for that console. Otherwise wait for all. */
static bool __pr_flush(struct console *con, int timeout_ms, bool reset_on_progress)
{
        unsigned long timeout_jiffies = msecs_to_jiffies(timeout_ms);
        unsigned long remaining_jiffies = timeout_jiffies;
        struct console_flush_type ft;
        struct console *c;
        u64 last_diff = 0;
        u64 printk_seq;
        short flags;
        int cookie;
        u64 diff;
        u64 seq;

        /* Sorry, pr_flush() will not work this early. */
        if (system_state < SYSTEM_SCHEDULING)
                return false;

        might_sleep();

        seq = prb_next_reserve_seq(prb);

        /* Flush the consoles so that records up to @seq are printed. */
        printk_get_console_flush_type(&ft);
        if (ft.nbcon_atomic)
                nbcon_atomic_flush_pending();
        if (ft.legacy_direct) {
                console_lock();
                console_unlock();
        }

        for (;;) {
                unsigned long begin_jiffies;
                unsigned long slept_jiffies;

                diff = 0;

                /*
                 * Hold the console_lock to guarantee safe access to
                 * console->seq. Releasing console_lock flushes more
                 * records in case @seq is still not printed on all
                 * usable consoles.
                 *
                 * Holding the console_lock is not necessary if there
                 * are no legacy or boot consoles. However, such a
                 * console could register at any time. Always hold the
                 * console_lock as a precaution rather than
                 * synchronizing against register_console().
                 */
                console_lock();

                cookie = console_srcu_read_lock();
                for_each_console_srcu(c) {
                        if (con && con != c)
                                continue;

                        flags = console_srcu_read_flags(c);

                        /*
                         * If consoles are not usable, it cannot be expected
                         * that they make forward progress, so only increment
                         * @diff for usable consoles.
                         */
                        if (!console_is_usable(c, flags, true) &&
                            !console_is_usable(c, flags, false)) {
                                continue;
                        }

                        if (flags & CON_NBCON) {
                                printk_seq = nbcon_seq_read(c);
                        } else {
                                printk_seq = c->seq;
                        }

                        if (printk_seq < seq)
                                diff += seq - printk_seq;
                }
                console_srcu_read_unlock(cookie);

                if (diff != last_diff && reset_on_progress)
                        remaining_jiffies = timeout_jiffies;

                console_unlock();

                /* Note: @diff is 0 if there are no usable consoles. */
                if (diff == 0 || remaining_jiffies == 0)
                        break;

                /* msleep(1) might sleep much longer. Check time by jiffies. */
                begin_jiffies = jiffies;
                msleep(1);
                slept_jiffies = jiffies - begin_jiffies;

                remaining_jiffies -= min(slept_jiffies, remaining_jiffies);

                last_diff = diff;
        }

        return (diff == 0);
}

/**
 * pr_flush() - Wait for printing threads to catch up.
 *
 * @timeout_ms:        The maximum time (in ms) to wait.
 * @reset_on_progress: Reset the timeout if forward progress is seen.
 *
 * A value of 0 for @timeout_ms means no waiting will occur. A value of -1
 * represents infinite waiting.
 *
 * If @reset_on_progress is true, the timeout will be reset whenever any
 * printer has been seen to make some forward progress.
 *
 * Context: Process context. May sleep while acquiring console lock.
 * Return: true if all usable printers are caught up.
 */
bool pr_flush(int timeout_ms, bool reset_on_progress)
{
        return __pr_flush(NULL, timeout_ms, reset_on_progress);
}
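
/*
 * Usage sketch (illustrative only): a caller about to reset or power
 * off the machine might wait up to one second for consoles to catch up:
 *
 *      if (!pr_flush(1000, true))
 *              pr_warn("consoles not caught up, continuing anyway\n");
 */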

/*
 * Delayed printk version, for scheduler-internal messages:
 */
#define PRINTK_PENDING_WAKEUP   0x01
#define PRINTK_PENDING_OUTPUT   0x02

static DEFINE_PER_CPU(int, printk_pending);

static void wake_up_klogd_work_func(struct irq_work *irq_work)
{
        int pending = this_cpu_xchg(printk_pending, 0);

        if (pending & PRINTK_PENDING_OUTPUT) {
                if (force_legacy_kthread()) {
                        if (printk_legacy_kthread)
                                wake_up_interruptible(&legacy_wait);
                } else {
                        if (console_trylock())
                                console_unlock();
                }
        }

        if (pending & PRINTK_PENDING_WAKEUP)
                wake_up_interruptible(&log_wait);
}

static DEFINE_PER_CPU(struct irq_work, wake_up_klogd_work) =
        IRQ_WORK_INIT_LAZY(wake_up_klogd_work_func);

static void __wake_up_klogd(int val)
{
        if (!printk_percpu_data_ready())
                return;

        preempt_disable();
        /*
         * Guarantee any new records can be seen by tasks preparing to wait
         * before this context checks if the wait queue is empty.
         *
         * The full memory barrier within wq_has_sleeper() pairs with the full
         * memory barrier within set_current_state() of
         * prepare_to_wait_event(), which is called after ___wait_event() adds
         * the waiter but before it has checked the wait condition.
         *
         * This pairs with devkmsg_read:A and syslog_print:A.
         */
        if (wq_has_sleeper(&log_wait) || /* LMM(__wake_up_klogd:A) */
            (val & PRINTK_PENDING_OUTPUT)) {
                this_cpu_or(printk_pending, val);
                irq_work_queue(this_cpu_ptr(&wake_up_klogd_work));
        }
        preempt_enable();
}

/**
 * wake_up_klogd - Wake kernel logging daemon
 *
 * Use this function when new records have been added to the ringbuffer
 * and the console printing of those records has already occurred or is
 * known to be handled by some other context. This function will only
 * wake the logging daemon.
 *
 * Context: Any context.
 */
void wake_up_klogd(void)
{
        __wake_up_klogd(PRINTK_PENDING_WAKEUP);
}

/**
 * defer_console_output - Wake kernel logging daemon and trigger
 *      console printing in a deferred context
 *
 * Use this function when new records have been added to the ringbuffer,
 * this context is responsible for console printing those records, but
 * the current context is not allowed to perform the console printing.
 * Trigger an irq_work context to perform the console printing. This
 * function also wakes the logging daemon.
 *
 * Context: Any context.
 */
void defer_console_output(void)
{
        /*
         * New messages may have been added directly to the ringbuffer
         * using vprintk_store(), so wake any waiters as well.
         */
        __wake_up_klogd(PRINTK_PENDING_WAKEUP | PRINTK_PENDING_OUTPUT);
}

void printk_trigger_flush(void)
{
        defer_console_output();
}

int vprintk_deferred(const char *fmt, va_list args)
{
        return vprintk_emit(0, LOGLEVEL_SCHED, NULL, fmt, args);
}

int _printk_deferred(const char *fmt, ...)
{
        va_list args;
        int r;

        va_start(args, fmt);
        r = vprintk_deferred(fmt, args);
        va_end(args);

        return r;
}
EXPORT_SYMBOL_GPL(_printk_deferred);
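
/*
 * Usage sketch: code that runs under scheduler or timekeeping locks,
 * where a synchronous console flush could deadlock, logs through the
 * printk_deferred() wrapper from <linux/printk.h> (the message text
 * below is illustrative):
 *
 *      printk_deferred(KERN_WARNING "clock update was skipped\n");
 */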

/*
 * printk rate limiting, lifted from the networking subsystem.
 *
 * This enforces a rate limit: not more than 10 kernel messages
 * every 5s to make a denial-of-service attack impossible.
 */
DEFINE_RATELIMIT_STATE(printk_ratelimit_state, 5 * HZ, 10);

int __printk_ratelimit(const char *func)
{
        return ___ratelimit(&printk_ratelimit_state, func);
}
EXPORT_SYMBOL(__printk_ratelimit);
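
/*
 * Usage sketch: callers normally use the printk_ratelimit() macro from
 * <linux/printk.h>, which passes __func__ to __printk_ratelimit():
 *
 *      if (printk_ratelimit())
 *              pr_warn("dropping packet\n");   // illustrative message
 */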

/**
 * printk_timed_ratelimit - caller-controlled printk ratelimiting
 * @caller_jiffies: pointer to caller's state
 * @interval_msecs: minimum interval between prints
 *
 * printk_timed_ratelimit() returns true if more than @interval_msecs
 * milliseconds have elapsed since the last time printk_timed_ratelimit()
 * returned true.
 */
bool printk_timed_ratelimit(unsigned long *caller_jiffies,
                            unsigned int interval_msecs)
{
        unsigned long elapsed = jiffies - *caller_jiffies;

        if (*caller_jiffies && elapsed <= msecs_to_jiffies(interval_msecs))
                return false;

        *caller_jiffies = jiffies;
        return true;
}
EXPORT_SYMBOL(printk_timed_ratelimit);
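
/*
 * Usage sketch: each call site keeps its own timestamp, giving it an
 * independent rate limit (here at most one message per second):
 *
 *      static unsigned long last;
 *
 *      if (printk_timed_ratelimit(&last, 1000))
 *              pr_info("limited to one of these per second\n");
 */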
4662
4663 static DEFINE_SPINLOCK(dump_list_lock);
4664 static LIST_HEAD(dump_list);
4665
4666 /**
4667 * kmsg_dump_register - register a kernel log dumper.
4668 * @dumper: pointer to the kmsg_dumper structure
4669 *
4670 * Adds a kernel log dumper to the system. The dump callback in the
4671 * structure will be called when the kernel oopses or panics and must be
4672 * set. Returns zero on success and %-EINVAL or %-EBUSY otherwise.
4673 */
kmsg_dump_register(struct kmsg_dumper * dumper)4674 int kmsg_dump_register(struct kmsg_dumper *dumper)
4675 {
4676 unsigned long flags;
4677 int err = -EBUSY;
4678
4679 /* The dump callback needs to be set */
4680 if (!dumper->dump)
4681 return -EINVAL;
4682
4683 spin_lock_irqsave(&dump_list_lock, flags);
4684 /* Don't allow registering multiple times */
4685 if (!dumper->registered) {
4686 dumper->registered = 1;
4687 list_add_tail_rcu(&dumper->list, &dump_list);
4688 err = 0;
4689 }
4690 spin_unlock_irqrestore(&dump_list_lock, flags);
4691
4692 return err;
4693 }
4694 EXPORT_SYMBOL_GPL(kmsg_dump_register);
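
/*
 * Registration sketch (hypothetical dumper, illustration only): provide
 * a dump() callback, optionally raise max_reason, then register once:
 *
 *      static void my_dump(struct kmsg_dumper *dumper,
 *                          struct kmsg_dump_detail *detail)
 *      {
 *              // retrieve records here, see kmsg_dump_get_line() and
 *              // kmsg_dump_get_buffer() below
 *      }
 *
 *      static struct kmsg_dumper my_dumper = {
 *              .dump       = my_dump,
 *              .max_reason = KMSG_DUMP_PANIC,
 *      };
 *
 *      kmsg_dump_register(&my_dumper);
 */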

/**
 * kmsg_dump_unregister - unregister a kmsg dumper.
 * @dumper: pointer to the kmsg_dumper structure
 *
 * Removes a dump device from the system. Returns zero on success and
 * %-EINVAL otherwise.
 */
int kmsg_dump_unregister(struct kmsg_dumper *dumper)
{
        unsigned long flags;
        int err = -EINVAL;

        spin_lock_irqsave(&dump_list_lock, flags);
        if (dumper->registered) {
                dumper->registered = 0;
                list_del_rcu(&dumper->list);
                err = 0;
        }
        spin_unlock_irqrestore(&dump_list_lock, flags);
        synchronize_rcu();

        return err;
}
EXPORT_SYMBOL_GPL(kmsg_dump_unregister);

static bool always_kmsg_dump;
module_param_named(always_kmsg_dump, always_kmsg_dump, bool, S_IRUGO | S_IWUSR);

const char *kmsg_dump_reason_str(enum kmsg_dump_reason reason)
{
        switch (reason) {
        case KMSG_DUMP_PANIC:
                return "Panic";
        case KMSG_DUMP_OOPS:
                return "Oops";
        case KMSG_DUMP_EMERG:
                return "Emergency";
        case KMSG_DUMP_SHUTDOWN:
                return "Shutdown";
        default:
                return "Unknown";
        }
}
EXPORT_SYMBOL_GPL(kmsg_dump_reason_str);

/**
 * kmsg_dump_desc - dump kernel log to kernel message dumpers.
 * @reason: the reason (oops, panic etc) for dumping
 * @desc: a short string to describe what caused the panic or oops. Can be NULL
 *      if no additional description is available.
 *
 * Call each of the registered dumper's dump() callback, which can
 * retrieve the kmsg records with kmsg_dump_get_line() or
 * kmsg_dump_get_buffer().
 */
void kmsg_dump_desc(enum kmsg_dump_reason reason, const char *desc)
{
        struct kmsg_dumper *dumper;
        struct kmsg_dump_detail detail = {
                .reason = reason,
                .description = desc};

        rcu_read_lock();
        list_for_each_entry_rcu(dumper, &dump_list, list) {
                enum kmsg_dump_reason max_reason = dumper->max_reason;

                /*
                 * If client has not provided a specific max_reason, default
                 * to KMSG_DUMP_OOPS, unless always_kmsg_dump was set.
                 */
                if (max_reason == KMSG_DUMP_UNDEF) {
                        max_reason = always_kmsg_dump ? KMSG_DUMP_MAX :
                                                        KMSG_DUMP_OOPS;
                }
                if (reason > max_reason)
                        continue;

                /* invoke dumper which will iterate over records */
                dumper->dump(dumper, &detail);
        }
        rcu_read_unlock();
}

/**
 * kmsg_dump_get_line - retrieve one kmsg log line
 * @iter: kmsg dump iterator
 * @syslog: include the "<4>" prefixes
 * @line: buffer to copy the line to
 * @size: maximum size of the buffer
 * @len: length of line placed into buffer
 *
 * Start at the beginning of the kmsg buffer, with the oldest kmsg
 * record, and copy one record into the provided buffer.
 *
 * Consecutive calls will return the next available record moving
 * towards the end of the buffer with the youngest messages.
 *
 * A return value of FALSE indicates that there are no more records to
 * read.
 */
bool kmsg_dump_get_line(struct kmsg_dump_iter *iter, bool syslog,
                        char *line, size_t size, size_t *len)
{
        u64 min_seq = latched_seq_read_nolock(&clear_seq);
        struct printk_info info;
        unsigned int line_count;
        struct printk_record r;
        size_t l = 0;
        bool ret = false;

        if (iter->cur_seq < min_seq)
                iter->cur_seq = min_seq;

        prb_rec_init_rd(&r, &info, line, size);

        /* Read text or count text lines? */
        if (line) {
                if (!prb_read_valid(prb, iter->cur_seq, &r))
                        goto out;
                l = record_print_text(&r, syslog, printk_time);
        } else {
                if (!prb_read_valid_info(prb, iter->cur_seq,
                                         &info, &line_count)) {
                        goto out;
                }
                l = get_record_print_text_size(&info, line_count, syslog,
                                               printk_time);
        }

        iter->cur_seq = r.info->seq + 1;
        ret = true;
out:
        if (len)
                *len = l;
        return ret;
}
EXPORT_SYMBOL_GPL(kmsg_dump_get_line);
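
/*
 * Iteration sketch inside a dumper's dump() callback: rewind the
 * iterator, then walk the records from oldest to newest. The sink
 * function is hypothetical:
 *
 *      struct kmsg_dump_iter iter;
 *      char line[256];
 *      size_t len;
 *
 *      kmsg_dump_rewind(&iter);
 *      while (kmsg_dump_get_line(&iter, true, line, sizeof(line), &len))
 *              my_store_write(line, len);      // hypothetical sink
 */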

/**
 * kmsg_dump_get_buffer - copy kmsg log lines
 * @iter: kmsg dump iterator
 * @syslog: include the "<4>" prefixes
 * @buf: buffer to copy the lines to
 * @size: maximum size of the buffer
 * @len_out: length of the text placed into the buffer
 *
 * Start at the end of the kmsg buffer and fill the provided buffer
 * with as many of the *youngest* kmsg records that fit into it.
 * If the buffer is large enough, all available kmsg records will be
 * copied with a single call.
 *
 * Consecutive calls will fill the buffer with the next block of
 * available older records, not including the earlier retrieved ones.
 *
 * A return value of FALSE indicates that there are no more records to
 * read.
 */
bool kmsg_dump_get_buffer(struct kmsg_dump_iter *iter, bool syslog,
                          char *buf, size_t size, size_t *len_out)
{
        u64 min_seq = latched_seq_read_nolock(&clear_seq);
        struct printk_info info;
        struct printk_record r;
        u64 seq;
        u64 next_seq;
        size_t len = 0;
        bool ret = false;
        bool time = printk_time;

        if (!buf || !size)
                goto out;

        if (iter->cur_seq < min_seq)
                iter->cur_seq = min_seq;

        if (prb_read_valid_info(prb, iter->cur_seq, &info, NULL)) {
                if (info.seq != iter->cur_seq) {
                        /* messages are gone, move to first available one */
                        iter->cur_seq = info.seq;
                }
        }

        /* last entry */
        if (iter->cur_seq >= iter->next_seq)
                goto out;

        /*
         * Find first record that fits, including all following records,
         * into the user-provided buffer for this dump. Pass in size-1
         * because this function (by way of record_print_text()) will
         * not write more than size-1 bytes of text into @buf.
         */
        seq = find_first_fitting_seq(iter->cur_seq, iter->next_seq,
                                     size - 1, syslog, time);

        /*
         * Next kmsg_dump_get_buffer() invocation will dump block of
         * older records stored right before this one.
         */
        next_seq = seq;

        prb_rec_init_rd(&r, &info, buf, size);

        prb_for_each_record(seq, prb, seq, &r) {
                if (r.info->seq >= iter->next_seq)
                        break;

                len += record_print_text(&r, syslog, time);

                /* Adjust record to store to remaining buffer space. */
                prb_rec_init_rd(&r, &info, buf + len, size - len);
        }

        iter->next_seq = next_seq;
        ret = true;
out:
        if (len_out)
                *len_out = len;
        return ret;
}
EXPORT_SYMBOL_GPL(kmsg_dump_get_buffer);
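
/*
 * Buffer sketch: fill a chunk with the youngest records that fit,
 * repeating for progressively older blocks. The sink is hypothetical:
 *
 *      struct kmsg_dump_iter iter;
 *      static char buf[4096];
 *      size_t len;
 *
 *      kmsg_dump_rewind(&iter);
 *      while (kmsg_dump_get_buffer(&iter, false, buf, sizeof(buf), &len))
 *              my_store_write(buf, len);       // hypothetical sink
 */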

/**
 * kmsg_dump_rewind - reset the iterator
 * @iter: kmsg dump iterator
 *
 * Reset the dumper's iterator so that kmsg_dump_get_line() and
 * kmsg_dump_get_buffer() can be called again and used multiple
 * times within the same dumper.dump() callback.
 */
void kmsg_dump_rewind(struct kmsg_dump_iter *iter)
{
        iter->cur_seq = latched_seq_read_nolock(&clear_seq);
        iter->next_seq = prb_next_seq(prb);
}
EXPORT_SYMBOL_GPL(kmsg_dump_rewind);

/**
 * console_try_replay_all - try to replay kernel log on consoles
 *
 * Try to obtain the console lock and replay all available records
 * from the printk buffer on the consoles.
 * Does nothing if the lock is not obtained.
 *
 * Context: Any, except for NMI.
 */
void console_try_replay_all(void)
{
        struct console_flush_type ft;

        printk_get_console_flush_type(&ft);
        if (console_trylock()) {
                __console_rewind_all();
                if (ft.nbcon_atomic)
                        nbcon_atomic_flush_pending();
                if (ft.nbcon_offload)
                        nbcon_kthreads_wake();
                if (ft.legacy_offload)
                        defer_console_output();
                /* Consoles are flushed as part of console_unlock(). */
                console_unlock();
        }
}
#endif

#ifdef CONFIG_SMP
static atomic_t printk_cpu_sync_owner = ATOMIC_INIT(-1);
static atomic_t printk_cpu_sync_nested = ATOMIC_INIT(0);

bool is_printk_cpu_sync_owner(void)
{
        return (atomic_read(&printk_cpu_sync_owner) == raw_smp_processor_id());
}

/**
 * __printk_cpu_sync_wait() - Busy wait until the printk cpu-reentrant
 *      spinning lock is not owned by any CPU.
 *
 * Context: Any context.
 */
void __printk_cpu_sync_wait(void)
{
        do {
                cpu_relax();
        } while (atomic_read(&printk_cpu_sync_owner) != -1);
}
EXPORT_SYMBOL(__printk_cpu_sync_wait);

/**
 * __printk_cpu_sync_try_get() - Try to acquire the printk cpu-reentrant
 *      spinning lock.
 *
 * If no processor has the lock, the calling processor takes the lock and
 * becomes the owner. If the calling processor is already the owner of the
 * lock, this function succeeds immediately.
 *
 * Context: Any context. Expects interrupts to be disabled.
 * Return: 1 on success, otherwise 0.
 */
int __printk_cpu_sync_try_get(void)
{
        int cpu;
        int old;

        cpu = smp_processor_id();

        /*
         * Guarantee loads and stores from this CPU when it is the lock owner
         * are _not_ visible to the previous lock owner. This pairs with
         * __printk_cpu_sync_put:B.
         *
         * Memory barrier involvement:
         *
         * If __printk_cpu_sync_try_get:A reads from __printk_cpu_sync_put:B,
         * then __printk_cpu_sync_put:A can never read from
         * __printk_cpu_sync_try_get:B.
         *
         * Relies on:
         *
         * RELEASE from __printk_cpu_sync_put:A to __printk_cpu_sync_put:B
         * of the previous CPU
         *    matching
         * ACQUIRE from __printk_cpu_sync_try_get:A to
         * __printk_cpu_sync_try_get:B of this CPU
         */
        old = atomic_cmpxchg_acquire(&printk_cpu_sync_owner, -1,
                                     cpu); /* LMM(__printk_cpu_sync_try_get:A) */
        if (old == -1) {
                /*
                 * This CPU is now the owner and begins loading/storing
                 * data: LMM(__printk_cpu_sync_try_get:B)
                 */
                return 1;

        } else if (old == cpu) {
                /* This CPU is already the owner. */
                atomic_inc(&printk_cpu_sync_nested);
                return 1;
        }

        return 0;
}
EXPORT_SYMBOL(__printk_cpu_sync_try_get);

/**
 * __printk_cpu_sync_put() - Release the printk cpu-reentrant spinning lock.
 *
 * The calling processor must be the owner of the lock.
 *
 * Context: Any context. Expects interrupts to be disabled.
 */
void __printk_cpu_sync_put(void)
{
        if (atomic_read(&printk_cpu_sync_nested)) {
                atomic_dec(&printk_cpu_sync_nested);
                return;
        }

        /*
         * This CPU is finished loading/storing data:
         * LMM(__printk_cpu_sync_put:A)
         */

        /*
         * Guarantee loads and stores from this CPU when it was the
         * lock owner are visible to the next lock owner. This pairs
         * with __printk_cpu_sync_try_get:A.
         *
         * Memory barrier involvement:
         *
         * If __printk_cpu_sync_try_get:A reads from __printk_cpu_sync_put:B,
         * then __printk_cpu_sync_try_get:B reads from __printk_cpu_sync_put:A.
         *
         * Relies on:
         *
         * RELEASE from __printk_cpu_sync_put:A to __printk_cpu_sync_put:B
         * of this CPU
         *    matching
         * ACQUIRE from __printk_cpu_sync_try_get:A to
         * __printk_cpu_sync_try_get:B of the next CPU
         */
        atomic_set_release(&printk_cpu_sync_owner,
                           -1); /* LMM(__printk_cpu_sync_put:B) */
}
EXPORT_SYMBOL(__printk_cpu_sync_put);
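
/*
 * Usage sketch: callers normally take this lock via the irqsave
 * wrappers declared in <linux/printk.h>, which disable interrupts as
 * these functions require:
 *
 *      unsigned long flags;
 *
 *      printk_cpu_sync_get_irqsave(flags);
 *      // emit a multi-line dump without interleaving from other CPUs
 *      printk_cpu_sync_put_irqrestore(flags);
 */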
#endif /* CONFIG_SMP */