// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/kernel/printk.c
 *
 * Copyright (C) 1991, 1992  Linus Torvalds
 *
 * Modified to make sys_syslog() more flexible: added commands to
 * return the last 4k of kernel messages, regardless of whether
 * they've been read or not.  Added option to suppress kernel printk's
 * to the console.  Added hook for sending the console messages
 * elsewhere, in preparation for a serial line console (someday).
 * Ted Ts'o, 2/11/93.
 * Modified for sysctl support, 1/8/97, Chris Horn.
 * Fixed SMP synchronization, 08/08/99, Manfred Spraul
 *     manfred@colorfullife.com
 * Rewrote bits to get rid of console_lock
 *     01Mar01 Andrew Morton
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/tty.h>
#include <linux/tty_driver.h>
#include <linux/console.h>
#include <linux/init.h>
#include <linux/jiffies.h>
#include <linux/nmi.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/delay.h>
#include <linux/smp.h>
#include <linux/security.h>
#include <linux/memblock.h>
#include <linux/syscalls.h>
#include <linux/vmcore_info.h>
#include <linux/ratelimit.h>
#include <linux/kmsg_dump.h>
#include <linux/syslog.h>
#include <linux/cpu.h>
#include <linux/rculist.h>
#include <linux/poll.h>
#include <linux/irq_work.h>
#include <linux/ctype.h>
#include <linux/uio.h>
#include <linux/sched/clock.h>
#include <linux/sched/debug.h>
#include <linux/sched/task_stack.h>

#include <linux/uaccess.h>
#include <asm/sections.h>

#include <trace/events/initcall.h>
#define CREATE_TRACE_POINTS
#include <trace/events/printk.h>

#include "printk_ringbuffer.h"
#include "console_cmdline.h"
#include "braille.h"
#include "internal.h"

int console_printk[4] = {
        CONSOLE_LOGLEVEL_DEFAULT,       /* console_loglevel */
        MESSAGE_LOGLEVEL_DEFAULT,       /* default_message_loglevel */
        CONSOLE_LOGLEVEL_MIN,           /* minimum_console_loglevel */
        CONSOLE_LOGLEVEL_DEFAULT,       /* default_console_loglevel */
};
EXPORT_SYMBOL_GPL(console_printk);

atomic_t ignore_console_lock_warning __read_mostly = ATOMIC_INIT(0);
EXPORT_SYMBOL(ignore_console_lock_warning);

EXPORT_TRACEPOINT_SYMBOL_GPL(console);

/*
 * Low level drivers may need that to know if they can schedule in
 * their unblank() callback or not. So let's export it.
 */
int oops_in_progress;
EXPORT_SYMBOL(oops_in_progress);

/*
 * console_mutex protects console_list updates and console->flags updates.
 * The flags are synchronized only for consoles that are registered, i.e.
 * accessible via the console list.
 */
static DEFINE_MUTEX(console_mutex);

/*
 * console_sem protects updates to console->seq
 * and also provides serialization for console printing.
 */
static DEFINE_SEMAPHORE(console_sem, 1);
HLIST_HEAD(console_list);
EXPORT_SYMBOL_GPL(console_list);
DEFINE_STATIC_SRCU(console_srcu);

/*
 * The system may need to suppress printk messages under certain
 * circumstances, like after a kernel panic happens.
 */
int __read_mostly suppress_printk;

#ifdef CONFIG_LOCKDEP
static struct lockdep_map console_lock_dep_map = {
        .name = "console_lock"
};

void lockdep_assert_console_list_lock_held(void)
{
        lockdep_assert_held(&console_mutex);
}
EXPORT_SYMBOL(lockdep_assert_console_list_lock_held);
#endif

#ifdef CONFIG_DEBUG_LOCK_ALLOC
bool console_srcu_read_lock_is_held(void)
{
        return srcu_read_lock_held(&console_srcu);
}
EXPORT_SYMBOL(console_srcu_read_lock_is_held);
#endif

enum devkmsg_log_bits {
        __DEVKMSG_LOG_BIT_ON = 0,
        __DEVKMSG_LOG_BIT_OFF,
        __DEVKMSG_LOG_BIT_LOCK,
};

enum devkmsg_log_masks {
        DEVKMSG_LOG_MASK_ON   = BIT(__DEVKMSG_LOG_BIT_ON),
        DEVKMSG_LOG_MASK_OFF  = BIT(__DEVKMSG_LOG_BIT_OFF),
        DEVKMSG_LOG_MASK_LOCK = BIT(__DEVKMSG_LOG_BIT_LOCK),
};

/* Keep both the 'on' and 'off' bits clear, i.e. ratelimit by default: */
#define DEVKMSG_LOG_MASK_DEFAULT        0

static unsigned int __read_mostly devkmsg_log = DEVKMSG_LOG_MASK_DEFAULT;

static int __control_devkmsg(char *str)
{
        size_t len;

        if (!str)
                return -EINVAL;

        len = str_has_prefix(str, "on");
        if (len) {
                devkmsg_log = DEVKMSG_LOG_MASK_ON;
                return len;
        }

        len = str_has_prefix(str, "off");
        if (len) {
                devkmsg_log = DEVKMSG_LOG_MASK_OFF;
                return len;
        }

        len = str_has_prefix(str, "ratelimit");
        if (len) {
                devkmsg_log = DEVKMSG_LOG_MASK_DEFAULT;
                return len;
        }

        return -EINVAL;
}

static int __init control_devkmsg(char *str)
{
        if (__control_devkmsg(str) < 0) {
                pr_warn("printk.devkmsg: bad option string '%s'\n", str);
                return 1;
        }

        /*
         * Set sysctl string accordingly:
         */
        if (devkmsg_log == DEVKMSG_LOG_MASK_ON)
                strcpy(devkmsg_log_str, "on");
        else if (devkmsg_log == DEVKMSG_LOG_MASK_OFF)
                strcpy(devkmsg_log_str, "off");
        /* else "ratelimit" which is set by default. */

        /*
         * Sysctl cannot change it anymore. The kernel command line setting of
         * this parameter is to force the setting to be permanent throughout the
         * runtime of the system. This is a precaution measure against userspace
         * trying to be a smarta** and attempting to change it up on us.
         */
        devkmsg_log |= DEVKMSG_LOG_MASK_LOCK;

        return 1;
}
__setup("printk.devkmsg=", control_devkmsg);
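
/*
 * Illustrative usage (editor's note, not part of the original source):
 * booting with "printk.devkmsg=on" disables the /dev/kmsg write
 * ratelimit and, because the LOCK bit is set above, makes the choice
 * permanent for the runtime of the system:
 *
 *   printk.devkmsg=on                                  (kernel command line)
 *   echo ratelimit > /proc/sys/kernel/printk_devkmsg   (now fails, -EINVAL)
 */
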
197 | |
198 | char devkmsg_log_str[DEVKMSG_STR_MAX_SIZE] = "ratelimit"; |
199 | #if defined(CONFIG_PRINTK) && defined(CONFIG_SYSCTL) |
200 | int devkmsg_sysctl_set_loglvl(struct ctl_table *table, int write, |
201 | void *buffer, size_t *lenp, loff_t *ppos) |
202 | { |
203 | char old_str[DEVKMSG_STR_MAX_SIZE]; |
204 | unsigned int old; |
205 | int err; |
206 | |
207 | if (write) { |
208 | if (devkmsg_log & DEVKMSG_LOG_MASK_LOCK) |
209 | return -EINVAL; |
210 | |
211 | old = devkmsg_log; |
212 | strncpy(p: old_str, q: devkmsg_log_str, DEVKMSG_STR_MAX_SIZE); |
213 | } |
214 | |
215 | err = proc_dostring(table, write, buffer, lenp, ppos); |
216 | if (err) |
217 | return err; |
218 | |
219 | if (write) { |
220 | err = __control_devkmsg(str: devkmsg_log_str); |
221 | |
222 | /* |
223 | * Do not accept an unknown string OR a known string with |
224 | * trailing crap... |
225 | */ |
226 | if (err < 0 || (err + 1 != *lenp)) { |
227 | |
228 | /* ... and restore old setting. */ |
229 | devkmsg_log = old; |
230 | strncpy(p: devkmsg_log_str, q: old_str, DEVKMSG_STR_MAX_SIZE); |
231 | |
232 | return -EINVAL; |
233 | } |
234 | } |
235 | |
236 | return 0; |
237 | } |
238 | #endif /* CONFIG_PRINTK && CONFIG_SYSCTL */ |
239 | |
240 | /** |
241 | * console_list_lock - Lock the console list |
242 | * |
243 | * For console list or console->flags updates |
244 | */ |
245 | void console_list_lock(void) |
246 | { |
247 | /* |
248 | * In unregister_console() and console_force_preferred_locked(), |
249 | * synchronize_srcu() is called with the console_list_lock held. |
250 | * Therefore it is not allowed that the console_list_lock is taken |
251 | * with the srcu_lock held. |
252 | * |
253 | * Detecting if this context is really in the read-side critical |
254 | * section is only possible if the appropriate debug options are |
255 | * enabled. |
256 | */ |
257 | WARN_ON_ONCE(debug_lockdep_rcu_enabled() && |
258 | srcu_read_lock_held(&console_srcu)); |
259 | |
260 | mutex_lock(&console_mutex); |
261 | } |
262 | EXPORT_SYMBOL(console_list_lock); |
263 | |
264 | /** |
265 | * console_list_unlock - Unlock the console list |
266 | * |
267 | * Counterpart to console_list_lock() |
268 | */ |
269 | void console_list_unlock(void) |
270 | { |
271 | mutex_unlock(lock: &console_mutex); |
272 | } |
273 | EXPORT_SYMBOL(console_list_unlock); |
274 | |
275 | /** |
276 | * console_srcu_read_lock - Register a new reader for the |
277 | * SRCU-protected console list |
278 | * |
279 | * Use for_each_console_srcu() to iterate the console list |
280 | * |
281 | * Context: Any context. |
282 | * Return: A cookie to pass to console_srcu_read_unlock(). |
283 | */ |
284 | int console_srcu_read_lock(void) |
285 | { |
286 | return srcu_read_lock_nmisafe(ssp: &console_srcu); |
287 | } |
288 | EXPORT_SYMBOL(console_srcu_read_lock); |
289 | |
290 | /** |
291 | * console_srcu_read_unlock - Unregister an old reader from |
292 | * the SRCU-protected console list |
293 | * @cookie: cookie returned from console_srcu_read_lock() |
294 | * |
295 | * Counterpart to console_srcu_read_lock() |
296 | */ |
297 | void console_srcu_read_unlock(int cookie) |
298 | { |
299 | srcu_read_unlock_nmisafe(ssp: &console_srcu, idx: cookie); |
300 | } |
301 | EXPORT_SYMBOL(console_srcu_read_unlock); |
302 | |
303 | /* |
304 | * Helper macros to handle lockdep when locking/unlocking console_sem. We use |
305 | * macros instead of functions so that _RET_IP_ contains useful information. |
306 | */ |
307 | #define down_console_sem() do { \ |
308 | down(&console_sem);\ |
309 | mutex_acquire(&console_lock_dep_map, 0, 0, _RET_IP_);\ |
310 | } while (0) |
311 | |
312 | static int __down_trylock_console_sem(unsigned long ip) |
313 | { |
314 | int lock_failed; |
315 | unsigned long flags; |
316 | |
317 | /* |
318 | * Here and in __up_console_sem() we need to be in safe mode, |
319 | * because spindump/WARN/etc from under console ->lock will |
320 | * deadlock in printk()->down_trylock_console_sem() otherwise. |
321 | */ |
322 | printk_safe_enter_irqsave(flags); |
323 | lock_failed = down_trylock(sem: &console_sem); |
324 | printk_safe_exit_irqrestore(flags); |
325 | |
326 | if (lock_failed) |
327 | return 1; |
328 | mutex_acquire(&console_lock_dep_map, 0, 1, ip); |
329 | return 0; |
330 | } |
331 | #define down_trylock_console_sem() __down_trylock_console_sem(_RET_IP_) |
332 | |
333 | static void __up_console_sem(unsigned long ip) |
334 | { |
335 | unsigned long flags; |
336 | |
337 | mutex_release(&console_lock_dep_map, ip); |
338 | |
339 | printk_safe_enter_irqsave(flags); |
340 | up(sem: &console_sem); |
341 | printk_safe_exit_irqrestore(flags); |
342 | } |
343 | #define up_console_sem() __up_console_sem(_RET_IP_) |
344 | |
345 | static bool panic_in_progress(void) |
346 | { |
347 | return unlikely(atomic_read(&panic_cpu) != PANIC_CPU_INVALID); |
348 | } |
349 | |
350 | /* Return true if a panic is in progress on the current CPU. */ |
351 | bool this_cpu_in_panic(void) |
352 | { |
353 | /* |
354 | * We can use raw_smp_processor_id() here because it is impossible for |
355 | * the task to be migrated to the panic_cpu, or away from it. If |
356 | * panic_cpu has already been set, and we're not currently executing on |
357 | * that CPU, then we never will be. |
358 | */ |
359 | return unlikely(atomic_read(&panic_cpu) == raw_smp_processor_id()); |
360 | } |
361 | |
362 | /* |
363 | * Return true if a panic is in progress on a remote CPU. |
364 | * |
365 | * On true, the local CPU should immediately release any printing resources |
366 | * that may be needed by the panic CPU. |
367 | */ |
368 | bool other_cpu_in_panic(void) |
369 | { |
370 | return (panic_in_progress() && !this_cpu_in_panic()); |
371 | } |
372 | |
373 | /* |
374 | * This is used for debugging the mess that is the VT code by |
375 | * keeping track if we have the console semaphore held. It's |
376 | * definitely not the perfect debug tool (we don't know if _WE_ |
377 | * hold it and are racing, but it helps tracking those weird code |
378 | * paths in the console code where we end up in places I want |
379 | * locked without the console semaphore held). |
380 | */ |
381 | static int console_locked; |
382 | |
383 | /* |
384 | * Array of consoles built from command line options (console=) |
385 | */ |
386 | |
387 | #define MAX_CMDLINECONSOLES 8 |
388 | |
389 | static struct console_cmdline console_cmdline[MAX_CMDLINECONSOLES]; |
390 | |
391 | static int preferred_console = -1; |
392 | int console_set_on_cmdline; |
393 | EXPORT_SYMBOL(console_set_on_cmdline); |
394 | |
395 | /* Flag: console code may call schedule() */ |
396 | static int console_may_schedule; |
397 | |
398 | enum con_msg_format_flags { |
399 | MSG_FORMAT_DEFAULT = 0, |
400 | MSG_FORMAT_SYSLOG = (1 << 0), |
401 | }; |
402 | |
403 | static int console_msg_format = MSG_FORMAT_DEFAULT; |
404 | |
405 | /* |
406 | * The printk log buffer consists of a sequenced collection of records, each |
407 | * containing variable length message text. Every record also contains its |
408 | * own meta-data (@info). |
409 | * |
410 | * Every record meta-data carries the timestamp in microseconds, as well as |
411 | * the standard userspace syslog level and syslog facility. The usual kernel |
412 | * messages use LOG_KERN; userspace-injected messages always carry a matching |
413 | * syslog facility, by default LOG_USER. The origin of every message can be |
414 | * reliably determined that way. |
415 | * |
416 | * The human readable log message of a record is available in @text, the |
417 | * length of the message text in @text_len. The stored message is not |
418 | * terminated. |
419 | * |
420 | * Optionally, a record can carry a dictionary of properties (key/value |
421 | * pairs), to provide userspace with a machine-readable message context. |
422 | * |
423 | * Examples for well-defined, commonly used property names are: |
424 | * DEVICE=b12:8 device identifier |
425 | * b12:8 block dev_t |
426 | * c127:3 char dev_t |
427 | * n8 netdev ifindex |
428 | * +sound:card0 subsystem:devname |
429 | * SUBSYSTEM=pci driver-core subsystem name |
430 | * |
431 | * Valid characters in property names are [a-zA-Z0-9.-_]. Property names |
432 | * and values are terminated by a '\0' character. |
433 | * |
434 | * Example of record values: |
435 | * record.text_buf = "it's a line" (unterminated) |
436 | * record.info.seq = 56 |
437 | * record.info.ts_nsec = 36863 |
438 | * record.info.text_len = 11 |
439 | * record.info.facility = 0 (LOG_KERN) |
440 | * record.info.flags = 0 |
441 | * record.info.level = 3 (LOG_ERR) |
442 | * record.info.caller_id = 299 (task 299) |
443 | * record.info.dev_info.subsystem = "pci" (terminated) |
444 | * record.info.dev_info.device = "+pci:0000:00:01.0" (terminated) |
445 | * |
446 | * The 'struct printk_info' buffer must never be directly exported to |
447 | * userspace, it is a kernel-private implementation detail that might |
448 | * need to be changed in the future, when the requirements change. |
449 | * |
450 | * /dev/kmsg exports the structured data in the following line format: |
451 | * "<level>,<sequnum>,<timestamp>,<contflag>[,additional_values, ... ];<message text>\n" |
452 | * |
453 | * Users of the export format should ignore possible additional values |
454 | * separated by ',', and find the message after the ';' character. |
455 | * |
456 | * The optional key/value pairs are attached as continuation lines starting |
457 | * with a space character and terminated by a newline. All possible |
458 | * non-prinatable characters are escaped in the "\xff" notation. |
459 | */ |
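
/*
 * Illustrative example (editor's note, not part of the original source):
 * with the record values shown above and CONFIG_PRINTK_CALLER disabled,
 * the /dev/kmsg export of that record would look roughly like:
 *
 *   3,56,36,-;it's a line
 *    SUBSYSTEM=pci
 *    DEVICE=+pci:0000:00:01.0
 *
 * The prefix value 3 is (facility << 3) | level = (LOG_KERN << 3) | LOG_ERR,
 * and the timestamp is ts_nsec divided down to microseconds.
 */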

/* syslog_lock protects syslog_* variables and write access to clear_seq. */
static DEFINE_MUTEX(syslog_lock);

#ifdef CONFIG_PRINTK
DECLARE_WAIT_QUEUE_HEAD(log_wait);
/* All 3 protected by @syslog_lock. */
/* the next printk record to read by syslog(READ) or /proc/kmsg */
static u64 syslog_seq;
static size_t syslog_partial;
static bool syslog_time;

struct latched_seq {
        seqcount_latch_t        latch;
        u64                     val[2];
};

/*
 * The next printk record to read after the last 'clear' command. There are
 * two copies (updated with seqcount_latch) so that reads can locklessly
 * access a valid value. Writers are synchronized by @syslog_lock.
 */
static struct latched_seq clear_seq = {
        .latch          = SEQCNT_LATCH_ZERO(clear_seq.latch),
        .val[0]         = 0,
        .val[1]         = 0,
};

#define LOG_LEVEL(v)            ((v) & 0x07)
#define LOG_FACILITY(v)         ((v) >> 3 & 0xff)
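
/*
 * Worked example (editor's note): a /dev/kmsg writer that sends "<13>hello"
 * encodes the value 13 = (1 << 3) | 5, so LOG_FACILITY(13) == 1 (LOG_USER)
 * and LOG_LEVEL(13) == 5 (LOG_NOTICE).
 */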

/* record buffer */
#define LOG_ALIGN __alignof__(unsigned long)
#define __LOG_BUF_LEN (1 << CONFIG_LOG_BUF_SHIFT)
#define LOG_BUF_LEN_MAX ((u32)1 << 31)
static char __log_buf[__LOG_BUF_LEN] __aligned(LOG_ALIGN);
static char *log_buf = __log_buf;
static u32 log_buf_len = __LOG_BUF_LEN;

/*
 * Define the average message size. This only affects the number of
 * descriptors that will be available. Underestimating is better than
 * overestimating (too many available descriptors is better than not enough).
 */
#define PRB_AVGBITS 5   /* 32 character average length */

#if CONFIG_LOG_BUF_SHIFT <= PRB_AVGBITS
#error CONFIG_LOG_BUF_SHIFT value too small.
#endif
_DEFINE_PRINTKRB(printk_rb_static, CONFIG_LOG_BUF_SHIFT - PRB_AVGBITS,
                 PRB_AVGBITS, &__log_buf[0]);
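
/*
 * Sizing sketch (editor's note, assuming CONFIG_LOG_BUF_SHIFT=17): the
 * static ring gets a 128 KiB text buffer and 2^(17 - 5) = 4096 descriptors,
 * i.e. one descriptor per 32 bytes of average message text.
 */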

static struct printk_ringbuffer printk_rb_dynamic;

struct printk_ringbuffer *prb = &printk_rb_static;

/*
 * We cannot access per-CPU data (e.g. per-CPU flush irq_work) before
 * per_cpu_areas are initialised. This variable is set to true when
 * it's safe to access per-CPU data.
 */
static bool __printk_percpu_data_ready __ro_after_init;

bool printk_percpu_data_ready(void)
{
        return __printk_percpu_data_ready;
}

/* Must be called under syslog_lock. */
static void latched_seq_write(struct latched_seq *ls, u64 val)
{
        raw_write_seqcount_latch(&ls->latch);
        ls->val[0] = val;
        raw_write_seqcount_latch(&ls->latch);
        ls->val[1] = val;
}

/* Can be called from any context. */
static u64 latched_seq_read_nolock(struct latched_seq *ls)
{
        unsigned int seq;
        unsigned int idx;
        u64 val;

        do {
                seq = raw_read_seqcount_latch(&ls->latch);
                idx = seq & 0x1;
                val = ls->val[idx];
        } while (raw_read_seqcount_latch_retry(&ls->latch, seq));

        return val;
}

/* Return log buffer address */
char *log_buf_addr_get(void)
{
        return log_buf;
}

/* Return log buffer size */
u32 log_buf_len_get(void)
{
        return log_buf_len;
}

/*
 * Define how much of the log buffer we could take at maximum. The value
 * must be greater than two. Note that only half of the buffer is available
 * when the index points to the middle.
 */
#define MAX_LOG_TAKE_PART 4
static const char trunc_msg[] = "<truncated>";

static void truncate_msg(u16 *text_len, u16 *trunc_msg_len)
{
        /*
         * The message should not take the whole buffer. Otherwise, it might
         * get removed too soon.
         */
        u32 max_text_len = log_buf_len / MAX_LOG_TAKE_PART;

        if (*text_len > max_text_len)
                *text_len = max_text_len;

        /* enable the warning message (if there is room) */
        *trunc_msg_len = strlen(trunc_msg);
        if (*text_len >= *trunc_msg_len)
                *text_len -= *trunc_msg_len;
        else
                *trunc_msg_len = 0;
}
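
/*
 * Worked example (editor's note): with a 128 KiB log_buf, max_text_len is
 * 32768 bytes. A 40000-byte message is first clipped to 32768 bytes and
 * then shortened by strlen("<truncated>") == 11 more bytes so that the
 * truncation marker still fits.
 */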

int dmesg_restrict = IS_ENABLED(CONFIG_SECURITY_DMESG_RESTRICT);

static int syslog_action_restricted(int type)
{
        if (dmesg_restrict)
                return 1;
        /*
         * Unless restricted, we allow "read all" and "get buffer size"
         * for everybody.
         */
        return type != SYSLOG_ACTION_READ_ALL &&
               type != SYSLOG_ACTION_SIZE_BUFFER;
}

static int check_syslog_permissions(int type, int source)
{
        /*
         * If this is from /proc/kmsg and we've already opened it, then we've
         * already done the capabilities checks at open time.
         */
        if (source == SYSLOG_FROM_PROC && type != SYSLOG_ACTION_OPEN)
                goto ok;

        if (syslog_action_restricted(type)) {
                if (capable(CAP_SYSLOG))
                        goto ok;
                return -EPERM;
        }
ok:
        return security_syslog(type);
}

static void append_char(char **pp, char *e, char c)
{
        if (*pp < e)
                *(*pp)++ = c;
}

static ssize_t info_print_ext_header(char *buf, size_t size,
                                     struct printk_info *info)
{
        u64 ts_usec = info->ts_nsec;
        char caller[20];
#ifdef CONFIG_PRINTK_CALLER
        u32 id = info->caller_id;

        snprintf(caller, sizeof(caller), ",caller=%c%u",
                 id & 0x80000000 ? 'C' : 'T', id & ~0x80000000);
#else
        caller[0] = '\0';
#endif

        do_div(ts_usec, 1000);

        return scnprintf(buf, size, "%u,%llu,%llu,%c%s;",
                         (info->facility << 3) | info->level, info->seq,
                         ts_usec, info->flags & LOG_CONT ? 'c' : '-', caller);
}

static ssize_t msg_add_ext_text(char *buf, size_t size,
                                const char *text, size_t text_len,
                                unsigned char endc)
{
        char *p = buf, *e = buf + size;
        size_t i;

        /* escape non-printable characters */
        for (i = 0; i < text_len; i++) {
                unsigned char c = text[i];

                if (c < ' ' || c >= 127 || c == '\\')
                        p += scnprintf(p, e - p, "\\x%02x", c);
                else
                        append_char(&p, e, c);
        }
        append_char(&p, e, endc);

        return p - buf;
}

static ssize_t msg_add_dict_text(char *buf, size_t size,
                                 const char *key, const char *val)
{
        size_t val_len = strlen(val);
        ssize_t len;

        if (!val_len)
                return 0;

        len = msg_add_ext_text(buf, size, "", 0, ' ');  /* dict prefix */
        len += msg_add_ext_text(buf + len, size - len, key, strlen(key), '=');
        len += msg_add_ext_text(buf + len, size - len, val, val_len, '\n');

        return len;
}

static ssize_t msg_print_ext_body(char *buf, size_t size,
                                  char *text, size_t text_len,
                                  struct dev_printk_info *dev_info)
{
        ssize_t len;

        len = msg_add_ext_text(buf, size, text, text_len, '\n');

        if (!dev_info)
                goto out;

        len += msg_add_dict_text(buf + len, size - len, "SUBSYSTEM",
                                 dev_info->subsystem);
        len += msg_add_dict_text(buf + len, size - len, "DEVICE",
                                 dev_info->device);
out:
        return len;
}

/* /dev/kmsg - userspace message inject/listen interface */
struct devkmsg_user {
        atomic64_t seq;
        struct ratelimit_state rs;
        struct mutex lock;
        struct printk_buffers pbufs;
};

static __printf(3, 4) __cold
int devkmsg_emit(int facility, int level, const char *fmt, ...)
{
        va_list args;
        int r;

        va_start(args, fmt);
        r = vprintk_emit(facility, level, NULL, fmt, args);
        va_end(args);

        return r;
}

static ssize_t devkmsg_write(struct kiocb *iocb, struct iov_iter *from)
{
        char *buf, *line;
        int level = default_message_loglevel;
        int facility = 1;       /* LOG_USER */
        struct file *file = iocb->ki_filp;
        struct devkmsg_user *user = file->private_data;
        size_t len = iov_iter_count(from);
        ssize_t ret = len;

        if (len > PRINTKRB_RECORD_MAX)
                return -EINVAL;

        /* Ignore when user logging is disabled. */
        if (devkmsg_log & DEVKMSG_LOG_MASK_OFF)
                return len;

        /* Ratelimit when not explicitly enabled. */
        if (!(devkmsg_log & DEVKMSG_LOG_MASK_ON)) {
                if (!___ratelimit(&user->rs, current->comm))
                        return ret;
        }

        buf = kmalloc(len + 1, GFP_KERNEL);
        if (buf == NULL)
                return -ENOMEM;

        buf[len] = '\0';
        if (!copy_from_iter_full(buf, len, from)) {
                kfree(buf);
                return -EFAULT;
        }

        /*
         * Extract and skip the syslog prefix <[0-9]*>. Coming from userspace
         * the decimal value represents a 32-bit quantity; the lower 3 bits
         * are the log level, the rest is the log facility.
         *
         * If no prefix or no userspace facility is specified, we
         * enforce LOG_USER, to be able to reliably distinguish
         * kernel-generated messages from userspace-injected ones.
         */
        line = buf;
        if (line[0] == '<') {
                char *endp = NULL;
                unsigned int u;

                u = simple_strtoul(line + 1, &endp, 10);
                if (endp && endp[0] == '>') {
                        level = LOG_LEVEL(u);
                        if (LOG_FACILITY(u) != 0)
                                facility = LOG_FACILITY(u);
                        endp++;
                        line = endp;
                }
        }

        devkmsg_emit(facility, level, "%s", line);
        kfree(buf);
        return ret;
}
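
/*
 * Illustrative usage (editor's note, not part of the original source):
 * injecting a message from userspace with an explicit syslog prefix:
 *
 *   echo '<13>hello from userspace' > /dev/kmsg
 *
 * "<13>" selects facility LOG_USER and level LOG_NOTICE; without a prefix,
 * default_message_loglevel and LOG_USER are used instead.
 */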

static ssize_t devkmsg_read(struct file *file, char __user *buf,
                            size_t count, loff_t *ppos)
{
        struct devkmsg_user *user = file->private_data;
        char *outbuf = &user->pbufs.outbuf[0];
        struct printk_message pmsg = {
                .pbufs = &user->pbufs,
        };
        ssize_t ret;

        ret = mutex_lock_interruptible(&user->lock);
        if (ret)
                return ret;

        if (!printk_get_next_message(&pmsg, atomic64_read(&user->seq), true, false)) {
                if (file->f_flags & O_NONBLOCK) {
                        ret = -EAGAIN;
                        goto out;
                }

                /*
                 * Guarantee this task is visible on the waitqueue before
                 * checking the wake condition.
                 *
                 * The full memory barrier within set_current_state() of
                 * prepare_to_wait_event() pairs with the full memory barrier
                 * within wq_has_sleeper().
                 *
                 * This pairs with __wake_up_klogd:A.
                 */
                ret = wait_event_interruptible(log_wait,
                                printk_get_next_message(&pmsg, atomic64_read(&user->seq), true,
                                                        false)); /* LMM(devkmsg_read:A) */
                if (ret)
                        goto out;
        }

        if (pmsg.dropped) {
                /* our last seen message is gone, return error and reset */
                atomic64_set(&user->seq, pmsg.seq);
                ret = -EPIPE;
                goto out;
        }

        atomic64_set(&user->seq, pmsg.seq + 1);

        if (pmsg.outbuf_len > count) {
                ret = -EINVAL;
                goto out;
        }

        if (copy_to_user(buf, outbuf, pmsg.outbuf_len)) {
                ret = -EFAULT;
                goto out;
        }
        ret = pmsg.outbuf_len;
out:
        mutex_unlock(&user->lock);
        return ret;
}

/*
 * Be careful when modifying this function!!!
 *
 * Only a few operations are supported because the device works only with
 * entire variable-length messages (records). Non-standard values are
 * returned in the other cases, and it has been this way for quite some
 * time. User space applications might depend on this behavior.
 */
static loff_t devkmsg_llseek(struct file *file, loff_t offset, int whence)
{
        struct devkmsg_user *user = file->private_data;
        loff_t ret = 0;

        if (offset)
                return -ESPIPE;

        switch (whence) {
        case SEEK_SET:
                /* the first record */
                atomic64_set(&user->seq, prb_first_valid_seq(prb));
                break;
        case SEEK_DATA:
                /*
                 * The first record after the last SYSLOG_ACTION_CLEAR,
                 * like issued by 'dmesg -c'. Reading /dev/kmsg itself
                 * changes no global state, and does not clear anything.
                 */
                atomic64_set(&user->seq, latched_seq_read_nolock(&clear_seq));
                break;
        case SEEK_END:
                /* after the last record */
                atomic64_set(&user->seq, prb_next_seq(prb));
                break;
        default:
                ret = -EINVAL;
        }
        return ret;
}

static __poll_t devkmsg_poll(struct file *file, poll_table *wait)
{
        struct devkmsg_user *user = file->private_data;
        struct printk_info info;
        __poll_t ret = 0;

        poll_wait(file, &log_wait, wait);

        if (prb_read_valid_info(prb, atomic64_read(&user->seq), &info, NULL)) {
                /* return error when data has vanished underneath us */
                if (info.seq != atomic64_read(&user->seq))
                        ret = EPOLLIN|EPOLLRDNORM|EPOLLERR|EPOLLPRI;
                else
                        ret = EPOLLIN|EPOLLRDNORM;
        }

        return ret;
}

static int devkmsg_open(struct inode *inode, struct file *file)
{
        struct devkmsg_user *user;
        int err;

        if (devkmsg_log & DEVKMSG_LOG_MASK_OFF)
                return -EPERM;

        /* write-only does not need any file context */
        if ((file->f_flags & O_ACCMODE) != O_WRONLY) {
                err = check_syslog_permissions(SYSLOG_ACTION_READ_ALL,
                                               SYSLOG_FROM_READER);
                if (err)
                        return err;
        }

        user = kvmalloc(sizeof(struct devkmsg_user), GFP_KERNEL);
        if (!user)
                return -ENOMEM;

        ratelimit_default_init(&user->rs);
        ratelimit_set_flags(&user->rs, RATELIMIT_MSG_ON_RELEASE);

        mutex_init(&user->lock);

        atomic64_set(&user->seq, prb_first_valid_seq(prb));

        file->private_data = user;
        return 0;
}

static int devkmsg_release(struct inode *inode, struct file *file)
{
        struct devkmsg_user *user = file->private_data;

        ratelimit_state_exit(&user->rs);

        mutex_destroy(&user->lock);
        kvfree(user);
        return 0;
}

const struct file_operations kmsg_fops = {
        .open = devkmsg_open,
        .read = devkmsg_read,
        .write_iter = devkmsg_write,
        .llseek = devkmsg_llseek,
        .poll = devkmsg_poll,
        .release = devkmsg_release,
};

#ifdef CONFIG_VMCORE_INFO
/*
 * This appends the listed symbols to /proc/vmcore
 *
 * /proc/vmcore is used by various utilities, like crash and makedumpfile to
 * obtain access to symbols that are otherwise very difficult to locate. These
 * symbols are specifically used so that utilities can access and extract the
 * dmesg log from a vmcore file after a crash.
 */
void log_buf_vmcoreinfo_setup(void)
{
        struct dev_printk_info *dev_info = NULL;

        VMCOREINFO_SYMBOL(prb);
        VMCOREINFO_SYMBOL(printk_rb_static);
        VMCOREINFO_SYMBOL(clear_seq);

        /*
         * Export struct size and field offsets. User space tools can
         * parse it and detect any changes to structure down the line.
         */

        VMCOREINFO_STRUCT_SIZE(printk_ringbuffer);
        VMCOREINFO_OFFSET(printk_ringbuffer, desc_ring);
        VMCOREINFO_OFFSET(printk_ringbuffer, text_data_ring);
        VMCOREINFO_OFFSET(printk_ringbuffer, fail);

        VMCOREINFO_STRUCT_SIZE(prb_desc_ring);
        VMCOREINFO_OFFSET(prb_desc_ring, count_bits);
        VMCOREINFO_OFFSET(prb_desc_ring, descs);
        VMCOREINFO_OFFSET(prb_desc_ring, infos);
        VMCOREINFO_OFFSET(prb_desc_ring, head_id);
        VMCOREINFO_OFFSET(prb_desc_ring, tail_id);

        VMCOREINFO_STRUCT_SIZE(prb_desc);
        VMCOREINFO_OFFSET(prb_desc, state_var);
        VMCOREINFO_OFFSET(prb_desc, text_blk_lpos);

        VMCOREINFO_STRUCT_SIZE(prb_data_blk_lpos);
        VMCOREINFO_OFFSET(prb_data_blk_lpos, begin);
        VMCOREINFO_OFFSET(prb_data_blk_lpos, next);

        VMCOREINFO_STRUCT_SIZE(printk_info);
        VMCOREINFO_OFFSET(printk_info, seq);
        VMCOREINFO_OFFSET(printk_info, ts_nsec);
        VMCOREINFO_OFFSET(printk_info, text_len);
        VMCOREINFO_OFFSET(printk_info, caller_id);
        VMCOREINFO_OFFSET(printk_info, dev_info);

        VMCOREINFO_STRUCT_SIZE(dev_printk_info);
        VMCOREINFO_OFFSET(dev_printk_info, subsystem);
        VMCOREINFO_LENGTH(printk_info_subsystem, sizeof(dev_info->subsystem));
        VMCOREINFO_OFFSET(dev_printk_info, device);
        VMCOREINFO_LENGTH(printk_info_device, sizeof(dev_info->device));

        VMCOREINFO_STRUCT_SIZE(prb_data_ring);
        VMCOREINFO_OFFSET(prb_data_ring, size_bits);
        VMCOREINFO_OFFSET(prb_data_ring, data);
        VMCOREINFO_OFFSET(prb_data_ring, head_lpos);
        VMCOREINFO_OFFSET(prb_data_ring, tail_lpos);

        VMCOREINFO_SIZE(atomic_long_t);
        VMCOREINFO_TYPE_OFFSET(atomic_long_t, counter);

        VMCOREINFO_STRUCT_SIZE(latched_seq);
        VMCOREINFO_OFFSET(latched_seq, val);
}
#endif

/* requested log_buf_len from kernel cmdline */
static unsigned long __initdata new_log_buf_len;

/* we practice scaling the ring buffer by powers of 2 */
static void __init log_buf_len_update(u64 size)
{
        if (size > (u64)LOG_BUF_LEN_MAX) {
                size = (u64)LOG_BUF_LEN_MAX;
                pr_err("log_buf over 2G is not supported.\n");
        }

        if (size)
                size = roundup_pow_of_two(size);
        if (size > log_buf_len)
                new_log_buf_len = (unsigned long)size;
}

/* save requested log_buf_len since it's too early to process it */
static int __init log_buf_len_setup(char *str)
{
        u64 size;

        if (!str)
                return -EINVAL;

        size = memparse(str, &str);

        log_buf_len_update(size);

        return 0;
}
early_param("log_buf_len", log_buf_len_setup);
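
/*
 * Illustrative usage (editor's note): "log_buf_len=3M" on the kernel
 * command line is parsed by memparse() and rounded up to the next power
 * of two by log_buf_len_update(), so the dynamic buffer ends up 4 MiB.
 */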

#ifdef CONFIG_SMP
#define __LOG_CPU_MAX_BUF_LEN (1 << CONFIG_LOG_CPU_MAX_BUF_SHIFT)

static void __init log_buf_add_cpu(void)
{
        unsigned int cpu_extra;

        /*
         * archs should set up cpu_possible_bits properly with
         * set_cpu_possible() after setup_arch() but just in
         * case let's ensure this is valid.
         */
        if (num_possible_cpus() == 1)
                return;

        cpu_extra = (num_possible_cpus() - 1) * __LOG_CPU_MAX_BUF_LEN;

        /* by default this will only continue through for large > 64 CPUs */
        if (cpu_extra <= __LOG_BUF_LEN / 2)
                return;

        pr_info("log_buf_len individual max cpu contribution: %d bytes\n",
                __LOG_CPU_MAX_BUF_LEN);
        pr_info("log_buf_len total cpu_extra contributions: %d bytes\n",
                cpu_extra);
        pr_info("log_buf_len min size: %d bytes\n", __LOG_BUF_LEN);

        log_buf_len_update(cpu_extra + __LOG_BUF_LEN);
}
#else /* !CONFIG_SMP */
static inline void log_buf_add_cpu(void) {}
#endif /* CONFIG_SMP */
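
/*
 * Worked example (editor's note, assuming CONFIG_LOG_CPU_MAX_BUF_SHIFT=12
 * and CONFIG_LOG_BUF_SHIFT=17): with 64 possible CPUs, cpu_extra is
 * 63 * 4096 = 258048 bytes, which exceeds half of the 128 KiB static
 * buffer, so log_buf_len_update() is asked for 380 KiB and rounds the
 * request up to 512 KiB.
 */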

static void __init set_percpu_data_ready(void)
{
        __printk_percpu_data_ready = true;
}

static unsigned int __init add_to_rb(struct printk_ringbuffer *rb,
                                     struct printk_record *r)
{
        struct prb_reserved_entry e;
        struct printk_record dest_r;

        prb_rec_init_wr(&dest_r, r->info->text_len);

        if (!prb_reserve(&e, rb, &dest_r))
                return 0;

        memcpy(&dest_r.text_buf[0], &r->text_buf[0], r->info->text_len);
        dest_r.info->text_len = r->info->text_len;
        dest_r.info->facility = r->info->facility;
        dest_r.info->level = r->info->level;
        dest_r.info->flags = r->info->flags;
        dest_r.info->ts_nsec = r->info->ts_nsec;
        dest_r.info->caller_id = r->info->caller_id;
        memcpy(&dest_r.info->dev_info, &r->info->dev_info, sizeof(dest_r.info->dev_info));

        prb_final_commit(&e);

        return prb_record_text_space(&e);
}

static char setup_text_buf[PRINTKRB_RECORD_MAX] __initdata;

void __init setup_log_buf(int early)
{
        struct printk_info *new_infos;
        unsigned int new_descs_count;
        struct prb_desc *new_descs;
        struct printk_info info;
        struct printk_record r;
        unsigned int text_size;
        size_t new_descs_size;
        size_t new_infos_size;
        unsigned long flags;
        char *new_log_buf;
        unsigned int free;
        u64 seq;

        /*
         * Some archs call setup_log_buf() multiple times - first is very
         * early, e.g. from setup_arch(), and second - when percpu_areas
         * are initialised.
         */
        if (!early)
                set_percpu_data_ready();

        if (log_buf != __log_buf)
                return;

        if (!early && !new_log_buf_len)
                log_buf_add_cpu();

        if (!new_log_buf_len)
                return;

        new_descs_count = new_log_buf_len >> PRB_AVGBITS;
        if (new_descs_count == 0) {
                pr_err("new_log_buf_len: %lu too small\n", new_log_buf_len);
                return;
        }

        new_log_buf = memblock_alloc(new_log_buf_len, LOG_ALIGN);
        if (unlikely(!new_log_buf)) {
                pr_err("log_buf_len: %lu text bytes not available\n",
                       new_log_buf_len);
                return;
        }

        new_descs_size = new_descs_count * sizeof(struct prb_desc);
        new_descs = memblock_alloc(new_descs_size, LOG_ALIGN);
        if (unlikely(!new_descs)) {
                pr_err("log_buf_len: %zu desc bytes not available\n",
                       new_descs_size);
                goto err_free_log_buf;
        }

        new_infos_size = new_descs_count * sizeof(struct printk_info);
        new_infos = memblock_alloc(new_infos_size, LOG_ALIGN);
        if (unlikely(!new_infos)) {
                pr_err("log_buf_len: %zu info bytes not available\n",
                       new_infos_size);
                goto err_free_descs;
        }

        prb_rec_init_rd(&r, &info, &setup_text_buf[0], sizeof(setup_text_buf));

        prb_init(&printk_rb_dynamic,
                 new_log_buf, ilog2(new_log_buf_len),
                 new_descs, ilog2(new_descs_count),
                 new_infos);

        local_irq_save(flags);

        log_buf_len = new_log_buf_len;
        log_buf = new_log_buf;
        new_log_buf_len = 0;

        free = __LOG_BUF_LEN;
        prb_for_each_record(0, &printk_rb_static, seq, &r) {
                text_size = add_to_rb(&printk_rb_dynamic, &r);
                if (text_size > free)
                        free = 0;
                else
                        free -= text_size;
        }

        prb = &printk_rb_dynamic;

        local_irq_restore(flags);

        /*
         * Copy any remaining messages that might have appeared from
         * NMI context after copying but before switching to the
         * dynamic buffer.
         */
        prb_for_each_record(seq, &printk_rb_static, seq, &r) {
                text_size = add_to_rb(&printk_rb_dynamic, &r);
                if (text_size > free)
                        free = 0;
                else
                        free -= text_size;
        }

        if (seq != prb_next_seq(&printk_rb_static)) {
                pr_err("dropped %llu messages\n",
                       prb_next_seq(&printk_rb_static) - seq);
        }

        pr_info("log_buf_len: %u bytes\n", log_buf_len);
        pr_info("early log buf free: %u(%u%%)\n",
                free, (free * 100) / __LOG_BUF_LEN);
        return;

err_free_descs:
        memblock_free(new_descs, new_descs_size);
err_free_log_buf:
        memblock_free(new_log_buf, new_log_buf_len);
}

static bool __read_mostly ignore_loglevel;

static int __init ignore_loglevel_setup(char *str)
{
        ignore_loglevel = true;
        pr_info("debug: ignoring loglevel setting.\n");

        return 0;
}

early_param("ignore_loglevel", ignore_loglevel_setup);
module_param(ignore_loglevel, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(ignore_loglevel,
                 "ignore loglevel setting (prints all kernel messages to the console)");

static bool suppress_message_printing(int level)
{
        return (level >= console_loglevel && !ignore_loglevel);
}

#ifdef CONFIG_BOOT_PRINTK_DELAY

static int boot_delay; /* msecs delay after each printk during bootup */
static unsigned long long loops_per_msec;       /* based on boot_delay */

static int __init boot_delay_setup(char *str)
{
        unsigned long lpj;

        lpj = preset_lpj ? preset_lpj : 1000000;        /* some guess */
        loops_per_msec = (unsigned long long)lpj / 1000 * HZ;

        get_option(&str, &boot_delay);
        if (boot_delay > 10 * 1000)
                boot_delay = 0;

        pr_debug("boot_delay: %u, preset_lpj: %ld, lpj: %lu, "
                 "HZ: %d, loops_per_msec: %llu\n",
                 boot_delay, preset_lpj, lpj, HZ, loops_per_msec);
        return 0;
}
early_param("boot_delay", boot_delay_setup);

static void boot_delay_msec(int level)
{
        unsigned long long k;
        unsigned long timeout;

        if ((boot_delay == 0 || system_state >= SYSTEM_RUNNING)
                        || suppress_message_printing(level)) {
                return;
        }

        k = (unsigned long long)loops_per_msec * boot_delay;

        timeout = jiffies + msecs_to_jiffies(boot_delay);
        while (k) {
                k--;
                cpu_relax();
                /*
                 * use (volatile) jiffies to prevent
                 * compiler reduction; loop termination via jiffies
                 * is secondary and may or may not happen.
                 */
                if (time_after(jiffies, timeout))
                        break;
                touch_nmi_watchdog();
        }
}
#else
static inline void boot_delay_msec(int level)
{
}
#endif

static bool printk_time = IS_ENABLED(CONFIG_PRINTK_TIME);
module_param_named(time, printk_time, bool, S_IRUGO | S_IWUSR);

static size_t print_syslog(unsigned int level, char *buf)
{
        return sprintf(buf, "<%u>", level);
}

static size_t print_time(u64 ts, char *buf)
{
        unsigned long rem_nsec = do_div(ts, 1000000000);

        return sprintf(buf, "[%5lu.%06lu]",
                       (unsigned long)ts, rem_nsec / 1000);
}
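
/*
 * Worked example (editor's note): print_time(5123456789, buf) splits the
 * timestamp into 5 s and 123456789 ns and produces "[    5.123456]".
 */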

#ifdef CONFIG_PRINTK_CALLER
static size_t print_caller(u32 id, char *buf)
{
        char caller[12];

        snprintf(caller, sizeof(caller), "%c%u",
                 id & 0x80000000 ? 'C' : 'T', id & ~0x80000000);
        return sprintf(buf, "[%6s]", caller);
}
#else
#define print_caller(id, buf) 0
#endif
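
/*
 * Illustrative examples (editor's note): a caller_id with the top bit clear
 * is a task PID and prints as e.g. "[  T299]"; with the top bit set it is
 * the CPU number of an interrupt context and prints as e.g. "[    C1]".
 */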
1346 | |
1347 | static size_t info_print_prefix(const struct printk_info *info, bool syslog, |
1348 | bool time, char *buf) |
1349 | { |
1350 | size_t len = 0; |
1351 | |
1352 | if (syslog) |
1353 | len = print_syslog(level: (info->facility << 3) | info->level, buf); |
1354 | |
1355 | if (time) |
1356 | len += print_time(ts: info->ts_nsec, buf: buf + len); |
1357 | |
1358 | len += print_caller(id: info->caller_id, buf: buf + len); |
1359 | |
1360 | if (IS_ENABLED(CONFIG_PRINTK_CALLER) || time) { |
1361 | buf[len++] = ' '; |
1362 | buf[len] = '\0'; |
1363 | } |
1364 | |
1365 | return len; |
1366 | } |
1367 | |
1368 | /* |
1369 | * Prepare the record for printing. The text is shifted within the given |
1370 | * buffer to avoid a need for another one. The following operations are |
1371 | * done: |
1372 | * |
1373 | * - Add prefix for each line. |
1374 | * - Drop truncated lines that no longer fit into the buffer. |
1375 | * - Add the trailing newline that has been removed in vprintk_store(). |
1376 | * - Add a string terminator. |
1377 | * |
1378 | * Since the produced string is always terminated, the maximum possible |
1379 | * return value is @r->text_buf_size - 1; |
1380 | * |
1381 | * Return: The length of the updated/prepared text, including the added |
1382 | * prefixes and the newline. The terminator is not counted. The dropped |
1383 | * line(s) are not counted. |
1384 | */ |
1385 | static size_t record_print_text(struct printk_record *r, bool syslog, |
1386 | bool time) |
1387 | { |
1388 | size_t text_len = r->info->text_len; |
1389 | size_t buf_size = r->text_buf_size; |
1390 | char *text = r->text_buf; |
1391 | char prefix[PRINTK_PREFIX_MAX]; |
1392 | bool truncated = false; |
1393 | size_t prefix_len; |
1394 | size_t line_len; |
1395 | size_t len = 0; |
1396 | char *next; |
1397 | |
1398 | /* |
1399 | * If the message was truncated because the buffer was not large |
1400 | * enough, treat the available text as if it were the full text. |
1401 | */ |
1402 | if (text_len > buf_size) |
1403 | text_len = buf_size; |
1404 | |
1405 | prefix_len = info_print_prefix(info: r->info, syslog, time, buf: prefix); |
1406 | |
1407 | /* |
1408 | * @text_len: bytes of unprocessed text |
1409 | * @line_len: bytes of current line _without_ newline |
1410 | * @text: pointer to beginning of current line |
1411 | * @len: number of bytes prepared in r->text_buf |
1412 | */ |
1413 | for (;;) { |
1414 | next = memchr(p: text, c: '\n', size: text_len); |
1415 | if (next) { |
1416 | line_len = next - text; |
1417 | } else { |
1418 | /* Drop truncated line(s). */ |
1419 | if (truncated) |
1420 | break; |
1421 | line_len = text_len; |
1422 | } |
1423 | |
1424 | /* |
1425 | * Truncate the text if there is not enough space to add the |
1426 | * prefix and a trailing newline and a terminator. |
1427 | */ |
1428 | if (len + prefix_len + text_len + 1 + 1 > buf_size) { |
1429 | /* Drop even the current line if no space. */ |
1430 | if (len + prefix_len + line_len + 1 + 1 > buf_size) |
1431 | break; |
1432 | |
1433 | text_len = buf_size - len - prefix_len - 1 - 1; |
1434 | truncated = true; |
1435 | } |
1436 | |
1437 | memmove(text + prefix_len, text, text_len); |
1438 | memcpy(text, prefix, prefix_len); |
1439 | |
1440 | /* |
1441 | * Increment the prepared length to include the text and |
1442 | * prefix that were just moved+copied. Also increment for the |
1443 | * newline at the end of this line. If this is the last line, |
1444 | * there is no newline, but it will be added immediately below. |
1445 | */ |
1446 | len += prefix_len + line_len + 1; |
1447 | if (text_len == line_len) { |
1448 | /* |
1449 | * This is the last line. Add the trailing newline |
1450 | * removed in vprintk_store(). |
1451 | */ |
1452 | text[prefix_len + line_len] = '\n'; |
1453 | break; |
1454 | } |
1455 | |
1456 | /* |
1457 | * Advance beyond the added prefix and the related line with |
1458 | * its newline. |
1459 | */ |
1460 | text += prefix_len + line_len + 1; |
1461 | |
1462 | /* |
1463 | * The remaining text has only decreased by the line with its |
1464 | * newline. |
1465 | * |
1466 | * Note that @text_len can become zero. It happens when @text |
1467 | * ended with a newline (either due to truncation or the |
1468 | * original string ending with "\n\n"). The loop is correctly |
1469 | * repeated and (if not truncated) an empty line with a prefix |
1470 | * will be prepared. |
1471 | */ |
1472 | text_len -= line_len + 1; |
1473 | } |
1474 | |
1475 | /* |
1476 | * If a buffer was provided, it will be terminated. Space for the |
1477 | * string terminator is guaranteed to be available. The terminator is |
1478 | * not counted in the return value. |
1479 | */ |
1480 | if (buf_size > 0) |
1481 | r->text_buf[len] = 0; |
1482 | |
1483 | return len; |
1484 | } |
1485 | |
1486 | static size_t get_record_print_text_size(struct printk_info *info, |
1487 | unsigned int line_count, |
1488 | bool syslog, bool time) |
1489 | { |
1490 | char prefix[PRINTK_PREFIX_MAX]; |
1491 | size_t prefix_len; |
1492 | |
1493 | prefix_len = info_print_prefix(info, syslog, time, buf: prefix); |
1494 | |
1495 | /* |
1496 | * Each line will be preceded with a prefix. The intermediate |
1497 | * newlines are already within the text, but a final trailing |
1498 | * newline will be added. |
1499 | */ |
1500 | return ((prefix_len * line_count) + info->text_len + 1); |
1501 | } |
1502 | |
1503 | /* |
1504 | * Beginning with @start_seq, find the first record where it and all following |
1505 | * records up to (but not including) @max_seq fit into @size. |
1506 | * |
1507 | * @max_seq is simply an upper bound and does not need to exist. If the caller |
1508 | * does not require an upper bound, -1 can be used for @max_seq. |
1509 | */ |
1510 | static u64 find_first_fitting_seq(u64 start_seq, u64 max_seq, size_t size, |
1511 | bool syslog, bool time) |
1512 | { |
1513 | struct printk_info info; |
1514 | unsigned int line_count; |
1515 | size_t len = 0; |
1516 | u64 seq; |
1517 | |
1518 | /* Determine the size of the records up to @max_seq. */ |
1519 | prb_for_each_info(start_seq, prb, seq, &info, &line_count) { |
1520 | if (info.seq >= max_seq) |
1521 | break; |
1522 | len += get_record_print_text_size(info: &info, line_count, syslog, time); |
1523 | } |
1524 | |
1525 | /* |
1526 | * Adjust the upper bound for the next loop to avoid subtracting |
1527 | * lengths that were never added. |
1528 | */ |
1529 | if (seq < max_seq) |
1530 | max_seq = seq; |
1531 | |
1532 | /* |
1533 | * Move first record forward until length fits into the buffer. Ignore |
1534 | * newest messages that were not counted in the above cycle. Messages |
1535 | * might appear and get lost in the meantime. This is a best effort |
1536 | * that prevents an infinite loop that could occur with a retry. |
1537 | */ |
1538 | prb_for_each_info(start_seq, prb, seq, &info, &line_count) { |
1539 | if (len <= size || info.seq >= max_seq) |
1540 | break; |
1541 | len -= get_record_print_text_size(info: &info, line_count, syslog, time); |
1542 | } |
1543 | |
1544 | return seq; |
1545 | } |
1546 | |
1547 | /* The caller is responsible for making sure @size is greater than 0. */ |
1548 | static int syslog_print(char __user *buf, int size) |
1549 | { |
1550 | struct printk_info info; |
1551 | struct printk_record r; |
1552 | char *text; |
1553 | int len = 0; |
1554 | u64 seq; |
1555 | |
1556 | text = kmalloc(PRINTK_MESSAGE_MAX, GFP_KERNEL); |
1557 | if (!text) |
1558 | return -ENOMEM; |
1559 | |
1560 | prb_rec_init_rd(r: &r, info: &info, text_buf: text, PRINTK_MESSAGE_MAX); |
1561 | |
1562 | mutex_lock(&syslog_lock); |
1563 | |
1564 | /* |
1565 | * Wait for the @syslog_seq record to be available. @syslog_seq may |
1566 | * change while waiting. |
1567 | */ |
1568 | do { |
1569 | seq = syslog_seq; |
1570 | |
1571 | mutex_unlock(lock: &syslog_lock); |
1572 | /* |
1573 | * Guarantee this task is visible on the waitqueue before |
1574 | * checking the wake condition. |
1575 | * |
1576 | * The full memory barrier within set_current_state() of |
1577 | * prepare_to_wait_event() pairs with the full memory barrier |
1578 | * within wq_has_sleeper(). |
1579 | * |
1580 | * This pairs with __wake_up_klogd:A. |
1581 | */ |
1582 | len = wait_event_interruptible(log_wait, |
1583 | prb_read_valid(prb, seq, NULL)); /* LMM(syslog_print:A) */ |
1584 | mutex_lock(&syslog_lock); |
1585 | |
1586 | if (len) |
1587 | goto out; |
1588 | } while (syslog_seq != seq); |
1589 | |
1590 | /* |
1591 | * Copy records that fit into the buffer. The above cycle makes sure |
1592 | * that the first record is always available. |
1593 | */ |
1594 | do { |
1595 | size_t n; |
1596 | size_t skip; |
1597 | int err; |
1598 | |
1599 | if (!prb_read_valid(rb: prb, seq: syslog_seq, r: &r)) |
1600 | break; |
1601 | |
1602 | if (r.info->seq != syslog_seq) { |
1603 | /* message is gone, move to next valid one */ |
1604 | syslog_seq = r.info->seq; |
1605 | syslog_partial = 0; |
1606 | } |
1607 | |
1608 | /* |
1609 | * To keep reading/counting partial line consistent, |
1610 | * use printk_time value as of the beginning of a line. |
1611 | */ |
1612 | if (!syslog_partial) |
1613 | syslog_time = printk_time; |
1614 | |
1615 | skip = syslog_partial; |
1616 | n = record_print_text(r: &r, syslog: true, time: syslog_time); |
1617 | if (n - syslog_partial <= size) { |
1618 | /* message fits into buffer, move forward */ |
1619 | syslog_seq = r.info->seq + 1; |
1620 | n -= syslog_partial; |
1621 | syslog_partial = 0; |
1622 | } else if (!len){ |
1623 | /* partial read(), remember position */ |
1624 | n = size; |
1625 | syslog_partial += n; |
1626 | } else |
1627 | n = 0; |
1628 | |
1629 | if (!n) |
1630 | break; |
1631 | |
1632 | mutex_unlock(lock: &syslog_lock); |
1633 | err = copy_to_user(to: buf, from: text + skip, n); |
1634 | mutex_lock(&syslog_lock); |
1635 | |
1636 | if (err) { |
1637 | if (!len) |
1638 | len = -EFAULT; |
1639 | break; |
1640 | } |
1641 | |
1642 | len += n; |
1643 | size -= n; |
1644 | buf += n; |
1645 | } while (size); |
1646 | out: |
1647 | mutex_unlock(lock: &syslog_lock); |
1648 | kfree(objp: text); |
1649 | return len; |
1650 | } |
1651 | |
1652 | static int syslog_print_all(char __user *buf, int size, bool clear) |
1653 | { |
1654 | struct printk_info info; |
1655 | struct printk_record r; |
1656 | char *text; |
1657 | int len = 0; |
1658 | u64 seq; |
1659 | bool time; |
1660 | |
1661 | text = kmalloc(PRINTK_MESSAGE_MAX, GFP_KERNEL); |
1662 | if (!text) |
1663 | return -ENOMEM; |
1664 | |
1665 | time = printk_time; |
1666 | /* |
1667 | * Find first record that fits, including all following records, |
1668 | * into the user-provided buffer for this dump. |
1669 | */ |
1670 | seq = find_first_fitting_seq(start_seq: latched_seq_read_nolock(ls: &clear_seq), max_seq: -1, |
1671 | size, syslog: true, time); |
1672 | |
1673 | prb_rec_init_rd(r: &r, info: &info, text_buf: text, PRINTK_MESSAGE_MAX); |
1674 | |
1675 | prb_for_each_record(seq, prb, seq, &r) { |
1676 | int textlen; |
1677 | |
1678 | textlen = record_print_text(r: &r, syslog: true, time); |
1679 | |
1680 | if (len + textlen > size) { |
1681 | seq--; |
1682 | break; |
1683 | } |
1684 | |
1685 | if (copy_to_user(to: buf + len, from: text, n: textlen)) |
1686 | len = -EFAULT; |
1687 | else |
1688 | len += textlen; |
1689 | |
1690 | if (len < 0) |
1691 | break; |
1692 | } |
1693 | |
1694 | if (clear) { |
1695 | mutex_lock(&syslog_lock); |
1696 | latched_seq_write(ls: &clear_seq, val: seq); |
1697 | mutex_unlock(lock: &syslog_lock); |
1698 | } |
1699 | |
1700 | kfree(objp: text); |
1701 | return len; |
1702 | } |
1703 | |
1704 | static void syslog_clear(void) |
1705 | { |
1706 | mutex_lock(&syslog_lock); |
1707 | latched_seq_write(&clear_seq, prb_next_seq(prb)); |
1708 | mutex_unlock(&syslog_lock); |
1709 | } |
1710 | |
1711 | int do_syslog(int type, char __user *buf, int len, int source) |
1712 | { |
1713 | struct printk_info info; |
1714 | bool clear = false; |
1715 | static int saved_console_loglevel = LOGLEVEL_DEFAULT; |
1716 | int error; |
1717 | |
1718 | error = check_syslog_permissions(type, source); |
1719 | if (error) |
1720 | return error; |
1721 | |
1722 | switch (type) { |
1723 | case SYSLOG_ACTION_CLOSE: /* Close log */ |
1724 | break; |
1725 | case SYSLOG_ACTION_OPEN: /* Open log */ |
1726 | break; |
1727 | case SYSLOG_ACTION_READ: /* Read from log */ |
1728 | if (!buf || len < 0) |
1729 | return -EINVAL; |
1730 | if (!len) |
1731 | return 0; |
1732 | if (!access_ok(buf, len)) |
1733 | return -EFAULT; |
1734 | error = syslog_print(buf, len); |
1735 | break; |
1736 | /* Read/clear last kernel messages */ |
1737 | case SYSLOG_ACTION_READ_CLEAR: |
1738 | clear = true; |
1739 | fallthrough; |
1740 | /* Read last kernel messages */ |
1741 | case SYSLOG_ACTION_READ_ALL: |
1742 | if (!buf || len < 0) |
1743 | return -EINVAL; |
1744 | if (!len) |
1745 | return 0; |
1746 | if (!access_ok(buf, len)) |
1747 | return -EFAULT; |
1748 | error = syslog_print_all(buf, len, clear); |
1749 | break; |
1750 | /* Clear ring buffer */ |
1751 | case SYSLOG_ACTION_CLEAR: |
1752 | syslog_clear(); |
1753 | break; |
1754 | /* Disable logging to console */ |
1755 | case SYSLOG_ACTION_CONSOLE_OFF: |
1756 | if (saved_console_loglevel == LOGLEVEL_DEFAULT) |
1757 | saved_console_loglevel = console_loglevel; |
1758 | console_loglevel = minimum_console_loglevel; |
1759 | break; |
1760 | /* Enable logging to console */ |
1761 | case SYSLOG_ACTION_CONSOLE_ON: |
1762 | if (saved_console_loglevel != LOGLEVEL_DEFAULT) { |
1763 | console_loglevel = saved_console_loglevel; |
1764 | saved_console_loglevel = LOGLEVEL_DEFAULT; |
1765 | } |
1766 | break; |
1767 | /* Set level of messages printed to console */ |
1768 | case SYSLOG_ACTION_CONSOLE_LEVEL: |
1769 | if (len < 1 || len > 8) |
1770 | return -EINVAL; |
1771 | if (len < minimum_console_loglevel) |
1772 | len = minimum_console_loglevel; |
1773 | console_loglevel = len; |
1774 | /* Implicitly re-enable logging to console */ |
1775 | saved_console_loglevel = LOGLEVEL_DEFAULT; |
1776 | break; |
1777 | /* Number of chars in the log buffer */ |
1778 | case SYSLOG_ACTION_SIZE_UNREAD: |
1779 | mutex_lock(&syslog_lock); |
1780 | if (!prb_read_valid_info(prb, syslog_seq, &info, NULL)) { |
1781 | /* No unread messages. */ |
1782 | mutex_unlock(&syslog_lock); |
1783 | return 0; |
1784 | } |
1785 | if (info.seq != syslog_seq) { |
1786 | /* messages are gone, move to first one */ |
1787 | syslog_seq = info.seq; |
1788 | syslog_partial = 0; |
1789 | } |
1790 | if (source == SYSLOG_FROM_PROC) { |
1791 | /* |
1792 | * Shortcut for poll() on /proc/kmsg, which simply checks |
1793 | * for pending data, not the size; return the count of |
1794 | * records, not the length. |
1795 | */ |
1796 | error = prb_next_seq(prb) - syslog_seq; |
1797 | } else { |
1798 | bool time = syslog_partial ? syslog_time : printk_time; |
1799 | unsigned int line_count; |
1800 | u64 seq; |
1801 | |
1802 | prb_for_each_info(syslog_seq, prb, seq, &info, |
1803 | &line_count) { |
1804 | error += get_record_print_text_size(&info, line_count, |
1805 | true, time); |
1806 | time = printk_time; |
1807 | } |
1808 | error -= syslog_partial; |
1809 | } |
1810 | mutex_unlock(&syslog_lock); |
1811 | break; |
1812 | /* Size of the log buffer */ |
1813 | case SYSLOG_ACTION_SIZE_BUFFER: |
1814 | error = log_buf_len; |
1815 | break; |
1816 | default: |
1817 | error = -EINVAL; |
1818 | break; |
1819 | } |
1820 | |
1821 | return error; |
1822 | } |
1823 | |
1824 | SYSCALL_DEFINE3(syslog, int, type, char __user *, buf, int, len) |
1825 | { |
1826 | return do_syslog(type, buf, len, SYSLOG_FROM_READER); |
1827 | } |
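 | |
 | /* |
 |  * For illustration only (not kernel code): userspace typically reaches |
 |  * this syscall through glibc's klogctl() wrapper. A minimal sketch, |
 |  * assuming a hypothetical 8 KiB buffer: |
 |  * |
 |  *	#include <sys/klog.h> |
 |  * |
 |  *	char buf[8192]; |
 |  *	int n = klogctl(3, buf, sizeof(buf));	(3 == SYSLOG_ACTION_READ_ALL) |
 |  *	if (n > 0) |
 |  *		fwrite(buf, 1, n, stdout); |
 |  * |
 |  * SYSLOG_ACTION_READ_ALL copies the newest messages without consuming |
 |  * them; SYSLOG_ACTION_READ (2) instead blocks until unread data arrives. |
 |  */ |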
1828 | |
1829 | /* |
1830 | * Special console_lock variants that help to reduce the risk of soft-lockups. |
1831 | * They allow passing the console_lock to another printk() call using a busy wait. |
1832 | */ |
1833 | |
1834 | #ifdef CONFIG_LOCKDEP |
1835 | static struct lockdep_map console_owner_dep_map = { |
1836 | .name = "console_owner" |
1837 | }; |
1838 | #endif |
1839 | |
1840 | static DEFINE_RAW_SPINLOCK(console_owner_lock); |
1841 | static struct task_struct *console_owner; |
1842 | static bool console_waiter; |
1843 | |
1844 | /** |
1845 | * console_lock_spinning_enable - mark beginning of code where another |
1846 | * thread might safely busy wait |
1847 | * |
1848 | * This basically converts console_lock into a spinlock. It marks |
1849 | * the section where the console_lock owner must not sleep, because |
1850 | * there may be a waiter spinning (as on a spinlock). The owner must |
1851 | * also be ready to hand over the lock at the end of the section. |
1852 | */ |
1853 | static void console_lock_spinning_enable(void) |
1854 | { |
1855 | /* |
1856 | * Do not use spinning in panic(). The panic CPU wants to keep the lock. |
1857 | * Non-panic CPUs abandon the flush anyway. |
1858 | * |
1859 | * Just keep the lockdep annotation. The panic-CPU should avoid |
1860 | * taking console_owner_lock because it might cause a deadlock. |
1861 | * This looks like the easiest way to prevent false lockdep |
1862 | * reports without handling the races in a lockless way. |
1863 | */ |
1864 | if (panic_in_progress()) |
1865 | goto lockdep; |
1866 | |
1867 | raw_spin_lock(&console_owner_lock); |
1868 | console_owner = current; |
1869 | raw_spin_unlock(&console_owner_lock); |
1870 | |
1871 | lockdep: |
1872 | /* The waiter may spin on us after setting console_owner */ |
1873 | spin_acquire(&console_owner_dep_map, 0, 0, _THIS_IP_); |
1874 | } |
1875 | |
1876 | /** |
1877 | * console_lock_spinning_disable_and_check - mark end of code where another |
1878 | * thread was able to busy wait and check if there is a waiter |
1879 | * @cookie: cookie returned from console_srcu_read_lock() |
1880 | * |
1881 | * This is called at the end of the section where spinning is allowed. |
1882 | * It has two functions. First, it is a signal that it is no longer |
1883 | * safe to start busy waiting for the lock. Second, it checks if |
1884 | * there is a busy waiter and passes the lock rights to it. |
1885 | * |
1886 | * Important: Callers lose both the console_lock and the SRCU read lock if |
1887 | * there was a busy waiter. They must not touch items synchronized by |
1888 | * console_lock or SRCU read lock in this case. |
1889 | * |
1890 | * Return: 1 if the lock rights were passed, 0 otherwise. |
1891 | */ |
1892 | static int console_lock_spinning_disable_and_check(int cookie) |
1893 | { |
1894 | int waiter; |
1895 | |
1896 | /* |
1897 | * Ignore spinning waiters during panic() because they might get stopped |
1898 | * or blocked at any time. |
1899 | * |
1900 | * It is safe because nobody is allowed to start spinning during panic |
1901 | * in the first place. If there has been a waiter then non-panic CPUs |
1902 | * might stay spinning. They would get stopped anyway. The panic context |
1903 | * will never start spinning and an interrupted spin on the panic CPU |
1904 | * will never continue. |
1905 | */ |
1906 | if (panic_in_progress()) { |
1907 | /* Keep lockdep happy. */ |
1908 | spin_release(&console_owner_dep_map, _THIS_IP_); |
1909 | return 0; |
1910 | } |
1911 | |
1912 | raw_spin_lock(&console_owner_lock); |
1913 | waiter = READ_ONCE(console_waiter); |
1914 | console_owner = NULL; |
1915 | raw_spin_unlock(&console_owner_lock); |
1916 | |
1917 | if (!waiter) { |
1918 | spin_release(&console_owner_dep_map, _THIS_IP_); |
1919 | return 0; |
1920 | } |
1921 | |
1922 | /* The waiter is now free to continue */ |
1923 | WRITE_ONCE(console_waiter, false); |
1924 | |
1925 | spin_release(&console_owner_dep_map, _THIS_IP_); |
1926 | |
1927 | /* |
1928 | * Preserve lockdep lock ordering. Release the SRCU read lock before |
1929 | * releasing the console_lock. |
1930 | */ |
1931 | console_srcu_read_unlock(cookie); |
1932 | |
1933 | /* |
1934 | * Hand off console_lock to waiter. The waiter will perform |
1935 | * the up(). After this, the waiter is the console_lock owner. |
1936 | */ |
1937 | mutex_release(&console_lock_dep_map, _THIS_IP_); |
1938 | return 1; |
1939 | } |
1940 | |
1941 | /** |
1942 | * console_trylock_spinning - try to get console_lock by busy waiting |
1943 | * |
1944 | * This allows busy waiting for the console_lock when the current |
1945 | * owner is running in specially marked sections. Those sections |
1946 | * guarantee that the owner is running and cannot reschedule until |
1947 | * it is ready to hand over the lock. |
1948 | * |
1949 | * Return: 1 if we got the lock, 0 otherwise |
1950 | */ |
1951 | static int console_trylock_spinning(void) |
1952 | { |
1953 | struct task_struct *owner = NULL; |
1954 | bool waiter; |
1955 | bool spin = false; |
1956 | unsigned long flags; |
1957 | |
1958 | if (console_trylock()) |
1959 | return 1; |
1960 | |
1961 | /* |
1962 | * It's unsafe to spin once a panic has begun. If we are the |
1963 | * panic CPU, we may have already halted the owner of the |
1964 | * console_sem. If we are not the panic CPU, then we should |
1965 | * avoid taking console_sem, so the panic CPU has a better |
1966 | * chance of cleanly acquiring it later. |
1967 | */ |
1968 | if (panic_in_progress()) |
1969 | return 0; |
1970 | |
1971 | printk_safe_enter_irqsave(flags); |
1972 | |
1973 | raw_spin_lock(&console_owner_lock); |
1974 | owner = READ_ONCE(console_owner); |
1975 | waiter = READ_ONCE(console_waiter); |
1976 | if (!waiter && owner && owner != current) { |
1977 | WRITE_ONCE(console_waiter, true); |
1978 | spin = true; |
1979 | } |
1980 | raw_spin_unlock(&console_owner_lock); |
1981 | |
1982 | /* |
1983 | * If there is an active printk() writing to the |
1984 | * consoles, instead of having it write our data too, |
1985 | * see if we can offload that load from the active |
1986 | * printer, and do some printing ourselves. |
1987 | * Go into a spin only if there isn't already a waiter |
1988 | * spinning, and there is an active printer, and |
1989 | * that active printer isn't us (recursive printk?). |
1990 | */ |
1991 | if (!spin) { |
1992 | printk_safe_exit_irqrestore(flags); |
1993 | return 0; |
1994 | } |
1995 | |
1996 | /* We spin waiting for the owner to release us */ |
1997 | spin_acquire(&console_owner_dep_map, 0, 0, _THIS_IP_); |
1998 | /* Owner will clear console_waiter on hand off */ |
1999 | while (READ_ONCE(console_waiter)) |
2000 | cpu_relax(); |
2001 | spin_release(&console_owner_dep_map, _THIS_IP_); |
2002 | |
2003 | printk_safe_exit_irqrestore(flags); |
2004 | /* |
2005 | * The owner passed the console lock to us. |
2006 | * Since we did not spin on console lock, annotate |
2007 | * this as a trylock. Otherwise lockdep will |
2008 | * complain. |
2009 | */ |
2010 | mutex_acquire(&console_lock_dep_map, 0, 1, _THIS_IP_); |
2011 | |
2012 | /* |
2013 | * Update @console_may_schedule for trylock because the previous |
2014 | * owner may have been schedulable. |
2015 | */ |
2016 | console_may_schedule = 0; |
2017 | |
2018 | return 1; |
2019 | } |
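 | |
 | /* |
 |  * A rough timeline of the hand-over implemented above, with the owner in |
 |  * the spinning-enabled section and the waiter in console_trylock_spinning() |
 |  * (a sketch derived from the code in this file, not a normative diagram): |
 |  * |
 |  *	owner CPU					waiter CPU |
 |  *	console_lock_spinning_enable() |
 |  *							sees console_owner set |
 |  *							sets console_waiter = true |
 |  *							spins on console_waiter |
 |  *	con->write(...) finishes |
 |  *	console_lock_spinning_disable_and_check() |
 |  *	  observes console_waiter, clears it |
 |  *	  releases lockdep ownership |
 |  *							stops spinning, becomes the |
 |  *							new console_lock owner |
 |  */ |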
2020 | |
2021 | /* |
2022 | * Recursion is tracked separately on each CPU. If NMIs are supported, an |
2023 | * additional NMI context per CPU is also separately tracked. Until per-CPU |
2024 | * data is available, a separate "early tracking" is performed. |
2025 | */ |
2026 | static DEFINE_PER_CPU(u8, printk_count); |
2027 | static u8 printk_count_early; |
2028 | #ifdef CONFIG_HAVE_NMI |
2029 | static DEFINE_PER_CPU(u8, printk_count_nmi); |
2030 | static u8 printk_count_nmi_early; |
2031 | #endif |
2032 | |
2033 | /* |
2034 | * Recursion is limited to keep the output sane. printk() should not require |
2035 | * more than 1 level of recursion (allowing, for example, printk() to trigger |
2036 | * a WARN), but a higher value is used in case some printk-internal errors |
2037 | * exist, such as the ringbuffer validation checks failing. |
2038 | */ |
2039 | #define PRINTK_MAX_RECURSION 3 |
2040 | |
2041 | /* |
2042 | * Return a pointer to the dedicated counter for the CPU+context of the |
2043 | * caller. |
2044 | */ |
2045 | static u8 *__printk_recursion_counter(void) |
2046 | { |
2047 | #ifdef CONFIG_HAVE_NMI |
2048 | if (in_nmi()) { |
2049 | if (printk_percpu_data_ready()) |
2050 | return this_cpu_ptr(&printk_count_nmi); |
2051 | return &printk_count_nmi_early; |
2052 | } |
2053 | #endif |
2054 | if (printk_percpu_data_ready()) |
2055 | return this_cpu_ptr(&printk_count); |
2056 | return &printk_count_early; |
2057 | } |
2058 | |
2059 | /* |
2060 | * Enter recursion tracking. Interrupts are disabled to simplify tracking. |
2061 | * The caller must check the boolean return value to see if the recursion is |
2062 | * allowed. On failure, interrupts are not disabled. |
2063 | * |
2064 | * @recursion_ptr must be a variable of type (u8 *) and is the same variable |
2065 | * that is passed to printk_exit_irqrestore(). |
2066 | */ |
2067 | #define printk_enter_irqsave(recursion_ptr, flags) \ |
2068 | ({ \ |
2069 | bool success = true; \ |
2070 | \ |
2071 | typecheck(u8 *, recursion_ptr); \ |
2072 | local_irq_save(flags); \ |
2073 | (recursion_ptr) = __printk_recursion_counter(); \ |
2074 | if (*(recursion_ptr) > PRINTK_MAX_RECURSION) { \ |
2075 | local_irq_restore(flags); \ |
2076 | success = false; \ |
2077 | } else { \ |
2078 | (*(recursion_ptr))++; \ |
2079 | } \ |
2080 | success; \ |
2081 | }) |
2082 | |
2083 | /* Exit recursion tracking, restoring interrupts. */ |
2084 | #define printk_exit_irqrestore(recursion_ptr, flags) \ |
2085 | do { \ |
2086 | typecheck(u8 *, recursion_ptr); \ |
2087 | (*(recursion_ptr))--; \ |
2088 | local_irq_restore(flags); \ |
2089 | } while (0) |
2090 | |
2091 | int printk_delay_msec __read_mostly; |
2092 | |
2093 | static inline void printk_delay(int level) |
2094 | { |
2095 | boot_delay_msec(level); |
2096 | |
2097 | if (unlikely(printk_delay_msec)) { |
2098 | int m = printk_delay_msec; |
2099 | |
2100 | while (m--) { |
2101 | mdelay(1); |
2102 | touch_nmi_watchdog(); |
2103 | } |
2104 | } |
2105 | } |
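 | |
 | /* |
 |  * printk_delay_msec is exposed as the "kernel.printk_delay" sysctl. For |
 |  * example, to slow every message down by 10ms while chasing a crash on a |
 |  * slow console (illustrative shell usage, not kernel code): |
 |  * |
 |  *	# sysctl kernel.printk_delay=10 |
 |  */ |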
2106 | |
2107 | static inline u32 printk_caller_id(void) |
2108 | { |
2109 | return in_task() ? task_pid_nr(current) : |
2110 | 0x80000000 + smp_processor_id(); |
2111 | } |
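 | |
 | /* |
 |  * Example decodings of the value above: 42 means the record was stored |
 |  * from task context by PID 42; 0x80000002 means it was stored from |
 |  * non-task context (hard IRQ, softirq or NMI) on CPU 2. The top bit |
 |  * distinguishes the two cases. |
 |  */ |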
2112 | |
2113 | /** |
2114 | * printk_parse_prefix - Parse level and control flags. |
2115 | * |
2116 | * @text: The terminated text message. |
2117 | * @level: A pointer to the current level value, will be updated. |
2118 | * @flags: A pointer to the current printk_info flags, will be updated. |
2119 | * |
2120 | * @level may be NULL if the caller is not interested in the parsed value. |
2121 | * Otherwise the variable pointed to by @level must be set to |
2122 | * LOGLEVEL_DEFAULT in order to be updated with the parsed value. |
2123 | * |
2124 | * @flags may be NULL if the caller is not interested in the parsed value. |
2125 | * Otherwise the variable pointed to by @flags will be OR'd with the parsed |
2126 | * value. |
2127 | * |
2128 | * Return: The length of the parsed level and control flags. |
2129 | */ |
2130 | u16 printk_parse_prefix(const char *text, int *level, |
2131 | enum printk_info_flags *flags) |
2132 | { |
2133 | u16 prefix_len = 0; |
2134 | int kern_level; |
2135 | |
2136 | while (*text) { |
2137 | kern_level = printk_get_level(text); |
2138 | if (!kern_level) |
2139 | break; |
2140 | |
2141 | switch (kern_level) { |
2142 | case '0' ... '7': |
2143 | if (level && *level == LOGLEVEL_DEFAULT) |
2144 | *level = kern_level - '0'; |
2145 | break; |
2146 | case 'c': /* KERN_CONT */ |
2147 | if (flags) |
2148 | *flags |= LOG_CONT; |
2149 | } |
2150 | |
2151 | prefix_len += 2; |
2152 | text += 2; |
2153 | } |
2154 | |
2155 | return prefix_len; |
2156 | } |
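 | |
 | /* |
 |  * For example, parsing the text "\0013\001cfoo" (KERN_ERR followed by |
 |  * KERN_CONT) consumes both two-byte prefixes: *level becomes 3 (assuming |
 |  * it started as LOGLEVEL_DEFAULT), LOG_CONT is OR'd into *flags, and 4 |
 |  * is returned so that the caller can skip to the "foo" payload. |
 |  */ |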
2157 | |
2158 | __printf(5, 0) |
2159 | static u16 printk_sprint(char *text, u16 size, int facility, |
2160 | enum printk_info_flags *flags, const char *fmt, |
2161 | va_list args) |
2162 | { |
2163 | u16 text_len; |
2164 | |
2165 | text_len = vscnprintf(text, size, fmt, args); |
2166 | |
2167 | /* Mark and strip a trailing newline. */ |
2168 | if (text_len && text[text_len - 1] == '\n') { |
2169 | text_len--; |
2170 | *flags |= LOG_NEWLINE; |
2171 | } |
2172 | |
2173 | /* Strip log level and control flags. */ |
2174 | if (facility == 0) { |
2175 | u16 prefix_len; |
2176 | |
2177 | prefix_len = printk_parse_prefix(text, NULL, NULL); |
2178 | if (prefix_len) { |
2179 | text_len -= prefix_len; |
2180 | memmove(text, text + prefix_len, text_len); |
2181 | } |
2182 | } |
2183 | |
2184 | trace_console(text, text_len); |
2185 | |
2186 | return text_len; |
2187 | } |
2188 | |
2189 | __printf(4, 0) |
2190 | int vprintk_store(int facility, int level, |
2191 | const struct dev_printk_info *dev_info, |
2192 | const char *fmt, va_list args) |
2193 | { |
2194 | struct prb_reserved_entry e; |
2195 | enum printk_info_flags flags = 0; |
2196 | struct printk_record r; |
2197 | unsigned long irqflags; |
2198 | u16 trunc_msg_len = 0; |
2199 | char prefix_buf[8]; |
2200 | u8 *recursion_ptr; |
2201 | u16 reserve_size; |
2202 | va_list args2; |
2203 | u32 caller_id; |
2204 | u16 text_len; |
2205 | int ret = 0; |
2206 | u64 ts_nsec; |
2207 | |
2208 | if (!printk_enter_irqsave(recursion_ptr, irqflags)) |
2209 | return 0; |
2210 | |
2211 | /* |
2212 | * Since the duration of printk() can vary depending on the message |
2213 | * and state of the ringbuffer, grab the timestamp now so that it is |
2214 | * close to the call of printk(). This provides a more deterministic |
2215 | * timestamp with respect to the caller. |
2216 | */ |
2217 | ts_nsec = local_clock(); |
2218 | |
2219 | caller_id = printk_caller_id(); |
2220 | |
2221 | /* |
2222 | * The sprintf needs to come first since the syslog prefix might be |
2223 | * passed in as a parameter. An extra byte must be reserved so that |
2224 | * later the vscnprintf() into the reserved buffer has room for the |
2225 | * terminating '\0', which is not counted by vsnprintf(). |
2226 | */ |
2227 | va_copy(args2, args); |
2228 | reserve_size = vsnprintf(&prefix_buf[0], sizeof(prefix_buf), fmt, args2) + 1; |
2229 | va_end(args2); |
2230 | |
2231 | if (reserve_size > PRINTKRB_RECORD_MAX) |
2232 | reserve_size = PRINTKRB_RECORD_MAX; |
2233 | |
2234 | /* Extract log level or control flags. */ |
2235 | if (facility == 0) |
2236 | printk_parse_prefix(&prefix_buf[0], &level, &flags); |
2237 | |
2238 | if (level == LOGLEVEL_DEFAULT) |
2239 | level = default_message_loglevel; |
2240 | |
2241 | if (dev_info) |
2242 | flags |= LOG_NEWLINE; |
2243 | |
2244 | if (flags & LOG_CONT) { |
2245 | prb_rec_init_wr(&r, reserve_size); |
2246 | if (prb_reserve_in_last(&e, prb, &r, caller_id, PRINTKRB_RECORD_MAX)) { |
2247 | text_len = printk_sprint(&r.text_buf[r.info->text_len], reserve_size, |
2248 | facility, &flags, fmt, args); |
2249 | r.info->text_len += text_len; |
2250 | |
2251 | if (flags & LOG_NEWLINE) { |
2252 | r.info->flags |= LOG_NEWLINE; |
2253 | prb_final_commit(&e); |
2254 | } else { |
2255 | prb_commit(&e); |
2256 | } |
2257 | |
2258 | ret = text_len; |
2259 | goto out; |
2260 | } |
2261 | } |
2262 | |
2263 | /* |
2264 | * Explicitly initialize the record before every prb_reserve() call. |
2265 | * prb_reserve_in_last() and prb_reserve() purposely invalidate the |
2266 | * structure when they fail. |
2267 | */ |
2268 | prb_rec_init_wr(&r, reserve_size); |
2269 | if (!prb_reserve(&e, prb, &r)) { |
2270 | /* truncate the message if it is too long for empty buffer */ |
2271 | truncate_msg(&reserve_size, &trunc_msg_len); |
2272 | |
2273 | prb_rec_init_wr(&r, reserve_size + trunc_msg_len); |
2274 | if (!prb_reserve(&e, prb, &r)) |
2275 | goto out; |
2276 | } |
2277 | |
2278 | /* fill message */ |
2279 | text_len = printk_sprint(&r.text_buf[0], reserve_size, facility, &flags, fmt, args); |
2280 | if (trunc_msg_len) |
2281 | memcpy(&r.text_buf[text_len], trunc_msg, trunc_msg_len); |
2282 | r.info->text_len = text_len + trunc_msg_len; |
2283 | r.info->facility = facility; |
2284 | r.info->level = level & 7; |
2285 | r.info->flags = flags & 0x1f; |
2286 | r.info->ts_nsec = ts_nsec; |
2287 | r.info->caller_id = caller_id; |
2288 | if (dev_info) |
2289 | memcpy(&r.info->dev_info, dev_info, sizeof(r.info->dev_info)); |
2290 | |
2291 | /* A message without a trailing newline can be continued. */ |
2292 | if (!(flags & LOG_NEWLINE)) |
2293 | prb_commit(&e); |
2294 | else |
2295 | prb_final_commit(&e); |
2296 | |
2297 | ret = text_len + trunc_msg_len; |
2298 | out: |
2299 | printk_exit_irqrestore(recursion_ptr, irqflags); |
2300 | return ret; |
2301 | } |
2302 | |
2303 | asmlinkage int vprintk_emit(int facility, int level, |
2304 | const struct dev_printk_info *dev_info, |
2305 | const char *fmt, va_list args) |
2306 | { |
2307 | int printed_len; |
2308 | bool in_sched = false; |
2309 | |
2310 | /* Suppress unimportant messages after panic happens */ |
2311 | if (unlikely(suppress_printk)) |
2312 | return 0; |
2313 | |
2314 | /* |
2315 | * The messages on the panic CPU are the most important. If |
2316 | * non-panic CPUs are generating any messages, they will be |
2317 | * silently dropped. |
2318 | */ |
2319 | if (other_cpu_in_panic()) |
2320 | return 0; |
2321 | |
2322 | if (level == LOGLEVEL_SCHED) { |
2323 | level = LOGLEVEL_DEFAULT; |
2324 | in_sched = true; |
2325 | } |
2326 | |
2327 | printk_delay(level); |
2328 | |
2329 | printed_len = vprintk_store(facility, level, dev_info, fmt, args); |
2330 | |
2331 | /* If called from the scheduler, we can not call up(). */ |
2332 | if (!in_sched) { |
2333 | /* |
2334 | * The caller may be holding system-critical or |
2335 | * timing-sensitive locks. Disable preemption during |
2336 | * printing of all remaining records to all consoles so that |
2337 | * this context can return as soon as possible. Hopefully |
2338 | * another printk() caller will take over the printing. |
2339 | */ |
2340 | preempt_disable(); |
2341 | /* |
2342 | * Try to acquire and then immediately release the console |
2343 | * semaphore. The release will print out buffers. With the |
2344 | * spinning variant, this context tries to take over the |
2345 | * printing from another printing context. |
2346 | */ |
2347 | if (console_trylock_spinning()) |
2348 | console_unlock(); |
2349 | preempt_enable(); |
2350 | } |
2351 | |
2352 | if (in_sched) |
2353 | defer_console_output(); |
2354 | else |
2355 | wake_up_klogd(); |
2356 | |
2357 | return printed_len; |
2358 | } |
2359 | EXPORT_SYMBOL(vprintk_emit); |
2360 | |
2361 | int vprintk_default(const char *fmt, va_list args) |
2362 | { |
2363 | return vprintk_emit(0, LOGLEVEL_DEFAULT, NULL, fmt, args); |
2364 | } |
2365 | EXPORT_SYMBOL_GPL(vprintk_default); |
2366 | |
2367 | asmlinkage __visible int _printk(const char *fmt, ...) |
2368 | { |
2369 | va_list args; |
2370 | int r; |
2371 | |
2372 | va_start(args, fmt); |
2373 | r = vprintk(fmt, args); |
2374 | va_end(args); |
2375 | |
2376 | return r; |
2377 | } |
2378 | EXPORT_SYMBOL(_printk); |
2379 | |
2380 | static bool pr_flush(int timeout_ms, bool reset_on_progress); |
2381 | static bool __pr_flush(struct console *con, int timeout_ms, bool reset_on_progress); |
2382 | |
2383 | #else /* CONFIG_PRINTK */ |
2384 | |
2385 | #define printk_time false |
2386 | |
2387 | #define prb_read_valid(rb, seq, r) false |
2388 | #define prb_first_valid_seq(rb) 0 |
2389 | #define prb_next_seq(rb) 0 |
2390 | |
2391 | static u64 syslog_seq; |
2392 | |
2393 | static bool pr_flush(int timeout_ms, bool reset_on_progress) { return true; } |
2394 | static bool __pr_flush(struct console *con, int timeout_ms, bool reset_on_progress) { return true; } |
2395 | |
2396 | #endif /* CONFIG_PRINTK */ |
2397 | |
2398 | #ifdef CONFIG_EARLY_PRINTK |
2399 | struct console *early_console; |
2400 | |
2401 | asmlinkage __visible void early_printk(const char *fmt, ...) |
2402 | { |
2403 | va_list ap; |
2404 | char buf[512]; |
2405 | int n; |
2406 | |
2407 | if (!early_console) |
2408 | return; |
2409 | |
2410 | va_start(ap, fmt); |
2411 | n = vscnprintf(buf, sizeof(buf), fmt, ap); |
2412 | va_end(ap); |
2413 | |
2414 | early_console->write(early_console, buf, n); |
2415 | } |
2416 | #endif |
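 | |
 | /* |
 |  * @early_console is normally installed by architecture code in response |
 |  * to an "earlyprintk=" boot parameter, e.g. on x86: |
 |  * |
 |  *	earlyprintk=serial,ttyS0,115200 |
 |  * |
 |  * after which early_printk() output is visible before any real console |
 |  * has been registered. |
 |  */ |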
2417 | |
2418 | static void set_user_specified(struct console_cmdline *c, bool user_specified) |
2419 | { |
2420 | if (!user_specified) |
2421 | return; |
2422 | |
2423 | /* |
2424 | * The @c console was defined by the user on the command line. Do not |
2425 | * clear the flag when the same console is also added via SPCR or DT. |
2426 | */ |
2427 | c->user_specified = true; |
2428 | /* At least one console defined by the user on the command line. */ |
2429 | console_set_on_cmdline = 1; |
2430 | } |
2431 | |
2432 | static int __add_preferred_console(const char *name, const short idx, char *options, |
2433 | char *brl_options, bool user_specified) |
2434 | { |
2435 | struct console_cmdline *c; |
2436 | int i; |
2437 | |
2438 | /* |
2439 | * We use a signed short index for struct console for device drivers to |
2440 | * indicate a not yet assigned index or port. However, a negative index |
2441 | * value is not valid for a preferred console. |
2442 | */ |
2443 | if (idx < 0) |
2444 | return -EINVAL; |
2445 | |
2446 | /* |
2447 | * See if this tty is not yet registered, and |
2448 | * if we have a slot free. |
2449 | */ |
2450 | for (i = 0, c = console_cmdline; |
2451 | i < MAX_CMDLINECONSOLES && c->name[0]; |
2452 | i++, c++) { |
2453 | if (strcmp(c->name, name) == 0 && c->index == idx) { |
2454 | if (!brl_options) |
2455 | preferred_console = i; |
2456 | set_user_specified(c, user_specified); |
2457 | return 0; |
2458 | } |
2459 | } |
2460 | if (i == MAX_CMDLINECONSOLES) |
2461 | return -E2BIG; |
2462 | if (!brl_options) |
2463 | preferred_console = i; |
2464 | strscpy(c->name, name, sizeof(c->name)); |
2465 | c->options = options; |
2466 | set_user_specified(c, user_specified); |
2467 | braille_set_options(c, brl_options); |
2468 | |
2469 | c->index = idx; |
2470 | return 0; |
2471 | } |
2472 | |
2473 | static int __init console_msg_format_setup(char *str) |
2474 | { |
2475 | if (!strcmp(str, "syslog")) |
2476 | console_msg_format = MSG_FORMAT_SYSLOG; |
2477 | if (!strcmp(str, "default")) |
2478 | console_msg_format = MSG_FORMAT_DEFAULT; |
2479 | return 1; |
2480 | } |
2481 | __setup("console_msg_format=", console_msg_format_setup); |
2482 | |
2483 | /* |
2484 | * Set up a console. Called via do_early_param() in init/main.c |
2485 | * for each "console=" parameter in the boot command line. |
2486 | */ |
2487 | static int __init console_setup(char *str) |
2488 | { |
2489 | char buf[sizeof(console_cmdline[0].name) + 4]; /* 4 for "ttyS" */ |
2490 | char *s, *options, *brl_options = NULL; |
2491 | int idx; |
2492 | |
2493 | /* |
2494 | * console="" or console=null have been suggested as a way to |
2495 | * disable console output. Use ttynull that has been created |
2496 | * for exactly this purpose. |
2497 | */ |
2498 | if (str[0] == 0 || strcmp(str, "null") == 0) { |
2499 | __add_preferred_console("ttynull", 0, NULL, NULL, true); |
2500 | return 1; |
2501 | } |
2502 | |
2503 | if (_braille_console_setup(&str, &brl_options)) |
2504 | return 1; |
2505 | |
2506 | /* |
2507 | * Decode str into name, index, options. |
2508 | */ |
2509 | if (str[0] >= '0' && str[0] <= '9') { |
2510 | strcpy(buf, "ttyS"); |
2511 | strncpy(buf + 4, str, sizeof(buf) - 5); |
2512 | } else { |
2513 | strncpy(buf, str, sizeof(buf) - 1); |
2514 | } |
2515 | buf[sizeof(buf) - 1] = 0; |
2516 | options = strchr(str, ','); |
2517 | if (options) |
2518 | *(options++) = 0; |
2519 | #ifdef __sparc__ |
2520 | if (!strcmp(str, "ttya")) |
2521 | strcpy(buf, "ttyS0"); |
2522 | if (!strcmp(str, "ttyb")) |
2523 | strcpy(buf, "ttyS1"); |
2524 | #endif |
2525 | for (s = buf; *s; s++) |
2526 | if (isdigit(*s) || *s == ',') |
2527 | break; |
2528 | idx = simple_strtoul(s, NULL, 10); |
2529 | *s = 0; |
2530 | |
2531 | __add_preferred_console(buf, idx, options, brl_options, true); |
2532 | return 1; |
2533 | } |
2534 | __setup("console=", console_setup); |
2535 | |
2536 | /** |
2537 | * add_preferred_console - add a device to the list of preferred consoles. |
2538 | * @name: device name |
2539 | * @idx: device index |
2540 | * @options: options for this console |
2541 | * |
2542 | * The last preferred console added will be used for kernel messages |
2543 | * and stdin/out/err for init. Normally this is used by console_setup |
2544 | * above to handle user-supplied console arguments; however it can also |
2545 | * be used by arch-specific code either to override the user or more |
2546 | * commonly to provide a default console (ie from PROM variables) when |
2547 | * the user has not supplied one. |
2548 | */ |
2549 | int add_preferred_console(const char *name, const short idx, char *options) |
2550 | { |
2551 | return __add_preferred_console(name, idx, options, NULL, false); |
2552 | } |
2553 | |
2554 | bool console_suspend_enabled = true; |
2555 | EXPORT_SYMBOL(console_suspend_enabled); |
2556 | |
2557 | static int __init console_suspend_disable(char *str) |
2558 | { |
2559 | console_suspend_enabled = false; |
2560 | return 1; |
2561 | } |
2562 | __setup("no_console_suspend", console_suspend_disable); |
2563 | module_param_named(console_suspend, console_suspend_enabled, |
2564 | bool, S_IRUGO | S_IWUSR); |
2565 | MODULE_PARM_DESC(console_suspend, "suspend console during suspend" |
2566 | " and hibernate operations"); |
2567 | |
2568 | static bool printk_console_no_auto_verbose; |
2569 | |
2570 | void console_verbose(void) |
2571 | { |
2572 | if (console_loglevel && !printk_console_no_auto_verbose) |
2573 | console_loglevel = CONSOLE_LOGLEVEL_MOTORMOUTH; |
2574 | } |
2575 | EXPORT_SYMBOL_GPL(console_verbose); |
2576 | |
2577 | module_param_named(console_no_auto_verbose, printk_console_no_auto_verbose, bool, 0644); |
2578 | MODULE_PARM_DESC(console_no_auto_verbose, "Disable console loglevel raise to highest on oops/panic/etc"); |
2579 | |
2580 | /** |
2581 | * suspend_console - suspend the console subsystem |
2582 | * |
2583 | * This disables printk() while we go into suspend states |
2584 | */ |
2585 | void suspend_console(void) |
2586 | { |
2587 | struct console *con; |
2588 | |
2589 | if (!console_suspend_enabled) |
2590 | return; |
2591 | pr_info("Suspending console(s) (use no_console_suspend to debug)\n"); |
2592 | pr_flush(1000, true); |
2593 | |
2594 | console_list_lock(); |
2595 | for_each_console(con) |
2596 | console_srcu_write_flags(con, con->flags | CON_SUSPENDED); |
2597 | console_list_unlock(); |
2598 | |
2599 | /* |
2600 | * Ensure that all SRCU list walks have completed. All printing |
2601 | * contexts must be able to see that they are suspended so that it |
2602 | * is guaranteed that all printing has stopped when this function |
2603 | * completes. |
2604 | */ |
2605 | synchronize_srcu(&console_srcu); |
2606 | } |
2607 | |
2608 | void resume_console(void) |
2609 | { |
2610 | struct console *con; |
2611 | |
2612 | if (!console_suspend_enabled) |
2613 | return; |
2614 | |
2615 | console_list_lock(); |
2616 | for_each_console(con) |
2617 | console_srcu_write_flags(con, con->flags & ~CON_SUSPENDED); |
2618 | console_list_unlock(); |
2619 | |
2620 | /* |
2621 | * Ensure that all SRCU list walks have completed. All printing |
2622 | * contexts must be able to see they are no longer suspended so |
2623 | * that they are guaranteed to wake up and resume printing. |
2624 | */ |
2625 | synchronize_srcu(&console_srcu); |
2626 | |
2627 | pr_flush(1000, true); |
2628 | } |
2629 | |
2630 | /** |
2631 | * console_cpu_notify - print deferred console messages after CPU hotplug |
2632 | * @cpu: unused |
2633 | * |
2634 | * If printk() is called from a CPU that is not online yet, the messages |
2635 | * will be printed on the console only if there are CON_ANYTIME consoles. |
2636 | * This function is called when a new CPU comes online (or fails to come |
2637 | * up) or goes offline. |
2638 | */ |
2639 | static int console_cpu_notify(unsigned int cpu) |
2640 | { |
2641 | if (!cpuhp_tasks_frozen) { |
2642 | /* If trylock fails, someone else is doing the printing */ |
2643 | if (console_trylock()) |
2644 | console_unlock(); |
2645 | } |
2646 | return 0; |
2647 | } |
2648 | |
2649 | /** |
2650 | * console_lock - block the console subsystem from printing |
2651 | * |
2652 | * Acquires a lock which guarantees that no consoles will |
2653 | * be in or enter their write() callback. |
2654 | * |
2655 | * Can sleep, returns nothing. |
2656 | */ |
2657 | void console_lock(void) |
2658 | { |
2659 | might_sleep(); |
2660 | |
2661 | /* On panic, the console_lock must be left to the panic cpu. */ |
2662 | while (other_cpu_in_panic()) |
2663 | msleep(1000); |
2664 | |
2665 | down_console_sem(); |
2666 | console_locked = 1; |
2667 | console_may_schedule = 1; |
2668 | } |
2669 | EXPORT_SYMBOL(console_lock); |
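 | |
 | /* |
 |  * A minimal sketch of the usual pattern for code that must keep all |
 |  * console write() callbacks quiesced while it changes console state |
 |  * (the VT code, for example, follows this shape): |
 |  * |
 |  *	console_lock(); |
 |  *	...modify console state; no write() callback can run here... |
 |  *	console_unlock();	(also flushes records buffered meanwhile) |
 |  */ |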
2670 | |
2671 | /** |
2672 | * console_trylock - try to block the console subsystem from printing |
2673 | * |
2674 | * Try to acquire a lock which guarantees that no consoles will |
2675 | * be in or enter their write() callback. |
2676 | * |
2677 | * returns 1 on success, and 0 on failure to acquire the lock. |
2678 | */ |
2679 | int console_trylock(void) |
2680 | { |
2681 | /* On panic, the console_lock must be left to the panic cpu. */ |
2682 | if (other_cpu_in_panic()) |
2683 | return 0; |
2684 | if (down_trylock_console_sem()) |
2685 | return 0; |
2686 | console_locked = 1; |
2687 | console_may_schedule = 0; |
2688 | return 1; |
2689 | } |
2690 | EXPORT_SYMBOL(console_trylock); |
2691 | |
2692 | int is_console_locked(void) |
2693 | { |
2694 | return console_locked; |
2695 | } |
2696 | EXPORT_SYMBOL(is_console_locked); |
2697 | |
2698 | /* |
2699 | * Check if the given console is currently capable and allowed to print |
2700 | * records. |
2701 | * |
2702 | * Requires the console_srcu_read_lock. |
2703 | */ |
2704 | static inline bool console_is_usable(struct console *con) |
2705 | { |
2706 | short flags = console_srcu_read_flags(con); |
2707 | |
2708 | if (!(flags & CON_ENABLED)) |
2709 | return false; |
2710 | |
2711 | if ((flags & CON_SUSPENDED)) |
2712 | return false; |
2713 | |
2714 | if (!con->write) |
2715 | return false; |
2716 | |
2717 | /* |
2718 | * Console drivers may assume that per-cpu resources have been |
2719 | * allocated. So unless they're explicitly marked as being able to |
2720 | * cope (CON_ANYTIME) don't call them until this CPU is officially up. |
2721 | */ |
2722 | if (!cpu_online(raw_smp_processor_id()) && !(flags & CON_ANYTIME)) |
2723 | return false; |
2724 | |
2725 | return true; |
2726 | } |
2727 | |
2728 | static void __console_unlock(void) |
2729 | { |
2730 | console_locked = 0; |
2731 | up_console_sem(); |
2732 | } |
2733 | |
2734 | #ifdef CONFIG_PRINTK |
2735 | |
2736 | /* |
2737 | * Prepend the message in @pmsg->pbufs->outbuf with a "dropped message". This |
2738 | * is achieved by shifting the existing message over and inserting the dropped |
2739 | * message. |
2740 | * |
2741 | * @pmsg is the printk message to prepend. |
2742 | * |
2743 | * @dropped is the dropped count to report in the dropped message. |
2744 | * |
2745 | * If the message text in @pmsg->pbufs->outbuf does not have enough space for |
2746 | * the dropped message, the message text will be sufficiently truncated. |
2747 | * |
2748 | * If @pmsg->pbufs->outbuf is modified, @pmsg->outbuf_len is updated. |
2749 | */ |
2750 | void console_prepend_dropped(struct printk_message *pmsg, unsigned long dropped) |
2751 | { |
2752 | struct printk_buffers *pbufs = pmsg->pbufs; |
2753 | const size_t scratchbuf_sz = sizeof(pbufs->scratchbuf); |
2754 | const size_t outbuf_sz = sizeof(pbufs->outbuf); |
2755 | char *scratchbuf = &pbufs->scratchbuf[0]; |
2756 | char *outbuf = &pbufs->outbuf[0]; |
2757 | size_t len; |
2758 | |
2759 | len = scnprintf(scratchbuf, scratchbuf_sz, |
2760 | "** %lu printk messages dropped **\n", dropped); |
2761 | |
2762 | /* |
2763 | * Make sure outbuf is sufficiently large before prepending. |
2764 | * Keep at least the prefix when the message must be truncated. |
2765 | * It is a rather theoretical problem when someone tries to |
2766 | * use a minimalist buffer. |
2767 | */ |
2768 | if (WARN_ON_ONCE(len + PRINTK_PREFIX_MAX >= outbuf_sz)) |
2769 | return; |
2770 | |
2771 | if (pmsg->outbuf_len + len >= outbuf_sz) { |
2772 | /* Truncate the message, but keep it terminated. */ |
2773 | pmsg->outbuf_len = outbuf_sz - (len + 1); |
2774 | outbuf[pmsg->outbuf_len] = 0; |
2775 | } |
2776 | |
2777 | memmove(outbuf + len, outbuf, pmsg->outbuf_len + 1); |
2778 | memcpy(outbuf, scratchbuf, len); |
2779 | pmsg->outbuf_len += len; |
2780 | } |
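 | |
 | /* |
 |  * For example, with @dropped == 5 and an output buffer holding "foo\n", |
 |  * the buffer afterwards contains: |
 |  * |
 |  *	** 5 printk messages dropped ** |
 |  *	foo |
 |  * |
 |  * i.e. the notice is shifted in ahead of the original text. |
 |  */ |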
2781 | |
2782 | /* |
2783 | * Read and format the specified record (or a later record if the specified |
2784 | * record is not available). |
2785 | * |
2786 | * @pmsg will contain the formatted result. @pmsg->pbufs must point to a |
2787 | * struct printk_buffers. |
2788 | * |
2789 | * @seq is the record to read and format. If it is not available, the next |
2790 | * valid record is read. |
2791 | * |
2792 | * @is_extended specifies if the message should be formatted for extended |
2793 | * console output. |
2794 | * |
2795 | * @may_suppress specifies if records may be skipped based on loglevel. |
2796 | * |
2797 | * Returns false if no record is available. Otherwise true and all fields |
2798 | * of @pmsg are valid. (See the documentation of struct printk_message |
2799 | * for information about the @pmsg fields.) |
2800 | */ |
2801 | bool printk_get_next_message(struct printk_message *pmsg, u64 seq, |
2802 | bool is_extended, bool may_suppress) |
2803 | { |
2804 | struct printk_buffers *pbufs = pmsg->pbufs; |
2805 | const size_t scratchbuf_sz = sizeof(pbufs->scratchbuf); |
2806 | const size_t outbuf_sz = sizeof(pbufs->outbuf); |
2807 | char *scratchbuf = &pbufs->scratchbuf[0]; |
2808 | char *outbuf = &pbufs->outbuf[0]; |
2809 | struct printk_info info; |
2810 | struct printk_record r; |
2811 | size_t len = 0; |
2812 | |
2813 | /* |
2814 | * Formatting extended messages requires a separate buffer, so use the |
2815 | * scratch buffer to read in the ringbuffer text. |
2816 | * |
2817 | * Formatting normal messages is done in-place, so read the ringbuffer |
2818 | * text directly into the output buffer. |
2819 | */ |
2820 | if (is_extended) |
2821 | prb_rec_init_rd(&r, &info, scratchbuf, scratchbuf_sz); |
2822 | else |
2823 | prb_rec_init_rd(&r, &info, outbuf, outbuf_sz); |
2824 | |
2825 | if (!prb_read_valid(prb, seq, &r)) |
2826 | return false; |
2827 | |
2828 | pmsg->seq = r.info->seq; |
2829 | pmsg->dropped = r.info->seq - seq; |
2830 | |
2831 | /* Skip record that has level above the console loglevel. */ |
2832 | if (may_suppress && suppress_message_printing(r.info->level)) |
2833 | goto out; |
2834 | |
2835 | if (is_extended) { |
2836 | len = info_print_ext_header(outbuf, outbuf_sz, r.info); |
2837 | len += msg_print_ext_body(outbuf + len, outbuf_sz - len, |
2838 | &r.text_buf[0], r.info->text_len, &r.info->dev_info); |
2839 | } else { |
2840 | len = record_print_text(&r, console_msg_format & MSG_FORMAT_SYSLOG, printk_time); |
2841 | } |
2842 | out: |
2843 | pmsg->outbuf_len = len; |
2844 | return true; |
2845 | } |
2846 | |
2847 | /* |
2848 | * Used as the printk buffers for non-panic, serialized console printing. |
2849 | * This is for legacy (!CON_NBCON) as well as all boot (CON_BOOT) consoles. |
2850 | * Its usage requires the console_lock held. |
2851 | */ |
2852 | struct printk_buffers printk_shared_pbufs; |
2853 | |
2854 | /* |
2855 | * Print one record for the given console. The record printed is the |
2856 | * next record available for the given console. |
2857 | * |
2858 | * @handover will be set to true if a printk waiter has taken over the |
2859 | * console_lock, in which case the caller is no longer holding both the |
2860 | * console_lock and the SRCU read lock. Otherwise it is set to false. |
2861 | * |
2862 | * @cookie is the cookie from the SRCU read lock. |
2863 | * |
2864 | * Returns false if the given console has no next record to print, otherwise |
2865 | * true. |
2866 | * |
2867 | * Requires the console_lock and the SRCU read lock. |
2868 | */ |
2869 | static bool console_emit_next_record(struct console *con, bool *handover, int cookie) |
2870 | { |
2871 | bool is_extended = console_srcu_read_flags(con) & CON_EXTENDED; |
2872 | char *outbuf = &printk_shared_pbufs.outbuf[0]; |
2873 | struct printk_message pmsg = { |
2874 | .pbufs = &printk_shared_pbufs, |
2875 | }; |
2876 | unsigned long flags; |
2877 | |
2878 | *handover = false; |
2879 | |
2880 | if (!printk_get_next_message(&pmsg, con->seq, is_extended, true)) |
2881 | return false; |
2882 | |
2883 | con->dropped += pmsg.dropped; |
2884 | |
2885 | /* Skip messages of formatted length 0. */ |
2886 | if (pmsg.outbuf_len == 0) { |
2887 | con->seq = pmsg.seq + 1; |
2888 | goto skip; |
2889 | } |
2890 | |
2891 | if (con->dropped && !is_extended) { |
2892 | console_prepend_dropped(&pmsg, con->dropped); |
2893 | con->dropped = 0; |
2894 | } |
2895 | |
2896 | /* |
2897 | * While actively printing out messages, if another printk() |
2898 | * were to occur on another CPU, it may wait for this one to |
2899 | * finish. This task cannot be preempted if there is a |
2900 | * waiter waiting to take over. |
2901 | * |
2902 | * Interrupts are disabled because the hand over to a waiter |
2903 | * must not be interrupted until the hand over is completed |
2904 | * (@console_waiter is cleared). |
2905 | */ |
2906 | printk_safe_enter_irqsave(flags); |
2907 | console_lock_spinning_enable(); |
2908 | |
2909 | /* Do not trace print latency. */ |
2910 | stop_critical_timings(); |
2911 | |
2912 | /* Write everything out to the hardware. */ |
2913 | con->write(con, outbuf, pmsg.outbuf_len); |
2914 | |
2915 | start_critical_timings(); |
2916 | |
2917 | con->seq = pmsg.seq + 1; |
2918 | |
2919 | *handover = console_lock_spinning_disable_and_check(cookie); |
2920 | printk_safe_exit_irqrestore(flags); |
2921 | skip: |
2922 | return true; |
2923 | } |
2924 | |
2925 | #else |
2926 | |
2927 | static bool console_emit_next_record(struct console *con, bool *handover, int cookie) |
2928 | { |
2929 | *handover = false; |
2930 | return false; |
2931 | } |
2932 | |
2933 | #endif /* CONFIG_PRINTK */ |
2934 | |
2935 | /* |
2936 | * Print out all remaining records to all consoles. |
2937 | * |
2938 | * @do_cond_resched is set by the caller. It can be true only in schedulable |
2939 | * context. |
2940 | * |
2941 | * @next_seq is set to the sequence number after the last available record. |
2942 | * The value is valid only when this function returns true. It means that all |
2943 | * usable consoles are completely flushed. |
2944 | * |
2945 | * @handover will be set to true if a printk waiter has taken over the |
2946 | * console_lock, in which case the caller is no longer holding the |
2947 | * console_lock. Otherwise it is set to false. |
2948 | * |
2949 | * Returns true when there was at least one usable console and all messages |
2950 | * were flushed to all usable consoles. A returned false informs the caller |
2951 | * that everything was not flushed (either there were no usable consoles or |
2952 | * another context has taken over printing or it is a panic situation and this |
2953 | * is not the panic CPU). Regardless of the reason, the caller should assume it |
2954 | * is not useful to immediately try again. |
2955 | * |
2956 | * Requires the console_lock. |
2957 | */ |
2958 | static bool console_flush_all(bool do_cond_resched, u64 *next_seq, bool *handover) |
2959 | { |
2960 | bool any_usable = false; |
2961 | struct console *con; |
2962 | bool any_progress; |
2963 | int cookie; |
2964 | |
2965 | *next_seq = 0; |
2966 | *handover = false; |
2967 | |
2968 | do { |
2969 | any_progress = false; |
2970 | |
2971 | cookie = console_srcu_read_lock(); |
2972 | for_each_console_srcu(con) { |
2973 | bool progress; |
2974 | |
2975 | if (!console_is_usable(con)) |
2976 | continue; |
2977 | any_usable = true; |
2978 | |
2979 | progress = console_emit_next_record(con, handover, cookie); |
2980 | |
2981 | /* |
2982 | * If a handover has occurred, the SRCU read lock |
2983 | * is already released. |
2984 | */ |
2985 | if (*handover) |
2986 | return false; |
2987 | |
2988 | /* Track the next of the highest seq flushed. */ |
2989 | if (con->seq > *next_seq) |
2990 | *next_seq = con->seq; |
2991 | |
2992 | if (!progress) |
2993 | continue; |
2994 | any_progress = true; |
2995 | |
2996 | /* Allow panic_cpu to take over the consoles safely. */ |
2997 | if (other_cpu_in_panic()) |
2998 | goto abandon; |
2999 | |
3000 | if (do_cond_resched) |
3001 | cond_resched(); |
3002 | } |
3003 | console_srcu_read_unlock(cookie); |
3004 | } while (any_progress); |
3005 | |
3006 | return any_usable; |
3007 | |
3008 | abandon: |
3009 | console_srcu_read_unlock(cookie); |
3010 | return false; |
3011 | } |
3012 | |
3013 | /** |
3014 | * console_unlock - unblock the console subsystem from printing |
3015 | * |
3016 | * Releases the console_lock which the caller holds to block printing of |
3017 | * the console subsystem. |
3018 | * |
3019 | * While the console_lock was held, console output may have been buffered |
3020 | * by printk(). If this is the case, console_unlock() emits |
3021 | * the output prior to releasing the lock. |
3022 | * |
3023 | * console_unlock() may be called from any context. |
3024 | */ |
3025 | void console_unlock(void) |
3026 | { |
3027 | bool do_cond_resched; |
3028 | bool handover; |
3029 | bool flushed; |
3030 | u64 next_seq; |
3031 | |
3032 | /* |
3033 | * Console drivers are called with interrupts disabled, so |
3034 | * @console_may_schedule should be cleared before; however, we may |
3035 | * end up dumping a lot of lines, for example, if called from |
3036 | * console registration path, and should invoke cond_resched() |
3037 | * between lines if allowable. Not doing so can cause a very long |
3038 | * scheduling stall on a slow console leading to RCU stall and |
3039 | * softlockup warnings which exacerbate the issue with more |
3040 | * messages practically incapacitating the system. Therefore, create |
3041 | * a local to use for the printing loop. |
3042 | */ |
3043 | do_cond_resched = console_may_schedule; |
3044 | |
3045 | do { |
3046 | console_may_schedule = 0; |
3047 | |
3048 | flushed = console_flush_all(do_cond_resched, &next_seq, &handover); |
3049 | if (!handover) |
3050 | __console_unlock(); |
3051 | |
3052 | /* |
3053 | * Abort if there was a failure to flush all messages to all |
3054 | * usable consoles. Either it is not possible to flush (in |
3055 | * which case it would be an infinite loop of retrying) or |
3056 | * another context has taken over printing. |
3057 | */ |
3058 | if (!flushed) |
3059 | break; |
3060 | |
3061 | /* |
3062 | * Some context may have added new records after |
3063 | * console_flush_all() but before unlocking the console. |
3064 | * Re-check if there is a new record to flush. If the trylock |
3065 | * fails, another context is already handling the printing. |
3066 | */ |
3067 | } while (prb_read_valid(prb, next_seq, NULL) && console_trylock()); |
3068 | } |
3069 | EXPORT_SYMBOL(console_unlock); |
3070 | |
3071 | /** |
3072 | * console_conditional_schedule - yield the CPU if required |
3073 | * |
3074 | * If the console code is currently allowed to sleep, and |
3075 | * if this CPU should yield the CPU to another task, do |
3076 | * so here. |
3077 | * |
3078 | * Must be called while holding the console_lock. |
3079 | */ |
3080 | void __sched console_conditional_schedule(void) |
3081 | { |
3082 | if (console_may_schedule) |
3083 | cond_resched(); |
3084 | } |
3085 | EXPORT_SYMBOL(console_conditional_schedule); |
3086 | |
3087 | void console_unblank(void) |
3088 | { |
3089 | bool found_unblank = false; |
3090 | struct console *c; |
3091 | int cookie; |
3092 | |
3093 | /* |
3094 | * First check if there are any consoles implementing the unblank() |
3095 | * callback. If not, there is no reason to continue and take the |
3096 | * console lock, which in particular can be dangerous if |
3097 | * @oops_in_progress is set. |
3098 | */ |
3099 | cookie = console_srcu_read_lock(); |
3100 | for_each_console_srcu(c) { |
3101 | if ((console_srcu_read_flags(c) & CON_ENABLED) && c->unblank) { |
3102 | found_unblank = true; |
3103 | break; |
3104 | } |
3105 | } |
3106 | console_srcu_read_unlock(cookie); |
3107 | if (!found_unblank) |
3108 | return; |
3109 | |
3110 | /* |
3111 | * Stop console printing because the unblank() callback may |
3112 | * assume the console is not within its write() callback. |
3113 | * |
3114 | * If @oops_in_progress is set, this may be an atomic context. |
3115 | * In that case, attempt a trylock as best-effort. |
3116 | */ |
3117 | if (oops_in_progress) { |
3118 | /* Semaphores are not NMI-safe. */ |
3119 | if (in_nmi()) |
3120 | return; |
3121 | |
3122 | /* |
3123 | * Attempting to trylock the console lock can deadlock |
3124 | * if another CPU was stopped while modifying the |
3125 | * semaphore. "Hope and pray" that this is not the |
3126 | * current situation. |
3127 | */ |
3128 | if (down_trylock_console_sem() != 0) |
3129 | return; |
3130 | } else |
3131 | console_lock(); |
3132 | |
3133 | console_locked = 1; |
3134 | console_may_schedule = 0; |
3135 | |
3136 | cookie = console_srcu_read_lock(); |
3137 | for_each_console_srcu(c) { |
3138 | if ((console_srcu_read_flags(c) & CON_ENABLED) && c->unblank) |
3139 | c->unblank(); |
3140 | } |
3141 | console_srcu_read_unlock(cookie); |
3142 | |
3143 | console_unlock(); |
3144 | |
3145 | if (!oops_in_progress) |
3146 | pr_flush(1000, true); |
3147 | } |
3148 | |
3149 | /** |
3150 | * console_flush_on_panic - flush console content on panic |
3151 | * @mode: flush all messages in buffer or just the pending ones |
3152 | * |
3153 | * Immediately output all pending messages no matter what. |
3154 | */ |
3155 | void console_flush_on_panic(enum con_flush_mode mode) |
3156 | { |
3157 | bool handover; |
3158 | u64 next_seq; |
3159 | |
3160 | /* |
3161 | * Ignore the console lock and flush out the messages. Attempting a |
3162 | * trylock would not be useful because: |
3163 | * |
3164 | * - if it is contended, it must be ignored anyway |
3165 | * - console_lock() and console_trylock() block and fail |
3166 | * respectively in panic for non-panic CPUs |
3167 | * - semaphores are not NMI-safe |
3168 | */ |
3169 | |
3170 | /* |
3171 | * If another context is holding the console lock, |
3172 | * @console_may_schedule might be set. Clear it so that |
3173 | * this context does not call cond_resched() while flushing. |
3174 | */ |
3175 | console_may_schedule = 0; |
3176 | |
3177 | if (mode == CONSOLE_REPLAY_ALL) { |
3178 | struct console *c; |
3179 | short flags; |
3180 | int cookie; |
3181 | u64 seq; |
3182 | |
3183 | seq = prb_first_valid_seq(prb); |
3184 | |
3185 | cookie = console_srcu_read_lock(); |
3186 | for_each_console_srcu(c) { |
3187 | flags = console_srcu_read_flags(c); |
3188 | |
3189 | if (flags & CON_NBCON) { |
3190 | nbcon_seq_force(c, seq); |
3191 | } else { |
3192 | /* |
3193 | * This is an unsynchronized assignment. On |
3194 | * panic legacy consoles are only best effort. |
3195 | */ |
3196 | c->seq = seq; |
3197 | } |
3198 | } |
3199 | console_srcu_read_unlock(cookie); |
3200 | } |
3201 | |
3202 | console_flush_all(false, &next_seq, &handover); |
3203 | } |
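 | |
 | /* |
 |  * panic(), for example, uses this as a last-resort flush: normally with |
 |  * CONSOLE_FLUSH_PENDING, or with CONSOLE_REPLAY_ALL when the panic_print |
 |  * option asks for the whole ring buffer to be replayed. |
 |  */ |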
3204 | |
3205 | /* |
3206 | * Return the console tty driver structure and its associated index |
3207 | */ |
3208 | struct tty_driver *console_device(int *index) |
3209 | { |
3210 | struct console *c; |
3211 | struct tty_driver *driver = NULL; |
3212 | int cookie; |
3213 | |
3214 | /* |
3215 | * Take console_lock to serialize device() callback with |
3216 | * other console operations. For example, fg_console is |
3217 | * modified under console_lock when switching vt. |
3218 | */ |
3219 | console_lock(); |
3220 | |
3221 | cookie = console_srcu_read_lock(); |
3222 | for_each_console_srcu(c) { |
3223 | if (!c->device) |
3224 | continue; |
3225 | driver = c->device(c, index); |
3226 | if (driver) |
3227 | break; |
3228 | } |
3229 | console_srcu_read_unlock(cookie); |
3230 | |
3231 | console_unlock(); |
3232 | return driver; |
3233 | } |
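 | |
 | /* |
 |  * This is, for example, how the tty layer resolves an open() of |
 |  * /dev/console: it asks console_device() which tty driver currently |
 |  * backs the console and at which index. |
 |  */ |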
3234 | |
3235 | /* |
3236 | * Prevent further output on the passed console device so that (for example) |
3237 | * serial drivers can disable console output before suspending a port, and can |
3238 | * re-enable output afterwards. |
3239 | */ |
3240 | void console_stop(struct console *console) |
3241 | { |
3242 | __pr_flush(console, 1000, true); |
3243 | console_list_lock(); |
3244 | console_srcu_write_flags(console, console->flags & ~CON_ENABLED); |
3245 | console_list_unlock(); |
3246 | |
3247 | /* |
3248 | * Ensure that all SRCU list walks have completed. All contexts must |
3249 | * be able to see that this console is disabled so that (for example) |
3250 | * the caller can suspend the port without risk of another context |
3251 | * using the port. |
3252 | */ |
3253 | synchronize_srcu(&console_srcu); |
3254 | } |
3255 | EXPORT_SYMBOL(console_stop); |
3256 | |
3257 | void console_start(struct console *console) |
3258 | { |
3259 | console_list_lock(); |
3260 | console_srcu_write_flags(console, console->flags | CON_ENABLED); |
3261 | console_list_unlock(); |
3262 | __pr_flush(console, 1000, true); |
3263 | } |
3264 | EXPORT_SYMBOL(console_start); |
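 | |
 | /* |
 |  * A sketch of the intended pairing in a serial driver's suspend path |
 |  * (the serial core follows this shape in uart_suspend_port() and |
 |  * uart_resume_port()): |
 |  * |
 |  *	console_stop(port->cons);	no further console output |
 |  *	...save state and power down the port... |
 |  *	...later, restore state on resume... |
 |  *	console_start(port->cons);	output may flow again |
 |  */ |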
3265 | |
3266 | static int __read_mostly keep_bootcon; |
3267 | |
3268 | static int __init keep_bootcon_setup(char *str) |
3269 | { |
3270 | keep_bootcon = 1; |
3271 | pr_info("debug: skip boot console de-registration.\n"); |
3272 | |
3273 | return 0; |
3274 | } |
3275 | |
3276 | early_param("keep_bootcon", keep_bootcon_setup); |
3277 | |
3278 | static int console_call_setup(struct console *newcon, char *options) |
3279 | { |
3280 | int err; |
3281 | |
3282 | if (!newcon->setup) |
3283 | return 0; |
3284 | |
3285 | /* Synchronize with possible boot console. */ |
3286 | console_lock(); |
3287 | err = newcon->setup(newcon, options); |
3288 | console_unlock(); |
3289 | |
3290 | return err; |
3291 | } |
3292 | |
3293 | /* |
3294 | * This is called by register_console() to try to match |
3295 | * the newly registered console with any of the ones selected |
3296 | * by either the command line or add_preferred_console() and |
3297 | * setup/enable it. |
3298 | * |
3299 | * Care needs to be taken with consoles that are statically |
3300 | * enabled, such as netconsole. |
3301 | */ |
3302 | static int try_enable_preferred_console(struct console *newcon, |
3303 | bool user_specified) |
3304 | { |
3305 | struct console_cmdline *c; |
3306 | int i, err; |
3307 | |
3308 | for (i = 0, c = console_cmdline; |
3309 | i < MAX_CMDLINECONSOLES && c->name[0]; |
3310 | i++, c++) { |
3311 | if (c->user_specified != user_specified) |
3312 | continue; |
3313 | if (!newcon->match || |
3314 | newcon->match(newcon, c->name, c->index, c->options) != 0) { |
3315 | /* default matching */ |
3316 | BUILD_BUG_ON(sizeof(c->name) != sizeof(newcon->name)); |
3317 | if (strcmp(c->name, newcon->name) != 0) |
3318 | continue; |
3319 | if (newcon->index >= 0 && |
3320 | newcon->index != c->index) |
3321 | continue; |
3322 | if (newcon->index < 0) |
3323 | newcon->index = c->index; |
3324 | |
3325 | if (_braille_register_console(newcon, c)) |
3326 | return 0; |
3327 | |
3328 | err = console_call_setup(newcon, c->options); |
3329 | if (err) |
3330 | return err; |
3331 | } |
3332 | newcon->flags |= CON_ENABLED; |
3333 | if (i == preferred_console) |
3334 | newcon->flags |= CON_CONSDEV; |
3335 | return 0; |
3336 | } |
3337 | |
3338 | /* |
3339 | * Some consoles, such as pstore and netconsole, can be enabled even |
3340 | * without matching. Accept the pre-enabled consoles only when match() |
3341 | * and setup() had a chance to be called. |
3342 | */ |
3343 | if (newcon->flags & CON_ENABLED && c->user_specified == user_specified) |
3344 | return 0; |
3345 | |
3346 | return -ENOENT; |
3347 | } |
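/*
 * Example (illustrative): booting with "console=ttyS0,115200n8" produces
 * a console_cmdline entry with name "ttyS", index 0 and options
 * "115200n8" (parsed by console_setup()). A UART driver registering a
 * console named "ttyS" with a matching index is then enabled here and,
 * if it was the selected preferred console, gains CON_CONSDEV.
 */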
3348 | |
3349 | /* Try to enable the console unconditionally */ |
3350 | static void try_enable_default_console(struct console *newcon) |
3351 | { |
3352 | if (newcon->index < 0) |
3353 | newcon->index = 0; |
3354 | |
3355 | if (console_call_setup(newcon, NULL) != 0) |
3356 | return; |
3357 | |
3358 | newcon->flags |= CON_ENABLED; |
3359 | |
3360 | if (newcon->device) |
3361 | newcon->flags |= CON_CONSDEV; |
3362 | } |
3363 | |
3364 | static void console_init_seq(struct console *newcon, bool bootcon_registered) |
3365 | { |
3366 | struct console *con; |
3367 | bool handover; |
3368 | |
3369 | if (newcon->flags & (CON_PRINTBUFFER | CON_BOOT)) { |
3370 | /* Get a consistent copy of @syslog_seq. */ |
3371 | mutex_lock(&syslog_lock); |
3372 | newcon->seq = syslog_seq; |
3373 | mutex_unlock(&syslog_lock); |
3374 | } else { |
3375 | /* Begin with next message added to ringbuffer. */ |
3376 | newcon->seq = prb_next_seq(prb); |
3377 | |
3378 | /* |
3379 | * If any enabled boot consoles are due to be unregistered |
3380 | * shortly, some may not be caught up and may be the same |
3381 | * device as @newcon. Since it is not known which boot console |
3382 | * is the same device, flush all consoles and, if necessary, |
3383 | * start with the message of the enabled boot console that is |
3384 | * the furthest behind. |
3385 | */ |
3386 | if (bootcon_registered && !keep_bootcon) { |
3387 | /* |
3388 | * Hold the console_lock to stop console printing and |
3389 | * guarantee safe access to console->seq. |
3390 | */ |
3391 | console_lock(); |
3392 | |
3393 | /* |
3394 | * Flush all consoles and set the console to start at |
3395 | * the next unprinted sequence number. |
3396 | */ |
3397 | if (!console_flush_all(true, &newcon->seq, &handover)) { |
3398 | /* |
3399 | * Flushing failed. Just choose the lowest |
3400 | * sequence of the enabled boot consoles. |
3401 | */ |
3402 | |
3403 | /* |
3404 | * If there was a handover, this context no |
3405 | * longer holds the console_lock. |
3406 | */ |
3407 | if (handover) |
3408 | console_lock(); |
3409 | |
3410 | newcon->seq = prb_next_seq(prb); |
3411 | for_each_console(con) { |
3412 | if ((con->flags & CON_BOOT) && |
3413 | (con->flags & CON_ENABLED) && |
3414 | con->seq < newcon->seq) { |
3415 | newcon->seq = con->seq; |
3416 | } |
3417 | } |
3418 | } |
3419 | |
3420 | console_unlock(); |
3421 | } |
3422 | } |
3423 | } |
3424 | |
3425 | #define console_first() \ |
3426 | hlist_entry(console_list.first, struct console, node) |
3427 | |
3428 | static int unregister_console_locked(struct console *console); |
3429 | |
3430 | /* |
3431 | * The console driver calls this routine during kernel initialization |
3432 | * to register the console printing procedure with printk() and to |
3433 | * print any messages that were printed by the kernel before the |
3434 | * console driver was initialized. |
3435 | * |
3436 | * This can happen pretty early during the boot process (because of |
3437 | * early_printk) - sometimes before setup_arch() completes - be careful |
3438 | * of what kernel features are used - they may not be initialised yet. |
3439 | * |
3440 | * There are two types of consoles - bootconsoles (early_printk) and |
3441 | * "real" consoles (everything which is not a bootconsole) which are |
3442 | * handled differently. |
3443 | * - Any number of bootconsoles can be registered at any time. |
3444 | * - As soon as a "real" console is registered, all bootconsoles |
3445 | * will be unregistered automatically. |
3446 | * - Once a "real" console is registered, any attempt to register a |
3447 | * bootconsole will be rejected. |
3448 | */ |
3449 | void register_console(struct console *newcon) |
3450 | { |
3451 | struct console *con; |
3452 | bool bootcon_registered = false; |
3453 | bool realcon_registered = false; |
3454 | int err; |
3455 | |
3456 | console_list_lock(); |
3457 | |
3458 | for_each_console(con) { |
3459 | if (WARN(con == newcon, "console '%s%d' already registered\n", |
3460 | con->name, con->index)) { |
3461 | goto unlock; |
3462 | } |
3463 | |
3464 | if (con->flags & CON_BOOT) |
3465 | bootcon_registered = true; |
3466 | else |
3467 | realcon_registered = true; |
3468 | } |
3469 | |
3470 | /* Do not register boot consoles when there already is a real one. */ |
3471 | if ((newcon->flags & CON_BOOT) && realcon_registered) { |
3472 | pr_info("Too late to register bootconsole %s%d\n", |
3473 | newcon->name, newcon->index); |
3474 | goto unlock; |
3475 | } |
3476 | |
3477 | if (newcon->flags & CON_NBCON) { |
3478 | /* |
3479 | * Ensure the nbcon console buffers can be allocated |
3480 | * before modifying any global data. |
3481 | */ |
3482 | if (!nbcon_alloc(newcon)) |
3483 | goto unlock; |
3484 | } |
3485 | |
3486 | /* |
3487 | * See if we want to enable this console driver by default. |
3488 | * |
3489 | * Nope when a console is preferred by the command line, device |
3490 | * tree, or SPCR. |
3491 | * |
3492 | * The first real console with tty binding (driver) wins. More |
3493 | * consoles might get enabled before the right one is found. |
3494 | * |
3495 | * Note that a console with tty binding will have CON_CONSDEV |
3496 | * flag set and will be first in the list. |
3497 | */ |
3498 | if (preferred_console < 0) { |
3499 | if (hlist_empty(&console_list) || !console_first()->device || |
3500 | console_first()->flags & CON_BOOT) { |
3501 | try_enable_default_console(newcon); |
3502 | } |
3503 | } |
3504 | |
3505 | /* See if this console matches one we selected on the command line */ |
3506 | err = try_enable_preferred_console(newcon, true); |
3507 | |
3508 | /* If not, try to match against the platform default(s) */ |
3509 | if (err == -ENOENT) |
3510 | err = try_enable_preferred_console(newcon, false); |
3511 | |
3512 | /* printk() messages are not printed to the Braille console. */ |
3513 | if (err || newcon->flags & CON_BRL) { |
3514 | if (newcon->flags & CON_NBCON) |
3515 | nbcon_free(newcon); |
3516 | goto unlock; |
3517 | } |
3518 | |
3519 | /* |
3520 | * If we have a bootconsole, and are switching to a real console, |
3521 | * don't print everything out again, since when the boot console and |
3522 | * the real console are the same physical device, it's annoying to |
3523 | * see the beginning boot messages twice. |
3524 | */ |
3525 | if (bootcon_registered && |
3526 | ((newcon->flags & (CON_CONSDEV | CON_BOOT)) == CON_CONSDEV)) { |
3527 | newcon->flags &= ~CON_PRINTBUFFER; |
3528 | } |
3529 | |
3530 | newcon->dropped = 0; |
3531 | console_init_seq(newcon, bootcon_registered); |
3532 | |
3533 | if (newcon->flags & CON_NBCON) |
3534 | nbcon_init(newcon); |
3535 | |
3536 | /* |
3537 | * Put this console in the list - keep the |
3538 | * preferred driver at the head of the list. |
3539 | */ |
3540 | if (hlist_empty(&console_list)) { |
3541 | /* Ensure CON_CONSDEV is always set for the head. */ |
3542 | newcon->flags |= CON_CONSDEV; |
3543 | hlist_add_head_rcu(&newcon->node, &console_list); |
3544 | |
3545 | } else if (newcon->flags & CON_CONSDEV) { |
3546 | /* Only the new head can have CON_CONSDEV set. */ |
3547 | console_srcu_write_flags(console_first(), console_first()->flags & ~CON_CONSDEV); |
3548 | hlist_add_head_rcu(&newcon->node, &console_list); |
3549 | |
3550 | } else { |
3551 | hlist_add_behind_rcu(&newcon->node, console_list.first); |
3552 | } |
3552 | } |
3553 | |
3554 | /* |
3555 | * No need to synchronize SRCU here! The caller does not rely |
3556 | * on all contexts being able to see the new console before |
3557 | * register_console() completes. |
3558 | */ |
3559 | |
3560 | console_sysfs_notify(); |
3561 | |
3562 | /* |
3563 | * By unregistering the bootconsoles after we enable the real console |
3564 | * we get the "console xxx enabled" message on all the consoles - |
3565 | * boot consoles, real consoles, etc - this is to ensure that end |
3566 | * users know there might be something in the kernel's log buffer that |
3567 | * went to the bootconsole (that they do not see on the real console) |
3568 | */ |
3569 | con_printk(KERN_INFO, newcon, "enabled\n"); |
3570 | if (bootcon_registered && |
3571 | ((newcon->flags & (CON_CONSDEV | CON_BOOT)) == CON_CONSDEV) && |
3572 | !keep_bootcon) { |
3573 | struct hlist_node *tmp; |
3574 | |
3575 | hlist_for_each_entry_safe(con, tmp, &console_list, node) { |
3576 | if (con->flags & CON_BOOT) |
3577 | unregister_console_locked(con); |
3578 | } |
3579 | } |
3580 | unlock: |
3581 | console_list_unlock(); |
3582 | } |
3583 | EXPORT_SYMBOL(register_console); |
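/*
 * Example (illustrative sketch): a minimal console driver using this
 * API. All "my_*" names are hypothetical; only the struct console
 * fields and the register_console()/unregister_console() calls reflect
 * the interface implemented here.
 */
#if 0
static void my_console_write(struct console *con, const char *s,
			     unsigned int count)
{
	while (count--)
		my_hw_put_char(*s++);	/* hypothetical hardware hook */
}

static struct console my_console = {
	.name	= "mycon",
	.write	= my_console_write,
	.flags	= CON_PRINTBUFFER,	/* replay the existing log buffer */
	.index	= -1,			/* match any index from "console=" */
};

static int __init my_console_driver_init(void)
{
	register_console(&my_console);
	return 0;
}

static void __exit my_console_driver_exit(void)
{
	unregister_console(&my_console);
}
#endif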
3584 | |
3585 | /* Must be called under console_list_lock(). */ |
3586 | static int unregister_console_locked(struct console *console) |
3587 | { |
3588 | int res; |
3589 | |
3590 | lockdep_assert_console_list_lock_held(); |
3591 | |
3592 | con_printk(KERN_INFO, console, "disabled\n"); |
3593 | |
3594 | res = _braille_unregister_console(console); |
3595 | if (res < 0) |
3596 | return res; |
3597 | if (res > 0) |
3598 | return 0; |
3599 | |
3600 | /* Disable it unconditionally */ |
3601 | console_srcu_write_flags(console, console->flags & ~CON_ENABLED); |
3602 | |
3603 | if (!console_is_registered_locked(console)) |
3604 | return -ENODEV; |
3605 | |
3606 | hlist_del_init_rcu(&console->node); |
3607 | |
3608 | /* |
3609 | * <HISTORICAL> |
3610 | * If this isn't the last console and it has CON_CONSDEV set, we |
3611 | * need to set it on the next preferred console. |
3612 | * </HISTORICAL> |
3613 | * |
3614 | * The above makes no sense as there is no guarantee that the next |
3615 | * console has any device attached. Oh well.... |
3616 | */ |
3617 | if (!hlist_empty(&console_list) && console->flags & CON_CONSDEV) |
3618 | console_srcu_write_flags(console_first(), console_first()->flags | CON_CONSDEV); |
3619 | |
3620 | /* |
3621 | * Ensure that all SRCU list walks have completed. All contexts |
3622 | * must not be able to see this console in the list so that any |
3623 | * exit/cleanup routines can be performed safely. |
3624 | */ |
3625 | synchronize_srcu(&console_srcu); |
3626 | |
3627 | if (console->flags & CON_NBCON) |
3628 | nbcon_free(console); |
3629 | |
3630 | console_sysfs_notify(); |
3631 | |
3632 | if (console->exit) |
3633 | res = console->exit(console); |
3634 | |
3635 | return res; |
3636 | } |
3637 | |
3638 | int unregister_console(struct console *console) |
3639 | { |
3640 | int res; |
3641 | |
3642 | console_list_lock(); |
3643 | res = unregister_console_locked(console); |
3644 | console_list_unlock(); |
3645 | return res; |
3646 | } |
3647 | EXPORT_SYMBOL(unregister_console); |
3648 | |
3649 | /** |
3650 | * console_force_preferred_locked - force a registered console preferred |
3651 | * @con: The registered console to force preferred. |
3652 | * |
3653 | * Must be called under console_list_lock(). |
3654 | */ |
3655 | void console_force_preferred_locked(struct console *con) |
3656 | { |
3657 | struct console *cur_pref_con; |
3658 | |
3659 | if (!console_is_registered_locked(con)) |
3660 | return; |
3661 | |
3662 | cur_pref_con = console_first(); |
3663 | |
3664 | /* Already preferred? */ |
3665 | if (cur_pref_con == con) |
3666 | return; |
3667 | |
3668 | /* |
3669 | * Delete, but do not re-initialize the entry. This allows the console |
3670 | * to continue to appear registered (via any hlist_unhashed_lockless() |
3671 | * checks), even though it was briefly removed from the console list. |
3672 | */ |
3673 | hlist_del_rcu(&con->node); |
3674 | |
3675 | /* |
3676 | * Ensure that all SRCU list walks have completed so that the console |
3677 | * can be added to the beginning of the console list and its forward |
3678 | * list pointer can be re-initialized. |
3679 | */ |
3680 | synchronize_srcu(&console_srcu); |
3681 | |
3682 | con->flags |= CON_CONSDEV; |
3683 | WARN_ON(!con->device); |
3684 | |
3685 | /* Only the new head can have CON_CONSDEV set. */ |
3686 | console_srcu_write_flags(cur_pref_con, cur_pref_con->flags & ~CON_CONSDEV); |
3687 | hlist_add_head_rcu(&con->node, &console_list); |
3688 | } |
3689 | EXPORT_SYMBOL(console_force_preferred_locked); |
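/*
 * Example (illustrative): callers such as the serial core take the
 * console list lock around this helper. "my_console" is hypothetical.
 */
#if 0
static void my_make_preferred(void)
{
	console_list_lock();
	console_force_preferred_locked(&my_console);
	console_list_unlock();
}
#endif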
3690 | |
3691 | /* |
3692 | * Initialize the console device. This is called *early*, so |
3693 | * we can't necessarily depend on lots of kernel help here. |
3694 | * Just do some early initializations, and do the complex setup |
3695 | * later. |
3696 | */ |
3697 | void __init console_init(void) |
3698 | { |
3699 | int ret; |
3700 | initcall_t call; |
3701 | initcall_entry_t *ce; |
3702 | |
3703 | /* Setup the default TTY line discipline. */ |
3704 | n_tty_init(); |
3705 | |
3706 | /* |
3707 | * set up the console device so that later boot sequences can |
3708 | * inform about problems etc.. |
3709 | */ |
3710 | ce = __con_initcall_start; |
3711 | trace_initcall_level("console"); |
3712 | while (ce < __con_initcall_end) { |
3713 | call = initcall_from_entry(ce); |
3714 | trace_initcall_start(call); |
3715 | ret = call(); |
3716 | trace_initcall_finish(call, ret); |
3717 | ce++; |
3718 | } |
3719 | } |
3720 | |
3721 | /* |
3722 | * Some boot consoles access data that is in the init section and which will |
3723 | * be discarded after the initcalls have been run. To make sure that no code |
3724 | * will access this data, unregister the boot consoles in a late initcall. |
3725 | * |
3726 | * If for some reason, such as deferred probe or the driver being a loadable |
3727 | * module, the real console hasn't registered yet at this point, there will |
3728 | * be a brief interval in which no messages are logged to the console, which |
3729 | * makes it difficult to diagnose problems that occur during this time. |
3730 | * |
3731 | * To mitigate this problem somewhat, only unregister consoles whose memory |
3732 | * intersects with the init section. Note that all other boot consoles will |
3733 | * get unregistered when the real preferred console is registered. |
3734 | */ |
3735 | static int __init printk_late_init(void) |
3736 | { |
3737 | struct hlist_node *tmp; |
3738 | struct console *con; |
3739 | int ret; |
3740 | |
3741 | console_list_lock(); |
3742 | hlist_for_each_entry_safe(con, tmp, &console_list, node) { |
3743 | if (!(con->flags & CON_BOOT)) |
3744 | continue; |
3745 | |
3746 | /* Check addresses that might be used for enabled consoles. */ |
3747 | if (init_section_intersects(con, sizeof(*con)) || |
3748 | init_section_contains(con->write, 0) || |
3749 | init_section_contains(con->read, 0) || |
3750 | init_section_contains(con->device, 0) || |
3751 | init_section_contains(con->unblank, 0) || |
3752 | init_section_contains(con->data, 0)) { |
3753 | /* |
3754 | * Please, consider moving the reported consoles out |
3755 | * of the init section. |
3756 | */ |
3757 | pr_warn("bootconsole [%s%d] uses init memory and must be disabled even before the real one is ready\n", |
3758 | con->name, con->index); |
3759 | unregister_console_locked(con); |
3760 | } |
3761 | } |
3762 | console_list_unlock(); |
3763 | |
3764 | ret = cpuhp_setup_state_nocalls(CPUHP_PRINTK_DEAD, "printk:dead", NULL, |
3765 | console_cpu_notify); |
3766 | WARN_ON(ret < 0); |
3767 | ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "printk:online", |
3768 | console_cpu_notify, NULL); |
3769 | WARN_ON(ret < 0); |
3770 | printk_sysctl_init(); |
3771 | return 0; |
3772 | } |
3773 | late_initcall(printk_late_init); |
3774 | |
3775 | #if defined CONFIG_PRINTK |
3776 | /* If @con is specified, only wait for that console. Otherwise wait for all. */ |
3777 | static bool __pr_flush(struct console *con, int timeout_ms, bool reset_on_progress) |
3778 | { |
3779 | unsigned long timeout_jiffies = msecs_to_jiffies(timeout_ms); |
3780 | unsigned long remaining_jiffies = timeout_jiffies; |
3781 | struct console *c; |
3782 | u64 last_diff = 0; |
3783 | u64 printk_seq; |
3784 | short flags; |
3785 | int cookie; |
3786 | u64 diff; |
3787 | u64 seq; |
3788 | |
3789 | might_sleep(); |
3790 | |
3791 | seq = prb_next_reserve_seq(prb); |
3792 | |
3793 | /* Flush the consoles so that records up to @seq are printed. */ |
3794 | console_lock(); |
3795 | console_unlock(); |
3796 | |
3797 | for (;;) { |
3798 | unsigned long begin_jiffies; |
3799 | unsigned long slept_jiffies; |
3800 | |
3801 | diff = 0; |
3802 | |
3803 | /* |
3804 | * Hold the console_lock to guarantee safe access to |
3805 | * console->seq. Releasing console_lock flushes more |
3806 | * records in case @seq is still not printed on all |
3807 | * usable consoles. |
3808 | */ |
3809 | console_lock(); |
3810 | |
3811 | cookie = console_srcu_read_lock(); |
3812 | for_each_console_srcu(c) { |
3813 | if (con && con != c) |
3814 | continue; |
3815 | |
3816 | flags = console_srcu_read_flags(c); |
3817 | |
3818 | /* |
3819 | * If consoles are not usable, it cannot be expected |
3820 | * that they make forward progress, so only increment |
3821 | * @diff for usable consoles. |
3822 | */ |
3823 | if (!console_is_usable(c)) |
3824 | continue; |
3825 | |
3826 | if (flags & CON_NBCON) { |
3827 | printk_seq = nbcon_seq_read(c); |
3828 | } else { |
3829 | printk_seq = c->seq; |
3830 | } |
3831 | |
3832 | if (printk_seq < seq) |
3833 | diff += seq - printk_seq; |
3834 | } |
3835 | console_srcu_read_unlock(cookie); |
3836 | |
3837 | if (diff != last_diff && reset_on_progress) |
3838 | remaining_jiffies = timeout_jiffies; |
3839 | |
3840 | console_unlock(); |
3841 | |
3842 | /* Note: @diff is 0 if there are no usable consoles. */ |
3843 | if (diff == 0 || remaining_jiffies == 0) |
3844 | break; |
3845 | |
3846 | /* msleep(1) might sleep much longer. Check time by jiffies. */ |
3847 | begin_jiffies = jiffies; |
3848 | msleep(1); |
3849 | slept_jiffies = jiffies - begin_jiffies; |
3850 | |
3851 | remaining_jiffies -= min(slept_jiffies, remaining_jiffies); |
3852 | |
3853 | last_diff = diff; |
3854 | } |
3855 | |
3856 | return (diff == 0); |
3857 | } |
3858 | |
3859 | /** |
3860 | * pr_flush() - Wait for printing threads to catch up. |
3861 | * |
3862 | * @timeout_ms: The maximum time (in ms) to wait. |
3863 | * @reset_on_progress: Reset the timeout if forward progress is seen. |
3864 | * |
3865 | * A value of 0 for @timeout_ms means no waiting will occur. A value of -1 |
3866 | * represents infinite waiting. |
3867 | * |
3868 | * If @reset_on_progress is true, the timeout will be reset whenever any |
3869 | * printer has been seen to make some forward progress. |
3870 | * |
3871 | * Context: Process context. May sleep while acquiring console lock. |
3872 | * Return: true if all usable printers are caught up. |
3873 | */ |
3874 | static bool pr_flush(int timeout_ms, bool reset_on_progress) |
3875 | { |
3876 | return __pr_flush(NULL, timeout_ms, reset_on_progress); |
3877 | } |
3878 | |
3879 | /* |
3880 | * Delayed printk version, for scheduler-internal messages: |
3881 | */ |
3882 | #define PRINTK_PENDING_WAKEUP 0x01 |
3883 | #define PRINTK_PENDING_OUTPUT 0x02 |
3884 | |
3885 | static DEFINE_PER_CPU(int, printk_pending); |
3886 | |
3887 | static void wake_up_klogd_work_func(struct irq_work *irq_work) |
3888 | { |
3889 | int pending = this_cpu_xchg(printk_pending, 0); |
3890 | |
3891 | if (pending & PRINTK_PENDING_OUTPUT) { |
3892 | /* If trylock fails, someone else is doing the printing */ |
3893 | if (console_trylock()) |
3894 | console_unlock(); |
3895 | } |
3896 | |
3897 | if (pending & PRINTK_PENDING_WAKEUP) |
3898 | wake_up_interruptible(&log_wait); |
3899 | } |
3900 | |
3901 | static DEFINE_PER_CPU(struct irq_work, wake_up_klogd_work) = |
3902 | IRQ_WORK_INIT_LAZY(wake_up_klogd_work_func); |
3903 | |
3904 | static void __wake_up_klogd(int val) |
3905 | { |
3906 | if (!printk_percpu_data_ready()) |
3907 | return; |
3908 | |
3909 | preempt_disable(); |
3910 | /* |
3911 | * Guarantee any new records can be seen by tasks preparing to wait |
3912 | * before this context checks if the wait queue is empty. |
3913 | * |
3914 | * The full memory barrier within wq_has_sleeper() pairs with the full |
3915 | * memory barrier within set_current_state() of |
3916 | * prepare_to_wait_event(), which is called after ___wait_event() adds |
3917 | * the waiter but before it has checked the wait condition. |
3918 | * |
3919 | * This pairs with devkmsg_read:A and syslog_print:A. |
3920 | */ |
3921 | if (wq_has_sleeper(&log_wait) || /* LMM(__wake_up_klogd:A) */ |
3922 | (val & PRINTK_PENDING_OUTPUT)) { |
3923 | this_cpu_or(printk_pending, val); |
3924 | irq_work_queue(this_cpu_ptr(&wake_up_klogd_work)); |
3925 | } |
3926 | preempt_enable(); |
3927 | } |
3928 | |
3929 | /** |
3930 | * wake_up_klogd - Wake kernel logging daemon |
3931 | * |
3932 | * Use this function when new records have been added to the ringbuffer |
3933 | * and the console printing of those records has already occurred or is |
3934 | * known to be handled by some other context. This function will only |
3935 | * wake the logging daemon. |
3936 | * |
3937 | * Context: Any context. |
3938 | */ |
3939 | void wake_up_klogd(void) |
3940 | { |
3941 | __wake_up_klogd(PRINTK_PENDING_WAKEUP); |
3942 | } |
3943 | |
3944 | /** |
3945 | * defer_console_output - Wake kernel logging daemon and trigger |
3946 | * console printing in a deferred context |
3947 | * |
3948 | * Use this function when new records have been added to the ringbuffer, |
3949 | * this context is responsible for console printing those records, but |
3950 | * the current context is not allowed to perform the console printing. |
3951 | * Trigger an irq_work context to perform the console printing. This |
3952 | * function also wakes the logging daemon. |
3953 | * |
3954 | * Context: Any context. |
3955 | */ |
3956 | void defer_console_output(void) |
3957 | { |
3958 | /* |
3959 | * New messages may have been added directly to the ringbuffer |
3960 | * using vprintk_store(), so wake any waiters as well. |
3961 | */ |
3962 | __wake_up_klogd(PRINTK_PENDING_WAKEUP | PRINTK_PENDING_OUTPUT); |
3963 | } |
3964 | |
3965 | void printk_trigger_flush(void) |
3966 | { |
3967 | defer_console_output(); |
3968 | } |
3969 | |
3970 | int vprintk_deferred(const char *fmt, va_list args) |
3971 | { |
3972 | return vprintk_emit(0, LOGLEVEL_SCHED, NULL, fmt, args); |
3973 | } |
3974 | |
3975 | int _printk_deferred(const char *fmt, ...) |
3976 | { |
3977 | va_list args; |
3978 | int r; |
3979 | |
3980 | va_start(args, fmt); |
3981 | r = vprintk_deferred(fmt, args); |
3982 | va_end(args); |
3983 | |
3984 | return r; |
3985 | } |
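/*
 * Example (illustrative): code running under the scheduler's own locks
 * can use the printk_deferred() wrapper so the console output happens
 * later from irq_work context instead of recursing into the scheduler.
 */
#if 0
static void my_sched_report(void)
{
	printk_deferred(KERN_WARNING "sched: clock went backwards\n");
}
#endif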
3986 | |
3987 | /* |
3988 | * printk rate limiting, lifted from the networking subsystem. |
3989 | * |
3990 | * This enforces a rate limit: not more than 10 kernel messages |
3991 | * every 5s to make a denial-of-service attack impossible. |
3992 | */ |
3993 | DEFINE_RATELIMIT_STATE(printk_ratelimit_state, 5 * HZ, 10); |
3994 | |
3995 | int __printk_ratelimit(const char *func) |
3996 | { |
3997 | return ___ratelimit(&printk_ratelimit_state, func); |
3998 | } |
3999 | EXPORT_SYMBOL(__printk_ratelimit); |
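/*
 * Example (illustrative): the usual way to use this is through the
 * printk_ratelimit() macro, which passes __func__ automatically.
 */
#if 0
static void my_report_drop(void)
{
	if (printk_ratelimit())
		pr_warn("dropping packet\n");
}
#endif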
4000 | |
4001 | /** |
4002 | * printk_timed_ratelimit - caller-controlled printk ratelimiting |
4003 | * @caller_jiffies: pointer to caller's state |
4004 | * @interval_msecs: minimum interval between prints |
4005 | * |
4006 | * printk_timed_ratelimit() returns true if more than @interval_msecs |
4007 | * milliseconds have elapsed since the last time printk_timed_ratelimit() |
4008 | * returned true. |
4009 | */ |
4010 | bool printk_timed_ratelimit(unsigned long *caller_jiffies, |
4011 | unsigned int interval_msecs) |
4012 | { |
4013 | unsigned long elapsed = jiffies - *caller_jiffies; |
4014 | |
4015 | if (*caller_jiffies && elapsed <= msecs_to_jiffies(interval_msecs)) |
4016 | return false; |
4017 | |
4018 | *caller_jiffies = jiffies; |
4019 | return true; |
4020 | } |
4021 | EXPORT_SYMBOL(printk_timed_ratelimit); |
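/*
 * Example (illustrative): a driver limiting a warning to at most once
 * per second. "last_warned" holds the caller's state between calls.
 */
#if 0
static void my_report_overrun(void)
{
	static unsigned long last_warned;

	if (printk_timed_ratelimit(&last_warned, 1000))
		pr_warn("FIFO overrun\n");
}
#endif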
4022 | |
4023 | static DEFINE_SPINLOCK(dump_list_lock); |
4024 | static LIST_HEAD(dump_list); |
4025 | |
4026 | /** |
4027 | * kmsg_dump_register - register a kernel log dumper. |
4028 | * @dumper: pointer to the kmsg_dumper structure |
4029 | * |
4030 | * Adds a kernel log dumper to the system. The dump callback in the |
4031 | * structure will be called when the kernel oopses or panics and must be |
4032 | * set. Returns zero on success and %-EINVAL or %-EBUSY otherwise. |
4033 | */ |
4034 | int kmsg_dump_register(struct kmsg_dumper *dumper) |
4035 | { |
4036 | unsigned long flags; |
4037 | int err = -EBUSY; |
4038 | |
4039 | /* The dump callback needs to be set */ |
4040 | if (!dumper->dump) |
4041 | return -EINVAL; |
4042 | |
4043 | spin_lock_irqsave(&dump_list_lock, flags); |
4044 | /* Don't allow registering multiple times */ |
4045 | if (!dumper->registered) { |
4046 | dumper->registered = 1; |
4047 | list_add_tail_rcu(&dumper->list, &dump_list); |
4048 | err = 0; |
4049 | } |
4050 | spin_unlock_irqrestore(&dump_list_lock, flags); |
4051 | |
4052 | return err; |
4053 | } |
4054 | EXPORT_SYMBOL_GPL(kmsg_dump_register); |
4055 | |
4056 | /** |
4057 | * kmsg_dump_unregister - unregister a kmsg dumper. |
4058 | * @dumper: pointer to the kmsg_dumper structure |
4059 | * |
4060 | * Removes a dump device from the system. Returns zero on success and |
4061 | * %-EINVAL otherwise. |
4062 | */ |
4063 | int kmsg_dump_unregister(struct kmsg_dumper *dumper) |
4064 | { |
4065 | unsigned long flags; |
4066 | int err = -EINVAL; |
4067 | |
4068 | spin_lock_irqsave(&dump_list_lock, flags); |
4069 | if (dumper->registered) { |
4070 | dumper->registered = 0; |
4071 | list_del_rcu(&dumper->list); |
4072 | err = 0; |
4073 | } |
4074 | spin_unlock_irqrestore(&dump_list_lock, flags); |
4075 | synchronize_rcu(); |
4076 | |
4077 | return err; |
4078 | } |
4079 | EXPORT_SYMBOL_GPL(kmsg_dump_unregister); |
4080 | |
4081 | static bool always_kmsg_dump; |
4082 | module_param_named(always_kmsg_dump, always_kmsg_dump, bool, S_IRUGO | S_IWUSR); |
4083 | |
4084 | const char *kmsg_dump_reason_str(enum kmsg_dump_reason reason) |
4085 | { |
4086 | switch (reason) { |
4087 | case KMSG_DUMP_PANIC: |
4088 | return "Panic"; |
4089 | case KMSG_DUMP_OOPS: |
4090 | return "Oops"; |
4091 | case KMSG_DUMP_EMERG: |
4092 | return "Emergency"; |
4093 | case KMSG_DUMP_SHUTDOWN: |
4094 | return "Shutdown"; |
4095 | default: |
4096 | return "Unknown"; |
4097 | } |
4098 | } |
4099 | EXPORT_SYMBOL_GPL(kmsg_dump_reason_str); |
4100 | |
4101 | /** |
4102 | * kmsg_dump - dump kernel log to kernel message dumpers. |
4103 | * @reason: the reason (oops, panic etc) for dumping |
4104 | * |
4105 | * Call each of the registered dumper's dump() callback, which can |
4106 | * retrieve the kmsg records with kmsg_dump_get_line() or |
4107 | * kmsg_dump_get_buffer(). |
4108 | */ |
4109 | void kmsg_dump(enum kmsg_dump_reason reason) |
4110 | { |
4111 | struct kmsg_dumper *dumper; |
4112 | |
4113 | rcu_read_lock(); |
4114 | list_for_each_entry_rcu(dumper, &dump_list, list) { |
4115 | enum kmsg_dump_reason max_reason = dumper->max_reason; |
4116 | |
4117 | /* |
4118 | * If client has not provided a specific max_reason, default |
4119 | * to KMSG_DUMP_OOPS, unless always_kmsg_dump was set. |
4120 | */ |
4121 | if (max_reason == KMSG_DUMP_UNDEF) { |
4122 | max_reason = always_kmsg_dump ? KMSG_DUMP_MAX : |
4123 | KMSG_DUMP_OOPS; |
4124 | } |
4125 | if (reason > max_reason) |
4126 | continue; |
4127 | |
4128 | /* invoke dumper which will iterate over records */ |
4129 | dumper->dump(dumper, reason); |
4130 | } |
4131 | rcu_read_unlock(); |
4132 | } |
4133 | |
4134 | /** |
4135 | * kmsg_dump_get_line - retrieve one kmsg log line |
4136 | * @iter: kmsg dump iterator |
4137 | * @syslog: include the "<4>" prefixes |
4138 | * @line: buffer to copy the line to |
4139 | * @size: maximum size of the buffer |
4140 | * @len: length of line placed into buffer |
4141 | * |
4142 | * Start at the beginning of the kmsg buffer, with the oldest kmsg |
4143 | * record, and copy one record into the provided buffer. |
4144 | * |
4145 | * Consecutive calls will return the next available record moving |
4146 | * towards the end of the buffer with the youngest messages. |
4147 | * |
4148 | * A return value of FALSE indicates that there are no more records to |
4149 | * read. |
4150 | */ |
4151 | bool kmsg_dump_get_line(struct kmsg_dump_iter *iter, bool syslog, |
4152 | char *line, size_t size, size_t *len) |
4153 | { |
4154 | u64 min_seq = latched_seq_read_nolock(&clear_seq); |
4155 | struct printk_info info; |
4156 | unsigned int line_count; |
4157 | struct printk_record r; |
4158 | size_t l = 0; |
4159 | bool ret = false; |
4160 | |
4161 | if (iter->cur_seq < min_seq) |
4162 | iter->cur_seq = min_seq; |
4163 | |
4164 | prb_rec_init_rd(&r, &info, line, size); |
4165 | |
4166 | /* Read text or count text lines? */ |
4167 | if (line) { |
4168 | if (!prb_read_valid(prb, iter->cur_seq, &r)) |
4169 | goto out; |
4170 | l = record_print_text(&r, syslog, printk_time); |
4171 | } else { |
4172 | if (!prb_read_valid_info(prb, iter->cur_seq, |
4173 | &info, &line_count)) { |
4174 | goto out; |
4175 | } |
4176 | l = get_record_print_text_size(&info, line_count, syslog, |
4177 | printk_time); |
4178 | |
4179 | } |
4180 | |
4181 | iter->cur_seq = r.info->seq + 1; |
4182 | ret = true; |
4183 | out: |
4184 | if (len) |
4185 | *len = l; |
4186 | return ret; |
4187 | } |
4188 | EXPORT_SYMBOL_GPL(kmsg_dump_get_line); |
4189 | |
4190 | /** |
4191 | * kmsg_dump_get_buffer - copy kmsg log lines |
4192 | * @iter: kmsg dump iterator |
4193 | * @syslog: include the "<4>" prefixes |
4194 | * @buf: buffer to copy the line to |
4195 | * @size: maximum size of the buffer |
4196 | * @len_out: length of line placed into buffer |
4197 | * |
4198 | * Start at the end of the kmsg buffer and fill the provided buffer |
4199 | * with as many of the *youngest* kmsg records that fit into it. |
4200 | * If the buffer is large enough, all available kmsg records will be |
4201 | * copied with a single call. |
4202 | * |
4203 | * Consecutive calls will fill the buffer with the next block of |
4204 | * available older records, not including the earlier retrieved ones. |
4205 | * |
4206 | * A return value of FALSE indicates that there are no more records to |
4207 | * read. |
4208 | */ |
4209 | bool kmsg_dump_get_buffer(struct kmsg_dump_iter *iter, bool syslog, |
4210 | char *buf, size_t size, size_t *len_out) |
4211 | { |
4212 | u64 min_seq = latched_seq_read_nolock(&clear_seq); |
4213 | struct printk_info info; |
4214 | struct printk_record r; |
4215 | u64 seq; |
4216 | u64 next_seq; |
4217 | size_t len = 0; |
4218 | bool ret = false; |
4219 | bool time = printk_time; |
4220 | |
4221 | if (!buf || !size) |
4222 | goto out; |
4223 | |
4224 | if (iter->cur_seq < min_seq) |
4225 | iter->cur_seq = min_seq; |
4226 | |
4227 | if (prb_read_valid_info(prb, iter->cur_seq, &info, NULL)) { |
4228 | if (info.seq != iter->cur_seq) { |
4229 | /* messages are gone, move to first available one */ |
4230 | iter->cur_seq = info.seq; |
4231 | } |
4232 | } |
4233 | |
4234 | /* last entry */ |
4235 | if (iter->cur_seq >= iter->next_seq) |
4236 | goto out; |
4237 | |
4238 | /* |
4239 | * Find first record that fits, including all following records, |
4240 | * into the user-provided buffer for this dump. Pass in size-1 |
4241 | * because this function (by way of record_print_text()) will |
4242 | * not write more than size-1 bytes of text into @buf. |
4243 | */ |
4244 | seq = find_first_fitting_seq(iter->cur_seq, iter->next_seq, |
4245 | size - 1, syslog, time); |
4246 | |
4247 | /* |
4248 | * Next kmsg_dump_get_buffer() invocation will dump block of |
4249 | * older records stored right before this one. |
4250 | */ |
4251 | next_seq = seq; |
4252 | |
4253 | prb_rec_init_rd(&r, &info, buf, size); |
4254 | |
4255 | prb_for_each_record(seq, prb, seq, &r) { |
4256 | if (r.info->seq >= iter->next_seq) |
4257 | break; |
4258 | |
4259 | len += record_print_text(&r, syslog, time); |
4260 | |
4261 | /* Adjust record to store to remaining buffer space. */ |
4262 | prb_rec_init_rd(&r, &info, buf + len, size - len); |
4263 | } |
4264 | |
4265 | iter->next_seq = next_seq; |
4266 | ret = true; |
4267 | out: |
4268 | if (len_out) |
4269 | *len_out = len; |
4270 | return ret; |
4271 | } |
4272 | EXPORT_SYMBOL_GPL(kmsg_dump_get_buffer); |
4273 | |
4274 | /** |
4275 | * kmsg_dump_rewind - reset the iterator |
4276 | * @iter: kmsg dump iterator |
4277 | * |
4278 | * Reset the dumper's iterator so that kmsg_dump_get_line() and |
4279 | * kmsg_dump_get_buffer() can be called again and used multiple |
4280 | * times within the same dumper.dump() callback. |
4281 | */ |
4282 | void kmsg_dump_rewind(struct kmsg_dump_iter *iter) |
4283 | { |
4284 | iter->cur_seq = latched_seq_read_nolock(&clear_seq); |
4285 | iter->next_seq = prb_next_seq(prb); |
4286 | } |
4287 | EXPORT_SYMBOL_GPL(kmsg_dump_rewind); |
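/*
 * Example (illustrative sketch): a kmsg dumper that copies the newest
 * kernel messages line by line on panic. "my_store_line" is a
 * hypothetical persistence hook; the registration and iterator usage
 * reflect the API implemented above.
 */
#if 0
static void my_dump(struct kmsg_dumper *dumper, enum kmsg_dump_reason reason)
{
	struct kmsg_dump_iter iter;
	char line[256];
	size_t len;

	kmsg_dump_rewind(&iter);
	while (kmsg_dump_get_line(&iter, true, line, sizeof(line), &len))
		my_store_line(line, len);	/* hypothetical hook */
}

static struct kmsg_dumper my_dumper = {
	.dump		= my_dump,
	.max_reason	= KMSG_DUMP_PANIC,
};

static int __init my_dumper_init(void)
{
	return kmsg_dump_register(&my_dumper);
}
#endif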
4288 | |
4289 | #endif |
4290 | |
4291 | #ifdef CONFIG_SMP |
4292 | static atomic_t printk_cpu_sync_owner = ATOMIC_INIT(-1); |
4293 | static atomic_t printk_cpu_sync_nested = ATOMIC_INIT(0); |
4294 | |
4295 | /** |
4296 | * __printk_cpu_sync_wait() - Busy wait until the printk cpu-reentrant |
4297 | * spinning lock is not owned by any CPU. |
4298 | * |
4299 | * Context: Any context. |
4300 | */ |
4301 | void __printk_cpu_sync_wait(void) |
4302 | { |
4303 | do { |
4304 | cpu_relax(); |
4305 | } while (atomic_read(&printk_cpu_sync_owner) != -1); |
4306 | } |
4307 | EXPORT_SYMBOL(__printk_cpu_sync_wait); |
4308 | |
4309 | /** |
4310 | * __printk_cpu_sync_try_get() - Try to acquire the printk cpu-reentrant |
4311 | * spinning lock. |
4312 | * |
4313 | * If no processor has the lock, the calling processor takes the lock and |
4314 | * becomes the owner. If the calling processor is already the owner of the |
4315 | * lock, this function succeeds immediately. |
4316 | * |
4317 | * Context: Any context. Expects interrupts to be disabled. |
4318 | * Return: 1 on success, otherwise 0. |
4319 | */ |
4320 | int __printk_cpu_sync_try_get(void) |
4321 | { |
4322 | int cpu; |
4323 | int old; |
4324 | |
4325 | cpu = smp_processor_id(); |
4326 | |
4327 | /* |
4328 | * Guarantee loads and stores from this CPU when it is the lock owner |
4329 | * are _not_ visible to the previous lock owner. This pairs with |
4330 | * __printk_cpu_sync_put:B. |
4331 | * |
4332 | * Memory barrier involvement: |
4333 | * |
4334 | * If __printk_cpu_sync_try_get:A reads from __printk_cpu_sync_put:B, |
4335 | * then __printk_cpu_sync_put:A can never read from |
4336 | * __printk_cpu_sync_try_get:B. |
4337 | * |
4338 | * Relies on: |
4339 | * |
4340 | * RELEASE from __printk_cpu_sync_put:A to __printk_cpu_sync_put:B |
4341 | * of the previous CPU |
4342 | * matching |
4343 | * ACQUIRE from __printk_cpu_sync_try_get:A to |
4344 | * __printk_cpu_sync_try_get:B of this CPU |
4345 | */ |
4346 | old = atomic_cmpxchg_acquire(&printk_cpu_sync_owner, -1, |
4347 | cpu); /* LMM(__printk_cpu_sync_try_get:A) */ |
4348 | if (old == -1) { |
4349 | /* |
4350 | * This CPU is now the owner and begins loading/storing |
4351 | * data: LMM(__printk_cpu_sync_try_get:B) |
4352 | */ |
4353 | return 1; |
4354 | |
4355 | } else if (old == cpu) { |
4356 | /* This CPU is already the owner. */ |
4357 | atomic_inc(&printk_cpu_sync_nested); |
4358 | return 1; |
4359 | } |
4360 | |
4361 | return 0; |
4362 | } |
4363 | EXPORT_SYMBOL(__printk_cpu_sync_try_get); |
4364 | |
4365 | /** |
4366 | * __printk_cpu_sync_put() - Release the printk cpu-reentrant spinning lock. |
4367 | * |
4368 | * The calling processor must be the owner of the lock. |
4369 | * |
4370 | * Context: Any context. Expects interrupts to be disabled. |
4371 | */ |
4372 | void __printk_cpu_sync_put(void) |
4373 | { |
4374 | if (atomic_read(&printk_cpu_sync_nested)) { |
4375 | atomic_dec(&printk_cpu_sync_nested); |
4376 | return; |
4377 | } |
4378 | |
4379 | /* |
4380 | * This CPU is finished loading/storing data: |
4381 | * LMM(__printk_cpu_sync_put:A) |
4382 | */ |
4383 | |
4384 | /* |
4385 | * Guarantee loads and stores from this CPU when it was the |
4386 | * lock owner are visible to the next lock owner. This pairs |
4387 | * with __printk_cpu_sync_try_get:A. |
4388 | * |
4389 | * Memory barrier involvement: |
4390 | * |
4391 | * If __printk_cpu_sync_try_get:A reads from __printk_cpu_sync_put:B, |
4392 | * then __printk_cpu_sync_try_get:B reads from __printk_cpu_sync_put:A. |
4393 | * |
4394 | * Relies on: |
4395 | * |
4396 | * RELEASE from __printk_cpu_sync_put:A to __printk_cpu_sync_put:B |
4397 | * of this CPU |
4398 | * matching |
4399 | * ACQUIRE from __printk_cpu_sync_try_get:A to |
4400 | * __printk_cpu_sync_try_get:B of the next CPU |
4401 | */ |
4402 | atomic_set_release(&printk_cpu_sync_owner, |
4403 | -1); /* LMM(__printk_cpu_sync_put:B) */ |
4404 | } |
4405 | EXPORT_SYMBOL(__printk_cpu_sync_put); |
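/*
 * Example (illustrative): users normally take this lock through the
 * printk_cpu_sync_get_irqsave()/printk_cpu_sync_put_irqrestore() pair,
 * as dump_stack() does, to keep multi-line output from interleaving
 * with output from other CPUs.
 */
#if 0
static void my_multiline_report(void)
{
	unsigned long flags;

	printk_cpu_sync_get_irqsave(flags);
	printk(KERN_ERR "report line 1\n");
	printk(KERN_ERR "report line 2\n");
	printk_cpu_sync_put_irqrestore(flags);
}
#endif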
4406 | #endif /* CONFIG_SMP */ |
4407 |