/*
 * printk_safe.c - Safe printk for printk-deadlock-prone contexts
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/preempt.h>
#include <linux/spinlock.h>
#include <linux/debug_locks.h>
#include <linux/smp.h>
#include <linux/cpumask.h>
#include <linux/irq_work.h>
#include <linux/printk.h>

#include "internal.h"
/*
 * printk() cannot take logbuf_lock in NMI context. Instead,
 * it uses an alternative implementation that temporarily stores
 * the strings into a per-CPU buffer. The content of the buffer
 * is later flushed into the main ring buffer via IRQ work.
 *
 * The alternative implementation is chosen transparently
 * by examining the current printk() context mask stored in the
 * @printk_context per-CPU variable.
 *
 * The implementation also allows flushing the strings from another CPU.
 * There are situations when we want to make sure that all buffers
 * have been handled, or when IRQs are blocked on the CPU that owns
 * the buffer and its IRQ work cannot run.
 */
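/* Set by printk_safe_init() once the per-CPU irq_work structs are usable. */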
static int printk_safe_irq_ready;

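/*
 * Size the text buffer so that struct printk_safe_seq_buf as a whole
 * fits into (1 << CONFIG_PRINTK_SAFE_LOG_BUF_SHIFT) bytes: the two
 * atomic counters and the irq_work are subtracted from the total.
 */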
#define SAFE_LOG_BUF_LEN ((1 << CONFIG_PRINTK_SAFE_LOG_BUF_SHIFT) -	\
				sizeof(atomic_t) -			\
				sizeof(atomic_t) -			\
				sizeof(struct irq_work))

struct printk_safe_seq_buf {
	atomic_t		len;	/* length of written data */
	atomic_t		message_lost;
	struct irq_work		work;	/* IRQ work that flushes the buffer */
	unsigned char		buffer[SAFE_LOG_BUF_LEN];
};

static DEFINE_PER_CPU(struct printk_safe_seq_buf, safe_print_seq);
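/*
 * Tracks the printk() context of the current CPU: the printk-safe
 * nesting count plus the NMI flags, see the PRINTK_*_CONTEXT_MASK
 * definitions in internal.h.
 */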
static DEFINE_PER_CPU(int, printk_context);

#ifdef CONFIG_PRINTK_NMI
static DEFINE_PER_CPU(struct printk_safe_seq_buf, nmi_print_seq);
#endif

/* Queue the buffer to get flushed in a safer context. */
static void queue_flush_work(struct printk_safe_seq_buf *s)
{
	if (printk_safe_irq_ready) {
		/*
		 * Make sure that the IRQ work is really initialized;
		 * pairs with the smp_wmb() in printk_safe_init().
		 */
		smp_rmb();
		irq_work_queue(&s->work);
	}
}

/*
 * Add a message to the per-CPU context-dependent buffer. NMI and printk-safe
 * have dedicated buffers, because otherwise printk-safe preempted by
 * NMI-printk would have overwritten the NMI messages.
 *
 * The messages are flushed from IRQ work (or from panic()), possibly
 * from another CPU, concurrently with printk_safe_log_store(). Should this
 * happen, printk_safe_log_store() will notice the buffer->len mismatch
 * and repeat the write.
 */
static __printf(2, 0) int printk_safe_log_store(struct printk_safe_seq_buf *s,
						const char *fmt, va_list args)
{
	int add;
	size_t len;
	va_list ap;

again:
	len = atomic_read(&s->len);

	/* The trailing '\0' is not counted in len. */
	if (len >= sizeof(s->buffer) - 1) {
		atomic_inc(&s->message_lost);
		queue_flush_work(s);
		return 0;
	}

	/*
	 * Make sure that all old data have been read before the buffer
	 * was reset. This is not needed when we just append data.
	 */
	if (!len)
		smp_rmb();

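	/*
	 * vscnprintf() consumes the va_list, so format a copy: the original
	 * @args must stay usable in case the cmpxchg below fails and the
	 * write is repeated from the "again" label.
	 */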
	va_copy(ap, args);
	add = vscnprintf(s->buffer + len, sizeof(s->buffer) - len, fmt, ap);
	va_end(ap);
	if (!add)
		return 0;

	/*
	 * Do it once again if the buffer has been flushed in the meantime.
	 * Note that atomic_cmpxchg() is an implicit memory barrier that
	 * makes sure that the data were written before updating s->len.
	 */
	if (atomic_cmpxchg(&s->len, len, len + add) != len)
		goto again;

	queue_flush_work(s);
	return add;
}

static inline void printk_safe_flush_line(const char *text, int len)
{
	/*
	 * Avoid any console driver calls from here, because we may be
	 * in NMI or printk_safe context (when in panic). The messages
	 * must go only into the ring buffer at this stage.  Consoles will
	 * get explicitly called later when a crashdump is not generated.
	 */
	printk_deferred("%.*s", len, text);
}

/* Flush part of the temporary buffer, line by line, via printk. */
static int printk_safe_flush_buffer(const char *start, size_t len)
{
	const char *c, *end;
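	/* True until the first non-prefix character of the current line. */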
	bool header;

	c = start;
	end = start + len;
	header = true;

	/* Print line by line. */
	while (c < end) {
		if (*c == '\n') {
			printk_safe_flush_line(start, c - start + 1);
			start = ++c;
			header = true;
			continue;
		}

		/* Handle continuation lines or a missing trailing newline. */
		if ((c + 1 < end) && printk_get_level(c)) {
			if (header) {
				c = printk_skip_level(c);
				continue;
			}

			printk_safe_flush_line(start, c - start);
			start = c++;
			header = true;
			continue;
		}

		header = false;
		c++;
	}

	/* Check if there was a partial line. Ignore a pure header. */
	if (start < end && !header) {
		static const char newline[] = KERN_CONT "\n";

		printk_safe_flush_line(start, end - start);
		printk_safe_flush_line(newline, strlen(newline));
	}

	return len;
}

static void report_message_lost(struct printk_safe_seq_buf *s)
{
	int lost = atomic_xchg(&s->message_lost, 0);

	if (lost)
		printk_deferred("Lost %d message(s)!\n", lost);
}

/*
 * Flush data from the associated per-CPU buffer. The function
 * can be called either via IRQ work or independently.
 */
static void __printk_safe_flush(struct irq_work *work)
{
	static raw_spinlock_t read_lock =
		__RAW_SPIN_LOCK_INITIALIZER(read_lock);
	struct printk_safe_seq_buf *s =
		container_of(work, struct printk_safe_seq_buf, work);
	unsigned long flags;
	size_t len;
	int i;

	/*
	 * The lock has two functions. First, one reader has to flush all
	 * available messages to make the lockless synchronization with
	 * writers easier. Second, we do not want to mix messages from
	 * different CPUs. This is especially important when printing
	 * a backtrace.
	 */
	raw_spin_lock_irqsave(&read_lock, flags);

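	/* @i counts the bytes that have been flushed from the buffer so far. */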
	i = 0;
more:
	len = atomic_read(&s->len);

	/*
	 * This is just a paranoid check that nobody has manipulated
	 * the buffer in an unexpected way. If we printed something then
	 * @len must only increase. Also it should never overflow the
	 * buffer size.
	 */
	if ((i && i >= len) || len > sizeof(s->buffer)) {
		const char *msg = "printk_safe_flush: internal error\n";

		printk_safe_flush_line(msg, strlen(msg));
		len = 0;
	}

	if (!len)
		goto out; /* Someone else has already flushed the buffer. */

	/* Make sure that the data have been written up to @len. */
	smp_rmb();
	i += printk_safe_flush_buffer(s->buffer + i, len - i);

	/*
	 * Check that nothing has been added in the meantime and truncate
	 * the buffer. Note that atomic_cmpxchg() is an implicit memory
	 * barrier that makes sure that the data were copied before
	 * updating s->len.
	 */
	if (atomic_cmpxchg(&s->len, len, 0) != len)
		goto more;

out:
	report_message_lost(s);
	raw_spin_unlock_irqrestore(&read_lock, flags);
}

/**
 * printk_safe_flush - flush all per-cpu printk buffers.
 *
 * The buffers are flushed automatically via IRQ work. This function
 * is useful only when someone wants to be sure that all buffers have
 * been flushed at some point.
 */
void printk_safe_flush(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
#ifdef CONFIG_PRINTK_NMI
		__printk_safe_flush(&per_cpu(nmi_print_seq, cpu).work);
#endif
		__printk_safe_flush(&per_cpu(safe_print_seq, cpu).work);
	}
}

/**
 * printk_safe_flush_on_panic - flush all per-cpu printk buffers when the
 *	system goes down.
 *
 * Similar to printk_safe_flush() but it can be called even in NMI context when
 * the system goes down. It makes a best effort to get NMI messages into
 * the main ring buffer.
 *
 * Note that it could try harder when there is only one CPU online.
 */
void printk_safe_flush_on_panic(void)
{
	/*
	 * Make sure that we can access the main ring buffer.
	 * Do not risk a double release when more CPUs are up.
	 */
	if (raw_spin_is_locked(&logbuf_lock)) {
		if (num_online_cpus() > 1)
			return;

		debug_locks_off();
		raw_spin_lock_init(&logbuf_lock);
	}

	printk_safe_flush();
}

#ifdef CONFIG_PRINTK_NMI
/*
 * Safe printk() for NMI context. It uses a per-CPU buffer to
 * store the message. NMIs are not nested, so there is always only
 * one writer running. But the buffer might get flushed from another
 * CPU, so we need to be careful.
 */
static __printf(1, 0) int vprintk_nmi(const char *fmt, va_list args)
{
	struct printk_safe_seq_buf *s = this_cpu_ptr(&nmi_print_seq);

	return printk_safe_log_store(s, fmt, args);
}

void notrace printk_nmi_enter(void)
{
	this_cpu_or(printk_context, PRINTK_NMI_CONTEXT_MASK);
}

void notrace printk_nmi_exit(void)
{
	this_cpu_and(printk_context, ~PRINTK_NMI_CONTEXT_MASK);
}

/*
 * Marks code that might produce many messages in NMI context
 * and where the risk of losing them is more critical than
 * possible reordering.
 *
 * It has effect only when called in NMI context. Then printk()
 * will try to store the messages into the main logbuf directly
 * and use the per-CPU buffers only as a fallback when the lock
 * is not available.
 */
void printk_nmi_direct_enter(void)
{
	if (this_cpu_read(printk_context) & PRINTK_NMI_CONTEXT_MASK)
		this_cpu_or(printk_context, PRINTK_NMI_DIRECT_CONTEXT_MASK);
}

void printk_nmi_direct_exit(void)
{
	this_cpu_and(printk_context, ~PRINTK_NMI_DIRECT_CONTEXT_MASK);
}
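
/*
 * A sketch of the intended usage, modeled on the NMI backtrace code
 * (not a verbatim call site):
 *
 *	printk_nmi_direct_enter();
 *	dump_stack();	(many lines that we prefer in the main logbuf)
 *	printk_nmi_direct_exit();
 */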

#else

static __printf(1, 0) int vprintk_nmi(const char *fmt, va_list args)
{
	return 0;
}

#endif /* CONFIG_PRINTK_NMI */

/*
 * Lock-less printk(), to avoid deadlocks should the printk() recurse
 * into itself. It uses a per-CPU buffer to store the message, just like
 * the NMI code does.
 */
static __printf(1, 0) int vprintk_safe(const char *fmt, va_list args)
{
	struct printk_safe_seq_buf *s = this_cpu_ptr(&safe_print_seq);

	return printk_safe_log_store(s, fmt, args);
}

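/*
 * These helpers are normally used via the printk_safe_enter_irqsave() and
 * printk_safe_exit_irqrestore() wrappers (see internal.h), e.g. around
 * sections that take logbuf_lock.
 */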
/* Can be preempted by NMI. */
void __printk_safe_enter(void)
{
	this_cpu_inc(printk_context);
}

/* Can be preempted by NMI. */
void __printk_safe_exit(void)
{
	this_cpu_dec(printk_context);
}

__printf(1, 0) int vprintk_func(const char *fmt, va_list args)
{
	/*
	 * Try to use the main logbuf even in NMI. But avoid calling console
	 * drivers that might have their own locks.
	 */
	if ((this_cpu_read(printk_context) & PRINTK_NMI_DIRECT_CONTEXT_MASK) &&
	    raw_spin_trylock(&logbuf_lock)) {
		int len;

		len = vprintk_store(0, LOGLEVEL_DEFAULT, NULL, 0, fmt, args);
		raw_spin_unlock(&logbuf_lock);
		defer_console_output();
		return len;
	}

	/* Use an extra buffer in NMI when logbuf_lock is taken or in safe mode. */
	if (this_cpu_read(printk_context) & PRINTK_NMI_CONTEXT_MASK)
		return vprintk_nmi(fmt, args);

	/* Use an extra buffer to prevent a recursion deadlock in safe mode. */
	if (this_cpu_read(printk_context) & PRINTK_SAFE_CONTEXT_MASK)
		return vprintk_safe(fmt, args);

	/* No obstacles. */
	return vprintk_default(fmt, args);
}

void __init printk_safe_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		struct printk_safe_seq_buf *s;

		s = &per_cpu(safe_print_seq, cpu);
		init_irq_work(&s->work, __printk_safe_flush);

#ifdef CONFIG_PRINTK_NMI
		s = &per_cpu(nmi_print_seq, cpu);
		init_irq_work(&s->work, __printk_safe_flush);
#endif
	}

	/*
	 * Make sure that the IRQ works are initialized before enabling;
	 * pairs with the smp_rmb() in queue_flush_work().
	 */
	smp_wmb();
	printk_safe_irq_ready = 1;

	/* Flush pending messages that did not have scheduled IRQ works. */
	printk_safe_flush();
}