// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * printk_safe.c - Safe printk for printk-deadlock-prone contexts
 */
#include <linux/preempt.h>
#include <linux/spinlock.h>
#include <linux/debug_locks.h>
#include <linux/smp.h>
#include <linux/cpumask.h>
#include <linux/irq_work.h>
#include <linux/printk.h>

#include "internal.h"
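
/*
 * printk() cannot take logbuf_lock in NMI context. Instead, it uses an
 * alternative implementation that temporarily stores the strings in a
 * per-CPU buffer. The content of the buffer is flushed into the main
 * ring buffer later, via IRQ work.
 *
 * The alternative implementation is chosen transparently by examining
 * the current printk() context mask stored in the @printk_context
 * per-CPU variable.
 *
 * The implementation also allows flushing the strings from another CPU.
 * There are situations when we want to make sure that all buffers were
 * handled, or when IRQs are blocked.
 */
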
static int printk_safe_irq_ready __read_mostly;

#define SAFE_LOG_BUF_LEN ((1 << CONFIG_PRINTK_SAFE_LOG_BUF_SHIFT) -	\
				sizeof(atomic_t) -			\
				sizeof(atomic_t) -			\
				sizeof(struct irq_work))

struct printk_safe_seq_buf {
	atomic_t	len;		/* length of written data */
	atomic_t	message_lost;
	struct irq_work	work;		/* IRQ work that flushes the buffer */
	unsigned char	buffer[SAFE_LOG_BUF_LEN];
};

static DEFINE_PER_CPU(struct printk_safe_seq_buf, safe_print_seq);
static DEFINE_PER_CPU(int, printk_context);

#ifdef CONFIG_PRINTK_NMI
static DEFINE_PER_CPU(struct printk_safe_seq_buf, nmi_print_seq);
#endif
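
/* Get flushed in a safer context. */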
static void queue_flush_work(struct printk_safe_seq_buf *s)
{
	if (printk_safe_irq_ready)
		irq_work_queue(&s->work);
}
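
/*
 * Add a message to the per-CPU, context-dependent buffer. NMI and
 * printk-safe contexts have dedicated buffers, because otherwise
 * printk-safe preempted by an NMI printk would overwrite the NMI
 * messages.
 *
 * The messages are flushed from IRQ work (or from panic()), possibly
 * from another CPU, concurrently with printk_safe_log_store(). Should
 * this happen, printk_safe_log_store() notices the buffer->len mismatch
 * and repeats the write.
 */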
static __printf(2, 0) int printk_safe_log_store(struct printk_safe_seq_buf *s,
						const char *fmt, va_list args)
{
	int add;
	size_t len;
	va_list ap;

again:
	len = atomic_read(&s->len);

	/* The trailing '\0' is not counted into len. */
	if (len >= sizeof(s->buffer) - 1) {
		atomic_inc(&s->message_lost);
		queue_flush_work(s);
		return 0;
	}

	/*
	 * Make sure that all old data have been read before the buffer
	 * was reset. This is not needed when we just append data.
	 */
	if (!len)
		smp_rmb();

	va_copy(ap, args);
	add = vscnprintf(s->buffer + len, sizeof(s->buffer) - len, fmt, ap);
	va_end(ap);
	if (!add)
		return 0;

	/*
	 * Do it once again if the buffer has been flushed in the meantime.
	 * Note that atomic_cmpxchg() is an implicit memory barrier that
	 * makes sure that the data were written before updating s->len.
	 */
	if (atomic_cmpxchg(&s->len, len, len + add) != len)
		goto again;

	queue_flush_work(s);
	return add;
}

static inline void printk_safe_flush_line(const char *text, int len)
{
	/*
	 * Avoid any console driver calls from here, because we may be
	 * in NMI or printk_safe context (when in panic). The messages
	 * must go only into the ring buffer at this stage. Consoles will
	 * get explicitly called later, when a crashdump is not generated.
	 */
	printk_deferred("%.*s", len, text);
}

/* Flush the temporary buffer line by line. */
static int printk_safe_flush_buffer(const char *start, size_t len)
{
	const char *c, *end;
	bool header;

	c = start;
	end = start + len;
	header = true;

	/* Print line by line. */
	while (c < end) {
		if (*c == '\n') {
			printk_safe_flush_line(start, c - start + 1);
			start = ++c;
			header = true;
			continue;
		}

		/* Handle continuous lines or a missing new line. */
		if ((c + 1 < end) && printk_get_level(c)) {
			if (header) {
				c = printk_skip_level(c);
				continue;
			}

			printk_safe_flush_line(start, c - start);
			start = c++;
			header = true;
			continue;
		}

		header = false;
		c++;
	}

	/* Check if there was a partial line. Ignore a pure header. */
	if (start < end && !header) {
		static const char newline[] = KERN_CONT "\n";

		printk_safe_flush_line(start, end - start);
		printk_safe_flush_line(newline, strlen(newline));
	}

	return len;
}

static void report_message_lost(struct printk_safe_seq_buf *s)
{
	int lost = atomic_xchg(&s->message_lost, 0);

	if (lost)
		printk_deferred("Lost %d message(s)!\n", lost);
}
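
/*
 * Flush data from the associated per-CPU buffer. The function
 * can be called either via IRQ work or independently.
 */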
static void __printk_safe_flush(struct irq_work *work)
{
	static raw_spinlock_t read_lock =
		__RAW_SPIN_LOCK_INITIALIZER(read_lock);
	struct printk_safe_seq_buf *s =
		container_of(work, struct printk_safe_seq_buf, work);
	unsigned long flags;
	size_t len;
	int i;

	/*
	 * The lock has two functions. First, one reader has to flush all
	 * available messages to make the lockless synchronization with
	 * writers easier. Second, we do not want to mix messages from
	 * different CPUs. This is especially important when printing
	 * a backtrace.
	 */
	raw_spin_lock_irqsave(&read_lock, flags);

	i = 0;
more:
	len = atomic_read(&s->len);

	/*
	 * This is just a paranoid check that nobody has manipulated
	 * the buffer in an unexpected way. If we printed something then
	 * @len must only increase. Also it should never overflow the
	 * buffer size.
	 */
	if ((i && i >= len) || len > sizeof(s->buffer)) {
		const char *msg = "printk_safe_flush: internal error\n";

		printk_safe_flush_line(msg, strlen(msg));
		len = 0;
	}

	if (!len)
		goto out; /* Someone else has already flushed the buffer. */

	/* Make sure that data has been written up to the @len. */
	smp_rmb();
	i += printk_safe_flush_buffer(s->buffer + i, len - i);

	/*
	 * Check that nothing has got added in the meantime and truncate
	 * the buffer. Note that atomic_cmpxchg() is an implicit memory
	 * barrier that makes sure that the data were copied before
	 * updating s->len.
	 */
	if (atomic_cmpxchg(&s->len, len, 0) != len)
		goto more;

out:
	report_message_lost(s);
	raw_spin_unlock_irqrestore(&read_lock, flags);
}
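
/**
 * printk_safe_flush - flush all per-CPU NMI and printk-safe buffers.
 *
 * The buffers are flushed automatically via IRQ work. This function
 * is useful only when someone wants to be sure that all buffers have
 * been flushed at some point.
 */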
void printk_safe_flush(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
#ifdef CONFIG_PRINTK_NMI
		__printk_safe_flush(&per_cpu(nmi_print_seq, cpu).work);
#endif
		__printk_safe_flush(&per_cpu(safe_print_seq, cpu).work);
	}
}
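
/**
 * printk_safe_flush_on_panic - flush all per-CPU buffers when the system
 *	goes down.
 *
 * Similar to printk_safe_flush() but it can be called even in NMI context
 * when the system goes down. It makes a best effort to get the NMI
 * messages into the main ring buffer.
 *
 * Note that it could try harder when there is only one CPU online.
 */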
void printk_safe_flush_on_panic(void)
{
	/*
	 * Make sure that we could access the main ring buffer.
	 * Do not risk a double release when more CPUs are up.
	 */
	if (raw_spin_is_locked(&logbuf_lock)) {
		if (num_online_cpus() > 1)
			return;

		debug_locks_off();
		raw_spin_lock_init(&logbuf_lock);
	}

	printk_safe_flush();
}

#ifdef CONFIG_PRINTK_NMI
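/*
 * Safe printk() for NMI context. It uses a per-CPU buffer to
 * store the message. NMIs are not nested, so there is always only
 * one writer running. But the buffer might get flushed from another
 * CPU, so we need to be careful.
 */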
static __printf(1, 0) int vprintk_nmi(const char *fmt, va_list args)
{
	struct printk_safe_seq_buf *s = this_cpu_ptr(&nmi_print_seq);

	return printk_safe_log_store(s, fmt, args);
}

void notrace printk_nmi_enter(void)
{
	this_cpu_or(printk_context, PRINTK_NMI_CONTEXT_MASK);
}

void notrace printk_nmi_exit(void)
{
	this_cpu_and(printk_context, ~PRINTK_NMI_CONTEXT_MASK);
}
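
/*
 * Marks a code section that might produce many messages in NMI context,
 * where the risk of losing them is more critical than eventual
 * reordering.
 *
 * It has an effect only when called in NMI context. Then printk()
 * tries to store the messages into the main logbuf directly and uses
 * the per-CPU buffers only as a fallback when the lock is not available.
 */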
void printk_nmi_direct_enter(void)
{
	if (this_cpu_read(printk_context) & PRINTK_NMI_CONTEXT_MASK)
		this_cpu_or(printk_context, PRINTK_NMI_DIRECT_CONTEXT_MASK);
}

void printk_nmi_direct_exit(void)
{
	this_cpu_and(printk_context, ~PRINTK_NMI_DIRECT_CONTEXT_MASK);
}

#else

static __printf(1, 0) int vprintk_nmi(const char *fmt, va_list args)
{
	return 0;
}

#endif /* CONFIG_PRINTK_NMI */
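
/*
 * Lock-less printk(), to avoid deadlocks should the printk() recurse
 * into itself. It uses a per-CPU buffer to store the message, just like
 * NMI.
 */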
static __printf(1, 0) int vprintk_safe(const char *fmt, va_list args)
{
	struct printk_safe_seq_buf *s = this_cpu_ptr(&safe_print_seq);

	return printk_safe_log_store(s, fmt, args);
}

/* Can be preempted by NMI. */
void __printk_safe_enter(void)
{
	this_cpu_inc(printk_context);
}

/* Can be preempted by NMI. */
void __printk_safe_exit(void)
{
	this_cpu_dec(printk_context);
}

__printf(1, 0) int vprintk_func(const char *fmt, va_list args)
{
	/*
	 * Try to use the main logbuf even in NMI. But avoid calling console
	 * drivers that might have their own locks.
	 */
	if ((this_cpu_read(printk_context) & PRINTK_NMI_DIRECT_CONTEXT_MASK) &&
	    raw_spin_trylock(&logbuf_lock)) {
		int len;

		len = vprintk_store(0, LOGLEVEL_DEFAULT, NULL, 0, fmt, args);
		raw_spin_unlock(&logbuf_lock);
		defer_console_output();
		return len;
	}

	/* Use an extra buffer in NMI when logbuf_lock is taken or in safe mode. */
	if (this_cpu_read(printk_context) & PRINTK_NMI_CONTEXT_MASK)
		return vprintk_nmi(fmt, args);

	/* Use an extra buffer to prevent a recursion deadlock in safe mode. */
	if (this_cpu_read(printk_context) & PRINTK_SAFE_CONTEXT_MASK)
		return vprintk_safe(fmt, args);

	/* No obstacles. */
	return vprintk_default(fmt, args);
}

void __init printk_safe_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		struct printk_safe_seq_buf *s;

		s = &per_cpu(safe_print_seq, cpu);
		init_irq_work(&s->work, __printk_safe_flush);

#ifdef CONFIG_PRINTK_NMI
		s = &per_cpu(nmi_print_seq, cpu);
		init_irq_work(&s->work, __printk_safe_flush);
#endif
	}

	/*
	 * In the highly unlikely event that an NMI were to trigger at this
	 * moment, make sure the IRQ work is set up before the readiness
	 * flag is set.
	 */
	barrier();
	printk_safe_irq_ready = 1;

	/* Flush pending messages that did not have scheduled IRQ works. */
	printk_safe_flush();
}