/*
 * Handle hardware traps and faults.
 */

#include <linux/spinlock.h>
#include <linux/kprobes.h>
#include <linux/kdebug.h>
#include <linux/sched/debug.h>
#include <linux/nmi.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/hardirq.h>
#include <linux/ratelimit.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/atomic.h>
#include <linux/sched/clock.h>

#include <asm/cpu_entry_area.h>
#include <asm/traps.h>
#include <asm/mach_traps.h>
#include <asm/nmi.h>
#include <asm/x86_init.h>
#include <asm/reboot.h>
#include <asm/cache.h>
#include <asm/nospec-branch.h>
#include <asm/sev.h>

#define CREATE_TRACE_POINTS
#include <trace/events/nmi.h>

struct nmi_desc {
	raw_spinlock_t lock;
	struct list_head head;
};

static struct nmi_desc nmi_desc[NMI_MAX] =
{
	{
		.lock = __RAW_SPIN_LOCK_UNLOCKED(&nmi_desc[0].lock),
		.head = LIST_HEAD_INIT(nmi_desc[0].head),
	},
	{
		.lock = __RAW_SPIN_LOCK_UNLOCKED(&nmi_desc[1].lock),
		.head = LIST_HEAD_INIT(nmi_desc[1].head),
	},
	{
		.lock = __RAW_SPIN_LOCK_UNLOCKED(&nmi_desc[2].lock),
		.head = LIST_HEAD_INIT(nmi_desc[2].head),
	},
	{
		.lock = __RAW_SPIN_LOCK_UNLOCKED(&nmi_desc[3].lock),
		.head = LIST_HEAD_INIT(nmi_desc[3].head),
	},
};

struct nmi_stats {
	unsigned int normal;
	unsigned int unknown;
	unsigned int external;
	unsigned int swallow;
};

static DEFINE_PER_CPU(struct nmi_stats, nmi_stats);

static int ignore_nmis __read_mostly;

int unknown_nmi_panic;

/*
 * Prevent the NMI reason port (0x61) from being accessed simultaneously;
 * this lock can be acquired during NMI processing.
 */
static DEFINE_RAW_SPINLOCK(nmi_reason_lock);

static int __init setup_unknown_nmi_panic(char *str)
{
	unknown_nmi_panic = 1;
	return 1;
}
__setup("unknown_nmi_panic", setup_unknown_nmi_panic);

#define nmi_to_desc(type) (&nmi_desc[type])

static u64 nmi_longest_ns = 1 * NSEC_PER_MSEC;

static int __init nmi_warning_debugfs(void)
{
	debugfs_create_u64("nmi_longest_ns", 0644,
			arch_debugfs_dir, &nmi_longest_ns);
	return 0;
}
fs_initcall(nmi_warning_debugfs);
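
/*
 * Example (not part of the original file): the warning threshold above is
 * tunable at runtime through debugfs.  Assuming debugfs is mounted at the
 * usual /sys/kernel/debug and arch_debugfs_dir points at the "x86"
 * directory, something like the following raises the threshold to 2 ms:
 *
 *	# cat /sys/kernel/debug/x86/nmi_longest_ns
 *	1000000
 *	# echo 2000000 > /sys/kernel/debug/x86/nmi_longest_ns
 *
 * A handler is only reported once its runtime exceeds both this value and
 * its own previous maximum, so raising it quiets the ratelimited warnings.
 */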

static void nmi_check_duration(struct nmiaction *action, u64 duration)
{
	int remainder_ns, decimal_msecs;

	if (duration < nmi_longest_ns || duration < action->max_duration)
		return;

	action->max_duration = duration;

	remainder_ns = do_div(duration, (1000 * 1000));
	decimal_msecs = remainder_ns / 1000;

	printk_ratelimited(KERN_INFO
		"INFO: NMI handler (%ps) took too long to run: %lld.%03d msecs\n",
		action->handler, duration, decimal_msecs);
}

static int nmi_handle(unsigned int type, struct pt_regs *regs)
{
	struct nmi_desc *desc = nmi_to_desc(type);
	struct nmiaction *a;
	int handled = 0;

	rcu_read_lock();

	/*
	 * NMIs are edge-triggered, so if enough of them arrive concurrently
	 * some can be lost because only one can be latched at any given
	 * time.  Walk the whole list so every handler gets a chance to
	 * claim an event.
	 */
	list_for_each_entry_rcu(a, &desc->head, list) {
		int thishandled;
		u64 delta;

		delta = sched_clock();
		thishandled = a->handler(type, regs);
		handled += thishandled;
		delta = sched_clock() - delta;
		trace_nmi_handler(a->handler, (int)delta, thishandled);

		nmi_check_duration(a, delta);
	}

	rcu_read_unlock();

	/* return total number of NMI events handled */
	return handled;
}
NOKPROBE_SYMBOL(nmi_handle);

int __register_nmi_handler(unsigned int type, struct nmiaction *action)
{
	struct nmi_desc *desc = nmi_to_desc(type);
	unsigned long flags;

	if (!action->handler)
		return -EINVAL;

	raw_spin_lock_irqsave(&desc->lock, flags);

	/*
	 * Warn if there are multiple registrations on the internal NMI
	 * handler call chains (SERR and IO_CHECK); those are expected to
	 * have at most one handler each.
	 */
	WARN_ON_ONCE(type == NMI_SERR && !list_empty(&desc->head));
	WARN_ON_ONCE(type == NMI_IO_CHECK && !list_empty(&desc->head));

	/*
	 * Some handlers need to be executed first, otherwise a fake
	 * event confuses the handlers registered after them (kdump
	 * uses this flag).
	 */
	if (action->flags & NMI_FLAG_FIRST)
		list_add_rcu(&action->list, &desc->head);
	else
		list_add_tail_rcu(&action->list, &desc->head);

	raw_spin_unlock_irqrestore(&desc->lock, flags);
	return 0;
}
EXPORT_SYMBOL(__register_nmi_handler);
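
/*
 * Usage sketch (not part of the original file): callers normally go through
 * the register_nmi_handler() wrapper from <asm/nmi.h>, which builds the
 * struct nmiaction and calls __register_nmi_handler().  A minimal,
 * hypothetical handler might look roughly like this:
 *
 *	static int my_nmi_handler(unsigned int type, struct pt_regs *regs)
 *	{
 *		if (!my_device_raised_nmi())	// hypothetical check
 *			return NMI_DONE;	// not ours, let others look
 *		my_device_ack_nmi();		// hypothetical acknowledge
 *		return NMI_HANDLED;
 *	}
 *
 *	register_nmi_handler(NMI_LOCAL, my_nmi_handler, 0, "my_nmi");
 *
 * Handlers run in NMI context: no sleeping, no regular locks, and they
 * should return quickly (see nmi_check_duration() above).
 */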

void unregister_nmi_handler(unsigned int type, const char *name)
{
	struct nmi_desc *desc = nmi_to_desc(type);
	struct nmiaction *n;
	unsigned long flags;

	raw_spin_lock_irqsave(&desc->lock, flags);

	list_for_each_entry_rcu(n, &desc->head, list) {
		/*
		 * The name passed in at registration time is used as the
		 * lookup key here.
		 */
		if (!strcmp(n->name, name)) {
			WARN(in_nmi(),
				"Trying to free NMI (%s) from NMI context!\n", n->name);
			list_del_rcu(&n->list);
			break;
		}
	}

	raw_spin_unlock_irqrestore(&desc->lock, flags);
	synchronize_rcu();
}
EXPORT_SYMBOL_GPL(unregister_nmi_handler);
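
/*
 * Teardown sketch (not part of the original file), matching the hypothetical
 * registration above: the same name string is the lookup key, and the
 * synchronize_rcu() in unregister_nmi_handler() guarantees no CPU is still
 * inside the handler once it returns, so the handler's data may be freed:
 *
 *	unregister_nmi_handler(NMI_LOCAL, "my_nmi");
 *
 * This must not be called from NMI context (see the WARN above).
 */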

static void
pci_serr_error(unsigned char reason, struct pt_regs *regs)
{
	/* Check whether anyone registered against this type of error. */
	if (nmi_handle(NMI_SERR, regs))
		return;

	pr_emerg("NMI: PCI system error (SERR) for reason %02x on CPU %d.\n",
		 reason, smp_processor_id());

	if (panic_on_unrecovered_nmi)
		nmi_panic(regs, "NMI: Not continuing");

	pr_emerg("Dazed and confused, but trying to continue\n");

	/* Clear and disable the PCI SERR error line. */
	reason = (reason & NMI_REASON_CLEAR_MASK) | NMI_REASON_CLEAR_SERR;
	outb(reason, NMI_REASON_PORT);
}
NOKPROBE_SYMBOL(pci_serr_error);

static void
io_check_error(unsigned char reason, struct pt_regs *regs)
{
	unsigned long i;

	/* Check whether anyone registered against this type of error. */
	if (nmi_handle(NMI_IO_CHECK, regs))
		return;

	pr_emerg(
	"NMI: IOCK error (debug interrupt?) for reason %02x on CPU %d.\n",
		 reason, smp_processor_id());
	show_regs(regs);

	if (panic_on_io_nmi) {
		nmi_panic(regs, "NMI IOCK error: Not continuing");

		/*
		 * If we end up here, we received an NMI while processing
		 * panic().  Simply return without delaying and without
		 * re-enabling NMIs.
		 */
		return;
	}

	/* Re-enable the IOCK line and wait for a couple of seconds. */
	reason = (reason & NMI_REASON_CLEAR_MASK) | NMI_REASON_CLEAR_IOCHK;
	outb(reason, NMI_REASON_PORT);

	i = 20000;
	while (--i) {
		touch_nmi_watchdog();
		udelay(100);
	}

	reason &= ~NMI_REASON_CLEAR_IOCHK;
	outb(reason, NMI_REASON_PORT);
}
NOKPROBE_SYMBOL(io_check_error);

static void
unknown_nmi_error(unsigned char reason, struct pt_regs *regs)
{
	int handled;

	/*
	 * Back-to-back NMI detection is handled one level up in
	 * default_do_nmi(); here we only give any 'unknown' handlers a
	 * chance to claim the event.  Note that only the first handler
	 * able to identify the source is useful on this chain.
	 */
	handled = nmi_handle(NMI_UNKNOWN, regs);
	if (handled) {
		__this_cpu_add(nmi_stats.unknown, handled);
		return;
	}

	__this_cpu_add(nmi_stats.unknown, 1);

	pr_emerg("Uhhuh. NMI received for unknown reason %02x on CPU %d.\n",
		 reason, smp_processor_id());

	pr_emerg("Do you have a strange power saving mode enabled?\n");
	if (unknown_nmi_panic || panic_on_unrecovered_nmi)
		nmi_panic(regs, "NMI: Not continuing");

	pr_emerg("Dazed and confused, but trying to continue\n");
}
NOKPROBE_SYMBOL(unknown_nmi_error);

static DEFINE_PER_CPU(bool, swallow_nmi);
static DEFINE_PER_CPU(unsigned long, last_nmi_rip);

static noinstr void default_do_nmi(struct pt_regs *regs)
{
	unsigned char reason = 0;
	int handled;
	bool b2b = false;

	/*
	 * CPU-specific NMIs must be processed before non-CPU-specific ones,
	 * otherwise they may be lost: a CPU-specific NMI cannot be detected
	 * or processed on other CPUs.
	 */

	/*
	 * Back-to-back NMIs typically interrupt at the same instruction
	 * pointer (anything beyond two is dropped because NMIs are
	 * edge-triggered).  If the RIP matches the previous NMI's, treat
	 * this as the second half of a back-to-back pair and allow
	 * swallowing; otherwise reset the swallow logic.
	 */
	if (regs->ip == __this_cpu_read(last_nmi_rip))
		b2b = true;
	else
		__this_cpu_write(swallow_nmi, false);

	__this_cpu_write(last_nmi_rip, regs->ip);

	instrumentation_begin();

	handled = nmi_handle(NMI_LOCAL, regs);
	__this_cpu_add(nmi_stats.normal, handled);
	if (handled) {
		/*
		 * When a handler claims more than one event, one of those
		 * events may already have its NMI latched and will show up
		 * as an 'unknown' NMI on the next iteration.  Flag that the
		 * next back-to-back NMI may be swallowed instead of being
		 * reported as unknown.
		 */
		if (handled > 1)
			__this_cpu_write(swallow_nmi, true);
		goto out;
	}

	/*
	 * Non-CPU-specific NMI: the reason port is shared, so serialize
	 * access with nmi_reason_lock.  If another CPU already holds the
	 * lock it may be running panic/crash code; while spinning, check
	 * whether this CPU was asked (via NMI IPI) to run the crash
	 * callback and do so instead of deadlocking.
	 */
	while (!raw_spin_trylock(&nmi_reason_lock)) {
		run_crash_ipi_callback(regs);
		cpu_relax();
	}

	reason = x86_platform.get_nmi_reason();

	if (reason & NMI_REASON_MASK) {
		if (reason & NMI_REASON_SERR)
			pci_serr_error(reason, regs);
		else if (reason & NMI_REASON_IOCHK)
			io_check_error(reason, regs);
#ifdef CONFIG_X86_32
		/*
		 * Reassert NMI in case it became active meanwhile,
		 * as it is edge-triggered:
		 */
		reassert_nmi();
#endif
		__this_cpu_add(nmi_stats.external, 1);
		raw_spin_unlock(&nmi_reason_lock);
		goto out;
	}
	raw_spin_unlock(&nmi_reason_lock);

	/*
	 * Nothing claimed the NMI and the platform reports no external
	 * reason, so this is an 'unknown' NMI.
	 *
	 * Because only one NMI can be latched at a time, a handler may have
	 * already processed this event during a previous NMI; that is what
	 * the swallow_nmi flag set above records.  If this is the second
	 * half of a back-to-back NMI and swallowing was flagged, assume the
	 * event was already handled and silently drop it instead of
	 * reporting an unknown NMI.
	 *
	 * This heuristic can, in rare cases, eat a genuine unknown NMI that
	 * happens to coincide with a multi-event handler run, but it is the
	 * best that can be done with edge-triggered NMIs.
	 */
	if (b2b && __this_cpu_read(swallow_nmi))
		__this_cpu_add(nmi_stats.swallow, 1);
	else
		unknown_nmi_error(reason, regs);

out:
	instrumentation_end();
}

/*
 * NMIs can hit breakpoints or take page faults, and the resulting IRET
 * re-enables NMIs, so NMIs can effectively nest.  To cope with a nested
 * NMI arriving while one is already being handled, each CPU tracks one of
 * three states:
 *
 *	1) NMI_NOT_RUNNING: no NMI is in progress.
 *	2) NMI_EXECUTING:   an NMI is currently being handled.
 *	3) NMI_LATCHED:     another NMI arrived while one was executing and
 *	                    must be replayed when the first one finishes.
 *
 * A nested NMI is not handled immediately; the state is set to NMI_LATCHED
 * and the nested entry returns.  On exit, the outer NMI decrements the
 * state: if the result is not NMI_NOT_RUNNING, a latched NMI is pending
 * and the handler jumps back to nmi_restart to process it.  Only one NMI
 * can be latched, so anything beyond that is dropped, matching the
 * hardware's edge-triggered behaviour.
 *
 * CR2 and DR7 are saved and restored around the handler because a nested
 * page fault or breakpoint inside the NMI would otherwise clobber the
 * interrupted context's values.
 */
enum nmi_states {
	NMI_NOT_RUNNING = 0,
	NMI_EXECUTING,
	NMI_LATCHED,
};
static DEFINE_PER_CPU(enum nmi_states, nmi_state);
static DEFINE_PER_CPU(unsigned long, nmi_cr2);
static DEFINE_PER_CPU(unsigned long, nmi_dr7);

DEFINE_IDTENTRY_RAW(exc_nmi)
{
	irqentry_state_t irq_state;

	/*
	 * Re-enable NMIs right here when running as an SEV-ES guest. This
	 * might cause nested NMIs, but those can be handled safely.
	 */
	sev_es_nmi_complete();

	if (IS_ENABLED(CONFIG_SMP) && arch_cpu_is_offline(smp_processor_id()))
		return;

	if (this_cpu_read(nmi_state) != NMI_NOT_RUNNING) {
		this_cpu_write(nmi_state, NMI_LATCHED);
		return;
	}
	this_cpu_write(nmi_state, NMI_EXECUTING);
	this_cpu_write(nmi_cr2, read_cr2());
nmi_restart:

	/*
	 * Needs to happen before DR7 is accessed, because the hypervisor can
	 * intercept DR7 reads/writes, turning those into #VC exceptions.
	 */
	sev_es_ist_enter(regs);

	this_cpu_write(nmi_dr7, local_db_save());

	irq_state = irqentry_nmi_enter(regs);

	inc_irq_stat(__nmi_count);

	if (!ignore_nmis)
		default_do_nmi(regs);

	irqentry_nmi_exit(regs, irq_state);

	local_db_restore(this_cpu_read(nmi_dr7));

	sev_es_ist_exit();

	if (unlikely(this_cpu_read(nmi_cr2) != read_cr2()))
		write_cr2(this_cpu_read(nmi_cr2));
	if (this_cpu_dec_return(nmi_state))
		goto nmi_restart;

	if (user_mode(regs))
		mds_user_clear_cpu_buffers();
}

#if defined(CONFIG_X86_64) && IS_ENABLED(CONFIG_KVM_INTEL)
DEFINE_IDTENTRY_RAW(exc_nmi_noist)
{
	exc_nmi(regs);
}
#endif
#if IS_MODULE(CONFIG_KVM_INTEL)
EXPORT_SYMBOL_GPL(asm_exc_nmi_noist);
#endif

void stop_nmi(void)
{
	ignore_nmis++;
}

void restart_nmi(void)
{
	ignore_nmis--;
}

/* Reset the back-to-back NMI detection logic. */
void local_touch_nmi(void)
{
	__this_cpu_write(last_nmi_rip, 0);
}
EXPORT_SYMBOL_GPL(local_touch_nmi);