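/*
 * Support for MMIO probes (kmmio).
 *
 * A traced page is "armed" by clearing its present bit so that any access
 * to it faults into kmmio_handler(). The handler runs the probe's
 * pre_handler, makes the page present again and single-steps the faulting
 * instruction; post_kmmio_handler() then runs the post_handler and re-arms
 * the page.
 */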
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/list.h>
#include <linux/rculist.h>
#include <linux/spinlock.h>
#include <linux/hash.h>
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/uaccess.h>
#include <linux/ptrace.h>
#include <linux/preempt.h>
#include <linux/percpu.h>
#include <linux/kdebug.h>
#include <linux/mutex.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/mmiotrace.h>

#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/debugreg.h>

#define KMMIO_PAGE_HASH_BITS 4
#define KMMIO_PAGE_TABLE_SIZE (1 << KMMIO_PAGE_HASH_BITS)
struct kmmio_fault_page {
	struct list_head list;
	struct kmmio_fault_page *release_next;
	unsigned long addr;		/* address of the page being traced */
	pteval_t old_presence;		/* pte/pmd value saved when the page was armed */
	bool armed;
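
	/*
	 * Number of probes currently registered against this page.
	 * When it drops to zero the page is disarmed and may be freed.
	 * Protected by kmmio_lock while linked into kmmio_page_table.
	 */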
	int count;

	bool scheduled_for_release;
};

struct kmmio_delayed_release {
	struct rcu_head rcu;
	struct kmmio_fault_page *release_list;
};

struct kmmio_context {
	struct kmmio_fault_page *fpage;
	struct kmmio_probe *probe;
	unsigned long saved_flags;
	unsigned long addr;
	int active;
};

static DEFINE_SPINLOCK(kmmio_lock);
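
/* Protected by kmmio_lock */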
unsigned int kmmio_count;
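
/* Read-protected by RCU, write-protected by kmmio_lock. */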
static struct list_head kmmio_page_table[KMMIO_PAGE_TABLE_SIZE];
static LIST_HEAD(kmmio_probes);

static struct list_head *kmmio_page_list(unsigned long addr)
{
	unsigned int l;
	pte_t *pte = lookup_address(addr, &l);

	if (!pte)
		return NULL;
	addr &= page_level_mask(l);

	return &kmmio_page_table[hash_long(addr, KMMIO_PAGE_HASH_BITS)];
}
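
/* Accessed per-cpu. */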
static DEFINE_PER_CPU(struct kmmio_context, kmmio_ctx);
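
/*
 * Return the kmmio probe covering this address, if any.
 * Caller must hold the RCU read lock.
 */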
static struct kmmio_probe *get_kmmio_probe(unsigned long addr)
{
	struct kmmio_probe *p;
	list_for_each_entry_rcu(p, &kmmio_probes, list) {
		if (addr >= p->addr && addr < (p->addr + p->len))
			return p;
	}
	return NULL;
}
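
/*
 * Return the kmmio_fault_page for the page containing this address, if any.
 * Caller must hold the RCU read lock.
 */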
static struct kmmio_fault_page *get_kmmio_fault_page(unsigned long addr)
{
	struct list_head *head;
	struct kmmio_fault_page *f;
	unsigned int l;
	pte_t *pte = lookup_address(addr, &l);

	if (!pte)
		return NULL;
	addr &= page_level_mask(l);
	head = kmmio_page_list(addr);
	list_for_each_entry_rcu(f, head, list) {
		if (f->addr == addr)
			return f;
	}
	return NULL;
}
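
/*
 * When clear is true, save the pmd and mark it not present; when false,
 * restore the value saved in *old.
 */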
static void clear_pmd_presence(pmd_t *pmd, bool clear, pmdval_t *old)
{
	pmd_t new_pmd;
	pmdval_t v = pmd_val(*pmd);
	if (clear) {
		*old = v;
		new_pmd = pmd_mkinvalid(*pmd);
	} else {
		/* Presume this has been called with clear==true before. */
		new_pmd = __pmd(*old);
	}
	set_pmd(pmd, new_pmd);
}

static void clear_pte_presence(pte_t *pte, bool clear, pteval_t *old)
{
	pteval_t v = pte_val(*pte);
	if (clear) {
		*old = v;
		/* Mark the page not present. */
		pte_clear(&init_mm, 0, pte);
	} else {
		/* Presume this has been called with clear==true before. */
		set_pte_atomic(pte, __pte(*old));
	}
}
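
/*
 * Clear or restore the presence of the page backing f->addr, dispatching on
 * the page level, and flush its TLB entry.
 */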
static int clear_page_presence(struct kmmio_fault_page *f, bool clear)
{
	unsigned int level;
	pte_t *pte = lookup_address(f->addr, &level);

	if (!pte) {
		pr_err("no pte for addr 0x%08lx\n", f->addr);
		return -1;
	}

	switch (level) {
	case PG_LEVEL_2M:
		clear_pmd_presence((pmd_t *)pte, clear, &f->old_presence);
		break;
	case PG_LEVEL_4K:
		clear_pte_presence(pte, clear, &f->old_presence);
		break;
	default:
		pr_err("unexpected page level 0x%x.\n", level);
		return -1;
	}

	flush_tlb_one_kernel(f->addr);
	return 0;
}
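
/*
 * Mark the given page as not present. Access to it will trigger a fault.
 *
 * The caller must hold the RCU read lock and guarantee that the struct will
 * neither be freed nor reused for another page while it is armed.
 */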
static int arm_kmmio_fault_page(struct kmmio_fault_page *f)
{
	int ret;
	WARN_ONCE(f->armed, KERN_ERR pr_fmt("kmmio page already armed.\n"));
	if (f->armed) {
		pr_warn("double-arm: addr 0x%08lx, ref %d, old %d\n",
			f->addr, f->count, !!f->old_presence);
	}
	ret = clear_page_presence(f, true);
	WARN_ONCE(ret < 0, KERN_ERR pr_fmt("arming at 0x%08lx failed.\n"),
		  f->addr);
	f->armed = true;
	return ret;
}
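
/* Restore the given page to its saved presence state. */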
static void disarm_kmmio_fault_page(struct kmmio_fault_page *f)
{
	int ret = clear_page_presence(f, false);
	WARN_ONCE(ret < 0,
		  KERN_ERR "kmmio disarming at 0x%08lx failed.\n", f->addr);
	f->armed = false;
}
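
/*
 * Called from the page fault handler for a fault on an armed page.
 *
 * This may run in interrupt context or in the middle of a process switch,
 * so no locks may be taken and nothing may sleep. Local interrupts are
 * disabled, so preemption cannot happen.
 *
 * Returns 1 if the fault was handled and single-stepping has been set up;
 * any other value means the fault is not a kmmio hit.
 */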
int kmmio_handler(struct pt_regs *regs, unsigned long addr)
{
	struct kmmio_context *ctx;
	struct kmmio_fault_page *faultpage;
	int ret = 0; /* default to fault not handled */
	unsigned long page_base = addr;
	unsigned int l;
	pte_t *pte = lookup_address(addr, &l);
	if (!pte)
		return -EINVAL;
	page_base &= page_level_mask(l);
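
	/*
	 * Disable preemption to prevent a process switch while single
	 * stepping; only one kmmio trace can be handled per cpu at a time.
	 */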
	preempt_disable();
	rcu_read_lock();

	faultpage = get_kmmio_fault_page(page_base);
	if (!faultpage) {
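		/*
		 * Either this page fault is not caused by kmmio, or another
		 * CPU just pulled the kmmio probe from under our feet. The
		 * latter case should not be possible.
		 */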
		goto no_kmmio;
	}

	ctx = this_cpu_ptr(&kmmio_ctx);
	if (ctx->active) {
		if (page_base == ctx->addr) {
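			/*
			 * A second fault on the same page means some other
			 * condition needs handling by do_page_fault(); the
			 * page really not being present is the most common.
			 */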
			pr_debug("secondary hit for 0x%08lx CPU %d.\n",
				 addr, smp_processor_id());

			if (!faultpage->old_presence)
				pr_info("unexpected secondary hit for address 0x%08lx on CPU %d.\n",
					addr, smp_processor_id());
		} else {
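			/*
			 * Prevent overwriting the in-flight context. This
			 * should not happen; disarming at least prevents a
			 * panic.
			 */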
			pr_emerg("recursive probe hit on CPU %d, for address 0x%08lx. Ignoring.\n",
				 smp_processor_id(), addr);
			pr_emerg("previous hit was at 0x%08lx.\n", ctx->addr);
			disarm_kmmio_fault_page(faultpage);
		}
		goto no_kmmio;
	}
	ctx->active++;

	ctx->fpage = faultpage;
	ctx->probe = get_kmmio_probe(page_base);
	ctx->saved_flags = (regs->flags & (X86_EFLAGS_TF | X86_EFLAGS_IF));
	ctx->addr = page_base;

	if (ctx->probe && ctx->probe->pre_handler)
		ctx->probe->pre_handler(ctx->probe, regs, addr);
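
	/*
	 * Enable single-stepping and disable interrupts for the faulting
	 * context. Local interrupts must not get enabled during stepping.
	 */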
	regs->flags |= X86_EFLAGS_TF;
	regs->flags &= ~X86_EFLAGS_IF;

	/* Make the page present again so the faulting instruction can run. */
	disarm_kmmio_fault_page(ctx->fpage);
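
	/*
	 * If another cpu accesses the same page while we are stepping,
	 * the access will not be caught. It will simply succeed and the
	 * only downside is we lose the event. If this becomes a problem,
	 * the user should drop to a single cpu before tracing.
	 */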
	return 1;

no_kmmio:
	rcu_read_unlock();
	preempt_enable_no_resched();
	return ret;
}
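
/*
 * Interrupts are disabled on entry as the debug trap is an interrupt gate,
 * and they remain disabled throughout this function.
 * This must always get called as the pair to kmmio_handler().
 */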
static int post_kmmio_handler(unsigned long condition, struct pt_regs *regs)
{
	int ret = 0;
	struct kmmio_context *ctx = this_cpu_ptr(&kmmio_ctx);

	if (!ctx->active) {
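		/*
		 * Debug traps without an active context are due to either
		 * something external causing them (e.g. using a debugger
		 * while mmio tracing is enabled), or erroneous behaviour.
		 */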
		pr_warn("unexpected debug trap on CPU %d.\n", smp_processor_id());
		goto out;
	}

	if (ctx->probe && ctx->probe->post_handler)
		ctx->probe->post_handler(ctx->probe, condition, regs);
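
	/* Prevent racing against release_kmmio_fault_page(). */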
	spin_lock(&kmmio_lock);
	if (ctx->fpage->count)
		arm_kmmio_fault_page(ctx->fpage);
	spin_unlock(&kmmio_lock);

	regs->flags &= ~X86_EFLAGS_TF;
	regs->flags |= ctx->saved_flags;

	/* These were acquired in kmmio_handler(). */
	ctx->active--;
	BUG_ON(ctx->active);
	rcu_read_unlock();
	preempt_enable_no_resched();
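
	/*
	 * If somebody else is single-stepping across a probe point, flags
	 * will have TF set; in that case, continue the remaining processing
	 * of the debug exception as if this was not a probe hit.
	 */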
	if (!(regs->flags & X86_EFLAGS_TF))
		ret = 1;
out:
	return ret;
}
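
/* Caller must hold kmmio_lock. */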
static int add_kmmio_fault_page(unsigned long addr)
{
	struct kmmio_fault_page *f;

	f = get_kmmio_fault_page(addr);
	if (f) {
		if (!f->count)
			arm_kmmio_fault_page(f);
		f->count++;
		return 0;
	}

	f = kzalloc(sizeof(*f), GFP_ATOMIC);
	if (!f)
		return -1;

	f->count = 1;
	f->addr = addr;

	if (arm_kmmio_fault_page(f)) {
		kfree(f);
		return -1;
	}

	list_add_rcu(&f->list, kmmio_page_list(f->addr));

	return 0;
}
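
/* Caller must hold kmmio_lock. */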
static void release_kmmio_fault_page(unsigned long addr,
				     struct kmmio_fault_page **release_list)
{
	struct kmmio_fault_page *f;

	f = get_kmmio_fault_page(addr);
	if (!f)
		return;

	f->count--;
	BUG_ON(f->count < 0);
	if (!f->count) {
		disarm_kmmio_fault_page(f);
		if (!f->scheduled_for_release) {
			f->release_next = *release_list;
			*release_list = f;
			f->scheduled_for_release = true;
		}
	}
}
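
/*
 * With page-unaligned ioremaps, one or two armed pages may contain
 * addresses from outside the intended mapping. Events for these addresses
 * are currently silently dropped. Such events can only result from
 * programming mistakes: accessing addresses before the beginning or past
 * the end of a mapping.
 */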
int register_kmmio_probe(struct kmmio_probe *p)
{
	unsigned long flags;
	int ret = 0;
	unsigned long size = 0;
	unsigned long addr = p->addr & PAGE_MASK;
	const unsigned long size_lim = p->len + (p->addr & ~PAGE_MASK);
	unsigned int l;
	pte_t *pte;

	spin_lock_irqsave(&kmmio_lock, flags);
	if (get_kmmio_probe(addr)) {
		ret = -EEXIST;
		goto out;
	}

	pte = lookup_address(addr, &l);
	if (!pte) {
		ret = -EINVAL;
		goto out;
	}

	kmmio_count++;
	list_add_rcu(&p->list, &kmmio_probes);
	while (size < size_lim) {
		if (add_kmmio_fault_page(addr + size))
			pr_err("Unable to set page fault.\n");
		size += page_level_size(l);
	}
out:
	spin_unlock_irqrestore(&kmmio_lock, flags);

	return ret;
}
EXPORT_SYMBOL(register_kmmio_probe);
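
/* Final stage of the delayed release: free the kmmio_fault_page structs. */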
static void rcu_free_kmmio_fault_pages(struct rcu_head *head)
{
	struct kmmio_delayed_release *dr = container_of(
						head,
						struct kmmio_delayed_release,
						rcu);
	struct kmmio_fault_page *f = dr->release_list;
	while (f) {
		struct kmmio_fault_page *next = f->release_next;
		BUG_ON(f->count);
		kfree(f);
		f = next;
	}
	kfree(dr);
}
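
/*
 * First stage of the delayed release: after an RCU grace period, unlink the
 * disarmed pages from kmmio_page_table, or rescue any page that was
 * re-registered in the meantime.
 */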
static void remove_kmmio_fault_pages(struct rcu_head *head)
{
	struct kmmio_delayed_release *dr =
		container_of(head, struct kmmio_delayed_release, rcu);
	struct kmmio_fault_page *f = dr->release_list;
	struct kmmio_fault_page **prevp = &dr->release_list;
	unsigned long flags;

	spin_lock_irqsave(&kmmio_lock, flags);
	while (f) {
		if (!f->count) {
			list_del_rcu(&f->list);
			prevp = &f->release_next;
		} else {
			*prevp = f->release_next;
			f->release_next = NULL;
			f->scheduled_for_release = false;
		}
		f = *prevp;
	}
	spin_unlock_irqrestore(&kmmio_lock, flags);
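
	/* This is the real RCU destroy call. */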
	call_rcu(&dr->rcu, rcu_free_kmmio_fault_pages);
}
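
/*
 * Remove a kmmio probe. You have to synchronize_rcu() before you can be
 * sure nobody will be executing the probe handlers after this call.
 *
 * Unregistering a kmmio fault page has three steps:
 * 1. release_kmmio_fault_page()
 *    Disarm the page, wait for a grace period to let all faults finish.
 * 2. remove_kmmio_fault_pages()
 *    Remove the pages from kmmio_page_table.
 * 3. rcu_free_kmmio_fault_pages()
 *    Actually free the kmmio_fault_page structs, after another grace period.
 */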
void unregister_kmmio_probe(struct kmmio_probe *p)
{
	unsigned long flags;
	unsigned long size = 0;
	unsigned long addr = p->addr & PAGE_MASK;
	const unsigned long size_lim = p->len + (p->addr & ~PAGE_MASK);
	struct kmmio_fault_page *release_list = NULL;
	struct kmmio_delayed_release *drelease;
	unsigned int l;
	pte_t *pte;

	pte = lookup_address(addr, &l);
	if (!pte)
		return;

	spin_lock_irqsave(&kmmio_lock, flags);
	while (size < size_lim) {
		release_kmmio_fault_page(addr + size, &release_list);
		size += page_level_size(l);
	}
	list_del_rcu(&p->list);
	kmmio_count--;
	spin_unlock_irqrestore(&kmmio_lock, flags);

	if (!release_list)
		return;

	drelease = kmalloc(sizeof(*drelease), GFP_ATOMIC);
	if (!drelease) {
		pr_crit("leaking kmmio_fault_page objects.\n");
		return;
	}
	drelease->release_list = release_list;
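
	/*
	 * This is not really RCU here. We have just disarmed a set of
	 * pages so that they cannot trigger page faults anymore. However,
	 * we cannot remove the pages from kmmio_page_table, because a probe
	 * hit might still be in flight on another CPU. The pages are
	 * collected into a list and will be removed from kmmio_page_table
	 * only when it is certain that no related probe hit can be in
	 * flight; an RCU grace period guarantees that.
	 *
	 * If we removed the pages too early, the kmmio page fault handler
	 * might not find the respective kmmio_fault_page and wrongly decide
	 * the fault is not a kmmio fault.
	 */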
	call_rcu(&drelease->rcu, remove_kmmio_fault_pages);
}
EXPORT_SYMBOL(unregister_kmmio_probe);

static int
kmmio_die_notifier(struct notifier_block *nb, unsigned long val, void *args)
{
	struct die_args *arg = args;
	unsigned long *dr6_p = (unsigned long *)ERR_PTR(arg->err);

	if (val == DIE_DEBUG && (*dr6_p & DR_STEP))
		if (post_kmmio_handler(*dr6_p, arg->regs) == 1) {
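			/*
			 * Reset the BS bit in dr6 (pointed at by arg->err) to
			 * denote completion of processing.
			 */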
			*dr6_p &= ~DR_STEP;
			return NOTIFY_STOP;
		}

	return NOTIFY_DONE;
}

static struct notifier_block nb_die = {
	.notifier_call = kmmio_die_notifier
};

int kmmio_init(void)
{
	int i;

	for (i = 0; i < KMMIO_PAGE_TABLE_SIZE; i++)
		INIT_LIST_HEAD(&kmmio_page_table[i]);

	return register_die_notifier(&nb_die);
}

void kmmio_cleanup(void)
{
	int i;

	unregister_die_notifier(&nb_die);
	for (i = 0; i < KMMIO_PAGE_TABLE_SIZE; i++) {
		WARN_ONCE(!list_empty(&kmmio_page_table[i]),
			  KERN_ERR "kmmio_page_table not empty at cleanup, any further tracing will leak memory.\n");
	}
}