/*
 * Support for MMIO probes: trap accesses to ioremapped I/O memory by
 * arming the mapping pages (clearing their present bits) and
 * single-stepping over the faulting instruction.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/list.h>
#include <linux/rculist.h>
#include <linux/spinlock.h>
#include <linux/hash.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/uaccess.h>
#include <linux/ptrace.h>
#include <linux/preempt.h>
#include <linux/percpu.h>
#include <linux/kdebug.h>
#include <linux/mutex.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <linux/errno.h>
#include <asm/debugreg.h>
#include <linux/mmiotrace.h>

#define KMMIO_PAGE_HASH_BITS 4
#define KMMIO_PAGE_TABLE_SIZE (1 << KMMIO_PAGE_HASH_BITS)

struct kmmio_fault_page {
	struct list_head list;
	struct kmmio_fault_page *release_next;
	unsigned long addr;	/* the requested address */
	pteval_t old_presence;	/* page presence prior to arming */
	bool armed;
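
	/*
	 * Number of times this page has been registered as a part
	 * of a probe. If zero, page is disarmed and this may be freed.
	 * Used only by writers (RCU) and post_kmmio_handler().
	 * Protected by kmmio_lock, when linked into kmmio_page_table.
	 */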
	int count;

	bool scheduled_for_release;
};

struct kmmio_delayed_release {
	struct rcu_head rcu;
	struct kmmio_fault_page *release_list;
};

struct kmmio_context {
	struct kmmio_fault_page *fpage;
	struct kmmio_probe *probe;
	unsigned long saved_flags;
	unsigned long addr;
	int active;
};

static DEFINE_SPINLOCK(kmmio_lock);

/* Protected by kmmio_lock */
unsigned int kmmio_count;

/* Read-protected by RCU, write-protected by kmmio_lock. */
static struct list_head kmmio_page_table[KMMIO_PAGE_TABLE_SIZE];
static LIST_HEAD(kmmio_probes);

static struct list_head *kmmio_page_list(unsigned long addr)
{
	unsigned int l;
	pte_t *pte = lookup_address(addr, &l);

	if (!pte)
		return NULL;
	addr &= page_level_mask(l);

	return &kmmio_page_table[hash_long(addr, KMMIO_PAGE_HASH_BITS)];
}

/* Accessed per-cpu. */
static DEFINE_PER_CPU(struct kmmio_context, kmmio_ctx);

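/*
 * This is basically a dynamic stabbing problem: find the probe whose
 * interval covers a given address. The linear scan below is fine for a
 * handful of probes; better structures exist (interval skip lists,
 * space-efficient dynamic stabbing) if this ever becomes hot.
 */
/* Get the kmmio at this addr (if any). You must be holding RCU read lock. */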
static struct kmmio_probe *get_kmmio_probe(unsigned long addr)
{
	struct kmmio_probe *p;
	list_for_each_entry_rcu(p, &kmmio_probes, list) {
		if (addr >= p->addr && addr < (p->addr + p->len))
			return p;
	}
	return NULL;
}

/* You must be holding RCU read lock. */
static struct kmmio_fault_page *get_kmmio_fault_page(unsigned long addr)
{
	struct list_head *head;
	struct kmmio_fault_page *f;
	unsigned int l;
	pte_t *pte = lookup_address(addr, &l);

	if (!pte)
		return NULL;
	addr &= page_level_mask(l);
	head = kmmio_page_list(addr);
	list_for_each_entry_rcu(f, head, list) {
		if (f->addr == addr)
			return f;
	}
	return NULL;
}

static void clear_pmd_presence(pmd_t *pmd, bool clear, pmdval_t *old)
{
	pmdval_t v = pmd_val(*pmd);
	if (clear) {
		*old = v & _PAGE_PRESENT;
		v &= ~_PAGE_PRESENT;
	} else	/* presume this has been called with clear==true before */
		v |= *old;
	set_pmd(pmd, __pmd(v));
}

static void clear_pte_presence(pte_t *pte, bool clear, pteval_t *old)
{
	pteval_t v = pte_val(*pte);
	if (clear) {
		*old = v & _PAGE_PRESENT;
		v &= ~_PAGE_PRESENT;
	} else	/* presume this has been called with clear==true before */
		v |= *old;
	set_pte_atomic(pte, __pte(v));
}

static int clear_page_presence(struct kmmio_fault_page *f, bool clear)
{
	unsigned int level;
	pte_t *pte = lookup_address(f->addr, &level);

	if (!pte) {
		pr_err("no pte for addr 0x%08lx\n", f->addr);
		return -1;
	}

	switch (level) {
	case PG_LEVEL_2M:
		clear_pmd_presence((pmd_t *)pte, clear, &f->old_presence);
		break;
	case PG_LEVEL_4K:
		clear_pte_presence(pte, clear, &f->old_presence);
		break;
	default:
		pr_err("unexpected page level 0x%x.\n", level);
		return -1;
	}

	__flush_tlb_one(f->addr);
	return 0;
}

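/*
 * Mark the given page as not present. Access to it will trigger a fault.
 *
 * Struct kmmio_fault_page is protected by RCU and kmmio_lock, but the
 * protection is ignored here. RCU read lock is assumed held, so the
 * struct will not disappear unexpectedly.
 */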
static int arm_kmmio_fault_page(struct kmmio_fault_page *f)
{
	int ret;
	WARN_ONCE(f->armed, KERN_ERR pr_fmt("kmmio page already armed.\n"));
	if (f->armed) {
		pr_warn("double-arm: addr 0x%08lx, ref %d, old %d\n",
			f->addr, f->count, !!f->old_presence);
	}
	ret = clear_page_presence(f, true);
	WARN_ONCE(ret < 0, KERN_ERR pr_fmt("arming at 0x%08lx failed.\n"),
		  f->addr);
	f->armed = true;
	return ret;
}

/* Restore the given page to saved presence state. */
static void disarm_kmmio_fault_page(struct kmmio_fault_page *f)
{
	int ret = clear_page_presence(f, false);
	WARN_ONCE(ret < 0,
		  KERN_ERR "kmmio disarming at 0x%08lx failed.\n", f->addr);
	f->armed = false;
}

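/*
 * This is being called from do_page_fault().
 *
 * We may be in an interrupt or a critical section. Also prefetching may
 * trigger a page fault. We may be in the middle of a process switch.
 * We cannot take any locks, because we could be executing especially
 * within a kmmio critical section.
 *
 * Local interrupts are disabled, so preemption cannot happen.
 * Do not enable interrupts, do not sleep, keep watchdog sane.
 */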
int kmmio_handler(struct pt_regs *regs, unsigned long addr)
{
	struct kmmio_context *ctx;
	struct kmmio_fault_page *faultpage;
	int ret = 0; /* default to fault not handled */
	unsigned long page_base = addr;
	unsigned int l;
	pte_t *pte = lookup_address(addr, &l);
	if (!pte)
		return -EINVAL;
	page_base &= page_level_mask(l);

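	/*
	 * Preemption is now disabled to prevent process switch during
	 * single stepping. We can only handle one active kmmio trace
	 * per cpu, so ensure that we finish it before something else
	 * can hit this page.
	 */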
	preempt_disable();
	rcu_read_lock();

	faultpage = get_kmmio_fault_page(page_base);
	if (!faultpage) {
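		/*
		 * Either this page fault is not caused by kmmio, or
		 * another CPU just pulled the kmmio probe from under
		 * our feet. The latter case should not be possible.
		 */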
		goto no_kmmio;
	}

	ctx = &get_cpu_var(kmmio_ctx);
	if (ctx->active) {
		if (page_base == ctx->addr) {
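			/*
			 * A second fault on the same page means some other
			 * condition needs handling by do_page_fault(), the
			 * page really not being present is the most common.
			 */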
			pr_debug("secondary hit for 0x%08lx CPU %d.\n",
				 addr, smp_processor_id());

			if (!faultpage->old_presence)
				pr_info("unexpected secondary hit for address 0x%08lx on CPU %d.\n",
					addr, smp_processor_id());
		} else {
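			/*
			 * Prevent overwriting already in-flight context.
			 * This should not happen, let's hope disarming at
			 * least prevents a panic.
			 */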
			pr_emerg("recursive probe hit on CPU %d, for address 0x%08lx. Ignoring.\n",
				 smp_processor_id(), addr);
			pr_emerg("previous hit was at 0x%08lx.\n", ctx->addr);
			disarm_kmmio_fault_page(faultpage);
		}
		goto no_kmmio_ctx;
	}
	ctx->active++;

	ctx->fpage = faultpage;
	ctx->probe = get_kmmio_probe(page_base);
	ctx->saved_flags = (regs->flags & (X86_EFLAGS_TF | X86_EFLAGS_IF));
	ctx->addr = page_base;

	if (ctx->probe && ctx->probe->pre_handler)
		ctx->probe->pre_handler(ctx->probe, regs, addr);

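	/*
	 * Enable single-stepping and disable interrupts for the faulting
	 * context. Local interrupts must not get enabled during stepping.
	 */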
	regs->flags |= X86_EFLAGS_TF;
	regs->flags &= ~X86_EFLAGS_IF;

	/* Now we set present bit in PTE and single step. */
	disarm_kmmio_fault_page(ctx->fpage);

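	/*
	 * If another cpu accesses the same page while we are stepping,
	 * the access will not be caught. It will simply succeed and the
	 * only downside is we lose the event. If this becomes a problem,
	 * the user should drop to single cpu before tracing.
	 */
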
	put_cpu_var(kmmio_ctx);
	return 1; /* fault handled */

no_kmmio_ctx:
	put_cpu_var(kmmio_ctx);
no_kmmio:
	rcu_read_unlock();
	preempt_enable_no_resched();
	return ret;
}

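/*
 * Interrupts are disabled on entry as trap1 is an interrupt gate
 * and they remain disabled throughout this function.
 * This must always get called as the pair to kmmio_handler().
 */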
static int post_kmmio_handler(unsigned long condition, struct pt_regs *regs)
{
	int ret = 0;
	struct kmmio_context *ctx = &get_cpu_var(kmmio_ctx);

	if (!ctx->active) {
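		/*
		 * debug traps without an active context are due to either
		 * something external causing them (f.e. using a debugger
		 * while mmio tracing is enabled), or erroneous behaviour.
		 */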
		pr_warn("unexpected debug trap on CPU %d.\n",
			smp_processor_id());
		goto out;
	}

	if (ctx->probe && ctx->probe->post_handler)
		ctx->probe->post_handler(ctx->probe, condition, regs);

	/* Prevent racing against release_kmmio_fault_page(). */
	spin_lock(&kmmio_lock);
	if (ctx->fpage->count)
		arm_kmmio_fault_page(ctx->fpage);
	spin_unlock(&kmmio_lock);

	regs->flags &= ~X86_EFLAGS_TF;
	regs->flags |= ctx->saved_flags;

	/* These were acquired in kmmio_handler(). */
	ctx->active--;
	BUG_ON(ctx->active);
	rcu_read_unlock();
	preempt_enable_no_resched();

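	/*
	 * if somebody else is singlestepping across a probe point, flags
	 * will have TF set, in which case, continue the remaining processing
	 * of do_debug, as if this is not a probe hit.
	 */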
	if (!(regs->flags & X86_EFLAGS_TF))
		ret = 1;
out:
	put_cpu_var(kmmio_ctx);
	return ret;
}

/* You must be holding kmmio_lock. */
static int add_kmmio_fault_page(unsigned long addr)
{
	struct kmmio_fault_page *f;

	f = get_kmmio_fault_page(addr);
	if (f) {
		if (!f->count)
			arm_kmmio_fault_page(f);
		f->count++;
		return 0;
	}

	f = kzalloc(sizeof(*f), GFP_ATOMIC);
	if (!f)
		return -1;

	f->count = 1;
	f->addr = addr;

	if (arm_kmmio_fault_page(f)) {
		kfree(f);
		return -1;
	}

	list_add_rcu(&f->list, kmmio_page_list(f->addr));

	return 0;
}

/* You must be holding kmmio_lock. */
static void release_kmmio_fault_page(unsigned long addr,
				     struct kmmio_fault_page **release_list)
{
	struct kmmio_fault_page *f;

	f = get_kmmio_fault_page(addr);
	if (!f)
		return;

	f->count--;
	BUG_ON(f->count < 0);
	if (!f->count) {
		disarm_kmmio_fault_page(f);
		if (!f->scheduled_for_release) {
			f->release_next = *release_list;
			*release_list = f;
			f->scheduled_for_release = true;
		}
	}
}

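/*
 * With page-unaligned ioremaps, one or two armed pages may contain
 * addresses from outside the intended mapping. Events for these addresses
 * are currently silently dropped. The events may result only from programming
 * mistakes by accessing addresses before the beginning or past the end of a
 * mapping.
 */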
int register_kmmio_probe(struct kmmio_probe *p)
{
	unsigned long flags;
	int ret = 0;
	unsigned long size = 0;
	const unsigned long size_lim = p->len + (p->addr & ~PAGE_MASK);
	unsigned int l;
	pte_t *pte;

	spin_lock_irqsave(&kmmio_lock, flags);
	if (get_kmmio_probe(p->addr)) {
		ret = -EEXIST;
		goto out;
	}

	pte = lookup_address(p->addr, &l);
	if (!pte) {
		ret = -EINVAL;
		goto out;
	}

	kmmio_count++;
	list_add_rcu(&p->list, &kmmio_probes);
	while (size < size_lim) {
		if (add_kmmio_fault_page(p->addr + size))
			pr_err("Unable to set page fault.\n");
		size += page_level_size(l);
	}
out:
	spin_unlock_irqrestore(&kmmio_lock, flags);
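	/*
	 * XXX: What should I do here?
	 * Here was a call to global_flush_tlb(), but it does not exist
	 * anymore. It seems it's not needed after all.
	 */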
	return ret;
}
EXPORT_SYMBOL(register_kmmio_probe);

static void rcu_free_kmmio_fault_pages(struct rcu_head *head)
{
	struct kmmio_delayed_release *dr = container_of(
						head,
						struct kmmio_delayed_release,
						rcu);
	struct kmmio_fault_page *f = dr->release_list;
	while (f) {
		struct kmmio_fault_page *next = f->release_next;
		BUG_ON(f->count);
		kfree(f);
		f = next;
	}
	kfree(dr);
}

static void remove_kmmio_fault_pages(struct rcu_head *head)
{
	struct kmmio_delayed_release *dr =
		container_of(head, struct kmmio_delayed_release, rcu);
	struct kmmio_fault_page *f = dr->release_list;
	struct kmmio_fault_page **prevp = &dr->release_list;
	unsigned long flags;

	spin_lock_irqsave(&kmmio_lock, flags);
	while (f) {
		if (!f->count) {
			list_del_rcu(&f->list);
			prevp = &f->release_next;
		} else {
			*prevp = f->release_next;
			f->release_next = NULL;
			f->scheduled_for_release = false;
		}
		f = *prevp;
	}
	spin_unlock_irqrestore(&kmmio_lock, flags);

	/* This is the real RCU cleanup. */
	call_rcu(&dr->rcu, rcu_free_kmmio_fault_pages);
}

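/*
 * Remove a kmmio probe. You have to synchronize_rcu() before you can be
 * sure that the callbacks will not be called anymore. Only after that
 * you may actually release your struct kmmio_probe.
 *
 * Unregistering a kmmio fault page has three steps:
 * 1. release_kmmio_fault_page()
 *    Disarm the page, wait a grace period to let all faults finish.
 * 2. remove_kmmio_fault_pages()
 *    Remove the pages from kmmio_page_table.
 * 3. rcu_free_kmmio_fault_pages()
 *    Actually free the kmmio_fault_page structs as with RCU.
 */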
void unregister_kmmio_probe(struct kmmio_probe *p)
{
	unsigned long flags;
	unsigned long size = 0;
	const unsigned long size_lim = p->len + (p->addr & ~PAGE_MASK);
	struct kmmio_fault_page *release_list = NULL;
	struct kmmio_delayed_release *drelease;
	unsigned int l;
	pte_t *pte;

	pte = lookup_address(p->addr, &l);
	if (!pte)
		return;

	spin_lock_irqsave(&kmmio_lock, flags);
	while (size < size_lim) {
		release_kmmio_fault_page(p->addr + size, &release_list);
		size += page_level_size(l);
	}
	list_del_rcu(&p->list);
	kmmio_count--;
	spin_unlock_irqrestore(&kmmio_lock, flags);

	if (!release_list)
		return;

	drelease = kmalloc(sizeof(*drelease), GFP_ATOMIC);
	if (!drelease) {
		pr_crit("leaking kmmio_fault_page objects.\n");
		return;
	}
	drelease->release_list = release_list;
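
	/*
	 * This is not really RCU here. We have just disarmed a set of
	 * pages so that they cannot trigger page faults anymore. However,
	 * we cannot remove the pages from kmmio_page_table,
	 * because a probe hit might be in flight on another CPU. The
	 * pages are collected into a list, and they will be removed from
	 * kmmio_page_table when it is certain that no probe hit related to
	 * these pages can be in flight. RCU grace period sounds like a
	 * good choice.
	 *
	 * If we removed the pages too early, the kmmio page fault handler
	 * might not find the respective kmmio_fault_page and decide the
	 * fault is not a kmmio fault, when it actually is. This would lead
	 * to madness.
	 */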
	call_rcu(&drelease->rcu, remove_kmmio_fault_pages);
}
EXPORT_SYMBOL(unregister_kmmio_probe);

static int
kmmio_die_notifier(struct notifier_block *nb, unsigned long val, void *args)
{
	struct die_args *arg = args;
	unsigned long *dr6_p = (unsigned long *)ERR_PTR(arg->err);

	if (val == DIE_DEBUG && (*dr6_p & DR_STEP))
		if (post_kmmio_handler(*dr6_p, arg->regs) == 1) {
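			/*
			 * Reset the BS bit in dr6 (pointed by args->err) to
			 * denote completion of processing.
			 */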
			*dr6_p &= ~DR_STEP;
			return NOTIFY_STOP;
		}

	return NOTIFY_DONE;
}

static struct notifier_block nb_die = {
	.notifier_call = kmmio_die_notifier
};

int kmmio_init(void)
{
	int i;

	for (i = 0; i < KMMIO_PAGE_TABLE_SIZE; i++)
		INIT_LIST_HEAD(&kmmio_page_table[i]);

	return register_die_notifier(&nb_die);
}

void kmmio_cleanup(void)
{
	int i;

	unregister_die_notifier(&nb_die);
	for (i = 0; i < KMMIO_PAGE_TABLE_SIZE; i++) {
		WARN_ONCE(!list_empty(&kmmio_page_table[i]),
			  KERN_ERR "kmmio_page_table not empty at cleanup, any further tracing will leak memory.\n");
	}
}