/*
 * Support for MMIO probes.
 * Borrows code and ideas from kprobes.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/list.h>
#include <linux/rculist.h>
#include <linux/spinlock.h>
#include <linux/hash.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/uaccess.h>
#include <linux/ptrace.h>
#include <linux/preempt.h>
#include <linux/percpu.h>
#include <linux/kdebug.h>
#include <linux/mutex.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/mmiotrace.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/debugreg.h>

#define KMMIO_PAGE_HASH_BITS 4
#define KMMIO_PAGE_TABLE_SIZE (1 << KMMIO_PAGE_HASH_BITS)

struct kmmio_fault_page {
	struct list_head list;
	struct kmmio_fault_page *release_next;
	unsigned long page;	/* location of the fault page */
	pteval_t old_presence;	/* page presence prior to arming */
	bool armed;

	/*
	 * Number of times this page has been registered as a part
	 * of a probe. If zero, page is disarmed and this may be freed.
	 * Used only by writers (RCU) and post_kmmio_handler().
	 * Protected by kmmio_lock, when linked into kmmio_page_table.
	 */
	int count;

	bool scheduled_for_release;
};

struct kmmio_delayed_release {
	struct rcu_head rcu;
	struct kmmio_fault_page *release_list;
};

struct kmmio_context {
	struct kmmio_fault_page *fpage;
	struct kmmio_probe *probe;
	unsigned long saved_flags;
	unsigned long addr;
	int active;
};

static DEFINE_SPINLOCK(kmmio_lock);
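
/* Protected by kmmio_lock */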
unsigned int kmmio_count;
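
/* Read-protected by RCU, write-protected by kmmio_lock. */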
static struct list_head kmmio_page_table[KMMIO_PAGE_TABLE_SIZE];
static LIST_HEAD(kmmio_probes);

static struct list_head *kmmio_page_list(unsigned long page)
{
	return &kmmio_page_table[hash_long(page, KMMIO_PAGE_HASH_BITS)];
}
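
/* Accessed per-cpu. */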
static DEFINE_PER_CPU(struct kmmio_context, kmmio_ctx);
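
/*
 * Finding the probe that covers an address is a stabbing query over the
 * registered ranges; a linear scan is adequate for the expected handful
 * of probes. You must be holding the RCU read lock.
 */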
static struct kmmio_probe *get_kmmio_probe(unsigned long addr)
{
	struct kmmio_probe *p;
	list_for_each_entry_rcu(p, &kmmio_probes, list) {
		if (addr >= p->addr && addr < (p->addr + p->len))
			return p;
	}
	return NULL;
}
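
/* You must be holding the RCU read lock. */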
static struct kmmio_fault_page *get_kmmio_fault_page(unsigned long page)
{
	struct list_head *head;
	struct kmmio_fault_page *f;

	page &= PAGE_MASK;
	head = kmmio_page_list(page);
	list_for_each_entry_rcu(f, head, list) {
		if (f->page == page)
			return f;
	}
	return NULL;
}

static void clear_pmd_presence(pmd_t *pmd, bool clear, pmdval_t *old)
{
	pmdval_t v = pmd_val(*pmd);
	if (clear) {
		*old = v & _PAGE_PRESENT;
		v &= ~_PAGE_PRESENT;
	} else
		v |= *old;
	set_pmd(pmd, __pmd(v));
}

static void clear_pte_presence(pte_t *pte, bool clear, pteval_t *old)
{
	pteval_t v = pte_val(*pte);
	if (clear) {
		*old = v & _PAGE_PRESENT;
		v &= ~_PAGE_PRESENT;
	} else
		v |= *old;
	set_pte_atomic(pte, __pte(v));
}

static int clear_page_presence(struct kmmio_fault_page *f, bool clear)
{
	unsigned int level;
	pte_t *pte = lookup_address(f->page, &level);

	if (!pte) {
		pr_err("no pte for page 0x%08lx\n", f->page);
		return -1;
	}

	switch (level) {
	case PG_LEVEL_2M:
		clear_pmd_presence((pmd_t *)pte, clear, &f->old_presence);
		break;
	case PG_LEVEL_4K:
		clear_pte_presence(pte, clear, &f->old_presence);
		break;
	default:
		pr_err("unexpected page level 0x%x.\n", level);
		return -1;
	}

	__flush_tlb_one(f->page);
	return 0;
}
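
/*
 * Mark the given page as not present. Access to it will trigger a fault.
 *
 * struct kmmio_fault_page is protected by RCU and kmmio_lock, but the
 * protection is ignored here: the RCU read lock is assumed held, so the
 * struct will not disappear unexpectedly while we poke at the PTE.
 */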
static int arm_kmmio_fault_page(struct kmmio_fault_page *f)
{
	int ret;
	WARN_ONCE(f->armed, KERN_ERR pr_fmt("kmmio page already armed.\n"));
	if (f->armed) {
		pr_warning("double-arm: page 0x%08lx, ref %d, old %d\n",
			   f->page, f->count, !!f->old_presence);
	}
	ret = clear_page_presence(f, true);
	WARN_ONCE(ret < 0, KERN_ERR pr_fmt("arming 0x%08lx failed.\n"),
		  f->page);
	f->armed = true;
	return ret;
}
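
/* Restore the given page to its saved presence state. */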
static void disarm_kmmio_fault_page(struct kmmio_fault_page *f)
{
	int ret = clear_page_presence(f, false);
	WARN_ONCE(ret < 0,
		  KERN_ERR "kmmio disarming 0x%08lx failed.\n", f->page);
	f->armed = false;
}
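
/*
 * This is being called from do_page_fault().
 *
 * We may be in an interrupt or a critical section. Also prefetching may
 * trigger a page fault. We may be in the middle of a process switch. We
 * cannot take any locks, because we could already be executing inside a
 * kmmio critical section.
 *
 * Local interrupts are disabled, so preemption cannot happen. Do not
 * enable interrupts, do not sleep, keep the watchdog happy.
 */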
int kmmio_handler(struct pt_regs *regs, unsigned long addr)
{
	struct kmmio_context *ctx;
	struct kmmio_fault_page *faultpage;
	int ret = 0;

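	/*
	 * Preemption is now disabled to prevent process switch during
	 * single stepping. We can only handle one active kmmio trace
	 * per cpu, so ensure that we finish it before something else
	 * gets to run. We also hold the RCU read lock over single
	 * stepping to avoid looking up the probe and kmmio_fault_page
	 * again.
	 */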
	preempt_disable();
	rcu_read_lock();

	faultpage = get_kmmio_fault_page(addr);
	if (!faultpage) {
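		/*
		 * Either this page fault is not caused by kmmio, or
		 * another CPU just pulled the kmmio probe from under
		 * our feet. The latter case should not be possible.
		 */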
		goto no_kmmio;
	}

	ctx = &get_cpu_var(kmmio_ctx);
	if (ctx->active) {
		if (addr == ctx->addr) {
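			/*
			 * A second fault on the same page means some other
			 * condition needs handling by do_page_fault(); the
			 * page really not being present is the most common.
			 */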
			pr_debug("secondary hit for 0x%08lx CPU %d.\n",
				 addr, smp_processor_id());

			if (!faultpage->old_presence)
				pr_info("unexpected secondary hit for address 0x%08lx on CPU %d.\n",
					addr, smp_processor_id());
		} else {
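			/*
			 * Prevent overwriting the already in-flight context.
			 * This should not happen; let's hope disarming at
			 * least prevents a panic.
			 */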
			pr_emerg("recursive probe hit on CPU %d, for address 0x%08lx. Ignoring.\n",
				 smp_processor_id(), addr);
			pr_emerg("previous hit was at 0x%08lx.\n", ctx->addr);
			disarm_kmmio_fault_page(faultpage);
		}
		goto no_kmmio_ctx;
	}
	ctx->active++;

	ctx->fpage = faultpage;
	ctx->probe = get_kmmio_probe(addr);
	ctx->saved_flags = (regs->flags & (X86_EFLAGS_TF | X86_EFLAGS_IF));
	ctx->addr = addr;

	if (ctx->probe && ctx->probe->pre_handler)
		ctx->probe->pre_handler(ctx->probe, regs, addr);
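
	/*
	 * Enable single-stepping and disable interrupts for the faulting
	 * context. Local interrupts must not get enabled during stepping.
	 */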
	regs->flags |= X86_EFLAGS_TF;
	regs->flags &= ~X86_EFLAGS_IF;
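
	/* Now we set present bit in PTE and single step. */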
	disarm_kmmio_fault_page(ctx->fpage);
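
	/*
	 * If another cpu accesses the same page while we are stepping,
	 * the access will not be caught. It will simply succeed and the
	 * only downside is we lose the event. If this becomes a problem,
	 * the user should drop to single cpu before tracing.
	 */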

	put_cpu_var(kmmio_ctx);
	return 1; /* fault handled */

no_kmmio_ctx:
	put_cpu_var(kmmio_ctx);
no_kmmio:
	rcu_read_unlock();
	preempt_enable_no_resched();
	return ret;
}
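
/*
 * Interrupts are disabled on entry as trap1 is an interrupt gate
 * and they remain disabled throughout this function.
 * This must always get called as the pair to kmmio_handler().
 */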
static int post_kmmio_handler(unsigned long condition, struct pt_regs *regs)
{
	int ret = 0;
	struct kmmio_context *ctx = &get_cpu_var(kmmio_ctx);

	if (!ctx->active) {
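		/*
		 * Debug traps without an active context are due to either
		 * something external causing them (e.g. using a debugger
		 * while mmio tracing is enabled) or erroneous behaviour.
		 */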
		pr_warning("unexpected debug trap on CPU %d.\n",
			   smp_processor_id());
		goto out;
	}

	if (ctx->probe && ctx->probe->post_handler)
		ctx->probe->post_handler(ctx->probe, condition, regs);
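
	/* Prevent racing against release_kmmio_fault_page(). */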
	spin_lock(&kmmio_lock);
	if (ctx->fpage->count)
		arm_kmmio_fault_page(ctx->fpage);
	spin_unlock(&kmmio_lock);

	regs->flags &= ~X86_EFLAGS_TF;
	regs->flags |= ctx->saved_flags;
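
	/* These were acquired in kmmio_handler(). */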
	ctx->active--;
	BUG_ON(ctx->active);
	rcu_read_unlock();
	preempt_enable_no_resched();
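
	/*
	 * If somebody else is single-stepping across a probe point, flags
	 * will have TF set; in that case, continue the remaining processing
	 * of do_debug, as if this was not a probe hit.
	 */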
	if (!(regs->flags & X86_EFLAGS_TF))
		ret = 1;
out:
	put_cpu_var(kmmio_ctx);
	return ret;
}
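
/* You must be holding kmmio_lock. */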
static int add_kmmio_fault_page(unsigned long page)
{
	struct kmmio_fault_page *f;

	page &= PAGE_MASK;
	f = get_kmmio_fault_page(page);
	if (f) {
		if (!f->count)
			arm_kmmio_fault_page(f);
		f->count++;
		return 0;
	}

	f = kzalloc(sizeof(*f), GFP_ATOMIC);
	if (!f)
		return -1;

	f->count = 1;
	f->page = page;

	if (arm_kmmio_fault_page(f)) {
		kfree(f);
		return -1;
	}

	list_add_rcu(&f->list, kmmio_page_list(f->page));

	return 0;
}
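
/* You must be holding kmmio_lock. */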
static void release_kmmio_fault_page(unsigned long page,
				struct kmmio_fault_page **release_list)
{
	struct kmmio_fault_page *f;

	page &= PAGE_MASK;
	f = get_kmmio_fault_page(page);
	if (!f)
		return;

	f->count--;
	BUG_ON(f->count < 0);
	if (!f->count) {
		disarm_kmmio_fault_page(f);
		if (!f->scheduled_for_release) {
			f->release_next = *release_list;
			*release_list = f;
			f->scheduled_for_release = true;
		}
	}
}
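
/*
 * With page-unaligned ioremaps, one or two armed pages may contain
 * addresses from outside the intended mapping. Events for these addresses
 * are currently silently dropped. The events may result only from programming
 * mistakes by accessing addresses before the beginning or past the end of a
 * mapping.
 */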
int register_kmmio_probe(struct kmmio_probe *p)
{
	unsigned long flags;
	int ret = 0;
	unsigned long size = 0;
	const unsigned long size_lim = p->len + (p->addr & ~PAGE_MASK);

	spin_lock_irqsave(&kmmio_lock, flags);
	if (get_kmmio_probe(p->addr)) {
		ret = -EEXIST;
		goto out;
	}
	kmmio_count++;
	list_add_rcu(&p->list, &kmmio_probes);
	while (size < size_lim) {
		if (add_kmmio_fault_page(p->addr + size))
			pr_err("Unable to set page fault.\n");
		size += PAGE_SIZE;
	}
out:
	spin_unlock_irqrestore(&kmmio_lock, flags);
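	/*
	 * XXX: What should I do here?
	 * Here was a call to global_flush_tlb(), but it does not exist
	 * anymore. It seems it's not needed after all.
	 */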
	return ret;
}
EXPORT_SYMBOL(register_kmmio_probe);

static void rcu_free_kmmio_fault_pages(struct rcu_head *head)
{
	struct kmmio_delayed_release *dr = container_of(
						head,
						struct kmmio_delayed_release,
						rcu);
	struct kmmio_fault_page *f = dr->release_list;
	while (f) {
		struct kmmio_fault_page *next = f->release_next;
		BUG_ON(f->count);
		kfree(f);
		f = next;
	}
	kfree(dr);
}

static void remove_kmmio_fault_pages(struct rcu_head *head)
{
	struct kmmio_delayed_release *dr =
		container_of(head, struct kmmio_delayed_release, rcu);
	struct kmmio_fault_page *f = dr->release_list;
	struct kmmio_fault_page **prevp = &dr->release_list;
	unsigned long flags;

	spin_lock_irqsave(&kmmio_lock, flags);
	while (f) {
		if (!f->count) {
			list_del_rcu(&f->list);
			prevp = &f->release_next;
		} else {
			*prevp = f->release_next;
			f->release_next = NULL;
			f->scheduled_for_release = false;
		}
		f = *prevp;
	}
	spin_unlock_irqrestore(&kmmio_lock, flags);
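
	/* This is the real RCU destroy call. */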
	call_rcu(&dr->rcu, rcu_free_kmmio_fault_pages);
}
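
/*
 * Remove a kmmio probe. You have to synchronize_rcu() before you can be
 * sure that the callbacks will not be called anymore. Unregistering a
 * kmmio fault page has three steps:
 * 1. release_kmmio_fault_page()
 *    Disarm the page, wait a grace period to let all faults finish.
 * 2. remove_kmmio_fault_pages()
 *    Remove the pages from kmmio_page_table.
 * 3. rcu_free_kmmio_fault_pages()
 *    Actually free the kmmio_fault_page structs as with RCU.
 */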
void unregister_kmmio_probe(struct kmmio_probe *p)
{
	unsigned long flags;
	unsigned long size = 0;
	const unsigned long size_lim = p->len + (p->addr & ~PAGE_MASK);
	struct kmmio_fault_page *release_list = NULL;
	struct kmmio_delayed_release *drelease;

	spin_lock_irqsave(&kmmio_lock, flags);
	while (size < size_lim) {
		release_kmmio_fault_page(p->addr + size, &release_list);
		size += PAGE_SIZE;
	}
	list_del_rcu(&p->list);
	kmmio_count--;
	spin_unlock_irqrestore(&kmmio_lock, flags);

	if (!release_list)
		return;

	drelease = kmalloc(sizeof(*drelease), GFP_ATOMIC);
	if (!drelease) {
		pr_crit("leaking kmmio_fault_page objects.\n");
		return;
	}
	drelease->release_list = release_list;
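
	/*
	 * This is not really RCU here. We have just disarmed a set of
	 * pages so that they cannot trigger page faults anymore. However,
	 * we cannot remove the pages from kmmio_page_table, because a
	 * probe hit might be in flight on another CPU. The pages are
	 * collected into a list, and they will be removed from
	 * kmmio_page_table when it is certain that no probe hit related to
	 * these pages can be in flight. An RCU grace period is a good
	 * choice for that.
	 *
	 * If we removed the pages too early, the kmmio page fault handler
	 * might not find the respective kmmio_fault_page and determine
	 * it's not a kmmio fault, when it actually is. That would lead to
	 * madness.
	 */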
	call_rcu(&drelease->rcu, remove_kmmio_fault_pages);
}
EXPORT_SYMBOL(unregister_kmmio_probe);

static int
kmmio_die_notifier(struct notifier_block *nb, unsigned long val, void *args)
{
	struct die_args *arg = args;
	unsigned long *dr6_p = (unsigned long *)ERR_PTR(arg->err);

	if (val == DIE_DEBUG && (*dr6_p & DR_STEP))
		if (post_kmmio_handler(*dr6_p, arg->regs) == 1) {
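			/*
			 * Reset the BS bit in dr6 (pointed to by arg->err)
			 * to denote completion of processing.
			 */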
			*dr6_p &= ~DR_STEP;
			return NOTIFY_STOP;
		}

	return NOTIFY_DONE;
}

static struct notifier_block nb_die = {
	.notifier_call = kmmio_die_notifier
};

int kmmio_init(void)
{
	int i;

	for (i = 0; i < KMMIO_PAGE_TABLE_SIZE; i++)
		INIT_LIST_HEAD(&kmmio_page_table[i]);

	return register_die_notifier(&nb_die);
}

void kmmio_cleanup(void)
{
	int i;

	unregister_die_notifier(&nb_die);
	for (i = 0; i < KMMIO_PAGE_TABLE_SIZE; i++) {
		WARN_ONCE(!list_empty(&kmmio_page_table[i]),
			  KERN_ERR "kmmio_page_table not empty at cleanup, any further tracing will leak memory.\n");
	}
}