/*
 * Support for MMIO probes (mmiotrace).
 *
 * Pages containing traced MMIO mappings are armed by marking them not
 * present. The resulting page fault is claimed by kmmio_handler(), which
 * makes the page present again and single-steps the faulting instruction;
 * post_kmmio_handler() then re-arms the page from the debug trap.
 */
#include <linux/list.h>
#include <linux/rculist.h>
#include <linux/spinlock.h>
#include <linux/hash.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/uaccess.h>
#include <linux/ptrace.h>
#include <linux/preempt.h>
#include <linux/percpu.h>
#include <linux/kdebug.h>
#include <linux/mutex.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/mmiotrace.h>

#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/debugreg.h>

#define KMMIO_PAGE_HASH_BITS 4
#define KMMIO_PAGE_TABLE_SIZE (1 << KMMIO_PAGE_HASH_BITS)

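/*
 * One struct kmmio_fault_page exists for every armed page. Arming means
 * the page is marked not present in the page tables, so that every access
 * faults into kmmio_handler() and can be matched against a probe.
 */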
struct kmmio_fault_page {
	struct list_head list;
	struct kmmio_fault_page *release_next;
	unsigned long page;		/* location of the fault page */
	pteval_t old_presence;		/* page presence prior to arming */
	bool armed;

	/*
	 * Number of times this page has been registered as a part
	 * of a probe. If zero, the page is disarmed and this may be freed.
	 * Used only by writers (RCU) and post_kmmio_handler().
	 * Protected by kmmio_lock, when modified.
	 */
	int count;
};

struct kmmio_delayed_release {
	struct rcu_head rcu;
	struct kmmio_fault_page *release_list;
};

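/* Per-cpu state of an in-flight single step over an armed page. */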
struct kmmio_context {
	struct kmmio_fault_page *fpage;
	struct kmmio_probe *probe;
	unsigned long saved_flags;
	unsigned long addr;
	int active;
};

static DEFINE_SPINLOCK(kmmio_lock);

/* Protected by kmmio_lock */
unsigned int kmmio_count;

/* Read-protected by RCU, write-protected by kmmio_lock. */
static struct list_head kmmio_page_table[KMMIO_PAGE_TABLE_SIZE];
static LIST_HEAD(kmmio_probes);

static struct list_head *kmmio_page_list(unsigned long page)
{
	return &kmmio_page_table[hash_long(page, KMMIO_PAGE_HASH_BITS)];
}

/* Accessed per-cpu. */
static DEFINE_PER_CPU(struct kmmio_context, kmmio_ctx);

/*
 * Find the probe covering addr, if any. This is a linear scan over all
 * registered probes; an interval tree would scale better if the number
 * of probes ever grows large.
 *
 * Must be called while holding the RCU read lock.
 */
static struct kmmio_probe *get_kmmio_probe(unsigned long addr)
{
	struct kmmio_probe *p;
	list_for_each_entry_rcu(p, &kmmio_probes, list) {
		if (addr >= p->addr && addr < (p->addr + p->len))
			return p;
	}
	return NULL;
}

/* Must be called while holding the RCU read lock. */
static struct kmmio_fault_page *get_kmmio_fault_page(unsigned long page)
{
	struct list_head *head;
	struct kmmio_fault_page *f;

	page &= PAGE_MASK;
	head = kmmio_page_list(page);
	list_for_each_entry_rcu(f, head, list) {
		if (f->page == page)
			return f;
	}
	return NULL;
}

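/*
 * Helpers for flipping the _PAGE_PRESENT bit. When clearing, the previous
 * presence state is saved into *old so that disarming can restore it;
 * when restoring, the saved bits are OR'ed back in.
 */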
static void clear_pmd_presence(pmd_t *pmd, bool clear, pmdval_t *old)
{
	pmdval_t v = pmd_val(*pmd);

	if (clear) {
		*old = v & _PAGE_PRESENT;
		v &= ~_PAGE_PRESENT;
	} else {
		v |= *old;
	}
	set_pmd(pmd, __pmd(v));
}

static void clear_pte_presence(pte_t *pte, bool clear, pteval_t *old)
{
	pteval_t v = pte_val(*pte);

	if (clear) {
		*old = v & _PAGE_PRESENT;
		v &= ~_PAGE_PRESENT;
	} else {
		v |= *old;
	}
	set_pte_atomic(pte, __pte(v));
}

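/*
 * Clear or restore the present bit of the page f covers, for both 2M and
 * 4k mappings, and flush the TLB entry on the local cpu.
 */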
static int clear_page_presence(struct kmmio_fault_page *f, bool clear)
{
	unsigned int level;
	pte_t *pte = lookup_address(f->page, &level);

	if (!pte) {
		pr_err("kmmio: no pte for page 0x%08lx\n", f->page);
		return -1;
	}

	switch (level) {
	case PG_LEVEL_2M:
		clear_pmd_presence((pmd_t *)pte, clear, &f->old_presence);
		break;
	case PG_LEVEL_4K:
		clear_pte_presence(pte, clear, &f->old_presence);
		break;
	default:
		pr_err("kmmio: unexpected page level 0x%x.\n", level);
		return -1;
	}

	__flush_tlb_one(f->page);
	return 0;
}

/*
 * Mark the given page as not present. Access to it will trigger a fault.
 *
 * struct kmmio_fault_page is protected by RCU and kmmio_lock, but that
 * protection is ignored here: the RCU read lock is assumed held, so the
 * struct cannot disappear, and the caller must guarantee it is not freed
 * or reused while armed.
 *
 * Double arming would be a bug and triggers the warnings below. Double
 * disarming, however, is legal: it happens when a fault and an mmiotrace
 * shutdown race with each other.
 */
static int arm_kmmio_fault_page(struct kmmio_fault_page *f)
{
	int ret;

	WARN_ONCE(f->armed, KERN_ERR "kmmio page already armed.\n");
	if (f->armed) {
		pr_warning("kmmio double-arm: page 0x%08lx, ref %d, old %d\n",
			   f->page, f->count, !!f->old_presence);
	}
	ret = clear_page_presence(f, true);
	WARN_ONCE(ret < 0, KERN_ERR "kmmio arming 0x%08lx failed.\n", f->page);
	f->armed = true;
	return ret;
}

/* Restore the given page to the presence state saved when it was armed. */
static void disarm_kmmio_fault_page(struct kmmio_fault_page *f)
{
	int ret = clear_page_presence(f, false);

	WARN_ONCE(ret < 0,
		  KERN_ERR "kmmio disarming 0x%08lx failed.\n", f->page);
	f->armed = false;
}

/*
 * Handle a page fault on an armed page. Called from do_page_fault().
 *
 * We may be in an interrupt or a critical section. Also prefetching may
 * trigger a page fault. We may be in the middle of a process switch. We
 * cannot take any locks, because we could already be inside a kmmio
 * critical section on another probe.
 *
 * Local interrupts are disabled, so preemption cannot happen. Do not
 * enable interrupts, do not sleep, keep the watchdog happy.
 *
 * Return 1 if the fault was handled here, 0 otherwise.
 */
int kmmio_handler(struct pt_regs *regs, unsigned long addr)
{
	struct kmmio_context *ctx;
	struct kmmio_fault_page *faultpage;
	int ret = 0;

	/*
	 * Preemption is disabled to prevent a process switch during single
	 * stepping: only one kmmio trace can be active per cpu, so this
	 * one must finish before anything else runs. The RCU read lock is
	 * held across the single step so that the probe and the fault page
	 * do not have to be looked up again in post_kmmio_handler().
	 */
	preempt_disable();
	rcu_read_lock();

	faultpage = get_kmmio_fault_page(addr);
	if (!faultpage) {
		/*
		 * Either this page fault is not caused by kmmio, or
		 * another CPU just pulled the kmmio probe from under
		 * our feet. The latter case should not be possible.
		 */
		goto no_kmmio;
	}

	ctx = &get_cpu_var(kmmio_ctx);
	if (ctx->active) {
		if (addr == ctx->addr) {
			/*
			 * A second fault on the same page means some other
			 * condition needs handling by do_page_fault(); the
			 * page really not being present is the most common.
			 */
			pr_debug("kmmio: secondary hit for 0x%08lx CPU %d.\n",
				 addr, smp_processor_id());

			if (!faultpage->old_presence)
				pr_info("kmmio: unexpected secondary hit for "
					"address 0x%08lx on CPU %d.\n", addr,
					smp_processor_id());
		} else {
			/*
			 * Prevent overwriting the in-flight context. This
			 * should not happen; let's hope disarming at least
			 * prevents a panic.
			 */
			pr_emerg("kmmio: recursive probe hit on CPU %d, "
				 "for address 0x%08lx. Ignoring.\n",
				 smp_processor_id(), addr);
			pr_emerg("kmmio: previous hit was at 0x%08lx.\n",
				 ctx->addr);
			disarm_kmmio_fault_page(faultpage);
		}
		goto no_kmmio_ctx;
	}
	ctx->active++;

	ctx->fpage = faultpage;
	ctx->probe = get_kmmio_probe(addr);
	ctx->saved_flags = (regs->flags & (X86_EFLAGS_TF | X86_EFLAGS_IF));
	ctx->addr = addr;

	if (ctx->probe && ctx->probe->pre_handler)
		ctx->probe->pre_handler(ctx->probe, regs, addr);

	/*
	 * Enable single-stepping and disable interrupts for the faulting
	 * context. Local interrupts must not get enabled during stepping.
	 */
	regs->flags |= X86_EFLAGS_TF;
	regs->flags &= ~X86_EFLAGS_IF;

	/* Now the page is made present again and we single step. */
	disarm_kmmio_fault_page(ctx->fpage);

	/*
	 * If another cpu accesses the same page while we are stepping,
	 * the access will not be caught. It will simply succeed and the
	 * only downside is that we lose the event. If this becomes a
	 * problem, the user should drop to a single cpu before tracing.
	 */

	put_cpu_var(kmmio_ctx);
	return 1; /* fault handled */

no_kmmio_ctx:
	put_cpu_var(kmmio_ctx);
no_kmmio:
	rcu_read_unlock();
	preempt_enable_no_resched();
	return ret;
}

/*
 * Complete the single step started by kmmio_handler(): call the post
 * handler, re-arm the page and restore the saved flags.
 *
 * Interrupts are disabled on entry as trap1 is an interrupt gate and they
 * remain disabled throughout this function. This must always get called
 * as the pair to kmmio_handler().
 */
static int post_kmmio_handler(unsigned long condition, struct pt_regs *regs)
{
	int ret = 0;
	struct kmmio_context *ctx = &get_cpu_var(kmmio_ctx);

	if (!ctx->active) {
		/*
		 * Debug traps without an active context are due to either
		 * something external causing them (e.g. using a debugger
		 * while mmio tracing is enabled) or erroneous behaviour.
		 */
		pr_warning("kmmio: unexpected debug trap on CPU %d.\n",
			   smp_processor_id());
		goto out;
	}

	if (ctx->probe && ctx->probe->post_handler)
		ctx->probe->post_handler(ctx->probe, condition, regs);

	/* Prevent racing against release_kmmio_fault_page(). */
	spin_lock(&kmmio_lock);
	if (ctx->fpage->count)
		arm_kmmio_fault_page(ctx->fpage);
	spin_unlock(&kmmio_lock);

	regs->flags &= ~X86_EFLAGS_TF;
	regs->flags |= ctx->saved_flags;

	/* These were acquired in kmmio_handler(). */
	ctx->active--;
	BUG_ON(ctx->active);
	rcu_read_unlock();
	preempt_enable_no_resched();

	/*
	 * If somebody else is single stepping across a probe point, flags
	 * will have TF set; in that case, continue the remaining processing
	 * of do_debug, as if this were not a probe hit.
	 */
	if (!(regs->flags & X86_EFLAGS_TF))
		ret = 1;
out:
	put_cpu_var(kmmio_ctx);
	return ret;
}

/* Must be called while holding kmmio_lock. */
static int add_kmmio_fault_page(unsigned long page)
{
	struct kmmio_fault_page *f;

	page &= PAGE_MASK;
	f = get_kmmio_fault_page(page);
	if (f) {
		/* Already tracked: arm it if needed and take a reference. */
		if (!f->count)
			arm_kmmio_fault_page(f);
		f->count++;
		return 0;
	}

	f = kzalloc(sizeof(*f), GFP_ATOMIC);
	if (!f)
		return -1;

	f->count = 1;
	f->page = page;

	if (arm_kmmio_fault_page(f)) {
		kfree(f);
		return -1;
	}

	list_add_rcu(&f->list, kmmio_page_list(f->page));

	return 0;
}

/* Must be called while holding kmmio_lock. */
static void release_kmmio_fault_page(unsigned long page,
				struct kmmio_fault_page **release_list)
{
	struct kmmio_fault_page *f;

	page &= PAGE_MASK;
	f = get_kmmio_fault_page(page);
	if (!f)
		return;

	f->count--;
	BUG_ON(f->count < 0);
	if (!f->count) {
		disarm_kmmio_fault_page(f);
		f->release_next = *release_list;
		*release_list = f;
	}
}

/*
 * With page-unaligned ioremaps, one or two armed pages may contain
 * addresses from outside the intended mapping. Events for these addresses
 * are currently silently dropped. The events may result only from
 * programming mistakes: accessing addresses before the beginning or past
 * the end of a mapping.
 */
int register_kmmio_probe(struct kmmio_probe *p)
{
	unsigned long flags;
	int ret = 0;
	unsigned long size = 0;
	const unsigned long size_lim = p->len + (p->addr & ~PAGE_MASK);

	spin_lock_irqsave(&kmmio_lock, flags);
	if (get_kmmio_probe(p->addr)) {
		ret = -EEXIST;
		goto out;
	}
	kmmio_count++;
	list_add_rcu(&p->list, &kmmio_probes);
	while (size < size_lim) {
		if (add_kmmio_fault_page(p->addr + size))
			pr_err("kmmio: Unable to set page fault.\n");
		size += PAGE_SIZE;
	}
out:
	spin_unlock_irqrestore(&kmmio_lock, flags);
	/*
	 * XXX: What should I do here?
	 * Here was a call to global_flush_tlb(), but it does not exist
	 * anymore. It seems it's not needed after all.
	 */
	return ret;
}
EXPORT_SYMBOL(register_kmmio_probe);
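
/*
 * Example usage: an illustrative sketch only (the real in-tree user is
 * mmiotrace's mmio-mod.c). The kmmio_probe field names and handler
 * signatures follow the calls made from kmmio_handler() and
 * post_kmmio_handler() above; my_pre(), my_post(), my_probe and mmio_phys
 * are made-up names:
 *
 *	static void my_pre(struct kmmio_probe *p, struct pt_regs *regs,
 *			   unsigned long addr)
 *	{
 *		// runs with the page still not present, before the access
 *		pr_info("hit at 0x%08lx\n", addr);
 *	}
 *
 *	static void my_post(struct kmmio_probe *p, unsigned long condition,
 *			    struct pt_regs *regs)
 *	{
 *		// runs from the debug trap, after the single step
 *		pr_info("access completed\n");
 *	}
 *
 *	static struct kmmio_probe my_probe;
 *
 *	void __iomem *io = ioremap(mmio_phys, PAGE_SIZE);
 *	my_probe.addr = (unsigned long)io;
 *	my_probe.len = PAGE_SIZE;
 *	my_probe.pre_handler = my_pre;
 *	my_probe.post_handler = my_post;
 *	if (register_kmmio_probe(&my_probe))
 *		pr_err("probe registration failed\n");
 *
 *	// ... accesses through io now fault into the handlers ...
 *
 *	unregister_kmmio_probe(&my_probe);
 *	synchronize_rcu();	// handlers may run until a grace period ends
 */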

/* Free the released fault pages; runs after a second RCU grace period. */
static void rcu_free_kmmio_fault_pages(struct rcu_head *head)
{
	struct kmmio_delayed_release *dr =
		container_of(head, struct kmmio_delayed_release, rcu);
	struct kmmio_fault_page *f = dr->release_list;

	while (f) {
		struct kmmio_fault_page *next = f->release_next;

		BUG_ON(f->count);
		kfree(f);
		f = next;
	}
	kfree(dr);
}

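/*
 * After the first grace period, no fault handler can still be using a
 * released page, so unlink the pages from kmmio_page_table. Pages that
 * were re-registered in the meantime (count != 0) are dropped from the
 * release list instead. Freeing waits for one more grace period, because
 * readers may still be walking the hash chains.
 */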
static void remove_kmmio_fault_pages(struct rcu_head *head)
{
	struct kmmio_delayed_release *dr =
		container_of(head, struct kmmio_delayed_release, rcu);
	struct kmmio_fault_page *f = dr->release_list;
	struct kmmio_fault_page **prevp = &dr->release_list;
	unsigned long flags;

	spin_lock_irqsave(&kmmio_lock, flags);
	while (f) {
		if (!f->count) {
			list_del_rcu(&f->list);
			prevp = &f->release_next;
		} else {
			/* Re-registered meanwhile; drop from release list. */
			*prevp = f->release_next;
		}
		f = f->release_next;
	}
	spin_unlock_irqrestore(&kmmio_lock, flags);

	/* This is the real RCU cleanup. */
	call_rcu(&dr->rcu, rcu_free_kmmio_fault_pages);
}

/*
 * Remove a kmmio probe. You have to synchronize_rcu() before you can be
 * sure that the callbacks will not be called anymore. Unregistering a
 * kmmio fault page has three steps:
 * 1. release_kmmio_fault_page()
 *    Disarm the page, wait a grace period to let all faults finish.
 * 2. remove_kmmio_fault_pages()
 *    Remove the pages from kmmio_page_table.
 * 3. rcu_free_kmmio_fault_pages()
 *    Actually free the kmmio_fault_page structs, after another grace
 *    period.
 */
void unregister_kmmio_probe(struct kmmio_probe *p)
{
	unsigned long flags;
	unsigned long size = 0;
	const unsigned long size_lim = p->len + (p->addr & ~PAGE_MASK);
	struct kmmio_fault_page *release_list = NULL;
	struct kmmio_delayed_release *drelease;

	spin_lock_irqsave(&kmmio_lock, flags);
	while (size < size_lim) {
		release_kmmio_fault_page(p->addr + size, &release_list);
		size += PAGE_SIZE;
	}
	list_del_rcu(&p->list);
	kmmio_count--;
	spin_unlock_irqrestore(&kmmio_lock, flags);

	drelease = kmalloc(sizeof(*drelease), GFP_ATOMIC);
	if (!drelease) {
		pr_crit("kmmio: leaking kmmio_fault_page objects.\n");
		return;
	}
	drelease->release_list = release_list;

	/*
	 * This is not really RCU here. We have just disarmed a set of
	 * pages so that they cannot trigger page faults anymore. However,
	 * we cannot remove the pages from kmmio_page_table, because a
	 * probe hit might be in flight on another CPU. The pages are
	 * collected into a list, and they will be removed from
	 * kmmio_page_table only when it is certain that no probe hit
	 * related to these pages can be in flight. An RCU grace period
	 * gives exactly that guarantee.
	 *
	 * If we removed the pages too early, the kmmio page fault handler
	 * might not find the respective page and would decide the fault is
	 * not a kmmio fault, when it actually is. That would lead to
	 * madness.
	 */
	call_rcu(&drelease->rcu, remove_kmmio_fault_pages);
}
EXPORT_SYMBOL(unregister_kmmio_probe);

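/*
 * Single-step completion arrives as a DR_STEP debug trap through the die
 * notifier chain and is handed to post_kmmio_handler().
 */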
static int
kmmio_die_notifier(struct notifier_block *nb, unsigned long val, void *args)
{
	struct die_args *arg = args;

	if (val == DIE_DEBUG && (arg->err & DR_STEP))
		if (post_kmmio_handler(arg->err, arg->regs) == 1)
			return NOTIFY_STOP;

	return NOTIFY_DONE;
}

static struct notifier_block nb_die = {
	.notifier_call = kmmio_die_notifier,
};

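/* Initialize the page hash table and hook into the die notifier chain. */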
int kmmio_init(void)
{
	int i;

	for (i = 0; i < KMMIO_PAGE_TABLE_SIZE; i++)
		INIT_LIST_HEAD(&kmmio_page_table[i]);

	return register_die_notifier(&nb_die);
}

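/*
 * All probes should have been unregistered by now. Any fault page still
 * in kmmio_page_table has leaked and will never be freed.
 */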
void kmmio_cleanup(void)
{
	int i;

	unregister_die_notifier(&nb_die);
	for (i = 0; i < KMMIO_PAGE_TABLE_SIZE; i++) {
		WARN_ONCE(!list_empty(&kmmio_page_table[i]),
			  KERN_ERR "kmmio_page_table not empty at cleanup, any further tracing will leak memory.\n");
	}
}