/*
 *  Kernel Probes (KProbes)
 *
 *  x86 architecture-specific kprobes code: breakpoint insertion and
 *  removal, instruction copying and boosting, single-step handling,
 *  and kretprobe/jprobe support.
 */

#include <linux/kprobes.h>
#include <linux/ptrace.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/hardirq.h>
#include <linux/preempt.h>
#include <linux/module.h>
#include <linux/kdebug.h>
#include <linux/kallsyms.h>
#include <linux/ftrace.h>
#include <linux/frame.h>

#include <asm/cacheflush.h>
#include <asm/desc.h>
#include <asm/pgtable.h>
#include <asm/uaccess.h>
#include <asm/alternative.h>
#include <asm/insn.h>
#include <asm/debugreg.h>

#include "common.h"

void jprobe_return_end(void);

DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);

#define stack_addr(regs) ((unsigned long *)kernel_stack_pointer(regs))

#define W(row, b0, b1, b2, b3, b4, b5, b6, b7, b8, b9, ba, bb, bc, bd, be, bf)\
	(((b0##UL << 0x0)|(b1##UL << 0x1)|(b2##UL << 0x2)|(b3##UL << 0x3) |   \
	  (b4##UL << 0x4)|(b5##UL << 0x5)|(b6##UL << 0x6)|(b7##UL << 0x7) |   \
	  (b8##UL << 0x8)|(b9##UL << 0x9)|(ba##UL << 0xa)|(bb##UL << 0xb) |   \
	  (bc##UL << 0xc)|(bd##UL << 0xd)|(be##UL << 0xe)|(bf##UL << 0xf))    \
	 << (row % 32))
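
/*
 * Worked example (illustrative, not from the original source): each u32
 * below packs two 16-opcode rows.  For row 0x90 (the setcc family, all
 * boostable), W(0x90, 1, ..., 1) evaluates to 0xffff << (0x90 % 32),
 * i.e. 0xffff0000, OR-ed into array entry 0x90 / 32 == 4; a later
 * test_bit(0x9n, twobyte_is_boostable) therefore finds bit 0x9n set.
 */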

/*
 * Undefined/reserved opcodes, conditional jumps, opcode extension
 * groups, and some special opcodes can not be boosted.
 * This table is non-const and volatile to keep gcc from statically
 * optimizing it out, as variable_test_bit makes gcc think only
 * *(unsigned long *) is used.
 */
static volatile u32 twobyte_is_boostable[256 / 32] = {
	/*      0  1  2  3  4  5  6  7  8  9  a  b  c  d  e  f          */
	/*      ----------------------------------------------          */
	W(0x00, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0) | /* 00 */
	W(0x10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) , /* 10 */
	W(0x20, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) | /* 20 */
	W(0x30, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) , /* 30 */
	W(0x40, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 40 */
	W(0x50, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) , /* 50 */
	W(0x60, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1) | /* 60 */
	W(0x70, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1) , /* 70 */
	W(0x80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) | /* 80 */
	W(0x90, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 90 */
	W(0xa0, 1, 1, 0, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1) | /* a0 */
	W(0xb0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1) , /* b0 */
	W(0xc0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1) | /* c0 */
	W(0xd0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1) , /* d0 */
	W(0xe0, 0, 1, 1, 0, 0, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1) | /* e0 */
	W(0xf0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 0, 1, 1, 1, 0)   /* f0 */
	/*      ----------------------------------------------          */
	/*      0  1  2  3  4  5  6  7  8  9  a  b  c  d  e  f          */
};
#undef W

struct kretprobe_blackpoint kretprobe_blacklist[] = {
	{"__switch_to", },	/* This function switches only current task, but
				   doesn't switch kernel stack. */
	{NULL, NULL}		/* Terminator */
};

const int kretprobe_blacklist_size = ARRAY_SIZE(kretprobe_blacklist);

static void __kprobes __synthesize_relative_insn(void *from, void *to, u8 op)
{
	struct __arch_relative_insn {
		u8 op;
		s32 raddr;
	} __packed *insn;

	insn = (struct __arch_relative_insn *)from;
	insn->raddr = (s32)((long)(to) - ((long)(from) + 5));
	insn->op = op;
}

/* Insert a jump instruction at address 'from', which jumps to address 'to'. */
void __kprobes synthesize_reljump(void *from, void *to)
{
	__synthesize_relative_insn(from, to, RELATIVEJUMP_OPCODE);
}

/* Insert a call instruction at address 'from', which calls address 'to'. */
void __kprobes synthesize_relcall(void *from, void *to)
{
	__synthesize_relative_insn(from, to, RELATIVECALL_OPCODE);
}
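
/*
 * Worked example (illustrative numbers, not from the original source):
 * synthesize_reljump(from, to) with from == 0xffffffff81000000 and
 * to == 0xffffffff81000020 stores the jmp rel32 opcode and
 * raddr == 0x20 - 5 == 0x1b, because the rel32 displacement of a
 * 5-byte jump/call is taken relative to the end of the instruction.
 */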

/*
 * Skip the legacy instruction prefixes, and on x86-64 a trailing REX
 * prefix, returning a pointer to the opcode byte itself.
 */
static kprobe_opcode_t *__kprobes skip_prefixes(kprobe_opcode_t *insn)
{
	insn_attr_t attr;

	attr = inat_get_opcode_attribute((insn_byte_t)*insn);
	while (inat_is_legacy_prefix(attr)) {
		insn++;
		attr = inat_get_opcode_attribute((insn_byte_t)*insn);
	}
#ifdef CONFIG_X86_64
	if (inat_is_rex_prefix(attr))
		insn++;
#endif
	return insn;
}
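
/*
 * Example (illustrative): for the byte sequence f0 48 0f b1 08
 * (lock cmpxchg %rcx,(%rax)), skip_prefixes() steps past the legacy
 * LOCK prefix 0xf0, then past the REX.W prefix 0x48, and returns a
 * pointer to the 0x0f opcode byte.
 */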

/*
 * Returns non-zero if the opcode is boostable.
 * RIP-relative instructions are adjusted at copying time in 64-bit mode.
 */
int __kprobes can_boost(kprobe_opcode_t *opcodes)
{
	kprobe_opcode_t opcode;
	kprobe_opcode_t *orig_opcodes = opcodes;

	if (search_exception_tables((unsigned long)opcodes))
		return 0;	/* Page fault may occur on this address. */

retry:
	if (opcodes - orig_opcodes > MAX_INSN_SIZE - 1)
		return 0;
	opcode = *(opcodes++);

	/* 2nd-byte opcode */
	if (opcode == 0x0f) {
		if (opcodes - orig_opcodes > MAX_INSN_SIZE - 1)
			return 0;
		return test_bit(*opcodes,
				(unsigned long *)twobyte_is_boostable);
	}

	switch (opcode & 0xf0) {
#ifdef CONFIG_X86_64
	case 0x40:
		goto retry; /* REX prefix is boostable */
#endif
	case 0x60:
		if (0x63 < opcode && opcode < 0x67)
			goto retry; /* prefixes */
		/* can't boost Address-size override and bound */
		return (opcode != 0x62 && opcode != 0x67);
	case 0x70:
		return 0; /* can't boost conditional jump */
	case 0xc0:
		/* can't boost software interruptions */
		return (0xc1 < opcode && opcode < 0xcc) || opcode == 0xcf;
	case 0xd0:
		/* can boost AA* and XLAT */
		return (opcode == 0xd4 || opcode == 0xd5 || opcode == 0xd7);
	case 0xe0:
		/* can boost in/out and absolute jmps */
		return ((opcode & 0x04) || opcode == 0xea);
	case 0xf0:
		if ((opcode & 0x0c) == 0 && opcode != 0xf1)
			goto retry; /* lock/rep(ne) prefix */
		/* clear and set flags are boostable */
		return (opcode == 0xf5 || (0xf7 < opcode && opcode < 0xfe));
	default:
		/* segment override prefixes are boostable */
		if (opcode == 0x26 || opcode == 0x36 || opcode == 0x3e)
			goto retry; /* prefixes */
		/* CS override prefix and call are not boostable */
		return (opcode != 0x2e && opcode != 0x9a);
	}
}
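
/*
 * Example (illustrative): can_boost() on 89 d8 (mov %ebx,%eax) falls
 * through to the default case and returns 1, so the single-stepped
 * copy may be "boosted" with an appended jump back.  On 70 xx (a jo
 * short jump) it returns 0, since a taken conditional jump executed
 * from the copy would resume at the wrong address.
 */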

static unsigned long
__recover_probed_insn(kprobe_opcode_t *buf, unsigned long addr)
{
	struct kprobe *kp;
	unsigned long faddr;

	kp = get_kprobe((void *)addr);
	faddr = ftrace_location(addr);
	/*
	 * Use the current code if it is not modified by Kprobes
	 * and it cannot be modified by ftrace.
	 */
	if (!kp && !faddr)
		return addr;

	/*
	 * Basically, kp->ainsn.insn has the original instruction.
	 * However, RIP-relative instructions can not be single-stepped
	 * at a different place, so __copy_instruction() tweaks the
	 * displacement of those instructions.  In that case, we can't
	 * recover the instruction from kp->ainsn.insn.
	 *
	 * On the other hand, for a normal kprobe, kp->opcode has a copy
	 * of the first byte of the probed instruction, which was
	 * overwritten by int3.  Since the instruction at kp->addr is not
	 * modified by kprobes except for its first byte, we can recover
	 * the original instruction from it and kp->opcode.
	 *
	 * In the case of kprobes using ftrace, we do not have a copy of
	 * the original instruction.  In fact, the ftrace location might
	 * be modified at any time and could even be in an inconsistent
	 * state.  Fortunately, we know that the ftrace location is just
	 * a NOP, so we can recover it from ideal_nops[].
	 */
	memcpy(buf, (void *)addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
	if (faddr)
		memcpy(buf, ideal_nops[NOP_ATOMIC5], 5);
	else
		buf[0] = kp->opcode;
	return (unsigned long)buf;
}

/*
 * Recover the probed instruction at addr for further analysis.
 * The caller must lock kprobes via kprobe_mutex, or disable preemption,
 * to keep the instruction from being changed underneath us.
 */
unsigned long recover_probed_instruction(kprobe_opcode_t *buf, unsigned long addr)
{
	unsigned long __addr;

	__addr = __recover_optprobed_insn(buf, addr);
	if (__addr != addr)
		return __addr;

	return __recover_probed_insn(buf, addr);
}

/* Check if paddr is at an instruction boundary */
static int __kprobes can_probe(unsigned long paddr)
{
	unsigned long addr, __addr, offset = 0;
	struct insn insn;
	kprobe_opcode_t buf[MAX_INSN_SIZE];

	if (!kallsyms_lookup_size_offset(paddr, NULL, &offset))
		return 0;

	/* Decode instructions from the start of the symbol */
	addr = paddr - offset;
	while (addr < paddr) {
		/*
		 * Check if the instruction has been modified by another
		 * kprobe, in which case we replace the breakpoint with
		 * the original instruction in our buffer.
		 * Also, jump optimization will change the breakpoint to a
		 * relative jump.  Since the relative jump itself can be
		 * further modified by other kprobes, we can't recover the
		 * instruction from such modified code directly, so we use
		 * the recovery functions here.
		 */
		__addr = recover_probed_instruction(buf, addr);
		kernel_insn_init(&insn, (void *)__addr, MAX_INSN_SIZE);
		insn_get_length(&insn);

		/*
		 * Another debugging subsystem might insert this breakpoint.
		 * In that case, we can't recover it.
		 */
		if (insn.opcode.bytes[0] == BREAKPOINT_INSTRUCTION)
			return 0;
		addr += insn.length;
	}

	return (addr == paddr);
}
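
/*
 * Example (illustrative): if a function starts with the 5-byte
 * "mov $0x1,%eax" (b8 01 00 00 00), then can_probe(func + 1) decodes
 * from func, advances to func + 5, overshoots paddr == func + 1, and
 * returns 0: that probe would have split an instruction.
 */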

/*
 * Returns non-zero if the opcode modifies the interrupt flag.
 */
static int __kprobes is_IF_modifier(kprobe_opcode_t *insn)
{
	/* Skip prefixes */
	insn = skip_prefixes(insn);

	switch (*insn) {
	case 0xfa:		/* cli */
	case 0xfb:		/* sti */
	case 0xcf:		/* iret/iretd */
	case 0x9d:		/* popf/popfd */
		return 1;
	}

	return 0;
}

/*
 * Copy an instruction, recovering it first if it has been modified by
 * kprobes, and adjust the displacement if the instruction uses the
 * %rip-relative addressing mode.
 * Returns the length of the copied instruction, or 0 on error.
 */
int __kprobes __copy_instruction(u8 *dest, u8 *src)
{
	struct insn insn;
	kprobe_opcode_t buf[MAX_INSN_SIZE];
	unsigned long recovered_insn =
		recover_probed_instruction(buf, (unsigned long)src);

	kernel_insn_init(&insn, (void *)recovered_insn, MAX_INSN_SIZE);
	insn_get_length(&insn);
	/* Another subsystem put a breakpoint here; we failed to recover it */
	if (insn.opcode.bytes[0] == BREAKPOINT_INSTRUCTION)
		return 0;
	memcpy(dest, insn.kaddr, insn.length);

#ifdef CONFIG_X86_64
	if (insn_rip_relative(&insn)) {
		s64 newdisp;
		u8 *disp;
		kernel_insn_init(&insn, dest, insn.length);
		insn_get_displacement(&insn);
		/*
		 * The copied instruction uses the %rip-relative addressing
		 * mode.  Adjust the displacement for the difference between
		 * the original location of this instruction and the location
		 * of the copy that will actually be run.  The tricky bit here
		 * is making sure that the sign extension happens correctly in
		 * this calculation, since we need a signed 32-bit result to
		 * be sign-extended to 64 bits when it's added to the %rip
		 * value and yield the same 64-bit result that the sign
		 * extension of the original signed 32-bit displacement would
		 * have given.
		 */
		newdisp = (u8 *) src + (s64) insn.displacement.value - (u8 *) dest;
		if ((s64) (s32) newdisp != newdisp) {
			pr_err("Kprobes error: new displacement does not fit into s32 (%llx)\n", newdisp);
			pr_err("\tSrc: %p, Dest: %p, old disp: %x\n", src, dest, insn.displacement.value);
			return 0;
		}
		disp = (u8 *) dest + insn_offset_displacement(&insn);
		*(s32 *) disp = (s32) newdisp;
	}
#endif
	return insn.length;
}
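
/*
 * Worked example (illustrative addresses): suppose src == 0xffffffff81000000
 * holds "mov 0x1000(%rip),%rax" (disp == 0x1000) and the copy lands at
 * dest == 0xffffffffa0000000.  The %rip-relative target is computed past
 * the end of the instruction, and since source and copy have the same
 * length that term cancels, leaving
 * newdisp == src + 0x1000 - dest == -0x1efff000.  That still fits in an
 * s32, so the copy's displacement is rewritten and both instructions
 * reference the same absolute address.
 */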

static int __kprobes arch_copy_kprobe(struct kprobe *p)
{
	int ret;

	/* Copy the instruction, recovering it if another probe modified it */
	ret = __copy_instruction(p->ainsn.insn, p->addr);
	if (!ret)
		return -EINVAL;

	/*
	 * __copy_instruction can modify the displacement of the instruction,
	 * but that doesn't affect the boostability check.
	 */
	if (can_boost(p->ainsn.insn))
		p->ainsn.boostable = 0;
	else
		p->ainsn.boostable = -1;

	/* Check whether the instruction modifies the interrupt flag */
	p->ainsn.if_modifier = is_IF_modifier(p->ainsn.insn);

	/* The displacement change doesn't affect the first byte */
	p->opcode = p->ainsn.insn[0];

	return 0;
}

int __kprobes arch_prepare_kprobe(struct kprobe *p)
{
	if (alternatives_text_reserved(p->addr, p->addr))
		return -EINVAL;

	if (!can_probe((unsigned long)p->addr))
		return -EILSEQ;

	/* insn: must be on a special executable page on x86. */
	p->ainsn.insn = get_insn_slot();
	if (!p->ainsn.insn)
		return -ENOMEM;

	return arch_copy_kprobe(p);
}

void __kprobes arch_arm_kprobe(struct kprobe *p)
{
	text_poke(p->addr, ((unsigned char []){BREAKPOINT_INSTRUCTION}), 1);
}

void __kprobes arch_disarm_kprobe(struct kprobe *p)
{
	text_poke(p->addr, &p->opcode, 1);
}

void __kprobes arch_remove_kprobe(struct kprobe *p)
{
	if (p->ainsn.insn) {
		free_insn_slot(p->ainsn.insn, (p->ainsn.boostable == 1));
		p->ainsn.insn = NULL;
	}
}
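
/*
 * Usage sketch (hypothetical module code, not part of this file): a
 * client registers a probe and the arch hooks above do the rest --
 * arch_prepare_kprobe() copies the instruction to a slot and
 * arch_arm_kprobe() pokes the int3 in:
 *
 *	static int my_pre(struct kprobe *kp, struct pt_regs *regs)
 *	{
 *		pr_info("hit at %p\n", kp->addr);
 *		return 0;			// continue normal processing
 *	}
 *	static struct kprobe my_kp = {
 *		.symbol_name = "do_fork",	// illustrative target symbol
 *		.pre_handler = my_pre,
 *	};
 *	register_kprobe(&my_kp);		// returns 0 on success
 *	...
 *	unregister_kprobe(&my_kp);
 */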

static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb)
{
	kcb->prev_kprobe.kp = kprobe_running();
	kcb->prev_kprobe.status = kcb->kprobe_status;
	kcb->prev_kprobe.old_flags = kcb->kprobe_old_flags;
	kcb->prev_kprobe.saved_flags = kcb->kprobe_saved_flags;
}

static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb)
{
	__this_cpu_write(current_kprobe, kcb->prev_kprobe.kp);
	kcb->kprobe_status = kcb->prev_kprobe.status;
	kcb->kprobe_old_flags = kcb->prev_kprobe.old_flags;
	kcb->kprobe_saved_flags = kcb->prev_kprobe.saved_flags;
}

static void __kprobes set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
				struct kprobe_ctlblk *kcb)
{
	__this_cpu_write(current_kprobe, p);
	kcb->kprobe_saved_flags = kcb->kprobe_old_flags
		= (regs->flags & (X86_EFLAGS_TF | X86_EFLAGS_IF));
	if (p->ainsn.if_modifier)
		kcb->kprobe_saved_flags &= ~X86_EFLAGS_IF;
}

static void __kprobes clear_btf(void)
{
	if (test_thread_flag(TIF_BLOCKSTEP)) {
		unsigned long debugctl = get_debugctlmsr();

		debugctl &= ~DEBUGCTLMSR_BTF;
		update_debugctlmsr(debugctl);
	}
}

static void __kprobes restore_btf(void)
{
	if (test_thread_flag(TIF_BLOCKSTEP)) {
		unsigned long debugctl = get_debugctlmsr();

		debugctl |= DEBUGCTLMSR_BTF;
		update_debugctlmsr(debugctl);
	}
}

void __kprobes
arch_prepare_kretprobe(struct kretprobe_instance *ri, struct pt_regs *regs)
{
	unsigned long *sara = stack_addr(regs);

	ri->ret_addr = (kprobe_opcode_t *) *sara;

	/* Replace the return addr with trampoline addr */
	*sara = (unsigned long) &kretprobe_trampoline;
}
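
/*
 * Usage sketch (hypothetical module code): arch_prepare_kretprobe()
 * hijacks the on-stack return address, so a handler registered like
 * this runs when the probed function returns:
 *
 *	static int my_ret(struct kretprobe_instance *ri, struct pt_regs *regs)
 *	{
 *		pr_info("retval = %lx\n", regs_return_value(regs));
 *		return 0;
 *	}
 *	static struct kretprobe my_rp = {
 *		.handler = my_ret,
 *		.kp.symbol_name = "dput",	// illustrative target
 *	};
 *	register_kretprobe(&my_rp);
 */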

static void __kprobes
setup_singlestep(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *kcb, int reenter)
{
	if (setup_detour_execution(p, regs, reenter))
		return;

#if !defined(CONFIG_PREEMPT)
	if (p->ainsn.boostable == 1 && !p->post_handler) {
		/* Boost up -- we can execute the copied instructions directly */
		if (!reenter)
			reset_current_kprobe();
		/*
		 * Reentering a boosted probe doesn't reset current_kprobe,
		 * nor set current_kprobe, because it doesn't use single
		 * stepping.
		 */
		regs->ip = (unsigned long)p->ainsn.insn;
		preempt_enable_no_resched();
		return;
	}
#endif
	if (reenter) {
		save_previous_kprobe(kcb);
		set_current_kprobe(p, regs, kcb);
		kcb->kprobe_status = KPROBE_REENTER;
	} else
		kcb->kprobe_status = KPROBE_HIT_SS;
	/* Prepare real single stepping */
	clear_btf();
	regs->flags |= X86_EFLAGS_TF;
	regs->flags &= ~X86_EFLAGS_IF;
	/* single step inline if the instruction is an int3 */
	if (p->opcode == BREAKPOINT_INSTRUCTION)
		regs->ip = (unsigned long)p->addr;
	else
		regs->ip = (unsigned long)p->ainsn.insn;
}

/*
 * We have reentered the kprobe_handler(), since another probe was hit while
 * within the handler.  We save the original kprobes variables and just single
 * step on the instruction of the new probe without calling any user handlers.
 */
static int __kprobes
reenter_kprobe(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *kcb)
{
	switch (kcb->kprobe_status) {
	case KPROBE_HIT_SSDONE:
	case KPROBE_HIT_ACTIVE:
		kprobes_inc_nmissed_count(p);
		setup_singlestep(p, regs, kcb, 1);
		break;
	case KPROBE_HIT_SS:
		/*
		 * A probe has been hit in the codepath leading up to, or just
		 * after, single-stepping of a probed instruction.  This
		 * entire codepath should strictly reside in .kprobes.text.
		 * Raise a BUG, or we'll continue in an endless reentering
		 * loop and eventually overflow the stack.
		 */
		printk(KERN_WARNING "Unrecoverable kprobe detected at %p.\n",
		       p->addr);
		dump_kprobe(p);
		BUG();
	default:
		/* impossible cases */
		WARN_ON(1);
		return 0;
	}

	return 1;
}

/*
 * Interrupts are disabled on entry as trap3 is an interrupt gate and they
 * remain disabled throughout this function.
 */
static int __kprobes kprobe_handler(struct pt_regs *regs)
{
	kprobe_opcode_t *addr;
	struct kprobe *p;
	struct kprobe_ctlblk *kcb;

	addr = (kprobe_opcode_t *)(regs->ip - sizeof(kprobe_opcode_t));
	/*
	 * We don't want to be preempted for the entire
	 * duration of kprobe processing.  We conditionally
	 * re-enable preemption at the end of this function,
	 * and also in reenter_kprobe() and setup_singlestep().
	 */
	preempt_disable();

	kcb = get_kprobe_ctlblk();
	p = get_kprobe(addr);

	if (p) {
		if (kprobe_running()) {
			if (reenter_kprobe(p, regs, kcb))
				return 1;
		} else {
			set_current_kprobe(p, regs, kcb);
			kcb->kprobe_status = KPROBE_HIT_ACTIVE;

			/*
			 * If we have no pre-handler or it returned 0, we
			 * continue with normal processing.  If we have a
			 * pre-handler and it returned non-zero, it prepped
			 * for calling the break_handler below on re-entry
			 * for jprobe processing, so get out doing nothing
			 * more here.
			 */
			if (!p->pre_handler || !p->pre_handler(p, regs))
				setup_singlestep(p, regs, kcb, 0);
			return 1;
		}
	} else if (*addr != BREAKPOINT_INSTRUCTION) {
		/*
		 * The breakpoint instruction was removed right
		 * after we hit it.  Another cpu has removed
		 * either a probepoint or a debugger breakpoint
		 * at this address.  In either case, no further
		 * handling of this interrupt is appropriate.
		 * Back up over the (now missing) int3 and run
		 * the original instruction.
		 */
		regs->ip = (unsigned long)addr;
		preempt_enable_no_resched();
		return 1;
	} else if (kprobe_running()) {
		p = __this_cpu_read(current_kprobe);
		if (p->break_handler && p->break_handler(p, regs)) {
			if (!skip_singlestep(p, regs, kcb))
				setup_singlestep(p, regs, kcb, 0);
			return 1;
		}
	} /* else: not a kprobe fault; let the kernel handle it */

	preempt_enable_no_resched();
	return 0;
}

/*
 * When a retprobed function returns, this code saves registers and
 * calls trampoline_handler(), which in turn runs the kretprobe's
 * user handler.
 */
static void __used __kprobes kretprobe_trampoline_holder(void)
{
	asm volatile (
			".global kretprobe_trampoline\n"
			"kretprobe_trampoline: \n"
#ifdef CONFIG_X86_64
			/* We don't bother saving the ss register */
			"	pushq %rsp\n"
			"	pushfq\n"
			SAVE_REGS_STRING
			"	movq %rsp, %rdi\n"
			"	call trampoline_handler\n"
			/* Replace saved sp with true return address. */
			"	movq %rax, 152(%rsp)\n"
			RESTORE_REGS_STRING
			"	popfq\n"
#else
			"	pushf\n"
			SAVE_REGS_STRING
			"	movl %esp, %eax\n"
			"	call trampoline_handler\n"
			/* Move flags to cs */
			"	movl 56(%esp), %edx\n"
			"	movl %edx, 52(%esp)\n"
			/* Replace saved flags with true return address. */
			"	movl %eax, 56(%esp)\n"
			RESTORE_REGS_STRING
			"	popf\n"
#endif
			"	ret\n");
}
STACK_FRAME_NON_STANDARD(kretprobe_trampoline_holder);
STACK_FRAME_NON_STANDARD(kretprobe_trampoline);

/*
 * Called from kretprobe_trampoline
 */
__visible __used __kprobes void *trampoline_handler(struct pt_regs *regs)
{
	struct kretprobe_instance *ri = NULL;
	struct hlist_head *head, empty_rp;
	struct hlist_node *tmp;
	unsigned long flags, orig_ret_address = 0;
	unsigned long trampoline_address = (unsigned long)&kretprobe_trampoline;
	kprobe_opcode_t *correct_ret_addr = NULL;

	INIT_HLIST_HEAD(&empty_rp);
	kretprobe_hash_lock(current, &head, &flags);
	/* fixup registers */
#ifdef CONFIG_X86_64
	regs->cs = __KERNEL_CS;
#else
	regs->cs = __KERNEL_CS | get_kernel_rpl();
	regs->gs = 0;
#endif
	regs->ip = trampoline_address;
	regs->orig_ax = ~0UL;

	/*
	 * It is possible to have multiple instances associated with a given
	 * task either because multiple functions in the call path have
	 * return probes installed on them, and/or more than one
	 * return probe was registered for a target function.
	 *
	 * We can handle this because:
	 *     - instances are always pushed into the head of the list
	 *     - when multiple return probes are registered for the same
	 *	 function, the (chronologically) first instance's ret_addr
	 *	 will be the real return address, and all the rest will
	 *	 point to kretprobe_trampoline.
	 */
	hlist_for_each_entry_safe(ri, tmp, head, hlist) {
		if (ri->task != current)
			/* another task is sharing our hash bucket */
			continue;

		orig_ret_address = (unsigned long)ri->ret_addr;

		if (orig_ret_address != trampoline_address)
			/*
			 * This is the real return address.  Any other
			 * instances associated with this task are for
			 * other calls deeper on the call stack.
			 */
			break;
	}

	kretprobe_assert(ri, orig_ret_address, trampoline_address);

	correct_ret_addr = ri->ret_addr;
	hlist_for_each_entry_safe(ri, tmp, head, hlist) {
		if (ri->task != current)
			/* another task is sharing our hash bucket */
			continue;

		orig_ret_address = (unsigned long)ri->ret_addr;
		if (ri->rp && ri->rp->handler) {
			__this_cpu_write(current_kprobe, &ri->rp->kp);
			get_kprobe_ctlblk()->kprobe_status = KPROBE_HIT_ACTIVE;
			ri->ret_addr = correct_ret_addr;
			ri->rp->handler(ri, regs);
			__this_cpu_write(current_kprobe, NULL);
		}

		recycle_rp_inst(ri, &empty_rp);

		if (orig_ret_address != trampoline_address)
			/*
			 * This is the real return address.  Any other
			 * instances associated with this task are for
			 * other calls deeper on the call stack.
			 */
			break;
	}

	kretprobe_hash_unlock(current, &flags);

	hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
		hlist_del(&ri->hlist);
		kfree(ri);
	}
	return (void *)orig_ret_address;
}

/*
 * Called after single-stepping.  p->addr is the address of the
 * instruction whose first byte has been replaced by the "int 3"
 * instruction.  To avoid the SMP problems that can occur when we
 * temporarily put back the original opcode to single-step, we
 * single-stepped a copy of the instruction.  The address of this
 * copy is p->ainsn.insn.
 *
 * This function prepares to return from the post-single-step
 * interrupt.  We have to fix up the stack as follows:
 *
 * 0) Except in the case of absolute or indirect jump or call instructions,
 * the new ip is relative to the copied instruction.  We need to make
 * it relative to the original instruction.
 *
 * 1) If the single-stepped instruction was pushfl, then the TF and IF
 * flags are set in the just-pushed flags, and may need to be cleared.
 *
 * 2) If the single-stepped instruction was a call, the return address
 * that is atop the stack is the address following the copied instruction.
 * We need to make it the address following the original instruction.
 *
 * If this is the first time we've single-stepped the instruction at
 * this probepoint, and the instruction is boostable, boost it: add a
 * jump instruction after the copied instruction, that jumps to the next
 * instruction after the probepoint.
 */
static void __kprobes
resume_execution(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *kcb)
{
	unsigned long *tos = stack_addr(regs);
	unsigned long copy_ip = (unsigned long)p->ainsn.insn;
	unsigned long orig_ip = (unsigned long)p->addr;
	kprobe_opcode_t *insn = p->ainsn.insn;

	/* Skip prefixes */
	insn = skip_prefixes(insn);

	regs->flags &= ~X86_EFLAGS_TF;
	switch (*insn) {
	case 0x9c:	/* pushfl */
		*tos &= ~(X86_EFLAGS_TF | X86_EFLAGS_IF);
		*tos |= kcb->kprobe_old_flags;
		break;
	case 0xc2:	/* iret/ret/lret */
	case 0xc3:
	case 0xca:
	case 0xcb:
	case 0xcf:
	case 0xea:	/* jmp absolute -- ip is correct */
		/* ip is already adjusted, no more changes required */
		p->ainsn.boostable = 1;
		goto no_change;
	case 0xe8:	/* call relative - Fix return addr */
		*tos = orig_ip + (*tos - copy_ip);
		break;
#ifdef CONFIG_X86_32
	case 0x9a:	/* call absolute -- same as call absolute, indirect */
		*tos = orig_ip + (*tos - copy_ip);
		goto no_change;
#endif
	case 0xff:
		if ((insn[1] & 0x30) == 0x10) {
			/*
			 * call absolute, indirect
			 * Fix return addr; ip is correct.
			 * But this is not boostable.
			 */
			*tos = orig_ip + (*tos - copy_ip);
			goto no_change;
		} else if (((insn[1] & 0x31) == 0x20) ||
			   ((insn[1] & 0x31) == 0x21)) {
			/*
			 * jmp near and far, absolute indirect
			 * ip is correct.  And this is boostable.
			 */
			p->ainsn.boostable = 1;
			goto no_change;
		}
	default:
		break;
	}

	if (p->ainsn.boostable == 0) {
		if ((regs->ip > copy_ip) &&
		    (regs->ip - copy_ip) + 5 < MAX_INSN_SIZE) {
			/*
			 * These instructions can be executed directly if they
			 * jump back to the correct address.
			 */
			synthesize_reljump((void *)regs->ip,
					   (void *)orig_ip + (regs->ip - copy_ip));
			p->ainsn.boostable = 1;
		} else {
			p->ainsn.boostable = -1;
		}
	}

	regs->ip += orig_ip - copy_ip;

no_change:
	restore_btf();
}
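
/*
 * Worked example (illustrative): a 2-byte probed instruction is copied
 * to copy_ip.  After single-stepping, regs->ip == copy_ip + 2, so
 * (regs->ip - copy_ip) + 5 == 7 < MAX_INSN_SIZE and a reljump back to
 * orig_ip + 2 is synthesized at copy_ip + 2.  A later hit on this probe
 * can then (absent CONFIG_PREEMPT and a post_handler) take the boost
 * path in setup_singlestep() and skip the debug trap entirely.
 */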

/*
 * Interrupts are disabled on entry as trap1 is an interrupt gate and they
 * remain disabled throughout this function.
 */
static int __kprobes post_kprobe_handler(struct pt_regs *regs)
{
	struct kprobe *cur = kprobe_running();
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	if (!cur)
		return 0;

	resume_execution(cur, regs, kcb);
	regs->flags |= kcb->kprobe_saved_flags;

	if ((kcb->kprobe_status != KPROBE_REENTER) && cur->post_handler) {
		kcb->kprobe_status = KPROBE_HIT_SSDONE;
		cur->post_handler(cur, regs, 0);
	}

	/* Restore back the original saved kprobes variables and continue. */
	if (kcb->kprobe_status == KPROBE_REENTER) {
		restore_previous_kprobe(kcb);
		goto out;
	}
	reset_current_kprobe();
out:
	preempt_enable_no_resched();

	/*
	 * If somebody else is single-stepping across a probe point, flags
	 * will have TF set, in which case, continue the remaining processing
	 * of do_debug, as if this were not a probe hit.
	 */
	if (regs->flags & X86_EFLAGS_TF)
		return 0;

	return 1;
}

int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr)
{
	struct kprobe *cur = kprobe_running();
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	switch (kcb->kprobe_status) {
	case KPROBE_HIT_SS:
	case KPROBE_REENTER:
		/*
		 * We are here because the instruction being single-
		 * stepped caused a page fault.  We reset the current
		 * kprobe so that the ip points back to the probe
		 * address, and allow the page fault handler to
		 * continue as a normal page fault.
		 */
		regs->ip = (unsigned long)cur->addr;
		regs->flags |= kcb->kprobe_old_flags;
		if (kcb->kprobe_status == KPROBE_REENTER)
			restore_previous_kprobe(kcb);
		else
			reset_current_kprobe();
		preempt_enable_no_resched();
		break;
	case KPROBE_HIT_ACTIVE:
	case KPROBE_HIT_SSDONE:
		/*
		 * We increment the nmissed count for accounting;
		 * we can also use the npre/npostfault counts for
		 * accounting these specific fault cases.
		 */
		kprobes_inc_nmissed_count(cur);

		/*
		 * We come here because instructions in the pre/post
		 * handler caused the page fault.  This could happen
		 * if the handler tries to access user space via
		 * copy_from_user(), get_user(), etc.  Let the
		 * user-specified handler try to fix it first.
		 */
		if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr))
			return 1;

		/*
		 * In case the user-specified fault handler returned
		 * zero, try to fix up.
		 */
		if (fixup_exception(regs))
			return 1;

		/*
		 * The fixup routine could not handle it;
		 * let do_page_fault() fix it.
		 */
		break;
	default:
		break;
	}
	return 0;
}

/*
 * Wrapper routine for handling exceptions.
 */
int __kprobes
kprobe_exceptions_notify(struct notifier_block *self, unsigned long val, void *data)
{
	struct die_args *args = data;
	int ret = NOTIFY_DONE;

	if (args->regs && user_mode_vm(args->regs))
		return ret;

	switch (val) {
	case DIE_INT3:
		if (kprobe_handler(args->regs))
			ret = NOTIFY_STOP;
		break;
	case DIE_DEBUG:
		if (post_kprobe_handler(args->regs)) {
			/*
			 * Reset the BS bit in dr6 (pointed to by args->err)
			 * to denote completion of processing.
			 */
			(*(unsigned long *)ERR_PTR(args->err)) &= ~DR_STEP;
			ret = NOTIFY_STOP;
		}
		break;
	case DIE_GPF:
		/*
		 * To be potentially processing a kprobe fault and to
		 * trust the result from kprobe_running(), we have to
		 * be non-preemptible.
		 */
		if (!preemptible() && kprobe_running() &&
		    kprobe_fault_handler(args->regs, args->trapnr))
			ret = NOTIFY_STOP;
		break;
	default:
		break;
	}
	return ret;
}

int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct jprobe *jp = container_of(p, struct jprobe, kp);
	unsigned long addr;
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	kcb->jprobe_saved_regs = *regs;
	kcb->jprobe_saved_sp = stack_addr(regs);
	addr = (unsigned long)(kcb->jprobe_saved_sp);

	/*
	 * As Linus pointed out, gcc assumes that the callee
	 * owns the argument space and could overwrite it, e.g.
	 * tailcall optimization.  So, to be absolutely safe
	 * we also save and restore enough stack bytes to cover
	 * the argument area.
	 */
	memcpy(kcb->jprobes_stack, (kprobe_opcode_t *)addr,
	       MIN_STACK_SIZE(addr));
	regs->flags &= ~X86_EFLAGS_IF;
	trace_hardirqs_off();
	regs->ip = (unsigned long)(jp->entry);

	/*
	 * jprobes use jprobe_return(), which skips the normal return
	 * path of the function, and this messes up the accounting of
	 * the function graph tracer.
	 *
	 * Pause function graph tracing while performing the jprobe
	 * function.
	 */
	pause_graph_tracing();
	return 1;
}

void __kprobes jprobe_return(void)
{
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	asm volatile (
#ifdef CONFIG_X86_64
			"	xchg %%rbx,%%rsp	\n"
#else
			"	xchgl %%ebx,%%esp	\n"
#endif
			"	int3			\n"
			"	.globl jprobe_return_end\n"
			"	jprobe_return_end:	\n"
			"	nop			\n"::"b"
			(kcb->jprobe_saved_sp):"memory");
}
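
/*
 * Usage sketch (hypothetical module code): a jprobe handler mirrors the
 * probed function's signature, inspects its arguments, and must end
 * with jprobe_return() so longjmp_break_handler() below can restore
 * the saved registers and stack:
 *
 *	static void my_entry(long code)			// mirrors do_exit(long)
 *	{
 *		pr_info("exit code = %ld\n", code);
 *		jprobe_return();			// never returns normally
 *	}
 *	static struct jprobe my_jp = {
 *		.entry = my_entry,
 *		.kp.symbol_name = "do_exit",		// illustrative target
 *	};
 *	register_jprobe(&my_jp);
 */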

int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
	u8 *addr = (u8 *) (regs->ip - 1);
	struct jprobe *jp = container_of(p, struct jprobe, kp);
	void *saved_sp = kcb->jprobe_saved_sp;

	if ((addr > (u8 *) jprobe_return) &&
	    (addr < (u8 *) jprobe_return_end)) {
		if (stack_addr(regs) != saved_sp) {
			struct pt_regs *saved_regs = &kcb->jprobe_saved_regs;
			printk(KERN_ERR
			       "current sp %p does not match saved sp %p\n",
			       stack_addr(regs), saved_sp);
			printk(KERN_ERR "Saved registers for jprobe %p\n", jp);
			show_regs(saved_regs);
			printk(KERN_ERR "Current registers\n");
			show_regs(regs);
			BUG();
		}
		/* It's OK to start function graph tracing again */
		unpause_graph_tracing();
		*regs = kcb->jprobe_saved_regs;
		memcpy(saved_sp, kcb->jprobes_stack, MIN_STACK_SIZE(saved_sp));
		preempt_enable_no_resched();
		return 1;
	}
	return 0;
}

int __init arch_init_kprobes(void)
{
	return arch_init_optprobes();
}

/* x86 does not register a kprobe on the kretprobe trampoline, so this never matches. */
int __kprobes arch_trampoline_kprobe(struct kprobe *p)
{
	return 0;
}