1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42#include <linux/kprobes.h>
43#include <linux/ptrace.h>
44#include <linux/string.h>
45#include <linux/slab.h>
46#include <linux/hardirq.h>
47#include <linux/preempt.h>
48#include <linux/module.h>
49#include <linux/kdebug.h>
50#include <linux/kallsyms.h>
51#include <linux/ftrace.h>
52#include <linux/frame.h>
53
54#include <asm/text-patching.h>
55#include <asm/cacheflush.h>
56#include <asm/desc.h>
57#include <asm/pgtable.h>
58#include <asm/uaccess.h>
59#include <asm/alternative.h>
60#include <asm/insn.h>
61#include <asm/debugreg.h>
62
63#include "common.h"
64
/* Marks the end of the jprobe_return() asm stub defined later in this file. */
void jprobe_return_end(void);

/* The kprobe currently being handled on this CPU (NULL when idle). */
DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
/* Per-CPU kprobe state (status, saved flags, jprobe context). */
DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);

/* Kernel stack pointer of the trapped context, usable as an array base. */
#define stack_addr(regs) ((unsigned long *)kernel_stack_pointer(regs))
71
/*
 * Pack sixteen one-bit "boostable" flags into a bitmap row.  "row" is the
 * first opcode of the row (0x00, 0x10, ...); (row % 32) selects which
 * 16-bit half of a u32 the row occupies, so two W() rows OR together
 * into one 32-bit array element below.
 */
#define W(row, b0, b1, b2, b3, b4, b5, b6, b7, b8, b9, ba, bb, bc, bd, be, bf)\
	(((b0##UL << 0x0)|(b1##UL << 0x1)|(b2##UL << 0x2)|(b3##UL << 0x3) |   \
	  (b4##UL << 0x4)|(b5##UL << 0x5)|(b6##UL << 0x6)|(b7##UL << 0x7) |   \
	  (b8##UL << 0x8)|(b9##UL << 0x9)|(ba##UL << 0xa)|(bb##UL << 0xb) |   \
	  (bc##UL << 0xc)|(bd##UL << 0xd)|(be##UL << 0xe)|(bf##UL << 0xf))    \
	 << (row % 32))
78
79
80
81
82
83
84
/*
 * Bitmap of two-byte (0x0f-prefixed) opcodes that can be "boosted"
 * (executed out of line followed by a jump back, instead of
 * single-stepping).  Undefined/reserved opcodes, conditional jumps,
 * opcode-extension groups and some special opcodes cannot be boosted.
 *
 * NOTE(review): the table is non-const and volatile, presumably to keep
 * the compiler from folding the test_bit() access in can_boost() —
 * confirm against the original upstream rationale before changing.
 */
static volatile u32 twobyte_is_boostable[256 / 32] = {
	/*      0  1  2  3  4  5  6  7  8  9  a  b  c  d  e  f         */
	/*      -----------------------------------------------        */
	W(0x00, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0) | /* 00 */
	W(0x10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1) , /* 10 */
	W(0x20, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) | /* 20 */
	W(0x30, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) , /* 30 */
	W(0x40, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 40 */
	W(0x50, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) , /* 50 */
	W(0x60, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1) | /* 60 */
	W(0x70, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1) , /* 70 */
	W(0x80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) | /* 80 */
	W(0x90, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 90 */
	W(0xa0, 1, 1, 0, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1) | /* a0 */
	W(0xb0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1) , /* b0 */
	W(0xc0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1) | /* c0 */
	W(0xd0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1) , /* d0 */
	W(0xe0, 0, 1, 1, 0, 0, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1) | /* e0 */
	W(0xf0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 0, 1, 1, 1, 0)   /* f0 */
	/*      -----------------------------------------------        */
	/*      0  1  2  3  4  5  6  7  8  9  a  b  c  d  e  f         */
};
#undef W
108
/*
 * Functions on which kretprobes must never be installed, terminated by
 * a NULL entry.  __switch_to() is blacklisted because return probing it
 * would corrupt the context-switch return path.
 */
struct kretprobe_blackpoint kretprobe_blacklist[] = {
	{"__switch_to", },	/* This function switches only current task, but
				   doesn't switch kernel stack.*/
	{NULL, NULL}	/* Terminator */
};

const int kretprobe_blacklist_size = ARRAY_SIZE(kretprobe_blacklist);
116
/*
 * Emit a 5-byte "opcode + rel32" instruction at "from" targeting "to".
 * The 32-bit displacement is relative to the byte following the 5-byte
 * instruction, hence the "+ 5" in the computation.  Used to synthesize
 * relative jmp/call instructions for booster/optprobe stubs.
 */
static nokprobe_inline void
__synthesize_relative_insn(void *from, void *to, u8 op)
{
	struct __arch_relative_insn {
		u8 op;
		s32 raddr;
	} __packed *insn;

	insn = (struct __arch_relative_insn *)from;
	/* Displacement is written before the opcode byte; keep this order. */
	insn->raddr = (s32)((long)(to) - ((long)(from) + 5));
	insn->op = op;
}
129
130
/* Insert a relative jump (0xe9) at @from targeting @to. */
void synthesize_reljump(void *from, void *to)
{
	__synthesize_relative_insn(from, to, RELATIVEJUMP_OPCODE);
}
NOKPROBE_SYMBOL(synthesize_reljump);
136
137
/* Insert a relative call (0xe8) at @from targeting @to. */
void synthesize_relcall(void *from, void *to)
{
	__synthesize_relative_insn(from, to, RELATIVECALL_OPCODE);
}
NOKPROBE_SYMBOL(synthesize_relcall);
143
144
145
146
/*
 * Skip the legacy instruction prefixes (and, on x86-64, a single REX
 * prefix) of the instruction at @insn and return a pointer to the
 * first opcode byte.
 */
static kprobe_opcode_t *skip_prefixes(kprobe_opcode_t *insn)
{
	insn_attr_t attr;

	attr = inat_get_opcode_attribute((insn_byte_t)*insn);
	while (inat_is_legacy_prefix(attr)) {
		insn++;
		attr = inat_get_opcode_attribute((insn_byte_t)*insn);
	}
#ifdef CONFIG_X86_64
	if (inat_is_rex_prefix(attr))
		insn++;
#endif
	return insn;
}
NOKPROBE_SYMBOL(skip_prefixes);
163
164
165
166
167
/*
 * Return non-zero if the instruction at @opcodes is boostable, i.e. it
 * can be executed out of line and followed by a synthesized jump back,
 * instead of being single-stepped.  Prefix bytes are consumed by
 * re-entering at "retry".
 */
int can_boost(kprobe_opcode_t *opcodes)
{
	kprobe_opcode_t opcode;
	kprobe_opcode_t *orig_opcodes = opcodes;

	/* Instructions with a fixup entry may fault; don't boost those. */
	if (search_exception_tables((unsigned long)opcodes))
		return 0;	/* Page fault may occur on this address. */

retry:
	if (opcodes - orig_opcodes > MAX_INSN_SIZE - 1)
		return 0;
	opcode = *(opcodes++);

	/* 2nd-byte opcode */
	if (opcode == 0x0f) {
		if (opcodes - orig_opcodes > MAX_INSN_SIZE - 1)
			return 0;
		return test_bit(*opcodes,
				(unsigned long *)twobyte_is_boostable);
	}

	switch (opcode & 0xf0) {
#ifdef CONFIG_X86_64
	case 0x40:
		goto retry; /* REX prefix is boostable */
#endif
	case 0x60:
		/* 0x64-0x66: segment/operand-size prefixes, keep scanning */
		if (0x63 < opcode && opcode < 0x67)
			goto retry; /* prefixes */
		/* can't boost "bound" (0x62) and address-size override (0x67) */
		return (opcode != 0x62 && opcode != 0x67);
	case 0x70:
		return 0; /* can't boost conditional jump */
	case 0xc0:
		/* can't boost software interrupts (int3/int/into) and lret */
		return (0xc1 < opcode && opcode < 0xcc) || opcode == 0xcf;
	case 0xd0:
		/* can boost AA* and XLAT */
		return (opcode == 0xd4 || opcode == 0xd5 || opcode == 0xd7);
	case 0xe0:
		/* can boost in/out and absolute jmps */
		return ((opcode & 0x04) || opcode == 0xea);
	case 0xf0:
		/* 0xf0/0xf2/0xf3 = lock/rep(ne) prefixes; 0xf1 = icebp */
		if ((opcode & 0x0c) == 0 && opcode != 0xf1)
			goto retry; /* lock/rep(ne) prefix */
		/* clear and set flags are boostable */
		return (opcode == 0xf5 || (0xf7 < opcode && opcode < 0xfe));
	default:
		/* segment override prefixes (es/ss/ds) are boostable */
		if (opcode == 0x26 || opcode == 0x36 || opcode == 0x3e)
			goto retry; /* prefixes */
		/* CS override prefix (0x2e) and call far (0x9a) are not boostable */
		return (opcode != 0x2e && opcode != 0x9a);
	}
}
223
/*
 * Recover the original instruction at @addr into @buf, undoing any
 * int3/ftrace modification.  Returns @addr itself when the text is
 * unmodified, the address of @buf when a recovered copy was built,
 * or 0 on error.
 */
static unsigned long
__recover_probed_insn(kprobe_opcode_t *buf, unsigned long addr)
{
	struct kprobe *kp;
	unsigned long faddr;

	kp = get_kprobe((void *)addr);
	faddr = ftrace_location(addr);
	/*
	 * Addresses inside an ftrace location but not at its start should
	 * have been rejected earlier; something went terribly wrong if
	 * such an address shows up here.
	 */
	if (WARN_ON(faddr && faddr != addr))
		return 0UL;
	/*
	 * Use the current code if it is not modified by Kprobes
	 * and it cannot be modified by ftrace.
	 */
	if (!kp && !faddr)
		return addr;

	/*
	 * Basically, kp->ainsn.insn has an original instruction.
	 * However, a RIP-relative instruction can not be single-stepped
	 * at a different place, so __copy_instruction() tweaks its
	 * displacement — in that case we can't recover the instruction
	 * from kp->ainsn.insn.
	 *
	 * For a normal kprobe, kp->opcode holds a copy of the first byte
	 * of the probed instruction (overwritten by int3 in the kernel
	 * text); the remaining bytes at kp->addr are unmodified, so the
	 * original can be rebuilt from the live text plus kp->opcode.
	 *
	 * For kprobes-on-ftrace there is no saved copy, but the original
	 * code at an ftrace location is known to be the ideal 5-byte NOP.
	 */
	memcpy(buf, (void *)addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
	if (faddr)
		memcpy(buf, ideal_nops[NOP_ATOMIC5], 5);
	else
		buf[0] = kp->opcode;
	return (unsigned long)buf;
}
272
273
274
275
276
277
278
/*
 * Recover the probed instruction at @addr for further analysis.
 * The caller must hold kprobe_mutex or disable preemption to keep the
 * referenced kprobes from being released.  Returns 0 if the
 * instruction cannot be recovered.
 */
unsigned long recover_probed_instruction(kprobe_opcode_t *buf, unsigned long addr)
{
	unsigned long __addr;

	/* First try to undo an optprobe's relative jump, if one is there. */
	__addr = __recover_optprobed_insn(buf, addr);
	if (__addr != addr)
		return __addr;

	return __recover_probed_insn(buf, addr);
}
289
290
/* Check if @paddr lies on an instruction boundary of its function. */
static int can_probe(unsigned long paddr)
{
	unsigned long addr, __addr, offset = 0;
	struct insn insn;
	kprobe_opcode_t buf[MAX_INSN_SIZE];

	if (!kallsyms_lookup_size_offset(paddr, NULL, &offset))
		return 0;

	/* Decode instructions from the function start until we reach paddr. */
	addr = paddr - offset;
	while (addr < paddr) {
		/*
		 * Check if the instruction has been modified by another
		 * kprobe, in which case we decode the recovered original
		 * from our buffer instead of the live (int3-patched) text.
		 * Jump optimization replaces the breakpoint with a
		 * relative jump; recovery handles that case as well.
		 */
		__addr = recover_probed_instruction(buf, addr);
		if (!__addr)
			return 0;
		kernel_insn_init(&insn, (void *)__addr, MAX_INSN_SIZE);
		insn_get_length(&insn);

		/*
		 * Another debugging subsystem might have inserted this
		 * breakpoint; in that case we cannot recover it.
		 */
		if (insn.opcode.bytes[0] == BREAKPOINT_INSTRUCTION)
			return 0;
		addr += insn.length;
	}

	/* paddr is a boundary only if we landed exactly on it. */
	return (addr == paddr);
}
328
329
330
331
332static int is_IF_modifier(kprobe_opcode_t *insn)
333{
334
335 insn = skip_prefixes(insn);
336
337 switch (*insn) {
338 case 0xfa:
339 case 0xfb:
340 case 0xcf:
341 case 0x9d:
342 return 1;
343 }
344
345 return 0;
346}
347
348
349
350
351
352
353
354
/*
 * Copy the instruction at @src to @dest, recovering any kprobe/ftrace
 * modification first and, on x86-64, fixing up %rip-relative
 * displacements for the new location.  Returns the length of the
 * copied instruction, or 0 on error.
 */
int __copy_instruction(u8 *dest, u8 *src)
{
	struct insn insn;
	kprobe_opcode_t buf[MAX_INSN_SIZE];
	int length;
	unsigned long recovered_insn =
		recover_probed_instruction(buf, (unsigned long)src);

	if (!recovered_insn)
		return 0;
	kernel_insn_init(&insn, (void *)recovered_insn, MAX_INSN_SIZE);
	insn_get_length(&insn);
	length = insn.length;

	/* Another subsystem puts a breakpoint, failed to recover */
	if (insn.opcode.bytes[0] == BREAKPOINT_INSTRUCTION)
		return 0;
	memcpy(dest, insn.kaddr, length);

#ifdef CONFIG_X86_64
	if (insn_rip_relative(&insn)) {
		s64 newdisp;
		u8 *disp;
		/* Re-decode from the copy to locate the displacement field. */
		kernel_insn_init(&insn, dest, length);
		insn_get_displacement(&insn);
		/*
		 * The copied instruction uses the %rip-relative addressing
		 * mode.  Adjust the displacement for the difference between
		 * the original location of this instruction and the location
		 * of the copy that will actually be run.  The tricky bit here
		 * is making sure that the sign extension happens correctly in
		 * this calculation, since we need a signed 32-bit result to
		 * be sign-extended to 64 bits when it's added to the %rip
		 * value and yield the same 64-bit result that the sign-
		 * extension of the original signed 32-bit displacement would
		 * have given.
		 */
		newdisp = (u8 *) src + (s64) insn.displacement.value - (u8 *) dest;
		if ((s64) (s32) newdisp != newdisp) {
			pr_err("Kprobes error: new displacement does not fit into s32 (%llx)\n", newdisp);
			pr_err("\tSrc: %p, Dest: %p, old disp: %x\n", src, dest, insn.displacement.value);
			return 0;
		}
		disp = (u8 *) dest + insn_offset_displacement(&insn);
		*(s32 *) disp = (s32) newdisp;
	}
#endif
	return length;
}
404
/*
 * Copy the probed instruction into the kprobe's single-step slot and
 * fill in the arch-specific ainsn fields (boostability, IF-modifier,
 * saved first opcode byte).  Returns 0 on success, -EINVAL if the
 * instruction could not be copied.
 */
static int arch_copy_kprobe(struct kprobe *p)
{
	int ret;

	/* Copy an instruction with recovering if other optprobe modifies it.*/
	ret = __copy_instruction(p->ainsn.insn, p->addr);
	if (!ret)
		return -EINVAL;

	/*
	 * __copy_instruction can modify the displacement of the instruction,
	 * so boostability is judged on the copy, not the original.
	 * boostable: 0 = maybe (decided at resume_execution time),
	 *           -1 = never.
	 */
	if (can_boost(p->ainsn.insn))
		p->ainsn.boostable = 0;
	else
		p->ainsn.boostable = -1;

	/* Check whether the instruction modifies Interrupt Flag or not */
	p->ainsn.if_modifier = is_IF_modifier(p->ainsn.insn);

	/* Also, displacement change doesn't affect the first byte */
	p->opcode = p->ainsn.insn[0];

	return 0;
}
431
/*
 * Arch-specific kprobe preparation: reject addresses reserved by the
 * alternatives machinery or not on an instruction boundary, then
 * allocate a single-step slot and copy the instruction into it.
 */
int arch_prepare_kprobe(struct kprobe *p)
{
	if (alternatives_text_reserved(p->addr, p->addr))
		return -EINVAL;

	if (!can_probe((unsigned long)p->addr))
		return -EILSEQ;
	/* insn: must be on special executable page on x86. */
	p->ainsn.insn = get_insn_slot();
	if (!p->ainsn.insn)
		return -ENOMEM;

	return arch_copy_kprobe(p);
}
446
/* Arm the probe: patch an int3 over the first byte of the instruction. */
void arch_arm_kprobe(struct kprobe *p)
{
	text_poke(p->addr, ((unsigned char []){BREAKPOINT_INSTRUCTION}), 1);
}
451
/* Disarm the probe: restore the saved original first opcode byte. */
void arch_disarm_kprobe(struct kprobe *p)
{
	text_poke(p->addr, &p->opcode, 1);
}
456
457void arch_remove_kprobe(struct kprobe *p)
458{
459 if (p->ainsn.insn) {
460 free_insn_slot(p->ainsn.insn, (p->ainsn.boostable == 1));
461 p->ainsn.insn = NULL;
462 }
463}
464
465static nokprobe_inline void
466save_previous_kprobe(struct kprobe_ctlblk *kcb)
467{
468 kcb->prev_kprobe.kp = kprobe_running();
469 kcb->prev_kprobe.status = kcb->kprobe_status;
470 kcb->prev_kprobe.old_flags = kcb->kprobe_old_flags;
471 kcb->prev_kprobe.saved_flags = kcb->kprobe_saved_flags;
472}
473
474static nokprobe_inline void
475restore_previous_kprobe(struct kprobe_ctlblk *kcb)
476{
477 __this_cpu_write(current_kprobe, kcb->prev_kprobe.kp);
478 kcb->kprobe_status = kcb->prev_kprobe.status;
479 kcb->kprobe_old_flags = kcb->prev_kprobe.old_flags;
480 kcb->kprobe_saved_flags = kcb->prev_kprobe.saved_flags;
481}
482
/*
 * Mark @p as the kprobe being handled on this CPU and snapshot the
 * TF/IF bits of the trapped context so they can be restored after
 * single-stepping.  If the probed instruction itself modifies IF,
 * drop IF from the saved flags so we don't clobber its effect.
 */
static nokprobe_inline void
set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
				struct kprobe_ctlblk *kcb)
{
	__this_cpu_write(current_kprobe, p);
	kcb->kprobe_saved_flags = kcb->kprobe_old_flags
		= (regs->flags & (X86_EFLAGS_TF | X86_EFLAGS_IF));
	if (p->ainsn.if_modifier)
		kcb->kprobe_saved_flags &= ~X86_EFLAGS_IF;
}
493
494static nokprobe_inline void clear_btf(void)
495{
496 if (test_thread_flag(TIF_BLOCKSTEP)) {
497 unsigned long debugctl = get_debugctlmsr();
498
499 debugctl &= ~DEBUGCTLMSR_BTF;
500 update_debugctlmsr(debugctl);
501 }
502}
503
504static nokprobe_inline void restore_btf(void)
505{
506 if (test_thread_flag(TIF_BLOCKSTEP)) {
507 unsigned long debugctl = get_debugctlmsr();
508
509 debugctl |= DEBUGCTLMSR_BTF;
510 update_debugctlmsr(debugctl);
511 }
512}
513
/*
 * Hook the function's return: save the real return address from the
 * stack into @ri and replace it with the address of
 * kretprobe_trampoline, so the return lands in our trampoline.
 */
void arch_prepare_kretprobe(struct kretprobe_instance *ri, struct pt_regs *regs)
{
	unsigned long *sara = stack_addr(regs);

	ri->ret_addr = (kprobe_opcode_t *) *sara;

	/* Replace the return addr with trampoline addr */
	*sara = (unsigned long) &kretprobe_trampoline;
}
NOKPROBE_SYMBOL(arch_prepare_kretprobe);
524
/*
 * Arrange execution of the probed instruction: via an optprobe detour,
 * by "boosting" (running the copy directly, no trap), or by真 single-
 * stepping the copy with TF set.  @reenter is non-zero when called for
 * a probe hit inside another probe's handling.
 */
static void setup_singlestep(struct kprobe *p, struct pt_regs *regs,
			     struct kprobe_ctlblk *kcb, int reenter)
{
	if (setup_detour_execution(p, regs, reenter))
		return;

#if !defined(CONFIG_PREEMPT)
	if (p->ainsn.boostable == 1 && !p->post_handler) {
		/* Boost up -- we can execute copied instructions directly */
		if (!reenter)
			reset_current_kprobe();
		/*
		 * Reentering a boosted probe doesn't reset current_kprobe,
		 * nor set current_kprobe, because it doesn't use
		 * single-stepping.
		 */
		regs->ip = (unsigned long)p->ainsn.insn;
		preempt_enable_no_resched();
		return;
	}
#endif
	if (reenter) {
		save_previous_kprobe(kcb);
		set_current_kprobe(p, regs, kcb);
		kcb->kprobe_status = KPROBE_REENTER;
	} else
		kcb->kprobe_status = KPROBE_HIT_SS;
	/* Prepare real single stepping */
	clear_btf();
	regs->flags |= X86_EFLAGS_TF;
	regs->flags &= ~X86_EFLAGS_IF;
	/* single step inline if the instruction is an int3 */
	if (p->opcode == BREAKPOINT_INSTRUCTION)
		regs->ip = (unsigned long)p->addr;
	else
		regs->ip = (unsigned long)p->ainsn.insn;
}
NOKPROBE_SYMBOL(setup_singlestep);
563
564
565
566
567
568
569static int reenter_kprobe(struct kprobe *p, struct pt_regs *regs,
570 struct kprobe_ctlblk *kcb)
571{
572 switch (kcb->kprobe_status) {
573 case KPROBE_HIT_SSDONE:
574 case KPROBE_HIT_ACTIVE:
575 case KPROBE_HIT_SS:
576 kprobes_inc_nmissed_count(p);
577 setup_singlestep(p, regs, kcb, 1);
578 break;
579 case KPROBE_REENTER:
580
581
582
583
584
585
586 printk(KERN_WARNING "Unrecoverable kprobe detected at %p.\n",
587 p->addr);
588 dump_kprobe(p);
589 BUG();
590 default:
591
592 WARN_ON(1);
593 return 0;
594 }
595
596 return 1;
597}
598NOKPROBE_SYMBOL(reenter_kprobe);
599
600
601
602
603
/*
 * int3 breakpoint handler entry point for kprobes.  Interrupts are
 * disabled on entry (trap gate) and remain disabled throughout.
 * Returns 1 when the trap was consumed by kprobes, 0 otherwise.
 */
int kprobe_int3_handler(struct pt_regs *regs)
{
	kprobe_opcode_t *addr;
	struct kprobe *p;
	struct kprobe_ctlblk *kcb;

	if (user_mode(regs))
		return 0;

	/* int3 leaves ip just past the breakpoint byte; back up to it. */
	addr = (kprobe_opcode_t *)(regs->ip - sizeof(kprobe_opcode_t));
	/*
	 * We don't want to be preempted for the entire
	 * duration of kprobe processing. We conditionally
	 * re-enable preemption at the end of this function,
	 * and also in reenter_kprobe() and setup_singlestep().
	 */
	preempt_disable();

	kcb = get_kprobe_ctlblk();
	p = get_kprobe(addr);

	if (p) {
		if (kprobe_running()) {
			if (reenter_kprobe(p, regs, kcb))
				return 1;
		} else {
			set_current_kprobe(p, regs, kcb);
			kcb->kprobe_status = KPROBE_HIT_ACTIVE;

			/*
			 * If we have no pre-handler or it returned 0, we
			 * continue with normal processing.  If we have a
			 * pre-handler and it returned non-zero, it prepped
			 * for calling the break_handler below on re-entry
			 * for jprobe processing, so get out doing nothing
			 * more here.
			 */
			if (!p->pre_handler || !p->pre_handler(p, regs))
				setup_singlestep(p, regs, kcb, 0);
			return 1;
		}
	} else if (*addr != BREAKPOINT_INSTRUCTION) {
		/*
		 * The breakpoint instruction was removed right
		 * after we hit it.  Another cpu has removed
		 * either a probepoint or a debugger breakpoint
		 * at this address.  In either case, no further
		 * handling of this interrupt is appropriate.
		 * Back up over the (now missing) int3 and run
		 * the original instruction.
		 */
		regs->ip = (unsigned long)addr;
		preempt_enable_no_resched();
		return 1;
	} else if (kprobe_running()) {
		/* jprobe path: the int3 in jprobe_return() lands here. */
		p = __this_cpu_read(current_kprobe);
		if (p->break_handler && p->break_handler(p, regs)) {
			if (!skip_singlestep(p, regs, kcb))
				setup_singlestep(p, regs, kcb, 0);
			return 1;
		}
	} /* else: not a kprobe fault; let the kernel handle it */

	preempt_enable_no_resched();
	return 0;
}
NOKPROBE_SYMBOL(kprobe_int3_handler);
671
672
673
674
675
/*
 * When a retprobed function returns, control lands here: registers are
 * saved, trampoline_handler() is called to run the kretprobe handlers
 * and recover the real return address, then registers are restored and
 * we "ret" to the real caller.
 */
asm(
	".global kretprobe_trampoline\n"
	".type kretprobe_trampoline, @function\n"
	"kretprobe_trampoline:\n"
#ifdef CONFIG_X86_64
	/* We don't bother saving the ss register */
	" pushq %rsp\n"
	" pushfq\n"
	SAVE_REGS_STRING
	" movq %rsp, %rdi\n"
	" call trampoline_handler\n"
	/* Replace the saved sp slot with the true return address. */
	" movq %rax, 152(%rsp)\n"
	RESTORE_REGS_STRING
	" popfq\n"
#else
	" pushf\n"
	SAVE_REGS_STRING
	" movl %esp, %eax\n"
	" call trampoline_handler\n"
	/* Move flags to cs */
	" movl 56(%esp), %edx\n"
	" movl %edx, 52(%esp)\n"
	/* Replace the saved flags slot with the true return address. */
	" movl %eax, 56(%esp)\n"
	RESTORE_REGS_STRING
	" popf\n"
#endif
	" ret\n"
	".size kretprobe_trampoline, .-kretprobe_trampoline\n"
);
NOKPROBE_SYMBOL(kretprobe_trampoline);
STACK_FRAME_NON_STANDARD(kretprobe_trampoline);
709
710
711
712
/*
 * Called from kretprobe_trampoline: run the kretprobe handlers for the
 * returning function and return the real return address so the
 * trampoline can jump back to the caller.
 */
__visible __used void *trampoline_handler(struct pt_regs *regs)
{
	struct kretprobe_instance *ri = NULL;
	struct hlist_head *head, empty_rp;
	struct hlist_node *tmp;
	unsigned long flags, orig_ret_address = 0;
	unsigned long trampoline_address = (unsigned long)&kretprobe_trampoline;
	kprobe_opcode_t *correct_ret_addr = NULL;

	INIT_HLIST_HEAD(&empty_rp);
	kretprobe_hash_lock(current, &head, &flags);
	/* fixup registers: fake a pt_regs as if the probed function
	 * were stopped at the trampoline. */
#ifdef CONFIG_X86_64
	regs->cs = __KERNEL_CS;
#else
	regs->cs = __KERNEL_CS | get_kernel_rpl();
	regs->gs = 0;
#endif
	regs->ip = trampoline_address;
	regs->orig_ax = ~0UL;

	/*
	 * It is possible to have multiple instances associated with a given
	 * task either because multiple functions in the call path have
	 * return probes installed on them, and/or more than one
	 * return probe was registered for a target function.
	 *
	 * We can handle this because:
	 *     - instances are always pushed into the head of the list
	 *     - when multiple return probes are registered for the same
	 *	 function, the (chronologically) first instance's ret_addr
	 *	 will be the real return address, and all the rest will
	 *	 point to kretprobe_trampoline.
	 */
	hlist_for_each_entry_safe(ri, tmp, head, hlist) {
		if (ri->task != current)
			/* another task is sharing our hash bucket */
			continue;

		orig_ret_address = (unsigned long)ri->ret_addr;

		if (orig_ret_address != trampoline_address)
			/*
			 * This is the real return address. Any other
			 * instances associated with this task are for
			 * other calls deeper on the call stack
			 */
			break;
	}

	kretprobe_assert(ri, orig_ret_address, trampoline_address);

	/* Second pass: actually invoke the handlers and recycle instances. */
	correct_ret_addr = ri->ret_addr;
	hlist_for_each_entry_safe(ri, tmp, head, hlist) {
		if (ri->task != current)
			/* another task is sharing our hash bucket */
			continue;

		orig_ret_address = (unsigned long)ri->ret_addr;
		if (ri->rp && ri->rp->handler) {
			__this_cpu_write(current_kprobe, &ri->rp->kp);
			get_kprobe_ctlblk()->kprobe_status = KPROBE_HIT_ACTIVE;
			ri->ret_addr = correct_ret_addr;
			ri->rp->handler(ri, regs);
			__this_cpu_write(current_kprobe, NULL);
		}

		recycle_rp_inst(ri, &empty_rp);

		if (orig_ret_address != trampoline_address)
			/*
			 * This is the real return address. Any other
			 * instances associated with this task are for
			 * other calls deeper on the call stack
			 */
			break;
	}

	kretprobe_hash_unlock(current, &flags);

	/* Free the recycled instances outside the hash lock. */
	hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
		hlist_del(&ri->hlist);
		kfree(ri);
	}
	return (void *)orig_ret_address;
}
NOKPROBE_SYMBOL(trampoline_handler);
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828static void resume_execution(struct kprobe *p, struct pt_regs *regs,
829 struct kprobe_ctlblk *kcb)
830{
831 unsigned long *tos = stack_addr(regs);
832 unsigned long copy_ip = (unsigned long)p->ainsn.insn;
833 unsigned long orig_ip = (unsigned long)p->addr;
834 kprobe_opcode_t *insn = p->ainsn.insn;
835
836
837 insn = skip_prefixes(insn);
838
839 regs->flags &= ~X86_EFLAGS_TF;
840 switch (*insn) {
841 case 0x9c:
842 *tos &= ~(X86_EFLAGS_TF | X86_EFLAGS_IF);
843 *tos |= kcb->kprobe_old_flags;
844 break;
845 case 0xc2:
846 case 0xc3:
847 case 0xca:
848 case 0xcb:
849 case 0xcf:
850 case 0xea:
851
852 p->ainsn.boostable = 1;
853 goto no_change;
854 case 0xe8:
855 *tos = orig_ip + (*tos - copy_ip);
856 break;
857#ifdef CONFIG_X86_32
858 case 0x9a:
859 *tos = orig_ip + (*tos - copy_ip);
860 goto no_change;
861#endif
862 case 0xff:
863 if ((insn[1] & 0x30) == 0x10) {
864
865
866
867
868
869 *tos = orig_ip + (*tos - copy_ip);
870 goto no_change;
871 } else if (((insn[1] & 0x31) == 0x20) ||
872 ((insn[1] & 0x31) == 0x21)) {
873
874
875
876
877 p->ainsn.boostable = 1;
878 goto no_change;
879 }
880 default:
881 break;
882 }
883
884 if (p->ainsn.boostable == 0) {
885 if ((regs->ip > copy_ip) &&
886 (regs->ip - copy_ip) + 5 < MAX_INSN_SIZE) {
887
888
889
890
891 synthesize_reljump((void *)regs->ip,
892 (void *)orig_ip + (regs->ip - copy_ip));
893 p->ainsn.boostable = 1;
894 } else {
895 p->ainsn.boostable = -1;
896 }
897 }
898
899 regs->ip += orig_ip - copy_ip;
900
901no_change:
902 restore_btf();
903}
904NOKPROBE_SYMBOL(resume_execution);
905
906
907
908
909
/*
 * Debug-trap (single-step) handler: interrupts are disabled on entry
 * and remain disabled throughout.  Fixes up the stepped context, runs
 * the post-handler, and unwinds any re-entered probe state.  Returns 1
 * when the trap belonged to a kprobe.
 */
int kprobe_debug_handler(struct pt_regs *regs)
{
	struct kprobe *cur = kprobe_running();
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	if (!cur)
		return 0;

	resume_execution(cur, regs, kcb);
	/* Restore the TF/IF bits saved when the probe hit. */
	regs->flags |= kcb->kprobe_saved_flags;

	if ((kcb->kprobe_status != KPROBE_REENTER) && cur->post_handler) {
		kcb->kprobe_status = KPROBE_HIT_SSDONE;
		cur->post_handler(cur, regs, 0);
	}

	/* Restore back the original saved kprobes variables and continue. */
	if (kcb->kprobe_status == KPROBE_REENTER) {
		restore_previous_kprobe(kcb);
		goto out;
	}
	reset_current_kprobe();
out:
	preempt_enable_no_resched();

	/*
	 * If somebody else is single-stepping across a probe point, flags
	 * will have TF set; in that case, continue the remaining
	 * processing of do_debug, as if this was not a probe hit.
	 */
	if (regs->flags & X86_EFLAGS_TF)
		return 0;

	return 1;
}
NOKPROBE_SYMBOL(kprobe_debug_handler);
946
/*
 * Handle a fault (trap @trapnr) that occurred while a kprobe was being
 * processed on this CPU.  Returns 1 if the fault was consumed here,
 * 0 to let normal fault handling proceed.  Callers only invoke this
 * when kprobe_running() is true.
 */
int kprobe_fault_handler(struct pt_regs *regs, int trapnr)
{
	struct kprobe *cur = kprobe_running();
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	if (unlikely(regs->ip == (unsigned long)cur->ainsn.insn)) {
		/* This must happen on single-stepping */
		WARN_ON(kcb->kprobe_status != KPROBE_HIT_SS &&
			kcb->kprobe_status != KPROBE_REENTER);
		/*
		 * We are here because the instruction being single-
		 * stepped caused a page fault.  We reset the current
		 * kprobe, point ip back to the probe address, and allow
		 * the page-fault handler to continue as if this were a
		 * normal fault.
		 */
		regs->ip = (unsigned long)cur->addr;
		/*
		 * Trap flag (TF) was set here for the single-stepping;
		 * clear it again since we aborted the step.
		 */
		regs->flags &= ~X86_EFLAGS_TF;
		/*
		 * If the TF flag was set before the kprobe hit,
		 * don't touch it — kprobe_old_flags preserves it.
		 */
		regs->flags |= kcb->kprobe_old_flags;

		if (kcb->kprobe_status == KPROBE_REENTER)
			restore_previous_kprobe(kcb);
		else
			reset_current_kprobe();
		preempt_enable_no_resched();
	} else if (kcb->kprobe_status == KPROBE_HIT_ACTIVE ||
		   kcb->kprobe_status == KPROBE_HIT_SSDONE) {
		/*
		 * We increment the nmissed count for accounting; the
		 * npre/npostfault counts could also be used for these
		 * specific fault cases.
		 */
		kprobes_inc_nmissed_count(cur);

		/*
		 * We come here because an instruction in the pre/post
		 * handler caused the fault — e.g. the handler accessed
		 * user space via copy_from_user()/get_user().  Let the
		 * user-specified fault handler try to fix it first.
		 */
		if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr))
			return 1;

		/*
		 * If the user-specified fault handler returned zero,
		 * try the exception-table fixup.
		 */
		if (fixup_exception(regs, trapnr))
			return 1;

		/*
		 * The fixup routine could not handle it either —
		 * let do_page_fault() deal with it.
		 */
	}

	return 0;
}
NOKPROBE_SYMBOL(kprobe_fault_handler);
1017
1018
1019
1020
/*
 * Wrapper routine for handling exceptions delivered via the die
 * notifier chain (currently only DIE_GPF is acted upon).
 */
int kprobe_exceptions_notify(struct notifier_block *self, unsigned long val,
			     void *data)
{
	struct die_args *args = data;
	int ret = NOTIFY_DONE;

	if (args->regs && user_mode(args->regs))
		return ret;

	if (val == DIE_GPF) {
		/*
		 * To be potentially processing a kprobe fault and to
		 * trust the result from kprobe_running(), we have to
		 * be non-preemptible.
		 */
		if (!preemptible() && kprobe_running() &&
		    kprobe_fault_handler(args->regs, args->trapnr))
			ret = NOTIFY_STOP;
	}
	return ret;
}
NOKPROBE_SYMBOL(kprobe_exceptions_notify);
1043
/*
 * Jprobe pre-handler: save the full register state and a chunk of the
 * stack, then redirect execution to the jprobe entry function.
 * Returns 1 so the int3 handler does not single-step.
 */
int setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct jprobe *jp = container_of(p, struct jprobe, kp);
	unsigned long addr;
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	kcb->jprobe_saved_regs = *regs;
	kcb->jprobe_saved_sp = stack_addr(regs);
	addr = (unsigned long)(kcb->jprobe_saved_sp);

	/*
	 * As Linus pointed out, gcc assumes that the callee
	 * owns the argument space and could overwrite it, e.g.
	 * tailcall optimization.  So, to be absolutely safe
	 * we also save and restore enough stack bytes to cover
	 * the argument area.
	 */
	memcpy(kcb->jprobes_stack, (kprobe_opcode_t *)addr,
	       MIN_STACK_SIZE(addr));
	regs->flags &= ~X86_EFLAGS_IF;
	trace_hardirqs_off();
	regs->ip = (unsigned long)(jp->entry);

	/*
	 * jprobes use jprobe_return(), which skips the normal return
	 * path of the function; this messes up the accounting of the
	 * function graph tracer.
	 *
	 * Pause function graph tracing while performing the jprobe
	 * function (resumed in longjmp_break_handler()).
	 */
	pause_graph_tracing();
	return 1;
}
NOKPROBE_SYMBOL(setjmp_pre_handler);
1078
/*
 * Called by the jprobe entry function instead of returning normally:
 * switch back to the stack pointer saved in setjmp_pre_handler() and
 * trigger an int3 so longjmp_break_handler() can restore the original
 * context.
 */
void jprobe_return(void)
{
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	asm volatile (
#ifdef CONFIG_X86_64
			" xchg %%rbx,%%rsp \n"
#else
			" xchgl %%ebx,%%esp \n"
#endif
			" int3 \n"
			" .globl jprobe_return_end\n"
			" jprobe_return_end: \n"
			" nop \n"::"b"
			(kcb->jprobe_saved_sp):"memory");
}
NOKPROBE_SYMBOL(jprobe_return);
NOKPROBE_SYMBOL(jprobe_return_end);
1097
/*
 * Break handler for the int3 raised by jprobe_return(): verify the
 * stack pointer matches what was saved, restore the saved registers
 * and stack chunk, and resume the original execution.  Returns 1 when
 * the breakpoint belonged to a jprobe return, 0 otherwise.
 */
int longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
	u8 *addr = (u8 *) (regs->ip - 1);
	struct jprobe *jp = container_of(p, struct jprobe, kp);
	void *saved_sp = kcb->jprobe_saved_sp;

	if ((addr > (u8 *) jprobe_return) &&
	    (addr < (u8 *) jprobe_return_end)) {
		if (stack_addr(regs) != saved_sp) {
			struct pt_regs *saved_regs = &kcb->jprobe_saved_regs;
			printk(KERN_ERR
			       "current sp %p does not match saved sp %p\n",
			       stack_addr(regs), saved_sp);
			printk(KERN_ERR "Saved registers for jprobe %p\n", jp);
			show_regs(saved_regs);
			printk(KERN_ERR "Current registers\n");
			show_regs(regs);
			BUG();
		}
		/* It's OK to start function graph tracing again. */
		unpause_graph_tracing();
		*regs = kcb->jprobe_saved_regs;
		memcpy(saved_sp, kcb->jprobes_stack, MIN_STACK_SIZE(saved_sp));
		preempt_enable_no_resched();
		return 1;
	}
	return 0;
}
NOKPROBE_SYMBOL(longjmp_break_handler);
1128
1129bool arch_within_kprobe_blacklist(unsigned long addr)
1130{
1131 return (addr >= (unsigned long)__kprobes_text_start &&
1132 addr < (unsigned long)__kprobes_text_end) ||
1133 (addr >= (unsigned long)__entry_text_start &&
1134 addr < (unsigned long)__entry_text_end);
1135}
1136
/* No arch-specific kprobes initialization is needed on x86. */
int __init arch_init_kprobes(void)
{
	return 0;
}
1141
/*
 * On x86 the kretprobe trampoline is plain code, never itself a
 * kprobe, so always report "not a trampoline probe".
 */
int arch_trampoline_kprobe(struct kprobe *p)
{
	return 0;
}
1146