/*
 *  Kernel Probes (KProbes)
 *  kernel/kprobes.c
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * Copyright (C) IBM Corporation, 2002, 2004
 */
#include <linux/kprobes.h>
#include <linux/hash.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/stddef.h>
#include <linux/export.h>
#include <linux/moduleloader.h>
#include <linux/kallsyms.h>
#include <linux/freezer.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/sysctl.h>
#include <linux/kdebug.h>
#include <linux/memory.h>
#include <linux/ftrace.h>
#include <linux/cpu.h>
#include <linux/jump_label.h>

#include <asm-generic/sections.h>
#include <asm/cacheflush.h>
#include <asm/errno.h>
#include <asm/uaccess.h>

#define KPROBE_HASH_BITS 6
#define KPROBE_TABLE_SIZE (1 << KPROBE_HASH_BITS)
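
/*
 * Both hash tables below have KPROBE_TABLE_SIZE (64) buckets: kprobes are
 * hashed by probed address, kretprobe instances by task pointer, so all
 * live instances of one task land on a single bucket and its lock.
 */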

/*
 * Some oddball architectures like 64bit powerpc have function descriptors
 * so this must be overridable.
 */
#ifndef kprobe_lookup_name
#define kprobe_lookup_name(name, addr) \
	addr = ((kprobe_opcode_t *)(kallsyms_lookup_name(name)))
#endif

static int kprobes_initialized;
static struct hlist_head kprobe_table[KPROBE_TABLE_SIZE];
static struct hlist_head kretprobe_inst_table[KPROBE_TABLE_SIZE];

/* NOTE: change this value only with kprobe_mutex held */
static bool kprobes_all_disarmed;

/* This protects kprobe_table and optimizing_kprobe_top */
static DEFINE_MUTEX(kprobe_mutex);
static DEFINE_PER_CPU(struct kprobe *, kprobe_instance) = NULL;
static struct {
	raw_spinlock_t lock ____cacheline_aligned_in_smp;
} kretprobe_table_locks[KPROBE_TABLE_SIZE];

static raw_spinlock_t *kretprobe_table_lock_ptr(unsigned long hash)
{
	return &(kretprobe_table_locks[hash].lock);
}

/*
 * Normally, functions that we'd want to prohibit kprobes in, are marked
 * __kprobes. But, there are cases where such functions already belong to
 * a different section (__sched for preempt_schedule)
 *
 * For such cases, we now have a blacklist
 */
static struct kprobe_blackpoint kprobe_blacklist[] = {
	{"preempt_schedule",},
	{"native_get_debugreg",},
	{"irq_entries_start",},
	{"common_interrupt",},
	{"mcount",},	/* mcount can be called from everywhere */
	{NULL}		/* Terminator */
};

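/*
 * Instruction slot cache: out-of-line single-step buffers are carved from
 * module_alloc()ed pages, one PAGE_SIZE page at a time, and handed out in
 * insn_size-sized slots below.
 */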
#ifdef __ARCH_WANT_KPROBES_INSN_SLOT
/*
 * kprobe->ainsn.insn points to the copy of the instruction to be
 * single-stepped. x86_64, POWER4 and above have no-exec support and
 * stepping on the instruction on a vmalloced/kmalloced/data page
 * is a recipe for disaster
 */
struct kprobe_insn_page {
	struct list_head list;
	kprobe_opcode_t *insns;		/* Page of instruction slots */
	int nused;
	int ngarbage;
	char slot_used[];
};

#define KPROBE_INSN_PAGE_SIZE(slots)			\
	(offsetof(struct kprobe_insn_page, slot_used) +	\
	 (sizeof(char) * (slots)))

struct kprobe_insn_cache {
	struct list_head pages;	/* list of kprobe_insn_page */
	size_t insn_size;	/* size of an instruction slot */
	int nr_garbage;
};

static int slots_per_page(struct kprobe_insn_cache *c)
{
	return PAGE_SIZE/(c->insn_size * sizeof(kprobe_opcode_t));
}

enum kprobe_slot_state {
	SLOT_CLEAN = 0,
	SLOT_DIRTY = 1,
	SLOT_USED = 2,
};
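
/*
 * Slot life cycle: CLEAN -> USED when handed out by __get_insn_slot();
 * USED -> DIRTY when freed while it may still be executed out-of-line;
 * DIRTY -> CLEAN once collect_garbage_slots() has waited for all CPUs
 * (synchronize_sched()) and reclaimed the slot.
 */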

static DEFINE_MUTEX(kprobe_insn_mutex);	/* Protects kprobe_insn_slots */
static struct kprobe_insn_cache kprobe_insn_slots = {
	.pages = LIST_HEAD_INIT(kprobe_insn_slots.pages),
	.insn_size = MAX_INSN_SIZE,
	.nr_garbage = 0,
};
static int __kprobes collect_garbage_slots(struct kprobe_insn_cache *c);

/**
 * __get_insn_slot() - Find a slot on an executable page for an instruction.
 * We allocate an executable page if there's no room on existing ones.
 */
static kprobe_opcode_t __kprobes *__get_insn_slot(struct kprobe_insn_cache *c)
{
	struct kprobe_insn_page *kip;

 retry:
	list_for_each_entry(kip, &c->pages, list) {
		if (kip->nused < slots_per_page(c)) {
			int i;
			for (i = 0; i < slots_per_page(c); i++) {
				if (kip->slot_used[i] == SLOT_CLEAN) {
					kip->slot_used[i] = SLOT_USED;
					kip->nused++;
					return kip->insns + (i * c->insn_size);
				}
			}
			/* kip->nused is broken. Fix it. */
			kip->nused = slots_per_page(c);
			WARN_ON(1);
		}
	}

	/* If there are any garbage slots, collect it and try again. */
	if (c->nr_garbage && collect_garbage_slots(c) == 0)
		goto retry;

	/* All out of space.  Need to allocate a new page. */
	kip = kmalloc(KPROBE_INSN_PAGE_SIZE(slots_per_page(c)), GFP_KERNEL);
	if (!kip)
		return NULL;

	/*
	 * Use module_alloc so this page is within +/- 2GB of where the
	 * kernel image and loaded module images reside. This is required
	 * so x86_64 can correctly handle the %rip-relative fixups.
	 */
	kip->insns = module_alloc(PAGE_SIZE);
	if (!kip->insns) {
		kfree(kip);
		return NULL;
	}
	INIT_LIST_HEAD(&kip->list);
	memset(kip->slot_used, SLOT_CLEAN, slots_per_page(c));
	kip->slot_used[0] = SLOT_USED;
	kip->nused = 1;
	kip->ngarbage = 0;
	list_add(&kip->list, &c->pages);
	return kip->insns;
}

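/* Allocate a slot for a regular kprobe; serialized by kprobe_insn_mutex. */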
kprobe_opcode_t __kprobes *get_insn_slot(void)
{
	kprobe_opcode_t *ret = NULL;

	mutex_lock(&kprobe_insn_mutex);
	ret = __get_insn_slot(&kprobe_insn_slots);
	mutex_unlock(&kprobe_insn_mutex);

	return ret;
}

/* Return 1 if all garbages are collected, otherwise 0. */
static int __kprobes collect_one_slot(struct kprobe_insn_page *kip, int idx)
{
	kip->slot_used[idx] = SLOT_CLEAN;
	kip->nused--;
	if (kip->nused == 0) {
		/*
		 * Page is no longer in use.  Free it unless
		 * it's the last one.  We keep the last one
		 * so as not to have to set it up again the
		 * next time somebody inserts a probe.
		 */
		if (!list_is_singular(&kip->list)) {
			list_del(&kip->list);
			module_free(NULL, kip->insns);
			kfree(kip);
		}
		return 1;
	}
	return 0;
}

static int __kprobes collect_garbage_slots(struct kprobe_insn_cache *c)
{
	struct kprobe_insn_page *kip, *next;

	/* Ensure no-one is interrupted on the garbages */
	synchronize_sched();

	list_for_each_entry_safe(kip, next, &c->pages, list) {
		int i;
		if (kip->ngarbage == 0)
			continue;
		kip->ngarbage = 0;	/* we will collect all garbages */
		for (i = 0; i < slots_per_page(c); i++) {
			if (kip->slot_used[i] == SLOT_DIRTY &&
			    collect_one_slot(kip, i))
				break;
		}
	}
	c->nr_garbage = 0;
	return 0;
}

static void __kprobes __free_insn_slot(struct kprobe_insn_cache *c,
				       kprobe_opcode_t *slot, int dirty)
{
	struct kprobe_insn_page *kip;

	list_for_each_entry(kip, &c->pages, list) {
		long idx = ((long)slot - (long)kip->insns) /
				(c->insn_size * sizeof(kprobe_opcode_t));
		if (idx >= 0 && idx < slots_per_page(c)) {
			WARN_ON(kip->slot_used[idx] != SLOT_USED);
			if (dirty) {
				kip->slot_used[idx] = SLOT_DIRTY;
				kip->ngarbage++;
				if (++c->nr_garbage > slots_per_page(c))
					collect_garbage_slots(c);
			} else
				collect_one_slot(kip, idx);
			return;
		}
	}
	/* Could not free this slot. */
	WARN_ON(1);
}

void __kprobes free_insn_slot(kprobe_opcode_t * slot, int dirty)
{
	mutex_lock(&kprobe_insn_mutex);
	__free_insn_slot(&kprobe_insn_slots, slot, dirty);
	mutex_unlock(&kprobe_insn_mutex);
}
#ifdef CONFIG_OPTPROBES
/* For optimized_kprobe buffer */
static DEFINE_MUTEX(kprobe_optinsn_mutex); /* Protects kprobe_optinsn_slots */
static struct kprobe_insn_cache kprobe_optinsn_slots = {
	.pages = LIST_HEAD_INIT(kprobe_optinsn_slots.pages),
	/* .insn_size is initialized later */
	.nr_garbage = 0,
};
/* Get a slot for optimized_kprobe buffer */
kprobe_opcode_t __kprobes *get_optinsn_slot(void)
{
	kprobe_opcode_t *ret = NULL;

	mutex_lock(&kprobe_optinsn_mutex);
	ret = __get_insn_slot(&kprobe_optinsn_slots);
	mutex_unlock(&kprobe_optinsn_mutex);

	return ret;
}

void __kprobes free_optinsn_slot(kprobe_opcode_t * slot, int dirty)
{
	mutex_lock(&kprobe_optinsn_mutex);
	__free_insn_slot(&kprobe_optinsn_slots, slot, dirty);
	mutex_unlock(&kprobe_optinsn_mutex);
}
#endif
#endif

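/*
 * kprobe_instance tracks, per CPU, the probe whose handler is currently
 * running, so aggr_fault_handler() and aggr_break_handler() can route
 * events to it. Probe handlers run with preemption disabled, so the
 * __this_cpu accessors below are safe.
 */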
static inline void set_kprobe_instance(struct kprobe *kp)
{
	__this_cpu_write(kprobe_instance, kp);
}

static inline void reset_kprobe_instance(void)
{
	__this_cpu_write(kprobe_instance, NULL);
}

/*
 * This routine is called either:
 *	- under the kprobe_mutex - during kprobe_[un]register()
 *				OR
 *	- with preemption disabled - from arch/xxx/kernel/kprobes.c
 */
struct kprobe __kprobes *get_kprobe(void *addr)
{
	struct hlist_head *head;
	struct kprobe *p;

	head = &kprobe_table[hash_ptr(addr, KPROBE_HASH_BITS)];
	hlist_for_each_entry_rcu(p, head, hlist) {
		if (p->addr == addr)
			return p;
	}

	return NULL;
}

static int __kprobes aggr_pre_handler(struct kprobe *p, struct pt_regs *regs);

/* Return true if the kprobe is an aggregator */
static inline int kprobe_aggrprobe(struct kprobe *p)
{
	return p->pre_handler == aggr_pre_handler;
}

/* Return true(!0) if the kprobe is unused */
static inline int kprobe_unused(struct kprobe *p)
{
	return kprobe_aggrprobe(p) && kprobe_disabled(p) &&
	       list_empty(&p->list);
}

/*
 * Keep all fields in the kprobe consistent
 */
static inline void copy_kprobe(struct kprobe *ap, struct kprobe *p)
{
	memcpy(&p->opcode, &ap->opcode, sizeof(kprobe_opcode_t));
	memcpy(&p->ainsn, &ap->ainsn, sizeof(struct arch_specific_insn));
}

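/*
 * Kprobe jump optimization (optprobes): where the arch supports it, the
 * probe breakpoint is replaced by a jump to a trampoline. Probes move
 * through the staging lists below and are patched in batches from a
 * delayed work item, which bounds the cost of code patching.
 */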
#ifdef CONFIG_OPTPROBES
/* NOTE: change this value only with kprobe_mutex held */
static bool kprobes_allow_optimization;

/*
 * Call all pre_handler on the list, but ignores its return value.
 * This must be called from arch-dep optimized caller.
 */
void __kprobes opt_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe *kp;

	list_for_each_entry_rcu(kp, &p->list, list) {
		if (kp->pre_handler && likely(!kprobe_disabled(kp))) {
			set_kprobe_instance(kp);
			kp->pre_handler(kp, regs);
		}
		reset_kprobe_instance();
	}
}

/* Free optimized instructions and optimized_kprobe */
static __kprobes void free_aggr_kprobe(struct kprobe *p)
{
	struct optimized_kprobe *op;

	op = container_of(p, struct optimized_kprobe, kp);
	arch_remove_optimized_kprobe(op);
	arch_remove_kprobe(p);
	kfree(op);
}

/* Return true(!0) if the kprobe is ready for optimization. */
static inline int kprobe_optready(struct kprobe *p)
{
	struct optimized_kprobe *op;

	if (kprobe_aggrprobe(p)) {
		op = container_of(p, struct optimized_kprobe, kp);
		return arch_prepared_optinsn(&op->optinsn);
	}

	return 0;
}

/* Return true(!0) if the kprobe is disarmed. Note: p must be on hash list */
static inline int kprobe_disarmed(struct kprobe *p)
{
	struct optimized_kprobe *op;

	/* If kprobe is not aggr/opt probe, just return kprobe is disabled */
	if (!kprobe_aggrprobe(p))
		return kprobe_disabled(p);

	op = container_of(p, struct optimized_kprobe, kp);

	return kprobe_disabled(p) && list_empty(&op->list);
}

/* Return true(!0) if the probe is queued on (un)optimizing lists */
static int __kprobes kprobe_queued(struct kprobe *p)
{
	struct optimized_kprobe *op;

	if (kprobe_aggrprobe(p)) {
		op = container_of(p, struct optimized_kprobe, kp);
		if (!list_empty(&op->list))
			return 1;
	}
	return 0;
}

/*
 * Return an optimized kprobe whose optimizing code replaces
 * instructions including addr (exclude breakpoint).
 */
static struct kprobe *__kprobes get_optimized_kprobe(unsigned long addr)
{
	int i;
	struct kprobe *p = NULL;
	struct optimized_kprobe *op;

	/* Don't check i == 0, since that point should be the breakpoint */
	for (i = 1; !p && i < MAX_OPTIMIZED_LENGTH; i++)
		p = get_kprobe((void *)(addr - i));

	if (p && kprobe_optready(p)) {
		op = container_of(p, struct optimized_kprobe, kp);
		if (arch_within_optimized_kprobe(op, addr))
			return p;
	}

	return NULL;
}

/* Optimization staging list, protected by kprobe_mutex */
static LIST_HEAD(optimizing_list);
static LIST_HEAD(unoptimizing_list);
static LIST_HEAD(freeing_list);

static void kprobe_optimizer(struct work_struct *work);
static DECLARE_DELAYED_WORK(optimizing_work, kprobe_optimizer);
#define OPTIMIZE_DELAY 5
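/* OPTIMIZE_DELAY is in jiffies; batching amortizes the cost of patching. */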

/*
 * Optimize (replace a breakpoint with a jump) kprobes listed on
 * optimizing_list.
 */
static __kprobes void do_optimize_kprobes(void)
{
	/* Optimization never be done when disarmed */
	if (kprobes_all_disarmed || !kprobes_allow_optimization ||
	    list_empty(&optimizing_list))
		return;

	/*
	 * The optimization/unoptimization refers to online_cpus via
	 * stop_machine(), and cpu-hotplug modifies online_cpus. At the
	 * same time, text_mutex is held both in cpu-hotplug and here.
	 * This combination can cause a deadlock (cpu-hotplug tries to
	 * lock text_mutex but stop_machine() cannot be done because
	 * online_cpus has been changed). To avoid this deadlock, call
	 * get_online_cpus() before taking text_mutex, keeping cpu-hotplug
	 * out of the text_mutex critical section.
	 */
	get_online_cpus();
	mutex_lock(&text_mutex);
	arch_optimize_kprobes(&optimizing_list);
	mutex_unlock(&text_mutex);
	put_online_cpus();
}

/*
 * Unoptimize (replace a jump with a breakpoint and remove the breakpoint
 * if need) kprobes listed on unoptimizing_list.
 */
static __kprobes void do_unoptimize_kprobes(void)
{
	struct optimized_kprobe *op, *tmp;

	/* Unoptimization must be done anytime */
	if (list_empty(&unoptimizing_list))
		return;

	/* Ditto to do_optimize_kprobes */
	get_online_cpus();
	mutex_lock(&text_mutex);
	arch_unoptimize_kprobes(&unoptimizing_list, &freeing_list);
	/* Loop free_list for disarming */
	list_for_each_entry_safe(op, tmp, &freeing_list, list) {
		/* Disarm probes if marked disabled */
		if (kprobe_disabled(&op->kp))
			arch_disarm_kprobe(&op->kp);
		if (kprobe_unused(&op->kp)) {
			/*
			 * Remove unused probes from hash list. After waiting
			 * for synchronization, these probes are reclaimed.
			 * (reclaiming is done by do_free_cleaned_kprobes.)
			 */
			hlist_del_rcu(&op->kp.hlist);
		} else
			list_del_init(&op->list);
	}
	mutex_unlock(&text_mutex);
	put_online_cpus();
}

/* Reclaim all kprobes on the free_list */
static __kprobes void do_free_cleaned_kprobes(void)
{
	struct optimized_kprobe *op, *tmp;

	list_for_each_entry_safe(op, tmp, &freeing_list, list) {
		BUG_ON(!kprobe_unused(&op->kp));
		list_del_init(&op->list);
		free_aggr_kprobe(&op->kp);
	}
}

/* Start optimizer after OPTIMIZE_DELAY passed */
static __kprobes void kick_kprobe_optimizer(void)
{
	schedule_delayed_work(&optimizing_work, OPTIMIZE_DELAY);
}

/* Kprobe jump optimizer */
static __kprobes void kprobe_optimizer(struct work_struct *work)
{
	mutex_lock(&kprobe_mutex);
	/* Lock modules while optimizing kprobes */
	mutex_lock(&module_mutex);

	/*
	 * Step 1: Unoptimize kprobes and collect cleaned (unused and
	 * disarmed) kprobes before waiting for the quiescence period.
	 */
	do_unoptimize_kprobes();

	/*
	 * Step 2: Wait for the quiescence period to ensure all running
	 * interrupts are done. Because an optprobe may modify multiple
	 * instructions, there is a chance that the Nth instruction is
	 * interrupted. In that case, a running interrupt can return to
	 * the 2nd-Nth byte of the jump instruction. This wait avoids it.
	 */
	synchronize_sched();

	/* Step 3: Optimize kprobes after the quiescence period */
	do_optimize_kprobes();

	/* Step 4: Free cleaned kprobes after the quiescence period */
	do_free_cleaned_kprobes();

	mutex_unlock(&module_mutex);
	mutex_unlock(&kprobe_mutex);

	/* Step 5: Kick optimizer again if needed */
	if (!list_empty(&optimizing_list) || !list_empty(&unoptimizing_list))
		kick_kprobe_optimizer();
}

/* Wait for completing optimization and unoptimization */
static __kprobes void wait_for_kprobe_optimizer(void)
{
	mutex_lock(&kprobe_mutex);

	while (!list_empty(&optimizing_list) || !list_empty(&unoptimizing_list)) {
		mutex_unlock(&kprobe_mutex);

		/* this will also make optimizing_work execute immediately */
		flush_delayed_work(&optimizing_work);
		/* @optimizing_work might not have been queued yet, relax */
		cpu_relax();

		mutex_lock(&kprobe_mutex);
	}

	mutex_unlock(&kprobe_mutex);
}

/* Optimize kprobe if p is ready to be optimized */
static __kprobes void optimize_kprobe(struct kprobe *p)
{
	struct optimized_kprobe *op;

	/* Check if the kprobe is disabled or not ready for optimization. */
	if (!kprobe_optready(p) || !kprobes_allow_optimization ||
	    (kprobe_disabled(p) || kprobes_all_disarmed))
		return;

	/* Both of break_handler and post_handler are not supported. */
	if (p->break_handler || p->post_handler)
		return;

	op = container_of(p, struct optimized_kprobe, kp);

	/* Check there is no other kprobes at the optimized instructions */
	if (arch_check_optimized_kprobe(op) < 0)
		return;

	/* Check if it is already optimized. */
	if (op->kp.flags & KPROBE_FLAG_OPTIMIZED)
		return;
	op->kp.flags |= KPROBE_FLAG_OPTIMIZED;

	if (!list_empty(&op->list))
		/* This is under unoptimizing. Just dequeue the probe */
		list_del_init(&op->list);
	else {
		list_add(&op->list, &optimizing_list);
		kick_kprobe_optimizer();
	}
}

/* Short cut to direct unoptimizing */
static __kprobes void force_unoptimize_kprobe(struct optimized_kprobe *op)
{
	get_online_cpus();
	arch_unoptimize_kprobe(op);
	put_online_cpus();
	if (kprobe_disabled(&op->kp))
		arch_disarm_kprobe(&op->kp);
}

/* Unoptimize a kprobe if p is optimized */
static __kprobes void unoptimize_kprobe(struct kprobe *p, bool force)
{
	struct optimized_kprobe *op;

	if (!kprobe_aggrprobe(p) || kprobe_disarmed(p))
		return; /* This is not an optprobe nor optimized */

	op = container_of(p, struct optimized_kprobe, kp);
	if (!kprobe_optimized(p)) {
		/* Unoptimized or unoptimizing case */
		if (force && !list_empty(&op->list)) {
			/*
			 * Only if this is unoptimizing kprobe and forced,
			 * forcibly unoptimize it. This shouldn't happen
			 * if called from unregister_kprobe().
			 */
			list_del_init(&op->list);
			force_unoptimize_kprobe(op);
		}
		return;
	}

	op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED;
	if (!list_empty(&op->list)) {
		/* Dequeue from the optimization queue */
		list_del_init(&op->list);
		return;
	}
	/* Optimized kprobe case */
	if (force)
		/* Forcibly update the code: this is a special case */
		force_unoptimize_kprobe(op);
	else {
		list_add(&op->list, &unoptimizing_list);
		kick_kprobe_optimizer();
	}
}

/* Cancel unoptimizing for reusing */
static void reuse_unused_kprobe(struct kprobe *ap)
{
	struct optimized_kprobe *op;

	BUG_ON(!kprobe_unused(ap));
	/*
	 * Unused kprobe MUST be on the way of delayed unoptimizing (means
	 * there is still a relative jump) and disabled.
	 */
	op = container_of(ap, struct optimized_kprobe, kp);
	if (unlikely(list_empty(&op->list)))
		printk(KERN_WARNING "Warning: found a stray unused "
		       "aggrprobe@%p\n", ap->addr);
	/* Enable the probe again */
	ap->flags &= ~KPROBE_FLAG_DISABLED;
	/* Optimize it again (remove from op->list) */
	BUG_ON(!kprobe_optready(ap));
	optimize_kprobe(ap);
}

/* Remove optimized instructions */
static void __kprobes kill_optimized_kprobe(struct kprobe *p)
{
	struct optimized_kprobe *op;

	op = container_of(p, struct optimized_kprobe, kp);
	if (!list_empty(&op->list))
		/* Dequeue from the (un)optimization queue */
		list_del_init(&op->list);
	op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED;

	if (kprobe_unused(p)) {
		/* Enqueue if it is unused */
		list_add(&op->list, &freeing_list);
		/*
		 * Remove unused probes from the hash list. After waiting
		 * for synchronization, this probe is reclaimed.
		 * (reclaiming is done by do_free_cleaned_kprobes().)
		 */
		hlist_del_rcu(&op->kp.hlist);
	}

	/* Don't touch the code, because it is already freed. */
	arch_remove_optimized_kprobe(op);
}

/* Try to prepare optimized instructions */
static __kprobes void prepare_optimized_kprobe(struct kprobe *p)
{
	struct optimized_kprobe *op;

	op = container_of(p, struct optimized_kprobe, kp);
	arch_prepare_optimized_kprobe(op);
}

/* Allocate new optimized_kprobe and try to prepare optimized instructions */
static __kprobes struct kprobe *alloc_aggr_kprobe(struct kprobe *p)
{
	struct optimized_kprobe *op;

	op = kzalloc(sizeof(struct optimized_kprobe), GFP_KERNEL);
	if (!op)
		return NULL;

	INIT_LIST_HEAD(&op->list);
	op->kp.addr = p->addr;
	arch_prepare_optimized_kprobe(op);

	return &op->kp;
}

static void __kprobes init_aggr_kprobe(struct kprobe *ap, struct kprobe *p);

/*
 * Prepare an optimized_kprobe and optimize it
 * NOTE: p must be a normal registered kprobe
 */
static __kprobes void try_to_optimize_kprobe(struct kprobe *p)
{
	struct kprobe *ap;
	struct optimized_kprobe *op;

	/* Impossible to optimize ftrace-based kprobe */
	if (kprobe_ftrace(p))
		return;

	/* For preparing optimization, jump_label_text_reserved() is called */
	jump_label_lock();
	mutex_lock(&text_mutex);

	ap = alloc_aggr_kprobe(p);
	if (!ap)
		goto out;

	op = container_of(ap, struct optimized_kprobe, kp);
	if (!arch_prepared_optinsn(&op->optinsn)) {
		/* If failed to setup optimizing, fallback to kprobe */
		arch_remove_optimized_kprobe(op);
		kfree(op);
		goto out;
	}

	init_aggr_kprobe(ap, p);
	optimize_kprobe(ap);	/* This just kicks the optimizer thread */

out:
	mutex_unlock(&text_mutex);
	jump_label_unlock();
}

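/*
 * The sysctl handler below (debug.kprobes-optimization) toggles
 * optimization globally at runtime; both paths walk every hash bucket
 * under kprobe_mutex.
 */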
#ifdef CONFIG_SYSCTL
static void __kprobes optimize_all_kprobes(void)
{
	struct hlist_head *head;
	struct kprobe *p;
	unsigned int i;

	mutex_lock(&kprobe_mutex);
	/* If optimization is already allowed, just return */
	if (kprobes_allow_optimization)
		goto out;

	kprobes_allow_optimization = true;
	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
		head = &kprobe_table[i];
		hlist_for_each_entry_rcu(p, head, hlist)
			if (!kprobe_disabled(p))
				optimize_kprobe(p);
	}
	printk(KERN_INFO "Kprobes globally optimized\n");
out:
	mutex_unlock(&kprobe_mutex);
}

static void __kprobes unoptimize_all_kprobes(void)
{
	struct hlist_head *head;
	struct kprobe *p;
	unsigned int i;

	mutex_lock(&kprobe_mutex);
	/* If optimization is already prohibited, just return */
	if (!kprobes_allow_optimization) {
		mutex_unlock(&kprobe_mutex);
		return;
	}

	kprobes_allow_optimization = false;
	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
		head = &kprobe_table[i];
		hlist_for_each_entry_rcu(p, head, hlist) {
			if (!kprobe_disabled(p))
				unoptimize_kprobe(p, false);
		}
	}
	mutex_unlock(&kprobe_mutex);

	/* Wait for unoptimizing completion */
	wait_for_kprobe_optimizer();
	printk(KERN_INFO "Kprobes globally unoptimized\n");
}

static DEFINE_MUTEX(kprobe_sysctl_mutex);
int sysctl_kprobes_optimization;
int proc_kprobes_optimization_handler(struct ctl_table *table, int write,
				      void __user *buffer, size_t *length,
				      loff_t *ppos)
{
	int ret;

	mutex_lock(&kprobe_sysctl_mutex);
	sysctl_kprobes_optimization = kprobes_allow_optimization ? 1 : 0;
	ret = proc_dointvec_minmax(table, write, buffer, length, ppos);

	if (sysctl_kprobes_optimization)
		optimize_all_kprobes();
	else
		unoptimize_all_kprobes();
	mutex_unlock(&kprobe_sysctl_mutex);

	return ret;
}
#endif /* CONFIG_SYSCTL */

/* Put a breakpoint for a probe. Must be locked with kprobe_mutex. */
static void __kprobes __arm_kprobe(struct kprobe *p)
{
	struct kprobe *_p;

	/* Check collision with other optimized kprobes */
	_p = get_optimized_kprobe((unsigned long)p->addr);
	if (unlikely(_p))
		/* Fallback to unoptimized kprobe */
		unoptimize_kprobe(_p, true);

	arch_arm_kprobe(p);
	optimize_kprobe(p);	/* Try to optimize (add kprobe to a list) */
}

/* Remove the breakpoint of a probe. Must be locked with kprobe_mutex. */
static void __kprobes __disarm_kprobe(struct kprobe *p, bool reopt)
{
	struct kprobe *_p;

	unoptimize_kprobe(p, false);	/* Try to unoptimize */

	if (!kprobe_queued(p)) {
		arch_disarm_kprobe(p);
		/* If another kprobe was blocked, optimize it. */
		_p = get_optimized_kprobe((unsigned long)p->addr);
		if (unlikely(_p) && reopt)
			optimize_kprobe(_p);
	}
	/* TODO: reoptimize others after unoptimizing this probe */
}

#else /* !CONFIG_OPTPROBES */

#define optimize_kprobe(p)			do {} while (0)
#define unoptimize_kprobe(p, f)			do {} while (0)
#define kill_optimized_kprobe(p)		do {} while (0)
#define prepare_optimized_kprobe(p)		do {} while (0)
#define try_to_optimize_kprobe(p)		do {} while (0)
#define __arm_kprobe(p)				arch_arm_kprobe(p)
#define __disarm_kprobe(p, o)			arch_disarm_kprobe(p)
#define kprobe_disarmed(p)			kprobe_disabled(p)
#define wait_for_kprobe_optimizer()		do {} while (0)

/* There should be no unused kprobes that can be reused without optimization */
static void reuse_unused_kprobe(struct kprobe *ap)
{
	printk(KERN_ERR "Error: There should be no unused kprobe here.\n");
	BUG_ON(kprobe_unused(ap));
}

static __kprobes void free_aggr_kprobe(struct kprobe *p)
{
	arch_remove_kprobe(p);
	kfree(p);
}

static __kprobes struct kprobe *alloc_aggr_kprobe(struct kprobe *p)
{
	return kzalloc(sizeof(struct kprobe), GFP_KERNEL);
}
#endif /* CONFIG_OPTPROBES */

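/*
 * Probes placed on ftrace locations do not use a breakpoint at all:
 * arming adds p->addr to kprobe_ftrace_ops' filter, and the ops is
 * registered when the first such probe appears.
 */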
#ifdef CONFIG_KPROBES_ON_FTRACE
static struct ftrace_ops kprobe_ftrace_ops __read_mostly = {
	.func = kprobe_ftrace_handler,
	.flags = FTRACE_OPS_FL_SAVE_REGS,
};
static int kprobe_ftrace_enabled;

/* Must ensure p->addr is really on ftrace */
static int __kprobes prepare_kprobe(struct kprobe *p)
{
	if (!kprobe_ftrace(p))
		return arch_prepare_kprobe(p);

	return arch_prepare_kprobe_ftrace(p);
}

/* Caller must lock kprobe_mutex */
static void __kprobes arm_kprobe_ftrace(struct kprobe *p)
{
	int ret;

	ret = ftrace_set_filter_ip(&kprobe_ftrace_ops,
				   (unsigned long)p->addr, 0, 0);
	WARN(ret < 0, "Failed to arm kprobe-ftrace at %p (%d)\n", p->addr, ret);
	kprobe_ftrace_enabled++;
	if (kprobe_ftrace_enabled == 1) {
		ret = register_ftrace_function(&kprobe_ftrace_ops);
		WARN(ret < 0, "Failed to init kprobe-ftrace (%d)\n", ret);
	}
}

/* Caller must lock kprobe_mutex */
static void __kprobes disarm_kprobe_ftrace(struct kprobe *p)
{
	int ret;

	kprobe_ftrace_enabled--;
	if (kprobe_ftrace_enabled == 0) {
		ret = unregister_ftrace_function(&kprobe_ftrace_ops);
		WARN(ret < 0, "Failed to init kprobe-ftrace (%d)\n", ret);
	}
	ret = ftrace_set_filter_ip(&kprobe_ftrace_ops,
				   (unsigned long)p->addr, 1, 0);
	WARN(ret < 0, "Failed to disarm kprobe-ftrace at %p (%d)\n", p->addr, ret);
}
#else	/* !CONFIG_KPROBES_ON_FTRACE */
#define prepare_kprobe(p)	arch_prepare_kprobe(p)
#define arm_kprobe_ftrace(p)	do {} while (0)
#define disarm_kprobe_ftrace(p)	do {} while (0)
#endif

/* Arm a kprobe with text_mutex */
static void __kprobes arm_kprobe(struct kprobe *kp)
{
	if (unlikely(kprobe_ftrace(kp))) {
		arm_kprobe_ftrace(kp);
		return;
	}
	/*
	 * Here, since __arm_kprobe() doesn't use stop_machine(),
	 * this doesn't cause deadlock on text_mutex. So, we don't
	 * need get_online_cpus().
	 */
	mutex_lock(&text_mutex);
	__arm_kprobe(kp);
	mutex_unlock(&text_mutex);
}

/* Disarm a kprobe with text_mutex */
static void __kprobes disarm_kprobe(struct kprobe *kp, bool reopt)
{
	if (unlikely(kprobe_ftrace(kp))) {
		disarm_kprobe_ftrace(kp);
		return;
	}
	/* Ditto */
	mutex_lock(&text_mutex);
	__disarm_kprobe(kp, reopt);
	mutex_unlock(&text_mutex);
}

/*
 * Aggregate handlers for multiple kprobes support - these handlers
 * take care of invoking the individual kprobe handlers on p->list
 */
static int __kprobes aggr_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe *kp;

	list_for_each_entry_rcu(kp, &p->list, list) {
		if (kp->pre_handler && likely(!kprobe_disabled(kp))) {
			set_kprobe_instance(kp);
			if (kp->pre_handler(kp, regs))
				return 1;
		}
		reset_kprobe_instance();
	}
	return 0;
}

static void __kprobes aggr_post_handler(struct kprobe *p, struct pt_regs *regs,
					unsigned long flags)
{
	struct kprobe *kp;

	list_for_each_entry_rcu(kp, &p->list, list) {
		if (kp->post_handler && likely(!kprobe_disabled(kp))) {
			set_kprobe_instance(kp);
			kp->post_handler(kp, regs, flags);
			reset_kprobe_instance();
		}
	}
}

static int __kprobes aggr_fault_handler(struct kprobe *p, struct pt_regs *regs,
					int trapnr)
{
	struct kprobe *cur = __this_cpu_read(kprobe_instance);

	/*
	 * if we faulted "during" the execution of a user specified
	 * probe handler, invoke just that probe's fault handler
	 */
	if (cur && cur->fault_handler) {
		if (cur->fault_handler(cur, regs, trapnr))
			return 1;
	}
	return 0;
}

static int __kprobes aggr_break_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe *cur = __this_cpu_read(kprobe_instance);
	int ret = 0;

	if (cur && cur->break_handler) {
		if (cur->break_handler(cur, regs))
			ret = 1;
	}
	reset_kprobe_instance();
	return ret;
}

/* Walks the list and increments nmissed count for multiprobe case */
void __kprobes kprobes_inc_nmissed_count(struct kprobe *p)
{
	struct kprobe *kp;
	if (!kprobe_aggrprobe(p)) {
		p->nmissed++;
	} else {
		list_for_each_entry_rcu(kp, &p->list, list)
			kp->nmissed++;
	}
	return;
}

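/*
 * Return a kretprobe instance to its owner's free list, or park it on
 * @head when the owning kretprobe is being unregistered.
 */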
void __kprobes recycle_rp_inst(struct kretprobe_instance *ri,
			       struct hlist_head *head)
{
	struct kretprobe *rp = ri->rp;

	/* remove rp inst off the rprobe_inst_table */
	hlist_del(&ri->hlist);
	INIT_HLIST_NODE(&ri->hlist);
	if (likely(rp)) {
		raw_spin_lock(&rp->lock);
		hlist_add_head(&ri->hlist, &rp->free_instances);
		raw_spin_unlock(&rp->lock);
	} else
		/* Unregistering */
		hlist_add_head(&ri->hlist, head);
}

void __kprobes kretprobe_hash_lock(struct task_struct *tsk,
			 struct hlist_head **head, unsigned long *flags)
__acquires(hlist_lock)
{
	unsigned long hash = hash_ptr(tsk, KPROBE_HASH_BITS);
	raw_spinlock_t *hlist_lock;

	*head = &kretprobe_inst_table[hash];
	hlist_lock = kretprobe_table_lock_ptr(hash);
	raw_spin_lock_irqsave(hlist_lock, *flags);
}

static void __kprobes kretprobe_table_lock(unsigned long hash,
					   unsigned long *flags)
__acquires(hlist_lock)
{
	raw_spinlock_t *hlist_lock = kretprobe_table_lock_ptr(hash);
	raw_spin_lock_irqsave(hlist_lock, *flags);
}

void __kprobes kretprobe_hash_unlock(struct task_struct *tsk,
				     unsigned long *flags)
__releases(hlist_lock)
{
	unsigned long hash = hash_ptr(tsk, KPROBE_HASH_BITS);
	raw_spinlock_t *hlist_lock;

	hlist_lock = kretprobe_table_lock_ptr(hash);
	raw_spin_unlock_irqrestore(hlist_lock, *flags);
}

static void __kprobes kretprobe_table_unlock(unsigned long hash,
					     unsigned long *flags)
__releases(hlist_lock)
{
	raw_spinlock_t *hlist_lock = kretprobe_table_lock_ptr(hash);
	raw_spin_unlock_irqrestore(hlist_lock, *flags);
}

/*
 * This function is called from finish_task_switch when task tk becomes dead,
 * so that we can recycle any function-return probe instances associated
 * with this task. These left over instances represent probed functions
 * that have been called but will never return.
 */
void __kprobes kprobe_flush_task(struct task_struct *tk)
{
	struct kretprobe_instance *ri;
	struct hlist_head *head, empty_rp;
	struct hlist_node *tmp;
	unsigned long hash, flags = 0;

	if (unlikely(!kprobes_initialized))
		/* Early boot.  kretprobe_table_locks not yet initialized. */
		return;

	INIT_HLIST_HEAD(&empty_rp);
	hash = hash_ptr(tk, KPROBE_HASH_BITS);
	head = &kretprobe_inst_table[hash];
	kretprobe_table_lock(hash, &flags);
	hlist_for_each_entry_safe(ri, tmp, head, hlist) {
		if (ri->task == tk)
			recycle_rp_inst(ri, &empty_rp);
	}
	kretprobe_table_unlock(hash, &flags);
	hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
		hlist_del(&ri->hlist);
		kfree(ri);
	}
}

static inline void free_rp_inst(struct kretprobe *rp)
{
	struct kretprobe_instance *ri;
	struct hlist_node *next;

	hlist_for_each_entry_safe(ri, next, &rp->free_instances, hlist) {
		hlist_del(&ri->hlist);
		kfree(ri);
	}
}

static void __kprobes cleanup_rp_inst(struct kretprobe *rp)
{
	unsigned long flags, hash;
	struct kretprobe_instance *ri;
	struct hlist_node *next;
	struct hlist_head *head;

	/* No race here */
	for (hash = 0; hash < KPROBE_TABLE_SIZE; hash++) {
		kretprobe_table_lock(hash, &flags);
		head = &kretprobe_inst_table[hash];
		hlist_for_each_entry_safe(ri, next, head, hlist) {
			if (ri->rp == rp)
				ri->rp = NULL;
		}
		kretprobe_table_unlock(hash, &flags);
	}
	free_rp_inst(rp);
}

/*
 * Add the new probe to ap->list. Fail if this is the
 * second jprobe at the address - two jprobes can't coexist
 */
static int __kprobes add_new_kprobe(struct kprobe *ap, struct kprobe *p)
{
	BUG_ON(kprobe_gone(ap) || kprobe_gone(p));

	if (p->break_handler || p->post_handler)
		unoptimize_kprobe(ap, true);	/* Fall back to normal kprobe */

	if (p->break_handler) {
		if (ap->break_handler)
			return -EEXIST;
		list_add_tail_rcu(&p->list, &ap->list);
		ap->break_handler = aggr_break_handler;
	} else
		list_add_rcu(&p->list, &ap->list);
	if (p->post_handler && !ap->post_handler)
		ap->post_handler = aggr_post_handler;

	return 0;
}

/*
 * Fill in the required fields of the "manager kprobe". Replace the
 * earlier kprobe in the hlist with the manager kprobe
 */
static void __kprobes init_aggr_kprobe(struct kprobe *ap, struct kprobe *p)
{
	/* Copy p's insn slot to ap */
	copy_kprobe(p, ap);
	flush_insn_slot(ap);
	ap->addr = p->addr;
	ap->flags = p->flags & ~KPROBE_FLAG_OPTIMIZED;
	ap->pre_handler = aggr_pre_handler;
	ap->fault_handler = aggr_fault_handler;
	/* We don't care the kprobe gone */
	if (p->post_handler && !kprobe_gone(p))
		ap->post_handler = aggr_post_handler;
	if (p->break_handler && !kprobe_gone(p))
		ap->break_handler = aggr_break_handler;

	INIT_LIST_HEAD(&ap->list);
	INIT_HLIST_NODE(&ap->hlist);

	list_add_rcu(&p->list, &ap->list);
	hlist_replace_rcu(&p->hlist, &ap->hlist);
}

/*
 * This is the second or subsequent kprobe at the address - handle
 * the intricacies
 */
static int __kprobes register_aggr_kprobe(struct kprobe *orig_p,
					  struct kprobe *p)
{
	int ret = 0;
	struct kprobe *ap = orig_p;

	/* For preparing optimization, jump_label_text_reserved() is called */
	jump_label_lock();
	/*
	 * Get online CPUs to avoid a text_mutex deadlock with stop_machine(),
	 * which is invoked by unoptimize_kprobe() in add_new_kprobe().
	 */
	get_online_cpus();
	mutex_lock(&text_mutex);

	if (!kprobe_aggrprobe(orig_p)) {
		/* If orig_p is not an aggr_kprobe, create new aggr_kprobe. */
		ap = alloc_aggr_kprobe(orig_p);
		if (!ap) {
			ret = -ENOMEM;
			goto out;
		}
		init_aggr_kprobe(ap, orig_p);
	} else if (kprobe_unused(ap))
		/* This probe is going to die. Rescue it */
		reuse_unused_kprobe(ap);

	if (kprobe_gone(ap)) {
		/*
		 * Attempting to insert new probe at the same location that
		 * had a probe in the module vaddr area which already
		 * freed. So, the instruction slot has already been
		 * released. We need a new slot for the new probe.
		 */
		ret = arch_prepare_kprobe(ap);
		if (ret)
			/*
			 * Even if fail to allocate new slot, don't need to
			 * free the aggr_probe. It will be used next time, or
			 * freed by unregister_kprobe().
			 */
			goto out;

		/* Prepare optimized instructions if possible. */
		prepare_optimized_kprobe(ap);

		/*
		 * Clear gone flag to prevent allocating new slot again, and
		 * set disabled flag because it is not armed yet.
		 */
		ap->flags = (ap->flags & ~KPROBE_FLAG_GONE)
			    | KPROBE_FLAG_DISABLED;
	}

	/* Copy ap's insn slot to p */
	copy_kprobe(ap, p);
	ret = add_new_kprobe(ap, p);

out:
	mutex_unlock(&text_mutex);
	put_online_cpus();
	jump_label_unlock();

	if (ret == 0 && kprobe_disabled(ap) && !kprobe_disabled(p)) {
		ap->flags &= ~KPROBE_FLAG_DISABLED;
		if (!kprobes_all_disarmed)
			/* Arm the breakpoint again. */
			arm_kprobe(ap);
	}
	return ret;
}

static int __kprobes in_kprobes_functions(unsigned long addr)
{
	struct kprobe_blackpoint *kb;

	if (addr >= (unsigned long)__kprobes_text_start &&
	    addr < (unsigned long)__kprobes_text_end)
		return -EINVAL;
	/*
	 * If there exists a kprobe_blacklist, verify and
	 * fail any probe registration in the prohibited area
	 */
	for (kb = kprobe_blacklist; kb->name != NULL; kb++) {
		if (kb->start_addr) {
			if (addr >= kb->start_addr &&
			    addr < (kb->start_addr + kb->range))
				return -EINVAL;
		}
	}
	return 0;
}

/*
 * If we have a symbol_name argument, look it up and add the offset field
 * to it. This way, we can specify a relative address to a symbol.
 * This returns encoded errors if it fails to look up symbol or invalid
 * combination of parameters.
 */
static kprobe_opcode_t __kprobes *kprobe_addr(struct kprobe *p)
{
	kprobe_opcode_t *addr = p->addr;

	if ((p->symbol_name && p->addr) ||
	    (!p->symbol_name && !p->addr))
		goto invalid;

	if (p->symbol_name) {
		kprobe_lookup_name(p->symbol_name, addr);
		if (!addr)
			return ERR_PTR(-ENOENT);
	}

	addr = (kprobe_opcode_t *)(((char *)addr) + p->offset);
	if (addr)
		return addr;

invalid:
	return ERR_PTR(-EINVAL);
}

/* Check passed kprobe is valid and return kprobe in kprobe_table. */
static struct kprobe * __kprobes __get_valid_kprobe(struct kprobe *p)
{
	struct kprobe *ap, *list_p;

	ap = get_kprobe(p->addr);
	if (unlikely(!ap))
		return NULL;

	if (p != ap) {
		list_for_each_entry_rcu(list_p, &ap->list, list)
			if (list_p == p)
				/* kprobe p is a valid probe */
				goto valid;
		return NULL;
	}
valid:
	return ap;
}

/* Return error if the kprobe is being re-registered */
static inline int check_kprobe_rereg(struct kprobe *p)
{
	int ret = 0;

	mutex_lock(&kprobe_mutex);
	if (__get_valid_kprobe(p))
		ret = -EINVAL;
	mutex_unlock(&kprobe_mutex);

	return ret;
}

static __kprobes int check_kprobe_address_safe(struct kprobe *p,
					       struct module **probed_mod)
{
	int ret = 0;
	unsigned long ftrace_addr;

	/*
	 * If the address is located on a ftrace nop, set the
	 * breakpoint to the following instruction.
	 */
	ftrace_addr = ftrace_location((unsigned long)p->addr);
	if (ftrace_addr) {
#ifdef CONFIG_KPROBES_ON_FTRACE
		/* Given address is not on the instruction boundary */
		if ((unsigned long)p->addr != ftrace_addr)
			return -EILSEQ;
		p->flags |= KPROBE_FLAG_FTRACE;
#else	/* !CONFIG_KPROBES_ON_FTRACE */
		return -EINVAL;
#endif
	}

	jump_label_lock();
	preempt_disable();

	/* Ensure it is not in reserved area nor out of text */
	if (!kernel_text_address((unsigned long) p->addr) ||
	    in_kprobes_functions((unsigned long) p->addr) ||
	    jump_label_text_reserved(p->addr, p->addr)) {
		ret = -EINVAL;
		goto out;
	}

	/* Check if we are probing a module */
	*probed_mod = __module_text_address((unsigned long) p->addr);
	if (*probed_mod) {
		/*
		 * We must hold a refcount of the probed module while updating
		 * its code to prohibit unexpected unloading.
		 */
		if (unlikely(!try_module_get(*probed_mod))) {
			ret = -ENOENT;
			goto out;
		}

		/*
		 * If the module freed .init.text, we couldn't insert
		 * kprobes in there.
		 */
		if (within_module_init((unsigned long)p->addr, *probed_mod) &&
		    (*probed_mod)->state != MODULE_STATE_COMING) {
			module_put(*probed_mod);
			*probed_mod = NULL;
			ret = -ENOENT;
		}
	}
out:
	preempt_enable();
	jump_label_unlock();

	return ret;
}

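/*
 * register_kprobe() resolves and validates the probe address, then either
 * chains the probe onto an existing (aggregate) probe at that address or
 * inserts a fresh breakpoint and tries to optimize it.
 */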
int __kprobes register_kprobe(struct kprobe *p)
{
	int ret;
	struct kprobe *old_p;
	struct module *probed_mod;
	kprobe_opcode_t *addr;

	/* Adjust probe address from symbol */
	addr = kprobe_addr(p);
	if (IS_ERR(addr))
		return PTR_ERR(addr);
	p->addr = addr;

	ret = check_kprobe_rereg(p);
	if (ret)
		return ret;

	/* User can pass only KPROBE_FLAG_DISABLED to register_kprobe */
	p->flags &= KPROBE_FLAG_DISABLED;
	p->nmissed = 0;
	INIT_LIST_HEAD(&p->list);

	ret = check_kprobe_address_safe(p, &probed_mod);
	if (ret)
		return ret;

	mutex_lock(&kprobe_mutex);

	old_p = get_kprobe(p->addr);
	if (old_p) {
		/* Since this may unoptimize old_p, locking text_mutex. */
		ret = register_aggr_kprobe(old_p, p);
		goto out;
	}

	mutex_lock(&text_mutex);	/* Avoiding text modification */
	ret = prepare_kprobe(p);
	mutex_unlock(&text_mutex);
	if (ret)
		goto out;

	INIT_HLIST_NODE(&p->hlist);
	hlist_add_head_rcu(&p->hlist,
		       &kprobe_table[hash_ptr(p->addr, KPROBE_HASH_BITS)]);

	if (!kprobes_all_disarmed && !kprobe_disabled(p))
		arm_kprobe(p);

	/* Try to optimize kprobe */
	try_to_optimize_kprobe(p);

out:
	mutex_unlock(&kprobe_mutex);

	if (probed_mod)
		module_put(probed_mod);

	return ret;
}
EXPORT_SYMBOL_GPL(register_kprobe);

/* Check if all probes on the aggrprobe are disabled */
static int __kprobes aggr_kprobe_disabled(struct kprobe *ap)
{
	struct kprobe *kp;

	list_for_each_entry_rcu(kp, &ap->list, list)
		if (!kprobe_disabled(kp))
			/*
			 * There is an active probe on the list.
			 * We can't disable this ap.
			 */
			return 0;

	return 1;
}

/* Disable one kprobe: Make sure called under kprobe_mutex is locked */
static struct kprobe *__kprobes __disable_kprobe(struct kprobe *p)
{
	struct kprobe *orig_p;

	/* Get an original kprobe for return */
	orig_p = __get_valid_kprobe(p);
	if (unlikely(orig_p == NULL))
		return NULL;

	if (!kprobe_disabled(p)) {
		/* Disable probe if it is a child probe */
		if (p != orig_p)
			p->flags |= KPROBE_FLAG_DISABLED;

		/* Try to disarm and disable this/parent probe */
		if (p == orig_p || aggr_kprobe_disabled(orig_p)) {
			disarm_kprobe(orig_p, true);
			orig_p->flags |= KPROBE_FLAG_DISABLED;
		}
	}

	return orig_p;
}

/*
 * Unregister a kprobe without a scheduler synchronization.
 */
static int __kprobes __unregister_kprobe_top(struct kprobe *p)
{
	struct kprobe *ap, *list_p;

	/* Disable kprobe. This will disarm it if needed. */
	ap = __disable_kprobe(p);
	if (ap == NULL)
		return -EINVAL;

	if (ap == p)
		/*
		 * This probe is an independent(and non-optimized) kprobe
		 * (not an aggrprobe). Remove from the hash list.
		 */
		goto disarmed;

	/* Following process expects this probe is an aggrprobe */
	WARN_ON(!kprobe_aggrprobe(ap));

	if (list_is_singular(&ap->list) && kprobe_disarmed(ap))
		/*
		 * !disarmed could happen if the probe is under delayed
		 * unoptimizing.
		 */
		goto disarmed;
	else {
		/* If disabling probe has special handlers, update aggrprobe */
		if (p->break_handler && !kprobe_gone(p))
			ap->break_handler = NULL;
		if (p->post_handler && !kprobe_gone(p)) {
			list_for_each_entry_rcu(list_p, &ap->list, list) {
				if ((list_p != p) && (list_p->post_handler))
					goto noclean;
			}
			ap->post_handler = NULL;
		}
noclean:
		/*
		 * Remove from the aggrprobe: this path will do nothing in
		 * __unregister_kprobe_bottom().
		 */
		list_del_rcu(&p->list);
		if (!kprobe_disabled(ap) && !kprobes_all_disarmed)
			/*
			 * Try to optimize this probe again, because post
			 * handler may have been changed.
			 */
			optimize_kprobe(ap);
	}
	return 0;

disarmed:
	BUG_ON(!kprobe_disarmed(ap));
	hlist_del_rcu(&ap->hlist);
	return 0;
}

static void __kprobes __unregister_kprobe_bottom(struct kprobe *p)
{
	struct kprobe *ap;

	if (list_empty(&p->list))
		/* This is an independent kprobe */
		arch_remove_kprobe(p);
	else if (list_is_singular(&p->list)) {
		/* This probe is the last child of aggrprobe */
		ap = list_entry(p->list.next, struct kprobe, list);
		list_del(&p->list);
		free_aggr_kprobe(ap);
	}
	/* Otherwise, do nothing. */
}

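/*
 * Batch registration is all-or-nothing: if any probe fails to register,
 * every probe already registered by this call is unregistered again.
 */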
int __kprobes register_kprobes(struct kprobe **kps, int num)
{
	int i, ret = 0;

	if (num <= 0)
		return -EINVAL;
	for (i = 0; i < num; i++) {
		ret = register_kprobe(kps[i]);
		if (ret < 0) {
			if (i > 0)
				unregister_kprobes(kps, i);
			break;
		}
	}
	return ret;
}
EXPORT_SYMBOL_GPL(register_kprobes);

void __kprobes unregister_kprobe(struct kprobe *p)
{
	unregister_kprobes(&p, 1);
}
EXPORT_SYMBOL_GPL(unregister_kprobe);

void __kprobes unregister_kprobes(struct kprobe **kps, int num)
{
	int i;

	if (num <= 0)
		return;
	mutex_lock(&kprobe_mutex);
	for (i = 0; i < num; i++)
		if (__unregister_kprobe_top(kps[i]) < 0)
			kps[i]->addr = NULL;
	mutex_unlock(&kprobe_mutex);

	synchronize_sched();
	for (i = 0; i < num; i++)
		if (kps[i]->addr)
			__unregister_kprobe_bottom(kps[i]);
}
EXPORT_SYMBOL_GPL(unregister_kprobes);

static struct notifier_block kprobe_exceptions_nb = {
	.notifier_call = kprobe_exceptions_notify,
	.priority = 0x7fffffff
};

unsigned long __weak arch_deref_entry_point(void *entry)
{
	return (unsigned long)entry;
}

int __kprobes register_jprobes(struct jprobe **jps, int num)
{
	struct jprobe *jp;
	int ret = 0, i;

	if (num <= 0)
		return -EINVAL;
	for (i = 0; i < num; i++) {
		unsigned long addr, offset;
		jp = jps[i];
		addr = arch_deref_entry_point(jp->entry);

		/* Verify probepoint is a function entry point */
		if (kallsyms_lookup_size_offset(addr, NULL, &offset) &&
		    offset == 0) {
			jp->kp.pre_handler = setjmp_pre_handler;
			jp->kp.break_handler = longjmp_break_handler;
			ret = register_kprobe(&jp->kp);
		} else
			ret = -EINVAL;

		if (ret < 0) {
			if (i > 0)
				unregister_jprobes(jps, i);
			break;
		}
	}
	return ret;
}
EXPORT_SYMBOL_GPL(register_jprobes);

int __kprobes register_jprobe(struct jprobe *jp)
{
	return register_jprobes(&jp, 1);
}
EXPORT_SYMBOL_GPL(register_jprobe);

void __kprobes unregister_jprobe(struct jprobe *jp)
{
	unregister_jprobes(&jp, 1);
}
EXPORT_SYMBOL_GPL(unregister_jprobe);

void __kprobes unregister_jprobes(struct jprobe **jps, int num)
{
	int i;

	if (num <= 0)
		return;
	mutex_lock(&kprobe_mutex);
	for (i = 0; i < num; i++)
		if (__unregister_kprobe_top(&jps[i]->kp) < 0)
			jps[i]->kp.addr = NULL;
	mutex_unlock(&kprobe_mutex);

	synchronize_sched();
	for (i = 0; i < num; i++) {
		if (jps[i]->kp.addr)
			__unregister_kprobe_bottom(&jps[i]->kp);
	}
}
EXPORT_SYMBOL_GPL(unregister_jprobes);

#ifdef CONFIG_KRETPROBES
/*
 * This kprobe pre_handler is registered with every kretprobe. When probe
 * hits it will set up the return probe.
 */
static int __kprobes pre_handler_kretprobe(struct kprobe *p,
					   struct pt_regs *regs)
{
	struct kretprobe *rp = container_of(p, struct kretprobe, kp);
	unsigned long hash, flags = 0;
	struct kretprobe_instance *ri;

	/* TODO: consider to only swap the RA after the last pre_handler fired */
	hash = hash_ptr(current, KPROBE_HASH_BITS);
	raw_spin_lock_irqsave(&rp->lock, flags);
	if (!hlist_empty(&rp->free_instances)) {
		ri = hlist_entry(rp->free_instances.first,
				struct kretprobe_instance, hlist);
		hlist_del(&ri->hlist);
		raw_spin_unlock_irqrestore(&rp->lock, flags);

		ri->rp = rp;
		ri->task = current;

		if (rp->entry_handler && rp->entry_handler(ri, regs)) {
			raw_spin_lock_irqsave(&rp->lock, flags);
			hlist_add_head(&ri->hlist, &rp->free_instances);
			raw_spin_unlock_irqrestore(&rp->lock, flags);
			return 0;
		}

		arch_prepare_kretprobe(ri, regs);

		INIT_HLIST_NODE(&ri->hlist);
		kretprobe_table_lock(hash, &flags);
		hlist_add_head(&ri->hlist, &kretprobe_inst_table[hash]);
		kretprobe_table_unlock(hash, &flags);
	} else {
		rp->nmissed++;
		raw_spin_unlock_irqrestore(&rp->lock, flags);
	}
	return 0;
}

int __kprobes register_kretprobe(struct kretprobe *rp)
{
	int ret = 0;
	struct kretprobe_instance *inst;
	int i;
	void *addr;

	if (kretprobe_blacklist_size) {
		addr = kprobe_addr(&rp->kp);
		if (IS_ERR(addr))
			return PTR_ERR(addr);

		for (i = 0; kretprobe_blacklist[i].name != NULL; i++) {
			if (kretprobe_blacklist[i].addr == addr)
				return -EINVAL;
		}
	}

	rp->kp.pre_handler = pre_handler_kretprobe;
	rp->kp.post_handler = NULL;
	rp->kp.fault_handler = NULL;
	rp->kp.break_handler = NULL;

	/* Pre-allocate memory for max kretprobe instances */
	if (rp->maxactive <= 0) {
#ifdef CONFIG_PREEMPT
		rp->maxactive = max_t(unsigned int, 10, 2*num_possible_cpus());
#else
		rp->maxactive = num_possible_cpus();
#endif
	}
	raw_spin_lock_init(&rp->lock);
	INIT_HLIST_HEAD(&rp->free_instances);
	for (i = 0; i < rp->maxactive; i++) {
		inst = kmalloc(sizeof(struct kretprobe_instance) +
			       rp->data_size, GFP_KERNEL);
		if (inst == NULL) {
			free_rp_inst(rp);
			return -ENOMEM;
		}
		INIT_HLIST_NODE(&inst->hlist);
		hlist_add_head(&inst->hlist, &rp->free_instances);
	}

	rp->nmissed = 0;
	/* Establish function entry probe point */
	ret = register_kprobe(&rp->kp);
	if (ret != 0)
		free_rp_inst(rp);
	return ret;
}
EXPORT_SYMBOL_GPL(register_kretprobe);

int __kprobes register_kretprobes(struct kretprobe **rps, int num)
{
	int ret = 0, i;

	if (num <= 0)
		return -EINVAL;
	for (i = 0; i < num; i++) {
		ret = register_kretprobe(rps[i]);
		if (ret < 0) {
			if (i > 0)
				unregister_kretprobes(rps, i);
			break;
		}
	}
	return ret;
}
EXPORT_SYMBOL_GPL(register_kretprobes);

void __kprobes unregister_kretprobe(struct kretprobe *rp)
{
	unregister_kretprobes(&rp, 1);
}
EXPORT_SYMBOL_GPL(unregister_kretprobe);

void __kprobes unregister_kretprobes(struct kretprobe **rps, int num)
{
	int i;

	if (num <= 0)
		return;
	mutex_lock(&kprobe_mutex);
	for (i = 0; i < num; i++)
		if (__unregister_kprobe_top(&rps[i]->kp) < 0)
			rps[i]->kp.addr = NULL;
	mutex_unlock(&kprobe_mutex);

	synchronize_sched();
	for (i = 0; i < num; i++) {
		if (rps[i]->kp.addr) {
			__unregister_kprobe_bottom(&rps[i]->kp);
			cleanup_rp_inst(rps[i]);
		}
	}
}
EXPORT_SYMBOL_GPL(unregister_kretprobes);

#else /* !CONFIG_KRETPROBES */
int __kprobes register_kretprobe(struct kretprobe *rp)
{
	return -ENOSYS;
}
EXPORT_SYMBOL_GPL(register_kretprobe);

int __kprobes register_kretprobes(struct kretprobe **rps, int num)
{
	return -ENOSYS;
}
EXPORT_SYMBOL_GPL(register_kretprobes);

void __kprobes unregister_kretprobe(struct kretprobe *rp)
{
}
EXPORT_SYMBOL_GPL(unregister_kretprobe);

void __kprobes unregister_kretprobes(struct kretprobe **rps, int num)
{
}
EXPORT_SYMBOL_GPL(unregister_kretprobes);

static int __kprobes pre_handler_kretprobe(struct kprobe *p,
					   struct pt_regs *regs)
{
	return 0;
}

#endif /* CONFIG_KRETPROBES */

/* Set the kprobe gone and remove its instruction buffer. */
static void __kprobes kill_kprobe(struct kprobe *p)
{
	struct kprobe *kp;

	p->flags |= KPROBE_FLAG_GONE;
	if (kprobe_aggrprobe(p)) {
		/*
		 * If this is an aggr_kprobe, we have to list all the
		 * chained probes and mark them GONE.
		 */
		list_for_each_entry_rcu(kp, &p->list, list)
			kp->flags |= KPROBE_FLAG_GONE;
		p->post_handler = NULL;
		p->break_handler = NULL;
		kill_optimized_kprobe(p);
	}
	/*
	 * Here, we can remove insn_slot safely, because no thread calls
	 * the original probed function (which will be freed soon) any more.
	 */
	arch_remove_kprobe(p);
}

/* Disable one kprobe */
int __kprobes disable_kprobe(struct kprobe *kp)
{
	int ret = 0;

	mutex_lock(&kprobe_mutex);

	/* Disable this kprobe */
	if (__disable_kprobe(kp) == NULL)
		ret = -EINVAL;

	mutex_unlock(&kprobe_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(disable_kprobe);

/* Enable one kprobe */
int __kprobes enable_kprobe(struct kprobe *kp)
{
	int ret = 0;
	struct kprobe *p;

	mutex_lock(&kprobe_mutex);

	/* Check whether specified probe is valid. */
	p = __get_valid_kprobe(kp);
	if (unlikely(p == NULL)) {
		ret = -EINVAL;
		goto out;
	}

	if (kprobe_gone(kp)) {
		/* This kprobe has gone, we couldn't enable it. */
		ret = -EINVAL;
		goto out;
	}

	if (p != kp)
		kp->flags &= ~KPROBE_FLAG_DISABLED;

	if (!kprobes_all_disarmed && kprobe_disabled(p)) {
		p->flags &= ~KPROBE_FLAG_DISABLED;
		arm_kprobe(p);
	}
out:
	mutex_unlock(&kprobe_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(enable_kprobe);

void __kprobes dump_kprobe(struct kprobe *kp)
{
	printk(KERN_WARNING "Dumping kprobe:\n");
	printk(KERN_WARNING "Name: %s\nAddress: %p\nOffset: %x\n",
	       kp->symbol_name, kp->addr, kp->offset);
}

/* Module notifier call back, checking kprobes on the module */
static int __kprobes kprobes_module_callback(struct notifier_block *nb,
					     unsigned long val, void *data)
{
	struct module *mod = data;
	struct hlist_head *head;
	struct kprobe *p;
	unsigned int i;
	int checkcore = (val == MODULE_STATE_GOING);

	if (val != MODULE_STATE_GOING && val != MODULE_STATE_LIVE)
		return NOTIFY_DONE;

	/*
	 * When MODULE_STATE_GOING was notified, both of module .text and
	 * .init.text sections would be freed. When MODULE_STATE_LIVE was
	 * notified, only .init.text section would be freed. We need to
	 * disable kprobes which have been inserted in the sections.
	 */
	mutex_lock(&kprobe_mutex);
	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
		head = &kprobe_table[i];
		hlist_for_each_entry_rcu(p, head, hlist)
			if (within_module_init((unsigned long)p->addr, mod) ||
			    (checkcore &&
			     within_module_core((unsigned long)p->addr, mod))) {
				/*
				 * The vaddr this probe is installed will soon
				 * be vfreed but not synced to disk. Hence,
				 * disarming the breakpoint isn't needed.
				 */
				kill_kprobe(p);
			}
	}
	mutex_unlock(&kprobe_mutex);
	return NOTIFY_DONE;
}

static struct notifier_block kprobe_module_nb = {
	.notifier_call = kprobes_module_callback,
	.priority = 0
};

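/*
 * Runs once at boot (module_init): seeds the hash tables and bucket
 * locks, resolves both blacklists via kallsyms, and registers the die
 * and module notifiers.
 */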
static int __init init_kprobes(void)
{
	int i, err = 0;
	unsigned long offset = 0, size = 0;
	char *modname, namebuf[128];
	const char *symbol_name;
	void *addr;
	struct kprobe_blackpoint *kb;

	/* FIXME allocate the probe table, currently defined statically */
	/* initialize all list heads */
	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
		INIT_HLIST_HEAD(&kprobe_table[i]);
		INIT_HLIST_HEAD(&kretprobe_inst_table[i]);
		raw_spin_lock_init(&(kretprobe_table_locks[i].lock));
	}

	/*
	 * Lookup and populate the kprobe_blacklist.
	 *
	 * Unlike the kretprobe blacklist, we'll need to determine
	 * the range of addresses that belong to the said functions,
	 * since a kprobe need not necessarily be at the beginning
	 * of a function.
	 */
	for (kb = kprobe_blacklist; kb->name != NULL; kb++) {
		kprobe_lookup_name(kb->name, addr);
		if (!addr)
			continue;

		kb->start_addr = (unsigned long)addr;
		symbol_name = kallsyms_lookup(kb->start_addr,
				&size, &offset, &modname, namebuf);
		if (!symbol_name)
			kb->range = 0;
		else
			kb->range = size;
	}

	if (kretprobe_blacklist_size) {
		/* lookup the function address from its name */
		for (i = 0; kretprobe_blacklist[i].name != NULL; i++) {
			kprobe_lookup_name(kretprobe_blacklist[i].name,
					   kretprobe_blacklist[i].addr);
			if (!kretprobe_blacklist[i].addr)
				printk("kretprobe: lookup failed: %s\n",
				       kretprobe_blacklist[i].name);
		}
	}

#if defined(CONFIG_OPTPROBES)
#if defined(__ARCH_WANT_KPROBES_INSN_SLOT)
	/* Init kprobe_optinsn_slots */
	kprobe_optinsn_slots.insn_size = MAX_OPTINSN_SIZE;
#endif
	/* By default, kprobes can be optimized */
	kprobes_allow_optimization = true;
#endif

	/* By default, kprobes are armed */
	kprobes_all_disarmed = false;

	err = arch_init_kprobes();
	if (!err)
		err = register_die_notifier(&kprobe_exceptions_nb);
	if (!err)
		err = register_module_notifier(&kprobe_module_nb);

	kprobes_initialized = (err == 0);

	if (!err)
		init_test_probes();
	return err;
}

#ifdef CONFIG_DEBUG_FS
static void __kprobes report_probe(struct seq_file *pi, struct kprobe *p,
		const char *sym, int offset, char *modname, struct kprobe *pp)
{
	char *kprobe_type;

	if (p->pre_handler == pre_handler_kretprobe)
		kprobe_type = "r";
	else if (p->pre_handler == setjmp_pre_handler)
		kprobe_type = "j";
	else
		kprobe_type = "k";

	if (sym)
		seq_printf(pi, "%p %s %s+0x%x %s ",
			p->addr, kprobe_type, sym, offset,
			(modname ? modname : " "));
	else
		seq_printf(pi, "%p %s %p ",
			p->addr, kprobe_type, p->addr);

	if (!pp)
		pp = p;
	seq_printf(pi, "%s%s%s%s\n",
		(kprobe_gone(p) ? "[GONE]" : ""),
		((kprobe_disabled(p) && !kprobe_gone(p)) ? "[DISABLED]" : ""),
		(kprobe_optimized(pp) ? "[OPTIMIZED]" : ""),
		(kprobe_ftrace(pp) ? "[FTRACE]" : ""));
}

static void __kprobes *kprobe_seq_start(struct seq_file *f, loff_t *pos)
{
	return (*pos < KPROBE_TABLE_SIZE) ? pos : NULL;
}

static void __kprobes *kprobe_seq_next(struct seq_file *f, void *v, loff_t *pos)
{
	(*pos)++;
	if (*pos >= KPROBE_TABLE_SIZE)
		return NULL;
	return pos;
}

static void __kprobes kprobe_seq_stop(struct seq_file *f, void *v)
{
	/* Nothing to do */
}

static int __kprobes show_kprobe_addr(struct seq_file *pi, void *v)
{
	struct hlist_head *head;
	struct kprobe *p, *kp;
	const char *sym = NULL;
	unsigned int i = *(loff_t *) v;
	unsigned long offset = 0;
	char *modname, namebuf[128];

	head = &kprobe_table[i];
	preempt_disable();
	hlist_for_each_entry_rcu(p, head, hlist) {
		sym = kallsyms_lookup((unsigned long)p->addr, NULL,
					&offset, &modname, namebuf);
		if (kprobe_aggrprobe(p)) {
			list_for_each_entry_rcu(kp, &p->list, list)
				report_probe(pi, kp, sym, offset, modname, p);
		} else
			report_probe(pi, p, sym, offset, modname, NULL);
	}
	preempt_enable();
	return 0;
}

static const struct seq_operations kprobes_seq_ops = {
	.start = kprobe_seq_start,
	.next  = kprobe_seq_next,
	.stop  = kprobe_seq_stop,
	.show  = show_kprobe_addr
};

static int __kprobes kprobes_open(struct inode *inode, struct file *filp)
{
	return seq_open(filp, &kprobes_seq_ops);
}

static const struct file_operations debugfs_kprobes_operations = {
	.open		= kprobes_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

static void __kprobes arm_all_kprobes(void)
{
	struct hlist_head *head;
	struct kprobe *p;
	unsigned int i;

	mutex_lock(&kprobe_mutex);

	/* If kprobes are armed, just return */
	if (!kprobes_all_disarmed)
		goto already_enabled;

	/* Arming kprobes doesn't optimize kprobe itself */
	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
		head = &kprobe_table[i];
		hlist_for_each_entry_rcu(p, head, hlist)
			if (!kprobe_disabled(p))
				arm_kprobe(p);
	}

	kprobes_all_disarmed = false;
	printk(KERN_INFO "Kprobes globally enabled\n");

already_enabled:
	mutex_unlock(&kprobe_mutex);
	return;
}

static void __kprobes disarm_all_kprobes(void)
{
	struct hlist_head *head;
	struct kprobe *p;
	unsigned int i;

	mutex_lock(&kprobe_mutex);

	/* If kprobes are already disarmed, just return */
	if (kprobes_all_disarmed) {
		mutex_unlock(&kprobe_mutex);
		return;
	}

	kprobes_all_disarmed = true;
	printk(KERN_INFO "Kprobes globally disabled\n");

	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
		head = &kprobe_table[i];
		hlist_for_each_entry_rcu(p, head, hlist) {
			if (!arch_trampoline_kprobe(p) && !kprobe_disabled(p))
				disarm_kprobe(p, false);
		}
	}
	mutex_unlock(&kprobe_mutex);

	/* Wait for disarming all kprobes by optimizer */
	wait_for_kprobe_optimizer();
}

/*
 * XXX: The debugfs bool file interface doesn't allow for callbacks
 * when the bool state is switched. We can reuse that facility when
 * available
 */
static ssize_t read_enabled_file_bool(struct file *file,
	       char __user *user_buf, size_t count, loff_t *ppos)
{
	char buf[3];

	if (!kprobes_all_disarmed)
		buf[0] = '1';
	else
		buf[0] = '0';
	buf[1] = '\n';
	buf[2] = 0x00;
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t write_enabled_file_bool(struct file *file,
	       const char __user *user_buf, size_t count, loff_t *ppos)
{
	char buf[32];
	size_t buf_size;

	buf_size = min(count, (sizeof(buf)-1));
	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	switch (buf[0]) {
	case 'y':
	case 'Y':
	case '1':
		arm_all_kprobes();
		break;
	case 'n':
	case 'N':
	case '0':
		disarm_all_kprobes();
		break;
	default:
		return -EINVAL;
	}

	return count;
}

static const struct file_operations fops_kp = {
	.read	= read_enabled_file_bool,
	.write	= write_enabled_file_bool,
	.llseek	= default_llseek,
};

static int __kprobes debugfs_kprobe_init(void)
{
	struct dentry *dir, *file;
	unsigned int value = 1;

	dir = debugfs_create_dir("kprobes", NULL);
	if (!dir)
		return -ENOMEM;

	file = debugfs_create_file("list", 0444, dir, NULL,
				&debugfs_kprobes_operations);
	if (!file) {
		debugfs_remove(dir);
		return -ENOMEM;
	}

	file = debugfs_create_file("enabled", 0600, dir,
					&value, &fops_kp);
	if (!file) {
		debugfs_remove(dir);
		return -ENOMEM;
	}

	return 0;
}

late_initcall(debugfs_kprobe_init);
#endif /* CONFIG_DEBUG_FS */

module_init(init_kprobes);

/* defined in arch/.../kernel/kprobes.c */
EXPORT_SYMBOL_GPL(jprobe_return);