/*
 *  Kernel Probes (KProbes)
 *  kernel/kprobes.c
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * Copyright (C) IBM Corporation, 2002, 2004
 */
#include <linux/kprobes.h>
#include <linux/hash.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/stddef.h>
#include <linux/export.h>
#include <linux/moduleloader.h>
#include <linux/kallsyms.h>
#include <linux/freezer.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/sysctl.h>
#include <linux/kdebug.h>
#include <linux/memory.h>
#include <linux/ftrace.h>
#include <linux/cpu.h>
#include <linux/jump_label.h>

#include <asm/sections.h>
#include <asm/cacheflush.h>
#include <asm/errno.h>
#include <linux/uaccess.h>

#define KPROBE_HASH_BITS 6
#define KPROBE_TABLE_SIZE (1 << KPROBE_HASH_BITS)
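
/*
 * Probes are hashed by address into a 2^6 == 64 bucket table;
 * kretprobe instances are hashed by the probed task instead, so that
 * kprobe_flush_task() can quickly find the instances owned by an
 * exiting task.
 */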

static int kprobes_initialized;
static struct hlist_head kprobe_table[KPROBE_TABLE_SIZE];
static struct hlist_head kretprobe_inst_table[KPROBE_TABLE_SIZE];

/* NOTE: change this value only with kprobe_mutex held */
static bool kprobes_all_disarmed;

/* This protects kprobe_table and optimizing_kprobe_top */
static DEFINE_MUTEX(kprobe_mutex);
static DEFINE_PER_CPU(struct kprobe *, kprobe_instance) = NULL;
static struct {
	raw_spinlock_t lock ____cacheline_aligned_in_smp;
} kretprobe_table_locks[KPROBE_TABLE_SIZE];

kprobe_opcode_t * __weak kprobe_lookup_name(const char *name,
					unsigned int __unused)
{
	return ((kprobe_opcode_t *)(kallsyms_lookup_name(name)));
}

static raw_spinlock_t *kretprobe_table_lock_ptr(unsigned long hash)
{
	return &(kretprobe_table_locks[hash].lock);
}

/* Blacklist -- list of struct kprobe_blacklist_entry */
static LIST_HEAD(kprobe_blacklist);

#ifdef __ARCH_WANT_KPROBES_INSN_SLOT
/*
 * kprobe->ainsn.insn points to the copy of the instruction to be
 * single-stepped. x86_64, POWER4 and above have no-exec support and
 * stepping on the instruction on a vmalloced/kmalloced/data page
 * is a recipe for disaster
 */
struct kprobe_insn_page {
	struct list_head list;
	kprobe_opcode_t *insns;		/* Page of instruction slots */
	struct kprobe_insn_cache *cache;
	int nused;
	int ngarbage;
	char slot_used[];
};

#define KPROBE_INSN_PAGE_SIZE(slots)			\
	(offsetof(struct kprobe_insn_page, slot_used) +	\
	 (sizeof(char) * (slots)))

static int slots_per_page(struct kprobe_insn_cache *c)
{
	return PAGE_SIZE/(c->insn_size * sizeof(kprobe_opcode_t));
}

enum kprobe_slot_state {
	SLOT_CLEAN = 0,
	SLOT_DIRTY = 1,
	SLOT_USED = 2,
};

static void *alloc_insn_page(void)
{
	return module_alloc(PAGE_SIZE);
}

void __weak free_insn_page(void *page)
{
	module_memfree(page);
}

struct kprobe_insn_cache kprobe_insn_slots = {
	.mutex = __MUTEX_INITIALIZER(kprobe_insn_slots.mutex),
	.alloc = alloc_insn_page,
	.free = free_insn_page,
	.pages = LIST_HEAD_INIT(kprobe_insn_slots.pages),
	.insn_size = MAX_INSN_SIZE,
	.nr_garbage = 0,
};
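
/*
 * Typical slot lifecycle (illustrative): arch code takes a slot via
 * get_insn_slot() (a wrapper for __get_insn_slot(&kprobe_insn_slots)),
 * copies the probed instruction there for single-stepping, and later
 * hands it back through free_insn_slot(), which marks the slot dirty
 * so collect_garbage_slots() can reclaim it.
 */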
static int collect_garbage_slots(struct kprobe_insn_cache *c);

/**
 * __get_insn_slot() - Find a slot on an executable page for an instruction.
 * We allocate an executable page if there's no room on existing ones.
 */
kprobe_opcode_t *__get_insn_slot(struct kprobe_insn_cache *c)
{
	struct kprobe_insn_page *kip;
	kprobe_opcode_t *slot = NULL;

	/* Since the slot array is not protected by rcu, we need a mutex */
	mutex_lock(&c->mutex);
 retry:
	rcu_read_lock();
	list_for_each_entry_rcu(kip, &c->pages, list) {
		if (kip->nused < slots_per_page(c)) {
			int i;
			for (i = 0; i < slots_per_page(c); i++) {
				if (kip->slot_used[i] == SLOT_CLEAN) {
					kip->slot_used[i] = SLOT_USED;
					kip->nused++;
					slot = kip->insns + (i * c->insn_size);
					rcu_read_unlock();
					goto out;
				}
			}
			/* kip->nused is broken. Fix it. */
			kip->nused = slots_per_page(c);
			WARN_ON(1);
		}
	}
	rcu_read_unlock();

	/* If there are any garbage slots, collect it and try again. */
	if (c->nr_garbage && collect_garbage_slots(c) == 0)
		goto retry;

	/* All out of space.  Need to allocate a new page. */
	kip = kmalloc(KPROBE_INSN_PAGE_SIZE(slots_per_page(c)), GFP_KERNEL);
	if (!kip)
		goto out;

	/*
	 * Use module_alloc so this page is within +/- 2GB of where the
	 * kernel image and loaded module images reside. This is required
	 * so x86_64 can correctly handle the %rip-relative fixups.
	 */
	kip->insns = c->alloc();
	if (!kip->insns) {
		kfree(kip);
		goto out;
	}
	INIT_LIST_HEAD(&kip->list);
	memset(kip->slot_used, SLOT_CLEAN, slots_per_page(c));
	kip->slot_used[0] = SLOT_USED;
	kip->nused = 1;
	kip->ngarbage = 0;
	kip->cache = c;
	list_add_rcu(&kip->list, &c->pages);
	slot = kip->insns;
out:
	mutex_unlock(&c->mutex);
	return slot;
}

/* Return 1 if all garbages are collected, otherwise 0. */
static int collect_one_slot(struct kprobe_insn_page *kip, int idx)
{
	kip->slot_used[idx] = SLOT_CLEAN;
	kip->nused--;
	if (kip->nused == 0) {
		/*
		 * Page is no longer in use.  Free it unless
		 * it's the last one.  We keep the last one
		 * so as not to have to set it up again the
		 * next time somebody inserts a probe.
		 */
		if (!list_is_singular(&kip->list)) {
			list_del_rcu(&kip->list);
			synchronize_rcu();
			kip->cache->free(kip->insns);
			kfree(kip);
		}
		return 1;
	}
	return 0;
}

static int collect_garbage_slots(struct kprobe_insn_cache *c)
{
	struct kprobe_insn_page *kip, *next;

	/* Ensure no-one is interrupted on the garbages */
	synchronize_sched();

	list_for_each_entry_safe(kip, next, &c->pages, list) {
		int i;
		if (kip->ngarbage == 0)
			continue;
		kip->ngarbage = 0;	/* we will collect all garbages */
		for (i = 0; i < slots_per_page(c); i++) {
			if (kip->slot_used[i] == SLOT_DIRTY && collect_one_slot(kip, i))
				break;
		}
	}
	c->nr_garbage = 0;
	return 0;
}

void __free_insn_slot(struct kprobe_insn_cache *c,
		      kprobe_opcode_t *slot, int dirty)
{
	struct kprobe_insn_page *kip;
	long idx;

	mutex_lock(&c->mutex);
	rcu_read_lock();
	list_for_each_entry_rcu(kip, &c->pages, list) {
		idx = ((long)slot - (long)kip->insns) /
			(c->insn_size * sizeof(kprobe_opcode_t));
		if (idx >= 0 && idx < slots_per_page(c))
			goto out;
	}
	/* Could not find this slot. */
	WARN_ON(1);
	kip = NULL;
out:
	rcu_read_unlock();
	/* Mark and sweep: this may sleep */
	if (kip) {
		/* Check double free */
		WARN_ON(kip->slot_used[idx] != SLOT_USED);
		if (dirty) {
			kip->slot_used[idx] = SLOT_DIRTY;
			kip->ngarbage++;
			if (++c->nr_garbage > slots_per_page(c))
				collect_garbage_slots(c);
		} else {
			collect_one_slot(kip, idx);
		}
	}
	mutex_unlock(&c->mutex);
}

/*
 * Check given address is on the page of kprobe instruction slots.
 * This will be used for checking whether the address on a stack
 * is on a text area or not.
 */
bool __is_insn_slot_addr(struct kprobe_insn_cache *c, unsigned long addr)
{
	struct kprobe_insn_page *kip;
	bool ret = false;

	rcu_read_lock();
	list_for_each_entry_rcu(kip, &c->pages, list) {
		if (addr >= (unsigned long)kip->insns &&
		    addr < (unsigned long)kip->insns + PAGE_SIZE) {
			ret = true;
			break;
		}
	}
	rcu_read_unlock();

	return ret;
}

#ifdef CONFIG_OPTPROBES
/* For optimized_kprobe buffer */
struct kprobe_insn_cache kprobe_optinsn_slots = {
	.mutex = __MUTEX_INITIALIZER(kprobe_optinsn_slots.mutex),
	.alloc = alloc_insn_page,
	.free = free_insn_page,
	.pages = LIST_HEAD_INIT(kprobe_optinsn_slots.pages),
	/* .insn_size is initialized later */
	.nr_garbage = 0,
};
#endif /* CONFIG_OPTPROBES */
#endif /* __ARCH_WANT_KPROBES_INSN_SLOT */

/* We have preemption disabled.. so it is safe to use __ versions */
static inline void set_kprobe_instance(struct kprobe *kp)
{
	__this_cpu_write(kprobe_instance, kp);
}

static inline void reset_kprobe_instance(void)
{
	__this_cpu_write(kprobe_instance, NULL);
}

/*
 * This routine is called either:
 *	- under the kprobe_mutex - during kprobe_[un]register()
 *				OR
 *	- with preemption disabled - from arch/xxx/kernel/kprobes.c
 */
struct kprobe *get_kprobe(void *addr)
{
	struct hlist_head *head;
	struct kprobe *p;

	head = &kprobe_table[hash_ptr(addr, KPROBE_HASH_BITS)];
	hlist_for_each_entry_rcu(p, head, hlist) {
		if (p->addr == addr)
			return p;
	}

	return NULL;
}
NOKPROBE_SYMBOL(get_kprobe);

static int aggr_pre_handler(struct kprobe *p, struct pt_regs *regs);

/* Return true if the kprobe is an aggregator */
static inline int kprobe_aggrprobe(struct kprobe *p)
{
	return p->pre_handler == aggr_pre_handler;
}

/* Return true(!0) if the kprobe is unused */
static inline int kprobe_unused(struct kprobe *p)
{
	return kprobe_aggrprobe(p) && kprobe_disabled(p) &&
	       list_empty(&p->list);
}

/*
 * Keep all fields in the kprobe consistent
 */
static inline void copy_kprobe(struct kprobe *ap, struct kprobe *p)
{
	memcpy(&p->opcode, &ap->opcode, sizeof(kprobe_opcode_t));
	memcpy(&p->ainsn, &ap->ainsn, sizeof(struct arch_specific_insn));
}

#ifdef CONFIG_OPTPROBES
/* NOTE: change this value only with kprobe_mutex held */
static bool kprobes_allow_optimization;

/*
 * Call all pre_handler on the list, but ignores its return value.
 * This must be called from arch-dep optimized caller.
 */
void opt_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe *kp;

	list_for_each_entry_rcu(kp, &p->list, list) {
		if (kp->pre_handler && likely(!kprobe_disabled(kp))) {
			set_kprobe_instance(kp);
			kp->pre_handler(kp, regs);
		}
		reset_kprobe_instance();
	}
}
NOKPROBE_SYMBOL(opt_pre_handler);

/* Free optimized instructions and optimized_kprobe */
static void free_aggr_kprobe(struct kprobe *p)
{
	struct optimized_kprobe *op;

	op = container_of(p, struct optimized_kprobe, kp);
	arch_remove_optimized_kprobe(op);
	arch_remove_kprobe(p);
	kfree(op);
}

/* Return true(!0) if the kprobe is ready for optimization. */
static inline int kprobe_optready(struct kprobe *p)
{
	struct optimized_kprobe *op;

	if (kprobe_aggrprobe(p)) {
		op = container_of(p, struct optimized_kprobe, kp);
		return arch_prepared_optinsn(&op->optinsn);
	}

	return 0;
}

/* Return true(!0) if the kprobe is disarmed. Note: p must be on hash list */
static inline int kprobe_disarmed(struct kprobe *p)
{
	struct optimized_kprobe *op;

	/* If kprobe is not aggr/opt probe, just return kprobe is disabled */
	if (!kprobe_aggrprobe(p))
		return kprobe_disabled(p);

	op = container_of(p, struct optimized_kprobe, kp);

	return kprobe_disabled(p) && list_empty(&op->list);
}

/* Return true(!0) if the probe is queued on (un)optimizing lists */
static int kprobe_queued(struct kprobe *p)
{
	struct optimized_kprobe *op;

	if (kprobe_aggrprobe(p)) {
		op = container_of(p, struct optimized_kprobe, kp);
		if (!list_empty(&op->list))
			return 1;
	}
	return 0;
}

/*
 * Return an optimized kprobe whose optimizing code replaces
 * instructions including addr (exclude breakpoint).
 */
static struct kprobe *get_optimized_kprobe(unsigned long addr)
{
	int i;
	struct kprobe *p = NULL;
	struct optimized_kprobe *op;

	/* Don't check i == 0, since that is a breakpoint case. */
	for (i = 1; !p && i < MAX_OPTIMIZED_LENGTH; i++)
		p = get_kprobe((void *)(addr - i));

	if (p && kprobe_optready(p)) {
		op = container_of(p, struct optimized_kprobe, kp);
		if (arch_within_optimized_kprobe(op, addr))
			return p;
	}

	return NULL;
}

/* Optimization staging list, protected by kprobe_mutex */
static LIST_HEAD(optimizing_list);
static LIST_HEAD(unoptimizing_list);
static LIST_HEAD(freeing_list);

static void kprobe_optimizer(struct work_struct *work);
static DECLARE_DELAYED_WORK(optimizing_work, kprobe_optimizer);
#define OPTIMIZE_DELAY 5
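
/*
 * (Un)optimization requests are not applied immediately: they sit on
 * the staging lists above and kprobe_optimizer runs OPTIMIZE_DELAY
 * jiffies later, so bursts of registrations are batched into one
 * code-patching pass.
 */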

/*
 * Optimize (replace a breakpoint with a jump) kprobes listed on
 * optimizing_list.
 */
static void do_optimize_kprobes(void)
{
	/*
	 * The optimization/unoptimization refers online_cpus via
	 * stop_machine() and cpu-hotplug modifies online_cpus.
	 * And same time, text_mutex will be held in cpu-hotplug and here.
	 * This combination can cause a deadlock (cpu-hotplug tries to lock
	 * text_mutex but stop_machine can not be done because online_cpus
	 * has been changed).
	 * To avoid this deadlock, the caller must have locked cpu-hotplug
	 * for preventing cpu-hotplug outside of text_mutex locking.
	 */
	lockdep_assert_cpus_held();

	/* Optimization never be done when disarmed */
	if (kprobes_all_disarmed || !kprobes_allow_optimization ||
	    list_empty(&optimizing_list))
		return;

	mutex_lock(&text_mutex);
	arch_optimize_kprobes(&optimizing_list);
	mutex_unlock(&text_mutex);
}

/*
 * Unoptimize (replace a jump with a breakpoint and remove the breakpoint
 * if need) kprobes listed on unoptimizing_list.
 */
static void do_unoptimize_kprobes(void)
{
	struct optimized_kprobe *op, *tmp;

	/* See comment in do_optimize_kprobes() */
	lockdep_assert_cpus_held();

	/* Unoptimization must be done anytime */
	if (list_empty(&unoptimizing_list))
		return;

	mutex_lock(&text_mutex);
	arch_unoptimize_kprobes(&unoptimizing_list, &freeing_list);
	/* Loop free_list for disarming */
	list_for_each_entry_safe(op, tmp, &freeing_list, list) {
		/* Disarm probes if marked disabled */
		if (kprobe_disabled(&op->kp))
			arch_disarm_kprobe(&op->kp);
		if (kprobe_unused(&op->kp)) {
			/*
			 * Remove unused probes from hash list. After waiting
			 * for synchronization, these probes are reclaimed.
			 * (reclaiming is done by do_free_cleaned_kprobes.)
			 */
			hlist_del_rcu(&op->kp.hlist);
		} else
			list_del_init(&op->list);
	}
	mutex_unlock(&text_mutex);
}

/* Reclaim all kprobes on the free_list */
static void do_free_cleaned_kprobes(void)
{
	struct optimized_kprobe *op, *tmp;

	list_for_each_entry_safe(op, tmp, &freeing_list, list) {
		BUG_ON(!kprobe_unused(&op->kp));
		list_del_init(&op->list);
		free_aggr_kprobe(&op->kp);
	}
}

/* Start optimizer after OPTIMIZE_DELAY passed */
static void kick_kprobe_optimizer(void)
{
	schedule_delayed_work(&optimizing_work, OPTIMIZE_DELAY);
}

/* Kprobe jump optimizer */
static void kprobe_optimizer(struct work_struct *work)
{
	mutex_lock(&kprobe_mutex);
	cpus_read_lock();
	/* Lock modules while optimizing kprobes */
	mutex_lock(&module_mutex);

	/*
	 * Step 1: Unoptimize kprobes and collect cleaned (unused and disarmed)
	 * kprobes before waiting for quiesence period.
	 */
	do_unoptimize_kprobes();

	/*
	 * Step 2: Wait for quiesence period to ensure all running interrupts
	 * are done. Because optprobe may modify multiple instructions
	 * there is a chance that Nth instruction is interrupted. In that
	 * case, running interrupt can return to 2nd-Nth byte of jump
	 * instruction. This wait is for avoiding it.
	 */
	synchronize_sched();

	/* Step 3: Optimize kprobes after quiesence period */
	do_optimize_kprobes();

	/* Step 4: Free cleaned kprobes after quiesence period */
	do_free_cleaned_kprobes();

	mutex_unlock(&module_mutex);
	cpus_read_unlock();
	mutex_unlock(&kprobe_mutex);

	/* Step 5: Kick optimizer again if needed */
	if (!list_empty(&optimizing_list) || !list_empty(&unoptimizing_list))
		kick_kprobe_optimizer();
}

/* Wait for completing optimization and unoptimization */
void wait_for_kprobe_optimizer(void)
{
	mutex_lock(&kprobe_mutex);

	while (!list_empty(&optimizing_list) || !list_empty(&unoptimizing_list)) {
		mutex_unlock(&kprobe_mutex);

		/* this will also make optimizing_work execute immediately */
		flush_delayed_work(&optimizing_work);
		/* @optimizing_work might not have been queued yet, relax */
		cpu_relax();

		mutex_lock(&kprobe_mutex);
	}

	mutex_unlock(&kprobe_mutex);
}

/* Optimize kprobe if p is ready to be optimized */
static void optimize_kprobe(struct kprobe *p)
{
	struct optimized_kprobe *op;

	/* Check if the kprobe is disabled or not ready for optimization. */
	if (!kprobe_optready(p) || !kprobes_allow_optimization ||
	    (kprobe_disabled(p) || kprobes_all_disarmed))
		return;

	/* Both of break_handler and post_handler are not supported. */
	if (p->break_handler || p->post_handler)
		return;

	op = container_of(p, struct optimized_kprobe, kp);

	/* Check there is no other kprobes at the optimized instructions */
	if (arch_check_optimized_kprobe(op) < 0)
		return;

	/* Check if it is already optimized. */
	if (op->kp.flags & KPROBE_FLAG_OPTIMIZED)
		return;
	op->kp.flags |= KPROBE_FLAG_OPTIMIZED;

	if (!list_empty(&op->list))
		/* This is under unoptimizing. Just dequeue the probe */
		list_del_init(&op->list);
	else {
		list_add(&op->list, &optimizing_list);
		kick_kprobe_optimizer();
	}
}

/* Short cut to direct unoptimizing */
static void force_unoptimize_kprobe(struct optimized_kprobe *op)
{
	lockdep_assert_cpus_held();
	arch_unoptimize_kprobe(op);
	if (kprobe_disabled(&op->kp))
		arch_disarm_kprobe(&op->kp);
}

/* Unoptimize a kprobe if p is optimized */
static void unoptimize_kprobe(struct kprobe *p, bool force)
{
	struct optimized_kprobe *op;

	if (!kprobe_aggrprobe(p) || kprobe_disarmed(p))
		return; /* This is not an optprobe nor optimized */

	op = container_of(p, struct optimized_kprobe, kp);
	if (!kprobe_optimized(p)) {
		/* Unoptimized or unoptimizing case */
		if (force && !list_empty(&op->list)) {
			/*
			 * Only if this is unoptimizing kprobe and forced,
			 * forcibly unoptimize it. This is not an easy
			 * operation, so don't do it unless really needed.
			 */
			list_del_init(&op->list);
			force_unoptimize_kprobe(op);
		}
		return;
	}

	op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED;
	if (!list_empty(&op->list)) {
		/* Dequeue from the optimization queue */
		list_del_init(&op->list);
		return;
	}
	/* Optimized kprobe case */
	if (force)
		/* Forcibly update the code: this is a special case */
		force_unoptimize_kprobe(op);
	else {
		list_add(&op->list, &unoptimizing_list);
		kick_kprobe_optimizer();
	}
}

/* Cancel unoptimizing for reusing */
static void reuse_unused_kprobe(struct kprobe *ap)
{
	struct optimized_kprobe *op;

	BUG_ON(!kprobe_unused(ap));
	/*
	 * Unused kprobe MUST be on the way of delayed unoptimizing (means
	 * there is still a relative jump) and disabled.
	 */
	op = container_of(ap, struct optimized_kprobe, kp);
	if (unlikely(list_empty(&op->list)))
		printk(KERN_WARNING "Warning: found a stray unused aggrprobe@%p\n",
		       ap->addr);
	/* Enable the probe again */
	ap->flags &= ~KPROBE_FLAG_DISABLED;
	/* Optimize it again (remove ->break_handler) */
	BUG_ON(!kprobe_optready(ap));
	optimize_kprobe(ap);
}

/* Remove optimized instructions */
static void kill_optimized_kprobe(struct kprobe *p)
{
	struct optimized_kprobe *op;

	op = container_of(p, struct optimized_kprobe, kp);
	if (!list_empty(&op->list))
		/* Dequeue from the (un)optimization queue */
		list_del_init(&op->list);
	op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED;

	if (kprobe_unused(p)) {
		/* Enqueue if it is unused */
		list_add(&op->list, &freeing_list);
		/*
		 * Remove unused probes from the hash list. After waiting
		 * for synchronization, this probe is reclaimed.
		 * (reclaiming is done by do_free_cleaned_kprobes().)
		 */
		hlist_del_rcu(&op->kp.hlist);
	}

	/* Don't touch the code, because it is already freed. */
	arch_remove_optimized_kprobe(op);
}

static inline
void __prepare_optimized_kprobe(struct optimized_kprobe *op, struct kprobe *p)
{
	if (!kprobe_ftrace(p))
		arch_prepare_optimized_kprobe(op, p);
}

/* Try to prepare optimized instructions */
static void prepare_optimized_kprobe(struct kprobe *p)
{
	struct optimized_kprobe *op;

	op = container_of(p, struct optimized_kprobe, kp);
	__prepare_optimized_kprobe(op, p);
}

/* Allocate new optimized_kprobe and try to prepare optimized instructions */
static struct kprobe *alloc_aggr_kprobe(struct kprobe *p)
{
	struct optimized_kprobe *op;

	op = kzalloc(sizeof(struct optimized_kprobe), GFP_KERNEL);
	if (!op)
		return NULL;

	INIT_LIST_HEAD(&op->list);
	op->kp.addr = p->addr;
	__prepare_optimized_kprobe(op, p);

	return &op->kp;
}

static void init_aggr_kprobe(struct kprobe *ap, struct kprobe *p);

/*
 * Prepare an optimized_kprobe and optimize it
 * NOTE: p must be a normal registered kprobe
 */
static void try_to_optimize_kprobe(struct kprobe *p)
{
	struct kprobe *ap;
	struct optimized_kprobe *op;

	/* Impossible to optimize ftrace-based kprobe */
	if (kprobe_ftrace(p))
		return;

	/* For preparing optimization, jump_label_text_reserved() is called */
	cpus_read_lock();
	jump_label_lock();
	mutex_lock(&text_mutex);

	ap = alloc_aggr_kprobe(p);
	if (!ap)
		goto out;

	op = container_of(ap, struct optimized_kprobe, kp);
	if (!arch_prepared_optinsn(&op->optinsn)) {
		/* If failed to setup optimizing, fallback to kprobe */
		arch_remove_optimized_kprobe(op);
		kfree(op);
		goto out;
	}

	init_aggr_kprobe(ap, p);
	optimize_kprobe(ap);	/* This just kicks optimizer thread */

out:
	mutex_unlock(&text_mutex);
	jump_label_unlock();
	cpus_read_unlock();
}

#ifdef CONFIG_SYSCTL
static void optimize_all_kprobes(void)
{
	struct hlist_head *head;
	struct kprobe *p;
	unsigned int i;

	mutex_lock(&kprobe_mutex);
	/* If optimization is already allowed, just return */
	if (kprobes_allow_optimization)
		goto out;

	cpus_read_lock();
	kprobes_allow_optimization = true;
	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
		head = &kprobe_table[i];
		hlist_for_each_entry_rcu(p, head, hlist)
			if (!kprobe_disabled(p))
				optimize_kprobe(p);
	}
	cpus_read_unlock();
	printk(KERN_INFO "Kprobes globally optimized\n");
out:
	mutex_unlock(&kprobe_mutex);
}

static void unoptimize_all_kprobes(void)
{
	struct hlist_head *head;
	struct kprobe *p;
	unsigned int i;

	mutex_lock(&kprobe_mutex);
	/* If optimization is already prohibited, just return */
	if (!kprobes_allow_optimization) {
		mutex_unlock(&kprobe_mutex);
		return;
	}

	cpus_read_lock();
	kprobes_allow_optimization = false;
	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
		head = &kprobe_table[i];
		hlist_for_each_entry_rcu(p, head, hlist) {
			if (!kprobe_disabled(p))
				unoptimize_kprobe(p, false);
		}
	}
	cpus_read_unlock();
	mutex_unlock(&kprobe_mutex);

	/* Wait for unoptimizing completion */
	wait_for_kprobe_optimizer();
	printk(KERN_INFO "Kprobes globally unoptimized\n");
}

static DEFINE_MUTEX(kprobe_sysctl_mutex);
int sysctl_kprobes_optimization;
int proc_kprobes_optimization_handler(struct ctl_table *table, int write,
				      void __user *buffer, size_t *length,
				      loff_t *ppos)
{
	int ret;

	mutex_lock(&kprobe_sysctl_mutex);
	sysctl_kprobes_optimization = kprobes_allow_optimization ? 1 : 0;
	ret = proc_dointvec_minmax(table, write, buffer, length, ppos);

	if (sysctl_kprobes_optimization)
		optimize_all_kprobes();
	else
		unoptimize_all_kprobes();
	mutex_unlock(&kprobe_sysctl_mutex);

	return ret;
}
#endif /* CONFIG_SYSCTL */

/* Put a breakpoint for a probe. Must be locked with text_mutex */
static void __arm_kprobe(struct kprobe *p)
{
	struct kprobe *_p;

	/* Check collision with other optimized kprobes */
	_p = get_optimized_kprobe((unsigned long)p->addr);
	if (unlikely(_p))
		/* Fallback to unoptimized kprobe */
		unoptimize_kprobe(_p, true);

	arch_arm_kprobe(p);
	optimize_kprobe(p);	/* Try to optimize (add kprobe to a list) */
}

/* Remove the breakpoint of a probe. Must be locked with text_mutex */
static void __disarm_kprobe(struct kprobe *p, bool reopt)
{
	struct kprobe *_p;

	/* Try to unoptimize */
	unoptimize_kprobe(p, kprobes_all_disarmed);

	if (!kprobe_queued(p)) {
		arch_disarm_kprobe(p);
		/* If another kprobe was blocked, optimize it. */
		_p = get_optimized_kprobe((unsigned long)p->addr);
		if (unlikely(_p) && reopt)
			optimize_kprobe(_p);
	}
	/* TODO: reoptimize others after unoptimizing this probe */
}

#else /* !CONFIG_OPTPROBES */

#define optimize_kprobe(p)			do {} while (0)
#define unoptimize_kprobe(p, f)			do {} while (0)
#define kill_optimized_kprobe(p)		do {} while (0)
#define prepare_optimized_kprobe(p)		do {} while (0)
#define try_to_optimize_kprobe(p)		do {} while (0)
#define __arm_kprobe(p)				arch_arm_kprobe(p)
#define __disarm_kprobe(p, o)			arch_disarm_kprobe(p)
#define kprobe_disarmed(p)			kprobe_disabled(p)
#define wait_for_kprobe_optimizer()		do {} while (0)

/* There should be no unused kprobes can be reused without optimization */
static void reuse_unused_kprobe(struct kprobe *ap)
{
	printk(KERN_ERR "Error: There should be no unused kprobe here.\n");
	BUG_ON(kprobe_unused(ap));
}

static void free_aggr_kprobe(struct kprobe *p)
{
	arch_remove_kprobe(p);
	kfree(p);
}

static struct kprobe *alloc_aggr_kprobe(struct kprobe *p)
{
	return kzalloc(sizeof(struct kprobe), GFP_KERNEL);
}
#endif /* CONFIG_OPTPROBES */

#ifdef CONFIG_KPROBES_ON_FTRACE
static struct ftrace_ops kprobe_ftrace_ops __read_mostly = {
	.func = kprobe_ftrace_handler,
	.flags = FTRACE_OPS_FL_SAVE_REGS | FTRACE_OPS_FL_IPMODIFY,
};
static int kprobe_ftrace_enabled;

/* Must ensure p->addr is really on ftrace */
static int prepare_kprobe(struct kprobe *p)
{
	if (!kprobe_ftrace(p))
		return arch_prepare_kprobe(p);

	return arch_prepare_kprobe_ftrace(p);
}

/* Caller must lock kprobe_mutex */
static void arm_kprobe_ftrace(struct kprobe *p)
{
	int ret;

	ret = ftrace_set_filter_ip(&kprobe_ftrace_ops,
				   (unsigned long)p->addr, 0, 0);
	WARN(ret < 0, "Failed to arm kprobe-ftrace at %p (%d)\n", p->addr, ret);
	kprobe_ftrace_enabled++;
	if (kprobe_ftrace_enabled == 1) {
		ret = register_ftrace_function(&kprobe_ftrace_ops);
		WARN(ret < 0, "Failed to init kprobe-ftrace (%d)\n", ret);
	}
}

/* Caller must lock kprobe_mutex */
static void disarm_kprobe_ftrace(struct kprobe *p)
{
	int ret;

	kprobe_ftrace_enabled--;
	if (kprobe_ftrace_enabled == 0) {
		ret = unregister_ftrace_function(&kprobe_ftrace_ops);
		WARN(ret < 0, "Failed to unregister kprobe-ftrace (%d)\n", ret);
	}
	ret = ftrace_set_filter_ip(&kprobe_ftrace_ops,
				   (unsigned long)p->addr, 1, 0);
	WARN(ret < 0, "Failed to disarm kprobe-ftrace at %p (%d)\n", p->addr, ret);
}
#else	/* !CONFIG_KPROBES_ON_FTRACE */
#define prepare_kprobe(p)	arch_prepare_kprobe(p)
#define arm_kprobe_ftrace(p)	do {} while (0)
#define disarm_kprobe_ftrace(p)	do {} while (0)
#endif /* CONFIG_KPROBES_ON_FTRACE */

/* Arm a kprobe with text_mutex */
static void arm_kprobe(struct kprobe *kp)
{
	if (unlikely(kprobe_ftrace(kp))) {
		arm_kprobe_ftrace(kp);
		return;
	}
	cpus_read_lock();
	mutex_lock(&text_mutex);
	__arm_kprobe(kp);
	mutex_unlock(&text_mutex);
	cpus_read_unlock();
}

/* Disarm a kprobe with text_mutex */
static void disarm_kprobe(struct kprobe *kp, bool reopt)
{
	if (unlikely(kprobe_ftrace(kp))) {
		disarm_kprobe_ftrace(kp);
		return;
	}

	cpus_read_lock();
	mutex_lock(&text_mutex);
	__disarm_kprobe(kp, reopt);
	mutex_unlock(&text_mutex);
	cpus_read_unlock();
}

/*
 * Aggregate handlers for multiple kprobes support - these handlers
 * take care of invoking the individual kprobe handlers on p->list
 */
static int aggr_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe *kp;

	list_for_each_entry_rcu(kp, &p->list, list) {
		if (kp->pre_handler && likely(!kprobe_disabled(kp))) {
			set_kprobe_instance(kp);
			if (kp->pre_handler(kp, regs))
				return 1;
		}
		reset_kprobe_instance();
	}
	return 0;
}
NOKPROBE_SYMBOL(aggr_pre_handler);

static void aggr_post_handler(struct kprobe *p, struct pt_regs *regs,
			      unsigned long flags)
{
	struct kprobe *kp;

	list_for_each_entry_rcu(kp, &p->list, list) {
		if (kp->post_handler && likely(!kprobe_disabled(kp))) {
			set_kprobe_instance(kp);
			kp->post_handler(kp, regs, flags);
			reset_kprobe_instance();
		}
	}
}
NOKPROBE_SYMBOL(aggr_post_handler);

static int aggr_fault_handler(struct kprobe *p, struct pt_regs *regs,
			      int trapnr)
{
	struct kprobe *cur = __this_cpu_read(kprobe_instance);

	/*
	 * if we faulted "during" the execution of a user specified
	 * probe handler, invoke just that probe's fault handler
	 */
	if (cur && cur->fault_handler) {
		if (cur->fault_handler(cur, regs, trapnr))
			return 1;
	}
	return 0;
}
NOKPROBE_SYMBOL(aggr_fault_handler);

static int aggr_break_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe *cur = __this_cpu_read(kprobe_instance);
	int ret = 0;

	if (cur && cur->break_handler) {
		if (cur->break_handler(cur, regs))
			ret = 1;
	}
	reset_kprobe_instance();
	return ret;
}
NOKPROBE_SYMBOL(aggr_break_handler);

/* Walks the list and increments nmissed count for multiprobe case */
void kprobes_inc_nmissed_count(struct kprobe *p)
{
	struct kprobe *kp;
	if (!kprobe_aggrprobe(p)) {
		p->nmissed++;
	} else {
		list_for_each_entry_rcu(kp, &p->list, list)
			kp->nmissed++;
	}
	return;
}
NOKPROBE_SYMBOL(kprobes_inc_nmissed_count);

void recycle_rp_inst(struct kretprobe_instance *ri,
		     struct hlist_head *head)
{
	struct kretprobe *rp = ri->rp;

	/* remove rp inst off the rprobe_inst_table */
	hlist_del(&ri->hlist);
	INIT_HLIST_NODE(&ri->hlist);
	if (likely(rp)) {
		raw_spin_lock(&rp->lock);
		hlist_add_head(&ri->hlist, &rp->free_instances);
		raw_spin_unlock(&rp->lock);
	} else
		/* Unregistering */
		hlist_add_head(&ri->hlist, head);
}
NOKPROBE_SYMBOL(recycle_rp_inst);

void kretprobe_hash_lock(struct task_struct *tsk,
			 struct hlist_head **head, unsigned long *flags)
__acquires(hlist_lock)
{
	unsigned long hash = hash_ptr(tsk, KPROBE_HASH_BITS);
	raw_spinlock_t *hlist_lock;

	*head = &kretprobe_inst_table[hash];
	hlist_lock = kretprobe_table_lock_ptr(hash);
	raw_spin_lock_irqsave(hlist_lock, *flags);
}
NOKPROBE_SYMBOL(kretprobe_hash_lock);

static void kretprobe_table_lock(unsigned long hash,
				 unsigned long *flags)
__acquires(hlist_lock)
{
	raw_spinlock_t *hlist_lock = kretprobe_table_lock_ptr(hash);
	raw_spin_lock_irqsave(hlist_lock, *flags);
}
NOKPROBE_SYMBOL(kretprobe_table_lock);

void kretprobe_hash_unlock(struct task_struct *tsk,
			   unsigned long *flags)
__releases(hlist_lock)
{
	unsigned long hash = hash_ptr(tsk, KPROBE_HASH_BITS);
	raw_spinlock_t *hlist_lock;

	hlist_lock = kretprobe_table_lock_ptr(hash);
	raw_spin_unlock_irqrestore(hlist_lock, *flags);
}
NOKPROBE_SYMBOL(kretprobe_hash_unlock);

static void kretprobe_table_unlock(unsigned long hash,
				   unsigned long *flags)
__releases(hlist_lock)
{
	raw_spinlock_t *hlist_lock = kretprobe_table_lock_ptr(hash);
	raw_spin_unlock_irqrestore(hlist_lock, *flags);
}
NOKPROBE_SYMBOL(kretprobe_table_unlock);

/*
 * This function is called from finish_task_switch when task tk becomes dead,
 * so that we can recycle any function-return probe instances associated
 * with this task. These left over instances represent probed functions
 * that have been called but will never return.
 */
void kprobe_flush_task(struct task_struct *tk)
{
	struct kretprobe_instance *ri;
	struct hlist_head *head, empty_rp;
	struct hlist_node *tmp;
	unsigned long hash, flags = 0;

	if (unlikely(!kprobes_initialized))
		/* Early boot.  kretprobe_table_locks not yet initialized. */
		return;

	INIT_HLIST_HEAD(&empty_rp);
	hash = hash_ptr(tk, KPROBE_HASH_BITS);
	head = &kretprobe_inst_table[hash];
	kretprobe_table_lock(hash, &flags);
	hlist_for_each_entry_safe(ri, tmp, head, hlist) {
		if (ri->task == tk)
			recycle_rp_inst(ri, &empty_rp);
	}
	kretprobe_table_unlock(hash, &flags);
	hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
		hlist_del(&ri->hlist);
		kfree(ri);
	}
}
NOKPROBE_SYMBOL(kprobe_flush_task);

static inline void free_rp_inst(struct kretprobe *rp)
{
	struct kretprobe_instance *ri;
	struct hlist_node *next;

	hlist_for_each_entry_safe(ri, next, &rp->free_instances, hlist) {
		hlist_del(&ri->hlist);
		kfree(ri);
	}
}

static void cleanup_rp_inst(struct kretprobe *rp)
{
	unsigned long flags, hash;
	struct kretprobe_instance *ri;
	struct hlist_node *next;
	struct hlist_head *head;

	/* No race here */
	for (hash = 0; hash < KPROBE_TABLE_SIZE; hash++) {
		kretprobe_table_lock(hash, &flags);
		head = &kretprobe_inst_table[hash];
		hlist_for_each_entry_safe(ri, next, head, hlist) {
			if (ri->rp == rp)
				ri->rp = NULL;
		}
		kretprobe_table_unlock(hash, &flags);
	}
	free_rp_inst(rp);
}
NOKPROBE_SYMBOL(cleanup_rp_inst);

/*
 * Add the new probe to ap->list. Fail if this is the
 * second jprobe at the address - two jprobes can't coexist
 */
static int add_new_kprobe(struct kprobe *ap, struct kprobe *p)
{
	BUG_ON(kprobe_gone(ap) || kprobe_gone(p));

	if (p->break_handler || p->post_handler)
		unoptimize_kprobe(ap, true);	/* Fall back to normal kprobe */

	if (p->break_handler) {
		if (ap->break_handler)
			return -EEXIST;
		list_add_tail_rcu(&p->list, &ap->list);
		ap->break_handler = aggr_break_handler;
	} else
		list_add_rcu(&p->list, &ap->list);
	if (p->post_handler && !ap->post_handler)
		ap->post_handler = aggr_post_handler;

	return 0;
}

/*
 * Fill in the required fields of the "manager kprobe". Replace the
 * earlier kprobe in the hlist with the manager kprobe
 */
static void init_aggr_kprobe(struct kprobe *ap, struct kprobe *p)
{
	/* Copy p's insn slot to ap */
	copy_kprobe(p, ap);
	flush_insn_slot(ap);
	ap->addr = p->addr;
	ap->flags = p->flags & ~KPROBE_FLAG_OPTIMIZED;
	ap->pre_handler = aggr_pre_handler;
	ap->fault_handler = aggr_fault_handler;
	/* We don't care the kprobe which has gone. */
	if (p->post_handler && !kprobe_gone(p))
		ap->post_handler = aggr_post_handler;
	if (p->break_handler && !kprobe_gone(p))
		ap->break_handler = aggr_break_handler;

	INIT_LIST_HEAD(&ap->list);
	INIT_HLIST_NODE(&ap->hlist);

	list_add_rcu(&p->list, &ap->list);
	hlist_replace_rcu(&p->hlist, &ap->hlist);
}

/*
 * This is the second or subsequent kprobe at the address - handle
 * the intricacies
 */
static int register_aggr_kprobe(struct kprobe *orig_p, struct kprobe *p)
{
	int ret = 0;
	struct kprobe *ap = orig_p;

	cpus_read_lock();

	/* For preparing optimization, jump_label_text_reserved() is called */
	jump_label_lock();
	mutex_lock(&text_mutex);

	if (!kprobe_aggrprobe(orig_p)) {
		/* If orig_p is not an aggr_kprobe, create new aggr_kprobe. */
		ap = alloc_aggr_kprobe(orig_p);
		if (!ap) {
			ret = -ENOMEM;
			goto out;
		}
		init_aggr_kprobe(ap, orig_p);
	} else if (kprobe_unused(ap))
		/* This probe is going to die. Rescue it */
		reuse_unused_kprobe(ap);

	if (kprobe_gone(ap)) {
		/*
		 * Attempting to insert new probe at the same location that
		 * had a probe in the module vaddr area which already
		 * freed. So, the instruction slot has already been
		 * released. We need a new slot for the new probe.
		 */
		ret = arch_prepare_kprobe(ap);
		if (ret)
			/*
			 * Even if fail to allocate new slot, don't need to
			 * free the aggr_probe. It will be used next time, or
			 * freed by unregister_kprobe.
			 */
			goto out;

		/* Prepare optimized instructions if possible. */
		prepare_optimized_kprobe(ap);

		/*
		 * Clear gone flag to prevent allocating new slot again, and
		 * set disabled flag because it is not armed yet.
		 */
		ap->flags = (ap->flags & ~KPROBE_FLAG_GONE)
			    | KPROBE_FLAG_DISABLED;
	}

	/* Copy ap's insn slot to p */
	copy_kprobe(ap, p);
	ret = add_new_kprobe(ap, p);

out:
	mutex_unlock(&text_mutex);
	jump_label_unlock();
	cpus_read_unlock();

	if (ret == 0 && kprobe_disabled(ap) && !kprobe_disabled(p)) {
		ap->flags &= ~KPROBE_FLAG_DISABLED;
		if (!kprobes_all_disarmed)
			/* Arm the breakpoint again. */
			arm_kprobe(ap);
	}
	return ret;
}

bool __weak arch_within_kprobe_blacklist(unsigned long addr)
{
	/* The __kprobes marked functions and entry code must not be probed */
	return addr >= (unsigned long)__kprobes_text_start &&
	       addr < (unsigned long)__kprobes_text_end;
}

bool within_kprobe_blacklist(unsigned long addr)
{
	struct kprobe_blacklist_entry *ent;

	if (arch_within_kprobe_blacklist(addr))
		return true;
	/*
	 * If there exists a kprobe_blacklist, verify and
	 * fail any probe registration in the prohibited area
	 */
	list_for_each_entry(ent, &kprobe_blacklist, list) {
		if (addr >= ent->start_addr && addr < ent->end_addr)
			return true;
	}

	return false;
}

/*
 * If we have a symbol_name argument, look it up and add the offset field
 * to it. This way, we can specify a relative address to a symbol.
 * This returns encoded errors if it fails to look up symbol or invalid
 * combination of parameters.
 */
static kprobe_opcode_t *_kprobe_addr(kprobe_opcode_t *addr,
			const char *symbol_name, unsigned int offset)
{
	if ((symbol_name && addr) || (!symbol_name && !addr))
		goto invalid;

	if (symbol_name) {
		addr = kprobe_lookup_name(symbol_name, offset);
		if (!addr)
			return ERR_PTR(-ENOENT);
	}

	addr = (kprobe_opcode_t *)(((char *)addr) + offset);
	if (addr)
		return addr;

invalid:
	return ERR_PTR(-EINVAL);
}

static kprobe_opcode_t *kprobe_addr(struct kprobe *p)
{
	return _kprobe_addr(p->addr, p->symbol_name, p->offset);
}

/* Check passed kprobe is valid and return kprobe in kprobe_table. */
static struct kprobe *__get_valid_kprobe(struct kprobe *p)
{
	struct kprobe *ap, *list_p;

	ap = get_kprobe(p->addr);
	if (unlikely(!ap))
		return NULL;

	if (p != ap) {
		list_for_each_entry_rcu(list_p, &ap->list, list)
			if (list_p == p)
				/* kprobe p is a valid probe */
				goto valid;
		return NULL;
	}
valid:
	return ap;
}

/* Return error if the kprobe is being re-registered */
static inline int check_kprobe_rereg(struct kprobe *p)
{
	int ret = 0;

	mutex_lock(&kprobe_mutex);
	if (__get_valid_kprobe(p))
		ret = -EINVAL;
	mutex_unlock(&kprobe_mutex);

	return ret;
}

int __weak arch_check_ftrace_location(struct kprobe *p)
{
	unsigned long ftrace_addr;

	ftrace_addr = ftrace_location((unsigned long)p->addr);
	if (ftrace_addr) {
#ifdef CONFIG_KPROBES_ON_FTRACE
		/* Given address is not on the instruction boundary */
		if ((unsigned long)p->addr != ftrace_addr)
			return -EILSEQ;
		p->flags |= KPROBE_FLAG_FTRACE;
#else	/* !CONFIG_KPROBES_ON_FTRACE */
		return -EINVAL;
#endif
	}
	return 0;
}

static int check_kprobe_address_safe(struct kprobe *p,
				     struct module **probed_mod)
{
	int ret;

	ret = arch_check_ftrace_location(p);
	if (ret)
		return ret;
	jump_label_lock();
	preempt_disable();

	/* Ensure it is not in reserved area nor out of text */
	if (!kernel_text_address((unsigned long) p->addr) ||
	    within_kprobe_blacklist((unsigned long) p->addr) ||
	    jump_label_text_reserved(p->addr, p->addr)) {
		ret = -EINVAL;
		goto out;
	}

	/* Check if are we probing a module */
	*probed_mod = __module_text_address((unsigned long) p->addr);
	if (*probed_mod) {
		/*
		 * We must hold a refcount of the probed module while updating
		 * its code to prohibit unexpected unloading.
		 */
		if (unlikely(!try_module_get(*probed_mod))) {
			ret = -ENOENT;
			goto out;
		}

		/*
		 * If the module freed .init.text, we couldn't insert
		 * kprobes in there.
		 */
		if (within_module_init((unsigned long)p->addr, *probed_mod) &&
		    (*probed_mod)->state != MODULE_STATE_COMING) {
			module_put(*probed_mod);
			*probed_mod = NULL;
			ret = -ENOENT;
		}
	}
out:
	preempt_enable();
	jump_label_unlock();

	return ret;
}

int register_kprobe(struct kprobe *p)
{
	int ret;
	struct kprobe *old_p;
	struct module *probed_mod;
	kprobe_opcode_t *addr;

	/* Adjust probe address from symbol */
	addr = kprobe_addr(p);
	if (IS_ERR(addr))
		return PTR_ERR(addr);
	p->addr = addr;

	ret = check_kprobe_rereg(p);
	if (ret)
		return ret;

	/* User can pass only KPROBE_FLAG_DISABLED to register_kprobe */
	p->flags &= KPROBE_FLAG_DISABLED;
	p->nmissed = 0;
	INIT_LIST_HEAD(&p->list);

	ret = check_kprobe_address_safe(p, &probed_mod);
	if (ret)
		return ret;

	mutex_lock(&kprobe_mutex);

	old_p = get_kprobe(p->addr);
	if (old_p) {
		/* Since this may unoptimize old_p, locking text_mutex. */
		ret = register_aggr_kprobe(old_p, p);
		goto out;
	}

	cpus_read_lock();
	/* Prevent text modification */
	mutex_lock(&text_mutex);
	ret = prepare_kprobe(p);
	mutex_unlock(&text_mutex);
	cpus_read_unlock();
	if (ret)
		goto out;

	INIT_HLIST_NODE(&p->hlist);
	hlist_add_head_rcu(&p->hlist,
		       &kprobe_table[hash_ptr(p->addr, KPROBE_HASH_BITS)]);

	if (!kprobes_all_disarmed && !kprobe_disabled(p))
		arm_kprobe(p);

	/* Try to optimize kprobe */
	try_to_optimize_kprobe(p);
out:
	mutex_unlock(&kprobe_mutex);

	if (probed_mod)
		module_put(probed_mod);

	return ret;
}
EXPORT_SYMBOL_GPL(register_kprobe);
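
/*
 * Minimal usage sketch (hypothetical module code, not part of this
 * file; the target symbol and handler names are illustrative):
 *
 *	static int my_pre(struct kprobe *p, struct pt_regs *regs)
 *	{
 *		pr_info("probe hit at %p\n", p->addr);
 *		return 0;	// let the probed instruction run
 *	}
 *
 *	static struct kprobe my_kp = {
 *		.symbol_name	= "do_sys_open",
 *		.pre_handler	= my_pre,
 *	};
 *
 *	err = register_kprobe(&my_kp);	// later: unregister_kprobe(&my_kp)
 */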

/* Check if all probes on the aggrprobe are disabled */
static int aggr_kprobe_disabled(struct kprobe *ap)
{
	struct kprobe *kp;

	list_for_each_entry_rcu(kp, &ap->list, list)
		if (!kprobe_disabled(kp))
			/*
			 * There is an active probe on the list.
			 * We can't disable this ap.
			 */
			return 0;

	return 1;
}

/* Disable one kprobe: Make sure called under kprobe_mutex is locked */
static struct kprobe *__disable_kprobe(struct kprobe *p)
{
	struct kprobe *orig_p;

	/* Get the original kprobe for return and checking */
	orig_p = __get_valid_kprobe(p);
	if (unlikely(orig_p == NULL))
		return NULL;

	if (!kprobe_disabled(p)) {
		/* Disable probe if it is a child probe */
		if (p != orig_p)
			p->flags |= KPROBE_FLAG_DISABLED;

		/* Try to disarm and disable this/parent probe */
		if (p == orig_p || aggr_kprobe_disabled(orig_p)) {
			/*
			 * If kprobes_all_disarmed is set, orig_p
			 * should have already been disarmed, so
			 * skip unneeded disarming process.
			 */
			if (!kprobes_all_disarmed)
				disarm_kprobe(orig_p, true);
			orig_p->flags |= KPROBE_FLAG_DISABLED;
		}
	}

	return orig_p;
}

/*
 * Unregister a kprobe without a scheduler synchronization.
 */
static int __unregister_kprobe_top(struct kprobe *p)
{
	struct kprobe *ap, *list_p;

	/* Disable kprobe. This will disarm it if needed. */
	ap = __disable_kprobe(p);
	if (ap == NULL)
		return -EINVAL;

	if (ap == p)
		/*
		 * This probe is an independent(and non-optimized) kprobe
		 * (not an aggrprobe). Remove from the hash list.
		 */
		goto disarmed;

	/* Following process expects this probe is an aggrprobe */
	WARN_ON(!kprobe_aggrprobe(ap));

	if (list_is_singular(&ap->list) && kprobe_disarmed(ap))
		/*
		 * !disarmed could happen if the probe is under delayed
		 * unoptimizing.
		 */
		goto disarmed;
	else {
		/* If disabling probe has special handlers, update aggrprobe */
		if (p->break_handler && !kprobe_gone(p))
			ap->break_handler = NULL;
		if (p->post_handler && !kprobe_gone(p)) {
			list_for_each_entry_rcu(list_p, &ap->list, list) {
				if ((list_p != p) && (list_p->post_handler))
					goto noclean;
			}
			ap->post_handler = NULL;
		}
noclean:
		/*
		 * Remove from the aggrprobe: this path will do nothing in
		 * __unregister_kprobe_bottom().
		 */
		list_del_rcu(&p->list);
		if (!kprobe_disabled(ap) && !kprobes_all_disarmed)
			/*
			 * Try to optimize this probe again, because post
			 * handler may have been changed.
			 */
			optimize_kprobe(ap);
	}
	return 0;

disarmed:
	BUG_ON(!kprobe_disarmed(ap));
	hlist_del_rcu(&ap->hlist);
	return 0;
}

static void __unregister_kprobe_bottom(struct kprobe *p)
{
	struct kprobe *ap;

	if (list_empty(&p->list))
		/* This is an independent kprobe */
		arch_remove_kprobe(p);
	else if (list_is_singular(&p->list)) {
		/* This probe is the last child of aggrprobe */
		ap = list_entry(p->list.next, struct kprobe, list);
		list_del(&p->list);
		free_aggr_kprobe(ap);
	}
	/* Otherwise, do nothing. */
}

int register_kprobes(struct kprobe **kps, int num)
{
	int i, ret = 0;

	if (num <= 0)
		return -EINVAL;
	for (i = 0; i < num; i++) {
		ret = register_kprobe(kps[i]);
		if (ret < 0) {
			if (i > 0)
				unregister_kprobes(kps, i);
			break;
		}
	}
	return ret;
}
EXPORT_SYMBOL_GPL(register_kprobes);

void unregister_kprobe(struct kprobe *p)
{
	unregister_kprobes(&p, 1);
}
EXPORT_SYMBOL_GPL(unregister_kprobe);

void unregister_kprobes(struct kprobe **kps, int num)
{
	int i;

	if (num <= 0)
		return;
	mutex_lock(&kprobe_mutex);
	for (i = 0; i < num; i++)
		if (__unregister_kprobe_top(kps[i]) < 0)
			kps[i]->addr = NULL;
	mutex_unlock(&kprobe_mutex);

	synchronize_sched();
	for (i = 0; i < num; i++)
		if (kps[i]->addr)
			__unregister_kprobe_bottom(kps[i]);
}
EXPORT_SYMBOL_GPL(unregister_kprobes);

int __weak kprobe_exceptions_notify(struct notifier_block *self,
					unsigned long val, void *data)
{
	return NOTIFY_DONE;
}
NOKPROBE_SYMBOL(kprobe_exceptions_notify);

static struct notifier_block kprobe_exceptions_nb = {
	.notifier_call = kprobe_exceptions_notify,
	.priority = 0x7fffffff /* we need to be notified first */
};

unsigned long __weak arch_deref_entry_point(void *entry)
{
	return (unsigned long)entry;
}

int register_jprobes(struct jprobe **jps, int num)
{
	int ret = 0, i;

	if (num <= 0)
		return -EINVAL;

	for (i = 0; i < num; i++) {
		ret = register_jprobe(jps[i]);

		if (ret < 0) {
			if (i > 0)
				unregister_jprobes(jps, i);
			break;
		}
	}

	return ret;
}
EXPORT_SYMBOL_GPL(register_jprobes);

int register_jprobe(struct jprobe *jp)
{
	unsigned long addr, offset;
	struct kprobe *kp = &jp->kp;

	/*
	 * Verify probepoint as well as the jprobe handler are
	 * valid function entry points.
	 */
	addr = arch_deref_entry_point(jp->entry);

	if (kallsyms_lookup_size_offset(addr, NULL, &offset) && offset == 0 &&
	    kprobe_on_func_entry(kp->addr, kp->symbol_name, kp->offset)) {
		kp->pre_handler = setjmp_pre_handler;
		kp->break_handler = longjmp_break_handler;
		return register_kprobe(kp);
	}

	return -EINVAL;
}
EXPORT_SYMBOL_GPL(register_jprobe);
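
/*
 * Usage sketch (hypothetical names, illustrative only): a jprobe
 * supplies a mirror function whose signature must match the probed
 * entry point, and which must end in jprobe_return().
 *
 *	static long my_entry(int arg)	// must match the probed function
 *	{
 *		pr_info("arg=%d\n", arg);
 *		jprobe_return();
 *		return 0;	// never reached
 *	}
 *
 *	static struct jprobe my_jp = {
 *		.entry		= my_entry,
 *		.kp.symbol_name	= "my_target_func",	// hypothetical
 *	};
 *
 *	err = register_jprobe(&my_jp);
 */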

void unregister_jprobe(struct jprobe *jp)
{
	unregister_jprobes(&jp, 1);
}
EXPORT_SYMBOL_GPL(unregister_jprobe);

void unregister_jprobes(struct jprobe **jps, int num)
{
	int i;

	if (num <= 0)
		return;
	mutex_lock(&kprobe_mutex);
	for (i = 0; i < num; i++)
		if (__unregister_kprobe_top(&jps[i]->kp) < 0)
			jps[i]->kp.addr = NULL;
	mutex_unlock(&kprobe_mutex);

	synchronize_sched();
	for (i = 0; i < num; i++) {
		if (jps[i]->kp.addr)
			__unregister_kprobe_bottom(&jps[i]->kp);
	}
}
EXPORT_SYMBOL_GPL(unregister_jprobes);

#ifdef CONFIG_KRETPROBES
/*
 * This kprobe pre_handler is registered with every kretprobe. When probe
 * hits it will set up the return probe.
 */
static int pre_handler_kretprobe(struct kprobe *p, struct pt_regs *regs)
{
	struct kretprobe *rp = container_of(p, struct kretprobe, kp);
	unsigned long hash, flags = 0;
	struct kretprobe_instance *ri;

	/*
	 * To avoid deadlocks, prohibit return probing in NMI contexts,
	 * just skip the probe and increase the (inexact) 'nmissed'
	 * count.
	 */
	if (unlikely(in_nmi())) {
		rp->nmissed++;
		return 0;
	}

	/* TODO: consider to only swap the RA after the last pre_handler fired */
	hash = hash_ptr(current, KPROBE_HASH_BITS);
	raw_spin_lock_irqsave(&rp->lock, flags);
	if (!hlist_empty(&rp->free_instances)) {
		ri = hlist_entry(rp->free_instances.first,
				struct kretprobe_instance, hlist);
		hlist_del(&ri->hlist);
		raw_spin_unlock_irqrestore(&rp->lock, flags);

		ri->rp = rp;
		ri->task = current;

		if (rp->entry_handler && rp->entry_handler(ri, regs)) {
			raw_spin_lock_irqsave(&rp->lock, flags);
			hlist_add_head(&ri->hlist, &rp->free_instances);
			raw_spin_unlock_irqrestore(&rp->lock, flags);
			return 0;
		}

		arch_prepare_kretprobe(ri, regs);

		/* XXX(hch): why is there no hlist_move_head? */
		INIT_HLIST_NODE(&ri->hlist);
		kretprobe_table_lock(hash, &flags);
		hlist_add_head(&ri->hlist, &kretprobe_inst_table[hash]);
		kretprobe_table_unlock(hash, &flags);
	} else {
		rp->nmissed++;
		raw_spin_unlock_irqrestore(&rp->lock, flags);
	}
	return 0;
}
NOKPROBE_SYMBOL(pre_handler_kretprobe);

bool __weak arch_kprobe_on_func_entry(unsigned long offset)
{
	return !offset;
}

bool kprobe_on_func_entry(kprobe_opcode_t *addr, const char *sym, unsigned long offset)
{
	kprobe_opcode_t *kp_addr = _kprobe_addr(addr, sym, offset);

	if (IS_ERR(kp_addr))
		return false;

	if (!kallsyms_lookup_size_offset((unsigned long)kp_addr, NULL, &offset) ||
	    !arch_kprobe_on_func_entry(offset))
		return false;

	return true;
}

int register_kretprobe(struct kretprobe *rp)
{
	int ret = 0;
	struct kretprobe_instance *inst;
	int i;
	void *addr;

	if (!kprobe_on_func_entry(rp->kp.addr, rp->kp.symbol_name, rp->kp.offset))
		return -EINVAL;

	if (kretprobe_blacklist_size) {
		addr = kprobe_addr(&rp->kp);
		if (IS_ERR(addr))
			return PTR_ERR(addr);

		for (i = 0; kretprobe_blacklist[i].name != NULL; i++) {
			if (kretprobe_blacklist[i].addr == addr)
				return -EINVAL;
		}
	}

	rp->kp.pre_handler = pre_handler_kretprobe;
	rp->kp.post_handler = NULL;
	rp->kp.fault_handler = NULL;
	rp->kp.break_handler = NULL;

	/* Pre-allocate memory for max kretprobe instances */
	if (rp->maxactive <= 0) {
#ifdef CONFIG_PREEMPT
		rp->maxactive = max_t(unsigned int, 10, 2*num_possible_cpus());
#else
		rp->maxactive = num_possible_cpus();
#endif
	}
	raw_spin_lock_init(&rp->lock);
	INIT_HLIST_HEAD(&rp->free_instances);
	for (i = 0; i < rp->maxactive; i++) {
		inst = kmalloc(sizeof(struct kretprobe_instance) +
			       rp->data_size, GFP_KERNEL);
		if (inst == NULL) {
			free_rp_inst(rp);
			return -ENOMEM;
		}
		INIT_HLIST_NODE(&inst->hlist);
		hlist_add_head(&inst->hlist, &rp->free_instances);
	}

	rp->nmissed = 0;
	/* Establish function entry probe point */
	ret = register_kprobe(&rp->kp);
	if (ret != 0)
		free_rp_inst(rp);
	return ret;
}
EXPORT_SYMBOL_GPL(register_kretprobe);
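
/*
 * Usage sketch (hypothetical names, illustrative only): the return
 * handler runs when the probed function returns, and
 * regs_return_value() reads its return value.
 *
 *	static int my_ret(struct kretprobe_instance *ri, struct pt_regs *regs)
 *	{
 *		pr_info("returned %ld\n", regs_return_value(regs));
 *		return 0;
 *	}
 *
 *	static struct kretprobe my_rp = {
 *		.handler	= my_ret,
 *		.kp.symbol_name	= "do_sys_open",
 *		.maxactive	= 20,	// cap on concurrent instances
 *	};
 *
 *	err = register_kretprobe(&my_rp);
 */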

int register_kretprobes(struct kretprobe **rps, int num)
{
	int ret = 0, i;

	if (num <= 0)
		return -EINVAL;
	for (i = 0; i < num; i++) {
		ret = register_kretprobe(rps[i]);
		if (ret < 0) {
			if (i > 0)
				unregister_kretprobes(rps, i);
			break;
		}
	}
	return ret;
}
EXPORT_SYMBOL_GPL(register_kretprobes);

void unregister_kretprobe(struct kretprobe *rp)
{
	unregister_kretprobes(&rp, 1);
}
EXPORT_SYMBOL_GPL(unregister_kretprobe);

void unregister_kretprobes(struct kretprobe **rps, int num)
{
	int i;

	if (num <= 0)
		return;
	mutex_lock(&kprobe_mutex);
	for (i = 0; i < num; i++)
		if (__unregister_kprobe_top(&rps[i]->kp) < 0)
			rps[i]->kp.addr = NULL;
	mutex_unlock(&kprobe_mutex);

	synchronize_sched();
	for (i = 0; i < num; i++) {
		if (rps[i]->kp.addr) {
			__unregister_kprobe_bottom(&rps[i]->kp);
			cleanup_rp_inst(rps[i]);
		}
	}
}
EXPORT_SYMBOL_GPL(unregister_kretprobes);

#else /* CONFIG_KRETPROBES */
int register_kretprobe(struct kretprobe *rp)
{
	return -ENOSYS;
}
EXPORT_SYMBOL_GPL(register_kretprobe);

int register_kretprobes(struct kretprobe **rps, int num)
{
	return -ENOSYS;
}
EXPORT_SYMBOL_GPL(register_kretprobes);

void unregister_kretprobe(struct kretprobe *rp)
{
}
EXPORT_SYMBOL_GPL(unregister_kretprobe);

void unregister_kretprobes(struct kretprobe **rps, int num)
{
}
EXPORT_SYMBOL_GPL(unregister_kretprobes);

static int pre_handler_kretprobe(struct kprobe *p, struct pt_regs *regs)
{
	return 0;
}
NOKPROBE_SYMBOL(pre_handler_kretprobe);

#endif /* CONFIG_KRETPROBES */

/* Set the kprobe gone and remove its instruction buffer. */
static void kill_kprobe(struct kprobe *p)
{
	struct kprobe *kp;

	p->flags |= KPROBE_FLAG_GONE;
	if (kprobe_aggrprobe(p)) {
		/*
		 * If this is an aggr_kprobe, we have to list all the
		 * chained probes and mark them GONE.
		 */
		list_for_each_entry_rcu(kp, &p->list, list)
			kp->flags |= KPROBE_FLAG_GONE;
		p->post_handler = NULL;
		p->break_handler = NULL;
		kill_optimized_kprobe(p);
	}
	/*
	 * Here, we can remove insn_slot safely, because no thread calls
	 * the original probed function (which will be freed soon) any more.
	 */
	arch_remove_kprobe(p);
}

/* Disable one kprobe */
int disable_kprobe(struct kprobe *kp)
{
	int ret = 0;

	mutex_lock(&kprobe_mutex);

	/* Disable this kprobe */
	if (__disable_kprobe(kp) == NULL)
		ret = -EINVAL;

	mutex_unlock(&kprobe_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(disable_kprobe);
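
/*
 * disable_kprobe()/enable_kprobe() pair up (sketch): disabling removes
 * the breakpoint but keeps the kprobe registered and its slot alive,
 * so a later enable_kprobe() re-arms at the same address without
 * re-running registration.
 */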

/* Enable and arm a kprobe */
int enable_kprobe(struct kprobe *kp)
{
	int ret = 0;
	struct kprobe *p;

	mutex_lock(&kprobe_mutex);

	/* Check whether specified probe is valid. */
	p = __get_valid_kprobe(kp);
	if (unlikely(p == NULL)) {
		ret = -EINVAL;
		goto out;
	}

	if (kprobe_gone(kp)) {
		/* This kprobe has gone, we couldn't enable it. */
		ret = -EINVAL;
		goto out;
	}

	if (p != kp)
		kp->flags &= ~KPROBE_FLAG_DISABLED;

	if (!kprobes_all_disarmed && kprobe_disabled(p)) {
		p->flags &= ~KPROBE_FLAG_DISABLED;
		arm_kprobe(p);
	}
out:
	mutex_unlock(&kprobe_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(enable_kprobe);

void dump_kprobe(struct kprobe *kp)
{
	printk(KERN_WARNING "Dumping kprobe:\n");
	printk(KERN_WARNING "Name: %s\nAddress: %p\nOffset: %x\n",
	       kp->symbol_name, kp->addr, kp->offset);
}
NOKPROBE_SYMBOL(dump_kprobe);

/*
 * Lookup and populate the kprobe_blacklist.
 *
 * Unlike the kretprobe blacklist, we'll need to determine
 * the range of addresses that belong to the said functions,
 * since a kprobe need not necessarily be at the beginning
 * of a function.
 */
static int __init populate_kprobe_blacklist(unsigned long *start,
					     unsigned long *end)
{
	unsigned long *iter;
	struct kprobe_blacklist_entry *ent;
	unsigned long entry, offset = 0, size = 0;

	for (iter = start; iter < end; iter++) {
		entry = arch_deref_entry_point((void *)*iter);

		if (!kernel_text_address(entry) ||
		    !kallsyms_lookup_size_offset(entry, &size, &offset)) {
			pr_err("Failed to find blacklist at %p\n",
				(void *)entry);
			continue;
		}

		ent = kmalloc(sizeof(*ent), GFP_KERNEL);
		if (!ent)
			return -ENOMEM;
		ent->start_addr = entry;
		ent->end_addr = entry + size;
		INIT_LIST_HEAD(&ent->list);
		list_add_tail(&ent->list, &kprobe_blacklist);
	}
	return 0;
}
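
/*
 * The blacklist populated above is fed by NOKPROBE_SYMBOL() annotations
 * (e.g. NOKPROBE_SYMBOL(get_kprobe) earlier in this file), which place
 * symbol addresses into the _kprobe_blacklist section walked by
 * populate_kprobe_blacklist().
 */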

/* Module notifier call back, checking kprobes on the module */
static int kprobes_module_callback(struct notifier_block *nb,
				   unsigned long val, void *data)
{
	struct module *mod = data;
	struct hlist_head *head;
	struct kprobe *p;
	unsigned int i;
	int checkcore = (val == MODULE_STATE_GOING);

	if (val != MODULE_STATE_GOING && val != MODULE_STATE_LIVE)
		return NOTIFY_DONE;

	/*
	 * When MODULE_STATE_GOING was notified, both of module .text and
	 * .init.text sections would be freed. When MODULE_STATE_LIVE was
	 * notified, only .init.text section would be freed. We need to
	 * disable kprobes which have been inserted in the sections.
	 */
	mutex_lock(&kprobe_mutex);
	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
		head = &kprobe_table[i];
		hlist_for_each_entry_rcu(p, head, hlist)
			if (within_module_init((unsigned long)p->addr, mod) ||
			    (checkcore &&
			     within_module_core((unsigned long)p->addr, mod))) {
				/*
				 * The vaddr this probe is installed will soon
				 * be vfreed but not synced to disk. Hence,
				 * disarming the breakpoint isn't needed.
				 *
				 * Note, this will also move any optimized probes
				 * that are pending to be removed from their
				 * corresponding lists to the freeing_list and
				 * will not be touched by the delayed
				 * kprobe_optimizer work handler.
				 */
				kill_kprobe(p);
			}
	}
	mutex_unlock(&kprobe_mutex);
	return NOTIFY_DONE;
}

static struct notifier_block kprobe_module_nb = {
	.notifier_call = kprobes_module_callback,
	.priority = 0
};

/* Markers of _kprobe_blacklist section */
extern unsigned long __start_kprobe_blacklist[];
extern unsigned long __stop_kprobe_blacklist[];

static int __init init_kprobes(void)
{
	int i, err = 0;

	/* FIXME allocate the probe table, currently defined statically */
	/* initialize all list heads */
	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
		INIT_HLIST_HEAD(&kprobe_table[i]);
		INIT_HLIST_HEAD(&kretprobe_inst_table[i]);
		raw_spin_lock_init(&(kretprobe_table_locks[i].lock));
	}

	err = populate_kprobe_blacklist(__start_kprobe_blacklist,
					__stop_kprobe_blacklist);
	if (err) {
		pr_err("kprobes: failed to populate blacklist: %d\n", err);
		pr_err("Please take care of using kprobes.\n");
	}

	if (kretprobe_blacklist_size) {
		/* lookup the function address from its name */
		for (i = 0; kretprobe_blacklist[i].name != NULL; i++) {
			kretprobe_blacklist[i].addr =
				kprobe_lookup_name(kretprobe_blacklist[i].name, 0);
			if (!kretprobe_blacklist[i].addr)
				printk("kretprobe: lookup failed: %s\n",
				       kretprobe_blacklist[i].name);
		}
	}

#if defined(CONFIG_OPTPROBES)
#if defined(__ARCH_WANT_KPROBES_INSN_SLOT)
	/* Init kprobe_optinsn_slots */
	kprobe_optinsn_slots.insn_size = MAX_OPTINSN_SIZE;
#endif
	/* By default, kprobes can be optimized */
	kprobes_allow_optimization = true;
#endif

	/* By default, kprobes are armed */
	kprobes_all_disarmed = false;

	err = arch_init_kprobes();
	if (!err)
		err = register_die_notifier(&kprobe_exceptions_nb);
	if (!err)
		err = register_module_notifier(&kprobe_module_nb);

	kprobes_initialized = (err == 0);

	if (!err)
		init_test_probes();
	return err;
}

#ifdef CONFIG_DEBUG_FS
static void report_probe(struct seq_file *pi, struct kprobe *p,
		const char *sym, int offset, char *modname, struct kprobe *pp)
{
	char *kprobe_type;

	if (p->pre_handler == pre_handler_kretprobe)
		kprobe_type = "r";
	else if (p->pre_handler == setjmp_pre_handler)
		kprobe_type = "j";
	else
		kprobe_type = "k";

	if (sym)
		seq_printf(pi, "%p %s %s+0x%x %s ",
			p->addr, kprobe_type, sym, offset,
			(modname ? modname : " "));
	else
		seq_printf(pi, "%p %s %p ",
			p->addr, kprobe_type, p->addr);

	if (!pp)
		pp = p;
	seq_printf(pi, "%s%s%s%s\n",
		(kprobe_gone(p) ? "[GONE]" : ""),
		((kprobe_disabled(p) && !kprobe_gone(p)) ? "[DISABLED]" : ""),
		(kprobe_optimized(pp) ? "[OPTIMIZED]" : ""),
		(kprobe_ftrace(pp) ? "[FTRACE]" : ""));
}

static void *kprobe_seq_start(struct seq_file *f, loff_t *pos)
{
	return (*pos < KPROBE_TABLE_SIZE) ? pos : NULL;
}

static void *kprobe_seq_next(struct seq_file *f, void *v, loff_t *pos)
{
	(*pos)++;
	if (*pos >= KPROBE_TABLE_SIZE)
		return NULL;
	return pos;
}

static void kprobe_seq_stop(struct seq_file *f, void *v)
{
	/* Nothing to do */
}

static int show_kprobe_addr(struct seq_file *pi, void *v)
{
	struct hlist_head *head;
	struct kprobe *p, *kp;
	const char *sym = NULL;
	unsigned int i = *(loff_t *) v;
	unsigned long offset = 0;
	char *modname, namebuf[KSYM_NAME_LEN];

	head = &kprobe_table[i];
	preempt_disable();
	hlist_for_each_entry_rcu(p, head, hlist) {
		sym = kallsyms_lookup((unsigned long)p->addr, NULL,
					&offset, &modname, namebuf);
		if (kprobe_aggrprobe(p)) {
			list_for_each_entry_rcu(kp, &p->list, list)
				report_probe(pi, kp, sym, offset, modname, p);
		} else
			report_probe(pi, p, sym, offset, modname, NULL);
	}
	preempt_enable();
	return 0;
}

static const struct seq_operations kprobes_seq_ops = {
	.start = kprobe_seq_start,
	.next  = kprobe_seq_next,
	.stop  = kprobe_seq_stop,
	.show  = show_kprobe_addr
};

static int kprobes_open(struct inode *inode, struct file *filp)
{
	return seq_open(filp, &kprobes_seq_ops);
}

static const struct file_operations debugfs_kprobes_operations = {
	.open           = kprobes_open,
	.read           = seq_read,
	.llseek         = seq_lseek,
	.release        = seq_release,
};

/* kprobes/blacklist -- shows which functions can not be probed */
static void *kprobe_blacklist_seq_start(struct seq_file *m, loff_t *pos)
{
	return seq_list_start(&kprobe_blacklist, *pos);
}

static void *kprobe_blacklist_seq_next(struct seq_file *m, void *v, loff_t *pos)
{
	return seq_list_next(v, &kprobe_blacklist, pos);
}

static int kprobe_blacklist_seq_show(struct seq_file *m, void *v)
{
	struct kprobe_blacklist_entry *ent =
		list_entry(v, struct kprobe_blacklist_entry, list);

	seq_printf(m, "0x%p-0x%p\t%ps\n", (void *)ent->start_addr,
		   (void *)ent->end_addr, (void *)ent->start_addr);
	return 0;
}

static const struct seq_operations kprobe_blacklist_seq_ops = {
	.start = kprobe_blacklist_seq_start,
	.next  = kprobe_blacklist_seq_next,
	.stop  = kprobe_seq_stop,	/* Reuse void function */
	.show  = kprobe_blacklist_seq_show,
};

static int kprobe_blacklist_open(struct inode *inode, struct file *filp)
{
	return seq_open(filp, &kprobe_blacklist_seq_ops);
}

static const struct file_operations debugfs_kprobe_blacklist_ops = {
	.open           = kprobe_blacklist_open,
	.read           = seq_read,
	.llseek         = seq_lseek,
	.release        = seq_release,
};

static void arm_all_kprobes(void)
{
	struct hlist_head *head;
	struct kprobe *p;
	unsigned int i;

	mutex_lock(&kprobe_mutex);

	/* If kprobes are armed, just return */
	if (!kprobes_all_disarmed)
		goto already_enabled;

	/*
	 * optimize_kprobe() called by arm_kprobe() checks
	 * kprobes_all_disarmed, so set kprobes_all_disarmed before
	 * arming kprobes.
	 */
	kprobes_all_disarmed = false;
	/* Arming kprobes doesn't optimize kprobe itself */
	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
		head = &kprobe_table[i];
		hlist_for_each_entry_rcu(p, head, hlist)
			if (!kprobe_disabled(p))
				arm_kprobe(p);
	}

	printk(KERN_INFO "Kprobes globally enabled\n");

already_enabled:
	mutex_unlock(&kprobe_mutex);
	return;
}

static void disarm_all_kprobes(void)
{
	struct hlist_head *head;
	struct kprobe *p;
	unsigned int i;

	mutex_lock(&kprobe_mutex);

	/* If kprobes are already disarmed, just return */
	if (kprobes_all_disarmed) {
		mutex_unlock(&kprobe_mutex);
		return;
	}

	kprobes_all_disarmed = true;
	printk(KERN_INFO "Kprobes globally disabled\n");

	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
		head = &kprobe_table[i];
		hlist_for_each_entry_rcu(p, head, hlist) {
			if (!arch_trampoline_kprobe(p) && !kprobe_disabled(p))
				disarm_kprobe(p, false);
		}
	}
	mutex_unlock(&kprobe_mutex);

	/* Wait for disarming all kprobes by optimizer */
	wait_for_kprobe_optimizer();
}

/*
 * XXX: The debugfs bool file interface doesn't allow for callbacks
 * when the bool state is switched. We can reuse that facility when
 * available
 */
static ssize_t read_enabled_file_bool(struct file *file,
	       char __user *user_buf, size_t count, loff_t *ppos)
{
	char buf[3];

	if (!kprobes_all_disarmed)
		buf[0] = '1';
	else
		buf[0] = '0';
	buf[1] = '\n';
	buf[2] = 0x00;
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t write_enabled_file_bool(struct file *file,
	       const char __user *user_buf, size_t count, loff_t *ppos)
{
	char buf[32];
	size_t buf_size;

	buf_size = min(count, (sizeof(buf)-1));
	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	switch (buf[0]) {
	case 'y':
	case 'Y':
	case '1':
		arm_all_kprobes();
		break;
	case 'n':
	case 'N':
	case '0':
		disarm_all_kprobes();
		break;
	default:
		return -EINVAL;
	}

	return count;
}

static const struct file_operations fops_kp = {
	.read =         read_enabled_file_bool,
	.write =        write_enabled_file_bool,
	.llseek =	default_llseek,
};

static int __init debugfs_kprobe_init(void)
{
	struct dentry *dir, *file;
	unsigned int value = 1;

	dir = debugfs_create_dir("kprobes", NULL);
	if (!dir)
		return -ENOMEM;

	file = debugfs_create_file("list", 0444, dir, NULL,
				&debugfs_kprobes_operations);
	if (!file)
		goto error;

	file = debugfs_create_file("enabled", 0600, dir,
					&value, &fops_kp);
	if (!file)
		goto error;

	file = debugfs_create_file("blacklist", 0444, dir, NULL,
				&debugfs_kprobe_blacklist_ops);
	if (!file)
		goto error;

	return 0;

error:
	debugfs_remove(dir);
	return -ENOMEM;
}

late_initcall(debugfs_kprobe_init);
#endif /* CONFIG_DEBUG_FS */

module_init(init_kprobes);

/* defined in arch/.../kernel/kprobes.c */
EXPORT_SYMBOL_GPL(jprobe_return);