// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2011-2015 PLUMgrid, http://plumgrid.com
 * Copyright (c) 2016 Facebook
 */
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/bpf.h>
#include <linux/bpf_perf_event.h>
#include <linux/filter.h>
#include <linux/uaccess.h>
#include <linux/ctype.h>
#include <linux/kprobes.h>
#include <linux/syscalls.h>
#include <linux/error-injection.h>

#include <asm/tlb.h>

#include "trace_probe.h"
#include "trace.h"

#define bpf_event_rcu_dereference(p)					\
	rcu_dereference_protected(p, lockdep_is_held(&bpf_event_mutex))

#ifdef CONFIG_MODULES
struct bpf_trace_module {
	struct module *module;
	struct list_head list;
};

static LIST_HEAD(bpf_trace_modules);
static DEFINE_MUTEX(bpf_module_mutex);

static struct bpf_raw_event_map *bpf_get_raw_tracepoint_module(const char *name)
{
	struct bpf_raw_event_map *btp, *ret = NULL;
	struct bpf_trace_module *btm;
	unsigned int i;

	mutex_lock(&bpf_module_mutex);
	list_for_each_entry(btm, &bpf_trace_modules, list) {
		for (i = 0; i < btm->module->num_bpf_raw_events; ++i) {
			btp = &btm->module->bpf_raw_events[i];
			if (!strcmp(btp->tp->name, name)) {
				if (try_module_get(btm->module))
					ret = btp;
				goto out;
			}
		}
	}
out:
	mutex_unlock(&bpf_module_mutex);
	return ret;
}
#else
static struct bpf_raw_event_map *bpf_get_raw_tracepoint_module(const char *name)
{
	return NULL;
}
#endif /* CONFIG_MODULES */

u64 bpf_get_stackid(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
u64 bpf_get_stack(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
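
/**
 * trace_call_bpf - invoke BPF program
 * @call: tracepoint event
 * @ctx: opaque context pointer
 *
 * kprobe handlers execute BPF programs via this helper.
 * Can be used from static tracepoints in the future.
 *
 * Return: BPF programs always return an integer which is interpreted by
 * kprobe handler as:
 * 0 - return from kprobe (event is filtered out)
 * 1 - store kprobe event into ring buffer
 * Other values are reserved and currently alias to 1
 */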
unsigned int trace_call_bpf(struct trace_event_call *call, void *ctx)
{
	unsigned int ret;

	if (in_nmi()) /* not supported yet */
		return 1;

	preempt_disable();

	if (unlikely(__this_cpu_inc_return(bpf_prog_active) != 1)) {
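		/*
		 * since some bpf program is already running on this cpu,
		 * don't call into another bpf program (same or different)
		 * and don't send kprobe event into ring-buffer,
		 * so return zero here
		 */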
		ret = 0;
		goto out;
	}

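	/*
	 * Instead of moving rcu_read_lock/rcu_dereference/rcu_read_unlock
	 * to all call sites, we did a bpf_prog_array_valid() there to check
	 * whether call->prog_array is empty or not, which is a heuristic
	 * to speed up execution.
	 *
	 * If bpf_prog_array_valid() fetched prog_array was
	 * non-NULL, we go into trace_call_bpf() and do the actual
	 * proper rcu_dereference() under RCU lock.
	 * If it turns out that prog_array is NULL then, we bail out.
	 * For the opposite, if the bpf_prog_array_valid() fetched pointer
	 * was NULL, you'll skip the prog_array with the risk of missing
	 * out of events when it was updated in between this and the
	 * rcu_dereference() which is accepted risk.
	 */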
	ret = BPF_PROG_RUN_ARRAY_CHECK(call->prog_array, ctx, BPF_PROG_RUN);

 out:
	__this_cpu_dec(bpf_prog_active);
	preempt_enable();

	return ret;
}
EXPORT_SYMBOL_GPL(trace_call_bpf);

#ifdef CONFIG_BPF_KPROBE_OVERRIDE
BPF_CALL_2(bpf_override_return, struct pt_regs *, regs, unsigned long, rc)
{
	regs_set_return_value(regs, rc);
	override_function_with_return(regs);
	return 0;
}

static const struct bpf_func_proto bpf_override_return_proto = {
	.func		= bpf_override_return,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_ANYTHING,
};
#endif

BPF_CALL_3(bpf_probe_read, void *, dst, u32, size, const void *, unsafe_ptr)
{
	int ret;

	ret = probe_kernel_read(dst, unsafe_ptr, size);
	if (unlikely(ret < 0))
		memset(dst, 0, size);

	return ret;
}

static const struct bpf_func_proto bpf_probe_read_proto = {
	.func		= bpf_probe_read,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg3_type	= ARG_ANYTHING,
};

BPF_CALL_3(bpf_probe_write_user, void *, unsafe_ptr, const void *, src,
	   u32, size)
{
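	/*
	 * Ensure we're in user context which is safe for the helper to
	 * run. This helper has no business in a kthread.
	 *
	 * access_ok() should prevent writing to non-user memory, but in
	 * some situations (nommu, temporary switch, etc) access_ok() does
	 * not provide enough validation, hence the check on KERNEL_DS.
	 *
	 * nmi_uaccess_okay() ensures the probe is not run in an NMI
	 * state where it is not allowed to use uaccess.
	 */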
	if (unlikely(in_interrupt() ||
		     current->flags & (PF_KTHREAD | PF_EXITING)))
		return -EPERM;
	if (unlikely(uaccess_kernel()))
		return -EPERM;
	if (unlikely(!nmi_uaccess_okay()))
		return -EPERM;
	if (!access_ok(unsafe_ptr, size))
		return -EPERM;

	return probe_kernel_write(unsafe_ptr, src, size);
}

static const struct bpf_func_proto bpf_probe_write_user_proto = {
	.func		= bpf_probe_write_user,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_ANYTHING,
	.arg2_type	= ARG_PTR_TO_MEM,
	.arg3_type	= ARG_CONST_SIZE,
};

static const struct bpf_func_proto *bpf_get_probe_write_proto(void)
{
	pr_warn_ratelimited("%s[%d] is installing a program with bpf_probe_write_user helper that may corrupt user memory!",
			    current->comm, task_pid_nr(current));

	return &bpf_probe_write_user_proto;
}
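/*
 * Only limited trace_printk() conversion specifiers allowed:
 * %d %i %u %x %ld %li %lu %lx %lld %lli %llu %llx %p %s
 */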
BPF_CALL_5(bpf_trace_printk, char *, fmt, u32, fmt_size, u64, arg1,
	   u64, arg2, u64, arg3)
{
	bool str_seen = false;
	int mod[3] = {};
	int fmt_cnt = 0;
	u64 unsafe_addr;
	char buf[64];
	int i;

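	/*
	 * bpf_check()->check_func_arg()->check_stack_boundary()
	 * guarantees that fmt points to bpf program stack,
	 * fmt_size bytes of it were initialized and fmt_size > 0
	 */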
	if (fmt[--fmt_size] != 0)
		return -EINVAL;

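	/* check format string for allowed specifiers */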
	for (i = 0; i < fmt_size; i++) {
		if ((!isprint(fmt[i]) && !isspace(fmt[i])) || !isascii(fmt[i]))
			return -EINVAL;

		if (fmt[i] != '%')
			continue;

		if (fmt_cnt >= 3)
			return -EINVAL;

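		/* fmt[i] != 0 && fmt[last] == 0, so we can access fmt[i + 1] */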
		i++;
		if (fmt[i] == 'l') {
			mod[fmt_cnt]++;
			i++;
		} else if (fmt[i] == 'p' || fmt[i] == 's') {
			mod[fmt_cnt]++;
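			/* disallow any further format extensions */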
			if (fmt[i + 1] != 0 &&
			    !isspace(fmt[i + 1]) &&
			    !ispunct(fmt[i + 1]))
				return -EINVAL;
			fmt_cnt++;
			if (fmt[i] == 's') {
				if (str_seen)
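					/* allow only one '%s' per fmt string */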
					return -EINVAL;
				str_seen = true;

				switch (fmt_cnt) {
				case 1:
					unsafe_addr = arg1;
					arg1 = (long) buf;
					break;
				case 2:
					unsafe_addr = arg2;
					arg2 = (long) buf;
					break;
				case 3:
					unsafe_addr = arg3;
					arg3 = (long) buf;
					break;
				}
				buf[0] = 0;
				strncpy_from_unsafe(buf,
						    (void *) (long) unsafe_addr,
						    sizeof(buf));
			}
			continue;
		}

		if (fmt[i] == 'l') {
			mod[fmt_cnt]++;
			i++;
		}

		if (fmt[i] != 'i' && fmt[i] != 'd' &&
		    fmt[i] != 'u' && fmt[i] != 'x')
			return -EINVAL;
		fmt_cnt++;
	}

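/* Horrid workaround for getting va_list handling working with different
 * argument type combinations generically for 32 and 64 bit archs.
 */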
#define __BPF_TP_EMIT()	__BPF_ARG3_TP()
#define __BPF_TP(...)							\
	__trace_printk(0 /* Fake ip */,					\
		       fmt, ##__VA_ARGS__)

#define __BPF_ARG1_TP(...)						\
	((mod[0] == 2 || (mod[0] == 1 && __BITS_PER_LONG == 64))	\
	  ? __BPF_TP(arg1, ##__VA_ARGS__)				\
	  : ((mod[0] == 1 || (mod[0] == 0 && __BITS_PER_LONG == 32))	\
	      ? __BPF_TP((long)arg1, ##__VA_ARGS__)			\
	      : __BPF_TP((u32)arg1, ##__VA_ARGS__)))

#define __BPF_ARG2_TP(...)						\
	((mod[1] == 2 || (mod[1] == 1 && __BITS_PER_LONG == 64))	\
	  ? __BPF_ARG1_TP(arg2, ##__VA_ARGS__)				\
	  : ((mod[1] == 1 || (mod[1] == 0 && __BITS_PER_LONG == 32))	\
	      ? __BPF_ARG1_TP((long)arg2, ##__VA_ARGS__)		\
	      : __BPF_ARG1_TP((u32)arg2, ##__VA_ARGS__)))

#define __BPF_ARG3_TP(...)						\
	((mod[2] == 2 || (mod[2] == 1 && __BITS_PER_LONG == 64))	\
	  ? __BPF_ARG2_TP(arg3, ##__VA_ARGS__)				\
	  : ((mod[2] == 1 || (mod[2] == 0 && __BITS_PER_LONG == 32))	\
	      ? __BPF_ARG2_TP((long)arg3, ##__VA_ARGS__)		\
	      : __BPF_ARG2_TP((u32)arg3, ##__VA_ARGS__)))

	return __BPF_TP_EMIT();
}

static const struct bpf_func_proto bpf_trace_printk_proto = {
	.func		= bpf_trace_printk,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_MEM,
	.arg2_type	= ARG_CONST_SIZE,
};

const struct bpf_func_proto *bpf_get_trace_printk_proto(void)
{
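	/*
	 * this program might be calling bpf_trace_printk,
	 * so allocate per-cpu printk buffers
	 */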
	trace_printk_init_buffers();

	return &bpf_trace_printk_proto;
}

static __always_inline int
get_map_perf_counter(struct bpf_map *map, u64 flags,
		     u64 *value, u64 *enabled, u64 *running)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	unsigned int cpu = smp_processor_id();
	u64 index = flags & BPF_F_INDEX_MASK;
	struct bpf_event_entry *ee;

	if (unlikely(flags & ~(BPF_F_INDEX_MASK)))
		return -EINVAL;
	if (index == BPF_F_CURRENT_CPU)
		index = cpu;
	if (unlikely(index >= array->map.max_entries))
		return -E2BIG;

	ee = READ_ONCE(array->ptrs[index]);
	if (!ee)
		return -ENOENT;

	return perf_event_read_local(ee->event, value, enabled, running);
}

BPF_CALL_2(bpf_perf_event_read, struct bpf_map *, map, u64, flags)
{
	u64 value = 0;
	int err;

	err = get_map_perf_counter(map, flags, &value, NULL, NULL);
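	/*
	 * this api is ugly since we miss [-22..-2] range of valid
	 * counter values, but that's uapi
	 */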
	if (err)
		return err;
	return value;
}

static const struct bpf_func_proto bpf_perf_event_read_proto = {
	.func		= bpf_perf_event_read,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_ANYTHING,
};

BPF_CALL_4(bpf_perf_event_read_value, struct bpf_map *, map, u64, flags,
	   struct bpf_perf_event_value *, buf, u32, size)
{
	int err = -EINVAL;

	if (unlikely(size != sizeof(struct bpf_perf_event_value)))
		goto clear;
	err = get_map_perf_counter(map, flags, &buf->counter, &buf->enabled,
				   &buf->running);
	if (unlikely(err))
		goto clear;
	return 0;
clear:
	memset(buf, 0, size);
	return err;
}

static const struct bpf_func_proto bpf_perf_event_read_value_proto = {
	.func		= bpf_perf_event_read_value,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_ANYTHING,
	.arg3_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg4_type	= ARG_CONST_SIZE,
};

static __always_inline u64
__bpf_perf_event_output(struct pt_regs *regs, struct bpf_map *map,
			u64 flags, struct perf_sample_data *sd)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	unsigned int cpu = smp_processor_id();
	u64 index = flags & BPF_F_INDEX_MASK;
	struct bpf_event_entry *ee;
	struct perf_event *event;

	if (index == BPF_F_CURRENT_CPU)
		index = cpu;
	if (unlikely(index >= array->map.max_entries))
		return -E2BIG;

	ee = READ_ONCE(array->ptrs[index]);
	if (!ee)
		return -ENOENT;

	event = ee->event;
	if (unlikely(event->attr.type != PERF_TYPE_SOFTWARE ||
		     event->attr.config != PERF_COUNT_SW_BPF_OUTPUT))
		return -EINVAL;

	if (unlikely(event->oncpu != cpu))
		return -EOPNOTSUPP;

	return perf_event_output(event, sd, regs);
}

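/*
 * Support executing tracepoints in normal, irq, and nmi context that each call
 * bpf_perf_event_output
 */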
struct bpf_trace_sample_data {
	struct perf_sample_data sds[3];
};

static DEFINE_PER_CPU(struct bpf_trace_sample_data, bpf_trace_sds);
static DEFINE_PER_CPU(int, bpf_trace_nest_level);
BPF_CALL_5(bpf_perf_event_output, struct pt_regs *, regs, struct bpf_map *, map,
	   u64, flags, void *, data, u64, size)
{
	struct bpf_trace_sample_data *sds = this_cpu_ptr(&bpf_trace_sds);
	int nest_level = this_cpu_inc_return(bpf_trace_nest_level);
	struct perf_raw_record raw = {
		.frag = {
			.size = size,
			.data = data,
		},
	};
	struct perf_sample_data *sd;
	int err;

	if (WARN_ON_ONCE(nest_level > ARRAY_SIZE(sds->sds))) {
		err = -EBUSY;
		goto out;
	}

	sd = &sds->sds[nest_level - 1];

	if (unlikely(flags & ~(BPF_F_INDEX_MASK))) {
		err = -EINVAL;
		goto out;
	}

	perf_sample_data_init(sd, 0, 0);
	sd->raw = &raw;

	err = __bpf_perf_event_output(regs, map, flags, sd);

out:
	this_cpu_dec(bpf_trace_nest_level);
	return err;
}

static const struct bpf_func_proto bpf_perf_event_output_proto = {
	.func		= bpf_perf_event_output,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_ANYTHING,
	.arg4_type	= ARG_PTR_TO_MEM,
	.arg5_type	= ARG_CONST_SIZE_OR_ZERO,
};

static DEFINE_PER_CPU(struct pt_regs, bpf_pt_regs);
static DEFINE_PER_CPU(struct perf_sample_data, bpf_misc_sd);

u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
		     void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy)
{
	struct perf_sample_data *sd = this_cpu_ptr(&bpf_misc_sd);
	struct pt_regs *regs = this_cpu_ptr(&bpf_pt_regs);
	struct perf_raw_frag frag = {
		.copy		= ctx_copy,
		.size		= ctx_size,
		.data		= ctx,
	};
	struct perf_raw_record raw = {
		.frag = {
			{
				.next	= ctx_size ? &frag : NULL,
			},
			.size	= meta_size,
			.data	= meta,
		},
	};

	perf_fetch_caller_regs(regs);
	perf_sample_data_init(sd, 0, 0);
	sd->raw = &raw;

	return __bpf_perf_event_output(regs, map, flags, sd);
}

BPF_CALL_0(bpf_get_current_task)
{
	return (long) current;
}

static const struct bpf_func_proto bpf_get_current_task_proto = {
	.func		= bpf_get_current_task,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
};

BPF_CALL_2(bpf_current_task_under_cgroup, struct bpf_map *, map, u32, idx)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	struct cgroup *cgrp;

	if (unlikely(idx >= array->map.max_entries))
		return -E2BIG;

	cgrp = READ_ONCE(array->ptrs[idx]);
	if (unlikely(!cgrp))
		return -EAGAIN;

	return task_under_cgroup_hierarchy(current, cgrp);
}

static const struct bpf_func_proto bpf_current_task_under_cgroup_proto = {
	.func		= bpf_current_task_under_cgroup,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_ANYTHING,
};

BPF_CALL_3(bpf_probe_read_str, void *, dst, u32, size,
	   const void *, unsafe_ptr)
{
	int ret;

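	/*
	 * The strncpy_from_unsafe() call will likely not fill the entire
	 * buffer, but that's okay in this circumstance as we're probing
	 * arbitrary memory anyway similar to bpf_probe_read() and might
	 * as well probe the stack. Thus, memory is explicitly cleared
	 * only in error case, so that improper users ignoring return
	 * code altogether don't copy garbage; otherwise length of string
	 * is returned that can be used for bpf_perf_event_output() et al.
	 */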
	ret = strncpy_from_unsafe(dst, unsafe_ptr, size);
	if (unlikely(ret < 0))
		memset(dst, 0, size);

	return ret;
}

static const struct bpf_func_proto bpf_probe_read_str_proto = {
	.func		= bpf_probe_read_str,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg3_type	= ARG_ANYTHING,
};

struct send_signal_irq_work {
	struct irq_work irq_work;
	struct task_struct *task;
	u32 sig;
};

static DEFINE_PER_CPU(struct send_signal_irq_work, send_signal_work);

static void do_bpf_send_signal(struct irq_work *entry)
{
	struct send_signal_irq_work *work;

	work = container_of(entry, struct send_signal_irq_work, irq_work);
	group_send_sig_info(work->sig, SEND_SIG_PRIV, work->task, PIDTYPE_TGID);
}

BPF_CALL_1(bpf_send_signal, u32, sig)
{
	struct send_signal_irq_work *work = NULL;

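	/* Similar to bpf_probe_write_user, task needs to be
	 * in a sound condition and kernel memory access be
	 * permitted in order to send signal to the current
	 * task.
	 */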
	if (unlikely(current->flags & (PF_KTHREAD | PF_EXITING)))
		return -EPERM;
	if (unlikely(uaccess_kernel()))
		return -EPERM;
	if (unlikely(!nmi_uaccess_okay()))
		return -EPERM;

	if (in_nmi()) {
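		/* Do an early check on signal validity. Otherwise,
		 * the error is lost in deferred irq_work.
		 */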
		if (unlikely(!valid_signal(sig)))
			return -EINVAL;

		work = this_cpu_ptr(&send_signal_work);
		if (work->irq_work.flags & IRQ_WORK_BUSY)
			return -EBUSY;

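		/* Add the current task, which is the target of sending
		 * signal, to the irq_work. The current task may change
		 * when queued irq works get executed.
		 */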
		work->task = current;
		work->sig = sig;
		irq_work_queue(&work->irq_work);
		return 0;
	}

	return group_send_sig_info(sig, SEND_SIG_PRIV, current, PIDTYPE_TGID);
}

static const struct bpf_func_proto bpf_send_signal_proto = {
	.func		= bpf_send_signal,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_ANYTHING,
};

static const struct bpf_func_proto *
tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_map_lookup_elem:
		return &bpf_map_lookup_elem_proto;
	case BPF_FUNC_map_update_elem:
		return &bpf_map_update_elem_proto;
	case BPF_FUNC_map_delete_elem:
		return &bpf_map_delete_elem_proto;
	case BPF_FUNC_map_push_elem:
		return &bpf_map_push_elem_proto;
	case BPF_FUNC_map_pop_elem:
		return &bpf_map_pop_elem_proto;
	case BPF_FUNC_map_peek_elem:
		return &bpf_map_peek_elem_proto;
	case BPF_FUNC_probe_read:
		return &bpf_probe_read_proto;
	case BPF_FUNC_ktime_get_ns:
		return &bpf_ktime_get_ns_proto;
	case BPF_FUNC_tail_call:
		return &bpf_tail_call_proto;
	case BPF_FUNC_get_current_pid_tgid:
		return &bpf_get_current_pid_tgid_proto;
	case BPF_FUNC_get_current_task:
		return &bpf_get_current_task_proto;
	case BPF_FUNC_get_current_uid_gid:
		return &bpf_get_current_uid_gid_proto;
	case BPF_FUNC_get_current_comm:
		return &bpf_get_current_comm_proto;
	case BPF_FUNC_trace_printk:
		return bpf_get_trace_printk_proto();
	case BPF_FUNC_get_smp_processor_id:
		return &bpf_get_smp_processor_id_proto;
	case BPF_FUNC_get_numa_node_id:
		return &bpf_get_numa_node_id_proto;
	case BPF_FUNC_perf_event_read:
		return &bpf_perf_event_read_proto;
	case BPF_FUNC_probe_write_user:
		return bpf_get_probe_write_proto();
	case BPF_FUNC_current_task_under_cgroup:
		return &bpf_current_task_under_cgroup_proto;
	case BPF_FUNC_get_prandom_u32:
		return &bpf_get_prandom_u32_proto;
	case BPF_FUNC_probe_read_str:
		return &bpf_probe_read_str_proto;
#ifdef CONFIG_CGROUPS
	case BPF_FUNC_get_current_cgroup_id:
		return &bpf_get_current_cgroup_id_proto;
#endif
	case BPF_FUNC_send_signal:
		return &bpf_send_signal_proto;
	default:
		return NULL;
	}
}

static const struct bpf_func_proto *
kprobe_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_perf_event_output:
		return &bpf_perf_event_output_proto;
	case BPF_FUNC_get_stackid:
		return &bpf_get_stackid_proto;
	case BPF_FUNC_get_stack:
		return &bpf_get_stack_proto;
	case BPF_FUNC_perf_event_read_value:
		return &bpf_perf_event_read_value_proto;
#ifdef CONFIG_BPF_KPROBE_OVERRIDE
	case BPF_FUNC_override_return:
		return &bpf_override_return_proto;
#endif
	default:
		return tracing_func_proto(func_id, prog);
	}
}

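/* bpf+kprobe programs can access fields of 'struct pt_regs' */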
static bool kprobe_prog_is_valid_access(int off, int size, enum bpf_access_type type,
					const struct bpf_prog *prog,
					struct bpf_insn_access_aux *info)
{
	if (off < 0 || off >= sizeof(struct pt_regs))
		return false;
	if (type != BPF_READ)
		return false;
	if (off % size != 0)
		return false;

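	/*
	 * Assertion for 32 bit to make sure last 8 byte access
	 * (BPF_DW) to the last 4 byte member is disallowed.
	 */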
	if (off + size > sizeof(struct pt_regs))
		return false;

	return true;
}

const struct bpf_verifier_ops kprobe_verifier_ops = {
	.get_func_proto		= kprobe_prog_func_proto,
	.is_valid_access	= kprobe_prog_is_valid_access,
};

const struct bpf_prog_ops kprobe_prog_ops = {
};

BPF_CALL_5(bpf_perf_event_output_tp, void *, tp_buff, struct bpf_map *, map,
	   u64, flags, void *, data, u64, size)
{
	struct pt_regs *regs = *(struct pt_regs **)tp_buff;

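	/*
	 * r1 points to perf tracepoint buffer where first 8 bytes are hidden
	 * from bpf program and contain a pointer to 'struct pt_regs'. Fetch it
	 * from there and call the same bpf_perf_event_output() helper inline.
	 */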
	return ____bpf_perf_event_output(regs, map, flags, data, size);
}

static const struct bpf_func_proto bpf_perf_event_output_proto_tp = {
	.func		= bpf_perf_event_output_tp,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_ANYTHING,
	.arg4_type	= ARG_PTR_TO_MEM,
	.arg5_type	= ARG_CONST_SIZE_OR_ZERO,
};

BPF_CALL_3(bpf_get_stackid_tp, void *, tp_buff, struct bpf_map *, map,
	   u64, flags)
{
	struct pt_regs *regs = *(struct pt_regs **)tp_buff;

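	/*
	 * Same comment as in bpf_perf_event_output_tp(), only that this time
	 * the other helper's function body cannot be inlined due to being
	 * external, thus we need to call raw helper function.
	 */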
	return bpf_get_stackid((unsigned long) regs, (unsigned long) map,
			       flags, 0, 0);
}

static const struct bpf_func_proto bpf_get_stackid_proto_tp = {
	.func		= bpf_get_stackid_tp,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_ANYTHING,
};

BPF_CALL_4(bpf_get_stack_tp, void *, tp_buff, void *, buf, u32, size,
	   u64, flags)
{
	struct pt_regs *regs = *(struct pt_regs **)tp_buff;

	return bpf_get_stack((unsigned long) regs, (unsigned long) buf,
			     (unsigned long) size, flags, 0);
}

static const struct bpf_func_proto bpf_get_stack_proto_tp = {
	.func		= bpf_get_stack_tp,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg3_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg4_type	= ARG_ANYTHING,
};

static const struct bpf_func_proto *
tp_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_perf_event_output:
		return &bpf_perf_event_output_proto_tp;
	case BPF_FUNC_get_stackid:
		return &bpf_get_stackid_proto_tp;
	case BPF_FUNC_get_stack:
		return &bpf_get_stack_proto_tp;
	default:
		return tracing_func_proto(func_id, prog);
	}
}

static bool tp_prog_is_valid_access(int off, int size, enum bpf_access_type type,
				    const struct bpf_prog *prog,
				    struct bpf_insn_access_aux *info)
{
	if (off < sizeof(void *) || off >= PERF_MAX_TRACE_SIZE)
		return false;
	if (type != BPF_READ)
		return false;
	if (off % size != 0)
		return false;

	BUILD_BUG_ON(PERF_MAX_TRACE_SIZE % sizeof(__u64));
	return true;
}

const struct bpf_verifier_ops tracepoint_verifier_ops = {
	.get_func_proto		= tp_prog_func_proto,
	.is_valid_access	= tp_prog_is_valid_access,
};

const struct bpf_prog_ops tracepoint_prog_ops = {
};

BPF_CALL_3(bpf_perf_prog_read_value, struct bpf_perf_event_data_kern *, ctx,
	   struct bpf_perf_event_value *, buf, u32, size)
{
	int err = -EINVAL;

	if (unlikely(size != sizeof(struct bpf_perf_event_value)))
		goto clear;
	err = perf_event_read_local(ctx->event, &buf->counter, &buf->enabled,
				    &buf->running);
	if (unlikely(err))
		goto clear;
	return 0;
clear:
	memset(buf, 0, size);
	return err;
}

static const struct bpf_func_proto bpf_perf_prog_read_value_proto = {
	.func		= bpf_perf_prog_read_value,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg3_type	= ARG_CONST_SIZE,
};

static const struct bpf_func_proto *
pe_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_perf_event_output:
		return &bpf_perf_event_output_proto_tp;
	case BPF_FUNC_get_stackid:
		return &bpf_get_stackid_proto_tp;
	case BPF_FUNC_get_stack:
		return &bpf_get_stack_proto_tp;
	case BPF_FUNC_perf_prog_read_value:
		return &bpf_perf_prog_read_value_proto;
	default:
		return tracing_func_proto(func_id, prog);
	}
}

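/*
 * Raw tracepoint helpers can nest (normal, irq and nmi context), so keep
 * a small per-CPU stack of pt_regs and hand out one slot per nesting level.
 */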
struct bpf_raw_tp_regs {
	struct pt_regs regs[3];
};
static DEFINE_PER_CPU(struct bpf_raw_tp_regs, bpf_raw_tp_regs);
static DEFINE_PER_CPU(int, bpf_raw_tp_nest_level);
static struct pt_regs *get_bpf_raw_tp_regs(void)
{
	struct bpf_raw_tp_regs *tp_regs = this_cpu_ptr(&bpf_raw_tp_regs);
	int nest_level = this_cpu_inc_return(bpf_raw_tp_nest_level);

	if (WARN_ON_ONCE(nest_level > ARRAY_SIZE(tp_regs->regs))) {
		this_cpu_dec(bpf_raw_tp_nest_level);
		return ERR_PTR(-EBUSY);
	}

	return &tp_regs->regs[nest_level - 1];
}

static void put_bpf_raw_tp_regs(void)
{
	this_cpu_dec(bpf_raw_tp_nest_level);
}

BPF_CALL_5(bpf_perf_event_output_raw_tp, struct bpf_raw_tracepoint_args *, args,
	   struct bpf_map *, map, u64, flags, void *, data, u64, size)
{
	struct pt_regs *regs = get_bpf_raw_tp_regs();
	int ret;

	if (IS_ERR(regs))
		return PTR_ERR(regs);

	perf_fetch_caller_regs(regs);
	ret = ____bpf_perf_event_output(regs, map, flags, data, size);

	put_bpf_raw_tp_regs();
	return ret;
}

static const struct bpf_func_proto bpf_perf_event_output_proto_raw_tp = {
	.func		= bpf_perf_event_output_raw_tp,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_ANYTHING,
	.arg4_type	= ARG_PTR_TO_MEM,
	.arg5_type	= ARG_CONST_SIZE_OR_ZERO,
};

BPF_CALL_3(bpf_get_stackid_raw_tp, struct bpf_raw_tracepoint_args *, args,
	   struct bpf_map *, map, u64, flags)
{
	struct pt_regs *regs = get_bpf_raw_tp_regs();
	int ret;

	if (IS_ERR(regs))
		return PTR_ERR(regs);

	perf_fetch_caller_regs(regs);
	ret = bpf_get_stackid((unsigned long) regs, (unsigned long) map,
			      flags, 0, 0);
	put_bpf_raw_tp_regs();
	return ret;
}

static const struct bpf_func_proto bpf_get_stackid_proto_raw_tp = {
	.func		= bpf_get_stackid_raw_tp,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_ANYTHING,
};

BPF_CALL_4(bpf_get_stack_raw_tp, struct bpf_raw_tracepoint_args *, args,
	   void *, buf, u32, size, u64, flags)
{
	struct pt_regs *regs = get_bpf_raw_tp_regs();
	int ret;

	if (IS_ERR(regs))
		return PTR_ERR(regs);

	perf_fetch_caller_regs(regs);
	ret = bpf_get_stack((unsigned long) regs, (unsigned long) buf,
			    (unsigned long) size, flags, 0);
	put_bpf_raw_tp_regs();
	return ret;
}

static const struct bpf_func_proto bpf_get_stack_proto_raw_tp = {
	.func		= bpf_get_stack_raw_tp,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_PTR_TO_MEM,
	.arg3_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg4_type	= ARG_ANYTHING,
};

static const struct bpf_func_proto *
raw_tp_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_perf_event_output:
		return &bpf_perf_event_output_proto_raw_tp;
	case BPF_FUNC_get_stackid:
		return &bpf_get_stackid_proto_raw_tp;
	case BPF_FUNC_get_stack:
		return &bpf_get_stack_proto_raw_tp;
	default:
		return tracing_func_proto(func_id, prog);
	}
}

static bool raw_tp_prog_is_valid_access(int off, int size,
					enum bpf_access_type type,
					const struct bpf_prog *prog,
					struct bpf_insn_access_aux *info)
{
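	/* largest tracepoint in the kernel has 12 args */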
	if (off < 0 || off >= sizeof(__u64) * 12)
		return false;
	if (type != BPF_READ)
		return false;
	if (off % size != 0)
		return false;
	return true;
}

const struct bpf_verifier_ops raw_tracepoint_verifier_ops = {
	.get_func_proto  = raw_tp_prog_func_proto,
	.is_valid_access = raw_tp_prog_is_valid_access,
};

const struct bpf_prog_ops raw_tracepoint_prog_ops = {
};

static bool raw_tp_writable_prog_is_valid_access(int off, int size,
						 enum bpf_access_type type,
						 const struct bpf_prog *prog,
						 struct bpf_insn_access_aux *info)
{
	if (off == 0) {
		if (size != sizeof(u64) || type != BPF_READ)
			return false;
		info->reg_type = PTR_TO_TP_BUFFER;
	}
	return raw_tp_prog_is_valid_access(off, size, type, prog, info);
}

const struct bpf_verifier_ops raw_tracepoint_writable_verifier_ops = {
	.get_func_proto  = raw_tp_prog_func_proto,
	.is_valid_access = raw_tp_writable_prog_is_valid_access,
};

const struct bpf_prog_ops raw_tracepoint_writable_prog_ops = {
};

static bool pe_prog_is_valid_access(int off, int size, enum bpf_access_type type,
				    const struct bpf_prog *prog,
				    struct bpf_insn_access_aux *info)
{
	const int size_u64 = sizeof(u64);

	if (off < 0 || off >= sizeof(struct bpf_perf_event_data))
		return false;
	if (type != BPF_READ)
		return false;
	if (off % size != 0) {
		if (sizeof(unsigned long) != 4)
			return false;
		if (size != 8)
			return false;
		if (off % size != 4)
			return false;
	}

	switch (off) {
	case bpf_ctx_range(struct bpf_perf_event_data, sample_period):
		bpf_ctx_record_field_size(info, size_u64);
		if (!bpf_ctx_narrow_access_ok(off, size, size_u64))
			return false;
		break;
	case bpf_ctx_range(struct bpf_perf_event_data, addr):
		bpf_ctx_record_field_size(info, size_u64);
		if (!bpf_ctx_narrow_access_ok(off, size, size_u64))
			return false;
		break;
	default:
		if (size != sizeof(long))
			return false;
	}

	return true;
}

static u32 pe_prog_convert_ctx_access(enum bpf_access_type type,
				      const struct bpf_insn *si,
				      struct bpf_insn *insn_buf,
				      struct bpf_prog *prog, u32 *target_size)
{
	struct bpf_insn *insn = insn_buf;

	switch (si->off) {
	case offsetof(struct bpf_perf_event_data, sample_period):
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern,
						       data), si->dst_reg, si->src_reg,
				      offsetof(struct bpf_perf_event_data_kern, data));
		*insn++ = BPF_LDX_MEM(BPF_DW, si->dst_reg, si->dst_reg,
				      bpf_target_off(struct perf_sample_data, period, 8,
						     target_size));
		break;
	case offsetof(struct bpf_perf_event_data, addr):
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern,
						       data), si->dst_reg, si->src_reg,
				      offsetof(struct bpf_perf_event_data_kern, data));
		*insn++ = BPF_LDX_MEM(BPF_DW, si->dst_reg, si->dst_reg,
				      bpf_target_off(struct perf_sample_data, addr, 8,
						     target_size));
		break;
	default:
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern,
						       regs), si->dst_reg, si->src_reg,
				      offsetof(struct bpf_perf_event_data_kern, regs));
		*insn++ = BPF_LDX_MEM(BPF_SIZEOF(long), si->dst_reg, si->dst_reg,
				      si->off);
		break;
	}

	return insn - insn_buf;
}

const struct bpf_verifier_ops perf_event_verifier_ops = {
	.get_func_proto		= pe_prog_func_proto,
	.is_valid_access	= pe_prog_is_valid_access,
	.convert_ctx_access	= pe_prog_convert_ctx_access,
};

const struct bpf_prog_ops perf_event_prog_ops = {
};

static DEFINE_MUTEX(bpf_event_mutex);

#define BPF_TRACE_MAX_PROGS 64

int perf_event_attach_bpf_prog(struct perf_event *event,
			       struct bpf_prog *prog)
{
	struct bpf_prog_array *old_array;
	struct bpf_prog_array *new_array;
	int ret = -EEXIST;

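	/*
	 * Kprobe override only works if they are attached to an entry kprobe
	 * and only if the probed function is annotated as error injectable.
	 */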
	if (prog->kprobe_override &&
	    (!trace_kprobe_on_func_entry(event->tp_event) ||
	     !trace_kprobe_error_injectable(event->tp_event)))
		return -EINVAL;

	mutex_lock(&bpf_event_mutex);

	if (event->prog)
		goto unlock;

	old_array = bpf_event_rcu_dereference(event->tp_event->prog_array);
	if (old_array &&
	    bpf_prog_array_length(old_array) >= BPF_TRACE_MAX_PROGS) {
		ret = -E2BIG;
		goto unlock;
	}

	ret = bpf_prog_array_copy(old_array, NULL, prog, &new_array);
	if (ret < 0)
		goto unlock;

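	/* set the new array to event->tp_event and set event->prog */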
	event->prog = prog;
	rcu_assign_pointer(event->tp_event->prog_array, new_array);
	bpf_prog_array_free(old_array);

unlock:
	mutex_unlock(&bpf_event_mutex);
	return ret;
}

void perf_event_detach_bpf_prog(struct perf_event *event)
{
	struct bpf_prog_array *old_array;
	struct bpf_prog_array *new_array;
	int ret;

	mutex_lock(&bpf_event_mutex);

	if (!event->prog)
		goto unlock;

	old_array = bpf_event_rcu_dereference(event->tp_event->prog_array);
	ret = bpf_prog_array_copy(old_array, event->prog, NULL, &new_array);
	if (ret == -ENOENT)
		goto unlock;
	if (ret < 0) {
		bpf_prog_array_delete_safe(old_array, event->prog);
	} else {
		rcu_assign_pointer(event->tp_event->prog_array, new_array);
		bpf_prog_array_free(old_array);
	}

	bpf_prog_put(event->prog);
	event->prog = NULL;

unlock:
	mutex_unlock(&bpf_event_mutex);
}

int perf_event_query_prog_array(struct perf_event *event, void __user *info)
{
	struct perf_event_query_bpf __user *uquery = info;
	struct perf_event_query_bpf query = {};
	struct bpf_prog_array *progs;
	u32 *ids, prog_cnt, ids_len;
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	if (event->attr.type != PERF_TYPE_TRACEPOINT)
		return -EINVAL;
	if (copy_from_user(&query, uquery, sizeof(query)))
		return -EFAULT;

	ids_len = query.ids_len;
	if (ids_len > BPF_TRACE_MAX_PROGS)
		return -E2BIG;
	ids = kcalloc(ids_len, sizeof(u32), GFP_USER | __GFP_NOWARN);
	if (!ids)
		return -ENOMEM;
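	/*
	 * The above kcalloc returns ZERO_SIZE_PTR when ids_len = 0, which
	 * is required when user only wants to check for uquery->prog_cnt.
	 * There is no need to check for it since the case is handled
	 * gracefully in bpf_prog_array_copy_info.
	 */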
	mutex_lock(&bpf_event_mutex);
	progs = bpf_event_rcu_dereference(event->tp_event->prog_array);
	ret = bpf_prog_array_copy_info(progs, ids, ids_len, &prog_cnt);
	mutex_unlock(&bpf_event_mutex);

	if (copy_to_user(&uquery->prog_cnt, &prog_cnt, sizeof(prog_cnt)) ||
	    copy_to_user(uquery->ids, ids, ids_len * sizeof(u32)))
		ret = -EFAULT;

	kfree(ids);
	return ret;
}

extern struct bpf_raw_event_map __start__bpf_raw_tp[];
extern struct bpf_raw_event_map __stop__bpf_raw_tp[];

struct bpf_raw_event_map *bpf_get_raw_tracepoint(const char *name)
{
	struct bpf_raw_event_map *btp = __start__bpf_raw_tp;

	for (; btp < __stop__bpf_raw_tp; btp++) {
		if (!strcmp(btp->tp->name, name))
			return btp;
	}

	return bpf_get_raw_tracepoint_module(name);
}

void bpf_put_raw_tracepoint(struct bpf_raw_event_map *btp)
{
	struct module *mod = __module_address((unsigned long)btp);

	if (mod)
		module_put(mod);
}

static __always_inline
void __bpf_trace_run(struct bpf_prog *prog, u64 *args)
{
	rcu_read_lock();
	preempt_disable();
	(void) BPF_PROG_RUN(prog, args);
	preempt_enable();
	rcu_read_unlock();
}

#define UNPACK(...)			__VA_ARGS__
#define REPEAT_1(FN, DL, X, ...)	FN(X)
#define REPEAT_2(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_1(FN, DL, __VA_ARGS__)
#define REPEAT_3(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_2(FN, DL, __VA_ARGS__)
#define REPEAT_4(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_3(FN, DL, __VA_ARGS__)
#define REPEAT_5(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_4(FN, DL, __VA_ARGS__)
#define REPEAT_6(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_5(FN, DL, __VA_ARGS__)
#define REPEAT_7(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_6(FN, DL, __VA_ARGS__)
#define REPEAT_8(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_7(FN, DL, __VA_ARGS__)
#define REPEAT_9(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_8(FN, DL, __VA_ARGS__)
#define REPEAT_10(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_9(FN, DL, __VA_ARGS__)
#define REPEAT_11(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_10(FN, DL, __VA_ARGS__)
#define REPEAT_12(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_11(FN, DL, __VA_ARGS__)
#define REPEAT(X, FN, DL, ...)		REPEAT_##X(FN, DL, __VA_ARGS__)

#define SARG(X)		u64 arg##X
#define COPY(X)		args[X] = arg##X

#define __DL_COM	(,)
#define __DL_SEM	(;)

#define __SEQ_0_11	0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11

#define BPF_TRACE_DEFN_x(x)						\
	void bpf_trace_run##x(struct bpf_prog *prog,			\
			      REPEAT(x, SARG, __DL_COM, __SEQ_0_11))	\
	{								\
		u64 args[x];						\
		REPEAT(x, COPY, __DL_SEM, __SEQ_0_11);			\
		__bpf_trace_run(prog, args);				\
	}								\
	EXPORT_SYMBOL_GPL(bpf_trace_run##x)
BPF_TRACE_DEFN_x(1);
BPF_TRACE_DEFN_x(2);
BPF_TRACE_DEFN_x(3);
BPF_TRACE_DEFN_x(4);
BPF_TRACE_DEFN_x(5);
BPF_TRACE_DEFN_x(6);
BPF_TRACE_DEFN_x(7);
BPF_TRACE_DEFN_x(8);
BPF_TRACE_DEFN_x(9);
BPF_TRACE_DEFN_x(10);
BPF_TRACE_DEFN_x(11);
BPF_TRACE_DEFN_x(12);

static int __bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_prog *prog)
{
	struct tracepoint *tp = btp->tp;

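	/*
	 * check that program doesn't access arbitrary number of bytes beyond
	 * what the tracepoint provides: each argument is passed as a u64, so
	 * num_args * sizeof(u64) bounds the valid ctx offset range.
	 */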
	if (prog->aux->max_ctx_offset > btp->num_args * sizeof(u64))
		return -EINVAL;

	if (prog->aux->max_tp_access > btp->writable_size)
		return -EINVAL;

	return tracepoint_probe_register(tp, (void *)btp->bpf_func, prog);
}

int bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_prog *prog)
{
	return __bpf_probe_register(btp, prog);
}

int bpf_probe_unregister(struct bpf_raw_event_map *btp, struct bpf_prog *prog)
{
	return tracepoint_probe_unregister(btp->tp, (void *)btp->bpf_func, prog);
}

int bpf_get_perf_event_info(const struct perf_event *event, u32 *prog_id,
			    u32 *fd_type, const char **buf,
			    u64 *probe_offset, u64 *probe_addr)
{
	bool is_tracepoint, is_syscall_tp;
	struct bpf_prog *prog;
	int flags, err = 0;

	prog = event->prog;
	if (!prog)
		return -ENOENT;

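	/* not supporting BPF_PROG_TYPE_PERF_EVENT yet */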
	if (prog->type == BPF_PROG_TYPE_PERF_EVENT)
		return -EOPNOTSUPP;

	*prog_id = prog->aux->id;
	flags = event->tp_event->flags;
	is_tracepoint = flags & TRACE_EVENT_FL_TRACEPOINT;
	is_syscall_tp = is_syscall_trace_event(event->tp_event);

	if (is_tracepoint || is_syscall_tp) {
		*buf = is_tracepoint ? event->tp_event->tp->name
				     : event->tp_event->name;
		*fd_type = BPF_FD_TYPE_TRACEPOINT;
		*probe_offset = 0x0;
		*probe_addr = 0x0;
	} else {
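		/* kprobe/uprobe */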
		err = -EOPNOTSUPP;
#ifdef CONFIG_KPROBE_EVENTS
		if (flags & TRACE_EVENT_FL_KPROBE)
			err = bpf_get_kprobe_info(event, fd_type, buf,
						  probe_offset, probe_addr,
						  event->attr.type == PERF_TYPE_TRACEPOINT);
#endif
#ifdef CONFIG_UPROBE_EVENTS
		if (flags & TRACE_EVENT_FL_UPROBE)
			err = bpf_get_uprobe_info(event, fd_type, buf,
						  probe_offset,
						  event->attr.type == PERF_TYPE_TRACEPOINT);
#endif
	}

	return err;
}

static int __init send_signal_irq_work_init(void)
{
	int cpu;
	struct send_signal_irq_work *work;

	for_each_possible_cpu(cpu) {
		work = per_cpu_ptr(&send_signal_work, cpu);
		init_irq_work(&work->irq_work, do_bpf_send_signal);
	}
	return 0;
}

subsys_initcall(send_signal_irq_work_init);

#ifdef CONFIG_MODULES
static int bpf_event_notify(struct notifier_block *nb, unsigned long op,
			    void *module)
{
	struct bpf_trace_module *btm, *tmp;
	struct module *mod = module;

	if (mod->num_bpf_raw_events == 0 ||
	    (op != MODULE_STATE_COMING && op != MODULE_STATE_GOING))
		return 0;

	mutex_lock(&bpf_module_mutex);

	switch (op) {
	case MODULE_STATE_COMING:
		btm = kzalloc(sizeof(*btm), GFP_KERNEL);
		if (btm) {
			btm->module = module;
			list_add(&btm->list, &bpf_trace_modules);
		}
		break;
	case MODULE_STATE_GOING:
		list_for_each_entry_safe(btm, tmp, &bpf_trace_modules, list) {
			if (btm->module == module) {
				list_del(&btm->list);
				kfree(btm);
				break;
			}
		}
		break;
	}

	mutex_unlock(&bpf_module_mutex);

	return 0;
}

static struct notifier_block bpf_module_nb = {
	.notifier_call = bpf_event_notify,
};

static int __init bpf_event_init(void)
{
	register_module_notifier(&bpf_module_nb);
	return 0;
}

fs_initcall(bpf_event_init);
#endif /* CONFIG_MODULES */