// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2011-2015 PLUMgrid, http://plumgrid.com
 * Copyright (c) 2016 Facebook
 */
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/bpf.h>
#include <linux/bpf_perf_event.h>
#include <linux/filter.h>
#include <linux/uaccess.h>
#include <linux/ctype.h>
#include <linux/kprobes.h>
#include <linux/syscalls.h>
#include <linux/error-injection.h>

#include "trace_probe.h"
#include "trace.h"

u64 bpf_get_stackid(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
u64 bpf_get_stack(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);

/**
 * trace_call_bpf - invoke BPF program
 * @call: tracepoint event
 * @ctx: opaque context pointer
 *
 * kprobe handlers execute BPF programs via this helper.
 * Can be used from static tracepoints in the future.
 *
 * Return: BPF programs always return an integer which is interpreted by
 * kprobe handler as:
 * 0 - return from kprobe (event is filtered out)
 * 1 - store kprobe event into ring buffer
 * Other values are reserved and currently alias to 1
 */
unsigned int trace_call_bpf(struct trace_event_call *call, void *ctx)
{
	unsigned int ret;

	if (in_nmi()) /* not supported yet */
		return 1;

	preempt_disable();

	if (unlikely(__this_cpu_inc_return(bpf_prog_active) != 1)) {
		/*
		 * since some bpf program is already running on this cpu,
		 * don't call into another bpf program (same or different)
		 * and don't send kprobe event into ring-buffer,
		 * so return zero here
		 */
		ret = 0;
		goto out;
	}

	/*
	 * Callers only check bpf_prog_array_valid() as a fast-path heuristic
	 * before getting here; the prog_array is dereferenced again under the
	 * RCU read lock taken inside BPF_PROG_RUN_ARRAY_CHECK(), so an array
	 * that was concurrently emptied or freed is still handled safely and
	 * simply yields the default return value.
	 */
	ret = BPF_PROG_RUN_ARRAY_CHECK(call->prog_array, ctx, BPF_PROG_RUN);

 out:
	__this_cpu_dec(bpf_prog_active);
	preempt_enable();

	return ret;
}
EXPORT_SYMBOL_GPL(trace_call_bpf);

#ifdef CONFIG_BPF_KPROBE_OVERRIDE
BPF_CALL_2(bpf_override_return, struct pt_regs *, regs, unsigned long, rc)
{
	regs_set_return_value(regs, rc);
	override_function_with_return(regs);
	return 0;
}

static const struct bpf_func_proto bpf_override_return_proto = {
	.func = bpf_override_return,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_CTX,
	.arg2_type = ARG_ANYTHING,
};
#endif

BPF_CALL_3(bpf_probe_read, void *, dst, u32, size, const void *, unsafe_ptr)
{
	int ret;

	ret = probe_kernel_read(dst, unsafe_ptr, size);
	if (unlikely(ret < 0))
		memset(dst, 0, size);

	return ret;
}

static const struct bpf_func_proto bpf_probe_read_proto = {
	.func = bpf_probe_read,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_UNINIT_MEM,
	.arg2_type = ARG_CONST_SIZE_OR_ZERO,
	.arg3_type = ARG_ANYTHING,
};

BPF_CALL_3(bpf_probe_write_user, void *, unsafe_ptr, const void *, src,
	   u32, size)
{
	/*
	 * Ensure we're in user context which is safe for the helper to
	 * run. This helper has no business in a kthread.
	 *
	 * access_ok() should prevent writing to non-user memory, but in
	 * some situations (nommu, temporary switch, etc) access_ok() does
	 * not provide enough validation, hence the check on KERNEL_DS.
	 */

	if (unlikely(in_interrupt() ||
		     current->flags & (PF_KTHREAD | PF_EXITING)))
		return -EPERM;
	if (unlikely(uaccess_kernel()))
		return -EPERM;
	if (!access_ok(VERIFY_WRITE, unsafe_ptr, size))
		return -EPERM;

	return probe_kernel_write(unsafe_ptr, src, size);
}

static const struct bpf_func_proto bpf_probe_write_user_proto = {
	.func = bpf_probe_write_user,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_ANYTHING,
	.arg2_type = ARG_PTR_TO_MEM,
	.arg3_type = ARG_CONST_SIZE,
};

static const struct bpf_func_proto *bpf_get_probe_write_proto(void)
{
	pr_warn_ratelimited("%s[%d] is installing a program with bpf_probe_write_user helper that may corrupt user memory!",
			    current->comm, task_pid_nr(current));

	return &bpf_probe_write_user_proto;
}

/*
 * Only limited trace_printk() conversion specifiers allowed:
 * %d %i %u %x %ld %li %lu %lx %lld %lli %llu %llx %p %s
 */
BPF_CALL_5(bpf_trace_printk, char *, fmt, u32, fmt_size, u64, arg1,
	   u64, arg2, u64, arg3)
{
	bool str_seen = false;
	int mod[3] = {};
	int fmt_cnt = 0;
	u64 unsafe_addr;
	char buf[64];
	int i;

	/*
	 * bpf_check()->check_func_arg()->check_stack_boundary()
	 * guarantees that fmt points to bpf program stack,
	 * fmt_size bytes of it were initialized and fmt_size > 0
	 */
	if (fmt[--fmt_size] != 0)
		return -EINVAL;

	/* check format string for allowed specifiers */
	for (i = 0; i < fmt_size; i++) {
		if ((!isprint(fmt[i]) && !isspace(fmt[i])) || !isascii(fmt[i]))
			return -EINVAL;

		if (fmt[i] != '%')
			continue;

		if (fmt_cnt >= 3)
			return -EINVAL;

		/* fmt[i] != 0 && fmt[last] == 0, so we can access fmt[i + 1] */
		i++;
		if (fmt[i] == 'l') {
			mod[fmt_cnt]++;
			i++;
		} else if (fmt[i] == 'p' || fmt[i] == 's') {
			mod[fmt_cnt]++;
			/* disallow any further format extensions */
			if (fmt[i + 1] != 0 &&
			    !isspace(fmt[i + 1]) &&
			    !ispunct(fmt[i + 1]))
				return -EINVAL;
			fmt_cnt++;
			if (fmt[i] == 's') {
				if (str_seen)
					/* allow only one '%s' per fmt string */
					return -EINVAL;
				str_seen = true;

				switch (fmt_cnt) {
				case 1:
					unsafe_addr = arg1;
					arg1 = (long) buf;
					break;
				case 2:
					unsafe_addr = arg2;
					arg2 = (long) buf;
					break;
				case 3:
					unsafe_addr = arg3;
					arg3 = (long) buf;
					break;
				}
				buf[0] = 0;
				strncpy_from_unsafe(buf,
						    (void *) (long) unsafe_addr,
						    sizeof(buf));
			}
			continue;
		}

		if (fmt[i] == 'l') {
			mod[fmt_cnt]++;
			i++;
		}

		if (fmt[i] != 'i' && fmt[i] != 'd' &&
		    fmt[i] != 'u' && fmt[i] != 'x')
			return -EINVAL;
		fmt_cnt++;
	}

/* Horrid workaround for getting va_list handling working with different
 * argument type combinations generically for 32 and 64 bit archs.
 */
#define __BPF_TP_EMIT()	__BPF_ARG3_TP()
#define __BPF_TP(...) \
	__trace_printk(0 /* Fake ip */, \
		       fmt, ##__VA_ARGS__)

#define __BPF_ARG1_TP(...) \
	((mod[0] == 2 || (mod[0] == 1 && __BITS_PER_LONG == 64)) \
	  ? __BPF_TP(arg1, ##__VA_ARGS__) \
	  : ((mod[0] == 1 || (mod[0] == 0 && __BITS_PER_LONG == 32)) \
	      ? __BPF_TP((long)arg1, ##__VA_ARGS__) \
	      : __BPF_TP((u32)arg1, ##__VA_ARGS__)))

#define __BPF_ARG2_TP(...) \
	((mod[1] == 2 || (mod[1] == 1 && __BITS_PER_LONG == 64)) \
	  ? __BPF_ARG1_TP(arg2, ##__VA_ARGS__) \
	  : ((mod[1] == 1 || (mod[1] == 0 && __BITS_PER_LONG == 32)) \
	      ? __BPF_ARG1_TP((long)arg2, ##__VA_ARGS__) \
	      : __BPF_ARG1_TP((u32)arg2, ##__VA_ARGS__)))

#define __BPF_ARG3_TP(...) \
	((mod[2] == 2 || (mod[2] == 1 && __BITS_PER_LONG == 64)) \
	  ? __BPF_ARG2_TP(arg3, ##__VA_ARGS__) \
	  : ((mod[2] == 1 || (mod[2] == 0 && __BITS_PER_LONG == 32)) \
	      ? __BPF_ARG2_TP((long)arg3, ##__VA_ARGS__) \
	      : __BPF_ARG2_TP((u32)arg3, ##__VA_ARGS__)))

	return __BPF_TP_EMIT();
}

static const struct bpf_func_proto bpf_trace_printk_proto = {
	.func = bpf_trace_printk,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_MEM,
	.arg2_type = ARG_CONST_SIZE,
};

const struct bpf_func_proto *bpf_get_trace_printk_proto(void)
{
	/*
	 * this program might be calling bpf_trace_printk,
	 * so allocate per-cpu printk buffers
	 */
	trace_printk_init_buffers();

	return &bpf_trace_printk_proto;
}

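/*
 * Read the value of the perf counter stored in the slot of a
 * BPF_MAP_TYPE_PERF_EVENT_ARRAY map selected by the low bits of @flags
 * (BPF_F_CURRENT_CPU picks the executing CPU's slot). The event must be
 * readable from the current context for perf_event_read_local() to succeed.
 */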
static __always_inline int
get_map_perf_counter(struct bpf_map *map, u64 flags,
		     u64 *value, u64 *enabled, u64 *running)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	unsigned int cpu = smp_processor_id();
	u64 index = flags & BPF_F_INDEX_MASK;
	struct bpf_event_entry *ee;

	if (unlikely(flags & ~(BPF_F_INDEX_MASK)))
		return -EINVAL;
	if (index == BPF_F_CURRENT_CPU)
		index = cpu;
	if (unlikely(index >= array->map.max_entries))
		return -E2BIG;

	ee = READ_ONCE(array->ptrs[index]);
	if (!ee)
		return -ENOENT;

	return perf_event_read_local(ee->event, value, enabled, running);
}

BPF_CALL_2(bpf_perf_event_read, struct bpf_map *, map, u64, flags)
{
	u64 value = 0;
	int err;

	err = get_map_perf_counter(map, flags, &value, NULL, NULL);
	/*
	 * this api is ugly since we miss [-22..-2] range of valid
	 * counter values, but that's uapi
	 */
	if (err)
		return err;
	return value;
}

static const struct bpf_func_proto bpf_perf_event_read_proto = {
	.func = bpf_perf_event_read,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_CONST_MAP_PTR,
	.arg2_type = ARG_ANYTHING,
};

BPF_CALL_4(bpf_perf_event_read_value, struct bpf_map *, map, u64, flags,
	   struct bpf_perf_event_value *, buf, u32, size)
{
	int err = -EINVAL;

	if (unlikely(size != sizeof(struct bpf_perf_event_value)))
		goto clear;
	err = get_map_perf_counter(map, flags, &buf->counter, &buf->enabled,
				   &buf->running);
	if (unlikely(err))
		goto clear;
	return 0;
clear:
	memset(buf, 0, size);
	return err;
}

static const struct bpf_func_proto bpf_perf_event_read_value_proto = {
	.func = bpf_perf_event_read_value,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_CONST_MAP_PTR,
	.arg2_type = ARG_ANYTHING,
	.arg3_type = ARG_PTR_TO_UNINIT_MEM,
	.arg4_type = ARG_CONST_SIZE,
};

static DEFINE_PER_CPU(struct perf_sample_data, bpf_trace_sd);

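/*
 * Common path for emitting a sample into a perf event picked out of a
 * BPF_MAP_TYPE_PERF_EVENT_ARRAY map. The target must be a software
 * PERF_COUNT_SW_BPF_OUTPUT event bound to the current CPU.
 */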
static __always_inline u64
__bpf_perf_event_output(struct pt_regs *regs, struct bpf_map *map,
			u64 flags, struct perf_sample_data *sd)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	unsigned int cpu = smp_processor_id();
	u64 index = flags & BPF_F_INDEX_MASK;
	struct bpf_event_entry *ee;
	struct perf_event *event;

	if (index == BPF_F_CURRENT_CPU)
		index = cpu;
	if (unlikely(index >= array->map.max_entries))
		return -E2BIG;

	ee = READ_ONCE(array->ptrs[index]);
	if (!ee)
		return -ENOENT;

	event = ee->event;
	if (unlikely(event->attr.type != PERF_TYPE_SOFTWARE ||
		     event->attr.config != PERF_COUNT_SW_BPF_OUTPUT))
		return -EINVAL;

	if (unlikely(event->oncpu != cpu))
		return -EOPNOTSUPP;

	perf_event_output(event, sd, regs);
	return 0;
}

BPF_CALL_5(bpf_perf_event_output, struct pt_regs *, regs, struct bpf_map *, map,
	   u64, flags, void *, data, u64, size)
{
	struct perf_sample_data *sd = this_cpu_ptr(&bpf_trace_sd);
	struct perf_raw_record raw = {
		.frag = {
			.size = size,
			.data = data,
		},
	};

	if (unlikely(flags & ~(BPF_F_INDEX_MASK)))
		return -EINVAL;

	perf_sample_data_init(sd, 0, 0);
	sd->raw = &raw;

	return __bpf_perf_event_output(regs, map, flags, sd);
}

static const struct bpf_func_proto bpf_perf_event_output_proto = {
	.func = bpf_perf_event_output,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_CTX,
	.arg2_type = ARG_CONST_MAP_PTR,
	.arg3_type = ARG_ANYTHING,
	.arg4_type = ARG_PTR_TO_MEM,
	.arg5_type = ARG_CONST_SIZE_OR_ZERO,
};

static DEFINE_PER_CPU(struct pt_regs, bpf_pt_regs);
static DEFINE_PER_CPU(struct perf_sample_data, bpf_misc_sd);

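/*
 * Variant of bpf_perf_event_output() used by other program types (e.g. the
 * networking helpers): an extra ctx fragment can be appended via @ctx_copy,
 * and separate per-cpu scratch pt_regs/sample data are used so the tracing
 * helper's scratch area above is not clobbered.
 */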
u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
		     void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy)
{
	struct perf_sample_data *sd = this_cpu_ptr(&bpf_misc_sd);
	struct pt_regs *regs = this_cpu_ptr(&bpf_pt_regs);
	struct perf_raw_frag frag = {
		.copy = ctx_copy,
		.size = ctx_size,
		.data = ctx,
	};
	struct perf_raw_record raw = {
		.frag = {
			{
				.next = ctx_size ? &frag : NULL,
			},
			.size = meta_size,
			.data = meta,
		},
	};

	perf_fetch_caller_regs(regs);
	perf_sample_data_init(sd, 0, 0);
	sd->raw = &raw;

	return __bpf_perf_event_output(regs, map, flags, sd);
}

BPF_CALL_0(bpf_get_current_task)
{
	return (long) current;
}

static const struct bpf_func_proto bpf_get_current_task_proto = {
	.func = bpf_get_current_task,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
};

BPF_CALL_2(bpf_current_task_under_cgroup, struct bpf_map *, map, u32, idx)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	struct cgroup *cgrp;

	if (unlikely(idx >= array->map.max_entries))
		return -E2BIG;

	cgrp = READ_ONCE(array->ptrs[idx]);
	if (unlikely(!cgrp))
		return -EAGAIN;

	return task_under_cgroup_hierarchy(current, cgrp);
}

static const struct bpf_func_proto bpf_current_task_under_cgroup_proto = {
	.func = bpf_current_task_under_cgroup,
	.gpl_only = false,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_CONST_MAP_PTR,
	.arg2_type = ARG_ANYTHING,
};

BPF_CALL_3(bpf_probe_read_str, void *, dst, u32, size,
	   const void *, unsafe_ptr)
{
	int ret;

	/*
	 * The strncpy_from_unsafe() call will likely not fill the whole
	 * buffer, but that's okay in this circumstance as we're probing
	 * arbitrary memory anyway similar to bpf_probe_read(). Thus,
	 * memory is explicitly cleared only in the error case, so that
	 * improper users ignoring the return value altogether, a
	 * substantial risk in this case, will get whatever they put in
	 * the buffer verbatim on success, or a zeroed buffer on error.
	 */
	ret = strncpy_from_unsafe(dst, unsafe_ptr, size);
	if (unlikely(ret < 0))
		memset(dst, 0, size);

	return ret;
}

static const struct bpf_func_proto bpf_probe_read_str_proto = {
	.func = bpf_probe_read_str,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_UNINIT_MEM,
	.arg2_type = ARG_CONST_SIZE_OR_ZERO,
	.arg3_type = ARG_ANYTHING,
};

static const struct bpf_func_proto *
tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_map_lookup_elem:
		return &bpf_map_lookup_elem_proto;
	case BPF_FUNC_map_update_elem:
		return &bpf_map_update_elem_proto;
	case BPF_FUNC_map_delete_elem:
		return &bpf_map_delete_elem_proto;
	case BPF_FUNC_probe_read:
		return &bpf_probe_read_proto;
	case BPF_FUNC_ktime_get_ns:
		return &bpf_ktime_get_ns_proto;
	case BPF_FUNC_tail_call:
		return &bpf_tail_call_proto;
	case BPF_FUNC_get_current_pid_tgid:
		return &bpf_get_current_pid_tgid_proto;
	case BPF_FUNC_get_current_task:
		return &bpf_get_current_task_proto;
	case BPF_FUNC_get_current_uid_gid:
		return &bpf_get_current_uid_gid_proto;
	case BPF_FUNC_get_current_comm:
		return &bpf_get_current_comm_proto;
	case BPF_FUNC_trace_printk:
		return bpf_get_trace_printk_proto();
	case BPF_FUNC_get_smp_processor_id:
		return &bpf_get_smp_processor_id_proto;
	case BPF_FUNC_get_numa_node_id:
		return &bpf_get_numa_node_id_proto;
	case BPF_FUNC_perf_event_read:
		return &bpf_perf_event_read_proto;
	case BPF_FUNC_probe_write_user:
		return bpf_get_probe_write_proto();
	case BPF_FUNC_current_task_under_cgroup:
		return &bpf_current_task_under_cgroup_proto;
	case BPF_FUNC_get_prandom_u32:
		return &bpf_get_prandom_u32_proto;
	case BPF_FUNC_probe_read_str:
		return &bpf_probe_read_str_proto;
#ifdef CONFIG_CGROUPS
	case BPF_FUNC_get_current_cgroup_id:
		return &bpf_get_current_cgroup_id_proto;
#endif
	default:
		return NULL;
	}
}

static const struct bpf_func_proto *
kprobe_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_perf_event_output:
		return &bpf_perf_event_output_proto;
	case BPF_FUNC_get_stackid:
		return &bpf_get_stackid_proto;
	case BPF_FUNC_get_stack:
		return &bpf_get_stack_proto;
	case BPF_FUNC_perf_event_read_value:
		return &bpf_perf_event_read_value_proto;
#ifdef CONFIG_BPF_KPROBE_OVERRIDE
	case BPF_FUNC_override_return:
		return &bpf_override_return_proto;
#endif
	default:
		return tracing_func_proto(func_id, prog);
	}
}

/* bpf+kprobe programs can access fields of 'struct pt_regs' */
static bool kprobe_prog_is_valid_access(int off, int size, enum bpf_access_type type,
					const struct bpf_prog *prog,
					struct bpf_insn_access_aux *info)
{
	if (off < 0 || off >= sizeof(struct pt_regs))
		return false;
	if (type != BPF_READ)
		return false;
	if (off % size != 0)
		return false;
	/*
	 * Assertion for 32 bit to make sure last 8 byte access
	 * (BPF_DW) to the last 4 byte member is disallowed.
	 */
	if (off + size > sizeof(struct pt_regs))
		return false;

	return true;
}

const struct bpf_verifier_ops kprobe_verifier_ops = {
	.get_func_proto = kprobe_prog_func_proto,
	.is_valid_access = kprobe_prog_is_valid_access,
};

const struct bpf_prog_ops kprobe_prog_ops = {
};

BPF_CALL_5(bpf_perf_event_output_tp, void *, tp_buff, struct bpf_map *, map,
	   u64, flags, void *, data, u64, size)
{
	struct pt_regs *regs = *(struct pt_regs **)tp_buff;

	/*
	 * r1 points to perf tracepoint buffer where first 8 bytes are hidden
	 * from bpf program and contain a pointer to 'struct pt_regs'. Fetch it
	 * from there and call the same bpf_perf_event_output() helper inline.
	 */
	return ____bpf_perf_event_output(regs, map, flags, data, size);
}

static const struct bpf_func_proto bpf_perf_event_output_proto_tp = {
	.func = bpf_perf_event_output_tp,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_CTX,
	.arg2_type = ARG_CONST_MAP_PTR,
	.arg3_type = ARG_ANYTHING,
	.arg4_type = ARG_PTR_TO_MEM,
	.arg5_type = ARG_CONST_SIZE_OR_ZERO,
};

BPF_CALL_3(bpf_get_stackid_tp, void *, tp_buff, struct bpf_map *, map,
	   u64, flags)
{
	struct pt_regs *regs = *(struct pt_regs **)tp_buff;

	/*
	 * Same as in bpf_perf_event_output_tp(): the first 8 bytes of the
	 * tracepoint buffer hold the saved 'struct pt_regs' pointer, which
	 * is reused here for the stack walk.
	 */
	return bpf_get_stackid((unsigned long) regs, (unsigned long) map,
			       flags, 0, 0);
}

static const struct bpf_func_proto bpf_get_stackid_proto_tp = {
	.func = bpf_get_stackid_tp,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_CTX,
	.arg2_type = ARG_CONST_MAP_PTR,
	.arg3_type = ARG_ANYTHING,
};

BPF_CALL_4(bpf_get_stack_tp, void *, tp_buff, void *, buf, u32, size,
	   u64, flags)
{
	struct pt_regs *regs = *(struct pt_regs **)tp_buff;

	return bpf_get_stack((unsigned long) regs, (unsigned long) buf,
			     (unsigned long) size, flags, 0);
}

static const struct bpf_func_proto bpf_get_stack_proto_tp = {
	.func = bpf_get_stack_tp,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_CTX,
	.arg2_type = ARG_PTR_TO_UNINIT_MEM,
	.arg3_type = ARG_CONST_SIZE_OR_ZERO,
	.arg4_type = ARG_ANYTHING,
};

static const struct bpf_func_proto *
tp_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_perf_event_output:
		return &bpf_perf_event_output_proto_tp;
	case BPF_FUNC_get_stackid:
		return &bpf_get_stackid_proto_tp;
	case BPF_FUNC_get_stack:
		return &bpf_get_stack_proto_tp;
	default:
		return tracing_func_proto(func_id, prog);
	}
}

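/*
 * Tracepoint programs may only read their context, and never the first
 * sizeof(void *) bytes of it: as seen in the helpers above, that hidden
 * field holds the saved 'struct pt_regs' pointer.
 */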
static bool tp_prog_is_valid_access(int off, int size, enum bpf_access_type type,
				    const struct bpf_prog *prog,
				    struct bpf_insn_access_aux *info)
{
	if (off < sizeof(void *) || off >= PERF_MAX_TRACE_SIZE)
		return false;
	if (type != BPF_READ)
		return false;
	if (off % size != 0)
		return false;

	BUILD_BUG_ON(PERF_MAX_TRACE_SIZE % sizeof(__u64));
	return true;
}

const struct bpf_verifier_ops tracepoint_verifier_ops = {
	.get_func_proto = tp_prog_func_proto,
	.is_valid_access = tp_prog_is_valid_access,
};

const struct bpf_prog_ops tracepoint_prog_ops = {
};

BPF_CALL_3(bpf_perf_prog_read_value, struct bpf_perf_event_data_kern *, ctx,
	   struct bpf_perf_event_value *, buf, u32, size)
{
	int err = -EINVAL;

	if (unlikely(size != sizeof(struct bpf_perf_event_value)))
		goto clear;
	err = perf_event_read_local(ctx->event, &buf->counter, &buf->enabled,
				    &buf->running);
	if (unlikely(err))
		goto clear;
	return 0;
clear:
	memset(buf, 0, size);
	return err;
}

static const struct bpf_func_proto bpf_perf_prog_read_value_proto = {
	.func = bpf_perf_prog_read_value,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_CTX,
	.arg2_type = ARG_PTR_TO_UNINIT_MEM,
	.arg3_type = ARG_CONST_SIZE,
};

static const struct bpf_func_proto *
pe_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_perf_event_output:
		return &bpf_perf_event_output_proto_tp;
	case BPF_FUNC_get_stackid:
		return &bpf_get_stackid_proto_tp;
	case BPF_FUNC_get_stack:
		return &bpf_get_stack_proto_tp;
	case BPF_FUNC_perf_prog_read_value:
		return &bpf_perf_prog_read_value_proto;
	default:
		return tracing_func_proto(func_id, prog);
	}
}

/*
 * Raw tracepoint programs receive the tracepoint arguments directly and
 * carry no pt_regs. For helpers that need regs (perf_event_output,
 * get_stackid, get_stack) the caller's registers are captured into a
 * per-cpu scratch area instead.
 */
static DEFINE_PER_CPU(struct pt_regs, bpf_raw_tp_regs);
BPF_CALL_5(bpf_perf_event_output_raw_tp, struct bpf_raw_tracepoint_args *, args,
	   struct bpf_map *, map, u64, flags, void *, data, u64, size)
{
	struct pt_regs *regs = this_cpu_ptr(&bpf_raw_tp_regs);

	perf_fetch_caller_regs(regs);
	return ____bpf_perf_event_output(regs, map, flags, data, size);
}

static const struct bpf_func_proto bpf_perf_event_output_proto_raw_tp = {
	.func = bpf_perf_event_output_raw_tp,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_CTX,
	.arg2_type = ARG_CONST_MAP_PTR,
	.arg3_type = ARG_ANYTHING,
	.arg4_type = ARG_PTR_TO_MEM,
	.arg5_type = ARG_CONST_SIZE_OR_ZERO,
};

BPF_CALL_3(bpf_get_stackid_raw_tp, struct bpf_raw_tracepoint_args *, args,
	   struct bpf_map *, map, u64, flags)
{
	struct pt_regs *regs = this_cpu_ptr(&bpf_raw_tp_regs);

	perf_fetch_caller_regs(regs);
	/* similar to bpf_perf_event_output_tp, but pt_regs fetched differently */
	return bpf_get_stackid((unsigned long) regs, (unsigned long) map,
			       flags, 0, 0);
}

static const struct bpf_func_proto bpf_get_stackid_proto_raw_tp = {
	.func = bpf_get_stackid_raw_tp,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_CTX,
	.arg2_type = ARG_CONST_MAP_PTR,
	.arg3_type = ARG_ANYTHING,
};

BPF_CALL_4(bpf_get_stack_raw_tp, struct bpf_raw_tracepoint_args *, args,
	   void *, buf, u32, size, u64, flags)
{
	struct pt_regs *regs = this_cpu_ptr(&bpf_raw_tp_regs);

	perf_fetch_caller_regs(regs);
	return bpf_get_stack((unsigned long) regs, (unsigned long) buf,
			     (unsigned long) size, flags, 0);
}

static const struct bpf_func_proto bpf_get_stack_proto_raw_tp = {
	.func = bpf_get_stack_raw_tp,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_CTX,
	.arg2_type = ARG_PTR_TO_MEM,
	.arg3_type = ARG_CONST_SIZE_OR_ZERO,
	.arg4_type = ARG_ANYTHING,
};

static const struct bpf_func_proto *
raw_tp_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_perf_event_output:
		return &bpf_perf_event_output_proto_raw_tp;
	case BPF_FUNC_get_stackid:
		return &bpf_get_stackid_proto_raw_tp;
	case BPF_FUNC_get_stack:
		return &bpf_get_stack_proto_raw_tp;
	default:
		return tracing_func_proto(func_id, prog);
	}
}

static bool raw_tp_prog_is_valid_access(int off, int size,
					enum bpf_access_type type,
					const struct bpf_prog *prog,
					struct bpf_insn_access_aux *info)
{
	/* largest tracepoint in the kernel has 12 args */
	if (off < 0 || off >= sizeof(__u64) * 12)
		return false;
	if (type != BPF_READ)
		return false;
	if (off % size != 0)
		return false;
	return true;
}

const struct bpf_verifier_ops raw_tracepoint_verifier_ops = {
	.get_func_proto = raw_tp_prog_func_proto,
	.is_valid_access = raw_tp_prog_is_valid_access,
};

const struct bpf_prog_ops raw_tracepoint_prog_ops = {
};

static bool pe_prog_is_valid_access(int off, int size, enum bpf_access_type type,
				    const struct bpf_prog *prog,
				    struct bpf_insn_access_aux *info)
{
	const int size_u64 = sizeof(u64);

	if (off < 0 || off >= sizeof(struct bpf_perf_event_data))
		return false;
	if (type != BPF_READ)
		return false;
	if (off % size != 0) {
		if (sizeof(unsigned long) != 4)
			return false;
		if (size != 8)
			return false;
		if (off % size != 4)
			return false;
	}

	switch (off) {
	case bpf_ctx_range(struct bpf_perf_event_data, sample_period):
		bpf_ctx_record_field_size(info, size_u64);
		if (!bpf_ctx_narrow_access_ok(off, size, size_u64))
			return false;
		break;
	case bpf_ctx_range(struct bpf_perf_event_data, addr):
		bpf_ctx_record_field_size(info, size_u64);
		if (!bpf_ctx_narrow_access_ok(off, size, size_u64))
			return false;
		break;
	default:
		if (size != sizeof(long))
			return false;
	}

	return true;
}

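/*
 * Rewrite loads from the UAPI view (struct bpf_perf_event_data) into two
 * loads via the kernel-internal struct bpf_perf_event_data_kern: first the
 * data/regs pointer, then the requested field behind it.
 */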
static u32 pe_prog_convert_ctx_access(enum bpf_access_type type,
				      const struct bpf_insn *si,
				      struct bpf_insn *insn_buf,
				      struct bpf_prog *prog, u32 *target_size)
{
	struct bpf_insn *insn = insn_buf;

	switch (si->off) {
	case offsetof(struct bpf_perf_event_data, sample_period):
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern,
						       data), si->dst_reg, si->src_reg,
				      offsetof(struct bpf_perf_event_data_kern, data));
		*insn++ = BPF_LDX_MEM(BPF_DW, si->dst_reg, si->dst_reg,
				      bpf_target_off(struct perf_sample_data, period, 8,
						     target_size));
		break;
	case offsetof(struct bpf_perf_event_data, addr):
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern,
						       data), si->dst_reg, si->src_reg,
				      offsetof(struct bpf_perf_event_data_kern, data));
		*insn++ = BPF_LDX_MEM(BPF_DW, si->dst_reg, si->dst_reg,
				      bpf_target_off(struct perf_sample_data, addr, 8,
						     target_size));
		break;
	default:
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern,
						       regs), si->dst_reg, si->src_reg,
				      offsetof(struct bpf_perf_event_data_kern, regs));
		*insn++ = BPF_LDX_MEM(BPF_SIZEOF(long), si->dst_reg, si->dst_reg,
				      si->off);
		break;
	}

	return insn - insn_buf;
}

const struct bpf_verifier_ops perf_event_verifier_ops = {
	.get_func_proto = pe_prog_func_proto,
	.is_valid_access = pe_prog_is_valid_access,
	.convert_ctx_access = pe_prog_convert_ctx_access,
};

const struct bpf_prog_ops perf_event_prog_ops = {
};

static DEFINE_MUTEX(bpf_event_mutex);

#define BPF_TRACE_MAX_PROGS 64

int perf_event_attach_bpf_prog(struct perf_event *event,
			       struct bpf_prog *prog)
{
	struct bpf_prog_array __rcu *old_array;
	struct bpf_prog_array *new_array;
	int ret = -EEXIST;

	/*
	 * Kprobe override only works if they are attached to the function
	 * entry, and only if they are on the opt-in list.
	 */
	if (prog->kprobe_override &&
	    (!trace_kprobe_on_func_entry(event->tp_event) ||
	     !trace_kprobe_error_injectable(event->tp_event)))
		return -EINVAL;

	mutex_lock(&bpf_event_mutex);

	if (event->prog)
		goto unlock;

	old_array = event->tp_event->prog_array;
	if (old_array &&
	    bpf_prog_array_length(old_array) >= BPF_TRACE_MAX_PROGS) {
		ret = -E2BIG;
		goto unlock;
	}

	ret = bpf_prog_array_copy(old_array, NULL, prog, &new_array);
	if (ret < 0)
		goto unlock;

	/* set the new array to event->tp_event and set event->prog */
	event->prog = prog;
	rcu_assign_pointer(event->tp_event->prog_array, new_array);
	bpf_prog_array_free(old_array);

unlock:
	mutex_unlock(&bpf_event_mutex);
	return ret;
}

void perf_event_detach_bpf_prog(struct perf_event *event)
{
	struct bpf_prog_array __rcu *old_array;
	struct bpf_prog_array *new_array;
	int ret;

	mutex_lock(&bpf_event_mutex);

	if (!event->prog)
		goto unlock;

	old_array = event->tp_event->prog_array;
	ret = bpf_prog_array_copy(old_array, event->prog, NULL, &new_array);
	if (ret == -ENOENT)
		goto unlock;
	if (ret < 0) {
		bpf_prog_array_delete_safe(old_array, event->prog);
	} else {
		rcu_assign_pointer(event->tp_event->prog_array, new_array);
		bpf_prog_array_free(old_array);
	}

	bpf_prog_put(event->prog);
	event->prog = NULL;

unlock:
	mutex_unlock(&bpf_event_mutex);
}

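/*
 * Handler for the PERF_EVENT_IOC_QUERY_BPF ioctl: copy the ids of the BPF
 * programs attached to a tracepoint perf event back to user space.
 */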
int perf_event_query_prog_array(struct perf_event *event, void __user *info)
{
	struct perf_event_query_bpf __user *uquery = info;
	struct perf_event_query_bpf query = {};
	u32 *ids, prog_cnt, ids_len;
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	if (event->attr.type != PERF_TYPE_TRACEPOINT)
		return -EINVAL;
	if (copy_from_user(&query, uquery, sizeof(query)))
		return -EFAULT;

	ids_len = query.ids_len;
	if (ids_len > BPF_TRACE_MAX_PROGS)
		return -E2BIG;
	ids = kcalloc(ids_len, sizeof(u32), GFP_USER | __GFP_NOWARN);
	if (!ids)
		return -ENOMEM;
	/*
	 * The above kcalloc returns ZERO_SIZE_PTR when ids_len = 0, which
	 * is required when user only wants to check for uquery->prog_cnt.
	 * There is no need to check for it since the case is handled
	 * gracefully in bpf_prog_array_copy_info.
	 */

	mutex_lock(&bpf_event_mutex);
	ret = bpf_prog_array_copy_info(event->tp_event->prog_array,
				       ids,
				       ids_len,
				       &prog_cnt);
	mutex_unlock(&bpf_event_mutex);

	if (copy_to_user(&uquery->prog_cnt, &prog_cnt, sizeof(prog_cnt)) ||
	    copy_to_user(uquery->ids, ids, ids_len * sizeof(u32)))
		ret = -EFAULT;

	kfree(ids);
	return ret;
}

extern struct bpf_raw_event_map __start__bpf_raw_tp[];
extern struct bpf_raw_event_map __stop__bpf_raw_tp[];

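/*
 * Linear scan of the __bpf_raw_tp section for a raw tracepoint with the
 * given name; returns NULL if this kernel does not provide it.
 */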
struct bpf_raw_event_map *bpf_find_raw_tracepoint(const char *name)
{
	struct bpf_raw_event_map *btp = __start__bpf_raw_tp;

	for (; btp < __stop__bpf_raw_tp; btp++) {
		if (!strcmp(btp->tp->name, name))
			return btp;
	}
	return NULL;
}

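/*
 * Run the program with the tracepoint arguments packed into an array of
 * u64s; BPF programs expect to execute under the RCU read lock and with
 * preemption disabled.
 */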
static __always_inline
void __bpf_trace_run(struct bpf_prog *prog, u64 *args)
{
	rcu_read_lock();
	preempt_disable();
	(void) BPF_PROG_RUN(prog, args);
	preempt_enable();
	rcu_read_unlock();
}

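/*
 * Preprocessor machinery used to stamp out bpf_trace_run1()..bpf_trace_run12():
 * REPEAT(x, FN, DL, ...) expands FN() over the first x items of __SEQ_0_11,
 * separated by the delimiter DL, building both the u64 parameter list (SARG)
 * and the statements that copy those parameters into the args array (COPY).
 */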
#define UNPACK(...) __VA_ARGS__
#define REPEAT_1(FN, DL, X, ...) FN(X)
#define REPEAT_2(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_1(FN, DL, __VA_ARGS__)
#define REPEAT_3(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_2(FN, DL, __VA_ARGS__)
#define REPEAT_4(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_3(FN, DL, __VA_ARGS__)
#define REPEAT_5(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_4(FN, DL, __VA_ARGS__)
#define REPEAT_6(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_5(FN, DL, __VA_ARGS__)
#define REPEAT_7(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_6(FN, DL, __VA_ARGS__)
#define REPEAT_8(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_7(FN, DL, __VA_ARGS__)
#define REPEAT_9(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_8(FN, DL, __VA_ARGS__)
#define REPEAT_10(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_9(FN, DL, __VA_ARGS__)
#define REPEAT_11(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_10(FN, DL, __VA_ARGS__)
#define REPEAT_12(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_11(FN, DL, __VA_ARGS__)
#define REPEAT(X, FN, DL, ...) REPEAT_##X(FN, DL, __VA_ARGS__)

#define SARG(X) u64 arg##X
#define COPY(X) args[X] = arg##X

#define __DL_COM (,)
#define __DL_SEM (;)

#define __SEQ_0_11 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11

#define BPF_TRACE_DEFN_x(x)						\
	void bpf_trace_run##x(struct bpf_prog *prog,			\
			      REPEAT(x, SARG, __DL_COM, __SEQ_0_11))	\
	{								\
		u64 args[x];						\
		REPEAT(x, COPY, __DL_SEM, __SEQ_0_11);			\
		__bpf_trace_run(prog, args);				\
	}								\
	EXPORT_SYMBOL_GPL(bpf_trace_run##x)
BPF_TRACE_DEFN_x(1);
BPF_TRACE_DEFN_x(2);
BPF_TRACE_DEFN_x(3);
BPF_TRACE_DEFN_x(4);
BPF_TRACE_DEFN_x(5);
BPF_TRACE_DEFN_x(6);
BPF_TRACE_DEFN_x(7);
BPF_TRACE_DEFN_x(8);
BPF_TRACE_DEFN_x(9);
BPF_TRACE_DEFN_x(10);
BPF_TRACE_DEFN_x(11);
BPF_TRACE_DEFN_x(12);

static int __bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_prog *prog)
{
	struct tracepoint *tp = btp->tp;

	/*
	 * check that program doesn't access arbitrary memory by loading
	 * ctx offsets beyond the number of u64 arguments this tracepoint
	 * actually provides
	 */
	if (prog->aux->max_ctx_offset > btp->num_args * sizeof(u64))
		return -EINVAL;

	return tracepoint_probe_register(tp, (void *)btp->bpf_func, prog);
}

int bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_prog *prog)
{
	int err;

	mutex_lock(&bpf_event_mutex);
	err = __bpf_probe_register(btp, prog);
	mutex_unlock(&bpf_event_mutex);
	return err;
}

int bpf_probe_unregister(struct bpf_raw_event_map *btp, struct bpf_prog *prog)
{
	int err;

	mutex_lock(&bpf_event_mutex);
	err = tracepoint_probe_unregister(btp->tp, (void *)btp->bpf_func, prog);
	mutex_unlock(&bpf_event_mutex);
	return err;
}

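/*
 * Fill in, for the BPF_TASK_FD_QUERY command, which program is attached to
 * this perf event and what it is attached to (tracepoint, kprobe or uprobe).
 */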
int bpf_get_perf_event_info(const struct perf_event *event, u32 *prog_id,
			    u32 *fd_type, const char **buf,
			    u64 *probe_offset, u64 *probe_addr)
{
	bool is_tracepoint, is_syscall_tp;
	struct bpf_prog *prog;
	int flags, err = 0;

	prog = event->prog;
	if (!prog)
		return -ENOENT;

	/* not supporting BPF_PROG_TYPE_PERF_EVENT yet */
	if (prog->type == BPF_PROG_TYPE_PERF_EVENT)
		return -EOPNOTSUPP;

	*prog_id = prog->aux->id;
	flags = event->tp_event->flags;
	is_tracepoint = flags & TRACE_EVENT_FL_TRACEPOINT;
	is_syscall_tp = is_syscall_trace_event(event->tp_event);

	if (is_tracepoint || is_syscall_tp) {
		*buf = is_tracepoint ? event->tp_event->tp->name
				     : event->tp_event->name;
		*fd_type = BPF_FD_TYPE_TRACEPOINT;
		*probe_offset = 0x0;
		*probe_addr = 0x0;
	} else {
		/* kprobe/uprobe */
		err = -EOPNOTSUPP;
#ifdef CONFIG_KPROBE_EVENTS
		if (flags & TRACE_EVENT_FL_KPROBE)
			err = bpf_get_kprobe_info(event, fd_type, buf,
						  probe_offset, probe_addr,
						  event->attr.type == PERF_TYPE_TRACEPOINT);
#endif
#ifdef CONFIG_UPROBE_EVENTS
		if (flags & TRACE_EVENT_FL_UPROBE)
			err = bpf_get_uprobe_info(event, fd_type, buf,
						  probe_offset,
						  event->attr.type == PERF_TYPE_TRACEPOINT);
#endif
	}

	return err;
}