// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2011-2015 PLUMgrid, http://plumgrid.com
 * Copyright (c) 2016 Facebook
 */
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/bpf.h>
#include <linux/bpf_perf_event.h>
#include <linux/btf.h>
#include <linux/filter.h>
#include <linux/uaccess.h>
#include <linux/ctype.h>
#include <linux/kprobes.h>
#include <linux/spinlock.h>
#include <linux/syscalls.h>
#include <linux/error-injection.h>
#include <linux/btf_ids.h>

#include <uapi/linux/bpf.h>
#include <uapi/linux/btf.h>

#include <asm/tlb.h>

#include "trace_probe.h"
#include "trace.h"

#define CREATE_TRACE_POINTS
#include "bpf_trace.h"

#define bpf_event_rcu_dereference(p)					\
	rcu_dereference_protected(p, lockdep_is_held(&bpf_event_mutex))

#ifdef CONFIG_MODULES
struct bpf_trace_module {
	struct module *module;
	struct list_head list;
};

static LIST_HEAD(bpf_trace_modules);
static DEFINE_MUTEX(bpf_module_mutex);

static struct bpf_raw_event_map *bpf_get_raw_tracepoint_module(const char *name)
{
	struct bpf_raw_event_map *btp, *ret = NULL;
	struct bpf_trace_module *btm;
	unsigned int i;

	mutex_lock(&bpf_module_mutex);
	list_for_each_entry(btm, &bpf_trace_modules, list) {
		for (i = 0; i < btm->module->num_bpf_raw_events; ++i) {
			btp = &btm->module->bpf_raw_events[i];
			if (!strcmp(btp->tp->name, name)) {
				if (try_module_get(btm->module))
					ret = btp;
				goto out;
			}
		}
	}
out:
	mutex_unlock(&bpf_module_mutex);
	return ret;
}
#else
static struct bpf_raw_event_map *bpf_get_raw_tracepoint_module(const char *name)
{
	return NULL;
}
#endif

u64 bpf_get_stackid(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
u64 bpf_get_stack(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);

static int bpf_btf_printf_prepare(struct btf_ptr *ptr, u32 btf_ptr_size,
				  u64 flags, const struct btf **btf,
				  s32 *btf_id);

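/**
 * trace_call_bpf - invoke BPF program
 * @call: tracepoint event
 * @ctx: opaque context pointer
 *
 * kprobe handlers execute BPF programs via this helper.
 * Can be used from static tracepoints in the future.
 *
 * Return: BPF programs always return an integer which is interpreted by
 * kprobe handler as:
 * 0 - return from kprobe (event is filtered out)
 * 1 - store kprobe event into ring buffer
 * Other values are reserved and currently alias to 1
 */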
unsigned int trace_call_bpf(struct trace_event_call *call, void *ctx)
{
	unsigned int ret;

	if (in_nmi())
		return 1;

	cant_sleep();

	if (unlikely(__this_cpu_inc_return(bpf_prog_active) != 1)) {
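		/*
		 * since some bpf program is already running on this cpu,
		 * don't call into another bpf program (same or different)
		 * and don't send kprobe event into ring-buffer,
		 * so return zero here
		 */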
		ret = 0;
		goto out;
	}

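	/*
	 * Callers are expected to check bpf_prog_array_valid() first, as a
	 * heuristic that avoids this path when no program is attached. The
	 * array is dereferenced again here and may have become empty in the
	 * meantime; BPF_PROG_RUN_ARRAY_CHECK handles that case.
	 */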
	ret = BPF_PROG_RUN_ARRAY_CHECK(call->prog_array, ctx, BPF_PROG_RUN);

 out:
	__this_cpu_dec(bpf_prog_active);

	return ret;
}

#ifdef CONFIG_BPF_KPROBE_OVERRIDE
BPF_CALL_2(bpf_override_return, struct pt_regs *, regs, unsigned long, rc)
{
	regs_set_return_value(regs, rc);
	override_function_with_return(regs);
	return 0;
}

static const struct bpf_func_proto bpf_override_return_proto = {
	.func		= bpf_override_return,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_ANYTHING,
};
#endif

static __always_inline int
bpf_probe_read_user_common(void *dst, u32 size, const void __user *unsafe_ptr)
{
	int ret;

	ret = copy_from_user_nofault(dst, unsafe_ptr, size);
	if (unlikely(ret < 0))
		memset(dst, 0, size);
	return ret;
}

BPF_CALL_3(bpf_probe_read_user, void *, dst, u32, size,
	   const void __user *, unsafe_ptr)
{
	return bpf_probe_read_user_common(dst, size, unsafe_ptr);
}

const struct bpf_func_proto bpf_probe_read_user_proto = {
	.func		= bpf_probe_read_user,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg3_type	= ARG_ANYTHING,
};

static __always_inline int
bpf_probe_read_user_str_common(void *dst, u32 size,
			       const void __user *unsafe_ptr)
{
	int ret;

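	/*
	 * NB: We rely on strncpy_from_user() not copying junk past the NUL
	 * terminator into `dst`.
	 *
	 * strncpy_from_user() does long-sized strides in the fast path. If the
	 * strncpy does not mask out the bytes after the NUL in `unsafe_ptr`,
	 * then there could be junk after the NUL in `dst`. If user takes `dst`
	 * and keys a hash map with it, then semantically identical strings can
	 * occur as different keys, hence we must mask out the bytes after the
	 * NUL in `unsafe_ptr`.
	 */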
	ret = strncpy_from_user_nofault(dst, unsafe_ptr, size);
	if (unlikely(ret < 0))
		memset(dst, 0, size);
	return ret;
}

BPF_CALL_3(bpf_probe_read_user_str, void *, dst, u32, size,
	   const void __user *, unsafe_ptr)
{
	return bpf_probe_read_user_str_common(dst, size, unsafe_ptr);
}

const struct bpf_func_proto bpf_probe_read_user_str_proto = {
	.func		= bpf_probe_read_user_str,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg3_type	= ARG_ANYTHING,
};

static __always_inline int
bpf_probe_read_kernel_common(void *dst, u32 size, const void *unsafe_ptr)
{
	int ret = security_locked_down(LOCKDOWN_BPF_READ);

	if (unlikely(ret < 0))
		goto fail;
	ret = copy_from_kernel_nofault(dst, unsafe_ptr, size);
	if (unlikely(ret < 0))
		goto fail;
	return ret;
fail:
	memset(dst, 0, size);
	return ret;
}

BPF_CALL_3(bpf_probe_read_kernel, void *, dst, u32, size,
	   const void *, unsafe_ptr)
{
	return bpf_probe_read_kernel_common(dst, size, unsafe_ptr);
}

const struct bpf_func_proto bpf_probe_read_kernel_proto = {
	.func		= bpf_probe_read_kernel,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg3_type	= ARG_ANYTHING,
};

static __always_inline int
bpf_probe_read_kernel_str_common(void *dst, u32 size, const void *unsafe_ptr)
{
	int ret = security_locked_down(LOCKDOWN_BPF_READ);

	if (unlikely(ret < 0))
		goto fail;

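	/*
	 * Same contract as the user variant: copy a NUL-terminated string
	 * from unsafe kernel memory, clearing the destination on failure so
	 * a BPF program never observes uninitialized bytes.
	 */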
	ret = strncpy_from_kernel_nofault(dst, unsafe_ptr, size);
	if (unlikely(ret < 0))
		goto fail;

	return ret;
fail:
	memset(dst, 0, size);
	return ret;
}

BPF_CALL_3(bpf_probe_read_kernel_str, void *, dst, u32, size,
	   const void *, unsafe_ptr)
{
	return bpf_probe_read_kernel_str_common(dst, size, unsafe_ptr);
}

const struct bpf_func_proto bpf_probe_read_kernel_str_proto = {
	.func		= bpf_probe_read_kernel_str,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg3_type	= ARG_ANYTHING,
};

#ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
BPF_CALL_3(bpf_probe_read_compat, void *, dst, u32, size,
	   const void *, unsafe_ptr)
{
	if ((unsigned long)unsafe_ptr < TASK_SIZE) {
		return bpf_probe_read_user_common(dst, size,
				(__force void __user *)unsafe_ptr);
	}
	return bpf_probe_read_kernel_common(dst, size, unsafe_ptr);
}

static const struct bpf_func_proto bpf_probe_read_compat_proto = {
	.func		= bpf_probe_read_compat,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg3_type	= ARG_ANYTHING,
};

BPF_CALL_3(bpf_probe_read_compat_str, void *, dst, u32, size,
	   const void *, unsafe_ptr)
{
	if ((unsigned long)unsafe_ptr < TASK_SIZE) {
		return bpf_probe_read_user_str_common(dst, size,
				(__force void __user *)unsafe_ptr);
	}
	return bpf_probe_read_kernel_str_common(dst, size, unsafe_ptr);
}

static const struct bpf_func_proto bpf_probe_read_compat_str_proto = {
	.func		= bpf_probe_read_compat_str,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg3_type	= ARG_ANYTHING,
};
#endif

BPF_CALL_3(bpf_probe_write_user, void __user *, unsafe_ptr, const void *, src,
	   u32, size)
{
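	/*
	 * Ensure we're in user context which is safe for the helper to
	 * run. This helper has no business in a kthread.
	 *
	 * access_ok() should prevent writing to non-user memory, but in
	 * some situations (nommu, temporary switch, etc) access_ok() does
	 * not provide enough validation, hence the check on KERNEL_DS.
	 *
	 * nmi_uaccess_okay() ensures the probe is not run in an NMI
	 * state where it is not safe to write to user memory.
	 */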
	if (unlikely(in_interrupt() ||
		     current->flags & (PF_KTHREAD | PF_EXITING)))
		return -EPERM;
	if (unlikely(uaccess_kernel()))
		return -EPERM;
	if (unlikely(!nmi_uaccess_okay()))
		return -EPERM;

	return copy_to_user_nofault(unsafe_ptr, src, size);
}

static const struct bpf_func_proto bpf_probe_write_user_proto = {
	.func		= bpf_probe_write_user,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_ANYTHING,
	.arg2_type	= ARG_PTR_TO_MEM,
	.arg3_type	= ARG_CONST_SIZE,
};

static const struct bpf_func_proto *bpf_get_probe_write_proto(void)
{
	if (!capable(CAP_SYS_ADMIN))
		return NULL;

	pr_warn_ratelimited("%s[%d] is installing a program with bpf_probe_write_user helper that may corrupt user memory!",
			    current->comm, task_pid_nr(current));

	return &bpf_probe_write_user_proto;
}

static void bpf_trace_copy_string(char *buf, void *unsafe_ptr, char fmt_ptype,
		size_t bufsz)
{
	void __user *user_ptr = (__force void __user *)unsafe_ptr;

	buf[0] = 0;

	switch (fmt_ptype) {
	case 's':
#ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
		if ((unsigned long)unsafe_ptr < TASK_SIZE) {
			strncpy_from_user_nofault(buf, user_ptr, bufsz);
			break;
		}
		fallthrough;
#endif
	case 'k':
		strncpy_from_kernel_nofault(buf, unsafe_ptr, bufsz);
		break;
	case 'u':
		strncpy_from_user_nofault(buf, user_ptr, bufsz);
		break;
	}
}

static DEFINE_RAW_SPINLOCK(trace_printk_lock);

#define BPF_TRACE_PRINTK_SIZE	1024

static __printf(1, 0) int bpf_do_trace_printk(const char *fmt, ...)
{
	static char buf[BPF_TRACE_PRINTK_SIZE];
	unsigned long flags;
	va_list ap;
	int ret;

	raw_spin_lock_irqsave(&trace_printk_lock, flags);
	va_start(ap, fmt);
	ret = vsnprintf(buf, sizeof(buf), fmt, ap);
	va_end(ap);
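	/* vsnprintf() will not append null for zero-length strings */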
	if (ret == 0)
		buf[0] = '\0';
	trace_bpf_trace_printk(buf);
	raw_spin_unlock_irqrestore(&trace_printk_lock, flags);

	return ret;
}

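/*
 * Only limited trace_printk() conversion specifiers allowed:
 * %d %i %u %x %ld %li %lu %lx %lld %lli %llu %llx %p %pB %pks %pus %s
 */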
BPF_CALL_5(bpf_trace_printk, char *, fmt, u32, fmt_size, u64, arg1,
	   u64, arg2, u64, arg3)
{
	int i, mod[3] = {}, fmt_cnt = 0;
	char buf[64], fmt_ptype;
	void *unsafe_ptr = NULL;
	bool str_seen = false;

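	/*
	 * bpf_check()->check_func_arg()->check_stack_boundary()
	 * guarantees that fmt points to bpf program stack,
	 * fmt_size bytes of it were initialized and fmt_size > 0
	 */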
	if (fmt[--fmt_size] != 0)
		return -EINVAL;

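	/* check format string for allowed specifiers */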
	for (i = 0; i < fmt_size; i++) {
		if ((!isprint(fmt[i]) && !isspace(fmt[i])) || !isascii(fmt[i]))
			return -EINVAL;

		if (fmt[i] != '%')
			continue;

		if (fmt_cnt >= 3)
			return -EINVAL;

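		/* fmt[i] != 0 && fmt[last] == 0, so we can access fmt[i + 1] */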
		i++;
		if (fmt[i] == 'l') {
			mod[fmt_cnt]++;
			i++;
		} else if (fmt[i] == 'p') {
			mod[fmt_cnt]++;
			if ((fmt[i + 1] == 'k' ||
			     fmt[i + 1] == 'u') &&
			     fmt[i + 2] == 's') {
				fmt_ptype = fmt[i + 1];
				i += 2;
				goto fmt_str;
			}

			if (fmt[i + 1] == 'B') {
				i++;
				goto fmt_next;
			}

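			/* disallow any further format extensions */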
			if (fmt[i + 1] != 0 &&
			    !isspace(fmt[i + 1]) &&
			    !ispunct(fmt[i + 1]))
				return -EINVAL;

			goto fmt_next;
		} else if (fmt[i] == 's') {
			mod[fmt_cnt]++;
			fmt_ptype = fmt[i];
fmt_str:
			if (str_seen)
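				/* allow only one '%s' per fmt string */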
				return -EINVAL;
			str_seen = true;

			if (fmt[i + 1] != 0 &&
			    !isspace(fmt[i + 1]) &&
			    !ispunct(fmt[i + 1]))
				return -EINVAL;

			switch (fmt_cnt) {
			case 0:
				unsafe_ptr = (void *)(long)arg1;
				arg1 = (long)buf;
				break;
			case 1:
				unsafe_ptr = (void *)(long)arg2;
				arg2 = (long)buf;
				break;
			case 2:
				unsafe_ptr = (void *)(long)arg3;
				arg3 = (long)buf;
				break;
			}

			bpf_trace_copy_string(buf, unsafe_ptr, fmt_ptype,
					sizeof(buf));
			goto fmt_next;
		}

		if (fmt[i] == 'l') {
			mod[fmt_cnt]++;
			i++;
		}

		if (fmt[i] != 'i' && fmt[i] != 'd' &&
		    fmt[i] != 'u' && fmt[i] != 'x')
			return -EINVAL;
fmt_next:
		fmt_cnt++;
	}

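/* Horrid workaround for getting va_list handling working with different
 * argument type combinations generically for 32 and 64 bit archs.
 */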
#define __BPF_TP_EMIT()	__BPF_ARG3_TP()
#define __BPF_TP(...)							\
	bpf_do_trace_printk(fmt, ##__VA_ARGS__)

#define __BPF_ARG1_TP(...)						\
	((mod[0] == 2 || (mod[0] == 1 && __BITS_PER_LONG == 64))	\
	  ? __BPF_TP(arg1, ##__VA_ARGS__)				\
	  : ((mod[0] == 1 || (mod[0] == 0 && __BITS_PER_LONG == 32))	\
	      ? __BPF_TP((long)arg1, ##__VA_ARGS__)			\
	      : __BPF_TP((u32)arg1, ##__VA_ARGS__)))

#define __BPF_ARG2_TP(...)						\
	((mod[1] == 2 || (mod[1] == 1 && __BITS_PER_LONG == 64))	\
	  ? __BPF_ARG1_TP(arg2, ##__VA_ARGS__)				\
	  : ((mod[1] == 1 || (mod[1] == 0 && __BITS_PER_LONG == 32))	\
	      ? __BPF_ARG1_TP((long)arg2, ##__VA_ARGS__)		\
	      : __BPF_ARG1_TP((u32)arg2, ##__VA_ARGS__)))

#define __BPF_ARG3_TP(...)						\
	((mod[2] == 2 || (mod[2] == 1 && __BITS_PER_LONG == 64))	\
	  ? __BPF_ARG2_TP(arg3, ##__VA_ARGS__)				\
	  : ((mod[2] == 1 || (mod[2] == 0 && __BITS_PER_LONG == 32))	\
	      ? __BPF_ARG2_TP((long)arg3, ##__VA_ARGS__)		\
	      : __BPF_ARG2_TP((u32)arg3, ##__VA_ARGS__)))

	return __BPF_TP_EMIT();
}

static const struct bpf_func_proto bpf_trace_printk_proto = {
	.func		= bpf_trace_printk,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_MEM,
	.arg2_type	= ARG_CONST_SIZE,
};

const struct bpf_func_proto *bpf_get_trace_printk_proto(void)
{
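	/*
	 * This program might be calling bpf_trace_printk,
	 * so enable the associated bpf_trace/bpf_trace_printk event.
	 * Repeat this each time as it is possible a user has
	 * disabled bpf_trace_printk events.  By loading a program
	 * calling bpf_trace_printk() however the user has expressed
	 * the intent to see such events.
	 */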
	if (trace_set_clr_event("bpf_trace", "bpf_trace_printk", 1))
		pr_warn_ratelimited("could not enable bpf_trace_printk events");

	return &bpf_trace_printk_proto;
}

#define MAX_SEQ_PRINTF_VARARGS		12
#define MAX_SEQ_PRINTF_MAX_MEMCPY	6
#define MAX_SEQ_PRINTF_STR_LEN		128

struct bpf_seq_printf_buf {
	char buf[MAX_SEQ_PRINTF_MAX_MEMCPY][MAX_SEQ_PRINTF_STR_LEN];
};
static DEFINE_PER_CPU(struct bpf_seq_printf_buf, bpf_seq_printf_buf);
static DEFINE_PER_CPU(int, bpf_seq_printf_buf_used);

BPF_CALL_5(bpf_seq_printf, struct seq_file *, m, char *, fmt, u32, fmt_size,
	   const void *, data, u32, data_len)
{
	int err = -EINVAL, fmt_cnt = 0, memcpy_cnt = 0;
	int i, buf_used, copy_size, num_args;
	u64 params[MAX_SEQ_PRINTF_VARARGS];
	struct bpf_seq_printf_buf *bufs;
	const u64 *args = data;

	buf_used = this_cpu_inc_return(bpf_seq_printf_buf_used);
	if (WARN_ON_ONCE(buf_used > 1)) {
		err = -EBUSY;
		goto out;
	}

	bufs = this_cpu_ptr(&bpf_seq_printf_buf);

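	/*
	 * bpf_check()->check_func_arg()->check_stack_boundary()
	 * guarantees that fmt points to bpf program stack,
	 * fmt_size bytes of it were initialized and fmt_size > 0
	 */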
	if (fmt[--fmt_size] != 0)
		goto out;

	if (data_len & 7)
		goto out;

	for (i = 0; i < fmt_size; i++) {
		if (fmt[i] == '%') {
			if (fmt[i + 1] == '%')
				i++;
			else if (!data || !data_len)
				goto out;
		}
	}

	num_args = data_len / 8;

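	/* check format string for allowed specifiers */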
	for (i = 0; i < fmt_size; i++) {
		if ((!isprint(fmt[i]) && !isspace(fmt[i])) || !isascii(fmt[i])) {
			err = -EINVAL;
			goto out;
		}

		if (fmt[i] != '%')
			continue;

		if (fmt[i + 1] == '%') {
			i++;
			continue;
		}

		if (fmt_cnt >= MAX_SEQ_PRINTF_VARARGS) {
			err = -E2BIG;
			goto out;
		}

		if (fmt_cnt >= num_args) {
			err = -EINVAL;
			goto out;
		}

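		/* fmt[i] != 0 && fmt[last] == 0, so we can access fmt[i + 1] */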
		i++;

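		/* skip optional "[0 +-][num]" width formatting field */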
		while (fmt[i] == '0' || fmt[i] == '+' || fmt[i] == '-' ||
		       fmt[i] == ' ')
			i++;
		if (fmt[i] >= '1' && fmt[i] <= '9') {
			i++;
			while (fmt[i] >= '0' && fmt[i] <= '9')
				i++;
		}

		if (fmt[i] == 's') {
			void *unsafe_ptr;

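			/* try our best to copy */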
			if (memcpy_cnt >= MAX_SEQ_PRINTF_MAX_MEMCPY) {
				err = -E2BIG;
				goto out;
			}

			unsafe_ptr = (void *)(long)args[fmt_cnt];
			err = strncpy_from_kernel_nofault(bufs->buf[memcpy_cnt],
					unsafe_ptr, MAX_SEQ_PRINTF_STR_LEN);
			if (err < 0)
				bufs->buf[memcpy_cnt][0] = '\0';
			params[fmt_cnt] = (u64)(long)bufs->buf[memcpy_cnt];

			fmt_cnt++;
			memcpy_cnt++;
			continue;
		}

		if (fmt[i] == 'p') {
			if (fmt[i + 1] == 0 ||
			    fmt[i + 1] == 'K' ||
			    fmt[i + 1] == 'x' ||
			    fmt[i + 1] == 'B') {
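				/* just kernel pointers */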
				params[fmt_cnt] = args[fmt_cnt];
				fmt_cnt++;
				continue;
			}

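			/* only support "%pI4", "%pi4", "%pI6" and "%pi6". */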
			if (fmt[i + 1] != 'i' && fmt[i + 1] != 'I') {
				err = -EINVAL;
				goto out;
			}
			if (fmt[i + 2] != '4' && fmt[i + 2] != '6') {
				err = -EINVAL;
				goto out;
			}

			if (memcpy_cnt >= MAX_SEQ_PRINTF_MAX_MEMCPY) {
				err = -E2BIG;
				goto out;
			}

			copy_size = (fmt[i + 2] == '4') ? 4 : 16;

			err = copy_from_kernel_nofault(bufs->buf[memcpy_cnt],
						(void *) (long) args[fmt_cnt],
						copy_size);
			if (err < 0)
				memset(bufs->buf[memcpy_cnt], 0, copy_size);
			params[fmt_cnt] = (u64)(long)bufs->buf[memcpy_cnt];

			i += 2;
			fmt_cnt++;
			memcpy_cnt++;
			continue;
		}

		if (fmt[i] == 'l') {
			i++;
			if (fmt[i] == 'l')
				i++;
		}

		if (fmt[i] != 'i' && fmt[i] != 'd' &&
		    fmt[i] != 'u' && fmt[i] != 'x' &&
		    fmt[i] != 'X') {
			err = -EINVAL;
			goto out;
		}

		params[fmt_cnt] = args[fmt_cnt];
		fmt_cnt++;
	}

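	/* Maximumly we can have MAX_SEQ_PRINTF_VARARGS parameters, just give
	 * all of them to seq_printf().
	 */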
	seq_printf(m, fmt, params[0], params[1], params[2], params[3],
		   params[4], params[5], params[6], params[7], params[8],
		   params[9], params[10], params[11]);

	err = seq_has_overflowed(m) ? -EOVERFLOW : 0;
out:
	this_cpu_dec(bpf_seq_printf_buf_used);
	return err;
}

BTF_ID_LIST_SINGLE(btf_seq_file_ids, struct, seq_file)

static const struct bpf_func_proto bpf_seq_printf_proto = {
	.func		= bpf_seq_printf,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_BTF_ID,
	.arg1_btf_id	= &btf_seq_file_ids[0],
	.arg2_type	= ARG_PTR_TO_MEM,
	.arg3_type	= ARG_CONST_SIZE,
	.arg4_type	= ARG_PTR_TO_MEM_OR_NULL,
	.arg5_type	= ARG_CONST_SIZE_OR_ZERO,
};

BPF_CALL_3(bpf_seq_write, struct seq_file *, m, const void *, data, u32, len)
{
	return seq_write(m, data, len) ? -EOVERFLOW : 0;
}

static const struct bpf_func_proto bpf_seq_write_proto = {
	.func		= bpf_seq_write,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_BTF_ID,
	.arg1_btf_id	= &btf_seq_file_ids[0],
	.arg2_type	= ARG_PTR_TO_MEM,
	.arg3_type	= ARG_CONST_SIZE_OR_ZERO,
};

BPF_CALL_4(bpf_seq_printf_btf, struct seq_file *, m, struct btf_ptr *, ptr,
	   u32, btf_ptr_size, u64, flags)
{
	const struct btf *btf;
	s32 btf_id;
	int ret;

	ret = bpf_btf_printf_prepare(ptr, btf_ptr_size, flags, &btf, &btf_id);
	if (ret)
		return ret;

	return btf_type_seq_show_flags(btf, btf_id, ptr->ptr, m, flags);
}

static const struct bpf_func_proto bpf_seq_printf_btf_proto = {
	.func		= bpf_seq_printf_btf,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_BTF_ID,
	.arg1_btf_id	= &btf_seq_file_ids[0],
	.arg2_type	= ARG_PTR_TO_MEM,
	.arg3_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg4_type	= ARG_ANYTHING,
};

static __always_inline int
get_map_perf_counter(struct bpf_map *map, u64 flags,
		     u64 *value, u64 *enabled, u64 *running)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	unsigned int cpu = smp_processor_id();
	u64 index = flags & BPF_F_INDEX_MASK;
	struct bpf_event_entry *ee;

	if (unlikely(flags & ~(BPF_F_INDEX_MASK)))
		return -EINVAL;
	if (index == BPF_F_CURRENT_CPU)
		index = cpu;
	if (unlikely(index >= array->map.max_entries))
		return -E2BIG;

	ee = READ_ONCE(array->ptrs[index]);
	if (!ee)
		return -ENOENT;

	return perf_event_read_local(ee->event, value, enabled, running);
}

BPF_CALL_2(bpf_perf_event_read, struct bpf_map *, map, u64, flags)
{
	u64 value = 0;
	int err;

	err = get_map_perf_counter(map, flags, &value, NULL, NULL);
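	/*
	 * this api is ugly since we miss [-22..-2] range of valid
	 * counter values, but that's uapi
	 */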
	if (err)
		return err;
	return value;
}

static const struct bpf_func_proto bpf_perf_event_read_proto = {
	.func		= bpf_perf_event_read,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_ANYTHING,
};

BPF_CALL_4(bpf_perf_event_read_value, struct bpf_map *, map, u64, flags,
	   struct bpf_perf_event_value *, buf, u32, size)
{
	int err = -EINVAL;

	if (unlikely(size != sizeof(struct bpf_perf_event_value)))
		goto clear;
	err = get_map_perf_counter(map, flags, &buf->counter, &buf->enabled,
				   &buf->running);
	if (unlikely(err))
		goto clear;
	return 0;
clear:
	memset(buf, 0, size);
	return err;
}

static const struct bpf_func_proto bpf_perf_event_read_value_proto = {
	.func		= bpf_perf_event_read_value,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_ANYTHING,
	.arg3_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg4_type	= ARG_CONST_SIZE,
};

static __always_inline u64
__bpf_perf_event_output(struct pt_regs *regs, struct bpf_map *map,
			u64 flags, struct perf_sample_data *sd)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	unsigned int cpu = smp_processor_id();
	u64 index = flags & BPF_F_INDEX_MASK;
	struct bpf_event_entry *ee;
	struct perf_event *event;

	if (index == BPF_F_CURRENT_CPU)
		index = cpu;
	if (unlikely(index >= array->map.max_entries))
		return -E2BIG;

	ee = READ_ONCE(array->ptrs[index]);
	if (!ee)
		return -ENOENT;

	event = ee->event;
	if (unlikely(event->attr.type != PERF_TYPE_SOFTWARE ||
		     event->attr.config != PERF_COUNT_SW_BPF_OUTPUT))
		return -EINVAL;

	if (unlikely(event->oncpu != cpu))
		return -EOPNOTSUPP;

	return perf_event_output(event, sd, regs);
}

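/*
 * Support executing tracepoints in normal, irq, and nmi context that each call
 * bpf_perf_event_output.
 */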
struct bpf_trace_sample_data {
	struct perf_sample_data sds[3];
};

static DEFINE_PER_CPU(struct bpf_trace_sample_data, bpf_trace_sds);
static DEFINE_PER_CPU(int, bpf_trace_nest_level);
BPF_CALL_5(bpf_perf_event_output, struct pt_regs *, regs, struct bpf_map *, map,
	   u64, flags, void *, data, u64, size)
{
	struct bpf_trace_sample_data *sds = this_cpu_ptr(&bpf_trace_sds);
	int nest_level = this_cpu_inc_return(bpf_trace_nest_level);
	struct perf_raw_record raw = {
		.frag = {
			.size = size,
			.data = data,
		},
	};
	struct perf_sample_data *sd;
	int err;

	if (WARN_ON_ONCE(nest_level > ARRAY_SIZE(sds->sds))) {
		err = -EBUSY;
		goto out;
	}

	sd = &sds->sds[nest_level - 1];

	if (unlikely(flags & ~(BPF_F_INDEX_MASK))) {
		err = -EINVAL;
		goto out;
	}

	perf_sample_data_init(sd, 0, 0);
	sd->raw = &raw;

	err = __bpf_perf_event_output(regs, map, flags, sd);

out:
	this_cpu_dec(bpf_trace_nest_level);
	return err;
}

static const struct bpf_func_proto bpf_perf_event_output_proto = {
	.func		= bpf_perf_event_output,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_ANYTHING,
	.arg4_type	= ARG_PTR_TO_MEM,
	.arg5_type	= ARG_CONST_SIZE_OR_ZERO,
};

static DEFINE_PER_CPU(int, bpf_event_output_nest_level);
struct bpf_nested_pt_regs {
	struct pt_regs regs[3];
};
static DEFINE_PER_CPU(struct bpf_nested_pt_regs, bpf_pt_regs);
static DEFINE_PER_CPU(struct bpf_trace_sample_data, bpf_misc_sds);

u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
		     void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy)
{
	int nest_level = this_cpu_inc_return(bpf_event_output_nest_level);
	struct perf_raw_frag frag = {
		.copy		= ctx_copy,
		.size		= ctx_size,
		.data		= ctx,
	};
	struct perf_raw_record raw = {
		.frag = {
			{
				.next	= ctx_size ? &frag : NULL,
			},
			.size	= meta_size,
			.data	= meta,
		},
	};
	struct perf_sample_data *sd;
	struct pt_regs *regs;
	u64 ret;

	if (WARN_ON_ONCE(nest_level > ARRAY_SIZE(bpf_misc_sds.sds))) {
		ret = -EBUSY;
		goto out;
	}
	sd = this_cpu_ptr(&bpf_misc_sds.sds[nest_level - 1]);
	regs = this_cpu_ptr(&bpf_pt_regs.regs[nest_level - 1]);

	perf_fetch_caller_regs(regs);
	perf_sample_data_init(sd, 0, 0);
	sd->raw = &raw;

	ret = __bpf_perf_event_output(regs, map, flags, sd);
out:
	this_cpu_dec(bpf_event_output_nest_level);
	return ret;
}

BPF_CALL_0(bpf_get_current_task)
{
	return (long) current;
}

const struct bpf_func_proto bpf_get_current_task_proto = {
	.func		= bpf_get_current_task,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
};

BPF_CALL_2(bpf_current_task_under_cgroup, struct bpf_map *, map, u32, idx)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	struct cgroup *cgrp;

	if (unlikely(idx >= array->map.max_entries))
		return -E2BIG;

	cgrp = READ_ONCE(array->ptrs[idx]);
	if (unlikely(!cgrp))
		return -EAGAIN;

	return task_under_cgroup_hierarchy(current, cgrp);
}

static const struct bpf_func_proto bpf_current_task_under_cgroup_proto = {
	.func		= bpf_current_task_under_cgroup,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_ANYTHING,
};

struct send_signal_irq_work {
	struct irq_work irq_work;
	struct task_struct *task;
	u32 sig;
	enum pid_type type;
};

static DEFINE_PER_CPU(struct send_signal_irq_work, send_signal_work);

static void do_bpf_send_signal(struct irq_work *entry)
{
	struct send_signal_irq_work *work;

	work = container_of(entry, struct send_signal_irq_work, irq_work);
	group_send_sig_info(work->sig, SEND_SIG_PRIV, work->task, work->type);
}

static int bpf_send_signal_common(u32 sig, enum pid_type type)
{
	struct send_signal_irq_work *work = NULL;

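	/* Similar to bpf_probe_write_user, task needs to be
	 * in a sound condition and kernel memory access be
	 * permitted in order to send a signal to the current
	 * task.
	 */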
	if (unlikely(current->flags & (PF_KTHREAD | PF_EXITING)))
		return -EPERM;
	if (unlikely(uaccess_kernel()))
		return -EPERM;
	if (unlikely(!nmi_uaccess_okay()))
		return -EPERM;

	if (irqs_disabled()) {
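		/* Do an early check on signal validity. Otherwise,
		 * the error is lost in deferred irq_work.
		 */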
		if (unlikely(!valid_signal(sig)))
			return -EINVAL;

		work = this_cpu_ptr(&send_signal_work);
		if (atomic_read(&work->irq_work.flags) & IRQ_WORK_BUSY)
			return -EBUSY;

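		/*
		 * Add the current task, which is the target of sending signal,
		 * to the irq_work. The current task may change when queued
		 * irq works get executed.
		 */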
		work->task = current;
		work->sig = sig;
		work->type = type;
		irq_work_queue(&work->irq_work);
		return 0;
	}

	return group_send_sig_info(sig, SEND_SIG_PRIV, current, type);
}

BPF_CALL_1(bpf_send_signal, u32, sig)
{
	return bpf_send_signal_common(sig, PIDTYPE_TGID);
}

static const struct bpf_func_proto bpf_send_signal_proto = {
	.func		= bpf_send_signal,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_ANYTHING,
};

BPF_CALL_1(bpf_send_signal_thread, u32, sig)
{
	return bpf_send_signal_common(sig, PIDTYPE_PID);
}

static const struct bpf_func_proto bpf_send_signal_thread_proto = {
	.func		= bpf_send_signal_thread,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_ANYTHING,
};

BPF_CALL_3(bpf_d_path, struct path *, path, char *, buf, u32, sz)
{
	long len;
	char *p;

	if (!sz)
		return 0;

	p = d_path(path, buf, sz);
	if (IS_ERR(p)) {
		len = PTR_ERR(p);
	} else {
		len = buf + sz - p;
		memmove(buf, p, len);
	}

	return len;
}

BTF_SET_START(btf_allowlist_d_path)
#ifdef CONFIG_SECURITY
BTF_ID(func, security_file_permission)
BTF_ID(func, security_inode_getattr)
BTF_ID(func, security_file_open)
#endif
#ifdef CONFIG_SECURITY_PATH
BTF_ID(func, security_path_truncate)
#endif
BTF_ID(func, vfs_truncate)
BTF_ID(func, vfs_fallocate)
BTF_ID(func, dentry_open)
BTF_ID(func, vfs_getattr)
BTF_ID(func, filp_close)
BTF_SET_END(btf_allowlist_d_path)

static bool bpf_d_path_allowed(const struct bpf_prog *prog)
{
	return btf_id_set_contains(&btf_allowlist_d_path, prog->aux->attach_btf_id);
}

BTF_ID_LIST_SINGLE(bpf_d_path_btf_ids, struct, path)

static const struct bpf_func_proto bpf_d_path_proto = {
	.func		= bpf_d_path,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_BTF_ID,
	.arg1_btf_id	= &bpf_d_path_btf_ids[0],
	.arg2_type	= ARG_PTR_TO_MEM,
	.arg3_type	= ARG_CONST_SIZE_OR_ZERO,
	.allowed	= bpf_d_path_allowed,
};

#define BTF_F_ALL	(BTF_F_COMPACT  | BTF_F_NONAME | \
			 BTF_F_PTR_RAW | BTF_F_ZERO)

static int bpf_btf_printf_prepare(struct btf_ptr *ptr, u32 btf_ptr_size,
				  u64 flags, const struct btf **btf,
				  s32 *btf_id)
{
	const struct btf_type *t;

	if (unlikely(flags & ~(BTF_F_ALL)))
		return -EINVAL;

	if (btf_ptr_size != sizeof(struct btf_ptr))
		return -EINVAL;

	*btf = bpf_get_btf_vmlinux();

	if (IS_ERR_OR_NULL(*btf))
		return IS_ERR(*btf) ? PTR_ERR(*btf) : -EINVAL;

	if (ptr->type_id > 0)
		*btf_id = ptr->type_id;
	else
		return -EINVAL;

	if (*btf_id > 0)
		t = btf_type_by_id(*btf, *btf_id);
	if (*btf_id <= 0 || !t)
		return -ENOENT;

	return 0;
}

BPF_CALL_5(bpf_snprintf_btf, char *, str, u32, str_size, struct btf_ptr *, ptr,
	   u32, btf_ptr_size, u64, flags)
{
	const struct btf *btf;
	s32 btf_id;
	int ret;

	ret = bpf_btf_printf_prepare(ptr, btf_ptr_size, flags, &btf, &btf_id);
	if (ret)
		return ret;

	return btf_type_snprintf_show(btf, btf_id, ptr->ptr, str, str_size,
				      flags);
}

const struct bpf_func_proto bpf_snprintf_btf_proto = {
	.func		= bpf_snprintf_btf,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_MEM,
	.arg2_type	= ARG_CONST_SIZE,
	.arg3_type	= ARG_PTR_TO_MEM,
	.arg4_type	= ARG_CONST_SIZE,
	.arg5_type	= ARG_ANYTHING,
};

const struct bpf_func_proto *
bpf_tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_map_lookup_elem:
		return &bpf_map_lookup_elem_proto;
	case BPF_FUNC_map_update_elem:
		return &bpf_map_update_elem_proto;
	case BPF_FUNC_map_delete_elem:
		return &bpf_map_delete_elem_proto;
	case BPF_FUNC_map_push_elem:
		return &bpf_map_push_elem_proto;
	case BPF_FUNC_map_pop_elem:
		return &bpf_map_pop_elem_proto;
	case BPF_FUNC_map_peek_elem:
		return &bpf_map_peek_elem_proto;
	case BPF_FUNC_ktime_get_ns:
		return &bpf_ktime_get_ns_proto;
	case BPF_FUNC_ktime_get_boot_ns:
		return &bpf_ktime_get_boot_ns_proto;
	case BPF_FUNC_tail_call:
		return &bpf_tail_call_proto;
	case BPF_FUNC_get_current_pid_tgid:
		return &bpf_get_current_pid_tgid_proto;
	case BPF_FUNC_get_current_task:
		return &bpf_get_current_task_proto;
	case BPF_FUNC_get_current_uid_gid:
		return &bpf_get_current_uid_gid_proto;
	case BPF_FUNC_get_current_comm:
		return &bpf_get_current_comm_proto;
	case BPF_FUNC_trace_printk:
		return bpf_get_trace_printk_proto();
	case BPF_FUNC_get_smp_processor_id:
		return &bpf_get_smp_processor_id_proto;
	case BPF_FUNC_get_numa_node_id:
		return &bpf_get_numa_node_id_proto;
	case BPF_FUNC_perf_event_read:
		return &bpf_perf_event_read_proto;
	case BPF_FUNC_probe_write_user:
		return bpf_get_probe_write_proto();
	case BPF_FUNC_current_task_under_cgroup:
		return &bpf_current_task_under_cgroup_proto;
	case BPF_FUNC_get_prandom_u32:
		return &bpf_get_prandom_u32_proto;
	case BPF_FUNC_probe_read_user:
		return &bpf_probe_read_user_proto;
	case BPF_FUNC_probe_read_kernel:
		return &bpf_probe_read_kernel_proto;
	case BPF_FUNC_probe_read_user_str:
		return &bpf_probe_read_user_str_proto;
	case BPF_FUNC_probe_read_kernel_str:
		return &bpf_probe_read_kernel_str_proto;
#ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
	case BPF_FUNC_probe_read:
		return &bpf_probe_read_compat_proto;
	case BPF_FUNC_probe_read_str:
		return &bpf_probe_read_compat_str_proto;
#endif
#ifdef CONFIG_CGROUPS
	case BPF_FUNC_get_current_cgroup_id:
		return &bpf_get_current_cgroup_id_proto;
#endif
	case BPF_FUNC_send_signal:
		return &bpf_send_signal_proto;
	case BPF_FUNC_send_signal_thread:
		return &bpf_send_signal_thread_proto;
	case BPF_FUNC_perf_event_read_value:
		return &bpf_perf_event_read_value_proto;
	case BPF_FUNC_get_ns_current_pid_tgid:
		return &bpf_get_ns_current_pid_tgid_proto;
	case BPF_FUNC_ringbuf_output:
		return &bpf_ringbuf_output_proto;
	case BPF_FUNC_ringbuf_reserve:
		return &bpf_ringbuf_reserve_proto;
	case BPF_FUNC_ringbuf_submit:
		return &bpf_ringbuf_submit_proto;
	case BPF_FUNC_ringbuf_discard:
		return &bpf_ringbuf_discard_proto;
	case BPF_FUNC_ringbuf_query:
		return &bpf_ringbuf_query_proto;
	case BPF_FUNC_jiffies64:
		return &bpf_jiffies64_proto;
	case BPF_FUNC_get_task_stack:
		return &bpf_get_task_stack_proto;
	case BPF_FUNC_copy_from_user:
		return prog->aux->sleepable ? &bpf_copy_from_user_proto : NULL;
	case BPF_FUNC_snprintf_btf:
		return &bpf_snprintf_btf_proto;
	case BPF_FUNC_per_cpu_ptr:
		return &bpf_per_cpu_ptr_proto;
	case BPF_FUNC_this_cpu_ptr:
		return &bpf_this_cpu_ptr_proto;
	default:
		return NULL;
	}
}

static const struct bpf_func_proto *
kprobe_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_perf_event_output:
		return &bpf_perf_event_output_proto;
	case BPF_FUNC_get_stackid:
		return &bpf_get_stackid_proto;
	case BPF_FUNC_get_stack:
		return &bpf_get_stack_proto;
#ifdef CONFIG_BPF_KPROBE_OVERRIDE
	case BPF_FUNC_override_return:
		return &bpf_override_return_proto;
#endif
	default:
		return bpf_tracing_func_proto(func_id, prog);
	}
}

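/* bpf+kprobe programs can access fields of 'struct pt_regs' */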
static bool kprobe_prog_is_valid_access(int off, int size, enum bpf_access_type type,
					const struct bpf_prog *prog,
					struct bpf_insn_access_aux *info)
{
	if (off < 0 || off >= sizeof(struct pt_regs))
		return false;
	if (type != BPF_READ)
		return false;
	if (off % size != 0)
		return false;
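	/*
	 * Assertion for 32 bit to make sure last 8 byte access
	 * (BPF_DW) to the last 4 byte member is disallowed.
	 */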
	if (off + size > sizeof(struct pt_regs))
		return false;

	return true;
}

const struct bpf_verifier_ops kprobe_verifier_ops = {
	.get_func_proto  = kprobe_prog_func_proto,
	.is_valid_access = kprobe_prog_is_valid_access,
};

const struct bpf_prog_ops kprobe_prog_ops = {
};

BPF_CALL_5(bpf_perf_event_output_tp, void *, tp_buff, struct bpf_map *, map,
	   u64, flags, void *, data, u64, size)
{
	struct pt_regs *regs = *(struct pt_regs **)tp_buff;

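	/*
	 * r1 points to perf tracepoint buffer where first 8 bytes are hidden
	 * from bpf program and contain a pointer to 'struct pt_regs'. Fetch it
	 * from there and call the same bpf_perf_event_output() helper inline.
	 */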
	return ____bpf_perf_event_output(regs, map, flags, data, size);
}

static const struct bpf_func_proto bpf_perf_event_output_proto_tp = {
	.func		= bpf_perf_event_output_tp,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_ANYTHING,
	.arg4_type	= ARG_PTR_TO_MEM,
	.arg5_type	= ARG_CONST_SIZE_OR_ZERO,
};

BPF_CALL_3(bpf_get_stackid_tp, void *, tp_buff, struct bpf_map *, map,
	   u64, flags)
{
	struct pt_regs *regs = *(struct pt_regs **)tp_buff;

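	/*
	 * Same comment as in bpf_perf_event_output_tp(), only that this time
	 * the other helper's function body cannot be inlined due to being
	 * external, thus we need to call the raw helper function.
	 */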
	return bpf_get_stackid((unsigned long) regs, (unsigned long) map,
			       flags, 0, 0);
}

static const struct bpf_func_proto bpf_get_stackid_proto_tp = {
	.func		= bpf_get_stackid_tp,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_ANYTHING,
};

BPF_CALL_4(bpf_get_stack_tp, void *, tp_buff, void *, buf, u32, size,
	   u64, flags)
{
	struct pt_regs *regs = *(struct pt_regs **)tp_buff;

	return bpf_get_stack((unsigned long) regs, (unsigned long) buf,
			     (unsigned long) size, flags, 0);
}

static const struct bpf_func_proto bpf_get_stack_proto_tp = {
	.func		= bpf_get_stack_tp,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg3_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg4_type	= ARG_ANYTHING,
};

static const struct bpf_func_proto *
tp_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_perf_event_output:
		return &bpf_perf_event_output_proto_tp;
	case BPF_FUNC_get_stackid:
		return &bpf_get_stackid_proto_tp;
	case BPF_FUNC_get_stack:
		return &bpf_get_stack_proto_tp;
	default:
		return bpf_tracing_func_proto(func_id, prog);
	}
}

static bool tp_prog_is_valid_access(int off, int size, enum bpf_access_type type,
				    const struct bpf_prog *prog,
				    struct bpf_insn_access_aux *info)
{
	if (off < sizeof(void *) || off >= PERF_MAX_TRACE_SIZE)
		return false;
	if (type != BPF_READ)
		return false;
	if (off % size != 0)
		return false;

	BUILD_BUG_ON(PERF_MAX_TRACE_SIZE % sizeof(__u64));
	return true;
}

const struct bpf_verifier_ops tracepoint_verifier_ops = {
	.get_func_proto  = tp_prog_func_proto,
	.is_valid_access = tp_prog_is_valid_access,
};

const struct bpf_prog_ops tracepoint_prog_ops = {
};

BPF_CALL_3(bpf_perf_prog_read_value, struct bpf_perf_event_data_kern *, ctx,
	   struct bpf_perf_event_value *, buf, u32, size)
{
	int err = -EINVAL;

	if (unlikely(size != sizeof(struct bpf_perf_event_value)))
		goto clear;
	err = perf_event_read_local(ctx->event, &buf->counter, &buf->enabled,
				    &buf->running);
	if (unlikely(err))
		goto clear;
	return 0;
clear:
	memset(buf, 0, size);
	return err;
}

static const struct bpf_func_proto bpf_perf_prog_read_value_proto = {
	.func		= bpf_perf_prog_read_value,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg3_type	= ARG_CONST_SIZE,
};

BPF_CALL_4(bpf_read_branch_records, struct bpf_perf_event_data_kern *, ctx,
	   void *, buf, u32, size, u64, flags)
{
#ifndef CONFIG_X86
	return -ENOENT;
#else
	static const u32 br_entry_size = sizeof(struct perf_branch_entry);
	struct perf_branch_stack *br_stack = ctx->data->br_stack;
	u32 to_copy;

	if (unlikely(flags & ~BPF_F_GET_BRANCH_RECORDS_SIZE))
		return -EINVAL;

	if (unlikely(!br_stack))
		return -EINVAL;

	if (flags & BPF_F_GET_BRANCH_RECORDS_SIZE)
		return br_stack->nr * br_entry_size;

	if (!buf || (size % br_entry_size != 0))
		return -EINVAL;

	to_copy = min_t(u32, br_stack->nr * br_entry_size, size);
	memcpy(buf, br_stack->entries, to_copy);

	return to_copy;
#endif
}

static const struct bpf_func_proto bpf_read_branch_records_proto = {
	.func		= bpf_read_branch_records,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_PTR_TO_MEM_OR_NULL,
	.arg3_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg4_type	= ARG_ANYTHING,
};

static const struct bpf_func_proto *
pe_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_perf_event_output:
		return &bpf_perf_event_output_proto_tp;
	case BPF_FUNC_get_stackid:
		return &bpf_get_stackid_proto_pe;
	case BPF_FUNC_get_stack:
		return &bpf_get_stack_proto_pe;
	case BPF_FUNC_perf_prog_read_value:
		return &bpf_perf_prog_read_value_proto;
	case BPF_FUNC_read_branch_records:
		return &bpf_read_branch_records_proto;
	default:
		return bpf_tracing_func_proto(func_id, prog);
	}
}

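/*
 * bpf_raw_tp_regs are separate from bpf_pt_regs used from skb/xdp
 * to avoid potential recursive reuse issue when/if tracepoints are added
 * inside bpf_*_event_output, bpf_get_stackid() and/or bpf_get_stack().
 */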
struct bpf_raw_tp_regs {
	struct pt_regs regs[3];
};
static DEFINE_PER_CPU(struct bpf_raw_tp_regs, bpf_raw_tp_regs);
static DEFINE_PER_CPU(int, bpf_raw_tp_nest_level);
static struct pt_regs *get_bpf_raw_tp_regs(void)
{
	struct bpf_raw_tp_regs *tp_regs = this_cpu_ptr(&bpf_raw_tp_regs);
	int nest_level = this_cpu_inc_return(bpf_raw_tp_nest_level);

	if (WARN_ON_ONCE(nest_level > ARRAY_SIZE(tp_regs->regs))) {
		this_cpu_dec(bpf_raw_tp_nest_level);
		return ERR_PTR(-EBUSY);
	}

	return &tp_regs->regs[nest_level - 1];
}

static void put_bpf_raw_tp_regs(void)
{
	this_cpu_dec(bpf_raw_tp_nest_level);
}

BPF_CALL_5(bpf_perf_event_output_raw_tp, struct bpf_raw_tracepoint_args *, args,
	   struct bpf_map *, map, u64, flags, void *, data, u64, size)
{
	struct pt_regs *regs = get_bpf_raw_tp_regs();
	int ret;

	if (IS_ERR(regs))
		return PTR_ERR(regs);

	perf_fetch_caller_regs(regs);
	ret = ____bpf_perf_event_output(regs, map, flags, data, size);

	put_bpf_raw_tp_regs();
	return ret;
}

static const struct bpf_func_proto bpf_perf_event_output_proto_raw_tp = {
	.func		= bpf_perf_event_output_raw_tp,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_ANYTHING,
	.arg4_type	= ARG_PTR_TO_MEM,
	.arg5_type	= ARG_CONST_SIZE_OR_ZERO,
};

extern const struct bpf_func_proto bpf_skb_output_proto;
extern const struct bpf_func_proto bpf_xdp_output_proto;

BPF_CALL_3(bpf_get_stackid_raw_tp, struct bpf_raw_tracepoint_args *, args,
	   struct bpf_map *, map, u64, flags)
{
	struct pt_regs *regs = get_bpf_raw_tp_regs();
	int ret;

	if (IS_ERR(regs))
		return PTR_ERR(regs);

	perf_fetch_caller_regs(regs);
	ret = bpf_get_stackid((unsigned long) regs, (unsigned long) map,
			      flags, 0, 0);
	put_bpf_raw_tp_regs();
	return ret;
}

static const struct bpf_func_proto bpf_get_stackid_proto_raw_tp = {
	.func		= bpf_get_stackid_raw_tp,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_ANYTHING,
};

BPF_CALL_4(bpf_get_stack_raw_tp, struct bpf_raw_tracepoint_args *, args,
	   void *, buf, u32, size, u64, flags)
{
	struct pt_regs *regs = get_bpf_raw_tp_regs();
	int ret;

	if (IS_ERR(regs))
		return PTR_ERR(regs);

	perf_fetch_caller_regs(regs);
	ret = bpf_get_stack((unsigned long) regs, (unsigned long) buf,
			    (unsigned long) size, flags, 0);
	put_bpf_raw_tp_regs();
	return ret;
}

static const struct bpf_func_proto bpf_get_stack_proto_raw_tp = {
	.func		= bpf_get_stack_raw_tp,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_PTR_TO_MEM,
	.arg3_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg4_type	= ARG_ANYTHING,
};

static const struct bpf_func_proto *
raw_tp_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_perf_event_output:
		return &bpf_perf_event_output_proto_raw_tp;
	case BPF_FUNC_get_stackid:
		return &bpf_get_stackid_proto_raw_tp;
	case BPF_FUNC_get_stack:
		return &bpf_get_stack_proto_raw_tp;
	default:
		return bpf_tracing_func_proto(func_id, prog);
	}
}

const struct bpf_func_proto *
tracing_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
#ifdef CONFIG_NET
	case BPF_FUNC_skb_output:
		return &bpf_skb_output_proto;
	case BPF_FUNC_xdp_output:
		return &bpf_xdp_output_proto;
	case BPF_FUNC_skc_to_tcp6_sock:
		return &bpf_skc_to_tcp6_sock_proto;
	case BPF_FUNC_skc_to_tcp_sock:
		return &bpf_skc_to_tcp_sock_proto;
	case BPF_FUNC_skc_to_tcp_timewait_sock:
		return &bpf_skc_to_tcp_timewait_sock_proto;
	case BPF_FUNC_skc_to_tcp_request_sock:
		return &bpf_skc_to_tcp_request_sock_proto;
	case BPF_FUNC_skc_to_udp6_sock:
		return &bpf_skc_to_udp6_sock_proto;
#endif
	case BPF_FUNC_seq_printf:
		return prog->expected_attach_type == BPF_TRACE_ITER ?
		       &bpf_seq_printf_proto :
		       NULL;
	case BPF_FUNC_seq_write:
		return prog->expected_attach_type == BPF_TRACE_ITER ?
		       &bpf_seq_write_proto :
		       NULL;
	case BPF_FUNC_seq_printf_btf:
		return prog->expected_attach_type == BPF_TRACE_ITER ?
		       &bpf_seq_printf_btf_proto :
		       NULL;
	case BPF_FUNC_d_path:
		return &bpf_d_path_proto;
	default:
		return raw_tp_prog_func_proto(func_id, prog);
	}
}

static bool raw_tp_prog_is_valid_access(int off, int size,
					enum bpf_access_type type,
					const struct bpf_prog *prog,
					struct bpf_insn_access_aux *info)
{
	if (off < 0 || off >= sizeof(__u64) * MAX_BPF_FUNC_ARGS)
		return false;
	if (type != BPF_READ)
		return false;
	if (off % size != 0)
		return false;
	return true;
}

static bool tracing_prog_is_valid_access(int off, int size,
					 enum bpf_access_type type,
					 const struct bpf_prog *prog,
					 struct bpf_insn_access_aux *info)
{
	if (off < 0 || off >= sizeof(__u64) * MAX_BPF_FUNC_ARGS)
		return false;
	if (type != BPF_READ)
		return false;
	if (off % size != 0)
		return false;
	return btf_ctx_access(off, size, type, prog, info);
}

int __weak bpf_prog_test_run_tracing(struct bpf_prog *prog,
				     const union bpf_attr *kattr,
				     union bpf_attr __user *uattr)
{
	return -ENOTSUPP;
}

const struct bpf_verifier_ops raw_tracepoint_verifier_ops = {
	.get_func_proto  = raw_tp_prog_func_proto,
	.is_valid_access = raw_tp_prog_is_valid_access,
};

const struct bpf_prog_ops raw_tracepoint_prog_ops = {
#ifdef CONFIG_NET
	.test_run = bpf_prog_test_run_raw_tp,
#endif
};

const struct bpf_verifier_ops tracing_verifier_ops = {
	.get_func_proto  = tracing_prog_func_proto,
	.is_valid_access = tracing_prog_is_valid_access,
};

const struct bpf_prog_ops tracing_prog_ops = {
	.test_run = bpf_prog_test_run_tracing,
};

static bool raw_tp_writable_prog_is_valid_access(int off, int size,
						 enum bpf_access_type type,
						 const struct bpf_prog *prog,
						 struct bpf_insn_access_aux *info)
{
	if (off == 0) {
		if (size != sizeof(u64) || type != BPF_READ)
			return false;
		info->reg_type = PTR_TO_TP_BUFFER;
	}
	return raw_tp_prog_is_valid_access(off, size, type, prog, info);
}

const struct bpf_verifier_ops raw_tracepoint_writable_verifier_ops = {
	.get_func_proto  = raw_tp_prog_func_proto,
	.is_valid_access = raw_tp_writable_prog_is_valid_access,
};

const struct bpf_prog_ops raw_tracepoint_writable_prog_ops = {
};

static bool pe_prog_is_valid_access(int off, int size, enum bpf_access_type type,
				    const struct bpf_prog *prog,
				    struct bpf_insn_access_aux *info)
{
	const int size_u64 = sizeof(u64);

	if (off < 0 || off >= sizeof(struct bpf_perf_event_data))
		return false;
	if (type != BPF_READ)
		return false;
	if (off % size != 0) {
		if (sizeof(unsigned long) != 4)
			return false;
		if (size != 8)
			return false;
		if (off % size != 4)
			return false;
	}

	switch (off) {
	case bpf_ctx_range(struct bpf_perf_event_data, sample_period):
		bpf_ctx_record_field_size(info, size_u64);
		if (!bpf_ctx_narrow_access_ok(off, size, size_u64))
			return false;
		break;
	case bpf_ctx_range(struct bpf_perf_event_data, addr):
		bpf_ctx_record_field_size(info, size_u64);
		if (!bpf_ctx_narrow_access_ok(off, size, size_u64))
			return false;
		break;
	default:
		if (size != sizeof(long))
			return false;
	}

	return true;
}

static u32 pe_prog_convert_ctx_access(enum bpf_access_type type,
				      const struct bpf_insn *si,
				      struct bpf_insn *insn_buf,
				      struct bpf_prog *prog, u32 *target_size)
{
	struct bpf_insn *insn = insn_buf;

	switch (si->off) {
	case offsetof(struct bpf_perf_event_data, sample_period):
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern,
						       data), si->dst_reg, si->src_reg,
				      offsetof(struct bpf_perf_event_data_kern, data));
		*insn++ = BPF_LDX_MEM(BPF_DW, si->dst_reg, si->dst_reg,
				      bpf_target_off(struct perf_sample_data, period, 8,
						     target_size));
		break;
	case offsetof(struct bpf_perf_event_data, addr):
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern,
						       data), si->dst_reg, si->src_reg,
				      offsetof(struct bpf_perf_event_data_kern, data));
		*insn++ = BPF_LDX_MEM(BPF_DW, si->dst_reg, si->dst_reg,
				      bpf_target_off(struct perf_sample_data, addr, 8,
						     target_size));
		break;
	default:
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern,
						       regs), si->dst_reg, si->src_reg,
				      offsetof(struct bpf_perf_event_data_kern, regs));
		*insn++ = BPF_LDX_MEM(BPF_SIZEOF(long), si->dst_reg, si->dst_reg,
				      si->off);
		break;
	}

	return insn - insn_buf;
}

const struct bpf_verifier_ops perf_event_verifier_ops = {
	.get_func_proto		= pe_prog_func_proto,
	.is_valid_access	= pe_prog_is_valid_access,
	.convert_ctx_access	= pe_prog_convert_ctx_access,
};

const struct bpf_prog_ops perf_event_prog_ops = {
};

static DEFINE_MUTEX(bpf_event_mutex);

#define BPF_TRACE_MAX_PROGS 64

int perf_event_attach_bpf_prog(struct perf_event *event,
			       struct bpf_prog *prog)
{
	struct bpf_prog_array *old_array;
	struct bpf_prog_array *new_array;
	int ret = -EEXIST;

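	/*
	 * Kprobe override only works if they are attached to an
	 * entry probe and the probing function is allowed for
	 * error injection.
	 */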
	if (prog->kprobe_override &&
	    (!trace_kprobe_on_func_entry(event->tp_event) ||
	     !trace_kprobe_error_injectable(event->tp_event)))
		return -EINVAL;

	mutex_lock(&bpf_event_mutex);

	if (event->prog)
		goto unlock;

	old_array = bpf_event_rcu_dereference(event->tp_event->prog_array);
	if (old_array &&
	    bpf_prog_array_length(old_array) >= BPF_TRACE_MAX_PROGS) {
		ret = -E2BIG;
		goto unlock;
	}

	ret = bpf_prog_array_copy(old_array, NULL, prog, &new_array);
	if (ret < 0)
		goto unlock;

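	/* set the new array to event->tp_event and set event->prog */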
	event->prog = prog;
	rcu_assign_pointer(event->tp_event->prog_array, new_array);
	bpf_prog_array_free(old_array);

unlock:
	mutex_unlock(&bpf_event_mutex);
	return ret;
}

void perf_event_detach_bpf_prog(struct perf_event *event)
{
	struct bpf_prog_array *old_array;
	struct bpf_prog_array *new_array;
	int ret;

	mutex_lock(&bpf_event_mutex);

	if (!event->prog)
		goto unlock;

	old_array = bpf_event_rcu_dereference(event->tp_event->prog_array);
	ret = bpf_prog_array_copy(old_array, event->prog, NULL, &new_array);
	if (ret == -ENOENT)
		goto unlock;
	if (ret < 0) {
		bpf_prog_array_delete_safe(old_array, event->prog);
	} else {
		rcu_assign_pointer(event->tp_event->prog_array, new_array);
		bpf_prog_array_free(old_array);
	}

	bpf_prog_put(event->prog);
	event->prog = NULL;

unlock:
	mutex_unlock(&bpf_event_mutex);
}

int perf_event_query_prog_array(struct perf_event *event, void __user *info)
{
	struct perf_event_query_bpf __user *uquery = info;
	struct perf_event_query_bpf query = {};
	struct bpf_prog_array *progs;
	u32 *ids, prog_cnt, ids_len;
	int ret;

	if (!perfmon_capable())
		return -EPERM;
	if (event->attr.type != PERF_TYPE_TRACEPOINT)
		return -EINVAL;
	if (copy_from_user(&query, uquery, sizeof(query)))
		return -EFAULT;

	ids_len = query.ids_len;
	if (ids_len > BPF_TRACE_MAX_PROGS)
		return -E2BIG;
	ids = kcalloc(ids_len, sizeof(u32), GFP_USER | __GFP_NOWARN);
	if (!ids)
		return -ENOMEM;

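	/*
	 * The prog_array is copied out under bpf_event_mutex below so it
	 * cannot be swapped or freed while the ids are being read.
	 */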
	mutex_lock(&bpf_event_mutex);
	progs = bpf_event_rcu_dereference(event->tp_event->prog_array);
	ret = bpf_prog_array_copy_info(progs, ids, ids_len, &prog_cnt);
	mutex_unlock(&bpf_event_mutex);

	if (copy_to_user(&uquery->prog_cnt, &prog_cnt, sizeof(prog_cnt)) ||
	    copy_to_user(uquery->ids, ids, ids_len * sizeof(u32)))
		ret = -EFAULT;

	kfree(ids);
	return ret;
}

extern struct bpf_raw_event_map __start__bpf_raw_tp[];
extern struct bpf_raw_event_map __stop__bpf_raw_tp[];

struct bpf_raw_event_map *bpf_get_raw_tracepoint(const char *name)
{
	struct bpf_raw_event_map *btp = __start__bpf_raw_tp;

	for (; btp < __stop__bpf_raw_tp; btp++) {
		if (!strcmp(btp->tp->name, name))
			return btp;
	}

	return bpf_get_raw_tracepoint_module(name);
}

void bpf_put_raw_tracepoint(struct bpf_raw_event_map *btp)
{
	struct module *mod = __module_address((unsigned long)btp);

	if (mod)
		module_put(mod);
}

static __always_inline
void __bpf_trace_run(struct bpf_prog *prog, u64 *args)
{
	cant_sleep();
	rcu_read_lock();
	(void) BPF_PROG_RUN(prog, args);
	rcu_read_unlock();
}

#define UNPACK(...)			__VA_ARGS__
#define REPEAT_1(FN, DL, X, ...)	FN(X)
#define REPEAT_2(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_1(FN, DL, __VA_ARGS__)
#define REPEAT_3(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_2(FN, DL, __VA_ARGS__)
#define REPEAT_4(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_3(FN, DL, __VA_ARGS__)
#define REPEAT_5(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_4(FN, DL, __VA_ARGS__)
#define REPEAT_6(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_5(FN, DL, __VA_ARGS__)
#define REPEAT_7(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_6(FN, DL, __VA_ARGS__)
#define REPEAT_8(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_7(FN, DL, __VA_ARGS__)
#define REPEAT_9(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_8(FN, DL, __VA_ARGS__)
#define REPEAT_10(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_9(FN, DL, __VA_ARGS__)
#define REPEAT_11(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_10(FN, DL, __VA_ARGS__)
#define REPEAT_12(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_11(FN, DL, __VA_ARGS__)
#define REPEAT(X, FN, DL, ...)		REPEAT_##X(FN, DL, __VA_ARGS__)

#define SARG(X)		u64 arg##X
#define COPY(X)		args[X] = arg##X

#define __DL_COM	(,)
#define __DL_SEM	(;)

#define __SEQ_0_11	0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11

#define BPF_TRACE_DEFN_x(x)						\
	void bpf_trace_run##x(struct bpf_prog *prog,			\
			      REPEAT(x, SARG, __DL_COM, __SEQ_0_11))	\
	{								\
		u64 args[x];						\
		REPEAT(x, COPY, __DL_SEM, __SEQ_0_11);			\
		__bpf_trace_run(prog, args);				\
	}								\
	EXPORT_SYMBOL_GPL(bpf_trace_run##x)
BPF_TRACE_DEFN_x(1);
BPF_TRACE_DEFN_x(2);
BPF_TRACE_DEFN_x(3);
BPF_TRACE_DEFN_x(4);
BPF_TRACE_DEFN_x(5);
BPF_TRACE_DEFN_x(6);
BPF_TRACE_DEFN_x(7);
BPF_TRACE_DEFN_x(8);
BPF_TRACE_DEFN_x(9);
BPF_TRACE_DEFN_x(10);
BPF_TRACE_DEFN_x(11);
BPF_TRACE_DEFN_x(12);

static int __bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_prog *prog)
{
	struct tracepoint *tp = btp->tp;

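	/*
	 * check that program doesn't access arguments beyond what's
	 * available in this tracepoint
	 */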
	if (prog->aux->max_ctx_offset > btp->num_args * sizeof(u64))
		return -EINVAL;

	if (prog->aux->max_tp_access > btp->writable_size)
		return -EINVAL;

	return tracepoint_probe_register(tp, (void *)btp->bpf_func, prog);
}

int bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_prog *prog)
{
	return __bpf_probe_register(btp, prog);
}

int bpf_probe_unregister(struct bpf_raw_event_map *btp, struct bpf_prog *prog)
{
	return tracepoint_probe_unregister(btp->tp, (void *)btp->bpf_func, prog);
}

int bpf_get_perf_event_info(const struct perf_event *event, u32 *prog_id,
			    u32 *fd_type, const char **buf,
			    u64 *probe_offset, u64 *probe_addr)
{
	bool is_tracepoint, is_syscall_tp;
	struct bpf_prog *prog;
	int flags, err = 0;

	prog = event->prog;
	if (!prog)
		return -ENOENT;

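	/* not supporting BPF_PROG_TYPE_PERF_EVENT yet */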
	if (prog->type == BPF_PROG_TYPE_PERF_EVENT)
		return -EOPNOTSUPP;

	*prog_id = prog->aux->id;
	flags = event->tp_event->flags;
	is_tracepoint = flags & TRACE_EVENT_FL_TRACEPOINT;
	is_syscall_tp = is_syscall_trace_event(event->tp_event);

	if (is_tracepoint || is_syscall_tp) {
		*buf = is_tracepoint ? event->tp_event->tp->name
				     : event->tp_event->name;
		*fd_type = BPF_FD_TYPE_TRACEPOINT;
		*probe_offset = 0x0;
		*probe_addr = 0x0;
	} else {
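		/* kprobe/uprobe */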
		err = -EOPNOTSUPP;
#ifdef CONFIG_KPROBE_EVENTS
		if (flags & TRACE_EVENT_FL_KPROBE)
			err = bpf_get_kprobe_info(event, fd_type, buf,
						  probe_offset, probe_addr,
						  event->attr.type == PERF_TYPE_TRACEPOINT);
#endif
#ifdef CONFIG_UPROBE_EVENTS
		if (flags & TRACE_EVENT_FL_UPROBE)
			err = bpf_get_uprobe_info(event, fd_type, buf,
						  probe_offset,
						  event->attr.type == PERF_TYPE_TRACEPOINT);
#endif
	}

	return err;
}

static int __init send_signal_irq_work_init(void)
{
	int cpu;
	struct send_signal_irq_work *work;

	for_each_possible_cpu(cpu) {
		work = per_cpu_ptr(&send_signal_work, cpu);
		init_irq_work(&work->irq_work, do_bpf_send_signal);
	}
	return 0;
}

subsys_initcall(send_signal_irq_work_init);

#ifdef CONFIG_MODULES
static int bpf_event_notify(struct notifier_block *nb, unsigned long op,
			    void *module)
{
	struct bpf_trace_module *btm, *tmp;
	struct module *mod = module;
	int ret = 0;

	if (mod->num_bpf_raw_events == 0 ||
	    (op != MODULE_STATE_COMING && op != MODULE_STATE_GOING))
		goto out;

	mutex_lock(&bpf_module_mutex);

	switch (op) {
	case MODULE_STATE_COMING:
		btm = kzalloc(sizeof(*btm), GFP_KERNEL);
		if (btm) {
			btm->module = module;
			list_add(&btm->list, &bpf_trace_modules);
		} else {
			ret = -ENOMEM;
		}
		break;
	case MODULE_STATE_GOING:
		list_for_each_entry_safe(btm, tmp, &bpf_trace_modules, list) {
			if (btm->module == module) {
				list_del(&btm->list);
				kfree(btm);
				break;
			}
		}
		break;
	}

	mutex_unlock(&bpf_module_mutex);

out:
	return notifier_from_errno(ret);
}

static struct notifier_block bpf_module_nb = {
	.notifier_call = bpf_event_notify,
};

static int __init bpf_event_init(void)
{
	register_module_notifier(&bpf_module_nb);
	return 0;
}

fs_initcall(bpf_event_init);
#endif /* CONFIG_MODULES */