// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 */
#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/bpf-cgroup.h>
#include <linux/rcupdate.h>
#include <linux/random.h>
#include <linux/smp.h>
#include <linux/topology.h>
#include <linux/ktime.h>
#include <linux/sched.h>
#include <linux/uidgid.h>
#include <linux/filter.h>
#include <linux/ctype.h>
#include <linux/jiffies.h>
#include <linux/pid_namespace.h>
#include <linux/proc_ns.h>
#include <linux/security.h>
#include <linux/btf_ids.h>

#include "../../lib/kstrtox.h"

/* If kernel subsystem is allowing eBPF programs to call this function,
 * inside its own verifier_ops->get_func_proto() callback it should return
 * bpf_map_lookup_elem_proto, so that verifier can properly check the arguments
 *
 * Different map implementations will rely on rcu in map methods
 * lookup/update/delete, therefore eBPF programs must run under rcu lock
 * if program is allowed to access maps, so check rcu_read_lock_held in
 * all three functions.
 */
BPF_CALL_2(bpf_map_lookup_elem, struct bpf_map *, map, void *, key)
{
	WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_bh_held());
	return (unsigned long) map->ops->map_lookup_elem(map, key);
}

const struct bpf_func_proto bpf_map_lookup_elem_proto = {
	.func		= bpf_map_lookup_elem,
	.gpl_only	= false,
	.pkt_access	= true,
	.ret_type	= RET_PTR_TO_MAP_VALUE_OR_NULL,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_PTR_TO_MAP_KEY,
};

BPF_CALL_4(bpf_map_update_elem, struct bpf_map *, map, void *, key,
	   void *, value, u64, flags)
{
	WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_bh_held());
	return map->ops->map_update_elem(map, key, value, flags);
}

const struct bpf_func_proto bpf_map_update_elem_proto = {
	.func		= bpf_map_update_elem,
	.gpl_only	= false,
	.pkt_access	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_PTR_TO_MAP_KEY,
	.arg3_type	= ARG_PTR_TO_MAP_VALUE,
	.arg4_type	= ARG_ANYTHING,
};

BPF_CALL_2(bpf_map_delete_elem, struct bpf_map *, map, void *, key)
{
	WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_bh_held());
	return map->ops->map_delete_elem(map, key);
}

const struct bpf_func_proto bpf_map_delete_elem_proto = {
	.func		= bpf_map_delete_elem,
	.gpl_only	= false,
	.pkt_access	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_PTR_TO_MAP_KEY,
};

BPF_CALL_3(bpf_map_push_elem, struct bpf_map *, map, void *, value, u64, flags)
{
	return map->ops->map_push_elem(map, value, flags);
}

const struct bpf_func_proto bpf_map_push_elem_proto = {
	.func		= bpf_map_push_elem,
	.gpl_only	= false,
	.pkt_access	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_PTR_TO_MAP_VALUE,
	.arg3_type	= ARG_ANYTHING,
};

BPF_CALL_2(bpf_map_pop_elem, struct bpf_map *, map, void *, value)
{
	return map->ops->map_pop_elem(map, value);
}

const struct bpf_func_proto bpf_map_pop_elem_proto = {
	.func		= bpf_map_pop_elem,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_PTR_TO_UNINIT_MAP_VALUE,
};

BPF_CALL_2(bpf_map_peek_elem, struct bpf_map *, map, void *, value)
{
	return map->ops->map_peek_elem(map, value);
}

const struct bpf_func_proto bpf_map_peek_elem_proto = {
	.func		= bpf_map_peek_elem,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_PTR_TO_UNINIT_MAP_VALUE,
};

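/* Illustrative (not part of this file): from a BPF program's point of view,
 * the queue/stack helpers above back BPF_MAP_TYPE_QUEUE/STACK maps, e.g.
 *
 *	u64 v = 42;
 *	bpf_map_push_elem(&my_queue, &v, BPF_ANY);
 *	if (!bpf_map_pop_elem(&my_queue, &v))
 *		use(v);
 *
 * where "my_queue" and "use" are hypothetical; the helpers return 0 on
 * success or a negative errno.
 */
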
const struct bpf_func_proto bpf_get_prandom_u32_proto = {
	.func		= bpf_user_rnd_u32,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
};

BPF_CALL_0(bpf_get_smp_processor_id)
{
	return smp_processor_id();
}

const struct bpf_func_proto bpf_get_smp_processor_id_proto = {
	.func		= bpf_get_smp_processor_id,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
};

BPF_CALL_0(bpf_get_numa_node_id)
{
	return numa_node_id();
}

const struct bpf_func_proto bpf_get_numa_node_id_proto = {
	.func		= bpf_get_numa_node_id,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
};

BPF_CALL_0(bpf_ktime_get_ns)
{
	/* NMI safe access to clock monotonic */
	return ktime_get_mono_fast_ns();
}

const struct bpf_func_proto bpf_ktime_get_ns_proto = {
	.func		= bpf_ktime_get_ns,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
};

BPF_CALL_0(bpf_ktime_get_boot_ns)
{
	/* NMI safe access to clock boottime */
	return ktime_get_boot_fast_ns();
}

const struct bpf_func_proto bpf_ktime_get_boot_ns_proto = {
	.func		= bpf_ktime_get_boot_ns,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
};

BPF_CALL_0(bpf_ktime_get_coarse_ns)
{
	return ktime_get_coarse_ns();
}

const struct bpf_func_proto bpf_ktime_get_coarse_ns_proto = {
	.func		= bpf_ktime_get_coarse_ns,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
};

BPF_CALL_0(bpf_get_current_pid_tgid)
{
	struct task_struct *task = current;

	if (unlikely(!task))
		return -EINVAL;

	return (u64) task->tgid << 32 | task->pid;
}

const struct bpf_func_proto bpf_get_current_pid_tgid_proto = {
	.func		= bpf_get_current_pid_tgid,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
};

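/* The value returned above packs tgid in the upper 32 bits and pid in the
 * lower 32 bits, so a BPF program typically splits it as (illustrative):
 *
 *	u64 id = bpf_get_current_pid_tgid();
 *	u32 tgid = id >> 32;
 *	u32 pid = (u32)id;
 */
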
BPF_CALL_0(bpf_get_current_uid_gid)
{
	struct task_struct *task = current;
	kuid_t uid;
	kgid_t gid;

	if (unlikely(!task))
		return -EINVAL;

	current_uid_gid(&uid, &gid);
	return (u64) from_kgid(&init_user_ns, gid) << 32 |
		     from_kuid(&init_user_ns, uid);
}

const struct bpf_func_proto bpf_get_current_uid_gid_proto = {
	.func		= bpf_get_current_uid_gid,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
};

BPF_CALL_2(bpf_get_current_comm, char *, buf, u32, size)
{
	struct task_struct *task = current;

	if (unlikely(!task))
		goto err_clear;

	/* Verifier guarantees that size > 0 */
	strscpy(buf, task->comm, size);
	return 0;
err_clear:
	memset(buf, 0, size);
	return -EINVAL;
}

const struct bpf_func_proto bpf_get_current_comm_proto = {
	.func		= bpf_get_current_comm,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg2_type	= ARG_CONST_SIZE,
};

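/* Illustrative BPF-side use: the destination buffer is at least "size"
 * bytes, and on failure the helper zeroes it, so it is initialized either
 * way:
 *
 *	char comm[TASK_COMM_LEN];
 *	bpf_get_current_comm(comm, sizeof(comm));
 */
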
#if defined(CONFIG_QUEUED_SPINLOCKS) || defined(CONFIG_BPF_ARCH_SPINLOCK)

static inline void __bpf_spin_lock(struct bpf_spin_lock *lock)
{
	arch_spinlock_t *l = (void *)lock;
	union {
		__u32 val;
		arch_spinlock_t lock;
	} u = { .lock = __ARCH_SPIN_LOCK_UNLOCKED };

	compiletime_assert(u.val == 0, "__ARCH_SPIN_LOCK_UNLOCKED not 0");
	BUILD_BUG_ON(sizeof(*l) != sizeof(__u32));
	BUILD_BUG_ON(sizeof(*lock) != sizeof(__u32));
	arch_spin_lock(l);
}

static inline void __bpf_spin_unlock(struct bpf_spin_lock *lock)
{
	arch_spinlock_t *l = (void *)lock;

	arch_spin_unlock(l);
}

#else

static inline void __bpf_spin_lock(struct bpf_spin_lock *lock)
{
	atomic_t *l = (void *)lock;

	BUILD_BUG_ON(sizeof(*l) != sizeof(*lock));
	do {
		atomic_cond_read_relaxed(l, !VAL);
	} while (atomic_xchg(l, 1));
}

static inline void __bpf_spin_unlock(struct bpf_spin_lock *lock)
{
	atomic_t *l = (void *)lock;

	atomic_set_release(l, 0);
}

#endif

static DEFINE_PER_CPU(unsigned long, irqsave_flags);

static inline void __bpf_spin_lock_irqsave(struct bpf_spin_lock *lock)
{
	unsigned long flags;

	local_irq_save(flags);
	__bpf_spin_lock(lock);
	__this_cpu_write(irqsave_flags, flags);
}

notrace BPF_CALL_1(bpf_spin_lock, struct bpf_spin_lock *, lock)
{
	__bpf_spin_lock_irqsave(lock);
	return 0;
}

const struct bpf_func_proto bpf_spin_lock_proto = {
	.func		= bpf_spin_lock,
	.gpl_only	= false,
	.ret_type	= RET_VOID,
	.arg1_type	= ARG_PTR_TO_SPIN_LOCK,
};

static inline void __bpf_spin_unlock_irqrestore(struct bpf_spin_lock *lock)
{
	unsigned long flags;

	flags = __this_cpu_read(irqsave_flags);
	__bpf_spin_unlock(lock);
	local_irq_restore(flags);
}

notrace BPF_CALL_1(bpf_spin_unlock, struct bpf_spin_lock *, lock)
{
	__bpf_spin_unlock_irqrestore(lock);
	return 0;
}

const struct bpf_func_proto bpf_spin_unlock_proto = {
	.func		= bpf_spin_unlock,
	.gpl_only	= false,
	.ret_type	= RET_VOID,
	.arg1_type	= ARG_PTR_TO_SPIN_LOCK,
};

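/* bpf_spin_lock() and bpf_spin_unlock() must be used as a matched pair on
 * the same map value. The critical section runs with IRQs disabled (see
 * __bpf_spin_lock_irqsave() above), so it has to stay short; the verifier
 * additionally rejects most helper calls and program exit while the lock
 * is held.
 */
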
void copy_map_value_locked(struct bpf_map *map, void *dst, void *src,
			   bool lock_src)
{
	struct bpf_spin_lock *lock;

	if (lock_src)
		lock = src + map->spin_lock_off;
	else
		lock = dst + map->spin_lock_off;
	preempt_disable();
	__bpf_spin_lock_irqsave(lock);
	copy_map_value(map, dst, src);
	__bpf_spin_unlock_irqrestore(lock);
	preempt_enable();
}

BPF_CALL_0(bpf_jiffies64)
{
	return get_jiffies_64();
}

const struct bpf_func_proto bpf_jiffies64_proto = {
	.func		= bpf_jiffies64,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
};

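/* bpf_jiffies64() exposes raw jiffies; converting to time units is left to
 * the program, which needs the kernel's CONFIG_HZ, e.g. (illustrative,
 * assuming HZ divides 1000):
 *
 *	u64 ms = bpf_jiffies64() * (1000 / CONFIG_HZ);
 */
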
#ifdef CONFIG_CGROUPS
BPF_CALL_0(bpf_get_current_cgroup_id)
{
	struct cgroup *cgrp;
	u64 cgrp_id;

	rcu_read_lock();
	cgrp = task_dfl_cgroup(current);
	cgrp_id = cgroup_id(cgrp);
	rcu_read_unlock();

	return cgrp_id;
}

const struct bpf_func_proto bpf_get_current_cgroup_id_proto = {
	.func		= bpf_get_current_cgroup_id,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
};

BPF_CALL_1(bpf_get_current_ancestor_cgroup_id, int, ancestor_level)
{
	struct cgroup *cgrp;
	struct cgroup *ancestor;
	u64 cgrp_id;

	rcu_read_lock();
	cgrp = task_dfl_cgroup(current);
	ancestor = cgroup_ancestor(cgrp, ancestor_level);
	cgrp_id = ancestor ? cgroup_id(ancestor) : 0;
	rcu_read_unlock();

	return cgrp_id;
}

const struct bpf_func_proto bpf_get_current_ancestor_cgroup_id_proto = {
	.func		= bpf_get_current_ancestor_cgroup_id,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_ANYTHING,
};

#ifdef CONFIG_CGROUP_BPF

BPF_CALL_2(bpf_get_local_storage, struct bpf_map *, map, u64, flags)
{
	/* flags argument is not used now,
	 * but provides an ability to extend the API.
	 * verifier checks that its value is correct.
	 */
	enum bpf_cgroup_storage_type stype = cgroup_storage_type(map);
	struct bpf_cgroup_storage *storage;
	struct bpf_cg_run_ctx *ctx;
	void *ptr;

	/* get current cgroup storage from BPF run context */
	ctx = container_of(current->bpf_ctx, struct bpf_cg_run_ctx, run_ctx);
	storage = ctx->prog_item->cgroup_storage[stype];

	if (stype == BPF_CGROUP_STORAGE_SHARED)
		ptr = &READ_ONCE(storage->buf)->data[0];
	else
		ptr = this_cpu_ptr(storage->percpu_buf);

	return (unsigned long)ptr;
}

const struct bpf_func_proto bpf_get_local_storage_proto = {
	.func		= bpf_get_local_storage,
	.gpl_only	= false,
	.ret_type	= RET_PTR_TO_MAP_VALUE,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_ANYTHING,
};
#endif

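/* The strtol/strtoul helpers below accept the numeric base in the low five
 * bits of "flags" (BPF_STRTOX_BASE_MASK). Base 0 requests auto-detection in
 * _parse_integer_fixup_radix(): "0x..." parses as hex, a leading "0" as
 * octal, anything else as decimal, mirroring kstrtox semantics.
 */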
#define BPF_STRTOX_BASE_MASK 0x1F

static int __bpf_strtoull(const char *buf, size_t buf_len, u64 flags,
			  unsigned long long *res, bool *is_negative)
{
	unsigned int base = flags & BPF_STRTOX_BASE_MASK;
	const char *cur_buf = buf;
	size_t cur_len = buf_len;
	unsigned int consumed;
	size_t val_len;
	char str[64];

	if (!buf || !buf_len || !res || !is_negative)
		return -EINVAL;

	if (base != 0 && base != 8 && base != 10 && base != 16)
		return -EINVAL;

	if (flags & ~BPF_STRTOX_BASE_MASK)
		return -EINVAL;

	while (cur_buf < buf + buf_len && isspace(*cur_buf))
		++cur_buf;

	*is_negative = (cur_buf < buf + buf_len && *cur_buf == '-');
	if (*is_negative)
		++cur_buf;

	consumed = cur_buf - buf;
	cur_len -= consumed;
	if (!cur_len)
		return -EINVAL;

	cur_len = min(cur_len, sizeof(str) - 1);
	memcpy(str, cur_buf, cur_len);
	str[cur_len] = '\0';
	cur_buf = str;

	cur_buf = _parse_integer_fixup_radix(cur_buf, &base);
	val_len = _parse_integer(cur_buf, base, res);

	if (val_len & KSTRTOX_OVERFLOW)
		return -ERANGE;

	if (val_len == 0)
		return -EINVAL;

	cur_buf += val_len;
	consumed += cur_buf - str;

	return consumed;
}

static int __bpf_strtoll(const char *buf, size_t buf_len, u64 flags,
			 long long *res)
{
	unsigned long long _res;
	bool is_negative;
	int err;

	err = __bpf_strtoull(buf, buf_len, flags, &_res, &is_negative);
	if (err < 0)
		return err;
	if (is_negative) {
		if ((long long)-_res > 0)
			return -ERANGE;
		*res = -_res;
	} else {
		if ((long long)_res < 0)
			return -ERANGE;
		*res = _res;
	}
	return err;
}

BPF_CALL_4(bpf_strtol, const char *, buf, size_t, buf_len, u64, flags,
	   long *, res)
{
	long long _res;
	int err;

	err = __bpf_strtoll(buf, buf_len, flags, &_res);
	if (err < 0)
		return err;
	if (_res != (long)_res)
		return -ERANGE;
	*res = _res;
	return err;
}

const struct bpf_func_proto bpf_strtol_proto = {
	.func		= bpf_strtol,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
	.arg2_type	= ARG_CONST_SIZE,
	.arg3_type	= ARG_ANYTHING,
	.arg4_type	= ARG_PTR_TO_LONG,
};

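/* Illustrative BPF-side use of bpf_strtol() (buffer names hypothetical):
 *
 *	long val;
 *	int n = bpf_strtol(buf, len, 0, &val);
 *
 * n is the number of characters consumed on success, or -EINVAL/-ERANGE.
 */
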
BPF_CALL_4(bpf_strtoul, const char *, buf, size_t, buf_len, u64, flags,
	   unsigned long *, res)
{
	unsigned long long _res;
	bool is_negative;
	int err;

	err = __bpf_strtoull(buf, buf_len, flags, &_res, &is_negative);
	if (err < 0)
		return err;
	if (is_negative)
		return -EINVAL;
	if (_res != (unsigned long)_res)
		return -ERANGE;
	*res = _res;
	return err;
}

const struct bpf_func_proto bpf_strtoul_proto = {
	.func		= bpf_strtoul,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
	.arg2_type	= ARG_CONST_SIZE,
	.arg3_type	= ARG_ANYTHING,
	.arg4_type	= ARG_PTR_TO_LONG,
};
#endif

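/* Illustrative BPF-side use of the string comparison helper below; s2 must
 * be a verifier-known constant string:
 *
 *	if (!bpf_strncmp(comm, sizeof(comm), "systemd"))
 *		handle_match();
 *
 * "handle_match" is a hypothetical program function.
 */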
BPF_CALL_3(bpf_strncmp, const char *, s1, u32, s1_sz, const char *, s2)
{
	return strncmp(s1, s2, s1_sz);
}

const struct bpf_func_proto bpf_strncmp_proto = {
	.func		= bpf_strncmp,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_MEM,
	.arg2_type	= ARG_CONST_SIZE,
	.arg3_type	= ARG_PTR_TO_CONST_STR,
};

BPF_CALL_4(bpf_get_ns_current_pid_tgid, u64, dev, u64, ino,
	   struct bpf_pidns_info *, nsdata, u32, size)
{
	struct task_struct *task = current;
	struct pid_namespace *pidns;
	int err = -EINVAL;

	if (unlikely(size != sizeof(struct bpf_pidns_info)))
		goto clear;

	if (unlikely((u64)(dev_t)dev != dev))
		goto clear;

	if (unlikely(!task))
		goto clear;

	pidns = task_active_pid_ns(task);
	if (unlikely(!pidns)) {
		err = -ENOENT;
		goto clear;
	}

	if (!ns_match(&pidns->ns, (dev_t)dev, ino))
		goto clear;

	nsdata->pid = task_pid_nr_ns(task, pidns);
	nsdata->tgid = task_tgid_nr_ns(task, pidns);
	return 0;
clear:
	memset((void *)nsdata, 0, (size_t) size);
	return err;
}

const struct bpf_func_proto bpf_get_ns_current_pid_tgid_proto = {
	.func		= bpf_get_ns_current_pid_tgid,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_ANYTHING,
	.arg2_type	= ARG_ANYTHING,
	.arg3_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg4_type	= ARG_CONST_SIZE,
};

static const struct bpf_func_proto bpf_get_raw_smp_processor_id_proto = {
	.func		= bpf_get_raw_cpu_id,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
};

BPF_CALL_5(bpf_event_output_data, void *, ctx, struct bpf_map *, map,
	   u64, flags, void *, data, u64, size)
{
	if (unlikely(flags & ~(BPF_F_INDEX_MASK)))
		return -EINVAL;

	return bpf_event_output(map, flags, data, size, NULL, 0, NULL);
}

const struct bpf_func_proto bpf_event_output_data_proto = {
	.func		= bpf_event_output_data,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_ANYTHING,
	.arg4_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
	.arg5_type	= ARG_CONST_SIZE_OR_ZERO,
};

BPF_CALL_3(bpf_copy_from_user, void *, dst, u32, size,
	   const void __user *, user_ptr)
{
	int ret = copy_from_user(dst, user_ptr, size);

	if (unlikely(ret)) {
		memset(dst, 0, size);
		ret = -EFAULT;
	}

	return ret;
}

const struct bpf_func_proto bpf_copy_from_user_proto = {
	.func		= bpf_copy_from_user,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg3_type	= ARG_ANYTHING,
};

BPF_CALL_5(bpf_copy_from_user_task, void *, dst, u32, size,
	   const void __user *, user_ptr, struct task_struct *, tsk, u64, flags)
{
	int ret;

	/* flags is not used yet */
	if (unlikely(flags))
		return -EINVAL;

	if (unlikely(!size))
		return 0;

	ret = access_process_vm(tsk, (unsigned long)user_ptr, dst, size, 0);
	if (ret == size)
		return 0;

	memset(dst, 0, size);
	/* Return -EFAULT for partial read */
	return ret < 0 ? ret : -EFAULT;
}

const struct bpf_func_proto bpf_copy_from_user_task_proto = {
	.func		= bpf_copy_from_user_task,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg3_type	= ARG_ANYTHING,
	.arg4_type	= ARG_PTR_TO_BTF_ID,
	.arg4_btf_id	= &btf_tracing_ids[BTF_TRACING_TYPE_TASK],
	.arg5_type	= ARG_ANYTHING
};

BPF_CALL_2(bpf_per_cpu_ptr, const void *, ptr, u32, cpu)
{
	if (cpu >= nr_cpu_ids)
		return (unsigned long)NULL;

	return (unsigned long)per_cpu_ptr((const void __percpu *)ptr, cpu);
}

const struct bpf_func_proto bpf_per_cpu_ptr_proto = {
	.func		= bpf_per_cpu_ptr,
	.gpl_only	= false,
	.ret_type	= RET_PTR_TO_MEM_OR_BTF_ID | PTR_MAYBE_NULL | MEM_RDONLY,
	.arg1_type	= ARG_PTR_TO_PERCPU_BTF_ID,
	.arg2_type	= ARG_ANYTHING,
};

BPF_CALL_1(bpf_this_cpu_ptr, const void *, percpu_ptr)
{
	return (unsigned long)this_cpu_ptr((const void __percpu *)percpu_ptr);
}

const struct bpf_func_proto bpf_this_cpu_ptr_proto = {
	.func		= bpf_this_cpu_ptr,
	.gpl_only	= false,
	.ret_type	= RET_PTR_TO_MEM_OR_BTF_ID | MEM_RDONLY,
	.arg1_type	= ARG_PTR_TO_PERCPU_BTF_ID,
};

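/* Illustrative use of the percpu pointer helpers above from a BPF program,
 * where "runqueues" is a percpu kernel symbol made visible through BTF:
 *
 *	struct rq *rq = bpf_per_cpu_ptr(&runqueues, cpu);
 *	if (rq)
 *		use(rq);
 *
 * bpf_per_cpu_ptr() may return NULL for an out-of-range cpu, while
 * bpf_this_cpu_ptr() never returns NULL (see the ret_type flags above).
 */
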
static int bpf_trace_copy_string(char *buf, void *unsafe_ptr, char fmt_ptype,
				 size_t bufsz)
{
	void __user *user_ptr = (__force void __user *)unsafe_ptr;

	buf[0] = 0;

	switch (fmt_ptype) {
	case 's':
#ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
		if ((unsigned long)unsafe_ptr < TASK_SIZE)
			return strncpy_from_user_nofault(buf, user_ptr, bufsz);
		fallthrough;
#endif
	case 'k':
		return strncpy_from_kernel_nofault(buf, unsafe_ptr, bufsz);
	case 'u':
		return strncpy_from_user_nofault(buf, user_ptr, bufsz);
	}

	return -EINVAL;
}

/* Per-cpu temp buffers used by printf-like helpers to store the bprintf binary
 * arguments representation.
 */
#define MAX_BPRINTF_BUF_LEN	512

/* Support executing three nested bprintf helper calls on a given CPU */
#define MAX_BPRINTF_NEST_LEVEL	3
struct bpf_bprintf_buffers {
	char tmp_bufs[MAX_BPRINTF_NEST_LEVEL][MAX_BPRINTF_BUF_LEN];
};
static DEFINE_PER_CPU(struct bpf_bprintf_buffers, bpf_bprintf_bufs);
static DEFINE_PER_CPU(int, bpf_bprintf_nest_level);

static int try_get_fmt_tmp_buf(char **tmp_buf)
{
	struct bpf_bprintf_buffers *bufs;
	int nest_level;

	preempt_disable();
	nest_level = this_cpu_inc_return(bpf_bprintf_nest_level);
	if (WARN_ON_ONCE(nest_level > MAX_BPRINTF_NEST_LEVEL)) {
		this_cpu_dec(bpf_bprintf_nest_level);
		preempt_enable();
		return -EBUSY;
	}
	bufs = this_cpu_ptr(&bpf_bprintf_bufs);
	*tmp_buf = bufs->tmp_bufs[nest_level - 1];

	return 0;
}

void bpf_bprintf_cleanup(void)
{
	if (this_cpu_read(bpf_bprintf_nest_level)) {
		this_cpu_dec(bpf_bprintf_nest_level);
		preempt_enable();
	}
}

/*
 * bpf_bprintf_prepare - Generic pass on format strings for bprintf-like helpers
 *
 * Returns a negative value if fmt is an invalid format string or 0 otherwise.
 *
 * This can be used in two ways:
 * - Format string verification only: when bin_args is NULL
 * - Arguments preparation: in addition to the above verification, it writes in
 *   bin_args a binary representation of arguments usable by bstr_printf where
 *   pointers from BPF have been sanitized.
 *
 * In argument preparation mode, if 0 is returned, safe temporary buffers are
 * allocated and bpf_bprintf_cleanup should be called to free them after use.
 */
int bpf_bprintf_prepare(char *fmt, u32 fmt_size, const u64 *raw_args,
			u32 **bin_args, u32 num_args)
{
	char *unsafe_ptr = NULL, *tmp_buf = NULL, *tmp_buf_end, *fmt_end;
	size_t sizeof_cur_arg, sizeof_cur_ip;
	int err, i, num_spec = 0;
	u64 cur_arg;
	char fmt_ptype, cur_ip[16], ip_spec[] = "%pXX";

	fmt_end = strnchr(fmt, fmt_size, 0);
	if (!fmt_end)
		return -EINVAL;
	fmt_size = fmt_end - fmt;

	if (bin_args) {
		if (num_args && try_get_fmt_tmp_buf(&tmp_buf))
			return -EBUSY;

		tmp_buf_end = tmp_buf + MAX_BPRINTF_BUF_LEN;
		*bin_args = (u32 *)tmp_buf;
	}

	for (i = 0; i < fmt_size; i++) {
		if ((!isprint(fmt[i]) && !isspace(fmt[i])) || !isascii(fmt[i])) {
			err = -EINVAL;
			goto out;
		}

		if (fmt[i] != '%')
			continue;

		if (fmt[i + 1] == '%') {
			i++;
			continue;
		}

		if (num_spec >= num_args) {
			err = -EINVAL;
			goto out;
		}

		/* The string is zero-terminated so if fmt[i] != 0, we can
		 * always access fmt[i + 1], in the worst case it will be a 0
		 */
		i++;

		/* skip optional "[0 +-][num]" width formatting field */
		while (fmt[i] == '0' || fmt[i] == '+' || fmt[i] == '-' ||
		       fmt[i] == ' ')
			i++;
		if (fmt[i] >= '1' && fmt[i] <= '9') {
			i++;
			while (fmt[i] >= '0' && fmt[i] <= '9')
				i++;
		}

		if (fmt[i] == 'p') {
			sizeof_cur_arg = sizeof(long);

			if ((fmt[i + 1] == 'k' || fmt[i + 1] == 'u') &&
			    fmt[i + 2] == 's') {
				fmt_ptype = fmt[i + 1];
				i += 2;
				goto fmt_str;
			}

			if (fmt[i + 1] == 0 || isspace(fmt[i + 1]) ||
			    ispunct(fmt[i + 1]) || fmt[i + 1] == 'K' ||
			    fmt[i + 1] == 'x' || fmt[i + 1] == 's' ||
			    fmt[i + 1] == 'S') {
				/* just kernel pointers */
				if (tmp_buf)
					cur_arg = raw_args[num_spec];
				i++;
				goto nocopy_fmt;
			}

			if (fmt[i + 1] == 'B') {
				if (tmp_buf) {
					err = snprintf(tmp_buf,
						       (tmp_buf_end - tmp_buf),
						       "%pB",
						       (void *)(long)raw_args[num_spec]);
					tmp_buf += (err + 1);
				}

				i++;
				num_spec++;
				continue;
			}

			/* only support "%pI4", "%pi4", "%pI6" and "%pi6". */
			if ((fmt[i + 1] != 'i' && fmt[i + 1] != 'I') ||
			    (fmt[i + 2] != '4' && fmt[i + 2] != '6')) {
				err = -EINVAL;
				goto out;
			}

			i += 2;
			if (!tmp_buf)
				goto nocopy_fmt;

			sizeof_cur_ip = (fmt[i] == '4') ? 4 : 16;
			if (tmp_buf_end - tmp_buf < sizeof_cur_ip) {
				err = -ENOSPC;
				goto out;
			}

			unsafe_ptr = (char *)(long)raw_args[num_spec];
			err = copy_from_kernel_nofault(cur_ip, unsafe_ptr,
						       sizeof_cur_ip);
			if (err < 0)
				memset(cur_ip, 0, sizeof_cur_ip);

			/* hack: bstr_printf expects IP addresses to be
			 * pre-formatted as strings, ironically, the easiest way
			 * to do that is to call snprintf.
			 */
			ip_spec[2] = fmt[i - 1];
			ip_spec[3] = fmt[i];
			err = snprintf(tmp_buf, tmp_buf_end - tmp_buf,
				       ip_spec, &cur_ip);

			tmp_buf += err + 1;
			num_spec++;

			continue;
		} else if (fmt[i] == 's') {
			fmt_ptype = fmt[i];
fmt_str:
			if (fmt[i + 1] != 0 &&
			    !isspace(fmt[i + 1]) &&
			    !ispunct(fmt[i + 1])) {
				err = -EINVAL;
				goto out;
			}

			if (!tmp_buf)
				goto nocopy_fmt;

			if (tmp_buf_end == tmp_buf) {
				err = -ENOSPC;
				goto out;
			}

			unsafe_ptr = (char *)(long)raw_args[num_spec];
			err = bpf_trace_copy_string(tmp_buf, unsafe_ptr,
						    fmt_ptype,
						    tmp_buf_end - tmp_buf);
			if (err < 0) {
				tmp_buf[0] = '\0';
				err = 1;
			}

			tmp_buf += err;
			num_spec++;

			continue;
		} else if (fmt[i] == 'c') {
			if (!tmp_buf)
				goto nocopy_fmt;

			if (tmp_buf_end == tmp_buf) {
				err = -ENOSPC;
				goto out;
			}

			*tmp_buf = raw_args[num_spec];
			tmp_buf++;
			num_spec++;

			continue;
		}

		sizeof_cur_arg = sizeof(int);

		if (fmt[i] == 'l') {
			sizeof_cur_arg = sizeof(long);
			i++;
		}
		if (fmt[i] == 'l') {
			sizeof_cur_arg = sizeof(long long);
			i++;
		}

		if (fmt[i] != 'i' && fmt[i] != 'd' && fmt[i] != 'u' &&
		    fmt[i] != 'x' && fmt[i] != 'X') {
			err = -EINVAL;
			goto out;
		}

		if (tmp_buf)
			cur_arg = raw_args[num_spec];
nocopy_fmt:
		if (tmp_buf) {
			tmp_buf = PTR_ALIGN(tmp_buf, sizeof(u32));
			if (tmp_buf_end - tmp_buf < sizeof_cur_arg) {
				err = -ENOSPC;
				goto out;
			}

			if (sizeof_cur_arg == 8) {
				*(u32 *)tmp_buf = *(u32 *)&cur_arg;
				*(u32 *)(tmp_buf + 4) = *((u32 *)&cur_arg + 1);
			} else {
				*(u32 *)tmp_buf = (u32)(long)cur_arg;
			}
			tmp_buf += sizeof_cur_arg;
		}
		num_spec++;
	}

	err = 0;
out:
	if (err)
		bpf_bprintf_cleanup();
	return err;
}

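/* Callers that passed a non-NULL bin_args and got 0 back own a per-cpu
 * buffer slot from try_get_fmt_tmp_buf() and must call bpf_bprintf_cleanup()
 * once they are done with the prepared arguments; bpf_snprintf() below shows
 * the canonical prepare/bstr_printf/cleanup sequence.
 */
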
BPF_CALL_5(bpf_snprintf, char *, str, u32, str_size, char *, fmt,
	   const void *, data, u32, data_len)
{
	int err, num_args;
	u32 *bin_args;

	if (data_len % 8 || data_len > MAX_BPRINTF_VARARGS * 8 ||
	    (data_len && !data))
		return -EINVAL;
	num_args = data_len / 8;

	/* ARG_PTR_TO_CONST_STR guarantees that fmt is zero-terminated so we
	 * can safely give an unbounded size.
	 */
	err = bpf_bprintf_prepare(fmt, UINT_MAX, data, &bin_args, num_args);
	if (err < 0)
		return err;

	err = bstr_printf(str, str_size, fmt, bin_args);

	bpf_bprintf_cleanup();

	return err + 1;
}

const struct bpf_func_proto bpf_snprintf_proto = {
	.func		= bpf_snprintf,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_MEM_OR_NULL,
	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg3_type	= ARG_PTR_TO_CONST_STR,
	.arg4_type	= ARG_PTR_TO_MEM | PTR_MAYBE_NULL | MEM_RDONLY,
	.arg5_type	= ARG_CONST_SIZE_OR_ZERO,
};

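/* Illustrative BPF-side use (the format string must live in read-only
 * memory so it is seen as ARG_PTR_TO_CONST_STR):
 *
 *	char out[64];
 *	u64 args[] = { pid, cpu };
 *	bpf_snprintf(out, sizeof(out), "pid=%d cpu=%d", args, sizeof(args));
 *
 * The return value is the number of bytes that would have been written,
 * including the terminating NUL.
 */
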
/* BPF map elements can contain 'struct bpf_timer'.
 * Such map owns all of its BPF timers.
 * 'struct bpf_timer' is allocated as part of map element allocation
 * and it's zero initialized.
 * That space is used to keep 'struct bpf_timer_kern'.
 * bpf_timer_init() allocates 'struct bpf_hrtimer', inits hrtimer, and
 * remembers 'struct bpf_map *' pointer it's part of.
 * bpf_timer_set_callback() increments prog refcnt and assign bpf callback_fn.
 * bpf_timer_start() arms the timer.
 * If user space reference to a map goes to zero at this point
 * ops->map_release_uref callback is responsible for cancelling the timers,
 * freeing their memory and decrementing prog's refcnts.
 * bpf_timer_cancel() cancels the timer and decrements prog's refcnt.
 * Inner maps can contain bpf timers as well; they are cancelled and freed
 * through the same ops->map_release_uref mechanism.
 */
struct bpf_hrtimer {
	struct hrtimer timer;
	struct bpf_map *map;
	struct bpf_prog *prog;
	void __rcu *callback_fn;
	void *value;
};

/* the actual struct hidden inside uapi struct bpf_timer */
struct bpf_timer_kern {
	struct bpf_hrtimer *timer;
	/* bpf_spin_lock is used here instead of spinlock_t to make
	 * sure that it always fits into space reserved by struct bpf_timer
	 * regardless of LOCKDEP and spinlock debug flags.
	 */
	struct bpf_spin_lock lock;
} __attribute__((aligned(8)));

static DEFINE_PER_CPU(struct bpf_hrtimer *, hrtimer_running);

static enum hrtimer_restart bpf_timer_cb(struct hrtimer *hrtimer)
{
	struct bpf_hrtimer *t = container_of(hrtimer, struct bpf_hrtimer, timer);
	struct bpf_map *map = t->map;
	void *value = t->value;
	bpf_callback_t callback_fn;
	void *key;
	u32 idx;

	BTF_TYPE_EMIT(struct bpf_timer);
	callback_fn = rcu_dereference_check(t->callback_fn, rcu_read_lock_bh_held());
	if (!callback_fn)
		goto out;

	/* bpf_timer_cb() runs in hrtimer_run_softirq. It doesn't migrate and
	 * cannot be preempted by another bpf_timer_cb() on the same cpu.
	 * Remember the timer this callback is servicing to prevent
	 * deadlock if callback_fn() calls bpf_timer_cancel() or
	 * bpf_map_delete_elem() on the same timer.
	 */
	this_cpu_write(hrtimer_running, t);
	if (map->map_type == BPF_MAP_TYPE_ARRAY) {
		struct bpf_array *array = container_of(map, struct bpf_array, map);

		/* compute the key */
		idx = ((char *)value - array->value) / array->elem_size;
		key = &idx;
	} else { /* hash or lru */
		key = value - round_up(map->key_size, 8);
	}

	callback_fn((u64)(long)map, (u64)(long)key, (u64)(long)value, 0, 0);
	/* The verifier checked that return value is zero. */

	this_cpu_write(hrtimer_running, NULL);
out:
	return HRTIMER_NORESTART;
}

BPF_CALL_3(bpf_timer_init, struct bpf_timer_kern *, timer, struct bpf_map *, map,
	   u64, flags)
{
	clockid_t clockid = flags & (MAX_CLOCKS - 1);
	struct bpf_hrtimer *t;
	int ret = 0;

	BUILD_BUG_ON(MAX_CLOCKS != 16);
	BUILD_BUG_ON(sizeof(struct bpf_timer_kern) > sizeof(struct bpf_timer));
	BUILD_BUG_ON(__alignof__(struct bpf_timer_kern) != __alignof__(struct bpf_timer));

	if (in_nmi())
		return -EOPNOTSUPP;

	if (flags >= MAX_CLOCKS ||
	    /* similar to timerfd except _ALARM variants are not supported */
	    (clockid != CLOCK_MONOTONIC &&
	     clockid != CLOCK_REALTIME &&
	     clockid != CLOCK_BOOTTIME))
		return -EINVAL;
	__bpf_spin_lock_irqsave(&timer->lock);
	t = timer->timer;
	if (t) {
		ret = -EBUSY;
		goto out;
	}
	if (!atomic64_read(&map->usercnt)) {
		/* maps with timers must be either held by user space
		 * or pinned in bpffs.
		 */
		ret = -EPERM;
		goto out;
	}
	/* allocate hrtimer via map_kmalloc to use memcg accounting */
	t = bpf_map_kmalloc_node(map, sizeof(*t), GFP_ATOMIC, map->numa_node);
	if (!t) {
		ret = -ENOMEM;
		goto out;
	}
	t->value = (void *)timer - map->timer_off;
	t->map = map;
	t->prog = NULL;
	rcu_assign_pointer(t->callback_fn, NULL);
	hrtimer_init(&t->timer, clockid, HRTIMER_MODE_REL_SOFT);
	t->timer.function = bpf_timer_cb;
	timer->timer = t;
out:
	__bpf_spin_unlock_irqrestore(&timer->lock);
	return ret;
}

static const struct bpf_func_proto bpf_timer_init_proto = {
	.func		= bpf_timer_init,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_TIMER,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_ANYTHING,
};

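/* Typical lifecycle driven from a BPF program (illustrative; "val" is a map
 * element containing a struct bpf_timer, "timer_cb" a program-local
 * callback):
 *
 *	bpf_timer_init(&val->timer, &my_map, CLOCK_MONOTONIC);
 *	bpf_timer_set_callback(&val->timer, timer_cb);
 *	bpf_timer_start(&val->timer, NSEC_PER_MSEC, 0);
 *
 * bpf_timer_set_callback() and bpf_timer_start() are defined below.
 */
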
BPF_CALL_3(bpf_timer_set_callback, struct bpf_timer_kern *, timer, void *, callback_fn,
	   struct bpf_prog_aux *, aux)
{
	struct bpf_prog *prev, *prog = aux->prog;
	struct bpf_hrtimer *t;
	int ret = 0;

	if (in_nmi())
		return -EOPNOTSUPP;
	__bpf_spin_lock_irqsave(&timer->lock);
	t = timer->timer;
	if (!t) {
		ret = -EINVAL;
		goto out;
	}
	if (!atomic64_read(&t->map->usercnt)) {
		/* maps with timers must be either held by user space
		 * or pinned in bpffs. Otherwise timer might still be
		 * running even when bpf prog is detached and user space
		 * is gone, since map_release_uref won't ever be called.
		 */
		ret = -EPERM;
		goto out;
	}
	prev = t->prog;
	if (prev != prog) {
		/* Bump prog refcnt once. Every bpf_timer_set_callback()
		 * can pick different callback_fn-s within the same prog.
		 */
		prog = bpf_prog_inc_not_zero(prog);
		if (IS_ERR(prog)) {
			ret = PTR_ERR(prog);
			goto out;
		}
		if (prev)
			/* Drop prev prog refcnt when swapping with new prog */
			bpf_prog_put(prev);
		t->prog = prog;
	}
	rcu_assign_pointer(t->callback_fn, callback_fn);
out:
	__bpf_spin_unlock_irqrestore(&timer->lock);
	return ret;
}

static const struct bpf_func_proto bpf_timer_set_callback_proto = {
	.func		= bpf_timer_set_callback,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_TIMER,
	.arg2_type	= ARG_PTR_TO_FUNC,
};

BPF_CALL_3(bpf_timer_start, struct bpf_timer_kern *, timer, u64, nsecs, u64, flags)
{
	struct bpf_hrtimer *t;
	int ret = 0;

	if (in_nmi())
		return -EOPNOTSUPP;
	if (flags)
		return -EINVAL;
	__bpf_spin_lock_irqsave(&timer->lock);
	t = timer->timer;
	if (!t || !t->prog) {
		ret = -EINVAL;
		goto out;
	}
	hrtimer_start(&t->timer, ns_to_ktime(nsecs), HRTIMER_MODE_REL_SOFT);
out:
	__bpf_spin_unlock_irqrestore(&timer->lock);
	return ret;
}

static const struct bpf_func_proto bpf_timer_start_proto = {
	.func		= bpf_timer_start,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_TIMER,
	.arg2_type	= ARG_ANYTHING,
	.arg3_type	= ARG_ANYTHING,
};

static void drop_prog_refcnt(struct bpf_hrtimer *t)
{
	struct bpf_prog *prog = t->prog;

	if (prog) {
		bpf_prog_put(prog);
		t->prog = NULL;
		rcu_assign_pointer(t->callback_fn, NULL);
	}
}

BPF_CALL_1(bpf_timer_cancel, struct bpf_timer_kern *, timer)
{
	struct bpf_hrtimer *t;
	int ret = 0;

	if (in_nmi())
		return -EOPNOTSUPP;
	__bpf_spin_lock_irqsave(&timer->lock);
	t = timer->timer;
	if (!t) {
		ret = -EINVAL;
		goto out;
	}
	if (this_cpu_read(hrtimer_running) == t) {
		/* If bpf callback_fn is trying to bpf_timer_cancel()
		 * its own timer the hrtimer_cancel() will deadlock
		 * since it waits for callback_fn to finish.
		 */
		ret = -EDEADLK;
		goto out;
	}
	drop_prog_refcnt(t);
out:
	__bpf_spin_unlock_irqrestore(&timer->lock);
	/* Cancel the timer and wait for associated callback to finish
	 * if it was running.
	 */
	ret = ret ?: hrtimer_cancel(&t->timer);
	return ret;
}

static const struct bpf_func_proto bpf_timer_cancel_proto = {
	.func		= bpf_timer_cancel,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_TIMER,
};

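/* Return convention of bpf_timer_cancel() above: 0 if the timer was not
 * queued, 1 if it was cancelled before firing (from hrtimer_cancel()),
 * -EDEADLK when a callback tries to cancel its own timer, or -EINVAL if
 * the timer was never initialized.
 */
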
/* This function is called by map_delete/update_elem for individual element and
 * by ops->map_release_uref when the user space reference to a map reaches zero.
 */
void bpf_timer_cancel_and_free(void *val)
{
	struct bpf_timer_kern *timer = val;
	struct bpf_hrtimer *t;

	/* Performance optimization: read timer->timer without lock first. */
	if (!READ_ONCE(timer->timer))
		return;

	__bpf_spin_lock_irqsave(&timer->lock);
	/* re-read it under lock */
	t = timer->timer;
	if (!t)
		goto out;
	drop_prog_refcnt(t);
	/* The subsequent bpf_timer_start/cancel() helpers won't be able
	 * to use this timer, since it won't be initialized.
	 */
	timer->timer = NULL;
out:
	__bpf_spin_unlock_irqrestore(&timer->lock);
	if (!t)
		return;
	/* Cancel the timer and wait for callback to complete if it was running.
	 * If hrtimer_cancel() can be safely called it's safe to call kfree(t)
	 * right after for both preallocated and non-preallocated maps.
	 * The timer->timer = NULL was already done and no code path can
	 * see address 't' anymore.
	 *
	 * Check that bpf_map_delete/update_elem() wasn't called from timer
	 * callback_fn. In such case don't call hrtimer_cancel() (since it will
	 * deadlock) and don't call hrtimer_try_to_cancel() (since it will just
	 * return -1). Though callback_fn is still running on this cpu it's
	 * safe to do kfree(t) because bpf_timer_cb() read everything it needed
	 * from 't' already, and timer->timer = NULL means the callback can no
	 * longer reach 't'; the timer is effectively cancelled because
	 * bpf_timer_cb() returns HRTIMER_NORESTART.
	 */
	if (this_cpu_read(hrtimer_running) != t)
		hrtimer_cancel(&t->timer);
	kfree(t);
}

const struct bpf_func_proto bpf_get_current_task_proto __weak;
const struct bpf_func_proto bpf_get_current_task_btf_proto __weak;
const struct bpf_func_proto bpf_probe_read_user_proto __weak;
const struct bpf_func_proto bpf_probe_read_user_str_proto __weak;
const struct bpf_func_proto bpf_probe_read_kernel_proto __weak;
const struct bpf_func_proto bpf_probe_read_kernel_str_proto __weak;
const struct bpf_func_proto bpf_task_pt_regs_proto __weak;

const struct bpf_func_proto *
bpf_base_func_proto(enum bpf_func_id func_id)
{
	switch (func_id) {
	case BPF_FUNC_map_lookup_elem:
		return &bpf_map_lookup_elem_proto;
	case BPF_FUNC_map_update_elem:
		return &bpf_map_update_elem_proto;
	case BPF_FUNC_map_delete_elem:
		return &bpf_map_delete_elem_proto;
	case BPF_FUNC_map_push_elem:
		return &bpf_map_push_elem_proto;
	case BPF_FUNC_map_pop_elem:
		return &bpf_map_pop_elem_proto;
	case BPF_FUNC_map_peek_elem:
		return &bpf_map_peek_elem_proto;
	case BPF_FUNC_get_prandom_u32:
		return &bpf_get_prandom_u32_proto;
	case BPF_FUNC_get_smp_processor_id:
		return &bpf_get_raw_smp_processor_id_proto;
	case BPF_FUNC_get_numa_node_id:
		return &bpf_get_numa_node_id_proto;
	case BPF_FUNC_tail_call:
		return &bpf_tail_call_proto;
	case BPF_FUNC_ktime_get_ns:
		return &bpf_ktime_get_ns_proto;
	case BPF_FUNC_ktime_get_boot_ns:
		return &bpf_ktime_get_boot_ns_proto;
	case BPF_FUNC_ringbuf_output:
		return &bpf_ringbuf_output_proto;
	case BPF_FUNC_ringbuf_reserve:
		return &bpf_ringbuf_reserve_proto;
	case BPF_FUNC_ringbuf_submit:
		return &bpf_ringbuf_submit_proto;
	case BPF_FUNC_ringbuf_discard:
		return &bpf_ringbuf_discard_proto;
	case BPF_FUNC_ringbuf_query:
		return &bpf_ringbuf_query_proto;
	case BPF_FUNC_for_each_map_elem:
		return &bpf_for_each_map_elem_proto;
	case BPF_FUNC_loop:
		return &bpf_loop_proto;
	case BPF_FUNC_strncmp:
		return &bpf_strncmp_proto;
	default:
		break;
	}

	if (!bpf_capable())
		return NULL;

	switch (func_id) {
	case BPF_FUNC_spin_lock:
		return &bpf_spin_lock_proto;
	case BPF_FUNC_spin_unlock:
		return &bpf_spin_unlock_proto;
	case BPF_FUNC_jiffies64:
		return &bpf_jiffies64_proto;
	case BPF_FUNC_per_cpu_ptr:
		return &bpf_per_cpu_ptr_proto;
	case BPF_FUNC_this_cpu_ptr:
		return &bpf_this_cpu_ptr_proto;
	case BPF_FUNC_timer_init:
		return &bpf_timer_init_proto;
	case BPF_FUNC_timer_set_callback:
		return &bpf_timer_set_callback_proto;
	case BPF_FUNC_timer_start:
		return &bpf_timer_start_proto;
	case BPF_FUNC_timer_cancel:
		return &bpf_timer_cancel_proto;
	default:
		break;
	}

	if (!perfmon_capable())
		return NULL;

	switch (func_id) {
	case BPF_FUNC_trace_printk:
		return bpf_get_trace_printk_proto();
	case BPF_FUNC_get_current_task:
		return &bpf_get_current_task_proto;
	case BPF_FUNC_get_current_task_btf:
		return &bpf_get_current_task_btf_proto;
	case BPF_FUNC_probe_read_user:
		return &bpf_probe_read_user_proto;
	case BPF_FUNC_probe_read_kernel:
		return security_locked_down(LOCKDOWN_BPF_READ_KERNEL) < 0 ?
		       NULL : &bpf_probe_read_kernel_proto;
	case BPF_FUNC_probe_read_user_str:
		return &bpf_probe_read_user_str_proto;
	case BPF_FUNC_probe_read_kernel_str:
		return security_locked_down(LOCKDOWN_BPF_READ_KERNEL) < 0 ?
		       NULL : &bpf_probe_read_kernel_str_proto;
	case BPF_FUNC_snprintf_btf:
		return &bpf_snprintf_btf_proto;
	case BPF_FUNC_snprintf:
		return &bpf_snprintf_proto;
	case BPF_FUNC_task_pt_regs:
		return &bpf_task_pt_regs_proto;
	case BPF_FUNC_trace_vprintk:
		return bpf_get_trace_vprintk_proto();
	default:
		return NULL;
	}
}

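/* Illustrative chaining pattern (names hypothetical): subsystem
 * verifier_ops->get_func_proto() implementations handle their own helpers
 * first and fall back to bpf_base_func_proto():
 *
 *	static const struct bpf_func_proto *
 *	my_get_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
 *	{
 *		switch (func_id) {
 *		case BPF_FUNC_get_current_pid_tgid:
 *			return &bpf_get_current_pid_tgid_proto;
 *		default:
 *			return bpf_base_func_proto(func_id);
 *		}
 *	}
 */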