// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 */
#include <linux/bpf.h>
#include <linux/rcupdate.h>
#include <linux/random.h>
#include <linux/smp.h>
#include <linux/topology.h>
#include <linux/ktime.h>
#include <linux/sched.h>
#include <linux/uidgid.h>
#include <linux/filter.h>
#include <linux/ctype.h>
#include <linux/jiffies.h>

#include "../../lib/kstrtox.h"

/* If kernel subsystem is allowing eBPF programs to call this function,
 * inside its own verifier_ops->get_func_proto() callback it should return
 * bpf_map_lookup_elem_proto, so that verifier can properly check the
 * arguments, side effects, etc are valid from the point of view of the
 * program
 */
BPF_CALL_2(bpf_map_lookup_elem, struct bpf_map *, map, void *, key)
{
	WARN_ON_ONCE(!rcu_read_lock_held());
	return (unsigned long) map->ops->map_lookup_elem(map, key);
}

const struct bpf_func_proto bpf_map_lookup_elem_proto = {
	.func		= bpf_map_lookup_elem,
	.gpl_only	= false,
	.pkt_access	= true,
	.ret_type	= RET_PTR_TO_MAP_VALUE_OR_NULL,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_PTR_TO_MAP_KEY,
};
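
/* Note: RET_PTR_TO_MAP_VALUE_OR_NULL makes the verifier reject any program
 * that dereferences the returned pointer without first checking it against
 * NULL. A minimal BPF-program-side sketch (not part of this file; map and
 * variable names are hypothetical):
 *
 *	long *val = bpf_map_lookup_elem(&my_map, &key);
 *	if (val)
 *		__sync_fetch_and_add(val, 1);
 */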

BPF_CALL_4(bpf_map_update_elem, struct bpf_map *, map, void *, key,
	   void *, value, u64, flags)
{
	WARN_ON_ONCE(!rcu_read_lock_held());
	return map->ops->map_update_elem(map, key, value, flags);
}

const struct bpf_func_proto bpf_map_update_elem_proto = {
	.func		= bpf_map_update_elem,
	.gpl_only	= false,
	.pkt_access	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_PTR_TO_MAP_KEY,
	.arg3_type	= ARG_PTR_TO_MAP_VALUE,
	.arg4_type	= ARG_ANYTHING,
};
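
/* The flags argument selects the update policy: BPF_ANY creates or
 * overwrites, BPF_NOEXIST only creates a new entry, BPF_EXIST only
 * overwrites an existing one. Illustrative BPF-program-side use
 * (hypothetical map and variable names):
 *
 *	long init = 1;
 *	bpf_map_update_elem(&my_map, &key, &init, BPF_NOEXIST);
 */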

BPF_CALL_2(bpf_map_delete_elem, struct bpf_map *, map, void *, key)
{
	WARN_ON_ONCE(!rcu_read_lock_held());
	return map->ops->map_delete_elem(map, key);
}

const struct bpf_func_proto bpf_map_delete_elem_proto = {
	.func		= bpf_map_delete_elem,
	.gpl_only	= false,
	.pkt_access	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_PTR_TO_MAP_KEY,
};

BPF_CALL_3(bpf_map_push_elem, struct bpf_map *, map, void *, value, u64, flags)
{
	return map->ops->map_push_elem(map, value, flags);
}

const struct bpf_func_proto bpf_map_push_elem_proto = {
	.func		= bpf_map_push_elem,
	.gpl_only	= false,
	.pkt_access	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_PTR_TO_MAP_VALUE,
	.arg3_type	= ARG_ANYTHING,
};

BPF_CALL_2(bpf_map_pop_elem, struct bpf_map *, map, void *, value)
{
	return map->ops->map_pop_elem(map, value);
}

const struct bpf_func_proto bpf_map_pop_elem_proto = {
	.func		= bpf_map_pop_elem,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_PTR_TO_UNINIT_MAP_VALUE,
};

BPF_CALL_2(bpf_map_peek_elem, struct bpf_map *, map, void *, value)
{
	return map->ops->map_peek_elem(map, value);
}

const struct bpf_func_proto bpf_map_peek_elem_proto = {
	.func		= bpf_map_peek_elem,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_PTR_TO_UNINIT_MAP_VALUE,
};
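
/* Unlike the lookup/update/delete helpers above, the stack/queue map
 * helpers do not assert rcu_read_lock_held(): queue and stack maps
 * serialize access with their own internal lock rather than relying on
 * RCU-protected element lookup.
 */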

const struct bpf_func_proto bpf_get_prandom_u32_proto = {
	.func		= bpf_user_rnd_u32,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
};
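
/* bpf_user_rnd_u32() is defined in kernel/bpf/core.c; it draws from a
 * per-CPU prandom state seeded separately from the generic prandom_u32()
 * state, so BPF users can neither influence nor observe the kernel's
 * generic pseudo-random sequence.
 */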

BPF_CALL_0(bpf_get_smp_processor_id)
{
	return smp_processor_id();
}

const struct bpf_func_proto bpf_get_smp_processor_id_proto = {
	.func		= bpf_get_smp_processor_id,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
};

BPF_CALL_0(bpf_get_numa_node_id)
{
	return numa_node_id();
}

const struct bpf_func_proto bpf_get_numa_node_id_proto = {
	.func		= bpf_get_numa_node_id,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
};

BPF_CALL_0(bpf_ktime_get_ns)
{
	/* NMI safe access to clock monotonic */
	return ktime_get_mono_fast_ns();
}

const struct bpf_func_proto bpf_ktime_get_ns_proto = {
	.func		= bpf_ktime_get_ns,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
};

BPF_CALL_0(bpf_get_current_pid_tgid)
{
	struct task_struct *task = current;

	if (unlikely(!task))
		return -EINVAL;

	return (u64) task->tgid << 32 | task->pid;
}

const struct bpf_func_proto bpf_get_current_pid_tgid_proto = {
	.func		= bpf_get_current_pid_tgid,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
};
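
/* The packed return value puts the tgid (what userspace calls the PID) in
 * the upper 32 bits and the pid (the thread ID) in the lower 32 bits.
 * BPF-program-side unpacking sketch:
 *
 *	u64 id = bpf_get_current_pid_tgid();
 *	u32 tgid = id >> 32;
 *	u32 tid = (u32)id;
 */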

BPF_CALL_0(bpf_get_current_uid_gid)
{
	struct task_struct *task = current;
	kuid_t uid;
	kgid_t gid;

	if (unlikely(!task))
		return -EINVAL;

	current_uid_gid(&uid, &gid);
	return (u64) from_kgid(&init_user_ns, gid) << 32 |
		     from_kuid(&init_user_ns, uid);
}

const struct bpf_func_proto bpf_get_current_uid_gid_proto = {
	.func		= bpf_get_current_uid_gid,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
};

BPF_CALL_2(bpf_get_current_comm, char *, buf, u32, size)
{
	struct task_struct *task = current;

	if (unlikely(!task))
		goto err_clear;

	strncpy(buf, task->comm, size);

	/* Verifier guarantees that size > 0. For task->comm exceeding
	 * size, guarantee that buf is %NUL-terminated. Unconditionally
	 * done here to save the size test.
	 */
	buf[size - 1] = 0;
	return 0;
err_clear:
	memset(buf, 0, size);
	return -EINVAL;
}

const struct bpf_func_proto bpf_get_current_comm_proto = {
	.func		= bpf_get_current_comm,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg2_type	= ARG_CONST_SIZE,
};

#if defined(CONFIG_QUEUED_SPINLOCKS) || defined(CONFIG_BPF_ARCH_SPINLOCK)

static inline void __bpf_spin_lock(struct bpf_spin_lock *lock)
{
	arch_spinlock_t *l = (void *)lock;
	union {
		__u32 val;
		arch_spinlock_t lock;
	} u = { .lock = __ARCH_SPIN_LOCK_UNLOCKED };

	compiletime_assert(u.val == 0, "__ARCH_SPIN_LOCK_UNLOCKED not 0");
	BUILD_BUG_ON(sizeof(*l) != sizeof(__u32));
	BUILD_BUG_ON(sizeof(*lock) != sizeof(__u32));
	arch_spin_lock(l);
}

static inline void __bpf_spin_unlock(struct bpf_spin_lock *lock)
{
	arch_spinlock_t *l = (void *)lock;

	arch_spin_unlock(l);
}

#else

static inline void __bpf_spin_lock(struct bpf_spin_lock *lock)
{
	atomic_t *l = (void *)lock;

	BUILD_BUG_ON(sizeof(*l) != sizeof(*lock));
	do {
		atomic_cond_read_relaxed(l, !VAL);
	} while (atomic_xchg(l, 1));
}

static inline void __bpf_spin_unlock(struct bpf_spin_lock *lock)
{
	atomic_t *l = (void *)lock;

	atomic_set_release(l, 0);
}

#endif
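
/* Two implementations back bpf_spin_lock: when the arch spinlock fits into
 * 32 bits (queued spinlocks, or an arch that opts in via
 * CONFIG_BPF_ARCH_SPINLOCK), the lock word is treated as a real
 * arch_spinlock_t. Otherwise a generic test-and-set lock is used:
 * atomic_cond_read_relaxed() spins until the word reads zero, then
 * atomic_xchg() (a fully ordered RMW) attempts to take ownership, and
 * atomic_set_release() pairs with it on unlock.
 */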

static DEFINE_PER_CPU(unsigned long, irqsave_flags);

notrace BPF_CALL_1(bpf_spin_lock, struct bpf_spin_lock *, lock)
{
	unsigned long flags;

	local_irq_save(flags);
	__bpf_spin_lock(lock);
	__this_cpu_write(irqsave_flags, flags);
	return 0;
}

const struct bpf_func_proto bpf_spin_lock_proto = {
	.func		= bpf_spin_lock,
	.gpl_only	= false,
	.ret_type	= RET_VOID,
	.arg1_type	= ARG_PTR_TO_SPIN_LOCK,
};

notrace BPF_CALL_1(bpf_spin_unlock, struct bpf_spin_lock *, lock)
{
	unsigned long flags;

	flags = __this_cpu_read(irqsave_flags);
	__bpf_spin_unlock(lock);
	local_irq_restore(flags);
	return 0;
}

const struct bpf_func_proto bpf_spin_unlock_proto = {
	.func		= bpf_spin_unlock,
	.gpl_only	= false,
	.ret_type	= RET_VOID,
	.arg1_type	= ARG_PTR_TO_SPIN_LOCK,
};
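
/* A single per-CPU slot for the saved IRQ flags is sufficient because BPF
 * programs run with preemption disabled and the verifier enforces that
 * every bpf_spin_lock() is paired with a bpf_spin_unlock() in the same
 * program, with no nesting and no other helper calls in between: lock and
 * unlock always execute back to back on the same CPU.
 */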

void copy_map_value_locked(struct bpf_map *map, void *dst, void *src,
			   bool lock_src)
{
	struct bpf_spin_lock *lock;

	if (lock_src)
		lock = src + map->spin_lock_off;
	else
		lock = dst + map->spin_lock_off;
	preempt_disable();
	____bpf_spin_lock(lock);
	copy_map_value(map, dst, src);
	____bpf_spin_unlock(lock);
	preempt_enable();
}
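
/* ____bpf_spin_lock()/____bpf_spin_unlock() are the inner functions that
 * the BPF_CALL_1() wrappers above expand to, so this path also saves and
 * restores IRQ flags around the copy. It lets the syscall side (e.g. a map
 * update from userspace) copy a spin-lock-protected value consistently
 * against concurrent BPF-side locking.
 */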

BPF_CALL_0(bpf_jiffies64)
{
	return get_jiffies_64();
}

const struct bpf_func_proto bpf_jiffies64_proto = {
	.func		= bpf_jiffies64,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
};

#ifdef CONFIG_CGROUPS
BPF_CALL_0(bpf_get_current_cgroup_id)
{
	struct cgroup *cgrp = task_dfl_cgroup(current);

	return cgroup_id(cgrp);
}

const struct bpf_func_proto bpf_get_current_cgroup_id_proto = {
	.func		= bpf_get_current_cgroup_id,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
};

#ifdef CONFIG_CGROUP_BPF
DECLARE_PER_CPU(struct bpf_cgroup_storage*,
		bpf_cgroup_storage[MAX_BPF_CGROUP_STORAGE_TYPE]);

BPF_CALL_2(bpf_get_local_storage, struct bpf_map *, map, u64, flags)
{
	/* flags argument is not used now,
	 * but provides an ability to extend the API.
	 * verifier checks that its value is correct.
	 */
	enum bpf_cgroup_storage_type stype = cgroup_storage_type(map);
	struct bpf_cgroup_storage *storage;
	void *ptr;

	storage = this_cpu_read(bpf_cgroup_storage[stype]);

	if (stype == BPF_CGROUP_STORAGE_SHARED)
		ptr = &READ_ONCE(storage->buf)->data[0];
	else
		ptr = this_cpu_ptr(storage->percpu_buf);

	return (unsigned long)ptr;
}

const struct bpf_func_proto bpf_get_local_storage_proto = {
	.func		= bpf_get_local_storage,
	.gpl_only	= false,
	.ret_type	= RET_PTR_TO_MAP_VALUE,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_ANYTHING,
};
#endif

#define BPF_STRTOX_BASE_MASK 0x1F

static int __bpf_strtoull(const char *buf, size_t buf_len, u64 flags,
			  unsigned long long *res, bool *is_negative)
{
	unsigned int base = flags & BPF_STRTOX_BASE_MASK;
	const char *cur_buf = buf;
	size_t cur_len = buf_len;
	unsigned int consumed;
	size_t val_len;
	char str[64];

	if (!buf || !buf_len || !res || !is_negative)
		return -EINVAL;

	if (base != 0 && base != 8 && base != 10 && base != 16)
		return -EINVAL;

	if (flags & ~BPF_STRTOX_BASE_MASK)
		return -EINVAL;

	while (cur_buf < buf + buf_len && isspace(*cur_buf))
		++cur_buf;

	*is_negative = (cur_buf < buf + buf_len && *cur_buf == '-');
	if (*is_negative)
		++cur_buf;

	consumed = cur_buf - buf;
	cur_len -= consumed;
	if (!cur_len)
		return -EINVAL;

	cur_len = min(cur_len, sizeof(str) - 1);
	memcpy(str, cur_buf, cur_len);
	str[cur_len] = '\0';
	cur_buf = str;

	cur_buf = _parse_integer_fixup_radix(cur_buf, &base);
	val_len = _parse_integer(cur_buf, base, res);

	if (val_len & KSTRTOX_OVERFLOW)
		return -ERANGE;

	if (val_len == 0)
		return -EINVAL;

	cur_buf += val_len;
	consumed += cur_buf - str;

	return consumed;
}
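
/* On success the return value is the total number of characters consumed,
 * including any leading whitespace and sign. E.g. for buf = " -42",
 * buf_len = 4 and flags = 0 (auto-detect base), the outcome is *res = 42,
 * *is_negative = true, return value 4.
 */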

static int __bpf_strtoll(const char *buf, size_t buf_len, u64 flags,
			 long long *res)
{
	unsigned long long _res;
	bool is_negative;
	int err;

	err = __bpf_strtoull(buf, buf_len, flags, &_res, &is_negative);
	if (err < 0)
		return err;
	if (is_negative) {
		if ((long long)-_res > 0)
			return -ERANGE;
		*res = -_res;
	} else {
		if ((long long)_res < 0)
			return -ERANGE;
		*res = _res;
	}
	return err;
}

BPF_CALL_4(bpf_strtol, const char *, buf, size_t, buf_len, u64, flags,
	   long *, res)
{
	long long _res;
	int err;

	err = __bpf_strtoll(buf, buf_len, flags, &_res);
	if (err < 0)
		return err;
	if (_res != (long)_res)
		return -ERANGE;
	*res = _res;
	return err;
}

const struct bpf_func_proto bpf_strtol_proto = {
	.func		= bpf_strtol,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_MEM,
	.arg2_type	= ARG_CONST_SIZE,
	.arg3_type	= ARG_ANYTHING,
	.arg4_type	= ARG_PTR_TO_LONG,
};
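
/* Minimal BPF-program-side sketch (hypothetical context, e.g. a cgroup
 * sysctl program that has a decimal string in buf):
 *
 *	long val;
 *	int err = bpf_strtol(buf, sizeof(buf), 0, &val);
 *
 * A negative return is an error; a positive one is the number of
 * characters parsed, with the result stored in val.
 */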

BPF_CALL_4(bpf_strtoul, const char *, buf, size_t, buf_len, u64, flags,
	   unsigned long *, res)
{
	unsigned long long _res;
	bool is_negative;
	int err;

	err = __bpf_strtoull(buf, buf_len, flags, &_res, &is_negative);
	if (err < 0)
		return err;
	if (is_negative)
		return -EINVAL;
	if (_res != (unsigned long)_res)
		return -ERANGE;
	*res = _res;
	return err;
}

const struct bpf_func_proto bpf_strtoul_proto = {
	.func		= bpf_strtoul,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_MEM,
	.arg2_type	= ARG_CONST_SIZE,
	.arg3_type	= ARG_ANYTHING,
	.arg4_type	= ARG_PTR_TO_LONG,
};
#endif