#include <linux/bpf.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/filter.h>
#include <linux/perf_event.h>

#include "map_in_map.h"

#define ARRAY_CREATE_FLAG_MASK \
	(BPF_F_NUMA_NODE | BPF_F_RDONLY | BPF_F_WRONLY)

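/* Array maps keep every value in one contiguous, pre-allocated region indexed
 * by a u32 key, so lookups are O(1) and elements can only be overwritten,
 * never deleted.
 */
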
static void bpf_array_free_percpu(struct bpf_array *array)
{
	int i;

	for (i = 0; i < array->map.max_entries; i++) {
		free_percpu(array->pptrs[i]);
		cond_resched();
	}
}

static int bpf_array_alloc_percpu(struct bpf_array *array)
{
	void __percpu *ptr;
	int i;

	for (i = 0; i < array->map.max_entries; i++) {
		ptr = __alloc_percpu_gfp(array->elem_size, 8,
					 GFP_USER | __GFP_NOWARN);
		if (!ptr) {
			bpf_array_free_percpu(array);
			return -ENOMEM;
		}
		array->pptrs[i] = ptr;
		cond_resched();
	}

	return 0;
}

/* Called from syscall */
static int array_map_alloc_check(union bpf_attr *attr)
{
	bool percpu = attr->map_type == BPF_MAP_TYPE_PERCPU_ARRAY;
	int numa_node = bpf_map_attr_numa_node(attr);

	/* check sanity of attributes */
	if (attr->max_entries == 0 || attr->key_size != 4 ||
	    attr->value_size == 0 ||
	    attr->map_flags & ~ARRAY_CREATE_FLAG_MASK ||
	    (percpu && numa_node != NUMA_NO_NODE))
		return -EINVAL;

	if (attr->value_size > KMALLOC_MAX_SIZE)
		/* if value_size is bigger, the user space won't be able to
		 * access the elements.
		 */
		return -E2BIG;

	return 0;
}

static struct bpf_map *array_map_alloc(union bpf_attr *attr)
{
	bool percpu = attr->map_type == BPF_MAP_TYPE_PERCPU_ARRAY;
	int ret, numa_node = bpf_map_attr_numa_node(attr);
	u32 elem_size, index_mask, max_entries;
	bool unpriv = !capable(CAP_SYS_ADMIN);
	u64 cost, array_size, mask64;
	struct bpf_array *array;

	elem_size = round_up(attr->value_size, 8);

	max_entries = attr->max_entries;

	/* On 32 bit archs roundup_pow_of_two() with max_entries that has
	 * the upper most bit set in u32 space is undefined behavior due to
	 * the resulting 1U << 32, so do it manually here in u64 space.
	 */
	mask64 = fls_long(max_entries - 1);
	mask64 = 1ULL << mask64;
	mask64 -= 1;

	index_mask = mask64;
	if (unpriv) {
		/* round up array size to nearest power of 2,
		 * since cpu will speculate within index_mask limits
		 */
		max_entries = index_mask + 1;
		/* Check for overflows. */
		if (max_entries < attr->max_entries)
			return ERR_PTR(-E2BIG);
	}

	array_size = sizeof(*array);
	if (percpu)
		array_size += (u64) max_entries * sizeof(void *);
	else
		array_size += (u64) max_entries * elem_size;

	/* make sure there is no u32 overflow later in round_up() */
	cost = array_size;
	if (cost >= U32_MAX - PAGE_SIZE)
		return ERR_PTR(-ENOMEM);
	if (percpu) {
		cost += (u64)attr->max_entries * elem_size * num_possible_cpus();
		if (cost >= U32_MAX - PAGE_SIZE)
			return ERR_PTR(-ENOMEM);
	}
	cost = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT;

	ret = bpf_map_precharge_memlock(cost);
	if (ret < 0)
		return ERR_PTR(ret);

	/* allocate all map elements and zero-initialize them */
	array = bpf_map_area_alloc(array_size, numa_node);
	if (!array)
		return ERR_PTR(-ENOMEM);
	array->index_mask = index_mask;
	array->map.unpriv_array = unpriv;

	/* copy mandatory map attributes */
	bpf_map_init_from_attr(&array->map, attr);
	array->map.pages = cost;
	array->elem_size = elem_size;

	if (percpu && bpf_array_alloc_percpu(array)) {
		bpf_map_area_free(array);
		return ERR_PTR(-ENOMEM);
	}

	return &array->map;
}

/* Called from syscall or from eBPF program */
static void *array_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;

	if (unlikely(index >= array->map.max_entries))
		return NULL;

	return array->value + array->elem_size * (index & array->index_mask);
}

/* emit BPF instructions equivalent to C code of array_map_lookup_elem() */
static u32 array_map_gen_lookup(struct bpf_map *map, struct bpf_insn *insn_buf)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	struct bpf_insn *insn = insn_buf;
	u32 elem_size = round_up(map->value_size, 8);
	const int ret = BPF_REG_0;
	const int map_ptr = BPF_REG_1;
	const int index = BPF_REG_2;

	*insn++ = BPF_ALU64_IMM(BPF_ADD, map_ptr, offsetof(struct bpf_array, value));
	*insn++ = BPF_LDX_MEM(BPF_W, ret, index, 0);
	if (map->unpriv_array) {
		*insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 4);
		*insn++ = BPF_ALU32_IMM(BPF_AND, ret, array->index_mask);
	} else {
		*insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 3);
	}

	if (is_power_of_2(elem_size)) {
		*insn++ = BPF_ALU64_IMM(BPF_LSH, ret, ilog2(elem_size));
	} else {
		*insn++ = BPF_ALU64_IMM(BPF_MUL, ret, elem_size);
	}
	*insn++ = BPF_ALU64_REG(BPF_ADD, ret, map_ptr);
	*insn++ = BPF_JMP_IMM(BPF_JA, 0, 0, 1);
	*insn++ = BPF_MOV64_IMM(ret, 0);
	return insn - insn_buf;
}

/* Called from eBPF program */
static void *percpu_array_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;

	if (unlikely(index >= array->map.max_entries))
		return NULL;

	return this_cpu_ptr(array->pptrs[index & array->index_mask]);
}

int bpf_percpu_array_copy(struct bpf_map *map, void *key, void *value)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;
	void __percpu *pptr;
	int cpu, off = 0;
	u32 size;

	if (unlikely(index >= array->map.max_entries))
		return -ENOENT;

	/* per_cpu areas are zero-filled and bpf programs can only
	 * access 'value_size' of them, so copying rounded areas
	 * will not leak any kernel data
	 */
	size = round_up(map->value_size, 8);
	rcu_read_lock();
	pptr = array->pptrs[index & array->index_mask];
	for_each_possible_cpu(cpu) {
		bpf_long_memcpy(value + off, per_cpu_ptr(pptr, cpu), size);
		off += size;
	}
	rcu_read_unlock();
	return 0;
}

/* Called from syscall */
static int array_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = key ? *(u32 *)key : U32_MAX;
	u32 *next = (u32 *)next_key;

	if (index >= array->map.max_entries) {
		*next = 0;
		return 0;
	}

	if (index == array->map.max_entries - 1)
		return -ENOENT;

	*next = index + 1;
	return 0;
}

/* Called from syscall or from eBPF program */
static int array_map_update_elem(struct bpf_map *map, void *key, void *value,
				 u64 map_flags)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;

	if (unlikely(map_flags > BPF_EXIST))
		/* unknown flags */
		return -EINVAL;

	if (unlikely(index >= array->map.max_entries))
		/* all elements were pre-allocated, cannot insert a new one */
		return -E2BIG;

	if (unlikely(map_flags == BPF_NOEXIST))
		/* all elements already exist */
		return -EEXIST;

	if (array->map.map_type == BPF_MAP_TYPE_PERCPU_ARRAY)
		memcpy(this_cpu_ptr(array->pptrs[index & array->index_mask]),
		       value, map->value_size);
	else
		memcpy(array->value +
		       array->elem_size * (index & array->index_mask),
		       value, map->value_size);
	return 0;
}

int bpf_percpu_array_update(struct bpf_map *map, void *key, void *value,
			    u64 map_flags)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;
	void __percpu *pptr;
	int cpu, off = 0;
	u32 size;

	if (unlikely(map_flags > BPF_EXIST))
		/* unknown flags */
		return -EINVAL;

	if (unlikely(index >= array->map.max_entries))
		/* all elements were pre-allocated, cannot insert a new one */
		return -E2BIG;

	if (unlikely(map_flags == BPF_NOEXIST))
		/* all elements already exist */
		return -EEXIST;

	/* the user space will provide round_up(value_size, 8) bytes that
	 * will be copied into the per-cpu area. bpf programs can only access
	 * value_size of it. During lookup the same extra bytes will be
	 * returned or zeros which were zero-filled by percpu_alloc,
	 * so no kernel data leaks are possible
	 */
	size = round_up(map->value_size, 8);
	rcu_read_lock();
	pptr = array->pptrs[index & array->index_mask];
	for_each_possible_cpu(cpu) {
		bpf_long_memcpy(per_cpu_ptr(pptr, cpu), value + off, size);
		off += size;
	}
	rcu_read_unlock();
	return 0;
}

/* Called from syscall or from eBPF program; array elements cannot be deleted */
static int array_map_delete_elem(struct bpf_map *map, void *key)
{
	return -EINVAL;
}

/* Called when map->refcnt goes to zero, either from workqueue or from syscall */
static void array_map_free(struct bpf_map *map)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);

	/* at this point bpf_prog->aux->refcnt == 0 and this map->refcnt == 0,
	 * so the programs (can be more than one that used this map) were
	 * disconnected from events. Wait for outstanding programs to complete
	 * and free the array
	 */
	synchronize_rcu();

	if (array->map.map_type == BPF_MAP_TYPE_PERCPU_ARRAY)
		bpf_array_free_percpu(array);

	bpf_map_area_free(array);
}

const struct bpf_map_ops array_map_ops = {
	.map_alloc_check = array_map_alloc_check,
	.map_alloc = array_map_alloc,
	.map_free = array_map_free,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = array_map_lookup_elem,
	.map_update_elem = array_map_update_elem,
	.map_delete_elem = array_map_delete_elem,
	.map_gen_lookup = array_map_gen_lookup,
};

const struct bpf_map_ops percpu_array_map_ops = {
	.map_alloc_check = array_map_alloc_check,
	.map_alloc = array_map_alloc,
	.map_free = array_map_free,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = percpu_array_map_lookup_elem,
	.map_update_elem = array_map_update_elem,
	.map_delete_elem = array_map_delete_elem,
};

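/* The remaining map types are "fd arrays": user space stores a file
 * descriptor as the value and the map keeps a pointer to the kernel object
 * (bpf_prog, perf event, cgroup or inner map) resolved from that fd via the
 * map_fd_get_ptr()/map_fd_put_ptr() callbacks.
 */
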
static int fd_array_map_alloc_check(union bpf_attr *attr)
{
	/* only file descriptors can be stored in this type of map */
	if (attr->value_size != sizeof(u32))
		return -EINVAL;
	return array_map_alloc_check(attr);
}

static void fd_array_map_free(struct bpf_map *map)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	int i;

	synchronize_rcu();

	/* make sure it's empty */
	for (i = 0; i < array->map.max_entries; i++)
		BUG_ON(array->ptrs[i] != NULL);

	bpf_map_area_free(array);
}

static void *fd_array_map_lookup_elem(struct bpf_map *map, void *key)
{
	/* fd array elements hold kernel pointers and cannot be read directly;
	 * user space reads go through bpf_fd_array_map_lookup_elem() instead.
	 */
	return NULL;
}

/* only called from syscall */
int bpf_fd_array_map_lookup_elem(struct bpf_map *map, void *key, u32 *value)
{
	void **elem, *ptr;
	int ret = 0;

	if (!map->ops->map_fd_sys_lookup_elem)
		return -ENOTSUPP;

	rcu_read_lock();
	elem = array_map_lookup_elem(map, key);
	if (elem && (ptr = READ_ONCE(*elem)))
		*value = map->ops->map_fd_sys_lookup_elem(ptr);
	else
		ret = -ENOENT;
	rcu_read_unlock();

	return ret;
}

/* only called from syscall */
int bpf_fd_array_map_update_elem(struct bpf_map *map, struct file *map_file,
				 void *key, void *value, u64 map_flags)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	void *new_ptr, *old_ptr;
	u32 index = *(u32 *)key, ufd;

	if (map_flags != BPF_ANY)
		return -EINVAL;

	if (index >= array->map.max_entries)
		return -E2BIG;

	ufd = *(u32 *)value;
	new_ptr = map->ops->map_fd_get_ptr(map, map_file, ufd);
	if (IS_ERR(new_ptr))
		return PTR_ERR(new_ptr);

	old_ptr = xchg(array->ptrs + index, new_ptr);
	if (old_ptr)
		map->ops->map_fd_put_ptr(old_ptr);

	return 0;
}

static int fd_array_map_delete_elem(struct bpf_map *map, void *key)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	void *old_ptr;
	u32 index = *(u32 *)key;

	if (index >= array->map.max_entries)
		return -E2BIG;

	old_ptr = xchg(array->ptrs + index, NULL);
	if (old_ptr) {
		map->ops->map_fd_put_ptr(old_ptr);
		return 0;
	} else {
		return -ENOENT;
	}
}

static void *prog_fd_array_get_ptr(struct bpf_map *map,
				   struct file *map_file, int fd)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	struct bpf_prog *prog = bpf_prog_get(fd);

	if (IS_ERR(prog))
		return prog;

	if (!bpf_prog_array_compatible(array, prog)) {
		bpf_prog_put(prog);
		return ERR_PTR(-EINVAL);
	}

	return prog;
}

static void prog_fd_array_put_ptr(void *ptr)
{
	bpf_prog_put(ptr);
}

static u32 prog_fd_array_sys_lookup_elem(void *ptr)
{
	return ((struct bpf_prog *)ptr)->aux->id;
}

/* decrement refcnt of all bpf_progs that are stored in this map */
void bpf_fd_array_map_clear(struct bpf_map *map)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	int i;

	for (i = 0; i < array->map.max_entries; i++)
		fd_array_map_delete_elem(map, &i);
}

const struct bpf_map_ops prog_array_map_ops = {
	.map_alloc_check = fd_array_map_alloc_check,
	.map_alloc = array_map_alloc,
	.map_free = fd_array_map_free,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = fd_array_map_lookup_elem,
	.map_delete_elem = fd_array_map_delete_elem,
	.map_fd_get_ptr = prog_fd_array_get_ptr,
	.map_fd_put_ptr = prog_fd_array_put_ptr,
	.map_fd_sys_lookup_elem = prog_fd_array_sys_lookup_elem,
};

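/* A perf event array element wraps the perf event file in a bpf_event_entry
 * so that the file reference can be dropped after an RCU grace period once
 * the element is deleted or replaced.
 */
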
static struct bpf_event_entry *bpf_event_entry_gen(struct file *perf_file,
						   struct file *map_file)
{
	struct bpf_event_entry *ee;

	ee = kzalloc(sizeof(*ee), GFP_ATOMIC);
	if (ee) {
		ee->event = perf_file->private_data;
		ee->perf_file = perf_file;
		ee->map_file = map_file;
	}

	return ee;
}

static void __bpf_event_entry_free(struct rcu_head *rcu)
{
	struct bpf_event_entry *ee;

	ee = container_of(rcu, struct bpf_event_entry, rcu);
	fput(ee->perf_file);
	kfree(ee);
}

static void bpf_event_entry_free_rcu(struct bpf_event_entry *ee)
{
	call_rcu(&ee->rcu, __bpf_event_entry_free);
}

static void *perf_event_fd_array_get_ptr(struct bpf_map *map,
					 struct file *map_file, int fd)
{
	struct bpf_event_entry *ee;
	struct perf_event *event;
	struct file *perf_file;
	u64 value;

	perf_file = perf_event_get(fd);
	if (IS_ERR(perf_file))
		return perf_file;

	ee = ERR_PTR(-EOPNOTSUPP);
	event = perf_file->private_data;
	if (perf_event_read_local(event, &value, NULL, NULL) == -EOPNOTSUPP)
		goto err_out;

	ee = bpf_event_entry_gen(perf_file, map_file);
	if (ee)
		return ee;
	ee = ERR_PTR(-ENOMEM);
err_out:
	fput(perf_file);
	return ee;
}

static void perf_event_fd_array_put_ptr(void *ptr)
{
	bpf_event_entry_free_rcu(ptr);
}

static void perf_event_fd_array_release(struct bpf_map *map,
					struct file *map_file)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	struct bpf_event_entry *ee;
	int i;

	rcu_read_lock();
	for (i = 0; i < array->map.max_entries; i++) {
		ee = READ_ONCE(array->ptrs[i]);
		if (ee && ee->map_file == map_file)
			fd_array_map_delete_elem(map, &i);
	}
	rcu_read_unlock();
}

const struct bpf_map_ops perf_event_array_map_ops = {
	.map_alloc_check = fd_array_map_alloc_check,
	.map_alloc = array_map_alloc,
	.map_free = fd_array_map_free,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = fd_array_map_lookup_elem,
	.map_delete_elem = fd_array_map_delete_elem,
	.map_fd_get_ptr = perf_event_fd_array_get_ptr,
	.map_fd_put_ptr = perf_event_fd_array_put_ptr,
	.map_release = perf_event_fd_array_release,
};

#ifdef CONFIG_SOCK_CGROUP_DATA
static void *cgroup_fd_array_get_ptr(struct bpf_map *map,
				     struct file *map_file /* not used */,
				     int fd)
{
	return cgroup_get_from_fd(fd);
}

static void cgroup_fd_array_put_ptr(void *ptr)
{
	/* cgroup_put() frees the cgroup after an RCU grace period */
	cgroup_put(ptr);
}

static void cgroup_fd_array_free(struct bpf_map *map)
{
	bpf_fd_array_map_clear(map);
	fd_array_map_free(map);
}

const struct bpf_map_ops cgroup_array_map_ops = {
	.map_alloc_check = fd_array_map_alloc_check,
	.map_alloc = array_map_alloc,
	.map_free = cgroup_fd_array_free,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = fd_array_map_lookup_elem,
	.map_delete_elem = fd_array_map_delete_elem,
	.map_fd_get_ptr = cgroup_fd_array_get_ptr,
	.map_fd_put_ptr = cgroup_fd_array_put_ptr,
};
#endif

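/* Array of maps: each element stores a pointer to another bpf_map.  A
 * template created from attr->inner_map_fd at allocation time is kept in
 * map->inner_map_meta so the verifier knows the type of the inner maps.
 */
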
static struct bpf_map *array_of_map_alloc(union bpf_attr *attr)
{
	struct bpf_map *map, *inner_map_meta;

	inner_map_meta = bpf_map_meta_alloc(attr->inner_map_fd);
	if (IS_ERR(inner_map_meta))
		return inner_map_meta;

	map = array_map_alloc(attr);
	if (IS_ERR(map)) {
		bpf_map_meta_free(inner_map_meta);
		return map;
	}

	map->inner_map_meta = inner_map_meta;

	return map;
}

static void array_of_map_free(struct bpf_map *map)
{
	/* bpf_map_meta_free() only frees the inner map template; the
	 * references to the inner maps still stored in the array are dropped
	 * by bpf_fd_array_map_clear() before the array itself is freed.
	 */
	bpf_map_meta_free(map->inner_map_meta);
	bpf_fd_array_map_clear(map);
	fd_array_map_free(map);
}

static void *array_of_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_map **inner_map = array_map_lookup_elem(map, key);

	if (!inner_map)
		return NULL;

	return READ_ONCE(*inner_map);
}

static u32 array_of_map_gen_lookup(struct bpf_map *map,
				   struct bpf_insn *insn_buf)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 elem_size = round_up(map->value_size, 8);
	struct bpf_insn *insn = insn_buf;
	const int ret = BPF_REG_0;
	const int map_ptr = BPF_REG_1;
	const int index = BPF_REG_2;

	*insn++ = BPF_ALU64_IMM(BPF_ADD, map_ptr, offsetof(struct bpf_array, value));
	*insn++ = BPF_LDX_MEM(BPF_W, ret, index, 0);
	if (map->unpriv_array) {
		*insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 6);
		*insn++ = BPF_ALU32_IMM(BPF_AND, ret, array->index_mask);
	} else {
		*insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 5);
	}
	if (is_power_of_2(elem_size))
		*insn++ = BPF_ALU64_IMM(BPF_LSH, ret, ilog2(elem_size));
	else
		*insn++ = BPF_ALU64_IMM(BPF_MUL, ret, elem_size);
	*insn++ = BPF_ALU64_REG(BPF_ADD, ret, map_ptr);
	*insn++ = BPF_LDX_MEM(BPF_DW, ret, ret, 0);
	*insn++ = BPF_JMP_IMM(BPF_JEQ, ret, 0, 1);
	*insn++ = BPF_JMP_IMM(BPF_JA, 0, 0, 1);
	*insn++ = BPF_MOV64_IMM(ret, 0);

	return insn - insn_buf;
}

const struct bpf_map_ops array_of_maps_map_ops = {
	.map_alloc_check = fd_array_map_alloc_check,
	.map_alloc = array_of_map_alloc,
	.map_free = array_of_map_free,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = array_of_map_lookup_elem,
	.map_delete_elem = fd_array_map_delete_elem,
	.map_fd_get_ptr = bpf_map_fd_get_ptr,
	.map_fd_put_ptr = bpf_map_fd_put_ptr,
	.map_fd_sys_lookup_elem = bpf_map_fd_sys_lookup_elem,
	.map_gen_lookup = array_of_map_gen_lookup,
};