// SPDX-License-Identifier: GPL-2.0-only
/* bpf/cpumap.c
 *
 * Copyright (c) 2017 Jesper Dangaard Brouer, Red Hat Inc.
 */

/* The 'cpumap' is primarily used as a backend map for the XDP BPF helper
 * call bpf_redirect_map() and the XDP_REDIRECT action, like 'devmap'.
 *
 * Unlike devmap, which redirects XDP frames out another NIC device,
 * this map type redirects raw XDP frames to another CPU.  The remote
 * CPU will do the SKB allocation and call the normal network stack.
 *
 * This is a scalability and isolation mechanism that allows separating
 * the early driver network XDP layer from the rest of the netstack,
 * and assigning dedicated CPUs for this stage.  This basically allows
 * for 10G wirespeed pre-filtering via bpf.
 */
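/* For orientation, a minimal sketch (not part of this file) of the
 * BPF-side usage this map backs: an XDP program redirecting packets
 * into a cpumap via bpf_redirect_map().  The map name, program name and
 * CPU selection below are hypothetical; the map must be populated from
 * userspace first (see cpu_map_update_elem() below).
 *
 *	struct {
 *		__uint(type, BPF_MAP_TYPE_CPUMAP);
 *		__uint(max_entries, 64);
 *		__type(key, __u32);
 *		__type(value, struct bpf_cpumap_val);
 *	} cpu_map SEC(".maps");
 *
 *	SEC("xdp")
 *	int xdp_redirect_cpu(struct xdp_md *ctx)
 *	{
 *		__u32 cpu = 1;	// destination CPU == map key
 *
 *		return bpf_redirect_map(&cpu_map, cpu, 0);
 *	}
 */
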
#include <linux/bpf.h>
#include <linux/filter.h>
#include <linux/ptr_ring.h>
#include <net/xdp.h>

#include <linux/sched.h>
#include <linux/workqueue.h>
#include <linux/kthread.h>
#include <linux/capability.h>
#include <trace/events/xdp.h>

#include <linux/netdevice.h>   /* netif_receive_skb_list */
#include <linux/etherdevice.h> /* eth_type_trans */

/* General idea: XDP packets getting XDP-redirected to another CPU will
 * at most be stored/queued for one driver ->poll() call.  It is
 * guaranteed that queueing the frame and the flush operation happen on
 * the same CPU.  Thus, the __cpu_map_flush() operation can deduce via
 * this_cpu_ptr() which queue in bpf_cpu_map_entry contains packets.
 */

#define CPU_MAP_BULK_SIZE 8 /* 8 == one cacheline on 64-bit archs */
struct bpf_cpu_map_entry;
struct bpf_cpu_map;

struct xdp_bulk_queue {
	void *q[CPU_MAP_BULK_SIZE];
	struct list_head flush_node;
	struct bpf_cpu_map_entry *obj;
	unsigned int count;
};

/* Struct for every remote "destination" CPU in map */
struct bpf_cpu_map_entry {
	u32 cpu;    /* kthread CPU and map index */
	int map_id; /* Back reference to map */

	/* XDP can run multiple RX-ring queues, need __percpu enqueue store */
	struct xdp_bulk_queue __percpu *bulkq;

	struct bpf_cpu_map *cmap;

	/* Queue with potential multi-producers, and single-consumer kthread */
	struct ptr_ring *queue;
	struct task_struct *kthread;

	struct bpf_cpumap_val value;
	struct bpf_prog *prog;

	atomic_t refcnt; /* Control when this struct can be free'd */
	struct rcu_head rcu;

	struct work_struct kthread_stop_wq;
};

struct bpf_cpu_map {
	struct bpf_map map;
	/* Below members specific for map type */
	struct bpf_cpu_map_entry **cpu_map;
};

static DEFINE_PER_CPU(struct list_head, cpu_map_flush_list);

static struct bpf_map *cpu_map_alloc(union bpf_attr *attr)
{
	u32 value_size = attr->value_size;
	struct bpf_cpu_map *cmap;
	int err = -ENOMEM;

	if (!bpf_capable())
		return ERR_PTR(-EPERM);

	/* check sanity of attributes */
	if (attr->max_entries == 0 || attr->key_size != 4 ||
	    (value_size != offsetofend(struct bpf_cpumap_val, qsize) &&
	     value_size != offsetofend(struct bpf_cpumap_val, bpf_prog.fd)) ||
	    attr->map_flags & ~BPF_F_NUMA_NODE)
		return ERR_PTR(-EINVAL);

	cmap = kzalloc(sizeof(*cmap), GFP_USER | __GFP_ACCOUNT);
	if (!cmap)
		return ERR_PTR(-ENOMEM);

	bpf_map_init_from_attr(&cmap->map, attr);

	/* Pre-limit array size based on NR_CPUS, not final CPU check */
	if (cmap->map.max_entries > NR_CPUS) {
		err = -E2BIG;
		goto free_cmap;
	}

	/* Alloc array for possible remote "destination" CPUs */
	cmap->cpu_map = bpf_map_area_alloc(cmap->map.max_entries *
					   sizeof(struct bpf_cpu_map_entry *),
					   cmap->map.numa_node);
	if (!cmap->cpu_map)
		goto free_cmap;

	return &cmap->map;
free_cmap:
	kfree(cmap);
	return ERR_PTR(err);
}

static void get_cpu_map_entry(struct bpf_cpu_map_entry *rcpu)
{
	atomic_inc(&rcpu->refcnt);
}

/* called from workqueue, to workaround syscall using preempt_disable */
static void cpu_map_kthread_stop(struct work_struct *work)
{
	struct bpf_cpu_map_entry *rcpu;

	rcpu = container_of(work, struct bpf_cpu_map_entry, kthread_stop_wq);

	/* Wait for flush in __cpu_map_entry_free(), via full RCU barrier,
	 * as it waits until all in-flight call_rcu() callbacks complete.
	 */
	rcu_barrier();

	/* kthread_stop will wake_up_process and wait for it to complete */
	kthread_stop(rcpu->kthread);
}

static void __cpu_map_ring_cleanup(struct ptr_ring *ring)
{
	/* The tear-down procedure should have made sure that the queue is
	 * empty.  See __cpu_map_entry_replace() and the workqueue-invoked
	 * cpu_map_kthread_stop().  Catch any broken behaviour gracefully
	 * and warn once.
	 */
	struct xdp_frame *xdpf;

	while ((xdpf = ptr_ring_consume(ring)))
		if (WARN_ON_ONCE(xdpf))
			xdp_return_frame(xdpf);
}

static void put_cpu_map_entry(struct bpf_cpu_map_entry *rcpu)
{
	if (atomic_dec_and_test(&rcpu->refcnt)) {
		if (rcpu->prog)
			bpf_prog_put(rcpu->prog);

		/* The queue should be empty at this point */
		__cpu_map_ring_cleanup(rcpu->queue);
		ptr_ring_cleanup(rcpu->queue, NULL);
		kfree(rcpu->queue);
		kfree(rcpu);
	}
}

static int cpu_map_bpf_prog_run_xdp(struct bpf_cpu_map_entry *rcpu,
				    void **frames, int n,
				    struct xdp_cpumap_stats *stats)
{
	struct xdp_rxq_info rxq;
	struct xdp_buff xdp;
	int i, nframes = 0;

	if (!rcpu->prog)
		return n;

	rcu_read_lock_bh();

	xdp_set_return_frame_no_direct();
	xdp.rxq = &rxq;

	for (i = 0; i < n; i++) {
		struct xdp_frame *xdpf = frames[i];
		u32 act;
		int err;

		rxq.dev = xdpf->dev_rx;
		rxq.mem = xdpf->mem;
		/* TODO: report queue_index to xdp_rxq_info */

		xdp_convert_frame_to_buff(xdpf, &xdp);

		act = bpf_prog_run_xdp(rcpu->prog, &xdp);
		switch (act) {
		case XDP_PASS:
			err = xdp_update_frame_from_buff(&xdp, xdpf);
			if (err < 0) {
				xdp_return_frame(xdpf);
				stats->drop++;
			} else {
				frames[nframes++] = xdpf;
				stats->pass++;
			}
			break;
		case XDP_REDIRECT:
			err = xdp_do_redirect(xdpf->dev_rx, &xdp,
					      rcpu->prog);
			if (unlikely(err)) {
				xdp_return_frame(xdpf);
				stats->drop++;
			} else {
				stats->redirect++;
			}
			break;
		default:
			bpf_warn_invalid_xdp_action(act);
			fallthrough;
		case XDP_DROP:
			xdp_return_frame(xdpf);
			stats->drop++;
			break;
		}
	}

	if (stats->redirect)
		xdp_do_flush_map();

	xdp_clear_return_frame_no_direct();

	rcu_read_unlock_bh(); /* resched point, may call do_softirq() */

	return nframes;
}

#define CPUMAP_BATCH 8

static int cpu_map_kthread_run(void *data)
{
	struct bpf_cpu_map_entry *rcpu = data;

	set_current_state(TASK_INTERRUPTIBLE);

	/* When the kthread is given a stop order, rcpu has already been
	 * disconnected from the map, so no new packets can enter.
	 * Remaining in-flight per-CPU stored packets are flushed to this
	 * queue.  Honor the kthread_stop signal only once the queue is
	 * empty.
	 */
	while (!kthread_should_stop() || !__ptr_ring_empty(rcpu->queue)) {
		struct xdp_cpumap_stats stats = {}; /* zero stats */
		unsigned int kmem_alloc_drops = 0, sched = 0;
		gfp_t gfp = __GFP_ZERO | GFP_ATOMIC;
		void *frames[CPUMAP_BATCH];
		void *skbs[CPUMAP_BATCH];
		int i, n, m, nframes;
		LIST_HEAD(list);

		/* Release CPU reschedule checks */
		if (__ptr_ring_empty(rcpu->queue)) {
			set_current_state(TASK_INTERRUPTIBLE);
			/* Recheck to avoid lost wake-up */
			if (__ptr_ring_empty(rcpu->queue)) {
				schedule();
				sched = 1;
			} else {
				__set_current_state(TASK_RUNNING);
			}
		} else {
			sched = cond_resched();
		}

		/* The bpf_cpu_map_entry is single consumer, with this
		 * kthread CPU pinned.  Lockless access to the ptr_ring
		 * consume side is valid, as no resize of the queue is
		 * allowed.
		 */
		n = __ptr_ring_consume_batched(rcpu->queue, frames,
					       CPUMAP_BATCH);
		for (i = 0; i < n; i++) {
			void *f = frames[i];
			struct page *page = virt_to_page(f);

			/* Bring struct page memory area to current CPU.
			 * Read by build_skb_around() via
			 * page_is_pfmemalloc(), and written on free by the
			 * page_frag_free() call.
			 */
			prefetchw(page);
		}

		/* Support running another XDP prog on this CPU */
		nframes = cpu_map_bpf_prog_run_xdp(rcpu, frames, n, &stats);
		if (nframes) {
			m = kmem_cache_alloc_bulk(skbuff_head_cache, gfp, nframes, skbs);
			if (unlikely(m == 0)) {
				for (i = 0; i < nframes; i++)
					skbs[i] = NULL; /* effect: xdp_return_frame */
				kmem_alloc_drops += nframes;
			}
		}

		local_bh_disable();
		for (i = 0; i < nframes; i++) {
			struct xdp_frame *xdpf = frames[i];
			struct sk_buff *skb = skbs[i];

			skb = __xdp_build_skb_from_frame(xdpf, skb,
							 xdpf->dev_rx);
			if (!skb) {
				xdp_return_frame(xdpf);
				continue;
			}

			list_add_tail(&skb->list, &list);
		}
		netif_receive_skb_list(&list);

		/* Feedback loop via tracepoint */
		trace_xdp_cpumap_kthread(rcpu->map_id, n, kmem_alloc_drops,
					 sched, &stats);

		local_bh_enable(); /* resched point, may call do_softirq() */
	}
	__set_current_state(TASK_RUNNING);

	put_cpu_map_entry(rcpu);
	return 0;
}

bool cpu_map_prog_allowed(struct bpf_map *map)
{
	return map->map_type == BPF_MAP_TYPE_CPUMAP &&
	       map->value_size != offsetofend(struct bpf_cpumap_val, qsize);
}

static int __cpu_map_load_bpf_program(struct bpf_cpu_map_entry *rcpu, int fd)
{
	struct bpf_prog *prog;

	prog = bpf_prog_get_type(fd, BPF_PROG_TYPE_XDP);
	if (IS_ERR(prog))
		return PTR_ERR(prog);

	if (prog->expected_attach_type != BPF_XDP_CPUMAP) {
		bpf_prog_put(prog);
		return -EINVAL;
	}

	rcpu->value.bpf_prog.id = prog->aux->id;
	rcpu->prog = prog;

	return 0;
}
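
/* For reference, a minimal sketch (not part of this file) of a program
 * that would pass the expected_attach_type check above.  With libbpf
 * the attach type is typically derived from the ELF section name; the
 * exact section string ("xdp_cpumap" vs "xdp/cpumap") depends on the
 * libbpf version, and the function name is hypothetical.
 *
 *	SEC("xdp_cpumap")
 *	int xdp_on_remote_cpu(struct xdp_md *ctx)
 *	{
 *		// Runs in the cpumap kthread on the destination CPU,
 *		// after the frame was redirected there; may return
 *		// XDP_PASS, XDP_DROP or XDP_REDIRECT (see
 *		// cpu_map_bpf_prog_run_xdp() above).
 *		return XDP_PASS;
 *	}
 */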

static struct bpf_cpu_map_entry *
__cpu_map_entry_alloc(struct bpf_map *map, struct bpf_cpumap_val *value,
		      u32 cpu)
{
	int numa, err, i, fd = value->bpf_prog.fd;
	gfp_t gfp = GFP_KERNEL | __GFP_NOWARN;
	struct bpf_cpu_map_entry *rcpu;
	struct xdp_bulk_queue *bq;

	/* Have map->numa_node, but choose node of redirect target CPU */
	numa = cpu_to_node(cpu);

	rcpu = bpf_map_kmalloc_node(map, sizeof(*rcpu), gfp | __GFP_ZERO, numa);
	if (!rcpu)
		return NULL;

	/* Alloc percpu bulkq */
	rcpu->bulkq = bpf_map_alloc_percpu(map, sizeof(*rcpu->bulkq),
					   sizeof(void *), gfp);
	if (!rcpu->bulkq)
		goto free_rcu;

	for_each_possible_cpu(i) {
		bq = per_cpu_ptr(rcpu->bulkq, i);
		bq->obj = rcpu;
	}

	/* Alloc queue */
	rcpu->queue = bpf_map_kmalloc_node(map, sizeof(*rcpu->queue), gfp,
					   numa);
	if (!rcpu->queue)
		goto free_bulkq;

	err = ptr_ring_init(rcpu->queue, value->qsize, gfp);
	if (err)
		goto free_queue;

	rcpu->cpu = cpu;
	rcpu->map_id = map->id;
	rcpu->value.qsize = value->qsize;

	if (fd > 0 && __cpu_map_load_bpf_program(rcpu, fd))
		goto free_ptr_ring;

	/* Setup kthread */
	rcpu->kthread = kthread_create_on_node(cpu_map_kthread_run, rcpu, numa,
					       "cpumap/%d/map:%d", cpu,
					       map->id);
	if (IS_ERR(rcpu->kthread))
		goto free_prog;

	get_cpu_map_entry(rcpu); /* 1-refcnt for being in cmap->cpu_map[] */
	get_cpu_map_entry(rcpu); /* 1-refcnt for kthread */

	/* Make sure kthread runs on a single CPU */
	kthread_bind(rcpu->kthread, cpu);
	wake_up_process(rcpu->kthread);

	return rcpu;

free_prog:
	if (rcpu->prog)
		bpf_prog_put(rcpu->prog);
free_ptr_ring:
	ptr_ring_cleanup(rcpu->queue, NULL);
free_queue:
	kfree(rcpu->queue);
free_bulkq:
	free_percpu(rcpu->bulkq);
free_rcu:
	kfree(rcpu);
	return NULL;
}

static void __cpu_map_entry_free(struct rcu_head *rcu)
{
	struct bpf_cpu_map_entry *rcpu;

	/* This cpu_map_entry has been disconnected from the map and one
	 * RCU grace period has elapsed.  Thus, XDP cannot queue any new
	 * packets, and a pending flush operation cannot find this entry.
	 */
	rcpu = container_of(rcu, struct bpf_cpu_map_entry, rcu);

	free_percpu(rcpu->bulkq);
	/* Cannot kthread_stop() here; the last put frees rcpu resources */
	put_cpu_map_entry(rcpu);
}

/* After the xchg of the bpf_cpu_map_entry pointer, we use call_rcu() to
 * ensure any driver RCU critical sections have completed, but this does
 * not guarantee a flush has happened yet, because the driver-side
 * rcu_read_lock/unlock only protects the running XDP program.  The
 * atomic xchg and NULL-pointer check in __cpu_map_flush() make sure a
 * pending flush op doesn't fail.
 *
 * The bpf_cpu_map_entry is still used by the kthread, and there can be
 * pending packets (in the queue and percpu bulkq).  A refcnt makes sure
 * the map entry stays alive until both are drained.
 *
 * The RCU callback __cpu_map_entry_free() flushes remaining packets
 * from the percpu bulkq to the queue.  Because the caller
 * map_delete_elem() runs with preemption disabled, it cannot call
 * kthread_stop() to make sure the queue is empty.  Instead, a
 * work_struct is scheduled to stop the kthread: cpu_map_kthread_stop()
 * waits for an RCU grace period before stopping the kthread, which then
 * empties the queue.
 */
static void __cpu_map_entry_replace(struct bpf_cpu_map *cmap,
				    u32 key_cpu, struct bpf_cpu_map_entry *rcpu)
{
	struct bpf_cpu_map_entry *old_rcpu;

	old_rcpu = xchg(&cmap->cpu_map[key_cpu], rcpu);
	if (old_rcpu) {
		call_rcu(&old_rcpu->rcu, __cpu_map_entry_free);
		INIT_WORK(&old_rcpu->kthread_stop_wq, cpu_map_kthread_stop);
		schedule_work(&old_rcpu->kthread_stop_wq);
	}
}

static int cpu_map_delete_elem(struct bpf_map *map, void *key)
{
	struct bpf_cpu_map *cmap = container_of(map, struct bpf_cpu_map, map);
	u32 key_cpu = *(u32 *)key;

	if (key_cpu >= map->max_entries)
		return -EINVAL;

	/* notice caller map_delete_elem() uses preempt_disable() */
	__cpu_map_entry_replace(cmap, key_cpu, NULL);
	return 0;
}

static int cpu_map_update_elem(struct bpf_map *map, void *key, void *value,
			       u64 map_flags)
{
	struct bpf_cpu_map *cmap = container_of(map, struct bpf_cpu_map, map);
	struct bpf_cpumap_val cpumap_value = {};
	struct bpf_cpu_map_entry *rcpu;
	/* Array index key corresponds to CPU number */
	u32 key_cpu = *(u32 *)key;

	memcpy(&cpumap_value, value, map->value_size);

	if (unlikely(map_flags > BPF_EXIST))
		return -EINVAL;
	if (unlikely(key_cpu >= cmap->map.max_entries))
		return -E2BIG;
	if (unlikely(map_flags == BPF_NOEXIST))
		return -EEXIST;
	if (unlikely(cpumap_value.qsize > 16384)) /* sanity limit on qsize */
		return -EOVERFLOW;

	/* Make sure CPU is a valid possible cpu */
	if (key_cpu >= nr_cpumask_bits || !cpu_possible(key_cpu))
		return -ENODEV;

	if (cpumap_value.qsize == 0) {
		rcpu = NULL; /* Same as deleting */
	} else {
		/* Updating qsize causes re-allocation of bpf_cpu_map_entry */
		rcpu = __cpu_map_entry_alloc(map, &cpumap_value, key_cpu);
		if (!rcpu)
			return -ENOMEM;
		rcpu->cmap = cmap;
	}
	rcu_read_lock();
	__cpu_map_entry_replace(cmap, key_cpu, rcpu);
	rcu_read_unlock();
	return 0;
}

static void cpu_map_free(struct bpf_map *map)
{
	struct bpf_cpu_map *cmap = container_of(map, struct bpf_cpu_map, map);
	u32 i;

	/* At this point bpf_prog->aux->refcnt == 0 and this map->refcnt == 0,
	 * so the bpf programs (there can be more than one that used this map)
	 * were disconnected from events.  Wait for outstanding critical
	 * sections in these programs to complete.  The RCU critical section
	 * only guarantees no further "XDP/bpf-side" reads against
	 * bpf_cpu_map->cpu_map; it does __not__ ensure pending flush
	 * operations (if any) are complete.
	 */
	synchronize_rcu();

	/* For cpu_map the remote CPUs can still be using the entries
	 * (struct bpf_cpu_map_entry).
	 */
	for (i = 0; i < cmap->map.max_entries; i++) {
		struct bpf_cpu_map_entry *rcpu;

		rcpu = READ_ONCE(cmap->cpu_map[i]);
		if (!rcpu)
			continue;

		/* bq flush and cleanup happen after an RCU grace period */
		__cpu_map_entry_replace(cmap, i, NULL); /* call_rcu */
	}
	bpf_map_area_free(cmap->cpu_map);
	kfree(cmap);
}

static void *__cpu_map_lookup_elem(struct bpf_map *map, u32 key)
{
	struct bpf_cpu_map *cmap = container_of(map, struct bpf_cpu_map, map);
	struct bpf_cpu_map_entry *rcpu;

	if (key >= map->max_entries)
		return NULL;

	rcpu = READ_ONCE(cmap->cpu_map[key]);
	return rcpu;
}

static void *cpu_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_cpu_map_entry *rcpu =
		__cpu_map_lookup_elem(map, *(u32 *)key);

	return rcpu ? &rcpu->value : NULL;
}

static int cpu_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
{
	struct bpf_cpu_map *cmap = container_of(map, struct bpf_cpu_map, map);
	u32 index = key ? *(u32 *)key : U32_MAX;
	u32 *next = next_key;

	if (index >= cmap->map.max_entries) {
		*next = 0;
		return 0;
	}

	if (index == cmap->map.max_entries - 1)
		return -ENOENT;
	*next = index + 1;
	return 0;
}

static int cpu_map_redirect(struct bpf_map *map, u32 ifindex, u64 flags)
{
	return __bpf_xdp_redirect_map(map, ifindex, flags, __cpu_map_lookup_elem);
}

static int cpu_map_btf_id;
const struct bpf_map_ops cpu_map_ops = {
	.map_meta_equal		= bpf_map_meta_equal,
	.map_alloc		= cpu_map_alloc,
	.map_free		= cpu_map_free,
	.map_delete_elem	= cpu_map_delete_elem,
	.map_update_elem	= cpu_map_update_elem,
	.map_lookup_elem	= cpu_map_lookup_elem,
	.map_get_next_key	= cpu_map_get_next_key,
	.map_check_btf		= map_check_no_btf,
	.map_btf_name		= "bpf_cpu_map",
	.map_btf_id		= &cpu_map_btf_id,
	.map_redirect		= cpu_map_redirect,
};
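
/* A minimal userspace sketch (not part of this file): populating the
 * map goes through the regular bpf(2) update path, which lands in
 * cpu_map_update_elem() above.  Variable names and the qsize value are
 * hypothetical; bpf_map_update_elem() is the standard libbpf wrapper.
 *
 *	struct bpf_cpumap_val val = {
 *		.qsize = 2048,			// ptr_ring slots, <= 16384
 *		.bpf_prog.fd = cpumap_prog_fd,	// optional; <= 0 if unused
 *	};
 *	__u32 cpu = 1;				// key == destination CPU
 *
 *	if (bpf_map_update_elem(cpumap_map_fd, &cpu, &val, 0))
 *		// note: qsize == 0 is treated as delete, see handler above
 *		handle_error();
 */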

static void bq_flush_to_queue(struct xdp_bulk_queue *bq)
{
	struct bpf_cpu_map_entry *rcpu = bq->obj;
	unsigned int processed = 0, drops = 0;
	const int to_cpu = rcpu->cpu;
	struct ptr_ring *q;
	int i;

	if (unlikely(!bq->count))
		return;

	q = rcpu->queue;
	spin_lock(&q->producer_lock);

	for (i = 0; i < bq->count; i++) {
		struct xdp_frame *xdpf = bq->q[i];
		int err;

		err = __ptr_ring_produce(q, xdpf);
		if (err) {
			drops++;
			xdp_return_frame_rx_napi(xdpf);
		}
		processed++;
	}
	bq->count = 0;
	spin_unlock(&q->producer_lock);

	__list_del_clearprev(&bq->flush_node);

	/* Feedback loop via tracepoints */
	trace_xdp_cpumap_enqueue(rcpu->map_id, processed, drops, to_cpu);
}

/* Runs under RCU-read-side, plus in softirq under NAPI protection.
 * Thus, safe percpu variable access.
 */
static void bq_enqueue(struct bpf_cpu_map_entry *rcpu, struct xdp_frame *xdpf)
{
	struct list_head *flush_list = this_cpu_ptr(&cpu_map_flush_list);
	struct xdp_bulk_queue *bq = this_cpu_ptr(rcpu->bulkq);

	if (unlikely(bq->count == CPU_MAP_BULK_SIZE))
		bq_flush_to_queue(bq);

	/* Notice, the xdp_buff/page MUST be queued here, long enough for
	 * the driver code invoking us to finish, due to driver
	 * (e.g. ixgbe) recycle tricks based on page refcount.
	 *
	 * Thus, the incoming xdp_frame is always queued here (else we
	 * race with another CPU on the page refcount and remaining
	 * driver code).  Queue time is very short, as the driver will
	 * invoke the flush operation when completing its napi->poll call.
	 */
	bq->q[bq->count++] = xdpf;

	if (!bq->flush_node.prev)
		list_add(&bq->flush_node, flush_list);
}

int cpu_map_enqueue(struct bpf_cpu_map_entry *rcpu, struct xdp_buff *xdp,
		    struct net_device *dev_rx)
{
	struct xdp_frame *xdpf;

	xdpf = xdp_convert_buff_to_frame(xdp);
	if (unlikely(!xdpf))
		return -EOVERFLOW;

	/* Info needed when constructing SKB on remote CPU */
	xdpf->dev_rx = dev_rx;

	bq_enqueue(rcpu, xdpf);
	return 0;
}

void __cpu_map_flush(void)
{
	struct list_head *flush_list = this_cpu_ptr(&cpu_map_flush_list);
	struct xdp_bulk_queue *bq, *tmp;

	list_for_each_entry_safe(bq, tmp, flush_list, flush_node) {
		bq_flush_to_queue(bq);

		/* If already running, costs spin_lock_irqsave + smp_mb */
		wake_up_process(bq->obj->kthread);
	}
}

static int __init cpu_map_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		INIT_LIST_HEAD(&per_cpu(cpu_map_flush_list, cpu));
	return 0;
}

subsys_initcall(cpu_map_init);