/* SPDX-License-Identifier: GPL-2.0
 *
 * bpf/cpumap.c
 *
 * The 'cpumap' is primarily used as a backend map for the XDP BPF helper
 * call bpf_redirect_map() and the XDP_REDIRECT action, like 'devmap'.
 *
 * Unlike devmap, which redirects XDP frames out another NIC device, this
 * map type redirects raw XDP frames to another CPU.  The remote CPU will
 * do the SKB allocation and call the normal network stack.
 *
 * This is a scalability and isolation mechanism that allows separating
 * the early driver network XDP layer from the rest of the netstack, and
 * assigning dedicated CPUs for this stage.  This basically allows for
 * wirespeed pre-filtering via BPF.
 */
#include <linux/bpf.h>
#include <linux/filter.h>
#include <linux/ptr_ring.h>
#include <net/xdp.h>

#include <linux/sched.h>
#include <linux/workqueue.h>
#include <linux/kthread.h>
#include <linux/capability.h>
#include <trace/events/xdp.h>

#include <linux/netdevice.h>
#include <linux/etherdevice.h>

/* General idea: XDP packets getting XDP redirected to another CPU will
 * maximally be stored/queued for one driver ->poll() call.  It is
 * guaranteed that queueing the frame and the flush operation happen on
 * the same CPU.  Thus, cpu_map_flush operation can deduct via
 * this_cpu_ptr() which queue in bpf_cpu_map_entry contains packets.
 */

#define CPU_MAP_BULK_SIZE 8 /* 8 == one cacheline on 64-bit archs */
struct bpf_cpu_map_entry;
struct bpf_cpu_map;

struct xdp_bulk_queue {
	void *q[CPU_MAP_BULK_SIZE];
	struct list_head flush_node;
	struct bpf_cpu_map_entry *obj;
	unsigned int count;
};

/* Struct for every remote "destination" CPU in map */
struct bpf_cpu_map_entry {
	u32 cpu;    /* kthread CPU and map index */
	int map_id; /* Back reference to map */

	/* XDP can run multiple RX-ring queues, need __percpu enqueue store */
	struct xdp_bulk_queue __percpu *bulkq;

	struct bpf_cpu_map *cmap;

	/* Queue with potential multi-producers, and single-consumer kthread */
	struct ptr_ring *queue;
	struct task_struct *kthread;

	struct bpf_cpumap_val value;
	struct bpf_prog *prog;

	atomic_t refcnt; /* Control when this struct can be free'd */
	struct rcu_head rcu;

	struct work_struct kthread_stop_wq;
};

struct bpf_cpu_map {
	struct bpf_map map;
	/* Below members specific for map type */
	struct bpf_cpu_map_entry **cpu_map;
};

static DEFINE_PER_CPU(struct list_head, cpu_map_flush_list);

static struct bpf_map *cpu_map_alloc(union bpf_attr *attr)
{
	u32 value_size = attr->value_size;
	struct bpf_cpu_map *cmap;
	int err = -ENOMEM;

	if (!bpf_capable())
		return ERR_PTR(-EPERM);

	/* check sanity of attributes */
	if (attr->max_entries == 0 || attr->key_size != 4 ||
	    (value_size != offsetofend(struct bpf_cpumap_val, qsize) &&
	     value_size != offsetofend(struct bpf_cpumap_val, bpf_prog.fd)) ||
	    attr->map_flags & ~BPF_F_NUMA_NODE)
		return ERR_PTR(-EINVAL);

	cmap = kzalloc(sizeof(*cmap), GFP_USER | __GFP_ACCOUNT);
	if (!cmap)
		return ERR_PTR(-ENOMEM);

	bpf_map_init_from_attr(&cmap->map, attr);

	/* Pre-limit array size based on NR_CPUS, not final CPU check */
	if (cmap->map.max_entries > NR_CPUS) {
		err = -E2BIG;
		goto free_cmap;
	}

	/* Alloc array for possible remote "destination" CPUs */
	cmap->cpu_map = bpf_map_area_alloc(cmap->map.max_entries *
					   sizeof(struct bpf_cpu_map_entry *),
					   cmap->map.numa_node);
	if (!cmap->cpu_map)
		goto free_cmap;

	return &cmap->map;
free_cmap:
	kfree(cmap);
	return ERR_PTR(err);
}
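
/* For reference, the two accepted value layouts above correspond to the
 * UAPI struct bpf_cpumap_val (include/uapi/linux/bpf.h):
 *
 *   struct bpf_cpumap_val {
 *           __u32 qsize;          queue size to remote target CPU
 *           union {
 *                   int   fd;     program file descriptor on map write
 *                   __u32 id;     program id on map read
 *           } bpf_prog;
 *   };
 *
 * The shorter layout (only qsize) keeps compatibility with user space
 * that does not attach a per-entry XDP program.
 */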

static void get_cpu_map_entry(struct bpf_cpu_map_entry *rcpu)
{
	atomic_inc(&rcpu->refcnt);
}

/* Runs from workqueue context, where it is allowed to sleep */
static void cpu_map_kthread_stop(struct work_struct *work)
{
	struct bpf_cpu_map_entry *rcpu;

	rcpu = container_of(work, struct bpf_cpu_map_entry, kthread_stop_wq);

	/* Wait for flush in __cpu_map_entry_free(), via full RCU barrier,
	 * as it waits until all in-flight call_rcu's have completed.
	 */
	rcu_barrier();

	/* kthread_stop() will wake_up_process() and wait for it to complete */
	kthread_stop(rcpu->kthread);
}

static struct sk_buff *cpu_map_build_skb(struct xdp_frame *xdpf,
					 struct sk_buff *skb)
{
	unsigned int hard_start_headroom;
	unsigned int frame_size;
	void *pkt_data_start;

	/* Part of headroom was reserved to xdpf */
	hard_start_headroom = sizeof(struct xdp_frame) + xdpf->headroom;

	/* Memory size backing xdp_frame data already has reserved
	 * room for build_skb() to place skb_shared_info in tailroom.
	 */
	frame_size = xdpf->frame_sz;

	pkt_data_start = xdpf->data - hard_start_headroom;
	skb = build_skb_around(skb, pkt_data_start, frame_size);
	if (unlikely(!skb))
		return NULL;

	skb_reserve(skb, hard_start_headroom);
	__skb_put(skb, xdpf->len);
	if (xdpf->metasize)
		skb_metadata_set(skb, xdpf->metasize);

	/* Essential SKB info: protocol and skb->dev */
	skb->protocol = eth_type_trans(skb, xdpf->dev_rx);

	/* Optional SKB info, currently not populated here:
	 * - HW checksum info		(skb->ip_summed)
	 * - HW RX hash			(skb_set_hash)
	 * - RX ring dev queue index	(skb_record_rx_queue)
	 */

	/* Release any driver DMA/page-pool resources tied to the frame */
	xdp_release_frame(xdpf);

	/* Allow SKB to reuse area used by xdp_frame */
	xdp_scrub_frame(xdpf);

	return skb;
}
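
/* Note: the skb passed to cpu_map_build_skb() is pre-allocated in bulk
 * by the kthread (kmem_cache_alloc_bulk) and only initialized here via
 * build_skb_around(), so the xdp_frame data area is reused as the SKB
 * data buffer instead of being copied.
 */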

static void __cpu_map_ring_cleanup(struct ptr_ring *ring)
{
	/* The tear-down procedure should have made sure that queue is
	 * empty.  See __cpu_map_entry_replace() and work-queue
	 * invoked cpu_map_kthread_stop(). Catch any broken behaviour
	 * gracefully and warn once.
	 */
	struct xdp_frame *xdpf;

	while ((xdpf = ptr_ring_consume(ring)))
		if (WARN_ON_ONCE(xdpf))
			xdp_return_frame(xdpf);
}

static void put_cpu_map_entry(struct bpf_cpu_map_entry *rcpu)
{
	if (atomic_dec_and_test(&rcpu->refcnt)) {
		if (rcpu->prog)
			bpf_prog_put(rcpu->prog);
		/* The queue should be empty at this point */
		__cpu_map_ring_cleanup(rcpu->queue);
		ptr_ring_cleanup(rcpu->queue, NULL);
		kfree(rcpu->queue);
		kfree(rcpu);
	}
}

static int cpu_map_bpf_prog_run_xdp(struct bpf_cpu_map_entry *rcpu,
				    void **frames, int n,
				    struct xdp_cpumap_stats *stats)
{
	struct xdp_rxq_info rxq;
	struct xdp_buff xdp;
	int i, nframes = 0;

	if (!rcpu->prog)
		return n;

	rcu_read_lock_bh();

	xdp_set_return_frame_no_direct();
	xdp.rxq = &rxq;

	for (i = 0; i < n; i++) {
		struct xdp_frame *xdpf = frames[i];
		u32 act;
		int err;

		rxq.dev = xdpf->dev_rx;
		rxq.mem = xdpf->mem;

		xdp_convert_frame_to_buff(xdpf, &xdp);

		act = bpf_prog_run_xdp(rcpu->prog, &xdp);
		switch (act) {
		case XDP_PASS:
			err = xdp_update_frame_from_buff(&xdp, xdpf);
			if (err < 0) {
				xdp_return_frame(xdpf);
				stats->drop++;
			} else {
				frames[nframes++] = xdpf;
				stats->pass++;
			}
			break;
		case XDP_REDIRECT:
			err = xdp_do_redirect(xdpf->dev_rx, &xdp,
					      rcpu->prog);
			if (unlikely(err)) {
				xdp_return_frame(xdpf);
				stats->drop++;
			} else {
				stats->redirect++;
			}
			break;
		default:
			bpf_warn_invalid_xdp_action(act);
			fallthrough;
		case XDP_DROP:
			xdp_return_frame(xdpf);
			stats->drop++;
			break;
		}
	}

	if (stats->redirect)
		xdp_do_flush_map();

	xdp_clear_return_frame_no_direct();

	rcu_read_unlock_bh();

	return nframes;
}
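
/* The per-entry XDP program (if any) runs on the remote CPU, after the
 * frames have already left the RX driver.  XDP_PASS keeps the frame for
 * SKB construction, XDP_REDIRECT sends it onward (with a flush at the
 * end of the batch), and everything else is dropped and accounted in
 * stats for the cpumap kthread tracepoint.
 */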

#define CPUMAP_BATCH 8

static int cpu_map_kthread_run(void *data)
{
	struct bpf_cpu_map_entry *rcpu = data;

	set_current_state(TASK_INTERRUPTIBLE);

	/* When kthread gives stop order, then rcpu have been disconnected
	 * from map, thus no new packets can enter. Remaining in-flight
	 * per CPU stored packets are flushed to this queue.  Wait honoring
	 * kthread_stop signal until queue is empty.
	 */
	while (!kthread_should_stop() || !__ptr_ring_empty(rcpu->queue)) {
		struct xdp_cpumap_stats stats = {}; /* zero stats */
		gfp_t gfp = __GFP_ZERO | GFP_ATOMIC;
		unsigned int drops = 0, sched = 0;
		void *frames[CPUMAP_BATCH];
		void *skbs[CPUMAP_BATCH];
		int i, n, m, nframes;

		/* Release CPU reschedule checks */
		if (__ptr_ring_empty(rcpu->queue)) {
			set_current_state(TASK_INTERRUPTIBLE);
			/* Recheck to avoid lost wake-up */
			if (__ptr_ring_empty(rcpu->queue)) {
				schedule();
				sched = 1;
			} else {
				__set_current_state(TASK_RUNNING);
			}
		} else {
			sched = cond_resched();
		}

		/* The bpf_cpu_map_entry is single consumer, with this
		 * kthread CPU pinned. Lockless access to ptr_ring
		 * consume side valid as no-resize allowed of queue.
		 */
		n = __ptr_ring_consume_batched(rcpu->queue, frames,
					       CPUMAP_BATCH);
		for (i = 0; i < n; i++) {
			void *f = frames[i];
			struct page *page = virt_to_page(f);

			/* Bring struct page memory area to curr CPU, as
			 * build_skb_around() below both reads and writes
			 * into this memory (e.g. skb_shared_info).
			 */
			prefetchw(page);
		}

		/* Support running another XDP prog on this CPU */
		nframes = cpu_map_bpf_prog_run_xdp(rcpu, frames, n, &stats);
		if (nframes) {
			m = kmem_cache_alloc_bulk(skbuff_head_cache, gfp, nframes, skbs);
			if (unlikely(m == 0)) {
				for (i = 0; i < nframes; i++)
					skbs[i] = NULL; /* effect: xdp_return_frame */
				drops += nframes;
			}
		}

		local_bh_disable();
		for (i = 0; i < nframes; i++) {
			struct xdp_frame *xdpf = frames[i];
			struct sk_buff *skb = skbs[i];
			int ret;

			skb = cpu_map_build_skb(xdpf, skb);
			if (!skb) {
				xdp_return_frame(xdpf);
				continue;
			}

			/* Inject into network stack */
			ret = netif_receive_skb_core(skb);
			if (ret == NET_RX_DROP)
				drops++;
		}

		/* Feedback loop via tracepoint */
		trace_xdp_cpumap_kthread(rcpu->map_id, n, drops, sched, &stats);

		local_bh_enable(); /* resched point, may call do_softirq() */
	}
	__set_current_state(TASK_RUNNING);

	put_cpu_map_entry(rcpu);
	return 0;
}
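
/* Per batch, the kthread above does roughly:
 *   1. sleep until woken by __cpu_map_flush() or told to stop,
 *   2. consume up to CPUMAP_BATCH frames from the ptr_ring,
 *   3. optionally run the per-entry XDP program on them,
 *   4. bulk-allocate SKB heads and build SKBs around the frames,
 *   5. inject the SKBs into the network stack with local_bh disabled.
 */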

bool cpu_map_prog_allowed(struct bpf_map *map)
{
	/* The larger value layout (including bpf_prog.fd) signals that an
	 * XDP program may be attached to the map entries.
	 */
	return map->map_type == BPF_MAP_TYPE_CPUMAP &&
	       map->value_size != offsetofend(struct bpf_cpumap_val, qsize);
}

static int __cpu_map_load_bpf_program(struct bpf_cpu_map_entry *rcpu, int fd)
{
	struct bpf_prog *prog;

	prog = bpf_prog_get_type(fd, BPF_PROG_TYPE_XDP);
	if (IS_ERR(prog))
		return PTR_ERR(prog);

	if (prog->expected_attach_type != BPF_XDP_CPUMAP) {
		bpf_prog_put(prog);
		return -EINVAL;
	}

	rcpu->value.bpf_prog.id = prog->aux->id;
	rcpu->prog = prog;

	return 0;
}

static struct bpf_cpu_map_entry *
__cpu_map_entry_alloc(struct bpf_map *map, struct bpf_cpumap_val *value,
		      u32 cpu)
{
	int numa, err, i, fd = value->bpf_prog.fd;
	gfp_t gfp = GFP_KERNEL | __GFP_NOWARN;
	struct bpf_cpu_map_entry *rcpu;
	struct xdp_bulk_queue *bq;

	/* Have map->numa_node, but choose node of redirect target CPU */
	numa = cpu_to_node(cpu);

	rcpu = bpf_map_kmalloc_node(map, sizeof(*rcpu), gfp | __GFP_ZERO, numa);
	if (!rcpu)
		return NULL;

	/* Alloc percpu bulkq */
	rcpu->bulkq = bpf_map_alloc_percpu(map, sizeof(*rcpu->bulkq),
					   sizeof(void *), gfp);
	if (!rcpu->bulkq)
		goto free_rcu;

	for_each_possible_cpu(i) {
		bq = per_cpu_ptr(rcpu->bulkq, i);
		bq->obj = rcpu;
	}

	/* Alloc queue */
	rcpu->queue = bpf_map_kmalloc_node(map, sizeof(*rcpu->queue), gfp,
					   numa);
	if (!rcpu->queue)
		goto free_bulkq;

	err = ptr_ring_init(rcpu->queue, value->qsize, gfp);
	if (err)
		goto free_queue;

	rcpu->cpu = cpu;
	rcpu->map_id = map->id;
	rcpu->value.qsize = value->qsize;

	if (fd > 0 && __cpu_map_load_bpf_program(rcpu, fd))
		goto free_ptr_ring;

	/* Setup kthread */
	rcpu->kthread = kthread_create_on_node(cpu_map_kthread_run, rcpu, numa,
					       "cpumap/%d/map:%d", cpu,
					       map->id);
	if (IS_ERR(rcpu->kthread))
		goto free_prog;

	get_cpu_map_entry(rcpu); /* 1-refcnt for being in cmap->cpu_map[] */
	get_cpu_map_entry(rcpu); /* 1-refcnt for kthread */

	/* Make sure kthread runs on a single CPU */
	kthread_bind(rcpu->kthread, cpu);
	wake_up_process(rcpu->kthread);

	return rcpu;

free_prog:
	if (rcpu->prog)
		bpf_prog_put(rcpu->prog);
free_ptr_ring:
	ptr_ring_cleanup(rcpu->queue, NULL);
free_queue:
	kfree(rcpu->queue);
free_bulkq:
	free_percpu(rcpu->bulkq);
free_rcu:
	kfree(rcpu);
	return NULL;
}

static void __cpu_map_entry_free(struct rcu_head *rcu)
{
	struct bpf_cpu_map_entry *rcpu;

	/* This cpu_map_entry have been disconnected from map and one
	 * RCU grace-period have elapsed.  Thus, XDP cannot queue any
	 * new packets and cannot change/set flush_needed that can
	 * find this entry.
	 */
	rcpu = container_of(rcu, struct bpf_cpu_map_entry, rcu);

	free_percpu(rcpu->bulkq);
	/* kthread_stop() cannot be called from this RCU callback context;
	 * it is done from the workqueue, see cpu_map_kthread_stop().
	 */
	put_cpu_map_entry(rcpu);
}

/* After xchg pointer to bpf_cpu_map_entry, use the call_rcu() to
 * ensure any driver rcu critical sections have completed, but this
 * does not guarantee a flush has happened yet. Because driver side
 * rcu_read_lock/unlock only protects the running XDP program.  The
 * atomic xchg and NULL-ptr check in __cpu_map_flush() makes sure a
 * pending flush op doesn't fail.
 *
 * The bpf_cpu_map_entry is still used by the kthread, and there can
 * still be pending packets (in queue and percpu bulkq).  A refcnt
 * makes sure the final kfree only happens after all users are gone
 * (in RCU or in kthread context).
 *
 * The teardown order used below is:
 * 1. xchg() the pointer out of cmap->cpu_map[], so new redirects no
 *    longer find the entry.
 * 2. call_rcu() schedules __cpu_map_entry_free(), which frees the
 *    percpu bulkq and drops one refcnt after a grace period.
 * 3. The workqueue runs cpu_map_kthread_stop(), which waits for the
 *    RCU callbacks (rcu_barrier) and stops the kthread; the kthread
 *    drains the remaining ptr_ring entries and drops its refcnt, and
 *    the final put frees the entry.
 */
static void __cpu_map_entry_replace(struct bpf_cpu_map *cmap,
				    u32 key_cpu, struct bpf_cpu_map_entry *rcpu)
{
	struct bpf_cpu_map_entry *old_rcpu;

	old_rcpu = xchg(&cmap->cpu_map[key_cpu], rcpu);
	if (old_rcpu) {
		call_rcu(&old_rcpu->rcu, __cpu_map_entry_free);
		INIT_WORK(&old_rcpu->kthread_stop_wq, cpu_map_kthread_stop);
		schedule_work(&old_rcpu->kthread_stop_wq);
	}
}

static int cpu_map_delete_elem(struct bpf_map *map, void *key)
{
	struct bpf_cpu_map *cmap = container_of(map, struct bpf_cpu_map, map);
	u32 key_cpu = *(u32 *)key;

	if (key_cpu >= map->max_entries)
		return -EINVAL;

	/* Replace with NULL; teardown of the old entry happens via RCU
	 * and the kthread-stop workqueue, see __cpu_map_entry_replace().
	 */
	__cpu_map_entry_replace(cmap, key_cpu, NULL);
	return 0;
}

static int cpu_map_update_elem(struct bpf_map *map, void *key, void *value,
			       u64 map_flags)
{
	struct bpf_cpu_map *cmap = container_of(map, struct bpf_cpu_map, map);
	struct bpf_cpumap_val cpumap_value = {};
	struct bpf_cpu_map_entry *rcpu;
	/* Array index key correspond to CPU number */
	u32 key_cpu = *(u32 *)key;

	memcpy(&cpumap_value, value, map->value_size);

	if (unlikely(map_flags > BPF_EXIST))
		return -EINVAL;
	if (unlikely(key_cpu >= cmap->map.max_entries))
		return -E2BIG;
	if (unlikely(map_flags == BPF_NOEXIST))
		return -EEXIST;
	if (unlikely(cpumap_value.qsize > 16384)) /* sanity limit on qsize */
		return -EOVERFLOW;

	/* Make sure CPU is a valid possible cpu */
	if (key_cpu >= nr_cpumask_bits || !cpu_possible(key_cpu))
		return -ENODEV;

	if (cpumap_value.qsize == 0) {
		rcpu = NULL; /* Same as deleting */
	} else {
		/* Updating qsize cause re-allocation of bpf_cpu_map_entry */
		rcpu = __cpu_map_entry_alloc(map, &cpumap_value, key_cpu);
		if (!rcpu)
			return -ENOMEM;
		rcpu->cmap = cmap;
	}
	rcu_read_lock();
	__cpu_map_entry_replace(cmap, key_cpu, rcpu);
	rcu_read_unlock();
	return 0;
}
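
/* Update semantics: writing qsize == 0 behaves like a delete, while any
 * other update allocates a fresh bpf_cpu_map_entry (new ptr_ring and
 * kthread) and atomically replaces the old one; entries are never
 * resized in place.
 */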

static void cpu_map_free(struct bpf_map *map)
{
	struct bpf_cpu_map *cmap = container_of(map, struct bpf_cpu_map, map);
	u32 i;

	/* At this point bpf_prog->aux->refcnt == 0 and this map->refcnt == 0,
	 * so the bpf programs (can be more than one that used this map) were
	 * disconnected from events. Wait for outstanding critical sections in
	 * these programs to complete. The rcu critical section only guarantees
	 * no further "XDP/bpf-side" reads against bpf_cpu_map->cpu_map.
	 * It does __not__ ensure pending flush operations (if any) are
	 * complete.
	 */
	bpf_clear_redirect_map(map);
	synchronize_rcu();

	/* The attached kthreads on remote CPUs can still be using the
	 * entries, so teardown goes through the same RCU + workqueue
	 * path as a regular delete.
	 */
	for (i = 0; i < cmap->map.max_entries; i++) {
		struct bpf_cpu_map_entry *rcpu;

		rcpu = READ_ONCE(cmap->cpu_map[i]);
		if (!rcpu)
			continue;

		/* bq flush and cleanup happens after RCU grace-period */
		__cpu_map_entry_replace(cmap, i, NULL); /* call_rcu */
	}
	bpf_map_area_free(cmap->cpu_map);
	kfree(cmap);
}

struct bpf_cpu_map_entry *__cpu_map_lookup_elem(struct bpf_map *map, u32 key)
{
	struct bpf_cpu_map *cmap = container_of(map, struct bpf_cpu_map, map);
	struct bpf_cpu_map_entry *rcpu;

	if (key >= map->max_entries)
		return NULL;

	rcpu = READ_ONCE(cmap->cpu_map[key]);
	return rcpu;
}

static void *cpu_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_cpu_map_entry *rcpu =
		__cpu_map_lookup_elem(map, *(u32 *)key);

	return rcpu ? &rcpu->value : NULL;
}

static int cpu_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
{
	struct bpf_cpu_map *cmap = container_of(map, struct bpf_cpu_map, map);
	u32 index = key ? *(u32 *)key : U32_MAX;
	u32 *next = next_key;

	if (index >= cmap->map.max_entries) {
		*next = 0;
		return 0;
	}

	if (index == cmap->map.max_entries - 1)
		return -ENOENT;
	*next = index + 1;
	return 0;
}

static int cpu_map_btf_id;
const struct bpf_map_ops cpu_map_ops = {
	.map_meta_equal		= bpf_map_meta_equal,
	.map_alloc		= cpu_map_alloc,
	.map_free		= cpu_map_free,
	.map_delete_elem	= cpu_map_delete_elem,
	.map_update_elem	= cpu_map_update_elem,
	.map_lookup_elem	= cpu_map_lookup_elem,
	.map_get_next_key	= cpu_map_get_next_key,
	.map_check_btf		= map_check_no_btf,
	.map_btf_name		= "bpf_cpu_map",
	.map_btf_id		= &cpu_map_btf_id,
};
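
/* Example usage sketch (not compiled here, map and program names are
 * hypothetical): an XDP program selects a target CPU and redirects the
 * frame into this map.  The map value written from user space carries
 * at least qsize, and optionally a per-entry XDP program fd using the
 * BPF_XDP_CPUMAP expected attach type.
 *
 *   struct {
 *           __uint(type, BPF_MAP_TYPE_CPUMAP);
 *           __uint(key_size, sizeof(__u32));
 *           __uint(value_size, sizeof(struct bpf_cpumap_val));
 *           __uint(max_entries, 64);
 *   } cpu_map SEC(".maps");
 *
 *   SEC("xdp")
 *   int xdp_redirect_to_cpu(struct xdp_md *ctx)
 *   {
 *           __u32 cpu = 2;   hypothetical target CPU index
 *
 *           return bpf_redirect_map(&cpu_map, cpu, 0);
 *   }
 */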

static void bq_flush_to_queue(struct xdp_bulk_queue *bq)
{
	struct bpf_cpu_map_entry *rcpu = bq->obj;
	unsigned int processed = 0, drops = 0;
	const int to_cpu = rcpu->cpu;
	struct ptr_ring *q;
	int i;

	if (unlikely(!bq->count))
		return;

	q = rcpu->queue;
	spin_lock(&q->producer_lock);

	for (i = 0; i < bq->count; i++) {
		struct xdp_frame *xdpf = bq->q[i];
		int err;

		err = __ptr_ring_produce(q, xdpf);
		if (err) {
			drops++;
			xdp_return_frame_rx_napi(xdpf);
		}
		processed++;
	}
	bq->count = 0;
	spin_unlock(&q->producer_lock);

	__list_del_clearprev(&bq->flush_node);

	/* Feedback loop via tracepoints */
	trace_xdp_cpumap_enqueue(rcpu->map_id, processed, drops, to_cpu);
}

/* Runs under RCU-read-side, plus in softirq under NAPI protection.
 * Thus, safe percpu variable access.
 */
static void bq_enqueue(struct bpf_cpu_map_entry *rcpu, struct xdp_frame *xdpf)
{
	struct list_head *flush_list = this_cpu_ptr(&cpu_map_flush_list);
	struct xdp_bulk_queue *bq = this_cpu_ptr(rcpu->bulkq);

	if (unlikely(bq->count == CPU_MAP_BULK_SIZE))
		bq_flush_to_queue(bq);

	/* Notice, xdp_buff/page MUST be queued here, long enough for the
	 * driver code invoking us to finish, due to driver recycle tricks
	 * based on page-refcnt (e.g. ixgbe).
	 *
	 * Thus, incoming xdp_frame is always queued here (else we race
	 * with another CPU on page-refcnt and remaining driver code).
	 * The frame stays in the bulk queue until the flush point, where
	 * bq_flush_to_queue() moves it onto the remote CPU's ptr_ring.
	 */
	bq->q[bq->count++] = xdpf;

	if (!bq->flush_node.prev)
		list_add(&bq->flush_node, flush_list);
}

int cpu_map_enqueue(struct bpf_cpu_map_entry *rcpu, struct xdp_buff *xdp,
		    struct net_device *dev_rx)
{
	struct xdp_frame *xdpf;

	xdpf = xdp_convert_buff_to_frame(xdp);
	if (unlikely(!xdpf))
		return -EOVERFLOW;

	/* Info needed when constructing SKB on remote CPU */
	xdpf->dev_rx = dev_rx;

	bq_enqueue(rcpu, xdpf);
	return 0;
}
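
/* cpu_map_enqueue() is called on the RX CPU from the XDP redirect path.
 * The frame is only staged in this CPU's bulk queue here; it reaches
 * the remote CPU's ptr_ring when the bulk queue fills up or at the
 * driver's flush point via __cpu_map_flush().
 */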

void __cpu_map_flush(void)
{
	struct list_head *flush_list = this_cpu_ptr(&cpu_map_flush_list);
	struct xdp_bulk_queue *bq, *tmp;

	list_for_each_entry_safe(bq, tmp, flush_list, flush_node) {
		bq_flush_to_queue(bq);

		/* Wake up the consumer kthread so it starts draining */
		wake_up_process(bq->obj->kthread);
	}
}
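
/* __cpu_map_flush() runs at the XDP flush point at the end of the
 * driver's NAPI poll (via xdp_do_flush()), on the same CPU that queued
 * the frames, and wakes the destination kthreads so they start draining
 * their ptr_rings.
 */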

static int __init cpu_map_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		INIT_LIST_HEAD(&per_cpu(cpu_map_flush_list, cpu));
	return 0;
}

subsys_initcall(cpu_map_init);