/* bpf/cpumap.c
 *
 * The 'cpumap' is primarily used as a backend map for the XDP BPF
 * helper call bpf_redirect_map() and the XDP_REDIRECT action, like
 * 'devmap'.
 *
 * Unlike devmap, which redirects XDP frames out another NIC device,
 * this map redirects raw XDP frames to a remote CPU.  The remote CPU
 * does the SKB allocation and injects the packet into the normal
 * network stack.  This separates the early driver-level XDP
 * processing from the rest of the netstack, and allows dedicating
 * CPUs to that early stage.
 */
#include <linux/bpf.h>
#include <linux/filter.h>
#include <linux/ptr_ring.h>
#include <net/xdp.h>

#include <linux/sched.h>
#include <linux/workqueue.h>
#include <linux/kthread.h>
#include <linux/capability.h>
#include <trace/events/xdp.h>

#include <linux/netdevice.h>
#include <linux/etherdevice.h>

/* General idea: XDP packets getting XDP-redirected to another CPU are
 * stored/queued for at most one driver ->poll() call.  Queueing the
 * frame and the flush operation happen on the same CPU, so the flush
 * operation can find the right bulk queue via this_cpu_ptr().
 */
#define CPU_MAP_BULK_SIZE 8  /* 8 pointers == one cacheline on 64-bit archs */
struct xdp_bulk_queue {
	void *q[CPU_MAP_BULK_SIZE];
	unsigned int count;
};

/* Struct for every remote "destination" CPU in map */
struct bpf_cpu_map_entry {
	u32 cpu;    /* kthread CPU and map index */
	int map_id; /* Back reference to map */
	u32 qsize;  /* Queue size placeholder for map lookup */

	/* XDP can run multiple RX-ring queues, need __percpu enqueue store */
	struct xdp_bulk_queue __percpu *bulkq;

	/* Queue with potential multi-producers, and single-consumer kthread */
	struct ptr_ring *queue;
	struct task_struct *kthread;
	struct work_struct kthread_stop_wq;

	atomic_t refcnt; /* Controls when this struct can be free'd */
	struct rcu_head rcu;
};

struct bpf_cpu_map {
	struct bpf_map map;
	/* Below members are specific to this map type */
	struct bpf_cpu_map_entry **cpu_map;
	unsigned long __percpu *flush_needed;
};

static int bq_flush_to_queue(struct bpf_cpu_map_entry *rcpu,
			     struct xdp_bulk_queue *bq, bool in_napi_ctx);

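/* Size of the per-CPU flush bitmap: one bit per possible map entry */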
static u64 cpu_map_bitmap_size(const union bpf_attr *attr)
{
	return BITS_TO_LONGS(attr->max_entries) * sizeof(unsigned long);
}

static struct bpf_map *cpu_map_alloc(union bpf_attr *attr)
{
	struct bpf_cpu_map *cmap;
	int err = -ENOMEM;
	u64 cost;
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return ERR_PTR(-EPERM);

	/* check sanity of attributes */
	if (attr->max_entries == 0 || attr->key_size != 4 ||
	    attr->value_size != 4 || attr->map_flags & ~BPF_F_NUMA_NODE)
		return ERR_PTR(-EINVAL);

	cmap = kzalloc(sizeof(*cmap), GFP_USER);
	if (!cmap)
		return ERR_PTR(-ENOMEM);

	bpf_map_init_from_attr(&cmap->map, attr);

	/* Pre-limit array size based on NR_CPUS, not final CPU check */
	if (cmap->map.max_entries > NR_CPUS) {
		err = -E2BIG;
		goto free_cmap;
	}

	/* make sure page count doesn't overflow */
	cost = (u64) cmap->map.max_entries * sizeof(struct bpf_cpu_map_entry *);
	cost += cpu_map_bitmap_size(attr) * num_possible_cpus();
	if (cost >= U32_MAX - PAGE_SIZE)
		goto free_cmap;
	cmap->map.pages = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT;

	/* Notice, this returns -EPERM if map size exceeds the memlock limit */
	ret = bpf_map_precharge_memlock(cmap->map.pages);
	if (ret) {
		err = ret;
		goto free_cmap;
	}

	/* A per-CPU bitmap, tracking entries with queued XDP frames that
	 * still need a flush.
	 */
	cmap->flush_needed = __alloc_percpu(cpu_map_bitmap_size(attr),
					    __alignof__(unsigned long));
	if (!cmap->flush_needed)
		goto free_cmap;

	/* Alloc array for possible remote "destination" CPUs */
	cmap->cpu_map = bpf_map_area_alloc(cmap->map.max_entries *
					   sizeof(struct bpf_cpu_map_entry *),
					   cmap->map.numa_node);
	if (!cmap->cpu_map)
		goto free_percpu;

	return &cmap->map;
free_percpu:
	free_percpu(cmap->flush_needed);
free_cmap:
	kfree(cmap);
	return ERR_PTR(err);
}

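/* Refcount pairs with put_cpu_map_entry(); the last put frees the entry */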
static void get_cpu_map_entry(struct bpf_cpu_map_entry *rcpu)
{
	atomic_inc(&rcpu->refcnt);
}

/* kthread_stop() can sleep, so stopping the kthread is deferred to a
 * workqueue instead of being done directly in the atomic map-update path.
 */
static void cpu_map_kthread_stop(struct work_struct *work)
{
	struct bpf_cpu_map_entry *rcpu;

	rcpu = container_of(work, struct bpf_cpu_map_entry, kthread_stop_wq);

	/* Wait for flush in __cpu_map_entry_free(), via full RCU barrier,
	 * as it waits until all in-flight call_rcu() callbacks complete.
	 */
	rcu_barrier();

	/* kthread_stop will wake_up_process and wait for it to complete */
	kthread_stop(rcpu->kthread);
}

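/* Convert an xdp_frame into a freshly built SKB, reusing the frame's
 * memory as the SKB data area (no copy, via build_skb).
 */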
static struct sk_buff *cpu_map_build_skb(struct bpf_cpu_map_entry *rcpu,
					 struct xdp_frame *xdpf)
{
	unsigned int frame_size;
	void *pkt_data_start;
	struct sk_buff *skb;

	/* build_skb() places skb_shared_info after the SKB data end, and
	 * needs to know the "truesize" of the memory area backing the
	 * xdp_frame.  The real frame size is not carried in xdp_frame, so
	 * approximate it from the packet length plus headroom plus the
	 * shared_info.  This under-estimates truesize for drivers that
	 * back frames with a full page.
	 */
	frame_size = SKB_DATA_ALIGN(xdpf->len) + xdpf->headroom +
		SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

	pkt_data_start = xdpf->data - xdpf->headroom;
	skb = build_skb(pkt_data_start, frame_size);
	if (!skb)
		return NULL;

	skb_reserve(skb, xdpf->headroom);
	__skb_put(skb, xdpf->len);
	if (xdpf->metasize)
		skb_metadata_set(skb, xdpf->metasize);

	/* Essential SKB info: protocol and skb->dev */
	skb->protocol = eth_type_trans(skb, xdpf->dev_rx);

	/* Optional SKB info, currently missing:
	 * - HW checksum info		(skb->ip_summed)
	 * - HW RX hash			(skb_set_hash)
	 * - RX ring dev queue index	(skb_record_rx_queue)
	 */

	return skb;
}

static void __cpu_map_ring_cleanup(struct ptr_ring *ring)
{
	/* The tear-down procedure should have made sure that the queue is
	 * empty.  See __cpu_map_entry_replace() and the work-queue invoked
	 * cpu_map_kthread_stop().  Catch any broken behaviour gracefully
	 * and warn once.
	 */
	struct xdp_frame *xdpf;

	while ((xdpf = ptr_ring_consume(ring)))
		if (WARN_ON_ONCE(xdpf))
			xdp_return_frame(xdpf);
}

static void put_cpu_map_entry(struct bpf_cpu_map_entry *rcpu)
{
	if (atomic_dec_and_test(&rcpu->refcnt)) {
		/* The queue should be empty at this point */
		__cpu_map_ring_cleanup(rcpu->queue);
		ptr_ring_cleanup(rcpu->queue, NULL);
		kfree(rcpu->queue);
		kfree(rcpu);
	}
}

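/* Main loop of the per-entry kthread: consume xdp_frames from the
 * ptr_ring, build SKBs and inject them into the network stack.
 */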
static int cpu_map_kthread_run(void *data)
{
	struct bpf_cpu_map_entry *rcpu = data;

	set_current_state(TASK_INTERRUPTIBLE);

	/* When the kthread gets a stop order, the rcpu entry has already
	 * been disconnected from the map, thus no new packets can be
	 * enqueued.  Remaining in-flight packets have been flushed into
	 * the queue, so only honor the kthread_stop signal once the queue
	 * is empty.
	 */
	while (!kthread_should_stop() || !__ptr_ring_empty(rcpu->queue)) {
		unsigned int processed = 0, drops = 0, sched = 0;
		struct xdp_frame *xdpf;

		/* Release CPU via schedule/reschedule checks */
		if (__ptr_ring_empty(rcpu->queue)) {
			set_current_state(TASK_INTERRUPTIBLE);
			/* Recheck to avoid lost wake-up */
			if (__ptr_ring_empty(rcpu->queue)) {
				schedule();
				sched = 1;
			} else {
				__set_current_state(TASK_RUNNING);
			}
		} else {
			sched = cond_resched();
		}

		/* Process packets in rcpu->queue */
		local_bh_disable();

		/* The bpf_cpu_map_entry is single consumer, with this
		 * kthread CPU pinned.  Lockless access to the ptr_ring
		 * consume side is valid, as resizing the queue is not
		 * allowed.
		 */
		while ((xdpf = __ptr_ring_consume(rcpu->queue))) {
			struct sk_buff *skb;
			int ret;

			skb = cpu_map_build_skb(rcpu, xdpf);
			if (!skb) {
				xdp_return_frame(xdpf);
				continue;
			}

			/* Inject into network stack */
			ret = netif_receive_skb_core(skb);
			if (ret == NET_RX_DROP)
				drops++;

			/* Limit BH-disable period */
			if (++processed == 8)
				break;
		}
		/* Feedback loop via tracepoint */
		trace_xdp_cpumap_kthread(rcpu->map_id, processed, drops, sched);

		local_bh_enable(); /* resched point, may run softirqs */
	}
	__set_current_state(TASK_RUNNING);

	put_cpu_map_entry(rcpu);
	return 0;
}

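/* Allocate a bpf_cpu_map_entry, including its ptr_ring queue and per-CPU
 * bulk queues, and start the consumer kthread bound to the target CPU.
 */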
static struct bpf_cpu_map_entry *__cpu_map_entry_alloc(u32 qsize, u32 cpu,
							int map_id)
{
	gfp_t gfp = GFP_KERNEL | __GFP_NOWARN;
	struct bpf_cpu_map_entry *rcpu;
	int numa, err;

	/* Have map->numa_node, but choose node of redirect target CPU */
	numa = cpu_to_node(cpu);

	rcpu = kzalloc_node(sizeof(*rcpu), gfp, numa);
	if (!rcpu)
		return NULL;

	/* Alloc percpu bulkq */
	rcpu->bulkq = __alloc_percpu_gfp(sizeof(*rcpu->bulkq),
					 sizeof(void *), gfp);
	if (!rcpu->bulkq)
		goto free_rcu;

	/* Alloc queue */
	rcpu->queue = kzalloc_node(sizeof(*rcpu->queue), gfp, numa);
	if (!rcpu->queue)
		goto free_bulkq;

	err = ptr_ring_init(rcpu->queue, qsize, gfp);
	if (err)
		goto free_queue;

	rcpu->cpu    = cpu;
	rcpu->map_id = map_id;
	rcpu->qsize  = qsize;

	/* Setup kthread */
	rcpu->kthread = kthread_create_on_node(cpu_map_kthread_run, rcpu, numa,
					       "cpumap/%d/map:%d", cpu, map_id);
	if (IS_ERR(rcpu->kthread))
		goto free_ptr_ring;

	get_cpu_map_entry(rcpu); /* 1-refcnt for being in cmap->cpu_map[] */
	get_cpu_map_entry(rcpu); /* 1-refcnt for kthread */

	/* Make sure kthread runs on a single CPU */
	kthread_bind(rcpu->kthread, cpu);
	wake_up_process(rcpu->kthread);

	return rcpu;

free_ptr_ring:
	ptr_ring_cleanup(rcpu->queue, NULL);
free_queue:
	kfree(rcpu->queue);
free_bulkq:
	free_percpu(rcpu->bulkq);
free_rcu:
	kfree(rcpu);
	return NULL;
}

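/* RCU callback: runs after XDP-side readers can no longer find this
 * entry in the map.  Flush frames still sitting in the per-CPU bulk
 * queues and drop the map's reference on the entry.
 */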
static void __cpu_map_entry_free(struct rcu_head *rcu)
{
	struct bpf_cpu_map_entry *rcpu;
	int cpu;

	/* This cpu_map_entry has been disconnected from the map and one
	 * RCU grace period has elapsed.  Thus, XDP cannot enqueue any new
	 * packets towards this entry, and the flush_needed bit for it can
	 * no longer be set.
	 */
	rcpu = container_of(rcu, struct bpf_cpu_map_entry, rcu);

	/* Flush remaining packets in the percpu bulkq */
	for_each_online_cpu(cpu) {
		struct xdp_bulk_queue *bq = per_cpu_ptr(rcpu->bulkq, cpu);

		/* No concurrent bq_enqueue can run at this point */
		bq_flush_to_queue(rcpu, bq, false);
	}
	free_percpu(rcpu->bulkq);

	put_cpu_map_entry(rcpu); /* drop the map's reference; kthread holds its own */
}

/* Tear-down procedure: after the xchg of the bpf_cpu_map_entry pointer,
 * call_rcu() makes sure any driver-side RCU read sections (the running
 * XDP programs) have completed before the entry is torn down.  Note
 * this does not guarantee that a pending flush operation has happened
 * yet; __cpu_map_flush() handles that by re-reading the entry pointer
 * with READ_ONCE() and skipping NULL entries.
 *
 * The bpf_cpu_map_entry is still in use by the kthread, and there can
 * still be pending packets in the ptr_ring and the per-CPU bulk queues.
 * The refcnt (see get/put_cpu_map_entry) makes sure the memory is only
 * freed once both the map reference and the kthread reference have been
 * dropped.  Stopping the kthread itself is deferred to a work queue and
 * happens after an rcu_barrier(), see cpu_map_kthread_stop().
 */
static void __cpu_map_entry_replace(struct bpf_cpu_map *cmap,
				    u32 key_cpu, struct bpf_cpu_map_entry *rcpu)
{
	struct bpf_cpu_map_entry *old_rcpu;

	old_rcpu = xchg(&cmap->cpu_map[key_cpu], rcpu);
	if (old_rcpu) {
		call_rcu(&old_rcpu->rcu, __cpu_map_entry_free);
		INIT_WORK(&old_rcpu->kthread_stop_wq, cpu_map_kthread_stop);
		schedule_work(&old_rcpu->kthread_stop_wq);
	}
}

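/* Deleting an element just drops it from the array; the actual free and
 * kthread stop happen asynchronously via __cpu_map_entry_replace().
 */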
static int cpu_map_delete_elem(struct bpf_map *map, void *key)
{
	struct bpf_cpu_map *cmap = container_of(map, struct bpf_cpu_map, map);
	u32 key_cpu = *(u32 *)key;

	if (key_cpu >= map->max_entries)
		return -EINVAL;

	__cpu_map_entry_replace(cmap, key_cpu, NULL);
	return 0;
}

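/* An update allocates a completely new bpf_cpu_map_entry (including a
 * fresh kthread and queue) and swaps it in; a qsize of zero behaves
 * like a delete.
 */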
static int cpu_map_update_elem(struct bpf_map *map, void *key, void *value,
			       u64 map_flags)
{
	struct bpf_cpu_map *cmap = container_of(map, struct bpf_cpu_map, map);
	struct bpf_cpu_map_entry *rcpu;

	/* Array index key corresponds to CPU number */
	u32 key_cpu = *(u32 *)key;
	/* Value is the queue size */
	u32 qsize = *(u32 *)value;

	if (unlikely(map_flags > BPF_EXIST))
		return -EINVAL;
	if (unlikely(key_cpu >= cmap->map.max_entries))
		return -E2BIG;
	if (unlikely(map_flags == BPF_NOEXIST))
		return -EEXIST; /* array slots always "exist" */
	if (unlikely(qsize > 16384)) /* sanity limit on qsize */
		return -EOVERFLOW;

	/* Make sure CPU is a valid possible cpu */
	if (!cpu_possible(key_cpu))
		return -ENODEV;

	if (qsize == 0) {
		rcpu = NULL; /* Same as deleting */
	} else {
		/* Updating qsize causes re-allocation of the bpf_cpu_map_entry */
		rcpu = __cpu_map_entry_alloc(qsize, key_cpu, map->id);
		if (!rcpu)
			return -ENOMEM;
	}
	rcu_read_lock();
	__cpu_map_entry_replace(cmap, key_cpu, rcpu);
	rcu_read_unlock();
	return 0;
}

static void cpu_map_free(struct bpf_map *map)
{
	struct bpf_cpu_map *cmap = container_of(map, struct bpf_cpu_map, map);
	int cpu;
	u32 i;

	/* At this point bpf_prog->aux->refcnt == 0 and this map->refcnt == 0,
	 * so the bpf programs (can be more than one that used this map) were
	 * disconnected from events.  Wait for outstanding critical sections
	 * in these programs to complete.  The RCU critical section only
	 * guarantees no further reads against cpu_map; it does *not* ensure
	 * pending flush operations (if any) are complete.
	 */
	synchronize_rcu();

	/* To ensure all pending flush operations have completed, wait for
	 * the flush bitmap to become empty on all CPUs.  The above
	 * synchronize_rcu() ensures the map is disconnected from the
	 * programs, so no new bits will be set.
	 */
	for_each_online_cpu(cpu) {
		unsigned long *bitmap = per_cpu_ptr(cmap->flush_needed, cpu);

		while (!bitmap_empty(bitmap, cmap->map.max_entries))
			cond_resched();
	}

	/* The remote CPUs (the kthreads) can still be using the entries,
	 * so each entry is released via the normal RCU-deferred tear-down.
	 */
	for (i = 0; i < cmap->map.max_entries; i++) {
		struct bpf_cpu_map_entry *rcpu;

		rcpu = READ_ONCE(cmap->cpu_map[i]);
		if (!rcpu)
			continue;

		/* bq flush and cleanup happen after the RCU grace period */
		__cpu_map_entry_replace(cmap, i, NULL); /* call_rcu */
	}
	free_percpu(cmap->flush_needed);
	bpf_map_area_free(cmap->cpu_map);
	kfree(cmap);
}

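/* Elem lookup used on the XDP redirect fast path; entries are protected
 * by RCU, hence the READ_ONCE() of the array slot.
 */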
struct bpf_cpu_map_entry *__cpu_map_lookup_elem(struct bpf_map *map, u32 key)
{
	struct bpf_cpu_map *cmap = container_of(map, struct bpf_cpu_map, map);
	struct bpf_cpu_map_entry *rcpu;

	if (key >= map->max_entries)
		return NULL;

	rcpu = READ_ONCE(cmap->cpu_map[key]);
	return rcpu;
}

static void *cpu_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_cpu_map_entry *rcpu =
		__cpu_map_lookup_elem(map, *(u32 *)key);

	return rcpu ? &rcpu->qsize : NULL;
}

static int cpu_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
{
	struct bpf_cpu_map *cmap = container_of(map, struct bpf_cpu_map, map);
	u32 index = key ? *(u32 *)key : U32_MAX;
	u32 *next = next_key;

	if (index >= cmap->map.max_entries) {
		*next = 0;
		return 0;
	}

	if (index == cmap->map.max_entries - 1)
		return -ENOENT;
	*next = index + 1;
	return 0;
}

const struct bpf_map_ops cpu_map_ops = {
	.map_alloc = cpu_map_alloc,
	.map_free = cpu_map_free,
	.map_delete_elem = cpu_map_delete_elem,
	.map_update_elem = cpu_map_update_elem,
	.map_lookup_elem = cpu_map_lookup_elem,
	.map_get_next_key = cpu_map_get_next_key,
};

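/* Flush this CPU's bulk queue for one map entry into the entry's
 * ptr_ring.  Frames that do not fit are dropped and returned to the
 * allocator.
 */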
static int bq_flush_to_queue(struct bpf_cpu_map_entry *rcpu,
			     struct xdp_bulk_queue *bq, bool in_napi_ctx)
{
	unsigned int processed = 0, drops = 0;
	const int to_cpu = rcpu->cpu;
	struct ptr_ring *q;
	int i;

	if (unlikely(!bq->count))
		return 0;

	q = rcpu->queue;
	spin_lock(&q->producer_lock);

	for (i = 0; i < bq->count; i++) {
		struct xdp_frame *xdpf = bq->q[i];
		int err;

		err = __ptr_ring_produce(q, xdpf);
		if (err) {
			drops++;
			if (likely(in_napi_ctx))
				xdp_return_frame_rx_napi(xdpf);
			else
				xdp_return_frame(xdpf);
		}
		processed++;
	}
	bq->count = 0;
	spin_unlock(&q->producer_lock);

	/* Feedback loop via tracepoint */
	trace_xdp_cpumap_enqueue(rcpu->map_id, processed, drops, to_cpu);
	return 0;
}

/* Runs under RCU-read-side, plus in softirq under NAPI protection.
 * Thus, safe percpu variable access.
 */
static int bq_enqueue(struct bpf_cpu_map_entry *rcpu, struct xdp_frame *xdpf)
{
	struct xdp_bulk_queue *bq = this_cpu_ptr(rcpu->bulkq);

	if (unlikely(bq->count == CPU_MAP_BULK_SIZE))
		bq_flush_to_queue(rcpu, bq, true);

	/* Notice, the xdp_frame MUST be queued here, long enough for the
	 * driver code that invoked us to finish, as some drivers recycle
	 * pages based on the page refcnt.  The frame therefore always goes
	 * through the bulk queue; queue time is short, as the driver
	 * invokes the flush operation when completing its napi->poll call.
	 */
	bq->q[bq->count++] = xdpf;
	return 0;
}

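/* Entry point from the XDP redirect core: convert the xdp_buff into an
 * xdp_frame and queue it towards the remote CPU.
 */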
int cpu_map_enqueue(struct bpf_cpu_map_entry *rcpu, struct xdp_buff *xdp,
		    struct net_device *dev_rx)
{
	struct xdp_frame *xdpf;

	xdpf = convert_to_xdp_frame(xdp);
	if (unlikely(!xdpf))
		return -EOVERFLOW;

	/* Info needed when constructing SKB on remote CPU */
	xdpf->dev_rx = dev_rx;

	bq_enqueue(rcpu, xdpf);
	return 0;
}

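/* Mark in the per-CPU flush bitmap that this map entry has frames
 * pending in this CPU's bulk queue.
 */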
void __cpu_map_insert_ctx(struct bpf_map *map, u32 bit)
{
	struct bpf_cpu_map *cmap = container_of(map, struct bpf_cpu_map, map);
	unsigned long *bitmap = this_cpu_ptr(cmap->flush_needed);

	__set_bit(bit, bitmap);
}

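/* Called at the end of the NAPI poll cycle: push all bulk-queued frames
 * into their destination ptr_rings and wake the consumer kthreads.
 */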
void __cpu_map_flush(struct bpf_map *map)
{
	struct bpf_cpu_map *cmap = container_of(map, struct bpf_cpu_map, map);
	unsigned long *bitmap = this_cpu_ptr(cmap->flush_needed);
	u32 bit;

	/* The napi->poll softirq makes sure __cpu_map_insert_ctx() and
	 * __cpu_map_flush() happen on the same CPU.  Thus, the percpu
	 * bitmap indicates which percpu bulk queues have packets.
	 */
	for_each_set_bit(bit, bitmap, map->max_entries) {
		struct bpf_cpu_map_entry *rcpu = READ_ONCE(cmap->cpu_map[bit]);
		struct xdp_bulk_queue *bq;

		/* This is possible if the entry is removed by user space
		 * between the xdp redirect and the flush operation.
		 */
		if (unlikely(!rcpu))
			continue;

		__clear_bit(bit, bitmap);

		/* Flush all frames in bulkq to the real queue */
		bq = this_cpu_ptr(rcpu->bulkq);
		bq_flush_to_queue(rcpu, bq, true);

		/* Wake the consumer kthread (no-op if it is already running) */
		wake_up_process(rcpu->kthread);
	}
}