#include <linux/bpf.h>
#include <net/xdp.h>
#include <linux/filter.h>
#include <trace/events/xdp.h>

#define DEV_CREATE_FLAG_MASK \
	(BPF_F_NUMA_NODE | BPF_F_RDONLY | BPF_F_WRONLY)

struct xdp_dev_bulk_queue {
	struct xdp_frame *q[DEV_MAP_BULK_SIZE];
	struct list_head flush_node;
	struct net_device *dev;
	struct net_device *dev_rx;
	unsigned int count;
};

struct bpf_dtab_netdev {
	struct net_device *dev; /* must be first member, due to tracepoint */
	struct hlist_node index_hlist;
	struct bpf_dtab *dtab;
	struct bpf_prog *xdp_prog;
	struct rcu_head rcu;
	unsigned int idx;
	struct bpf_devmap_val val;
};

struct bpf_dtab {
	struct bpf_map map;
	struct bpf_dtab_netdev **netdev_map; /* DEVMAP type only */
	struct list_head list;

	/* these are only used for DEVMAP_HASH type maps */
	struct hlist_head *dev_index_head;
	spinlock_t index_lock;
	unsigned int items;
	u32 n_buckets;
};

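/* For orientation: the map value handled in this file is the UAPI
 * struct bpf_devmap_val. The sketch below is inferred from the accesses
 * in this file (offsetofend(..., ifindex), offsetofend(..., bpf_prog.fd),
 * val->ifindex, val->bpf_prog.fd, dev->val.bpf_prog.id); see
 * include/uapi/linux/bpf.h for the authoritative definition.
 *
 *	struct bpf_devmap_val {
 *		__u32 ifindex;		// egress netdevice
 *		union {
 *			int   fd;	// prog fd, supplied on map update
 *			__u32 id;	// prog id, reported on map lookup
 *		} bpf_prog;
 *	};
 */
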
static DEFINE_PER_CPU(struct list_head, dev_flush_list);
static DEFINE_SPINLOCK(dev_map_lock);
static LIST_HEAD(dev_map_list);

static struct hlist_head *dev_map_create_hash(unsigned int entries,
					      int numa_node)
{
	int i;
	struct hlist_head *hash;

	hash = bpf_map_area_alloc(entries * sizeof(*hash), numa_node);
	if (hash != NULL)
		for (i = 0; i < entries; i++)
			INIT_HLIST_HEAD(&hash[i]);

	return hash;
}

static inline struct hlist_head *dev_map_index_hash(struct bpf_dtab *dtab,
						    int idx)
{
	return &dtab->dev_index_head[idx & (dtab->n_buckets - 1)];
}

static int dev_map_init_map(struct bpf_dtab *dtab, union bpf_attr *attr)
{
	u32 valsize = attr->value_size;

	/* check sanity of attributes. 2 value sizes supported:
	 * 4 bytes: ifindex
	 * 8 bytes: ifindex + prog fd
	 */
	if (attr->max_entries == 0 || attr->key_size != 4 ||
	    (valsize != offsetofend(struct bpf_devmap_val, ifindex) &&
	     valsize != offsetofend(struct bpf_devmap_val, bpf_prog.fd)) ||
	    attr->map_flags & ~DEV_CREATE_FLAG_MASK)
		return -EINVAL;

	/* Lookup returns a pointer into the element's bpf_devmap_val, so
	 * make the map read-only from the BPF side.
	 */
	attr->map_flags |= BPF_F_RDONLY_PROG;

	bpf_map_init_from_attr(&dtab->map, attr);

	if (attr->map_type == BPF_MAP_TYPE_DEVMAP_HASH) {
		dtab->n_buckets = roundup_pow_of_two(dtab->map.max_entries);

		if (!dtab->n_buckets) /* Overflow check */
			return -EINVAL;
	}

	if (attr->map_type == BPF_MAP_TYPE_DEVMAP_HASH) {
		dtab->dev_index_head = dev_map_create_hash(dtab->n_buckets,
							   dtab->map.numa_node);
		if (!dtab->dev_index_head)
			return -ENOMEM;

		spin_lock_init(&dtab->index_lock);
	} else {
		dtab->netdev_map = bpf_map_area_alloc(dtab->map.max_entries *
						      sizeof(struct bpf_dtab_netdev *),
						      dtab->map.numa_node);
		if (!dtab->netdev_map)
			return -ENOMEM;
	}

	return 0;
}

static struct bpf_map *dev_map_alloc(union bpf_attr *attr)
{
	struct bpf_dtab *dtab;
	int err;

	if (!capable(CAP_NET_ADMIN))
		return ERR_PTR(-EPERM);

	dtab = kzalloc(sizeof(*dtab), GFP_USER | __GFP_ACCOUNT);
	if (!dtab)
		return ERR_PTR(-ENOMEM);

	err = dev_map_init_map(dtab, attr);
	if (err) {
		kfree(dtab);
		return ERR_PTR(err);
	}

	spin_lock(&dev_map_lock);
	list_add_tail_rcu(&dtab->list, &dev_map_list);
	spin_unlock(&dev_map_lock);

	return &dtab->map;
}

static void dev_map_free(struct bpf_map *map)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	int i;

	/* At this point bpf_prog->aux->refcnt == 0 and this map->refcnt == 0,
	 * so the programs (can be more than one that used this map) were
	 * disconnected from events. The following synchronize_rcu() guarantees
	 * that both rcu read critical sections complete and that in-flight
	 * NAPI flush operations referencing this map are done, so there will
	 * be no further reads against netdev_map while we tear it down.
	 */
	spin_lock(&dev_map_lock);
	list_del_rcu(&dtab->list);
	spin_unlock(&dev_map_lock);

	bpf_clear_redirect_map(map);
	synchronize_rcu();

	/* Make sure prior __dev_map_entry_free() have completed. */
	rcu_barrier();

	if (dtab->map.map_type == BPF_MAP_TYPE_DEVMAP_HASH) {
		for (i = 0; i < dtab->n_buckets; i++) {
			struct bpf_dtab_netdev *dev;
			struct hlist_head *head;
			struct hlist_node *next;

			head = dev_map_index_hash(dtab, i);

			hlist_for_each_entry_safe(dev, next, head, index_hlist) {
				hlist_del_rcu(&dev->index_hlist);
				if (dev->xdp_prog)
					bpf_prog_put(dev->xdp_prog);
				dev_put(dev->dev);
				kfree(dev);
			}
		}

		bpf_map_area_free(dtab->dev_index_head);
	} else {
		for (i = 0; i < dtab->map.max_entries; i++) {
			struct bpf_dtab_netdev *dev;

			dev = dtab->netdev_map[i];
			if (!dev)
				continue;

			if (dev->xdp_prog)
				bpf_prog_put(dev->xdp_prog);
			dev_put(dev->dev);
			kfree(dev);
		}

		bpf_map_area_free(dtab->netdev_map);
	}

	kfree(dtab);
}

static int dev_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	u32 index = key ? *(u32 *)key : U32_MAX;
	u32 *next = next_key;

	if (index >= dtab->map.max_entries) {
		*next = 0;
		return 0;
	}

	if (index == dtab->map.max_entries - 1)
		return -ENOENT;
	*next = index + 1;
	return 0;
}

struct bpf_dtab_netdev *__dev_map_hash_lookup_elem(struct bpf_map *map, u32 key)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	struct hlist_head *head = dev_map_index_hash(dtab, key);
	struct bpf_dtab_netdev *dev;

	hlist_for_each_entry_rcu(dev, head, index_hlist,
				 lockdep_is_held(&dtab->index_lock))
		if (dev->idx == key)
			return dev;

	return NULL;
}

static int dev_map_hash_get_next_key(struct bpf_map *map, void *key,
				     void *next_key)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	u32 idx, *next = next_key;
	struct bpf_dtab_netdev *dev, *next_dev;
	struct hlist_head *head;
	int i = 0;

	if (!key)
		goto find_first;

	idx = *(u32 *)key;

	dev = __dev_map_hash_lookup_elem(map, idx);
	if (!dev)
		goto find_first;

	next_dev = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu(&dev->index_hlist)),
				    struct bpf_dtab_netdev, index_hlist);

	if (next_dev) {
		*next = next_dev->idx;
		return 0;
	}

	i = idx & (dtab->n_buckets - 1);
	i++;

 find_first:
	for (; i < dtab->n_buckets; i++) {
		head = dev_map_index_hash(dtab, i);

		next_dev = hlist_entry_safe(rcu_dereference_raw(hlist_first_rcu(head)),
					    struct bpf_dtab_netdev,
					    index_hlist);
		if (next_dev) {
			*next = next_dev->idx;
			return 0;
		}
	}

	return -ENOENT;
}

bool dev_map_can_have_prog(struct bpf_map *map)
{
	if ((map->map_type == BPF_MAP_TYPE_DEVMAP ||
	     map->map_type == BPF_MAP_TYPE_DEVMAP_HASH) &&
	    map->value_size != offsetofend(struct bpf_devmap_val, ifindex))
		return true;

	return false;
}

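/* Usage sketch (BPF side, not part of this file): a minimal XDP program that
 * redirects every packet through a DEVMAP with bpf_redirect_map(). The map
 * name, section name and BTF map syntax are illustrative assumptions about
 * the BPF-side source, not anything defined here.
 *
 *	struct {
 *		__uint(type, BPF_MAP_TYPE_DEVMAP);
 *		__uint(key_size, sizeof(__u32));
 *		__uint(value_size, sizeof(struct bpf_devmap_val));
 *		__uint(max_entries, 64);
 *	} tx_devices SEC(".maps");
 *
 *	SEC("xdp")
 *	int xdp_redirect_all(struct xdp_md *ctx)
 *	{
 *		__u32 key = 0;	// slot populated by the loader
 *
 *		// XDP_REDIRECT on success, XDP_ABORTED (flags == 0) otherwise
 *		return bpf_redirect_map(&tx_devices, key, 0);
 *	}
 */
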
static void bq_xmit_all(struct xdp_dev_bulk_queue *bq, u32 flags)
{
	struct net_device *dev = bq->dev;
	int sent = 0, drops = 0, err = 0;
	int i;

	if (unlikely(!bq->count))
		return;

	for (i = 0; i < bq->count; i++) {
		struct xdp_frame *xdpf = bq->q[i];

		prefetch(xdpf);
	}

	sent = dev->netdev_ops->ndo_xdp_xmit(dev, bq->count, bq->q, flags);
	if (sent < 0) {
		err = sent;
		sent = 0;
		goto error;
	}
	drops = bq->count - sent;
out:
	bq->count = 0;

	trace_xdp_devmap_xmit(bq->dev_rx, dev, sent, drops, err);
	bq->dev_rx = NULL;
	__list_del_clearprev(&bq->flush_node);
	return;
error:
	/* If ndo_xdp_xmit() fails with an errno, no frames have been
	 * xmit'ed, so return them all and count them as drops.
	 */
	for (i = 0; i < bq->count; i++) {
		struct xdp_frame *xdpf = bq->q[i];

		xdp_return_frame_rx_napi(xdpf);
		drops++;
	}
	goto out;
}

/* __dev_flush is called from xdp_do_flush(), which _must_ be invoked by the
 * driver before it returns from its napi->poll() routine. It drains every
 * bulk queue that was filled on this CPU since the last flush, so queued
 * frames are handed to ndo_xdp_xmit() (or dropped) before the poll ends.
 */
void __dev_flush(void)
{
	struct list_head *flush_list = this_cpu_ptr(&dev_flush_list);
	struct xdp_dev_bulk_queue *bq, *tmp;

	list_for_each_entry_safe(bq, tmp, flush_list, flush_node)
		bq_xmit_all(bq, XDP_XMIT_FLUSH);
}

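/* Usage sketch (driver side, not part of this file): the bulk queues above
 * only drain inside the NAPI poll loop that filled them. A driver's RX path,
 * in pseudo-form and with hypothetical my_hw_* helpers, looks roughly like:
 *
 *	// inside the driver's napi->poll() callback
 *	while (budget-- && (frame = my_hw_next_rx_frame(ring))) {
 *		struct xdp_buff xdp;
 *
 *		my_hw_fill_xdp_buff(&xdp, frame);
 *		act = bpf_prog_run_xdp(prog, &xdp);
 *		if (act == XDP_REDIRECT)
 *			xdp_do_redirect(netdev, &xdp, prog); // may reach bq_enqueue()
 *	}
 *	xdp_do_flush(); // runs __dev_flush() for this CPU before poll() returns
 */
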
/* Elements are kept alive by RCU; callers must be in an RCU read-side
 * critical section (XDP program or syscall path) and must not use the
 * returned pointer after leaving it, since deletes free entries via
 * call_rcu().
 */
struct bpf_dtab_netdev *__dev_map_lookup_elem(struct bpf_map *map, u32 key)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	struct bpf_dtab_netdev *obj;

	if (key >= map->max_entries)
		return NULL;

	obj = READ_ONCE(dtab->netdev_map[key]);
	return obj;
}

/* Runs in NAPI (softirq) context on the CPU that received the frame, so
 * plain this_cpu_ptr() access to the per-CPU bulk queue is safe here.
 */
static void bq_enqueue(struct net_device *dev, struct xdp_frame *xdpf,
		       struct net_device *dev_rx)
{
	struct list_head *flush_list = this_cpu_ptr(&dev_flush_list);
	struct xdp_dev_bulk_queue *bq = this_cpu_ptr(dev->xdp_bulkq);

	if (unlikely(bq->count == DEV_MAP_BULK_SIZE))
		bq_xmit_all(bq, 0);

	/* Ingress dev_rx will be the same for all xdp_frames in the bulk
	 * queue, because the queue is per-CPU and is flushed at the end of
	 * the driver's NAPI poll, before another device's RX can run here.
	 */
	if (!bq->dev_rx)
		bq->dev_rx = dev_rx;

	bq->q[bq->count++] = xdpf;

	if (!bq->flush_node.prev)
		list_add(&bq->flush_node, flush_list);
}

static inline int __xdp_enqueue(struct net_device *dev, struct xdp_buff *xdp,
				struct net_device *dev_rx)
{
	struct xdp_frame *xdpf;
	int err;

	if (!dev->netdev_ops->ndo_xdp_xmit)
		return -EOPNOTSUPP;

	err = xdp_ok_fwd_dev(dev, xdp->data_end - xdp->data);
	if (unlikely(err))
		return err;

	xdpf = xdp_convert_buff_to_frame(xdp);
	if (unlikely(!xdpf))
		return -EOVERFLOW;

	bq_enqueue(dev, xdpf, dev_rx);
	return 0;
}

static struct xdp_buff *dev_map_run_prog(struct net_device *dev,
					 struct xdp_buff *xdp,
					 struct bpf_prog *xdp_prog)
{
	struct xdp_txq_info txq = { .dev = dev };
	u32 act;

	xdp_set_data_meta_invalid(xdp);
	xdp->txq = &txq;

	act = bpf_prog_run_xdp(xdp_prog, xdp);
	switch (act) {
	case XDP_PASS:
		return xdp;
	case XDP_DROP:
		break;
	default:
		bpf_warn_invalid_xdp_action(act);
		fallthrough;
	case XDP_ABORTED:
		trace_xdp_exception(dev, xdp_prog, act);
		break;
	}

	xdp_return_buff(xdp);
	return NULL;
}

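/* Usage sketch (BPF side, not part of this file): the per-entry program run
 * above must be loaded with expected_attach_type == BPF_XDP_DEVMAP (enforced
 * in __dev_map_alloc_node() below). With libbpf this is usually selected via
 * the program's section name; the exact name ("xdp_devmap" vs "xdp/devmap")
 * depends on the libbpf version, so treat it as an assumption:
 *
 *	SEC("xdp_devmap")
 *	int xdp_devmap_prog(struct xdp_md *ctx)
 *	{
 *		// ctx->egress_ifindex names the devmap entry's netdev here
 *		return XDP_PASS;	// XDP_DROP would drop the frame instead
 *	}
 */
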
int dev_xdp_enqueue(struct net_device *dev, struct xdp_buff *xdp,
		    struct net_device *dev_rx)
{
	return __xdp_enqueue(dev, xdp, dev_rx);
}

int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_buff *xdp,
		    struct net_device *dev_rx)
{
	struct net_device *dev = dst->dev;

	if (dst->xdp_prog) {
		xdp = dev_map_run_prog(dev, xdp, dst->xdp_prog);
		if (!xdp)
			return 0;
	}
	return __xdp_enqueue(dev, xdp, dev_rx);
}

int dev_map_generic_redirect(struct bpf_dtab_netdev *dst, struct sk_buff *skb,
			     struct bpf_prog *xdp_prog)
{
	int err;

	err = xdp_ok_fwd_dev(dst->dev, skb->len);
	if (unlikely(err))
		return err;
	skb->dev = dst->dev;
	generic_xdp_tx(skb, xdp_prog);

	return 0;
}

static void *dev_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_dtab_netdev *obj = __dev_map_lookup_elem(map, *(u32 *)key);

	return obj ? &obj->val : NULL;
}

static void *dev_map_hash_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_dtab_netdev *obj = __dev_map_hash_lookup_elem(map,
								 *(u32 *)key);
	return obj ? &obj->val : NULL;
}

static void __dev_map_entry_free(struct rcu_head *rcu)
{
	struct bpf_dtab_netdev *dev;

	dev = container_of(rcu, struct bpf_dtab_netdev, rcu);
	if (dev->xdp_prog)
		bpf_prog_put(dev->xdp_prog);
	dev_put(dev->dev);
	kfree(dev);
}

static int dev_map_delete_elem(struct bpf_map *map, void *key)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	struct bpf_dtab_netdev *old_dev;
	int k = *(u32 *)key;

	if (k >= map->max_entries)
		return -EINVAL;

	/* Use call_rcu() here so any rcu critical sections and any flush
	 * operations complete before the entry is freed; the grace period
	 * also covers the preempt-disabled (NAPI) regions in which the
	 * entry and its bulk queue may still be referenced.
	 */
	old_dev = xchg(&dtab->netdev_map[k], NULL);
	if (old_dev)
		call_rcu(&old_dev->rcu, __dev_map_entry_free);
	return 0;
}

static int dev_map_hash_delete_elem(struct bpf_map *map, void *key)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	struct bpf_dtab_netdev *old_dev;
	int k = *(u32 *)key;
	unsigned long flags;
	int ret = -ENOENT;

	spin_lock_irqsave(&dtab->index_lock, flags);

	old_dev = __dev_map_hash_lookup_elem(map, k);
	if (old_dev) {
		dtab->items--;
		hlist_del_init_rcu(&old_dev->index_hlist);
		call_rcu(&old_dev->rcu, __dev_map_entry_free);
		ret = 0;
	}
	spin_unlock_irqrestore(&dtab->index_lock, flags);

	return ret;
}

static struct bpf_dtab_netdev *__dev_map_alloc_node(struct net *net,
						    struct bpf_dtab *dtab,
						    struct bpf_devmap_val *val,
						    unsigned int idx)
{
	struct bpf_prog *prog = NULL;
	struct bpf_dtab_netdev *dev;

	dev = bpf_map_kmalloc_node(&dtab->map, sizeof(*dev),
				   GFP_ATOMIC | __GFP_NOWARN,
				   dtab->map.numa_node);
	if (!dev)
		return ERR_PTR(-ENOMEM);

	dev->dev = dev_get_by_index(net, val->ifindex);
	if (!dev->dev)
		goto err_out;

	if (val->bpf_prog.fd > 0) {
		prog = bpf_prog_get_type_dev(val->bpf_prog.fd,
					     BPF_PROG_TYPE_XDP, false);
		if (IS_ERR(prog))
			goto err_put_dev;
		if (prog->expected_attach_type != BPF_XDP_DEVMAP)
			goto err_put_prog;
	}

	dev->idx = idx;
	dev->dtab = dtab;
	if (prog) {
		dev->xdp_prog = prog;
		dev->val.bpf_prog.id = prog->aux->id;
	} else {
		dev->xdp_prog = NULL;
		dev->val.bpf_prog.id = 0;
	}
	dev->val.ifindex = val->ifindex;

	return dev;
err_put_prog:
	bpf_prog_put(prog);
err_put_dev:
	dev_put(dev->dev);
err_out:
	kfree(dev);
	return ERR_PTR(-EINVAL);
}

static int __dev_map_update_elem(struct net *net, struct bpf_map *map,
				 void *key, void *value, u64 map_flags)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	struct bpf_dtab_netdev *dev, *old_dev;
	struct bpf_devmap_val val = {};
	u32 i = *(u32 *)key;

	if (unlikely(map_flags > BPF_EXIST))
		return -EINVAL;
	if (unlikely(i >= dtab->map.max_entries))
		return -E2BIG;
	if (unlikely(map_flags == BPF_NOEXIST))
		return -EEXIST;

	/* already verified value_size <= sizeof val */
	memcpy(&val, value, map->value_size);

	if (!val.ifindex) {
		dev = NULL;
		/* can not specify a prog fd if ifindex is 0 */
		if (val.bpf_prog.fd > 0)
			return -EINVAL;
	} else {
		dev = __dev_map_alloc_node(net, dtab, &val, i);
		if (IS_ERR(dev))
			return PTR_ERR(dev);
	}

	/* Use call_rcu() here so rcu critical sections (and the NAPI flush
	 * operations that run inside them) complete before the old entry
	 * is freed.
	 */
	old_dev = xchg(&dtab->netdev_map[i], dev);
	if (old_dev)
		call_rcu(&old_dev->rcu, __dev_map_entry_free);

	return 0;
}

static int dev_map_update_elem(struct bpf_map *map, void *key, void *value,
			       u64 map_flags)
{
	return __dev_map_update_elem(current->nsproxy->net_ns,
				     map, key, value, map_flags);
}

static int __dev_map_hash_update_elem(struct net *net, struct bpf_map *map,
				      void *key, void *value, u64 map_flags)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	struct bpf_dtab_netdev *dev, *old_dev;
	struct bpf_devmap_val val = {};
	u32 idx = *(u32 *)key;
	unsigned long flags;
	int err = -EEXIST;

	/* already verified value_size <= sizeof val */
	memcpy(&val, value, map->value_size);

	if (unlikely(map_flags > BPF_EXIST || !val.ifindex))
		return -EINVAL;

	spin_lock_irqsave(&dtab->index_lock, flags);

	old_dev = __dev_map_hash_lookup_elem(map, idx);
	if (old_dev && (map_flags & BPF_NOEXIST))
		goto out_err;

	dev = __dev_map_alloc_node(net, dtab, &val, idx);
	if (IS_ERR(dev)) {
		err = PTR_ERR(dev);
		goto out_err;
	}

	if (old_dev) {
		hlist_del_rcu(&old_dev->index_hlist);
	} else {
		if (dtab->items >= dtab->map.max_entries) {
			spin_unlock_irqrestore(&dtab->index_lock, flags);
			call_rcu(&dev->rcu, __dev_map_entry_free);
			return -E2BIG;
		}
		dtab->items++;
	}

	hlist_add_head_rcu(&dev->index_hlist,
			   dev_map_index_hash(dtab, idx));
	spin_unlock_irqrestore(&dtab->index_lock, flags);

	if (old_dev)
		call_rcu(&old_dev->rcu, __dev_map_entry_free);

	return 0;

out_err:
	spin_unlock_irqrestore(&dtab->index_lock, flags);
	return err;
}

static int dev_map_hash_update_elem(struct bpf_map *map, void *key, void *value,
				    u64 map_flags)
{
	return __dev_map_hash_update_elem(current->nsproxy->net_ns,
					  map, key, value, map_flags);
}

static int dev_map_btf_id;
const struct bpf_map_ops dev_map_ops = {
	.map_meta_equal = bpf_map_meta_equal,
	.map_alloc = dev_map_alloc,
	.map_free = dev_map_free,
	.map_get_next_key = dev_map_get_next_key,
	.map_lookup_elem = dev_map_lookup_elem,
	.map_update_elem = dev_map_update_elem,
	.map_delete_elem = dev_map_delete_elem,
	.map_check_btf = map_check_no_btf,
	.map_btf_name = "bpf_dtab",
	.map_btf_id = &dev_map_btf_id,
};

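/* Usage sketch (user space, not part of this file): populating a DEVMAP slot
 * through the ops above with the libbpf wrapper bpf_map_update_elem(). The
 * fd/ifindex variables are illustrative assumptions; error handling omitted.
 *
 *	struct bpf_devmap_val val = {
 *		.ifindex = target_ifindex,	// 0 clears the slot
 *		.bpf_prog.fd = devmap_prog_fd,	// <= 0 means no per-entry prog
 *	};
 *	__u32 key = 0;
 *
 *	bpf_map_update_elem(devmap_fd, &key, &val, BPF_ANY);
 *	// reading the slot back reports .bpf_prog.id instead of the fd
 */
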
static int dev_map_hash_map_btf_id;
const struct bpf_map_ops dev_map_hash_ops = {
	.map_meta_equal = bpf_map_meta_equal,
	.map_alloc = dev_map_alloc,
	.map_free = dev_map_free,
	.map_get_next_key = dev_map_hash_get_next_key,
	.map_lookup_elem = dev_map_hash_lookup_elem,
	.map_update_elem = dev_map_hash_update_elem,
	.map_delete_elem = dev_map_hash_delete_elem,
	.map_check_btf = map_check_no_btf,
	.map_btf_name = "bpf_dtab",
	.map_btf_id = &dev_map_hash_map_btf_id,
};

static void dev_map_hash_remove_netdev(struct bpf_dtab *dtab,
				       struct net_device *netdev)
{
	unsigned long flags;
	u32 i;

	spin_lock_irqsave(&dtab->index_lock, flags);
	for (i = 0; i < dtab->n_buckets; i++) {
		struct bpf_dtab_netdev *dev;
		struct hlist_head *head;
		struct hlist_node *next;

		head = dev_map_index_hash(dtab, i);

		hlist_for_each_entry_safe(dev, next, head, index_hlist) {
			if (netdev != dev->dev)
				continue;

			dtab->items--;
			hlist_del_rcu(&dev->index_hlist);
			call_rcu(&dev->rcu, __dev_map_entry_free);
		}
	}
	spin_unlock_irqrestore(&dtab->index_lock, flags);
}

static int dev_map_notification(struct notifier_block *notifier,
				ulong event, void *ptr)
{
	struct net_device *netdev = netdev_notifier_info_to_dev(ptr);
	struct bpf_dtab *dtab;
	int i, cpu;

	switch (event) {
	case NETDEV_REGISTER:
		if (!netdev->netdev_ops->ndo_xdp_xmit || netdev->xdp_bulkq)
			break;

		/* will be freed in free_netdev() */
		netdev->xdp_bulkq = alloc_percpu(struct xdp_dev_bulk_queue);
		if (!netdev->xdp_bulkq)
			return NOTIFY_BAD;

		for_each_possible_cpu(cpu)
			per_cpu_ptr(netdev->xdp_bulkq, cpu)->dev = netdev;
		break;
	case NETDEV_UNREGISTER:
		/* rcu_read_lock() is needed here because dev_map_list is an
		 * RCU list, and because a concurrent delete must not free a
		 * netdev_map entry while we are still comparing it against
		 * the netdev being unregistered.
		 */
		rcu_read_lock();
		list_for_each_entry_rcu(dtab, &dev_map_list, list) {
			if (dtab->map.map_type == BPF_MAP_TYPE_DEVMAP_HASH) {
				dev_map_hash_remove_netdev(dtab, netdev);
				continue;
			}

			for (i = 0; i < dtab->map.max_entries; i++) {
				struct bpf_dtab_netdev *dev, *odev;

				dev = READ_ONCE(dtab->netdev_map[i]);
				if (!dev || netdev != dev->dev)
					continue;
				odev = cmpxchg(&dtab->netdev_map[i], dev, NULL);
				if (dev == odev)
					call_rcu(&dev->rcu,
						 __dev_map_entry_free);
			}
		}
		rcu_read_unlock();
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block dev_map_notifier = {
	.notifier_call = dev_map_notification,
};

static int __init dev_map_init(void)
{
	int cpu;

	/* Assure tracepoint struct _bpf_dtab_netdev membership matches */
	BUILD_BUG_ON(offsetof(struct bpf_dtab_netdev, dev) !=
		     offsetof(struct _bpf_dtab_netdev, dev));
	register_netdevice_notifier(&dev_map_notifier);

	for_each_possible_cpu(cpu)
		INIT_LIST_HEAD(&per_cpu(dev_flush_list, cpu));
	return 0;
}

subsys_initcall(dev_map_init);