1
2#include <linux/kernel.h>
3#include <linux/netdevice.h>
4#include <linux/rtnetlink.h>
5#include <linux/slab.h>
6#include <net/switchdev.h>
7
8#include "br_private.h"
9#include "br_private_tunnel.h"
10
11static void nbp_vlan_set_vlan_dev_state(struct net_bridge_port *p, u16 vid);
12
13static inline int br_vlan_cmp(struct rhashtable_compare_arg *arg,
14 const void *ptr)
15{
16 const struct net_bridge_vlan *vle = ptr;
17 u16 vid = *(u16 *)arg->key;
18
19 return vle->vid != vid;
20}
21
/* Parameters for the per-group vlan rhashtable, keyed by 16-bit vid. */
static const struct rhashtable_params br_vlan_rht_params = {
	.head_offset = offsetof(struct net_bridge_vlan, vnode),
	.key_offset = offsetof(struct net_bridge_vlan, vid),
	.key_len = sizeof(u16),
	.nelem_hint = 3,	/* most setups use only a handful of vlans */
	.locks_mul = 1,
	.max_size = VLAN_N_VID,	/* vid space is bounded at 4096 */
	.obj_cmpfn = br_vlan_cmp,
	.automatic_shrinking = true,
};
32
/* Look up a vlan entry by vid in a group's hash table; NULL if absent. */
static struct net_bridge_vlan *br_vlan_lookup(struct rhashtable *tbl, u16 vid)
{
	return rhashtable_lookup_fast(tbl, &vid, br_vlan_rht_params);
}
37
/* Make @v the group's pvid.  Returns true if the pvid actually changed. */
static bool __vlan_add_pvid(struct net_bridge_vlan_group *vg,
			    const struct net_bridge_vlan *v)
{
	if (vg->pvid == v->vid)
		return false;

	/* order state/pvid publication against lockless readers of vg->pvid */
	smp_wmb();
	br_vlan_set_pvid_state(vg, v->state);
	vg->pvid = v->vid;

	return true;
}
50
/* Clear the group's pvid if it is currently @vid.
 * Returns true if the pvid was cleared.
 */
static bool __vlan_delete_pvid(struct net_bridge_vlan_group *vg, u16 vid)
{
	if (vg->pvid != vid)
		return false;

	/* order the clear against lockless readers of vg->pvid */
	smp_wmb();
	vg->pvid = 0;

	return true;
}
61
62
63static bool __vlan_add_flags(struct net_bridge_vlan *v, u16 flags)
64{
65 struct net_bridge_vlan_group *vg;
66 u16 old_flags = v->flags;
67 bool ret;
68
69 if (br_vlan_is_master(v))
70 vg = br_vlan_group(v->br);
71 else
72 vg = nbp_vlan_group(v->port);
73
74 if (flags & BRIDGE_VLAN_INFO_PVID)
75 ret = __vlan_add_pvid(vg, v);
76 else
77 ret = __vlan_delete_pvid(vg, v->vid);
78
79 if (flags & BRIDGE_VLAN_INFO_UNTAGGED)
80 v->flags |= BRIDGE_VLAN_INFO_UNTAGGED;
81 else
82 v->flags &= ~BRIDGE_VLAN_INFO_UNTAGGED;
83
84 return ret || !!(old_flags ^ v->flags);
85}
86
/* Program the vid into the port's underlying device filter so tagged
 * traffic reaches the bridge even when the port is not promiscuous.
 */
static int __vlan_vid_add(struct net_device *dev, struct net_bridge *br,
			  struct net_bridge_vlan *v, u16 flags,
			  struct netlink_ext_ack *extack)
{
	int err;

	/* Try the switchdev op first; if the device has no switchdev
	 * support fall back to the software 8021q vid filter.
	 */
	err = br_switchdev_port_vlan_add(dev, v->vid, flags, extack);
	if (err == -EOPNOTSUPP)
		return vlan_vid_add(dev, br->vlan_proto, v->vid);
	/* tells __vlan_vid_del() to skip the vlan_vid_del() counterpart */
	v->priv_flags |= BR_VLFLAG_ADDED_BY_SWITCHDEV;
	return err;
}
102
103static void __vlan_add_list(struct net_bridge_vlan *v)
104{
105 struct net_bridge_vlan_group *vg;
106 struct list_head *headp, *hpos;
107 struct net_bridge_vlan *vent;
108
109 if (br_vlan_is_master(v))
110 vg = br_vlan_group(v->br);
111 else
112 vg = nbp_vlan_group(v->port);
113
114 headp = &vg->vlan_list;
115 list_for_each_prev(hpos, headp) {
116 vent = list_entry(hpos, struct net_bridge_vlan, vlist);
117 if (v->vid >= vent->vid)
118 break;
119 }
120 list_add_rcu(&v->vlist, hpos);
121}
122
/* Unlink @v from its group's sorted vlan list (RCU-safe removal). */
static void __vlan_del_list(struct net_bridge_vlan *v)
{
	list_del_rcu(&v->vlist);
}
127
/* Remove the vid from the port's underlying device filter; counterpart
 * of __vlan_vid_add().
 */
static int __vlan_vid_del(struct net_device *dev, struct net_bridge *br,
			  const struct net_bridge_vlan *v)
{
	int err;

	/* Try the switchdev op first; only fall back to the 8021q vid
	 * filter removal if switchdev didn't install the entry.
	 */
	err = br_switchdev_port_vlan_del(dev, v->vid);
	if (!(v->priv_flags & BR_VLFLAG_ADDED_BY_SWITCHDEV))
		vlan_vid_del(dev, br->vlan_proto, v->vid);
	/* -EOPNOTSUPP just means no switchdev support, not a failure */
	return err == -EOPNOTSUPP ? 0 : err;
}
141
142
143
144
/* Get a reference to the bridge's global (master) vlan entry for @vid,
 * creating it if it doesn't exist yet.  Returns NULL on failure.
 */
static struct net_bridge_vlan *
br_vlan_get_master(struct net_bridge *br, u16 vid,
		   struct netlink_ext_ack *extack)
{
	struct net_bridge_vlan_group *vg;
	struct net_bridge_vlan *masterv;

	vg = br_vlan_group(br);
	masterv = br_vlan_find(vg, vid);
	if (!masterv) {
		bool changed;

		/* missing global ctx, create it now (no flags: not a
		 * brentry, it only backs per-port entries)
		 */
		if (br_vlan_add(br, vid, 0, &changed, extack))
			return NULL;
		masterv = br_vlan_find(vg, vid);
		if (WARN_ON(!masterv))
			return NULL;
		/* first port reference owns the initial refcount */
		refcount_set(&masterv->refcnt, 1);
		return masterv;
	}
	refcount_inc(&masterv->refcnt);

	return masterv;
}
170
/* RCU callback freeing a master (bridge-global) vlan entry and its
 * per-cpu stats after the grace period.
 */
static void br_master_vlan_rcu_free(struct rcu_head *rcu)
{
	struct net_bridge_vlan *v;

	v = container_of(rcu, struct net_bridge_vlan, rcu);
	WARN_ON(!br_vlan_is_master(v));
	free_percpu(v->stats);
	v->stats = NULL;
	kfree(v);
}
181
/* Drop a reference on a master vlan entry; on the last put, unlink it
 * from the bridge's group and schedule RCU freeing.
 */
static void br_vlan_put_master(struct net_bridge_vlan *masterv)
{
	struct net_bridge_vlan_group *vg;

	if (!br_vlan_is_master(masterv))
		return;

	vg = br_vlan_group(masterv->br);
	if (refcount_dec_and_test(&masterv->refcnt)) {
		rhashtable_remove_fast(&vg->vlan_hash,
				       &masterv->vnode, br_vlan_rht_params);
		__vlan_del_list(masterv);
		br_multicast_toggle_one_vlan(masterv, false);
		br_multicast_ctx_deinit(&masterv->br_mcast_ctx);
		/* defer the free until readers are done with the entry */
		call_rcu(&masterv->rcu, br_master_vlan_rcu_free);
	}
}
199
/* RCU callback freeing a per-port vlan entry.  The stats are only freed
 * when the port owns them; otherwise they are shared with the master
 * entry and freed there.
 */
static void nbp_vlan_rcu_free(struct rcu_head *rcu)
{
	struct net_bridge_vlan *v;

	v = container_of(rcu, struct net_bridge_vlan, rcu);
	WARN_ON(br_vlan_is_master(v));
	/* if we had per-vlan stats, release them */
	if (v->priv_flags & BR_VLFLAG_PER_PORT_STATS)
		free_percpu(v->stats);
	v->stats = NULL;
	kfree(v);
}
212
213
214
215
216
217
218
219
220
221
222
223
224static int __vlan_add(struct net_bridge_vlan *v, u16 flags,
225 struct netlink_ext_ack *extack)
226{
227 struct net_bridge_vlan *masterv = NULL;
228 struct net_bridge_port *p = NULL;
229 struct net_bridge_vlan_group *vg;
230 struct net_device *dev;
231 struct net_bridge *br;
232 int err;
233
234 if (br_vlan_is_master(v)) {
235 br = v->br;
236 dev = br->dev;
237 vg = br_vlan_group(br);
238 } else {
239 p = v->port;
240 br = p->br;
241 dev = p->dev;
242 vg = nbp_vlan_group(p);
243 }
244
245 if (p) {
246
247
248
249
250 err = __vlan_vid_add(dev, br, v, flags, extack);
251 if (err)
252 goto out;
253
254
255 if (flags & BRIDGE_VLAN_INFO_MASTER) {
256 bool changed;
257
258 err = br_vlan_add(br, v->vid,
259 flags | BRIDGE_VLAN_INFO_BRENTRY,
260 &changed, extack);
261 if (err)
262 goto out_filt;
263
264 if (changed)
265 br_vlan_notify(br, NULL, v->vid, 0,
266 RTM_NEWVLAN);
267 }
268
269 masterv = br_vlan_get_master(br, v->vid, extack);
270 if (!masterv) {
271 err = -ENOMEM;
272 goto out_filt;
273 }
274 v->brvlan = masterv;
275 if (br_opt_get(br, BROPT_VLAN_STATS_PER_PORT)) {
276 v->stats =
277 netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
278 if (!v->stats) {
279 err = -ENOMEM;
280 goto out_filt;
281 }
282 v->priv_flags |= BR_VLFLAG_PER_PORT_STATS;
283 } else {
284 v->stats = masterv->stats;
285 }
286 br_multicast_port_ctx_init(p, v, &v->port_mcast_ctx);
287 } else {
288 err = br_switchdev_port_vlan_add(dev, v->vid, flags, extack);
289 if (err && err != -EOPNOTSUPP)
290 goto out;
291 br_multicast_ctx_init(br, v, &v->br_mcast_ctx);
292 v->priv_flags |= BR_VLFLAG_GLOBAL_MCAST_ENABLED;
293 }
294
295
296 if (br_vlan_should_use(v)) {
297 err = br_fdb_insert(br, p, dev->dev_addr, v->vid);
298 if (err) {
299 br_err(br, "failed insert local address into bridge forwarding table\n");
300 goto out_filt;
301 }
302 vg->num_vlans++;
303 }
304
305
306 v->state = BR_STATE_FORWARDING;
307
308 err = rhashtable_lookup_insert_fast(&vg->vlan_hash, &v->vnode,
309 br_vlan_rht_params);
310 if (err)
311 goto out_fdb_insert;
312
313 __vlan_add_list(v);
314 __vlan_add_flags(v, flags);
315 br_multicast_toggle_one_vlan(v, true);
316
317 if (p)
318 nbp_vlan_set_vlan_dev_state(p, v->vid);
319out:
320 return err;
321
322out_fdb_insert:
323 if (br_vlan_should_use(v)) {
324 br_fdb_find_delete_local(br, p, dev->dev_addr, v->vid);
325 vg->num_vlans--;
326 }
327
328out_filt:
329 if (p) {
330 __vlan_vid_del(dev, br, v);
331 if (masterv) {
332 if (v->stats && masterv->stats != v->stats)
333 free_percpu(v->stats);
334 v->stats = NULL;
335
336 br_vlan_put_master(masterv);
337 v->brvlan = NULL;
338 }
339 } else {
340 br_switchdev_port_vlan_del(dev, v->vid);
341 }
342
343 goto out;
344}
345
/* Shared vlan delete path for both port vlans and bridge (master)
 * entries.  For port vlans the entry itself is freed here (via RCU);
 * master entries are only freed once their refcount drops in
 * br_vlan_put_master().
 */
static int __vlan_del(struct net_bridge_vlan *v)
{
	struct net_bridge_vlan *masterv = v;
	struct net_bridge_vlan_group *vg;
	struct net_bridge_port *p = NULL;
	int err = 0;

	if (br_vlan_is_master(v)) {
		vg = br_vlan_group(v->br);
	} else {
		p = v->port;
		vg = nbp_vlan_group(v->port);
		masterv = v->brvlan;
	}

	__vlan_delete_pvid(vg, v->vid);
	if (p) {
		err = __vlan_vid_del(p->dev, p->br, v);
		if (err)
			goto out;
	} else {
		err = br_switchdev_port_vlan_del(v->br->dev, v->vid);
		if (err && err != -EOPNOTSUPP)
			goto out;
		err = 0;
	}

	/* was used for filtering: drop the brentry flag and the count */
	if (br_vlan_should_use(v)) {
		v->flags &= ~BRIDGE_VLAN_INFO_BRENTRY;
		vg->num_vlans--;
	}

	/* masterv == v means a master entry; its unlink/free happens in
	 * br_vlan_put_master() when the last reference is dropped
	 */
	if (masterv != v) {
		vlan_tunnel_info_del(vg, v);
		rhashtable_remove_fast(&vg->vlan_hash, &v->vnode,
				       br_vlan_rht_params);
		__vlan_del_list(v);
		nbp_vlan_set_vlan_dev_state(p, v->vid);
		br_multicast_toggle_one_vlan(v, false);
		br_multicast_port_ctx_deinit(&v->port_mcast_ctx);
		call_rcu(&v->rcu, nbp_vlan_rcu_free);
	}

	br_vlan_put_master(masterv);
out:
	return err;
}
393
/* Free an (already emptied) vlan group and its hash/tunnel state. */
static void __vlan_group_free(struct net_bridge_vlan_group *vg)
{
	WARN_ON(!list_empty(&vg->vlan_list));
	rhashtable_destroy(&vg->vlan_hash);
	vlan_tunnel_deinit(vg);
	kfree(vg);
}
401
/* Delete every vlan in @vg, sending RTM_DELVLAN notifications compacted
 * into contiguous vid ranges (list is kept sorted by vid).
 */
static void __vlan_flush(const struct net_bridge *br,
			 const struct net_bridge_port *p,
			 struct net_bridge_vlan_group *vg)
{
	struct net_bridge_vlan *vlan, *tmp;
	u16 v_start = 0, v_end = 0;

	__vlan_delete_pvid(vg, vg->pvid);
	list_for_each_entry_safe(vlan, tmp, &vg->vlan_list, vlist) {
		/* take care of disjoint ranges */
		if (!v_start) {
			v_start = vlan->vid;
		} else if (vlan->vid - v_end != 1) {
			/* found a gap: notify the range collected so far */
			br_vlan_notify(br, p, v_start, v_end, RTM_DELVLAN);
			v_start = vlan->vid;
		}
		v_end = vlan->vid;

		__vlan_del(vlan);
	}

	/* notify about the last/whole vlan range */
	if (v_start)
		br_vlan_notify(br, p, v_start, v_end, RTM_DELVLAN);
}
428
/* Egress vlan handling: validate the skb's vlan against @vg, update tx
 * stats, strip the tag for untagged-egress vlans and apply tunnel
 * mapping.  Returns the (possibly modified) skb, or NULL if it was
 * dropped and freed.
 */
struct sk_buff *br_handle_vlan(struct net_bridge *br,
			       const struct net_bridge_port *p,
			       struct net_bridge_vlan_group *vg,
			       struct sk_buff *skb)
{
	struct pcpu_sw_netstats *stats;
	struct net_bridge_vlan *v;
	u16 vid;

	/* If this packet was not filtered at input, let it pass */
	if (!BR_INPUT_SKB_CB(skb)->vlan_filtered)
		goto out;

	/* At this point, we know that the frame was filtered and contains
	 * a valid vlan id.  If the vlan id has an untagged flag, the frame
	 * is delivered untagged.
	 */
	br_vlan_get_tag(skb, &vid);
	v = br_vlan_find(vg, vid);
	/* Vlan entry must be configured here or the frame is dropped —
	 * except for frames the bridge device itself receives in
	 * promiscuous mode, which are let through untouched.
	 */
	if (!v || !br_vlan_should_use(v)) {
		if ((br->dev->flags & IFF_PROMISC) && skb->dev == br->dev) {
			goto out;
		} else {
			kfree_skb(skb);
			return NULL;
		}
	}
	if (br_opt_get(br, BROPT_VLAN_STATS_ENABLED)) {
		stats = this_cpu_ptr(v->stats);
		u64_stats_update_begin(&stats->syncp);
		stats->tx_bytes += skb->len;
		stats->tx_packets++;
		u64_stats_update_end(&stats->syncp);
	}

	/* If the skb will be sent using forwarding offload, the assumption
	 * is that the switchdev will inject the packet into hardware
	 * together with the bridge vlan, so no tag clearing here.
	 */
	if (v->flags & BRIDGE_VLAN_INFO_UNTAGGED &&
	    !br_switchdev_frame_uses_tx_fwd_offload(skb))
		__vlan_hwaccel_clear_tag(skb);

	if (p && (p->flags & BR_VLAN_TUNNEL) &&
	    br_handle_egress_vlan_tunnel(skb, v)) {
		kfree_skb(skb);
		return NULL;
	}
out:
	return skb;
}
488
489
/* Ingress vlan filtering core: normalize the skb's vlan tag against the
 * bridge's vlan protocol, apply the pvid to untagged/priority-tagged
 * frames, validate per-vlan state and update rx stats.
 * Returns false (and frees the skb) when the frame must be dropped.
 * Called under RCU.
 */
static bool __allowed_ingress(const struct net_bridge *br,
			      struct net_bridge_vlan_group *vg,
			      struct sk_buff *skb, u16 *vid,
			      u8 *state,
			      struct net_bridge_vlan **vlan)
{
	struct pcpu_sw_netstats *stats;
	struct net_bridge_vlan *v;
	bool tagged;

	BR_INPUT_SKB_CB(skb)->vlan_filtered = true;
	/* If vlan tx offload is disabled on bridge device and frame was
	 * sent from vlan device on the bridge device, it does not have
	 * HW accelerated vlan tag.
	 */
	if (unlikely(!skb_vlan_tag_present(skb) &&
		     skb->protocol == br->vlan_proto)) {
		skb = skb_vlan_untag(skb);
		if (unlikely(!skb))
			return false;
	}

	if (!br_vlan_get_tag(skb, vid)) {
		/* Tagged frame */
		if (skb->vlan_proto != br->vlan_proto) {
			/* Protocol-mismatch: re-insert the foreign tag
			 * into the payload and treat as untagged
			 */
			skb_push(skb, ETH_HLEN);
			skb = vlan_insert_tag_set_proto(skb, skb->vlan_proto,
							skb_vlan_tag_get(skb));
			if (unlikely(!skb))
				return false;

			skb_pull(skb, ETH_HLEN);
			skb_reset_mac_len(skb);
			*vid = 0;
			tagged = false;
		} else {
			tagged = true;
		}
	} else {
		/* Untagged frame */
		tagged = false;
	}

	if (!*vid) {
		u16 pvid = br_get_pvid(vg);

		/* Frame had a tag with VID 0 or did not have a tag.
		 * See if pvid is set on this port.  That tells us which
		 * vlan untagged or priority-tagged traffic belongs to.
		 */
		if (!pvid)
			goto drop;

		/* PVID is set on this port.  Any untagged or priority-tagged
		 * ingress frame is considered to belong to this vlan.
		 */
		*vid = pvid;
		if (likely(!tagged))
			/* Untagged Frame. */
			__vlan_hwaccel_put_tag(skb, br->vlan_proto, pvid);
		else
			/* Priority-tagged Frame (VID 0 but prio bits set):
			 * only update the vid, the priority bits in
			 * vlan_tci are preserved
			 */
			skb->vlan_tci |= pvid;

		/* if snooping and stats are disabled we can avoid the
		 * per-vlan lookup entirely
		 */
		if (!br_opt_get(br, BROPT_MCAST_VLAN_SNOOPING_ENABLED) &&
		    !br_opt_get(br, BROPT_VLAN_STATS_ENABLED)) {
			if (*state == BR_STATE_FORWARDING) {
				*state = br_vlan_get_pvid_state(vg);
				return br_vlan_state_allowed(*state, true);
			} else {
				return true;
			}
		}
	}
	v = br_vlan_find(vg, *vid);
	if (!v || !br_vlan_should_use(v))
		goto drop;

	if (*state == BR_STATE_FORWARDING) {
		*state = br_vlan_get_state(v);
		if (!br_vlan_state_allowed(*state, true))
			goto drop;
	}

	if (br_opt_get(br, BROPT_VLAN_STATS_ENABLED)) {
		stats = this_cpu_ptr(v->stats);
		u64_stats_update_begin(&stats->syncp);
		stats->rx_bytes += skb->len;
		stats->rx_packets++;
		u64_stats_update_end(&stats->syncp);
	}

	*vlan = v;

	return true;

drop:
	kfree_skb(skb);
	return false;
}
596
597bool br_allowed_ingress(const struct net_bridge *br,
598 struct net_bridge_vlan_group *vg, struct sk_buff *skb,
599 u16 *vid, u8 *state,
600 struct net_bridge_vlan **vlan)
601{
602
603
604
605 *vlan = NULL;
606 if (!br_opt_get(br, BROPT_VLAN_ENABLED)) {
607 BR_INPUT_SKB_CB(skb)->vlan_filtered = false;
608 return true;
609 }
610
611 return __allowed_ingress(br, vg, skb, vid, state, vlan);
612}
613
614
615bool br_allowed_egress(struct net_bridge_vlan_group *vg,
616 const struct sk_buff *skb)
617{
618 const struct net_bridge_vlan *v;
619 u16 vid;
620
621
622 if (!BR_INPUT_SKB_CB(skb)->vlan_filtered)
623 return true;
624
625 br_vlan_get_tag(skb, &vid);
626 v = br_vlan_find(vg, vid);
627 if (v && br_vlan_should_use(v) &&
628 br_vlan_state_allowed(br_vlan_get_state(v), false))
629 return true;
630
631 return false;
632}
633
634
635bool br_should_learn(struct net_bridge_port *p, struct sk_buff *skb, u16 *vid)
636{
637 struct net_bridge_vlan_group *vg;
638 struct net_bridge *br = p->br;
639 struct net_bridge_vlan *v;
640
641
642 if (!br_opt_get(br, BROPT_VLAN_ENABLED))
643 return true;
644
645 vg = nbp_vlan_group_rcu(p);
646 if (!vg || !vg->num_vlans)
647 return false;
648
649 if (!br_vlan_get_tag(skb, vid) && skb->vlan_proto != br->vlan_proto)
650 *vid = 0;
651
652 if (!*vid) {
653 *vid = br_get_pvid(vg);
654 if (!*vid ||
655 !br_vlan_state_allowed(br_vlan_get_pvid_state(vg), true))
656 return false;
657
658 return true;
659 }
660
661 v = br_vlan_find(vg, *vid);
662 if (v && br_vlan_state_allowed(br_vlan_get_state(v), true))
663 return true;
664
665 return false;
666}
667
/* Update an already-existing bridge vlan entry: possibly promote it to
 * a brentry (usable for filtering) and refresh its flags.  Sets
 * *changed when visible state was modified.
 */
static int br_vlan_add_existing(struct net_bridge *br,
				struct net_bridge_vlan_group *vg,
				struct net_bridge_vlan *vlan,
				u16 flags, bool *changed,
				struct netlink_ext_ack *extack)
{
	int err;

	err = br_switchdev_port_vlan_add(br->dev, vlan->vid, flags, extack);
	if (err && err != -EOPNOTSUPP)
		return err;

	if (!br_vlan_is_brentry(vlan)) {
		/* a master-only context can't be turned into a brentry
		 * unless the caller asked for it explicitly
		 */
		if (!(flags & BRIDGE_VLAN_INFO_BRENTRY)) {
			err = -EINVAL;
			goto err_flags;
		}

		/* It was only kept for port vlans, now make it real */
		err = br_fdb_insert(br, NULL, br->dev->dev_addr,
				    vlan->vid);
		if (err) {
			br_err(br, "failed to insert local address into bridge forwarding table\n");
			goto err_fdb_insert;
		}

		refcount_inc(&vlan->refcnt);
		vlan->flags |= BRIDGE_VLAN_INFO_BRENTRY;
		vg->num_vlans++;
		*changed = true;
		br_multicast_toggle_one_vlan(vlan, true);
	}

	if (__vlan_add_flags(vlan, flags))
		*changed = true;

	return 0;

err_fdb_insert:
err_flags:
	/* roll back the switchdev add done above */
	br_switchdev_port_vlan_del(br->dev, vlan->vid);
	return err;
}
711
712
713
714
715
/* Add a vlan to the bridge device itself.  Must be called with RTNL.
 * Sets *changed to true if anything was modified.
 */
int br_vlan_add(struct net_bridge *br, u16 vid, u16 flags, bool *changed,
		struct netlink_ext_ack *extack)
{
	struct net_bridge_vlan_group *vg;
	struct net_bridge_vlan *vlan;
	int ret;

	ASSERT_RTNL();

	*changed = false;
	vg = br_vlan_group(br);
	vlan = br_vlan_find(vg, vid);
	if (vlan)
		return br_vlan_add_existing(br, vg, vlan, flags, changed,
					    extack);

	vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
	if (!vlan)
		return -ENOMEM;

	vlan->stats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
	if (!vlan->stats) {
		kfree(vlan);
		return -ENOMEM;
	}
	vlan->vid = vid;
	/* bridge entries are always masters; the PVID bit is handled
	 * later by __vlan_add_flags() inside __vlan_add()
	 */
	vlan->flags = flags | BRIDGE_VLAN_INFO_MASTER;
	vlan->flags &= ~BRIDGE_VLAN_INFO_PVID;
	vlan->br = br;
	if (flags & BRIDGE_VLAN_INFO_BRENTRY)
		refcount_set(&vlan->refcnt, 1);
	ret = __vlan_add(vlan, flags, extack);
	if (ret) {
		free_percpu(vlan->stats);
		kfree(vlan);
	} else {
		*changed = true;
	}

	return ret;
}
757
758
759
760
761int br_vlan_delete(struct net_bridge *br, u16 vid)
762{
763 struct net_bridge_vlan_group *vg;
764 struct net_bridge_vlan *v;
765
766 ASSERT_RTNL();
767
768 vg = br_vlan_group(br);
769 v = br_vlan_find(vg, vid);
770 if (!v || !br_vlan_is_brentry(v))
771 return -ENOENT;
772
773 br_fdb_find_delete_local(br, NULL, br->dev->dev_addr, vid);
774 br_fdb_delete_by_port(br, NULL, vid, 0);
775
776 vlan_tunnel_info_del(vg, v);
777
778 return __vlan_del(v);
779}
780
/* Remove all vlans from the bridge device and free its vlan group.
 * The group pointer is cleared before synchronize_rcu() so no reader
 * can still see it when the group memory is freed.
 */
void br_vlan_flush(struct net_bridge *br)
{
	struct net_bridge_vlan_group *vg;

	ASSERT_RTNL();

	vg = br_vlan_group(br);
	__vlan_flush(br, NULL, vg);
	RCU_INIT_POINTER(br->vlgrp, NULL);
	synchronize_rcu();
	__vlan_group_free(vg);
}
793
794struct net_bridge_vlan *br_vlan_find(struct net_bridge_vlan_group *vg, u16 vid)
795{
796 if (!vg)
797 return NULL;
798
799 return br_vlan_lookup(&vg->vlan_hash, vid);
800}
801
802
803static void recalculate_group_addr(struct net_bridge *br)
804{
805 if (br_opt_get(br, BROPT_GROUP_ADDR_SET))
806 return;
807
808 spin_lock_bh(&br->lock);
809 if (!br_opt_get(br, BROPT_VLAN_ENABLED) ||
810 br->vlan_proto == htons(ETH_P_8021Q)) {
811
812 br->group_addr[5] = 0x00;
813 } else {
814
815 br->group_addr[5] = 0x08;
816 }
817 spin_unlock_bh(&br->lock);
818}
819
820
821void br_recalculate_fwd_mask(struct net_bridge *br)
822{
823 if (!br_opt_get(br, BROPT_VLAN_ENABLED) ||
824 br->vlan_proto == htons(ETH_P_8021Q))
825 br->group_fwd_mask_required = BR_GROUPFWD_DEFAULT;
826 else
827 br->group_fwd_mask_required = BR_GROUPFWD_8021AD &
828 ~(1u << br->group_addr[5]);
829}
830
/* Enable/disable vlan filtering on the bridge, propagating the setting
 * to switchdev and recomputing promiscuity, group address and forward
 * mask.  Rolls the option back if switchdev rejects it.
 */
int br_vlan_filter_toggle(struct net_bridge *br, unsigned long val,
			  struct netlink_ext_ack *extack)
{
	struct switchdev_attr attr = {
		.orig_dev = br->dev,
		.id = SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING,
		.flags = SWITCHDEV_F_SKIP_EOPNOTSUPP,
		.u.vlan_filtering = val,
	};
	int err;

	if (br_opt_get(br, BROPT_VLAN_ENABLED) == !!val)
		return 0;

	/* toggle first so the switchdev notifier sees the new state */
	br_opt_toggle(br, BROPT_VLAN_ENABLED, !!val);

	err = switchdev_port_attr_set(br->dev, &attr, extack);
	if (err && err != -EOPNOTSUPP) {
		br_opt_toggle(br, BROPT_VLAN_ENABLED, !val);
		return err;
	}

	br_manage_promisc(br);
	recalculate_group_addr(br);
	br_recalculate_fwd_mask(br);
	/* mcast vlan snooping requires vlan filtering; force it off */
	if (!val && br_opt_get(br, BROPT_MCAST_VLAN_SNOOPING_ENABLED)) {
		br_info(br, "vlan filtering disabled, automatically disabling multicast vlan snooping\n");
		br_multicast_toggle_vlan_snooping(br, false, NULL);
	}

	return 0;
}
863
/* Exported helper: report whether vlan filtering is on for bridge @dev. */
bool br_vlan_enabled(const struct net_device *dev)
{
	struct net_bridge *br = netdev_priv(dev);

	return br_opt_get(br, BROPT_VLAN_ENABLED);
}
EXPORT_SYMBOL_GPL(br_vlan_enabled);
871
/* Exported helper: return the bridge's vlan protocol in host byte order. */
int br_vlan_get_proto(const struct net_device *dev, u16 *p_proto)
{
	struct net_bridge *br = netdev_priv(dev);

	*p_proto = ntohs(br->vlan_proto);

	return 0;
}
EXPORT_SYMBOL_GPL(br_vlan_get_proto);
881
/* Switch the bridge's vlan protocol (802.1Q <-> 802.1ad): program every
 * port's device filter with the new proto first, then flip the bridge
 * setting, then remove the old-proto filters.  On failure everything
 * added so far is unwound in reverse.
 */
int __br_vlan_set_proto(struct net_bridge *br, __be16 proto,
			struct netlink_ext_ack *extack)
{
	struct switchdev_attr attr = {
		.orig_dev = br->dev,
		.id = SWITCHDEV_ATTR_ID_BRIDGE_VLAN_PROTOCOL,
		.flags = SWITCHDEV_F_SKIP_EOPNOTSUPP,
		.u.vlan_protocol = ntohs(proto),
	};
	int err = 0;
	struct net_bridge_port *p;
	struct net_bridge_vlan *vlan;
	struct net_bridge_vlan_group *vg;
	__be16 oldproto = br->vlan_proto;

	if (br->vlan_proto == proto)
		return 0;

	err = switchdev_port_attr_set(br->dev, &attr, extack);
	if (err && err != -EOPNOTSUPP)
		return err;

	/* Add VLANs for the new proto to the device filter. */
	list_for_each_entry(p, &br->port_list, list) {
		vg = nbp_vlan_group(p);
		list_for_each_entry(vlan, &vg->vlan_list, vlist) {
			err = vlan_vid_add(p->dev, proto, vlan->vid);
			if (err)
				goto err_filt;
		}
	}

	br->vlan_proto = proto;

	recalculate_group_addr(br);
	br_recalculate_fwd_mask(br);

	/* Delete VLANs for the old proto from the device filter. */
	list_for_each_entry(p, &br->port_list, list) {
		vg = nbp_vlan_group(p);
		list_for_each_entry(vlan, &vg->vlan_list, vlist)
			vlan_vid_del(p->dev, oldproto, vlan->vid);
	}

	return 0;

err_filt:
	/* revert the switchdev protocol change */
	attr.u.vlan_protocol = ntohs(oldproto);
	switchdev_port_attr_set(br->dev, &attr, NULL);

	/* undo the partial port: only vlans added before the failure */
	list_for_each_entry_continue_reverse(vlan, &vg->vlan_list, vlist)
		vlan_vid_del(p->dev, proto, vlan->vid);

	/* undo all fully-processed ports */
	list_for_each_entry_continue_reverse(p, &br->port_list, list) {
		vg = nbp_vlan_group(p);
		list_for_each_entry(vlan, &vg->vlan_list, vlist)
			vlan_vid_del(p->dev, proto, vlan->vid);
	}

	return err;
}
943
944int br_vlan_set_proto(struct net_bridge *br, unsigned long val,
945 struct netlink_ext_ack *extack)
946{
947 if (!eth_type_vlan(htons(val)))
948 return -EPROTONOSUPPORT;
949
950 return __br_vlan_set_proto(br, htons(val), extack);
951}
952
953int br_vlan_set_stats(struct net_bridge *br, unsigned long val)
954{
955 switch (val) {
956 case 0:
957 case 1:
958 br_opt_toggle(br, BROPT_VLAN_STATS_ENABLED, !!val);
959 break;
960 default:
961 return -EINVAL;
962 }
963
964 return 0;
965}
966
967int br_vlan_set_stats_per_port(struct net_bridge *br, unsigned long val)
968{
969 struct net_bridge_port *p;
970
971
972 list_for_each_entry(p, &br->port_list, list) {
973 struct net_bridge_vlan_group *vg = nbp_vlan_group(p);
974
975 if (vg->num_vlans)
976 return -EBUSY;
977 }
978
979 switch (val) {
980 case 0:
981 case 1:
982 br_opt_toggle(br, BROPT_VLAN_STATS_PER_PORT, !!val);
983 break;
984 default:
985 return -EINVAL;
986 }
987
988 return 0;
989}
990
991static bool vlan_default_pvid(struct net_bridge_vlan_group *vg, u16 vid)
992{
993 struct net_bridge_vlan *v;
994
995 if (vid != vg->pvid)
996 return false;
997
998 v = br_vlan_lookup(&vg->vlan_hash, vid);
999 if (v && br_vlan_should_use(v) &&
1000 (v->flags & BRIDGE_VLAN_INFO_UNTAGGED))
1001 return true;
1002
1003 return false;
1004}
1005
/* Disable the default pvid: remove the auto-installed pvid vlan from
 * the bridge and every port (only where it still looks auto-installed),
 * notifying userspace for each deletion.
 */
static void br_vlan_disable_default_pvid(struct net_bridge *br)
{
	struct net_bridge_port *p;
	u16 pvid = br->default_pvid;

	/* Disable default_pvid on all ports where it is still in
	 * use.
	 */
	if (vlan_default_pvid(br_vlan_group(br), pvid)) {
		if (!br_vlan_delete(br, pvid))
			br_vlan_notify(br, NULL, pvid, 0, RTM_DELVLAN);
	}

	list_for_each_entry(p, &br->port_list, list) {
		if (vlan_default_pvid(nbp_vlan_group(p), pvid) &&
		    !nbp_vlan_delete(p, pvid))
			br_vlan_notify(br, p, pvid, 0, RTM_DELVLAN);
	}

	br->default_pvid = 0;
}
1027
/* Change the default pvid to @pvid: install the new untagged pvid vlan
 * on the bridge and on every port that still uses the old default, and
 * remove the old one.  The @changed bitmap (bit 0 = bridge, bit N =
 * port_no N) records which entities were updated so a failure can be
 * rolled back exactly.
 */
int __br_vlan_set_default_pvid(struct net_bridge *br, u16 pvid,
			       struct netlink_ext_ack *extack)
{
	const struct net_bridge_vlan *pvent;
	struct net_bridge_vlan_group *vg;
	struct net_bridge_port *p;
	unsigned long *changed;
	bool vlchange;
	u16 old_pvid;
	int err = 0;

	if (!pvid) {
		br_vlan_disable_default_pvid(br);
		return 0;
	}

	changed = bitmap_zalloc(BR_MAX_PORTS, GFP_KERNEL);
	if (!changed)
		return -ENOMEM;

	old_pvid = br->default_pvid;

	/* Update default_pvid config only if we do not conflict with
	 * user configuration.
	 */
	vg = br_vlan_group(br);
	pvent = br_vlan_find(vg, pvid);
	if ((!old_pvid || vlan_default_pvid(vg, old_pvid)) &&
	    (!pvent || !br_vlan_should_use(pvent))) {
		err = br_vlan_add(br, pvid,
				  BRIDGE_VLAN_INFO_PVID |
				  BRIDGE_VLAN_INFO_UNTAGGED |
				  BRIDGE_VLAN_INFO_BRENTRY,
				  &vlchange, extack);
		if (err)
			goto out;

		if (br_vlan_delete(br, old_pvid))
			br_vlan_notify(br, NULL, old_pvid, 0, RTM_DELVLAN);
		br_vlan_notify(br, NULL, pvid, 0, RTM_NEWVLAN);
		/* bit 0 marks the bridge device itself */
		set_bit(0, changed);
	}

	list_for_each_entry(p, &br->port_list, list) {
		/* Update default_pvid config only if we do not conflict
		 * with user configuration.
		 */
		vg = nbp_vlan_group(p);
		if ((old_pvid &&
		     !vlan_default_pvid(vg, old_pvid)) ||
		    br_vlan_find(vg, pvid))
			continue;

		err = nbp_vlan_add(p, pvid,
				   BRIDGE_VLAN_INFO_PVID |
				   BRIDGE_VLAN_INFO_UNTAGGED,
				   &vlchange, extack);
		if (err)
			goto err_port;
		if (nbp_vlan_delete(p, old_pvid))
			br_vlan_notify(br, p, old_pvid, 0, RTM_DELVLAN);
		br_vlan_notify(p->br, p, pvid, 0, RTM_NEWVLAN);
		set_bit(p->port_no, changed);
	}

	br->default_pvid = pvid;

out:
	bitmap_free(changed);
	return err;

err_port:
	/* restore the old default pvid on everything changed so far */
	list_for_each_entry_continue_reverse(p, &br->port_list, list) {
		if (!test_bit(p->port_no, changed))
			continue;

		if (old_pvid) {
			nbp_vlan_add(p, old_pvid,
				     BRIDGE_VLAN_INFO_PVID |
				     BRIDGE_VLAN_INFO_UNTAGGED,
				     &vlchange, NULL);
			br_vlan_notify(p->br, p, old_pvid, 0, RTM_NEWVLAN);
		}
		nbp_vlan_delete(p, pvid);
		br_vlan_notify(br, p, pvid, 0, RTM_DELVLAN);
	}

	if (test_bit(0, changed)) {
		if (old_pvid) {
			br_vlan_add(br, old_pvid,
				    BRIDGE_VLAN_INFO_PVID |
				    BRIDGE_VLAN_INFO_UNTAGGED |
				    BRIDGE_VLAN_INFO_BRENTRY,
				    &vlchange, NULL);
			br_vlan_notify(br, NULL, old_pvid, 0, RTM_NEWVLAN);
		}
		br_vlan_delete(br, pvid);
		br_vlan_notify(br, NULL, pvid, 0, RTM_DELVLAN);
	}
	goto out;
}
1129
1130int br_vlan_set_default_pvid(struct net_bridge *br, unsigned long val,
1131 struct netlink_ext_ack *extack)
1132{
1133 u16 pvid = val;
1134 int err = 0;
1135
1136 if (val >= VLAN_VID_MASK)
1137 return -EINVAL;
1138
1139 if (pvid == br->default_pvid)
1140 goto out;
1141
1142
1143 if (br_opt_get(br, BROPT_VLAN_ENABLED)) {
1144 pr_info_once("Please disable vlan filtering to change default_pvid\n");
1145 err = -EPERM;
1146 goto out;
1147 }
1148 err = __br_vlan_set_default_pvid(br, pvid, extack);
1149out:
1150 return err;
1151}
1152
/* Allocate and initialize the bridge device's vlan group; defaults to
 * 802.1Q protocol and default_pvid 1.  Cleans up via the goto ladder on
 * failure.
 */
int br_vlan_init(struct net_bridge *br)
{
	struct net_bridge_vlan_group *vg;
	int ret = -ENOMEM;

	vg = kzalloc(sizeof(*vg), GFP_KERNEL);
	if (!vg)
		goto out;
	ret = rhashtable_init(&vg->vlan_hash, &br_vlan_rht_params);
	if (ret)
		goto err_rhtbl;
	ret = vlan_tunnel_init(vg);
	if (ret)
		goto err_tunnel_init;
	INIT_LIST_HEAD(&vg->vlan_list);
	br->vlan_proto = htons(ETH_P_8021Q);
	br->default_pvid = 1;
	/* publish only once fully initialized */
	rcu_assign_pointer(br->vlgrp, vg);

out:
	return ret;

err_tunnel_init:
	rhashtable_destroy(&vg->vlan_hash);
err_rhtbl:
	kfree(vg);

	goto out;
}
1182
/* Set up a port's vlan group: propagate the bridge's filtering state to
 * switchdev, create the hash/tunnel state and install the bridge's
 * default pvid on the port.  Fully unwinds on failure.
 */
int nbp_vlan_init(struct net_bridge_port *p, struct netlink_ext_ack *extack)
{
	struct switchdev_attr attr = {
		.orig_dev = p->br->dev,
		.id = SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING,
		.flags = SWITCHDEV_F_SKIP_EOPNOTSUPP,
		.u.vlan_filtering = br_opt_get(p->br, BROPT_VLAN_ENABLED),
	};
	struct net_bridge_vlan_group *vg;
	int ret = -ENOMEM;

	vg = kzalloc(sizeof(struct net_bridge_vlan_group), GFP_KERNEL);
	if (!vg)
		goto out;

	ret = switchdev_port_attr_set(p->dev, &attr, extack);
	if (ret && ret != -EOPNOTSUPP)
		goto err_vlan_enabled;

	ret = rhashtable_init(&vg->vlan_hash, &br_vlan_rht_params);
	if (ret)
		goto err_rhtbl;
	ret = vlan_tunnel_init(vg);
	if (ret)
		goto err_tunnel_init;
	INIT_LIST_HEAD(&vg->vlan_list);
	/* publish the group before adding the default pvid vlan, since
	 * nbp_vlan_add() looks the group up through p->vlgrp
	 */
	rcu_assign_pointer(p->vlgrp, vg);
	if (p->br->default_pvid) {
		bool changed;

		ret = nbp_vlan_add(p, p->br->default_pvid,
				   BRIDGE_VLAN_INFO_PVID |
				   BRIDGE_VLAN_INFO_UNTAGGED,
				   &changed, extack);
		if (ret)
			goto err_vlan_add;
		br_vlan_notify(p->br, p, p->br->default_pvid, 0, RTM_NEWVLAN);
	}
out:
	return ret;

err_vlan_add:
	/* un-publish before freeing; wait out concurrent readers */
	RCU_INIT_POINTER(p->vlgrp, NULL);
	synchronize_rcu();
	vlan_tunnel_deinit(vg);
err_tunnel_init:
	rhashtable_destroy(&vg->vlan_hash);
err_rhtbl:
err_vlan_enabled:
	kfree(vg);

	goto out;
}
1236
1237
1238
1239
1240
/* Add a vlan to a bridge port.  Must be called with RTNL.  Sets
 * *changed to true if anything was modified.
 */
int nbp_vlan_add(struct net_bridge_port *port, u16 vid, u16 flags,
		 bool *changed, struct netlink_ext_ack *extack)
{
	struct net_bridge_vlan *vlan;
	int ret;

	ASSERT_RTNL();

	*changed = false;
	vlan = br_vlan_find(nbp_vlan_group(port), vid);
	if (vlan) {
		/* Pass the flags to the hardware bridge */
		ret = br_switchdev_port_vlan_add(port->dev, vid, flags, extack);
		if (ret && ret != -EOPNOTSUPP)
			return ret;
		*changed = __vlan_add_flags(vlan, flags);

		return 0;
	}

	vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
	if (!vlan)
		return -ENOMEM;

	vlan->vid = vid;
	vlan->port = port;
	ret = __vlan_add(vlan, flags, extack);
	if (ret)
		kfree(vlan);
	else
		*changed = true;

	return ret;
}
1275
1276
1277
1278
1279int nbp_vlan_delete(struct net_bridge_port *port, u16 vid)
1280{
1281 struct net_bridge_vlan *v;
1282
1283 ASSERT_RTNL();
1284
1285 v = br_vlan_find(nbp_vlan_group(port), vid);
1286 if (!v)
1287 return -ENOENT;
1288 br_fdb_find_delete_local(port->br, port, port->dev->dev_addr, vid);
1289 br_fdb_delete_by_port(port->br, port, vid, 0);
1290
1291 return __vlan_del(v);
1292}
1293
/* Remove all vlans from a port and free its vlan group.  The group
 * pointer is cleared before synchronize_rcu() so no reader can still
 * see it when the memory is freed.
 */
void nbp_vlan_flush(struct net_bridge_port *port)
{
	struct net_bridge_vlan_group *vg;

	ASSERT_RTNL();

	vg = nbp_vlan_group(port);
	__vlan_flush(port->br, port, vg);
	RCU_INIT_POINTER(port->vlgrp, NULL);
	synchronize_rcu();
	__vlan_group_free(vg);
}
1306
/* Sum a vlan's per-cpu counters into @stats.  Each per-cpu snapshot is
 * taken under the u64_stats seqcount retry loop so 64-bit counters are
 * read consistently on 32-bit hosts.
 */
void br_vlan_get_stats(const struct net_bridge_vlan *v,
		       struct pcpu_sw_netstats *stats)
{
	int i;

	memset(stats, 0, sizeof(*stats));
	for_each_possible_cpu(i) {
		u64 rxpackets, rxbytes, txpackets, txbytes;
		struct pcpu_sw_netstats *cpu_stats;
		unsigned int start;

		cpu_stats = per_cpu_ptr(v->stats, i);
		do {
			start = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
			rxpackets = cpu_stats->rx_packets;
			rxbytes = cpu_stats->rx_bytes;
			txbytes = cpu_stats->tx_bytes;
			txpackets = cpu_stats->tx_packets;
		} while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start));

		stats->rx_packets += rxpackets;
		stats->rx_bytes += rxbytes;
		stats->tx_bytes += txbytes;
		stats->tx_packets += txpackets;
	}
}
1333
/* Exported helper: return the pvid of @dev, which may be either a
 * bridge port or the bridge device itself.  Caller must hold RTNL.
 */
int br_vlan_get_pvid(const struct net_device *dev, u16 *p_pvid)
{
	struct net_bridge_vlan_group *vg;
	struct net_bridge_port *p;

	ASSERT_RTNL();
	p = br_port_get_check_rtnl(dev);
	if (p)
		vg = nbp_vlan_group(p);
	else if (netif_is_bridge_master(dev))
		vg = br_vlan_group(netdev_priv(dev));
	else
		return -EINVAL;

	*p_pvid = br_get_pvid(vg);
	return 0;
}
EXPORT_SYMBOL_GPL(br_vlan_get_pvid);
1352
/* RCU variant of br_vlan_get_pvid(); caller must be in an RCU read-side
 * critical section instead of holding RTNL.
 */
int br_vlan_get_pvid_rcu(const struct net_device *dev, u16 *p_pvid)
{
	struct net_bridge_vlan_group *vg;
	struct net_bridge_port *p;

	p = br_port_get_check_rcu(dev);
	if (p)
		vg = nbp_vlan_group_rcu(p);
	else if (netif_is_bridge_master(dev))
		vg = br_vlan_group_rcu(netdev_priv(dev));
	else
		return -EINVAL;

	*p_pvid = br_get_pvid(vg);
	return 0;
}
EXPORT_SYMBOL_GPL(br_vlan_get_pvid_rcu);
1370
/* Exported helper: fill @p_vinfo for vlan @vid on @dev (bridge port or
 * bridge device).  Caller must hold RTNL.  Returns -ENOENT when the
 * vlan doesn't exist on that device.
 */
int br_vlan_get_info(const struct net_device *dev, u16 vid,
		     struct bridge_vlan_info *p_vinfo)
{
	struct net_bridge_vlan_group *vg;
	struct net_bridge_vlan *v;
	struct net_bridge_port *p;

	ASSERT_RTNL();
	p = br_port_get_check_rtnl(dev);
	if (p)
		vg = nbp_vlan_group(p);
	else if (netif_is_bridge_master(dev))
		vg = br_vlan_group(netdev_priv(dev));
	else
		return -EINVAL;

	v = br_vlan_find(vg, vid);
	if (!v)
		return -ENOENT;

	p_vinfo->vid = vid;
	p_vinfo->flags = v->flags;
	/* PVID is tracked in the group, not in the entry's flags */
	if (vid == br_get_pvid(vg))
		p_vinfo->flags |= BRIDGE_VLAN_INFO_PVID;
	return 0;
}
EXPORT_SYMBOL_GPL(br_vlan_get_info);
1398
1399int br_vlan_get_info_rcu(const struct net_device *dev, u16 vid,
1400 struct bridge_vlan_info *p_vinfo)
1401{
1402 struct net_bridge_vlan_group *vg;
1403 struct net_bridge_vlan *v;
1404 struct net_bridge_port *p;
1405
1406 p = br_port_get_check_rcu(dev);
1407 if (p)
1408 vg = nbp_vlan_group_rcu(p);
1409 else if (netif_is_bridge_master(dev))
1410 vg = br_vlan_group_rcu(netdev_priv(dev));
1411 else
1412 return -EINVAL;
1413
1414 v = br_vlan_find(vg, vid);
1415 if (!v)
1416 return -ENOENT;
1417
1418 p_vinfo->vid = vid;
1419 p_vinfo->flags = v->flags;
1420 if (vid == br_get_pvid(vg))
1421 p_vinfo->flags |= BRIDGE_VLAN_INFO_PVID;
1422 return 0;
1423}
1424EXPORT_SYMBOL_GPL(br_vlan_get_info_rcu);
1425
1426static int br_vlan_is_bind_vlan_dev(const struct net_device *dev)
1427{
1428 return is_vlan_dev(dev) &&
1429 !!(vlan_dev_priv(dev)->flags & VLAN_FLAG_BRIDGE_BINDING);
1430}
1431
/* netdev_walk_all_upper_dev_rcu() callback; a non-zero return stops the
 * walk, so it terminates at the first bridge-binding vlan upper found.
 */
static int br_vlan_is_bind_vlan_dev_fn(struct net_device *dev,
	       __always_unused struct netdev_nested_priv *priv)
{
	return br_vlan_is_bind_vlan_dev(dev);
}
1437
1438static bool br_vlan_has_upper_bind_vlan_dev(struct net_device *dev)
1439{
1440 int found;
1441
1442 rcu_read_lock();
1443 found = netdev_walk_all_upper_dev_rcu(dev, br_vlan_is_bind_vlan_dev_fn,
1444 NULL);
1445 rcu_read_unlock();
1446
1447 return !!found;
1448}
1449
/* Walk context for br_vlan_match_bind_vlan_dev_fn(): @vid is the vlan
 * id being searched for, @result receives the matching upper device.
 */
struct br_vlan_bind_walk_data {
	u16 vid;
	struct net_device *result;
};
1454
1455static int br_vlan_match_bind_vlan_dev_fn(struct net_device *dev,
1456 struct netdev_nested_priv *priv)
1457{
1458 struct br_vlan_bind_walk_data *data = priv->data;
1459 int found = 0;
1460
1461 if (br_vlan_is_bind_vlan_dev(dev) &&
1462 vlan_dev_priv(dev)->vlan_id == data->vid) {
1463 data->result = dev;
1464 found = 1;
1465 }
1466
1467 return found;
1468}
1469
1470static struct net_device *
1471br_vlan_get_upper_bind_vlan_dev(struct net_device *dev, u16 vid)
1472{
1473 struct br_vlan_bind_walk_data data = {
1474 .vid = vid,
1475 };
1476 struct netdev_nested_priv priv = {
1477 .data = (void *)&data,
1478 };
1479
1480 rcu_read_lock();
1481 netdev_walk_all_upper_dev_rcu(dev, br_vlan_match_bind_vlan_dev_fn,
1482 &priv);
1483 rcu_read_unlock();
1484
1485 return data.result;
1486}
1487
1488static bool br_vlan_is_dev_up(const struct net_device *dev)
1489{
1490 return !!(dev->flags & IFF_UP) && netif_oper_up(dev);
1491}
1492
1493static void br_vlan_set_vlan_dev_state(const struct net_bridge *br,
1494 struct net_device *vlan_dev)
1495{
1496 u16 vid = vlan_dev_priv(vlan_dev)->vlan_id;
1497 struct net_bridge_vlan_group *vg;
1498 struct net_bridge_port *p;
1499 bool has_carrier = false;
1500
1501 if (!netif_carrier_ok(br->dev)) {
1502 netif_carrier_off(vlan_dev);
1503 return;
1504 }
1505
1506 list_for_each_entry(p, &br->port_list, list) {
1507 vg = nbp_vlan_group(p);
1508 if (br_vlan_find(vg, vid) && br_vlan_is_dev_up(p->dev)) {
1509 has_carrier = true;
1510 break;
1511 }
1512 }
1513
1514 if (has_carrier)
1515 netif_carrier_on(vlan_dev);
1516 else
1517 netif_carrier_off(vlan_dev);
1518}
1519
/* After a state change on port @p, refresh the carrier state of every
 * bridge-binding vlan upper corresponding to a vlan @p is a member of.
 * If the port is up and the bridge has carrier, the vlan device can be
 * turned on directly; otherwise recompute from all ports.
 */
static void br_vlan_set_all_vlan_dev_state(struct net_bridge_port *p)
{
	struct net_bridge_vlan_group *vg = nbp_vlan_group(p);
	struct net_bridge_vlan *vlan;
	struct net_device *vlan_dev;

	list_for_each_entry(vlan, &vg->vlan_list, vlist) {
		vlan_dev = br_vlan_get_upper_bind_vlan_dev(p->br->dev,
							   vlan->vid);
		if (vlan_dev) {
			if (br_vlan_is_dev_up(p->dev)) {
				if (netif_carrier_ok(p->br->dev))
					netif_carrier_on(vlan_dev);
			} else {
				br_vlan_set_vlan_dev_state(p->br, vlan_dev);
			}
		}
	}
}
1539
1540static void br_vlan_upper_change(struct net_device *dev,
1541 struct net_device *upper_dev,
1542 bool linking)
1543{
1544 struct net_bridge *br = netdev_priv(dev);
1545
1546 if (!br_vlan_is_bind_vlan_dev(upper_dev))
1547 return;
1548
1549 if (linking) {
1550 br_vlan_set_vlan_dev_state(br, upper_dev);
1551 br_opt_toggle(br, BROPT_VLAN_BRIDGE_BINDING, true);
1552 } else {
1553 br_opt_toggle(br, BROPT_VLAN_BRIDGE_BINDING,
1554 br_vlan_has_upper_bind_vlan_dev(dev));
1555 }
1556}
1557
/* Walk context for br_vlan_link_state_change_fn(): carries the bridge
 * whose vlan uppers are being refreshed.
 */
struct br_vlan_link_state_walk_data {
	struct net_bridge *br;
};
1561
1562static int br_vlan_link_state_change_fn(struct net_device *vlan_dev,
1563 struct netdev_nested_priv *priv)
1564{
1565 struct br_vlan_link_state_walk_data *data = priv->data;
1566
1567 if (br_vlan_is_bind_vlan_dev(vlan_dev))
1568 br_vlan_set_vlan_dev_state(data->br, vlan_dev);
1569
1570 return 0;
1571}
1572
1573static void br_vlan_link_state_change(struct net_device *dev,
1574 struct net_bridge *br)
1575{
1576 struct br_vlan_link_state_walk_data data = {
1577 .br = br
1578 };
1579 struct netdev_nested_priv priv = {
1580 .data = (void *)&data,
1581 };
1582
1583 rcu_read_lock();
1584 netdev_walk_all_upper_dev_rcu(dev, br_vlan_link_state_change_fn,
1585 &priv);
1586 rcu_read_unlock();
1587}
1588
1589
1590static void nbp_vlan_set_vlan_dev_state(struct net_bridge_port *p, u16 vid)
1591{
1592 struct net_device *vlan_dev;
1593
1594 if (!br_opt_get(p->br, BROPT_VLAN_BRIDGE_BINDING))
1595 return;
1596
1597 vlan_dev = br_vlan_get_upper_bind_vlan_dev(p->br->dev, vid);
1598 if (vlan_dev)
1599 br_vlan_set_vlan_dev_state(p->br, vlan_dev);
1600}
1601
1602
1603int br_vlan_bridge_event(struct net_device *dev, unsigned long event, void *ptr)
1604{
1605 struct netdev_notifier_changeupper_info *info;
1606 struct net_bridge *br = netdev_priv(dev);
1607 int vlcmd = 0, ret = 0;
1608 bool changed = false;
1609
1610 switch (event) {
1611 case NETDEV_REGISTER:
1612 ret = br_vlan_add(br, br->default_pvid,
1613 BRIDGE_VLAN_INFO_PVID |
1614 BRIDGE_VLAN_INFO_UNTAGGED |
1615 BRIDGE_VLAN_INFO_BRENTRY, &changed, NULL);
1616 vlcmd = RTM_NEWVLAN;
1617 break;
1618 case NETDEV_UNREGISTER:
1619 changed = !br_vlan_delete(br, br->default_pvid);
1620 vlcmd = RTM_DELVLAN;
1621 break;
1622 case NETDEV_CHANGEUPPER:
1623 info = ptr;
1624 br_vlan_upper_change(dev, info->upper_dev, info->linking);
1625 break;
1626
1627 case NETDEV_CHANGE:
1628 case NETDEV_UP:
1629 if (!br_opt_get(br, BROPT_VLAN_BRIDGE_BINDING))
1630 break;
1631 br_vlan_link_state_change(dev, br);
1632 break;
1633 }
1634 if (changed)
1635 br_vlan_notify(br, NULL, br->default_pvid, 0, vlcmd);
1636
1637 return ret;
1638}
1639
1640
1641void br_vlan_port_event(struct net_bridge_port *p, unsigned long event)
1642{
1643 if (!br_opt_get(p->br, BROPT_VLAN_BRIDGE_BINDING))
1644 return;
1645
1646 switch (event) {
1647 case NETDEV_CHANGE:
1648 case NETDEV_DOWN:
1649 case NETDEV_UP:
1650 br_vlan_set_all_vlan_dev_state(p);
1651 break;
1652 }
1653}
1654
1655static bool br_vlan_stats_fill(struct sk_buff *skb,
1656 const struct net_bridge_vlan *v)
1657{
1658 struct pcpu_sw_netstats stats;
1659 struct nlattr *nest;
1660
1661 nest = nla_nest_start(skb, BRIDGE_VLANDB_ENTRY_STATS);
1662 if (!nest)
1663 return false;
1664
1665 br_vlan_get_stats(v, &stats);
1666 if (nla_put_u64_64bit(skb, BRIDGE_VLANDB_STATS_RX_BYTES, stats.rx_bytes,
1667 BRIDGE_VLANDB_STATS_PAD) ||
1668 nla_put_u64_64bit(skb, BRIDGE_VLANDB_STATS_RX_PACKETS,
1669 stats.rx_packets, BRIDGE_VLANDB_STATS_PAD) ||
1670 nla_put_u64_64bit(skb, BRIDGE_VLANDB_STATS_TX_BYTES, stats.tx_bytes,
1671 BRIDGE_VLANDB_STATS_PAD) ||
1672 nla_put_u64_64bit(skb, BRIDGE_VLANDB_STATS_TX_PACKETS,
1673 stats.tx_packets, BRIDGE_VLANDB_STATS_PAD))
1674 goto out_err;
1675
1676 nla_nest_end(skb, nest);
1677
1678 return true;
1679
1680out_err:
1681 nla_nest_cancel(skb, nest);
1682 return false;
1683}
1684
1685
1686static bool br_vlan_fill_vids(struct sk_buff *skb, u16 vid, u16 vid_range,
1687 const struct net_bridge_vlan *v_opts,
1688 u16 flags,
1689 bool dump_stats)
1690{
1691 struct bridge_vlan_info info;
1692 struct nlattr *nest;
1693
1694 nest = nla_nest_start(skb, BRIDGE_VLANDB_ENTRY);
1695 if (!nest)
1696 return false;
1697
1698 memset(&info, 0, sizeof(info));
1699 info.vid = vid;
1700 if (flags & BRIDGE_VLAN_INFO_UNTAGGED)
1701 info.flags |= BRIDGE_VLAN_INFO_UNTAGGED;
1702 if (flags & BRIDGE_VLAN_INFO_PVID)
1703 info.flags |= BRIDGE_VLAN_INFO_PVID;
1704
1705 if (nla_put(skb, BRIDGE_VLANDB_ENTRY_INFO, sizeof(info), &info))
1706 goto out_err;
1707
1708 if (vid_range && vid < vid_range &&
1709 !(flags & BRIDGE_VLAN_INFO_PVID) &&
1710 nla_put_u16(skb, BRIDGE_VLANDB_ENTRY_RANGE, vid_range))
1711 goto out_err;
1712
1713 if (v_opts) {
1714 if (!br_vlan_opts_fill(skb, v_opts))
1715 goto out_err;
1716
1717 if (dump_stats && !br_vlan_stats_fill(skb, v_opts))
1718 goto out_err;
1719 }
1720
1721 nla_nest_end(skb, nest);
1722
1723 return true;
1724
1725out_err:
1726 nla_nest_cancel(skb, nest);
1727 return false;
1728}
1729
1730static size_t rtnl_vlan_nlmsg_size(void)
1731{
1732 return NLMSG_ALIGN(sizeof(struct br_vlan_msg))
1733 + nla_total_size(0)
1734 + nla_total_size(sizeof(u16))
1735 + nla_total_size(sizeof(struct bridge_vlan_info))
1736 + br_vlan_opts_nl_size();
1737}
1738
/* Send an RTM_NEWVLAN/RTM_DELVLAN rtnetlink notification for the vlan
 * range [@vid, @vid_range] on port @p, or on bridge @br itself when
 * @p is NULL.  On allocation or fill failure the error is reported to
 * the RTNLGRP_BRVLAN group via rtnl_set_sk_err().
 */
void br_vlan_notify(const struct net_bridge *br,
		    const struct net_bridge_port *p,
		    u16 vid, u16 vid_range,
		    int cmd)
{
	struct net_bridge_vlan_group *vg;
	struct net_bridge_vlan *v = NULL;
	struct br_vlan_msg *bvm;
	struct nlmsghdr *nlh;
	struct sk_buff *skb;
	int err = -ENOBUFS;
	struct net *net;
	u16 flags = 0;
	int ifindex;

	/* right now notifications are done only under rtnl */
	ASSERT_RTNL();

	if (p) {
		ifindex = p->dev->ifindex;
		vg = nbp_vlan_group(p);
		net = dev_net(p->dev);
	} else {
		ifindex = br->dev->ifindex;
		vg = br_vlan_group(br);
		net = dev_net(br->dev);
	}

	skb = nlmsg_new(rtnl_vlan_nlmsg_size(), GFP_KERNEL);
	if (!skb)
		goto out_err;

	err = -EMSGSIZE;
	nlh = nlmsg_put(skb, 0, 0, cmd, sizeof(*bvm), 0);
	if (!nlh)
		goto out_err;
	bvm = nlmsg_data(nlh);
	memset(bvm, 0, sizeof(*bvm));
	bvm->family = AF_BRIDGE;
	bvm->ifindex = ifindex;

	switch (cmd) {
	case RTM_NEWVLAN:
		/* need the live entry to report its flags/options */
		v = br_vlan_find(vg, vid);
		if (!v || !br_vlan_should_use(v))
			goto out_kfree;

		flags = v->flags;
		if (br_get_pvid(vg) == v->vid)
			flags |= BRIDGE_VLAN_INFO_PVID;
		break;
	case RTM_DELVLAN:
		break;
	default:
		goto out_kfree;
	}

	if (!br_vlan_fill_vids(skb, vid, vid_range, v, flags, false))
		goto out_err;

	nlmsg_end(skb, nlh);
	rtnl_notify(skb, net, 0, RTNLGRP_BRVLAN, NULL, GFP_KERNEL);
	return;

out_err:
	/* note: falls through to free the skb (kfree_skb(NULL) is ok) */
	rtnl_set_sk_err(net, RTNLGRP_BRVLAN, err);
out_kfree:
	kfree_skb(skb);
}
1809
1810static int br_vlan_replay_one(struct notifier_block *nb,
1811 struct net_device *dev,
1812 struct switchdev_obj_port_vlan *vlan,
1813 const void *ctx, unsigned long action,
1814 struct netlink_ext_ack *extack)
1815{
1816 struct switchdev_notifier_port_obj_info obj_info = {
1817 .info = {
1818 .dev = dev,
1819 .extack = extack,
1820 .ctx = ctx,
1821 },
1822 .obj = &vlan->obj,
1823 };
1824 int err;
1825
1826 err = nb->notifier_call(nb, action, &obj_info);
1827 return notifier_to_errno(err);
1828}
1829
1830int br_vlan_replay(struct net_device *br_dev, struct net_device *dev,
1831 const void *ctx, bool adding, struct notifier_block *nb,
1832 struct netlink_ext_ack *extack)
1833{
1834 struct net_bridge_vlan_group *vg;
1835 struct net_bridge_vlan *v;
1836 struct net_bridge_port *p;
1837 struct net_bridge *br;
1838 unsigned long action;
1839 int err = 0;
1840 u16 pvid;
1841
1842 ASSERT_RTNL();
1843
1844 if (!nb)
1845 return 0;
1846
1847 if (!netif_is_bridge_master(br_dev))
1848 return -EINVAL;
1849
1850 if (!netif_is_bridge_master(dev) && !netif_is_bridge_port(dev))
1851 return -EINVAL;
1852
1853 if (netif_is_bridge_master(dev)) {
1854 br = netdev_priv(dev);
1855 vg = br_vlan_group(br);
1856 p = NULL;
1857 } else {
1858 p = br_port_get_rtnl(dev);
1859 if (WARN_ON(!p))
1860 return -EINVAL;
1861 vg = nbp_vlan_group(p);
1862 br = p->br;
1863 }
1864
1865 if (!vg)
1866 return 0;
1867
1868 if (adding)
1869 action = SWITCHDEV_PORT_OBJ_ADD;
1870 else
1871 action = SWITCHDEV_PORT_OBJ_DEL;
1872
1873 pvid = br_get_pvid(vg);
1874
1875 list_for_each_entry(v, &vg->vlan_list, vlist) {
1876 struct switchdev_obj_port_vlan vlan = {
1877 .obj.orig_dev = dev,
1878 .obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN,
1879 .flags = br_vlan_flags(v, pvid),
1880 .vid = v->vid,
1881 };
1882
1883 if (!br_vlan_should_use(v))
1884 continue;
1885
1886 err = br_vlan_replay_one(nb, dev, &vlan, ctx, action, extack);
1887 if (err)
1888 return err;
1889 }
1890
1891 return err;
1892}
1893
1894
1895bool br_vlan_can_enter_range(const struct net_bridge_vlan *v_curr,
1896 const struct net_bridge_vlan *range_end)
1897{
1898 return v_curr->vid - range_end->vid == 1 &&
1899 range_end->flags == v_curr->flags &&
1900 br_vlan_opts_eq_range(v_curr, range_end);
1901}
1902
/* Dump the vlan (or, with BRIDGE_VLANDB_DUMPF_GLOBAL, global-options)
 * entries of a single device into @skb, coalescing consecutive
 * compatible entries into ranges.  cb->args[1] carries the per-device
 * vlan index for continuation across message parts; -EMSGSIZE means
 * "skb full, resume later".
 */
static int br_vlan_dump_dev(const struct net_device *dev,
			    struct sk_buff *skb,
			    struct netlink_callback *cb,
			    u32 dump_flags)
{
	struct net_bridge_vlan *v, *range_start = NULL, *range_end = NULL;
	bool dump_global = !!(dump_flags & BRIDGE_VLANDB_DUMPF_GLOBAL);
	bool dump_stats = !!(dump_flags & BRIDGE_VLANDB_DUMPF_STATS);
	struct net_bridge_vlan_group *vg;
	int idx = 0, s_idx = cb->args[1];
	struct nlmsghdr *nlh = NULL;
	struct net_bridge_port *p;
	struct br_vlan_msg *bvm;
	struct net_bridge *br;
	int err = 0;
	u16 pvid;

	if (!netif_is_bridge_master(dev) && !netif_is_bridge_port(dev))
		return -EINVAL;

	if (netif_is_bridge_master(dev)) {
		br = netdev_priv(dev);
		vg = br_vlan_group_rcu(br);
		p = NULL;
	} else {
		/* global options are dumped only for bridge devices */
		if (dump_global)
			return 0;

		p = br_port_get_rcu(dev);
		if (WARN_ON(!p))
			return -EINVAL;
		vg = nbp_vlan_group_rcu(p);
		br = p->br;
	}

	if (!vg)
		return 0;

	nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
			RTM_NEWVLAN, sizeof(*bvm), NLM_F_MULTI);
	if (!nlh)
		return -EMSGSIZE;
	bvm = nlmsg_data(nlh);
	memset(bvm, 0, sizeof(*bvm));
	bvm->family = PF_BRIDGE;
	bvm->ifindex = dev->ifindex;
	pvid = br_get_pvid(vg);

	/* idx stays at the beginning of the pending range until the range
	 * is actually written out, so a resumed dump re-emits it whole
	 */
	list_for_each_entry_rcu(v, &vg->vlan_list, vlist) {
		if (!dump_global && !br_vlan_should_use(v))
			continue;
		if (idx < s_idx) {
			idx++;
			continue;
		}

		if (!range_start) {
			/* open a new range */
			range_start = v;
			range_end = v;
			continue;
		}

		if (dump_global) {
			if (br_vlan_global_opts_can_enter_range(v, range_end))
				goto update_end;
			if (!br_vlan_global_opts_fill(skb, range_start->vid,
						      range_end->vid,
						      range_start)) {
				err = -EMSGSIZE;
				break;
			}
			/* advance past the vlans just written out */
			idx += range_end->vid - range_start->vid + 1;

			range_start = v;
		} else if (dump_stats || v->vid == pvid ||
			   !br_vlan_can_enter_range(v, range_end)) {
			u16 vlan_flags = br_vlan_flags(range_start, pvid);

			if (!br_vlan_fill_vids(skb, range_start->vid,
					       range_end->vid, range_start,
					       vlan_flags, dump_stats)) {
				err = -EMSGSIZE;
				break;
			}
			/* advance past the vlans just written out */
			idx += range_end->vid - range_start->vid + 1;

			range_start = v;
		}
update_end:
		range_end = v;
	}

	/* if the loop completed without error, flush the final pending
	 * range (a single vlan when range_start == range_end)
	 */
	if (!err && range_start) {
		if (dump_global &&
		    !br_vlan_global_opts_fill(skb, range_start->vid,
					      range_end->vid, range_start))
			err = -EMSGSIZE;
		else if (!dump_global &&
			 !br_vlan_fill_vids(skb, range_start->vid,
					    range_end->vid, range_start,
					    br_vlan_flags(range_start, pvid),
					    dump_stats))
			err = -EMSGSIZE;
	}

	/* remember where to resume, or reset when this device is done */
	cb->args[1] = err ? idx : 0;

	nlmsg_end(skb, nlh);

	return err;
}
2023
/* Attribute policy for RTM_GETVLAN dump requests. */
static const struct nla_policy br_vlan_db_dump_pol[BRIDGE_VLANDB_DUMP_MAX + 1] = {
	[BRIDGE_VLANDB_DUMP_FLAGS] = { .type = NLA_U32 },
};
2027
/* RTM_GETVLAN dump handler: dumps vlans of a single device when
 * bvm->ifindex is set, otherwise of every netdev in the namespace.
 * cb->args[0] tracks the device index and cb->args[1] (managed by
 * br_vlan_dump_dev()) the in-device vlan index for multi-part dumps.
 */
static int br_vlan_rtm_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct nlattr *dtb[BRIDGE_VLANDB_DUMP_MAX + 1];
	int idx = 0, err = 0, s_idx = cb->args[0];
	struct net *net = sock_net(skb->sk);
	struct br_vlan_msg *bvm;
	struct net_device *dev;
	u32 dump_flags = 0;

	err = nlmsg_parse(cb->nlh, sizeof(*bvm), dtb, BRIDGE_VLANDB_DUMP_MAX,
			  br_vlan_db_dump_pol, cb->extack);
	if (err < 0)
		return err;

	bvm = nlmsg_data(cb->nlh);
	if (dtb[BRIDGE_VLANDB_DUMP_FLAGS])
		dump_flags = nla_get_u32(dtb[BRIDGE_VLANDB_DUMP_FLAGS]);

	rcu_read_lock();
	if (bvm->ifindex) {
		dev = dev_get_by_index_rcu(net, bvm->ifindex);
		if (!dev) {
			err = -ENODEV;
			goto out_err;
		}
		err = br_vlan_dump_dev(dev, skb, cb, dump_flags);
		/* -EMSGSIZE only means "continue in the next part" */
		if (err && err != -EMSGSIZE)
			goto out_err;
	} else {
		for_each_netdev_rcu(net, dev) {
			if (idx < s_idx)
				goto skip;

			err = br_vlan_dump_dev(dev, skb, cb, dump_flags);
			if (err == -EMSGSIZE)
				break;
skip:
			idx++;
		}
	}
	cb->args[0] = idx;
	rcu_read_unlock();

	return skb->len;

out_err:
	rcu_read_unlock();

	return err;
}
2078
/* Attribute policy for a single BRIDGE_VLANDB_ENTRY nest. */
static const struct nla_policy br_vlan_db_policy[BRIDGE_VLANDB_ENTRY_MAX + 1] = {
	[BRIDGE_VLANDB_ENTRY_INFO] =
		NLA_POLICY_EXACT_LEN(sizeof(struct bridge_vlan_info)),
	[BRIDGE_VLANDB_ENTRY_RANGE] = { .type = NLA_U16 },
	[BRIDGE_VLANDB_ENTRY_STATE] = { .type = NLA_U8 },
	[BRIDGE_VLANDB_ENTRY_TUNNEL_INFO] = { .type = NLA_NESTED },
	[BRIDGE_VLANDB_ENTRY_MCAST_ROUTER] = { .type = NLA_U8 },
};
2087
/* Process one BRIDGE_VLANDB_ENTRY attribute of an RTM_NEWVLAN or
 * RTM_DELVLAN request on @dev (bridge master or bridge port): parse the
 * entry, add or delete the vlan (range), then handle per-vlan options.
 */
static int br_vlan_rtm_process_one(struct net_device *dev,
				   const struct nlattr *attr,
				   int cmd, struct netlink_ext_ack *extack)
{
	struct bridge_vlan_info *vinfo, vrange_end, *vinfo_last = NULL;
	struct nlattr *tb[BRIDGE_VLANDB_ENTRY_MAX + 1];
	bool changed = false, skip_processing = false;
	struct net_bridge_vlan_group *vg;
	struct net_bridge_port *p = NULL;
	int err = 0, cmdmap = 0;
	struct net_bridge *br;

	if (netif_is_bridge_master(dev)) {
		br = netdev_priv(dev);
		vg = br_vlan_group(br);
	} else {
		p = br_port_get_rtnl(dev);
		if (WARN_ON(!p))
			return -ENODEV;
		br = p->br;
		vg = nbp_vlan_group(p);
	}

	if (WARN_ON(!vg))
		return -ENODEV;

	err = nla_parse_nested(tb, BRIDGE_VLANDB_ENTRY_MAX, attr,
			       br_vlan_db_policy, extack);
	if (err)
		return err;

	if (!tb[BRIDGE_VLANDB_ENTRY_INFO]) {
		NL_SET_ERR_MSG_MOD(extack, "Missing vlan entry info");
		return -EINVAL;
	}
	memset(&vrange_end, 0, sizeof(vrange_end));

	vinfo = nla_data(tb[BRIDGE_VLANDB_ENTRY_INFO]);
	/* ranges are expressed via BRIDGE_VLANDB_ENTRY_RANGE here, not via
	 * the legacy RANGE_BEGIN/RANGE_END flags
	 */
	if (vinfo->flags & (BRIDGE_VLAN_INFO_RANGE_BEGIN |
			    BRIDGE_VLAN_INFO_RANGE_END)) {
		NL_SET_ERR_MSG_MOD(extack, "Old-style vlan ranges are not allowed when using RTM vlan calls");
		return -EINVAL;
	}
	if (!br_vlan_valid_id(vinfo->vid, extack))
		return -EINVAL;

	if (tb[BRIDGE_VLANDB_ENTRY_RANGE]) {
		vrange_end.vid = nla_get_u16(tb[BRIDGE_VLANDB_ENTRY_RANGE]);
		/* synthesize the legacy range flags for the common helpers:
		 * vinfo_last becomes the range start, vinfo the range end
		 */
		vrange_end.flags = BRIDGE_VLAN_INFO_RANGE_END | vinfo->flags;
		vinfo->flags |= BRIDGE_VLAN_INFO_RANGE_BEGIN;

		vinfo_last = vinfo;
		vinfo = &vrange_end;

		if (!br_vlan_valid_id(vinfo->vid, extack) ||
		    !br_vlan_valid_range(vinfo, vinfo_last, extack))
			return -EINVAL;
	}

	switch (cmd) {
	case RTM_NEWVLAN:
		cmdmap = RTM_SETLINK;
		/* ONLY_OPTS means: don't touch membership, just set options */
		skip_processing = !!(vinfo->flags & BRIDGE_VLAN_INFO_ONLY_OPTS);
		break;
	case RTM_DELVLAN:
		cmdmap = RTM_DELLINK;
		break;
	}

	if (!skip_processing) {
		struct bridge_vlan_info *tmp_last = vinfo_last;

		/* br_process_vlan_info may overwrite tmp_last */
		err = br_process_vlan_info(br, p, cmdmap, vinfo, &tmp_last,
					   &changed, extack);

		/* notify first even if the processing itself failed */
		if (changed)
			br_ifinfo_notify(cmdmap, br, p);

		if (err)
			return err;
	}

	/* deal with per-vlan options */
	if (cmd == RTM_NEWVLAN) {
		struct net_bridge_vlan *range_start, *range_end;

		if (vinfo_last) {
			range_start = br_vlan_find(vg, vinfo_last->vid);
			range_end = br_vlan_find(vg, vinfo->vid);
		} else {
			range_start = br_vlan_find(vg, vinfo->vid);
			range_end = range_start;
		}

		err = br_vlan_process_options(br, p, range_start, range_end,
					      tb, extack);
	}

	return err;
}
2192
2193static int br_vlan_rtm_process(struct sk_buff *skb, struct nlmsghdr *nlh,
2194 struct netlink_ext_ack *extack)
2195{
2196 struct net *net = sock_net(skb->sk);
2197 struct br_vlan_msg *bvm;
2198 struct net_device *dev;
2199 struct nlattr *attr;
2200 int err, vlans = 0;
2201 int rem;
2202
2203
2204 err = nlmsg_parse(nlh, sizeof(*bvm), NULL, BRIDGE_VLANDB_MAX, NULL,
2205 extack);
2206 if (err < 0)
2207 return err;
2208
2209 bvm = nlmsg_data(nlh);
2210 dev = __dev_get_by_index(net, bvm->ifindex);
2211 if (!dev)
2212 return -ENODEV;
2213
2214 if (!netif_is_bridge_master(dev) && !netif_is_bridge_port(dev)) {
2215 NL_SET_ERR_MSG_MOD(extack, "The device is not a valid bridge or bridge port");
2216 return -EINVAL;
2217 }
2218
2219 nlmsg_for_each_attr(attr, nlh, sizeof(*bvm), rem) {
2220 switch (nla_type(attr)) {
2221 case BRIDGE_VLANDB_ENTRY:
2222 err = br_vlan_rtm_process_one(dev, attr,
2223 nlh->nlmsg_type,
2224 extack);
2225 break;
2226 case BRIDGE_VLANDB_GLOBAL_OPTIONS:
2227 err = br_vlan_rtm_process_global_options(dev, attr,
2228 nlh->nlmsg_type,
2229 extack);
2230 break;
2231 default:
2232 continue;
2233 }
2234
2235 vlans++;
2236 if (err)
2237 break;
2238 }
2239 if (!vlans) {
2240 NL_SET_ERR_MSG_MOD(extack, "No vlans found to process");
2241 err = -EINVAL;
2242 }
2243
2244 return err;
2245}
2246
/* Register the PF_BRIDGE vlan rtnetlink handlers: GETVLAN dump plus
 * NEWVLAN/DELVLAN doit handlers.
 */
void br_vlan_rtnl_init(void)
{
	rtnl_register_module(THIS_MODULE, PF_BRIDGE, RTM_GETVLAN, NULL,
			     br_vlan_rtm_dump, 0);
	rtnl_register_module(THIS_MODULE, PF_BRIDGE, RTM_NEWVLAN,
			     br_vlan_rtm_process, NULL, 0);
	rtnl_register_module(THIS_MODULE, PF_BRIDGE, RTM_DELVLAN,
			     br_vlan_rtm_process, NULL, 0);
}
2256
/* Unregister the handlers installed by br_vlan_rtnl_init(). */
void br_vlan_rtnl_uninit(void)
{
	rtnl_unregister(PF_BRIDGE, RTM_GETVLAN);
	rtnl_unregister(PF_BRIDGE, RTM_NEWVLAN);
	rtnl_unregister(PF_BRIDGE, RTM_DELVLAN);
}
2263