/*
 *	Userspace interface
 *	Linux ethernet bridge
 *
 *	Authors:
 *	Lennert Buytenhek		<buytenh@gnu.org>
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/netpoll.h>
#include <linux/ethtool.h>
#include <linux/if_arp.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/rtnetlink.h>
#include <linux/if_ether.h>
#include <linux/slab.h>
#include <net/dsa.h>
#include <net/sock.h>
#include <linux/if_vlan.h>
#include <net/switchdev.h>

#include "br_private.h"

/*
 * Determine the initial STP path cost for a port from the link speed
 * reported by ethtool.  The values for 10/100/1000/10000 Mbps follow the
 * 802.1D-1998 recommended defaults; devices that do not report a speed
 * fall back to the per-media heuristics below.
 */
static int port_cost(struct net_device *dev)
{
	struct ethtool_link_ksettings ecmd;

	if (!__ethtool_get_link_ksettings(dev, &ecmd)) {
		switch (ecmd.base.speed) {
		case SPEED_10000:
			return 2;
		case SPEED_1000:
			return 4;
		case SPEED_100:
			return 19;
		case SPEED_10:
			return 100;
		}
	}

	/* Old silly heuristics based on name */
	if (!strncmp(dev->name, "lec", 3))
		return 7;

	if (!strncmp(dev->name, "plip", 4))
		return 2500;

	return 100;	/* assume old 10Mbps */
}


/* Check for port carrier transitions. */
void br_port_carrier_check(struct net_bridge_port *p)
{
	struct net_device *dev = p->dev;
	struct net_bridge *br = p->br;

	if (!(p->flags & BR_ADMIN_COST) &&
	    netif_running(dev) && netif_oper_up(dev))
		p->path_cost = port_cost(dev);

	if (!netif_running(br->dev))
		return;

	spin_lock_bh(&br->lock);
	if (netif_running(dev) && netif_oper_up(dev)) {
		if (p->state == BR_STATE_DISABLED)
			br_stp_enable_port(p);
	} else {
		if (p->state != BR_STATE_DISABLED)
			br_stp_disable_port(p);
	}
	spin_unlock_bh(&br->lock);
}

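/* Put a port into promiscuous mode.  Once the device sees all traffic
 * anyway, the statically synced FDB entries are no longer needed on it,
 * so drop them.
 */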
static void br_port_set_promisc(struct net_bridge_port *p)
{
	int err = 0;

	if (br_promisc_port(p))
		return;

	err = dev_set_promiscuity(p->dev, 1);
	if (err)
		return;

	br_fdb_unsync_static(p->br, p);
	p->flags |= BR_PROMISC;
}

static void br_port_clear_promisc(struct net_bridge_port *p)
{
	int err;

	/* Check if the port is already non-promisc or if it doesn't
	 * support UNICAST filtering.  Without unicast filtering support
	 * we'll end up re-enabling promisc mode anyway, so just check for
	 * it here.
	 */
	if (!br_promisc_port(p) || !(p->dev->priv_flags & IFF_UNICAST_FLT))
		return;

	/* Since we'll be clearing the promisc mode, program the port
	 * first so that we don't have an interruption in traffic.
	 */
	err = br_fdb_sync_static(p->br, p);
	if (err)
		return;

	dev_set_promiscuity(p->dev, -1);
	p->flags &= ~BR_PROMISC;
}

/* When a port is added or removed or when certain port flags
 * change, this function is called to automatically manage the
 * promiscuity setting of all the bridge ports.  We are always called
 * under RTNL so we can skip using rcu primitives.
 */
void br_manage_promisc(struct net_bridge *br)
{
	struct net_bridge_port *p;
	bool set_all = false;

	/* If vlan filtering is disabled or the bridge interface is placed
	 * into promiscuous mode, place all ports in promiscuous mode.
	 */
	if ((br->dev->flags & IFF_PROMISC) || !br_vlan_enabled(br->dev))
		set_all = true;

	list_for_each_entry(p, &br->port_list, list) {
		if (set_all) {
			br_port_set_promisc(p);
		} else {
			/* If the number of auto-ports is <= 1, then all other
			 * ports will have their output configuration
			 * statically specified through fdbs.  Since ingress
			 * on the auto-port becomes forwarding/egress to other
			 * ports and egress configuration is statically known,
			 * we can say that ingress configuration of the
			 * auto-port is also statically known.
			 * This lets us disable promiscuous mode and write
			 * this config to hw.
			 */
			if (br->auto_cnt == 0 ||
			    (br->auto_cnt == 1 && br_auto_port(p)))
				br_port_clear_promisc(p);
			else
				br_port_set_promisc(p);
		}
	}
}

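/* Recount the ports that rely on learning/flooding ("auto" ports) and
 * re-evaluate port promiscuity if the count changed.
 */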
static void nbp_update_port_count(struct net_bridge *br)
{
	struct net_bridge_port *p;
	u32 cnt = 0;

	list_for_each_entry(p, &br->port_list, list) {
		if (br_auto_port(p))
			cnt++;
	}
	if (br->auto_cnt != cnt) {
		br->auto_cnt = cnt;
		br_manage_promisc(br);
	}
}

static void nbp_delete_promisc(struct net_bridge_port *p)
{
	/* If the port is currently promiscuous, unset promiscuity.
	 * Otherwise, it is a static port, so remove all addresses
	 * from it.
	 */
	dev_set_allmulti(p->dev, -1);
	if (br_promisc_port(p))
		dev_set_promiscuity(p->dev, -1);
	else
		br_fdb_unsync_static(p->br, p);
}

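/* kobject release callback: the last reference to the port is gone, so the
 * net_bridge_port structure itself can be freed.
 */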
static void release_nbp(struct kobject *kobj)
{
	struct net_bridge_port *p
		= container_of(kobj, struct net_bridge_port, kobj);
	kfree(p);
}

static struct kobj_type brport_ktype = {
#ifdef CONFIG_SYSFS
	.sysfs_ops = &brport_sysfs_ops,
#endif
	.release = release_nbp,
};

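/* Drop the port's reference on its net_device and release the port kobject.
 * Runs from an RCU callback, once no reader can still see the port.
 */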
static void destroy_nbp(struct net_bridge_port *p)
{
	struct net_device *dev = p->dev;

	p->br = NULL;
	p->dev = NULL;
	dev_put(dev);

	kobject_put(&p->kobj);
}

static void destroy_nbp_rcu(struct rcu_head *head)
{
	struct net_bridge_port *p =
		container_of(head, struct net_bridge_port, rcu);
	destroy_nbp(p);
}

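/* Largest forwarding headroom required by any port device. */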
static unsigned get_max_headroom(struct net_bridge *br)
{
	unsigned max_headroom = 0;
	struct net_bridge_port *p;

	list_for_each_entry(p, &br->port_list, list) {
		unsigned dev_headroom = netdev_get_fwd_headroom(p->dev);

		if (dev_headroom > max_headroom)
			max_headroom = dev_headroom;
	}

	return max_headroom;
}

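/* Propagate a new RX headroom value to every port and record it as the
 * bridge device's own needed_headroom.
 */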
static void update_headroom(struct net_bridge *br, int new_hr)
{
	struct net_bridge_port *p;

	list_for_each_entry(p, &br->port_list, list)
		netdev_set_rx_headroom(p->dev, new_hr);

	br->dev->needed_headroom = new_hr;
}

/* Deleting a port (interface) from a bridge is done in two steps via RCU.
 * The first step marks the device as down: that stops the port's timers
 * and prevents new packets from flowing through it.
 *
 * Final cleanup does not occur until all CPUs have finished processing
 * packets already in flight.
 *
 * Protected from concurrent admin operations by the RTNL mutex.
 */
static void del_nbp(struct net_bridge_port *p)
{
	struct net_bridge *br = p->br;
	struct net_device *dev = p->dev;

	sysfs_remove_link(br->ifobj, p->dev->name);

	nbp_delete_promisc(p);

	spin_lock_bh(&br->lock);
	br_stp_disable_port(p);
	spin_unlock_bh(&br->lock);

	br_ifinfo_notify(RTM_DELLINK, p);

	list_del_rcu(&p->list);
	if (netdev_get_fwd_headroom(dev) == br->dev->needed_headroom)
		update_headroom(br, get_max_headroom(br));
	netdev_reset_rx_headroom(dev);

	nbp_vlan_flush(p);
	br_fdb_delete_by_port(br, p, 0, 1);
	switchdev_deferred_process();

	nbp_update_port_count(br);

	netdev_upper_dev_unlink(dev, br->dev);

	dev->priv_flags &= ~IFF_BRIDGE_PORT;

	netdev_rx_handler_unregister(dev);

	br_multicast_del_port(p);

	kobject_uevent(&p->kobj, KOBJ_REMOVE);
	kobject_del(&p->kobj);

	br_netpoll_disable(p);

	call_rcu(&p->rcu, destroy_nbp_rcu);
}

/* Delete bridge device */
void br_dev_delete(struct net_device *dev, struct list_head *head)
{
	struct net_bridge *br = netdev_priv(dev);
	struct net_bridge_port *p, *n;

	list_for_each_entry_safe(p, n, &br->port_list, list) {
		del_nbp(p);
	}

	br_fdb_delete_by_port(br, NULL, 0, 1);

	cancel_delayed_work_sync(&br->gc_work);

	br_sysfs_delbr(br->dev);
	unregister_netdevice_queue(br->dev, head);
}

/* find an available port number */
static int find_portno(struct net_bridge *br)
{
	int index;
	struct net_bridge_port *p;
	unsigned long *inuse;

	inuse = kcalloc(BITS_TO_LONGS(BR_MAX_PORTS), sizeof(unsigned long),
			GFP_KERNEL);
	if (!inuse)
		return -ENOMEM;

	set_bit(0, inuse);	/* zero is reserved */
	list_for_each_entry(p, &br->port_list, list) {
		set_bit(p->port_no, inuse);
	}
	index = find_first_zero_bit(inuse, BR_MAX_PORTS);
	kfree(inuse);

	return (index >= BR_MAX_PORTS) ? -EXFULL : index;
}

/* called with RTNL but without bridge lock */
static struct net_bridge_port *new_nbp(struct net_bridge *br,
				       struct net_device *dev)
{
	struct net_bridge_port *p;
	int index, err;

	index = find_portno(br);
	if (index < 0)
		return ERR_PTR(index);

	p = kzalloc(sizeof(*p), GFP_KERNEL);
	if (p == NULL)
		return ERR_PTR(-ENOMEM);

	p->br = br;
	dev_hold(dev);
	p->dev = dev;
	p->path_cost = port_cost(dev);
	p->priority = 0x8000 >> BR_PORT_BITS;
	p->port_no = index;
	p->flags = BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD | BR_BCAST_FLOOD;
	br_init_port(p);
	br_set_state(p, BR_STATE_DISABLED);
	br_stp_port_timer_init(p);
	err = br_multicast_add_port(p);
	if (err) {
		dev_put(dev);
		kfree(p);
		p = ERR_PTR(err);
	}

	return p;
}

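/* Create a new bridge device.  This is the path taken by the SIOCBRADDBR
 * ioctl (e.g. "brctl addbr br0"); bridges created via netlink go through
 * the br_link_ops rtnl path instead.
 */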
int br_add_bridge(struct net *net, const char *name)
{
	struct net_device *dev;
	int res;

	dev = alloc_netdev(sizeof(struct net_bridge), name, NET_NAME_UNKNOWN,
			   br_dev_setup);

	if (!dev)
		return -ENOMEM;

	dev_net_set(dev, net);
	dev->rtnl_link_ops = &br_link_ops;

	res = register_netdev(dev);
	if (res)
		free_netdev(dev);
	return res;
}

int br_del_bridge(struct net *net, const char *name)
{
	struct net_device *dev;
	int ret = 0;

	rtnl_lock();
	dev = __dev_get_by_name(net, name);
	if (dev == NULL)
		ret = -ENXIO;	/* Could not find device */

	else if (!(dev->priv_flags & IFF_EBRIDGE)) {
		/* Attempt to delete a non-bridge device! */
		ret = -EPERM;
	}

	else if (dev->flags & IFF_UP) {
		/* Not shut down yet. */
		ret = -EBUSY;
	}

	else
		br_dev_delete(dev, NULL);

	rtnl_unlock();
	return ret;
}

/* MTU of the bridge pseudo-device: ETH_DATA_LEN or the minimum of the ports */
int br_min_mtu(const struct net_bridge *br)
{
	const struct net_bridge_port *p;
	int mtu = 0;

	ASSERT_RTNL();

	if (list_empty(&br->port_list))
		mtu = ETH_DATA_LEN;
	else {
		list_for_each_entry(p, &br->port_list, list) {
			if (!mtu || p->dev->mtu < mtu)
				mtu = p->dev->mtu;
		}
	}
	return mtu;
}

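/* The bridge device can only offer the GSO limits that its most restrictive
 * port can handle.
 */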
static void br_set_gso_limits(struct net_bridge *br)
{
	unsigned int gso_max_size = GSO_MAX_SIZE;
	u16 gso_max_segs = GSO_MAX_SEGS;
	const struct net_bridge_port *p;

	list_for_each_entry(p, &br->port_list, list) {
		gso_max_size = min(gso_max_size, p->dev->gso_max_size);
		gso_max_segs = min(gso_max_segs, p->dev->gso_max_segs);
	}
	br->dev->gso_max_size = gso_max_size;
	br->dev->gso_max_segs = gso_max_segs;
}

/*
 * Recompute the bridge device's feature set from the features of its ports.
 */
netdev_features_t br_features_recompute(struct net_bridge *br,
					netdev_features_t features)
{
	struct net_bridge_port *p;
	netdev_features_t mask;

	if (list_empty(&br->port_list))
		return features;

	mask = features;
	features &= ~NETIF_F_ONE_FOR_ALL;

	list_for_each_entry(p, &br->port_list, list) {
		features = netdev_increment_features(features,
						     p->dev->features, mask);
	}
	features = netdev_add_tso_features(features, mask);

	return features;
}

/* called with RTNL */
int br_add_if(struct net_bridge *br, struct net_device *dev)
{
	struct net_bridge_port *p;
	int err = 0;
	unsigned br_hr, dev_hr;
	bool changed_addr;

	/* Don't allow bridging of non-Ethernet-like devices, or of DSA-enabled
	 * master network devices: the bridge layer rx_handler keeps the DSA
	 * fake ethertype handler from being invoked, so the DSA switch tag
	 * protocol header would never be stripped and the bridge would just
	 * return RX_HANDLER_CONSUMED, stopping RX processing for these frames.
	 */
	if ((dev->flags & IFF_LOOPBACK) ||
	    dev->type != ARPHRD_ETHER || dev->addr_len != ETH_ALEN ||
	    !is_valid_ether_addr(dev->dev_addr) ||
	    netdev_uses_dsa(dev))
		return -EINVAL;

	/* No bridging of bridges */
	if (dev->netdev_ops->ndo_start_xmit == br_dev_xmit)
		return -ELOOP;

	/* Device is already being bridged */
	if (br_port_exists(dev))
		return -EBUSY;

	/* No bridging of devices that dislike that (e.g. wireless) */
	if (dev->priv_flags & IFF_DONT_BRIDGE)
		return -EOPNOTSUPP;

	p = new_nbp(br, dev);
	if (IS_ERR(p))
		return PTR_ERR(p);

	call_netdevice_notifiers(NETDEV_JOIN, dev);

	err = dev_set_allmulti(dev, 1);
	if (err)
		goto put_back;

	err = kobject_init_and_add(&p->kobj, &brport_ktype, &(dev->dev.kobj),
				   SYSFS_BRIDGE_PORT_ATTR);
	if (err)
		goto err1;

	err = br_sysfs_addif(p);
	if (err)
		goto err2;

	err = br_netpoll_enable(p);
	if (err)
		goto err3;

	err = netdev_rx_handler_register(dev, br_handle_frame, p);
	if (err)
		goto err4;

	dev->priv_flags |= IFF_BRIDGE_PORT;

	err = netdev_master_upper_dev_link(dev, br->dev, NULL, NULL);
	if (err)
		goto err5;

	err = nbp_switchdev_mark_set(p);
	if (err)
		goto err6;

	dev_disable_lro(dev);

	list_add_rcu(&p->list, &br->port_list);

	nbp_update_port_count(br);

	netdev_update_features(br->dev);

	br_hr = br->dev->needed_headroom;
	dev_hr = netdev_get_fwd_headroom(dev);
	if (br_hr < dev_hr)
		update_headroom(br, dev_hr);
	else
		netdev_set_rx_headroom(dev, br_hr);

	if (br_fdb_insert(br, p, dev->dev_addr, 0))
		netdev_err(dev, "failed to insert local address into bridge forwarding table\n");

	err = nbp_vlan_init(p);
	if (err) {
		netdev_err(dev, "failed to initialize vlan filtering on this port\n");
		goto err7;
	}

	spin_lock_bh(&br->lock);
	changed_addr = br_stp_recalculate_bridge_id(br);

	if (netif_running(dev) && netif_oper_up(dev) &&
	    (br->dev->flags & IFF_UP))
		br_stp_enable_port(p);
	spin_unlock_bh(&br->lock);

	br_ifinfo_notify(RTM_NEWLINK, p);

	if (changed_addr)
		call_netdevice_notifiers(NETDEV_CHANGEADDR, br->dev);

	dev_set_mtu(br->dev, br_min_mtu(br));
	br_set_gso_limits(br);

	kobject_uevent(&p->kobj, KOBJ_ADD);

	return 0;

err7:
	list_del_rcu(&p->list);
	br_fdb_delete_by_port(br, p, 0, 1);
	nbp_update_port_count(br);
err6:
	netdev_upper_dev_unlink(dev, br->dev);
err5:
	dev->priv_flags &= ~IFF_BRIDGE_PORT;
	netdev_rx_handler_unregister(dev);
err4:
	br_netpoll_disable(p);
err3:
	sysfs_remove_link(br->ifobj, p->dev->name);
err2:
	kobject_put(&p->kobj);
	p = NULL;	/* kobject_put frees */
err1:
	dev_set_allmulti(dev, -1);
put_back:
	dev_put(dev);
	kfree(p);
	return err;
}

/* called with RTNL */
int br_del_if(struct net_bridge *br, struct net_device *dev)
{
	struct net_bridge_port *p;
	bool changed_addr;

	p = br_port_get_rtnl(dev);
	if (!p || p->br != br)
		return -EINVAL;

	/* Since more than one interface can be attached to a bridge,
	 * there may still be an alternate path for netconsole to use;
	 * therefore there is no reason for a NETDEV_RELEASE event.
	 */
	del_nbp(p);

	dev_set_mtu(br->dev, br_min_mtu(br));
	br_set_gso_limits(br);

	spin_lock_bh(&br->lock);
	changed_addr = br_stp_recalculate_bridge_id(br);
	spin_unlock_bh(&br->lock);

	if (changed_addr)
		call_netdevice_notifiers(NETDEV_CHANGEADDR, br->dev);

	netdev_update_features(br->dev);

	return 0;
}

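/* Called when a port's flags change.  If any flag in BR_AUTO_MASK (learning,
 * unicast flooding) was toggled, the auto-port count may have changed, so
 * re-evaluate port promiscuity.
 */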
void br_port_flags_change(struct net_bridge_port *p, unsigned long mask)
{
	struct net_bridge *br = p->br;

	if (mask & BR_AUTO_MASK)
		nbp_update_port_count(br);
}