/*
 * drivers/net/team/team.c - Network team device driver
 * Copyright (c) 2011 Jiri Pirko <jpirko@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/rcupdate.h>
#include <linux/errno.h>
#include <linux/ctype.h>
#include <linux/notifier.h>
#include <linux/netdevice.h>
#include <linux/netpoll.h>
#include <linux/if_vlan.h>
#include <linux/if_arp.h>
#include <linux/socket.h>
#include <linux/etherdevice.h>
#include <linux/rtnetlink.h>
#include <net/rtnetlink.h>
#include <net/genetlink.h>
#include <net/netlink.h>
#include <net/sch_generic.h>
#include <net/switchdev.h>
#include <generated/utsrelease.h>
#include <linux/if_team.h>

#define DRV_NAME "team"


/**********
 * Helpers
 **********/

#define team_port_exists(dev) (dev->priv_flags & IFF_TEAM_PORT)

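/*
 * A port's struct team_port is stored as the rx_handler_data of the
 * underlying device, so it can be looked up either under rcu_read_lock()
 * (packet path) or under RTNL (control path).
 */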
static struct team_port *team_port_get_rcu(const struct net_device *dev)
{
	return rcu_dereference(dev->rx_handler_data);
}

static struct team_port *team_port_get_rtnl(const struct net_device *dev)
{
	struct team_port *port = rtnl_dereference(dev->rx_handler_data);

	return team_port_exists(dev) ? port : NULL;
}

/*
 * Since the ability to change device address for open port device is tested in
 * team_port_add, this function can be called without control of return value
 */
static int __set_port_dev_addr(struct net_device *port_dev,
			       const unsigned char *dev_addr)
{
	struct sockaddr_storage addr;

	memcpy(addr.__data, dev_addr, port_dev->addr_len);
	addr.ss_family = port_dev->type;
	return dev_set_mac_address(port_dev, (struct sockaddr *)&addr);
}

static int team_port_set_orig_dev_addr(struct team_port *port)
{
	return __set_port_dev_addr(port->dev, port->orig.dev_addr);
}

static int team_port_set_team_dev_addr(struct team *team,
				       struct team_port *port)
{
	return __set_port_dev_addr(port->dev, team->dev->dev_addr);
}

int team_modeop_port_enter(struct team *team, struct team_port *port)
{
	return team_port_set_team_dev_addr(team, port);
}
EXPORT_SYMBOL(team_modeop_port_enter);

void team_modeop_port_change_dev_addr(struct team *team,
				      struct team_port *port)
{
	team_port_set_team_dev_addr(team, port);
}
EXPORT_SYMBOL(team_modeop_port_change_dev_addr);

static void team_lower_state_changed(struct team_port *port)
{
	struct netdev_lag_lower_state_info info;

	info.link_up = port->linkup;
	info.tx_enabled = team_port_enabled(port);
	netdev_lower_state_changed(port->dev, &info);
}

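/*
 * The effective link state is the user-forced value when
 * user.linkup_enabled is set, otherwise the state reported by the port
 * device; a lower-state notification is sent only on a real transition.
 */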
static void team_refresh_port_linkup(struct team_port *port)
{
	bool new_linkup = port->user.linkup_enabled ? port->user.linkup :
						      port->state.linkup;

	if (port->linkup != new_linkup) {
		port->linkup = new_linkup;
		team_lower_state_changed(port);
	}
}


/*******************
 * Options handling
 *******************/

struct team_option_inst { /* One for each option instance */
	struct list_head list;
	struct list_head tmp_list;
	struct team_option *option;
	struct team_option_inst_info info;
	bool changed;
	bool removed;
};

static struct team_option *__team_find_option(struct team *team,
					      const char *opt_name)
{
	struct team_option *option;

	list_for_each_entry(option, &team->option_list, list) {
		if (strcmp(option->name, opt_name) == 0)
			return option;
	}
	return NULL;
}

static void __team_option_inst_del(struct team_option_inst *opt_inst)
{
	list_del(&opt_inst->list);
	kfree(opt_inst);
}

static void __team_option_inst_del_option(struct team *team,
					  struct team_option *option)
{
	struct team_option_inst *opt_inst, *tmp;

	list_for_each_entry_safe(opt_inst, tmp, &team->option_inst_list, list) {
		if (opt_inst->option == option)
			__team_option_inst_del(opt_inst);
	}
}

static int __team_option_inst_add(struct team *team, struct team_option *option,
				  struct team_port *port)
{
	struct team_option_inst *opt_inst;
	unsigned int array_size;
	unsigned int i;
	int err;

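	/* Non-array options are handled as a single-instance array. */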
	array_size = option->array_size;
	if (!array_size)
		array_size = 1;

	for (i = 0; i < array_size; i++) {
		opt_inst = kmalloc(sizeof(*opt_inst), GFP_KERNEL);
		if (!opt_inst)
			return -ENOMEM;
		opt_inst->option = option;
		opt_inst->info.port = port;
		opt_inst->info.array_index = i;
		opt_inst->changed = true;
		opt_inst->removed = false;
		list_add_tail(&opt_inst->list, &team->option_inst_list);
		if (option->init) {
			err = option->init(team, &opt_inst->info);
			if (err)
				return err;
		}
	}
	return 0;
}

static int __team_option_inst_add_option(struct team *team,
					 struct team_option *option)
{
	int err;

	if (!option->per_port) {
		err = __team_option_inst_add(team, option, NULL);
		if (err)
			goto inst_del_option;
	}
	return 0;

inst_del_option:
	__team_option_inst_del_option(team, option);
	return err;
}

static void __team_option_inst_mark_removed_option(struct team *team,
						   struct team_option *option)
{
	struct team_option_inst *opt_inst;

	list_for_each_entry(opt_inst, &team->option_inst_list, list) {
		if (opt_inst->option == option) {
			opt_inst->changed = true;
			opt_inst->removed = true;
		}
	}
}

static void __team_option_inst_del_port(struct team *team,
					struct team_port *port)
{
	struct team_option_inst *opt_inst, *tmp;

	list_for_each_entry_safe(opt_inst, tmp, &team->option_inst_list, list) {
		if (opt_inst->option->per_port &&
		    opt_inst->info.port == port)
			__team_option_inst_del(opt_inst);
	}
}

static int __team_option_inst_add_port(struct team *team,
				       struct team_port *port)
{
	struct team_option *option;
	int err;

	list_for_each_entry(option, &team->option_list, list) {
		if (!option->per_port)
			continue;
		err = __team_option_inst_add(team, option, port);
		if (err)
			goto inst_del_port;
	}
	return 0;

inst_del_port:
	__team_option_inst_del_port(team, port);
	return err;
}

static void __team_option_inst_mark_removed_port(struct team *team,
						 struct team_port *port)
{
	struct team_option_inst *opt_inst;

	list_for_each_entry(opt_inst, &team->option_inst_list, list) {
		if (opt_inst->info.port == port) {
			opt_inst->changed = true;
			opt_inst->removed = true;
		}
	}
}

static int __team_options_register(struct team *team,
				   const struct team_option *option,
				   size_t option_count)
{
	int i;
	struct team_option **dst_opts;
	int err;

	dst_opts = kzalloc(sizeof(struct team_option *) * option_count,
			   GFP_KERNEL);
	if (!dst_opts)
		return -ENOMEM;
	for (i = 0; i < option_count; i++, option++) {
		if (__team_find_option(team, option->name)) {
			err = -EEXIST;
			goto alloc_rollback;
		}
		dst_opts[i] = kmemdup(option, sizeof(*option), GFP_KERNEL);
		if (!dst_opts[i]) {
			err = -ENOMEM;
			goto alloc_rollback;
		}
	}

	for (i = 0; i < option_count; i++) {
		err = __team_option_inst_add_option(team, dst_opts[i]);
		if (err)
			goto inst_rollback;
		list_add_tail(&dst_opts[i]->list, &team->option_list);
	}

	kfree(dst_opts);
	return 0;

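/*
 * Rollback: first drop the option instances created so far, then free
 * the duplicated option structs.
 */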
inst_rollback:
	for (i--; i >= 0; i--)
		__team_option_inst_del_option(team, dst_opts[i]);

	i = option_count - 1;
alloc_rollback:
	for (i--; i >= 0; i--)
		kfree(dst_opts[i]);

	kfree(dst_opts);
	return err;
}

static void __team_options_mark_removed(struct team *team,
					const struct team_option *option,
					size_t option_count)
{
	int i;

	for (i = 0; i < option_count; i++, option++) {
		struct team_option *del_opt;

		del_opt = __team_find_option(team, option->name);
		if (del_opt)
			__team_option_inst_mark_removed_option(team, del_opt);
	}
}

static void __team_options_unregister(struct team *team,
				      const struct team_option *option,
				      size_t option_count)
{
	int i;

	for (i = 0; i < option_count; i++, option++) {
		struct team_option *del_opt;

		del_opt = __team_find_option(team, option->name);
		if (del_opt) {
			__team_option_inst_del_option(team, del_opt);
			list_del(&del_opt->list);
			kfree(del_opt);
		}
	}
}

static void __team_options_change_check(struct team *team);

int team_options_register(struct team *team,
			  const struct team_option *option,
			  size_t option_count)
{
	int err;

	err = __team_options_register(team, option, option_count);
	if (err)
		return err;
	__team_options_change_check(team);
	return 0;
}
EXPORT_SYMBOL(team_options_register);

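/*
 * Unregister in two steps: mark the instances as removed and let the
 * change check report that to userspace first, then actually drop the
 * options.
 */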
void team_options_unregister(struct team *team,
			     const struct team_option *option,
			     size_t option_count)
{
	__team_options_mark_removed(team, option, option_count);
	__team_options_change_check(team);
	__team_options_unregister(team, option, option_count);
}
EXPORT_SYMBOL(team_options_unregister);

static int team_option_get(struct team *team,
			   struct team_option_inst *opt_inst,
			   struct team_gsetter_ctx *ctx)
{
	if (!opt_inst->option->getter)
		return -EOPNOTSUPP;
	return opt_inst->option->getter(team, ctx);
}

static int team_option_set(struct team *team,
			   struct team_option_inst *opt_inst,
			   struct team_gsetter_ctx *ctx)
{
	if (!opt_inst->option->setter)
		return -EOPNOTSUPP;
	return opt_inst->option->setter(team, ctx);
}

void team_option_inst_set_change(struct team_option_inst_info *opt_inst_info)
{
	struct team_option_inst *opt_inst;

	opt_inst = container_of(opt_inst_info, struct team_option_inst, info);
	opt_inst->changed = true;
}
EXPORT_SYMBOL(team_option_inst_set_change);

void team_options_change_check(struct team *team)
{
	__team_options_change_check(team);
}
EXPORT_SYMBOL(team_options_change_check);


/****************
 * Mode handling
 ****************/

static LIST_HEAD(mode_list);
static DEFINE_SPINLOCK(mode_list_lock);

struct team_mode_item {
	struct list_head list;
	const struct team_mode *mode;
};

static struct team_mode_item *__find_mode(const char *kind)
{
	struct team_mode_item *mitem;

	list_for_each_entry(mitem, &mode_list, list) {
		if (strcmp(mitem->mode->kind, kind) == 0)
			return mitem;
	}
	return NULL;
}

static bool is_good_mode_name(const char *name)
{
	while (*name != '\0') {
		if (!isalpha(*name) && !isdigit(*name) && *name != '_')
			return false;
		name++;
	}
	return true;
}

int team_mode_register(const struct team_mode *mode)
{
	int err = 0;
	struct team_mode_item *mitem;

	if (!is_good_mode_name(mode->kind) ||
	    mode->priv_size > TEAM_MODE_PRIV_SIZE)
		return -EINVAL;

	mitem = kmalloc(sizeof(*mitem), GFP_KERNEL);
	if (!mitem)
		return -ENOMEM;

	spin_lock(&mode_list_lock);
	if (__find_mode(mode->kind)) {
		err = -EEXIST;
		kfree(mitem);
		goto unlock;
	}
	mitem->mode = mode;
	list_add_tail(&mitem->list, &mode_list);
unlock:
	spin_unlock(&mode_list_lock);
	return err;
}
EXPORT_SYMBOL(team_mode_register);

void team_mode_unregister(const struct team_mode *mode)
{
	struct team_mode_item *mitem;

	spin_lock(&mode_list_lock);
	mitem = __find_mode(mode->kind);
	if (mitem) {
		list_del_init(&mitem->list);
		kfree(mitem);
	}
	spin_unlock(&mode_list_lock);
}
EXPORT_SYMBOL(team_mode_unregister);

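/*
 * Look up a mode by kind; if it is not registered yet, try to autoload
 * the "team-mode-<kind>" module and look again.  On success a reference
 * on the mode's owner module is held.
 */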
static const struct team_mode *team_mode_get(const char *kind)
{
	struct team_mode_item *mitem;
	const struct team_mode *mode = NULL;

	spin_lock(&mode_list_lock);
	mitem = __find_mode(kind);
	if (!mitem) {
		spin_unlock(&mode_list_lock);
		request_module("team-mode-%s", kind);
		spin_lock(&mode_list_lock);
		mitem = __find_mode(kind);
	}
	if (mitem) {
		mode = mitem->mode;
		if (!try_module_get(mode->owner))
			mode = NULL;
	}

	spin_unlock(&mode_list_lock);
	return mode;
}

static void team_mode_put(const struct team_mode *mode)
{
	module_put(mode->owner);
}

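/*
 * Fallback ops, used while no mode is set or no port is enabled:
 * transmit just drops the skb, receive passes every frame up as if it
 * were destined for the team device.
 */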
static bool team_dummy_transmit(struct team *team, struct sk_buff *skb)
{
	dev_kfree_skb_any(skb);
	return false;
}

static rx_handler_result_t team_dummy_receive(struct team *team,
					      struct team_port *port,
					      struct sk_buff *skb)
{
	return RX_HANDLER_ANOTHER;
}

static const struct team_mode __team_no_mode = {
	.kind		= "*NOMODE*",
};

static bool team_is_mode_set(struct team *team)
{
	return team->mode != &__team_no_mode;
}

static void team_set_no_mode(struct team *team)
{
	team->user_carrier_enabled = false;
	team->mode = &__team_no_mode;
}

static void team_adjust_ops(struct team *team)
{
	/*
	 * To avoid checks in rx/tx skb paths, ensure here that non-null and
	 * correct ops are always set.
	 */

	if (!team->en_port_count || !team_is_mode_set(team) ||
	    !team->mode->ops->transmit)
		team->ops.transmit = team_dummy_transmit;
	else
		team->ops.transmit = team->mode->ops->transmit;

	if (!team->en_port_count || !team_is_mode_set(team) ||
	    !team->mode->ops->receive)
		team->ops.receive = team_dummy_receive;
	else
		team->ops.receive = team->mode->ops->receive;
}

/*
 * We can benefit from the fact that it's ensured no port is present
 * at the time of mode change. Therefore no packets are in flight, so
 * there's no need to set mode operations in any special way.
 */
static int __team_change_mode(struct team *team,
			      const struct team_mode *new_mode)
{
	/* Check if mode was previously set and do cleanup if so */
	if (team_is_mode_set(team)) {
		void (*exit_op)(struct team *team) = team->ops.exit;

		/* Clear ops area so no callback is called any longer */
		memset(&team->ops, 0, sizeof(struct team_mode_ops));
		team_adjust_ops(team);

		if (exit_op)
			exit_op(team);
		team_mode_put(team->mode);
		team_set_no_mode(team);
		/* zero private data area */
		memset(&team->mode_priv, 0,
		       sizeof(struct team) - offsetof(struct team, mode_priv));
	}

	if (!new_mode)
		return 0;

	if (new_mode->ops->init) {
		int err;

		err = new_mode->ops->init(team);
		if (err)
			return err;
	}

	team->mode = new_mode;
	memcpy(&team->ops, new_mode->ops, sizeof(struct team_mode_ops));
	team_adjust_ops(team);

	return 0;
}

static int team_change_mode(struct team *team, const char *kind)
{
	const struct team_mode *new_mode;
	struct net_device *dev = team->dev;
	int err;

	if (!list_empty(&team->port_list)) {
		netdev_err(dev, "No ports can be present during mode change\n");
		return -EBUSY;
	}

	if (team_is_mode_set(team) && strcmp(team->mode->kind, kind) == 0) {
		netdev_err(dev, "Unable to change to the same mode the team is in\n");
		return -EINVAL;
	}

	new_mode = team_mode_get(kind);
	if (!new_mode) {
		netdev_err(dev, "Mode \"%s\" not found\n", kind);
		return -EINVAL;
	}

	err = __team_change_mode(team, new_mode);
	if (err) {
		netdev_err(dev, "Failed to change to mode \"%s\"\n", kind);
		team_mode_put(new_mode);
		return err;
	}

	netdev_info(dev, "Mode changed to \"%s\"\n", kind);
	return 0;
}


/*********************
 * Peers notification
 *********************/

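/*
 * NETDEV_NOTIFY_PEERS must be sent under RTNL.  The work requeues itself
 * both when rtnl_trylock() fails and while count_pending has not been
 * drained, spacing repeats by the configured interval.
 */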
static void team_notify_peers_work(struct work_struct *work)
{
	struct team *team;
	int val;

	team = container_of(work, struct team, notify_peers.dw.work);

	if (!rtnl_trylock()) {
		schedule_delayed_work(&team->notify_peers.dw, 0);
		return;
	}
	val = atomic_dec_if_positive(&team->notify_peers.count_pending);
	if (val < 0) {
		rtnl_unlock();
		return;
	}
	call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, team->dev);
	rtnl_unlock();
	if (val)
		schedule_delayed_work(&team->notify_peers.dw,
				      msecs_to_jiffies(team->notify_peers.interval));
}

static void team_notify_peers(struct team *team)
{
	if (!team->notify_peers.count || !netif_running(team->dev))
		return;
	atomic_add(team->notify_peers.count, &team->notify_peers.count_pending);
	schedule_delayed_work(&team->notify_peers.dw, 0);
}

static void team_notify_peers_init(struct team *team)
{
	INIT_DELAYED_WORK(&team->notify_peers.dw, team_notify_peers_work);
}

static void team_notify_peers_fini(struct team *team)
{
	cancel_delayed_work_sync(&team->notify_peers.dw);
}


/*******************************
 * Multicast group rejoin support
 *******************************/

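/* Same RTNL retry scheme as the peers notification above, sending
 * NETDEV_RESEND_IGMP instead.
 */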
static void team_mcast_rejoin_work(struct work_struct *work)
{
	struct team *team;
	int val;

	team = container_of(work, struct team, mcast_rejoin.dw.work);

	if (!rtnl_trylock()) {
		schedule_delayed_work(&team->mcast_rejoin.dw, 0);
		return;
	}
	val = atomic_dec_if_positive(&team->mcast_rejoin.count_pending);
	if (val < 0) {
		rtnl_unlock();
		return;
	}
	call_netdevice_notifiers(NETDEV_RESEND_IGMP, team->dev);
	rtnl_unlock();
	if (val)
		schedule_delayed_work(&team->mcast_rejoin.dw,
				      msecs_to_jiffies(team->mcast_rejoin.interval));
}

static void team_mcast_rejoin(struct team *team)
{
	if (!team->mcast_rejoin.count || !netif_running(team->dev))
		return;
	atomic_add(team->mcast_rejoin.count, &team->mcast_rejoin.count_pending);
	schedule_delayed_work(&team->mcast_rejoin.dw, 0);
}

static void team_mcast_rejoin_init(struct team *team)
{
	INIT_DELAYED_WORK(&team->mcast_rejoin.dw, team_mcast_rejoin_work);
}

static void team_mcast_rejoin_fini(struct team *team)
{
	cancel_delayed_work_sync(&team->mcast_rejoin.dw);
}


/************************
 * Rx path frame handler
 ************************/

/* note: already called with rcu_read_lock */
static rx_handler_result_t team_handle_frame(struct sk_buff **pskb)
{
	struct sk_buff *skb = *pskb;
	struct team_port *port;
	struct team *team;
	rx_handler_result_t res;

	skb = skb_share_check(skb, GFP_ATOMIC);
	if (!skb)
		return RX_HANDLER_CONSUMED;

	*pskb = skb;

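	/* Cannot be NULL: the rx_handler stays registered only while the
	 * device is a team port.
	 */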
	port = team_port_get_rcu(skb->dev);
	team = port->team;
	if (!team_port_enabled(port)) {
		/* allow exact match delivery for disabled ports */
		res = RX_HANDLER_EXACT;
	} else {
		res = team->ops.receive(team, port, skb);
	}
	if (res == RX_HANDLER_ANOTHER) {
		struct team_pcpu_stats *pcpu_stats;

		pcpu_stats = this_cpu_ptr(team->pcpu_stats);
		u64_stats_update_begin(&pcpu_stats->syncp);
		pcpu_stats->rx_packets++;
		pcpu_stats->rx_bytes += skb->len;
		if (skb->pkt_type == PACKET_MULTICAST)
			pcpu_stats->rx_multicast++;
		u64_stats_update_end(&pcpu_stats->syncp);

		skb->dev = team->dev;
	} else if (res == RX_HANDLER_EXACT) {
		this_cpu_inc(team->pcpu_stats->rx_nohandler);
	} else {
		this_cpu_inc(team->pcpu_stats->rx_dropped);
	}

	return res;
}


/*************************************
 * Multiqueue Tx port select override
 *************************************/

static int team_queue_override_init(struct team *team)
{
	struct list_head *listarr;
	unsigned int queue_cnt = team->dev->num_tx_queues - 1;
	unsigned int i;

	if (!queue_cnt)
		return 0;
	listarr = kmalloc(sizeof(struct list_head) * queue_cnt, GFP_KERNEL);
	if (!listarr)
		return -ENOMEM;
	team->qom_lists = listarr;
	for (i = 0; i < queue_cnt; i++)
		INIT_LIST_HEAD(listarr++);
	return 0;
}

static void team_queue_override_fini(struct team *team)
{
	kfree(team->qom_lists);
}

static struct list_head *__team_get_qom_list(struct team *team, u16 queue_id)
{
	return &team->qom_lists[queue_id - 1];
}

/*
 * note: already called with rcu_read_lock
 */
static bool team_queue_override_transmit(struct team *team, struct sk_buff *skb)
{
	struct list_head *qom_list;
	struct team_port *port;

	if (!team->queue_override_enabled || !skb->queue_mapping)
		return false;
	qom_list = __team_get_qom_list(team, skb->queue_mapping);
	list_for_each_entry_rcu(port, qom_list, qom_list) {
		if (!team_dev_queue_xmit(team, port, skb))
			return true;
	}
	return false;
}

static void __team_queue_override_port_del(struct team *team,
					   struct team_port *port)
{
	if (!port->queue_id)
		return;
	list_del_rcu(&port->qom_list);
}

static bool team_queue_override_port_has_gt_prio_than(struct team_port *port,
						      struct team_port *cur)
{
	if (port->priority < cur->priority)
		return true;
	if (port->priority > cur->priority)
		return false;
	if (port->index < cur->index)
		return true;
	return false;
}

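/*
 * Keep each queue's override list sorted: insert the port in front of
 * the first entry it outranks (see the priority/index comparison above),
 * or at the tail if it outranks none.
 */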
static void __team_queue_override_port_add(struct team *team,
					   struct team_port *port)
{
	struct team_port *cur;
	struct list_head *qom_list;
	struct list_head *node;

	if (!port->queue_id)
		return;
	qom_list = __team_get_qom_list(team, port->queue_id);
	node = qom_list;
	list_for_each_entry(cur, qom_list, qom_list) {
		if (team_queue_override_port_has_gt_prio_than(port, cur)) {
			node = &cur->qom_list;
			break;
		}
	}
	list_add_tail_rcu(&port->qom_list, node);
}

static void __team_queue_override_enabled_check(struct team *team)
{
	struct team_port *port;
	bool enabled = false;

	list_for_each_entry(port, &team->port_list, list) {
		if (port->queue_id) {
			enabled = true;
			break;
		}
	}
	if (enabled == team->queue_override_enabled)
		return;
	netdev_dbg(team->dev, "%s queue override\n",
		   enabled ? "Enabling" : "Disabling");
	team->queue_override_enabled = enabled;
}

static void team_queue_override_port_prio_changed(struct team *team,
						  struct team_port *port)
{
	if (!port->queue_id || !team_port_enabled(port))
		return;
	__team_queue_override_port_del(team, port);
	__team_queue_override_port_add(team, port);
	__team_queue_override_enabled_check(team);
}

static void team_queue_override_port_change_queue_id(struct team *team,
						     struct team_port *port,
						     u16 new_queue_id)
{
	if (team_port_enabled(port)) {
		__team_queue_override_port_del(team, port);
		port->queue_id = new_queue_id;
		__team_queue_override_port_add(team, port);
		__team_queue_override_enabled_check(team);
	} else {
		port->queue_id = new_queue_id;
	}
}

static void team_queue_override_port_add(struct team *team,
					 struct team_port *port)
{
	__team_queue_override_port_add(team, port);
	__team_queue_override_enabled_check(team);
}

static void team_queue_override_port_del(struct team *team,
					 struct team_port *port)
{
	__team_queue_override_port_del(team, port);
	__team_queue_override_enabled_check(team);
}


/****************
 * Port handling
 ****************/

static bool team_port_find(const struct team *team,
			   const struct team_port *port)
{
	struct team_port *cur;

	list_for_each_entry(cur, &team->port_list, list)
		if (cur == port)
			return true;
	return false;
}

/*
 * Enable/disable port by adding to enabled port hashlist and setting
 * port->index (Might be racy so reader could see incorrect ifindex when
 * processing a flying packet, but that is not a problem). Write guarded
 * by team->lock.
 */
static void team_port_enable(struct team *team,
			     struct team_port *port)
{
	if (team_port_enabled(port))
		return;
	port->index = team->en_port_count++;
	hlist_add_head_rcu(&port->hlist,
			   team_port_index_hash(team, port->index));
	team_adjust_ops(team);
	team_queue_override_port_add(team, port);
	if (team->ops.port_enabled)
		team->ops.port_enabled(team, port);
	team_notify_peers(team);
	team_mcast_rejoin(team);
	team_lower_state_changed(port);
}

static void __reconstruct_port_hlist(struct team *team, int rm_index)
{
	int i;
	struct team_port *port;

	for (i = rm_index + 1; i < team->en_port_count; i++) {
		port = team_get_port_by_index(team, i);
		hlist_del_rcu(&port->hlist);
		port->index--;
		hlist_add_head_rcu(&port->hlist,
				   team_port_index_hash(team, port->index));
	}
}

static void team_port_disable(struct team *team,
			      struct team_port *port)
{
	if (!team_port_enabled(port))
		return;
	if (team->ops.port_disabled)
		team->ops.port_disabled(team, port);
	hlist_del_rcu(&port->hlist);
	__reconstruct_port_hlist(team, port->index);
	port->index = -1;
	team->en_port_count--;
	team_queue_override_port_del(team, port);
	team_adjust_ops(team);
	team_notify_peers(team);
	team_mcast_rejoin(team);
	team_lower_state_changed(port);
}

#define TEAM_VLAN_FEATURES (NETIF_F_HW_CSUM | NETIF_F_SG | \
			    NETIF_F_FRAGLIST | NETIF_F_ALL_TSO | \
			    NETIF_F_HIGHDMA | NETIF_F_LRO)

#define TEAM_ENC_FEATURES	(NETIF_F_HW_CSUM | NETIF_F_SG | \
				 NETIF_F_RXCSUM | NETIF_F_ALL_TSO)

static void __team_compute_features(struct team *team)
{
	struct team_port *port;
	u32 vlan_features = TEAM_VLAN_FEATURES & NETIF_F_ALL_FOR_ALL;
	netdev_features_t enc_features = TEAM_ENC_FEATURES;
	unsigned short max_hard_header_len = ETH_HLEN;
	unsigned int dst_release_flag = IFF_XMIT_DST_RELEASE |
					IFF_XMIT_DST_RELEASE_PERM;

	list_for_each_entry(port, &team->port_list, list) {
		vlan_features = netdev_increment_features(vlan_features,
							  port->dev->vlan_features,
							  TEAM_VLAN_FEATURES);
		enc_features =
			netdev_increment_features(enc_features,
						  port->dev->hw_enc_features,
						  TEAM_ENC_FEATURES);

		dst_release_flag &= port->dev->priv_flags;
		if (port->dev->hard_header_len > max_hard_header_len)
			max_hard_header_len = port->dev->hard_header_len;
	}

	team->dev->vlan_features = vlan_features;
	team->dev->hw_enc_features = enc_features | NETIF_F_GSO_ENCAP_ALL;
	team->dev->hard_header_len = max_hard_header_len;

	team->dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
	if (dst_release_flag == (IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM))
		team->dev->priv_flags |= IFF_XMIT_DST_RELEASE;
}

static void team_compute_features(struct team *team)
{
	mutex_lock(&team->lock);
	__team_compute_features(team);
	mutex_unlock(&team->lock);
	netdev_change_features(team->dev);
}

static int team_port_enter(struct team *team, struct team_port *port)
{
	int err = 0;

	dev_hold(team->dev);
	if (team->ops.port_enter) {
		err = team->ops.port_enter(team, port);
		if (err) {
			netdev_err(team->dev, "Device %s failed to enter team mode\n",
				   port->dev->name);
			goto err_port_enter;
		}
	}

	return 0;

err_port_enter:
	dev_put(team->dev);

	return err;
}

static void team_port_leave(struct team *team, struct team_port *port)
{
	if (team->ops.port_leave)
		team->ops.port_leave(team, port);
	dev_put(team->dev);
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static int __team_port_enable_netpoll(struct team_port *port)
{
	struct netpoll *np;
	int err;

	np = kzalloc(sizeof(*np), GFP_KERNEL);
	if (!np)
		return -ENOMEM;

	err = __netpoll_setup(np, port->dev);
	if (err) {
		kfree(np);
		return err;
	}
	port->np = np;
	return err;
}

static int team_port_enable_netpoll(struct team_port *port)
{
	if (!port->team->dev->npinfo)
		return 0;

	return __team_port_enable_netpoll(port);
}

static void team_port_disable_netpoll(struct team_port *port)
{
	struct netpoll *np = port->np;

	if (!np)
		return;
	port->np = NULL;

	/* Wait for transmitting packets to finish before freeing. */
	synchronize_rcu_bh();
	__netpoll_cleanup(np);
	kfree(np);
}
#else
static int team_port_enable_netpoll(struct team_port *port)
{
	return 0;
}
static void team_port_disable_netpoll(struct team_port *port)
{
}
#endif

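/*
 * Link the port as a lower device of the team master and advertise the
 * LAG tx type of the current mode to interested listeners (e.g.
 * switchdev drivers).
 */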
static int team_upper_dev_link(struct team *team, struct team_port *port)
{
	struct netdev_lag_upper_info lag_upper_info;
	int err;

	lag_upper_info.tx_type = team->mode->lag_tx_type;
	lag_upper_info.hash_type = NETDEV_LAG_HASH_UNKNOWN;
	err = netdev_master_upper_dev_link(port->dev, team->dev, NULL,
					   &lag_upper_info);
	if (err)
		return err;
	port->dev->priv_flags |= IFF_TEAM_PORT;
	return 0;
}

static void team_upper_dev_unlink(struct team *team, struct team_port *port)
{
	netdev_upper_dev_unlink(port->dev, team->dev);
	port->dev->priv_flags &= ~IFF_TEAM_PORT;
}

static void __team_port_change_port_added(struct team_port *port, bool linkup);
static int team_dev_type_check_change(struct net_device *dev,
				      struct net_device *port_dev);

static int team_port_add(struct team *team, struct net_device *port_dev)
{
	struct net_device *dev = team->dev;
	struct team_port *port;
	char *portname = port_dev->name;
	int err;

	if (port_dev->flags & IFF_LOOPBACK) {
		netdev_err(dev, "Device %s is loopback device. Loopback devices can't be added as a team port\n",
			   portname);
		return -EINVAL;
	}

	if (team_port_exists(port_dev)) {
		netdev_err(dev, "Device %s is already a port of a team device\n",
			   portname);
		return -EBUSY;
	}

	if (port_dev->features & NETIF_F_VLAN_CHALLENGED &&
	    vlan_uses_dev(dev)) {
		netdev_err(dev, "Device %s is VLAN challenged and team device has VLAN set up\n",
			   portname);
		return -EPERM;
	}

	err = team_dev_type_check_change(dev, port_dev);
	if (err)
		return err;

	if (port_dev->flags & IFF_UP) {
		netdev_err(dev, "Device %s is up. Set it down before adding it as a team port\n",
			   portname);
		return -EBUSY;
	}

	port = kzalloc(sizeof(struct team_port) + team->mode->port_priv_size,
		       GFP_KERNEL);
	if (!port)
		return -ENOMEM;

	port->dev = port_dev;
	port->team = team;
	INIT_LIST_HEAD(&port->qom_list);

	port->orig.mtu = port_dev->mtu;
	err = dev_set_mtu(port_dev, dev->mtu);
	if (err) {
		netdev_dbg(dev, "Error %d calling dev_set_mtu\n", err);
		goto err_set_mtu;
	}

	memcpy(port->orig.dev_addr, port_dev->dev_addr, port_dev->addr_len);

	err = team_port_enter(team, port);
	if (err) {
		netdev_err(dev, "Device %s failed to enter team mode\n",
			   portname);
		goto err_port_enter;
	}

	err = dev_open(port_dev);
	if (err) {
		netdev_dbg(dev, "Device %s opening failed\n",
			   portname);
		goto err_dev_open;
	}

	err = vlan_vids_add_by_dev(port_dev, dev);
	if (err) {
		netdev_err(dev, "Failed to add vlan ids to device %s\n",
			   portname);
		goto err_vids_add;
	}

	err = team_port_enable_netpoll(port);
	if (err) {
		netdev_err(dev, "Failed to enable netpoll on device %s\n",
			   portname);
		goto err_enable_netpoll;
	}

	if (!(dev->features & NETIF_F_LRO))
		dev_disable_lro(port_dev);

	err = netdev_rx_handler_register(port_dev, team_handle_frame,
					 port);
	if (err) {
		netdev_err(dev, "Device %s failed to register rx_handler\n",
			   portname);
		goto err_handler_register;
	}

	err = team_upper_dev_link(team, port);
	if (err) {
		netdev_err(dev, "Device %s failed to set upper link\n",
			   portname);
		goto err_set_upper_link;
	}

	err = __team_option_inst_add_port(team, port);
	if (err) {
		netdev_err(dev, "Device %s failed to add per-port options\n",
			   portname);
		goto err_option_port_add;
	}

	netif_addr_lock_bh(dev);
	dev_uc_sync_multiple(port_dev, dev);
	dev_mc_sync_multiple(port_dev, dev);
	netif_addr_unlock_bh(dev);

	port->index = -1;
	list_add_tail_rcu(&port->list, &team->port_list);
	team_port_enable(team, port);
	__team_compute_features(team);
	__team_port_change_port_added(port, !!netif_carrier_ok(port_dev));
	__team_options_change_check(team);

	netdev_info(dev, "Port device %s added\n", portname);

	return 0;

err_option_port_add:
	team_upper_dev_unlink(team, port);

err_set_upper_link:
	netdev_rx_handler_unregister(port_dev);

err_handler_register:
	team_port_disable_netpoll(port);

err_enable_netpoll:
	vlan_vids_del_by_dev(port_dev, dev);

err_vids_add:
	dev_close(port_dev);

err_dev_open:
	team_port_leave(team, port);
	team_port_set_orig_dev_addr(port);

err_port_enter:
	dev_set_mtu(port_dev, port->orig.mtu);

err_set_mtu:
	kfree(port);

	return err;
}

static void __team_port_change_port_removed(struct team_port *port);

static int team_port_del(struct team *team, struct net_device *port_dev)
{
	struct net_device *dev = team->dev;
	struct team_port *port;
	char *portname = port_dev->name;

	port = team_port_get_rtnl(port_dev);
	if (!port || !team_port_find(team, port)) {
		netdev_err(dev, "Device %s does not act as a port of this team\n",
			   portname);
		return -ENOENT;
	}

	team_port_disable(team, port);
	list_del_rcu(&port->list);
	team_upper_dev_unlink(team, port);
	netdev_rx_handler_unregister(port_dev);
	team_port_disable_netpoll(port);
	vlan_vids_del_by_dev(port_dev, dev);
	dev_uc_unsync(port_dev, dev);
	dev_mc_unsync(port_dev, dev);
	dev_close(port_dev);
	team_port_leave(team, port);

	__team_option_inst_mark_removed_port(team, port);
	__team_options_change_check(team);
	__team_option_inst_del_port(team, port);
	__team_port_change_port_removed(port);

	team_port_set_orig_dev_addr(port);
	dev_set_mtu(port_dev, port->orig.mtu);
	kfree_rcu(port, rcu);
	netdev_info(dev, "Port device %s removed\n", portname);
	__team_compute_features(team);

	return 0;
}


/*****************
 * Net device ops
 *****************/

static int team_mode_option_get(struct team *team, struct team_gsetter_ctx *ctx)
{
	ctx->data.str_val = team->mode->kind;
	return 0;
}

static int team_mode_option_set(struct team *team, struct team_gsetter_ctx *ctx)
{
	return team_change_mode(team, ctx->data.str_val);
}

static int team_notify_peers_count_get(struct team *team,
				       struct team_gsetter_ctx *ctx)
{
	ctx->data.u32_val = team->notify_peers.count;
	return 0;
}

static int team_notify_peers_count_set(struct team *team,
				       struct team_gsetter_ctx *ctx)
{
	team->notify_peers.count = ctx->data.u32_val;
	return 0;
}

static int team_notify_peers_interval_get(struct team *team,
					  struct team_gsetter_ctx *ctx)
{
	ctx->data.u32_val = team->notify_peers.interval;
	return 0;
}

static int team_notify_peers_interval_set(struct team *team,
					  struct team_gsetter_ctx *ctx)
{
	team->notify_peers.interval = ctx->data.u32_val;
	return 0;
}

static int team_mcast_rejoin_count_get(struct team *team,
				       struct team_gsetter_ctx *ctx)
{
	ctx->data.u32_val = team->mcast_rejoin.count;
	return 0;
}

static int team_mcast_rejoin_count_set(struct team *team,
				       struct team_gsetter_ctx *ctx)
{
	team->mcast_rejoin.count = ctx->data.u32_val;
	return 0;
}

static int team_mcast_rejoin_interval_get(struct team *team,
					  struct team_gsetter_ctx *ctx)
{
	ctx->data.u32_val = team->mcast_rejoin.interval;
	return 0;
}

static int team_mcast_rejoin_interval_set(struct team *team,
					  struct team_gsetter_ctx *ctx)
{
	team->mcast_rejoin.interval = ctx->data.u32_val;
	return 0;
}

static int team_port_en_option_get(struct team *team,
				   struct team_gsetter_ctx *ctx)
{
	struct team_port *port = ctx->info->port;

	ctx->data.bool_val = team_port_enabled(port);
	return 0;
}

static int team_port_en_option_set(struct team *team,
				   struct team_gsetter_ctx *ctx)
{
	struct team_port *port = ctx->info->port;

	if (ctx->data.bool_val)
		team_port_enable(team, port);
	else
		team_port_disable(team, port);
	return 0;
}

static int team_user_linkup_option_get(struct team *team,
				       struct team_gsetter_ctx *ctx)
{
	struct team_port *port = ctx->info->port;

	ctx->data.bool_val = port->user.linkup;
	return 0;
}

static void __team_carrier_check(struct team *team);

static int team_user_linkup_option_set(struct team *team,
				       struct team_gsetter_ctx *ctx)
{
	struct team_port *port = ctx->info->port;

	port->user.linkup = ctx->data.bool_val;
	team_refresh_port_linkup(port);
	__team_carrier_check(port->team);
	return 0;
}

static int team_user_linkup_en_option_get(struct team *team,
					  struct team_gsetter_ctx *ctx)
{
	struct team_port *port = ctx->info->port;

	ctx->data.bool_val = port->user.linkup_enabled;
	return 0;
}

static int team_user_linkup_en_option_set(struct team *team,
					  struct team_gsetter_ctx *ctx)
{
	struct team_port *port = ctx->info->port;

	port->user.linkup_enabled = ctx->data.bool_val;
	team_refresh_port_linkup(port);
	__team_carrier_check(port->team);
	return 0;
}

static int team_priority_option_get(struct team *team,
				    struct team_gsetter_ctx *ctx)
{
	struct team_port *port = ctx->info->port;

	ctx->data.s32_val = port->priority;
	return 0;
}

static int team_priority_option_set(struct team *team,
				    struct team_gsetter_ctx *ctx)
{
	struct team_port *port = ctx->info->port;
	s32 priority = ctx->data.s32_val;

	if (port->priority == priority)
		return 0;
	port->priority = priority;
	team_queue_override_port_prio_changed(team, port);
	return 0;
}

static int team_queue_id_option_get(struct team *team,
				    struct team_gsetter_ctx *ctx)
{
	struct team_port *port = ctx->info->port;

	ctx->data.u32_val = port->queue_id;
	return 0;
}

static int team_queue_id_option_set(struct team *team,
				    struct team_gsetter_ctx *ctx)
{
	struct team_port *port = ctx->info->port;
	u16 new_queue_id = ctx->data.u32_val;

	if (port->queue_id == new_queue_id)
		return 0;
	if (new_queue_id >= team->dev->real_num_tx_queues)
		return -EINVAL;
	team_queue_override_port_change_queue_id(team, port, new_queue_id);
	return 0;
}

static const struct team_option team_options[] = {
	{
		.name = "mode",
		.type = TEAM_OPTION_TYPE_STRING,
		.getter = team_mode_option_get,
		.setter = team_mode_option_set,
	},
	{
		.name = "notify_peers_count",
		.type = TEAM_OPTION_TYPE_U32,
		.getter = team_notify_peers_count_get,
		.setter = team_notify_peers_count_set,
	},
	{
		.name = "notify_peers_interval",
		.type = TEAM_OPTION_TYPE_U32,
		.getter = team_notify_peers_interval_get,
		.setter = team_notify_peers_interval_set,
	},
	{
		.name = "mcast_rejoin_count",
		.type = TEAM_OPTION_TYPE_U32,
		.getter = team_mcast_rejoin_count_get,
		.setter = team_mcast_rejoin_count_set,
	},
	{
		.name = "mcast_rejoin_interval",
		.type = TEAM_OPTION_TYPE_U32,
		.getter = team_mcast_rejoin_interval_get,
		.setter = team_mcast_rejoin_interval_set,
	},
	{
		.name = "enabled",
		.type = TEAM_OPTION_TYPE_BOOL,
		.per_port = true,
		.getter = team_port_en_option_get,
		.setter = team_port_en_option_set,
	},
	{
		.name = "user_linkup",
		.type = TEAM_OPTION_TYPE_BOOL,
		.per_port = true,
		.getter = team_user_linkup_option_get,
		.setter = team_user_linkup_option_set,
	},
	{
		.name = "user_linkup_enabled",
		.type = TEAM_OPTION_TYPE_BOOL,
		.per_port = true,
		.getter = team_user_linkup_en_option_get,
		.setter = team_user_linkup_en_option_set,
	},
	{
		.name = "priority",
		.type = TEAM_OPTION_TYPE_S32,
		.per_port = true,
		.getter = team_priority_option_get,
		.setter = team_priority_option_set,
	},
	{
		.name = "queue_id",
		.type = TEAM_OPTION_TYPE_U32,
		.per_port = true,
		.getter = team_queue_id_option_get,
		.setter = team_queue_id_option_set,
	},
};

static int team_init(struct net_device *dev)
{
	struct team *team = netdev_priv(dev);
	int i;
	int err;

	team->dev = dev;
	mutex_init(&team->lock);
	team_set_no_mode(team);

	team->pcpu_stats = netdev_alloc_pcpu_stats(struct team_pcpu_stats);
	if (!team->pcpu_stats)
		return -ENOMEM;

	for (i = 0; i < TEAM_PORT_HASHENTRIES; i++)
		INIT_HLIST_HEAD(&team->en_port_hlist[i]);
	INIT_LIST_HEAD(&team->port_list);
	err = team_queue_override_init(team);
	if (err)
		goto err_team_queue_override_init;

	team_adjust_ops(team);

	INIT_LIST_HEAD(&team->option_list);
	INIT_LIST_HEAD(&team->option_inst_list);

	team_notify_peers_init(team);
	team_mcast_rejoin_init(team);

	err = team_options_register(team, team_options, ARRAY_SIZE(team_options));
	if (err)
		goto err_options_register;
	netif_carrier_off(dev);

	netdev_lockdep_set_classes(dev);

	return 0;

err_options_register:
	team_mcast_rejoin_fini(team);
	team_notify_peers_fini(team);
	team_queue_override_fini(team);
err_team_queue_override_init:
	free_percpu(team->pcpu_stats);

	return err;
}

static void team_uninit(struct net_device *dev)
{
	struct team *team = netdev_priv(dev);
	struct team_port *port;
	struct team_port *tmp;

	mutex_lock(&team->lock);
	list_for_each_entry_safe(port, tmp, &team->port_list, list)
		team_port_del(team, port->dev);

	__team_change_mode(team, NULL); /* cleanup */
	__team_options_unregister(team, team_options, ARRAY_SIZE(team_options));
	team_mcast_rejoin_fini(team);
	team_notify_peers_fini(team);
	team_queue_override_fini(team);
	mutex_unlock(&team->lock);
	netdev_change_features(dev);
}

static void team_destructor(struct net_device *dev)
{
	struct team *team = netdev_priv(dev);

	free_percpu(team->pcpu_stats);
}

static int team_open(struct net_device *dev)
{
	return 0;
}

static int team_close(struct net_device *dev)
{
	return 0;
}

/*
 * note: already called with rcu_read_lock
 */
static netdev_tx_t team_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct team *team = netdev_priv(dev);
	bool tx_success;
	unsigned int len = skb->len;

	tx_success = team_queue_override_transmit(team, skb);
	if (!tx_success)
		tx_success = team->ops.transmit(team, skb);
	if (tx_success) {
		struct team_pcpu_stats *pcpu_stats;

		pcpu_stats = this_cpu_ptr(team->pcpu_stats);
		u64_stats_update_begin(&pcpu_stats->syncp);
		pcpu_stats->tx_packets++;
		pcpu_stats->tx_bytes += len;
		u64_stats_update_end(&pcpu_stats->syncp);
	} else {
		this_cpu_inc(team->pcpu_stats->tx_dropped);
	}

	return NETDEV_TX_OK;
}

static u16 team_select_queue(struct net_device *dev, struct sk_buff *skb,
			     void *accel_priv, select_queue_fallback_t fallback)
{
	/*
	 * This helper function exists to help dev_pick_tx get the correct
	 * destination queue.  Using a helper function skips a call to
	 * skb_tx_hash and will put the skbs in the queue we expect on their
	 * way down to the team driver.
	 */
	u16 txq = skb_rx_queue_recorded(skb) ? skb_get_rx_queue(skb) : 0;

	/*
	 * Save the original txq to restore before passing to the driver
	 */
	qdisc_skb_cb(skb)->slave_dev_queue_mapping = skb->queue_mapping;

	if (unlikely(txq >= dev->real_num_tx_queues)) {
		do {
			txq -= dev->real_num_tx_queues;
		} while (txq >= dev->real_num_tx_queues);
	}
	return txq;
}

static void team_change_rx_flags(struct net_device *dev, int change)
{
	struct team *team = netdev_priv(dev);
	struct team_port *port;
	int inc;

	rcu_read_lock();
	list_for_each_entry_rcu(port, &team->port_list, list) {
		if (change & IFF_PROMISC) {
			inc = dev->flags & IFF_PROMISC ? 1 : -1;
			dev_set_promiscuity(port->dev, inc);
		}
		if (change & IFF_ALLMULTI) {
			inc = dev->flags & IFF_ALLMULTI ? 1 : -1;
			dev_set_allmulti(port->dev, inc);
		}
	}
	rcu_read_unlock();
}

static void team_set_rx_mode(struct net_device *dev)
{
	struct team *team = netdev_priv(dev);
	struct team_port *port;

	rcu_read_lock();
	list_for_each_entry_rcu(port, &team->port_list, list) {
		dev_uc_sync_multiple(port->dev, dev);
		dev_mc_sync_multiple(port->dev, dev);
	}
	rcu_read_unlock();
}

static int team_set_mac_address(struct net_device *dev, void *p)
{
	struct sockaddr *addr = p;
	struct team *team = netdev_priv(dev);
	struct team_port *port;

	if (dev->type == ARPHRD_ETHER && !is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;
	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	mutex_lock(&team->lock);
	list_for_each_entry(port, &team->port_list, list)
		if (team->ops.port_change_dev_addr)
			team->ops.port_change_dev_addr(team, port);
	mutex_unlock(&team->lock);
	return 0;
}

static int team_change_mtu(struct net_device *dev, int new_mtu)
{
	struct team *team = netdev_priv(dev);
	struct team_port *port;
	int err;

	/*
	 * Although this is a reader, it's guarded by team lock. It's not
	 * possible to traverse the list in reverse under rcu_read_lock.
	 */
	mutex_lock(&team->lock);
	team->port_mtu_change_allowed = true;
	list_for_each_entry(port, &team->port_list, list) {
		err = dev_set_mtu(port->dev, new_mtu);
		if (err) {
			netdev_err(dev, "Device %s failed to change mtu\n",
				   port->dev->name);
			goto unwind;
		}
	}
	team->port_mtu_change_allowed = false;
	mutex_unlock(&team->lock);

	dev->mtu = new_mtu;

	return 0;

unwind:
	list_for_each_entry_continue_reverse(port, &team->port_list, list)
		dev_set_mtu(port->dev, dev->mtu);
	team->port_mtu_change_allowed = false;
	mutex_unlock(&team->lock);

	return err;
}

static void
team_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
	struct team *team = netdev_priv(dev);
	struct team_pcpu_stats *p;
	u64 rx_packets, rx_bytes, rx_multicast, tx_packets, tx_bytes;
	u32 rx_dropped = 0, tx_dropped = 0, rx_nohandler = 0;
	unsigned int start;
	int i;

	for_each_possible_cpu(i) {
		p = per_cpu_ptr(team->pcpu_stats, i);
		do {
			start = u64_stats_fetch_begin_irq(&p->syncp);
			rx_packets	= p->rx_packets;
			rx_bytes	= p->rx_bytes;
			rx_multicast	= p->rx_multicast;
			tx_packets	= p->tx_packets;
			tx_bytes	= p->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&p->syncp, start));

		stats->rx_packets	+= rx_packets;
		stats->rx_bytes		+= rx_bytes;
		stats->multicast	+= rx_multicast;
		stats->tx_packets	+= tx_packets;
		stats->tx_bytes		+= tx_bytes;
		/* rx_dropped, tx_dropped & rx_nohandler are u32,
		 * updated without syncp protection.
		 */
		rx_dropped	+= p->rx_dropped;
		tx_dropped	+= p->tx_dropped;
		rx_nohandler	+= p->rx_nohandler;
	}
	stats->rx_dropped	= rx_dropped;
	stats->tx_dropped	= tx_dropped;
	stats->rx_nohandler	= rx_nohandler;
}

static int team_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
{
	struct team *team = netdev_priv(dev);
	struct team_port *port;
	int err;

	/*
	 * Although this is a reader, it's guarded by team lock. It's not
	 * possible to traverse the list in reverse under rcu_read_lock.
	 */
	mutex_lock(&team->lock);
	list_for_each_entry(port, &team->port_list, list) {
		err = vlan_vid_add(port->dev, proto, vid);
		if (err)
			goto unwind;
	}
	mutex_unlock(&team->lock);

	return 0;

unwind:
	list_for_each_entry_continue_reverse(port, &team->port_list, list)
		vlan_vid_del(port->dev, proto, vid);
	mutex_unlock(&team->lock);

	return err;
}

static int team_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
{
	struct team *team = netdev_priv(dev);
	struct team_port *port;

	mutex_lock(&team->lock);
	list_for_each_entry(port, &team->port_list, list)
		vlan_vid_del(port->dev, proto, vid);
	mutex_unlock(&team->lock);

	return 0;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void team_poll_controller(struct net_device *dev)
{
}

static void __team_netpoll_cleanup(struct team *team)
{
	struct team_port *port;

	list_for_each_entry(port, &team->port_list, list)
		team_port_disable_netpoll(port);
}

static void team_netpoll_cleanup(struct net_device *dev)
{
	struct team *team = netdev_priv(dev);

	mutex_lock(&team->lock);
	__team_netpoll_cleanup(team);
	mutex_unlock(&team->lock);
}

static int team_netpoll_setup(struct net_device *dev,
			      struct netpoll_info *npinfo, gfp_t gfp)
{
	struct team *team = netdev_priv(dev);
	struct team_port *port;
	int err = 0;

	mutex_lock(&team->lock);
	list_for_each_entry(port, &team->port_list, list) {
		err = __team_port_enable_netpoll(port);
		if (err) {
			__team_netpoll_cleanup(team);
			break;
		}
	}
	mutex_unlock(&team->lock);
	return err;
}
#endif

static int team_add_slave(struct net_device *dev, struct net_device *port_dev)
{
	struct team *team = netdev_priv(dev);
	int err;

	mutex_lock(&team->lock);
	err = team_port_add(team, port_dev);
	mutex_unlock(&team->lock);

	if (!err)
		netdev_change_features(dev);

	return err;
}

static int team_del_slave(struct net_device *dev, struct net_device *port_dev)
{
	struct team *team = netdev_priv(dev);
	int err;

	mutex_lock(&team->lock);
	err = team_port_del(team, port_dev);
	mutex_unlock(&team->lock);

	if (!err)
		netdev_change_features(dev);

	return err;
}

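/*
 * Recompute the team device's feature set as the common subset over all
 * ports: ONE_FOR_ALL bits are cleared and ALL_FOR_ALL bits seeded before
 * folding each port's features in via netdev_increment_features(), then
 * TSO features are re-added according to the requested mask.
 */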
static netdev_features_t team_fix_features(struct net_device *dev,
					   netdev_features_t features)
{
	struct team_port *port;
	struct team *team = netdev_priv(dev);
	netdev_features_t mask;

	mask = features;
	features &= ~NETIF_F_ONE_FOR_ALL;
	features |= NETIF_F_ALL_FOR_ALL;

	rcu_read_lock();
	list_for_each_entry_rcu(port, &team->port_list, list) {
		features = netdev_increment_features(features,
						     port->dev->features,
						     mask);
	}
	rcu_read_unlock();

	features = netdev_add_tso_features(features, mask);

	return features;
}

static int team_change_carrier(struct net_device *dev, bool new_carrier)
{
	struct team *team = netdev_priv(dev);

	team->user_carrier_enabled = true;

	if (new_carrier)
		netif_carrier_on(dev);
	else
		netif_carrier_off(dev);
	return 0;
}

static const struct net_device_ops team_netdev_ops = {
	.ndo_size		= sizeof(struct net_device_ops),
	.ndo_init		= team_init,
	.ndo_uninit		= team_uninit,
	.ndo_open		= team_open,
	.ndo_stop		= team_close,
	.ndo_start_xmit		= team_xmit,
	.ndo_select_queue	= team_select_queue,
	.ndo_change_rx_flags	= team_change_rx_flags,
	.ndo_set_rx_mode	= team_set_rx_mode,
	.ndo_set_mac_address	= team_set_mac_address,
	.ndo_change_mtu_rh74	= team_change_mtu,
	.ndo_get_stats64	= team_get_stats64,
	.ndo_vlan_rx_add_vid	= team_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= team_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= team_poll_controller,
	.ndo_netpoll_setup	= team_netpoll_setup,
	.ndo_netpoll_cleanup	= team_netpoll_cleanup,
#endif
	.ndo_add_slave		= team_add_slave,
	.ndo_del_slave		= team_del_slave,
	.ndo_fix_features	= team_fix_features,
	.extended.ndo_neigh_construct	= netdev_default_l2upper_neigh_construct,
	.extended.ndo_neigh_destroy	= netdev_default_l2upper_neigh_destroy,
	.ndo_change_carrier	= team_change_carrier,
	.ndo_bridge_setlink	= switchdev_port_bridge_setlink,
	.ndo_bridge_getlink	= switchdev_port_bridge_getlink,
	.ndo_bridge_dellink	= switchdev_port_bridge_dellink,
	.ndo_fdb_add		= switchdev_port_fdb_add,
	.ndo_fdb_del		= switchdev_port_fdb_del,
	.extended.ndo_fdb_dump	= switchdev_port_fdb_dump,
	.ndo_features_check	= passthru_features_check,
};

/***********************
 * ethtool interface
 ***********************/

static void team_ethtool_get_drvinfo(struct net_device *dev,
				     struct ethtool_drvinfo *drvinfo)
{
	strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
	strlcpy(drvinfo->version, UTS_RELEASE, sizeof(drvinfo->version));
}

static const struct ethtool_ops team_ethtool_ops = {
	.get_drvinfo		= team_ethtool_get_drvinfo,
	.get_link		= ethtool_op_get_link,
};

/***********************
 * rt netlink interface
 ***********************/

static void team_setup_by_port(struct net_device *dev,
			       struct net_device *port_dev)
{
	dev->header_ops	= port_dev->header_ops;
	dev->type = port_dev->type;
	dev->hard_header_len = port_dev->hard_header_len;
	dev->addr_len = port_dev->addr_len;
	dev->mtu = port_dev->mtu;
	memcpy(dev->broadcast, port_dev->broadcast, port_dev->addr_len);
	eth_hw_addr_inherit(dev, port_dev);
}

static int team_dev_type_check_change(struct net_device *dev,
				      struct net_device *port_dev)
{
	struct team *team = netdev_priv(dev);
	char *portname = port_dev->name;
	int err;

	if (dev->type == port_dev->type)
		return 0;
	if (!list_empty(&team->port_list)) {
		netdev_err(dev, "Device %s is of different type\n", portname);
		return -EBUSY;
	}
	err = call_netdevice_notifiers(NETDEV_PRE_TYPE_CHANGE, dev);
	err = notifier_to_errno(err);
	if (err) {
		netdev_err(dev, "Refused to change device type\n");
		return err;
	}
	dev_uc_flush(dev);
	dev_mc_flush(dev);
	team_setup_by_port(dev, port_dev);
	call_netdevice_notifiers(NETDEV_POST_TYPE_CHANGE, dev);
	return 0;
}

static void team_setup(struct net_device *dev)
{
	ether_setup(dev);

	dev->netdev_ops = &team_netdev_ops;
	dev->ethtool_ops = &team_ethtool_ops;
	dev->extended->needs_free_netdev = true;
	dev->extended->priv_destructor = team_destructor;
	dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_TX_SKB_SHARING);
	dev->priv_flags |= IFF_NO_QUEUE;
	dev->priv_flags |= IFF_TEAM;

	/*
	 * Indicate we support unicast address filtering. That way core won't
	 * bring us to promisc mode in case a unicast addr is added while
	 * device is up and running.
	 */
	dev->priv_flags |= IFF_UNICAST_FLT | IFF_LIVE_ADDR_CHANGE;

	dev->features |= NETIF_F_LLTX;
	dev->features |= NETIF_F_GRO;

	/* Don't allow team devices to change network namespaces. */
	dev->features |= NETIF_F_NETNS_LOCAL;

	dev->hw_features = TEAM_VLAN_FEATURES |
			   NETIF_F_HW_VLAN_CTAG_TX |
			   NETIF_F_HW_VLAN_CTAG_RX |
			   NETIF_F_HW_VLAN_CTAG_FILTER;

	dev->hw_features |= NETIF_F_GSO_ENCAP_ALL;
	dev->features |= dev->hw_features;
}

static int team_newlink(struct net *src_net, struct net_device *dev,
			struct nlattr *tb[], struct nlattr *data[])
{
	if (tb[IFLA_ADDRESS] == NULL)
		eth_hw_addr_random(dev);

	return register_netdevice(dev);
}

static int team_validate(struct nlattr *tb[], struct nlattr *data[])
{
	if (tb[IFLA_ADDRESS]) {
		if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
			return -EINVAL;
		if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
			return -EADDRNOTAVAIL;
	}
	return 0;
}

static unsigned int team_get_num_tx_queues(void)
{
	return TEAM_DEFAULT_NUM_TX_QUEUES;
}

static unsigned int team_get_num_rx_queues(void)
{
	return TEAM_DEFAULT_NUM_RX_QUEUES;
}

static struct rtnl_link_ops team_link_ops __read_mostly = {
	.kind			= DRV_NAME,
	.priv_size		= sizeof(struct team),
	.setup			= team_setup,
	.newlink		= team_newlink,
	.validate		= team_validate,
	.get_num_tx_queues	= team_get_num_tx_queues,
	.get_num_rx_queues	= team_get_num_rx_queues,
};


/***********************************
 * Generic netlink custom interface
 ***********************************/

static struct genl_family team_nl_family;

static const struct nla_policy team_nl_policy[TEAM_ATTR_MAX + 1] = {
	[TEAM_ATTR_UNSPEC]		= { .type = NLA_UNSPEC, },
	[TEAM_ATTR_TEAM_IFINDEX]	= { .type = NLA_U32 },
	[TEAM_ATTR_LIST_OPTION]		= { .type = NLA_NESTED },
	[TEAM_ATTR_LIST_PORT]		= { .type = NLA_NESTED },
};

static const struct nla_policy
team_nl_option_policy[TEAM_ATTR_OPTION_MAX + 1] = {
	[TEAM_ATTR_OPTION_UNSPEC]	= { .type = NLA_UNSPEC, },
	[TEAM_ATTR_OPTION_NAME] = {
		.type = NLA_STRING,
		.len = TEAM_STRING_MAX_LEN,
	},
	[TEAM_ATTR_OPTION_CHANGED]	= { .type = NLA_FLAG },
	[TEAM_ATTR_OPTION_TYPE]		= { .type = NLA_U8 },
	[TEAM_ATTR_OPTION_DATA]		= { .type = NLA_BINARY },
};

static int team_nl_cmd_noop(struct sk_buff *skb, struct genl_info *info)
{
	struct sk_buff *msg;
	void *hdr;
	int err;

	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!msg)
		return -ENOMEM;

	hdr = genlmsg_put(msg, info->snd_portid, info->snd_seq,
			  &team_nl_family, 0, TEAM_CMD_NOOP);
	if (!hdr) {
		err = -EMSGSIZE;
		goto err_msg_put;
	}

	genlmsg_end(msg, hdr);

	return genlmsg_unicast(genl_info_net(info), msg, info->snd_portid);

err_msg_put:
	nlmsg_free(msg);

	return err;
}

/*
 * Netlink cmd functions should be locked by following two functions.
 * Since dev gets held here, that ensures dev won't disappear in between.
 */
static struct team *team_nl_team_get(struct genl_info *info)
{
	struct net *net = genl_info_net(info);
	int ifindex;
	struct net_device *dev;
	struct team *team;

	if (!info->attrs[TEAM_ATTR_TEAM_IFINDEX])
		return NULL;

	ifindex = nla_get_u32(info->attrs[TEAM_ATTR_TEAM_IFINDEX]);
	dev = dev_get_by_index(net, ifindex);
	if (!dev || dev->netdev_ops != &team_netdev_ops) {
		if (dev)
			dev_put(dev);
		return NULL;
	}

	team = netdev_priv(dev);
	mutex_lock(&team->lock);
	return team;
}

static void team_nl_team_put(struct team *team)
{
	mutex_unlock(&team->lock);
	dev_put(team->dev);
}

typedef int team_nl_send_func_t(struct sk_buff *skb,
				struct team *team, u32 portid);

static int team_nl_send_unicast(struct sk_buff *skb, struct team *team, u32 portid)
{
	return genlmsg_unicast(dev_net(team->dev), skb, portid);
}
2247
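/* Emit a single option instance as a nested TEAM_ATTR_ITEM_OPTION
 * attribute, encoding its current value according to the option type.
 */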
static int team_nl_fill_one_option_get(struct sk_buff *skb, struct team *team,
				       struct team_option_inst *opt_inst)
{
	struct nlattr *option_item;
	struct team_option *option = opt_inst->option;
	struct team_option_inst_info *opt_inst_info = &opt_inst->info;
	struct team_gsetter_ctx ctx;
	int err;

	ctx.info = opt_inst_info;
	err = team_option_get(team, opt_inst, &ctx);
	if (err)
		return err;

	option_item = nla_nest_start(skb, TEAM_ATTR_ITEM_OPTION);
	if (!option_item)
		return -EMSGSIZE;

	if (nla_put_string(skb, TEAM_ATTR_OPTION_NAME, option->name))
		goto nest_cancel;
	if (opt_inst_info->port &&
	    nla_put_u32(skb, TEAM_ATTR_OPTION_PORT_IFINDEX,
			opt_inst_info->port->dev->ifindex))
		goto nest_cancel;
	if (opt_inst->option->array_size &&
	    nla_put_u32(skb, TEAM_ATTR_OPTION_ARRAY_INDEX,
			opt_inst_info->array_index))
		goto nest_cancel;

	switch (option->type) {
	case TEAM_OPTION_TYPE_U32:
		if (nla_put_u8(skb, TEAM_ATTR_OPTION_TYPE, NLA_U32))
			goto nest_cancel;
		if (nla_put_u32(skb, TEAM_ATTR_OPTION_DATA, ctx.data.u32_val))
			goto nest_cancel;
		break;
	case TEAM_OPTION_TYPE_STRING:
		if (nla_put_u8(skb, TEAM_ATTR_OPTION_TYPE, NLA_STRING))
			goto nest_cancel;
		if (nla_put_string(skb, TEAM_ATTR_OPTION_DATA,
				   ctx.data.str_val))
			goto nest_cancel;
		break;
	case TEAM_OPTION_TYPE_BINARY:
		if (nla_put_u8(skb, TEAM_ATTR_OPTION_TYPE, NLA_BINARY))
			goto nest_cancel;
		if (nla_put(skb, TEAM_ATTR_OPTION_DATA, ctx.data.bin_val.len,
			    ctx.data.bin_val.ptr))
			goto nest_cancel;
		break;
	case TEAM_OPTION_TYPE_BOOL:
		if (nla_put_u8(skb, TEAM_ATTR_OPTION_TYPE, NLA_FLAG))
			goto nest_cancel;
		if (ctx.data.bool_val &&
		    nla_put_flag(skb, TEAM_ATTR_OPTION_DATA))
			goto nest_cancel;
		break;
	case TEAM_OPTION_TYPE_S32:
		if (nla_put_u8(skb, TEAM_ATTR_OPTION_TYPE, NLA_S32))
			goto nest_cancel;
		if (nla_put_s32(skb, TEAM_ATTR_OPTION_DATA, ctx.data.s32_val))
			goto nest_cancel;
		break;
	default:
		BUG();
	}
	if (opt_inst->removed && nla_put_flag(skb, TEAM_ATTR_OPTION_REMOVED))
		goto nest_cancel;
	if (opt_inst->changed) {
		if (nla_put_flag(skb, TEAM_ATTR_OPTION_CHANGED))
			goto nest_cancel;
		opt_inst->changed = false;
	}
	nla_nest_end(skb, option_item);
	return 0;

nest_cancel:
	nla_nest_cancel(skb, option_item);
	return -EMSGSIZE;
}

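/* Flush the current skb (if any) via send_func and allocate a fresh
 * one, so callers can split dumps that exceed a single message.
 */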
static int __send_and_alloc_skb(struct sk_buff **pskb,
				struct team *team, u32 portid,
				team_nl_send_func_t *send_func)
{
	int err;

	if (*pskb) {
		err = send_func(*pskb, team, portid);
		if (err)
			return err;
	}
	*pskb = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!*pskb)
		return -ENOMEM;
	return 0;
}

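/* Dump the selected option instances as NLM_F_MULTI messages,
 * restarting with a new skb via __send_and_alloc_skb() whenever the
 * current one fills up, and terminate the dump with NLMSG_DONE.
 */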
static int team_nl_send_options_get(struct team *team, u32 portid, u32 seq,
				    int flags, team_nl_send_func_t *send_func,
				    struct list_head *sel_opt_inst_list)
{
	struct nlattr *option_list;
	struct nlmsghdr *nlh;
	void *hdr;
	struct team_option_inst *opt_inst;
	int err;
	struct sk_buff *skb = NULL;
	bool incomplete;
	int i;

	opt_inst = list_first_entry(sel_opt_inst_list,
				    struct team_option_inst, tmp_list);

start_again:
	err = __send_and_alloc_skb(&skb, team, portid, send_func);
	if (err)
		return err;

	hdr = genlmsg_put(skb, portid, seq, &team_nl_family,
			  flags | NLM_F_MULTI, TEAM_CMD_OPTIONS_GET);
	if (!hdr) {
		nlmsg_free(skb);
		return -EMSGSIZE;
	}

	if (nla_put_u32(skb, TEAM_ATTR_TEAM_IFINDEX, team->dev->ifindex))
		goto nla_put_failure;
	option_list = nla_nest_start(skb, TEAM_ATTR_LIST_OPTION);
	if (!option_list)
		goto nla_put_failure;

	i = 0;
	incomplete = false;
	list_for_each_entry_from(opt_inst, sel_opt_inst_list, tmp_list) {
		err = team_nl_fill_one_option_get(skb, team, opt_inst);
		if (err) {
			if (err == -EMSGSIZE) {
				if (!i)
					goto errout;
				incomplete = true;
				break;
			}
			goto errout;
		}
		i++;
	}

	nla_nest_end(skb, option_list);
	genlmsg_end(skb, hdr);
	if (incomplete)
		goto start_again;

send_done:
	nlh = nlmsg_put(skb, portid, seq, NLMSG_DONE, 0, flags | NLM_F_MULTI);
	if (!nlh) {
		err = __send_and_alloc_skb(&skb, team, portid, send_func);
		if (err)
			return err;
		goto send_done;
	}

	return send_func(skb, team, portid);

nla_put_failure:
	err = -EMSGSIZE;
errout:
	genlmsg_cancel(skb, hdr);
	nlmsg_free(skb);
	return err;
}

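/* TEAM_CMD_OPTIONS_GET handler: dump all option instances of the
 * requested team device to the caller.
 */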
static int team_nl_cmd_options_get(struct sk_buff *skb, struct genl_info *info)
{
	struct team *team;
	struct team_option_inst *opt_inst;
	int err;
	LIST_HEAD(sel_opt_inst_list);

	team = team_nl_team_get(info);
	if (!team)
		return -EINVAL;

	list_for_each_entry(opt_inst, &team->option_inst_list, list)
		list_add_tail(&opt_inst->tmp_list, &sel_opt_inst_list);
	err = team_nl_send_options_get(team, info->snd_portid, info->snd_seq,
				       NLM_F_ACK, team_nl_send_unicast,
				       &sel_opt_inst_list);

	team_nl_team_put(team);

	return err;
}

static int team_nl_send_event_options_get(struct team *team,
					  struct list_head *sel_opt_inst_list);

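/* TEAM_CMD_OPTIONS_SET handler: walk the nested option list, match
 * each entry to an existing option instance by name, type, port and
 * array index, apply the new value and multicast the resulting
 * changes.
 */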
static int team_nl_cmd_options_set(struct sk_buff *skb, struct genl_info *info)
{
	struct team *team;
	int err = 0;
	int i;
	struct nlattr *nl_option;
	LIST_HEAD(opt_inst_list);

	rtnl_lock();

	team = team_nl_team_get(info);
	if (!team) {
		err = -EINVAL;
		goto rtnl_unlock;
	}

	if (!info->attrs[TEAM_ATTR_LIST_OPTION]) {
		err = -EINVAL;
		goto team_put;
	}

	nla_for_each_nested(nl_option, info->attrs[TEAM_ATTR_LIST_OPTION], i) {
		struct nlattr *opt_attrs[TEAM_ATTR_OPTION_MAX + 1];
		struct nlattr *attr;
		struct nlattr *attr_data;
		enum team_option_type opt_type;
		int opt_port_ifindex = 0;
		u32 opt_array_index = 0;
		bool opt_is_array = false;
		struct team_option_inst *opt_inst;
		char *opt_name;
		bool opt_found = false;

		if (nla_type(nl_option) != TEAM_ATTR_ITEM_OPTION) {
			err = -EINVAL;
			goto team_put;
		}
		err = nla_parse_nested(opt_attrs, TEAM_ATTR_OPTION_MAX,
				       nl_option, team_nl_option_policy);
		if (err)
			goto team_put;
		if (!opt_attrs[TEAM_ATTR_OPTION_NAME] ||
		    !opt_attrs[TEAM_ATTR_OPTION_TYPE]) {
			err = -EINVAL;
			goto team_put;
		}
		switch (nla_get_u8(opt_attrs[TEAM_ATTR_OPTION_TYPE])) {
		case NLA_U32:
			opt_type = TEAM_OPTION_TYPE_U32;
			break;
		case NLA_STRING:
			opt_type = TEAM_OPTION_TYPE_STRING;
			break;
		case NLA_BINARY:
			opt_type = TEAM_OPTION_TYPE_BINARY;
			break;
		case NLA_FLAG:
			opt_type = TEAM_OPTION_TYPE_BOOL;
			break;
		case NLA_S32:
			opt_type = TEAM_OPTION_TYPE_S32;
			break;
		default:
			/* Unsupported option type must not report success. */
			err = -EINVAL;
			goto team_put;
		}

		attr_data = opt_attrs[TEAM_ATTR_OPTION_DATA];
		if (opt_type != TEAM_OPTION_TYPE_BOOL && !attr_data) {
			err = -EINVAL;
			goto team_put;
		}

		opt_name = nla_data(opt_attrs[TEAM_ATTR_OPTION_NAME]);
		attr = opt_attrs[TEAM_ATTR_OPTION_PORT_IFINDEX];
		if (attr)
			opt_port_ifindex = nla_get_u32(attr);

		attr = opt_attrs[TEAM_ATTR_OPTION_ARRAY_INDEX];
		if (attr) {
			opt_is_array = true;
			opt_array_index = nla_get_u32(attr);
		}

		list_for_each_entry(opt_inst, &team->option_inst_list, list) {
			struct team_option *option = opt_inst->option;
			struct team_gsetter_ctx ctx;
			struct team_option_inst_info *opt_inst_info;
			int tmp_ifindex;

			opt_inst_info = &opt_inst->info;
			tmp_ifindex = opt_inst_info->port ?
				      opt_inst_info->port->dev->ifindex : 0;
			if (option->type != opt_type ||
			    strcmp(option->name, opt_name) ||
			    tmp_ifindex != opt_port_ifindex ||
			    (option->array_size && !opt_is_array) ||
			    opt_inst_info->array_index != opt_array_index)
				continue;
			opt_found = true;
			ctx.info = opt_inst_info;
			switch (opt_type) {
			case TEAM_OPTION_TYPE_U32:
				ctx.data.u32_val = nla_get_u32(attr_data);
				break;
			case TEAM_OPTION_TYPE_STRING:
				if (nla_len(attr_data) > TEAM_STRING_MAX_LEN) {
					err = -EINVAL;
					goto team_put;
				}
				ctx.data.str_val = nla_data(attr_data);
				break;
			case TEAM_OPTION_TYPE_BINARY:
				ctx.data.bin_val.len = nla_len(attr_data);
				ctx.data.bin_val.ptr = nla_data(attr_data);
				break;
			case TEAM_OPTION_TYPE_BOOL:
				/* presence of the flag attribute means true */
				ctx.data.bool_val = attr_data ? true : false;
				break;
			case TEAM_OPTION_TYPE_S32:
				ctx.data.s32_val = nla_get_s32(attr_data);
				break;
			default:
				BUG();
			}
			err = team_option_set(team, opt_inst, &ctx);
			if (err)
				goto team_put;
			opt_inst->changed = true;
			list_add(&opt_inst->tmp_list, &opt_inst_list);
		}
		if (!opt_found) {
			err = -ENOENT;
			goto team_put;
		}
	}

	err = team_nl_send_event_options_get(team, &opt_inst_list);

team_put:
	team_nl_team_put(team);
rtnl_unlock:
	rtnl_unlock();
	return err;
}

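/* Emit a single port as a nested TEAM_ATTR_ITEM_PORT attribute,
 * including its changed/removed flags and current link state.
 */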
static int team_nl_fill_one_port_get(struct sk_buff *skb,
				     struct team_port *port)
{
	struct nlattr *port_item;

	port_item = nla_nest_start(skb, TEAM_ATTR_ITEM_PORT);
	if (!port_item)
		goto nest_cancel;
	if (nla_put_u32(skb, TEAM_ATTR_PORT_IFINDEX, port->dev->ifindex))
		goto nest_cancel;
	if (port->changed) {
		if (nla_put_flag(skb, TEAM_ATTR_PORT_CHANGED))
			goto nest_cancel;
		port->changed = false;
	}
	if ((port->removed &&
	     nla_put_flag(skb, TEAM_ATTR_PORT_REMOVED)) ||
	    (port->state.linkup &&
	     nla_put_flag(skb, TEAM_ATTR_PORT_LINKUP)) ||
	    nla_put_u32(skb, TEAM_ATTR_PORT_SPEED, port->state.speed) ||
	    nla_put_u8(skb, TEAM_ATTR_PORT_DUPLEX, port->state.duplex))
		goto nest_cancel;
	nla_nest_end(skb, port_item);
	return 0;

nest_cancel:
	nla_nest_cancel(skb, port_item);
	return -EMSGSIZE;
}

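/* Dump the team port list (or a single selected port) as NLM_F_MULTI
 * messages, terminated by NLMSG_DONE.
 */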
static int team_nl_send_port_list_get(struct team *team, u32 portid, u32 seq,
				      int flags, team_nl_send_func_t *send_func,
				      struct team_port *one_port)
{
	struct nlattr *port_list;
	struct nlmsghdr *nlh;
	void *hdr;
	struct team_port *port;
	int err;
	struct sk_buff *skb = NULL;
	bool incomplete;
	int i;

	port = list_first_entry_or_null(&team->port_list,
					struct team_port, list);

start_again:
	err = __send_and_alloc_skb(&skb, team, portid, send_func);
	if (err)
		return err;

	hdr = genlmsg_put(skb, portid, seq, &team_nl_family,
			  flags | NLM_F_MULTI, TEAM_CMD_PORT_LIST_GET);
	if (!hdr) {
		nlmsg_free(skb);
		return -EMSGSIZE;
	}

	if (nla_put_u32(skb, TEAM_ATTR_TEAM_IFINDEX, team->dev->ifindex))
		goto nla_put_failure;
	port_list = nla_nest_start(skb, TEAM_ATTR_LIST_PORT);
	if (!port_list)
		goto nla_put_failure;

	i = 0;
	incomplete = false;

	/* If one port is selected, the caller wants a port list containing
	 * only this port. Otherwise go through all listed ports and send
	 * them all.
	 */
	if (one_port) {
		err = team_nl_fill_one_port_get(skb, one_port);
		if (err)
			goto errout;
	} else if (port) {
		list_for_each_entry_from(port, &team->port_list, list) {
			err = team_nl_fill_one_port_get(skb, port);
			if (err) {
				if (err == -EMSGSIZE) {
					if (!i)
						goto errout;
					incomplete = true;
					break;
				}
				goto errout;
			}
			i++;
		}
	}

	nla_nest_end(skb, port_list);
	genlmsg_end(skb, hdr);
	if (incomplete)
		goto start_again;

send_done:
	nlh = nlmsg_put(skb, portid, seq, NLMSG_DONE, 0, flags | NLM_F_MULTI);
	if (!nlh) {
		err = __send_and_alloc_skb(&skb, team, portid, send_func);
		if (err)
			return err;
		goto send_done;
	}

	return send_func(skb, team, portid);

nla_put_failure:
	err = -EMSGSIZE;
errout:
	genlmsg_cancel(skb, hdr);
	nlmsg_free(skb);
	return err;
}

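/* TEAM_CMD_PORT_LIST_GET handler: dump all ports of the requested
 * team device to the caller.
 */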
static int team_nl_cmd_port_list_get(struct sk_buff *skb,
				     struct genl_info *info)
{
	struct team *team;
	int err;

	team = team_nl_team_get(info);
	if (!team)
		return -EINVAL;

	err = team_nl_send_port_list_get(team, info->snd_portid, info->snd_seq,
					 NLM_F_ACK, team_nl_send_unicast, NULL);

	team_nl_team_put(team);

	return err;
}

static const struct genl_ops team_nl_ops[] = {
	{
		.cmd = TEAM_CMD_NOOP,
		.doit = team_nl_cmd_noop,
		.policy = team_nl_policy,
	},
	{
		.cmd = TEAM_CMD_OPTIONS_SET,
		.doit = team_nl_cmd_options_set,
		.policy = team_nl_policy,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = TEAM_CMD_OPTIONS_GET,
		.doit = team_nl_cmd_options_get,
		.policy = team_nl_policy,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = TEAM_CMD_PORT_LIST_GET,
		.doit = team_nl_cmd_port_list_get,
		.policy = team_nl_policy,
		.flags = GENL_ADMIN_PERM,
	},
};

static const struct genl_multicast_group team_nl_mcgrps[] = {
	{ .name = TEAM_GENL_CHANGE_EVENT_MC_GRP_NAME, },
};

static struct genl_family team_nl_family = {
	.name = TEAM_GENL_NAME,
	.version = TEAM_GENL_VERSION,
	.maxattr = TEAM_ATTR_MAX,
	.netnsok = true,
	.module = THIS_MODULE,
	.ops = team_nl_ops,
	.n_ops = ARRAY_SIZE(team_nl_ops),
	.mcgrps = team_nl_mcgrps,
	.n_mcgrps = ARRAY_SIZE(team_nl_mcgrps),
};

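/* Event messages are multicast to the change-event group rather than
 * unicast to a single requester; the portid argument is unused here.
 */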
static int team_nl_send_multicast(struct sk_buff *skb,
				  struct team *team, u32 portid)
{
	return genlmsg_multicast_netns(&team_nl_family, dev_net(team->dev),
				       skb, 0, 0, GFP_KERNEL);
}

static int team_nl_send_event_options_get(struct team *team,
					  struct list_head *sel_opt_inst_list)
{
	return team_nl_send_options_get(team, 0, 0, 0, team_nl_send_multicast,
					sel_opt_inst_list);
}

static int team_nl_send_event_port_get(struct team *team,
				       struct team_port *port)
{
	return team_nl_send_port_list_get(team, 0, 0, 0, team_nl_send_multicast,
					  port);
}

static int team_nl_init(void)
{
	return genl_register_family(&team_nl_family);
}

static void team_nl_fini(void)
{
	genl_unregister_family(&team_nl_family);
}

/******************
 * Change checkers
 ******************/

static void __team_options_change_check(struct team *team)
{
	int err;
	struct team_option_inst *opt_inst;
	LIST_HEAD(sel_opt_inst_list);

	list_for_each_entry(opt_inst, &team->option_inst_list, list) {
		if (opt_inst->changed)
			list_add_tail(&opt_inst->tmp_list, &sel_opt_inst_list);
	}
	err = team_nl_send_event_options_get(team, &sel_opt_inst_list);
	if (err && err != -ESRCH)
		netdev_warn(team->dev, "Failed to send options change via netlink (err %d)\n",
			    err);
}
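/* Record a port link state change and broadcast it via netlink;
 * speed and duplex are refreshed from ethtool while the link is up.
 */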
static void __team_port_change_send(struct team_port *port, bool linkup)
{
	int err;

	port->changed = true;
	port->state.linkup = linkup;
	team_refresh_port_linkup(port);
	if (linkup) {
		struct ethtool_link_ksettings ecmd;

		err = __ethtool_get_link_ksettings(port->dev, &ecmd);
		if (!err) {
			port->state.speed = ecmd.base.speed;
			port->state.duplex = ecmd.base.duplex;
			goto send_event;
		}
	}
	port->state.speed = 0;
	port->state.duplex = 0;

send_event:
	err = team_nl_send_event_port_get(port->team, port);
	if (err && err != -ESRCH)
		netdev_warn(port->team->dev, "Failed to send port change of device %s via netlink (err %d)\n",
			    port->dev->name, err);
}

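/* Set the master carrier according to port link states, unless the
 * user controls carrier explicitly.
 */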
static void __team_carrier_check(struct team *team)
{
	struct team_port *port;
	bool team_linkup;

	if (team->user_carrier_enabled)
		return;

	team_linkup = false;
	list_for_each_entry(port, &team->port_list, list) {
		if (port->linkup) {
			team_linkup = true;
			break;
		}
	}

	if (team_linkup)
		netif_carrier_on(team->dev);
	else
		netif_carrier_off(team->dev);
}

static void __team_port_change_check(struct team_port *port, bool linkup)
{
	if (port->state.linkup != linkup)
		__team_port_change_send(port, linkup);
	__team_carrier_check(port->team);
}

static void __team_port_change_port_added(struct team_port *port, bool linkup)
{
	__team_port_change_send(port, linkup);
	__team_carrier_check(port->team);
}

static void __team_port_change_port_removed(struct team_port *port)
{
	port->removed = true;
	__team_port_change_send(port, false);
	__team_carrier_check(port->team);
}

static void team_port_change_check(struct team_port *port, bool linkup)
{
	struct team *team = port->team;

	mutex_lock(&team->lock);
	__team_port_change_check(port, linkup);
	mutex_unlock(&team->lock);
}

/************************
 * Net device notifier event
 ************************/

static int team_device_event(struct notifier_block *unused,
			     unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct team_port *port;

	port = team_port_get_rtnl(dev);
	if (!port)
		return NOTIFY_DONE;

	switch (event) {
	case NETDEV_UP:
		if (netif_carrier_ok(dev))
			team_port_change_check(port, true);
		break;
	case NETDEV_DOWN:
		team_port_change_check(port, false);
		break;
	case NETDEV_CHANGE:
		if (netif_running(port->dev))
			team_port_change_check(port,
					       !!netif_carrier_ok(port->dev));
		break;
	case NETDEV_UNREGISTER:
		team_del_slave(port->team->dev, dev);
		break;
	case NETDEV_FEAT_CHANGE:
		team_compute_features(port->team);
		break;
	case NETDEV_PRECHANGEMTU:
		/* Forbid changing mtu of underlying device */
		if (!port->team->port_mtu_change_allowed)
			return NOTIFY_BAD;
		break;
	case NETDEV_PRE_TYPE_CHANGE:
		/* Forbid changing type of underlying device */
		return NOTIFY_BAD;
	case NETDEV_RESEND_IGMP:
		/* Propagate to master device */
		call_netdevice_notifiers(event, port->team->dev);
		break;
	}
	return NOTIFY_DONE;
}

static struct notifier_block team_notifier_block __read_mostly = {
	.notifier_call = team_device_event,
};

/***********************
 * Module init and exit
 ***********************/

static int __init team_module_init(void)
{
	int err;

	register_netdevice_notifier_rh(&team_notifier_block);

	err = rtnl_link_register(&team_link_ops);
	if (err)
		goto err_rtnl_reg;

	err = team_nl_init();
	if (err)
		goto err_nl_init;

	return 0;

err_nl_init:
	rtnl_link_unregister(&team_link_ops);

err_rtnl_reg:
	unregister_netdevice_notifier_rh(&team_notifier_block);

	return err;
}

static void __exit team_module_exit(void)
{
	team_nl_fini();
	rtnl_link_unregister(&team_link_ops);
	unregister_netdevice_notifier_rh(&team_notifier_block);
}

module_init(team_module_init);
module_exit(team_module_exit);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Jiri Pirko <jpirko@redhat.com>");
MODULE_DESCRIPTION("Ethernet team device driver");
MODULE_ALIAS_RTNL_LINK(DRV_NAME);