/*
 * drivers/net/team/team.c - Network team device driver
 * Copyright (c) 2011 Jiri Pirko <jpirko@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/rcupdate.h>
#include <linux/errno.h>
#include <linux/ctype.h>
#include <linux/notifier.h>
#include <linux/netdevice.h>
#include <linux/netpoll.h>
#include <linux/if_vlan.h>
#include <linux/if_arp.h>
#include <linux/socket.h>
#include <linux/etherdevice.h>
#include <linux/rtnetlink.h>
#include <net/rtnetlink.h>
#include <net/genetlink.h>
#include <net/netlink.h>
#include <net/sch_generic.h>
#include <net/switchdev.h>
#include <generated/utsrelease.h>
#include <linux/if_team.h>

#define DRV_NAME "team"


/**********
 * Helpers
 **********/

#define team_port_exists(dev) (dev->priv_flags & IFF_TEAM_PORT)

static struct team_port *team_port_get_rtnl(const struct net_device *dev)
{
        struct team_port *port = rtnl_dereference(dev->rx_handler_data);

        return team_port_exists(dev) ? port : NULL;
}
/*
 * Since the ability to change the device address of an open port device is
 * verified in team_port_add, this function can be called without checking
 * the return value.
 */
static int __set_port_dev_addr(struct net_device *port_dev,
                               const unsigned char *dev_addr)
{
        struct sockaddr_storage addr;

        memcpy(addr.__data, dev_addr, port_dev->addr_len);
        addr.ss_family = port_dev->type;
        return dev_set_mac_address(port_dev, (struct sockaddr *)&addr);
}

static int team_port_set_orig_dev_addr(struct team_port *port)
{
        return __set_port_dev_addr(port->dev, port->orig.dev_addr);
}

static int team_port_set_team_dev_addr(struct team *team,
                                       struct team_port *port)
{
        return __set_port_dev_addr(port->dev, team->dev->dev_addr);
}

int team_modeop_port_enter(struct team *team, struct team_port *port)
{
        return team_port_set_team_dev_addr(team, port);
}
EXPORT_SYMBOL(team_modeop_port_enter);

void team_modeop_port_change_dev_addr(struct team *team,
                                      struct team_port *port)
{
        team_port_set_team_dev_addr(team, port);
}
EXPORT_SYMBOL(team_modeop_port_change_dev_addr);

static void team_lower_state_changed(struct team_port *port)
{
        struct netdev_lag_lower_state_info info;

        info.link_up = port->linkup;
        info.tx_enabled = team_port_enabled(port);
        netdev_lower_state_changed(port->dev, &info);
}

static void team_refresh_port_linkup(struct team_port *port)
{
        bool new_linkup = port->user.linkup_enabled ? port->user.linkup :
                                                      port->state.linkup;

        if (port->linkup != new_linkup) {
                port->linkup = new_linkup;
                team_lower_state_changed(port);
        }
}
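
/* Example: with user.linkup_enabled set, a port that lost carrier
 * (state.linkup == false) is still considered up as long as user.linkup
 * is true; clearing user.linkup_enabled makes the port track carrier
 * again.
 */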

/*******************
 * Options handling
 *******************/

struct team_option_inst { /* One for each option instance */
        struct list_head list;
        struct list_head tmp_list;
        struct team_option *option;
        struct team_option_inst_info info;
        bool changed;
        bool removed;
};

static struct team_option *__team_find_option(struct team *team,
                                              const char *opt_name)
{
        struct team_option *option;

        list_for_each_entry(option, &team->option_list, list) {
                if (strcmp(option->name, opt_name) == 0)
                        return option;
        }
        return NULL;
}

static void __team_option_inst_del(struct team_option_inst *opt_inst)
{
        list_del(&opt_inst->list);
        kfree(opt_inst);
}

static void __team_option_inst_del_option(struct team *team,
                                          struct team_option *option)
{
        struct team_option_inst *opt_inst, *tmp;

        list_for_each_entry_safe(opt_inst, tmp, &team->option_inst_list, list) {
                if (opt_inst->option == option)
                        __team_option_inst_del(opt_inst);
        }
}

static int __team_option_inst_add(struct team *team, struct team_option *option,
                                  struct team_port *port)
{
        struct team_option_inst *opt_inst;
        unsigned int array_size;
        unsigned int i;
        int err;

        array_size = option->array_size;
        if (!array_size)
                array_size = 1; /* No array but still need one instance */

        for (i = 0; i < array_size; i++) {
                opt_inst = kmalloc(sizeof(*opt_inst), GFP_KERNEL);
                if (!opt_inst)
                        return -ENOMEM;
                opt_inst->option = option;
                opt_inst->info.port = port;
                opt_inst->info.array_index = i;
                opt_inst->changed = true;
                opt_inst->removed = false;
                list_add_tail(&opt_inst->list, &team->option_inst_list);
                if (option->init) {
                        err = option->init(team, &opt_inst->info);
                        if (err)
                                return err;
                }
        }
        return 0;
}

static int __team_option_inst_add_option(struct team *team,
                                         struct team_option *option)
{
        int err;

        if (!option->per_port) {
                err = __team_option_inst_add(team, option, NULL);
                if (err)
                        goto inst_del_option;
        }
        return 0;

inst_del_option:
        __team_option_inst_del_option(team, option);
        return err;
}

static void __team_option_inst_mark_removed_option(struct team *team,
                                                   struct team_option *option)
{
        struct team_option_inst *opt_inst;

        list_for_each_entry(opt_inst, &team->option_inst_list, list) {
                if (opt_inst->option == option) {
                        opt_inst->changed = true;
                        opt_inst->removed = true;
                }
        }
}

static void __team_option_inst_del_port(struct team *team,
                                        struct team_port *port)
{
        struct team_option_inst *opt_inst, *tmp;

        list_for_each_entry_safe(opt_inst, tmp, &team->option_inst_list, list) {
                if (opt_inst->option->per_port &&
                    opt_inst->info.port == port)
                        __team_option_inst_del(opt_inst);
        }
}

static int __team_option_inst_add_port(struct team *team,
                                       struct team_port *port)
{
        struct team_option *option;
        int err;

        list_for_each_entry(option, &team->option_list, list) {
                if (!option->per_port)
                        continue;
                err = __team_option_inst_add(team, option, port);
                if (err)
                        goto inst_del_port;
        }
        return 0;

inst_del_port:
        __team_option_inst_del_port(team, port);
        return err;
}

static void __team_option_inst_mark_removed_port(struct team *team,
                                                 struct team_port *port)
{
        struct team_option_inst *opt_inst;

        list_for_each_entry(opt_inst, &team->option_inst_list, list) {
                if (opt_inst->info.port == port) {
                        opt_inst->changed = true;
                        opt_inst->removed = true;
                }
        }
}

static bool __team_option_inst_tmp_find(const struct list_head *opts,
                                        const struct team_option_inst *needle)
{
        struct team_option_inst *opt_inst;

        list_for_each_entry(opt_inst, opts, tmp_list)
                if (opt_inst == needle)
                        return true;
        return false;
}

static int __team_options_register(struct team *team,
                                   const struct team_option *option,
                                   size_t option_count)
{
        int i;
        struct team_option **dst_opts;
        int err;

        dst_opts = kcalloc(option_count, sizeof(struct team_option *),
                           GFP_KERNEL);
        if (!dst_opts)
                return -ENOMEM;
        for (i = 0; i < option_count; i++, option++) {
                if (__team_find_option(team, option->name)) {
                        err = -EEXIST;
                        goto alloc_rollback;
                }
                dst_opts[i] = kmemdup(option, sizeof(*option), GFP_KERNEL);
                if (!dst_opts[i]) {
                        err = -ENOMEM;
                        goto alloc_rollback;
                }
        }

        for (i = 0; i < option_count; i++) {
                err = __team_option_inst_add_option(team, dst_opts[i]);
                if (err)
                        goto inst_rollback;
                list_add_tail(&dst_opts[i]->list, &team->option_list);
        }

        kfree(dst_opts);
        return 0;

inst_rollback:
        for (i--; i >= 0; i--)
                __team_option_inst_del_option(team, dst_opts[i]);

        /* all option_count kmemdup'd copies exist at this point */
        i = option_count;
alloc_rollback:
        for (i--; i >= 0; i--)
                kfree(dst_opts[i]);

        kfree(dst_opts);
        return err;
}

static void __team_options_mark_removed(struct team *team,
                                        const struct team_option *option,
                                        size_t option_count)
{
        int i;

        for (i = 0; i < option_count; i++, option++) {
                struct team_option *del_opt;

                del_opt = __team_find_option(team, option->name);
                if (del_opt)
                        __team_option_inst_mark_removed_option(team, del_opt);
        }
}

static void __team_options_unregister(struct team *team,
                                      const struct team_option *option,
                                      size_t option_count)
{
        int i;

        for (i = 0; i < option_count; i++, option++) {
                struct team_option *del_opt;

                del_opt = __team_find_option(team, option->name);
                if (del_opt) {
                        __team_option_inst_del_option(team, del_opt);
                        list_del(&del_opt->list);
                        kfree(del_opt);
                }
        }
}

static void __team_options_change_check(struct team *team);

int team_options_register(struct team *team,
                          const struct team_option *option,
                          size_t option_count)
{
        int err;

        err = __team_options_register(team, option, option_count);
        if (err)
                return err;
        __team_options_change_check(team);
        return 0;
}
EXPORT_SYMBOL(team_options_register);

void team_options_unregister(struct team *team,
                             const struct team_option *option,
                             size_t option_count)
{
        __team_options_mark_removed(team, option, option_count);
        __team_options_change_check(team);
        __team_options_unregister(team, option, option_count);
}
EXPORT_SYMBOL(team_options_unregister);
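
/*
 * Illustrative sketch (not part of this driver): a mode module would
 * typically call team_options_register() from its init callback to expose
 * its own options. The option below is hypothetical; see
 * drivers/net/team/team_mode_activebackup.c for a real user.
 */
#if 0
static int example_active_port_get(struct team *team,
                                   struct team_gsetter_ctx *ctx)
{
        ctx->data.u32_val = 0; /* would report e.g. the active port ifindex */
        return 0;
}

static const struct team_option example_options[] = {
        {
                .name   = "example_active_port",
                .type   = TEAM_OPTION_TYPE_U32,
                .getter = example_active_port_get,
        },
};

static int example_init(struct team *team)
{
        return team_options_register(team, example_options,
                                     ARRAY_SIZE(example_options));
}
#endif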

static int team_option_get(struct team *team,
                           struct team_option_inst *opt_inst,
                           struct team_gsetter_ctx *ctx)
{
        if (!opt_inst->option->getter)
                return -EOPNOTSUPP;
        return opt_inst->option->getter(team, ctx);
}

static int team_option_set(struct team *team,
                           struct team_option_inst *opt_inst,
                           struct team_gsetter_ctx *ctx)
{
        if (!opt_inst->option->setter)
                return -EOPNOTSUPP;
        return opt_inst->option->setter(team, ctx);
}

void team_option_inst_set_change(struct team_option_inst_info *opt_inst_info)
{
        struct team_option_inst *opt_inst;

        opt_inst = container_of(opt_inst_info, struct team_option_inst, info);
        opt_inst->changed = true;
}
EXPORT_SYMBOL(team_option_inst_set_change);

void team_options_change_check(struct team *team)
{
        __team_options_change_check(team);
}
EXPORT_SYMBOL(team_options_change_check);


/****************
 * Mode handling
 ****************/

static LIST_HEAD(mode_list);
static DEFINE_SPINLOCK(mode_list_lock);

struct team_mode_item {
        struct list_head list;
        const struct team_mode *mode;
};

static struct team_mode_item *__find_mode(const char *kind)
{
        struct team_mode_item *mitem;

        list_for_each_entry(mitem, &mode_list, list) {
                if (strcmp(mitem->mode->kind, kind) == 0)
                        return mitem;
        }
        return NULL;
}

static bool is_good_mode_name(const char *name)
{
        while (*name != '\0') {
                if (!isalpha(*name) && !isdigit(*name) && *name != '_')
                        return false;
                name++;
        }
        return true;
}

int team_mode_register(const struct team_mode *mode)
{
        int err = 0;
        struct team_mode_item *mitem;

        if (!is_good_mode_name(mode->kind) ||
            mode->priv_size > TEAM_MODE_PRIV_SIZE)
                return -EINVAL;

        mitem = kmalloc(sizeof(*mitem), GFP_KERNEL);
        if (!mitem)
                return -ENOMEM;

        spin_lock(&mode_list_lock);
        if (__find_mode(mode->kind)) {
                err = -EEXIST;
                kfree(mitem);
                goto unlock;
        }
        mitem->mode = mode;
        list_add_tail(&mitem->list, &mode_list);
unlock:
        spin_unlock(&mode_list_lock);
        return err;
}
EXPORT_SYMBOL(team_mode_register);

void team_mode_unregister(const struct team_mode *mode)
{
        struct team_mode_item *mitem;

        spin_lock(&mode_list_lock);
        mitem = __find_mode(mode->kind);
        if (mitem) {
                list_del_init(&mitem->list);
                kfree(mitem);
        }
        spin_unlock(&mode_list_lock);
}
EXPORT_SYMBOL(team_mode_unregister);
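
/*
 * Illustrative sketch (not part of this driver): a minimal mode module
 * registers a struct team_mode on load and unregisters it on unload.
 * All "example" names are hypothetical; compare
 * drivers/net/team/team_mode_roundrobin.c.
 */
#if 0
static bool example_transmit(struct team *team, struct sk_buff *skb)
{
        struct team_port *port;

        /* always pick the first enabled port; real modes choose smarter */
        port = team_get_port_by_index_rcu(team, 0);
        if (unlikely(!port)) {
                dev_kfree_skb_any(skb);
                return false; /* caller accounts tx_dropped */
        }
        return team_dev_queue_xmit(team, port, skb) == 0;
}

static const struct team_mode_ops example_mode_ops = {
        .transmit = example_transmit,
};

static const struct team_mode example_mode = {
        .kind  = "example",
        .owner = THIS_MODULE,
        .ops   = &example_mode_ops,
};

static int __init example_module_init(void)
{
        return team_mode_register(&example_mode);
}

static void __exit example_module_exit(void)
{
        team_mode_unregister(&example_mode);
}

module_init(example_module_init);
module_exit(example_module_exit);
MODULE_ALIAS("team-mode-example"); /* matches request_module() below */
#endif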

static const struct team_mode *team_mode_get(const char *kind)
{
        struct team_mode_item *mitem;
        const struct team_mode *mode = NULL;

        spin_lock(&mode_list_lock);
        mitem = __find_mode(kind);
        if (!mitem) {
                spin_unlock(&mode_list_lock);
                request_module("team-mode-%s", kind);
                spin_lock(&mode_list_lock);
                mitem = __find_mode(kind);
        }
        if (mitem) {
                mode = mitem->mode;
                if (!try_module_get(mode->owner))
                        mode = NULL;
        }

        spin_unlock(&mode_list_lock);
        return mode;
}

static void team_mode_put(const struct team_mode *mode)
{
        module_put(mode->owner);
}

static bool team_dummy_transmit(struct team *team, struct sk_buff *skb)
{
        dev_kfree_skb_any(skb);
        return false;
}

static rx_handler_result_t team_dummy_receive(struct team *team,
                                              struct team_port *port,
                                              struct sk_buff *skb)
{
        return RX_HANDLER_ANOTHER;
}

static const struct team_mode __team_no_mode = {
        .kind           = "*NOMODE*",
};

static bool team_is_mode_set(struct team *team)
{
        return team->mode != &__team_no_mode;
}

static void team_set_no_mode(struct team *team)
{
        team->user_carrier_enabled = false;
        team->mode = &__team_no_mode;
}

static void team_adjust_ops(struct team *team)
{
        /*
         * To avoid checks in rx/tx skb paths, ensure here that non-null and
         * correct ops are always set.
         */

        if (!team->en_port_count || !team_is_mode_set(team) ||
            !team->mode->ops->transmit)
                team->ops.transmit = team_dummy_transmit;
        else
                team->ops.transmit = team->mode->ops->transmit;

        if (!team->en_port_count || !team_is_mode_set(team) ||
            !team->mode->ops->receive)
                team->ops.receive = team_dummy_receive;
        else
                team->ops.receive = team->mode->ops->receive;
}
/*
 * We can benefit from the fact that no port is present at the time of mode
 * change. Therefore no packets are in flight and there's no need to switch
 * the mode operations in any special way.
 */
static int __team_change_mode(struct team *team,
                              const struct team_mode *new_mode)
{
        /* Check if mode was previously set and do cleanup if so */
        if (team_is_mode_set(team)) {
                void (*exit_op)(struct team *team) = team->ops.exit;

                /* Clear ops area so no callback is called any longer */
                memset(&team->ops, 0, sizeof(struct team_mode_ops));
                team_adjust_ops(team);

                if (exit_op)
                        exit_op(team);
                team_mode_put(team->mode);
                team_set_no_mode(team);
                /* zero private data area */
                memset(&team->mode_priv, 0,
                       sizeof(struct team) - offsetof(struct team, mode_priv));
        }

        if (!new_mode)
                return 0;

        if (new_mode->ops->init) {
                int err;

                err = new_mode->ops->init(team);
                if (err)
                        return err;
        }

        team->mode = new_mode;
        memcpy(&team->ops, new_mode->ops, sizeof(struct team_mode_ops));
        team_adjust_ops(team);

        return 0;
}

static int team_change_mode(struct team *team, const char *kind)
{
        const struct team_mode *new_mode;
        struct net_device *dev = team->dev;
        int err;

        if (!list_empty(&team->port_list)) {
                netdev_err(dev, "No ports can be present during mode change\n");
                return -EBUSY;
        }

        if (team_is_mode_set(team) && strcmp(team->mode->kind, kind) == 0) {
                netdev_err(dev, "Unable to change to the same mode the team is in\n");
                return -EINVAL;
        }

        new_mode = team_mode_get(kind);
        if (!new_mode) {
                netdev_err(dev, "Mode \"%s\" not found\n", kind);
                return -EINVAL;
        }

        err = __team_change_mode(team, new_mode);
        if (err) {
                netdev_err(dev, "Failed to change to mode \"%s\"\n", kind);
                team_mode_put(new_mode);
                return err;
        }

        netdev_info(dev, "Mode changed to \"%s\"\n", kind);
        return 0;
}
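
/* The mode is switched through the option interface like everything else:
 * team_change_mode() is invoked by the "mode" option setter
 * (team_mode_option_set()) further down.
 */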


/*********************
 * Peers notification
 *********************/

static void team_notify_peers_work(struct work_struct *work)
{
        struct team *team;
        int val;

        team = container_of(work, struct team, notify_peers.dw.work);

        if (!rtnl_trylock()) {
                schedule_delayed_work(&team->notify_peers.dw, 0);
                return;
        }
        val = atomic_dec_if_positive(&team->notify_peers.count_pending);
        if (val < 0) {
                rtnl_unlock();
                return;
        }
        call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, team->dev);
        rtnl_unlock();
        if (val)
                schedule_delayed_work(&team->notify_peers.dw,
                                      msecs_to_jiffies(team->notify_peers.interval));
}

static void team_notify_peers(struct team *team)
{
        if (!team->notify_peers.count || !netif_running(team->dev))
                return;
        atomic_add(team->notify_peers.count, &team->notify_peers.count_pending);
        schedule_delayed_work(&team->notify_peers.dw, 0);
}

static void team_notify_peers_init(struct team *team)
{
        INIT_DELAYED_WORK(&team->notify_peers.dw, team_notify_peers_work);
}

static void team_notify_peers_fini(struct team *team)
{
        cancel_delayed_work_sync(&team->notify_peers.dw);
}
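
/* Timing example (illustrative): with count == 3 and interval == 100,
 * team_notify_peers() queues three NETDEV_NOTIFY_PEERS notifications,
 * the first immediately and the rest roughly 100 ms apart; count_pending
 * lets overlapping requests accumulate rounds instead of losing them.
 */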


/*******************************
 * Send multicast group rejoins
 *******************************/

static void team_mcast_rejoin_work(struct work_struct *work)
{
        struct team *team;
        int val;

        team = container_of(work, struct team, mcast_rejoin.dw.work);

        if (!rtnl_trylock()) {
                schedule_delayed_work(&team->mcast_rejoin.dw, 0);
                return;
        }
        val = atomic_dec_if_positive(&team->mcast_rejoin.count_pending);
        if (val < 0) {
                rtnl_unlock();
                return;
        }
        call_netdevice_notifiers(NETDEV_RESEND_IGMP, team->dev);
        rtnl_unlock();
        if (val)
                schedule_delayed_work(&team->mcast_rejoin.dw,
                                      msecs_to_jiffies(team->mcast_rejoin.interval));
}

static void team_mcast_rejoin(struct team *team)
{
        if (!team->mcast_rejoin.count || !netif_running(team->dev))
                return;
        atomic_add(team->mcast_rejoin.count, &team->mcast_rejoin.count_pending);
        schedule_delayed_work(&team->mcast_rejoin.dw, 0);
}

static void team_mcast_rejoin_init(struct team *team)
{
        INIT_DELAYED_WORK(&team->mcast_rejoin.dw, team_mcast_rejoin_work);
}

static void team_mcast_rejoin_fini(struct team *team)
{
        cancel_delayed_work_sync(&team->mcast_rejoin.dw);
}


/************************
 * Rx path frame handler
 ************************/

/* note: already called with rcu_read_lock */
static rx_handler_result_t team_handle_frame(struct sk_buff **pskb)
{
        struct sk_buff *skb = *pskb;
        struct team_port *port;
        struct team *team;
        rx_handler_result_t res;

        skb = skb_share_check(skb, GFP_ATOMIC);
        if (!skb)
                return RX_HANDLER_CONSUMED;

        *pskb = skb;

        port = team_port_get_rcu(skb->dev);
        team = port->team;
        if (!team_port_enabled(port)) {
                /* allow exact match delivery for disabled ports */
                res = RX_HANDLER_EXACT;
        } else {
                res = team->ops.receive(team, port, skb);
        }
        if (res == RX_HANDLER_ANOTHER) {
                struct team_pcpu_stats *pcpu_stats;

                pcpu_stats = this_cpu_ptr(team->pcpu_stats);
                u64_stats_update_begin(&pcpu_stats->syncp);
                pcpu_stats->rx_packets++;
                pcpu_stats->rx_bytes += skb->len;
                if (skb->pkt_type == PACKET_MULTICAST)
                        pcpu_stats->rx_multicast++;
                u64_stats_update_end(&pcpu_stats->syncp);

                skb->dev = team->dev;
        } else if (res == RX_HANDLER_EXACT) {
                this_cpu_inc(team->pcpu_stats->rx_nohandler);
        } else {
                this_cpu_inc(team->pcpu_stats->rx_dropped);
        }

        return res;
}


/*************************************
 * Multiqueue Tx port select override
 *************************************/

static int team_queue_override_init(struct team *team)
{
        struct list_head *listarr;
        unsigned int queue_cnt = team->dev->num_tx_queues - 1;
        unsigned int i;

        if (!queue_cnt)
                return 0;
        listarr = kmalloc_array(queue_cnt, sizeof(struct list_head),
                                GFP_KERNEL);
        if (!listarr)
                return -ENOMEM;
        team->qom_lists = listarr;
        for (i = 0; i < queue_cnt; i++)
                INIT_LIST_HEAD(listarr++);
        return 0;
}

static void team_queue_override_fini(struct team *team)
{
        kfree(team->qom_lists);
}

static struct list_head *__team_get_qom_list(struct team *team, u16 queue_id)
{
        return &team->qom_lists[queue_id - 1];
}

/*
 * note: already called with rcu_read_lock
 */
static bool team_queue_override_transmit(struct team *team, struct sk_buff *skb)
{
        struct list_head *qom_list;
        struct team_port *port;

        if (!team->queue_override_enabled || !skb->queue_mapping)
                return false;
        qom_list = __team_get_qom_list(team, skb->queue_mapping);
        list_for_each_entry_rcu(port, qom_list, qom_list) {
                if (!team_dev_queue_xmit(team, port, skb))
                        return true;
        }
        return false;
}

static void __team_queue_override_port_del(struct team *team,
                                           struct team_port *port)
{
        if (!port->queue_id)
                return;
        list_del_rcu(&port->qom_list);
}

static bool team_queue_override_port_has_gt_prio_than(struct team_port *port,
                                                      struct team_port *cur)
{
        if (port->priority < cur->priority)
                return false;
        if (port->priority > cur->priority)
                return true;
        if (port->index < cur->index)
                return true;
        return false;
}
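
/* Ordering example: ports with (priority, index) of (10, 2), (10, 0) and
 * (5, 1) end up queued as (10, 0), (10, 2), (5, 1): higher priority first,
 * ties broken by lower port index.
 */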

static void __team_queue_override_port_add(struct team *team,
                                           struct team_port *port)
{
        struct team_port *cur;
        struct list_head *qom_list;
        struct list_head *node;

        if (!port->queue_id)
                return;
        qom_list = __team_get_qom_list(team, port->queue_id);
        node = qom_list;
        list_for_each_entry(cur, qom_list, qom_list) {
                if (team_queue_override_port_has_gt_prio_than(port, cur))
                        break;
                node = &cur->qom_list;
        }
        /* insert right after the last port that outranks this one */
        list_add_rcu(&port->qom_list, node);
}

static void __team_queue_override_enabled_check(struct team *team)
{
        struct team_port *port;
        bool enabled = false;

        list_for_each_entry(port, &team->port_list, list) {
                if (port->queue_id) {
                        enabled = true;
                        break;
                }
        }
        if (enabled == team->queue_override_enabled)
                return;
        netdev_dbg(team->dev, "%s queue override\n",
                   enabled ? "Enabling" : "Disabling");
        team->queue_override_enabled = enabled;
}

static void team_queue_override_port_prio_changed(struct team *team,
                                                  struct team_port *port)
{
        /* only enabled ports are linked on the qom lists */
        if (!port->queue_id || !team_port_enabled(port))
                return;
        __team_queue_override_port_del(team, port);
        __team_queue_override_port_add(team, port);
        __team_queue_override_enabled_check(team);
}

static void team_queue_override_port_change_queue_id(struct team *team,
                                                     struct team_port *port,
                                                     u16 new_queue_id)
{
        if (team_port_enabled(port)) {
                __team_queue_override_port_del(team, port);
                port->queue_id = new_queue_id;
                __team_queue_override_port_add(team, port);
                __team_queue_override_enabled_check(team);
        } else {
                port->queue_id = new_queue_id;
        }
}

static void team_queue_override_port_add(struct team *team,
                                         struct team_port *port)
{
        __team_queue_override_port_add(team, port);
        __team_queue_override_enabled_check(team);
}

static void team_queue_override_port_del(struct team *team,
                                         struct team_port *port)
{
        __team_queue_override_port_del(team, port);
        __team_queue_override_enabled_check(team);
}


/****************
 * Port handling
 ****************/

static bool team_port_find(const struct team *team,
                           const struct team_port *port)
{
        struct team_port *cur;

        list_for_each_entry(cur, &team->port_list, list)
                if (cur == port)
                        return true;
        return false;
}

/*
 * Enable/disable port by adding it to the enabled-port hashlist and setting
 * port->index (this might be racy, so a reader could see an incorrect index
 * while processing an in-flight packet, but that is not a problem). Writes
 * are guarded by team->lock.
 */
static void team_port_enable(struct team *team,
                             struct team_port *port)
{
        if (team_port_enabled(port))
                return;
        port->index = team->en_port_count++;
        hlist_add_head_rcu(&port->hlist,
                           team_port_index_hash(team, port->index));
        team_adjust_ops(team);
        team_queue_override_port_add(team, port);
        if (team->ops.port_enabled)
                team->ops.port_enabled(team, port);
        team_notify_peers(team);
        team_mcast_rejoin(team);
        team_lower_state_changed(port);
}

static void __reconstruct_port_hlist(struct team *team, int rm_index)
{
        int i;
        struct team_port *port;

        for (i = rm_index + 1; i < team->en_port_count; i++) {
                port = team_get_port_by_index(team, i);
                hlist_del_rcu(&port->hlist);
                port->index--;
                hlist_add_head_rcu(&port->hlist,
                                   team_port_index_hash(team, port->index));
        }
}

static void team_port_disable(struct team *team,
                              struct team_port *port)
{
        if (!team_port_enabled(port))
                return;
        if (team->ops.port_disabled)
                team->ops.port_disabled(team, port);
        hlist_del_rcu(&port->hlist);
        __reconstruct_port_hlist(team, port->index);
        port->index = -1;
        team->en_port_count--;
        team_queue_override_port_del(team, port);
        team_adjust_ops(team);
        team_lower_state_changed(port);
}

#define TEAM_VLAN_FEATURES (NETIF_F_HW_CSUM | NETIF_F_SG | \
                            NETIF_F_FRAGLIST | NETIF_F_ALL_TSO | \
                            NETIF_F_HIGHDMA | NETIF_F_LRO)

#define TEAM_ENC_FEATURES       (NETIF_F_HW_CSUM | NETIF_F_SG | \
                                 NETIF_F_RXCSUM | NETIF_F_ALL_TSO)

static void __team_compute_features(struct team *team)
{
        struct team_port *port;
        netdev_features_t vlan_features = TEAM_VLAN_FEATURES &
                                          NETIF_F_ALL_FOR_ALL;
        netdev_features_t enc_features  = TEAM_ENC_FEATURES;
        unsigned short max_hard_header_len = ETH_HLEN;
        unsigned int dst_release_flag = IFF_XMIT_DST_RELEASE |
                                        IFF_XMIT_DST_RELEASE_PERM;

        list_for_each_entry(port, &team->port_list, list) {
                vlan_features = netdev_increment_features(vlan_features,
                                        port->dev->vlan_features,
                                        TEAM_VLAN_FEATURES);
                enc_features =
                        netdev_increment_features(enc_features,
                                                  port->dev->hw_enc_features,
                                                  TEAM_ENC_FEATURES);

                dst_release_flag &= port->dev->priv_flags;
                if (port->dev->hard_header_len > max_hard_header_len)
                        max_hard_header_len = port->dev->hard_header_len;
        }

        team->dev->vlan_features = vlan_features;
        team->dev->hw_enc_features = enc_features | NETIF_F_GSO_ENCAP_ALL |
                                     NETIF_F_GSO_UDP_L4;
        team->dev->hard_header_len = max_hard_header_len;

        team->dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
        if (dst_release_flag == (IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM))
                team->dev->priv_flags |= IFF_XMIT_DST_RELEASE;
}
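
/* The merge above relies on netdev_increment_features(): features marked
 * NETIF_F_ALL_FOR_ALL must be supported by every port to stay set, while
 * NETIF_F_ONE_FOR_ALL features survive if any single port has them, so the
 * team device never advertises more than its ports can deliver.
 */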

static void team_compute_features(struct team *team)
{
        mutex_lock(&team->lock);
        __team_compute_features(team);
        mutex_unlock(&team->lock);
        netdev_change_features(team->dev);
}

static int team_port_enter(struct team *team, struct team_port *port)
{
        int err = 0;

        dev_hold(team->dev);
        if (team->ops.port_enter) {
                err = team->ops.port_enter(team, port);
                if (err) {
                        netdev_err(team->dev, "Device %s failed to enter team mode\n",
                                   port->dev->name);
                        goto err_port_enter;
                }
        }

        return 0;

err_port_enter:
        dev_put(team->dev);

        return err;
}

static void team_port_leave(struct team *team, struct team_port *port)
{
        if (team->ops.port_leave)
                team->ops.port_leave(team, port);
        dev_put(team->dev);
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static int __team_port_enable_netpoll(struct team_port *port)
{
        struct netpoll *np;
        int err;

        np = kzalloc(sizeof(*np), GFP_KERNEL);
        if (!np)
                return -ENOMEM;

        err = __netpoll_setup(np, port->dev);
        if (err) {
                kfree(np);
                return err;
        }
        port->np = np;
        return err;
}

static int team_port_enable_netpoll(struct team_port *port)
{
        if (!port->team->dev->npinfo)
                return 0;

        return __team_port_enable_netpoll(port);
}

static void team_port_disable_netpoll(struct team_port *port)
{
        struct netpoll *np = port->np;

        if (!np)
                return;
        port->np = NULL;

        __netpoll_free(np);
}
#else
static int team_port_enable_netpoll(struct team_port *port)
{
        return 0;
}
static void team_port_disable_netpoll(struct team_port *port)
{
}
#endif

static int team_upper_dev_link(struct team *team, struct team_port *port,
                               struct netlink_ext_ack *extack)
{
        struct netdev_lag_upper_info lag_upper_info;
        int err;

        lag_upper_info.tx_type = team->mode->lag_tx_type;
        lag_upper_info.hash_type = NETDEV_LAG_HASH_UNKNOWN;
        err = netdev_master_upper_dev_link(port->dev, team->dev, NULL,
                                           &lag_upper_info, extack);
        if (err)
                return err;
        port->dev->priv_flags |= IFF_TEAM_PORT;
        return 0;
}

static void team_upper_dev_unlink(struct team *team, struct team_port *port)
{
        netdev_upper_dev_unlink(port->dev, team->dev);
        port->dev->priv_flags &= ~IFF_TEAM_PORT;
}

static void __team_port_change_port_added(struct team_port *port, bool linkup);
static int team_dev_type_check_change(struct net_device *dev,
                                      struct net_device *port_dev);

static int team_port_add(struct team *team, struct net_device *port_dev,
                         struct netlink_ext_ack *extack)
{
        struct net_device *dev = team->dev;
        struct team_port *port;
        char *portname = port_dev->name;
        int err;

        if (port_dev->flags & IFF_LOOPBACK) {
                NL_SET_ERR_MSG(extack, "Loopback device can't be added as a team port");
                netdev_err(dev, "Device %s is loopback device. Loopback devices can't be added as a team port\n",
                           portname);
                return -EINVAL;
        }

        if (team_port_exists(port_dev)) {
                NL_SET_ERR_MSG(extack, "Device is already a port of a team device");
                netdev_err(dev, "Device %s is already a port of a team device\n",
                           portname);
                return -EBUSY;
        }

        if (dev == port_dev) {
                NL_SET_ERR_MSG(extack, "Cannot enslave team device to itself");
                netdev_err(dev, "Cannot enslave team device to itself\n");
                return -EINVAL;
        }

        if (port_dev->features & NETIF_F_VLAN_CHALLENGED &&
            vlan_uses_dev(dev)) {
                NL_SET_ERR_MSG(extack, "Device is VLAN challenged and team device has VLAN set up");
                netdev_err(dev, "Device %s is VLAN challenged and team device has VLAN set up\n",
                           portname);
                return -EPERM;
        }

        err = team_dev_type_check_change(dev, port_dev);
        if (err)
                return err;

        if (port_dev->flags & IFF_UP) {
                NL_SET_ERR_MSG(extack, "Device is up. Set it down before adding it as a team port");
                netdev_err(dev, "Device %s is up. Set it down before adding it as a team port\n",
                           portname);
                return -EBUSY;
        }

        port = kzalloc(sizeof(struct team_port) + team->mode->port_priv_size,
                       GFP_KERNEL);
        if (!port)
                return -ENOMEM;

        port->dev = port_dev;
        port->team = team;
        INIT_LIST_HEAD(&port->qom_list);

        port->orig.mtu = port_dev->mtu;
        err = dev_set_mtu(port_dev, dev->mtu);
        if (err) {
                netdev_dbg(dev, "Error %d calling dev_set_mtu\n", err);
                goto err_set_mtu;
        }

        memcpy(port->orig.dev_addr, port_dev->dev_addr, port_dev->addr_len);

        err = team_port_enter(team, port);
        if (err) {
                netdev_err(dev, "Device %s failed to enter team mode\n",
                           portname);
                goto err_port_enter;
        }

        err = dev_open(port_dev);
        if (err) {
                netdev_dbg(dev, "Device %s opening failed\n",
                           portname);
                goto err_dev_open;
        }

        err = vlan_vids_add_by_dev(port_dev, dev);
        if (err) {
                netdev_err(dev, "Failed to add vlan ids to device %s\n",
                           portname);
                goto err_vids_add;
        }

        err = team_port_enable_netpoll(port);
        if (err) {
                netdev_err(dev, "Failed to enable netpoll on device %s\n",
                           portname);
                goto err_enable_netpoll;
        }

        if (!(dev->features & NETIF_F_LRO))
                dev_disable_lro(port_dev);

        err = netdev_rx_handler_register(port_dev, team_handle_frame,
                                         port);
        if (err) {
                netdev_err(dev, "Device %s failed to register rx_handler\n",
                           portname);
                goto err_handler_register;
        }

        err = team_upper_dev_link(team, port, extack);
        if (err) {
                netdev_err(dev, "Device %s failed to set upper link\n",
                           portname);
                goto err_set_upper_link;
        }

        err = __team_option_inst_add_port(team, port);
        if (err) {
                netdev_err(dev, "Device %s failed to add per-port options\n",
                           portname);
                goto err_option_port_add;
        }

        netif_addr_lock_bh(dev);
        dev_uc_sync_multiple(port_dev, dev);
        dev_mc_sync_multiple(port_dev, dev);
        netif_addr_unlock_bh(dev);

        port->index = -1;
        list_add_tail_rcu(&port->list, &team->port_list);
        team_port_enable(team, port);
        __team_compute_features(team);
        __team_port_change_port_added(port, !!netif_carrier_ok(port_dev));
        __team_options_change_check(team);

        netdev_info(dev, "Port device %s added\n", portname);

        return 0;

err_option_port_add:
        team_upper_dev_unlink(team, port);

err_set_upper_link:
        netdev_rx_handler_unregister(port_dev);

err_handler_register:
        team_port_disable_netpoll(port);

err_enable_netpoll:
        vlan_vids_del_by_dev(port_dev, dev);

err_vids_add:
        dev_close(port_dev);

err_dev_open:
        team_port_leave(team, port);
        team_port_set_orig_dev_addr(port);

err_port_enter:
        dev_set_mtu(port_dev, port->orig.mtu);

err_set_mtu:
        kfree(port);

        return err;
}
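
/*
 * From userspace, the sequence above is typically driven by something
 * like (illustrative):
 *
 *      ip link add name team0 type team
 *      ip link set eth0 down
 *      ip link set eth0 master team0
 *
 * where the last command reaches team_port_add() via the ndo_add_slave
 * hook.
 */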

static void __team_port_change_port_removed(struct team_port *port);

static int team_port_del(struct team *team, struct net_device *port_dev)
{
        struct net_device *dev = team->dev;
        struct team_port *port;
        char *portname = port_dev->name;

        port = team_port_get_rtnl(port_dev);
        if (!port || !team_port_find(team, port)) {
                netdev_err(dev, "Device %s does not act as a port of this team\n",
                           portname);
                return -ENOENT;
        }

        team_port_disable(team, port);
        list_del_rcu(&port->list);
        team_upper_dev_unlink(team, port);
        netdev_rx_handler_unregister(port_dev);
        team_port_disable_netpoll(port);
        vlan_vids_del_by_dev(port_dev, dev);
        dev_uc_unsync(port_dev, dev);
        dev_mc_unsync(port_dev, dev);
        dev_close(port_dev);
        team_port_leave(team, port);

        __team_option_inst_mark_removed_port(team, port);
        __team_options_change_check(team);
        __team_option_inst_del_port(team, port);
        __team_port_change_port_removed(port);

        team_port_set_orig_dev_addr(port);
        dev_set_mtu(port_dev, port->orig.mtu);
        kfree_rcu(port, rcu);
        netdev_info(dev, "Port device %s removed\n", portname);
        __team_compute_features(team);

        return 0;
}


/*****************
 * Net device ops
 *****************/

static int team_mode_option_get(struct team *team, struct team_gsetter_ctx *ctx)
{
        ctx->data.str_val = team->mode->kind;
        return 0;
}

static int team_mode_option_set(struct team *team, struct team_gsetter_ctx *ctx)
{
        return team_change_mode(team, ctx->data.str_val);
}

static int team_notify_peers_count_get(struct team *team,
                                       struct team_gsetter_ctx *ctx)
{
        ctx->data.u32_val = team->notify_peers.count;
        return 0;
}

static int team_notify_peers_count_set(struct team *team,
                                       struct team_gsetter_ctx *ctx)
{
        team->notify_peers.count = ctx->data.u32_val;
        return 0;
}

static int team_notify_peers_interval_get(struct team *team,
                                          struct team_gsetter_ctx *ctx)
{
        ctx->data.u32_val = team->notify_peers.interval;
        return 0;
}

static int team_notify_peers_interval_set(struct team *team,
                                          struct team_gsetter_ctx *ctx)
{
        team->notify_peers.interval = ctx->data.u32_val;
        return 0;
}

static int team_mcast_rejoin_count_get(struct team *team,
                                       struct team_gsetter_ctx *ctx)
{
        ctx->data.u32_val = team->mcast_rejoin.count;
        return 0;
}

static int team_mcast_rejoin_count_set(struct team *team,
                                       struct team_gsetter_ctx *ctx)
{
        team->mcast_rejoin.count = ctx->data.u32_val;
        return 0;
}

static int team_mcast_rejoin_interval_get(struct team *team,
                                          struct team_gsetter_ctx *ctx)
{
        ctx->data.u32_val = team->mcast_rejoin.interval;
        return 0;
}

static int team_mcast_rejoin_interval_set(struct team *team,
                                          struct team_gsetter_ctx *ctx)
{
        team->mcast_rejoin.interval = ctx->data.u32_val;
        return 0;
}

static int team_port_en_option_get(struct team *team,
                                   struct team_gsetter_ctx *ctx)
{
        struct team_port *port = ctx->info->port;

        ctx->data.bool_val = team_port_enabled(port);
        return 0;
}

static int team_port_en_option_set(struct team *team,
                                   struct team_gsetter_ctx *ctx)
{
        struct team_port *port = ctx->info->port;

        if (ctx->data.bool_val)
                team_port_enable(team, port);
        else
                team_port_disable(team, port);
        return 0;
}

static int team_user_linkup_option_get(struct team *team,
                                       struct team_gsetter_ctx *ctx)
{
        struct team_port *port = ctx->info->port;

        ctx->data.bool_val = port->user.linkup;
        return 0;
}

static void __team_carrier_check(struct team *team);

static int team_user_linkup_option_set(struct team *team,
                                       struct team_gsetter_ctx *ctx)
{
        struct team_port *port = ctx->info->port;

        port->user.linkup = ctx->data.bool_val;
        team_refresh_port_linkup(port);
        __team_carrier_check(port->team);
        return 0;
}

static int team_user_linkup_en_option_get(struct team *team,
                                          struct team_gsetter_ctx *ctx)
{
        struct team_port *port = ctx->info->port;

        ctx->data.bool_val = port->user.linkup_enabled;
        return 0;
}

static int team_user_linkup_en_option_set(struct team *team,
                                          struct team_gsetter_ctx *ctx)
{
        struct team_port *port = ctx->info->port;

        port->user.linkup_enabled = ctx->data.bool_val;
        team_refresh_port_linkup(port);
        __team_carrier_check(port->team);
        return 0;
}

static int team_priority_option_get(struct team *team,
                                    struct team_gsetter_ctx *ctx)
{
        struct team_port *port = ctx->info->port;

        ctx->data.s32_val = port->priority;
        return 0;
}

static int team_priority_option_set(struct team *team,
                                    struct team_gsetter_ctx *ctx)
{
        struct team_port *port = ctx->info->port;
        s32 priority = ctx->data.s32_val;

        if (port->priority == priority)
                return 0;
        port->priority = priority;
        team_queue_override_port_prio_changed(team, port);
        return 0;
}

static int team_queue_id_option_get(struct team *team,
                                    struct team_gsetter_ctx *ctx)
{
        struct team_port *port = ctx->info->port;

        ctx->data.u32_val = port->queue_id;
        return 0;
}

static int team_queue_id_option_set(struct team *team,
                                    struct team_gsetter_ctx *ctx)
{
        struct team_port *port = ctx->info->port;
        u16 new_queue_id = ctx->data.u32_val;

        if (port->queue_id == new_queue_id)
                return 0;
        if (new_queue_id >= team->dev->real_num_tx_queues)
                return -EINVAL;
        team_queue_override_port_change_queue_id(team, port, new_queue_id);
        return 0;
}

static const struct team_option team_options[] = {
        {
                .name = "mode",
                .type = TEAM_OPTION_TYPE_STRING,
                .getter = team_mode_option_get,
                .setter = team_mode_option_set,
        },
        {
                .name = "notify_peers_count",
                .type = TEAM_OPTION_TYPE_U32,
                .getter = team_notify_peers_count_get,
                .setter = team_notify_peers_count_set,
        },
        {
                .name = "notify_peers_interval",
                .type = TEAM_OPTION_TYPE_U32,
                .getter = team_notify_peers_interval_get,
                .setter = team_notify_peers_interval_set,
        },
        {
                .name = "mcast_rejoin_count",
                .type = TEAM_OPTION_TYPE_U32,
                .getter = team_mcast_rejoin_count_get,
                .setter = team_mcast_rejoin_count_set,
        },
        {
                .name = "mcast_rejoin_interval",
                .type = TEAM_OPTION_TYPE_U32,
                .getter = team_mcast_rejoin_interval_get,
                .setter = team_mcast_rejoin_interval_set,
        },
        {
                .name = "enabled",
                .type = TEAM_OPTION_TYPE_BOOL,
                .per_port = true,
                .getter = team_port_en_option_get,
                .setter = team_port_en_option_set,
        },
        {
                .name = "user_linkup",
                .type = TEAM_OPTION_TYPE_BOOL,
                .per_port = true,
                .getter = team_user_linkup_option_get,
                .setter = team_user_linkup_option_set,
        },
        {
                .name = "user_linkup_enabled",
                .type = TEAM_OPTION_TYPE_BOOL,
                .per_port = true,
                .getter = team_user_linkup_en_option_get,
                .setter = team_user_linkup_en_option_set,
        },
        {
                .name = "priority",
                .type = TEAM_OPTION_TYPE_S32,
                .per_port = true,
                .getter = team_priority_option_get,
                .setter = team_priority_option_set,
        },
        {
                .name = "queue_id",
                .type = TEAM_OPTION_TYPE_U32,
                .per_port = true,
                .getter = team_queue_id_option_get,
                .setter = team_queue_id_option_set,
        },
};
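
/* These core options, together with any options a mode registers, are
 * exposed to userspace (e.g. libteam/teamd) through the team generic
 * netlink family implemented later in this file.
 */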
1592
1593
1594static int team_init(struct net_device *dev)
1595{
1596        struct team *team = netdev_priv(dev);
1597        int i;
1598        int err;
1599
1600        team->dev = dev;
1601        mutex_init(&team->lock);
1602        team_set_no_mode(team);
1603
1604        team->pcpu_stats = netdev_alloc_pcpu_stats(struct team_pcpu_stats);
1605        if (!team->pcpu_stats)
1606                return -ENOMEM;
1607
1608        for (i = 0; i < TEAM_PORT_HASHENTRIES; i++)
1609                INIT_HLIST_HEAD(&team->en_port_hlist[i]);
1610        INIT_LIST_HEAD(&team->port_list);
1611        err = team_queue_override_init(team);
1612        if (err)
1613                goto err_team_queue_override_init;
1614
1615        team_adjust_ops(team);
1616
1617        INIT_LIST_HEAD(&team->option_list);
1618        INIT_LIST_HEAD(&team->option_inst_list);
1619
1620        team_notify_peers_init(team);
1621        team_mcast_rejoin_init(team);
1622
1623        err = team_options_register(team, team_options, ARRAY_SIZE(team_options));
1624        if (err)
1625                goto err_options_register;
1626        netif_carrier_off(dev);
1627
1628        netdev_lockdep_set_classes(dev);
1629
1630        return 0;
1631
1632err_options_register:
1633        team_mcast_rejoin_fini(team);
1634        team_notify_peers_fini(team);
1635        team_queue_override_fini(team);
1636err_team_queue_override_init:
1637        free_percpu(team->pcpu_stats);
1638
1639        return err;
1640}
1641
1642static void team_uninit(struct net_device *dev)
1643{
1644        struct team *team = netdev_priv(dev);
1645        struct team_port *port;
1646        struct team_port *tmp;
1647
1648        mutex_lock(&team->lock);
1649        list_for_each_entry_safe(port, tmp, &team->port_list, list)
1650                team_port_del(team, port->dev);
1651
1652        __team_change_mode(team, NULL); /* cleanup */
1653        __team_options_unregister(team, team_options, ARRAY_SIZE(team_options));
1654        team_mcast_rejoin_fini(team);
1655        team_notify_peers_fini(team);
1656        team_queue_override_fini(team);
1657        mutex_unlock(&team->lock);
1658        netdev_change_features(dev);
1659}
1660
1661static void team_destructor(struct net_device *dev)
1662{
1663        struct team *team = netdev_priv(dev);
1664
1665        free_percpu(team->pcpu_stats);
1666}
1667
1668static int team_open(struct net_device *dev)
1669{
1670        return 0;
1671}
1672
1673static int team_close(struct net_device *dev)
1674{
1675        return 0;
1676}
1677
1678/*
1679 * note: already called with rcu_read_lock
1680 */
1681static netdev_tx_t team_xmit(struct sk_buff *skb, struct net_device *dev)
1682{
1683        struct team *team = netdev_priv(dev);
1684        bool tx_success;
1685        unsigned int len = skb->len;
1686
1687        tx_success = team_queue_override_transmit(team, skb);
1688        if (!tx_success)
1689                tx_success = team->ops.transmit(team, skb);
1690        if (tx_success) {
1691                struct team_pcpu_stats *pcpu_stats;
1692
1693                pcpu_stats = this_cpu_ptr(team->pcpu_stats);
1694                u64_stats_update_begin(&pcpu_stats->syncp);
1695                pcpu_stats->tx_packets++;
1696                pcpu_stats->tx_bytes += len;
1697                u64_stats_update_end(&pcpu_stats->syncp);
1698        } else {
1699                this_cpu_inc(team->pcpu_stats->tx_dropped);
1700        }
1701
1702        return NETDEV_TX_OK;
1703}
1704
1705static u16 team_select_queue(struct net_device *dev, struct sk_buff *skb,
1706                             struct net_device *sb_dev,
1707                             select_queue_fallback_t fallback)
1708{
1709        /*
1710         * This helper function exists to help dev_pick_tx get the correct
1711         * destination queue.  Using a helper function skips a call to
1712         * skb_tx_hash and will put the skbs in the queue we expect on their
1713         * way down to the team driver.
1714         */
1715        u16 txq = skb_rx_queue_recorded(skb) ? skb_get_rx_queue(skb) : 0;
1716
1717        /*
1718         * Save the original txq to restore before passing to the driver
1719         */
1720        qdisc_skb_cb(skb)->slave_dev_queue_mapping = skb->queue_mapping;
1721
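            /*
             * Fold an out-of-range queue index back into range. Such values
             * are expected to be rare (hence the unlikely()), so repeated
             * subtraction is presumably cheaper here than an unconditional
             * modulo.
             */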
1722        if (unlikely(txq >= dev->real_num_tx_queues)) {
1723                do {
1724                        txq -= dev->real_num_tx_queues;
1725                } while (txq >= dev->real_num_tx_queues);
1726        }
1727        return txq;
1728}
1729
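    /*
     * Propagate promiscuity/allmulti changes on the team device to every
     * port. dev_set_promiscuity()/dev_set_allmulti() are reference counted,
     * so the change is pushed down as a +1/-1 increment.
     */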
1730static void team_change_rx_flags(struct net_device *dev, int change)
1731{
1732        struct team *team = netdev_priv(dev);
1733        struct team_port *port;
1734        int inc;
1735
1736        rcu_read_lock();
1737        list_for_each_entry_rcu(port, &team->port_list, list) {
1738                if (change & IFF_PROMISC) {
1739                        inc = dev->flags & IFF_PROMISC ? 1 : -1;
1740                        dev_set_promiscuity(port->dev, inc);
1741                }
1742                if (change & IFF_ALLMULTI) {
1743                        inc = dev->flags & IFF_ALLMULTI ? 1 : -1;
1744                        dev_set_allmulti(port->dev, inc);
1745                }
1746        }
1747        rcu_read_unlock();
1748}
1749
1750static void team_set_rx_mode(struct net_device *dev)
1751{
1752        struct team *team = netdev_priv(dev);
1753        struct team_port *port;
1754
1755        rcu_read_lock();
1756        list_for_each_entry_rcu(port, &team->port_list, list) {
1757                dev_uc_sync_multiple(port->dev, dev);
1758                dev_mc_sync_multiple(port->dev, dev);
1759        }
1760        rcu_read_unlock();
1761}
1762
1763static int team_set_mac_address(struct net_device *dev, void *p)
1764{
1765        struct sockaddr *addr = p;
1766        struct team *team = netdev_priv(dev);
1767        struct team_port *port;
1768
1769        if (dev->type == ARPHRD_ETHER && !is_valid_ether_addr(addr->sa_data))
1770                return -EADDRNOTAVAIL;
1771        memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
1772        mutex_lock(&team->lock);
1773        list_for_each_entry(port, &team->port_list, list)
1774                if (team->ops.port_change_dev_addr)
1775                        team->ops.port_change_dev_addr(team, port);
1776        mutex_unlock(&team->lock);
1777        return 0;
1778}
1779
1780static int team_change_mtu(struct net_device *dev, int new_mtu)
1781{
1782        struct team *team = netdev_priv(dev);
1783        struct team_port *port;
1784        int err;
1785
1786        /*
1787         * Although this is a reader, it's guarded by the team lock. It's not
1788         * possible to traverse the list in reverse under rcu_read_lock.
1789         */
1790        mutex_lock(&team->lock);
1791        team->port_mtu_change_allowed = true;
1792        list_for_each_entry(port, &team->port_list, list) {
1793                err = dev_set_mtu(port->dev, new_mtu);
1794                if (err) {
1795                        netdev_err(dev, "Device %s failed to change mtu\n",
1796                                   port->dev->name);
1797                        goto unwind;
1798                }
1799        }
1800        team->port_mtu_change_allowed = false;
1801        mutex_unlock(&team->lock);
1802
1803        dev->mtu = new_mtu;
1804
1805        return 0;
1806
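    /*
     * One of the ports rejected the new MTU; walk back over the ports that
     * already accepted it and restore the team device's current MTU on each
     * of them.
     */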
1807unwind:
1808        list_for_each_entry_continue_reverse(port, &team->port_list, list)
1809                dev_set_mtu(port->dev, dev->mtu);
1810        team->port_mtu_change_allowed = false;
1811        mutex_unlock(&team->lock);
1812
1813        return err;
1814}
1815
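    /*
     * Sum the per-CPU counters. The 64-bit counters are read under the
     * u64_stats seqcount and re-read if a writer raced with us; the u32
     * drop counters are updated without that protection (see the comment
     * below) and are simply accumulated.
     */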
1816static void
1817team_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
1818{
1819        struct team *team = netdev_priv(dev);
1820        struct team_pcpu_stats *p;
1821        u64 rx_packets, rx_bytes, rx_multicast, tx_packets, tx_bytes;
1822        u32 rx_dropped = 0, tx_dropped = 0, rx_nohandler = 0;
1823        unsigned int start;
1824        int i;
1825
1826        for_each_possible_cpu(i) {
1827                p = per_cpu_ptr(team->pcpu_stats, i);
1828                do {
1829                        start = u64_stats_fetch_begin_irq(&p->syncp);
1830                        rx_packets      = p->rx_packets;
1831                        rx_bytes        = p->rx_bytes;
1832                        rx_multicast    = p->rx_multicast;
1833                        tx_packets      = p->tx_packets;
1834                        tx_bytes        = p->tx_bytes;
1835                } while (u64_stats_fetch_retry_irq(&p->syncp, start));
1836
1837                stats->rx_packets       += rx_packets;
1838                stats->rx_bytes         += rx_bytes;
1839                stats->multicast        += rx_multicast;
1840                stats->tx_packets       += tx_packets;
1841                stats->tx_bytes         += tx_bytes;
1842                /*
1843                 * rx_dropped, tx_dropped & rx_nohandler are u32,
1844                 * updated without syncp protection.
1845                 */
1846                rx_dropped      += p->rx_dropped;
1847                tx_dropped      += p->tx_dropped;
1848                rx_nohandler    += p->rx_nohandler;
1849        }
1850        stats->rx_dropped       = rx_dropped;
1851        stats->tx_dropped       = tx_dropped;
1852        stats->rx_nohandler     = rx_nohandler;
1853}
1854
1855static int team_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
1856{
1857        struct team *team = netdev_priv(dev);
1858        struct team_port *port;
1859        int err;
1860
1861        /*
1862         * Although this is a reader, it's guarded by the team lock. It's not
1863         * possible to traverse the list in reverse under rcu_read_lock.
1864         */
1865        mutex_lock(&team->lock);
1866        list_for_each_entry(port, &team->port_list, list) {
1867                err = vlan_vid_add(port->dev, proto, vid);
1868                if (err)
1869                        goto unwind;
1870        }
1871        mutex_unlock(&team->lock);
1872
1873        return 0;
1874
1875unwind:
1876        list_for_each_entry_continue_reverse(port, &team->port_list, list)
1877                vlan_vid_del(port->dev, proto, vid);
1878        mutex_unlock(&team->lock);
1879
1880        return err;
1881}
1882
1883static int team_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
1884{
1885        struct team *team = netdev_priv(dev);
1886        struct team_port *port;
1887
1888        mutex_lock(&team->lock);
1889        list_for_each_entry(port, &team->port_list, list)
1890                vlan_vid_del(port->dev, proto, vid);
1891        mutex_unlock(&team->lock);
1892
1893        return 0;
1894}
1895
1896#ifdef CONFIG_NET_POLL_CONTROLLER
1897static void team_poll_controller(struct net_device *dev)
1898{
1899}
1900
1901static void __team_netpoll_cleanup(struct team *team)
1902{
1903        struct team_port *port;
1904
1905        list_for_each_entry(port, &team->port_list, list)
1906                team_port_disable_netpoll(port);
1907}
1908
1909static void team_netpoll_cleanup(struct net_device *dev)
1910{
1911        struct team *team = netdev_priv(dev);
1912
1913        mutex_lock(&team->lock);
1914        __team_netpoll_cleanup(team);
1915        mutex_unlock(&team->lock);
1916}
1917
1918static int team_netpoll_setup(struct net_device *dev,
1919                              struct netpoll_info *npinfo)
1920{
1921        struct team *team = netdev_priv(dev);
1922        struct team_port *port;
1923        int err = 0;
1924
1925        mutex_lock(&team->lock);
1926        list_for_each_entry(port, &team->port_list, list) {
1927                err = __team_port_enable_netpoll(port);
1928                if (err) {
1929                        __team_netpoll_cleanup(team);
1930                        break;
1931                }
1932        }
1933        mutex_unlock(&team->lock);
1934        return err;
1935}
1936#endif
1937
1938static int team_add_slave(struct net_device *dev, struct net_device *port_dev,
1939                          struct netlink_ext_ack *extack)
1940{
1941        struct team *team = netdev_priv(dev);
1942        int err;
1943
1944        mutex_lock(&team->lock);
1945        err = team_port_add(team, port_dev, extack);
1946        mutex_unlock(&team->lock);
1947
1948        if (!err)
1949                netdev_change_features(dev);
1950
1951        return err;
1952}
1953
1954static int team_del_slave(struct net_device *dev, struct net_device *port_dev)
1955{
1956        struct team *team = netdev_priv(dev);
1957        int err;
1958
1959        mutex_lock(&team->lock);
1960        err = team_port_del(team, port_dev);
1961        mutex_unlock(&team->lock);
1962
1963        if (!err)
1964                netdev_change_features(dev);
1965
1966        return err;
1967}
1968
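    /*
     * Recompute the team device's features from its ports. Starting from
     * all NETIF_F_ALL_FOR_ALL bits set and NETIF_F_ONE_FOR_ALL cleared,
     * netdev_increment_features() ANDs down the features every port must
     * share and ORs in the ones any single port provides.
     */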
1969static netdev_features_t team_fix_features(struct net_device *dev,
1970                                           netdev_features_t features)
1971{
1972        struct team_port *port;
1973        struct team *team = netdev_priv(dev);
1974        netdev_features_t mask;
1975
1976        mask = features;
1977        features &= ~NETIF_F_ONE_FOR_ALL;
1978        features |= NETIF_F_ALL_FOR_ALL;
1979
1980        rcu_read_lock();
1981        list_for_each_entry_rcu(port, &team->port_list, list) {
1982                features = netdev_increment_features(features,
1983                                                     port->dev->features,
1984                                                     mask);
1985        }
1986        rcu_read_unlock();
1987
1988        features = netdev_add_tso_features(features, mask);
1989
1990        return features;
1991}
1992
1993static int team_change_carrier(struct net_device *dev, bool new_carrier)
1994{
1995        struct team *team = netdev_priv(dev);
1996
1997        team->user_carrier_enabled = true;
1998
1999        if (new_carrier)
2000                netif_carrier_on(dev);
2001        else
2002                netif_carrier_off(dev);
2003        return 0;
2004}
2005
2006static const struct net_device_ops team_netdev_ops = {
2007        .ndo_init               = team_init,
2008        .ndo_uninit             = team_uninit,
2009        .ndo_open               = team_open,
2010        .ndo_stop               = team_close,
2011        .ndo_start_xmit         = team_xmit,
2012        .ndo_select_queue       = team_select_queue,
2013        .ndo_change_rx_flags    = team_change_rx_flags,
2014        .ndo_set_rx_mode        = team_set_rx_mode,
2015        .ndo_set_mac_address    = team_set_mac_address,
2016        .ndo_change_mtu         = team_change_mtu,
2017        .ndo_get_stats64        = team_get_stats64,
2018        .ndo_vlan_rx_add_vid    = team_vlan_rx_add_vid,
2019        .ndo_vlan_rx_kill_vid   = team_vlan_rx_kill_vid,
2020#ifdef CONFIG_NET_POLL_CONTROLLER
2021        .ndo_poll_controller    = team_poll_controller,
2022        .ndo_netpoll_setup      = team_netpoll_setup,
2023        .ndo_netpoll_cleanup    = team_netpoll_cleanup,
2024#endif
2025        .ndo_add_slave          = team_add_slave,
2026        .ndo_del_slave          = team_del_slave,
2027        .ndo_fix_features       = team_fix_features,
2028        .ndo_change_carrier     = team_change_carrier,
2029        .ndo_features_check     = passthru_features_check,
2030};
2031
2032/***********************
2033 * ethtool interface
2034 ***********************/
2035
2036static void team_ethtool_get_drvinfo(struct net_device *dev,
2037                                     struct ethtool_drvinfo *drvinfo)
2038{
2039        strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
2040        strlcpy(drvinfo->version, UTS_RELEASE, sizeof(drvinfo->version));
2041}
2042
2043static const struct ethtool_ops team_ethtool_ops = {
2044        .get_drvinfo            = team_ethtool_get_drvinfo,
2045        .get_link               = ethtool_op_get_link,
2046};
2047
2048/***********************
2049 * rt netlink interface
2050 ***********************/
2051
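    /*
     * Clone the link-layer identity (header ops, type, address length,
     * MTU, broadcast and hardware address) of the first port added. This
     * is what allows a team to enslave non-Ethernet devices; see
     * team_dev_type_check_change() below.
     */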
2052static void team_setup_by_port(struct net_device *dev,
2053                               struct net_device *port_dev)
2054{
2055        dev->header_ops = port_dev->header_ops;
2056        dev->type = port_dev->type;
2057        dev->hard_header_len = port_dev->hard_header_len;
2058        dev->addr_len = port_dev->addr_len;
2059        dev->mtu = port_dev->mtu;
2060        memcpy(dev->broadcast, port_dev->broadcast, port_dev->addr_len);
2061        eth_hw_addr_inherit(dev, port_dev);
2062}
2063
2064static int team_dev_type_check_change(struct net_device *dev,
2065                                      struct net_device *port_dev)
2066{
2067        struct team *team = netdev_priv(dev);
2068        char *portname = port_dev->name;
2069        int err;
2070
2071        if (dev->type == port_dev->type)
2072                return 0;
2073        if (!list_empty(&team->port_list)) {
2074                netdev_err(dev, "Device %s is of different type\n", portname);
2075                return -EBUSY;
2076        }
2077        err = call_netdevice_notifiers(NETDEV_PRE_TYPE_CHANGE, dev);
2078        err = notifier_to_errno(err);
2079        if (err) {
2080                netdev_err(dev, "Refused to change device type\n");
2081                return err;
2082        }
2083        dev_uc_flush(dev);
2084        dev_mc_flush(dev);
2085        team_setup_by_port(dev, port_dev);
2086        call_netdevice_notifiers(NETDEV_POST_TYPE_CHANGE, dev);
2087        return 0;
2088}
2089
2090static void team_setup(struct net_device *dev)
2091{
2092        ether_setup(dev);
2093        dev->max_mtu = ETH_MAX_MTU;
2094
2095        dev->netdev_ops = &team_netdev_ops;
2096        dev->ethtool_ops = &team_ethtool_ops;
2097        dev->needs_free_netdev = true;
2098        dev->priv_destructor = team_destructor;
2099        dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_TX_SKB_SHARING);
2100        dev->priv_flags |= IFF_NO_QUEUE;
2101        dev->priv_flags |= IFF_TEAM;
2102
2103        /*
2104         * Indicate we support unicast address filtering. That way the core
2105         * won't force us into promiscuous mode when a unicast address is
2106         * added; we leave that up to the underlying port drivers.
2107         */
2108        dev->priv_flags |= IFF_UNICAST_FLT | IFF_LIVE_ADDR_CHANGE;
2109
2110        dev->features |= NETIF_F_LLTX;
2111        dev->features |= NETIF_F_GRO;
2112
2113        /* Don't allow team devices to change network namespaces. */
2114        dev->features |= NETIF_F_NETNS_LOCAL;
2115
2116        dev->hw_features = TEAM_VLAN_FEATURES |
2117                           NETIF_F_HW_VLAN_CTAG_TX |
2118                           NETIF_F_HW_VLAN_CTAG_RX |
2119                           NETIF_F_HW_VLAN_CTAG_FILTER;
2120
2121        dev->hw_features |= NETIF_F_GSO_ENCAP_ALL | NETIF_F_GSO_UDP_L4;
2122        dev->features |= dev->hw_features;
2123}
2124
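    /*
     * A team instance of this link kind is typically created from
     * userspace via rtnetlink, e.g. "ip link add dev team0 type team"
     * (the device name is just an example). team_newlink() then assigns
     * a random MAC address unless one was supplied.
     */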
2125static int team_newlink(struct net *src_net, struct net_device *dev,
2126                        struct nlattr *tb[], struct nlattr *data[],
2127                        struct netlink_ext_ack *extack)
2128{
2129        if (tb[IFLA_ADDRESS] == NULL)
2130                eth_hw_addr_random(dev);
2131
2132        return register_netdevice(dev);
2133}
2134
2135static int team_validate(struct nlattr *tb[], struct nlattr *data[],
2136                         struct netlink_ext_ack *extack)
2137{
2138        if (tb[IFLA_ADDRESS]) {
2139                if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
2140                        return -EINVAL;
2141                if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
2142                        return -EADDRNOTAVAIL;
2143        }
2144        return 0;
2145}
2146
2147static unsigned int team_get_num_tx_queues(void)
2148{
2149        return TEAM_DEFAULT_NUM_TX_QUEUES;
2150}
2151
2152static unsigned int team_get_num_rx_queues(void)
2153{
2154        return TEAM_DEFAULT_NUM_RX_QUEUES;
2155}
2156
2157static struct rtnl_link_ops team_link_ops __read_mostly = {
2158        .kind                   = DRV_NAME,
2159        .priv_size              = sizeof(struct team),
2160        .setup                  = team_setup,
2161        .newlink                = team_newlink,
2162        .validate               = team_validate,
2163        .get_num_tx_queues      = team_get_num_tx_queues,
2164        .get_num_rx_queues      = team_get_num_rx_queues,
2165};
2166
2167
2168/***********************************
2169 * Generic netlink custom interface
2170 ***********************************/
2171
2172static struct genl_family team_nl_family;
2173
2174static const struct nla_policy team_nl_policy[TEAM_ATTR_MAX + 1] = {
2175        [TEAM_ATTR_UNSPEC]                      = { .type = NLA_UNSPEC, },
2176        [TEAM_ATTR_TEAM_IFINDEX]                = { .type = NLA_U32 },
2177        [TEAM_ATTR_LIST_OPTION]                 = { .type = NLA_NESTED },
2178        [TEAM_ATTR_LIST_PORT]                   = { .type = NLA_NESTED },
2179};
2180
2181static const struct nla_policy
2182team_nl_option_policy[TEAM_ATTR_OPTION_MAX + 1] = {
2183        [TEAM_ATTR_OPTION_UNSPEC]               = { .type = NLA_UNSPEC, },
2184        [TEAM_ATTR_OPTION_NAME] = {
2185                .type = NLA_STRING,
2186                .len = TEAM_STRING_MAX_LEN,
2187        },
2188        [TEAM_ATTR_OPTION_CHANGED]              = { .type = NLA_FLAG },
2189        [TEAM_ATTR_OPTION_TYPE]                 = { .type = NLA_U8 },
2190        [TEAM_ATTR_OPTION_DATA]                 = { .type = NLA_BINARY },
2191};
2192
2193static int team_nl_cmd_noop(struct sk_buff *skb, struct genl_info *info)
2194{
2195        struct sk_buff *msg;
2196        void *hdr;
2197        int err;
2198
2199        msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
2200        if (!msg)
2201                return -ENOMEM;
2202
2203        hdr = genlmsg_put(msg, info->snd_portid, info->snd_seq,
2204                          &team_nl_family, 0, TEAM_CMD_NOOP);
2205        if (!hdr) {
2206                err = -EMSGSIZE;
2207                goto err_msg_put;
2208        }
2209
2210        genlmsg_end(msg, hdr);
2211
2212        return genlmsg_unicast(genl_info_net(info), msg, info->snd_portid);
2213
2214err_msg_put:
2215        nlmsg_free(msg);
2216
2217        return err;
2218}
2219
2220/*
2221 * Netlink cmd functions should be locked by the following two functions.
2222 * Since a reference to dev is held here, the device won't disappear in between.
2223 */
2224static struct team *team_nl_team_get(struct genl_info *info)
2225{
2226        struct net *net = genl_info_net(info);
2227        int ifindex;
2228        struct net_device *dev;
2229        struct team *team;
2230
2231        if (!info->attrs[TEAM_ATTR_TEAM_IFINDEX])
2232                return NULL;
2233
2234        ifindex = nla_get_u32(info->attrs[TEAM_ATTR_TEAM_IFINDEX]);
2235        dev = dev_get_by_index(net, ifindex);
2236        if (!dev || dev->netdev_ops != &team_netdev_ops) {
2237                if (dev)
2238                        dev_put(dev);
2239                return NULL;
2240        }
2241
2242        team = netdev_priv(dev);
2243        mutex_lock(&team->lock);
2244        return team;
2245}
2246
2247static void team_nl_team_put(struct team *team)
2248{
2249        mutex_unlock(&team->lock);
2250        dev_put(team->dev);
2251}
2252
2253typedef int team_nl_send_func_t(struct sk_buff *skb,
2254                                struct team *team, u32 portid);
2255
2256static int team_nl_send_unicast(struct sk_buff *skb, struct team *team, u32 portid)
2257{
2258        return genlmsg_unicast(dev_net(team->dev), skb, portid);
2259}
2260
2261static int team_nl_fill_one_option_get(struct sk_buff *skb, struct team *team,
2262                                       struct team_option_inst *opt_inst)
2263{
2264        struct nlattr *option_item;
2265        struct team_option *option = opt_inst->option;
2266        struct team_option_inst_info *opt_inst_info = &opt_inst->info;
2267        struct team_gsetter_ctx ctx;
2268        int err;
2269
2270        ctx.info = opt_inst_info;
2271        err = team_option_get(team, opt_inst, &ctx);
2272        if (err)
2273                return err;
2274
2275        option_item = nla_nest_start(skb, TEAM_ATTR_ITEM_OPTION);
2276        if (!option_item)
2277                return -EMSGSIZE;
2278
2279        if (nla_put_string(skb, TEAM_ATTR_OPTION_NAME, option->name))
2280                goto nest_cancel;
2281        if (opt_inst_info->port &&
2282            nla_put_u32(skb, TEAM_ATTR_OPTION_PORT_IFINDEX,
2283                        opt_inst_info->port->dev->ifindex))
2284                goto nest_cancel;
2285        if (opt_inst->option->array_size &&
2286            nla_put_u32(skb, TEAM_ATTR_OPTION_ARRAY_INDEX,
2287                        opt_inst_info->array_index))
2288                goto nest_cancel;
2289
2290        switch (option->type) {
2291        case TEAM_OPTION_TYPE_U32:
2292                if (nla_put_u8(skb, TEAM_ATTR_OPTION_TYPE, NLA_U32))
2293                        goto nest_cancel;
2294                if (nla_put_u32(skb, TEAM_ATTR_OPTION_DATA, ctx.data.u32_val))
2295                        goto nest_cancel;
2296                break;
2297        case TEAM_OPTION_TYPE_STRING:
2298                if (nla_put_u8(skb, TEAM_ATTR_OPTION_TYPE, NLA_STRING))
2299                        goto nest_cancel;
2300                if (nla_put_string(skb, TEAM_ATTR_OPTION_DATA,
2301                                   ctx.data.str_val))
2302                        goto nest_cancel;
2303                break;
2304        case TEAM_OPTION_TYPE_BINARY:
2305                if (nla_put_u8(skb, TEAM_ATTR_OPTION_TYPE, NLA_BINARY))
2306                        goto nest_cancel;
2307                if (nla_put(skb, TEAM_ATTR_OPTION_DATA, ctx.data.bin_val.len,
2308                            ctx.data.bin_val.ptr))
2309                        goto nest_cancel;
2310                break;
2311        case TEAM_OPTION_TYPE_BOOL:
2312                if (nla_put_u8(skb, TEAM_ATTR_OPTION_TYPE, NLA_FLAG))
2313                        goto nest_cancel;
2314                if (ctx.data.bool_val &&
2315                    nla_put_flag(skb, TEAM_ATTR_OPTION_DATA))
2316                        goto nest_cancel;
2317                break;
2318        case TEAM_OPTION_TYPE_S32:
2319                if (nla_put_u8(skb, TEAM_ATTR_OPTION_TYPE, NLA_S32))
2320                        goto nest_cancel;
2321                if (nla_put_s32(skb, TEAM_ATTR_OPTION_DATA, ctx.data.s32_val))
2322                        goto nest_cancel;
2323                break;
2324        default:
2325                BUG();
2326        }
2327        if (opt_inst->removed && nla_put_flag(skb, TEAM_ATTR_OPTION_REMOVED))
2328                goto nest_cancel;
2329        if (opt_inst->changed) {
2330                if (nla_put_flag(skb, TEAM_ATTR_OPTION_CHANGED))
2331                        goto nest_cancel;
2332                opt_inst->changed = false;
2333        }
2334        nla_nest_end(skb, option_item);
2335        return 0;
2336
2337nest_cancel:
2338        nla_nest_cancel(skb, option_item);
2339        return -EMSGSIZE;
2340}
2341
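    /*
     * Helper for the multi-part dumps below: flush the current skb (if
     * any) to its destination via send_func and allocate a fresh one. On
     * the first call *pskb is NULL, so this just allocates.
     */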
2342static int __send_and_alloc_skb(struct sk_buff **pskb,
2343                                struct team *team, u32 portid,
2344                                team_nl_send_func_t *send_func)
2345{
2346        int err;
2347
2348        if (*pskb) {
2349                err = send_func(*pskb, team, portid);
2350                if (err)
2351                        return err;
2352        }
2353        *pskb = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL);
2354        if (!*pskb)
2355                return -ENOMEM;
2356        return 0;
2357}
2358
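    /*
     * Dump the selected option instances as one or more NLM_F_MULTI
     * messages. When an option does not fit (-EMSGSIZE), the current
     * message is closed and the dump restarts from that option; the
     * sequence is terminated by NLMSG_DONE.
     */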
2359static int team_nl_send_options_get(struct team *team, u32 portid, u32 seq,
2360                                    int flags, team_nl_send_func_t *send_func,
2361                                    struct list_head *sel_opt_inst_list)
2362{
2363        struct nlattr *option_list;
2364        struct nlmsghdr *nlh;
2365        void *hdr;
2366        struct team_option_inst *opt_inst;
2367        int err;
2368        struct sk_buff *skb = NULL;
2369        bool incomplete;
2370        int i;
2371
2372        opt_inst = list_first_entry(sel_opt_inst_list,
2373                                    struct team_option_inst, tmp_list);
2374
2375start_again:
2376        err = __send_and_alloc_skb(&skb, team, portid, send_func);
2377        if (err)
2378                return err;
2379
2380        hdr = genlmsg_put(skb, portid, seq, &team_nl_family, flags | NLM_F_MULTI,
2381                          TEAM_CMD_OPTIONS_GET);
2382        if (!hdr) {
2383                nlmsg_free(skb);
2384                return -EMSGSIZE;
2385        }
2386
2387        if (nla_put_u32(skb, TEAM_ATTR_TEAM_IFINDEX, team->dev->ifindex))
2388                goto nla_put_failure;
2389        option_list = nla_nest_start(skb, TEAM_ATTR_LIST_OPTION);
2390        if (!option_list)
2391                goto nla_put_failure;
2392
2393        i = 0;
2394        incomplete = false;
2395        list_for_each_entry_from(opt_inst, sel_opt_inst_list, tmp_list) {
2396                err = team_nl_fill_one_option_get(skb, team, opt_inst);
2397                if (err) {
2398                        if (err == -EMSGSIZE) {
2399                                if (!i)
2400                                        goto errout;
2401                                incomplete = true;
2402                                break;
2403                        }
2404                        goto errout;
2405                }
2406                i++;
2407        }
2408
2409        nla_nest_end(skb, option_list);
2410        genlmsg_end(skb, hdr);
2411        if (incomplete)
2412                goto start_again;
2413
2414send_done:
2415        nlh = nlmsg_put(skb, portid, seq, NLMSG_DONE, 0, flags | NLM_F_MULTI);
2416        if (!nlh) {
2417                err = __send_and_alloc_skb(&skb, team, portid, send_func);
2418                if (err)
2419                        return err;
2420                goto send_done;
2421        }
2422
2423        return send_func(skb, team, portid);
2424
2425nla_put_failure:
2426        err = -EMSGSIZE;
2427errout:
2428        nlmsg_free(skb);
2429        return err;
2430}
2431
2432static int team_nl_cmd_options_get(struct sk_buff *skb, struct genl_info *info)
2433{
2434        struct team *team;
2435        struct team_option_inst *opt_inst;
2436        int err;
2437        LIST_HEAD(sel_opt_inst_list);
2438
2439        team = team_nl_team_get(info);
2440        if (!team)
2441                return -EINVAL;
2442
2443        list_for_each_entry(opt_inst, &team->option_inst_list, list)
2444                list_add_tail(&opt_inst->tmp_list, &sel_opt_inst_list);
2445        err = team_nl_send_options_get(team, info->snd_portid, info->snd_seq,
2446                                       NLM_F_ACK, team_nl_send_unicast,
2447                                       &sel_opt_inst_list);
2448
2449        team_nl_team_put(team);
2450
2451        return err;
2452}
2453
2454static int team_nl_send_event_options_get(struct team *team,
2455                                          struct list_head *sel_opt_inst_list);
2456
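    /*
     * TEAM_CMD_OPTIONS_SET handler: walk the nested option items, match
     * each one against a registered option instance by name, type, port
     * ifindex and array index, apply the setter, and finally multicast
     * the list of changed options back to listeners.
     */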
2457static int team_nl_cmd_options_set(struct sk_buff *skb, struct genl_info *info)
2458{
2459        struct team *team;
2460        int err = 0;
2461        int i;
2462        struct nlattr *nl_option;
2463        LIST_HEAD(opt_inst_list);
2464
2465        rtnl_lock();
2466
2467        team = team_nl_team_get(info);
2468        if (!team) {
2469                err = -EINVAL;
2470                goto rtnl_unlock;
2471        }
2472
2474        if (!info->attrs[TEAM_ATTR_LIST_OPTION]) {
2475                err = -EINVAL;
2476                goto team_put;
2477        }
2478
2479        nla_for_each_nested(nl_option, info->attrs[TEAM_ATTR_LIST_OPTION], i) {
2480                struct nlattr *opt_attrs[TEAM_ATTR_OPTION_MAX + 1];
2481                struct nlattr *attr;
2482                struct nlattr *attr_data;
2483                enum team_option_type opt_type;
2484                int opt_port_ifindex = 0; /* != 0 for per-port options */
2485                u32 opt_array_index = 0;
2486                bool opt_is_array = false;
2487                struct team_option_inst *opt_inst;
2488                char *opt_name;
2489                bool opt_found = false;
2490
2491                if (nla_type(nl_option) != TEAM_ATTR_ITEM_OPTION) {
2492                        err = -EINVAL;
2493                        goto team_put;
2494                }
2495                err = nla_parse_nested(opt_attrs, TEAM_ATTR_OPTION_MAX,
2496                                       nl_option, team_nl_option_policy,
2497                                       info->extack);
2498                if (err)
2499                        goto team_put;
2500                if (!opt_attrs[TEAM_ATTR_OPTION_NAME] ||
2501                    !opt_attrs[TEAM_ATTR_OPTION_TYPE]) {
2502                        err = -EINVAL;
2503                        goto team_put;
2504                }
2505                switch (nla_get_u8(opt_attrs[TEAM_ATTR_OPTION_TYPE])) {
2506                case NLA_U32:
2507                        opt_type = TEAM_OPTION_TYPE_U32;
2508                        break;
2509                case NLA_STRING:
2510                        opt_type = TEAM_OPTION_TYPE_STRING;
2511                        break;
2512                case NLA_BINARY:
2513                        opt_type = TEAM_OPTION_TYPE_BINARY;
2514                        break;
2515                case NLA_FLAG:
2516                        opt_type = TEAM_OPTION_TYPE_BOOL;
2517                        break;
2518                case NLA_S32:
2519                        opt_type = TEAM_OPTION_TYPE_S32;
2520                        break;
2521                default:
                            err = -EINVAL;
2522                        goto team_put;
2523                }
2524
2525                attr_data = opt_attrs[TEAM_ATTR_OPTION_DATA];
2526                if (opt_type != TEAM_OPTION_TYPE_BOOL && !attr_data) {
2527                        err = -EINVAL;
2528                        goto team_put;
2529                }
2530
2531                opt_name = nla_data(opt_attrs[TEAM_ATTR_OPTION_NAME]);
2532                attr = opt_attrs[TEAM_ATTR_OPTION_PORT_IFINDEX];
2533                if (attr)
2534                        opt_port_ifindex = nla_get_u32(attr);
2535
2536                attr = opt_attrs[TEAM_ATTR_OPTION_ARRAY_INDEX];
2537                if (attr) {
2538                        opt_is_array = true;
2539                        opt_array_index = nla_get_u32(attr);
2540                }
2541
2542                list_for_each_entry(opt_inst, &team->option_inst_list, list) {
2543                        struct team_option *option = opt_inst->option;
2544                        struct team_gsetter_ctx ctx;
2545                        struct team_option_inst_info *opt_inst_info;
2546                        int tmp_ifindex;
2547
2548                        opt_inst_info = &opt_inst->info;
2549                        tmp_ifindex = opt_inst_info->port ?
2550                                      opt_inst_info->port->dev->ifindex : 0;
2551                        if (option->type != opt_type ||
2552                            strcmp(option->name, opt_name) ||
2553                            tmp_ifindex != opt_port_ifindex ||
2554                            (option->array_size && !opt_is_array) ||
2555                            opt_inst_info->array_index != opt_array_index)
2556                                continue;
2557                        opt_found = true;
2558                        ctx.info = opt_inst_info;
2559                        switch (opt_type) {
2560                        case TEAM_OPTION_TYPE_U32:
2561                                ctx.data.u32_val = nla_get_u32(attr_data);
2562                                break;
2563                        case TEAM_OPTION_TYPE_STRING:
2564                                if (nla_len(attr_data) > TEAM_STRING_MAX_LEN) {
2565                                        err = -EINVAL;
2566                                        goto team_put;
2567                                }
2568                                ctx.data.str_val = nla_data(attr_data);
2569                                break;
2570                        case TEAM_OPTION_TYPE_BINARY:
2571                                ctx.data.bin_val.len = nla_len(attr_data);
2572                                ctx.data.bin_val.ptr = nla_data(attr_data);
2573                                break;
2574                        case TEAM_OPTION_TYPE_BOOL:
2575                                ctx.data.bool_val = attr_data ? true : false;
2576                                break;
2577                        case TEAM_OPTION_TYPE_S32:
2578                                ctx.data.s32_val = nla_get_s32(attr_data);
2579                                break;
2580                        default:
2581                                BUG();
2582                        }
2583                        err = team_option_set(team, opt_inst, &ctx);
2584                        if (err)
2585                                goto team_put;
2586                        opt_inst->changed = true;
2587
2588                        /* dumb/evil user-space can send us duplicate opt,
2589                         * keep only the last one
2590                         */
2591                        if (__team_option_inst_tmp_find(&opt_inst_list,
2592                                                        opt_inst))
2593                                continue;
2594
2595                        list_add(&opt_inst->tmp_list, &opt_inst_list);
2596                }
2597                if (!opt_found) {
2598                        err = -ENOENT;
2599                        goto team_put;
2600                }
2601        }
2602
2603        err = team_nl_send_event_options_get(team, &opt_inst_list);
2604
2605team_put:
2606        team_nl_team_put(team);
2607rtnl_unlock:
2608        rtnl_unlock();
2609        return err;
2610}
2611
2612static int team_nl_fill_one_port_get(struct sk_buff *skb,
2613                                     struct team_port *port)
2614{
2615        struct nlattr *port_item;
2616
2617        port_item = nla_nest_start(skb, TEAM_ATTR_ITEM_PORT);
2618        if (!port_item)
2619                goto nest_cancel;
2620        if (nla_put_u32(skb, TEAM_ATTR_PORT_IFINDEX, port->dev->ifindex))
2621                goto nest_cancel;
2622        if (port->changed) {
2623                if (nla_put_flag(skb, TEAM_ATTR_PORT_CHANGED))
2624                        goto nest_cancel;
2625                port->changed = false;
2626        }
2627        if ((port->removed &&
2628             nla_put_flag(skb, TEAM_ATTR_PORT_REMOVED)) ||
2629            (port->state.linkup &&
2630             nla_put_flag(skb, TEAM_ATTR_PORT_LINKUP)) ||
2631            nla_put_u32(skb, TEAM_ATTR_PORT_SPEED, port->state.speed) ||
2632            nla_put_u8(skb, TEAM_ATTR_PORT_DUPLEX, port->state.duplex))
2633                goto nest_cancel;
2634        nla_nest_end(skb, port_item);
2635        return 0;
2636
2637nest_cancel:
2638        nla_nest_cancel(skb, port_item);
2639        return -EMSGSIZE;
2640}
2641
2642static int team_nl_send_port_list_get(struct team *team, u32 portid, u32 seq,
2643                                      int flags, team_nl_send_func_t *send_func,
2644                                      struct team_port *one_port)
2645{
2646        struct nlattr *port_list;
2647        struct nlmsghdr *nlh;
2648        void *hdr;
2649        struct team_port *port;
2650        int err;
2651        struct sk_buff *skb = NULL;
2652        bool incomplete;
2653        int i;
2654
2655        port = list_first_entry_or_null(&team->port_list,
2656                                        struct team_port, list);
2657
2658start_again:
2659        err = __send_and_alloc_skb(&skb, team, portid, send_func);
2660        if (err)
2661                return err;
2662
2663        hdr = genlmsg_put(skb, portid, seq, &team_nl_family, flags | NLM_F_MULTI,
2664                          TEAM_CMD_PORT_LIST_GET);
2665        if (!hdr) {
2666                nlmsg_free(skb);
2667                return -EMSGSIZE;
2668        }
2669
2670        if (nla_put_u32(skb, TEAM_ATTR_TEAM_IFINDEX, team->dev->ifindex))
2671                goto nla_put_failure;
2672        port_list = nla_nest_start(skb, TEAM_ATTR_LIST_PORT);
2673        if (!port_list)
2674                goto nla_put_failure;
2675
2676        i = 0;
2677        incomplete = false;
2678
2679        /* If one port is selected, the caller wants a port list containing
2680         * only this port; otherwise go through all listed ports and send them all.
2681         */
2682        if (one_port) {
2683                err = team_nl_fill_one_port_get(skb, one_port);
2684                if (err)
2685                        goto errout;
2686        } else if (port) {
2687                list_for_each_entry_from(port, &team->port_list, list) {
2688                        err = team_nl_fill_one_port_get(skb, port);
2689                        if (err) {
2690                                if (err == -EMSGSIZE) {
2691                                        if (!i)
2692                                                goto errout;
2693                                        incomplete = true;
2694                                        break;
2695                                }
2696                                goto errout;
2697                        }
2698                        i++;
2699                }
2700        }
2701
2702        nla_nest_end(skb, port_list);
2703        genlmsg_end(skb, hdr);
2704        if (incomplete)
2705                goto start_again;
2706
2707send_done:
2708        nlh = nlmsg_put(skb, portid, seq, NLMSG_DONE, 0, flags | NLM_F_MULTI);
2709        if (!nlh) {
2710                err = __send_and_alloc_skb(&skb, team, portid, send_func);
2711                if (err)
2712                        return err;
2713                goto send_done;
2714        }
2715
2716        return send_func(skb, team, portid);
2717
2718nla_put_failure:
2719        err = -EMSGSIZE;
2720errout:
2721        nlmsg_free(skb);
2722        return err;
2723}
2724
2725static int team_nl_cmd_port_list_get(struct sk_buff *skb,
2726                                     struct genl_info *info)
2727{
2728        struct team *team;
2729        int err;
2730
2731        team = team_nl_team_get(info);
2732        if (!team)
2733                return -EINVAL;
2734
2735        err = team_nl_send_port_list_get(team, info->snd_portid, info->snd_seq,
2736                                         NLM_F_ACK, team_nl_send_unicast, NULL);
2737
2738        team_nl_team_put(team);
2739
2740        return err;
2741}
2742
2743static const struct genl_ops team_nl_ops[] = {
2744        {
2745                .cmd = TEAM_CMD_NOOP,
2746                .doit = team_nl_cmd_noop,
2747                .policy = team_nl_policy,
2748        },
2749        {
2750                .cmd = TEAM_CMD_OPTIONS_SET,
2751                .doit = team_nl_cmd_options_set,
2752                .policy = team_nl_policy,
2753                .flags = GENL_ADMIN_PERM,
2754        },
2755        {
2756                .cmd = TEAM_CMD_OPTIONS_GET,
2757                .doit = team_nl_cmd_options_get,
2758                .policy = team_nl_policy,
2759                .flags = GENL_ADMIN_PERM,
2760        },
2761        {
2762                .cmd = TEAM_CMD_PORT_LIST_GET,
2763                .doit = team_nl_cmd_port_list_get,
2764                .policy = team_nl_policy,
2765                .flags = GENL_ADMIN_PERM,
2766        },
2767};
2768
2769static const struct genl_multicast_group team_nl_mcgrps[] = {
2770        { .name = TEAM_GENL_CHANGE_EVENT_MC_GRP_NAME, },
2771};
2772
2773static struct genl_family team_nl_family __ro_after_init = {
2774        .name           = TEAM_GENL_NAME,
2775        .version        = TEAM_GENL_VERSION,
2776        .maxattr        = TEAM_ATTR_MAX,
2777        .netnsok        = true,
2778        .module         = THIS_MODULE,
2779        .ops            = team_nl_ops,
2780        .n_ops          = ARRAY_SIZE(team_nl_ops),
2781        .mcgrps         = team_nl_mcgrps,
2782        .n_mcgrps       = ARRAY_SIZE(team_nl_mcgrps),
2783};
2784
2785static int team_nl_send_multicast(struct sk_buff *skb,
2786                                  struct team *team, u32 portid)
2787{
2788        return genlmsg_multicast_netns(&team_nl_family, dev_net(team->dev),
2789                                       skb, 0, 0, GFP_KERNEL);
2790}
2791
2792static int team_nl_send_event_options_get(struct team *team,
2793                                          struct list_head *sel_opt_inst_list)
2794{
2795        return team_nl_send_options_get(team, 0, 0, 0, team_nl_send_multicast,
2796                                        sel_opt_inst_list);
2797}
2798
2799static int team_nl_send_event_port_get(struct team *team,
2800                                       struct team_port *port)
2801{
2802        return team_nl_send_port_list_get(team, 0, 0, 0, team_nl_send_multicast,
2803                                          port);
2804}
2805
2806static int __init team_nl_init(void)
2807{
2808        return genl_register_family(&team_nl_family);
2809}
2810
2811static void team_nl_fini(void)
2812{
2813        genl_unregister_family(&team_nl_family);
2814}
2815
2816
2817/******************
2818 * Change checkers
2819 ******************/
2820
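    /*
     * Multicast any option instances marked as changed to the event
     * group. -ESRCH just means nobody is listening, so it is not treated
     * as a failure.
     */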
2821static void __team_options_change_check(struct team *team)
2822{
2823        int err;
2824        struct team_option_inst *opt_inst;
2825        LIST_HEAD(sel_opt_inst_list);
2826
2827        list_for_each_entry(opt_inst, &team->option_inst_list, list) {
2828                if (opt_inst->changed)
2829                        list_add_tail(&opt_inst->tmp_list, &sel_opt_inst_list);
2830        }
2831        err = team_nl_send_event_options_get(team, &sel_opt_inst_list);
2832        if (err && err != -ESRCH)
2833                netdev_warn(team->dev, "Failed to send options change via netlink (err %d)\n",
2834                            err);
2835}
2836
2837/* rtnl lock is held */
2838
2839static void __team_port_change_send(struct team_port *port, bool linkup)
2840{
2841        int err;
2842
2843        port->changed = true;
2844        port->state.linkup = linkup;
2845        team_refresh_port_linkup(port);
2846        if (linkup) {
2847                struct ethtool_link_ksettings ecmd;
2848
2849                err = __ethtool_get_link_ksettings(port->dev, &ecmd);
2850                if (!err) {
2851                        port->state.speed = ecmd.base.speed;
2852                        port->state.duplex = ecmd.base.duplex;
2853                        goto send_event;
2854                }
2855        }
2856        port->state.speed = 0;
2857        port->state.duplex = 0;
2858
2859send_event:
2860        err = team_nl_send_event_port_get(port->team, port);
2861        if (err && err != -ESRCH)
2862                netdev_warn(port->team->dev, "Failed to send port change of device %s via netlink (err %d)\n",
2863                            port->dev->name, err);
2865}
2866
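    /*
     * The team carrier is on iff at least one port reports linkup, unless
     * userspace has taken over carrier control via ndo_change_carrier
     * (see team_change_carrier() above), in which case we keep hands off.
     */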
2867static void __team_carrier_check(struct team *team)
2868{
2869        struct team_port *port;
2870        bool team_linkup;
2871
2872        if (team->user_carrier_enabled)
2873                return;
2874
2875        team_linkup = false;
2876        list_for_each_entry(port, &team->port_list, list) {
2877                if (port->linkup) {
2878                        team_linkup = true;
2879                        break;
2880                }
2881        }
2882
2883        if (team_linkup)
2884                netif_carrier_on(team->dev);
2885        else
2886                netif_carrier_off(team->dev);
2887}
2888
2889static void __team_port_change_check(struct team_port *port, bool linkup)
2890{
2891        if (port->state.linkup != linkup)
2892                __team_port_change_send(port, linkup);
2893        __team_carrier_check(port->team);
2894}
2895
2896static void __team_port_change_port_added(struct team_port *port, bool linkup)
2897{
2898        __team_port_change_send(port, linkup);
2899        __team_carrier_check(port->team);
2900}
2901
2902static void __team_port_change_port_removed(struct team_port *port)
2903{
2904        port->removed = true;
2905        __team_port_change_send(port, false);
2906        __team_carrier_check(port->team);
2907}
2908
2909static void team_port_change_check(struct team_port *port, bool linkup)
2910{
2911        struct team *team = port->team;
2912
2913        mutex_lock(&team->lock);
2914        __team_port_change_check(port, linkup);
2915        mutex_unlock(&team->lock);
2916}
2917
2918
2919/************************************
2920 * Net device notifier event handler
2921 ************************************/
2922
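    /*
     * Translate netdev notifier events on port devices into team state
     * updates. NOTIFY_BAD is returned to veto MTU or type changes that
     * were not initiated through the team device itself.
     */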
2923static int team_device_event(struct notifier_block *unused,
2924                             unsigned long event, void *ptr)
2925{
2926        struct net_device *dev = netdev_notifier_info_to_dev(ptr);
2927        struct team_port *port;
2928
2929        port = team_port_get_rtnl(dev);
2930        if (!port)
2931                return NOTIFY_DONE;
2932
2933        switch (event) {
2934        case NETDEV_UP:
2935                if (netif_carrier_ok(dev))
2936                        team_port_change_check(port, true);
2937                break;
2938        case NETDEV_DOWN:
2939                team_port_change_check(port, false);
2940                break;
2941        case NETDEV_CHANGE:
2942                if (netif_running(port->dev))
2943                        team_port_change_check(port,
2944                                               !!netif_oper_up(port->dev));
2945                break;
2946        case NETDEV_UNREGISTER:
2947                team_del_slave(port->team->dev, dev);
2948                break;
2949        case NETDEV_FEAT_CHANGE:
2950                team_compute_features(port->team);
2951                break;
2952        case NETDEV_PRECHANGEMTU:
2953                /* Forbid changing the MTU of an underlying port device */
2954                if (!port->team->port_mtu_change_allowed)
2955                        return NOTIFY_BAD;
2956                break;
2957        case NETDEV_PRE_TYPE_CHANGE:
2958                /* Forbid changing the type of an underlying port device */
2959                return NOTIFY_BAD;
2960        case NETDEV_RESEND_IGMP:
2961                /* Propagate to master device */
2962                call_netdevice_notifiers(event, port->team->dev);
2963                break;
2964        }
2965        return NOTIFY_DONE;
2966}
2967
2968static struct notifier_block team_notifier_block __read_mostly = {
2969        .notifier_call = team_device_event,
2970};
2971
2972
2973/***********************
2974 * Module init and exit
2975 ***********************/
2976
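    /*
     * Register the netdevice notifier, the rtnl link ops and the generic
     * netlink family, in that order; team_module_exit() tears them down
     * in reverse.
     */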
2977static int __init team_module_init(void)
2978{
2979        int err;
2980
2981        register_netdevice_notifier(&team_notifier_block);
2982
2983        err = rtnl_link_register(&team_link_ops);
2984        if (err)
2985                goto err_rtnl_reg;
2986
2987        err = team_nl_init();
2988        if (err)
2989                goto err_nl_init;
2990
2991        return 0;
2992
2993err_nl_init:
2994        rtnl_link_unregister(&team_link_ops);
2995
2996err_rtnl_reg:
2997        unregister_netdevice_notifier(&team_notifier_block);
2998
2999        return err;
3000}
3001
3002static void __exit team_module_exit(void)
3003{
3004        team_nl_fini();
3005        rtnl_link_unregister(&team_link_ops);
3006        unregister_netdevice_notifier(&team_notifier_block);
3007}
3008
3009module_init(team_module_init);
3010module_exit(team_module_exit);
3011
3012MODULE_LICENSE("GPL v2");
3013MODULE_AUTHOR("Jiri Pirko <jpirko@redhat.com>");
3014MODULE_DESCRIPTION("Ethernet team device driver");
3015MODULE_ALIAS_RTNL_LINK(DRV_NAME);
3016