linux/drivers/net/team/team.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * drivers/net/team/team.c - Network team device driver
 * Copyright (c) 2011 Jiri Pirko <jpirko@redhat.com>
 */

#include <linux/ethtool.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/rcupdate.h>
#include <linux/errno.h>
#include <linux/ctype.h>
#include <linux/notifier.h>
#include <linux/netdevice.h>
#include <linux/netpoll.h>
#include <linux/if_vlan.h>
#include <linux/if_arp.h>
#include <linux/socket.h>
#include <linux/etherdevice.h>
#include <linux/rtnetlink.h>
#include <net/rtnetlink.h>
#include <net/genetlink.h>
#include <net/netlink.h>
#include <net/sch_generic.h>
#include <generated/utsrelease.h>
#include <linux/if_team.h>

#define DRV_NAME "team"


/**********
 * Helpers
 **********/

static struct team_port *team_port_get_rtnl(const struct net_device *dev)
{
        struct team_port *port = rtnl_dereference(dev->rx_handler_data);

        return netif_is_team_port(dev) ? port : NULL;
}

/*
 * The ability to change the device address of an open port device is already
 * verified in team_port_add, so this function may be called without checking
 * the return value.
 */
static int __set_port_dev_addr(struct net_device *port_dev,
                               const unsigned char *dev_addr)
{
        struct sockaddr_storage addr;

        memcpy(addr.__data, dev_addr, port_dev->addr_len);
        addr.ss_family = port_dev->type;
        return dev_set_mac_address(port_dev, (struct sockaddr *)&addr, NULL);
}

static int team_port_set_orig_dev_addr(struct team_port *port)
{
        return __set_port_dev_addr(port->dev, port->orig.dev_addr);
}

static int team_port_set_team_dev_addr(struct team *team,
                                       struct team_port *port)
{
        return __set_port_dev_addr(port->dev, team->dev->dev_addr);
}

int team_modeop_port_enter(struct team *team, struct team_port *port)
{
        return team_port_set_team_dev_addr(team, port);
}
EXPORT_SYMBOL(team_modeop_port_enter);

void team_modeop_port_change_dev_addr(struct team *team,
                                      struct team_port *port)
{
        team_port_set_team_dev_addr(team, port);
}
EXPORT_SYMBOL(team_modeop_port_change_dev_addr);

static void team_lower_state_changed(struct team_port *port)
{
        struct netdev_lag_lower_state_info info;

        info.link_up = port->linkup;
        info.tx_enabled = team_port_enabled(port);
        netdev_lower_state_changed(port->dev, &info);
}

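/*
 * A port's effective link state is either what the user dictates (when
 * user.linkup_enabled is set) or what the device actually reports. Any
 * change is propagated to the lower device as LAG lower-state info.
 */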
static void team_refresh_port_linkup(struct team_port *port)
{
        bool new_linkup = port->user.linkup_enabled ? port->user.linkup :
                                                      port->state.linkup;

        if (port->linkup != new_linkup) {
                port->linkup = new_linkup;
                team_lower_state_changed(port);
        }
}


/*******************
 * Options handling
 *******************/

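/*
 * Each registered option is expanded into one or more instances: one per
 * team for global options, one per port for per-port options, and one per
 * array index for array options. The instances carry the changed/removed
 * flags used when notifying userspace about option state.
 */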
struct team_option_inst { /* One for each option instance */
        struct list_head list;
        struct list_head tmp_list;
        struct team_option *option;
        struct team_option_inst_info info;
        bool changed;
        bool removed;
};

static struct team_option *__team_find_option(struct team *team,
                                              const char *opt_name)
{
        struct team_option *option;

        list_for_each_entry(option, &team->option_list, list) {
                if (strcmp(option->name, opt_name) == 0)
                        return option;
        }
        return NULL;
}

static void __team_option_inst_del(struct team_option_inst *opt_inst)
{
        list_del(&opt_inst->list);
        kfree(opt_inst);
}

static void __team_option_inst_del_option(struct team *team,
                                          struct team_option *option)
{
        struct team_option_inst *opt_inst, *tmp;

        list_for_each_entry_safe(opt_inst, tmp, &team->option_inst_list, list) {
                if (opt_inst->option == option)
                        __team_option_inst_del(opt_inst);
        }
}

static int __team_option_inst_add(struct team *team, struct team_option *option,
                                  struct team_port *port)
{
        struct team_option_inst *opt_inst;
        unsigned int array_size;
        unsigned int i;
        int err;

        array_size = option->array_size;
        if (!array_size)
                array_size = 1; /* No array but still need one instance */

        for (i = 0; i < array_size; i++) {
                opt_inst = kmalloc(sizeof(*opt_inst), GFP_KERNEL);
                if (!opt_inst)
                        return -ENOMEM;
                opt_inst->option = option;
                opt_inst->info.port = port;
                opt_inst->info.array_index = i;
                opt_inst->changed = true;
                opt_inst->removed = false;
                list_add_tail(&opt_inst->list, &team->option_inst_list);
                if (option->init) {
                        err = option->init(team, &opt_inst->info);
                        if (err)
                                return err;
                }

        }
        return 0;
}

static int __team_option_inst_add_option(struct team *team,
                                         struct team_option *option)
{
        int err;

        if (!option->per_port) {
                err = __team_option_inst_add(team, option, NULL);
                if (err)
                        goto inst_del_option;
        }
        return 0;

inst_del_option:
        __team_option_inst_del_option(team, option);
        return err;
}

static void __team_option_inst_mark_removed_option(struct team *team,
                                                   struct team_option *option)
{
        struct team_option_inst *opt_inst;

        list_for_each_entry(opt_inst, &team->option_inst_list, list) {
                if (opt_inst->option == option) {
                        opt_inst->changed = true;
                        opt_inst->removed = true;
                }
        }
}

static void __team_option_inst_del_port(struct team *team,
                                        struct team_port *port)
{
        struct team_option_inst *opt_inst, *tmp;

        list_for_each_entry_safe(opt_inst, tmp, &team->option_inst_list, list) {
                if (opt_inst->option->per_port &&
                    opt_inst->info.port == port)
                        __team_option_inst_del(opt_inst);
        }
}

static int __team_option_inst_add_port(struct team *team,
                                       struct team_port *port)
{
        struct team_option *option;
        int err;

        list_for_each_entry(option, &team->option_list, list) {
                if (!option->per_port)
                        continue;
                err = __team_option_inst_add(team, option, port);
                if (err)
                        goto inst_del_port;
        }
        return 0;

inst_del_port:
        __team_option_inst_del_port(team, port);
        return err;
}

static void __team_option_inst_mark_removed_port(struct team *team,
                                                 struct team_port *port)
{
        struct team_option_inst *opt_inst;

        list_for_each_entry(opt_inst, &team->option_inst_list, list) {
                if (opt_inst->info.port == port) {
                        opt_inst->changed = true;
                        opt_inst->removed = true;
                }
        }
}

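/*
 * Options are registered in two phases: first all entries are duplicated
 * (so callers may pass a static const table), then instances are created
 * for each copy. A failure in either phase rolls back whatever has been
 * set up so far before returning the error.
 */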
static int __team_options_register(struct team *team,
                                   const struct team_option *option,
                                   size_t option_count)
{
        int i;
        struct team_option **dst_opts;
        int err;

        dst_opts = kcalloc(option_count, sizeof(struct team_option *),
                           GFP_KERNEL);
        if (!dst_opts)
                return -ENOMEM;
        for (i = 0; i < option_count; i++, option++) {
                if (__team_find_option(team, option->name)) {
                        err = -EEXIST;
                        goto alloc_rollback;
                }
                dst_opts[i] = kmemdup(option, sizeof(*option), GFP_KERNEL);
                if (!dst_opts[i]) {
                        err = -ENOMEM;
                        goto alloc_rollback;
                }
        }

        for (i = 0; i < option_count; i++) {
                err = __team_option_inst_add_option(team, dst_opts[i]);
                if (err)
                        goto inst_rollback;
                list_add_tail(&dst_opts[i]->list, &team->option_list);
        }

        kfree(dst_opts);
        return 0;

inst_rollback:
        for (i--; i >= 0; i--)
                __team_option_inst_del_option(team, dst_opts[i]);

        i = option_count;
alloc_rollback:
        for (i--; i >= 0; i--)
                kfree(dst_opts[i]);

        kfree(dst_opts);
        return err;
}

static void __team_options_mark_removed(struct team *team,
                                        const struct team_option *option,
                                        size_t option_count)
{
        int i;

        for (i = 0; i < option_count; i++, option++) {
                struct team_option *del_opt;

                del_opt = __team_find_option(team, option->name);
                if (del_opt)
                        __team_option_inst_mark_removed_option(team, del_opt);
        }
}

static void __team_options_unregister(struct team *team,
                                      const struct team_option *option,
                                      size_t option_count)
{
        int i;

        for (i = 0; i < option_count; i++, option++) {
                struct team_option *del_opt;

                del_opt = __team_find_option(team, option->name);
                if (del_opt) {
                        __team_option_inst_del_option(team, del_opt);
                        list_del(&del_opt->list);
                        kfree(del_opt);
                }
        }
}

static void __team_options_change_check(struct team *team);

int team_options_register(struct team *team,
                          const struct team_option *option,
                          size_t option_count)
{
        int err;

        err = __team_options_register(team, option, option_count);
        if (err)
                return err;
        __team_options_change_check(team);
        return 0;
}
EXPORT_SYMBOL(team_options_register);

void team_options_unregister(struct team *team,
                             const struct team_option *option,
                             size_t option_count)
{
        __team_options_mark_removed(team, option, option_count);
        __team_options_change_check(team);
        __team_options_unregister(team, option, option_count);
}
EXPORT_SYMBOL(team_options_unregister);

static int team_option_get(struct team *team,
                           struct team_option_inst *opt_inst,
                           struct team_gsetter_ctx *ctx)
{
        if (!opt_inst->option->getter)
                return -EOPNOTSUPP;
        return opt_inst->option->getter(team, ctx);
}

static int team_option_set(struct team *team,
                           struct team_option_inst *opt_inst,
                           struct team_gsetter_ctx *ctx)
{
        if (!opt_inst->option->setter)
                return -EOPNOTSUPP;
        return opt_inst->option->setter(team, ctx);
}

void team_option_inst_set_change(struct team_option_inst_info *opt_inst_info)
{
        struct team_option_inst *opt_inst;

        opt_inst = container_of(opt_inst_info, struct team_option_inst, info);
        opt_inst->changed = true;
}
EXPORT_SYMBOL(team_option_inst_set_change);

void team_options_change_check(struct team *team)
{
        __team_options_change_check(team);
}
EXPORT_SYMBOL(team_options_change_check);


/****************
 * Mode handling
 ****************/

static LIST_HEAD(mode_list);
static DEFINE_SPINLOCK(mode_list_lock);

struct team_mode_item {
        struct list_head list;
        const struct team_mode *mode;
};

static struct team_mode_item *__find_mode(const char *kind)
{
        struct team_mode_item *mitem;

        list_for_each_entry(mitem, &mode_list, list) {
                if (strcmp(mitem->mode->kind, kind) == 0)
                        return mitem;
        }
        return NULL;
}

static bool is_good_mode_name(const char *name)
{
        while (*name != '\0') {
                if (!isalpha(*name) && !isdigit(*name) && *name != '_')
                        return false;
                name++;
        }
        return true;
}

int team_mode_register(const struct team_mode *mode)
{
        int err = 0;
        struct team_mode_item *mitem;

        if (!is_good_mode_name(mode->kind) ||
            mode->priv_size > TEAM_MODE_PRIV_SIZE)
                return -EINVAL;

        mitem = kmalloc(sizeof(*mitem), GFP_KERNEL);
        if (!mitem)
                return -ENOMEM;

        spin_lock(&mode_list_lock);
        if (__find_mode(mode->kind)) {
                err = -EEXIST;
                kfree(mitem);
                goto unlock;
        }
        mitem->mode = mode;
        list_add_tail(&mitem->list, &mode_list);
unlock:
        spin_unlock(&mode_list_lock);
        return err;
}
EXPORT_SYMBOL(team_mode_register);

void team_mode_unregister(const struct team_mode *mode)
{
        struct team_mode_item *mitem;

        spin_lock(&mode_list_lock);
        mitem = __find_mode(mode->kind);
        if (mitem) {
                list_del_init(&mitem->list);
                kfree(mitem);
        }
        spin_unlock(&mode_list_lock);
}
EXPORT_SYMBOL(team_mode_unregister);

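/*
 * Look up a mode by kind, autoloading the "team-mode-<kind>" module if it
 * is not registered yet. The mode list lock must be dropped around
 * request_module() since it may sleep. A reference on the mode's owner
 * module is taken and held until team_mode_put().
 */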
static const struct team_mode *team_mode_get(const char *kind)
{
        struct team_mode_item *mitem;
        const struct team_mode *mode = NULL;

        if (!try_module_get(THIS_MODULE))
                return NULL;

        spin_lock(&mode_list_lock);
        mitem = __find_mode(kind);
        if (!mitem) {
                spin_unlock(&mode_list_lock);
                request_module("team-mode-%s", kind);
                spin_lock(&mode_list_lock);
                mitem = __find_mode(kind);
        }
        if (mitem) {
                mode = mitem->mode;
                if (!try_module_get(mode->owner))
                        mode = NULL;
        }

        spin_unlock(&mode_list_lock);
        module_put(THIS_MODULE);
        return mode;
}

static void team_mode_put(const struct team_mode *mode)
{
        module_put(mode->owner);
}

static bool team_dummy_transmit(struct team *team, struct sk_buff *skb)
{
        dev_kfree_skb_any(skb);
        return false;
}

static rx_handler_result_t team_dummy_receive(struct team *team,
                                              struct team_port *port,
                                              struct sk_buff *skb)
{
        return RX_HANDLER_ANOTHER;
}

static const struct team_mode __team_no_mode = {
        .kind           = "*NOMODE*",
};

static bool team_is_mode_set(struct team *team)
{
        return team->mode != &__team_no_mode;
}

static void team_set_no_mode(struct team *team)
{
        team->user_carrier_enabled = false;
        team->mode = &__team_no_mode;
}

static void team_adjust_ops(struct team *team)
{
        /*
         * To avoid checks in rx/tx skb paths, ensure here that non-null and
         * correct ops are always set.
         */

        if (!team->en_port_count || !team_is_mode_set(team) ||
            !team->mode->ops->transmit)
                team->ops.transmit = team_dummy_transmit;
        else
                team->ops.transmit = team->mode->ops->transmit;

        if (!team->en_port_count || !team_is_mode_set(team) ||
            !team->mode->ops->receive)
                team->ops.receive = team_dummy_receive;
        else
                team->ops.receive = team->mode->ops->receive;
}

/*
 * We benefit from the fact that no port can be present at the time of mode
 * change. Therefore no packets are in flight and the mode operations do not
 * need to be set in any special way.
 */
static int __team_change_mode(struct team *team,
                              const struct team_mode *new_mode)
{
        /* Check if mode was previously set and do cleanup if so */
        if (team_is_mode_set(team)) {
                void (*exit_op)(struct team *team) = team->ops.exit;

                /* Clear ops area so no callback is called any longer */
                memset(&team->ops, 0, sizeof(struct team_mode_ops));
                team_adjust_ops(team);

                if (exit_op)
                        exit_op(team);
                team_mode_put(team->mode);
                team_set_no_mode(team);
                /* zero private data area */
                memset(&team->mode_priv, 0,
                       sizeof(struct team) - offsetof(struct team, mode_priv));
        }

        if (!new_mode)
                return 0;

        if (new_mode->ops->init) {
                int err;

                err = new_mode->ops->init(team);
                if (err)
                        return err;
        }

        team->mode = new_mode;
        memcpy(&team->ops, new_mode->ops, sizeof(struct team_mode_ops));
        team_adjust_ops(team);

        return 0;
}

static int team_change_mode(struct team *team, const char *kind)
{
        const struct team_mode *new_mode;
        struct net_device *dev = team->dev;
        int err;

        if (!list_empty(&team->port_list)) {
                netdev_err(dev, "No ports can be present during mode change\n");
                return -EBUSY;
        }

        if (team_is_mode_set(team) && strcmp(team->mode->kind, kind) == 0) {
                netdev_err(dev, "Unable to change to the same mode the team is in\n");
                return -EINVAL;
        }

        new_mode = team_mode_get(kind);
        if (!new_mode) {
                netdev_err(dev, "Mode \"%s\" not found\n", kind);
                return -EINVAL;
        }

        err = __team_change_mode(team, new_mode);
        if (err) {
                netdev_err(dev, "Failed to change to mode \"%s\"\n", kind);
                team_mode_put(new_mode);
                return err;
        }

        netdev_info(dev, "Mode changed to \"%s\"\n", kind);
        return 0;
}


/*********************
 * Peers notification
 *********************/

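/*
 * NETDEV_NOTIFY_PEERS notifications (e.g. gratuitous ARPs) are sent
 * count_pending times, spaced by the configured interval. The work item
 * needs RTNL; if the lock cannot be taken, the work reschedules itself
 * instead of blocking.
 */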
static void team_notify_peers_work(struct work_struct *work)
{
        struct team *team;
        int val;

        team = container_of(work, struct team, notify_peers.dw.work);

        if (!rtnl_trylock()) {
                schedule_delayed_work(&team->notify_peers.dw, 0);
                return;
        }
        val = atomic_dec_if_positive(&team->notify_peers.count_pending);
        if (val < 0) {
                rtnl_unlock();
                return;
        }
        call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, team->dev);
        rtnl_unlock();
        if (val)
                schedule_delayed_work(&team->notify_peers.dw,
                                      msecs_to_jiffies(team->notify_peers.interval));
}

static void team_notify_peers(struct team *team)
{
        if (!team->notify_peers.count || !netif_running(team->dev))
                return;
        atomic_add(team->notify_peers.count, &team->notify_peers.count_pending);
        schedule_delayed_work(&team->notify_peers.dw, 0);
}

static void team_notify_peers_init(struct team *team)
{
        INIT_DELAYED_WORK(&team->notify_peers.dw, team_notify_peers_work);
}

static void team_notify_peers_fini(struct team *team)
{
        cancel_delayed_work_sync(&team->notify_peers.dw);
}


/*******************************
 * Send multicast group rejoins
 *******************************/

static void team_mcast_rejoin_work(struct work_struct *work)
{
        struct team *team;
        int val;

        team = container_of(work, struct team, mcast_rejoin.dw.work);

        if (!rtnl_trylock()) {
                schedule_delayed_work(&team->mcast_rejoin.dw, 0);
                return;
        }
        val = atomic_dec_if_positive(&team->mcast_rejoin.count_pending);
        if (val < 0) {
                rtnl_unlock();
                return;
        }
        call_netdevice_notifiers(NETDEV_RESEND_IGMP, team->dev);
        rtnl_unlock();
        if (val)
                schedule_delayed_work(&team->mcast_rejoin.dw,
                                      msecs_to_jiffies(team->mcast_rejoin.interval));
}

static void team_mcast_rejoin(struct team *team)
{
        if (!team->mcast_rejoin.count || !netif_running(team->dev))
                return;
        atomic_add(team->mcast_rejoin.count, &team->mcast_rejoin.count_pending);
        schedule_delayed_work(&team->mcast_rejoin.dw, 0);
}

static void team_mcast_rejoin_init(struct team *team)
{
        INIT_DELAYED_WORK(&team->mcast_rejoin.dw, team_mcast_rejoin_work);
}

static void team_mcast_rejoin_fini(struct team *team)
{
        cancel_delayed_work_sync(&team->mcast_rejoin.dw);
}


/************************
 * Rx path frame handler
 ************************/

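/*
 * Return values follow the rx_handler contract: RX_HANDLER_ANOTHER makes
 * the stack reprocess the skb with skb->dev pointed at the team device,
 * RX_HANDLER_EXACT limits delivery to exact-match ptype handlers on the
 * port, and RX_HANDLER_CONSUMED means the skb was taken over or dropped.
 */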
/* note: already called with rcu_read_lock */
static rx_handler_result_t team_handle_frame(struct sk_buff **pskb)
{
        struct sk_buff *skb = *pskb;
        struct team_port *port;
        struct team *team;
        rx_handler_result_t res;

        skb = skb_share_check(skb, GFP_ATOMIC);
        if (!skb)
                return RX_HANDLER_CONSUMED;

        *pskb = skb;

        port = team_port_get_rcu(skb->dev);
        team = port->team;
        if (!team_port_enabled(port)) {
                /* allow exact match delivery for disabled ports */
                res = RX_HANDLER_EXACT;
        } else {
                res = team->ops.receive(team, port, skb);
        }
        if (res == RX_HANDLER_ANOTHER) {
                struct team_pcpu_stats *pcpu_stats;

                pcpu_stats = this_cpu_ptr(team->pcpu_stats);
                u64_stats_update_begin(&pcpu_stats->syncp);
                pcpu_stats->rx_packets++;
                pcpu_stats->rx_bytes += skb->len;
                if (skb->pkt_type == PACKET_MULTICAST)
                        pcpu_stats->rx_multicast++;
                u64_stats_update_end(&pcpu_stats->syncp);

                skb->dev = team->dev;
        } else if (res == RX_HANDLER_EXACT) {
                this_cpu_inc(team->pcpu_stats->rx_nohandler);
        } else {
                this_cpu_inc(team->pcpu_stats->rx_dropped);
        }

        return res;
}


/*************************************
 * Multiqueue Tx port select override
 *************************************/

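/*
 * qom_lists holds one priority-sorted port list per tx queue, except for
 * queue 0 which is never overridden; list i serves queue_id i + 1. Each
 * list contains the enabled ports bound to that queue id.
 */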
static int team_queue_override_init(struct team *team)
{
        struct list_head *listarr;
        unsigned int queue_cnt = team->dev->num_tx_queues - 1;
        unsigned int i;

        if (!queue_cnt)
                return 0;
        listarr = kmalloc_array(queue_cnt, sizeof(struct list_head),
                                GFP_KERNEL);
        if (!listarr)
                return -ENOMEM;
        team->qom_lists = listarr;
        for (i = 0; i < queue_cnt; i++)
                INIT_LIST_HEAD(listarr++);
        return 0;
}

static void team_queue_override_fini(struct team *team)
{
        kfree(team->qom_lists);
}

static struct list_head *__team_get_qom_list(struct team *team, u16 queue_id)
{
        return &team->qom_lists[queue_id - 1];
}

/*
 * note: already called with rcu_read_lock
 */
static bool team_queue_override_transmit(struct team *team, struct sk_buff *skb)
{
        struct list_head *qom_list;
        struct team_port *port;

        if (!team->queue_override_enabled || !skb->queue_mapping)
                return false;
        qom_list = __team_get_qom_list(team, skb->queue_mapping);
        list_for_each_entry_rcu(port, qom_list, qom_list) {
                if (!team_dev_queue_xmit(team, port, skb))
                        return true;
        }
        return false;
}

static void __team_queue_override_port_del(struct team *team,
                                           struct team_port *port)
{
        if (!port->queue_id)
                return;
        list_del_rcu(&port->qom_list);
}

static bool team_queue_override_port_has_gt_prio_than(struct team_port *port,
                                                      struct team_port *cur)
{
        if (port->priority < cur->priority)
                return true;
        if (port->priority > cur->priority)
                return false;
        if (port->index < cur->index)
                return true;
        return false;
}

static void __team_queue_override_port_add(struct team *team,
                                           struct team_port *port)
{
        struct team_port *cur;
        struct list_head *qom_list;
        struct list_head *node;

        if (!port->queue_id)
                return;
        qom_list = __team_get_qom_list(team, port->queue_id);
        node = qom_list;
        list_for_each_entry(cur, qom_list, qom_list) {
                if (team_queue_override_port_has_gt_prio_than(port, cur))
                        break;
                node = &cur->qom_list;
        }
        list_add_rcu(&port->qom_list, node);
}

static void __team_queue_override_enabled_check(struct team *team)
{
        struct team_port *port;
        bool enabled = false;

        list_for_each_entry(port, &team->port_list, list) {
                if (port->queue_id) {
                        enabled = true;
                        break;
                }
        }
        if (enabled == team->queue_override_enabled)
                return;
        netdev_dbg(team->dev, "%s queue override\n",
                   enabled ? "Enabling" : "Disabling");
        team->queue_override_enabled = enabled;
}

static void team_queue_override_port_prio_changed(struct team *team,
                                                  struct team_port *port)
{
        if (!port->queue_id || !team_port_enabled(port))
                return;
        __team_queue_override_port_del(team, port);
        __team_queue_override_port_add(team, port);
        __team_queue_override_enabled_check(team);
}

static void team_queue_override_port_change_queue_id(struct team *team,
                                                     struct team_port *port,
                                                     u16 new_queue_id)
{
        if (team_port_enabled(port)) {
                __team_queue_override_port_del(team, port);
                port->queue_id = new_queue_id;
                __team_queue_override_port_add(team, port);
                __team_queue_override_enabled_check(team);
        } else {
                port->queue_id = new_queue_id;
        }
}

static void team_queue_override_port_add(struct team *team,
                                         struct team_port *port)
{
        __team_queue_override_port_add(team, port);
        __team_queue_override_enabled_check(team);
}

static void team_queue_override_port_del(struct team *team,
                                         struct team_port *port)
{
        __team_queue_override_port_del(team, port);
        __team_queue_override_enabled_check(team);
}


/****************
 * Port handling
 ****************/

static bool team_port_find(const struct team *team,
                           const struct team_port *port)
{
        struct team_port *cur;

        list_for_each_entry(cur, &team->port_list, list)
                if (cur == port)
                        return true;
        return false;
}

/*
 * Enable/disable a port by adding it to the enabled-port hashlist and
 * setting port->index. This might be racy, so a reader could see an
 * incorrect index while processing an in-flight packet, but that is not
 * a problem. Writes are guarded by team->lock.
 */
static void team_port_enable(struct team *team,
                             struct team_port *port)
{
        if (team_port_enabled(port))
                return;
        port->index = team->en_port_count++;
        hlist_add_head_rcu(&port->hlist,
                           team_port_index_hash(team, port->index));
        team_adjust_ops(team);
        team_queue_override_port_add(team, port);
        if (team->ops.port_enabled)
                team->ops.port_enabled(team, port);
        team_notify_peers(team);
        team_mcast_rejoin(team);
        team_lower_state_changed(port);
}

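/*
 * Removing a port leaves a hole in the 0..en_port_count-1 index space, so
 * shift every following port down by one and rehash it to keep the
 * index-based lookup dense.
 */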
static void __reconstruct_port_hlist(struct team *team, int rm_index)
{
        int i;
        struct team_port *port;

        for (i = rm_index + 1; i < team->en_port_count; i++) {
                port = team_get_port_by_index(team, i);
                hlist_del_rcu(&port->hlist);
                port->index--;
                hlist_add_head_rcu(&port->hlist,
                                   team_port_index_hash(team, port->index));
        }
}

static void team_port_disable(struct team *team,
                              struct team_port *port)
{
        if (!team_port_enabled(port))
                return;
        if (team->ops.port_disabled)
                team->ops.port_disabled(team, port);
        hlist_del_rcu(&port->hlist);
        __reconstruct_port_hlist(team, port->index);
        port->index = -1;
        team->en_port_count--;
        team_queue_override_port_del(team, port);
        team_adjust_ops(team);
        team_lower_state_changed(port);
}

#define TEAM_VLAN_FEATURES (NETIF_F_HW_CSUM | NETIF_F_SG | \
                            NETIF_F_FRAGLIST | NETIF_F_GSO_SOFTWARE | \
                            NETIF_F_HIGHDMA | NETIF_F_LRO)

#define TEAM_ENC_FEATURES       (NETIF_F_HW_CSUM | NETIF_F_SG | \
                                 NETIF_F_RXCSUM | NETIF_F_GSO_SOFTWARE)

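/*
 * The team device can only advertise features that work across all ports,
 * so walk the port list and fold each port's feature set in with
 * netdev_increment_features(). The same pass computes the largest
 * hard_header_len and whether dst may be released before transmit.
 */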
static void __team_compute_features(struct team *team)
{
        struct team_port *port;
        netdev_features_t vlan_features = TEAM_VLAN_FEATURES &
                                          NETIF_F_ALL_FOR_ALL;
        netdev_features_t enc_features  = TEAM_ENC_FEATURES;
        unsigned short max_hard_header_len = ETH_HLEN;
        unsigned int dst_release_flag = IFF_XMIT_DST_RELEASE |
                                        IFF_XMIT_DST_RELEASE_PERM;

        rcu_read_lock();
        list_for_each_entry_rcu(port, &team->port_list, list) {
                vlan_features = netdev_increment_features(vlan_features,
                                        port->dev->vlan_features,
                                        TEAM_VLAN_FEATURES);
                enc_features =
                        netdev_increment_features(enc_features,
                                                  port->dev->hw_enc_features,
                                                  TEAM_ENC_FEATURES);

                dst_release_flag &= port->dev->priv_flags;
                if (port->dev->hard_header_len > max_hard_header_len)
                        max_hard_header_len = port->dev->hard_header_len;
        }
        rcu_read_unlock();

        team->dev->vlan_features = vlan_features;
        team->dev->hw_enc_features = enc_features | NETIF_F_GSO_ENCAP_ALL |
                                     NETIF_F_HW_VLAN_CTAG_TX |
                                     NETIF_F_HW_VLAN_STAG_TX;
        team->dev->hard_header_len = max_hard_header_len;

        team->dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
        if (dst_release_flag == (IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM))
                team->dev->priv_flags |= IFF_XMIT_DST_RELEASE;
}

static void team_compute_features(struct team *team)
{
        __team_compute_features(team);
        netdev_change_features(team->dev);
}

static int team_port_enter(struct team *team, struct team_port *port)
{
        int err = 0;

        dev_hold(team->dev);
        if (team->ops.port_enter) {
                err = team->ops.port_enter(team, port);
                if (err) {
                        netdev_err(team->dev, "Device %s failed to enter team mode\n",
                                   port->dev->name);
                        goto err_port_enter;
                }
        }

        return 0;

err_port_enter:
        dev_put(team->dev);

        return err;
}

static void team_port_leave(struct team *team, struct team_port *port)
{
        if (team->ops.port_leave)
                team->ops.port_leave(team, port);
        dev_put(team->dev);
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static int __team_port_enable_netpoll(struct team_port *port)
{
        struct netpoll *np;
        int err;

        np = kzalloc(sizeof(*np), GFP_KERNEL);
        if (!np)
                return -ENOMEM;

        err = __netpoll_setup(np, port->dev);
        if (err) {
                kfree(np);
                return err;
        }
        port->np = np;
        return err;
}

static int team_port_enable_netpoll(struct team_port *port)
{
        if (!port->team->dev->npinfo)
                return 0;

        return __team_port_enable_netpoll(port);
}

static void team_port_disable_netpoll(struct team_port *port)
{
        struct netpoll *np = port->np;

        if (!np)
                return;
        port->np = NULL;

        __netpoll_free(np);
}
#else
static int team_port_enable_netpoll(struct team_port *port)
{
        return 0;
}
static void team_port_disable_netpoll(struct team_port *port)
{
}
#endif

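/*
 * Link the port below the team device as a LAG slave. IFF_TEAM_PORT is set
 * only after the upper link succeeds, which is what netif_is_team_port()
 * keys off.
 */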
static int team_upper_dev_link(struct team *team, struct team_port *port,
                               struct netlink_ext_ack *extack)
{
        struct netdev_lag_upper_info lag_upper_info;
        int err;

        lag_upper_info.tx_type = team->mode->lag_tx_type;
        lag_upper_info.hash_type = NETDEV_LAG_HASH_UNKNOWN;
        err = netdev_master_upper_dev_link(port->dev, team->dev, NULL,
                                           &lag_upper_info, extack);
        if (err)
                return err;
        port->dev->priv_flags |= IFF_TEAM_PORT;
        return 0;
}

static void team_upper_dev_unlink(struct team *team, struct team_port *port)
{
        netdev_upper_dev_unlink(port->dev, team->dev);
        port->dev->priv_flags &= ~IFF_TEAM_PORT;
}

static void __team_port_change_port_added(struct team_port *port, bool linkup);
static int team_dev_type_check_change(struct net_device *dev,
                                      struct net_device *port_dev);

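/*
 * Adding a port: validate the candidate device (no loopback, not already
 * enslaved, not an upper device, not up, ...), then set it up step by step:
 * MTU, mode enter, open, VLAN ids, netpoll, rx_handler, upper link,
 * per-port options and address lists. Every failure path unwinds the steps
 * already taken, in reverse order.
 */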
static int team_port_add(struct team *team, struct net_device *port_dev,
                         struct netlink_ext_ack *extack)
{
        struct net_device *dev = team->dev;
        struct team_port *port;
        char *portname = port_dev->name;
        int err;

        if (port_dev->flags & IFF_LOOPBACK) {
                NL_SET_ERR_MSG(extack, "Loopback device can't be added as a team port");
                netdev_err(dev, "Device %s is loopback device. Loopback devices can't be added as a team port\n",
                           portname);
                return -EINVAL;
        }

        if (netif_is_team_port(port_dev)) {
                NL_SET_ERR_MSG(extack, "Device is already a port of a team device");
                netdev_err(dev, "Device %s is already a port "
                                "of a team device\n", portname);
                return -EBUSY;
        }

        if (dev == port_dev) {
                NL_SET_ERR_MSG(extack, "Cannot enslave team device to itself");
                netdev_err(dev, "Cannot enslave team device to itself\n");
                return -EINVAL;
        }

        if (netdev_has_upper_dev(dev, port_dev)) {
                NL_SET_ERR_MSG(extack, "Device is already an upper device of the team interface");
                netdev_err(dev, "Device %s is already an upper device of the team interface\n",
                           portname);
                return -EBUSY;
        }

        if (port_dev->features & NETIF_F_VLAN_CHALLENGED &&
            vlan_uses_dev(dev)) {
                NL_SET_ERR_MSG(extack, "Device is VLAN challenged and team device has VLAN set up");
                netdev_err(dev, "Device %s is VLAN challenged and team device has VLAN set up\n",
                           portname);
                return -EPERM;
        }

        err = team_dev_type_check_change(dev, port_dev);
        if (err)
                return err;

        if (port_dev->flags & IFF_UP) {
                NL_SET_ERR_MSG(extack, "Device is up. Set it down before adding it as a team port");
                netdev_err(dev, "Device %s is up. Set it down before adding it as a team port\n",
                           portname);
                return -EBUSY;
        }

        port = kzalloc(sizeof(struct team_port) + team->mode->port_priv_size,
                       GFP_KERNEL);
        if (!port)
                return -ENOMEM;

        port->dev = port_dev;
        port->team = team;
        INIT_LIST_HEAD(&port->qom_list);

        port->orig.mtu = port_dev->mtu;
        err = dev_set_mtu(port_dev, dev->mtu);
        if (err) {
                netdev_dbg(dev, "Error %d calling dev_set_mtu\n", err);
                goto err_set_mtu;
        }

        memcpy(port->orig.dev_addr, port_dev->dev_addr, port_dev->addr_len);

        err = team_port_enter(team, port);
        if (err) {
                netdev_err(dev, "Device %s failed to enter team mode\n",
                           portname);
                goto err_port_enter;
        }

        err = dev_open(port_dev, extack);
        if (err) {
                netdev_dbg(dev, "Device %s opening failed\n",
                           portname);
                goto err_dev_open;
        }

        err = vlan_vids_add_by_dev(port_dev, dev);
        if (err) {
                netdev_err(dev, "Failed to add vlan ids to device %s\n",
                                portname);
                goto err_vids_add;
        }

        err = team_port_enable_netpoll(port);
        if (err) {
                netdev_err(dev, "Failed to enable netpoll on device %s\n",
                           portname);
                goto err_enable_netpoll;
        }

        if (!(dev->features & NETIF_F_LRO))
                dev_disable_lro(port_dev);

        err = netdev_rx_handler_register(port_dev, team_handle_frame,
                                         port);
        if (err) {
                netdev_err(dev, "Device %s failed to register rx_handler\n",
                           portname);
                goto err_handler_register;
        }

        err = team_upper_dev_link(team, port, extack);
        if (err) {
                netdev_err(dev, "Device %s failed to set upper link\n",
                           portname);
                goto err_set_upper_link;
        }

        err = __team_option_inst_add_port(team, port);
        if (err) {
                netdev_err(dev, "Device %s failed to add per-port options\n",
                           portname);
                goto err_option_port_add;
        }

        /* set promiscuity level to new slave */
        if (dev->flags & IFF_PROMISC) {
                err = dev_set_promiscuity(port_dev, 1);
                if (err)
                        goto err_set_slave_promisc;
        }

        /* set allmulti level to new slave */
        if (dev->flags & IFF_ALLMULTI) {
                err = dev_set_allmulti(port_dev, 1);
                if (err) {
                        if (dev->flags & IFF_PROMISC)
                                dev_set_promiscuity(port_dev, -1);
                        goto err_set_slave_promisc;
                }
        }

        netif_addr_lock_bh(dev);
        dev_uc_sync_multiple(port_dev, dev);
        dev_mc_sync_multiple(port_dev, dev);
        netif_addr_unlock_bh(dev);

        port->index = -1;
        list_add_tail_rcu(&port->list, &team->port_list);
        team_port_enable(team, port);
        __team_compute_features(team);
        __team_port_change_port_added(port, !!netif_oper_up(port_dev));
        __team_options_change_check(team);

        netdev_info(dev, "Port device %s added\n", portname);

        return 0;

err_set_slave_promisc:
        __team_option_inst_del_port(team, port);

err_option_port_add:
        team_upper_dev_unlink(team, port);

err_set_upper_link:
        netdev_rx_handler_unregister(port_dev);

err_handler_register:
        team_port_disable_netpoll(port);

err_enable_netpoll:
        vlan_vids_del_by_dev(port_dev, dev);

err_vids_add:
        dev_close(port_dev);

err_dev_open:
        team_port_leave(team, port);
        team_port_set_orig_dev_addr(port);

err_port_enter:
        dev_set_mtu(port_dev, port->orig.mtu);

err_set_mtu:
        kfree(port);

        return err;
}

static void __team_port_change_port_removed(struct team_port *port);

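/*
 * Removing a port mirrors team_port_add in reverse: disable it, drop it
 * from the port list, undo promisc/allmulti inheritance, unlink, restore
 * the original MAC address and MTU, and free the port via RCU so readers
 * on the rx path cannot see freed memory.
 */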
static int team_port_del(struct team *team, struct net_device *port_dev)
{
        struct net_device *dev = team->dev;
        struct team_port *port;
        char *portname = port_dev->name;

        port = team_port_get_rtnl(port_dev);
        if (!port || !team_port_find(team, port)) {
                netdev_err(dev, "Device %s does not act as a port of this team\n",
                           portname);
                return -ENOENT;
        }

        team_port_disable(team, port);
        list_del_rcu(&port->list);

        if (dev->flags & IFF_PROMISC)
                dev_set_promiscuity(port_dev, -1);
        if (dev->flags & IFF_ALLMULTI)
                dev_set_allmulti(port_dev, -1);

        team_upper_dev_unlink(team, port);
        netdev_rx_handler_unregister(port_dev);
        team_port_disable_netpoll(port);
        vlan_vids_del_by_dev(port_dev, dev);
        dev_uc_unsync(port_dev, dev);
        dev_mc_unsync(port_dev, dev);
        dev_close(port_dev);
        team_port_leave(team, port);

        __team_option_inst_mark_removed_port(team, port);
        __team_options_change_check(team);
        __team_option_inst_del_port(team, port);
        __team_port_change_port_removed(port);

        team_port_set_orig_dev_addr(port);
        dev_set_mtu(port_dev, port->orig.mtu);
        kfree_rcu(port, rcu);
        netdev_info(dev, "Port device %s removed\n", portname);
        __team_compute_features(team);

        return 0;
}


/*****************
 * Net device ops
 *****************/

static int team_mode_option_get(struct team *team, struct team_gsetter_ctx *ctx)
{
        ctx->data.str_val = team->mode->kind;
        return 0;
}

static int team_mode_option_set(struct team *team, struct team_gsetter_ctx *ctx)
{
        return team_change_mode(team, ctx->data.str_val);
}

static int team_notify_peers_count_get(struct team *team,
                                       struct team_gsetter_ctx *ctx)
{
        ctx->data.u32_val = team->notify_peers.count;
        return 0;
}

static int team_notify_peers_count_set(struct team *team,
                                       struct team_gsetter_ctx *ctx)
{
        team->notify_peers.count = ctx->data.u32_val;
        return 0;
}

static int team_notify_peers_interval_get(struct team *team,
                                          struct team_gsetter_ctx *ctx)
{
        ctx->data.u32_val = team->notify_peers.interval;
        return 0;
}

static int team_notify_peers_interval_set(struct team *team,
                                          struct team_gsetter_ctx *ctx)
{
        team->notify_peers.interval = ctx->data.u32_val;
        return 0;
}

static int team_mcast_rejoin_count_get(struct team *team,
                                       struct team_gsetter_ctx *ctx)
{
        ctx->data.u32_val = team->mcast_rejoin.count;
        return 0;
}

static int team_mcast_rejoin_count_set(struct team *team,
                                       struct team_gsetter_ctx *ctx)
{
        team->mcast_rejoin.count = ctx->data.u32_val;
        return 0;
}

static int team_mcast_rejoin_interval_get(struct team *team,
                                          struct team_gsetter_ctx *ctx)
{
        ctx->data.u32_val = team->mcast_rejoin.interval;
        return 0;
}

static int team_mcast_rejoin_interval_set(struct team *team,
                                          struct team_gsetter_ctx *ctx)
{
        team->mcast_rejoin.interval = ctx->data.u32_val;
        return 0;
}

static int team_port_en_option_get(struct team *team,
                                   struct team_gsetter_ctx *ctx)
{
        struct team_port *port = ctx->info->port;

        ctx->data.bool_val = team_port_enabled(port);
        return 0;
}

static int team_port_en_option_set(struct team *team,
                                   struct team_gsetter_ctx *ctx)
{
        struct team_port *port = ctx->info->port;

        if (ctx->data.bool_val)
                team_port_enable(team, port);
        else
                team_port_disable(team, port);
        return 0;
}

static int team_user_linkup_option_get(struct team *team,
                                       struct team_gsetter_ctx *ctx)
{
        struct team_port *port = ctx->info->port;

        ctx->data.bool_val = port->user.linkup;
        return 0;
}

static void __team_carrier_check(struct team *team);

static int team_user_linkup_option_set(struct team *team,
                                       struct team_gsetter_ctx *ctx)
{
        struct team_port *port = ctx->info->port;

        port->user.linkup = ctx->data.bool_val;
        team_refresh_port_linkup(port);
        __team_carrier_check(port->team);
        return 0;
}

static int team_user_linkup_en_option_get(struct team *team,
                                          struct team_gsetter_ctx *ctx)
{
        struct team_port *port = ctx->info->port;

        ctx->data.bool_val = port->user.linkup_enabled;
        return 0;
}

static int team_user_linkup_en_option_set(struct team *team,
                                          struct team_gsetter_ctx *ctx)
{
        struct team_port *port = ctx->info->port;

        port->user.linkup_enabled = ctx->data.bool_val;
        team_refresh_port_linkup(port);
        __team_carrier_check(port->team);
        return 0;
}

static int team_priority_option_get(struct team *team,
                                    struct team_gsetter_ctx *ctx)
{
        struct team_port *port = ctx->info->port;

        ctx->data.s32_val = port->priority;
        return 0;
}

static int team_priority_option_set(struct team *team,
                                    struct team_gsetter_ctx *ctx)
{
        struct team_port *port = ctx->info->port;
        s32 priority = ctx->data.s32_val;

        if (port->priority == priority)
                return 0;
        port->priority = priority;
        team_queue_override_port_prio_changed(team, port);
        return 0;
}

static int team_queue_id_option_get(struct team *team,
                                    struct team_gsetter_ctx *ctx)
{
        struct team_port *port = ctx->info->port;

        ctx->data.u32_val = port->queue_id;
        return 0;
}

static int team_queue_id_option_set(struct team *team,
                                    struct team_gsetter_ctx *ctx)
{
        struct team_port *port = ctx->info->port;
        u16 new_queue_id = ctx->data.u32_val;

        if (port->queue_id == new_queue_id)
                return 0;
        if (new_queue_id >= team->dev->real_num_tx_queues)
                return -EINVAL;
        team_queue_override_port_change_queue_id(team, port, new_queue_id);
        return 0;
}

static const struct team_option team_options[] = {
        {
                .name = "mode",
                .type = TEAM_OPTION_TYPE_STRING,
                .getter = team_mode_option_get,
                .setter = team_mode_option_set,
        },
        {
                .name = "notify_peers_count",
                .type = TEAM_OPTION_TYPE_U32,
                .getter = team_notify_peers_count_get,
                .setter = team_notify_peers_count_set,
        },
        {
                .name = "notify_peers_interval",
                .type = TEAM_OPTION_TYPE_U32,
                .getter = team_notify_peers_interval_get,
                .setter = team_notify_peers_interval_set,
        },
        {
                .name = "mcast_rejoin_count",
                .type = TEAM_OPTION_TYPE_U32,
                .getter = team_mcast_rejoin_count_get,
                .setter = team_mcast_rejoin_count_set,
        },
        {
                .name = "mcast_rejoin_interval",
                .type = TEAM_OPTION_TYPE_U32,
                .getter = team_mcast_rejoin_interval_get,
                .setter = team_mcast_rejoin_interval_set,
        },
        {
                .name = "enabled",
                .type = TEAM_OPTION_TYPE_BOOL,
                .per_port = true,
                .getter = team_port_en_option_get,
                .setter = team_port_en_option_set,
        },
        {
                .name = "user_linkup",
                .type = TEAM_OPTION_TYPE_BOOL,
                .per_port = true,
                .getter = team_user_linkup_option_get,
                .setter = team_user_linkup_option_set,
        },
        {
                .name = "user_linkup_enabled",
                .type = TEAM_OPTION_TYPE_BOOL,
                .per_port = true,
                .getter = team_user_linkup_en_option_get,
                .setter = team_user_linkup_en_option_set,
        },
        {
                .name = "priority",
                .type = TEAM_OPTION_TYPE_S32,
                .per_port = true,
                .getter = team_priority_option_get,
                .setter = team_priority_option_set,
        },
        {
                .name = "queue_id",
                .type = TEAM_OPTION_TYPE_U32,
                .per_port = true,
                .getter = team_queue_id_option_get,
                .setter = team_queue_id_option_set,
        },
};


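/*
 * ndo_init for the team device: allocate per-cpu stats, set up the port
 * hash and lists, start with no mode, and register the built-in options
 * above. A per-device lockdep key is registered so that stacked team
 * devices can be locked without false lockdep reports.
 */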
static int team_init(struct net_device *dev)
{
        struct team *team = netdev_priv(dev);
        int i;
        int err;

        team->dev = dev;
        team_set_no_mode(team);

        team->pcpu_stats = netdev_alloc_pcpu_stats(struct team_pcpu_stats);
        if (!team->pcpu_stats)
                return -ENOMEM;

        for (i = 0; i < TEAM_PORT_HASHENTRIES; i++)
                INIT_HLIST_HEAD(&team->en_port_hlist[i]);
        INIT_LIST_HEAD(&team->port_list);
        err = team_queue_override_init(team);
        if (err)
                goto err_team_queue_override_init;

        team_adjust_ops(team);

        INIT_LIST_HEAD(&team->option_list);
        INIT_LIST_HEAD(&team->option_inst_list);

        team_notify_peers_init(team);
        team_mcast_rejoin_init(team);

        err = team_options_register(team, team_options, ARRAY_SIZE(team_options));
        if (err)
                goto err_options_register;
        netif_carrier_off(dev);

        lockdep_register_key(&team->team_lock_key);
        __mutex_init(&team->lock, "team->team_lock_key", &team->team_lock_key);
        netdev_lockdep_set_classes(dev);

        return 0;

err_options_register:
        team_mcast_rejoin_fini(team);
        team_notify_peers_fini(team);
        team_queue_override_fini(team);
err_team_queue_override_init:
        free_percpu(team->pcpu_stats);

        return err;
}

static void team_uninit(struct net_device *dev)
{
        struct team *team = netdev_priv(dev);
        struct team_port *port;
        struct team_port *tmp;

        mutex_lock(&team->lock);
        list_for_each_entry_safe(port, tmp, &team->port_list, list)
                team_port_del(team, port->dev);

        __team_change_mode(team, NULL); /* cleanup */
        __team_options_unregister(team, team_options, ARRAY_SIZE(team_options));
        team_mcast_rejoin_fini(team);
        team_notify_peers_fini(team);
        team_queue_override_fini(team);
        mutex_unlock(&team->lock);
        netdev_change_features(dev);
        lockdep_unregister_key(&team->team_lock_key);
}

static void team_destructor(struct net_device *dev)
{
        struct team *team = netdev_priv(dev);

        free_percpu(team->pcpu_stats);
}

static int team_open(struct net_device *dev)
{
        return 0;
}

static int team_close(struct net_device *dev)
{
        return 0;
}

1701/*
1702 * Note: already called with rcu_read_lock held
1703 */
1704static netdev_tx_t team_xmit(struct sk_buff *skb, struct net_device *dev)
1705{
1706        struct team *team = netdev_priv(dev);
1707        bool tx_success;
1708        unsigned int len = skb->len;
1709
1710        tx_success = team_queue_override_transmit(team, skb);
1711        if (!tx_success)
1712                tx_success = team->ops.transmit(team, skb);
1713        if (tx_success) {
1714                struct team_pcpu_stats *pcpu_stats;
1715
1716                pcpu_stats = this_cpu_ptr(team->pcpu_stats);
1717                u64_stats_update_begin(&pcpu_stats->syncp);
1718                pcpu_stats->tx_packets++;
1719                pcpu_stats->tx_bytes += len;
1720                u64_stats_update_end(&pcpu_stats->syncp);
1721        } else {
1722                this_cpu_inc(team->pcpu_stats->tx_dropped);
1723        }
1724
1725        return NETDEV_TX_OK;
1726}
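
    /* Note: team_xmit() always returns NETDEV_TX_OK. If neither the queue
     * override path nor the mode's transmit op could send the packet, the
     * mode op has already freed the skb, so the packet is accounted as
     * tx_dropped above rather than being requeued by the core.
     */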
1727
1728static u16 team_select_queue(struct net_device *dev, struct sk_buff *skb,
1729                             struct net_device *sb_dev)
1730{
1731        /*
1732         * This helper function exists to help dev_pick_tx get the correct
1733         * destination queue.  Using a helper function skips a call to
1734         * skb_tx_hash and will put the skbs in the queue we expect on their
1735         * way down to the team driver.
1736         */
1737        u16 txq = skb_rx_queue_recorded(skb) ? skb_get_rx_queue(skb) : 0;
1738
1739        /*
1740         * Save the original txq to restore before passing to the driver
1741         */
1742        qdisc_skb_cb(skb)->slave_dev_queue_mapping = skb->queue_mapping;
1743
1744        if (unlikely(txq >= dev->real_num_tx_queues)) {
1745                do {
1746                        txq -= dev->real_num_tx_queues;
1747                } while (txq >= dev->real_num_tx_queues);
1748        }
1749        return txq;
1750}
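
    /* Worked example (illustrative): with dev->real_num_tx_queues == 4 and a
     * recorded rx queue of 9, the loop above wraps 9 -> 5 -> 1, i.e. it
     * computes txq % real_num_tx_queues by repeated subtraction, which is
     * cheap because the overshoot is expected to be rare and small.
     */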
1751
1752static void team_change_rx_flags(struct net_device *dev, int change)
1753{
1754        struct team *team = netdev_priv(dev);
1755        struct team_port *port;
1756        int inc;
1757
1758        rcu_read_lock();
1759        list_for_each_entry_rcu(port, &team->port_list, list) {
1760                if (change & IFF_PROMISC) {
1761                        inc = dev->flags & IFF_PROMISC ? 1 : -1;
1762                        dev_set_promiscuity(port->dev, inc);
1763                }
1764                if (change & IFF_ALLMULTI) {
1765                        inc = dev->flags & IFF_ALLMULTI ? 1 : -1;
1766                        dev_set_allmulti(port->dev, inc);
1767                }
1768        }
1769        rcu_read_unlock();
1770}
1771
1772static void team_set_rx_mode(struct net_device *dev)
1773{
1774        struct team *team = netdev_priv(dev);
1775        struct team_port *port;
1776
1777        rcu_read_lock();
1778        list_for_each_entry_rcu(port, &team->port_list, list) {
1779                dev_uc_sync_multiple(port->dev, dev);
1780                dev_mc_sync_multiple(port->dev, dev);
1781        }
1782        rcu_read_unlock();
1783}
1784
1785static int team_set_mac_address(struct net_device *dev, void *p)
1786{
1787        struct sockaddr *addr = p;
1788        struct team *team = netdev_priv(dev);
1789        struct team_port *port;
1790
1791        if (dev->type == ARPHRD_ETHER && !is_valid_ether_addr(addr->sa_data))
1792                return -EADDRNOTAVAIL;
1793        memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
1794        mutex_lock(&team->lock);
1795        list_for_each_entry(port, &team->port_list, list)
1796                if (team->ops.port_change_dev_addr)
1797                        team->ops.port_change_dev_addr(team, port);
1798        mutex_unlock(&team->lock);
1799        return 0;
1800}
1801
1802static int team_change_mtu(struct net_device *dev, int new_mtu)
1803{
1804        struct team *team = netdev_priv(dev);
1805        struct team_port *port;
1806        int err;
1807
1808        /*
1809         * Although this is a reader, it's guarded by the team lock. It's not
1810         * possible to traverse the list in reverse under rcu_read_lock.
1811         */
1812        mutex_lock(&team->lock);
1813        team->port_mtu_change_allowed = true;
1814        list_for_each_entry(port, &team->port_list, list) {
1815                err = dev_set_mtu(port->dev, new_mtu);
1816                if (err) {
1817                        netdev_err(dev, "Device %s failed to change mtu\n",
1818                                   port->dev->name);
1819                        goto unwind;
1820                }
1821        }
1822        team->port_mtu_change_allowed = false;
1823        mutex_unlock(&team->lock);
1824
1825        dev->mtu = new_mtu;
1826
1827        return 0;
1828
1829unwind:
1830        list_for_each_entry_continue_reverse(port, &team->port_list, list)
1831                dev_set_mtu(port->dev, dev->mtu);
1832        team->port_mtu_change_allowed = false;
1833        mutex_unlock(&team->lock);
1834
1835        return err;
1836}
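
    /* Unwind example (illustrative): with ports A, B and C on port_list, a
     * dev_set_mtu() failure on C makes list_for_each_entry_continue_reverse()
     * start at B and walk back to A, restoring the old dev->mtu on each port
     * so the team is left consistent before the error is returned.
     */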
1837
1838static void
1839team_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
1840{
1841        struct team *team = netdev_priv(dev);
1842        struct team_pcpu_stats *p;
1843        u64 rx_packets, rx_bytes, rx_multicast, tx_packets, tx_bytes;
1844        u32 rx_dropped = 0, tx_dropped = 0, rx_nohandler = 0;
1845        unsigned int start;
1846        int i;
1847
1848        for_each_possible_cpu(i) {
1849                p = per_cpu_ptr(team->pcpu_stats, i);
1850                do {
1851                        start = u64_stats_fetch_begin_irq(&p->syncp);
1852                        rx_packets      = p->rx_packets;
1853                        rx_bytes        = p->rx_bytes;
1854                        rx_multicast    = p->rx_multicast;
1855                        tx_packets      = p->tx_packets;
1856                        tx_bytes        = p->tx_bytes;
1857                } while (u64_stats_fetch_retry_irq(&p->syncp, start));
1858
1859                stats->rx_packets       += rx_packets;
1860                stats->rx_bytes         += rx_bytes;
1861                stats->multicast        += rx_multicast;
1862                stats->tx_packets       += tx_packets;
1863                stats->tx_bytes         += tx_bytes;
1864                /*
1865                 * rx_dropped, tx_dropped & rx_nohandler are u32,
1866                 * updated without syncp protection.
1867                 */
1868                rx_dropped      += p->rx_dropped;
1869                tx_dropped      += p->tx_dropped;
1870                rx_nohandler    += p->rx_nohandler;
1871        }
1872        stats->rx_dropped       = rx_dropped;
1873        stats->tx_dropped       = tx_dropped;
1874        stats->rx_nohandler     = rx_nohandler;
1875}
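
    /* The u64_stats_fetch_begin_irq()/u64_stats_fetch_retry_irq() pair above
     * re-reads the 64-bit counters until no writer raced with the reader,
     * which matters on 32-bit machines where a u64 load is not atomic. The
     * u32 drop counters can safely be read outside the retry loop, since a
     * 32-bit read cannot be torn.
     */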
1876
1877static int team_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
1878{
1879        struct team *team = netdev_priv(dev);
1880        struct team_port *port;
1881        int err;
1882
1883        /*
1884         * Although this is a reader, it's guarded by the team lock. It's not
1885         * possible to traverse the list in reverse under rcu_read_lock.
1886         */
1887        mutex_lock(&team->lock);
1888        list_for_each_entry(port, &team->port_list, list) {
1889                err = vlan_vid_add(port->dev, proto, vid);
1890                if (err)
1891                        goto unwind;
1892        }
1893        mutex_unlock(&team->lock);
1894
1895        return 0;
1896
1897unwind:
1898        list_for_each_entry_continue_reverse(port, &team->port_list, list)
1899                vlan_vid_del(port->dev, proto, vid);
1900        mutex_unlock(&team->lock);
1901
1902        return err;
1903}
1904
1905static int team_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
1906{
1907        struct team *team = netdev_priv(dev);
1908        struct team_port *port;
1909
1910        mutex_lock(&team->lock);
1911        list_for_each_entry(port, &team->port_list, list)
1912                vlan_vid_del(port->dev, proto, vid);
1913        mutex_unlock(&team->lock);
1914
1915        return 0;
1916}
1917
1918#ifdef CONFIG_NET_POLL_CONTROLLER
1919static void team_poll_controller(struct net_device *dev)
1920{
1921}
1922
1923static void __team_netpoll_cleanup(struct team *team)
1924{
1925        struct team_port *port;
1926
1927        list_for_each_entry(port, &team->port_list, list)
1928                team_port_disable_netpoll(port);
1929}
1930
1931static void team_netpoll_cleanup(struct net_device *dev)
1932{
1933        struct team *team = netdev_priv(dev);
1934
1935        mutex_lock(&team->lock);
1936        __team_netpoll_cleanup(team);
1937        mutex_unlock(&team->lock);
1938}
1939
1940static int team_netpoll_setup(struct net_device *dev,
1941                              struct netpoll_info *npinfo)
1942{
1943        struct team *team = netdev_priv(dev);
1944        struct team_port *port;
1945        int err = 0;
1946
1947        mutex_lock(&team->lock);
1948        list_for_each_entry(port, &team->port_list, list) {
1949                err = __team_port_enable_netpoll(port);
1950                if (err) {
1951                        __team_netpoll_cleanup(team);
1952                        break;
1953                }
1954        }
1955        mutex_unlock(&team->lock);
1956        return err;
1957}
1958#endif
1959
1960static int team_add_slave(struct net_device *dev, struct net_device *port_dev,
1961                          struct netlink_ext_ack *extack)
1962{
1963        struct team *team = netdev_priv(dev);
1964        int err;
1965
1966        mutex_lock(&team->lock);
1967        err = team_port_add(team, port_dev, extack);
1968        mutex_unlock(&team->lock);
1969
1970        if (!err)
1971                netdev_change_features(dev);
1972
1973        return err;
1974}
1975
1976static int team_del_slave(struct net_device *dev, struct net_device *port_dev)
1977{
1978        struct team *team = netdev_priv(dev);
1979        int err;
1980
1981        mutex_lock(&team->lock);
1982        err = team_port_del(team, port_dev);
1983        mutex_unlock(&team->lock);
1984
1985        if (err)
1986                return err;
1987
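            /* If the removed port was itself a team device (a stacked team),
             * give this team's lock a fresh lockdep class; otherwise lockdep
             * could keep treating both teams' locks as one class and report
             * false-positive recursive locking.
             */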
1988        if (netif_is_team_master(port_dev)) {
1989                lockdep_unregister_key(&team->team_lock_key);
1990                lockdep_register_key(&team->team_lock_key);
1991                lockdep_set_class(&team->lock, &team->team_lock_key);
1992        }
1993        netdev_change_features(dev);
1994
1995        return err;
1996}
1997
1998static netdev_features_t team_fix_features(struct net_device *dev,
1999                                           netdev_features_t features)
2000{
2001        struct team_port *port;
2002        struct team *team = netdev_priv(dev);
2003        netdev_features_t mask;
2004
2005        mask = features;
2006        features &= ~NETIF_F_ONE_FOR_ALL;
2007        features |= NETIF_F_ALL_FOR_ALL;
2008
2009        rcu_read_lock();
2010        list_for_each_entry_rcu(port, &team->port_list, list) {
2011                features = netdev_increment_features(features,
2012                                                     port->dev->features,
2013                                                     mask);
2014        }
2015        rcu_read_unlock();
2016
2017        features = netdev_add_tso_features(features, mask);
2018
2019        return features;
2020}
2021
2022static int team_change_carrier(struct net_device *dev, bool new_carrier)
2023{
2024        struct team *team = netdev_priv(dev);
2025
2026        team->user_carrier_enabled = true;
2027
2028        if (new_carrier)
2029                netif_carrier_on(dev);
2030        else
2031                netif_carrier_off(dev);
2032        return 0;
2033}
2034
2035static const struct net_device_ops team_netdev_ops = {
2036        .ndo_init               = team_init,
2037        .ndo_uninit             = team_uninit,
2038        .ndo_open               = team_open,
2039        .ndo_stop               = team_close,
2040        .ndo_start_xmit         = team_xmit,
2041        .ndo_select_queue       = team_select_queue,
2042        .ndo_change_rx_flags    = team_change_rx_flags,
2043        .ndo_set_rx_mode        = team_set_rx_mode,
2044        .ndo_set_mac_address    = team_set_mac_address,
2045        .ndo_change_mtu         = team_change_mtu,
2046        .ndo_get_stats64        = team_get_stats64,
2047        .ndo_vlan_rx_add_vid    = team_vlan_rx_add_vid,
2048        .ndo_vlan_rx_kill_vid   = team_vlan_rx_kill_vid,
2049#ifdef CONFIG_NET_POLL_CONTROLLER
2050        .ndo_poll_controller    = team_poll_controller,
2051        .ndo_netpoll_setup      = team_netpoll_setup,
2052        .ndo_netpoll_cleanup    = team_netpoll_cleanup,
2053#endif
2054        .ndo_add_slave          = team_add_slave,
2055        .ndo_del_slave          = team_del_slave,
2056        .ndo_fix_features       = team_fix_features,
2057        .ndo_change_carrier     = team_change_carrier,
2058        .ndo_features_check     = passthru_features_check,
2059};
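
    /* Illustrative mapping to iproute2 commands (assuming standard tooling):
     * "ip link set dev eth0 master team0" lands in .ndo_add_slave
     * (team_add_slave), "ip link set dev eth0 nomaster" in .ndo_del_slave,
     * and "ip link set dev team0 mtu 9000" in .ndo_change_mtu, which then
     * propagates the new MTU to every port.
     */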
2060
2061/***********************
2062 * ethtool interface
2063 ***********************/
2064
2065static void team_ethtool_get_drvinfo(struct net_device *dev,
2066                                     struct ethtool_drvinfo *drvinfo)
2067{
2068        strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
2069        strlcpy(drvinfo->version, UTS_RELEASE, sizeof(drvinfo->version));
2070}
2071
2072static int team_ethtool_get_link_ksettings(struct net_device *dev,
2073                                           struct ethtool_link_ksettings *cmd)
2074{
2075        struct team *team = netdev_priv(dev);
2076        unsigned long speed = 0;
2077        struct team_port *port;
2078
2079        cmd->base.duplex = DUPLEX_UNKNOWN;
2080        cmd->base.port = PORT_OTHER;
2081
2082        rcu_read_lock();
2083        list_for_each_entry_rcu(port, &team->port_list, list) {
2084                if (team_port_txable(port)) {
2085                        if (port->state.speed != SPEED_UNKNOWN)
2086                                speed += port->state.speed;
2087                        if (cmd->base.duplex == DUPLEX_UNKNOWN &&
2088                            port->state.duplex != DUPLEX_UNKNOWN)
2089                                cmd->base.duplex = port->state.duplex;
2090                }
2091        }
2092        rcu_read_unlock();
2093
2094        cmd->base.speed = speed ? : SPEED_UNKNOWN;
2095
2096        return 0;
2097}
2098
2099static const struct ethtool_ops team_ethtool_ops = {
2100        .get_drvinfo            = team_ethtool_get_drvinfo,
2101        .get_link               = ethtool_op_get_link,
2102        .get_link_ksettings     = team_ethtool_get_link_ksettings,
2103};
2104
2105/***********************
2106 * rt netlink interface
2107 ***********************/
2108
2109static void team_setup_by_port(struct net_device *dev,
2110                               struct net_device *port_dev)
2111{
2112        dev->header_ops = port_dev->header_ops;
2113        dev->type = port_dev->type;
2114        dev->hard_header_len = port_dev->hard_header_len;
2115        dev->needed_headroom = port_dev->needed_headroom;
2116        dev->addr_len = port_dev->addr_len;
2117        dev->mtu = port_dev->mtu;
2118        memcpy(dev->broadcast, port_dev->broadcast, port_dev->addr_len);
2119        eth_hw_addr_inherit(dev, port_dev);
2120}
2121
2122static int team_dev_type_check_change(struct net_device *dev,
2123                                      struct net_device *port_dev)
2124{
2125        struct team *team = netdev_priv(dev);
2126        char *portname = port_dev->name;
2127        int err;
2128
2129        if (dev->type == port_dev->type)
2130                return 0;
2131        if (!list_empty(&team->port_list)) {
2132                netdev_err(dev, "Device %s is of different type\n", portname);
2133                return -EBUSY;
2134        }
2135        err = call_netdevice_notifiers(NETDEV_PRE_TYPE_CHANGE, dev);
2136        err = notifier_to_errno(err);
2137        if (err) {
2138                netdev_err(dev, "Refused to change device type\n");
2139                return err;
2140        }
2141        dev_uc_flush(dev);
2142        dev_mc_flush(dev);
2143        team_setup_by_port(dev, port_dev);
2144        call_netdevice_notifiers(NETDEV_POST_TYPE_CHANGE, dev);
2145        return 0;
2146}
2147
2148static void team_setup(struct net_device *dev)
2149{
2150        ether_setup(dev);
2151        dev->max_mtu = ETH_MAX_MTU;
2152
2153        dev->netdev_ops = &team_netdev_ops;
2154        dev->ethtool_ops = &team_ethtool_ops;
2155        dev->needs_free_netdev = true;
2156        dev->priv_destructor = team_destructor;
2157        dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_TX_SKB_SHARING);
2158        dev->priv_flags |= IFF_NO_QUEUE;
2159        dev->priv_flags |= IFF_TEAM;
2160
2161        /*
2162         * Indicate we support unicast address filtering. That way the core
2163         * won't bring us to promisc mode in case a unicast addr is added.
2164         * Leave this up to the underlying drivers.
2165         */
2166        dev->priv_flags |= IFF_UNICAST_FLT | IFF_LIVE_ADDR_CHANGE;
2167
2168        dev->features |= NETIF_F_LLTX;
2169        dev->features |= NETIF_F_GRO;
2170
2171        /* Don't allow team devices to change network namespaces. */
2172        dev->features |= NETIF_F_NETNS_LOCAL;
2173
2174        dev->hw_features = TEAM_VLAN_FEATURES |
2175                           NETIF_F_HW_VLAN_CTAG_RX |
2176                           NETIF_F_HW_VLAN_CTAG_FILTER;
2177
2178        dev->hw_features |= NETIF_F_GSO_ENCAP_ALL;
2179        dev->features |= dev->hw_features;
2180        dev->features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX;
2181}
2182
2183static int team_newlink(struct net *src_net, struct net_device *dev,
2184                        struct nlattr *tb[], struct nlattr *data[],
2185                        struct netlink_ext_ack *extack)
2186{
2187        if (tb[IFLA_ADDRESS] == NULL)
2188                eth_hw_addr_random(dev);
2189
2190        return register_netdevice(dev);
2191}
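
    /* team_newlink() is reached via rtnetlink, e.g. (illustrative):
     *
     *	ip link add name team0 type team
     *
     * When no IFLA_ADDRESS attribute is supplied, a random MAC address is
     * assigned before the device is registered.
     */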
2192
2193static int team_validate(struct nlattr *tb[], struct nlattr *data[],
2194                         struct netlink_ext_ack *extack)
2195{
2196        if (tb[IFLA_ADDRESS]) {
2197                if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
2198                        return -EINVAL;
2199                if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
2200                        return -EADDRNOTAVAIL;
2201        }
2202        return 0;
2203}
2204
2205static unsigned int team_get_num_tx_queues(void)
2206{
2207        return TEAM_DEFAULT_NUM_TX_QUEUES;
2208}
2209
2210static unsigned int team_get_num_rx_queues(void)
2211{
2212        return TEAM_DEFAULT_NUM_RX_QUEUES;
2213}
2214
2215static struct rtnl_link_ops team_link_ops __read_mostly = {
2216        .kind                   = DRV_NAME,
2217        .priv_size              = sizeof(struct team),
2218        .setup                  = team_setup,
2219        .newlink                = team_newlink,
2220        .validate               = team_validate,
2221        .get_num_tx_queues      = team_get_num_tx_queues,
2222        .get_num_rx_queues      = team_get_num_rx_queues,
2223};
2224
2225
2226/***********************************
2227 * Generic netlink custom interface
2228 ***********************************/
2229
2230static struct genl_family team_nl_family;
2231
2232static const struct nla_policy team_nl_policy[TEAM_ATTR_MAX + 1] = {
2233        [TEAM_ATTR_UNSPEC]                      = { .type = NLA_UNSPEC, },
2234        [TEAM_ATTR_TEAM_IFINDEX]                = { .type = NLA_U32 },
2235        [TEAM_ATTR_LIST_OPTION]                 = { .type = NLA_NESTED },
2236        [TEAM_ATTR_LIST_PORT]                   = { .type = NLA_NESTED },
2237};
2238
2239static const struct nla_policy
2240team_nl_option_policy[TEAM_ATTR_OPTION_MAX + 1] = {
2241        [TEAM_ATTR_OPTION_UNSPEC]               = { .type = NLA_UNSPEC, },
2242        [TEAM_ATTR_OPTION_NAME] = {
2243                .type = NLA_STRING,
2244                .len = TEAM_STRING_MAX_LEN,
2245        },
2246        [TEAM_ATTR_OPTION_CHANGED]              = { .type = NLA_FLAG },
2247        [TEAM_ATTR_OPTION_TYPE]                 = { .type = NLA_U8 },
2248        [TEAM_ATTR_OPTION_DATA]                 = { .type = NLA_BINARY },
2249        [TEAM_ATTR_OPTION_PORT_IFINDEX]         = { .type = NLA_U32 },
2250        [TEAM_ATTR_OPTION_ARRAY_INDEX]          = { .type = NLA_U32 },
2251};
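
    /* Sketch of the nesting the policies above describe (attribute values
     * are illustrative): a TEAM_CMD_OPTIONS_SET request looks like
     *
     *	TEAM_ATTR_TEAM_IFINDEX = 7
     *	TEAM_ATTR_LIST_OPTION
     *	    TEAM_ATTR_ITEM_OPTION
     *	        TEAM_ATTR_OPTION_NAME = "mode"
     *	        TEAM_ATTR_OPTION_TYPE = NLA_STRING
     *	        TEAM_ATTR_OPTION_DATA = "activebackup"
     *
     * with TEAM_ATTR_OPTION_PORT_IFINDEX and TEAM_ATTR_OPTION_ARRAY_INDEX
     * added for per-port and array options respectively.
     */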
2252
2253static int team_nl_cmd_noop(struct sk_buff *skb, struct genl_info *info)
2254{
2255        struct sk_buff *msg;
2256        void *hdr;
2257        int err;
2258
2259        msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
2260        if (!msg)
2261                return -ENOMEM;
2262
2263        hdr = genlmsg_put(msg, info->snd_portid, info->snd_seq,
2264                          &team_nl_family, 0, TEAM_CMD_NOOP);
2265        if (!hdr) {
2266                err = -EMSGSIZE;
2267                goto err_msg_put;
2268        }
2269
2270        genlmsg_end(msg, hdr);
2271
2272        return genlmsg_unicast(genl_info_net(info), msg, info->snd_portid);
2273
2274err_msg_put:
2275        nlmsg_free(msg);
2276
2277        return err;
2278}
2279
2280/*
2281 * Netlink cmd functions should be locked by the following two functions.
2282 * Since dev gets held here, it is guaranteed that dev won't disappear in between.
2283 */
2284static struct team *team_nl_team_get(struct genl_info *info)
2285{
2286        struct net *net = genl_info_net(info);
2287        int ifindex;
2288        struct net_device *dev;
2289        struct team *team;
2290
2291        if (!info->attrs[TEAM_ATTR_TEAM_IFINDEX])
2292                return NULL;
2293
2294        ifindex = nla_get_u32(info->attrs[TEAM_ATTR_TEAM_IFINDEX]);
2295        dev = dev_get_by_index(net, ifindex);
2296        if (!dev || dev->netdev_ops != &team_netdev_ops) {
2297                if (dev)
2298                        dev_put(dev);
2299                return NULL;
2300        }
2301
2302        team = netdev_priv(dev);
2303        mutex_lock(&team->lock);
2304        return team;
2305}
2306
2307static void team_nl_team_put(struct team *team)
2308{
2309        mutex_unlock(&team->lock);
2310        dev_put(team->dev);
2311}
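
    /* Usage sketch for the get/put pair above (this mirrors what
     * team_nl_cmd_options_get() below does):
     *
     *	team = team_nl_team_get(info);	// takes a dev reference + team->lock
     *	if (!team)
     *		return -EINVAL;
     *	...				// team state may safely be accessed
     *	team_nl_team_put(team);		// drops the lock, then the reference
     */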
2312
2313typedef int team_nl_send_func_t(struct sk_buff *skb,
2314                                struct team *team, u32 portid);
2315
2316static int team_nl_send_unicast(struct sk_buff *skb, struct team *team, u32 portid)
2317{
2318        return genlmsg_unicast(dev_net(team->dev), skb, portid);
2319}
2320
2321static int team_nl_fill_one_option_get(struct sk_buff *skb, struct team *team,
2322                                       struct team_option_inst *opt_inst)
2323{
2324        struct nlattr *option_item;
2325        struct team_option *option = opt_inst->option;
2326        struct team_option_inst_info *opt_inst_info = &opt_inst->info;
2327        struct team_gsetter_ctx ctx;
2328        int err;
2329
2330        ctx.info = opt_inst_info;
2331        err = team_option_get(team, opt_inst, &ctx);
2332        if (err)
2333                return err;
2334
2335        option_item = nla_nest_start_noflag(skb, TEAM_ATTR_ITEM_OPTION);
2336        if (!option_item)
2337                return -EMSGSIZE;
2338
2339        if (nla_put_string(skb, TEAM_ATTR_OPTION_NAME, option->name))
2340                goto nest_cancel;
2341        if (opt_inst_info->port &&
2342            nla_put_u32(skb, TEAM_ATTR_OPTION_PORT_IFINDEX,
2343                        opt_inst_info->port->dev->ifindex))
2344                goto nest_cancel;
2345        if (opt_inst->option->array_size &&
2346            nla_put_u32(skb, TEAM_ATTR_OPTION_ARRAY_INDEX,
2347                        opt_inst_info->array_index))
2348                goto nest_cancel;
2349
2350        switch (option->type) {
2351        case TEAM_OPTION_TYPE_U32:
2352                if (nla_put_u8(skb, TEAM_ATTR_OPTION_TYPE, NLA_U32))
2353                        goto nest_cancel;
2354                if (nla_put_u32(skb, TEAM_ATTR_OPTION_DATA, ctx.data.u32_val))
2355                        goto nest_cancel;
2356                break;
2357        case TEAM_OPTION_TYPE_STRING:
2358                if (nla_put_u8(skb, TEAM_ATTR_OPTION_TYPE, NLA_STRING))
2359                        goto nest_cancel;
2360                if (nla_put_string(skb, TEAM_ATTR_OPTION_DATA,
2361                                   ctx.data.str_val))
2362                        goto nest_cancel;
2363                break;
2364        case TEAM_OPTION_TYPE_BINARY:
2365                if (nla_put_u8(skb, TEAM_ATTR_OPTION_TYPE, NLA_BINARY))
2366                        goto nest_cancel;
2367                if (nla_put(skb, TEAM_ATTR_OPTION_DATA, ctx.data.bin_val.len,
2368                            ctx.data.bin_val.ptr))
2369                        goto nest_cancel;
2370                break;
2371        case TEAM_OPTION_TYPE_BOOL:
2372                if (nla_put_u8(skb, TEAM_ATTR_OPTION_TYPE, NLA_FLAG))
2373                        goto nest_cancel;
2374                if (ctx.data.bool_val &&
2375                    nla_put_flag(skb, TEAM_ATTR_OPTION_DATA))
2376                        goto nest_cancel;
2377                break;
2378        case TEAM_OPTION_TYPE_S32:
2379                if (nla_put_u8(skb, TEAM_ATTR_OPTION_TYPE, NLA_S32))
2380                        goto nest_cancel;
2381                if (nla_put_s32(skb, TEAM_ATTR_OPTION_DATA, ctx.data.s32_val))
2382                        goto nest_cancel;
2383                break;
2384        default:
2385                BUG();
2386        }
2387        if (opt_inst->removed && nla_put_flag(skb, TEAM_ATTR_OPTION_REMOVED))
2388                goto nest_cancel;
2389        if (opt_inst->changed) {
2390                if (nla_put_flag(skb, TEAM_ATTR_OPTION_CHANGED))
2391                        goto nest_cancel;
2392                opt_inst->changed = false;
2393        }
2394        nla_nest_end(skb, option_item);
2395        return 0;
2396
2397nest_cancel:
2398        nla_nest_cancel(skb, option_item);
2399        return -EMSGSIZE;
2400}
2401
2402static int __send_and_alloc_skb(struct sk_buff **pskb,
2403                                struct team *team, u32 portid,
2404                                team_nl_send_func_t *send_func)
2405{
2406        int err;
2407
2408        if (*pskb) {
2409                err = send_func(*pskb, team, portid);
2410                if (err)
2411                        return err;
2412        }
2413        *pskb = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL);
2414        if (!*pskb)
2415                return -ENOMEM;
2416        return 0;
2417}
2418
2419static int team_nl_send_options_get(struct team *team, u32 portid, u32 seq,
2420                                    int flags, team_nl_send_func_t *send_func,
2421                                    struct list_head *sel_opt_inst_list)
2422{
2423        struct nlattr *option_list;
2424        struct nlmsghdr *nlh;
2425        void *hdr;
2426        struct team_option_inst *opt_inst;
2427        int err;
2428        struct sk_buff *skb = NULL;
2429        bool incomplete;
2430        int i;
2431
2432        opt_inst = list_first_entry(sel_opt_inst_list,
2433                                    struct team_option_inst, tmp_list);
2434
2435start_again:
2436        err = __send_and_alloc_skb(&skb, team, portid, send_func);
2437        if (err)
2438                return err;
2439
2440        hdr = genlmsg_put(skb, portid, seq, &team_nl_family, flags | NLM_F_MULTI,
2441                          TEAM_CMD_OPTIONS_GET);
2442        if (!hdr) {
2443                nlmsg_free(skb);
2444                return -EMSGSIZE;
2445        }
2446
2447        if (nla_put_u32(skb, TEAM_ATTR_TEAM_IFINDEX, team->dev->ifindex))
2448                goto nla_put_failure;
2449        option_list = nla_nest_start_noflag(skb, TEAM_ATTR_LIST_OPTION);
2450        if (!option_list)
2451                goto nla_put_failure;
2452
2453        i = 0;
2454        incomplete = false;
2455        list_for_each_entry_from(opt_inst, sel_opt_inst_list, tmp_list) {
2456                err = team_nl_fill_one_option_get(skb, team, opt_inst);
2457                if (err) {
2458                        if (err == -EMSGSIZE) {
2459                                if (!i)
2460                                        goto errout;
2461                                incomplete = true;
2462                                break;
2463                        }
2464                        goto errout;
2465                }
2466                i++;
2467        }
2468
2469        nla_nest_end(skb, option_list);
2470        genlmsg_end(skb, hdr);
2471        if (incomplete)
2472                goto start_again;
2473
2474send_done:
2475        nlh = nlmsg_put(skb, portid, seq, NLMSG_DONE, 0, flags | NLM_F_MULTI);
2476        if (!nlh) {
2477                err = __send_and_alloc_skb(&skb, team, portid, send_func);
2478                if (err)
2479                        return err;
2480                goto send_done;
2481        }
2482
2483        return send_func(skb, team, portid);
2484
2485nla_put_failure:
2486        err = -EMSGSIZE;
2487errout:
2488        nlmsg_free(skb);
2489        return err;
2490}
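
    /* team_nl_send_options_get() implements the usual multi-part genetlink
     * pattern by hand: every message is flagged NLM_F_MULTI, an -EMSGSIZE
     * from the fill helper flushes the current skb and restarts at
     * start_again with a fresh one, and the stream is terminated by the
     * NLMSG_DONE message built at send_done.
     */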
2491
2492static int team_nl_cmd_options_get(struct sk_buff *skb, struct genl_info *info)
2493{
2494        struct team *team;
2495        struct team_option_inst *opt_inst;
2496        int err;
2497        LIST_HEAD(sel_opt_inst_list);
2498
2499        team = team_nl_team_get(info);
2500        if (!team)
2501                return -EINVAL;
2502
2503        list_for_each_entry(opt_inst, &team->option_inst_list, list)
2504                list_add_tail(&opt_inst->tmp_list, &sel_opt_inst_list);
2505        err = team_nl_send_options_get(team, info->snd_portid, info->snd_seq,
2506                                       NLM_F_ACK, team_nl_send_unicast,
2507                                       &sel_opt_inst_list);
2508
2509        team_nl_team_put(team);
2510
2511        return err;
2512}
2513
2514static int team_nl_send_event_options_get(struct team *team,
2515                                          struct list_head *sel_opt_inst_list);
2516
2517static int team_nl_cmd_options_set(struct sk_buff *skb, struct genl_info *info)
2518{
2519        struct team *team;
2520        int err = 0;
2521        int i;
2522        struct nlattr *nl_option;
2523
2524        rtnl_lock();
2525
2526        team = team_nl_team_get(info);
2527        if (!team) {
2528                err = -EINVAL;
2529                goto rtnl_unlock;
2530        }
2531
2533        if (!info->attrs[TEAM_ATTR_LIST_OPTION]) {
2534                err = -EINVAL;
2535                goto team_put;
2536        }
2537
2538        nla_for_each_nested(nl_option, info->attrs[TEAM_ATTR_LIST_OPTION], i) {
2539                struct nlattr *opt_attrs[TEAM_ATTR_OPTION_MAX + 1];
2540                struct nlattr *attr;
2541                struct nlattr *attr_data;
2542                LIST_HEAD(opt_inst_list);
2543                enum team_option_type opt_type;
2544                int opt_port_ifindex = 0; /* != 0 for per-port options */
2545                u32 opt_array_index = 0;
2546                bool opt_is_array = false;
2547                struct team_option_inst *opt_inst;
2548                char *opt_name;
2549                bool opt_found = false;
2550
2551                if (nla_type(nl_option) != TEAM_ATTR_ITEM_OPTION) {
2552                        err = -EINVAL;
2553                        goto team_put;
2554                }
2555                err = nla_parse_nested_deprecated(opt_attrs,
2556                                                  TEAM_ATTR_OPTION_MAX,
2557                                                  nl_option,
2558                                                  team_nl_option_policy,
2559                                                  info->extack);
2560                if (err)
2561                        goto team_put;
2562                if (!opt_attrs[TEAM_ATTR_OPTION_NAME] ||
2563                    !opt_attrs[TEAM_ATTR_OPTION_TYPE]) {
2564                        err = -EINVAL;
2565                        goto team_put;
2566                }
2567                switch (nla_get_u8(opt_attrs[TEAM_ATTR_OPTION_TYPE])) {
2568                case NLA_U32:
2569                        opt_type = TEAM_OPTION_TYPE_U32;
2570                        break;
2571                case NLA_STRING:
2572                        opt_type = TEAM_OPTION_TYPE_STRING;
2573                        break;
2574                case NLA_BINARY:
2575                        opt_type = TEAM_OPTION_TYPE_BINARY;
2576                        break;
2577                case NLA_FLAG:
2578                        opt_type = TEAM_OPTION_TYPE_BOOL;
2579                        break;
2580                case NLA_S32:
2581                        opt_type = TEAM_OPTION_TYPE_S32;
2582                        break;
2583                default:
                            err = -EINVAL;
2584                        goto team_put;
2585                }
2586
2587                attr_data = opt_attrs[TEAM_ATTR_OPTION_DATA];
2588                if (opt_type != TEAM_OPTION_TYPE_BOOL && !attr_data) {
2589                        err = -EINVAL;
2590                        goto team_put;
2591                }
2592
2593                opt_name = nla_data(opt_attrs[TEAM_ATTR_OPTION_NAME]);
2594                attr = opt_attrs[TEAM_ATTR_OPTION_PORT_IFINDEX];
2595                if (attr)
2596                        opt_port_ifindex = nla_get_u32(attr);
2597
2598                attr = opt_attrs[TEAM_ATTR_OPTION_ARRAY_INDEX];
2599                if (attr) {
2600                        opt_is_array = true;
2601                        opt_array_index = nla_get_u32(attr);
2602                }
2603
2604                list_for_each_entry(opt_inst, &team->option_inst_list, list) {
2605                        struct team_option *option = opt_inst->option;
2606                        struct team_gsetter_ctx ctx;
2607                        struct team_option_inst_info *opt_inst_info;
2608                        int tmp_ifindex;
2609
2610                        opt_inst_info = &opt_inst->info;
2611                        tmp_ifindex = opt_inst_info->port ?
2612                                      opt_inst_info->port->dev->ifindex : 0;
2613                        if (option->type != opt_type ||
2614                            strcmp(option->name, opt_name) ||
2615                            tmp_ifindex != opt_port_ifindex ||
2616                            (option->array_size && !opt_is_array) ||
2617                            opt_inst_info->array_index != opt_array_index)
2618                                continue;
2619                        opt_found = true;
2620                        ctx.info = opt_inst_info;
2621                        switch (opt_type) {
2622                        case TEAM_OPTION_TYPE_U32:
2623                                ctx.data.u32_val = nla_get_u32(attr_data);
2624                                break;
2625                        case TEAM_OPTION_TYPE_STRING:
2626                                if (nla_len(attr_data) > TEAM_STRING_MAX_LEN) {
2627                                        err = -EINVAL;
2628                                        goto team_put;
2629                                }
2630                                ctx.data.str_val = nla_data(attr_data);
2631                                break;
2632                        case TEAM_OPTION_TYPE_BINARY:
2633                                ctx.data.bin_val.len = nla_len(attr_data);
2634                                ctx.data.bin_val.ptr = nla_data(attr_data);
2635                                break;
2636                        case TEAM_OPTION_TYPE_BOOL:
2637                                ctx.data.bool_val = attr_data ? true : false;
2638                                break;
2639                        case TEAM_OPTION_TYPE_S32:
2640                                ctx.data.s32_val = nla_get_s32(attr_data);
2641                                break;
2642                        default:
2643                                BUG();
2644                        }
2645                        err = team_option_set(team, opt_inst, &ctx);
2646                        if (err)
2647                                goto team_put;
2648                        opt_inst->changed = true;
2649                        list_add(&opt_inst->tmp_list, &opt_inst_list);
2650                }
2651                if (!opt_found) {
2652                        err = -ENOENT;
2653                        goto team_put;
2654                }
2655
2656                err = team_nl_send_event_options_get(team, &opt_inst_list);
2657                if (err)
2658                        break;
2659        }
2660
2661team_put:
2662        team_nl_team_put(team);
2663rtnl_unlock:
2664        rtnl_unlock();
2665        return err;
2666}
2667
2668static int team_nl_fill_one_port_get(struct sk_buff *skb,
2669                                     struct team_port *port)
2670{
2671        struct nlattr *port_item;
2672
2673        port_item = nla_nest_start_noflag(skb, TEAM_ATTR_ITEM_PORT);
2674        if (!port_item)
2675                goto nest_cancel;
2676        if (nla_put_u32(skb, TEAM_ATTR_PORT_IFINDEX, port->dev->ifindex))
2677                goto nest_cancel;
2678        if (port->changed) {
2679                if (nla_put_flag(skb, TEAM_ATTR_PORT_CHANGED))
2680                        goto nest_cancel;
2681                port->changed = false;
2682        }
2683        if ((port->removed &&
2684             nla_put_flag(skb, TEAM_ATTR_PORT_REMOVED)) ||
2685            (port->state.linkup &&
2686             nla_put_flag(skb, TEAM_ATTR_PORT_LINKUP)) ||
2687            nla_put_u32(skb, TEAM_ATTR_PORT_SPEED, port->state.speed) ||
2688            nla_put_u8(skb, TEAM_ATTR_PORT_DUPLEX, port->state.duplex))
2689                goto nest_cancel;
2690        nla_nest_end(skb, port_item);
2691        return 0;
2692
2693nest_cancel:
2694        nla_nest_cancel(skb, port_item);
2695        return -EMSGSIZE;
2696}
2697
2698static int team_nl_send_port_list_get(struct team *team, u32 portid, u32 seq,
2699                                      int flags, team_nl_send_func_t *send_func,
2700                                      struct team_port *one_port)
2701{
2702        struct nlattr *port_list;
2703        struct nlmsghdr *nlh;
2704        void *hdr;
2705        struct team_port *port;
2706        int err;
2707        struct sk_buff *skb = NULL;
2708        bool incomplete;
2709        int i;
2710
2711        port = list_first_entry_or_null(&team->port_list,
2712                                        struct team_port, list);
2713
2714start_again:
2715        err = __send_and_alloc_skb(&skb, team, portid, send_func);
2716        if (err)
2717                return err;
2718
2719        hdr = genlmsg_put(skb, portid, seq, &team_nl_family, flags | NLM_F_MULTI,
2720                          TEAM_CMD_PORT_LIST_GET);
2721        if (!hdr) {
2722                nlmsg_free(skb);
2723                return -EMSGSIZE;
2724        }
2725
2726        if (nla_put_u32(skb, TEAM_ATTR_TEAM_IFINDEX, team->dev->ifindex))
2727                goto nla_put_failure;
2728        port_list = nla_nest_start_noflag(skb, TEAM_ATTR_LIST_PORT);
2729        if (!port_list)
2730                goto nla_put_failure;
2731
2732        i = 0;
2733        incomplete = false;
2734
2735        /* If one port is selected, the caller wants to send a port list
2736         * containing only this port. Otherwise send all listed ports.
2737         */
2738        if (one_port) {
2739                err = team_nl_fill_one_port_get(skb, one_port);
2740                if (err)
2741                        goto errout;
2742        } else if (port) {
2743                list_for_each_entry_from(port, &team->port_list, list) {
2744                        err = team_nl_fill_one_port_get(skb, port);
2745                        if (err) {
2746                                if (err == -EMSGSIZE) {
2747                                        if (!i)
2748                                                goto errout;
2749                                        incomplete = true;
2750                                        break;
2751                                }
2752                                goto errout;
2753                        }
2754                        i++;
2755                }
2756        }
2757
2758        nla_nest_end(skb, port_list);
2759        genlmsg_end(skb, hdr);
2760        if (incomplete)
2761                goto start_again;
2762
2763send_done:
2764        nlh = nlmsg_put(skb, portid, seq, NLMSG_DONE, 0, flags | NLM_F_MULTI);
2765        if (!nlh) {
2766                err = __send_and_alloc_skb(&skb, team, portid, send_func);
2767                if (err)
2768                        return err;
2769                goto send_done;
2770        }
2771
2772        return send_func(skb, team, portid);
2773
2774nla_put_failure:
2775        err = -EMSGSIZE;
2776errout:
2777        nlmsg_free(skb);
2778        return err;
2779}
2780
2781static int team_nl_cmd_port_list_get(struct sk_buff *skb,
2782                                     struct genl_info *info)
2783{
2784        struct team *team;
2785        int err;
2786
2787        team = team_nl_team_get(info);
2788        if (!team)
2789                return -EINVAL;
2790
2791        err = team_nl_send_port_list_get(team, info->snd_portid, info->snd_seq,
2792                                         NLM_F_ACK, team_nl_send_unicast, NULL);
2793
2794        team_nl_team_put(team);
2795
2796        return err;
2797}
2798
2799static const struct genl_small_ops team_nl_ops[] = {
2800        {
2801                .cmd = TEAM_CMD_NOOP,
2802                .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
2803                .doit = team_nl_cmd_noop,
2804        },
2805        {
2806                .cmd = TEAM_CMD_OPTIONS_SET,
2807                .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
2808                .doit = team_nl_cmd_options_set,
2809                .flags = GENL_ADMIN_PERM,
2810        },
2811        {
2812                .cmd = TEAM_CMD_OPTIONS_GET,
2813                .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
2814                .doit = team_nl_cmd_options_get,
2815                .flags = GENL_ADMIN_PERM,
2816        },
2817        {
2818                .cmd = TEAM_CMD_PORT_LIST_GET,
2819                .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
2820                .doit = team_nl_cmd_port_list_get,
2821                .flags = GENL_ADMIN_PERM,
2822        },
2823};
2824
2825static const struct genl_multicast_group team_nl_mcgrps[] = {
2826        { .name = TEAM_GENL_CHANGE_EVENT_MC_GRP_NAME, },
2827};
2828
2829static struct genl_family team_nl_family __ro_after_init = {
2830        .name           = TEAM_GENL_NAME,
2831        .version        = TEAM_GENL_VERSION,
2832        .maxattr        = TEAM_ATTR_MAX,
2833        .policy = team_nl_policy,
2834        .netnsok        = true,
2835        .module         = THIS_MODULE,
2836        .small_ops      = team_nl_ops,
2837        .n_small_ops    = ARRAY_SIZE(team_nl_ops),
2838        .mcgrps         = team_nl_mcgrps,
2839        .n_mcgrps       = ARRAY_SIZE(team_nl_mcgrps),
2840};
2841
2842static int team_nl_send_multicast(struct sk_buff *skb,
2843                                  struct team *team, u32 portid)
2844{
2845        return genlmsg_multicast_netns(&team_nl_family, dev_net(team->dev),
2846                                       skb, 0, 0, GFP_KERNEL);
2847}
2848
2849static int team_nl_send_event_options_get(struct team *team,
2850                                          struct list_head *sel_opt_inst_list)
2851{
2852        return team_nl_send_options_get(team, 0, 0, 0, team_nl_send_multicast,
2853                                        sel_opt_inst_list);
2854}
2855
2856static int team_nl_send_event_port_get(struct team *team,
2857                                       struct team_port *port)
2858{
2859        return team_nl_send_port_list_get(team, 0, 0, 0, team_nl_send_multicast,
2860                                          port);
2861}
2862
2863static int __init team_nl_init(void)
2864{
2865        return genl_register_family(&team_nl_family);
2866}
2867
2868static void team_nl_fini(void)
2869{
2870        genl_unregister_family(&team_nl_family);
2871}
2872
2873
2874/******************
2875 * Change checkers
2876 ******************/
2877
2878static void __team_options_change_check(struct team *team)
2879{
2880        int err;
2881        struct team_option_inst *opt_inst;
2882        LIST_HEAD(sel_opt_inst_list);
2883
2884        list_for_each_entry(opt_inst, &team->option_inst_list, list) {
2885                if (opt_inst->changed)
2886                        list_add_tail(&opt_inst->tmp_list, &sel_opt_inst_list);
2887        }
2888        err = team_nl_send_event_options_get(team, &sel_opt_inst_list);
2889        if (err && err != -ESRCH)
2890                netdev_warn(team->dev, "Failed to send options change via netlink (err %d)\n",
2891                            err);
2892}
2893
2894/* rtnl lock is held */
2896static void __team_port_change_send(struct team_port *port, bool linkup)
2897{
2898        int err;
2899
2900        port->changed = true;
2901        port->state.linkup = linkup;
2902        team_refresh_port_linkup(port);
2903        if (linkup) {
2904                struct ethtool_link_ksettings ecmd;
2905
2906                err = __ethtool_get_link_ksettings(port->dev, &ecmd);
2907                if (!err) {
2908                        port->state.speed = ecmd.base.speed;
2909                        port->state.duplex = ecmd.base.duplex;
2910                        goto send_event;
2911                }
2912        }
2913        port->state.speed = 0;
2914        port->state.duplex = 0;
2915
2916send_event:
2917        err = team_nl_send_event_port_get(port->team, port);
2918        if (err && err != -ESRCH)
2919                netdev_warn(port->team->dev, "Failed to send port change of device %s via netlink (err %d)\n",
2920                            port->dev->name, err);
2922}
2923
2924static void __team_carrier_check(struct team *team)
2925{
2926        struct team_port *port;
2927        bool team_linkup;
2928
2929        if (team->user_carrier_enabled)
2930                return;
2931
2932        team_linkup = false;
2933        list_for_each_entry(port, &team->port_list, list) {
2934                if (port->linkup) {
2935                        team_linkup = true;
2936                        break;
2937                }
2938        }
2939
2940        if (team_linkup)
2941                netif_carrier_on(team->dev);
2942        else
2943                netif_carrier_off(team->dev);
2944}
2945
2946static void __team_port_change_check(struct team_port *port, bool linkup)
2947{
2948        if (port->state.linkup != linkup)
2949                __team_port_change_send(port, linkup);
2950        __team_carrier_check(port->team);
2951}
2952
2953static void __team_port_change_port_added(struct team_port *port, bool linkup)
2954{
2955        __team_port_change_send(port, linkup);
2956        __team_carrier_check(port->team);
2957}
2958
2959static void __team_port_change_port_removed(struct team_port *port)
2960{
2961        port->removed = true;
2962        __team_port_change_send(port, false);
2963        __team_carrier_check(port->team);
2964}
2965
2966static void team_port_change_check(struct team_port *port, bool linkup)
2967{
2968        struct team *team = port->team;
2969
2970        mutex_lock(&team->lock);
2971        __team_port_change_check(port, linkup);
2972        mutex_unlock(&team->lock);
2973}
2974
2975
2976/************************************
2977 * Net device notifier event handler
2978 ************************************/
2979
2980static int team_device_event(struct notifier_block *unused,
2981                             unsigned long event, void *ptr)
2982{
2983        struct net_device *dev = netdev_notifier_info_to_dev(ptr);
2984        struct team_port *port;
2985
2986        port = team_port_get_rtnl(dev);
2987        if (!port)
2988                return NOTIFY_DONE;
2989
2990        switch (event) {
2991        case NETDEV_UP:
2992                if (netif_oper_up(dev))
2993                        team_port_change_check(port, true);
2994                break;
2995        case NETDEV_DOWN:
2996                team_port_change_check(port, false);
2997                break;
2998        case NETDEV_CHANGE:
2999                if (netif_running(port->dev))
3000                        team_port_change_check(port,
3001                                               !!netif_oper_up(port->dev));
3002                break;
3003        case NETDEV_UNREGISTER:
3004                team_del_slave(port->team->dev, dev);
3005                break;
3006        case NETDEV_FEAT_CHANGE:
3007                team_compute_features(port->team);
3008                break;
3009        case NETDEV_PRECHANGEMTU:
3010                /* Forbid changing the MTU of an underlying device */
3011                if (!port->team->port_mtu_change_allowed)
3012                        return NOTIFY_BAD;
3013                break;
3014        case NETDEV_PRE_TYPE_CHANGE:
3015                /* Forbid changing the type of an underlying device */
3016                return NOTIFY_BAD;
3017        case NETDEV_RESEND_IGMP:
3018                /* Propagate to master device */
3019                call_netdevice_notifiers(event, port->team->dev);
3020                break;
3021        }
3022        return NOTIFY_DONE;
3023}
3024
3025static struct notifier_block team_notifier_block __read_mostly = {
3026        .notifier_call = team_device_event,
3027};
3028
3029
3030/***********************
3031 * Module init and exit
3032 ***********************/
3033
3034static int __init team_module_init(void)
3035{
3036        int err;
3037
3038        register_netdevice_notifier(&team_notifier_block);
3039
3040        err = rtnl_link_register(&team_link_ops);
3041        if (err)
3042                goto err_rtnl_reg;
3043
3044        err = team_nl_init();
3045        if (err)
3046                goto err_nl_init;
3047
3048        return 0;
3049
3050err_nl_init:
3051        rtnl_link_unregister(&team_link_ops);
3052
3053err_rtnl_reg:
3054        unregister_netdevice_notifier(&team_notifier_block);
3055
3056        return err;
3057}
3058
3059static void __exit team_module_exit(void)
3060{
3061        team_nl_fini();
3062        rtnl_link_unregister(&team_link_ops);
3063        unregister_netdevice_notifier(&team_notifier_block);
3064}
3065
3066module_init(team_module_init);
3067module_exit(team_module_exit);
3068
3069MODULE_LICENSE("GPL v2");
3070MODULE_AUTHOR("Jiri Pirko <jpirko@redhat.com>");
3071MODULE_DESCRIPTION("Ethernet team device driver");
3072MODULE_ALIAS_RTNL_LINK(DRV_NAME);
3073