linux/drivers/net/team/team.c
/*
 * drivers/net/team/team.c - Network team device driver
 * Copyright (c) 2011 Jiri Pirko <jpirko@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/rcupdate.h>
#include <linux/errno.h>
#include <linux/ctype.h>
#include <linux/notifier.h>
#include <linux/netdevice.h>
#include <linux/netpoll.h>
#include <linux/if_vlan.h>
#include <linux/if_arp.h>
#include <linux/socket.h>
#include <linux/etherdevice.h>
#include <linux/rtnetlink.h>
#include <net/rtnetlink.h>
#include <net/genetlink.h>
#include <net/netlink.h>
#include <net/sch_generic.h>
#include <generated/utsrelease.h>
#include <linux/if_team.h>

#define DRV_NAME "team"


/**********
 * Helpers
 **********/

#define team_port_exists(dev) (dev->priv_flags & IFF_TEAM_PORT)

static struct team_port *team_port_get_rcu(const struct net_device *dev)
{
        struct team_port *port = rcu_dereference(dev->rx_handler_data);

        return team_port_exists(dev) ? port : NULL;
}

static struct team_port *team_port_get_rtnl(const struct net_device *dev)
{
        struct team_port *port = rtnl_dereference(dev->rx_handler_data);

        return team_port_exists(dev) ? port : NULL;
}
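
/*
 * Note: both accessors above return the port only when the underlying
 * device really is a team port. team_port_get_rcu() must run inside an
 * rcu_read_lock() section (e.g. from the rx path), while
 * team_port_get_rtnl() relies on the caller holding RTNL. Illustrative
 * sketch of a correct RCU-side lookup (not part of the driver):
 *
 *        rcu_read_lock();
 *        port = team_port_get_rcu(dev);
 *        if (port)
 *                do_something_with(port->team);
 *        rcu_read_unlock();
 */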

/*
 * Since the ability to change the device address for an open port device
 * is tested in team_port_add, this function can be called without checking
 * the return value.
 */
static int __set_port_dev_addr(struct net_device *port_dev,
                               const unsigned char *dev_addr)
{
        struct sockaddr addr;

        memcpy(addr.sa_data, dev_addr, port_dev->addr_len);
        addr.sa_family = port_dev->type;
        return dev_set_mac_address(port_dev, &addr);
}

static int team_port_set_orig_dev_addr(struct team_port *port)
{
        return __set_port_dev_addr(port->dev, port->orig.dev_addr);
}

static int team_port_set_team_dev_addr(struct team *team,
                                       struct team_port *port)
{
        return __set_port_dev_addr(port->dev, team->dev->dev_addr);
}

int team_modeop_port_enter(struct team *team, struct team_port *port)
{
        return team_port_set_team_dev_addr(team, port);
}
EXPORT_SYMBOL(team_modeop_port_enter);

void team_modeop_port_change_dev_addr(struct team *team,
                                      struct team_port *port)
{
        team_port_set_team_dev_addr(team, port);
}
EXPORT_SYMBOL(team_modeop_port_change_dev_addr);

static void team_refresh_port_linkup(struct team_port *port)
{
        port->linkup = port->user.linkup_enabled ? port->user.linkup :
                                                   port->state.linkup;
}


/*******************
 * Options handling
 *******************/

struct team_option_inst { /* One for each option instance */
        struct list_head list;
        struct list_head tmp_list;
        struct team_option *option;
        struct team_option_inst_info info;
        bool changed;
        bool removed;
};
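
/*
 * Descriptive note: every registered struct team_option is expanded into
 * one or more instances - one per team for global options, one per port
 * for per-port options, and one per array index for array options. The
 * changed/removed flags let the netlink code report deltas to userspace
 * instead of re-dumping the full option list on every event.
 */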

static struct team_option *__team_find_option(struct team *team,
                                              const char *opt_name)
{
        struct team_option *option;

        list_for_each_entry(option, &team->option_list, list) {
                if (strcmp(option->name, opt_name) == 0)
                        return option;
        }
        return NULL;
}

static void __team_option_inst_del(struct team_option_inst *opt_inst)
{
        list_del(&opt_inst->list);
        kfree(opt_inst);
}

static void __team_option_inst_del_option(struct team *team,
                                          struct team_option *option)
{
        struct team_option_inst *opt_inst, *tmp;

        list_for_each_entry_safe(opt_inst, tmp, &team->option_inst_list, list) {
                if (opt_inst->option == option)
                        __team_option_inst_del(opt_inst);
        }
}

static int __team_option_inst_add(struct team *team, struct team_option *option,
                                  struct team_port *port)
{
        struct team_option_inst *opt_inst;
        unsigned int array_size;
        unsigned int i;
        int err;

        array_size = option->array_size;
        if (!array_size)
                array_size = 1; /* No array but still need one instance */

        for (i = 0; i < array_size; i++) {
                opt_inst = kmalloc(sizeof(*opt_inst), GFP_KERNEL);
                if (!opt_inst)
                        return -ENOMEM;
                opt_inst->option = option;
                opt_inst->info.port = port;
                opt_inst->info.array_index = i;
                opt_inst->changed = true;
                opt_inst->removed = false;
                list_add_tail(&opt_inst->list, &team->option_inst_list);
                if (option->init) {
                        err = option->init(team, &opt_inst->info);
                        if (err)
                                return err;
                }
        }
        return 0;
}

static int __team_option_inst_add_option(struct team *team,
                                         struct team_option *option)
{
        struct team_port *port;
        int err;

        if (!option->per_port) {
                err = __team_option_inst_add(team, option, NULL);
                if (err)
                        goto inst_del_option;
        }

        list_for_each_entry(port, &team->port_list, list) {
                err = __team_option_inst_add(team, option, port);
                if (err)
                        goto inst_del_option;
        }
        return 0;

inst_del_option:
        __team_option_inst_del_option(team, option);
        return err;
}

static void __team_option_inst_mark_removed_option(struct team *team,
                                                   struct team_option *option)
{
        struct team_option_inst *opt_inst;

        list_for_each_entry(opt_inst, &team->option_inst_list, list) {
                if (opt_inst->option == option) {
                        opt_inst->changed = true;
                        opt_inst->removed = true;
                }
        }
}

static void __team_option_inst_del_port(struct team *team,
                                        struct team_port *port)
{
        struct team_option_inst *opt_inst, *tmp;

        list_for_each_entry_safe(opt_inst, tmp, &team->option_inst_list, list) {
                if (opt_inst->option->per_port &&
                    opt_inst->info.port == port)
                        __team_option_inst_del(opt_inst);
        }
}

static int __team_option_inst_add_port(struct team *team,
                                       struct team_port *port)
{
        struct team_option *option;
        int err;

        list_for_each_entry(option, &team->option_list, list) {
                if (!option->per_port)
                        continue;
                err = __team_option_inst_add(team, option, port);
                if (err)
                        goto inst_del_port;
        }
        return 0;

inst_del_port:
        __team_option_inst_del_port(team, port);
        return err;
}

static void __team_option_inst_mark_removed_port(struct team *team,
                                                 struct team_port *port)
{
        struct team_option_inst *opt_inst;

        list_for_each_entry(opt_inst, &team->option_inst_list, list) {
                if (opt_inst->info.port == port) {
                        opt_inst->changed = true;
                        opt_inst->removed = true;
                }
        }
}

static int __team_options_register(struct team *team,
                                   const struct team_option *option,
                                   size_t option_count)
{
        int i;
        struct team_option **dst_opts;
        int err;

        dst_opts = kzalloc(sizeof(struct team_option *) * option_count,
                           GFP_KERNEL);
        if (!dst_opts)
                return -ENOMEM;
        for (i = 0; i < option_count; i++, option++) {
                if (__team_find_option(team, option->name)) {
                        err = -EEXIST;
                        goto alloc_rollback;
                }
                dst_opts[i] = kmemdup(option, sizeof(*option), GFP_KERNEL);
                if (!dst_opts[i]) {
                        err = -ENOMEM;
                        goto alloc_rollback;
                }
        }

        for (i = 0; i < option_count; i++) {
                err = __team_option_inst_add_option(team, dst_opts[i]);
                if (err)
                        goto inst_rollback;
                list_add_tail(&dst_opts[i]->list, &team->option_list);
        }

        kfree(dst_opts);
        return 0;

inst_rollback:
        for (i--; i >= 0; i--)
                __team_option_inst_del_option(team, dst_opts[i]);

        i = option_count - 1;
alloc_rollback:
        for (i--; i >= 0; i--)
                kfree(dst_opts[i]);

        kfree(dst_opts);
        return err;
}

static void __team_options_mark_removed(struct team *team,
                                        const struct team_option *option,
                                        size_t option_count)
{
        int i;

        for (i = 0; i < option_count; i++, option++) {
                struct team_option *del_opt;

                del_opt = __team_find_option(team, option->name);
                if (del_opt)
                        __team_option_inst_mark_removed_option(team, del_opt);
        }
}

static void __team_options_unregister(struct team *team,
                                      const struct team_option *option,
                                      size_t option_count)
{
        int i;

        for (i = 0; i < option_count; i++, option++) {
                struct team_option *del_opt;

                del_opt = __team_find_option(team, option->name);
                if (del_opt) {
                        __team_option_inst_del_option(team, del_opt);
                        list_del(&del_opt->list);
                        kfree(del_opt);
                }
        }
}

static void __team_options_change_check(struct team *team);

int team_options_register(struct team *team,
                          const struct team_option *option,
                          size_t option_count)
{
        int err;

        err = __team_options_register(team, option, option_count);
        if (err)
                return err;
        __team_options_change_check(team);
        return 0;
}
EXPORT_SYMBOL(team_options_register);
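
/*
 * Illustrative sketch (not part of the driver): a mode would typically
 * register its options from its init op. The names below are hypothetical;
 * see the real mode drivers in drivers/net/team/ for working examples.
 *
 *        static const struct team_option example_options[] = {
 *                {
 *                        .name = "example_flag",
 *                        .type = TEAM_OPTION_TYPE_BOOL,
 *                        .per_port = true,
 *                        .getter = example_flag_get,
 *                        .setter = example_flag_set,
 *                },
 *        };
 *
 *        static int example_init(struct team *team)
 *        {
 *                return team_options_register(team, example_options,
 *                                             ARRAY_SIZE(example_options));
 *        }
 */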

void team_options_unregister(struct team *team,
                             const struct team_option *option,
                             size_t option_count)
{
        __team_options_mark_removed(team, option, option_count);
        __team_options_change_check(team);
        __team_options_unregister(team, option, option_count);
}
EXPORT_SYMBOL(team_options_unregister);

static int team_option_get(struct team *team,
                           struct team_option_inst *opt_inst,
                           struct team_gsetter_ctx *ctx)
{
        if (!opt_inst->option->getter)
                return -EOPNOTSUPP;
        return opt_inst->option->getter(team, ctx);
}

static int team_option_set(struct team *team,
                           struct team_option_inst *opt_inst,
                           struct team_gsetter_ctx *ctx)
{
        if (!opt_inst->option->setter)
                return -EOPNOTSUPP;
        return opt_inst->option->setter(team, ctx);
}

void team_option_inst_set_change(struct team_option_inst_info *opt_inst_info)
{
        struct team_option_inst *opt_inst;

        opt_inst = container_of(opt_inst_info, struct team_option_inst, info);
        opt_inst->changed = true;
}
EXPORT_SYMBOL(team_option_inst_set_change);

void team_options_change_check(struct team *team)
{
        __team_options_change_check(team);
}
EXPORT_SYMBOL(team_options_change_check);


/****************
 * Mode handling
 ****************/

static LIST_HEAD(mode_list);
static DEFINE_SPINLOCK(mode_list_lock);

struct team_mode_item {
        struct list_head list;
        const struct team_mode *mode;
};

static struct team_mode_item *__find_mode(const char *kind)
{
        struct team_mode_item *mitem;

        list_for_each_entry(mitem, &mode_list, list) {
                if (strcmp(mitem->mode->kind, kind) == 0)
                        return mitem;
        }
        return NULL;
}

static bool is_good_mode_name(const char *name)
{
        while (*name != '\0') {
                if (!isalpha(*name) && !isdigit(*name) && *name != '_')
                        return false;
                name++;
        }
        return true;
}

int team_mode_register(const struct team_mode *mode)
{
        int err = 0;
        struct team_mode_item *mitem;

        if (!is_good_mode_name(mode->kind) ||
            mode->priv_size > TEAM_MODE_PRIV_SIZE)
                return -EINVAL;

        mitem = kmalloc(sizeof(*mitem), GFP_KERNEL);
        if (!mitem)
                return -ENOMEM;

        spin_lock(&mode_list_lock);
        if (__find_mode(mode->kind)) {
                err = -EEXIST;
                kfree(mitem);
                goto unlock;
        }
        mitem->mode = mode;
        list_add_tail(&mitem->list, &mode_list);
unlock:
        spin_unlock(&mode_list_lock);
        return err;
}
EXPORT_SYMBOL(team_mode_register);

void team_mode_unregister(const struct team_mode *mode)
{
        struct team_mode_item *mitem;

        spin_lock(&mode_list_lock);
        mitem = __find_mode(mode->kind);
        if (mitem) {
                list_del_init(&mitem->list);
                kfree(mitem);
        }
        spin_unlock(&mode_list_lock);
}
EXPORT_SYMBOL(team_mode_unregister);
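
/*
 * Illustrative sketch of how a mode module plugs in (hypothetical names;
 * the in-tree mode drivers under drivers/net/team/ are the reference):
 *
 *        static const struct team_mode example_mode = {
 *                .kind        = "example",
 *                .owner       = THIS_MODULE,
 *                .ops         = &example_mode_ops,
 *        };
 *
 *        static int __init example_module_init(void)
 *        {
 *                return team_mode_register(&example_mode);
 *        }
 *
 *        static void __exit example_module_exit(void)
 *        {
 *                team_mode_unregister(&example_mode);
 *        }
 */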

static const struct team_mode *team_mode_get(const char *kind)
{
        struct team_mode_item *mitem;
        const struct team_mode *mode = NULL;

        spin_lock(&mode_list_lock);
        mitem = __find_mode(kind);
        if (!mitem) {
                spin_unlock(&mode_list_lock);
                request_module("team-mode-%s", kind);
                spin_lock(&mode_list_lock);
                mitem = __find_mode(kind);
        }
        if (mitem) {
                mode = mitem->mode;
                if (!try_module_get(mode->owner))
                        mode = NULL;
        }

        spin_unlock(&mode_list_lock);
        return mode;
}
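
/*
 * Note: the request_module() above means mode modules are autoloaded on
 * demand; mode drivers advertise themselves with a "team-mode-<kind>"
 * module alias (MODULE_ALIAS_TEAM_MODE in linux/if_team.h), so selecting
 * e.g. the "activebackup" mode pulls in its module automatically.
 */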

static void team_mode_put(const struct team_mode *mode)
{
        module_put(mode->owner);
}

static bool team_dummy_transmit(struct team *team, struct sk_buff *skb)
{
        dev_kfree_skb_any(skb);
        return false;
}

static rx_handler_result_t team_dummy_receive(struct team *team,
                                              struct team_port *port,
                                              struct sk_buff *skb)
{
        return RX_HANDLER_ANOTHER;
}

static const struct team_mode __team_no_mode = {
        .kind           = "*NOMODE*",
};

static bool team_is_mode_set(struct team *team)
{
        return team->mode != &__team_no_mode;
}

static void team_set_no_mode(struct team *team)
{
        team->user_carrier_enabled = false;
        team->mode = &__team_no_mode;
}

static void team_adjust_ops(struct team *team)
{
        /*
         * To avoid checks in rx/tx skb paths, ensure here that non-null and
         * correct ops are always set.
         */

        if (!team->en_port_count || !team_is_mode_set(team) ||
            !team->mode->ops->transmit)
                team->ops.transmit = team_dummy_transmit;
        else
                team->ops.transmit = team->mode->ops->transmit;

        if (!team->en_port_count || !team_is_mode_set(team) ||
            !team->mode->ops->receive)
                team->ops.receive = team_dummy_receive;
        else
                team->ops.receive = team->mode->ops->receive;
}

/*
 * We can benefit from the fact that no port is present at the time of mode
 * change. Therefore no packets are in flight, so there's no need to set the
 * mode operations in any special way.
 */
static int __team_change_mode(struct team *team,
                              const struct team_mode *new_mode)
{
        /* Check if mode was previously set and do cleanup if so */
        if (team_is_mode_set(team)) {
                void (*exit_op)(struct team *team) = team->ops.exit;

                /* Clear ops area so no callback is called any longer */
                memset(&team->ops, 0, sizeof(struct team_mode_ops));
                team_adjust_ops(team);

                if (exit_op)
                        exit_op(team);
                team_mode_put(team->mode);
                team_set_no_mode(team);
                /* zero private data area */
                memset(&team->mode_priv, 0,
                       sizeof(struct team) - offsetof(struct team, mode_priv));
        }

        if (!new_mode)
                return 0;

        if (new_mode->ops->init) {
                int err;

                err = new_mode->ops->init(team);
                if (err)
                        return err;
        }

        team->mode = new_mode;
        memcpy(&team->ops, new_mode->ops, sizeof(struct team_mode_ops));
        team_adjust_ops(team);

        return 0;
}

static int team_change_mode(struct team *team, const char *kind)
{
        const struct team_mode *new_mode;
        struct net_device *dev = team->dev;
        int err;

        if (!list_empty(&team->port_list)) {
                netdev_err(dev, "No ports can be present during mode change\n");
                return -EBUSY;
        }

        if (team_is_mode_set(team) && strcmp(team->mode->kind, kind) == 0) {
                netdev_err(dev, "Unable to change to the same mode the team is in\n");
                return -EINVAL;
        }

        new_mode = team_mode_get(kind);
        if (!new_mode) {
                netdev_err(dev, "Mode \"%s\" not found\n", kind);
                return -EINVAL;
        }

        err = __team_change_mode(team, new_mode);
        if (err) {
                netdev_err(dev, "Failed to change to mode \"%s\"\n", kind);
                team_mode_put(new_mode);
                return err;
        }

        netdev_info(dev, "Mode changed to \"%s\"\n", kind);
        return 0;
}
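
/*
 * Userspace note: the mode is exposed as the "mode" string option below
 * and is normally selected through libteam/teamd rather than directly.
 * For example (illustrative), a teamd config containing
 * '{"runner": {"name": "roundrobin"}}' makes teamd set this option to
 * "roundrobin" right after the device is created, while no ports are
 * attached yet.
 */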


/*********************
 * Peers notification
 *********************/

static void team_notify_peers_work(struct work_struct *work)
{
        struct team *team;

        team = container_of(work, struct team, notify_peers.dw.work);

        if (!rtnl_trylock()) {
                schedule_delayed_work(&team->notify_peers.dw, 0);
                return;
        }
        call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, team->dev);
        rtnl_unlock();
        if (!atomic_dec_and_test(&team->notify_peers.count_pending))
                schedule_delayed_work(&team->notify_peers.dw,
                                      msecs_to_jiffies(team->notify_peers.interval));
}

static void team_notify_peers(struct team *team)
{
        if (!team->notify_peers.count || !netif_running(team->dev))
                return;
        atomic_set(&team->notify_peers.count_pending, team->notify_peers.count);
        schedule_delayed_work(&team->notify_peers.dw, 0);
}

static void team_notify_peers_init(struct team *team)
{
        INIT_DELAYED_WORK(&team->notify_peers.dw, team_notify_peers_work);
}

static void team_notify_peers_fini(struct team *team)
{
        cancel_delayed_work_sync(&team->notify_peers.dw);
}
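
/*
 * Descriptive note: NETDEV_NOTIFY_PEERS prompts the stack to advertise
 * the device to its peers (e.g. via gratuitous ARP), which matters right
 * after a failover so switches relearn the active path. When rtnl_trylock()
 * fails the work simply requeues itself with zero delay instead of
 * blocking, and count_pending spaces "count" notifications "interval"
 * milliseconds apart.
 */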


/*******************************
 * Send multicast group rejoins
 *******************************/

static void team_mcast_rejoin_work(struct work_struct *work)
{
        struct team *team;

        team = container_of(work, struct team, mcast_rejoin.dw.work);

        if (!rtnl_trylock()) {
                schedule_delayed_work(&team->mcast_rejoin.dw, 0);
                return;
        }
        call_netdevice_notifiers(NETDEV_RESEND_IGMP, team->dev);
        rtnl_unlock();
        if (!atomic_dec_and_test(&team->mcast_rejoin.count_pending))
                schedule_delayed_work(&team->mcast_rejoin.dw,
                                      msecs_to_jiffies(team->mcast_rejoin.interval));
}

static void team_mcast_rejoin(struct team *team)
{
        if (!team->mcast_rejoin.count || !netif_running(team->dev))
                return;
        atomic_set(&team->mcast_rejoin.count_pending, team->mcast_rejoin.count);
        schedule_delayed_work(&team->mcast_rejoin.dw, 0);
}

static void team_mcast_rejoin_init(struct team *team)
{
        INIT_DELAYED_WORK(&team->mcast_rejoin.dw, team_mcast_rejoin_work);
}

static void team_mcast_rejoin_fini(struct team *team)
{
        cancel_delayed_work_sync(&team->mcast_rejoin.dw);
}


/************************
 * Rx path frame handler
 ************************/

/* note: already called with rcu_read_lock */
static rx_handler_result_t team_handle_frame(struct sk_buff **pskb)
{
        struct sk_buff *skb = *pskb;
        struct team_port *port;
        struct team *team;
        rx_handler_result_t res;

        skb = skb_share_check(skb, GFP_ATOMIC);
        if (!skb)
                return RX_HANDLER_CONSUMED;

        *pskb = skb;

        port = team_port_get_rcu(skb->dev);
        team = port->team;
        if (!team_port_enabled(port)) {
                /* allow exact match delivery for disabled ports */
                res = RX_HANDLER_EXACT;
        } else {
                res = team->ops.receive(team, port, skb);
        }
        if (res == RX_HANDLER_ANOTHER) {
                struct team_pcpu_stats *pcpu_stats;

                pcpu_stats = this_cpu_ptr(team->pcpu_stats);
                u64_stats_update_begin(&pcpu_stats->syncp);
                pcpu_stats->rx_packets++;
                pcpu_stats->rx_bytes += skb->len;
                if (skb->pkt_type == PACKET_MULTICAST)
                        pcpu_stats->rx_multicast++;
                u64_stats_update_end(&pcpu_stats->syncp);

                skb->dev = team->dev;
        } else {
                this_cpu_inc(team->pcpu_stats->rx_dropped);
        }

        return res;
}
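
/*
 * Descriptive note on the return values above: RX_HANDLER_ANOTHER makes
 * the core re-run protocol demux with skb->dev retargeted to the team
 * device (normal delivery), RX_HANDLER_EXACT restricts delivery to
 * handlers bound exactly to the port device, and RX_HANDLER_CONSUMED
 * means the handler already took care of (or freed) the skb.
 */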


/*************************************
 * Multiqueue Tx port select override
 *************************************/

static int team_queue_override_init(struct team *team)
{
        struct list_head *listarr;
        unsigned int queue_cnt = team->dev->num_tx_queues - 1;
        unsigned int i;

        if (!queue_cnt)
                return 0;
        listarr = kmalloc(sizeof(struct list_head) * queue_cnt, GFP_KERNEL);
        if (!listarr)
                return -ENOMEM;
        team->qom_lists = listarr;
        for (i = 0; i < queue_cnt; i++)
                INIT_LIST_HEAD(listarr++);
        return 0;
}

static void team_queue_override_fini(struct team *team)
{
        kfree(team->qom_lists);
}

static struct list_head *__team_get_qom_list(struct team *team, u16 queue_id)
{
        return &team->qom_lists[queue_id - 1];
}

/*
 * note: already called with rcu_read_lock
 */
static bool team_queue_override_transmit(struct team *team, struct sk_buff *skb)
{
        struct list_head *qom_list;
        struct team_port *port;

        if (!team->queue_override_enabled || !skb->queue_mapping)
                return false;
        qom_list = __team_get_qom_list(team, skb->queue_mapping);
        list_for_each_entry_rcu(port, qom_list, qom_list) {
                if (!team_dev_queue_xmit(team, port, skb))
                        return true;
        }
        return false;
}
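
/*
 * Usage sketch (illustrative): binding a port to tx queue 1 via the
 * per-port "queue_id" option makes that port preferred for any skb whose
 * queue_mapping is 1. Traffic can then be steered onto that queue from
 * tc, along the lines of:
 *
 *        tc filter add dev team0 ... action skbedit queue_mapping 1
 *
 * Exact tc syntax depends on the classifier used; skbs with
 * queue_mapping 0 always fall through to the mode's transmit op.
 */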

static void __team_queue_override_port_del(struct team *team,
                                           struct team_port *port)
{
        if (!port->queue_id)
                return;
        list_del_rcu(&port->qom_list);
}

static bool team_queue_override_port_has_gt_prio_than(struct team_port *port,
                                                      struct team_port *cur)
{
        if (port->priority < cur->priority)
                return true;
        if (port->priority > cur->priority)
                return false;
        if (port->index < cur->index)
                return true;
        return false;
}

static void __team_queue_override_port_add(struct team *team,
                                           struct team_port *port)
{
        struct team_port *cur;
        struct list_head *qom_list;
        struct list_head *node;

        if (!port->queue_id)
                return;
        qom_list = __team_get_qom_list(team, port->queue_id);
        node = qom_list;
        list_for_each_entry(cur, qom_list, qom_list) {
                if (team_queue_override_port_has_gt_prio_than(port, cur))
                        break;
                node = &cur->qom_list;
        }
        list_add_tail_rcu(&port->qom_list, node);
}

static void __team_queue_override_enabled_check(struct team *team)
{
        struct team_port *port;
        bool enabled = false;

        list_for_each_entry(port, &team->port_list, list) {
                if (port->queue_id) {
                        enabled = true;
                        break;
                }
        }
        if (enabled == team->queue_override_enabled)
                return;
        netdev_dbg(team->dev, "%s queue override\n",
                   enabled ? "Enabling" : "Disabling");
        team->queue_override_enabled = enabled;
}

static void team_queue_override_port_prio_changed(struct team *team,
                                                  struct team_port *port)
{
        if (!port->queue_id || team_port_enabled(port))
                return;
        __team_queue_override_port_del(team, port);
        __team_queue_override_port_add(team, port);
        __team_queue_override_enabled_check(team);
}

static void team_queue_override_port_change_queue_id(struct team *team,
                                                     struct team_port *port,
                                                     u16 new_queue_id)
{
        if (team_port_enabled(port)) {
                __team_queue_override_port_del(team, port);
                port->queue_id = new_queue_id;
                __team_queue_override_port_add(team, port);
                __team_queue_override_enabled_check(team);
        } else {
                port->queue_id = new_queue_id;
        }
}

static void team_queue_override_port_add(struct team *team,
                                         struct team_port *port)
{
        __team_queue_override_port_add(team, port);
        __team_queue_override_enabled_check(team);
}

static void team_queue_override_port_del(struct team *team,
                                         struct team_port *port)
{
        __team_queue_override_port_del(team, port);
        __team_queue_override_enabled_check(team);
}


/****************
 * Port handling
 ****************/

static bool team_port_find(const struct team *team,
                           const struct team_port *port)
{
        struct team_port *cur;

        list_for_each_entry(cur, &team->port_list, list)
                if (cur == port)
                        return true;
        return false;
}

/*
 * Enable/disable port by adding it to the enabled-port hashlist and setting
 * port->index (this might be racy, so a reader could see an incorrect index
 * while processing an in-flight packet, but that is not a problem). Writes
 * are guarded by team->lock.
 */
static void team_port_enable(struct team *team,
                             struct team_port *port)
{
        if (team_port_enabled(port))
                return;
        port->index = team->en_port_count++;
        hlist_add_head_rcu(&port->hlist,
                           team_port_index_hash(team, port->index));
        team_adjust_ops(team);
        team_queue_override_port_add(team, port);
        if (team->ops.port_enabled)
                team->ops.port_enabled(team, port);
        team_notify_peers(team);
        team_mcast_rejoin(team);
}

static void __reconstruct_port_hlist(struct team *team, int rm_index)
{
        int i;
        struct team_port *port;

        for (i = rm_index + 1; i < team->en_port_count; i++) {
                port = team_get_port_by_index(team, i);
                hlist_del_rcu(&port->hlist);
                port->index--;
                hlist_add_head_rcu(&port->hlist,
                                   team_port_index_hash(team, port->index));
        }
}

static void team_port_disable(struct team *team,
                              struct team_port *port)
{
        if (!team_port_enabled(port))
                return;
        if (team->ops.port_disabled)
                team->ops.port_disabled(team, port);
        hlist_del_rcu(&port->hlist);
        __reconstruct_port_hlist(team, port->index);
        port->index = -1;
        team->en_port_count--;
        team_queue_override_port_del(team, port);
        team_adjust_ops(team);
        team_notify_peers(team);
        team_mcast_rejoin(team);
}
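
/*
 * Descriptive note: enabled ports are kept densely indexed 0..n-1 so that
 * a mode can pick a port with simple arithmetic on the index (see
 * team_get_port_by_index()); __reconstruct_port_hlist() shifts the
 * indices down after a removal to preserve that invariant.
 */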

#define TEAM_VLAN_FEATURES (NETIF_F_ALL_CSUM | NETIF_F_SG | \
                            NETIF_F_FRAGLIST | NETIF_F_ALL_TSO | \
                            NETIF_F_HIGHDMA | NETIF_F_LRO)

static void __team_compute_features(struct team *team)
{
        struct team_port *port;
        u32 vlan_features = TEAM_VLAN_FEATURES;
        unsigned short max_hard_header_len = ETH_HLEN;
        unsigned int flags, dst_release_flag = IFF_XMIT_DST_RELEASE;

        list_for_each_entry(port, &team->port_list, list) {
                vlan_features = netdev_increment_features(vlan_features,
                                        port->dev->vlan_features,
                                        TEAM_VLAN_FEATURES);

                dst_release_flag &= port->dev->priv_flags;
                if (port->dev->hard_header_len > max_hard_header_len)
                        max_hard_header_len = port->dev->hard_header_len;
        }

        team->dev->vlan_features = vlan_features;
        team->dev->hard_header_len = max_hard_header_len;

        flags = team->dev->priv_flags & ~IFF_XMIT_DST_RELEASE;
        team->dev->priv_flags = flags | dst_release_flag;

        netdev_change_features(team->dev);
}

static void team_compute_features(struct team *team)
{
        mutex_lock(&team->lock);
        __team_compute_features(team);
        mutex_unlock(&team->lock);
}

static int team_port_enter(struct team *team, struct team_port *port)
{
        int err = 0;

        dev_hold(team->dev);
        port->dev->priv_flags |= IFF_TEAM_PORT;
        if (team->ops.port_enter) {
                err = team->ops.port_enter(team, port);
                if (err) {
                        netdev_err(team->dev, "Device %s failed to enter team mode\n",
                                   port->dev->name);
                        goto err_port_enter;
                }
        }

        return 0;

err_port_enter:
        port->dev->priv_flags &= ~IFF_TEAM_PORT;
        dev_put(team->dev);

        return err;
}

static void team_port_leave(struct team *team, struct team_port *port)
{
        if (team->ops.port_leave)
                team->ops.port_leave(team, port);
        port->dev->priv_flags &= ~IFF_TEAM_PORT;
        dev_put(team->dev);
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static int team_port_enable_netpoll(struct team *team, struct team_port *port,
                                    gfp_t gfp)
{
        struct netpoll *np;
        int err;

        if (!team->dev->npinfo)
                return 0;

        np = kzalloc(sizeof(*np), gfp);
        if (!np)
                return -ENOMEM;

        err = __netpoll_setup(np, port->dev, gfp);
        if (err) {
                kfree(np);
                return err;
        }
        port->np = np;
        return err;
}

static void team_port_disable_netpoll(struct team_port *port)
{
        struct netpoll *np = port->np;

        if (!np)
                return;
        port->np = NULL;

        /* Wait for transmitting packets to finish before freeing. */
        synchronize_rcu_bh();
        __netpoll_cleanup(np);
        kfree(np);
}
#else
static int team_port_enable_netpoll(struct team *team, struct team_port *port,
                                    gfp_t gfp)
{
        return 0;
}
static void team_port_disable_netpoll(struct team_port *port)
{
}
#endif

static void __team_port_change_port_added(struct team_port *port, bool linkup);
static int team_dev_type_check_change(struct net_device *dev,
                                      struct net_device *port_dev);

static int team_port_add(struct team *team, struct net_device *port_dev)
{
        struct net_device *dev = team->dev;
        struct team_port *port;
        char *portname = port_dev->name;
        int err;

        if (port_dev->flags & IFF_LOOPBACK) {
                netdev_err(dev, "Device %s is loopback device. Loopback devices can't be added as a team port\n",
                           portname);
                return -EINVAL;
        }

        if (team_port_exists(port_dev)) {
                netdev_err(dev, "Device %s is already a port of a team device\n",
                           portname);
                return -EBUSY;
        }

        if (port_dev->features & NETIF_F_VLAN_CHALLENGED &&
            vlan_uses_dev(dev)) {
                netdev_err(dev, "Device %s is VLAN challenged and team device has VLAN set up\n",
                           portname);
                return -EPERM;
        }

        err = team_dev_type_check_change(dev, port_dev);
        if (err)
                return err;

        if (port_dev->flags & IFF_UP) {
                netdev_err(dev, "Device %s is up. Set it down before adding it as a team port\n",
                           portname);
                return -EBUSY;
        }

        port = kzalloc(sizeof(struct team_port) + team->mode->port_priv_size,
                       GFP_KERNEL);
        if (!port)
                return -ENOMEM;

        port->dev = port_dev;
        port->team = team;
        INIT_LIST_HEAD(&port->qom_list);

        port->orig.mtu = port_dev->mtu;
        err = dev_set_mtu(port_dev, dev->mtu);
        if (err) {
                netdev_dbg(dev, "Error %d calling dev_set_mtu\n", err);
                goto err_set_mtu;
        }

        memcpy(port->orig.dev_addr, port_dev->dev_addr, port_dev->addr_len);

        err = team_port_enter(team, port);
        if (err) {
                netdev_err(dev, "Device %s failed to enter team mode\n",
                           portname);
                goto err_port_enter;
        }

        err = dev_open(port_dev);
        if (err) {
                netdev_dbg(dev, "Device %s opening failed\n",
                           portname);
                goto err_dev_open;
        }

        err = vlan_vids_add_by_dev(port_dev, dev);
        if (err) {
                netdev_err(dev, "Failed to add vlan ids to device %s\n",
                           portname);
                goto err_vids_add;
        }

        err = team_port_enable_netpoll(team, port, GFP_KERNEL);
        if (err) {
                netdev_err(dev, "Failed to enable netpoll on device %s\n",
                           portname);
                goto err_enable_netpoll;
        }

        err = netdev_master_upper_dev_link(port_dev, dev);
        if (err) {
                netdev_err(dev, "Device %s failed to set upper link\n",
                           portname);
                goto err_set_upper_link;
        }

        err = netdev_rx_handler_register(port_dev, team_handle_frame,
                                         port);
        if (err) {
                netdev_err(dev, "Device %s failed to register rx_handler\n",
                           portname);
                goto err_handler_register;
        }

        err = __team_option_inst_add_port(team, port);
        if (err) {
                netdev_err(dev, "Device %s failed to add per-port options\n",
                           portname);
                goto err_option_port_add;
        }

        port->index = -1;
        list_add_tail_rcu(&port->list, &team->port_list);
        team_port_enable(team, port);
        __team_compute_features(team);
        __team_port_change_port_added(port, !!netif_carrier_ok(port_dev));
        __team_options_change_check(team);

        netdev_info(dev, "Port device %s added\n", portname);

        return 0;

err_option_port_add:
        netdev_rx_handler_unregister(port_dev);

err_handler_register:
        netdev_upper_dev_unlink(port_dev, dev);

err_set_upper_link:
        team_port_disable_netpoll(port);

err_enable_netpoll:
        vlan_vids_del_by_dev(port_dev, dev);

err_vids_add:
        dev_close(port_dev);

err_dev_open:
        team_port_leave(team, port);
        team_port_set_orig_dev_addr(port);

err_port_enter:
        dev_set_mtu(port_dev, port->orig.mtu);

err_set_mtu:
        kfree(port);

        return err;
}
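
/*
 * Userspace note (illustrative): a port is typically enslaved with
 * iproute2, which reaches this function via the team device's
 * ndo_add_slave op:
 *
 *        ip link add name team0 type team
 *        ip link set dev eth0 down
 *        ip link set dev eth0 master team0
 *
 * The port device must be down first, per the IFF_UP check above.
 */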

static void __team_port_change_port_removed(struct team_port *port);

static int team_port_del(struct team *team, struct net_device *port_dev)
{
        struct net_device *dev = team->dev;
        struct team_port *port;
        char *portname = port_dev->name;

        port = team_port_get_rtnl(port_dev);
        if (!port || !team_port_find(team, port)) {
                netdev_err(dev, "Device %s does not act as a port of this team\n",
                           portname);
                return -ENOENT;
        }

        team_port_disable(team, port);
        list_del_rcu(&port->list);
        netdev_rx_handler_unregister(port_dev);
        netdev_upper_dev_unlink(port_dev, dev);
        team_port_disable_netpoll(port);
        vlan_vids_del_by_dev(port_dev, dev);
        dev_uc_unsync(port_dev, dev);
        dev_mc_unsync(port_dev, dev);
        dev_close(port_dev);
        team_port_leave(team, port);

        __team_option_inst_mark_removed_port(team, port);
        __team_options_change_check(team);
        __team_option_inst_del_port(team, port);
        __team_port_change_port_removed(port);

        team_port_set_orig_dev_addr(port);
        dev_set_mtu(port_dev, port->orig.mtu);
        kfree_rcu(port, rcu);
        netdev_info(dev, "Port device %s removed\n", portname);
        __team_compute_features(team);

        return 0;
}


/*****************
 * Net device ops
 *****************/

static int team_mode_option_get(struct team *team, struct team_gsetter_ctx *ctx)
{
        ctx->data.str_val = team->mode->kind;
        return 0;
}

static int team_mode_option_set(struct team *team, struct team_gsetter_ctx *ctx)
{
        return team_change_mode(team, ctx->data.str_val);
}

static int team_notify_peers_count_get(struct team *team,
                                       struct team_gsetter_ctx *ctx)
{
        ctx->data.u32_val = team->notify_peers.count;
        return 0;
}

static int team_notify_peers_count_set(struct team *team,
                                       struct team_gsetter_ctx *ctx)
{
        team->notify_peers.count = ctx->data.u32_val;
        return 0;
}

static int team_notify_peers_interval_get(struct team *team,
                                          struct team_gsetter_ctx *ctx)
{
        ctx->data.u32_val = team->notify_peers.interval;
        return 0;
}

static int team_notify_peers_interval_set(struct team *team,
                                          struct team_gsetter_ctx *ctx)
{
        team->notify_peers.interval = ctx->data.u32_val;
        return 0;
}

static int team_mcast_rejoin_count_get(struct team *team,
                                       struct team_gsetter_ctx *ctx)
{
        ctx->data.u32_val = team->mcast_rejoin.count;
        return 0;
}

static int team_mcast_rejoin_count_set(struct team *team,
                                       struct team_gsetter_ctx *ctx)
{
        team->mcast_rejoin.count = ctx->data.u32_val;
        return 0;
}

static int team_mcast_rejoin_interval_get(struct team *team,
                                          struct team_gsetter_ctx *ctx)
{
        ctx->data.u32_val = team->mcast_rejoin.interval;
        return 0;
}

static int team_mcast_rejoin_interval_set(struct team *team,
                                          struct team_gsetter_ctx *ctx)
{
        team->mcast_rejoin.interval = ctx->data.u32_val;
        return 0;
}

static int team_port_en_option_get(struct team *team,
                                   struct team_gsetter_ctx *ctx)
{
        struct team_port *port = ctx->info->port;

        ctx->data.bool_val = team_port_enabled(port);
        return 0;
}

static int team_port_en_option_set(struct team *team,
                                   struct team_gsetter_ctx *ctx)
{
        struct team_port *port = ctx->info->port;

        if (ctx->data.bool_val)
                team_port_enable(team, port);
        else
                team_port_disable(team, port);
        return 0;
}

static int team_user_linkup_option_get(struct team *team,
                                       struct team_gsetter_ctx *ctx)
{
        struct team_port *port = ctx->info->port;

        ctx->data.bool_val = port->user.linkup;
        return 0;
}

static int team_user_linkup_option_set(struct team *team,
                                       struct team_gsetter_ctx *ctx)
{
        struct team_port *port = ctx->info->port;

        port->user.linkup = ctx->data.bool_val;
        team_refresh_port_linkup(port);
        return 0;
}

static int team_user_linkup_en_option_get(struct team *team,
                                          struct team_gsetter_ctx *ctx)
{
        struct team_port *port = ctx->info->port;

        ctx->data.bool_val = port->user.linkup_enabled;
        return 0;
}

static int team_user_linkup_en_option_set(struct team *team,
                                          struct team_gsetter_ctx *ctx)
{
        struct team_port *port = ctx->info->port;

        port->user.linkup_enabled = ctx->data.bool_val;
        team_refresh_port_linkup(port);
        return 0;
}

static int team_priority_option_get(struct team *team,
                                    struct team_gsetter_ctx *ctx)
{
        struct team_port *port = ctx->info->port;

        ctx->data.s32_val = port->priority;
        return 0;
}

static int team_priority_option_set(struct team *team,
                                    struct team_gsetter_ctx *ctx)
{
        struct team_port *port = ctx->info->port;
        s32 priority = ctx->data.s32_val;

        if (port->priority == priority)
                return 0;
        port->priority = priority;
        team_queue_override_port_prio_changed(team, port);
        return 0;
}

static int team_queue_id_option_get(struct team *team,
                                    struct team_gsetter_ctx *ctx)
{
        struct team_port *port = ctx->info->port;

        ctx->data.u32_val = port->queue_id;
        return 0;
}

static int team_queue_id_option_set(struct team *team,
                                    struct team_gsetter_ctx *ctx)
{
        struct team_port *port = ctx->info->port;
        u16 new_queue_id = ctx->data.u32_val;

        if (port->queue_id == new_queue_id)
                return 0;
        if (new_queue_id >= team->dev->real_num_tx_queues)
                return -EINVAL;
        team_queue_override_port_change_queue_id(team, port, new_queue_id);
        return 0;
}

static const struct team_option team_options[] = {
        {
                .name = "mode",
                .type = TEAM_OPTION_TYPE_STRING,
                .getter = team_mode_option_get,
                .setter = team_mode_option_set,
        },
        {
                .name = "notify_peers_count",
                .type = TEAM_OPTION_TYPE_U32,
                .getter = team_notify_peers_count_get,
                .setter = team_notify_peers_count_set,
        },
        {
                .name = "notify_peers_interval",
                .type = TEAM_OPTION_TYPE_U32,
                .getter = team_notify_peers_interval_get,
                .setter = team_notify_peers_interval_set,
        },
        {
                .name = "mcast_rejoin_count",
                .type = TEAM_OPTION_TYPE_U32,
                .getter = team_mcast_rejoin_count_get,
                .setter = team_mcast_rejoin_count_set,
        },
        {
                .name = "mcast_rejoin_interval",
                .type = TEAM_OPTION_TYPE_U32,
                .getter = team_mcast_rejoin_interval_get,
                .setter = team_mcast_rejoin_interval_set,
        },
        {
                .name = "enabled",
                .type = TEAM_OPTION_TYPE_BOOL,
                .per_port = true,
                .getter = team_port_en_option_get,
                .setter = team_port_en_option_set,
        },
        {
                .name = "user_linkup",
                .type = TEAM_OPTION_TYPE_BOOL,
                .per_port = true,
                .getter = team_user_linkup_option_get,
                .setter = team_user_linkup_option_set,
        },
        {
                .name = "user_linkup_enabled",
                .type = TEAM_OPTION_TYPE_BOOL,
                .per_port = true,
                .getter = team_user_linkup_en_option_get,
                .setter = team_user_linkup_en_option_set,
        },
        {
                .name = "priority",
                .type = TEAM_OPTION_TYPE_S32,
                .per_port = true,
                .getter = team_priority_option_get,
                .setter = team_priority_option_set,
        },
        {
                .name = "queue_id",
                .type = TEAM_OPTION_TYPE_U32,
                .per_port = true,
                .getter = team_queue_id_option_get,
                .setter = team_queue_id_option_set,
        },
};
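
/*
 * Userspace note (illustrative): these options are exposed over generic
 * netlink and are normally accessed through libteam, e.g. with its teamnl
 * helper, roughly:
 *
 *        teamnl team0 getoption mode
 *        teamnl team0 setoption notify_peers_count 1
 *
 * The exact command syntax, in particular how per-port options are
 * addressed, depends on the libteam version.
 */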

static struct lock_class_key team_netdev_xmit_lock_key;
static struct lock_class_key team_netdev_addr_lock_key;
static struct lock_class_key team_tx_busylock_key;

static void team_set_lockdep_class_one(struct net_device *dev,
                                       struct netdev_queue *txq,
                                       void *unused)
{
        lockdep_set_class(&txq->_xmit_lock, &team_netdev_xmit_lock_key);
}

static void team_set_lockdep_class(struct net_device *dev)
{
        lockdep_set_class(&dev->addr_list_lock, &team_netdev_addr_lock_key);
        netdev_for_each_tx_queue(dev, team_set_lockdep_class_one, NULL);
        dev->qdisc_tx_busylock = &team_tx_busylock_key;
}
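
/*
 * Descriptive note: transmitting through the team device takes the same
 * kinds of locks (tx queue _xmit_lock, addr_list_lock) again on the port
 * devices underneath it. Without the dedicated lockdep classes above,
 * lockdep would see lock-within-same-class nesting and report false
 * positive deadlocks for this legitimate stacking.
 */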

static int team_init(struct net_device *dev)
{
        struct team *team = netdev_priv(dev);
        int i;
        int err;

        team->dev = dev;
        mutex_init(&team->lock);
        team_set_no_mode(team);

        team->pcpu_stats = alloc_percpu(struct team_pcpu_stats);
        if (!team->pcpu_stats)
                return -ENOMEM;

        for (i = 0; i < TEAM_PORT_HASHENTRIES; i++)
                INIT_HLIST_HEAD(&team->en_port_hlist[i]);
        INIT_LIST_HEAD(&team->port_list);
        err = team_queue_override_init(team);
        if (err)
                goto err_team_queue_override_init;

        team_adjust_ops(team);

        INIT_LIST_HEAD(&team->option_list);
        INIT_LIST_HEAD(&team->option_inst_list);

        team_notify_peers_init(team);
        team_mcast_rejoin_init(team);

        err = team_options_register(team, team_options, ARRAY_SIZE(team_options));
        if (err)
                goto err_options_register;
        netif_carrier_off(dev);

        team_set_lockdep_class(dev);

        return 0;

err_options_register:
        team_mcast_rejoin_fini(team);
        team_notify_peers_fini(team);
        team_queue_override_fini(team);
err_team_queue_override_init:
        free_percpu(team->pcpu_stats);

        return err;
}

static void team_uninit(struct net_device *dev)
{
        struct team *team = netdev_priv(dev);
        struct team_port *port;
        struct team_port *tmp;

        mutex_lock(&team->lock);
        list_for_each_entry_safe(port, tmp, &team->port_list, list)
                team_port_del(team, port->dev);

        __team_change_mode(team, NULL); /* cleanup */
        __team_options_unregister(team, team_options, ARRAY_SIZE(team_options));
        team_mcast_rejoin_fini(team);
        team_notify_peers_fini(team);
        team_queue_override_fini(team);
        mutex_unlock(&team->lock);
}

static void team_destructor(struct net_device *dev)
{
        struct team *team = netdev_priv(dev);

        free_percpu(team->pcpu_stats);
        free_netdev(dev);
}

static int team_open(struct net_device *dev)
{
        return 0;
}

static int team_close(struct net_device *dev)
{
        return 0;
}

/*
 * note: already called with rcu_read_lock
 */
static netdev_tx_t team_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct team *team = netdev_priv(dev);
        bool tx_success;
        unsigned int len = skb->len;

        tx_success = team_queue_override_transmit(team, skb);
        if (!tx_success)
                tx_success = team->ops.transmit(team, skb);
        if (tx_success) {
                struct team_pcpu_stats *pcpu_stats;

                pcpu_stats = this_cpu_ptr(team->pcpu_stats);
                u64_stats_update_begin(&pcpu_stats->syncp);
                pcpu_stats->tx_packets++;
                pcpu_stats->tx_bytes += len;
                u64_stats_update_end(&pcpu_stats->syncp);
        } else {
                this_cpu_inc(team->pcpu_stats->tx_dropped);
        }

        return NETDEV_TX_OK;
}
1639
1640static u16 team_select_queue(struct net_device *dev, struct sk_buff *skb)
1641{
1642        /*
1643         * This helper function exists to help dev_pick_tx get the correct
1644         * destination queue.  Using a helper function skips a call to
1645         * skb_tx_hash and will put the skbs in the queue we expect on their
1646         * way down to the team driver.
1647         */
1648        u16 txq = skb_rx_queue_recorded(skb) ? skb_get_rx_queue(skb) : 0;
1649
1650        /*
1651         * Save the original txq to restore before passing to the driver
1652         */
1653        qdisc_skb_cb(skb)->slave_dev_queue_mapping = skb->queue_mapping;
1654
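            /*
             * A recorded rx queue may exceed the number of tx queues the team
             * device exposes; fold it back into range by repeated subtraction,
             * which is cheap in the common single-pass case.
             */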
1655        if (unlikely(txq >= dev->real_num_tx_queues)) {
1656                do {
1657                        txq -= dev->real_num_tx_queues;
1658                } while (txq >= dev->real_num_tx_queues);
1659        }
1660        return txq;
1661}
1662
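    /*
     * Propagate promiscuity/allmulti changes down to every port.
     * dev_set_promiscuity() and dev_set_allmulti() maintain reference
     * counts, so a +1/-1 delta mirrors the team device's own flag
     * transition.
     */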
1663static void team_change_rx_flags(struct net_device *dev, int change)
1664{
1665        struct team *team = netdev_priv(dev);
1666        struct team_port *port;
1667        int inc;
1668
1669        rcu_read_lock();
1670        list_for_each_entry_rcu(port, &team->port_list, list) {
1671                if (change & IFF_PROMISC) {
1672                        inc = dev->flags & IFF_PROMISC ? 1 : -1;
1673                        dev_set_promiscuity(port->dev, inc);
1674                }
1675                if (change & IFF_ALLMULTI) {
1676                        inc = dev->flags & IFF_ALLMULTI ? 1 : -1;
1677                        dev_set_allmulti(port->dev, inc);
1678                }
1679        }
1680        rcu_read_unlock();
1681}
1682
1683static void team_set_rx_mode(struct net_device *dev)
1684{
1685        struct team *team = netdev_priv(dev);
1686        struct team_port *port;
1687
1688        rcu_read_lock();
1689        list_for_each_entry_rcu(port, &team->port_list, list) {
1690                dev_uc_sync_multiple(port->dev, dev);
1691                dev_mc_sync_multiple(port->dev, dev);
1692        }
1693        rcu_read_unlock();
1694}
1695
1696static int team_set_mac_address(struct net_device *dev, void *p)
1697{
1698        struct sockaddr *addr = p;
1699        struct team *team = netdev_priv(dev);
1700        struct team_port *port;
1701
1702        if (dev->type == ARPHRD_ETHER && !is_valid_ether_addr(addr->sa_data))
1703                return -EADDRNOTAVAIL;
1704        memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
            /* port_change_dev_addr() may end up in dev_set_mac_address(),
             * which can sleep, so walk the port list under the team lock
             * rather than under rcu_read_lock.
             */
1705        mutex_lock(&team->lock);
1706        list_for_each_entry(port, &team->port_list, list)
1707                if (team->ops.port_change_dev_addr)
1708                        team->ops.port_change_dev_addr(team, port);
1709        mutex_unlock(&team->lock);
1710        return 0;
1711}
1712
1713static int team_change_mtu(struct net_device *dev, int new_mtu)
1714{
1715        struct team *team = netdev_priv(dev);
1716        struct team_port *port;
1717        int err;
1718
1719        /*
1720         * Although this is a reader, it's guarded by the team lock. It's
1721         * not possible to traverse the list in reverse under rcu_read_lock.
1722         */
1723        mutex_lock(&team->lock);
1724        list_for_each_entry(port, &team->port_list, list) {
1725                err = dev_set_mtu(port->dev, new_mtu);
1726                if (err) {
1727                        netdev_err(dev, "Device %s failed to change mtu\n",
1728                                   port->dev->name);
1729                        goto unwind;
1730                }
1731        }
1732        mutex_unlock(&team->lock);
1733
1734        dev->mtu = new_mtu;
1735
1736        return 0;
1737
1738unwind:
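            /* Restore the old MTU on each port already changed, newest first. */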
1739        list_for_each_entry_continue_reverse(port, &team->port_list, list)
1740                dev_set_mtu(port->dev, dev->mtu);
1741        mutex_unlock(&team->lock);
1742
1743        return err;
1744}
1745
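    /*
     * Fold the per-cpu counters into a single rtnl_link_stats64. The
     * u64_stats_fetch_begin/retry pair keeps the 64bit counter reads
     * consistent even on 32bit machines.
     */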
1746static struct rtnl_link_stats64 *
1747team_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
1748{
1749        struct team *team = netdev_priv(dev);
1750        struct team_pcpu_stats *p;
1751        u64 rx_packets, rx_bytes, rx_multicast, tx_packets, tx_bytes;
1752        u32 rx_dropped = 0, tx_dropped = 0;
1753        unsigned int start;
1754        int i;
1755
1756        for_each_possible_cpu(i) {
1757                p = per_cpu_ptr(team->pcpu_stats, i);
1758                do {
1759                        start = u64_stats_fetch_begin_bh(&p->syncp);
1760                        rx_packets      = p->rx_packets;
1761                        rx_bytes        = p->rx_bytes;
1762                        rx_multicast    = p->rx_multicast;
1763                        tx_packets      = p->tx_packets;
1764                        tx_bytes        = p->tx_bytes;
1765                } while (u64_stats_fetch_retry_bh(&p->syncp, start));
1766
1767                stats->rx_packets       += rx_packets;
1768                stats->rx_bytes         += rx_bytes;
1769                stats->multicast        += rx_multicast;
1770                stats->tx_packets       += tx_packets;
1771                stats->tx_bytes         += tx_bytes;
1772                /*
1773                 * rx_dropped & tx_dropped are u32, updated
1774                 * without syncp protection.
1775                 */
1776                rx_dropped      += p->rx_dropped;
1777                tx_dropped      += p->tx_dropped;
1778        }
1779        stats->rx_dropped       = rx_dropped;
1780        stats->tx_dropped       = tx_dropped;
1781        return stats;
1782}
1783
1784static int team_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
1785{
1786        struct team *team = netdev_priv(dev);
1787        struct team_port *port;
1788        int err;
1789
1790        /*
1791         * Although this is a reader, it's guarded by the team lock. It's
1792         * not possible to traverse the list in reverse under rcu_read_lock.
1793         */
1794        mutex_lock(&team->lock);
1795        list_for_each_entry(port, &team->port_list, list) {
1796                err = vlan_vid_add(port->dev, proto, vid);
1797                if (err)
1798                        goto unwind;
1799        }
1800        mutex_unlock(&team->lock);
1801
1802        return 0;
1803
1804unwind:
1805        list_for_each_entry_continue_reverse(port, &team->port_list, list)
1806                vlan_vid_del(port->dev, proto, vid);
1807        mutex_unlock(&team->lock);
1808
1809        return err;
1810}
1811
1812static int team_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
1813{
1814        struct team *team = netdev_priv(dev);
1815        struct team_port *port;
1816
            /* vlan_vid_del() can sleep, so walk the port list under the team
             * lock rather than under rcu_read_lock.
             */
1817        mutex_lock(&team->lock);
1818        list_for_each_entry(port, &team->port_list, list)
1819                vlan_vid_del(port->dev, proto, vid);
1820        mutex_unlock(&team->lock);
1821
1822        return 0;
1823}
1824
1825#ifdef CONFIG_NET_POLL_CONTROLLER
1826static void team_poll_controller(struct net_device *dev)
1827{
1828}
1829
1830static void __team_netpoll_cleanup(struct team *team)
1831{
1832        struct team_port *port;
1833
1834        list_for_each_entry(port, &team->port_list, list)
1835                team_port_disable_netpoll(port);
1836}
1837
1838static void team_netpoll_cleanup(struct net_device *dev)
1839{
1840        struct team *team = netdev_priv(dev);
1841
1842        mutex_lock(&team->lock);
1843        __team_netpoll_cleanup(team);
1844        mutex_unlock(&team->lock);
1845}
1846
1847static int team_netpoll_setup(struct net_device *dev,
1848                              struct netpoll_info *npinfo, gfp_t gfp)
1849{
1850        struct team *team = netdev_priv(dev);
1851        struct team_port *port;
1852        int err = 0;
1853
1854        mutex_lock(&team->lock);
1855        list_for_each_entry(port, &team->port_list, list) {
1856                err = team_port_enable_netpoll(team, port, gfp);
1857                if (err) {
1858                        __team_netpoll_cleanup(team);
1859                        break;
1860                }
1861        }
1862        mutex_unlock(&team->lock);
1863        return err;
1864}
1865#endif
1866
1867static int team_add_slave(struct net_device *dev, struct net_device *port_dev)
1868{
1869        struct team *team = netdev_priv(dev);
1870        int err;
1871
1872        mutex_lock(&team->lock);
1873        err = team_port_add(team, port_dev);
1874        mutex_unlock(&team->lock);
1875        return err;
1876}
1877
1878static int team_del_slave(struct net_device *dev, struct net_device *port_dev)
1879{
1880        struct team *team = netdev_priv(dev);
1881        int err;
1882
1883        mutex_lock(&team->lock);
1884        err = team_port_del(team, port_dev);
1885        mutex_unlock(&team->lock);
1886        return err;
1887}
1888
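    /*
     * The team device may only advertise features that make sense across
     * all of its ports: NETIF_F_ONE_FOR_ALL features must be supported by
     * every port, while NETIF_F_ALL_FOR_ALL features are kept if any port
     * provides them. netdev_increment_features() folds each port's
     * feature set in under the original mask.
     */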
1889static netdev_features_t team_fix_features(struct net_device *dev,
1890                                           netdev_features_t features)
1891{
1892        struct team_port *port;
1893        struct team *team = netdev_priv(dev);
1894        netdev_features_t mask;
1895
1896        mask = features;
1897        features &= ~NETIF_F_ONE_FOR_ALL;
1898        features |= NETIF_F_ALL_FOR_ALL;
1899
1900        rcu_read_lock();
1901        list_for_each_entry_rcu(port, &team->port_list, list) {
1902                features = netdev_increment_features(features,
1903                                                     port->dev->features,
1904                                                     mask);
1905        }
1906        rcu_read_unlock();
1907        return features;
1908}
1909
1910static int team_change_carrier(struct net_device *dev, bool new_carrier)
1911{
1912        struct team *team = netdev_priv(dev);
1913
1914        team->user_carrier_enabled = true;
1915
1916        if (new_carrier)
1917                netif_carrier_on(dev);
1918        else
1919                netif_carrier_off(dev);
1920        return 0;
1921}
1922
1923static const struct net_device_ops team_netdev_ops = {
1924        .ndo_init               = team_init,
1925        .ndo_uninit             = team_uninit,
1926        .ndo_open               = team_open,
1927        .ndo_stop               = team_close,
1928        .ndo_start_xmit         = team_xmit,
1929        .ndo_select_queue       = team_select_queue,
1930        .ndo_change_rx_flags    = team_change_rx_flags,
1931        .ndo_set_rx_mode        = team_set_rx_mode,
1932        .ndo_set_mac_address    = team_set_mac_address,
1933        .ndo_change_mtu         = team_change_mtu,
1934        .ndo_get_stats64        = team_get_stats64,
1935        .ndo_vlan_rx_add_vid    = team_vlan_rx_add_vid,
1936        .ndo_vlan_rx_kill_vid   = team_vlan_rx_kill_vid,
1937#ifdef CONFIG_NET_POLL_CONTROLLER
1938        .ndo_poll_controller    = team_poll_controller,
1939        .ndo_netpoll_setup      = team_netpoll_setup,
1940        .ndo_netpoll_cleanup    = team_netpoll_cleanup,
1941#endif
1942        .ndo_add_slave          = team_add_slave,
1943        .ndo_del_slave          = team_del_slave,
1944        .ndo_fix_features       = team_fix_features,
1945        .ndo_change_carrier     = team_change_carrier,
1946};
1947
1948/***********************
1949 * ethtool interface
1950 ***********************/
1951
1952static void team_ethtool_get_drvinfo(struct net_device *dev,
1953                                     struct ethtool_drvinfo *drvinfo)
1954{
1955        strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
1956        strlcpy(drvinfo->version, UTS_RELEASE, sizeof(drvinfo->version));
1957}
1958
1959static const struct ethtool_ops team_ethtool_ops = {
1960        .get_drvinfo            = team_ethtool_get_drvinfo,
1961        .get_link               = ethtool_op_get_link,
1962};
1963
1964/***********************
1965 * rt netlink interface
1966 ***********************/
1967
1968static void team_setup_by_port(struct net_device *dev,
1969                               struct net_device *port_dev)
1970{
1971        dev->header_ops = port_dev->header_ops;
1972        dev->type = port_dev->type;
1973        dev->hard_header_len = port_dev->hard_header_len;
1974        dev->addr_len = port_dev->addr_len;
1975        dev->mtu = port_dev->mtu;
1976        memcpy(dev->broadcast, port_dev->broadcast, port_dev->addr_len);
1977        eth_hw_addr_inherit(dev, port_dev);
1978}
1979
1980static int team_dev_type_check_change(struct net_device *dev,
1981                                      struct net_device *port_dev)
1982{
1983        struct team *team = netdev_priv(dev);
1984        char *portname = port_dev->name;
1985        int err;
1986
1987        if (dev->type == port_dev->type)
1988                return 0;
1989        if (!list_empty(&team->port_list)) {
1990                netdev_err(dev, "Device %s is of different type\n", portname);
1991                return -EBUSY;
1992        }
1993        err = call_netdevice_notifiers(NETDEV_PRE_TYPE_CHANGE, dev);
1994        err = notifier_to_errno(err);
1995        if (err) {
1996                netdev_err(dev, "Refused to change device type\n");
1997                return err;
1998        }
1999        dev_uc_flush(dev);
2000        dev_mc_flush(dev);
2001        team_setup_by_port(dev, port_dev);
2002        call_netdevice_notifiers(NETDEV_POST_TYPE_CHANGE, dev);
2003        return 0;
2004}
2005
2006static void team_setup(struct net_device *dev)
2007{
2008        ether_setup(dev);
2009
2010        dev->netdev_ops = &team_netdev_ops;
2011        dev->ethtool_ops = &team_ethtool_ops;
2012        dev->destructor = team_destructor;
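            /* The team device only hands skbs off to its ports, so it needs
             * no qdisc queue of its own; the dst is kept (IFF_XMIT_DST_RELEASE
             * cleared below) because the transmitting port may still need it.
             */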
2013        dev->tx_queue_len = 0;
2014        dev->flags |= IFF_MULTICAST;
2015        dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_TX_SKB_SHARING);
2016
2017        /*
2018         * Indicate we support unicast address filtering. That way the core
2019         * won't put us into promisc mode when a unicast address is added.
2020         * Leave this up to the underlying drivers.
2021         */
2022        dev->priv_flags |= IFF_UNICAST_FLT | IFF_LIVE_ADDR_CHANGE;
2023
2024        dev->features |= NETIF_F_LLTX;
2025        dev->features |= NETIF_F_GRO;
2026        dev->hw_features = TEAM_VLAN_FEATURES |
2027                           NETIF_F_HW_VLAN_CTAG_TX |
2028                           NETIF_F_HW_VLAN_CTAG_RX |
2029                           NETIF_F_HW_VLAN_CTAG_FILTER;
2030
2031        dev->hw_features &= ~(NETIF_F_ALL_CSUM & ~NETIF_F_HW_CSUM);
2032        dev->features |= dev->hw_features;
2033}
2034
2035static int team_newlink(struct net *src_net, struct net_device *dev,
2036                        struct nlattr *tb[], struct nlattr *data[])
2037{
2040        if (tb[IFLA_ADDRESS] == NULL)
2041                eth_hw_addr_random(dev);
2042
2043        return register_netdevice(dev);
2048}
2049
2050static int team_validate(struct nlattr *tb[], struct nlattr *data[])
2051{
2052        if (tb[IFLA_ADDRESS]) {
2053                if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
2054                        return -EINVAL;
2055                if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
2056                        return -EADDRNOTAVAIL;
2057        }
2058        return 0;
2059}
2060
2061static unsigned int team_get_num_tx_queues(void)
2062{
2063        return TEAM_DEFAULT_NUM_TX_QUEUES;
2064}
2065
2066static unsigned int team_get_num_rx_queues(void)
2067{
2068        return TEAM_DEFAULT_NUM_RX_QUEUES;
2069}
2070
2071static struct rtnl_link_ops team_link_ops __read_mostly = {
2072        .kind                   = DRV_NAME,
2073        .priv_size              = sizeof(struct team),
2074        .setup                  = team_setup,
2075        .newlink                = team_newlink,
2076        .validate               = team_validate,
2077        .get_num_tx_queues      = team_get_num_tx_queues,
2078        .get_num_rx_queues      = team_get_num_rx_queues,
2079};
2080
2081
2082/***********************************
2083 * Generic netlink custom interface
2084 ***********************************/
2085
2086static struct genl_family team_nl_family = {
2087        .id             = GENL_ID_GENERATE,
2088        .name           = TEAM_GENL_NAME,
2089        .version        = TEAM_GENL_VERSION,
2090        .maxattr        = TEAM_ATTR_MAX,
2091        .netnsok        = true,
2092};
2093
2094static const struct nla_policy team_nl_policy[TEAM_ATTR_MAX + 1] = {
2095        [TEAM_ATTR_UNSPEC]                      = { .type = NLA_UNSPEC, },
2096        [TEAM_ATTR_TEAM_IFINDEX]                = { .type = NLA_U32 },
2097        [TEAM_ATTR_LIST_OPTION]                 = { .type = NLA_NESTED },
2098        [TEAM_ATTR_LIST_PORT]                   = { .type = NLA_NESTED },
2099};
2100
2101static const struct nla_policy
2102team_nl_option_policy[TEAM_ATTR_OPTION_MAX + 1] = {
2103        [TEAM_ATTR_OPTION_UNSPEC]               = { .type = NLA_UNSPEC, },
2104        [TEAM_ATTR_OPTION_NAME] = {
2105                .type = NLA_STRING,
2106                .len = TEAM_STRING_MAX_LEN,
2107        },
2108        [TEAM_ATTR_OPTION_CHANGED]              = { .type = NLA_FLAG },
2109        [TEAM_ATTR_OPTION_TYPE]                 = { .type = NLA_U8 },
2110        [TEAM_ATTR_OPTION_DATA]                 = { .type = NLA_BINARY },
2111};
2112
2113static int team_nl_cmd_noop(struct sk_buff *skb, struct genl_info *info)
2114{
2115        struct sk_buff *msg;
2116        void *hdr;
2117        int err;
2118
2119        msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
2120        if (!msg)
2121                return -ENOMEM;
2122
2123        hdr = genlmsg_put(msg, info->snd_portid, info->snd_seq,
2124                          &team_nl_family, 0, TEAM_CMD_NOOP);
2125        if (!hdr) {
2126                err = -EMSGSIZE;
2127                goto err_msg_put;
2128        }
2129
2130        genlmsg_end(msg, hdr);
2131
2132        return genlmsg_unicast(genl_info_net(info), msg, info->snd_portid);
2133
2134err_msg_put:
2135        nlmsg_free(msg);
2136
2137        return err;
2138}
2139
2140/*
2141 * Netlink cmd functions should be guarded by the following two functions.
2142 * Since dev gets held here, that ensures dev won't disappear in between.
2143 */
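    /*
     * A sketch of the resulting pattern in the cmd handlers below:
     *
     *      team = team_nl_team_get(info);  (takes dev ref, locks team->lock)
     *      if (!team)
     *              return -EINVAL;
     *      ... operate on team ...
     *      team_nl_team_put(team);         (unlocks, drops the dev ref)
     */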
2144static struct team *team_nl_team_get(struct genl_info *info)
2145{
2146        struct net *net = genl_info_net(info);
2147        int ifindex;
2148        struct net_device *dev;
2149        struct team *team;
2150
2151        if (!info->attrs[TEAM_ATTR_TEAM_IFINDEX])
2152                return NULL;
2153
2154        ifindex = nla_get_u32(info->attrs[TEAM_ATTR_TEAM_IFINDEX]);
2155        dev = dev_get_by_index(net, ifindex);
2156        if (!dev || dev->netdev_ops != &team_netdev_ops) {
2157                if (dev)
2158                        dev_put(dev);
2159                return NULL;
2160        }
2161
2162        team = netdev_priv(dev);
2163        mutex_lock(&team->lock);
2164        return team;
2165}
2166
2167static void team_nl_team_put(struct team *team)
2168{
2169        mutex_unlock(&team->lock);
2170        dev_put(team->dev);
2171}
2172
2173typedef int team_nl_send_func_t(struct sk_buff *skb,
2174                                struct team *team, u32 portid);
2175
2176static int team_nl_send_unicast(struct sk_buff *skb, struct team *team, u32 portid)
2177{
2178        return genlmsg_unicast(dev_net(team->dev), skb, portid);
2179}
2180
2181static int team_nl_fill_one_option_get(struct sk_buff *skb, struct team *team,
2182                                       struct team_option_inst *opt_inst)
2183{
2184        struct nlattr *option_item;
2185        struct team_option *option = opt_inst->option;
2186        struct team_option_inst_info *opt_inst_info = &opt_inst->info;
2187        struct team_gsetter_ctx ctx;
2188        int err;
2189
2190        ctx.info = opt_inst_info;
2191        err = team_option_get(team, opt_inst, &ctx);
2192        if (err)
2193                return err;
2194
2195        option_item = nla_nest_start(skb, TEAM_ATTR_ITEM_OPTION);
2196        if (!option_item)
2197                return -EMSGSIZE;
2198
2199        if (nla_put_string(skb, TEAM_ATTR_OPTION_NAME, option->name))
2200                goto nest_cancel;
2201        if (opt_inst_info->port &&
2202            nla_put_u32(skb, TEAM_ATTR_OPTION_PORT_IFINDEX,
2203                        opt_inst_info->port->dev->ifindex))
2204                goto nest_cancel;
2205        if (opt_inst->option->array_size &&
2206            nla_put_u32(skb, TEAM_ATTR_OPTION_ARRAY_INDEX,
2207                        opt_inst_info->array_index))
2208                goto nest_cancel;
2209
2210        switch (option->type) {
2211        case TEAM_OPTION_TYPE_U32:
2212                if (nla_put_u8(skb, TEAM_ATTR_OPTION_TYPE, NLA_U32))
2213                        goto nest_cancel;
2214                if (nla_put_u32(skb, TEAM_ATTR_OPTION_DATA, ctx.data.u32_val))
2215                        goto nest_cancel;
2216                break;
2217        case TEAM_OPTION_TYPE_STRING:
2218                if (nla_put_u8(skb, TEAM_ATTR_OPTION_TYPE, NLA_STRING))
2219                        goto nest_cancel;
2220                if (nla_put_string(skb, TEAM_ATTR_OPTION_DATA,
2221                                   ctx.data.str_val))
2222                        goto nest_cancel;
2223                break;
2224        case TEAM_OPTION_TYPE_BINARY:
2225                if (nla_put_u8(skb, TEAM_ATTR_OPTION_TYPE, NLA_BINARY))
2226                        goto nest_cancel;
2227                if (nla_put(skb, TEAM_ATTR_OPTION_DATA, ctx.data.bin_val.len,
2228                            ctx.data.bin_val.ptr))
2229                        goto nest_cancel;
2230                break;
2231        case TEAM_OPTION_TYPE_BOOL:
2232                if (nla_put_u8(skb, TEAM_ATTR_OPTION_TYPE, NLA_FLAG))
2233                        goto nest_cancel;
2234                if (ctx.data.bool_val &&
2235                    nla_put_flag(skb, TEAM_ATTR_OPTION_DATA))
2236                        goto nest_cancel;
2237                break;
2238        case TEAM_OPTION_TYPE_S32:
2239                if (nla_put_u8(skb, TEAM_ATTR_OPTION_TYPE, NLA_S32))
2240                        goto nest_cancel;
2241                if (nla_put_s32(skb, TEAM_ATTR_OPTION_DATA, ctx.data.s32_val))
2242                        goto nest_cancel;
2243                break;
2244        default:
2245                BUG();
2246        }
2247        if (opt_inst->removed && nla_put_flag(skb, TEAM_ATTR_OPTION_REMOVED))
2248                goto nest_cancel;
2249        if (opt_inst->changed) {
2250                if (nla_put_flag(skb, TEAM_ATTR_OPTION_CHANGED))
2251                        goto nest_cancel;
2252                opt_inst->changed = false;
2253        }
2254        nla_nest_end(skb, option_item);
2255        return 0;
2256
2257nest_cancel:
2258        nla_nest_cancel(skb, option_item);
2259        return -EMSGSIZE;
2260}
2261
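    /*
     * Flush-and-refill helper for the multipart getters below: if a partly
     * filled skb is passed in, it is first handed to send_func, then a
     * fresh skb is allocated so the caller can continue filling.
     */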
2262static int __send_and_alloc_skb(struct sk_buff **pskb,
2263                                struct team *team, u32 portid,
2264                                team_nl_send_func_t *send_func)
2265{
2266        int err;
2267
2268        if (*pskb) {
2269                err = send_func(*pskb, team, portid);
2270                if (err)
2271                        return err;
2272        }
2273        *pskb = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL);
2274        if (!*pskb)
2275                return -ENOMEM;
2276        return 0;
2277}
2278
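    /*
     * Dump the selected option instances as TEAM_CMD_OPTIONS_GET messages.
     * If they do not all fit into one skb, the filled skb is flushed and
     * the dump restarts from the first unsent instance (hence NLM_F_MULTI);
     * the sequence is terminated by an NLMSG_DONE message.
     */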
2279static int team_nl_send_options_get(struct team *team, u32 portid, u32 seq,
2280                                    int flags, team_nl_send_func_t *send_func,
2281                                    struct list_head *sel_opt_inst_list)
2282{
2283        struct nlattr *option_list;
2284        struct nlmsghdr *nlh;
2285        void *hdr;
2286        struct team_option_inst *opt_inst;
2287        int err;
2288        struct sk_buff *skb = NULL;
2289        bool incomplete;
2290        int i;
2291
2292        opt_inst = list_first_entry(sel_opt_inst_list,
2293                                    struct team_option_inst, tmp_list);
2294
2295start_again:
2296        err = __send_and_alloc_skb(&skb, team, portid, send_func);
2297        if (err)
2298                return err;
2299
2300        hdr = genlmsg_put(skb, portid, seq, &team_nl_family, flags | NLM_F_MULTI,
2301                          TEAM_CMD_OPTIONS_GET);
2302        if (!hdr) {
                    nlmsg_free(skb);        /* don't leak the fresh skb */
2303                return -EMSGSIZE;
            }
2304
2305        if (nla_put_u32(skb, TEAM_ATTR_TEAM_IFINDEX, team->dev->ifindex))
2306                goto nla_put_failure;
2307        option_list = nla_nest_start(skb, TEAM_ATTR_LIST_OPTION);
2308        if (!option_list)
2309                goto nla_put_failure;
2310
2311        i = 0;
2312        incomplete = false;
2313        list_for_each_entry_from(opt_inst, sel_opt_inst_list, tmp_list) {
2314                err = team_nl_fill_one_option_get(skb, team, opt_inst);
2315                if (err) {
2316                        if (err == -EMSGSIZE) {
2317                                if (!i)
2318                                        goto errout;
2319                                incomplete = true;
2320                                break;
2321                        }
2322                        goto errout;
2323                }
2324                i++;
2325        }
2326
2327        nla_nest_end(skb, option_list);
2328        genlmsg_end(skb, hdr);
2329        if (incomplete)
2330                goto start_again;
2331
2332send_done:
2333        nlh = nlmsg_put(skb, portid, seq, NLMSG_DONE, 0, flags | NLM_F_MULTI);
2334        if (!nlh) {
2335                err = __send_and_alloc_skb(&skb, team, portid, send_func);
2336                if (err)
2337                        goto errout;
2338                goto send_done;
2339        }
2340
2341        return send_func(skb, team, portid);
2342
2343nla_put_failure:
2344        err = -EMSGSIZE;
2345errout:
2346        genlmsg_cancel(skb, hdr);
2347        nlmsg_free(skb);
2348        return err;
2349}
2350
2351static int team_nl_cmd_options_get(struct sk_buff *skb, struct genl_info *info)
2352{
2353        struct team *team;
2354        struct team_option_inst *opt_inst;
2355        int err;
2356        LIST_HEAD(sel_opt_inst_list);
2357
2358        team = team_nl_team_get(info);
2359        if (!team)
2360                return -EINVAL;
2361
2362        list_for_each_entry(opt_inst, &team->option_inst_list, list)
2363                list_add_tail(&opt_inst->tmp_list, &sel_opt_inst_list);
2364        err = team_nl_send_options_get(team, info->snd_portid, info->snd_seq,
2365                                       NLM_F_ACK, team_nl_send_unicast,
2366                                       &sel_opt_inst_list);
2367
2368        team_nl_team_put(team);
2369
2370        return err;
2371}
2372
2373static int team_nl_send_event_options_get(struct team *team,
2374                                          struct list_head *sel_opt_inst_list);
2375
2376static int team_nl_cmd_options_set(struct sk_buff *skb, struct genl_info *info)
2377{
2378        struct team *team;
2379        int err = 0;
2380        int i;
2381        struct nlattr *nl_option;
2382        LIST_HEAD(opt_inst_list);
2383
2384        team = team_nl_team_get(info);
2385        if (!team)
2386                return -EINVAL;
2387
2389        if (!info->attrs[TEAM_ATTR_LIST_OPTION]) {
2390                err = -EINVAL;
2391                goto team_put;
2392        }
2393
2394        nla_for_each_nested(nl_option, info->attrs[TEAM_ATTR_LIST_OPTION], i) {
2395                struct nlattr *opt_attrs[TEAM_ATTR_OPTION_MAX + 1];
2396                struct nlattr *attr;
2397                struct nlattr *attr_data;
2398                enum team_option_type opt_type;
2399                int opt_port_ifindex = 0; /* != 0 for per-port options */
2400                u32 opt_array_index = 0;
2401                bool opt_is_array = false;
2402                struct team_option_inst *opt_inst;
2403                char *opt_name;
2404                bool opt_found = false;
2405
2406                if (nla_type(nl_option) != TEAM_ATTR_ITEM_OPTION) {
2407                        err = -EINVAL;
2408                        goto team_put;
2409                }
2410                err = nla_parse_nested(opt_attrs, TEAM_ATTR_OPTION_MAX,
2411                                       nl_option, team_nl_option_policy);
2412                if (err)
2413                        goto team_put;
2414                if (!opt_attrs[TEAM_ATTR_OPTION_NAME] ||
2415                    !opt_attrs[TEAM_ATTR_OPTION_TYPE]) {
2416                        err = -EINVAL;
2417                        goto team_put;
2418                }
2419                switch (nla_get_u8(opt_attrs[TEAM_ATTR_OPTION_TYPE])) {
2420                case NLA_U32:
2421                        opt_type = TEAM_OPTION_TYPE_U32;
2422                        break;
2423                case NLA_STRING:
2424                        opt_type = TEAM_OPTION_TYPE_STRING;
2425                        break;
2426                case NLA_BINARY:
2427                        opt_type = TEAM_OPTION_TYPE_BINARY;
2428                        break;
2429                case NLA_FLAG:
2430                        opt_type = TEAM_OPTION_TYPE_BOOL;
2431                        break;
2432                case NLA_S32:
2433                        opt_type = TEAM_OPTION_TYPE_S32;
2434                        break;
2435                default:
                            err = -EINVAL;
2436                        goto team_put;
2437                }
2438
2439                attr_data = opt_attrs[TEAM_ATTR_OPTION_DATA];
2440                if (opt_type != TEAM_OPTION_TYPE_BOOL && !attr_data) {
2441                        err = -EINVAL;
2442                        goto team_put;
2443                }
2444
2445                opt_name = nla_data(opt_attrs[TEAM_ATTR_OPTION_NAME]);
2446                attr = opt_attrs[TEAM_ATTR_OPTION_PORT_IFINDEX];
2447                if (attr)
2448                        opt_port_ifindex = nla_get_u32(attr);
2449
2450                attr = opt_attrs[TEAM_ATTR_OPTION_ARRAY_INDEX];
2451                if (attr) {
2452                        opt_is_array = true;
2453                        opt_array_index = nla_get_u32(attr);
2454                }
2455
2456                list_for_each_entry(opt_inst, &team->option_inst_list, list) {
2457                        struct team_option *option = opt_inst->option;
2458                        struct team_gsetter_ctx ctx;
2459                        struct team_option_inst_info *opt_inst_info;
2460                        int tmp_ifindex;
2461
2462                        opt_inst_info = &opt_inst->info;
2463                        tmp_ifindex = opt_inst_info->port ?
2464                                      opt_inst_info->port->dev->ifindex : 0;
2465                        if (option->type != opt_type ||
2466                            strcmp(option->name, opt_name) ||
2467                            tmp_ifindex != opt_port_ifindex ||
2468                            (option->array_size && !opt_is_array) ||
2469                            opt_inst_info->array_index != opt_array_index)
2470                                continue;
2471                        opt_found = true;
2472                        ctx.info = opt_inst_info;
2473                        switch (opt_type) {
2474                        case TEAM_OPTION_TYPE_U32:
2475                                ctx.data.u32_val = nla_get_u32(attr_data);
2476                                break;
2477                        case TEAM_OPTION_TYPE_STRING:
2478                                if (nla_len(attr_data) > TEAM_STRING_MAX_LEN) {
2479                                        err = -EINVAL;
2480                                        goto team_put;
2481                                }
2482                                ctx.data.str_val = nla_data(attr_data);
2483                                break;
2484                        case TEAM_OPTION_TYPE_BINARY:
2485                                ctx.data.bin_val.len = nla_len(attr_data);
2486                                ctx.data.bin_val.ptr = nla_data(attr_data);
2487                                break;
2488                        case TEAM_OPTION_TYPE_BOOL:
2489                                ctx.data.bool_val = attr_data ? true : false;
2490                                break;
2491                        case TEAM_OPTION_TYPE_S32:
2492                                ctx.data.s32_val = nla_get_s32(attr_data);
2493                                break;
2494                        default:
2495                                BUG();
2496                        }
2497                        err = team_option_set(team, opt_inst, &ctx);
2498                        if (err)
2499                                goto team_put;
2500                        opt_inst->changed = true;
2501                        list_add(&opt_inst->tmp_list, &opt_inst_list);
2502                }
2503                if (!opt_found) {
2504                        err = -ENOENT;
2505                        goto team_put;
2506                }
2507        }
2508
2509        err = team_nl_send_event_options_get(team, &opt_inst_list);
2510
2511team_put:
2512        team_nl_team_put(team);
2513
2514        return err;
2515}
2516
2517static int team_nl_fill_one_port_get(struct sk_buff *skb,
2518                                     struct team_port *port)
2519{
2520        struct nlattr *port_item;
2521
2522        port_item = nla_nest_start(skb, TEAM_ATTR_ITEM_PORT);
2523        if (!port_item)
2524                goto nest_cancel;
2525        if (nla_put_u32(skb, TEAM_ATTR_PORT_IFINDEX, port->dev->ifindex))
2526                goto nest_cancel;
2527        if (port->changed) {
2528                if (nla_put_flag(skb, TEAM_ATTR_PORT_CHANGED))
2529                        goto nest_cancel;
2530                port->changed = false;
2531        }
2532        if ((port->removed &&
2533             nla_put_flag(skb, TEAM_ATTR_PORT_REMOVED)) ||
2534            (port->state.linkup &&
2535             nla_put_flag(skb, TEAM_ATTR_PORT_LINKUP)) ||
2536            nla_put_u32(skb, TEAM_ATTR_PORT_SPEED, port->state.speed) ||
2537            nla_put_u8(skb, TEAM_ATTR_PORT_DUPLEX, port->state.duplex))
2538                goto nest_cancel;
2539        nla_nest_end(skb, port_item);
2540        return 0;
2541
2542nest_cancel:
2543        nla_nest_cancel(skb, port_item);
2544        return -EMSGSIZE;
2545}
2546
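    /*
     * Same multipart scheme as team_nl_send_options_get(), sending either
     * a single selected port or the whole port list.
     */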
2547static int team_nl_send_port_list_get(struct team *team, u32 portid, u32 seq,
2548                                      int flags, team_nl_send_func_t *send_func,
2549                                      struct team_port *one_port)
2550{
2551        struct nlattr *port_list;
2552        struct nlmsghdr *nlh;
2553        void *hdr;
2554        struct team_port *port;
2555        int err;
2556        struct sk_buff *skb = NULL;
2557        bool incomplete;
2558        int i;
2559
2560        port = list_first_entry_or_null(&team->port_list,
2561                                        struct team_port, list);
2562
2563start_again:
2564        err = __send_and_alloc_skb(&skb, team, portid, send_func);
2565        if (err)
2566                return err;
2567
2568        hdr = genlmsg_put(skb, portid, seq, &team_nl_family, flags | NLM_F_MULTI,
2569                          TEAM_CMD_PORT_LIST_GET);
2570        if (!hdr) {
                    nlmsg_free(skb);        /* don't leak the fresh skb */
2571                return -EMSGSIZE;
            }
2572
2573        if (nla_put_u32(skb, TEAM_ATTR_TEAM_IFINDEX, team->dev->ifindex))
2574                goto nla_put_failure;
2575        port_list = nla_nest_start(skb, TEAM_ATTR_LIST_PORT);
2576        if (!port_list)
2577                goto nla_put_failure;
2578
2579        i = 0;
2580        incomplete = false;
2581
2582        /* If one port is selected, the caller wants a port list containing
2583         * only that port. Otherwise go through all listed ports and send all.
2584         */
2585        if (one_port) {
2586                err = team_nl_fill_one_port_get(skb, one_port);
2587                if (err)
2588                        goto errout;
2589        } else if (port) {
2590                list_for_each_entry_from(port, &team->port_list, list) {
2591                        err = team_nl_fill_one_port_get(skb, port);
2592                        if (err) {
2593                                if (err == -EMSGSIZE) {
2594                                        if (!i)
2595                                                goto errout;
2596                                        incomplete = true;
2597                                        break;
2598                                }
2599                                goto errout;
2600                        }
2601                        i++;
2602                }
2603        }
2604
2605        nla_nest_end(skb, port_list);
2606        genlmsg_end(skb, hdr);
2607        if (incomplete)
2608                goto start_again;
2609
2610send_done:
2611        nlh = nlmsg_put(skb, portid, seq, NLMSG_DONE, 0, flags | NLM_F_MULTI);
2612        if (!nlh) {
2613                err = __send_and_alloc_skb(&skb, team, portid, send_func);
2614                if (err)
2615                        goto errout;
2616                goto send_done;
2617        }
2618
2619        return send_func(skb, team, portid);
2620
2621nla_put_failure:
2622        err = -EMSGSIZE;
2623errout:
2624        genlmsg_cancel(skb, hdr);
2625        nlmsg_free(skb);
2626        return err;
2627}
2628
2629static int team_nl_cmd_port_list_get(struct sk_buff *skb,
2630                                     struct genl_info *info)
2631{
2632        struct team *team;
2633        int err;
2634
2635        team = team_nl_team_get(info);
2636        if (!team)
2637                return -EINVAL;
2638
2639        err = team_nl_send_port_list_get(team, info->snd_portid, info->snd_seq,
2640                                         NLM_F_ACK, team_nl_send_unicast, NULL);
2641
2642        team_nl_team_put(team);
2643
2644        return err;
2645}
2646
2647static struct genl_ops team_nl_ops[] = {
2648        {
2649                .cmd = TEAM_CMD_NOOP,
2650                .doit = team_nl_cmd_noop,
2651                .policy = team_nl_policy,
2652        },
2653        {
2654                .cmd = TEAM_CMD_OPTIONS_SET,
2655                .doit = team_nl_cmd_options_set,
2656                .policy = team_nl_policy,
2657                .flags = GENL_ADMIN_PERM,
2658        },
2659        {
2660                .cmd = TEAM_CMD_OPTIONS_GET,
2661                .doit = team_nl_cmd_options_get,
2662                .policy = team_nl_policy,
2663                .flags = GENL_ADMIN_PERM,
2664        },
2665        {
2666                .cmd = TEAM_CMD_PORT_LIST_GET,
2667                .doit = team_nl_cmd_port_list_get,
2668                .policy = team_nl_policy,
2669                .flags = GENL_ADMIN_PERM,
2670        },
2671};
2672
2673static struct genl_multicast_group team_change_event_mcgrp = {
2674        .name = TEAM_GENL_CHANGE_EVENT_MC_GRP_NAME,
2675};
2676
2677static int team_nl_send_multicast(struct sk_buff *skb,
2678                                  struct team *team, u32 portid)
2679{
2680        return genlmsg_multicast_netns(dev_net(team->dev), skb, 0,
2681                                       team_change_event_mcgrp.id, GFP_KERNEL);
2682}
2683
2684static int team_nl_send_event_options_get(struct team *team,
2685                                          struct list_head *sel_opt_inst_list)
2686{
2687        return team_nl_send_options_get(team, 0, 0, 0, team_nl_send_multicast,
2688                                        sel_opt_inst_list);
2689}
2690
2691static int team_nl_send_event_port_get(struct team *team,
2692                                       struct team_port *port)
2693{
2694        return team_nl_send_port_list_get(team, 0, 0, 0, team_nl_send_multicast,
2695                                          port);
2696}
2697
2698static int team_nl_init(void)
2699{
2700        int err;
2701
2702        err = genl_register_family_with_ops(&team_nl_family, team_nl_ops,
2703                                            ARRAY_SIZE(team_nl_ops));
2704        if (err)
2705                return err;
2706
2707        err = genl_register_mc_group(&team_nl_family, &team_change_event_mcgrp);
2708        if (err)
2709                goto err_change_event_grp_reg;
2710
2711        return 0;
2712
2713err_change_event_grp_reg:
2714        genl_unregister_family(&team_nl_family);
2715
2716        return err;
2717}
2718
2719static void team_nl_fini(void)
2720{
2721        genl_unregister_family(&team_nl_family);
2722}
2723
2724
2725/******************
2726 * Change checkers
2727 ******************/
2728
2729static void __team_options_change_check(struct team *team)
2730{
2731        int err;
2732        struct team_option_inst *opt_inst;
2733        LIST_HEAD(sel_opt_inst_list);
2734
2735        list_for_each_entry(opt_inst, &team->option_inst_list, list) {
2736                if (opt_inst->changed)
2737                        list_add_tail(&opt_inst->tmp_list, &sel_opt_inst_list);
2738        }
2739        err = team_nl_send_event_options_get(team, &sel_opt_inst_list);
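            /* -ESRCH just means there are no listeners on the multicast group. */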
2740        if (err && err != -ESRCH)
2741                netdev_warn(team->dev, "Failed to send options change via netlink (err %d)\n",
2742                            err);
2743}
2744
2745/* rtnl lock is held */
2747static void __team_port_change_send(struct team_port *port, bool linkup)
2748{
2749        int err;
2750
2751        port->changed = true;
2752        port->state.linkup = linkup;
2753        team_refresh_port_linkup(port);
2754        if (linkup) {
2755                struct ethtool_cmd ecmd;
2756
2757                err = __ethtool_get_settings(port->dev, &ecmd);
2758                if (!err) {
2759                        port->state.speed = ethtool_cmd_speed(&ecmd);
2760                        port->state.duplex = ecmd.duplex;
2761                        goto send_event;
2762                }
2763        }
2764        port->state.speed = 0;
2765        port->state.duplex = 0;
2766
2767send_event:
2768        err = team_nl_send_event_port_get(port->team, port);
2769        if (err && err != -ESRCH)
2770                netdev_warn(port->team->dev, "Failed to send port change of device %s via netlink (err %d)\n",
2771                            port->dev->name, err);
2773}
2774
2775static void __team_carrier_check(struct team *team)
2776{
2777        struct team_port *port;
2778        bool team_linkup;
2779
2780        if (team->user_carrier_enabled)
2781                return;
2782
2783        team_linkup = false;
2784        list_for_each_entry(port, &team->port_list, list) {
2785                if (port->linkup) {
2786                        team_linkup = true;
2787                        break;
2788                }
2789        }
2790
2791        if (team_linkup)
2792                netif_carrier_on(team->dev);
2793        else
2794                netif_carrier_off(team->dev);
2795}
2796
2797static void __team_port_change_check(struct team_port *port, bool linkup)
2798{
2799        if (port->state.linkup != linkup)
2800                __team_port_change_send(port, linkup);
2801        __team_carrier_check(port->team);
2802}
2803
2804static void __team_port_change_port_added(struct team_port *port, bool linkup)
2805{
2806        __team_port_change_send(port, linkup);
2807        __team_carrier_check(port->team);
2808}
2809
2810static void __team_port_change_port_removed(struct team_port *port)
2811{
2812        port->removed = true;
2813        __team_port_change_send(port, false);
2814        __team_carrier_check(port->team);
2815}
2816
2817static void team_port_change_check(struct team_port *port, bool linkup)
2818{
2819        struct team *team = port->team;
2820
2821        mutex_lock(&team->lock);
2822        __team_port_change_check(port, linkup);
2823        mutex_unlock(&team->lock);
2824}
2825
2826
2827/************************************
2828 * Net device notifier event handler
2829 ************************************/
2830
2831static int team_device_event(struct notifier_block *unused,
2832                             unsigned long event, void *ptr)
2833{
2834        struct net_device *dev = netdev_notifier_info_to_dev(ptr);
2835        struct team_port *port;
2836
2837        port = team_port_get_rtnl(dev);
2838        if (!port)
2839                return NOTIFY_DONE;
2840
2841        switch (event) {
2842        case NETDEV_UP:
2843                if (netif_carrier_ok(dev))
2844                        team_port_change_check(port, true);
                    break;
2845        case NETDEV_DOWN:
2846                team_port_change_check(port, false);
                    break;
2847        case NETDEV_CHANGE:
2848                if (netif_running(port->dev))
2849                        team_port_change_check(port,
2850                                               !!netif_carrier_ok(port->dev));
2851                break;
2852        case NETDEV_UNREGISTER:
2853                team_del_slave(port->team->dev, dev);
2854                break;
2855        case NETDEV_FEAT_CHANGE:
2856                team_compute_features(port->team);
2857                break;
2858        case NETDEV_CHANGEMTU:
2859                /* Forbid changing the MTU of an underlying device */
2860                return NOTIFY_BAD;
2861        case NETDEV_PRE_TYPE_CHANGE:
2862                /* Forbid changing the type of an underlying device */
2863                return NOTIFY_BAD;
2864        case NETDEV_RESEND_IGMP:
2865                /* Propagate to master device */
2866                call_netdevice_notifiers(event, port->team->dev);
2867                break;
2868        }
2869        return NOTIFY_DONE;
2870}
2871
2872static struct notifier_block team_notifier_block __read_mostly = {
2873        .notifier_call = team_device_event,
2874};
2875
2876
2877/***********************
2878 * Module init and exit
2879 ***********************/
2880
2881static int __init team_module_init(void)
2882{
2883        int err;
2884
2885        register_netdevice_notifier(&team_notifier_block);
2886
2887        err = rtnl_link_register(&team_link_ops);
2888        if (err)
2889                goto err_rtnl_reg;
2890
2891        err = team_nl_init();
2892        if (err)
2893                goto err_nl_init;
2894
2895        return 0;
2896
2897err_nl_init:
2898        rtnl_link_unregister(&team_link_ops);
2899
2900err_rtnl_reg:
2901        unregister_netdevice_notifier(&team_notifier_block);
2902
2903        return err;
2904}
2905
2906static void __exit team_module_exit(void)
2907{
2908        team_nl_fini();
2909        rtnl_link_unregister(&team_link_ops);
2910        unregister_netdevice_notifier(&team_notifier_block);
2911}
2912
2913module_init(team_module_init);
2914module_exit(team_module_exit);
2915
2916MODULE_LICENSE("GPL v2");
2917MODULE_AUTHOR("Jiri Pirko <jpirko@redhat.com>");
2918MODULE_DESCRIPTION("Ethernet team device driver");
2919MODULE_ALIAS_RTNL_LINK(DRV_NAME);
2920