linux/drivers/net/team/team.c
/*
 * drivers/net/team/team.c - Network team device driver
 * Copyright (c) 2011 Jiri Pirko <jpirko@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/rcupdate.h>
#include <linux/errno.h>
#include <linux/ctype.h>
#include <linux/notifier.h>
#include <linux/netdevice.h>
#include <linux/netpoll.h>
#include <linux/if_vlan.h>
#include <linux/if_arp.h>
#include <linux/socket.h>
#include <linux/etherdevice.h>
#include <linux/rtnetlink.h>
#include <net/rtnetlink.h>
#include <net/genetlink.h>
#include <net/netlink.h>
#include <net/sch_generic.h>
#include <generated/utsrelease.h>
#include <linux/if_team.h>

#define DRV_NAME "team"


/**********
 * Helpers
 **********/

#define team_port_exists(dev) ((dev)->priv_flags & IFF_TEAM_PORT)

static struct team_port *team_port_get_rcu(const struct net_device *dev)
{
        struct team_port *port = rcu_dereference(dev->rx_handler_data);

        return team_port_exists(dev) ? port : NULL;
}

static struct team_port *team_port_get_rtnl(const struct net_device *dev)
{
        struct team_port *port = rtnl_dereference(dev->rx_handler_data);

        return team_port_exists(dev) ? port : NULL;
}
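
/*
 * Both accessors read rx_handler_data locklessly; the caller must hold
 * rcu_read_lock() for team_port_get_rcu() or RTNL for
 * team_port_get_rtnl(). A minimal caller sketch (illustrative only;
 * use_port() is a hypothetical helper):
 *
 *      rcu_read_lock();
 *      port = team_port_get_rcu(port_dev);
 *      if (port)
 *              use_port(port);
 *      rcu_read_unlock();
 */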

/*
 * The ability to change the device address of an open port device is
 * checked in team_port_add(), so this function can be called without
 * checking the return value.
 */
static int __set_port_dev_addr(struct net_device *port_dev,
                               const unsigned char *dev_addr)
{
        struct sockaddr addr;

        memcpy(addr.sa_data, dev_addr, port_dev->addr_len);
        addr.sa_family = port_dev->type;
        return dev_set_mac_address(port_dev, &addr);
}

static int team_port_set_orig_dev_addr(struct team_port *port)
{
        return __set_port_dev_addr(port->dev, port->orig.dev_addr);
}

static int team_port_set_team_dev_addr(struct team *team,
                                       struct team_port *port)
{
        return __set_port_dev_addr(port->dev, team->dev->dev_addr);
}

int team_modeop_port_enter(struct team *team, struct team_port *port)
{
        return team_port_set_team_dev_addr(team, port);
}
EXPORT_SYMBOL(team_modeop_port_enter);

void team_modeop_port_change_dev_addr(struct team *team,
                                      struct team_port *port)
{
        team_port_set_team_dev_addr(team, port);
}
EXPORT_SYMBOL(team_modeop_port_change_dev_addr);

static void team_refresh_port_linkup(struct team_port *port)
{
        port->linkup = port->user.linkup_enabled ? port->user.linkup :
                                                   port->state.linkup;
}
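
/*
 * Example of the precedence above: with user.linkup_enabled set, the
 * user-supplied user.linkup value wins even if the physical link
 * (state.linkup) is down; otherwise the real carrier state is used.
 */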


/*******************
 * Options handling
 *******************/

struct team_option_inst { /* One for each option instance */
        struct list_head list;
        struct list_head tmp_list;
        struct team_option *option;
        struct team_option_inst_info info;
        bool changed;
        bool removed;
};

static struct team_option *__team_find_option(struct team *team,
                                              const char *opt_name)
{
        struct team_option *option;

        list_for_each_entry(option, &team->option_list, list) {
                if (strcmp(option->name, opt_name) == 0)
                        return option;
        }
        return NULL;
}

static void __team_option_inst_del(struct team_option_inst *opt_inst)
{
        list_del(&opt_inst->list);
        kfree(opt_inst);
}

static void __team_option_inst_del_option(struct team *team,
                                          struct team_option *option)
{
        struct team_option_inst *opt_inst, *tmp;

        list_for_each_entry_safe(opt_inst, tmp, &team->option_inst_list, list) {
                if (opt_inst->option == option)
                        __team_option_inst_del(opt_inst);
        }
}

static int __team_option_inst_add(struct team *team, struct team_option *option,
                                  struct team_port *port)
{
        struct team_option_inst *opt_inst;
        unsigned int array_size;
        unsigned int i;
        int err;

        array_size = option->array_size;
        if (!array_size)
                array_size = 1; /* No array but still need one instance */

        for (i = 0; i < array_size; i++) {
                opt_inst = kmalloc(sizeof(*opt_inst), GFP_KERNEL);
                if (!opt_inst)
                        return -ENOMEM;
                opt_inst->option = option;
                opt_inst->info.port = port;
                opt_inst->info.array_index = i;
                opt_inst->changed = true;
                opt_inst->removed = false;
                list_add_tail(&opt_inst->list, &team->option_inst_list);
                if (option->init) {
                        err = option->init(team, &opt_inst->info);
                        if (err)
                                return err;
                }

        }
        return 0;
}
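
/*
 * Note: if option->init() fails above, the instances already linked to
 * option_inst_list are intentionally left in place; both callers clean
 * them up via __team_option_inst_del_option() or
 * __team_option_inst_del_port().
 */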

static int __team_option_inst_add_option(struct team *team,
                                         struct team_option *option)
{
        struct team_port *port;
        int err;

        if (!option->per_port) {
                err = __team_option_inst_add(team, option, NULL);
                if (err)
                        goto inst_del_option;
        }

        list_for_each_entry(port, &team->port_list, list) {
                err = __team_option_inst_add(team, option, port);
                if (err)
                        goto inst_del_option;
        }
        return 0;

inst_del_option:
        __team_option_inst_del_option(team, option);
        return err;
}

static void __team_option_inst_mark_removed_option(struct team *team,
                                                   struct team_option *option)
{
        struct team_option_inst *opt_inst;

        list_for_each_entry(opt_inst, &team->option_inst_list, list) {
                if (opt_inst->option == option) {
                        opt_inst->changed = true;
                        opt_inst->removed = true;
                }
        }
}

static void __team_option_inst_del_port(struct team *team,
                                        struct team_port *port)
{
        struct team_option_inst *opt_inst, *tmp;

        list_for_each_entry_safe(opt_inst, tmp, &team->option_inst_list, list) {
                if (opt_inst->option->per_port &&
                    opt_inst->info.port == port)
                        __team_option_inst_del(opt_inst);
        }
}

static int __team_option_inst_add_port(struct team *team,
                                       struct team_port *port)
{
        struct team_option *option;
        int err;

        list_for_each_entry(option, &team->option_list, list) {
                if (!option->per_port)
                        continue;
                err = __team_option_inst_add(team, option, port);
                if (err)
                        goto inst_del_port;
        }
        return 0;

inst_del_port:
        __team_option_inst_del_port(team, port);
        return err;
}

static void __team_option_inst_mark_removed_port(struct team *team,
                                                 struct team_port *port)
{
        struct team_option_inst *opt_inst;

        list_for_each_entry(opt_inst, &team->option_inst_list, list) {
                if (opt_inst->info.port == port) {
                        opt_inst->changed = true;
                        opt_inst->removed = true;
                }
        }
}

static int __team_options_register(struct team *team,
                                   const struct team_option *option,
                                   size_t option_count)
{
        int i;
        struct team_option **dst_opts;
        int err;

        dst_opts = kcalloc(option_count, sizeof(struct team_option *),
                           GFP_KERNEL);
        if (!dst_opts)
                return -ENOMEM;
        for (i = 0; i < option_count; i++, option++) {
                if (__team_find_option(team, option->name)) {
                        err = -EEXIST;
                        goto alloc_rollback;
                }
                dst_opts[i] = kmemdup(option, sizeof(*option), GFP_KERNEL);
                if (!dst_opts[i]) {
                        err = -ENOMEM;
                        goto alloc_rollback;
                }
        }

        for (i = 0; i < option_count; i++) {
                err = __team_option_inst_add_option(team, dst_opts[i]);
                if (err)
                        goto inst_rollback;
                list_add_tail(&dst_opts[i]->list, &team->option_list);
        }

        kfree(dst_opts);
        return 0;

inst_rollback:
        for (i--; i >= 0; i--)
                __team_option_inst_del_option(team, dst_opts[i]);

        i = option_count; /* free all duplicated options, including the last */
alloc_rollback:
        for (i--; i >= 0; i--)
                kfree(dst_opts[i]);

        kfree(dst_opts);
        return err;
}

static void __team_options_mark_removed(struct team *team,
                                        const struct team_option *option,
                                        size_t option_count)
{
        int i;

        for (i = 0; i < option_count; i++, option++) {
                struct team_option *del_opt;

                del_opt = __team_find_option(team, option->name);
                if (del_opt)
                        __team_option_inst_mark_removed_option(team, del_opt);
        }
}

static void __team_options_unregister(struct team *team,
                                      const struct team_option *option,
                                      size_t option_count)
{
        int i;

        for (i = 0; i < option_count; i++, option++) {
                struct team_option *del_opt;

                del_opt = __team_find_option(team, option->name);
                if (del_opt) {
                        __team_option_inst_del_option(team, del_opt);
                        list_del(&del_opt->list);
                        kfree(del_opt);
                }
        }
}

static void __team_options_change_check(struct team *team);

int team_options_register(struct team *team,
                          const struct team_option *option,
                          size_t option_count)
{
        int err;

        err = __team_options_register(team, option, option_count);
        if (err)
                return err;
        __team_options_change_check(team);
        return 0;
}
EXPORT_SYMBOL(team_options_register);
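
/*
 * Sketch of how a mode would use team_options_register(); "my_knob" and
 * its getter/setter are hypothetical, not from any in-tree mode:
 *
 *      static const struct team_option mymode_options[] = {
 *              {
 *                      .name   = "my_knob",
 *                      .type   = TEAM_OPTION_TYPE_U32,
 *                      .getter = mymode_knob_get,
 *                      .setter = mymode_knob_set,
 *              },
 *      };
 *
 *      err = team_options_register(team, mymode_options,
 *                                  ARRAY_SIZE(mymode_options));
 */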

void team_options_unregister(struct team *team,
                             const struct team_option *option,
                             size_t option_count)
{
        __team_options_mark_removed(team, option, option_count);
        __team_options_change_check(team);
        __team_options_unregister(team, option, option_count);
}
EXPORT_SYMBOL(team_options_unregister);

static int team_option_get(struct team *team,
                           struct team_option_inst *opt_inst,
                           struct team_gsetter_ctx *ctx)
{
        if (!opt_inst->option->getter)
                return -EOPNOTSUPP;
        return opt_inst->option->getter(team, ctx);
}

static int team_option_set(struct team *team,
                           struct team_option_inst *opt_inst,
                           struct team_gsetter_ctx *ctx)
{
        if (!opt_inst->option->setter)
                return -EOPNOTSUPP;
        return opt_inst->option->setter(team, ctx);
}

void team_option_inst_set_change(struct team_option_inst_info *opt_inst_info)
{
        struct team_option_inst *opt_inst;

        opt_inst = container_of(opt_inst_info, struct team_option_inst, info);
        opt_inst->changed = true;
}
EXPORT_SYMBOL(team_option_inst_set_change);

void team_options_change_check(struct team *team)
{
        __team_options_change_check(team);
}
EXPORT_SYMBOL(team_options_change_check);


/****************
 * Mode handling
 ****************/

static LIST_HEAD(mode_list);
static DEFINE_SPINLOCK(mode_list_lock);

struct team_mode_item {
        struct list_head list;
        const struct team_mode *mode;
};

static struct team_mode_item *__find_mode(const char *kind)
{
        struct team_mode_item *mitem;

        list_for_each_entry(mitem, &mode_list, list) {
                if (strcmp(mitem->mode->kind, kind) == 0)
                        return mitem;
        }
        return NULL;
}

static bool is_good_mode_name(const char *name)
{
        while (*name != '\0') {
                if (!isalpha(*name) && !isdigit(*name) && *name != '_')
                        return false;
                name++;
        }
        return true;
}

int team_mode_register(const struct team_mode *mode)
{
        int err = 0;
        struct team_mode_item *mitem;

        if (!is_good_mode_name(mode->kind) ||
            mode->priv_size > TEAM_MODE_PRIV_SIZE)
                return -EINVAL;

        mitem = kmalloc(sizeof(*mitem), GFP_KERNEL);
        if (!mitem)
                return -ENOMEM;

        spin_lock(&mode_list_lock);
        if (__find_mode(mode->kind)) {
                err = -EEXIST;
                kfree(mitem);
                goto unlock;
        }
        mitem->mode = mode;
        list_add_tail(&mitem->list, &mode_list);
unlock:
        spin_unlock(&mode_list_lock);
        return err;
}
EXPORT_SYMBOL(team_mode_register);
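
/*
 * A minimal sketch of a mode module registering itself ("mymode" and
 * mymode_ops are hypothetical; real modes live in team_mode_*.c):
 *
 *      static const struct team_mode mymode = {
 *              .kind   = "mymode",
 *              .owner  = THIS_MODULE,
 *              .ops    = &mymode_ops,
 *      };
 *
 *      static int __init mymode_module_init(void)
 *      {
 *              return team_mode_register(&mymode);
 *      }
 */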

void team_mode_unregister(const struct team_mode *mode)
{
        struct team_mode_item *mitem;

        spin_lock(&mode_list_lock);
        mitem = __find_mode(mode->kind);
        if (mitem) {
                list_del_init(&mitem->list);
                kfree(mitem);
        }
        spin_unlock(&mode_list_lock);
}
EXPORT_SYMBOL(team_mode_unregister);

static const struct team_mode *team_mode_get(const char *kind)
{
        struct team_mode_item *mitem;
        const struct team_mode *mode = NULL;

        spin_lock(&mode_list_lock);
        mitem = __find_mode(kind);
        if (!mitem) {
                spin_unlock(&mode_list_lock);
                request_module("team-mode-%s", kind);
                spin_lock(&mode_list_lock);
                mitem = __find_mode(kind);
        }
        if (mitem) {
                mode = mitem->mode;
                if (!try_module_get(mode->owner))
                        mode = NULL;
        }

        spin_unlock(&mode_list_lock);
        return mode;
}
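
/*
 * team_mode_get() relies on mode modules being loadable via the
 * "team-mode-<kind>" request_module() alias used above; a mode module
 * typically declares it with MODULE_ALIAS_TEAM_MODE("mymode")
 * (hypothetical kind shown).
 */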

static void team_mode_put(const struct team_mode *mode)
{
        module_put(mode->owner);
}

static bool team_dummy_transmit(struct team *team, struct sk_buff *skb)
{
        dev_kfree_skb_any(skb);
        return false;
}

static rx_handler_result_t team_dummy_receive(struct team *team,
                                              struct team_port *port,
                                              struct sk_buff *skb)
{
        return RX_HANDLER_ANOTHER;
}

static const struct team_mode __team_no_mode = {
        .kind           = "*NOMODE*",
};

static bool team_is_mode_set(struct team *team)
{
        return team->mode != &__team_no_mode;
}

static void team_set_no_mode(struct team *team)
{
        team->user_carrier_enabled = false;
        team->mode = &__team_no_mode;
}

static void __team_adjust_ops(struct team *team, int en_port_count)
{
        /*
         * To avoid checks in rx/tx skb paths, ensure here that non-null and
         * correct ops are always set.
         */

        if (!en_port_count || !team_is_mode_set(team) ||
            !team->mode->ops->transmit)
                team->ops.transmit = team_dummy_transmit;
        else
                team->ops.transmit = team->mode->ops->transmit;

        if (!en_port_count || !team_is_mode_set(team) ||
            !team->mode->ops->receive)
                team->ops.receive = team_dummy_receive;
        else
                team->ops.receive = team->mode->ops->receive;
}

static void team_adjust_ops(struct team *team)
{
        __team_adjust_ops(team, team->en_port_count);
}

/*
 * We can benefit from the fact that no port may be present at the time of
 * mode change. Therefore no packets are in flight, so there is no need to
 * set the mode operations in any special way.
 */
static int __team_change_mode(struct team *team,
                              const struct team_mode *new_mode)
{
        /* Check if mode was previously set and do cleanup if so */
        if (team_is_mode_set(team)) {
                void (*exit_op)(struct team *team) = team->ops.exit;

                /* Clear ops area so no callback is called any longer */
                memset(&team->ops, 0, sizeof(struct team_mode_ops));
                team_adjust_ops(team);

                if (exit_op)
                        exit_op(team);
                team_mode_put(team->mode);
                team_set_no_mode(team);
                /* zero private data area */
                memset(&team->mode_priv, 0,
                       sizeof(struct team) - offsetof(struct team, mode_priv));
        }

        if (!new_mode)
                return 0;

        if (new_mode->ops->init) {
                int err;

                err = new_mode->ops->init(team);
                if (err)
                        return err;
        }

        team->mode = new_mode;
        memcpy(&team->ops, new_mode->ops, sizeof(struct team_mode_ops));
        team_adjust_ops(team);

        return 0;
}

static int team_change_mode(struct team *team, const char *kind)
{
        const struct team_mode *new_mode;
        struct net_device *dev = team->dev;
        int err;

        if (!list_empty(&team->port_list)) {
                netdev_err(dev, "No ports can be present during mode change\n");
                return -EBUSY;
        }

        if (team_is_mode_set(team) && strcmp(team->mode->kind, kind) == 0) {
                netdev_err(dev, "Unable to change to the same mode the team is in\n");
                return -EINVAL;
        }

        new_mode = team_mode_get(kind);
        if (!new_mode) {
                netdev_err(dev, "Mode \"%s\" not found\n", kind);
                return -EINVAL;
        }

        err = __team_change_mode(team, new_mode);
        if (err) {
                netdev_err(dev, "Failed to change to mode \"%s\"\n", kind);
                team_mode_put(new_mode);
                return err;
        }

        netdev_info(dev, "Mode changed to \"%s\"\n", kind);
        return 0;
}


/************************
 * Rx path frame handler
 ************************/

/* note: already called with rcu_read_lock */
static rx_handler_result_t team_handle_frame(struct sk_buff **pskb)
{
        struct sk_buff *skb = *pskb;
        struct team_port *port;
        struct team *team;
        rx_handler_result_t res;

        skb = skb_share_check(skb, GFP_ATOMIC);
        if (!skb)
                return RX_HANDLER_CONSUMED;

        *pskb = skb;

        port = team_port_get_rcu(skb->dev);
        team = port->team;
        if (!team_port_enabled(port)) {
                /* allow exact match delivery for disabled ports */
                res = RX_HANDLER_EXACT;
        } else {
                res = team->ops.receive(team, port, skb);
        }
        if (res == RX_HANDLER_ANOTHER) {
                struct team_pcpu_stats *pcpu_stats;

                pcpu_stats = this_cpu_ptr(team->pcpu_stats);
                u64_stats_update_begin(&pcpu_stats->syncp);
                pcpu_stats->rx_packets++;
                pcpu_stats->rx_bytes += skb->len;
                if (skb->pkt_type == PACKET_MULTICAST)
                        pcpu_stats->rx_multicast++;
                u64_stats_update_end(&pcpu_stats->syncp);

                skb->dev = team->dev;
        } else {
                this_cpu_inc(team->pcpu_stats->rx_dropped);
        }

        return res;
}
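
/*
 * rx_handler return values used above (per the rx_handler contract):
 * RX_HANDLER_CONSUMED - skb was consumed, do not process it further;
 * RX_HANDLER_ANOTHER  - skb->dev was changed (here to the team device),
 *                       so delivery is re-done for the new device;
 * RX_HANDLER_EXACT    - deliver only to exact-match protocol handlers.
 */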


/*************************************
 * Multiqueue Tx port select override
 *************************************/

static int team_queue_override_init(struct team *team)
{
        struct list_head *listarr;
        unsigned int queue_cnt = team->dev->num_tx_queues - 1;
        unsigned int i;

        if (!queue_cnt)
                return 0;
        listarr = kmalloc(sizeof(struct list_head) * queue_cnt, GFP_KERNEL);
        if (!listarr)
                return -ENOMEM;
        team->qom_lists = listarr;
        for (i = 0; i < queue_cnt; i++)
                INIT_LIST_HEAD(listarr + i);
        return 0;
}

static void team_queue_override_fini(struct team *team)
{
        kfree(team->qom_lists);
}

static struct list_head *__team_get_qom_list(struct team *team, u16 queue_id)
{
        return &team->qom_lists[queue_id - 1];
}
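
/*
 * Queue 0 never has an override list: qom_lists[] covers tx queues
 * 1..num_tx_queues-1 only, hence the "queue_id - 1" indexing here and
 * the !skb->queue_mapping early return in the transmit path below.
 */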

/*
 * note: already called with rcu_read_lock
 */
static bool team_queue_override_transmit(struct team *team, struct sk_buff *skb)
{
        struct list_head *qom_list;
        struct team_port *port;

        if (!team->queue_override_enabled || !skb->queue_mapping)
                return false;
        qom_list = __team_get_qom_list(team, skb->queue_mapping);
        list_for_each_entry_rcu(port, qom_list, qom_list) {
                if (!team_dev_queue_xmit(team, port, skb))
                        return true;
        }
        return false;
}

static void __team_queue_override_port_del(struct team *team,
                                           struct team_port *port)
{
        list_del_rcu(&port->qom_list);
        synchronize_rcu();
        INIT_LIST_HEAD(&port->qom_list);
}

static bool team_queue_override_port_has_gt_prio_than(struct team_port *port,
                                                      struct team_port *cur)
{
        if (port->priority < cur->priority)
                return true;
        if (port->priority > cur->priority)
                return false;
        if (port->index < cur->index)
                return true;
        return false;
}
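
/*
 * Ordering example for the comparison as implemented here: a port with
 * the numerically lower priority value sorts first, and on a tie the
 * lower port->index wins, yielding a stable total order per list.
 */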

static void __team_queue_override_port_add(struct team *team,
                                           struct team_port *port)
{
        struct team_port *cur;
        struct list_head *qom_list;
        struct list_head *node;

        if (!port->queue_id || !team_port_enabled(port))
                return;

        qom_list = __team_get_qom_list(team, port->queue_id);
        node = qom_list;
        list_for_each_entry(cur, qom_list, qom_list) {
                if (team_queue_override_port_has_gt_prio_than(port, cur))
                        break;
                node = &cur->qom_list;
        }
        list_add_tail_rcu(&port->qom_list, node);
}

static void __team_queue_override_enabled_check(struct team *team)
{
        struct team_port *port;
        bool enabled = false;

        list_for_each_entry(port, &team->port_list, list) {
                if (!list_empty(&port->qom_list)) {
                        enabled = true;
                        break;
                }
        }
        if (enabled == team->queue_override_enabled)
                return;
        netdev_dbg(team->dev, "%s queue override\n",
                   enabled ? "Enabling" : "Disabling");
        team->queue_override_enabled = enabled;
}

static void team_queue_override_port_refresh(struct team *team,
                                             struct team_port *port)
{
        __team_queue_override_port_del(team, port);
        __team_queue_override_port_add(team, port);
        __team_queue_override_enabled_check(team);
}


/****************
 * Port handling
 ****************/

static bool team_port_find(const struct team *team,
                           const struct team_port *port)
{
        struct team_port *cur;

        list_for_each_entry(cur, &team->port_list, list)
                if (cur == port)
                        return true;
        return false;
}

/*
 * Enable/disable port by adding to enabled port hashlist and setting
 * port->index (this might be racy, so a reader could see a stale index
 * while processing an in-flight packet, but that is not a problem).
 * Writes are guarded by team->lock.
 */
static void team_port_enable(struct team *team,
                             struct team_port *port)
{
        if (team_port_enabled(port))
                return;
        port->index = team->en_port_count++;
        hlist_add_head_rcu(&port->hlist,
                           team_port_index_hash(team, port->index));
        team_adjust_ops(team);
        team_queue_override_port_refresh(team, port);
        if (team->ops.port_enabled)
                team->ops.port_enabled(team, port);
}

static void __reconstruct_port_hlist(struct team *team, int rm_index)
{
        int i;
        struct team_port *port;

        for (i = rm_index + 1; i < team->en_port_count; i++) {
                port = team_get_port_by_index(team, i);
                hlist_del_rcu(&port->hlist);
                port->index--;
                hlist_add_head_rcu(&port->hlist,
                                   team_port_index_hash(team, port->index));
        }
}

static void team_port_disable(struct team *team,
                              struct team_port *port)
{
        if (!team_port_enabled(port))
                return;
        if (team->ops.port_disabled)
                team->ops.port_disabled(team, port);
        hlist_del_rcu(&port->hlist);
        __reconstruct_port_hlist(team, port->index);
        port->index = -1;
        team_queue_override_port_refresh(team, port);
        __team_adjust_ops(team, team->en_port_count - 1);
        /*
         * Wait until readers see adjusted ops. This ensures that
         * readers never see team->en_port_count == 0
         */
        synchronize_rcu();
        team->en_port_count--;
}

#define TEAM_VLAN_FEATURES (NETIF_F_ALL_CSUM | NETIF_F_SG | \
                            NETIF_F_FRAGLIST | NETIF_F_ALL_TSO | \
                            NETIF_F_HIGHDMA | NETIF_F_LRO)

static void __team_compute_features(struct team *team)
{
        struct team_port *port;
        u32 vlan_features = TEAM_VLAN_FEATURES;
        unsigned short max_hard_header_len = ETH_HLEN;
        unsigned int flags, dst_release_flag = IFF_XMIT_DST_RELEASE;

        list_for_each_entry(port, &team->port_list, list) {
                vlan_features = netdev_increment_features(vlan_features,
                                        port->dev->vlan_features,
                                        TEAM_VLAN_FEATURES);

                dst_release_flag &= port->dev->priv_flags;
                if (port->dev->hard_header_len > max_hard_header_len)
                        max_hard_header_len = port->dev->hard_header_len;
        }

        team->dev->vlan_features = vlan_features;
        team->dev->hard_header_len = max_hard_header_len;

        flags = team->dev->priv_flags & ~IFF_XMIT_DST_RELEASE;
        team->dev->priv_flags = flags | dst_release_flag;

        netdev_change_features(team->dev);
}

static void team_compute_features(struct team *team)
{
        mutex_lock(&team->lock);
        __team_compute_features(team);
        mutex_unlock(&team->lock);
}

static int team_port_enter(struct team *team, struct team_port *port)
{
        int err = 0;

        dev_hold(team->dev);
        port->dev->priv_flags |= IFF_TEAM_PORT;
        if (team->ops.port_enter) {
                err = team->ops.port_enter(team, port);
                if (err) {
                        netdev_err(team->dev, "Device %s failed to enter team mode\n",
                                   port->dev->name);
                        goto err_port_enter;
                }
        }

        return 0;

err_port_enter:
        port->dev->priv_flags &= ~IFF_TEAM_PORT;
        dev_put(team->dev);

        return err;
}

static void team_port_leave(struct team *team, struct team_port *port)
{
        if (team->ops.port_leave)
                team->ops.port_leave(team, port);
        port->dev->priv_flags &= ~IFF_TEAM_PORT;
        dev_put(team->dev);
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static int team_port_enable_netpoll(struct team *team, struct team_port *port,
                                    gfp_t gfp)
{
        struct netpoll *np;
        int err;

        np = kzalloc(sizeof(*np), gfp);
        if (!np)
                return -ENOMEM;

        err = __netpoll_setup(np, port->dev, gfp);
        if (err) {
                kfree(np);
                return err;
        }
        port->np = np;
        return err;
}

static void team_port_disable_netpoll(struct team_port *port)
{
        struct netpoll *np = port->np;

        if (!np)
                return;
        port->np = NULL;

        /* Wait for transmitting packets to finish before freeing. */
        synchronize_rcu_bh();
        __netpoll_cleanup(np);
        kfree(np);
}

static struct netpoll_info *team_netpoll_info(struct team *team)
{
        return team->dev->npinfo;
}

#else
static int team_port_enable_netpoll(struct team *team, struct team_port *port,
                                    gfp_t gfp)
{
        return 0;
}
static void team_port_disable_netpoll(struct team_port *port)
{
}
static struct netpoll_info *team_netpoll_info(struct team *team)
{
        return NULL;
}
#endif

static void __team_port_change_port_added(struct team_port *port, bool linkup);
static int team_dev_type_check_change(struct net_device *dev,
                                      struct net_device *port_dev);

static int team_port_add(struct team *team, struct net_device *port_dev)
{
        struct net_device *dev = team->dev;
        struct team_port *port;
        char *portname = port_dev->name;
        int err;

        if (port_dev->flags & IFF_LOOPBACK) {
                netdev_err(dev, "Device %s is loopback device. Loopback devices can't be added as a team port\n",
                           portname);
                return -EINVAL;
        }

        if (team_port_exists(port_dev)) {
                netdev_err(dev, "Device %s is already a port of a team device\n",
                           portname);
                return -EBUSY;
        }

        if (port_dev->features & NETIF_F_VLAN_CHALLENGED &&
            vlan_uses_dev(dev)) {
                netdev_err(dev, "Device %s is VLAN challenged and team device has VLAN set up\n",
                           portname);
                return -EPERM;
        }

        err = team_dev_type_check_change(dev, port_dev);
        if (err)
                return err;

        if (port_dev->flags & IFF_UP) {
                netdev_err(dev, "Device %s is up. Set it down before adding it as a team port\n",
                           portname);
                return -EBUSY;
        }

        port = kzalloc(sizeof(struct team_port) + team->mode->port_priv_size,
                       GFP_KERNEL);
        if (!port)
                return -ENOMEM;

        port->dev = port_dev;
        port->team = team;
        INIT_LIST_HEAD(&port->qom_list);

        port->orig.mtu = port_dev->mtu;
        err = dev_set_mtu(port_dev, dev->mtu);
        if (err) {
                netdev_dbg(dev, "Error %d calling dev_set_mtu\n", err);
                goto err_set_mtu;
        }

        memcpy(port->orig.dev_addr, port_dev->dev_addr, port_dev->addr_len);

        err = team_port_enter(team, port);
        if (err) {
                netdev_err(dev, "Device %s failed to enter team mode\n",
                           portname);
                goto err_port_enter;
        }

        err = dev_open(port_dev);
        if (err) {
                netdev_dbg(dev, "Device %s opening failed\n",
                           portname);
                goto err_dev_open;
        }

        err = vlan_vids_add_by_dev(port_dev, dev);
        if (err) {
                netdev_err(dev, "Failed to add vlan ids to device %s\n",
                                portname);
                goto err_vids_add;
        }

        if (team_netpoll_info(team)) {
                err = team_port_enable_netpoll(team, port, GFP_KERNEL);
                if (err) {
                        netdev_err(dev, "Failed to enable netpoll on device %s\n",
                                   portname);
                        goto err_enable_netpoll;
                }
        }

        err = netdev_master_upper_dev_link(port_dev, dev);
        if (err) {
                netdev_err(dev, "Device %s failed to set upper link\n",
                           portname);
                goto err_set_upper_link;
        }

        err = netdev_rx_handler_register(port_dev, team_handle_frame,
                                         port);
        if (err) {
                netdev_err(dev, "Device %s failed to register rx_handler\n",
                           portname);
                goto err_handler_register;
        }

        err = __team_option_inst_add_port(team, port);
        if (err) {
                netdev_err(dev, "Device %s failed to add per-port options\n",
                           portname);
                goto err_option_port_add;
        }

        port->index = -1;
        list_add_tail_rcu(&port->list, &team->port_list);
        team_port_enable(team, port);
        __team_compute_features(team);
        __team_port_change_port_added(port, !!netif_carrier_ok(port_dev));
        __team_options_change_check(team);

        netdev_info(dev, "Port device %s added\n", portname);

        return 0;

err_option_port_add:
        netdev_rx_handler_unregister(port_dev);

err_handler_register:
        netdev_upper_dev_unlink(port_dev, dev);

err_set_upper_link:
        team_port_disable_netpoll(port);

err_enable_netpoll:
        vlan_vids_del_by_dev(port_dev, dev);

err_vids_add:
        dev_close(port_dev);

err_dev_open:
        team_port_leave(team, port);
        team_port_set_orig_dev_addr(port);

err_port_enter:
        dev_set_mtu(port_dev, port->orig.mtu);

err_set_mtu:
        kfree(port);

        return err;
}

static void __team_port_change_port_removed(struct team_port *port);

static int team_port_del(struct team *team, struct net_device *port_dev)
{
        struct net_device *dev = team->dev;
        struct team_port *port;
        char *portname = port_dev->name;

        port = team_port_get_rtnl(port_dev);
        if (!port || !team_port_find(team, port)) {
                netdev_err(dev, "Device %s does not act as a port of this team\n",
                           portname);
                return -ENOENT;
        }

        team_port_disable(team, port);
        list_del_rcu(&port->list);
        netdev_rx_handler_unregister(port_dev);
        netdev_upper_dev_unlink(port_dev, dev);
        team_port_disable_netpoll(port);
        vlan_vids_del_by_dev(port_dev, dev);
        dev_uc_unsync(port_dev, dev);
        dev_mc_unsync(port_dev, dev);
        dev_close(port_dev);
        team_port_leave(team, port);

        __team_option_inst_mark_removed_port(team, port);
        __team_options_change_check(team);
        __team_option_inst_del_port(team, port);
        __team_port_change_port_removed(port);

        team_port_set_orig_dev_addr(port);
        dev_set_mtu(port_dev, port->orig.mtu);
        synchronize_rcu();
        kfree(port);
        netdev_info(dev, "Port device %s removed\n", portname);
        __team_compute_features(team);

        return 0;
}


/*****************
 * Net device ops
 *****************/

static int team_mode_option_get(struct team *team, struct team_gsetter_ctx *ctx)
{
        ctx->data.str_val = team->mode->kind;
        return 0;
}

static int team_mode_option_set(struct team *team, struct team_gsetter_ctx *ctx)
{
        return team_change_mode(team, ctx->data.str_val);
}

static int team_port_en_option_get(struct team *team,
                                   struct team_gsetter_ctx *ctx)
{
        struct team_port *port = ctx->info->port;

        ctx->data.bool_val = team_port_enabled(port);
        return 0;
}

static int team_port_en_option_set(struct team *team,
                                   struct team_gsetter_ctx *ctx)
{
        struct team_port *port = ctx->info->port;

        if (ctx->data.bool_val)
                team_port_enable(team, port);
        else
                team_port_disable(team, port);
        return 0;
}

static int team_user_linkup_option_get(struct team *team,
                                       struct team_gsetter_ctx *ctx)
{
        struct team_port *port = ctx->info->port;

        ctx->data.bool_val = port->user.linkup;
        return 0;
}

static int team_user_linkup_option_set(struct team *team,
                                       struct team_gsetter_ctx *ctx)
{
        struct team_port *port = ctx->info->port;

        port->user.linkup = ctx->data.bool_val;
        team_refresh_port_linkup(port);
        return 0;
}

static int team_user_linkup_en_option_get(struct team *team,
                                          struct team_gsetter_ctx *ctx)
{
        struct team_port *port = ctx->info->port;

        ctx->data.bool_val = port->user.linkup_enabled;
        return 0;
}

static int team_user_linkup_en_option_set(struct team *team,
                                          struct team_gsetter_ctx *ctx)
{
        struct team_port *port = ctx->info->port;

        port->user.linkup_enabled = ctx->data.bool_val;
        team_refresh_port_linkup(port);
        return 0;
}

static int team_priority_option_get(struct team *team,
                                    struct team_gsetter_ctx *ctx)
{
        struct team_port *port = ctx->info->port;

        ctx->data.s32_val = port->priority;
        return 0;
}

static int team_priority_option_set(struct team *team,
                                    struct team_gsetter_ctx *ctx)
{
        struct team_port *port = ctx->info->port;

        port->priority = ctx->data.s32_val;
        team_queue_override_port_refresh(team, port);
        return 0;
}

static int team_queue_id_option_get(struct team *team,
                                    struct team_gsetter_ctx *ctx)
{
        struct team_port *port = ctx->info->port;

        ctx->data.u32_val = port->queue_id;
        return 0;
}

static int team_queue_id_option_set(struct team *team,
                                    struct team_gsetter_ctx *ctx)
{
        struct team_port *port = ctx->info->port;

        if (port->queue_id == ctx->data.u32_val)
                return 0;
        if (ctx->data.u32_val >= team->dev->real_num_tx_queues)
                return -EINVAL;
        port->queue_id = ctx->data.u32_val;
        team_queue_override_port_refresh(team, port);
        return 0;
}


static const struct team_option team_options[] = {
        {
                .name = "mode",
                .type = TEAM_OPTION_TYPE_STRING,
                .getter = team_mode_option_get,
                .setter = team_mode_option_set,
        },
        {
                .name = "enabled",
                .type = TEAM_OPTION_TYPE_BOOL,
                .per_port = true,
                .getter = team_port_en_option_get,
                .setter = team_port_en_option_set,
        },
        {
                .name = "user_linkup",
                .type = TEAM_OPTION_TYPE_BOOL,
                .per_port = true,
                .getter = team_user_linkup_option_get,
                .setter = team_user_linkup_option_set,
        },
        {
                .name = "user_linkup_enabled",
                .type = TEAM_OPTION_TYPE_BOOL,
                .per_port = true,
                .getter = team_user_linkup_en_option_get,
                .setter = team_user_linkup_en_option_set,
        },
        {
                .name = "priority",
                .type = TEAM_OPTION_TYPE_S32,
                .per_port = true,
                .getter = team_priority_option_get,
                .setter = team_priority_option_set,
        },
        {
                .name = "queue_id",
                .type = TEAM_OPTION_TYPE_U32,
                .per_port = true,
                .getter = team_queue_id_option_get,
                .setter = team_queue_id_option_set,
        },
};
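
/*
 * These built-in options are exposed to userspace over the "team"
 * generic netlink family (handled later in this file) and are what
 * tools such as teamd/libteam manipulate; setting the "mode" string
 * option, for example, ends up in team_mode_option_set() ->
 * team_change_mode().
 */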

static struct lock_class_key team_netdev_xmit_lock_key;
static struct lock_class_key team_netdev_addr_lock_key;
static struct lock_class_key team_tx_busylock_key;

static void team_set_lockdep_class_one(struct net_device *dev,
                                       struct netdev_queue *txq,
                                       void *unused)
{
        lockdep_set_class(&txq->_xmit_lock, &team_netdev_xmit_lock_key);
}

static void team_set_lockdep_class(struct net_device *dev)
{
        lockdep_set_class(&dev->addr_list_lock, &team_netdev_addr_lock_key);
        netdev_for_each_tx_queue(dev, team_set_lockdep_class_one, NULL);
        dev->qdisc_tx_busylock = &team_tx_busylock_key;
}

static int team_init(struct net_device *dev)
{
        struct team *team = netdev_priv(dev);
        int i;
        int err;

        team->dev = dev;
        mutex_init(&team->lock);
        team_set_no_mode(team);

        team->pcpu_stats = alloc_percpu(struct team_pcpu_stats);
        if (!team->pcpu_stats)
                return -ENOMEM;

        for (i = 0; i < TEAM_PORT_HASHENTRIES; i++)
                INIT_HLIST_HEAD(&team->en_port_hlist[i]);
        INIT_LIST_HEAD(&team->port_list);
        err = team_queue_override_init(team);
        if (err)
                goto err_team_queue_override_init;

        team_adjust_ops(team);

        INIT_LIST_HEAD(&team->option_list);
        INIT_LIST_HEAD(&team->option_inst_list);
        err = team_options_register(team, team_options, ARRAY_SIZE(team_options));
        if (err)
                goto err_options_register;
        netif_carrier_off(dev);

        team_set_lockdep_class(dev);

        return 0;

err_options_register:
        team_queue_override_fini(team);
err_team_queue_override_init:
        free_percpu(team->pcpu_stats);

        return err;
}

static void team_uninit(struct net_device *dev)
{
        struct team *team = netdev_priv(dev);
        struct team_port *port;
        struct team_port *tmp;

        mutex_lock(&team->lock);
        list_for_each_entry_safe(port, tmp, &team->port_list, list)
                team_port_del(team, port->dev);

        __team_change_mode(team, NULL); /* cleanup */
        __team_options_unregister(team, team_options, ARRAY_SIZE(team_options));
        team_queue_override_fini(team);
        mutex_unlock(&team->lock);
}

static void team_destructor(struct net_device *dev)
{
        struct team *team = netdev_priv(dev);

        free_percpu(team->pcpu_stats);
        free_netdev(dev);
}

static int team_open(struct net_device *dev)
{
        return 0;
}

static int team_close(struct net_device *dev)
{
        return 0;
}

/*
 * note: already called with rcu_read_lock
 */
static netdev_tx_t team_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct team *team = netdev_priv(dev);
        bool tx_success;
        unsigned int len = skb->len;

        tx_success = team_queue_override_transmit(team, skb);
        if (!tx_success)
                tx_success = team->ops.transmit(team, skb);
        if (tx_success) {
                struct team_pcpu_stats *pcpu_stats;

                pcpu_stats = this_cpu_ptr(team->pcpu_stats);
                u64_stats_update_begin(&pcpu_stats->syncp);
                pcpu_stats->tx_packets++;
                pcpu_stats->tx_bytes += len;
                u64_stats_update_end(&pcpu_stats->syncp);
        } else {
                this_cpu_inc(team->pcpu_stats->tx_dropped);
        }

        return NETDEV_TX_OK;
}

static u16 team_select_queue(struct net_device *dev, struct sk_buff *skb)
{
        /*
         * This helper function exists to help dev_pick_tx get the correct
         * destination queue.  Using a helper function skips a call to
         * skb_tx_hash and will put the skbs in the queue we expect on their
         * way down to the team driver.
         */
        u16 txq = skb_rx_queue_recorded(skb) ? skb_get_rx_queue(skb) : 0;

        /*
         * Save the original txq to restore before passing to the driver
         */
        qdisc_skb_cb(skb)->slave_dev_queue_mapping = skb->queue_mapping;

        if (unlikely(txq >= dev->real_num_tx_queues)) {
                do {
                        txq -= dev->real_num_tx_queues;
                } while (txq >= dev->real_num_tx_queues);
        }
        return txq;
}
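
/*
 * Worked example for the wraparound above: with real_num_tx_queues == 4
 * and a recorded rx queue of 9, txq becomes 9 - 4 = 5, then 5 - 4 = 1,
 * so the skb goes out on tx queue 1.
 */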

static void team_change_rx_flags(struct net_device *dev, int change)
{
        struct team *team = netdev_priv(dev);
        struct team_port *port;
        int inc;

        rcu_read_lock();
        list_for_each_entry_rcu(port, &team->port_list, list) {
                if (change & IFF_PROMISC) {
                        inc = dev->flags & IFF_PROMISC ? 1 : -1;
                        dev_set_promiscuity(port->dev, inc);
                }
                if (change & IFF_ALLMULTI) {
                        inc = dev->flags & IFF_ALLMULTI ? 1 : -1;
                        dev_set_allmulti(port->dev, inc);
                }
        }
        rcu_read_unlock();
}

static void team_set_rx_mode(struct net_device *dev)
{
        struct team *team = netdev_priv(dev);
        struct team_port *port;

        rcu_read_lock();
        list_for_each_entry_rcu(port, &team->port_list, list) {
                dev_uc_sync_multiple(port->dev, dev);
                dev_mc_sync_multiple(port->dev, dev);
        }
        rcu_read_unlock();
}

static int team_set_mac_address(struct net_device *dev, void *p)
{
        struct sockaddr *addr = p;
        struct team *team = netdev_priv(dev);
        struct team_port *port;

        if (dev->type == ARPHRD_ETHER && !is_valid_ether_addr(addr->sa_data))
                return -EADDRNOTAVAIL;
        memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
        rcu_read_lock();
        list_for_each_entry_rcu(port, &team->port_list, list)
                if (team->ops.port_change_dev_addr)
                        team->ops.port_change_dev_addr(team, port);
        rcu_read_unlock();
        return 0;
}

static int team_change_mtu(struct net_device *dev, int new_mtu)
{
        struct team *team = netdev_priv(dev);
        struct team_port *port;
        int err;

        /*
         * Although this is a reader, it's guarded by team lock. It's not
         * possible to traverse the list in reverse under rcu_read_lock.
         */
        mutex_lock(&team->lock);
        list_for_each_entry(port, &team->port_list, list) {
                err = dev_set_mtu(port->dev, new_mtu);
                if (err) {
                        netdev_err(dev, "Device %s failed to change mtu",
                                   port->dev->name);
                        goto unwind;
                }
        }
        mutex_unlock(&team->lock);

        dev->mtu = new_mtu;

        return 0;

unwind:
        list_for_each_entry_continue_reverse(port, &team->port_list, list)
                dev_set_mtu(port->dev, dev->mtu);
        mutex_unlock(&team->lock);

        return err;
}

static struct rtnl_link_stats64 *
team_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
        struct team *team = netdev_priv(dev);
        struct team_pcpu_stats *p;
        u64 rx_packets, rx_bytes, rx_multicast, tx_packets, tx_bytes;
        u32 rx_dropped = 0, tx_dropped = 0;
        unsigned int start;
        int i;

        for_each_possible_cpu(i) {
                p = per_cpu_ptr(team->pcpu_stats, i);
                do {
                        start = u64_stats_fetch_begin_bh(&p->syncp);
                        rx_packets      = p->rx_packets;
                        rx_bytes        = p->rx_bytes;
                        rx_multicast    = p->rx_multicast;
                        tx_packets      = p->tx_packets;
                        tx_bytes        = p->tx_bytes;
                } while (u64_stats_fetch_retry_bh(&p->syncp, start));

                stats->rx_packets       += rx_packets;
                stats->rx_bytes         += rx_bytes;
                stats->multicast        += rx_multicast;
                stats->tx_packets       += tx_packets;
                stats->tx_bytes         += tx_bytes;
                /*
                 * rx_dropped & tx_dropped are u32, updated
                 * without syncp protection.
                 */
                rx_dropped      += p->rx_dropped;
                tx_dropped      += p->tx_dropped;
        }
        stats->rx_dropped       = rx_dropped;
        stats->tx_dropped       = tx_dropped;
        return stats;
}
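
/*
 * The fetch_begin/retry loop above is the usual u64_stats seqcount
 * pattern: the per-cpu counters are re-read if an updater ran
 * concurrently, keeping 64-bit values consistent even on 32-bit hosts.
 */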

static int team_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
{
        struct team *team = netdev_priv(dev);
        struct team_port *port;
        int err;

        /*
         * Although this is a reader, it's guarded by team lock. It's not
         * possible to traverse the list in reverse under rcu_read_lock.
         */
        mutex_lock(&team->lock);
        list_for_each_entry(port, &team->port_list, list) {
                err = vlan_vid_add(port->dev, proto, vid);
                if (err)
                        goto unwind;
        }
        mutex_unlock(&team->lock);

        return 0;

unwind:
        list_for_each_entry_continue_reverse(port, &team->port_list, list)
                vlan_vid_del(port->dev, proto, vid);
        mutex_unlock(&team->lock);

        return err;
}

static int team_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
{
        struct team *team = netdev_priv(dev);
        struct team_port *port;

        rcu_read_lock();
        list_for_each_entry_rcu(port, &team->port_list, list)
                vlan_vid_del(port->dev, proto, vid);
        rcu_read_unlock();

        return 0;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void team_poll_controller(struct net_device *dev)
{
}

static void __team_netpoll_cleanup(struct team *team)
{
        struct team_port *port;

        list_for_each_entry(port, &team->port_list, list)
                team_port_disable_netpoll(port);
}

static void team_netpoll_cleanup(struct net_device *dev)
{
        struct team *team = netdev_priv(dev);

        mutex_lock(&team->lock);
        __team_netpoll_cleanup(team);
        mutex_unlock(&team->lock);
}

static int team_netpoll_setup(struct net_device *dev,
1665                              struct netpoll_info *npinfo, gfp_t gfp)
1666{
1667        struct team *team = netdev_priv(dev);
1668        struct team_port *port;
1669        int err = 0;
1670
1671        mutex_lock(&team->lock);
1672        list_for_each_entry(port, &team->port_list, list) {
1673                err = team_port_enable_netpoll(team, port, gfp);
1674                if (err) {
1675                        __team_netpoll_cleanup(team);
1676                        break;
1677                }
1678        }
1679        mutex_unlock(&team->lock);
1680        return err;
1681}
1682#endif
1683
1684static int team_add_slave(struct net_device *dev, struct net_device *port_dev)
1685{
1686        struct team *team = netdev_priv(dev);
1687        int err;
1688
1689        mutex_lock(&team->lock);
1690        err = team_port_add(team, port_dev);
1691        mutex_unlock(&team->lock);
1692        return err;
1693}
1694
1695static int team_del_slave(struct net_device *dev, struct net_device *port_dev)
1696{
1697        struct team *team = netdev_priv(dev);
1698        int err;
1699
1700        mutex_lock(&team->lock);
1701        err = team_port_del(team, port_dev);
1702        mutex_unlock(&team->lock);
1703        return err;
1704}
1705
1706static netdev_features_t team_fix_features(struct net_device *dev,
1707                                           netdev_features_t features)
1708{
1709        struct team_port *port;
1710        struct team *team = netdev_priv(dev);
1711        netdev_features_t mask;
1712
1713        mask = features;
1714        features &= ~NETIF_F_ONE_FOR_ALL;
1715        features |= NETIF_F_ALL_FOR_ALL;
1716
1717        rcu_read_lock();
1718        list_for_each_entry_rcu(port, &team->port_list, list) {
1719                features = netdev_increment_features(features,
1720                                                     port->dev->features,
1721                                                     mask);
1722        }
1723        rcu_read_unlock();
1724        return features;
1725}
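
/*
 * On the mask logic above: bits in NETIF_F_ONE_FOR_ALL are enabled on the
 * team if at least one port supports them (they start cleared and are
 * OR'ed back in), while bits in NETIF_F_ALL_FOR_ALL stay enabled only if
 * every port supports them (they start set and are AND'ed down) as
 * netdev_increment_features() folds in each port.
 */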
1726
1727static int team_change_carrier(struct net_device *dev, bool new_carrier)
1728{
1729        struct team *team = netdev_priv(dev);
1730
1731        team->user_carrier_enabled = true;
1732
1733        if (new_carrier)
1734                netif_carrier_on(dev);
1735        else
1736                netif_carrier_off(dev);
1737        return 0;
1738}
1739
1740static const struct net_device_ops team_netdev_ops = {
1741        .ndo_init               = team_init,
1742        .ndo_uninit             = team_uninit,
1743        .ndo_open               = team_open,
1744        .ndo_stop               = team_close,
1745        .ndo_start_xmit         = team_xmit,
1746        .ndo_select_queue       = team_select_queue,
1747        .ndo_change_rx_flags    = team_change_rx_flags,
1748        .ndo_set_rx_mode        = team_set_rx_mode,
1749        .ndo_set_mac_address    = team_set_mac_address,
1750        .ndo_change_mtu         = team_change_mtu,
1751        .ndo_get_stats64        = team_get_stats64,
1752        .ndo_vlan_rx_add_vid    = team_vlan_rx_add_vid,
1753        .ndo_vlan_rx_kill_vid   = team_vlan_rx_kill_vid,
1754#ifdef CONFIG_NET_POLL_CONTROLLER
1755        .ndo_poll_controller    = team_poll_controller,
1756        .ndo_netpoll_setup      = team_netpoll_setup,
1757        .ndo_netpoll_cleanup    = team_netpoll_cleanup,
1758#endif
1759        .ndo_add_slave          = team_add_slave,
1760        .ndo_del_slave          = team_del_slave,
1761        .ndo_fix_features       = team_fix_features,
1762        .ndo_change_carrier     = team_change_carrier,
1763};
1764
1765/***********************
1766 * ethtool interface
1767 ***********************/
1768
1769static void team_ethtool_get_drvinfo(struct net_device *dev,
1770                                     struct ethtool_drvinfo *drvinfo)
1771{
1772        strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
1773        strlcpy(drvinfo->version, UTS_RELEASE, sizeof(drvinfo->version));
1774}
1775
1776static const struct ethtool_ops team_ethtool_ops = {
1777        .get_drvinfo            = team_ethtool_get_drvinfo,
1778        .get_link               = ethtool_op_get_link,
1779};
1780
1781/***********************
1782 * rt netlink interface
1783 ***********************/
1784
1785static void team_setup_by_port(struct net_device *dev,
1786                               struct net_device *port_dev)
1787{
1788        dev->header_ops = port_dev->header_ops;
1789        dev->type = port_dev->type;
1790        dev->hard_header_len = port_dev->hard_header_len;
1791        dev->addr_len = port_dev->addr_len;
1792        dev->mtu = port_dev->mtu;
1793        memcpy(dev->broadcast, port_dev->broadcast, port_dev->addr_len);
1794        memcpy(dev->dev_addr, port_dev->dev_addr, port_dev->addr_len);
1795}
1796
1797static int team_dev_type_check_change(struct net_device *dev,
1798                                      struct net_device *port_dev)
1799{
1800        struct team *team = netdev_priv(dev);
1801        char *portname = port_dev->name;
1802        int err;
1803
1804        if (dev->type == port_dev->type)
1805                return 0;
1806        if (!list_empty(&team->port_list)) {
1807                netdev_err(dev, "Device %s is of different type\n", portname);
1808                return -EBUSY;
1809        }
1810        err = call_netdevice_notifiers(NETDEV_PRE_TYPE_CHANGE, dev);
1811        err = notifier_to_errno(err);
1812        if (err) {
1813                netdev_err(dev, "Refused to change device type\n");
1814                return err;
1815        }
1816        dev_uc_flush(dev);
1817        dev_mc_flush(dev);
1818        team_setup_by_port(dev, port_dev);
1819        call_netdevice_notifiers(NETDEV_POST_TYPE_CHANGE, dev);
1820        return 0;
1821}
1822
1823static void team_setup(struct net_device *dev)
1824{
1825        ether_setup(dev);
1826
1827        dev->netdev_ops = &team_netdev_ops;
1828        dev->ethtool_ops = &team_ethtool_ops;
1829        dev->destructor = team_destructor;
1830        dev->tx_queue_len = 0;
1831        dev->flags |= IFF_MULTICAST;
1832        dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_TX_SKB_SHARING);
1833
1834        /*
1835         * Indicate we support unicast address filtering. That way the core
1836         * won't force us into promiscuous mode when a unicast address is
1837         * added; leave that decision to the underlying port drivers.
1838         */
1839        dev->priv_flags |= IFF_UNICAST_FLT | IFF_LIVE_ADDR_CHANGE;
1840
1841        dev->features |= NETIF_F_LLTX;
1842        dev->features |= NETIF_F_GRO;
1843        dev->hw_features = TEAM_VLAN_FEATURES |
1844                           NETIF_F_HW_VLAN_CTAG_TX |
1845                           NETIF_F_HW_VLAN_CTAG_RX |
1846                           NETIF_F_HW_VLAN_CTAG_FILTER;
1847
1848        dev->hw_features &= ~(NETIF_F_ALL_CSUM & ~NETIF_F_HW_CSUM);
1849        dev->features |= dev->hw_features;
1850}
1851
1852static int team_newlink(struct net *src_net, struct net_device *dev,
1853                        struct nlattr *tb[], struct nlattr *data[])
1854{
1855        int err;
1856
1857        if (tb[IFLA_ADDRESS] == NULL)
1858                eth_hw_addr_random(dev);
1859
1860        err = register_netdevice(dev);
1861        if (err)
1862                return err;
1863
1864        return 0;
1865}
1866
1867static int team_validate(struct nlattr *tb[], struct nlattr *data[])
1868{
1869        if (tb[IFLA_ADDRESS]) {
1870                if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
1871                        return -EINVAL;
1872                if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
1873                        return -EADDRNOTAVAIL;
1874        }
1875        return 0;
1876}
1877
1878static unsigned int team_get_num_tx_queues(void)
1879{
1880        return TEAM_DEFAULT_NUM_TX_QUEUES;
1881}
1882
1883static unsigned int team_get_num_rx_queues(void)
1884{
1885        return TEAM_DEFAULT_NUM_RX_QUEUES;
1886}
1887
1888static struct rtnl_link_ops team_link_ops __read_mostly = {
1889        .kind                   = DRV_NAME,
1890        .priv_size              = sizeof(struct team),
1891        .setup                  = team_setup,
1892        .newlink                = team_newlink,
1893        .validate               = team_validate,
1894        .get_num_tx_queues      = team_get_num_tx_queues,
1895        .get_num_rx_queues      = team_get_num_rx_queues,
1896};
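
/*
 * Registering team_link_ops makes team devices creatable over rtnetlink;
 * "ip link add name team0 type team" boils down to an RTM_NEWLINK request
 * with IFLA_INFO_KIND set to "team". A rough standalone userspace sketch
 * follows; error handling and reading back the ACK are omitted, and
 * addattr() is a local helper, not a kernel or libc function.
 */
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/netlink.h>
#include <linux/rtnetlink.h>
#include <linux/if_link.h>

static struct rtattr *addattr(struct nlmsghdr *nlh, unsigned short type,
                              const void *data, int len)
{
        struct rtattr *rta = (void *)((char *)nlh + NLMSG_ALIGN(nlh->nlmsg_len));

        rta->rta_type = type;
        rta->rta_len = RTA_LENGTH(len);
        if (data)
                memcpy(RTA_DATA(rta), data, len);
        nlh->nlmsg_len = NLMSG_ALIGN(nlh->nlmsg_len) + RTA_ALIGN(rta->rta_len);
        return rta;
}

int main(void)
{
        char buf[256] = { 0 };
        struct nlmsghdr *nlh = (struct nlmsghdr *)buf;
        struct rtattr *linkinfo;
        int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);

        nlh->nlmsg_len = NLMSG_LENGTH(sizeof(struct ifinfomsg));
        nlh->nlmsg_type = RTM_NEWLINK;
        nlh->nlmsg_flags = NLM_F_REQUEST | NLM_F_CREATE | NLM_F_EXCL | NLM_F_ACK;

        addattr(nlh, IFLA_IFNAME, "team0", sizeof("team0"));
        linkinfo = addattr(nlh, IFLA_LINKINFO, NULL, 0);
        addattr(nlh, IFLA_INFO_KIND, "team", sizeof("team"));
        linkinfo->rta_len = (char *)nlh + nlh->nlmsg_len - (char *)linkinfo;

        send(fd, nlh, nlh->nlmsg_len, 0);
        close(fd);
        return 0;
}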
1897
1898
1899/***********************************
1900 * Generic netlink custom interface
1901 ***********************************/
1902
1903static struct genl_family team_nl_family = {
1904        .id             = GENL_ID_GENERATE,
1905        .name           = TEAM_GENL_NAME,
1906        .version        = TEAM_GENL_VERSION,
1907        .maxattr        = TEAM_ATTR_MAX,
1908        .netnsok        = true,
1909};
1910
1911static const struct nla_policy team_nl_policy[TEAM_ATTR_MAX + 1] = {
1912        [TEAM_ATTR_UNSPEC]                      = { .type = NLA_UNSPEC, },
1913        [TEAM_ATTR_TEAM_IFINDEX]                = { .type = NLA_U32 },
1914        [TEAM_ATTR_LIST_OPTION]                 = { .type = NLA_NESTED },
1915        [TEAM_ATTR_LIST_PORT]                   = { .type = NLA_NESTED },
1916};
1917
1918static const struct nla_policy
1919team_nl_option_policy[TEAM_ATTR_OPTION_MAX + 1] = {
1920        [TEAM_ATTR_OPTION_UNSPEC]               = { .type = NLA_UNSPEC, },
1921        [TEAM_ATTR_OPTION_NAME] = {
1922                .type = NLA_STRING,
1923                .len = TEAM_STRING_MAX_LEN,
1924        },
1925        [TEAM_ATTR_OPTION_CHANGED]              = { .type = NLA_FLAG },
1926        [TEAM_ATTR_OPTION_TYPE]                 = { .type = NLA_U8 },
1927        [TEAM_ATTR_OPTION_DATA]                 = { .type = NLA_BINARY },
1928};
1929
1930static int team_nl_cmd_noop(struct sk_buff *skb, struct genl_info *info)
1931{
1932        struct sk_buff *msg;
1933        void *hdr;
1934        int err;
1935
1936        msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
1937        if (!msg)
1938                return -ENOMEM;
1939
1940        hdr = genlmsg_put(msg, info->snd_portid, info->snd_seq,
1941                          &team_nl_family, 0, TEAM_CMD_NOOP);
1942        if (!hdr) {
1943                err = -EMSGSIZE;
1944                goto err_msg_put;
1945        }
1946
1947        genlmsg_end(msg, hdr);
1948
1949        return genlmsg_unicast(genl_info_net(info), msg, info->snd_portid);
1950
1951err_msg_put:
1952        nlmsg_free(msg);
1953
1954        return err;
1955}
1956
1957/*
1958 * Netlink cmd functions should be surrounded by the following two functions.
1959 * Taking a reference on dev here ensures it won't disappear in between.
1960 */
1961static struct team *team_nl_team_get(struct genl_info *info)
1962{
1963        struct net *net = genl_info_net(info);
1964        int ifindex;
1965        struct net_device *dev;
1966        struct team *team;
1967
1968        if (!info->attrs[TEAM_ATTR_TEAM_IFINDEX])
1969                return NULL;
1970
1971        ifindex = nla_get_u32(info->attrs[TEAM_ATTR_TEAM_IFINDEX]);
1972        dev = dev_get_by_index(net, ifindex);
1973        if (!dev || dev->netdev_ops != &team_netdev_ops) {
1974                if (dev)
1975                        dev_put(dev);
1976                return NULL;
1977        }
1978
1979        team = netdev_priv(dev);
1980        mutex_lock(&team->lock);
1981        return team;
1982}
1983
1984static void team_nl_team_put(struct team *team)
1985{
1986        mutex_unlock(&team->lock);
1987        dev_put(team->dev);
1988}
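
/*
 * A minimal sketch of that calling pattern; team_nl_cmd_example() is
 * hypothetical and not part of this driver.
 */
static int team_nl_cmd_example(struct sk_buff *skb, struct genl_info *info)
{
        struct team *team;

        team = team_nl_team_get(info);  /* takes a dev ref, locks team->lock */
        if (!team)
                return -EINVAL;
        /* ... safely read or modify team state here ... */
        team_nl_team_put(team);         /* unlocks, then drops the dev ref */
        return 0;
}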
1989
1990typedef int team_nl_send_func_t(struct sk_buff *skb,
1991                                struct team *team, u32 portid);
1992
1993static int team_nl_send_unicast(struct sk_buff *skb, struct team *team, u32 portid)
1994{
1995        return genlmsg_unicast(dev_net(team->dev), skb, portid);
1996}
1997
1998static int team_nl_fill_one_option_get(struct sk_buff *skb, struct team *team,
1999                                       struct team_option_inst *opt_inst)
2000{
2001        struct nlattr *option_item;
2002        struct team_option *option = opt_inst->option;
2003        struct team_option_inst_info *opt_inst_info = &opt_inst->info;
2004        struct team_gsetter_ctx ctx;
2005        int err;
2006
2007        ctx.info = opt_inst_info;
2008        err = team_option_get(team, opt_inst, &ctx);
2009        if (err)
2010                return err;
2011
2012        option_item = nla_nest_start(skb, TEAM_ATTR_ITEM_OPTION);
2013        if (!option_item)
2014                return -EMSGSIZE;
2015
2016        if (nla_put_string(skb, TEAM_ATTR_OPTION_NAME, option->name))
2017                goto nest_cancel;
2018        if (opt_inst_info->port &&
2019            nla_put_u32(skb, TEAM_ATTR_OPTION_PORT_IFINDEX,
2020                        opt_inst_info->port->dev->ifindex))
2021                goto nest_cancel;
2022        if (opt_inst->option->array_size &&
2023            nla_put_u32(skb, TEAM_ATTR_OPTION_ARRAY_INDEX,
2024                        opt_inst_info->array_index))
2025                goto nest_cancel;
2026
2027        switch (option->type) {
2028        case TEAM_OPTION_TYPE_U32:
2029                if (nla_put_u8(skb, TEAM_ATTR_OPTION_TYPE, NLA_U32))
2030                        goto nest_cancel;
2031                if (nla_put_u32(skb, TEAM_ATTR_OPTION_DATA, ctx.data.u32_val))
2032                        goto nest_cancel;
2033                break;
2034        case TEAM_OPTION_TYPE_STRING:
2035                if (nla_put_u8(skb, TEAM_ATTR_OPTION_TYPE, NLA_STRING))
2036                        goto nest_cancel;
2037                if (nla_put_string(skb, TEAM_ATTR_OPTION_DATA,
2038                                   ctx.data.str_val))
2039                        goto nest_cancel;
2040                break;
2041        case TEAM_OPTION_TYPE_BINARY:
2042                if (nla_put_u8(skb, TEAM_ATTR_OPTION_TYPE, NLA_BINARY))
2043                        goto nest_cancel;
2044                if (nla_put(skb, TEAM_ATTR_OPTION_DATA, ctx.data.bin_val.len,
2045                            ctx.data.bin_val.ptr))
2046                        goto nest_cancel;
2047                break;
2048        case TEAM_OPTION_TYPE_BOOL:
2049                if (nla_put_u8(skb, TEAM_ATTR_OPTION_TYPE, NLA_FLAG))
2050                        goto nest_cancel;
2051                if (ctx.data.bool_val &&
2052                    nla_put_flag(skb, TEAM_ATTR_OPTION_DATA))
2053                        goto nest_cancel;
2054                break;
2055        case TEAM_OPTION_TYPE_S32:
2056                if (nla_put_u8(skb, TEAM_ATTR_OPTION_TYPE, NLA_S32))
2057                        goto nest_cancel;
2058                if (nla_put_s32(skb, TEAM_ATTR_OPTION_DATA, ctx.data.s32_val))
2059                        goto nest_cancel;
2060                break;
2061        default:
2062                BUG();
2063        }
2064        if (opt_inst->removed && nla_put_flag(skb, TEAM_ATTR_OPTION_REMOVED))
2065                goto nest_cancel;
2066        if (opt_inst->changed) {
2067                if (nla_put_flag(skb, TEAM_ATTR_OPTION_CHANGED))
2068                        goto nest_cancel;
2069                opt_inst->changed = false;
2070        }
2071        nla_nest_end(skb, option_item);
2072        return 0;
2073
2074nest_cancel:
2075        nla_nest_cancel(skb, option_item);
2076        return -EMSGSIZE;
2077}
2078
2079static int __send_and_alloc_skb(struct sk_buff **pskb,
2080                                struct team *team, u32 portid,
2081                                team_nl_send_func_t *send_func)
2082{
2083        int err;
2084
2085        if (*pskb) {
2086                err = send_func(*pskb, team, portid);
2087                if (err)
2088                        return err;
2089        }
2090        *pskb = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL);
2091        if (!*pskb)
2092                return -ENOMEM;
2093        return 0;
2094}
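
/*
 * The helper above implements the flush-and-reallocate step used by the
 * multipart dumps below: when the current skb fills up (a fill function
 * returns -EMSGSIZE), the partial skb is sent via send_func and a fresh
 * one is allocated so the dump can continue where it stopped.
 */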
2095
2096static int team_nl_send_options_get(struct team *team, u32 portid, u32 seq,
2097                                    int flags, team_nl_send_func_t *send_func,
2098                                    struct list_head *sel_opt_inst_list)
2099{
2100        struct nlattr *option_list;
2101        struct nlmsghdr *nlh;
2102        void *hdr;
2103        struct team_option_inst *opt_inst;
2104        int err;
2105        struct sk_buff *skb = NULL;
2106        bool incomplete;
2107        int i;
2108
2109        opt_inst = list_first_entry(sel_opt_inst_list,
2110                                    struct team_option_inst, tmp_list);
2111
2112start_again:
2113        err = __send_and_alloc_skb(&skb, team, portid, send_func);
2114        if (err)
2115                return err;
2116
2117        hdr = genlmsg_put(skb, portid, seq, &team_nl_family, flags | NLM_F_MULTI,
2118                          TEAM_CMD_OPTIONS_GET);
2119        if (!hdr) {
2120                nlmsg_free(skb);
                    return -EMSGSIZE;
            }
2121
2122        if (nla_put_u32(skb, TEAM_ATTR_TEAM_IFINDEX, team->dev->ifindex))
2123                goto nla_put_failure;
2124        option_list = nla_nest_start(skb, TEAM_ATTR_LIST_OPTION);
2125        if (!option_list)
2126                goto nla_put_failure;
2127
2128        i = 0;
2129        incomplete = false;
2130        list_for_each_entry_from(opt_inst, sel_opt_inst_list, tmp_list) {
2131                err = team_nl_fill_one_option_get(skb, team, opt_inst);
2132                if (err) {
2133                        if (err == -EMSGSIZE) {
2134                                if (!i)
2135                                        goto errout;
2136                                incomplete = true;
2137                                break;
2138                        }
2139                        goto errout;
2140                }
2141                i++;
2142        }
2143
2144        nla_nest_end(skb, option_list);
2145        genlmsg_end(skb, hdr);
2146        if (incomplete)
2147                goto start_again;
2148
2149send_done:
2150        nlh = nlmsg_put(skb, portid, seq, NLMSG_DONE, 0, flags | NLM_F_MULTI);
2151        if (!nlh) {
2152                err = __send_and_alloc_skb(&skb, team, portid, send_func);
2153                if (err)
2154                        goto errout;
2155                goto send_done;
2156        }
2157
2158        return send_func(skb, team, portid);
2159
2160nla_put_failure:
2161        err = -EMSGSIZE;
2162errout:
2163        genlmsg_cancel(skb, hdr);
2164        nlmsg_free(skb);
2165        return err;
2166}
2167
2168static int team_nl_cmd_options_get(struct sk_buff *skb, struct genl_info *info)
2169{
2170        struct team *team;
2171        struct team_option_inst *opt_inst;
2172        int err;
2173        LIST_HEAD(sel_opt_inst_list);
2174
2175        team = team_nl_team_get(info);
2176        if (!team)
2177                return -EINVAL;
2178
2179        list_for_each_entry(opt_inst, &team->option_inst_list, list)
2180                list_add_tail(&opt_inst->tmp_list, &sel_opt_inst_list);
2181        err = team_nl_send_options_get(team, info->snd_portid, info->snd_seq,
2182                                       NLM_F_ACK, team_nl_send_unicast,
2183                                       &sel_opt_inst_list);
2184
2185        team_nl_team_put(team);
2186
2187        return err;
2188}
2189
2190static int team_nl_send_event_options_get(struct team *team,
2191                                          struct list_head *sel_opt_inst_list);
2192
2193static int team_nl_cmd_options_set(struct sk_buff *skb, struct genl_info *info)
2194{
2195        struct team *team;
2196        int err = 0;
2197        int i;
2198        struct nlattr *nl_option;
2199        LIST_HEAD(opt_inst_list);
2200
2201        team = team_nl_team_get(info);
2202        if (!team)
2203                return -EINVAL;
2204
2206        if (!info->attrs[TEAM_ATTR_LIST_OPTION]) {
2207                err = -EINVAL;
2208                goto team_put;
2209        }
2210
2211        nla_for_each_nested(nl_option, info->attrs[TEAM_ATTR_LIST_OPTION], i) {
2212                struct nlattr *opt_attrs[TEAM_ATTR_OPTION_MAX + 1];
2213                struct nlattr *attr;
2214                struct nlattr *attr_data;
2215                enum team_option_type opt_type;
2216                int opt_port_ifindex = 0; /* != 0 for per-port options */
2217                u32 opt_array_index = 0;
2218                bool opt_is_array = false;
2219                struct team_option_inst *opt_inst;
2220                char *opt_name;
2221                bool opt_found = false;
2222
2223                if (nla_type(nl_option) != TEAM_ATTR_ITEM_OPTION) {
2224                        err = -EINVAL;
2225                        goto team_put;
2226                }
2227                err = nla_parse_nested(opt_attrs, TEAM_ATTR_OPTION_MAX,
2228                                       nl_option, team_nl_option_policy);
2229                if (err)
2230                        goto team_put;
2231                if (!opt_attrs[TEAM_ATTR_OPTION_NAME] ||
2232                    !opt_attrs[TEAM_ATTR_OPTION_TYPE]) {
2233                        err = -EINVAL;
2234                        goto team_put;
2235                }
2236                switch (nla_get_u8(opt_attrs[TEAM_ATTR_OPTION_TYPE])) {
2237                case NLA_U32:
2238                        opt_type = TEAM_OPTION_TYPE_U32;
2239                        break;
2240                case NLA_STRING:
2241                        opt_type = TEAM_OPTION_TYPE_STRING;
2242                        break;
2243                case NLA_BINARY:
2244                        opt_type = TEAM_OPTION_TYPE_BINARY;
2245                        break;
2246                case NLA_FLAG:
2247                        opt_type = TEAM_OPTION_TYPE_BOOL;
2248                        break;
2249                case NLA_S32:
2250                        opt_type = TEAM_OPTION_TYPE_S32;
2251                        break;
2252                default:
2253                        err = -EINVAL;
                            goto team_put;
2254                }
2255
2256                attr_data = opt_attrs[TEAM_ATTR_OPTION_DATA];
2257                if (opt_type != TEAM_OPTION_TYPE_BOOL && !attr_data) {
2258                        err = -EINVAL;
2259                        goto team_put;
2260                }
2261
2262                opt_name = nla_data(opt_attrs[TEAM_ATTR_OPTION_NAME]);
2263                attr = opt_attrs[TEAM_ATTR_OPTION_PORT_IFINDEX];
2264                if (attr)
2265                        opt_port_ifindex = nla_get_u32(attr);
2266
2267                attr = opt_attrs[TEAM_ATTR_OPTION_ARRAY_INDEX];
2268                if (attr) {
2269                        opt_is_array = true;
2270                        opt_array_index = nla_get_u32(attr);
2271                }
2272
2273                list_for_each_entry(opt_inst, &team->option_inst_list, list) {
2274                        struct team_option *option = opt_inst->option;
2275                        struct team_gsetter_ctx ctx;
2276                        struct team_option_inst_info *opt_inst_info;
2277                        int tmp_ifindex;
2278
2279                        opt_inst_info = &opt_inst->info;
2280                        tmp_ifindex = opt_inst_info->port ?
2281                                      opt_inst_info->port->dev->ifindex : 0;
2282                        if (option->type != opt_type ||
2283                            strcmp(option->name, opt_name) ||
2284                            tmp_ifindex != opt_port_ifindex ||
2285                            (option->array_size && !opt_is_array) ||
2286                            opt_inst_info->array_index != opt_array_index)
2287                                continue;
2288                        opt_found = true;
2289                        ctx.info = opt_inst_info;
2290                        switch (opt_type) {
2291                        case TEAM_OPTION_TYPE_U32:
2292                                ctx.data.u32_val = nla_get_u32(attr_data);
2293                                break;
2294                        case TEAM_OPTION_TYPE_STRING:
2295                                if (nla_len(attr_data) > TEAM_STRING_MAX_LEN) {
2296                                        err = -EINVAL;
2297                                        goto team_put;
2298                                }
2299                                ctx.data.str_val = nla_data(attr_data);
2300                                break;
2301                        case TEAM_OPTION_TYPE_BINARY:
2302                                ctx.data.bin_val.len = nla_len(attr_data);
2303                                ctx.data.bin_val.ptr = nla_data(attr_data);
2304                                break;
2305                        case TEAM_OPTION_TYPE_BOOL:
2306                                ctx.data.bool_val = attr_data ? true : false;
2307                                break;
2308                        case TEAM_OPTION_TYPE_S32:
2309                                ctx.data.s32_val = nla_get_s32(attr_data);
2310                                break;
2311                        default:
2312                                BUG();
2313                        }
2314                        err = team_option_set(team, opt_inst, &ctx);
2315                        if (err)
2316                                goto team_put;
2317                        opt_inst->changed = true;
2318                        list_add(&opt_inst->tmp_list, &opt_inst_list);
2319                }
2320                if (!opt_found) {
2321                        err = -ENOENT;
2322                        goto team_put;
2323                }
2324        }
2325
2326        err = team_nl_send_event_options_get(team, &opt_inst_list);
2327
2328team_put:
2329        team_nl_team_put(team);
2330
2331        return err;
2332}
2333
2334static int team_nl_fill_one_port_get(struct sk_buff *skb,
2335                                     struct team_port *port)
2336{
2337        struct nlattr *port_item;
2338
2339        port_item = nla_nest_start(skb, TEAM_ATTR_ITEM_PORT);
2340        if (!port_item)
2341                goto nest_cancel;
2342        if (nla_put_u32(skb, TEAM_ATTR_PORT_IFINDEX, port->dev->ifindex))
2343                goto nest_cancel;
2344        if (port->changed) {
2345                if (nla_put_flag(skb, TEAM_ATTR_PORT_CHANGED))
2346                        goto nest_cancel;
2347                port->changed = false;
2348        }
2349        if ((port->removed &&
2350             nla_put_flag(skb, TEAM_ATTR_PORT_REMOVED)) ||
2351            (port->state.linkup &&
2352             nla_put_flag(skb, TEAM_ATTR_PORT_LINKUP)) ||
2353            nla_put_u32(skb, TEAM_ATTR_PORT_SPEED, port->state.speed) ||
2354            nla_put_u8(skb, TEAM_ATTR_PORT_DUPLEX, port->state.duplex))
2355                goto nest_cancel;
2356        nla_nest_end(skb, port_item);
2357        return 0;
2358
2359nest_cancel:
2360        nla_nest_cancel(skb, port_item);
2361        return -EMSGSIZE;
2362}
2363
2364static int team_nl_send_port_list_get(struct team *team, u32 portid, u32 seq,
2365                                      int flags, team_nl_send_func_t *send_func,
2366                                      struct team_port *one_port)
2367{
2368        struct nlattr *port_list;
2369        struct nlmsghdr *nlh;
2370        void *hdr;
2371        struct team_port *port;
2372        int err;
2373        struct sk_buff *skb = NULL;
2374        bool incomplete;
2375        int i;
2376
2377        port = list_first_entry_or_null(&team->port_list,
2378                                        struct team_port, list);
2379
2380start_again:
2381        err = __send_and_alloc_skb(&skb, team, portid, send_func);
2382        if (err)
2383                return err;
2384
2385        hdr = genlmsg_put(skb, portid, seq, &team_nl_family, flags | NLM_F_MULTI,
2386                          TEAM_CMD_PORT_LIST_GET);
2387        if (!hdr) {
2388                nlmsg_free(skb);
                    return -EMSGSIZE;
            }
2389
2390        if (nla_put_u32(skb, TEAM_ATTR_TEAM_IFINDEX, team->dev->ifindex))
2391                goto nla_put_failure;
2392        port_list = nla_nest_start(skb, TEAM_ATTR_LIST_PORT);
2393        if (!port_list)
2394                goto nla_put_failure;
2395
2396        i = 0;
2397        incomplete = false;
2398
2399        /* If one port is selected, the caller wants to send a port list
2400         * containing only that port. Otherwise walk all listed ports
2401         * and send them all. */
2402        if (one_port) {
2403                err = team_nl_fill_one_port_get(skb, one_port);
2404                if (err)
2405                        goto errout;
2406        } else if (port) {
2407                list_for_each_entry_from(port, &team->port_list, list) {
2408                        err = team_nl_fill_one_port_get(skb, port);
2409                        if (err) {
2410                                if (err == -EMSGSIZE) {
2411                                        if (!i)
2412                                                goto errout;
2413                                        incomplete = true;
2414                                        break;
2415                                }
2416                                goto errout;
2417                        }
2418                        i++;
2419                }
2420        }
2421
2422        nla_nest_end(skb, port_list);
2423        genlmsg_end(skb, hdr);
2424        if (incomplete)
2425                goto start_again;
2426
2427send_done:
2428        nlh = nlmsg_put(skb, portid, seq, NLMSG_DONE, 0, flags | NLM_F_MULTI);
2429        if (!nlh) {
2430                err = __send_and_alloc_skb(&skb, team, portid, send_func);
2431                if (err)
2432                        goto errout;
2433                goto send_done;
2434        }
2435
2436        return send_func(skb, team, portid);
2437
2438nla_put_failure:
2439        err = -EMSGSIZE;
2440errout:
2441        genlmsg_cancel(skb, hdr);
2442        nlmsg_free(skb);
2443        return err;
2444}
2445
2446static int team_nl_cmd_port_list_get(struct sk_buff *skb,
2447                                     struct genl_info *info)
2448{
2449        struct team *team;
2450        int err;
2451
2452        team = team_nl_team_get(info);
2453        if (!team)
2454                return -EINVAL;
2455
2456        err = team_nl_send_port_list_get(team, info->snd_portid, info->snd_seq,
2457                                         NLM_F_ACK, team_nl_send_unicast, NULL);
2458
2459        team_nl_team_put(team);
2460
2461        return err;
2462}
2463
2464static struct genl_ops team_nl_ops[] = {
2465        {
2466                .cmd = TEAM_CMD_NOOP,
2467                .doit = team_nl_cmd_noop,
2468                .policy = team_nl_policy,
2469        },
2470        {
2471                .cmd = TEAM_CMD_OPTIONS_SET,
2472                .doit = team_nl_cmd_options_set,
2473                .policy = team_nl_policy,
2474                .flags = GENL_ADMIN_PERM,
2475        },
2476        {
2477                .cmd = TEAM_CMD_OPTIONS_GET,
2478                .doit = team_nl_cmd_options_get,
2479                .policy = team_nl_policy,
2480                .flags = GENL_ADMIN_PERM,
2481        },
2482        {
2483                .cmd = TEAM_CMD_PORT_LIST_GET,
2484                .doit = team_nl_cmd_port_list_get,
2485                .policy = team_nl_policy,
2486                .flags = GENL_ADMIN_PERM,
2487        },
2488};
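
/*
 * All commands except TEAM_CMD_NOOP carry GENL_ADMIN_PERM, so the
 * requesting socket needs CAP_NET_ADMIN; unprivileged userspace can only
 * probe the family with the noop command.
 */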
2489
2490static struct genl_multicast_group team_change_event_mcgrp = {
2491        .name = TEAM_GENL_CHANGE_EVENT_MC_GRP_NAME,
2492};
2493
2494static int team_nl_send_multicast(struct sk_buff *skb,
2495                                  struct team *team, u32 portid)
2496{
2497        return genlmsg_multicast_netns(dev_net(team->dev), skb, 0,
2498                                       team_change_event_mcgrp.id, GFP_KERNEL);
2499}
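
/*
 * genlmsg_multicast_netns() returns -ESRCH when nobody is subscribed to
 * the change-event group, which is why callers of the two event helpers
 * below treat -ESRCH as "no listeners" rather than as a failure.
 */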
2500
2501static int team_nl_send_event_options_get(struct team *team,
2502                                          struct list_head *sel_opt_inst_list)
2503{
2504        return team_nl_send_options_get(team, 0, 0, 0, team_nl_send_multicast,
2505                                        sel_opt_inst_list);
2506}
2507
2508static int team_nl_send_event_port_get(struct team *team,
2509                                       struct team_port *port)
2510{
2511        return team_nl_send_port_list_get(team, 0, 0, 0, team_nl_send_multicast,
2512                                          port);
2513}
2514
2515static int team_nl_init(void)
2516{
2517        int err;
2518
2519        err = genl_register_family_with_ops(&team_nl_family, team_nl_ops,
2520                                            ARRAY_SIZE(team_nl_ops));
2521        if (err)
2522                return err;
2523
2524        err = genl_register_mc_group(&team_nl_family, &team_change_event_mcgrp);
2525        if (err)
2526                goto err_change_event_grp_reg;
2527
2528        return 0;
2529
2530err_change_event_grp_reg:
2531        genl_unregister_family(&team_nl_family);
2532
2533        return err;
2534}
2535
2536static void team_nl_fini(void)
2537{
2538        genl_unregister_family(&team_nl_family);
2539}
2540
2541
2542/******************
2543 * Change checkers
2544 ******************/
2545
2546static void __team_options_change_check(struct team *team)
2547{
2548        int err;
2549        struct team_option_inst *opt_inst;
2550        LIST_HEAD(sel_opt_inst_list);
2551
2552        list_for_each_entry(opt_inst, &team->option_inst_list, list) {
2553                if (opt_inst->changed)
2554                        list_add_tail(&opt_inst->tmp_list, &sel_opt_inst_list);
2555        }
2556        err = team_nl_send_event_options_get(team, &sel_opt_inst_list);
2557        if (err && err != -ESRCH)
2558                netdev_warn(team->dev, "Failed to send options change via netlink (err %d)\n",
2559                            err);
2560}
2561
2562/* rtnl lock is held */
2564static void __team_port_change_send(struct team_port *port, bool linkup)
2565{
2566        int err;
2567
2568        port->changed = true;
2569        port->state.linkup = linkup;
2570        team_refresh_port_linkup(port);
2571        if (linkup) {
2572                struct ethtool_cmd ecmd;
2573
2574                err = __ethtool_get_settings(port->dev, &ecmd);
2575                if (!err) {
2576                        port->state.speed = ethtool_cmd_speed(&ecmd);
2577                        port->state.duplex = ecmd.duplex;
2578                        goto send_event;
2579                }
2580        }
2581        port->state.speed = 0;
2582        port->state.duplex = 0;
2583
2584send_event:
2585        err = team_nl_send_event_port_get(port->team, port);
2586        if (err && err != -ESRCH)
2587                netdev_warn(port->team->dev, "Failed to send port change of device %s via netlink (err %d)\n",
2588                            port->dev->name, err);
2590}
2591
2592static void __team_carrier_check(struct team *team)
2593{
2594        struct team_port *port;
2595        bool team_linkup;
2596
2597        if (team->user_carrier_enabled)
2598                return;
2599
2600        team_linkup = false;
2601        list_for_each_entry(port, &team->port_list, list) {
2602                if (port->linkup) {
2603                        team_linkup = true;
2604                        break;
2605                }
2606        }
2607
2608        if (team_linkup)
2609                netif_carrier_on(team->dev);
2610        else
2611                netif_carrier_off(team->dev);
2612}
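
/*
 * Once userspace forces the carrier state through ndo_change_carrier
 * (team_change_carrier() sets user_carrier_enabled), the early return
 * above keeps the team from overriding it based on port link state.
 */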
2613
2614static void __team_port_change_check(struct team_port *port, bool linkup)
2615{
2616        if (port->state.linkup != linkup)
2617                __team_port_change_send(port, linkup);
2618        __team_carrier_check(port->team);
2619}
2620
2621static void __team_port_change_port_added(struct team_port *port, bool linkup)
2622{
2623        __team_port_change_send(port, linkup);
2624        __team_carrier_check(port->team);
2625}
2626
2627static void __team_port_change_port_removed(struct team_port *port)
2628{
2629        port->removed = true;
2630        __team_port_change_send(port, false);
2631        __team_carrier_check(port->team);
2632}
2633
2634static void team_port_change_check(struct team_port *port, bool linkup)
2635{
2636        struct team *team = port->team;
2637
2638        mutex_lock(&team->lock);
2639        __team_port_change_check(port, linkup);
2640        mutex_unlock(&team->lock);
2641}
2642
2643
2644/************************************
2645 * Net device notifier event handler
2646 ************************************/
2647
2648static int team_device_event(struct notifier_block *unused,
2649                             unsigned long event, void *ptr)
2650{
2651        struct net_device *dev = (struct net_device *) ptr;
2652        struct team_port *port;
2653
2654        port = team_port_get_rtnl(dev);
2655        if (!port)
2656                return NOTIFY_DONE;
2657
2658        switch (event) {
2659        case NETDEV_UP:
2660                if (netif_carrier_ok(dev))
2661                        team_port_change_check(port, true);
                    break;
2662        case NETDEV_DOWN:
2663                team_port_change_check(port, false);
                    break;
2664        case NETDEV_CHANGE:
2665                if (netif_running(port->dev))
2666                        team_port_change_check(port,
2667                                               !!netif_carrier_ok(port->dev));
2668                break;
2669        case NETDEV_UNREGISTER:
2670                team_del_slave(port->team->dev, dev);
2671                break;
2672        case NETDEV_FEAT_CHANGE:
2673                team_compute_features(port->team);
2674                break;
2675        case NETDEV_CHANGEMTU:
2676                /* Forbid changing the MTU of an underlying port device */
2677                return NOTIFY_BAD;
2678        case NETDEV_PRE_TYPE_CHANGE:
2679                /* Forbid changing the type of an underlying port device */
2680                return NOTIFY_BAD;
2681        }
2682        return NOTIFY_DONE;
2683}
2684
2685static struct notifier_block team_notifier_block __read_mostly = {
2686        .notifier_call = team_device_event,
2687};
2688
2689
2690/***********************
2691 * Module init and exit
2692 ***********************/
2693
2694static int __init team_module_init(void)
2695{
2696        int err;
2697
2698        register_netdevice_notifier(&team_notifier_block);
2699
2700        err = rtnl_link_register(&team_link_ops);
2701        if (err)
2702                goto err_rtnl_reg;
2703
2704        err = team_nl_init();
2705        if (err)
2706                goto err_nl_init;
2707
2708        return 0;
2709
2710err_nl_init:
2711        rtnl_link_unregister(&team_link_ops);
2712
2713err_rtnl_reg:
2714        unregister_netdevice_notifier(&team_notifier_block);
2715
2716        return err;
2717}
2718
2719static void __exit team_module_exit(void)
2720{
2721        team_nl_fini();
2722        rtnl_link_unregister(&team_link_ops);
2723        unregister_netdevice_notifier(&team_notifier_block);
2724}
2725
2726module_init(team_module_init);
2727module_exit(team_module_exit);
2728
2729MODULE_LICENSE("GPL v2");
2730MODULE_AUTHOR("Jiri Pirko <jpirko@redhat.com>");
2731MODULE_DESCRIPTION("Ethernet team device driver");
2732MODULE_ALIAS_RTNL_LINK(DRV_NAME);
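
/*
 * MODULE_ALIAS_RTNL_LINK(DRV_NAME) expands to MODULE_ALIAS("rtnl-link-team"),
 * so the module is loaded on demand the first time userspace asks rtnetlink
 * for a link of kind "team".
 */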
2733