linux/net/switchdev/switchdev.c
/*
 * net/switchdev/switchdev.c - Switch device API
 * Copyright (c) 2014-2015 Jiri Pirko <jiri@resnulli.us>
 * Copyright (c) 2014-2015 Scott Feldman <sfeldma@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/notifier.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_bridge.h>
#include <linux/list.h>
#include <linux/workqueue.h>
#include <linux/if_vlan.h>
#include <linux/rtnetlink.h>
#include <net/switchdev.h>

/**
 *      switchdev_trans_item_enqueue - Enqueue data item to transaction queue
 *
 *      @trans: transaction
 *      @data: pointer to data being queued
 *      @destructor: data destructor
 *      @tritem: transaction item being queued
 *
 *      Enqueue data item to transaction queue. tritem is typically placed in
 *      a container pointed at by the data pointer. The destructor is called
 *      on transaction abort and after a successful commit phase in case
 *      the caller did not dequeue the item before.
 */
void switchdev_trans_item_enqueue(struct switchdev_trans *trans,
                                  void *data, void (*destructor)(void const *),
                                  struct switchdev_trans_item *tritem)
{
        tritem->data = data;
        tritem->destructor = destructor;
        list_add_tail(&tritem->list, &trans->item_list);
}
EXPORT_SYMBOL_GPL(switchdev_trans_item_enqueue);
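
/* A minimal driver-side usage sketch (not part of this file; the foo_*
 * names are hypothetical): the prepare phase allocates per-transaction
 * state and queues it, the commit phase dequeues it, and an aborted
 * transaction frees it via the destructor passed to
 * switchdev_trans_item_enqueue().
 */
struct foo_trans_ctx {
        struct switchdev_trans_item tritem;
        /* ... driver-private state reserved during prepare ... */
};

static void foo_trans_ctx_destructor(const void *data)
{
        kfree(data);
}

static int foo_prepare(struct switchdev_trans *trans)
{
        struct foo_trans_ctx *ctx;

        ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
        if (!ctx)
                return -ENOMEM;
        switchdev_trans_item_enqueue(trans, ctx, foo_trans_ctx_destructor,
                                     &ctx->tritem);
        return 0;
}

static int foo_commit(struct switchdev_trans *trans)
{
        struct foo_trans_ctx *ctx = switchdev_trans_item_dequeue(trans);

        /* ... program ctx into hardware, then release it ... */
        kfree(ctx);
        return 0;
}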

static struct switchdev_trans_item *
__switchdev_trans_item_dequeue(struct switchdev_trans *trans)
{
        struct switchdev_trans_item *tritem;

        if (list_empty(&trans->item_list))
                return NULL;
        tritem = list_first_entry(&trans->item_list,
                                  struct switchdev_trans_item, list);
        list_del(&tritem->list);
        return tritem;
}

/**
 *      switchdev_trans_item_dequeue - Dequeue data item from transaction queue
 *
 *      @trans: transaction
 */
void *switchdev_trans_item_dequeue(struct switchdev_trans *trans)
{
        struct switchdev_trans_item *tritem;

        tritem = __switchdev_trans_item_dequeue(trans);
        BUG_ON(!tritem);
        return tritem->data;
}
EXPORT_SYMBOL_GPL(switchdev_trans_item_dequeue);

static void switchdev_trans_init(struct switchdev_trans *trans)
{
        INIT_LIST_HEAD(&trans->item_list);
}

static void switchdev_trans_items_destroy(struct switchdev_trans *trans)
{
        struct switchdev_trans_item *tritem;

        while ((tritem = __switchdev_trans_item_dequeue(trans)))
                tritem->destructor(tritem->data);
}

static void switchdev_trans_items_warn_destroy(struct net_device *dev,
                                               struct switchdev_trans *trans)
{
        WARN(!list_empty(&trans->item_list), "%s: transaction item queue is not empty.\n",
             dev->name);
        switchdev_trans_items_destroy(trans);
}

static LIST_HEAD(deferred);
static DEFINE_SPINLOCK(deferred_lock);

typedef void switchdev_deferred_func_t(struct net_device *dev,
                                       const void *data);

struct switchdev_deferred_item {
        struct list_head list;
        struct net_device *dev;
        switchdev_deferred_func_t *func;
        unsigned long data[0];
};

static struct switchdev_deferred_item *switchdev_deferred_dequeue(void)
{
        struct switchdev_deferred_item *dfitem;

        spin_lock_bh(&deferred_lock);
        if (list_empty(&deferred)) {
                dfitem = NULL;
                goto unlock;
        }
        dfitem = list_first_entry(&deferred,
                                  struct switchdev_deferred_item, list);
        list_del(&dfitem->list);
unlock:
        spin_unlock_bh(&deferred_lock);
        return dfitem;
}

/**
 *      switchdev_deferred_process - Process ops in deferred queue
 *
 *      Called to flush the ops currently queued in the deferred ops queue.
 *      rtnl_lock must be held.
 */
void switchdev_deferred_process(void)
{
        struct switchdev_deferred_item *dfitem;

        ASSERT_RTNL();

        while ((dfitem = switchdev_deferred_dequeue())) {
                dfitem->func(dfitem->dev, dfitem->data);
                dev_put(dfitem->dev);
                kfree(dfitem);
        }
}
EXPORT_SYMBOL_GPL(switchdev_deferred_process);

static void switchdev_deferred_process_work(struct work_struct *work)
{
        rtnl_lock();
        switchdev_deferred_process();
        rtnl_unlock();
}

static DECLARE_WORK(deferred_process_work, switchdev_deferred_process_work);

static int switchdev_deferred_enqueue(struct net_device *dev,
                                      const void *data, size_t data_len,
                                      switchdev_deferred_func_t *func)
{
        struct switchdev_deferred_item *dfitem;

        dfitem = kmalloc(sizeof(*dfitem) + data_len, GFP_ATOMIC);
        if (!dfitem)
                return -ENOMEM;
        dfitem->dev = dev;
        dfitem->func = func;
        memcpy(dfitem->data, data, data_len);
        dev_hold(dev);
        spin_lock_bh(&deferred_lock);
        list_add_tail(&dfitem->list, &deferred);
        spin_unlock_bh(&deferred_lock);
        schedule_work(&deferred_process_work);
        return 0;
}

static int switchdev_port_attr_notify(enum switchdev_notifier_type nt,
                                      struct net_device *dev,
                                      const struct switchdev_attr *attr,
                                      struct switchdev_trans *trans)
{
        int err;
        int rc;

        struct switchdev_notifier_port_attr_info attr_info = {
                .attr = attr,
                .trans = trans,
                .handled = false,
        };

        rc = call_switchdev_blocking_notifiers(nt, dev,
                                               &attr_info.info, NULL);
        err = notifier_to_errno(rc);
        if (err) {
                WARN_ON(!attr_info.handled);
                return err;
        }

        if (!attr_info.handled)
                return -EOPNOTSUPP;

        return 0;
}

static int switchdev_port_attr_set_now(struct net_device *dev,
                                       const struct switchdev_attr *attr)
{
        struct switchdev_trans trans;
        int err;

        switchdev_trans_init(&trans);

        /* Phase I: prepare for attr set. Driver/device should fail
         * here if there are going to be issues in the commit phase,
         * such as lack of resources or support.  The driver/device
         * should reserve resources needed for the commit phase here,
         * but should not commit the attr.
         */

        trans.ph_prepare = true;
        err = switchdev_port_attr_notify(SWITCHDEV_PORT_ATTR_SET, dev, attr,
                                         &trans);
        if (err) {
                /* Prepare phase failed: abort the transaction.  Any
                 * resources reserved in the prepare phase are
                 * released.
                 */

                if (err != -EOPNOTSUPP)
                        switchdev_trans_items_destroy(&trans);

                return err;
        }

        /* Phase II: commit attr set.  This cannot fail as a fault
         * of driver/device.  If it does, it's a bug in the driver/device
         * because the driver said everything was OK in phase I.
         */

        trans.ph_prepare = false;
        err = switchdev_port_attr_notify(SWITCHDEV_PORT_ATTR_SET, dev, attr,
                                         &trans);
        WARN(err, "%s: Commit of attribute (id=%d) failed.\n",
             dev->name, attr->id);
        switchdev_trans_items_warn_destroy(dev, &trans);

        return err;
}
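
/* A minimal sketch of the driver side of this two-phase contract (not
 * part of this file; the foo_* helpers are hypothetical): the same
 * handler is invoked for both phases and branches on the phase via the
 * switchdev_trans_ph_prepare() helper from <net/switchdev.h>.
 */
static int foo_port_attr_set(struct net_device *dev,
                             const struct switchdev_attr *attr,
                             struct switchdev_trans *trans)
{
        if (switchdev_trans_ph_prepare(trans))
                /* validate the attr and reserve resources; may fail */
                return foo_attr_prepare(dev, attr);

        /* commit phase: apply to hardware; must not fail */
        foo_attr_commit(dev, attr);
        return 0;
}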

static void switchdev_port_attr_set_deferred(struct net_device *dev,
                                             const void *data)
{
        const struct switchdev_attr *attr = data;
        int err;

        err = switchdev_port_attr_set_now(dev, attr);
        if (err && err != -EOPNOTSUPP)
                netdev_err(dev, "failed (err=%d) to set attribute (id=%d)\n",
                           err, attr->id);
        if (attr->complete)
                attr->complete(dev, err, attr->complete_priv);
}

static int switchdev_port_attr_set_defer(struct net_device *dev,
                                         const struct switchdev_attr *attr)
{
        return switchdev_deferred_enqueue(dev, attr, sizeof(*attr),
                                          switchdev_port_attr_set_deferred);
}

/**
 *      switchdev_port_attr_set - Set port attribute
 *
 *      @dev: port device
 *      @attr: attribute to set
 *
 *      Use a 2-phase prepare-commit transaction model to ensure
 *      the system is not left in a partially updated state due to
 *      failure from the driver/device.
 *
 *      Unless the SWITCHDEV_F_DEFER flag is set, rtnl_lock must be held
 *      and the caller must not be in an atomic section.
 */
int switchdev_port_attr_set(struct net_device *dev,
                            const struct switchdev_attr *attr)
{
        if (attr->flags & SWITCHDEV_F_DEFER)
                return switchdev_port_attr_set_defer(dev, attr);
        ASSERT_RTNL();
        return switchdev_port_attr_set_now(dev, attr);
}
EXPORT_SYMBOL_GPL(switchdev_port_attr_set);
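
/* A minimal caller sketch (hypothetical foo_* helper, not part of this
 * file): set a port's STP state and defer the operation, mirroring how
 * the bridge uses this API from contexts that cannot block.
 */
static void foo_set_stp_state(struct net_device *dev, u8 state)
{
        struct switchdev_attr attr = {
                .orig_dev = dev,
                .id = SWITCHDEV_ATTR_ID_PORT_STP_STATE,
                .flags = SWITCHDEV_F_DEFER,
                .u.stp_state = state,
        };

        switchdev_port_attr_set(dev, &attr);
}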

static size_t switchdev_obj_size(const struct switchdev_obj *obj)
{
        switch (obj->id) {
        case SWITCHDEV_OBJ_ID_PORT_VLAN:
                return sizeof(struct switchdev_obj_port_vlan);
        case SWITCHDEV_OBJ_ID_PORT_MDB:
                return sizeof(struct switchdev_obj_port_mdb);
        case SWITCHDEV_OBJ_ID_HOST_MDB:
                return sizeof(struct switchdev_obj_port_mdb);
        default:
                BUG();
        }
        return 0;
}

static int switchdev_port_obj_notify(enum switchdev_notifier_type nt,
                                     struct net_device *dev,
                                     const struct switchdev_obj *obj,
                                     struct switchdev_trans *trans,
                                     struct netlink_ext_ack *extack)
{
        int rc;
        int err;

        struct switchdev_notifier_port_obj_info obj_info = {
                .obj = obj,
                .trans = trans,
                .handled = false,
        };

        rc = call_switchdev_blocking_notifiers(nt, dev, &obj_info.info, extack);
        err = notifier_to_errno(rc);
        if (err) {
                WARN_ON(!obj_info.handled);
                return err;
        }
        if (!obj_info.handled)
                return -EOPNOTSUPP;
        return 0;
}

static int switchdev_port_obj_add_now(struct net_device *dev,
                                      const struct switchdev_obj *obj,
                                      struct netlink_ext_ack *extack)
{
        struct switchdev_trans trans;
        int err;

        ASSERT_RTNL();

        switchdev_trans_init(&trans);

        /* Phase I: prepare for obj add. Driver/device should fail
         * here if there are going to be issues in the commit phase,
         * such as lack of resources or support.  The driver/device
         * should reserve resources needed for the commit phase here,
         * but should not commit the obj.
         */

        trans.ph_prepare = true;
        err = switchdev_port_obj_notify(SWITCHDEV_PORT_OBJ_ADD,
                                        dev, obj, &trans, extack);
        if (err) {
                /* Prepare phase failed: abort the transaction.  Any
                 * resources reserved in the prepare phase are
                 * released.
                 */

                if (err != -EOPNOTSUPP)
                        switchdev_trans_items_destroy(&trans);

                return err;
        }

        /* Phase II: commit obj add.  This cannot fail as a fault
         * of driver/device.  If it does, it's a bug in the driver/device
         * because the driver said everything was OK in phase I.
         */

        trans.ph_prepare = false;
        err = switchdev_port_obj_notify(SWITCHDEV_PORT_OBJ_ADD,
                                        dev, obj, &trans, extack);
        WARN(err, "%s: Commit of object (id=%d) failed.\n", dev->name, obj->id);
        switchdev_trans_items_warn_destroy(dev, &trans);

        return err;
}

static void switchdev_port_obj_add_deferred(struct net_device *dev,
                                            const void *data)
{
        const struct switchdev_obj *obj = data;
        int err;

        err = switchdev_port_obj_add_now(dev, obj, NULL);
        if (err && err != -EOPNOTSUPP)
                netdev_err(dev, "failed (err=%d) to add object (id=%d)\n",
                           err, obj->id);
        if (obj->complete)
                obj->complete(dev, err, obj->complete_priv);
}

static int switchdev_port_obj_add_defer(struct net_device *dev,
                                        const struct switchdev_obj *obj)
{
        return switchdev_deferred_enqueue(dev, obj, switchdev_obj_size(obj),
                                          switchdev_port_obj_add_deferred);
}

/**
 *      switchdev_port_obj_add - Add port object
 *
 *      @dev: port device
 *      @obj: object to add
 *      @extack: netlink extended ack
 *
 *      Use a 2-phase prepare-commit transaction model to ensure
 *      the system is not left in a partially updated state due to
 *      failure from the driver/device.
 *
 *      Unless the SWITCHDEV_F_DEFER flag is set, rtnl_lock must be held
 *      and the caller must not be in an atomic section.
 */
int switchdev_port_obj_add(struct net_device *dev,
                           const struct switchdev_obj *obj,
                           struct netlink_ext_ack *extack)
{
        if (obj->flags & SWITCHDEV_F_DEFER)
                return switchdev_port_obj_add_defer(dev, obj);
        ASSERT_RTNL();
        return switchdev_port_obj_add_now(dev, obj, extack);
}
EXPORT_SYMBOL_GPL(switchdev_port_obj_add);
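
/* A minimal caller sketch (hypothetical foo_* helper, not part of this
 * file): add VLAN 10 as untagged PVID on a port.  vid_begin/vid_end
 * describe an inclusive VLAN range in this kernel's switchdev_obj
 * layout, and the BRIDGE_VLAN_INFO_* flags come from
 * <linux/if_bridge.h>, which is already included above.
 */
static int foo_port_vlan_add(struct net_device *dev,
                             struct netlink_ext_ack *extack)
{
        struct switchdev_obj_port_vlan v = {
                .obj.orig_dev = dev,
                .obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN,
                .flags = BRIDGE_VLAN_INFO_PVID | BRIDGE_VLAN_INFO_UNTAGGED,
                .vid_begin = 10,
                .vid_end = 10,
        };

        return switchdev_port_obj_add(dev, &v.obj, extack);
}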

static int switchdev_port_obj_del_now(struct net_device *dev,
                                      const struct switchdev_obj *obj)
{
        return switchdev_port_obj_notify(SWITCHDEV_PORT_OBJ_DEL,
                                         dev, obj, NULL, NULL);
}

static void switchdev_port_obj_del_deferred(struct net_device *dev,
                                            const void *data)
{
        const struct switchdev_obj *obj = data;
        int err;

        err = switchdev_port_obj_del_now(dev, obj);
        if (err && err != -EOPNOTSUPP)
                netdev_err(dev, "failed (err=%d) to del object (id=%d)\n",
                           err, obj->id);
        if (obj->complete)
                obj->complete(dev, err, obj->complete_priv);
}

static int switchdev_port_obj_del_defer(struct net_device *dev,
                                        const struct switchdev_obj *obj)
{
        return switchdev_deferred_enqueue(dev, obj, switchdev_obj_size(obj),
                                          switchdev_port_obj_del_deferred);
}

/**
 *      switchdev_port_obj_del - Delete port object
 *
 *      @dev: port device
 *      @obj: object to delete
 *
 *      Unless the SWITCHDEV_F_DEFER flag is set, rtnl_lock must be held
 *      and the caller must not be in an atomic section.
 */
int switchdev_port_obj_del(struct net_device *dev,
                           const struct switchdev_obj *obj)
{
        if (obj->flags & SWITCHDEV_F_DEFER)
                return switchdev_port_obj_del_defer(dev, obj);
        ASSERT_RTNL();
        return switchdev_port_obj_del_now(dev, obj);
}
EXPORT_SYMBOL_GPL(switchdev_port_obj_del);
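
/* Deletion takes the same object description as the add sketch above;
 * only the notifier type differs (hypothetical foo_* helper).
 */
static int foo_port_vlan_del(struct net_device *dev)
{
        struct switchdev_obj_port_vlan v = {
                .obj.orig_dev = dev,
                .obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN,
                .vid_begin = 10,
                .vid_end = 10,
        };

        return switchdev_port_obj_del(dev, &v.obj);
}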

static ATOMIC_NOTIFIER_HEAD(switchdev_notif_chain);
static BLOCKING_NOTIFIER_HEAD(switchdev_blocking_notif_chain);

/**
 *      register_switchdev_notifier - Register notifier
 *      @nb: notifier_block
 *
 *      Register switch device notifier.
 */
int register_switchdev_notifier(struct notifier_block *nb)
{
        return atomic_notifier_chain_register(&switchdev_notif_chain, nb);
}
EXPORT_SYMBOL_GPL(register_switchdev_notifier);

/**
 *      unregister_switchdev_notifier - Unregister notifier
 *      @nb: notifier_block
 *
 *      Unregister switch device notifier.
 */
int unregister_switchdev_notifier(struct notifier_block *nb)
{
        return atomic_notifier_chain_unregister(&switchdev_notif_chain, nb);
}
EXPORT_SYMBOL_GPL(unregister_switchdev_notifier);

/**
 *      call_switchdev_notifiers - Call notifiers
 *      @val: value passed unmodified to notifier function
 *      @dev: port device
 *      @info: notifier information data
 *      @extack: netlink extended ack
 *
 *      Call all switchdev notifier blocks on the atomic chain.
 */
int call_switchdev_notifiers(unsigned long val, struct net_device *dev,
                             struct switchdev_notifier_info *info,
                             struct netlink_ext_ack *extack)
{
        info->dev = dev;
        info->extack = extack;
        return atomic_notifier_call_chain(&switchdev_notif_chain, val, info);
}
EXPORT_SYMBOL_GPL(call_switchdev_notifiers);
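
/* A minimal sketch of a typical emitter (hypothetical foo_* helper,
 * not part of this file): a driver that learned a MAC address in
 * hardware reports it to the bridge over the atomic chain, which may
 * be called from atomic context, using struct
 * switchdev_notifier_fdb_info from <net/switchdev.h>.
 */
static void foo_fdb_learned(struct net_device *dev,
                            const unsigned char *addr, u16 vid)
{
        struct switchdev_notifier_fdb_info fdb_info = {
                .addr = addr,
                .vid = vid,
        };

        call_switchdev_notifiers(SWITCHDEV_FDB_ADD_TO_BRIDGE, dev,
                                 &fdb_info.info, NULL);
}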

int register_switchdev_blocking_notifier(struct notifier_block *nb)
{
        struct blocking_notifier_head *chain = &switchdev_blocking_notif_chain;

        return blocking_notifier_chain_register(chain, nb);
}
EXPORT_SYMBOL_GPL(register_switchdev_blocking_notifier);

int unregister_switchdev_blocking_notifier(struct notifier_block *nb)
{
        struct blocking_notifier_head *chain = &switchdev_blocking_notif_chain;

        return blocking_notifier_chain_unregister(chain, nb);
}
EXPORT_SYMBOL_GPL(unregister_switchdev_blocking_notifier);

int call_switchdev_blocking_notifiers(unsigned long val, struct net_device *dev,
                                      struct switchdev_notifier_info *info,
                                      struct netlink_ext_ack *extack)
{
        info->dev = dev;
        info->extack = extack;
        return blocking_notifier_call_chain(&switchdev_blocking_notif_chain,
                                            val, info);
}
EXPORT_SYMBOL_GPL(call_switchdev_blocking_notifiers);
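
/* A minimal consumer sketch (hypothetical foo_* names, not part of
 * this file): a driver registers on the blocking chain, which carries
 * the SWITCHDEV_PORT_OBJ_ADD/DEL and SWITCHDEV_PORT_ATTR_SET events
 * dispatched by the notify helpers above.
 */
static int foo_switchdev_blocking_event(struct notifier_block *nb,
                                        unsigned long event, void *ptr)
{
        /* dispatch on event; see the handle_port_obj_* helpers below */
        return NOTIFY_DONE;
}

static struct notifier_block foo_switchdev_blocking_nb = {
        .notifier_call = foo_switchdev_blocking_event,
};

static int foo_switchdev_init(void)
{
        return register_switchdev_blocking_notifier(&foo_switchdev_blocking_nb);
}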

static int __switchdev_handle_port_obj_add(struct net_device *dev,
                        struct switchdev_notifier_port_obj_info *port_obj_info,
                        bool (*check_cb)(const struct net_device *dev),
                        int (*add_cb)(struct net_device *dev,
                                      const struct switchdev_obj *obj,
                                      struct switchdev_trans *trans,
                                      struct netlink_ext_ack *extack))
{
        struct netlink_ext_ack *extack;
        struct net_device *lower_dev;
        struct list_head *iter;
        int err = -EOPNOTSUPP;

        extack = switchdev_notifier_info_to_extack(&port_obj_info->info);

        if (check_cb(dev)) {
                /* This flag is only checked if the return value is success. */
                port_obj_info->handled = true;
                return add_cb(dev, port_obj_info->obj, port_obj_info->trans,
                              extack);
        }

        /* Switch ports might be stacked under e.g. a LAG. Ignore the
         * unsupported devices; another driver might be able to handle them.
         * But propagate any hard errors to the callers.
         *
         * If the driver does its own bookkeeping of stacked ports, it's not
         * necessary to go through this helper.
         */
        netdev_for_each_lower_dev(dev, lower_dev, iter) {
                err = __switchdev_handle_port_obj_add(lower_dev, port_obj_info,
                                                      check_cb, add_cb);
                if (err && err != -EOPNOTSUPP)
                        return err;
        }

        return err;
}

int switchdev_handle_port_obj_add(struct net_device *dev,
                        struct switchdev_notifier_port_obj_info *port_obj_info,
                        bool (*check_cb)(const struct net_device *dev),
                        int (*add_cb)(struct net_device *dev,
                                      const struct switchdev_obj *obj,
                                      struct switchdev_trans *trans,
                                      struct netlink_ext_ack *extack))
{
        int err;

        err = __switchdev_handle_port_obj_add(dev, port_obj_info, check_cb,
                                              add_cb);
        if (err == -EOPNOTSUPP)
                err = 0;
        return err;
}
EXPORT_SYMBOL_GPL(switchdev_handle_port_obj_add);
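
/* A minimal sketch of a blocking-notifier handler built on the helper
 * above (hypothetical foo_* names; foo_port_dev_check() and
 * foo_port_obj_add() would be the driver's check_cb and add_cb with
 * the signatures required by switchdev_handle_port_obj_add()).
 */
static int foo_switchdev_port_obj_event(struct notifier_block *nb,
                                        unsigned long event, void *ptr)
{
        struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
        int err;

        switch (event) {
        case SWITCHDEV_PORT_OBJ_ADD:
                err = switchdev_handle_port_obj_add(dev, ptr,
                                                    foo_port_dev_check,
                                                    foo_port_obj_add);
                return notifier_from_errno(err);
        }

        return NOTIFY_DONE;
}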

static int __switchdev_handle_port_obj_del(struct net_device *dev,
                        struct switchdev_notifier_port_obj_info *port_obj_info,
                        bool (*check_cb)(const struct net_device *dev),
                        int (*del_cb)(struct net_device *dev,
                                      const struct switchdev_obj *obj))
{
        struct net_device *lower_dev;
        struct list_head *iter;
        int err = -EOPNOTSUPP;

        if (check_cb(dev)) {
                /* This flag is only checked if the return value is success. */
                port_obj_info->handled = true;
                return del_cb(dev, port_obj_info->obj);
        }

        /* Switch ports might be stacked under e.g. a LAG. Ignore the
         * unsupported devices; another driver might be able to handle them.
         * But propagate any hard errors to the callers.
         *
         * If the driver does its own bookkeeping of stacked ports, it's not
         * necessary to go through this helper.
         */
        netdev_for_each_lower_dev(dev, lower_dev, iter) {
                err = __switchdev_handle_port_obj_del(lower_dev, port_obj_info,
                                                      check_cb, del_cb);
                if (err && err != -EOPNOTSUPP)
                        return err;
        }

        return err;
}

int switchdev_handle_port_obj_del(struct net_device *dev,
                        struct switchdev_notifier_port_obj_info *port_obj_info,
                        bool (*check_cb)(const struct net_device *dev),
                        int (*del_cb)(struct net_device *dev,
                                      const struct switchdev_obj *obj))
{
        int err;

        err = __switchdev_handle_port_obj_del(dev, port_obj_info, check_cb,
                                              del_cb);
        if (err == -EOPNOTSUPP)
                err = 0;
        return err;
}
EXPORT_SYMBOL_GPL(switchdev_handle_port_obj_del);

static int __switchdev_handle_port_attr_set(struct net_device *dev,
                        struct switchdev_notifier_port_attr_info *port_attr_info,
                        bool (*check_cb)(const struct net_device *dev),
                        int (*set_cb)(struct net_device *dev,
                                      const struct switchdev_attr *attr,
                                      struct switchdev_trans *trans))
{
        struct net_device *lower_dev;
        struct list_head *iter;
        int err = -EOPNOTSUPP;

        if (check_cb(dev)) {
                port_attr_info->handled = true;
                return set_cb(dev, port_attr_info->attr,
                              port_attr_info->trans);
        }

        /* Switch ports might be stacked under e.g. a LAG. Ignore the
         * unsupported devices; another driver might be able to handle them.
         * But propagate any hard errors to the callers.
         *
         * If the driver does its own bookkeeping of stacked ports, it's not
         * necessary to go through this helper.
         */
        netdev_for_each_lower_dev(dev, lower_dev, iter) {
                err = __switchdev_handle_port_attr_set(lower_dev, port_attr_info,
                                                       check_cb, set_cb);
                if (err && err != -EOPNOTSUPP)
                        return err;
        }

        return err;
}

int switchdev_handle_port_attr_set(struct net_device *dev,
                        struct switchdev_notifier_port_attr_info *port_attr_info,
                        bool (*check_cb)(const struct net_device *dev),
                        int (*set_cb)(struct net_device *dev,
                                      const struct switchdev_attr *attr,
                                      struct switchdev_trans *trans))
{
        int err;

        err = __switchdev_handle_port_attr_set(dev, port_attr_info, check_cb,
                                               set_cb);
        if (err == -EOPNOTSUPP)
                err = 0;
        return err;
}
EXPORT_SYMBOL_GPL(switchdev_handle_port_attr_set);

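/* A minimal check_cb sketch shared by handlers like the one above
 * (hypothetical foo_* names; foo_netdev_ops would be the driver's own
 * ops table): identify the driver's ports by their netdev_ops pointer.
 */
static bool foo_port_dev_check(const struct net_device *dev)
{
        return dev->netdev_ops == &foo_netdev_ops;
}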