// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/switchdev/switchdev.c - Switch device API
 * Copyright (c) 2014-2015 Jiri Pirko <jiri@resnulli.us>
 * Copyright (c) 2014-2015 Scott Feldman <sfeldma@gmail.com>
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/notifier.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_bridge.h>
#include <linux/list.h>
#include <linux/workqueue.h>
#include <linux/if_vlan.h>
#include <linux/rtnetlink.h>
#include <net/switchdev.h>

static LIST_HEAD(deferred);
static DEFINE_SPINLOCK(deferred_lock);

typedef void switchdev_deferred_func_t(struct net_device *dev,
                                       const void *data);

struct switchdev_deferred_item {
        struct list_head list;
        struct net_device *dev;
        netdevice_tracker dev_tracker;
        switchdev_deferred_func_t *func;
        unsigned long data[];
};

static struct switchdev_deferred_item *switchdev_deferred_dequeue(void)
{
        struct switchdev_deferred_item *dfitem;

        spin_lock_bh(&deferred_lock);
        if (list_empty(&deferred)) {
                dfitem = NULL;
                goto unlock;
        }
        dfitem = list_first_entry(&deferred,
                                  struct switchdev_deferred_item, list);
        list_del(&dfitem->list);
unlock:
        spin_unlock_bh(&deferred_lock);
        return dfitem;
}

/**
 *      switchdev_deferred_process - Process ops in deferred queue
 *
 *      Called to flush the ops currently queued on the deferred queue.
 *      rtnl_lock must be held.
 */
void switchdev_deferred_process(void)
{
        struct switchdev_deferred_item *dfitem;

        ASSERT_RTNL();

        while ((dfitem = switchdev_deferred_dequeue())) {
                dfitem->func(dfitem->dev, dfitem->data);
                dev_put_track(dfitem->dev, &dfitem->dev_tracker);
                kfree(dfitem);
        }
}
EXPORT_SYMBOL_GPL(switchdev_deferred_process);

static void switchdev_deferred_process_work(struct work_struct *work)
{
        rtnl_lock();
        switchdev_deferred_process();
        rtnl_unlock();
}

static DECLARE_WORK(deferred_process_work, switchdev_deferred_process_work);

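/* Queue an operation for deferred execution under rtnl_lock. The
 * allocation is GFP_ATOMIC because SWITCHDEV_F_DEFER exists precisely so
 * that callers may run in atomic context; the queued op itself executes
 * later, in process context, via deferred_process_work.
 */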
static int switchdev_deferred_enqueue(struct net_device *dev,
                                      const void *data, size_t data_len,
                                      switchdev_deferred_func_t *func)
{
        struct switchdev_deferred_item *dfitem;

        dfitem = kmalloc(sizeof(*dfitem) + data_len, GFP_ATOMIC);
        if (!dfitem)
                return -ENOMEM;
        dfitem->dev = dev;
        dfitem->func = func;
        memcpy(dfitem->data, data, data_len);
        dev_hold_track(dev, &dfitem->dev_tracker, GFP_ATOMIC);
        spin_lock_bh(&deferred_lock);
        list_add_tail(&dfitem->list, &deferred);
        spin_unlock_bh(&deferred_lock);
        schedule_work(&deferred_process_work);
        return 0;
}

static int switchdev_port_attr_notify(enum switchdev_notifier_type nt,
                                      struct net_device *dev,
                                      const struct switchdev_attr *attr,
                                      struct netlink_ext_ack *extack)
{
        int err;
        int rc;

        struct switchdev_notifier_port_attr_info attr_info = {
                .attr = attr,
                .handled = false,
        };

        rc = call_switchdev_blocking_notifiers(nt, dev,
                                               &attr_info.info, extack);
        err = notifier_to_errno(rc);
        if (err) {
                WARN_ON(!attr_info.handled);
                return err;
        }

        if (!attr_info.handled)
                return -EOPNOTSUPP;

        return 0;
}

static int switchdev_port_attr_set_now(struct net_device *dev,
                                       const struct switchdev_attr *attr,
                                       struct netlink_ext_ack *extack)
{
        return switchdev_port_attr_notify(SWITCHDEV_PORT_ATTR_SET, dev, attr,
                                          extack);
}

static void switchdev_port_attr_set_deferred(struct net_device *dev,
                                             const void *data)
{
        const struct switchdev_attr *attr = data;
        int err;

        err = switchdev_port_attr_set_now(dev, attr, NULL);
        if (err && err != -EOPNOTSUPP)
                netdev_err(dev, "failed (err=%d) to set attribute (id=%d)\n",
                           err, attr->id);
        if (attr->complete)
                attr->complete(dev, err, attr->complete_priv);
}

static int switchdev_port_attr_set_defer(struct net_device *dev,
                                         const struct switchdev_attr *attr)
{
        return switchdev_deferred_enqueue(dev, attr, sizeof(*attr),
                                          switchdev_port_attr_set_deferred);
}

/**
 *      switchdev_port_attr_set - Set port attribute
 *
 *      @dev: port device
 *      @attr: attribute to set
 *      @extack: netlink extended ack, for error message propagation
 *
 *      If the SWITCHDEV_F_DEFER flag is not set, rtnl_lock must be held
 *      and the caller must not be in an atomic section.
 */
int switchdev_port_attr_set(struct net_device *dev,
                            const struct switchdev_attr *attr,
                            struct netlink_ext_ack *extack)
{
        if (attr->flags & SWITCHDEV_F_DEFER)
                return switchdev_port_attr_set_defer(dev, attr);
        ASSERT_RTNL();
        return switchdev_port_attr_set_now(dev, attr, extack);
}
EXPORT_SYMBOL_GPL(switchdev_port_attr_set);
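
/* Usage sketch (illustrative, not part of the upstream file): set the
 * bridge ageing time on behalf of a port device. The function name is
 * hypothetical; the attribute layout follows struct switchdev_attr.
 */
static inline int example_set_ageing_time(struct net_device *dev,
                                          clock_t ageing_time,
                                          struct netlink_ext_ack *extack)
{
        struct switchdev_attr attr = {
                .orig_dev = dev,
                .id = SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME,
                .u.ageing_time = ageing_time,
        };

        /* No SWITCHDEV_F_DEFER, so this takes the synchronous path and
         * the caller must hold rtnl_lock.
         */
        return switchdev_port_attr_set(dev, &attr, extack);
}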

static size_t switchdev_obj_size(const struct switchdev_obj *obj)
{
        switch (obj->id) {
        case SWITCHDEV_OBJ_ID_PORT_VLAN:
                return sizeof(struct switchdev_obj_port_vlan);
        case SWITCHDEV_OBJ_ID_PORT_MDB:
                return sizeof(struct switchdev_obj_port_mdb);
        case SWITCHDEV_OBJ_ID_HOST_MDB:
                return sizeof(struct switchdev_obj_port_mdb);
        default:
                BUG();
        }
        return 0;
}

static int switchdev_port_obj_notify(enum switchdev_notifier_type nt,
                                     struct net_device *dev,
                                     const struct switchdev_obj *obj,
                                     struct netlink_ext_ack *extack)
{
        int rc;
        int err;

        struct switchdev_notifier_port_obj_info obj_info = {
                .obj = obj,
                .handled = false,
        };

        rc = call_switchdev_blocking_notifiers(nt, dev, &obj_info.info, extack);
        err = notifier_to_errno(rc);
        if (err) {
                WARN_ON(!obj_info.handled);
                return err;
        }
        if (!obj_info.handled)
                return -EOPNOTSUPP;
        return 0;
}

static void switchdev_port_obj_add_deferred(struct net_device *dev,
                                            const void *data)
{
        const struct switchdev_obj *obj = data;
        int err;

        ASSERT_RTNL();
        err = switchdev_port_obj_notify(SWITCHDEV_PORT_OBJ_ADD,
                                        dev, obj, NULL);
        if (err && err != -EOPNOTSUPP)
                netdev_err(dev, "failed (err=%d) to add object (id=%d)\n",
                           err, obj->id);
        if (obj->complete)
                obj->complete(dev, err, obj->complete_priv);
}

static int switchdev_port_obj_add_defer(struct net_device *dev,
                                        const struct switchdev_obj *obj)
{
        return switchdev_deferred_enqueue(dev, obj, switchdev_obj_size(obj),
                                          switchdev_port_obj_add_deferred);
}

/**
 *      switchdev_port_obj_add - Add port object
 *
 *      @dev: port device
 *      @obj: object to add
 *      @extack: netlink extended ack
 *
 *      If the SWITCHDEV_F_DEFER flag is not set, rtnl_lock must be held
 *      and the caller must not be in an atomic section.
 */
int switchdev_port_obj_add(struct net_device *dev,
                           const struct switchdev_obj *obj,
                           struct netlink_ext_ack *extack)
{
        if (obj->flags & SWITCHDEV_F_DEFER)
                return switchdev_port_obj_add_defer(dev, obj);
        ASSERT_RTNL();
        return switchdev_port_obj_notify(SWITCHDEV_PORT_OBJ_ADD,
                                         dev, obj, extack);
}
EXPORT_SYMBOL_GPL(switchdev_port_obj_add);
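
/* Usage sketch (illustrative, not part of the upstream file): install a
 * VLAN on a port. SWITCHDEV_OBJ_ID_PORT_VLAN objects carry a single VID;
 * "flags" takes BRIDGE_VLAN_INFO_* values. The function name is
 * hypothetical.
 */
static inline int example_port_vlan_add(struct net_device *dev, u16 vid,
                                        u16 flags,
                                        struct netlink_ext_ack *extack)
{
        struct switchdev_obj_port_vlan vlan = {
                .obj.orig_dev = dev,
                .obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN,
                .flags = flags,
                .vid = vid,
        };

        return switchdev_port_obj_add(dev, &vlan.obj, extack);
}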

static int switchdev_port_obj_del_now(struct net_device *dev,
                                      const struct switchdev_obj *obj)
{
        return switchdev_port_obj_notify(SWITCHDEV_PORT_OBJ_DEL,
                                         dev, obj, NULL);
}

static void switchdev_port_obj_del_deferred(struct net_device *dev,
                                            const void *data)
{
        const struct switchdev_obj *obj = data;
        int err;

        err = switchdev_port_obj_del_now(dev, obj);
        if (err && err != -EOPNOTSUPP)
                netdev_err(dev, "failed (err=%d) to del object (id=%d)\n",
                           err, obj->id);
        if (obj->complete)
                obj->complete(dev, err, obj->complete_priv);
}

static int switchdev_port_obj_del_defer(struct net_device *dev,
                                        const struct switchdev_obj *obj)
{
        return switchdev_deferred_enqueue(dev, obj, switchdev_obj_size(obj),
                                          switchdev_port_obj_del_deferred);
}

/**
 *      switchdev_port_obj_del - Delete port object
 *
 *      @dev: port device
 *      @obj: object to delete
 *
 *      If the SWITCHDEV_F_DEFER flag is not set, rtnl_lock must be held
 *      and the caller must not be in an atomic section.
 */
int switchdev_port_obj_del(struct net_device *dev,
                           const struct switchdev_obj *obj)
{
        if (obj->flags & SWITCHDEV_F_DEFER)
                return switchdev_port_obj_del_defer(dev, obj);
        ASSERT_RTNL();
        return switchdev_port_obj_del_now(dev, obj);
}
EXPORT_SYMBOL_GPL(switchdev_port_obj_del);
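
/* Deletion reuses the same object descriptor as addition; e.g. the VLAN
 * installed in the sketch above would be removed with (illustrative):
 *
 *      switchdev_port_obj_del(dev, &vlan.obj);
 */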

static ATOMIC_NOTIFIER_HEAD(switchdev_notif_chain);
static BLOCKING_NOTIFIER_HEAD(switchdev_blocking_notif_chain);

/**
 *      register_switchdev_notifier - Register notifier
 *      @nb: notifier_block
 *
 *      Register switch device notifier.
 */
int register_switchdev_notifier(struct notifier_block *nb)
{
        return atomic_notifier_chain_register(&switchdev_notif_chain, nb);
}
EXPORT_SYMBOL_GPL(register_switchdev_notifier);

/**
 *      unregister_switchdev_notifier - Unregister notifier
 *      @nb: notifier_block
 *
 *      Unregister switch device notifier.
 */
int unregister_switchdev_notifier(struct notifier_block *nb)
{
        return atomic_notifier_chain_unregister(&switchdev_notif_chain, nb);
}
EXPORT_SYMBOL_GPL(unregister_switchdev_notifier);
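
/* Usage sketch (illustrative): a switch driver typically registers an
 * atomic notifier at probe time and unregisters it on removal:
 *
 *      static struct notifier_block example_switchdev_nb = {
 *              .notifier_call = example_switchdev_event,
 *      };
 *
 *      err = register_switchdev_notifier(&example_switchdev_nb);
 *
 * where "example_switchdev_event" is a hypothetical handler for events
 * such as SWITCHDEV_FDB_ADD_TO_DEVICE, which may be delivered in atomic
 * context.
 */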

/**
 *      call_switchdev_notifiers - Call notifiers
 *      @val: value passed unmodified to notifier function
 *      @dev: port device
 *      @info: notifier information data
 *      @extack: netlink extended ack
 *
 *      Call all switchdev notifier blocks registered on the atomic chain.
 */
int call_switchdev_notifiers(unsigned long val, struct net_device *dev,
                             struct switchdev_notifier_info *info,
                             struct netlink_ext_ack *extack)
{
        info->dev = dev;
        info->extack = extack;
        return atomic_notifier_call_chain(&switchdev_notif_chain, val, info);
}
EXPORT_SYMBOL_GPL(call_switchdev_notifiers);
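
/* Usage sketch (illustrative): a driver that learned an address in
 * hardware can report it to the bridge over the atomic chain:
 *
 *      struct switchdev_notifier_fdb_info fdb_info = {
 *              .addr = mac,
 *              .vid = vid,
 *              .offloaded = true,
 *      };
 *
 *      call_switchdev_notifiers(SWITCHDEV_FDB_ADD_TO_BRIDGE, dev,
 *                               &fdb_info.info, NULL);
 *
 * "mac" and "vid" stand in for the learned address and VLAN.
 */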

int register_switchdev_blocking_notifier(struct notifier_block *nb)
{
        struct blocking_notifier_head *chain = &switchdev_blocking_notif_chain;

        return blocking_notifier_chain_register(chain, nb);
}
EXPORT_SYMBOL_GPL(register_switchdev_blocking_notifier);

int unregister_switchdev_blocking_notifier(struct notifier_block *nb)
{
        struct blocking_notifier_head *chain = &switchdev_blocking_notif_chain;

        return blocking_notifier_chain_unregister(chain, nb);
}
EXPORT_SYMBOL_GPL(unregister_switchdev_blocking_notifier);

int call_switchdev_blocking_notifiers(unsigned long val, struct net_device *dev,
                                      struct switchdev_notifier_info *info,
                                      struct netlink_ext_ack *extack)
{
        info->dev = dev;
        info->extack = extack;
        return blocking_notifier_call_chain(&switchdev_blocking_notif_chain,
                                            val, info);
}
EXPORT_SYMBOL_GPL(call_switchdev_blocking_notifiers);

struct switchdev_nested_priv {
        bool (*check_cb)(const struct net_device *dev);
        bool (*foreign_dev_check_cb)(const struct net_device *dev,
                                     const struct net_device *foreign_dev);
        const struct net_device *dev;
        struct net_device *lower_dev;
};

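/* Callback for netdev_walk_all_lower_dev_rcu(): stop the walk (return 1)
 * at the first lower device that the driver recognizes as its own
 * (check_cb) and that shares a switch domain with the original @dev,
 * i.e. is not foreign to it (foreign_dev_check_cb).
 */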
static int switchdev_lower_dev_walk(struct net_device *lower_dev,
                                    struct netdev_nested_priv *priv)
{
        struct switchdev_nested_priv *switchdev_priv = priv->data;
        bool (*foreign_dev_check_cb)(const struct net_device *dev,
                                     const struct net_device *foreign_dev);
        bool (*check_cb)(const struct net_device *dev);
        const struct net_device *dev;

        check_cb = switchdev_priv->check_cb;
        foreign_dev_check_cb = switchdev_priv->foreign_dev_check_cb;
        dev = switchdev_priv->dev;

        if (check_cb(lower_dev) && !foreign_dev_check_cb(lower_dev, dev)) {
                switchdev_priv->lower_dev = lower_dev;
                return 1;
        }

        return 0;
}

static struct net_device *
switchdev_lower_dev_find(struct net_device *dev,
                         bool (*check_cb)(const struct net_device *dev),
                         bool (*foreign_dev_check_cb)(const struct net_device *dev,
                                                      const struct net_device *foreign_dev))
{
        struct switchdev_nested_priv switchdev_priv = {
                .check_cb = check_cb,
                .foreign_dev_check_cb = foreign_dev_check_cb,
                .dev = dev,
                .lower_dev = NULL,
        };
        struct netdev_nested_priv priv = {
                .data = &switchdev_priv,
        };

        netdev_walk_all_lower_dev_rcu(dev, switchdev_lower_dev_walk, &priv);

        return switchdev_priv.lower_dev;
}

static int __switchdev_handle_fdb_event_to_device(struct net_device *dev,
                struct net_device *orig_dev, unsigned long event,
                const struct switchdev_notifier_fdb_info *fdb_info,
                bool (*check_cb)(const struct net_device *dev),
                bool (*foreign_dev_check_cb)(const struct net_device *dev,
                                             const struct net_device *foreign_dev),
                int (*mod_cb)(struct net_device *dev, struct net_device *orig_dev,
                              unsigned long event, const void *ctx,
                              const struct switchdev_notifier_fdb_info *fdb_info),
                int (*lag_mod_cb)(struct net_device *dev, struct net_device *orig_dev,
                                  unsigned long event, const void *ctx,
                                  const struct switchdev_notifier_fdb_info *fdb_info))
{
        const struct switchdev_notifier_info *info = &fdb_info->info;
        struct net_device *br, *lower_dev;
        struct list_head *iter;
        int err = -EOPNOTSUPP;

        if (check_cb(dev))
                return mod_cb(dev, orig_dev, event, info->ctx, fdb_info);

        if (netif_is_lag_master(dev)) {
                if (!switchdev_lower_dev_find(dev, check_cb, foreign_dev_check_cb))
                        goto maybe_bridged_with_us;

                /* This is a LAG interface that we offload */
                if (!lag_mod_cb)
                        return -EOPNOTSUPP;

                return lag_mod_cb(dev, orig_dev, event, info->ctx, fdb_info);
        }

        /* Recurse through lower interfaces in case the FDB entry is pointing
         * towards a bridge device.
         */
        if (netif_is_bridge_master(dev)) {
                if (!switchdev_lower_dev_find(dev, check_cb, foreign_dev_check_cb))
                        return 0;

                /* This is a bridge interface that we offload */
                netdev_for_each_lower_dev(dev, lower_dev, iter) {
                        /* Do not propagate FDB entries across bridges */
                        if (netif_is_bridge_master(lower_dev))
                                continue;

                        /* Bridge ports might be either us, or LAG interfaces
                         * that we offload.
                         */
                        if (!check_cb(lower_dev) &&
                            !switchdev_lower_dev_find(lower_dev, check_cb,
                                                      foreign_dev_check_cb))
                                continue;

                        err = __switchdev_handle_fdb_event_to_device(lower_dev, orig_dev,
                                                                     event, fdb_info, check_cb,
                                                                     foreign_dev_check_cb,
                                                                     mod_cb, lag_mod_cb);
                        if (err && err != -EOPNOTSUPP)
                                return err;
                }

                return 0;
        }

maybe_bridged_with_us:
        /* Event is neither on a bridge nor a LAG. Check whether it is on an
         * interface that is in a bridge with us.
         */
        br = netdev_master_upper_dev_get_rcu(dev);
        if (!br || !netif_is_bridge_master(br))
                return 0;

        if (!switchdev_lower_dev_find(br, check_cb, foreign_dev_check_cb))
                return 0;

        return __switchdev_handle_fdb_event_to_device(br, orig_dev, event, fdb_info,
                                                      check_cb, foreign_dev_check_cb,
                                                      mod_cb, lag_mod_cb);
}

int switchdev_handle_fdb_event_to_device(struct net_device *dev, unsigned long event,
                const struct switchdev_notifier_fdb_info *fdb_info,
                bool (*check_cb)(const struct net_device *dev),
                bool (*foreign_dev_check_cb)(const struct net_device *dev,
                                             const struct net_device *foreign_dev),
                int (*mod_cb)(struct net_device *dev, struct net_device *orig_dev,
                              unsigned long event, const void *ctx,
                              const struct switchdev_notifier_fdb_info *fdb_info),
                int (*lag_mod_cb)(struct net_device *dev, struct net_device *orig_dev,
                                  unsigned long event, const void *ctx,
                                  const struct switchdev_notifier_fdb_info *fdb_info))
{
        int err;

        err = __switchdev_handle_fdb_event_to_device(dev, dev, event, fdb_info,
                                                     check_cb, foreign_dev_check_cb,
                                                     mod_cb, lag_mod_cb);
        if (err == -EOPNOTSUPP)
                err = 0;

        return err;
}
EXPORT_SYMBOL_GPL(switchdev_handle_fdb_event_to_device);
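
/* Usage sketch (illustrative, not part of the upstream file): this helper
 * is meant to be called from a driver's atomic notifier handler, with the
 * "example_*" callbacks being hypothetical driver functions:
 *
 *      struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
 *
 *      switch (event) {
 *      case SWITCHDEV_FDB_ADD_TO_DEVICE:
 *      case SWITCHDEV_FDB_DEL_TO_DEVICE:
 *              err = switchdev_handle_fdb_event_to_device(dev, event, ptr,
 *                                                         example_dev_check,
 *                                                         example_foreign_dev_check,
 *                                                         example_fdb_event, NULL);
 *              return notifier_from_errno(err);
 *      }
 *
 * Passing NULL for lag_mod_cb means FDB events on LAG uppers that we
 * offload are not handled.
 */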

static int __switchdev_handle_port_obj_add(struct net_device *dev,
                        struct switchdev_notifier_port_obj_info *port_obj_info,
                        bool (*check_cb)(const struct net_device *dev),
                        int (*add_cb)(struct net_device *dev, const void *ctx,
                                      const struct switchdev_obj *obj,
                                      struct netlink_ext_ack *extack))
{
        struct switchdev_notifier_info *info = &port_obj_info->info;
        struct netlink_ext_ack *extack;
        struct net_device *lower_dev;
        struct list_head *iter;
        int err = -EOPNOTSUPP;

        extack = switchdev_notifier_info_to_extack(info);

        if (check_cb(dev)) {
                err = add_cb(dev, info->ctx, port_obj_info->obj, extack);
                if (err != -EOPNOTSUPP)
                        port_obj_info->handled = true;
                return err;
        }

        /* Switch ports might be stacked under e.g. a LAG. Ignore the
         * unsupported devices, another driver might be able to handle them. But
         * propagate to the callers any hard errors.
         *
         * If the driver does its own bookkeeping of stacked ports, it's not
         * necessary to go through this helper.
         */
        netdev_for_each_lower_dev(dev, lower_dev, iter) {
                if (netif_is_bridge_master(lower_dev))
                        continue;

                err = __switchdev_handle_port_obj_add(lower_dev, port_obj_info,
                                                      check_cb, add_cb);
                if (err && err != -EOPNOTSUPP)
                        return err;
        }

        return err;
}

int switchdev_handle_port_obj_add(struct net_device *dev,
                        struct switchdev_notifier_port_obj_info *port_obj_info,
                        bool (*check_cb)(const struct net_device *dev),
                        int (*add_cb)(struct net_device *dev, const void *ctx,
                                      const struct switchdev_obj *obj,
                                      struct netlink_ext_ack *extack))
{
        int err;

        err = __switchdev_handle_port_obj_add(dev, port_obj_info, check_cb,
                                              add_cb);
        if (err == -EOPNOTSUPP)
                err = 0;
        return err;
}
EXPORT_SYMBOL_GPL(switchdev_handle_port_obj_add);

static int __switchdev_handle_port_obj_del(struct net_device *dev,
                        struct switchdev_notifier_port_obj_info *port_obj_info,
                        bool (*check_cb)(const struct net_device *dev),
                        int (*del_cb)(struct net_device *dev, const void *ctx,
                                      const struct switchdev_obj *obj))
{
        struct switchdev_notifier_info *info = &port_obj_info->info;
        struct net_device *lower_dev;
        struct list_head *iter;
        int err = -EOPNOTSUPP;

        if (check_cb(dev)) {
                err = del_cb(dev, info->ctx, port_obj_info->obj);
                if (err != -EOPNOTSUPP)
                        port_obj_info->handled = true;
                return err;
        }

        /* Switch ports might be stacked under e.g. a LAG. Ignore the
         * unsupported devices, another driver might be able to handle them. But
         * propagate to the callers any hard errors.
         *
         * If the driver does its own bookkeeping of stacked ports, it's not
         * necessary to go through this helper.
         */
        netdev_for_each_lower_dev(dev, lower_dev, iter) {
                if (netif_is_bridge_master(lower_dev))
                        continue;

                err = __switchdev_handle_port_obj_del(lower_dev, port_obj_info,
                                                      check_cb, del_cb);
                if (err && err != -EOPNOTSUPP)
                        return err;
        }

        return err;
}

int switchdev_handle_port_obj_del(struct net_device *dev,
                        struct switchdev_notifier_port_obj_info *port_obj_info,
                        bool (*check_cb)(const struct net_device *dev),
                        int (*del_cb)(struct net_device *dev, const void *ctx,
                                      const struct switchdev_obj *obj))
{
        int err;

        err = __switchdev_handle_port_obj_del(dev, port_obj_info, check_cb,
                                              del_cb);
        if (err == -EOPNOTSUPP)
                err = 0;
        return err;
}
EXPORT_SYMBOL_GPL(switchdev_handle_port_obj_del);

static int __switchdev_handle_port_attr_set(struct net_device *dev,
                        struct switchdev_notifier_port_attr_info *port_attr_info,
                        bool (*check_cb)(const struct net_device *dev),
                        int (*set_cb)(struct net_device *dev, const void *ctx,
                                      const struct switchdev_attr *attr,
                                      struct netlink_ext_ack *extack))
{
        struct switchdev_notifier_info *info = &port_attr_info->info;
        struct netlink_ext_ack *extack;
        struct net_device *lower_dev;
        struct list_head *iter;
        int err = -EOPNOTSUPP;

        extack = switchdev_notifier_info_to_extack(info);

        if (check_cb(dev)) {
                err = set_cb(dev, info->ctx, port_attr_info->attr, extack);
                if (err != -EOPNOTSUPP)
                        port_attr_info->handled = true;
                return err;
        }

        /* Switch ports might be stacked under e.g. a LAG. Ignore the
         * unsupported devices, another driver might be able to handle them. But
         * propagate to the callers any hard errors.
         *
         * If the driver does its own bookkeeping of stacked ports, it's not
         * necessary to go through this helper.
         */
        netdev_for_each_lower_dev(dev, lower_dev, iter) {
                if (netif_is_bridge_master(lower_dev))
                        continue;

                err = __switchdev_handle_port_attr_set(lower_dev, port_attr_info,
                                                       check_cb, set_cb);
                if (err && err != -EOPNOTSUPP)
                        return err;
        }

        return err;
}

int switchdev_handle_port_attr_set(struct net_device *dev,
                        struct switchdev_notifier_port_attr_info *port_attr_info,
                        bool (*check_cb)(const struct net_device *dev),
                        int (*set_cb)(struct net_device *dev, const void *ctx,
                                      const struct switchdev_attr *attr,
                                      struct netlink_ext_ack *extack))
{
        int err;

        err = __switchdev_handle_port_attr_set(dev, port_attr_info, check_cb,
                                               set_cb);
        if (err == -EOPNOTSUPP)
                err = 0;
        return err;
}
EXPORT_SYMBOL_GPL(switchdev_handle_port_attr_set);
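
/* Usage sketch (illustrative): the three helpers above are typically
 * called from a driver's blocking notifier handler, with "example_*"
 * callbacks being hypothetical:
 *
 *      switch (event) {
 *      case SWITCHDEV_PORT_OBJ_ADD:
 *              err = switchdev_handle_port_obj_add(dev, ptr, example_dev_check,
 *                                                  example_port_obj_add);
 *              return notifier_from_errno(err);
 *      case SWITCHDEV_PORT_OBJ_DEL:
 *              err = switchdev_handle_port_obj_del(dev, ptr, example_dev_check,
 *                                                  example_port_obj_del);
 *              return notifier_from_errno(err);
 *      case SWITCHDEV_PORT_ATTR_SET:
 *              err = switchdev_handle_port_attr_set(dev, ptr, example_dev_check,
 *                                                   example_port_attr_set);
 *              return notifier_from_errno(err);
 *      }
 */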

int switchdev_bridge_port_offload(struct net_device *brport_dev,
                                  struct net_device *dev, const void *ctx,
                                  struct notifier_block *atomic_nb,
                                  struct notifier_block *blocking_nb,
                                  bool tx_fwd_offload,
                                  struct netlink_ext_ack *extack)
{
        struct switchdev_notifier_brport_info brport_info = {
                .brport = {
                        .dev = dev,
                        .ctx = ctx,
                        .atomic_nb = atomic_nb,
                        .blocking_nb = blocking_nb,
                        .tx_fwd_offload = tx_fwd_offload,
                },
        };
        int err;

        ASSERT_RTNL();

        err = call_switchdev_blocking_notifiers(SWITCHDEV_BRPORT_OFFLOADED,
                                                brport_dev, &brport_info.info,
                                                extack);
        return notifier_to_errno(err);
}
EXPORT_SYMBOL_GPL(switchdev_bridge_port_offload);
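
/* Usage sketch (illustrative): a driver calls this when one of its ports
 * becomes a bridge port, typically from a NETDEV_CHANGEUPPER handler:
 *
 *      err = switchdev_bridge_port_offload(brport_dev, dev, priv,
 *                                          &example_switchdev_nb,
 *                                          &example_switchdev_blocking_nb,
 *                                          false, extack);
 *
 * and undoes it with switchdev_bridge_port_unoffload() when the port
 * leaves the bridge. The notifier blocks are hypothetical names for the
 * driver's atomic and blocking switchdev notifiers.
 */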

void switchdev_bridge_port_unoffload(struct net_device *brport_dev,
                                     const void *ctx,
                                     struct notifier_block *atomic_nb,
                                     struct notifier_block *blocking_nb)
{
        struct switchdev_notifier_brport_info brport_info = {
                .brport = {
                        .ctx = ctx,
                        .atomic_nb = atomic_nb,
                        .blocking_nb = blocking_nb,
                },
        };

        ASSERT_RTNL();

        call_switchdev_blocking_notifiers(SWITCHDEV_BRPORT_UNOFFLOADED,
                                          brport_dev, &brport_info.info,
                                          NULL);
}
EXPORT_SYMBOL_GPL(switchdev_bridge_port_unoffload);