/* net/dsa/dsa2.c */
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/dsa/dsa2.c - Hardware switch handling, binding version 2
 * Copyright (c) 2008-2009 Marvell Semiconductor
 * Copyright (c) 2013 Florian Fainelli <florian@openwrt.org>
 * Copyright (c) 2016 Andrew Lunn <andrew@lunn.ch>
 */
   8
   9#include <linux/device.h>
  10#include <linux/err.h>
  11#include <linux/list.h>
  12#include <linux/netdevice.h>
  13#include <linux/slab.h>
  14#include <linux/rtnetlink.h>
  15#include <linux/of.h>
  16#include <linux/of_net.h>
  17#include <net/devlink.h>
  18
  19#include "dsa_priv.h"
  20
/* NOTE(review): presumably serializes fabric (un)registration — usage is not
 * visible in this chunk; confirm against the rest of the file.
 */
static DEFINE_MUTEX(dsa2_mutex);
/* All DSA switch trees in the system, in creation order */
LIST_HEAD(dsa_tree_list);

/* Track the bridges with forwarding offload enabled */
static unsigned long dsa_fwd_offloading_bridges;
  26
  27/**
  28 * dsa_tree_notify - Execute code for all switches in a DSA switch tree.
  29 * @dst: collection of struct dsa_switch devices to notify.
  30 * @e: event, must be of type DSA_NOTIFIER_*
  31 * @v: event-specific value.
  32 *
  33 * Given a struct dsa_switch_tree, this can be used to run a function once for
  34 * each member DSA switch. The other alternative of traversing the tree is only
  35 * through its ports list, which does not uniquely list the switches.
  36 */
  37int dsa_tree_notify(struct dsa_switch_tree *dst, unsigned long e, void *v)
  38{
  39        struct raw_notifier_head *nh = &dst->nh;
  40        int err;
  41
  42        err = raw_notifier_call_chain(nh, e, v);
  43
  44        return notifier_to_errno(err);
  45}
  46
  47/**
  48 * dsa_broadcast - Notify all DSA trees in the system.
  49 * @e: event, must be of type DSA_NOTIFIER_*
  50 * @v: event-specific value.
  51 *
  52 * Can be used to notify the switching fabric of events such as cross-chip
  53 * bridging between disjoint trees (such as islands of tagger-compatible
  54 * switches bridged by an incompatible middle switch).
  55 *
  56 * WARNING: this function is not reliable during probe time, because probing
  57 * between trees is asynchronous and not all DSA trees might have probed.
  58 */
  59int dsa_broadcast(unsigned long e, void *v)
  60{
  61        struct dsa_switch_tree *dst;
  62        int err = 0;
  63
  64        list_for_each_entry(dst, &dsa_tree_list, list) {
  65                err = dsa_tree_notify(dst, e, v);
  66                if (err)
  67                        break;
  68        }
  69
  70        return err;
  71}
  72
  73/**
  74 * dsa_lag_map() - Map LAG netdev to a linear LAG ID
  75 * @dst: Tree in which to record the mapping.
  76 * @lag: Netdev that is to be mapped to an ID.
  77 *
  78 * dsa_lag_id/dsa_lag_dev can then be used to translate between the
  79 * two spaces. The size of the mapping space is determined by the
  80 * driver by setting ds->num_lag_ids. It is perfectly legal to leave
  81 * it unset if it is not needed, in which case these functions become
  82 * no-ops.
  83 */
  84void dsa_lag_map(struct dsa_switch_tree *dst, struct net_device *lag)
  85{
  86        unsigned int id;
  87
  88        if (dsa_lag_id(dst, lag) >= 0)
  89                /* Already mapped */
  90                return;
  91
  92        for (id = 0; id < dst->lags_len; id++) {
  93                if (!dsa_lag_dev(dst, id)) {
  94                        dst->lags[id] = lag;
  95                        return;
  96                }
  97        }
  98
  99        /* No IDs left, which is OK. Some drivers do not need it. The
 100         * ones that do, e.g. mv88e6xxx, will discover that dsa_lag_id
 101         * returns an error for this device when joining the LAG. The
 102         * driver can then return -EOPNOTSUPP back to DSA, which will
 103         * fall back to a software LAG.
 104         */
 105}
 106
 107/**
 108 * dsa_lag_unmap() - Remove a LAG ID mapping
 109 * @dst: Tree in which the mapping is recorded.
 110 * @lag: Netdev that was mapped.
 111 *
 112 * As there may be multiple users of the mapping, it is only removed
 113 * if there are no other references to it.
 114 */
 115void dsa_lag_unmap(struct dsa_switch_tree *dst, struct net_device *lag)
 116{
 117        struct dsa_port *dp;
 118        unsigned int id;
 119
 120        dsa_lag_foreach_port(dp, dst, lag)
 121                /* There are remaining users of this mapping */
 122                return;
 123
 124        dsa_lags_foreach_id(id, dst) {
 125                if (dsa_lag_dev(dst, id) == lag) {
 126                        dst->lags[id] = NULL;
 127                        break;
 128                }
 129        }
 130}
 131
 132static int dsa_bridge_num_find(const struct net_device *bridge_dev)
 133{
 134        struct dsa_switch_tree *dst;
 135        struct dsa_port *dp;
 136
 137        /* When preparing the offload for a port, it will have a valid
 138         * dp->bridge_dev pointer but a not yet valid dp->bridge_num.
 139         * However there might be other ports having the same dp->bridge_dev
 140         * and a valid dp->bridge_num, so just ignore this port.
 141         */
 142        list_for_each_entry(dst, &dsa_tree_list, list)
 143                list_for_each_entry(dp, &dst->ports, list)
 144                        if (dp->bridge_dev == bridge_dev &&
 145                            dp->bridge_num != -1)
 146                                return dp->bridge_num;
 147
 148        return -1;
 149}
 150
 151int dsa_bridge_num_get(const struct net_device *bridge_dev, int max)
 152{
 153        int bridge_num = dsa_bridge_num_find(bridge_dev);
 154
 155        if (bridge_num < 0) {
 156                /* First port that offloads TX forwarding for this bridge */
 157                bridge_num = find_first_zero_bit(&dsa_fwd_offloading_bridges,
 158                                                 DSA_MAX_NUM_OFFLOADING_BRIDGES);
 159                if (bridge_num >= max)
 160                        return -1;
 161
 162                set_bit(bridge_num, &dsa_fwd_offloading_bridges);
 163        }
 164
 165        return bridge_num;
 166}
 167
 168void dsa_bridge_num_put(const struct net_device *bridge_dev, int bridge_num)
 169{
 170        /* Check if the bridge is still in use, otherwise it is time
 171         * to clean it up so we can reuse this bridge_num later.
 172         */
 173        if (dsa_bridge_num_find(bridge_dev) < 0)
 174                clear_bit(bridge_num, &dsa_fwd_offloading_bridges);
 175}
 176
 177struct dsa_switch *dsa_switch_find(int tree_index, int sw_index)
 178{
 179        struct dsa_switch_tree *dst;
 180        struct dsa_port *dp;
 181
 182        list_for_each_entry(dst, &dsa_tree_list, list) {
 183                if (dst->index != tree_index)
 184                        continue;
 185
 186                list_for_each_entry(dp, &dst->ports, list) {
 187                        if (dp->ds->index != sw_index)
 188                                continue;
 189
 190                        return dp->ds;
 191                }
 192        }
 193
 194        return NULL;
 195}
 196EXPORT_SYMBOL_GPL(dsa_switch_find);
 197
 198static struct dsa_switch_tree *dsa_tree_find(int index)
 199{
 200        struct dsa_switch_tree *dst;
 201
 202        list_for_each_entry(dst, &dsa_tree_list, list)
 203                if (dst->index == index)
 204                        return dst;
 205
 206        return NULL;
 207}
 208
/* Allocate a new switch tree with the given index, publish it on the global
 * tree list and hand the initial reference to the caller. Returns NULL on
 * allocation failure.
 */
static struct dsa_switch_tree *dsa_tree_alloc(int index)
{
	struct dsa_switch_tree *dst;

	dst = kzalloc(sizeof(*dst), GFP_KERNEL);
	if (!dst)
		return NULL;

	dst->index = index;

	/* Routing table: dsa_link entries between DSA ports */
	INIT_LIST_HEAD(&dst->rtable);

	/* All ports of all member switches of this tree */
	INIT_LIST_HEAD(&dst->ports);

	INIT_LIST_HEAD(&dst->list);
	list_add_tail(&dst->list, &dsa_tree_list);

	/* Initial reference belongs to the caller */
	kref_init(&dst->refcount);

	return dst;
}
 230
 231static void dsa_tree_free(struct dsa_switch_tree *dst)
 232{
 233        if (dst->tag_ops)
 234                dsa_tag_driver_put(dst->tag_ops);
 235        list_del(&dst->list);
 236        kfree(dst);
 237}
 238
 239static struct dsa_switch_tree *dsa_tree_get(struct dsa_switch_tree *dst)
 240{
 241        if (dst)
 242                kref_get(&dst->refcount);
 243
 244        return dst;
 245}
 246
 247static struct dsa_switch_tree *dsa_tree_touch(int index)
 248{
 249        struct dsa_switch_tree *dst;
 250
 251        dst = dsa_tree_find(index);
 252        if (dst)
 253                return dsa_tree_get(dst);
 254        else
 255                return dsa_tree_alloc(index);
 256}
 257
 258static void dsa_tree_release(struct kref *ref)
 259{
 260        struct dsa_switch_tree *dst;
 261
 262        dst = container_of(ref, struct dsa_switch_tree, refcount);
 263
 264        dsa_tree_free(dst);
 265}
 266
 267static void dsa_tree_put(struct dsa_switch_tree *dst)
 268{
 269        if (dst)
 270                kref_put(&dst->refcount, dsa_tree_release);
 271}
 272
 273static struct dsa_port *dsa_tree_find_port_by_node(struct dsa_switch_tree *dst,
 274                                                   struct device_node *dn)
 275{
 276        struct dsa_port *dp;
 277
 278        list_for_each_entry(dp, &dst->ports, list)
 279                if (dp->dn == dn)
 280                        return dp;
 281
 282        return NULL;
 283}
 284
/* Find the routing table entry between @dp and @link_dp, or allocate and
 * record a new one. Returns NULL only on allocation failure.
 */
static struct dsa_link *dsa_link_touch(struct dsa_port *dp,
				       struct dsa_port *link_dp)
{
	struct dsa_switch *ds = dp->ds;
	struct dsa_switch_tree *dst;
	struct dsa_link *dl;

	dst = ds->dst;

	/* Reuse the existing link if this pair was already recorded */
	list_for_each_entry(dl, &dst->rtable, list)
		if (dl->dp == dp && dl->link_dp == link_dp)
			return dl;

	dl = kzalloc(sizeof(*dl), GFP_KERNEL);
	if (!dl)
		return NULL;

	dl->dp = dp;
	dl->link_dp = link_dp;

	INIT_LIST_HEAD(&dl->list);
	list_add_tail(&dl->list, &dst->rtable);

	return dl;
}
 310
/* Resolve the "link" phandles of a DSA port's device tree node into routing
 * table entries. Returns false when a linked port has not been enumerated
 * yet (the table is incomplete) or on allocation failure; the caller is
 * expected to retry once more switches have probed.
 */
static bool dsa_port_setup_routing_table(struct dsa_port *dp)
{
	struct dsa_switch *ds = dp->ds;
	struct dsa_switch_tree *dst = ds->dst;
	struct device_node *dn = dp->dn;
	struct of_phandle_iterator it;
	struct dsa_port *link_dp;
	struct dsa_link *dl;
	int err;

	of_for_each_phandle(&it, err, dn, "link", NULL, 0) {
		link_dp = dsa_tree_find_port_by_node(dst, it.node);
		if (!link_dp) {
			/* Drop the iterator's node reference on early exit */
			of_node_put(it.node);
			return false;
		}

		dl = dsa_link_touch(dp, link_dp);
		if (!dl) {
			of_node_put(it.node);
			return false;
		}
	}

	return true;
}
 337
 338static bool dsa_tree_setup_routing_table(struct dsa_switch_tree *dst)
 339{
 340        bool complete = true;
 341        struct dsa_port *dp;
 342
 343        list_for_each_entry(dp, &dst->ports, list) {
 344                if (dsa_port_is_dsa(dp)) {
 345                        complete = dsa_port_setup_routing_table(dp);
 346                        if (!complete)
 347                                break;
 348                }
 349        }
 350
 351        return complete;
 352}
 353
/* Return the first CPU port in the tree's port list, or NULL when the tree
 * has no CPU port at all.
 */
static struct dsa_port *dsa_tree_find_first_cpu(struct dsa_switch_tree *dst)
{
	struct dsa_port *dp;

	list_for_each_entry(dp, &dst->ports, list)
		if (dsa_port_is_cpu(dp))
			return dp;

	return NULL;
}
 364
 365/* Assign the default CPU port (the first one in the tree) to all ports of the
 366 * fabric which don't already have one as part of their own switch.
 367 */
 368static int dsa_tree_setup_default_cpu(struct dsa_switch_tree *dst)
 369{
 370        struct dsa_port *cpu_dp, *dp;
 371
 372        cpu_dp = dsa_tree_find_first_cpu(dst);
 373        if (!cpu_dp) {
 374                pr_err("DSA: tree %d has no CPU port\n", dst->index);
 375                return -EINVAL;
 376        }
 377
 378        list_for_each_entry(dp, &dst->ports, list) {
 379                if (dp->cpu_dp)
 380                        continue;
 381
 382                if (dsa_port_is_user(dp) || dsa_port_is_dsa(dp))
 383                        dp->cpu_dp = cpu_dp;
 384        }
 385
 386        return 0;
 387}
 388
/* Perform initial assignment of CPU ports to user ports and DSA links in the
 * fabric, giving preference to CPU ports local to each switch. Default to
 * using the first CPU port in the switch tree if the port does not have a CPU
 * port local to this switch.
 */
static int dsa_tree_setup_cpu_ports(struct dsa_switch_tree *dst)
{
	struct dsa_port *cpu_dp, *dp;

	list_for_each_entry(cpu_dp, &dst->ports, list) {
		if (!dsa_port_is_cpu(cpu_dp))
			continue;

		/* Assign this CPU port to every unassigned user/DSA port
		 * that lives on the same switch.
		 */
		list_for_each_entry(dp, &dst->ports, list) {
			/* Prefer a local CPU port */
			if (dp->ds != cpu_dp->ds)
				continue;

			/* Prefer the first local CPU port found */
			if (dp->cpu_dp)
				continue;

			if (dsa_port_is_user(dp) || dsa_port_is_dsa(dp))
				dp->cpu_dp = cpu_dp;
		}
	}

	/* Ports on switches without a local CPU port fall back to the first
	 * CPU port of the whole tree.
	 */
	return dsa_tree_setup_default_cpu(dst);
}
 418
 419static void dsa_tree_teardown_cpu_ports(struct dsa_switch_tree *dst)
 420{
 421        struct dsa_port *dp;
 422
 423        list_for_each_entry(dp, &dst->ports, list)
 424                if (dsa_port_is_user(dp) || dsa_port_is_dsa(dp))
 425                        dp->cpu_dp = NULL;
 426}
 427
/* Set up a single port according to its type: disable unused ports, register
 * the link and enable CPU/DSA ports, or create the slave net_device for user
 * ports. Partial setup is unwound on error. Idempotent: returns 0 early if
 * the port is already set up.
 */
static int dsa_port_setup(struct dsa_port *dp)
{
	struct devlink_port *dlp = &dp->devlink_port;
	bool dsa_port_link_registered = false;
	struct dsa_switch *ds = dp->ds;
	bool dsa_port_enabled = false;
	int err = 0;

	if (dp->setup)
		return 0;

	/* Per-port address lists used for refcounting on shared ports */
	INIT_LIST_HEAD(&dp->fdbs);
	INIT_LIST_HEAD(&dp->mdbs);

	if (ds->ops->port_setup) {
		err = ds->ops->port_setup(ds, dp->index);
		if (err)
			return err;
	}

	switch (dp->type) {
	case DSA_PORT_TYPE_UNUSED:
		dsa_port_disable(dp);
		break;
	case DSA_PORT_TYPE_CPU:
		err = dsa_port_link_register_of(dp);
		if (err)
			break;
		dsa_port_link_registered = true;

		err = dsa_port_enable(dp, NULL);
		if (err)
			break;
		dsa_port_enabled = true;

		break;
	case DSA_PORT_TYPE_DSA:
		err = dsa_port_link_register_of(dp);
		if (err)
			break;
		dsa_port_link_registered = true;

		err = dsa_port_enable(dp, NULL);
		if (err)
			break;
		dsa_port_enabled = true;

		break;
	case DSA_PORT_TYPE_USER:
		of_get_mac_address(dp->dn, dp->mac);
		err = dsa_slave_create(dp);
		if (err)
			break;

		devlink_port_type_eth_set(dlp, dp->slave);
		break;
	}

	/* Unwind in reverse order of the steps performed above */
	if (err && dsa_port_enabled)
		dsa_port_disable(dp);
	if (err && dsa_port_link_registered)
		dsa_port_link_unregister_of(dp);
	if (err) {
		if (ds->ops->port_teardown)
			ds->ops->port_teardown(ds, dp->index);
		return err;
	}

	dp->setup = true;

	return 0;
}
 500
/* Register the devlink_port backing @dp. The tree index is used as the
 * switch ID so that all ports of one fabric share the same phys_switch_id.
 */
static int dsa_port_devlink_setup(struct dsa_port *dp)
{
	struct devlink_port *dlp = &dp->devlink_port;
	struct dsa_switch_tree *dst = dp->ds->dst;
	struct devlink_port_attrs attrs = {};
	struct devlink *dl = dp->ds->devlink;
	const unsigned char *id;
	unsigned char len;
	int err;

	/* The raw bytes of the tree index serve as the switch ID */
	id = (const unsigned char *)&dst->index;
	len = sizeof(dst->index);

	attrs.phys.port_number = dp->index;
	memcpy(attrs.switch_id.id, id, len);
	attrs.switch_id.id_len = len;
	/* The port may be registered again after a teardown (see
	 * dsa_port_reinit_as_unused), so clear stale state first.
	 */
	memset(dlp, 0, sizeof(*dlp));

	switch (dp->type) {
	case DSA_PORT_TYPE_UNUSED:
		attrs.flavour = DEVLINK_PORT_FLAVOUR_UNUSED;
		break;
	case DSA_PORT_TYPE_CPU:
		attrs.flavour = DEVLINK_PORT_FLAVOUR_CPU;
		break;
	case DSA_PORT_TYPE_DSA:
		attrs.flavour = DEVLINK_PORT_FLAVOUR_DSA;
		break;
	case DSA_PORT_TYPE_USER:
		attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL;
		break;
	}

	devlink_port_attrs_set(dlp, &attrs);
	err = devlink_port_register(dl, dlp, dp->index);

	if (!err)
		dp->devlink_port_setup = true;

	return err;
}
 542
/* Undo dsa_port_setup(): disable/unregister the port according to its type
 * and release the FDB/MDB bookkeeping. No-op if the port was never set up.
 */
static void dsa_port_teardown(struct dsa_port *dp)
{
	struct devlink_port *dlp = &dp->devlink_port;
	struct dsa_switch *ds = dp->ds;
	struct dsa_mac_addr *a, *tmp;

	if (!dp->setup)
		return;

	if (ds->ops->port_teardown)
		ds->ops->port_teardown(ds, dp->index);

	devlink_port_type_clear(dlp);

	switch (dp->type) {
	case DSA_PORT_TYPE_UNUSED:
		break;
	case DSA_PORT_TYPE_CPU:
		dsa_port_disable(dp);
		dsa_port_link_unregister_of(dp);
		break;
	case DSA_PORT_TYPE_DSA:
		dsa_port_disable(dp);
		dsa_port_link_unregister_of(dp);
		break;
	case DSA_PORT_TYPE_USER:
		if (dp->slave) {
			dsa_slave_destroy(dp->slave);
			dp->slave = NULL;
		}
		break;
	}

	/* Free any remaining refcounted FDB/MDB entries */
	list_for_each_entry_safe(a, tmp, &dp->fdbs, list) {
		list_del(&a->list);
		kfree(a);
	}

	list_for_each_entry_safe(a, tmp, &dp->mdbs, list) {
		list_del(&a->list);
		kfree(a);
	}

	dp->setup = false;
}
 588
 589static void dsa_port_devlink_teardown(struct dsa_port *dp)
 590{
 591        struct devlink_port *dlp = &dp->devlink_port;
 592
 593        if (dp->devlink_port_setup)
 594                devlink_port_unregister(dlp);
 595        dp->devlink_port_setup = false;
 596}
 597
 598/* Destroy the current devlink port, and create a new one which has the UNUSED
 599 * flavour. At this point, any call to ds->ops->port_setup has been already
 600 * balanced out by a call to ds->ops->port_teardown, so we know that any
 601 * devlink port regions the driver had are now unregistered. We then call its
 602 * ds->ops->port_setup again, in order for the driver to re-create them on the
 603 * new devlink port.
 604 */
 605static int dsa_port_reinit_as_unused(struct dsa_port *dp)
 606{
 607        struct dsa_switch *ds = dp->ds;
 608        int err;
 609
 610        dsa_port_devlink_teardown(dp);
 611        dp->type = DSA_PORT_TYPE_UNUSED;
 612        err = dsa_port_devlink_setup(dp);
 613        if (err)
 614                return err;
 615
 616        if (ds->ops->port_setup) {
 617                /* On error, leave the devlink port registered,
 618                 * dsa_switch_teardown will clean it up later.
 619                 */
 620                err = ds->ops->port_setup(ds, dp->index);
 621                if (err)
 622                        return err;
 623        }
 624
 625        return 0;
 626}
 627
 628static int dsa_devlink_info_get(struct devlink *dl,
 629                                struct devlink_info_req *req,
 630                                struct netlink_ext_ack *extack)
 631{
 632        struct dsa_switch *ds = dsa_devlink_to_ds(dl);
 633
 634        if (ds->ops->devlink_info_get)
 635                return ds->ops->devlink_info_get(ds, req, extack);
 636
 637        return -EOPNOTSUPP;
 638}
 639
 640static int dsa_devlink_sb_pool_get(struct devlink *dl,
 641                                   unsigned int sb_index, u16 pool_index,
 642                                   struct devlink_sb_pool_info *pool_info)
 643{
 644        struct dsa_switch *ds = dsa_devlink_to_ds(dl);
 645
 646        if (!ds->ops->devlink_sb_pool_get)
 647                return -EOPNOTSUPP;
 648
 649        return ds->ops->devlink_sb_pool_get(ds, sb_index, pool_index,
 650                                            pool_info);
 651}
 652
 653static int dsa_devlink_sb_pool_set(struct devlink *dl, unsigned int sb_index,
 654                                   u16 pool_index, u32 size,
 655                                   enum devlink_sb_threshold_type threshold_type,
 656                                   struct netlink_ext_ack *extack)
 657{
 658        struct dsa_switch *ds = dsa_devlink_to_ds(dl);
 659
 660        if (!ds->ops->devlink_sb_pool_set)
 661                return -EOPNOTSUPP;
 662
 663        return ds->ops->devlink_sb_pool_set(ds, sb_index, pool_index, size,
 664                                            threshold_type, extack);
 665}
 666
 667static int dsa_devlink_sb_port_pool_get(struct devlink_port *dlp,
 668                                        unsigned int sb_index, u16 pool_index,
 669                                        u32 *p_threshold)
 670{
 671        struct dsa_switch *ds = dsa_devlink_port_to_ds(dlp);
 672        int port = dsa_devlink_port_to_port(dlp);
 673
 674        if (!ds->ops->devlink_sb_port_pool_get)
 675                return -EOPNOTSUPP;
 676
 677        return ds->ops->devlink_sb_port_pool_get(ds, port, sb_index,
 678                                                 pool_index, p_threshold);
 679}
 680
 681static int dsa_devlink_sb_port_pool_set(struct devlink_port *dlp,
 682                                        unsigned int sb_index, u16 pool_index,
 683                                        u32 threshold,
 684                                        struct netlink_ext_ack *extack)
 685{
 686        struct dsa_switch *ds = dsa_devlink_port_to_ds(dlp);
 687        int port = dsa_devlink_port_to_port(dlp);
 688
 689        if (!ds->ops->devlink_sb_port_pool_set)
 690                return -EOPNOTSUPP;
 691
 692        return ds->ops->devlink_sb_port_pool_set(ds, port, sb_index,
 693                                                 pool_index, threshold, extack);
 694}
 695
 696static int
 697dsa_devlink_sb_tc_pool_bind_get(struct devlink_port *dlp,
 698                                unsigned int sb_index, u16 tc_index,
 699                                enum devlink_sb_pool_type pool_type,
 700                                u16 *p_pool_index, u32 *p_threshold)
 701{
 702        struct dsa_switch *ds = dsa_devlink_port_to_ds(dlp);
 703        int port = dsa_devlink_port_to_port(dlp);
 704
 705        if (!ds->ops->devlink_sb_tc_pool_bind_get)
 706                return -EOPNOTSUPP;
 707
 708        return ds->ops->devlink_sb_tc_pool_bind_get(ds, port, sb_index,
 709                                                    tc_index, pool_type,
 710                                                    p_pool_index, p_threshold);
 711}
 712
 713static int
 714dsa_devlink_sb_tc_pool_bind_set(struct devlink_port *dlp,
 715                                unsigned int sb_index, u16 tc_index,
 716                                enum devlink_sb_pool_type pool_type,
 717                                u16 pool_index, u32 threshold,
 718                                struct netlink_ext_ack *extack)
 719{
 720        struct dsa_switch *ds = dsa_devlink_port_to_ds(dlp);
 721        int port = dsa_devlink_port_to_port(dlp);
 722
 723        if (!ds->ops->devlink_sb_tc_pool_bind_set)
 724                return -EOPNOTSUPP;
 725
 726        return ds->ops->devlink_sb_tc_pool_bind_set(ds, port, sb_index,
 727                                                    tc_index, pool_type,
 728                                                    pool_index, threshold,
 729                                                    extack);
 730}
 731
 732static int dsa_devlink_sb_occ_snapshot(struct devlink *dl,
 733                                       unsigned int sb_index)
 734{
 735        struct dsa_switch *ds = dsa_devlink_to_ds(dl);
 736
 737        if (!ds->ops->devlink_sb_occ_snapshot)
 738                return -EOPNOTSUPP;
 739
 740        return ds->ops->devlink_sb_occ_snapshot(ds, sb_index);
 741}
 742
 743static int dsa_devlink_sb_occ_max_clear(struct devlink *dl,
 744                                        unsigned int sb_index)
 745{
 746        struct dsa_switch *ds = dsa_devlink_to_ds(dl);
 747
 748        if (!ds->ops->devlink_sb_occ_max_clear)
 749                return -EOPNOTSUPP;
 750
 751        return ds->ops->devlink_sb_occ_max_clear(ds, sb_index);
 752}
 753
 754static int dsa_devlink_sb_occ_port_pool_get(struct devlink_port *dlp,
 755                                            unsigned int sb_index,
 756                                            u16 pool_index, u32 *p_cur,
 757                                            u32 *p_max)
 758{
 759        struct dsa_switch *ds = dsa_devlink_port_to_ds(dlp);
 760        int port = dsa_devlink_port_to_port(dlp);
 761
 762        if (!ds->ops->devlink_sb_occ_port_pool_get)
 763                return -EOPNOTSUPP;
 764
 765        return ds->ops->devlink_sb_occ_port_pool_get(ds, port, sb_index,
 766                                                     pool_index, p_cur, p_max);
 767}
 768
 769static int
 770dsa_devlink_sb_occ_tc_port_bind_get(struct devlink_port *dlp,
 771                                    unsigned int sb_index, u16 tc_index,
 772                                    enum devlink_sb_pool_type pool_type,
 773                                    u32 *p_cur, u32 *p_max)
 774{
 775        struct dsa_switch *ds = dsa_devlink_port_to_ds(dlp);
 776        int port = dsa_devlink_port_to_port(dlp);
 777
 778        if (!ds->ops->devlink_sb_occ_tc_port_bind_get)
 779                return -EOPNOTSUPP;
 780
 781        return ds->ops->devlink_sb_occ_tc_port_bind_get(ds, port,
 782                                                        sb_index, tc_index,
 783                                                        pool_type, p_cur,
 784                                                        p_max);
 785}
 786
/* Generic devlink ops shared by all DSA switches: each callback is one of
 * the thin wrappers above, dispatching to the optional corresponding
 * dsa_switch_ops method.
 */
static const struct devlink_ops dsa_devlink_ops = {
	.info_get			= dsa_devlink_info_get,
	.sb_pool_get			= dsa_devlink_sb_pool_get,
	.sb_pool_set			= dsa_devlink_sb_pool_set,
	.sb_port_pool_get		= dsa_devlink_sb_port_pool_get,
	.sb_port_pool_set		= dsa_devlink_sb_port_pool_set,
	.sb_tc_pool_bind_get		= dsa_devlink_sb_tc_pool_bind_get,
	.sb_tc_pool_bind_set		= dsa_devlink_sb_tc_pool_bind_set,
	.sb_occ_snapshot		= dsa_devlink_sb_occ_snapshot,
	.sb_occ_max_clear		= dsa_devlink_sb_occ_max_clear,
	.sb_occ_port_pool_get		= dsa_devlink_sb_occ_port_pool_get,
	.sb_occ_tc_port_bind_get	= dsa_devlink_sb_occ_tc_port_bind_get,
};
 800
/* If the tree's tagging protocol differs from the driver's default one, ask
 * the driver to switch each CPU port over to it. Fails when the driver
 * cannot use the requested protocol.
 */
static int dsa_switch_setup_tag_protocol(struct dsa_switch *ds)
{
	const struct dsa_device_ops *tag_ops = ds->dst->tag_ops;
	struct dsa_switch_tree *dst = ds->dst;
	int port, err;

	if (tag_ops->proto == dst->default_proto)
		return 0;

	for (port = 0; port < ds->num_ports; port++) {
		if (!dsa_is_cpu_port(ds, port))
			continue;

		/* change_tag_protocol is called under RTNL */
		rtnl_lock();
		err = ds->ops->change_tag_protocol(ds, port, tag_ops->proto);
		rtnl_unlock();
		if (err) {
			dev_err(ds->dev, "Unable to use tag protocol \"%s\": %pe\n",
				tag_ops->name, ERR_PTR(err));
			return err;
		}
	}

	return 0;
}
 826
/* One-time setup of a switch: devlink instance and per-port devlink ports,
 * switch notifier, driver ->setup(), tag protocol and the optional slave
 * MDIO bus. Errors unwind through the labels at the bottom in reverse
 * order. Idempotent: returns 0 early if already set up.
 */
static int dsa_switch_setup(struct dsa_switch *ds)
{
	struct dsa_devlink_priv *dl_priv;
	struct dsa_port *dp;
	int err;

	if (ds->setup)
		return 0;

	/* Initialize ds->phys_mii_mask before registering the slave MDIO bus
	 * driver and before ops->setup() has run, since the switch drivers and
	 * the slave MDIO bus driver rely on these values for probing PHY
	 * devices or not
	 */
	ds->phys_mii_mask |= dsa_user_ports(ds);

	/* Add the switch to devlink before calling setup, so that setup can
	 * add dpipe tables
	 */
	ds->devlink =
		devlink_alloc(&dsa_devlink_ops, sizeof(*dl_priv), ds->dev);
	if (!ds->devlink)
		return -ENOMEM;
	dl_priv = devlink_priv(ds->devlink);
	dl_priv->ds = ds;

	err = devlink_register(ds->devlink);
	if (err)
		goto free_devlink;

	/* Setup devlink port instances now, so that the switch
	 * setup() can register regions etc, against the ports
	 */
	list_for_each_entry(dp, &ds->dst->ports, list) {
		if (dp->ds == ds) {
			err = dsa_port_devlink_setup(dp);
			if (err)
				goto unregister_devlink_ports;
		}
	}

	err = dsa_switch_register_notifier(ds);
	if (err)
		goto unregister_devlink_ports;

	ds->configure_vlan_while_not_filtering = true;

	err = ds->ops->setup(ds);
	if (err < 0)
		goto unregister_notifier;

	err = dsa_switch_setup_tag_protocol(ds);
	if (err)
		goto teardown;

	devlink_params_publish(ds->devlink);

	/* Create an internal MDIO bus when the driver reads PHYs through
	 * switch registers and did not supply a bus of its own.
	 */
	if (!ds->slave_mii_bus && ds->ops->phy_read) {
		ds->slave_mii_bus = mdiobus_alloc();
		if (!ds->slave_mii_bus) {
			err = -ENOMEM;
			goto teardown;
		}

		dsa_slave_mii_bus_init(ds);

		err = mdiobus_register(ds->slave_mii_bus);
		if (err < 0)
			goto free_slave_mii_bus;
	}

	ds->setup = true;

	return 0;

free_slave_mii_bus:
	if (ds->slave_mii_bus && ds->ops->phy_read)
		mdiobus_free(ds->slave_mii_bus);
teardown:
	if (ds->ops->teardown)
		ds->ops->teardown(ds);
unregister_notifier:
	dsa_switch_unregister_notifier(ds);
unregister_devlink_ports:
	list_for_each_entry(dp, &ds->dst->ports, list)
		if (dp->ds == ds)
			dsa_port_devlink_teardown(dp);
	devlink_unregister(ds->devlink);
free_devlink:
	devlink_free(ds->devlink);
	ds->devlink = NULL;

	return err;
}
 921
/* Undo dsa_switch_setup() in reverse order. Safe to call on a switch that
 * never finished setup (guarded by ds->setup).
 */
static void dsa_switch_teardown(struct dsa_switch *ds)
{
        struct dsa_port *dp;

        if (!ds->setup)
                return;

        /* Unregister and free the slave MII bus that setup allocated when
         * the driver provides phy_read but no bus of its own.
         */
        if (ds->slave_mii_bus && ds->ops->phy_read) {
                mdiobus_unregister(ds->slave_mii_bus);
                mdiobus_free(ds->slave_mii_bus);
                ds->slave_mii_bus = NULL;
        }

        dsa_switch_unregister_notifier(ds);

        if (ds->ops->teardown)
                ds->ops->teardown(ds);

        /* Tear down this switch's devlink port instances before the devlink
         * instance itself is unregistered and freed.
         */
        if (ds->devlink) {
                list_for_each_entry(dp, &ds->dst->ports, list)
                        if (dp->ds == ds)
                                dsa_port_devlink_teardown(dp);
                devlink_unregister(ds->devlink);
                devlink_free(ds->devlink);
                ds->devlink = NULL;
        }

        ds->setup = false;
}
 951
/* First tear down the non-shared, then the shared ports. This ensures that
 * all work items scheduled by our switchdev handlers for user ports have
 * completed before we destroy the refcounting kept on the shared ports.
 */
static void dsa_tree_teardown_ports(struct dsa_switch_tree *dst)
{
        struct dsa_port *dp;

        /* Phase 1: user and unused ports. */
        list_for_each_entry(dp, &dst->ports, list)
                if (dsa_port_is_user(dp) || dsa_port_is_unused(dp))
                        dsa_port_teardown(dp);

        /* Let queued switchdev work items referencing the user ports above
         * drain before their CPU/DSA ports go away.
         */
        dsa_flush_workqueue();

        /* Phase 2: shared (DSA link and CPU) ports. */
        list_for_each_entry(dp, &dst->ports, list)
                if (dsa_port_is_dsa(dp) || dsa_port_is_cpu(dp))
                        dsa_port_teardown(dp);
}
 970
 971static void dsa_tree_teardown_switches(struct dsa_switch_tree *dst)
 972{
 973        struct dsa_port *dp;
 974
 975        list_for_each_entry(dp, &dst->ports, list)
 976                dsa_switch_teardown(dp->ds);
 977}
 978
/* Set up all member switches of the tree, then all their ports. */
static int dsa_tree_setup_switches(struct dsa_switch_tree *dst)
{
        struct dsa_port *dp;
        int err;

        /* The ports list names each switch once per port; presumably
         * dsa_switch_setup() short-circuits repeat calls via ds->setup
         * (it sets the flag on success) - confirm against its entry check.
         */
        list_for_each_entry(dp, &dst->ports, list) {
                err = dsa_switch_setup(dp->ds);
                if (err)
                        goto teardown;
        }

        list_for_each_entry(dp, &dst->ports, list) {
                err = dsa_port_setup(dp);
                if (err) {
                        /* A port that fails to come up is not fatal for the
                         * whole tree: demote it to unused and continue. Only
                         * a failure of the demotion itself unwinds all.
                         */
                        err = dsa_port_reinit_as_unused(dp);
                        if (err)
                                goto teardown;
                }
        }

        return 0;

teardown:
        dsa_tree_teardown_ports(dst);

        dsa_tree_teardown_switches(dst);

        return err;
}
1008
1009static int dsa_tree_setup_master(struct dsa_switch_tree *dst)
1010{
1011        struct dsa_port *dp;
1012        int err;
1013
1014        list_for_each_entry(dp, &dst->ports, list) {
1015                if (dsa_port_is_cpu(dp)) {
1016                        err = dsa_master_setup(dp->master, dp);
1017                        if (err)
1018                                return err;
1019                }
1020        }
1021
1022        return 0;
1023}
1024
1025static void dsa_tree_teardown_master(struct dsa_switch_tree *dst)
1026{
1027        struct dsa_port *dp;
1028
1029        list_for_each_entry(dp, &dst->ports, list)
1030                if (dsa_port_is_cpu(dp))
1031                        dsa_master_teardown(dp->master);
1032}
1033
1034static int dsa_tree_setup_lags(struct dsa_switch_tree *dst)
1035{
1036        unsigned int len = 0;
1037        struct dsa_port *dp;
1038
1039        list_for_each_entry(dp, &dst->ports, list) {
1040                if (dp->ds->num_lag_ids > len)
1041                        len = dp->ds->num_lag_ids;
1042        }
1043
1044        if (!len)
1045                return 0;
1046
1047        dst->lags = kcalloc(len, sizeof(*dst->lags), GFP_KERNEL);
1048        if (!dst->lags)
1049                return -ENOMEM;
1050
1051        dst->lags_len = len;
1052        return 0;
1053}
1054
/* Free the tree-global LAG array; kfree(NULL) is a no-op for trees that
 * never allocated one.
 */
static void dsa_tree_teardown_lags(struct dsa_switch_tree *dst)
{
        kfree(dst->lags);
}
1059
/* Bring up a whole switch tree: routing table, CPU ports, switches,
 * masters, then LAGs. Unwinds in reverse order on failure.
 */
static int dsa_tree_setup(struct dsa_switch_tree *dst)
{
        bool complete;
        int err;

        if (dst->setup) {
                pr_err("DSA: tree %d already setup! Disjoint trees?\n",
                       dst->index);
                return -EEXIST;
        }

        /* An incomplete routing table means not every member switch has
         * probed yet; return success quietly and retry when the next
         * member switch registers.
         */
        complete = dsa_tree_setup_routing_table(dst);
        if (!complete)
                return 0;

        err = dsa_tree_setup_cpu_ports(dst);
        if (err)
                return err;

        err = dsa_tree_setup_switches(dst);
        if (err)
                goto teardown_cpu_ports;

        err = dsa_tree_setup_master(dst);
        if (err)
                goto teardown_switches;

        err = dsa_tree_setup_lags(dst);
        if (err)
                goto teardown_master;

        dst->setup = true;

        pr_info("DSA: tree %d setup\n", dst->index);

        return 0;

teardown_master:
        dsa_tree_teardown_master(dst);
teardown_switches:
        /* Ports must go before switches (see dsa_tree_teardown_ports). */
        dsa_tree_teardown_ports(dst);
        dsa_tree_teardown_switches(dst);
teardown_cpu_ports:
        dsa_tree_teardown_cpu_ports(dst);

        return err;
}
1107
/* Full inverse of dsa_tree_setup(), in strict reverse order, plus freeing
 * the routing-table links. No-op on a tree that never finished setup.
 */
static void dsa_tree_teardown(struct dsa_switch_tree *dst)
{
        struct dsa_link *dl, *next;

        if (!dst->setup)
                return;

        dsa_tree_teardown_lags(dst);

        dsa_tree_teardown_master(dst);

        dsa_tree_teardown_ports(dst);

        dsa_tree_teardown_switches(dst);

        dsa_tree_teardown_cpu_ports(dst);

        /* Free the cross-chip links accumulated in the routing table. */
        list_for_each_entry_safe(dl, next, &dst->rtable, list) {
                list_del(&dl->list);
                kfree(dl);
        }

        pr_info("DSA: tree %d torn down\n", dst->index);

        dst->setup = false;
}
1134
/* Since the dsa/tagging sysfs device attribute is per master, the assumption
 * is that all DSA switches within a tree share the same tagger, otherwise
 * they would have formed disjoint trees (different "dsa,member" values).
 *
 * Returns 0 on success, -EBUSY if the master or any user port is up, or the
 * error from notifying the switches of the new protocol.
 */
int dsa_tree_change_tag_proto(struct dsa_switch_tree *dst,
                              struct net_device *master,
                              const struct dsa_device_ops *tag_ops,
                              const struct dsa_device_ops *old_tag_ops)
{
        struct dsa_notifier_tag_proto_info info;
        struct dsa_port *dp;
        int err = -EBUSY;

        /* Called from a sysfs write; restart the syscall rather than
         * sleeping on the rtnl_mutex, to avoid an AB/BA deadlock with
         * whoever holds it.
         */
        if (!rtnl_trylock())
                return restart_syscall();

        /* At the moment we don't allow changing the tag protocol under
         * traffic. The rtnl_mutex also happens to serialize concurrent
         * attempts to change the tagging protocol. If we ever lift the IFF_UP
         * restriction, there needs to be another mutex which serializes this.
         */
        if (master->flags & IFF_UP)
                goto out_unlock;

        list_for_each_entry(dp, &dst->ports, list) {
                if (!dsa_is_user_port(dp->ds, dp->index))
                        continue;

                if (dp->slave->flags & IFF_UP)
                        goto out_unlock;
        }

        info.tag_ops = tag_ops;
        err = dsa_tree_notify(dst, DSA_NOTIFIER_TAG_PROTO, &info);
        if (err)
                goto out_unwind_tagger;

        dst->tag_ops = tag_ops;

        rtnl_unlock();

        return 0;

out_unwind_tagger:
        /* Some switches may have accepted the new protocol before one
         * failed; notify them all back to the old one. Best effort.
         */
        info.tag_ops = old_tag_ops;
        dsa_tree_notify(dst, DSA_NOTIFIER_TAG_PROTO, &info);
out_unlock:
        rtnl_unlock();
        return err;
}
1185
1186static struct dsa_port *dsa_port_touch(struct dsa_switch *ds, int index)
1187{
1188        struct dsa_switch_tree *dst = ds->dst;
1189        struct dsa_port *dp;
1190
1191        list_for_each_entry(dp, &dst->ports, list)
1192                if (dp->ds == ds && dp->index == index)
1193                        return dp;
1194
1195        dp = kzalloc(sizeof(*dp), GFP_KERNEL);
1196        if (!dp)
1197                return NULL;
1198
1199        dp->ds = ds;
1200        dp->index = index;
1201        dp->bridge_num = -1;
1202
1203        INIT_LIST_HEAD(&dp->list);
1204        list_add_tail(&dp->list, &dst->ports);
1205
1206        return dp;
1207}
1208
1209static int dsa_port_parse_user(struct dsa_port *dp, const char *name)
1210{
1211        if (!name)
1212                name = "eth%d";
1213
1214        dp->type = DSA_PORT_TYPE_USER;
1215        dp->name = name;
1216
1217        return 0;
1218}
1219
/* Mark @dp as a DSA link port (switch-to-switch interconnect). */
static int dsa_port_parse_dsa(struct dsa_port *dp)
{
        dp->type = DSA_PORT_TYPE_DSA;

        return 0;
}
1226
1227static enum dsa_tag_protocol dsa_get_tag_protocol(struct dsa_port *dp,
1228                                                  struct net_device *master)
1229{
1230        enum dsa_tag_protocol tag_protocol = DSA_TAG_PROTO_NONE;
1231        struct dsa_switch *mds, *ds = dp->ds;
1232        unsigned int mdp_upstream;
1233        struct dsa_port *mdp;
1234
1235        /* It is possible to stack DSA switches onto one another when that
1236         * happens the switch driver may want to know if its tagging protocol
1237         * is going to work in such a configuration.
1238         */
1239        if (dsa_slave_dev_check(master)) {
1240                mdp = dsa_slave_to_port(master);
1241                mds = mdp->ds;
1242                mdp_upstream = dsa_upstream_port(mds, mdp->index);
1243                tag_protocol = mds->ops->get_tag_protocol(mds, mdp_upstream,
1244                                                          DSA_TAG_PROTO_NONE);
1245        }
1246
1247        /* If the master device is not itself a DSA slave in a disjoint DSA
1248         * tree, then return immediately.
1249         */
1250        return ds->ops->get_tag_protocol(ds, dp->index, tag_protocol);
1251}
1252
/* Configure @dp as a CPU port connected to @master, resolving which tagging
 * protocol the tree will use (driver preference, optionally overridden by
 * @user_protocol from the device tree).
 */
static int dsa_port_parse_cpu(struct dsa_port *dp, struct net_device *master,
                              const char *user_protocol)
{
        struct dsa_switch *ds = dp->ds;
        struct dsa_switch_tree *dst = ds->dst;
        const struct dsa_device_ops *tag_ops;
        enum dsa_tag_protocol default_proto;

        /* Find out which protocol the switch would prefer. */
        default_proto = dsa_get_tag_protocol(dp, master);
        if (dst->default_proto) {
                /* All CPU ports of the tree must agree on the driver's
                 * preferred protocol.
                 */
                if (dst->default_proto != default_proto) {
                        dev_err(ds->dev,
                                "A DSA switch tree can have only one tagging protocol\n");
                        return -EINVAL;
                }
        } else {
                dst->default_proto = default_proto;
        }

        /* See if the user wants to override that preference. */
        if (user_protocol) {
                if (!ds->ops->change_tag_protocol) {
                        dev_err(ds->dev, "Tag protocol cannot be modified\n");
                        return -EINVAL;
                }

                tag_ops = dsa_find_tagger_by_name(user_protocol);
        } else {
                tag_ops = dsa_tag_driver_get(default_proto);
        }

        if (IS_ERR(tag_ops)) {
                /* -ENOPROTOOPT presumably means the tagger module is not
                 * loaded yet; defer probing instead of failing hard.
                 */
                if (PTR_ERR(tag_ops) == -ENOPROTOOPT)
                        return -EPROBE_DEFER;

                dev_warn(ds->dev, "No tagger for this switch\n");
                return PTR_ERR(tag_ops);
        }

        if (dst->tag_ops) {
                if (dst->tag_ops != tag_ops) {
                        dev_err(ds->dev,
                                "A DSA switch tree can have only one tagging protocol\n");

                        dsa_tag_driver_put(tag_ops);
                        return -EINVAL;
                }

                /* In the case of multiple CPU ports per switch, the tagging
                 * protocol is still reference-counted only per switch tree.
                 */
                dsa_tag_driver_put(tag_ops);
        } else {
                dst->tag_ops = tag_ops;
        }

        dp->master = master;
        dp->type = DSA_PORT_TYPE_CPU;
        dsa_port_set_tag_protocol(dp, dst->tag_ops);
        dp->dst = dst;

        /* At this point, the tree may be configured to use a different
         * tagger than the one chosen by the switch driver during
         * .setup, in the case when a user selects a custom protocol
         * through the DT.
         *
         * This is resolved by syncing the driver with the tree in
         * dsa_switch_setup_tag_protocol once .setup has run and the
         * driver is ready to accept calls to .change_tag_protocol. If
         * the driver does not support the custom protocol at that
         * point, the tree is wholly rejected, thereby ensuring that the
         * tree and driver are always in agreement on the protocol to
         * use.
         */
        return 0;
}
1330
1331static int dsa_port_parse_of(struct dsa_port *dp, struct device_node *dn)
1332{
1333        struct device_node *ethernet = of_parse_phandle(dn, "ethernet", 0);
1334        const char *name = of_get_property(dn, "label", NULL);
1335        bool link = of_property_read_bool(dn, "link");
1336
1337        dp->dn = dn;
1338
1339        if (ethernet) {
1340                struct net_device *master;
1341                const char *user_protocol;
1342
1343                master = of_find_net_device_by_node(ethernet);
1344                if (!master)
1345                        return -EPROBE_DEFER;
1346
1347                user_protocol = of_get_property(dn, "dsa-tag-protocol", NULL);
1348                return dsa_port_parse_cpu(dp, master, user_protocol);
1349        }
1350
1351        if (link)
1352                return dsa_port_parse_dsa(dp);
1353
1354        return dsa_port_parse_user(dp, name);
1355}
1356
/* Walk the "ports" (or "ethernet-ports") container child of @dn and parse
 * each available port node into its struct dsa_port, keyed by "reg".
 */
static int dsa_switch_parse_ports_of(struct dsa_switch *ds,
                                     struct device_node *dn)
{
        struct device_node *ports, *port;
        struct dsa_port *dp;
        int err = 0;
        u32 reg;

        ports = of_get_child_by_name(dn, "ports");
        if (!ports) {
                /* The second possibility is "ethernet-ports" */
                ports = of_get_child_by_name(dn, "ethernet-ports");
                if (!ports) {
                        dev_err(ds->dev, "no ports child node found\n");
                        return -EINVAL;
                }
        }

        for_each_available_child_of_node(ports, port) {
                err = of_property_read_u32(port, "reg", &reg);
                if (err) {
                        /* Breaking out of the iterator early: drop the
                         * reference it holds on the current child.
                         */
                        of_node_put(port);
                        goto out_put_node;
                }

                if (reg >= ds->num_ports) {
                        dev_err(ds->dev, "port %pOF index %u exceeds num_ports (%zu)\n",
                                port, reg, ds->num_ports);
                        of_node_put(port);
                        err = -EINVAL;
                        goto out_put_node;
                }

                dp = dsa_to_port(ds, reg);

                err = dsa_port_parse_of(dp, port);
                if (err) {
                        of_node_put(port);
                        goto out_put_node;
                }
        }

out_put_node:
        /* Release the reference on the "ports" container node itself. */
        of_node_put(ports);
        return err;
}
1403
1404static int dsa_switch_parse_member_of(struct dsa_switch *ds,
1405                                      struct device_node *dn)
1406{
1407        u32 m[2] = { 0, 0 };
1408        int sz;
1409
1410        /* Don't error out if this optional property isn't found */
1411        sz = of_property_read_variable_u32_array(dn, "dsa,member", m, 2, 2);
1412        if (sz < 0 && sz != -EINVAL)
1413                return sz;
1414
1415        ds->index = m[1];
1416
1417        ds->dst = dsa_tree_touch(m[0]);
1418        if (!ds->dst)
1419                return -ENOMEM;
1420
1421        if (dsa_switch_find(ds->dst->index, ds->index)) {
1422                dev_err(ds->dev,
1423                        "A DSA switch with index %d already exists in tree %d\n",
1424                        ds->index, ds->dst->index);
1425                return -EEXIST;
1426        }
1427
1428        if (ds->dst->last_switch < ds->index)
1429                ds->dst->last_switch = ds->index;
1430
1431        return 0;
1432}
1433
1434static int dsa_switch_touch_ports(struct dsa_switch *ds)
1435{
1436        struct dsa_port *dp;
1437        int port;
1438
1439        for (port = 0; port < ds->num_ports; port++) {
1440                dp = dsa_port_touch(ds, port);
1441                if (!dp)
1442                        return -ENOMEM;
1443        }
1444
1445        return 0;
1446}
1447
/* Full device-tree parse for @ds: tree membership first, then port
 * allocation, then per-port configuration.
 */
static int dsa_switch_parse_of(struct dsa_switch *ds, struct device_node *dn)
{
        int ret;

        ret = dsa_switch_parse_member_of(ds, dn);
        if (ret)
                return ret;

        ret = dsa_switch_touch_ports(ds);
        if (ret)
                return ret;

        return dsa_switch_parse_ports_of(ds, dn);
}
1462
/* Classify a platform-data port by its name: "cpu", "dsa", or a user port
 * label.
 */
static int dsa_port_parse(struct dsa_port *dp, const char *name,
                          struct device *dev)
{
        if (!strcmp(name, "cpu")) {
                struct net_device *master;

                master = dsa_dev_to_net_device(dev);
                if (!master)
                        return -EPROBE_DEFER;

                /* NOTE(review): the reference taken by
                 * dsa_dev_to_net_device() is dropped before master is used
                 * below; presumably safe under probe-time serialization,
                 * but worth confirming the netdev cannot unregister in
                 * between.
                 */
                dev_put(master);

                return dsa_port_parse_cpu(dp, master, NULL);
        }

        if (!strcmp(name, "dsa"))
                return dsa_port_parse_dsa(dp);

        return dsa_port_parse_user(dp, name);
}
1483
1484static int dsa_switch_parse_ports(struct dsa_switch *ds,
1485                                  struct dsa_chip_data *cd)
1486{
1487        bool valid_name_found = false;
1488        struct dsa_port *dp;
1489        struct device *dev;
1490        const char *name;
1491        unsigned int i;
1492        int err;
1493
1494        for (i = 0; i < DSA_MAX_PORTS; i++) {
1495                name = cd->port_names[i];
1496                dev = cd->netdev[i];
1497                dp = dsa_to_port(ds, i);
1498
1499                if (!name)
1500                        continue;
1501
1502                err = dsa_port_parse(dp, name, dev);
1503                if (err)
1504                        return err;
1505
1506                valid_name_found = true;
1507        }
1508
1509        if (!valid_name_found && i == DSA_MAX_PORTS)
1510                return -EINVAL;
1511
1512        return 0;
1513}
1514
1515static int dsa_switch_parse(struct dsa_switch *ds, struct dsa_chip_data *cd)
1516{
1517        int err;
1518
1519        ds->cd = cd;
1520
1521        /* We don't support interconnected switches nor multiple trees via
1522         * platform data, so this is the unique switch of the tree.
1523         */
1524        ds->index = 0;
1525        ds->dst = dsa_tree_touch(0);
1526        if (!ds->dst)
1527                return -ENOMEM;
1528
1529        err = dsa_switch_touch_ports(ds);
1530        if (err)
1531                return err;
1532
1533        return dsa_switch_parse_ports(ds, cd);
1534}
1535
1536static void dsa_switch_release_ports(struct dsa_switch *ds)
1537{
1538        struct dsa_switch_tree *dst = ds->dst;
1539        struct dsa_port *dp, *next;
1540
1541        list_for_each_entry_safe(dp, next, &dst->ports, list) {
1542                if (dp->ds != ds)
1543                        continue;
1544                list_del(&dp->list);
1545                kfree(dp);
1546        }
1547}
1548
/* Probe one switch: parse its configuration (device tree takes precedence
 * over platform data) and attempt to bring up its tree. The tree only
 * fully comes up once all its member switches have probed.
 */
static int dsa_switch_probe(struct dsa_switch *ds)
{
        struct dsa_switch_tree *dst;
        struct dsa_chip_data *pdata;
        struct device_node *np;
        int err;

        if (!ds->dev)
                return -ENODEV;

        pdata = ds->dev->platform_data;
        np = ds->dev->of_node;

        if (!ds->num_ports)
                return -EINVAL;

        /* On parse failure, the ports already added to the tree for this
         * switch must be released again.
         */
        if (np) {
                err = dsa_switch_parse_of(ds, np);
                if (err)
                        dsa_switch_release_ports(ds);
        } else if (pdata) {
                err = dsa_switch_parse(ds, pdata);
                if (err)
                        dsa_switch_release_ports(ds);
        } else {
                err = -ENODEV;
        }

        if (err)
                return err;

        /* Hold a tree reference across setup; drop it (and the ports) if
         * the tree fails to come up.
         */
        dst = ds->dst;
        dsa_tree_get(dst);
        err = dsa_tree_setup(dst);
        if (err) {
                dsa_switch_release_ports(ds);
                dsa_tree_put(dst);
        }

        return err;
}
1590
1591int dsa_register_switch(struct dsa_switch *ds)
1592{
1593        int err;
1594
1595        mutex_lock(&dsa2_mutex);
1596        err = dsa_switch_probe(ds);
1597        dsa_tree_put(ds->dst);
1598        mutex_unlock(&dsa2_mutex);
1599
1600        return err;
1601}
1602EXPORT_SYMBOL_GPL(dsa_register_switch);
1603
/* Detach @ds: tear the whole tree down, free this switch's ports, and drop
 * the tree reference taken at probe time.
 */
static void dsa_switch_remove(struct dsa_switch *ds)
{
        struct dsa_switch_tree *dst = ds->dst;

        dsa_tree_teardown(dst);
        dsa_switch_release_ports(ds);
        dsa_tree_put(dst);
}
1612
/* Public unregistration entry point; serialized against register/shutdown
 * by dsa2_mutex.
 */
void dsa_unregister_switch(struct dsa_switch *ds)
{
        mutex_lock(&dsa2_mutex);
        dsa_switch_remove(ds);
        mutex_unlock(&dsa2_mutex);
}
EXPORT_SYMBOL_GPL(dsa_unregister_switch);
1620
/* If the DSA master chooses to unregister its net_device on .shutdown, DSA is
 * blocking that operation from completion, due to the dev_hold taken inside
 * netdev_upper_dev_link. Unlink the DSA slave interfaces from being uppers of
 * the DSA master, so that the system can reboot successfully.
 */
void dsa_switch_shutdown(struct dsa_switch *ds)
{
        struct net_device *master, *slave_dev;
        LIST_HEAD(unregister_list);
        struct dsa_port *dp;

        mutex_lock(&dsa2_mutex);
        rtnl_lock();

        /* Only this switch's user ports are unlinked; CPU/DSA ports keep
         * no upper/lower link of their own to undo here.
         */
        list_for_each_entry(dp, &ds->dst->ports, list) {
                if (dp->ds != ds)
                        continue;

                if (!dsa_port_is_user(dp))
                        continue;

                master = dp->cpu_dp->master;
                slave_dev = dp->slave;

                netdev_upper_dev_unlink(master, slave_dev);
                /* Just unlinking ourselves as uppers of the master is not
                 * sufficient. When the master net device unregisters, that will
                 * also call dev_close, which we will catch as NETDEV_GOING_DOWN
                 * and trigger a dev_close on our own devices (dsa_slave_close).
                 * In turn, that will call dev_mc_unsync on the master's net
                 * device. If the master is also a DSA switch port, this will
                 * trigger dsa_slave_set_rx_mode which will call dev_mc_sync on
                 * its own master. Lockdep will complain about the fact that
                 * all cascaded masters have the same dsa_master_addr_list_lock_key,
                 * which it normally would not do if the cascaded masters would
                 * be in a proper upper/lower relationship, which we've just
                 * destroyed.
                 * To suppress the lockdep warnings, let's actually unregister
                 * the DSA slave interfaces too, to avoid the nonsensical
                 * multicast address list synchronization on shutdown.
                 */
                unregister_netdevice_queue(slave_dev, &unregister_list);
        }
        /* Commit all queued unregistrations in one batch under rtnl. */
        unregister_netdevice_many(&unregister_list);

        rtnl_unlock();
        mutex_unlock(&dsa2_mutex);
}
EXPORT_SYMBOL_GPL(dsa_switch_shutdown);
1669EXPORT_SYMBOL_GPL(dsa_switch_shutdown);
1670