/* drivers/net/ethernet/mellanox/mlxsw/spectrum.c */
   1/*
   2 * drivers/net/ethernet/mellanox/mlxsw/spectrum.c
   3 * Copyright (c) 2015-2017 Mellanox Technologies. All rights reserved.
   4 * Copyright (c) 2015-2017 Jiri Pirko <jiri@mellanox.com>
   5 * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
   6 * Copyright (c) 2015 Elad Raz <eladr@mellanox.com>
   7 *
   8 * Redistribution and use in source and binary forms, with or without
   9 * modification, are permitted provided that the following conditions are met:
  10 *
  11 * 1. Redistributions of source code must retain the above copyright
  12 *    notice, this list of conditions and the following disclaimer.
  13 * 2. Redistributions in binary form must reproduce the above copyright
  14 *    notice, this list of conditions and the following disclaimer in the
  15 *    documentation and/or other materials provided with the distribution.
  16 * 3. Neither the names of the copyright holders nor the names of its
  17 *    contributors may be used to endorse or promote products derived from
  18 *    this software without specific prior written permission.
  19 *
  20 * Alternatively, this software may be distributed under the terms of the
  21 * GNU General Public License ("GPL") version 2 as published by the Free
  22 * Software Foundation.
  23 *
  24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  27 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  28 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  34 * POSSIBILITY OF SUCH DAMAGE.
  35 */
  36
  37#include <linux/kernel.h>
  38#include <linux/module.h>
  39#include <linux/types.h>
  40#include <linux/pci.h>
  41#include <linux/netdevice.h>
  42#include <linux/etherdevice.h>
  43#include <linux/ethtool.h>
  44#include <linux/slab.h>
  45#include <linux/device.h>
  46#include <linux/skbuff.h>
  47#include <linux/if_vlan.h>
  48#include <linux/if_bridge.h>
  49#include <linux/workqueue.h>
  50#include <linux/jiffies.h>
  51#include <linux/bitops.h>
  52#include <linux/list.h>
  53#include <linux/notifier.h>
  54#include <linux/dcbnl.h>
  55#include <linux/inetdevice.h>
  56#include <net/switchdev.h>
  57#include <net/pkt_cls.h>
  58#include <net/tc_act/tc_mirred.h>
  59#include <net/netevent.h>
  60
  61#include "spectrum.h"
  62#include "pci.h"
  63#include "core.h"
  64#include "reg.h"
  65#include "port.h"
  66#include "trap.h"
  67#include "txheader.h"
  68
/* Driver identification strings reported to the mlxsw core. */
static const char mlxsw_sp_driver_name[] = "mlxsw_spectrum";
static const char mlxsw_sp_driver_version[] = "1.0";
  71
/* Tx header field accessors (generated by MLXSW_ITEM32). The Tx header
 * is prepended to every packet handed to the device; see
 * mlxsw_sp_txhdr_construct() below.
 */

/* tx_hdr_version
 * Tx header version.
 * Must be set to 1.
 */
MLXSW_ITEM32(tx, hdr, version, 0x00, 28, 4);

/* tx_hdr_ctl
 * Packet control type.
 * 0 - Ethernet control (e.g. EMADs, LACP)
 * 1 - Ethernet data
 */
MLXSW_ITEM32(tx, hdr, ctl, 0x00, 26, 2);

/* tx_hdr_proto
 * Packet protocol type. Must be set to 1 (Ethernet).
 */
MLXSW_ITEM32(tx, hdr, proto, 0x00, 21, 3);

/* tx_hdr_rx_is_router
 * Packet is sent from the router. Valid for data packets only.
 */
MLXSW_ITEM32(tx, hdr, rx_is_router, 0x00, 19, 1);

/* tx_hdr_fid_valid
 * Indicates if the 'fid' field is valid and should be used for
 * forwarding lookup. Valid for data packets only.
 */
MLXSW_ITEM32(tx, hdr, fid_valid, 0x00, 16, 1);

/* tx_hdr_swid
 * Switch partition ID. Must be set to 0.
 */
MLXSW_ITEM32(tx, hdr, swid, 0x00, 12, 3);

/* tx_hdr_control_tclass
 * Indicates if the packet should use the control TClass and not one
 * of the data TClasses.
 */
MLXSW_ITEM32(tx, hdr, control_tclass, 0x00, 6, 1);

/* tx_hdr_etclass
 * Egress TClass to be used on the egress device on the egress port.
 */
MLXSW_ITEM32(tx, hdr, etclass, 0x00, 0, 4);

/* tx_hdr_port_mid
 * Destination local port for unicast packets.
 * Destination multicast ID for multicast packets.
 *
 * Control packets are directed to a specific egress port, while data
 * packets are transmitted through the CPU port (0) into the switch partition,
 * where forwarding rules are applied.
 */
MLXSW_ITEM32(tx, hdr, port_mid, 0x04, 16, 16);

/* tx_hdr_fid
 * Forwarding ID used for L2 forwarding lookup. Valid only if 'fid_valid' is
 * set, otherwise calculated based on the packet's VID using VID to FID mapping.
 * Valid for data packets only.
 */
MLXSW_ITEM32(tx, hdr, fid, 0x08, 0, 16);

/* tx_hdr_type
 * 0 - Data packets
 * 6 - Control packets
 */
MLXSW_ITEM32(tx, hdr, type, 0x0C, 0, 4);
 139
 140static void mlxsw_sp_txhdr_construct(struct sk_buff *skb,
 141                                     const struct mlxsw_tx_info *tx_info)
 142{
 143        char *txhdr = skb_push(skb, MLXSW_TXHDR_LEN);
 144
 145        memset(txhdr, 0, MLXSW_TXHDR_LEN);
 146
 147        mlxsw_tx_hdr_version_set(txhdr, MLXSW_TXHDR_VERSION_1);
 148        mlxsw_tx_hdr_ctl_set(txhdr, MLXSW_TXHDR_ETH_CTL);
 149        mlxsw_tx_hdr_proto_set(txhdr, MLXSW_TXHDR_PROTO_ETH);
 150        mlxsw_tx_hdr_swid_set(txhdr, 0);
 151        mlxsw_tx_hdr_control_tclass_set(txhdr, 1);
 152        mlxsw_tx_hdr_port_mid_set(txhdr, tx_info->local_port);
 153        mlxsw_tx_hdr_type_set(txhdr, MLXSW_TXHDR_TYPE_CONTROL);
 154}
 155
 156static int mlxsw_sp_base_mac_get(struct mlxsw_sp *mlxsw_sp)
 157{
 158        char spad_pl[MLXSW_REG_SPAD_LEN] = {0};
 159        int err;
 160
 161        err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(spad), spad_pl);
 162        if (err)
 163                return err;
 164        mlxsw_reg_spad_base_mac_memcpy_from(spad_pl, mlxsw_sp->base_mac);
 165        return 0;
 166}
 167
 168static int mlxsw_sp_span_init(struct mlxsw_sp *mlxsw_sp)
 169{
 170        int i;
 171
 172        if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_SPAN))
 173                return -EIO;
 174
 175        mlxsw_sp->span.entries_count = MLXSW_CORE_RES_GET(mlxsw_sp->core,
 176                                                          MAX_SPAN);
 177        mlxsw_sp->span.entries = kcalloc(mlxsw_sp->span.entries_count,
 178                                         sizeof(struct mlxsw_sp_span_entry),
 179                                         GFP_KERNEL);
 180        if (!mlxsw_sp->span.entries)
 181                return -ENOMEM;
 182
 183        for (i = 0; i < mlxsw_sp->span.entries_count; i++)
 184                INIT_LIST_HEAD(&mlxsw_sp->span.entries[i].bound_ports_list);
 185
 186        return 0;
 187}
 188
 189static void mlxsw_sp_span_fini(struct mlxsw_sp *mlxsw_sp)
 190{
 191        int i;
 192
 193        for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
 194                struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i];
 195
 196                WARN_ON_ONCE(!list_empty(&curr->bound_ports_list));
 197        }
 198        kfree(mlxsw_sp->span.entries);
 199}
 200
 201static struct mlxsw_sp_span_entry *
 202mlxsw_sp_span_entry_create(struct mlxsw_sp_port *port)
 203{
 204        struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
 205        struct mlxsw_sp_span_entry *span_entry;
 206        char mpat_pl[MLXSW_REG_MPAT_LEN];
 207        u8 local_port = port->local_port;
 208        int index;
 209        int i;
 210        int err;
 211
 212        /* find a free entry to use */
 213        index = -1;
 214        for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
 215                if (!mlxsw_sp->span.entries[i].used) {
 216                        index = i;
 217                        span_entry = &mlxsw_sp->span.entries[i];
 218                        break;
 219                }
 220        }
 221        if (index < 0)
 222                return NULL;
 223
 224        /* create a new port analayzer entry for local_port */
 225        mlxsw_reg_mpat_pack(mpat_pl, index, local_port, true);
 226        err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpat), mpat_pl);
 227        if (err)
 228                return NULL;
 229
 230        span_entry->used = true;
 231        span_entry->id = index;
 232        span_entry->ref_count = 1;
 233        span_entry->local_port = local_port;
 234        return span_entry;
 235}
 236
 237static void mlxsw_sp_span_entry_destroy(struct mlxsw_sp *mlxsw_sp,
 238                                        struct mlxsw_sp_span_entry *span_entry)
 239{
 240        u8 local_port = span_entry->local_port;
 241        char mpat_pl[MLXSW_REG_MPAT_LEN];
 242        int pa_id = span_entry->id;
 243
 244        mlxsw_reg_mpat_pack(mpat_pl, pa_id, local_port, false);
 245        mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpat), mpat_pl);
 246        span_entry->used = false;
 247}
 248
 249static struct mlxsw_sp_span_entry *
 250mlxsw_sp_span_entry_find(struct mlxsw_sp_port *port)
 251{
 252        struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
 253        int i;
 254
 255        for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
 256                struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i];
 257
 258                if (curr->used && curr->local_port == port->local_port)
 259                        return curr;
 260        }
 261        return NULL;
 262}
 263
 264static struct mlxsw_sp_span_entry
 265*mlxsw_sp_span_entry_get(struct mlxsw_sp_port *port)
 266{
 267        struct mlxsw_sp_span_entry *span_entry;
 268
 269        span_entry = mlxsw_sp_span_entry_find(port);
 270        if (span_entry) {
 271                /* Already exists, just take a reference */
 272                span_entry->ref_count++;
 273                return span_entry;
 274        }
 275
 276        return mlxsw_sp_span_entry_create(port);
 277}
 278
 279static int mlxsw_sp_span_entry_put(struct mlxsw_sp *mlxsw_sp,
 280                                   struct mlxsw_sp_span_entry *span_entry)
 281{
 282        WARN_ON(!span_entry->ref_count);
 283        if (--span_entry->ref_count == 0)
 284                mlxsw_sp_span_entry_destroy(mlxsw_sp, span_entry);
 285        return 0;
 286}
 287
 288static bool mlxsw_sp_span_is_egress_mirror(struct mlxsw_sp_port *port)
 289{
 290        struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
 291        struct mlxsw_sp_span_inspected_port *p;
 292        int i;
 293
 294        for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
 295                struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i];
 296
 297                list_for_each_entry(p, &curr->bound_ports_list, list)
 298                        if (p->local_port == port->local_port &&
 299                            p->type == MLXSW_SP_SPAN_EGRESS)
 300                                return true;
 301        }
 302
 303        return false;
 304}
 305
/* Convert a port MTU to the shared-buffer size, in cells, used for
 * egress mirroring: 2.5x the MTU converted to cells, plus one cell.
 */
static int mlxsw_sp_span_mtu_to_buffsize(int mtu)
{
	return MLXSW_SP_BYTES_TO_CELLS(mtu * 5 / 2) + 1;
}
 310
 311static int mlxsw_sp_span_port_mtu_update(struct mlxsw_sp_port *port, u16 mtu)
 312{
 313        struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
 314        char sbib_pl[MLXSW_REG_SBIB_LEN];
 315        int err;
 316
 317        /* If port is egress mirrored, the shared buffer size should be
 318         * updated according to the mtu value
 319         */
 320        if (mlxsw_sp_span_is_egress_mirror(port)) {
 321                mlxsw_reg_sbib_pack(sbib_pl, port->local_port,
 322                                    mlxsw_sp_span_mtu_to_buffsize(mtu));
 323                err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
 324                if (err) {
 325                        netdev_err(port->dev, "Could not update shared buffer for mirroring\n");
 326                        return err;
 327                }
 328        }
 329
 330        return 0;
 331}
 332
 333static struct mlxsw_sp_span_inspected_port *
 334mlxsw_sp_span_entry_bound_port_find(struct mlxsw_sp_port *port,
 335                                    struct mlxsw_sp_span_entry *span_entry)
 336{
 337        struct mlxsw_sp_span_inspected_port *p;
 338
 339        list_for_each_entry(p, &span_entry->bound_ports_list, list)
 340                if (port->local_port == p->local_port)
 341                        return p;
 342        return NULL;
 343}
 344
 345static int
 346mlxsw_sp_span_inspected_port_bind(struct mlxsw_sp_port *port,
 347                                  struct mlxsw_sp_span_entry *span_entry,
 348                                  enum mlxsw_sp_span_type type)
 349{
 350        struct mlxsw_sp_span_inspected_port *inspected_port;
 351        struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
 352        char mpar_pl[MLXSW_REG_MPAR_LEN];
 353        char sbib_pl[MLXSW_REG_SBIB_LEN];
 354        int pa_id = span_entry->id;
 355        int err;
 356
 357        /* if it is an egress SPAN, bind a shared buffer to it */
 358        if (type == MLXSW_SP_SPAN_EGRESS) {
 359                mlxsw_reg_sbib_pack(sbib_pl, port->local_port,
 360                                    mlxsw_sp_span_mtu_to_buffsize(port->dev->mtu));
 361                err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
 362                if (err) {
 363                        netdev_err(port->dev, "Could not create shared buffer for mirroring\n");
 364                        return err;
 365                }
 366        }
 367
 368        /* bind the port to the SPAN entry */
 369        mlxsw_reg_mpar_pack(mpar_pl, port->local_port,
 370                            (enum mlxsw_reg_mpar_i_e) type, true, pa_id);
 371        err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpar), mpar_pl);
 372        if (err)
 373                goto err_mpar_reg_write;
 374
 375        inspected_port = kzalloc(sizeof(*inspected_port), GFP_KERNEL);
 376        if (!inspected_port) {
 377                err = -ENOMEM;
 378                goto err_inspected_port_alloc;
 379        }
 380        inspected_port->local_port = port->local_port;
 381        inspected_port->type = type;
 382        list_add_tail(&inspected_port->list, &span_entry->bound_ports_list);
 383
 384        return 0;
 385
 386err_mpar_reg_write:
 387err_inspected_port_alloc:
 388        if (type == MLXSW_SP_SPAN_EGRESS) {
 389                mlxsw_reg_sbib_pack(sbib_pl, port->local_port, 0);
 390                mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
 391        }
 392        return err;
 393}
 394
/* Unbind a port from a SPAN entry: clear the MPAR hardware binding,
 * release the egress shared buffer if one was allocated, drop the
 * entry reference and free the bookkeeping record. No-op when the
 * port is not actually bound to the entry.
 */
static void
mlxsw_sp_span_inspected_port_unbind(struct mlxsw_sp_port *port,
				    struct mlxsw_sp_span_entry *span_entry,
				    enum mlxsw_sp_span_type type)
{
	struct mlxsw_sp_span_inspected_port *inspected_port;
	struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
	char mpar_pl[MLXSW_REG_MPAR_LEN];
	char sbib_pl[MLXSW_REG_SBIB_LEN];
	int pa_id = span_entry->id;

	inspected_port = mlxsw_sp_span_entry_bound_port_find(port, span_entry);
	if (!inspected_port)
		return;

	/* remove the inspected port */
	mlxsw_reg_mpar_pack(mpar_pl, port->local_port,
			    (enum mlxsw_reg_mpar_i_e) type, false, pa_id);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpar), mpar_pl);

	/* remove the SBIB buffer if it was egress SPAN */
	if (type == MLXSW_SP_SPAN_EGRESS) {
		mlxsw_reg_sbib_pack(sbib_pl, port->local_port, 0);
		mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
	}

	/* may destroy the entry if this was the last reference */
	mlxsw_sp_span_entry_put(mlxsw_sp, span_entry);

	list_del(&inspected_port->list);
	kfree(inspected_port);
}
 426
 427static int mlxsw_sp_span_mirror_add(struct mlxsw_sp_port *from,
 428                                    struct mlxsw_sp_port *to,
 429                                    enum mlxsw_sp_span_type type)
 430{
 431        struct mlxsw_sp *mlxsw_sp = from->mlxsw_sp;
 432        struct mlxsw_sp_span_entry *span_entry;
 433        int err;
 434
 435        span_entry = mlxsw_sp_span_entry_get(to);
 436        if (!span_entry)
 437                return -ENOENT;
 438
 439        netdev_dbg(from->dev, "Adding inspected port to SPAN entry %d\n",
 440                   span_entry->id);
 441
 442        err = mlxsw_sp_span_inspected_port_bind(from, span_entry, type);
 443        if (err)
 444                goto err_port_bind;
 445
 446        return 0;
 447
 448err_port_bind:
 449        mlxsw_sp_span_entry_put(mlxsw_sp, span_entry);
 450        return err;
 451}
 452
 453static void mlxsw_sp_span_mirror_remove(struct mlxsw_sp_port *from,
 454                                        struct mlxsw_sp_port *to,
 455                                        enum mlxsw_sp_span_type type)
 456{
 457        struct mlxsw_sp_span_entry *span_entry;
 458
 459        span_entry = mlxsw_sp_span_entry_find(to);
 460        if (!span_entry) {
 461                netdev_err(from->dev, "no span entry found\n");
 462                return;
 463        }
 464
 465        netdev_dbg(from->dev, "removing inspected port from SPAN entry %d\n",
 466                   span_entry->id);
 467        mlxsw_sp_span_inspected_port_unbind(from, span_entry, type);
 468}
 469
 470static int mlxsw_sp_port_admin_status_set(struct mlxsw_sp_port *mlxsw_sp_port,
 471                                          bool is_up)
 472{
 473        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
 474        char paos_pl[MLXSW_REG_PAOS_LEN];
 475
 476        mlxsw_reg_paos_pack(paos_pl, mlxsw_sp_port->local_port,
 477                            is_up ? MLXSW_PORT_ADMIN_STATUS_UP :
 478                            MLXSW_PORT_ADMIN_STATUS_DOWN);
 479        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(paos), paos_pl);
 480}
 481
 482static int mlxsw_sp_port_dev_addr_set(struct mlxsw_sp_port *mlxsw_sp_port,
 483                                      unsigned char *addr)
 484{
 485        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
 486        char ppad_pl[MLXSW_REG_PPAD_LEN];
 487
 488        mlxsw_reg_ppad_pack(ppad_pl, true, mlxsw_sp_port->local_port);
 489        mlxsw_reg_ppad_mac_memcpy_to(ppad_pl, addr);
 490        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ppad), ppad_pl);
 491}
 492
 493static int mlxsw_sp_port_dev_addr_init(struct mlxsw_sp_port *mlxsw_sp_port)
 494{
 495        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
 496        unsigned char *addr = mlxsw_sp_port->dev->dev_addr;
 497
 498        ether_addr_copy(addr, mlxsw_sp->base_mac);
 499        addr[ETH_ALEN - 1] += mlxsw_sp_port->local_port;
 500        return mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr);
 501}
 502
 503static int mlxsw_sp_port_mtu_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 mtu)
 504{
 505        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
 506        char pmtu_pl[MLXSW_REG_PMTU_LEN];
 507        int max_mtu;
 508        int err;
 509
 510        mtu += MLXSW_TXHDR_LEN + ETH_HLEN;
 511        mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, 0);
 512        err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
 513        if (err)
 514                return err;
 515        max_mtu = mlxsw_reg_pmtu_max_mtu_get(pmtu_pl);
 516
 517        if (mtu > max_mtu)
 518                return -EINVAL;
 519
 520        mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, mtu);
 521        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
 522}
 523
 524static int __mlxsw_sp_port_swid_set(struct mlxsw_sp *mlxsw_sp, u8 local_port,
 525                                    u8 swid)
 526{
 527        char pspa_pl[MLXSW_REG_PSPA_LEN];
 528
 529        mlxsw_reg_pspa_pack(pspa_pl, swid, local_port);
 530        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pspa), pspa_pl);
 531}
 532
 533static int mlxsw_sp_port_swid_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 swid)
 534{
 535        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
 536
 537        return __mlxsw_sp_port_swid_set(mlxsw_sp, mlxsw_sp_port->local_port,
 538                                        swid);
 539}
 540
 541static int mlxsw_sp_port_vp_mode_set(struct mlxsw_sp_port *mlxsw_sp_port,
 542                                     bool enable)
 543{
 544        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
 545        char svpe_pl[MLXSW_REG_SVPE_LEN];
 546
 547        mlxsw_reg_svpe_pack(svpe_pl, mlxsw_sp_port->local_port, enable);
 548        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svpe), svpe_pl);
 549}
 550
 551int mlxsw_sp_port_vid_to_fid_set(struct mlxsw_sp_port *mlxsw_sp_port,
 552                                 enum mlxsw_reg_svfa_mt mt, bool valid, u16 fid,
 553                                 u16 vid)
 554{
 555        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
 556        char svfa_pl[MLXSW_REG_SVFA_LEN];
 557
 558        mlxsw_reg_svfa_pack(svfa_pl, mlxsw_sp_port->local_port, mt, valid,
 559                            fid, vid);
 560        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svfa), svfa_pl);
 561}
 562
 563int __mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port *mlxsw_sp_port,
 564                                     u16 vid_begin, u16 vid_end,
 565                                     bool learn_enable)
 566{
 567        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
 568        char *spvmlr_pl;
 569        int err;
 570
 571        spvmlr_pl = kmalloc(MLXSW_REG_SPVMLR_LEN, GFP_KERNEL);
 572        if (!spvmlr_pl)
 573                return -ENOMEM;
 574        mlxsw_reg_spvmlr_pack(spvmlr_pl, mlxsw_sp_port->local_port, vid_begin,
 575                              vid_end, learn_enable);
 576        err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvmlr), spvmlr_pl);
 577        kfree(spvmlr_pl);
 578        return err;
 579}
 580
 581static int mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port *mlxsw_sp_port,
 582                                          u16 vid, bool learn_enable)
 583{
 584        return __mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, vid,
 585                                                learn_enable);
 586}
 587
 588static int
 589mlxsw_sp_port_system_port_mapping_set(struct mlxsw_sp_port *mlxsw_sp_port)
 590{
 591        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
 592        char sspr_pl[MLXSW_REG_SSPR_LEN];
 593
 594        mlxsw_reg_sspr_pack(sspr_pl, mlxsw_sp_port->local_port);
 595        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sspr), sspr_pl);
 596}
 597
 598static int mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp,
 599                                         u8 local_port, u8 *p_module,
 600                                         u8 *p_width, u8 *p_lane)
 601{
 602        char pmlp_pl[MLXSW_REG_PMLP_LEN];
 603        int err;
 604
 605        mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
 606        err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
 607        if (err)
 608                return err;
 609        *p_module = mlxsw_reg_pmlp_module_get(pmlp_pl, 0);
 610        *p_width = mlxsw_reg_pmlp_width_get(pmlp_pl);
 611        *p_lane = mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, 0);
 612        return 0;
 613}
 614
 615static int mlxsw_sp_port_module_map(struct mlxsw_sp *mlxsw_sp, u8 local_port,
 616                                    u8 module, u8 width, u8 lane)
 617{
 618        char pmlp_pl[MLXSW_REG_PMLP_LEN];
 619        int i;
 620
 621        mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
 622        mlxsw_reg_pmlp_width_set(pmlp_pl, width);
 623        for (i = 0; i < width; i++) {
 624                mlxsw_reg_pmlp_module_set(pmlp_pl, i, module);
 625                mlxsw_reg_pmlp_tx_lane_set(pmlp_pl, i, lane + i);  /* Rx & Tx */
 626        }
 627
 628        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
 629}
 630
 631static int mlxsw_sp_port_module_unmap(struct mlxsw_sp *mlxsw_sp, u8 local_port)
 632{
 633        char pmlp_pl[MLXSW_REG_PMLP_LEN];
 634
 635        mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
 636        mlxsw_reg_pmlp_width_set(pmlp_pl, 0);
 637        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
 638}
 639
 640static int mlxsw_sp_port_open(struct net_device *dev)
 641{
 642        struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
 643        int err;
 644
 645        err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);
 646        if (err)
 647                return err;
 648        netif_start_queue(dev);
 649        return 0;
 650}
 651
 652static int mlxsw_sp_port_stop(struct net_device *dev)
 653{
 654        struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
 655
 656        netif_stop_queue(dev);
 657        return mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
 658}
 659
/* ndo_start_xmit handler: prepend the Tx header and hand the packet
 * to the mlxsw core for transmission, updating per-CPU Tx statistics.
 * Ownership of the skb passes to the core on success; on any drop
 * path the skb is freed here.
 */
static netdev_tx_t mlxsw_sp_port_xmit(struct sk_buff *skb,
				      struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_port_pcpu_stats *pcpu_stats;
	const struct mlxsw_tx_info tx_info = {
		.local_port = mlxsw_sp_port->local_port,
		.is_emad = false,
	};
	u64 len;
	int err;

	if (mlxsw_core_skb_transmit_busy(mlxsw_sp->core, &tx_info))
		return NETDEV_TX_BUSY;

	/* make room for the Tx header, reallocating the skb if needed */
	if (unlikely(skb_headroom(skb) < MLXSW_TXHDR_LEN)) {
		struct sk_buff *skb_orig = skb;

		skb = skb_realloc_headroom(skb, MLXSW_TXHDR_LEN);
		if (!skb) {
			this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
			dev_kfree_skb_any(skb_orig);
			return NETDEV_TX_OK;
		}
		dev_consume_skb_any(skb_orig);
	}

	/* pad short frames to the minimum Ethernet length */
	if (eth_skb_pad(skb)) {
		this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
		return NETDEV_TX_OK;
	}

	mlxsw_sp_txhdr_construct(skb, &tx_info);
	/* TX header is consumed by HW on the way so we shouldn't count its
	 * bytes as being sent.
	 */
	len = skb->len - MLXSW_TXHDR_LEN;

	/* Due to a race we might fail here because of a full queue. In that
	 * unlikely case we simply drop the packet.
	 */
	err = mlxsw_core_skb_transmit(mlxsw_sp->core, skb, &tx_info);

	if (!err) {
		pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats);
		u64_stats_update_begin(&pcpu_stats->syncp);
		pcpu_stats->tx_packets++;
		pcpu_stats->tx_bytes += len;
		u64_stats_update_end(&pcpu_stats->syncp);
	} else {
		this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
		dev_kfree_skb_any(skb);
	}
	return NETDEV_TX_OK;
}
 716
/* ndo_set_rx_mode handler - intentionally empty; no driver action is
 * taken when the device's rx mode changes.
 */
static void mlxsw_sp_set_rx_mode(struct net_device *dev)
{
}
 720
 721static int mlxsw_sp_port_set_mac_address(struct net_device *dev, void *p)
 722{
 723        struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
 724        struct sockaddr *addr = p;
 725        int err;
 726
 727        if (!is_valid_ether_addr(addr->sa_data))
 728                return -EADDRNOTAVAIL;
 729
 730        err = mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr->sa_data);
 731        if (err)
 732                return err;
 733        memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
 734        return 0;
 735}
 736
 737static void mlxsw_sp_pg_buf_pack(char *pbmc_pl, int pg_index, int mtu,
 738                                 bool pause_en, bool pfc_en, u16 delay)
 739{
 740        u16 pg_size = 2 * MLXSW_SP_BYTES_TO_CELLS(mtu);
 741
 742        delay = pfc_en ? mlxsw_sp_pfc_delay_get(mtu, delay) :
 743                         MLXSW_SP_PAUSE_DELAY;
 744
 745        if (pause_en || pfc_en)
 746                mlxsw_reg_pbmc_lossless_buffer_pack(pbmc_pl, pg_index,
 747                                                    pg_size + delay, pg_size);
 748        else
 749                mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl, pg_index, pg_size);
 750}
 751
/* Configure the port's headroom buffers (PBMC register) for the given
 * MTU, priority-to-TC mapping, global pause setting and optional PFC
 * configuration. Only PGs that some priority actually maps to are
 * reconfigured; the register is read first so untouched PGs keep
 * their current values.
 */
int __mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port, int mtu,
				 u8 *prio_tc, bool pause_en,
				 struct ieee_pfc *my_pfc)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u8 pfc_en = !!my_pfc ? my_pfc->pfc_en : 0;
	u16 delay = !!my_pfc ? my_pfc->delay : 0;
	char pbmc_pl[MLXSW_REG_PBMC_LEN];
	int i, j, err;

	/* read current buffer configuration before modifying it */
	mlxsw_reg_pbmc_pack(pbmc_pl, mlxsw_sp_port->local_port, 0, 0);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl);
	if (err)
		return err;

	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		bool configure = false;
		bool pfc = false;

		/* find a priority mapped to this TC; it determines
		 * whether the PG is PFC-enabled
		 */
		for (j = 0; j < IEEE_8021QAZ_MAX_TCS; j++) {
			if (prio_tc[j] == i) {
				pfc = pfc_en & BIT(j);
				configure = true;
				break;
			}
		}

		if (!configure)
			continue;
		mlxsw_sp_pg_buf_pack(pbmc_pl, i, mtu, pause_en, pfc, delay);
	}

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl);
}
 786
 787static int mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port,
 788                                      int mtu, bool pause_en)
 789{
 790        u8 def_prio_tc[IEEE_8021QAZ_MAX_TCS] = {0};
 791        bool dcb_en = !!mlxsw_sp_port->dcb.ets;
 792        struct ieee_pfc *my_pfc;
 793        u8 *prio_tc;
 794
 795        prio_tc = dcb_en ? mlxsw_sp_port->dcb.ets->prio_tc : def_prio_tc;
 796        my_pfc = dcb_en ? mlxsw_sp_port->dcb.pfc : NULL;
 797
 798        return __mlxsw_sp_port_headroom_set(mlxsw_sp_port, mtu, prio_tc,
 799                                            pause_en, my_pfc);
 800}
 801
/* ndo_change_mtu handler. The headroom and mirroring buffers must be
 * sized for the new MTU before the port MTU itself is raised; on
 * failure each step is rolled back to the old dev->mtu in reverse
 * order.
 */
static int mlxsw_sp_port_change_mtu(struct net_device *dev, int mtu)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	bool pause_en = mlxsw_sp_port_is_pause_en(mlxsw_sp_port);
	int err;

	err = mlxsw_sp_port_headroom_set(mlxsw_sp_port, mtu, pause_en);
	if (err)
		return err;
	err = mlxsw_sp_span_port_mtu_update(mlxsw_sp_port, mtu);
	if (err)
		goto err_span_port_mtu_update;
	err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, mtu);
	if (err)
		goto err_port_mtu_set;
	dev->mtu = mtu;
	return 0;

err_port_mtu_set:
	mlxsw_sp_span_port_mtu_update(mlxsw_sp_port, dev->mtu);
err_span_port_mtu_update:
	mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en);
	return err;
}
 826
 827static int
 828mlxsw_sp_port_get_sw_stats64(const struct net_device *dev,
 829                             struct rtnl_link_stats64 *stats)
 830{
 831        struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
 832        struct mlxsw_sp_port_pcpu_stats *p;
 833        u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
 834        u32 tx_dropped = 0;
 835        unsigned int start;
 836        int i;
 837
 838        for_each_possible_cpu(i) {
 839                p = per_cpu_ptr(mlxsw_sp_port->pcpu_stats, i);
 840                do {
 841                        start = u64_stats_fetch_begin_irq(&p->syncp);
 842                        rx_packets      = p->rx_packets;
 843                        rx_bytes        = p->rx_bytes;
 844                        tx_packets      = p->tx_packets;
 845                        tx_bytes        = p->tx_bytes;
 846                } while (u64_stats_fetch_retry_irq(&p->syncp, start));
 847
 848                stats->rx_packets       += rx_packets;
 849                stats->rx_bytes         += rx_bytes;
 850                stats->tx_packets       += tx_packets;
 851                stats->tx_bytes         += tx_bytes;
 852                /* tx_dropped is u32, updated without syncp protection. */
 853                tx_dropped      += p->tx_dropped;
 854        }
 855        stats->tx_dropped       = tx_dropped;
 856        return 0;
 857}
 858
 859static bool mlxsw_sp_port_has_offload_stats(const struct net_device *dev, int attr_id)
 860{
 861        switch (attr_id) {
 862        case IFLA_OFFLOAD_XSTATS_CPU_HIT:
 863                return true;
 864        }
 865
 866        return false;
 867}
 868
 869static int mlxsw_sp_port_get_offload_stats(int attr_id, const struct net_device *dev,
 870                                           void *sp)
 871{
 872        switch (attr_id) {
 873        case IFLA_OFFLOAD_XSTATS_CPU_HIT:
 874                return mlxsw_sp_port_get_sw_stats64(dev, sp);
 875        }
 876
 877        return -EINVAL;
 878}
 879
/* Query one PPCNT counter group for the port, at the given priority/TC
 * index, and return the raw register payload in @ppcnt_pl.
 */
static int mlxsw_sp_port_get_stats_raw(struct net_device *dev, int grp,
				       int prio, char *ppcnt_pl)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;

	mlxsw_reg_ppcnt_pack(ppcnt_pl, mlxsw_sp_port->local_port, grp, prio);
	return mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ppcnt), ppcnt_pl);
}
 889
 890static int mlxsw_sp_port_get_hw_stats(struct net_device *dev,
 891                                      struct rtnl_link_stats64 *stats)
 892{
 893        char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
 894        int err;
 895
 896        err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_IEEE_8023_CNT,
 897                                          0, ppcnt_pl);
 898        if (err)
 899                goto out;
 900
 901        stats->tx_packets =
 902                mlxsw_reg_ppcnt_a_frames_transmitted_ok_get(ppcnt_pl);
 903        stats->rx_packets =
 904                mlxsw_reg_ppcnt_a_frames_received_ok_get(ppcnt_pl);
 905        stats->tx_bytes =
 906                mlxsw_reg_ppcnt_a_octets_transmitted_ok_get(ppcnt_pl);
 907        stats->rx_bytes =
 908                mlxsw_reg_ppcnt_a_octets_received_ok_get(ppcnt_pl);
 909        stats->multicast =
 910                mlxsw_reg_ppcnt_a_multicast_frames_received_ok_get(ppcnt_pl);
 911
 912        stats->rx_crc_errors =
 913                mlxsw_reg_ppcnt_a_frame_check_sequence_errors_get(ppcnt_pl);
 914        stats->rx_frame_errors =
 915                mlxsw_reg_ppcnt_a_alignment_errors_get(ppcnt_pl);
 916
 917        stats->rx_length_errors = (
 918                mlxsw_reg_ppcnt_a_in_range_length_errors_get(ppcnt_pl) +
 919                mlxsw_reg_ppcnt_a_out_of_range_length_field_get(ppcnt_pl) +
 920                mlxsw_reg_ppcnt_a_frame_too_long_errors_get(ppcnt_pl));
 921
 922        stats->rx_errors = (stats->rx_crc_errors +
 923                stats->rx_frame_errors + stats->rx_length_errors);
 924
 925out:
 926        return err;
 927}
 928
/* Delayed-work handler that refreshes the port's cached HW statistics.
 * It always reschedules itself so the cache stays current.
 */
static void update_stats_cache(struct work_struct *work)
{
	struct mlxsw_sp_port *mlxsw_sp_port =
		container_of(work, struct mlxsw_sp_port,
			     hw_stats.update_dw.work);

	/* No point querying the device while the carrier is down. */
	if (!netif_carrier_ok(mlxsw_sp_port->dev))
		goto out;

	mlxsw_sp_port_get_hw_stats(mlxsw_sp_port->dev,
				   mlxsw_sp_port->hw_stats.cache);

out:
	mlxsw_core_schedule_dw(&mlxsw_sp_port->hw_stats.update_dw,
			       MLXSW_HW_STATS_UPDATE_TIME);
}
 945
 946/* Return the stats from a cache that is updated periodically,
 947 * as this function might get called in an atomic context.
 948 */
 949static struct rtnl_link_stats64 *
 950mlxsw_sp_port_get_stats64(struct net_device *dev,
 951                          struct rtnl_link_stats64 *stats)
 952{
 953        struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
 954
 955        memcpy(stats, mlxsw_sp_port->hw_stats.cache, sizeof(*stats));
 956
 957        return stats;
 958}
 959
 960int mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid_begin,
 961                           u16 vid_end, bool is_member, bool untagged)
 962{
 963        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
 964        char *spvm_pl;
 965        int err;
 966
 967        spvm_pl = kmalloc(MLXSW_REG_SPVM_LEN, GFP_KERNEL);
 968        if (!spvm_pl)
 969                return -ENOMEM;
 970
 971        mlxsw_reg_spvm_pack(spvm_pl, mlxsw_sp_port->local_port, vid_begin,
 972                            vid_end, is_member, untagged);
 973        err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvm), spvm_pl);
 974        kfree(spvm_pl);
 975        return err;
 976}
 977
/* Transition the port to Virtual mode: install an explicit {Port, VID}
 * to FID mapping for every active VLAN, then flip the port mode. On
 * failure, only the mappings installed so far are removed.
 */
static int mlxsw_sp_port_vp_mode_trans(struct mlxsw_sp_port *mlxsw_sp_port)
{
	enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID;
	u16 vid, last_visited_vid;
	int err;

	for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) {
		err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, true, vid,
						   vid);
		if (err) {
			/* Unwind only VIDs below the one that failed. */
			last_visited_vid = vid;
			goto err_port_vid_to_fid_set;
		}
	}

	err = mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, true);
	if (err) {
		/* Every mapping was installed; unwind all active VLANs. */
		last_visited_vid = VLAN_N_VID;
		goto err_port_vid_to_fid_set;
	}

	return 0;

err_port_vid_to_fid_set:
	for_each_set_bit(vid, mlxsw_sp_port->active_vlans, last_visited_vid)
		mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, false, vid,
					     vid);
	return err;
}
1007
/* Transition the port back to VLAN mode: leave Virtual port mode and
 * drop the explicit {Port, VID} to FID mapping of every active VLAN.
 */
static int mlxsw_sp_port_vlan_mode_trans(struct mlxsw_sp_port *mlxsw_sp_port)
{
	enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID;
	u16 vid;
	int err;

	err = mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false);
	if (err)
		return err;

	for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) {
		err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, false,
						   vid, vid);
		if (err)
			return err;
	}

	return 0;
}
1027
1028static struct mlxsw_sp_port *
1029mlxsw_sp_port_vport_create(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
1030{
1031        struct mlxsw_sp_port *mlxsw_sp_vport;
1032
1033        mlxsw_sp_vport = kzalloc(sizeof(*mlxsw_sp_vport), GFP_KERNEL);
1034        if (!mlxsw_sp_vport)
1035                return NULL;
1036
1037        /* dev will be set correctly after the VLAN device is linked
1038         * with the real device. In case of bridge SELF invocation, dev
1039         * will remain as is.
1040         */
1041        mlxsw_sp_vport->dev = mlxsw_sp_port->dev;
1042        mlxsw_sp_vport->mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1043        mlxsw_sp_vport->local_port = mlxsw_sp_port->local_port;
1044        mlxsw_sp_vport->stp_state = BR_STATE_FORWARDING;
1045        mlxsw_sp_vport->lagged = mlxsw_sp_port->lagged;
1046        mlxsw_sp_vport->lag_id = mlxsw_sp_port->lag_id;
1047        mlxsw_sp_vport->vport.vid = vid;
1048
1049        list_add(&mlxsw_sp_vport->vport.list, &mlxsw_sp_port->vports_list);
1050
1051        return mlxsw_sp_vport;
1052}
1053
/* Unlink the vPort from its parent port's list and free it. */
static void mlxsw_sp_port_vport_destroy(struct mlxsw_sp_port *mlxsw_sp_vport)
{
	list_del(&mlxsw_sp_vport->vport.list);
	kfree(mlxsw_sp_vport);
}
1059
/* .ndo_vlan_rx_add_vid handler: create a vPort for @vid and install the
 * VLAN in the device. Adding the first VLAN also switches the port to
 * Virtual mode.
 */
static int mlxsw_sp_port_add_vid(struct net_device *dev,
				 __be16 __always_unused proto, u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp_port *mlxsw_sp_vport;
	bool untagged = vid == 1;
	int err;

	/* VLAN 0 is added to HW filter when device goes up, but it is
	 * reserved in our case, so simply return.
	 */
	if (!vid)
		return 0;

	/* Already configured; nothing to do. */
	if (mlxsw_sp_port_vport_find(mlxsw_sp_port, vid))
		return 0;

	mlxsw_sp_vport = mlxsw_sp_port_vport_create(mlxsw_sp_port, vid);
	if (!mlxsw_sp_vport)
		return -ENOMEM;

	/* When adding the first VLAN interface on a bridged port we need to
	 * transition all the active 802.1Q bridge VLANs to use explicit
	 * {Port, VID} to FID mappings and set the port's mode to Virtual mode.
	 */
	if (list_is_singular(&mlxsw_sp_port->vports_list)) {
		err = mlxsw_sp_port_vp_mode_trans(mlxsw_sp_port);
		if (err)
			goto err_port_vp_mode_trans;
	}

	err = mlxsw_sp_port_vlan_set(mlxsw_sp_vport, vid, vid, true, untagged);
	if (err)
		goto err_port_add_vid;

	return 0;

err_port_add_vid:
	/* Undo the mode transition only if this vPort triggered it. */
	if (list_is_singular(&mlxsw_sp_port->vports_list))
		mlxsw_sp_port_vlan_mode_trans(mlxsw_sp_port);
err_port_vp_mode_trans:
	mlxsw_sp_port_vport_destroy(mlxsw_sp_vport);
	return err;
}
1104
/* .ndo_vlan_rx_kill_vid handler: remove the VLAN from the device and
 * destroy its vPort. Removing the last VLAN also switches the port back
 * to VLAN mode.
 */
static int mlxsw_sp_port_kill_vid(struct net_device *dev,
				  __be16 __always_unused proto, u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp_port *mlxsw_sp_vport;
	struct mlxsw_sp_fid *f;

	/* VLAN 0 is removed from HW filter when device goes down, but
	 * it is reserved in our case, so simply return.
	 */
	if (!vid)
		return 0;

	mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
	if (WARN_ON(!mlxsw_sp_vport))
		return 0;

	/* Teardown path: errors are ignored here. */
	mlxsw_sp_port_vlan_set(mlxsw_sp_vport, vid, vid, false, false);

	/* Drop FID reference. If this was the last reference the
	 * resources will be freed.
	 */
	f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport);
	if (f && !WARN_ON(!f->leave))
		f->leave(mlxsw_sp_vport);

	/* When removing the last VLAN interface on a bridged port we need to
	 * transition all active 802.1Q bridge VLANs to use VID to FID
	 * mappings and set port's mode to VLAN mode.
	 */
	if (list_is_singular(&mlxsw_sp_port->vports_list))
		mlxsw_sp_port_vlan_mode_trans(mlxsw_sp_port);

	mlxsw_sp_port_vport_destroy(mlxsw_sp_vport);

	return 0;
}
1142
1143static int mlxsw_sp_port_get_phys_port_name(struct net_device *dev, char *name,
1144                                            size_t len)
1145{
1146        struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1147        u8 module = mlxsw_sp_port->mapping.module;
1148        u8 width = mlxsw_sp_port->mapping.width;
1149        u8 lane = mlxsw_sp_port->mapping.lane;
1150        int err;
1151
1152        if (!mlxsw_sp_port->split)
1153                err = snprintf(name, len, "p%d", module + 1);
1154        else
1155                err = snprintf(name, len, "p%ds%d", module + 1,
1156                               lane / width);
1157
1158        if (err >= len)
1159                return -EINVAL;
1160
1161        return 0;
1162}
1163
1164static struct mlxsw_sp_port_mall_tc_entry *
1165mlxsw_sp_port_mall_tc_entry_find(struct mlxsw_sp_port *port,
1166                                 unsigned long cookie) {
1167        struct mlxsw_sp_port_mall_tc_entry *mall_tc_entry;
1168
1169        list_for_each_entry(mall_tc_entry, &port->mall_tc_list, list)
1170                if (mall_tc_entry->cookie == cookie)
1171                        return mall_tc_entry;
1172
1173        return NULL;
1174}
1175
1176static int
1177mlxsw_sp_port_add_cls_matchall_mirror(struct mlxsw_sp_port *mlxsw_sp_port,
1178                                      struct mlxsw_sp_port_mall_mirror_tc_entry *mirror,
1179                                      const struct tc_action *a,
1180                                      bool ingress)
1181{
1182        struct net *net = dev_net(mlxsw_sp_port->dev);
1183        enum mlxsw_sp_span_type span_type;
1184        struct mlxsw_sp_port *to_port;
1185        struct net_device *to_dev;
1186        int ifindex;
1187
1188        ifindex = tcf_mirred_ifindex(a);
1189        to_dev = __dev_get_by_index(net, ifindex);
1190        if (!to_dev) {
1191                netdev_err(mlxsw_sp_port->dev, "Could not find requested device\n");
1192                return -EINVAL;
1193        }
1194
1195        if (!mlxsw_sp_port_dev_check(to_dev)) {
1196                netdev_err(mlxsw_sp_port->dev, "Cannot mirror to a non-spectrum port");
1197                return -EOPNOTSUPP;
1198        }
1199        to_port = netdev_priv(to_dev);
1200
1201        mirror->to_local_port = to_port->local_port;
1202        mirror->ingress = ingress;
1203        span_type = ingress ? MLXSW_SP_SPAN_INGRESS : MLXSW_SP_SPAN_EGRESS;
1204        return mlxsw_sp_span_mirror_add(mlxsw_sp_port, to_port, span_type);
1205}
1206
1207static void
1208mlxsw_sp_port_del_cls_matchall_mirror(struct mlxsw_sp_port *mlxsw_sp_port,
1209                                      struct mlxsw_sp_port_mall_mirror_tc_entry *mirror)
1210{
1211        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1212        enum mlxsw_sp_span_type span_type;
1213        struct mlxsw_sp_port *to_port;
1214
1215        to_port = mlxsw_sp->ports[mirror->to_local_port];
1216        span_type = mirror->ingress ?
1217                        MLXSW_SP_SPAN_INGRESS : MLXSW_SP_SPAN_EGRESS;
1218        mlxsw_sp_span_mirror_remove(mlxsw_sp_port, to_port, span_type);
1219}
1220
/* Offload a TC matchall classifier to the port. Only a single
 * egress-mirror action on an ETH_P_ALL classifier is supported.
 */
static int mlxsw_sp_port_add_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port,
					  __be16 protocol,
					  struct tc_cls_matchall_offload *cls,
					  bool ingress)
{
	struct mlxsw_sp_port_mall_tc_entry *mall_tc_entry;
	const struct tc_action *a;
	int err;

	if (!tc_single_action(cls->exts)) {
		netdev_err(mlxsw_sp_port->dev, "only singular actions are supported\n");
		return -EOPNOTSUPP;
	}

	mall_tc_entry = kzalloc(sizeof(*mall_tc_entry), GFP_KERNEL);
	if (!mall_tc_entry)
		return -ENOMEM;
	/* The cookie identifies the entry on later destroy calls. */
	mall_tc_entry->cookie = cls->cookie;

	a = list_first_entry(&cls->exts->actions, struct tc_action, list);

	if (is_tcf_mirred_egress_mirror(a) && protocol == htons(ETH_P_ALL)) {
		struct mlxsw_sp_port_mall_mirror_tc_entry *mirror;

		mall_tc_entry->type = MLXSW_SP_PORT_MALL_MIRROR;
		mirror = &mall_tc_entry->mirror;
		err = mlxsw_sp_port_add_cls_matchall_mirror(mlxsw_sp_port,
							    mirror, a, ingress);
	} else {
		err = -EOPNOTSUPP;
	}

	if (err)
		goto err_add_action;

	list_add_tail(&mall_tc_entry->list, &mlxsw_sp_port->mall_tc_list);
	return 0;

err_add_action:
	kfree(mall_tc_entry);
	return err;
}
1263
1264static void mlxsw_sp_port_del_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port,
1265                                           struct tc_cls_matchall_offload *cls)
1266{
1267        struct mlxsw_sp_port_mall_tc_entry *mall_tc_entry;
1268
1269        mall_tc_entry = mlxsw_sp_port_mall_tc_entry_find(mlxsw_sp_port,
1270                                                         cls->cookie);
1271        if (!mall_tc_entry) {
1272                netdev_dbg(mlxsw_sp_port->dev, "tc entry not found on port\n");
1273                return;
1274        }
1275        list_del(&mall_tc_entry->list);
1276
1277        switch (mall_tc_entry->type) {
1278        case MLXSW_SP_PORT_MALL_MIRROR:
1279                mlxsw_sp_port_del_cls_matchall_mirror(mlxsw_sp_port,
1280                                                      &mall_tc_entry->mirror);
1281                break;
1282        default:
1283                WARN_ON(1);
1284        }
1285
1286        kfree(mall_tc_entry);
1287}
1288
/* .ndo_setup_tc handler: dispatch matchall and flower classifier
 * commands to their offload implementations. Anything else is rejected
 * with -EOPNOTSUPP.
 */
static int mlxsw_sp_setup_tc(struct net_device *dev, u32 handle,
			     __be16 proto, struct tc_to_netdev *tc)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	/* The qdisc handle's major number tells ingress from egress. */
	bool ingress = TC_H_MAJ(handle) == TC_H_MAJ(TC_H_INGRESS);

	switch (tc->type) {
	case TC_SETUP_MATCHALL:
		switch (tc->cls_mall->command) {
		case TC_CLSMATCHALL_REPLACE:
			return mlxsw_sp_port_add_cls_matchall(mlxsw_sp_port,
							      proto,
							      tc->cls_mall,
							      ingress);
		case TC_CLSMATCHALL_DESTROY:
			mlxsw_sp_port_del_cls_matchall(mlxsw_sp_port,
						       tc->cls_mall);
			return 0;
		default:
			return -EINVAL;
		}
	case TC_SETUP_CLSFLOWER:
		switch (tc->cls_flower->command) {
		case TC_CLSFLOWER_REPLACE:
			return mlxsw_sp_flower_replace(mlxsw_sp_port, ingress,
						       proto, tc->cls_flower);
		case TC_CLSFLOWER_DESTROY:
			mlxsw_sp_flower_destroy(mlxsw_sp_port, ingress,
						tc->cls_flower);
			return 0;
		default:
			return -EOPNOTSUPP;
		}
	}

	return -EOPNOTSUPP;
}
1326
/* Netdevice operations for Spectrum ports; bridge/FDB ops are handled
 * through the generic switchdev helpers.
 */
static const struct net_device_ops mlxsw_sp_port_netdev_ops = {
	.ndo_size		= sizeof(struct net_device_ops),
	.ndo_open		= mlxsw_sp_port_open,
	.ndo_stop		= mlxsw_sp_port_stop,
	.ndo_start_xmit		= mlxsw_sp_port_xmit,
	.ndo_setup_tc           = mlxsw_sp_setup_tc,
	.ndo_set_rx_mode	= mlxsw_sp_set_rx_mode,
	.ndo_set_mac_address	= mlxsw_sp_port_set_mac_address,
	.ndo_change_mtu		= mlxsw_sp_port_change_mtu,
	.ndo_get_stats64	= mlxsw_sp_port_get_stats64,
	.extended.ndo_has_offload_stats = mlxsw_sp_port_has_offload_stats,
	.extended.ndo_get_offload_stats = mlxsw_sp_port_get_offload_stats,
	.ndo_vlan_rx_add_vid	= mlxsw_sp_port_add_vid,
	.ndo_vlan_rx_kill_vid	= mlxsw_sp_port_kill_vid,
	.ndo_fdb_add		= switchdev_port_fdb_add,
	.ndo_fdb_del		= switchdev_port_fdb_del,
	.extended.ndo_fdb_dump	= switchdev_port_fdb_dump,
	.ndo_bridge_setlink	= switchdev_port_bridge_setlink,
	.ndo_bridge_getlink	= switchdev_port_bridge_getlink,
	.ndo_bridge_dellink	= switchdev_port_bridge_dellink,
	.extended.ndo_get_phys_port_name	= mlxsw_sp_port_get_phys_port_name,
};
1349
/* ethtool get_drvinfo: report driver name/version, firmware revision
 * and the underlying bus device name.
 */
static void mlxsw_sp_port_get_drvinfo(struct net_device *dev,
				      struct ethtool_drvinfo *drvinfo)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;

	strlcpy(drvinfo->driver, mlxsw_sp_driver_name, sizeof(drvinfo->driver));
	strlcpy(drvinfo->version, mlxsw_sp_driver_version,
		sizeof(drvinfo->version));
	snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
		 "%d.%d.%d",
		 mlxsw_sp->bus_info->fw_rev.major,
		 mlxsw_sp->bus_info->fw_rev.minor,
		 mlxsw_sp->bus_info->fw_rev.subminor);
	strlcpy(drvinfo->bus_info, mlxsw_sp->bus_info->device_name,
		sizeof(drvinfo->bus_info));
}
1367
1368static void mlxsw_sp_port_get_pauseparam(struct net_device *dev,
1369                                         struct ethtool_pauseparam *pause)
1370{
1371        struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1372
1373        pause->rx_pause = mlxsw_sp_port->link.rx_pause;
1374        pause->tx_pause = mlxsw_sp_port->link.tx_pause;
1375}
1376
/* Program the port's global PAUSE admin state (PPRX/PPTX) via the PFCC
 * register.
 */
static int mlxsw_sp_port_pause_set(struct mlxsw_sp_port *mlxsw_sp_port,
				   struct ethtool_pauseparam *pause)
{
	char pfcc_pl[MLXSW_REG_PFCC_LEN];

	mlxsw_reg_pfcc_pack(pfcc_pl, mlxsw_sp_port->local_port);
	mlxsw_reg_pfcc_pprx_set(pfcc_pl, pause->rx_pause);
	mlxsw_reg_pfcc_pptx_set(pfcc_pl, pause->tx_pause);

	return mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pfcc),
			       pfcc_pl);
}
1389
/* ethtool set_pauseparam: validate the request, resize the headroom for
 * the new PAUSE state, then program the hardware. The headroom change
 * is rolled back if the register write fails.
 */
static int mlxsw_sp_port_set_pauseparam(struct net_device *dev,
					struct ethtool_pauseparam *pause)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	bool pause_en = pause->tx_pause || pause->rx_pause;
	int err;

	/* PFC and global PAUSE are mutually exclusive. */
	if (mlxsw_sp_port->dcb.pfc && mlxsw_sp_port->dcb.pfc->pfc_en) {
		netdev_err(dev, "PFC already enabled on port\n");
		return -EINVAL;
	}

	if (pause->autoneg) {
		netdev_err(dev, "PAUSE frames autonegotiation isn't supported\n");
		return -EINVAL;
	}

	/* Headroom must be sized for the new PAUSE state before the
	 * hardware is told to honour it.
	 */
	err = mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en);
	if (err) {
		netdev_err(dev, "Failed to configure port's headroom\n");
		return err;
	}

	err = mlxsw_sp_port_pause_set(mlxsw_sp_port, pause);
	if (err) {
		netdev_err(dev, "Failed to set PAUSE parameters\n");
		goto err_port_pause_configure;
	}

	mlxsw_sp_port->link.rx_pause = pause->rx_pause;
	mlxsw_sp_port->link.tx_pause = pause->tx_pause;

	return 0;

err_port_pause_configure:
	/* Restore the headroom for the previous PAUSE state. */
	pause_en = mlxsw_sp_port_is_pause_en(mlxsw_sp_port);
	mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en);
	return err;
}
1429
/* Descriptor of one ethtool hardware counter: the string shown by
 * `ethtool -S` and a getter extracting the value from a PPCNT payload.
 */
struct mlxsw_sp_port_hw_stats {
	char str[ETH_GSTRING_LEN];
	u64 (*getter)(const char *payload);
};
1434
/* IEEE 802.3 counter group descriptors exposed through ethtool -S. */
static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_stats[] = {
	{
		.str = "a_frames_transmitted_ok",
		.getter = mlxsw_reg_ppcnt_a_frames_transmitted_ok_get,
	},
	{
		.str = "a_frames_received_ok",
		.getter = mlxsw_reg_ppcnt_a_frames_received_ok_get,
	},
	{
		.str = "a_frame_check_sequence_errors",
		.getter = mlxsw_reg_ppcnt_a_frame_check_sequence_errors_get,
	},
	{
		.str = "a_alignment_errors",
		.getter = mlxsw_reg_ppcnt_a_alignment_errors_get,
	},
	{
		.str = "a_octets_transmitted_ok",
		.getter = mlxsw_reg_ppcnt_a_octets_transmitted_ok_get,
	},
	{
		.str = "a_octets_received_ok",
		.getter = mlxsw_reg_ppcnt_a_octets_received_ok_get,
	},
	{
		.str = "a_multicast_frames_xmitted_ok",
		.getter = mlxsw_reg_ppcnt_a_multicast_frames_xmitted_ok_get,
	},
	{
		.str = "a_broadcast_frames_xmitted_ok",
		.getter = mlxsw_reg_ppcnt_a_broadcast_frames_xmitted_ok_get,
	},
	{
		.str = "a_multicast_frames_received_ok",
		.getter = mlxsw_reg_ppcnt_a_multicast_frames_received_ok_get,
	},
	{
		.str = "a_broadcast_frames_received_ok",
		.getter = mlxsw_reg_ppcnt_a_broadcast_frames_received_ok_get,
	},
	{
		.str = "a_in_range_length_errors",
		.getter = mlxsw_reg_ppcnt_a_in_range_length_errors_get,
	},
	{
		.str = "a_out_of_range_length_field",
		.getter = mlxsw_reg_ppcnt_a_out_of_range_length_field_get,
	},
	{
		.str = "a_frame_too_long_errors",
		.getter = mlxsw_reg_ppcnt_a_frame_too_long_errors_get,
	},
	{
		.str = "a_symbol_error_during_carrier",
		.getter = mlxsw_reg_ppcnt_a_symbol_error_during_carrier_get,
	},
	{
		.str = "a_mac_control_frames_transmitted",
		.getter = mlxsw_reg_ppcnt_a_mac_control_frames_transmitted_get,
	},
	{
		.str = "a_mac_control_frames_received",
		.getter = mlxsw_reg_ppcnt_a_mac_control_frames_received_get,
	},
	{
		.str = "a_unsupported_opcodes_received",
		.getter = mlxsw_reg_ppcnt_a_unsupported_opcodes_received_get,
	},
	{
		.str = "a_pause_mac_ctrl_frames_received",
		.getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_received_get,
	},
	{
		.str = "a_pause_mac_ctrl_frames_xmitted",
		.getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_transmitted_get,
	},
};

/* Number of counters in the IEEE 802.3 group. */
#define MLXSW_SP_PORT_HW_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_stats)
1515
/* Per-priority counter group descriptors; each is reported once per
 * priority with a "_<prio>" suffix appended to the string.
 */
static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_prio_stats[] = {
	{
		.str = "rx_octets_prio",
		.getter = mlxsw_reg_ppcnt_rx_octets_get,
	},
	{
		.str = "rx_frames_prio",
		.getter = mlxsw_reg_ppcnt_rx_frames_get,
	},
	{
		.str = "tx_octets_prio",
		.getter = mlxsw_reg_ppcnt_tx_octets_get,
	},
	{
		.str = "tx_frames_prio",
		.getter = mlxsw_reg_ppcnt_tx_frames_get,
	},
	{
		.str = "rx_pause_prio",
		.getter = mlxsw_reg_ppcnt_rx_pause_get,
	},
	{
		.str = "rx_pause_duration_prio",
		.getter = mlxsw_reg_ppcnt_rx_pause_duration_get,
	},
	{
		.str = "tx_pause_prio",
		.getter = mlxsw_reg_ppcnt_tx_pause_get,
	},
	{
		.str = "tx_pause_duration_prio",
		.getter = mlxsw_reg_ppcnt_tx_pause_duration_get,
	},
};

/* Number of counters in the per-priority group. */
#define MLXSW_SP_PORT_HW_PRIO_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_prio_stats)
1552
1553static u64 mlxsw_reg_ppcnt_tc_transmit_queue_bytes_get(const char *ppcnt_pl)
1554{
1555        u64 transmit_queue = mlxsw_reg_ppcnt_tc_transmit_queue_get(ppcnt_pl);
1556
1557        return MLXSW_SP_CELLS_TO_BYTES(transmit_queue);
1558}
1559
/* Per-TC counter group descriptors; each is reported once per traffic
 * class with a "_<tc>" suffix appended to the string.
 */
static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_tc_stats[] = {
	{
		.str = "tc_transmit_queue_tc",
		.getter = mlxsw_reg_ppcnt_tc_transmit_queue_bytes_get,
	},
	{
		.str = "tc_no_buffer_discard_uc_tc",
		.getter = mlxsw_reg_ppcnt_tc_no_buffer_discard_uc_get,
	},
};

/* Number of counters in the per-TC group. */
#define MLXSW_SP_PORT_HW_TC_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_tc_stats)

/* Total ethtool stats count: the IEEE group plus the per-priority and
 * per-TC groups repeated for every traffic class.
 */
#define MLXSW_SP_PORT_ETHTOOL_STATS_LEN (MLXSW_SP_PORT_HW_STATS_LEN + \
					 (MLXSW_SP_PORT_HW_PRIO_STATS_LEN + \
					  MLXSW_SP_PORT_HW_TC_STATS_LEN) * \
					 IEEE_8021QAZ_MAX_TCS)
1577
1578static void mlxsw_sp_port_get_prio_strings(u8 **p, int prio)
1579{
1580        int i;
1581
1582        for (i = 0; i < MLXSW_SP_PORT_HW_PRIO_STATS_LEN; i++) {
1583                snprintf(*p, ETH_GSTRING_LEN, "%s_%d",
1584                         mlxsw_sp_port_hw_prio_stats[i].str, prio);
1585                *p += ETH_GSTRING_LEN;
1586        }
1587}
1588
1589static void mlxsw_sp_port_get_tc_strings(u8 **p, int tc)
1590{
1591        int i;
1592
1593        for (i = 0; i < MLXSW_SP_PORT_HW_TC_STATS_LEN; i++) {
1594                snprintf(*p, ETH_GSTRING_LEN, "%s_%d",
1595                         mlxsw_sp_port_hw_tc_stats[i].str, tc);
1596                *p += ETH_GSTRING_LEN;
1597        }
1598}
1599
/* ethtool get_strings: fill the stats string table — the IEEE group
 * first, then the per-priority and per-TC groups for each traffic
 * class. The order must match __mlxsw_sp_port_get_stats.
 */
static void mlxsw_sp_port_get_strings(struct net_device *dev,
				      u32 stringset, u8 *data)
{
	u8 *p = data;
	int i;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < MLXSW_SP_PORT_HW_STATS_LEN; i++) {
			memcpy(p, mlxsw_sp_port_hw_stats[i].str,
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}

		for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
			mlxsw_sp_port_get_prio_strings(&p, i);

		for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
			mlxsw_sp_port_get_tc_strings(&p, i);

		break;
	}
}
1623
1624static int mlxsw_sp_port_set_phys_id(struct net_device *dev,
1625                                     enum ethtool_phys_id_state state)
1626{
1627        struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1628        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1629        char mlcr_pl[MLXSW_REG_MLCR_LEN];
1630        bool active;
1631
1632        switch (state) {
1633        case ETHTOOL_ID_ACTIVE:
1634                active = true;
1635                break;
1636        case ETHTOOL_ID_INACTIVE:
1637                active = false;
1638                break;
1639        default:
1640                return -EOPNOTSUPP;
1641        }
1642
1643        mlxsw_reg_mlcr_pack(mlcr_pl, mlxsw_sp_port->local_port, active);
1644        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mlcr), mlcr_pl);
1645}
1646
/* Resolve a PPCNT counter group to its stat descriptor table and length.
 * Returns -EOPNOTSUPP (after a WARN) for groups this driver does not
 * expose through ethtool.
 */
static int
mlxsw_sp_get_hw_stats_by_group(struct mlxsw_sp_port_hw_stats **p_hw_stats,
			       int *p_len, enum mlxsw_reg_ppcnt_grp grp)
{
	switch (grp) {
	case  MLXSW_REG_PPCNT_IEEE_8023_CNT:
		*p_hw_stats = mlxsw_sp_port_hw_stats;
		*p_len = MLXSW_SP_PORT_HW_STATS_LEN;
		break;
	case MLXSW_REG_PPCNT_PRIO_CNT:
		*p_hw_stats = mlxsw_sp_port_hw_prio_stats;
		*p_len = MLXSW_SP_PORT_HW_PRIO_STATS_LEN;
		break;
	case MLXSW_REG_PPCNT_TC_CNT:
		*p_hw_stats = mlxsw_sp_port_hw_tc_stats;
		*p_len = MLXSW_SP_PORT_HW_TC_STATS_LEN;
		break;
	default:
		WARN_ON(1);
		return -EOPNOTSUPP;
	}
	return 0;
}
1670
1671static void __mlxsw_sp_port_get_stats(struct net_device *dev,
1672                                      enum mlxsw_reg_ppcnt_grp grp, int prio,
1673                                      u64 *data, int data_index)
1674{
1675        struct mlxsw_sp_port_hw_stats *hw_stats;
1676        char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
1677        int i, len;
1678        int err;
1679
1680        err = mlxsw_sp_get_hw_stats_by_group(&hw_stats, &len, grp);
1681        if (err)
1682                return;
1683        mlxsw_sp_port_get_stats_raw(dev, grp, prio, ppcnt_pl);
1684        for (i = 0; i < len; i++)
1685                data[data_index + i] = hw_stats[i].getter(ppcnt_pl);
1686}
1687
/* ethtool get_ethtool_stats: fill @data in exactly the order that
 * mlxsw_sp_port_get_strings() emitted the names.
 */
static void mlxsw_sp_port_get_stats(struct net_device *dev,
				    struct ethtool_stats *stats, u64 *data)
{
	int i, data_index = 0;

	/* IEEE 802.3 Counters */
	__mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_IEEE_8023_CNT, 0,
				  data, data_index);
	data_index = MLXSW_SP_PORT_HW_STATS_LEN;

	/* Per-Priority Counters */
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		__mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_PRIO_CNT, i,
					  data, data_index);
		data_index += MLXSW_SP_PORT_HW_PRIO_STATS_LEN;
	}

	/* Per-TC Counters */
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		__mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_TC_CNT, i,
					  data, data_index);
		data_index += MLXSW_SP_PORT_HW_TC_STATS_LEN;
	}
}
1712
1713static int mlxsw_sp_port_get_sset_count(struct net_device *dev, int sset)
1714{
1715        switch (sset) {
1716        case ETH_SS_STATS:
1717                return MLXSW_SP_PORT_ETHTOOL_STATS_LEN;
1718        default:
1719                return -EOPNOTSUPP;
1720        }
1721}
1722
/* One row of the PTYS <-> ethtool link mode mapping table. */
struct mlxsw_sp_port_link_mode {
	enum ethtool_link_mode_bit_indices mask_ethtool; /* ETHTOOL_LINK_MODE_* bit */
	u32 mask;	/* MLXSW_REG_PTYS_ETH_SPEED_* bit(s) */
	u32 speed;	/* SPEED_* value reported to ethtool */
};
1728
1729static const struct mlxsw_sp_port_link_mode mlxsw_sp_port_link_mode[] = {
1730        {
1731                .mask           = MLXSW_REG_PTYS_ETH_SPEED_100BASE_T,
1732                .mask_ethtool   = ETHTOOL_LINK_MODE_100baseT_Full_BIT,
1733                .speed          = SPEED_100,
1734        },
1735        {
1736                .mask           = MLXSW_REG_PTYS_ETH_SPEED_SGMII |
1737                                  MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX,
1738                .mask_ethtool   = ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
1739                .speed          = SPEED_1000,
1740        },
1741        {
1742                .mask           = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_T,
1743                .mask_ethtool   = ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
1744                .speed          = SPEED_10000,
1745        },
1746        {
1747                .mask           = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CX4 |
1748                                  MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4,
1749                .mask_ethtool   = ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT,
1750                .speed          = SPEED_10000,
1751        },
1752        {
1753                .mask           = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
1754                                  MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
1755                                  MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
1756                                  MLXSW_REG_PTYS_ETH_SPEED_10GBASE_ER_LR,
1757                .mask_ethtool   = ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
1758                .speed          = SPEED_10000,
1759        },
1760        {
1761                .mask           = MLXSW_REG_PTYS_ETH_SPEED_20GBASE_KR2,
1762                .mask_ethtool   = ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT,
1763                .speed          = SPEED_20000,
1764        },
1765        {
1766                .mask           = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4,
1767                .mask_ethtool   = ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
1768                .speed          = SPEED_40000,
1769        },
1770        {
1771                .mask           = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4,
1772                .mask_ethtool   = ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
1773                .speed          = SPEED_40000,
1774        },
1775        {
1776                .mask           = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4,
1777                .mask_ethtool   = ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
1778                .speed          = SPEED_40000,
1779        },
1780        {
1781                .mask           = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_LR4_ER4,
1782                .mask_ethtool   = ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
1783                .speed          = SPEED_40000,
1784        },
1785        {
1786                .mask           = MLXSW_REG_PTYS_ETH_SPEED_25GBASE_CR,
1787                .mask_ethtool   = ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
1788                .speed          = SPEED_25000,
1789        },
1790        {
1791                .mask           = MLXSW_REG_PTYS_ETH_SPEED_25GBASE_KR,
1792                .mask_ethtool   = ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
1793                .speed          = SPEED_25000,
1794        },
1795        {
1796                .mask           = MLXSW_REG_PTYS_ETH_SPEED_25GBASE_SR,
1797                .mask_ethtool   = ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
1798                .speed          = SPEED_25000,
1799        },
1800        {
1801                .mask           = MLXSW_REG_PTYS_ETH_SPEED_25GBASE_SR,
1802                .mask_ethtool   = ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
1803                .speed          = SPEED_25000,
1804        },
1805        {
1806                .mask           = MLXSW_REG_PTYS_ETH_SPEED_50GBASE_CR2,
1807                .mask_ethtool   = ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
1808                .speed          = SPEED_50000,
1809        },
1810        {
1811                .mask           = MLXSW_REG_PTYS_ETH_SPEED_50GBASE_KR2,
1812                .mask_ethtool   = ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
1813                .speed          = SPEED_50000,
1814        },
1815        {
1816                .mask           = MLXSW_REG_PTYS_ETH_SPEED_50GBASE_SR2,
1817                .mask_ethtool   = ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
1818                .speed          = SPEED_50000,
1819        },
1820        {
1821                .mask           = MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4,
1822                .mask_ethtool   = ETHTOOL_LINK_MODE_56000baseKR4_Full_BIT,
1823                .speed          = SPEED_56000,
1824        },
1825        {
1826                .mask           = MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4,
1827                .mask_ethtool   = ETHTOOL_LINK_MODE_56000baseCR4_Full_BIT,
1828                .speed          = SPEED_56000,
1829        },
1830        {
1831                .mask           = MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4,
1832                .mask_ethtool   = ETHTOOL_LINK_MODE_56000baseSR4_Full_BIT,
1833                .speed          = SPEED_56000,
1834        },
1835        {
1836                .mask           = MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4,
1837                .mask_ethtool   = ETHTOOL_LINK_MODE_56000baseLR4_Full_BIT,
1838                .speed          = SPEED_56000,
1839        },
1840        {
1841                .mask           = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4,
1842                .mask_ethtool   = ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
1843                .speed          = SPEED_100000,
1844        },
1845        {
1846                .mask           = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4,
1847                .mask_ethtool   = ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
1848                .speed          = SPEED_100000,
1849        },
1850        {
1851                .mask           = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4,
1852                .mask_ethtool   = ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
1853                .speed          = SPEED_100000,
1854        },
1855        {
1856                .mask           = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_LR4_ER4,
1857                .mask_ethtool   = ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
1858                .speed          = SPEED_100000,
1859        },
1860};
1861
/* Number of rows in the PTYS <-> ethtool link mode mapping table. */
#define MLXSW_SP_PORT_LINK_MODE_LEN ARRAY_SIZE(mlxsw_sp_port_link_mode)
1863
/* Derive the supported connector/port types (FIBRE, Backplane) for the
 * ethtool "supported" mask from the PTYS capability bits.
 */
static void
mlxsw_sp_from_ptys_supported_port(u32 ptys_eth_proto,
				  struct ethtool_link_ksettings *cmd)
{
	if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
			      MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_SGMII))
		ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE);

	if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
			      MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4 |
			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX))
		ethtool_link_ksettings_add_link_mode(cmd, supported, Backplane);
}
1883
1884static void mlxsw_sp_from_ptys_link(u32 ptys_eth_proto, unsigned long *mode)
1885{
1886        int i;
1887
1888        for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
1889                if (ptys_eth_proto & mlxsw_sp_port_link_mode[i].mask)
1890                        __set_bit(mlxsw_sp_port_link_mode[i].mask_ethtool,
1891                                  mode);
1892        }
1893}
1894
1895static void mlxsw_sp_from_ptys_speed_duplex(bool carrier_ok, u32 ptys_eth_proto,
1896                                            struct ethtool_link_ksettings *cmd)
1897{
1898        u32 speed = SPEED_UNKNOWN;
1899        u8 duplex = DUPLEX_UNKNOWN;
1900        int i;
1901
1902        if (!carrier_ok)
1903                goto out;
1904
1905        for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
1906                if (ptys_eth_proto & mlxsw_sp_port_link_mode[i].mask) {
1907                        speed = mlxsw_sp_port_link_mode[i].speed;
1908                        duplex = DUPLEX_FULL;
1909                        break;
1910                }
1911        }
1912out:
1913        cmd->base.speed = speed;
1914        cmd->base.duplex = duplex;
1915}
1916
/* Classify the connector type for ethtool from the PTYS oper mask:
 * optical modes -> FIBRE, copper modes -> DA (direct attach),
 * backplane modes -> NONE, anything else -> OTHER.
 */
static u8 mlxsw_sp_port_connector_port(u32 ptys_eth_proto)
{
	if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_SGMII))
		return PORT_FIBRE;

	if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4))
		return PORT_DA;

	if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
			      MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4 |
			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4))
		return PORT_NONE;

	return PORT_OTHER;
}
1938
1939static u32
1940mlxsw_sp_to_ptys_advert_link(const struct ethtool_link_ksettings *cmd)
1941{
1942        u32 ptys_proto = 0;
1943        int i;
1944
1945        for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
1946                if (test_bit(mlxsw_sp_port_link_mode[i].mask_ethtool,
1947                             cmd->link_modes.advertising))
1948                        ptys_proto |= mlxsw_sp_port_link_mode[i].mask;
1949        }
1950        return ptys_proto;
1951}
1952
1953static u32 mlxsw_sp_to_ptys_speed(u32 speed)
1954{
1955        u32 ptys_proto = 0;
1956        int i;
1957
1958        for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
1959                if (speed == mlxsw_sp_port_link_mode[i].speed)
1960                        ptys_proto |= mlxsw_sp_port_link_mode[i].mask;
1961        }
1962        return ptys_proto;
1963}
1964
1965static u32 mlxsw_sp_to_ptys_upper_speed(u32 upper_speed)
1966{
1967        u32 ptys_proto = 0;
1968        int i;
1969
1970        for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
1971                if (mlxsw_sp_port_link_mode[i].speed <= upper_speed)
1972                        ptys_proto |= mlxsw_sp_port_link_mode[i].mask;
1973        }
1974        return ptys_proto;
1975}
1976
/* Fill the "supported" mask: pause capabilities, autoneg, the supported
 * port types and every speed present in the device capability mask.
 */
static void mlxsw_sp_port_get_link_supported(u32 eth_proto_cap,
					     struct ethtool_link_ksettings *cmd)
{
	ethtool_link_ksettings_add_link_mode(cmd, supported, Asym_Pause);
	ethtool_link_ksettings_add_link_mode(cmd, supported, Autoneg);
	ethtool_link_ksettings_add_link_mode(cmd, supported, Pause);

	mlxsw_sp_from_ptys_supported_port(eth_proto_cap, cmd);
	mlxsw_sp_from_ptys_link(eth_proto_cap, cmd->link_modes.supported);
}
1987
/* Fill the "advertising" mask from the PTYS admin mask; nothing is
 * advertised when autoneg is disabled.
 */
static void mlxsw_sp_port_get_link_advertise(u32 eth_proto_admin, bool autoneg,
					     struct ethtool_link_ksettings *cmd)
{
	if (!autoneg)
		return;

	ethtool_link_ksettings_add_link_mode(cmd, advertising, Autoneg);
	mlxsw_sp_from_ptys_link(eth_proto_admin, cmd->link_modes.advertising);
}
1997
/* Fill the link partner's advertised modes; only valid when autoneg
 * completed successfully and the partner mask is non-empty.
 */
static void
mlxsw_sp_port_get_link_lp_advertise(u32 eth_proto_lp, u8 autoneg_status,
				    struct ethtool_link_ksettings *cmd)
{
	if (autoneg_status != MLXSW_REG_PTYS_AN_STATUS_OK || !eth_proto_lp)
		return;

	ethtool_link_ksettings_add_link_mode(cmd, lp_advertising, Autoneg);
	mlxsw_sp_from_ptys_link(eth_proto_lp, cmd->link_modes.lp_advertising);
}
2008
/* ethtool get_link_ksettings: report supported/advertised/partner link
 * modes, autoneg state, connector type and operational speed/duplex,
 * all derived from a single PTYS register query.
 */
static int mlxsw_sp_port_get_link_ksettings(struct net_device *dev,
					    struct ethtool_link_ksettings *cmd)
{
	u32 eth_proto_cap, eth_proto_admin, eth_proto_oper, eth_proto_lp;
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char ptys_pl[MLXSW_REG_PTYS_LEN];
	u8 autoneg_status;
	bool autoneg;
	int err;

	autoneg = mlxsw_sp_port->link.autoneg;
	mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sp_port->local_port, 0);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
	if (err)
		return err;
	mlxsw_reg_ptys_eth_unpack(ptys_pl, &eth_proto_cap, &eth_proto_admin,
				  &eth_proto_oper);

	mlxsw_sp_port_get_link_supported(eth_proto_cap, cmd);

	mlxsw_sp_port_get_link_advertise(eth_proto_admin, autoneg, cmd);

	eth_proto_lp = mlxsw_reg_ptys_eth_proto_lp_advertise_get(ptys_pl);
	autoneg_status = mlxsw_reg_ptys_an_status_get(ptys_pl);
	mlxsw_sp_port_get_link_lp_advertise(eth_proto_lp, autoneg_status, cmd);

	cmd->base.autoneg = autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE;
	cmd->base.port = mlxsw_sp_port_connector_port(eth_proto_oper);
	/* Speed/duplex are only meaningful while carrier is up. */
	mlxsw_sp_from_ptys_speed_duplex(netif_carrier_ok(dev), eth_proto_oper,
					cmd);

	return 0;
}
2043
2044static int
2045mlxsw_sp_port_set_link_ksettings(struct net_device *dev,
2046                                 const struct ethtool_link_ksettings *cmd)
2047{
2048        struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
2049        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2050        char ptys_pl[MLXSW_REG_PTYS_LEN];
2051        u32 eth_proto_cap, eth_proto_new;
2052        bool autoneg;
2053        int err;
2054
2055        mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sp_port->local_port, 0);
2056        err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
2057        if (err)
2058                return err;
2059        mlxsw_reg_ptys_eth_unpack(ptys_pl, &eth_proto_cap, NULL, NULL);
2060
2061        autoneg = cmd->base.autoneg == AUTONEG_ENABLE;
2062        eth_proto_new = autoneg ?
2063                mlxsw_sp_to_ptys_advert_link(cmd) :
2064                mlxsw_sp_to_ptys_speed(cmd->base.speed);
2065
2066        eth_proto_new = eth_proto_new & eth_proto_cap;
2067        if (!eth_proto_new) {
2068                netdev_err(dev, "No supported speed requested\n");
2069                return -EINVAL;
2070        }
2071
2072        mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sp_port->local_port,
2073                                eth_proto_new);
2074        err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
2075        if (err)
2076                return err;
2077
2078        if (!netif_running(dev))
2079                return 0;
2080
2081        mlxsw_sp_port->link.autoneg = autoneg;
2082
2083        mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
2084        mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);
2085
2086        return 0;
2087}
2088
/* ethtool operations exposed by every Spectrum port netdev. */
static const struct ethtool_ops mlxsw_sp_port_ethtool_ops = {
	.get_drvinfo		= mlxsw_sp_port_get_drvinfo,
	.get_link		= ethtool_op_get_link,
	.get_pauseparam		= mlxsw_sp_port_get_pauseparam,
	.set_pauseparam		= mlxsw_sp_port_set_pauseparam,
	.get_strings		= mlxsw_sp_port_get_strings,
	.set_phys_id		= mlxsw_sp_port_set_phys_id,
	.get_ethtool_stats	= mlxsw_sp_port_get_stats,
	.get_sset_count		= mlxsw_sp_port_get_sset_count,
	.get_link_ksettings	= mlxsw_sp_port_get_link_ksettings,
	.set_link_ksettings	= mlxsw_sp_port_set_link_ksettings,
};
2101
/* Enable in the PTYS admin mask all speeds up to the port's maximum,
 * computed as the per-lane base speed times the lane width.
 */
static int
mlxsw_sp_port_speed_by_width_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 width)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u32 upper_speed = MLXSW_SP_PORT_BASE_SPEED * width;
	char ptys_pl[MLXSW_REG_PTYS_LEN];
	u32 eth_proto_admin;

	eth_proto_admin = mlxsw_sp_to_ptys_upper_speed(upper_speed);
	mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sp_port->local_port,
				eth_proto_admin);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
}
2115
/* Configure one ETS scheduling element (QEEC register): link element
 * @index at hierarchy level @hr to @next_index, with DWRR on/off and
 * its weight.
 */
int mlxsw_sp_port_ets_set(struct mlxsw_sp_port *mlxsw_sp_port,
			  enum mlxsw_reg_qeec_hr hr, u8 index, u8 next_index,
			  bool dwrr, u8 dwrr_weight)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char qeec_pl[MLXSW_REG_QEEC_LEN];

	mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
			    next_index);
	mlxsw_reg_qeec_de_set(qeec_pl, true);
	mlxsw_reg_qeec_dwrr_set(qeec_pl, dwrr);
	mlxsw_reg_qeec_dwrr_weight_set(qeec_pl, dwrr_weight);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
}
2130
/* Set the max shaper rate of one ETS scheduling element (QEEC
 * register); @maxrate may be MLXSW_REG_QEEC_MAS_DIS to disable shaping.
 */
int mlxsw_sp_port_ets_maxrate_set(struct mlxsw_sp_port *mlxsw_sp_port,
				  enum mlxsw_reg_qeec_hr hr, u8 index,
				  u8 next_index, u32 maxrate)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char qeec_pl[MLXSW_REG_QEEC_LEN];

	mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
			    next_index);
	mlxsw_reg_qeec_mase_set(qeec_pl, true);
	mlxsw_reg_qeec_max_shaper_rate_set(qeec_pl, maxrate);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
}
2144
/* Map a switch priority to a traffic class on this port (QTCT
 * register).
 */
int mlxsw_sp_port_prio_tc_set(struct mlxsw_sp_port *mlxsw_sp_port,
			      u8 switch_prio, u8 tclass)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char qtct_pl[MLXSW_REG_QTCT_LEN];

	mlxsw_reg_qtct_pack(qtct_pl, mlxsw_sp_port->local_port, switch_prio,
			    tclass);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qtct), qtct_pl);
}
2155
/* Bring the port's ETS scheduler to a known default state: build the
 * element hierarchy, disable all max shapers and map every priority to
 * TC 0. Returns 0 or the first register-write error.
 */
static int mlxsw_sp_port_ets_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	int err, i;

	/* Setup the elements hierarchy, so that each TC is linked to
	 * one subgroup, which are all member in the same group.
	 */
	err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
				    MLXSW_REG_QEEC_HIERARCY_GROUP, 0, 0, false,
				    0);
	if (err)
		return err;
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
					    MLXSW_REG_QEEC_HIERARCY_SUBGROUP, i,
					    0, false, 0);
		if (err)
			return err;
	}
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
					    MLXSW_REG_QEEC_HIERARCY_TC, i, i,
					    false, 0);
		if (err)
			return err;
	}

	/* Make sure the max shaper is disabled in all hierarchies that
	 * support it.
	 */
	err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
					    MLXSW_REG_QEEC_HIERARCY_PORT, 0, 0,
					    MLXSW_REG_QEEC_MAS_DIS);
	if (err)
		return err;
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
						    MLXSW_REG_QEEC_HIERARCY_SUBGROUP,
						    i, 0,
						    MLXSW_REG_QEEC_MAS_DIS);
		if (err)
			return err;
	}
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
						    MLXSW_REG_QEEC_HIERARCY_TC,
						    i, i,
						    MLXSW_REG_QEEC_MAS_DIS);
		if (err)
			return err;
	}

	/* Map all priorities to traffic class 0. */
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_prio_tc_set(mlxsw_sp_port, i, 0);
		if (err)
			return err;
	}

	return 0;
}
2217
/* Create the default PVID (VLAN 1) vPort for a new port. */
static int mlxsw_sp_port_pvid_vport_create(struct mlxsw_sp_port *mlxsw_sp_port)
{
	mlxsw_sp_port->pvid = 1;

	return mlxsw_sp_port_add_vid(mlxsw_sp_port->dev, 0, 1);
}
2224
/* Tear down the default PVID (VLAN 1) vPort. */
static int mlxsw_sp_port_pvid_vport_destroy(struct mlxsw_sp_port *mlxsw_sp_port)
{
	return mlxsw_sp_port_kill_vid(mlxsw_sp_port->dev, 0, 1);
}
2229
/* Allocate and fully initialize one port netdev: allocations, SWID/MAC
 * setup, speed/MTU/buffers/ETS/DCB, PVID vPort, switchdev state and
 * finally netdev registration. Unwinds in strict reverse order via the
 * goto ladder on any failure.
 */
static int __mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
				  bool split, u8 module, u8 width, u8 lane)
{
	struct mlxsw_sp_port *mlxsw_sp_port;
	struct net_device *dev;
	size_t bytes;
	int err;

	dev = alloc_etherdev(sizeof(struct mlxsw_sp_port));
	if (!dev)
		return -ENOMEM;
	SET_NETDEV_DEV(dev, mlxsw_sp->bus_info->dev);
	mlxsw_sp_port = netdev_priv(dev);
	mlxsw_sp_port->dev = dev;
	mlxsw_sp_port->mlxsw_sp = mlxsw_sp;
	mlxsw_sp_port->local_port = local_port;
	mlxsw_sp_port->split = split;
	mlxsw_sp_port->mapping.module = module;
	mlxsw_sp_port->mapping.width = width;
	mlxsw_sp_port->mapping.lane = lane;
	mlxsw_sp_port->link.autoneg = 1;
	/* Bitmaps with one bit per possible VLAN ID. */
	bytes = DIV_ROUND_UP(VLAN_N_VID, BITS_PER_BYTE);
	mlxsw_sp_port->active_vlans = kzalloc(bytes, GFP_KERNEL);
	if (!mlxsw_sp_port->active_vlans) {
		err = -ENOMEM;
		goto err_port_active_vlans_alloc;
	}
	mlxsw_sp_port->untagged_vlans = kzalloc(bytes, GFP_KERNEL);
	if (!mlxsw_sp_port->untagged_vlans) {
		err = -ENOMEM;
		goto err_port_untagged_vlans_alloc;
	}
	INIT_LIST_HEAD(&mlxsw_sp_port->vports_list);
	INIT_LIST_HEAD(&mlxsw_sp_port->mall_tc_list);

	mlxsw_sp_port->pcpu_stats =
		netdev_alloc_pcpu_stats(struct mlxsw_sp_port_pcpu_stats);
	if (!mlxsw_sp_port->pcpu_stats) {
		err = -ENOMEM;
		goto err_alloc_stats;
	}

	mlxsw_sp_port->hw_stats.cache =
		kzalloc(sizeof(*mlxsw_sp_port->hw_stats.cache), GFP_KERNEL);

	if (!mlxsw_sp_port->hw_stats.cache) {
		err = -ENOMEM;
		goto err_alloc_hw_stats;
	}
	/* Periodic refresh of the cached HW stats; first run is
	 * scheduled at the end of this function.
	 */
	INIT_DELAYED_WORK(&mlxsw_sp_port->hw_stats.update_dw,
			  &update_stats_cache);

	dev->netdev_ops = &mlxsw_sp_port_netdev_ops;
	dev->ethtool_ops = &mlxsw_sp_port_ethtool_ops;

	err = mlxsw_sp_port_swid_set(mlxsw_sp_port, 0);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set SWID\n",
			mlxsw_sp_port->local_port);
		goto err_port_swid_set;
	}

	err = mlxsw_sp_port_dev_addr_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unable to init port mac address\n",
			mlxsw_sp_port->local_port);
		goto err_dev_addr_init;
	}

	netif_carrier_off(dev);

	dev->features |= NETIF_F_NETNS_LOCAL | NETIF_F_LLTX | NETIF_F_SG |
			 NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_TC;
	dev->hw_features |= NETIF_F_HW_TC;

	/* Each packet needs to have a Tx header (metadata) on top all other
	 * headers.
	 */
	dev->needed_headroom = MLXSW_TXHDR_LEN;

	err = mlxsw_sp_port_system_port_mapping_set(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set system port mapping\n",
			mlxsw_sp_port->local_port);
		goto err_port_system_port_mapping_set;
	}

	err = mlxsw_sp_port_speed_by_width_set(mlxsw_sp_port, width);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to enable speeds\n",
			mlxsw_sp_port->local_port);
		goto err_port_speed_by_width_set;
	}

	err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, ETH_DATA_LEN);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set MTU\n",
			mlxsw_sp_port->local_port);
		goto err_port_mtu_set;
	}

	err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
	if (err)
		goto err_port_admin_status_set;

	err = mlxsw_sp_port_buffers_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize buffers\n",
			mlxsw_sp_port->local_port);
		goto err_port_buffers_init;
	}

	err = mlxsw_sp_port_ets_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize ETS\n",
			mlxsw_sp_port->local_port);
		goto err_port_ets_init;
	}

	/* ETS and buffers must be initialized before DCB. */
	err = mlxsw_sp_port_dcb_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize DCB\n",
			mlxsw_sp_port->local_port);
		goto err_port_dcb_init;
	}

	err = mlxsw_sp_port_pvid_vport_create(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to create PVID vPort\n",
			mlxsw_sp_port->local_port);
		goto err_port_pvid_vport_create;
	}

	/* Publish the port in the ports array before registration so it
	 * is visible once the netdev goes live.
	 */
	mlxsw_sp_port_switchdev_init(mlxsw_sp_port);
	mlxsw_sp->ports[local_port] = mlxsw_sp_port;
	err = register_netdev(dev);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to register netdev\n",
			mlxsw_sp_port->local_port);
		goto err_register_netdev;
	}

	mlxsw_core_port_eth_set(mlxsw_sp->core, mlxsw_sp_port->local_port,
				mlxsw_sp_port, dev, mlxsw_sp_port->split,
				module);
	mlxsw_core_schedule_dw(&mlxsw_sp_port->hw_stats.update_dw, 0);
	return 0;

err_register_netdev:
	mlxsw_sp->ports[local_port] = NULL;
	mlxsw_sp_port_switchdev_fini(mlxsw_sp_port);
	mlxsw_sp_port_pvid_vport_destroy(mlxsw_sp_port);
err_port_pvid_vport_create:
	mlxsw_sp_port_dcb_fini(mlxsw_sp_port);
err_port_dcb_init:
err_port_ets_init:
err_port_buffers_init:
err_port_admin_status_set:
err_port_mtu_set:
err_port_speed_by_width_set:
err_port_system_port_mapping_set:
err_dev_addr_init:
	mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT);
err_port_swid_set:
	kfree(mlxsw_sp_port->hw_stats.cache);
err_alloc_hw_stats:
	free_percpu(mlxsw_sp_port->pcpu_stats);
err_alloc_stats:
	kfree(mlxsw_sp_port->untagged_vlans);
err_port_untagged_vlans_alloc:
	kfree(mlxsw_sp_port->active_vlans);
err_port_active_vlans_alloc:
	free_netdev(dev);
	return err;
}
2406
2407static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
2408                                bool split, u8 module, u8 width, u8 lane)
2409{
2410        int err;
2411
2412        err = mlxsw_core_port_init(mlxsw_sp->core, local_port);
2413        if (err) {
2414                dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to init core port\n",
2415                        local_port);
2416                return err;
2417        }
2418        err = __mlxsw_sp_port_create(mlxsw_sp, local_port, split,
2419                                     module, width, lane);
2420        if (err)
2421                goto err_port_create;
2422        return 0;
2423
2424err_port_create:
2425        mlxsw_core_port_fini(mlxsw_sp->core, local_port);
2426        return err;
2427}
2428
/* Tear down a port created by __mlxsw_sp_port_create().
 * The steps mirror the create path in reverse; the ordering is
 * significant: the delayed stats work is cancelled before its cache is
 * freed, and the ports[] slot is cleared before switchdev/vPort/DCB
 * teardown so lookups no longer find the dying port.
 */
static void __mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port)
{
        struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];

        /* Stop periodic HW-stats collection before anything else. */
        cancel_delayed_work_sync(&mlxsw_sp_port->hw_stats.update_dw);
        mlxsw_core_port_clear(mlxsw_sp->core, local_port, mlxsw_sp);
        unregister_netdev(mlxsw_sp_port->dev); /* This calls ndo_stop */
        mlxsw_sp->ports[local_port] = NULL;
        mlxsw_sp_port_switchdev_fini(mlxsw_sp_port);
        mlxsw_sp_port_pvid_vport_destroy(mlxsw_sp_port);
        mlxsw_sp_port_dcb_fini(mlxsw_sp_port);
        mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT);
        mlxsw_sp_port_module_unmap(mlxsw_sp, mlxsw_sp_port->local_port);
        kfree(mlxsw_sp_port->hw_stats.cache);
        free_percpu(mlxsw_sp_port->pcpu_stats);
        kfree(mlxsw_sp_port->untagged_vlans);
        kfree(mlxsw_sp_port->active_vlans);
        /* All vPorts should have been destroyed by now. */
        WARN_ON_ONCE(!list_empty(&mlxsw_sp_port->vports_list));
        free_netdev(mlxsw_sp_port->dev);
}
2449
/* Remove a port and release its core-level representation; counterpart
 * of mlxsw_sp_port_create().
 */
static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port)
{
        __mlxsw_sp_port_remove(mlxsw_sp, local_port);
        mlxsw_core_port_fini(mlxsw_sp->core, local_port);
}
2455
2456static bool mlxsw_sp_port_created(struct mlxsw_sp *mlxsw_sp, u8 local_port)
2457{
2458        return mlxsw_sp->ports[local_port] != NULL;
2459}
2460
2461static void mlxsw_sp_ports_remove(struct mlxsw_sp *mlxsw_sp)
2462{
2463        int i;
2464
2465        for (i = 1; i < MLXSW_PORT_MAX_PORTS; i++)
2466                if (mlxsw_sp_port_created(mlxsw_sp, i))
2467                        mlxsw_sp_port_remove(mlxsw_sp, i);
2468        kfree(mlxsw_sp->ports);
2469}
2470
/* Allocate the ports[] array and instantiate a netdev for every port
 * whose module is currently present (width != 0).  On any failure all
 * previously created ports are removed, last to first, and the array
 * is freed.  Port numbering is 1-based, so index 0 is never used.
 */
static int mlxsw_sp_ports_create(struct mlxsw_sp *mlxsw_sp)
{
        u8 module, width, lane;
        size_t alloc_size;
        int i;
        int err;

        alloc_size = sizeof(struct mlxsw_sp_port *) * MLXSW_PORT_MAX_PORTS;
        mlxsw_sp->ports = kzalloc(alloc_size, GFP_KERNEL);
        if (!mlxsw_sp->ports)
                return -ENOMEM;

        for (i = 1; i < MLXSW_PORT_MAX_PORTS; i++) {
                err = mlxsw_sp_port_module_info_get(mlxsw_sp, i, &module,
                                                    &width, &lane);
                if (err)
                        goto err_port_module_info_get;
                /* Width of zero: no module behind this port, skip it. */
                if (!width)
                        continue;
                /* Remember the module so unsplit can re-create the port. */
                mlxsw_sp->port_to_module[i] = module;
                err = mlxsw_sp_port_create(mlxsw_sp, i, false,
                                           module, width, lane);
                if (err)
                        goto err_port_create;
        }
        return 0;

err_port_create:
err_port_module_info_get:
        /* Unwind only the ports that were actually created. */
        for (i--; i >= 1; i--)
                if (mlxsw_sp_port_created(mlxsw_sp, i))
                        mlxsw_sp_port_remove(mlxsw_sp, i);
        kfree(mlxsw_sp->ports);
        return err;
}
2506
2507static u8 mlxsw_sp_cluster_base_port_get(u8 local_port)
2508{
2509        u8 offset = (local_port - 1) % MLXSW_SP_PORTS_PER_CLUSTER_MAX;
2510
2511        return local_port - offset;
2512}
2513
/* Create 'count' split ports starting at 'base_port', all sharing the
 * same front-panel 'module'.  Lanes are distributed evenly: each port
 * gets MLXSW_PORT_MODULE_MAX_WIDTH / count lanes.
 *
 * The error path unwinds stage by stage; the 'i = count' assignments
 * reset the index so each earlier stage is rolled back for all ports,
 * not only the ones the failing stage reached.
 */
static int mlxsw_sp_port_split_create(struct mlxsw_sp *mlxsw_sp, u8 base_port,
                                      u8 module, unsigned int count)
{
        u8 width = MLXSW_PORT_MODULE_MAX_WIDTH / count;
        int err, i;

        /* Stage 1: map each new port to its module and starting lane. */
        for (i = 0; i < count; i++) {
                err = mlxsw_sp_port_module_map(mlxsw_sp, base_port + i, module,
                                               width, i * width);
                if (err)
                        goto err_port_module_map;
        }

        /* Stage 2: place the new ports on swid 0. */
        for (i = 0; i < count; i++) {
                err = __mlxsw_sp_port_swid_set(mlxsw_sp, base_port + i, 0);
                if (err)
                        goto err_port_swid_set;
        }

        /* Stage 3: create the netdevs, flagged as split ports. */
        for (i = 0; i < count; i++) {
                err = mlxsw_sp_port_create(mlxsw_sp, base_port + i, true,
                                           module, width, i * width);
                if (err)
                        goto err_port_create;
        }

        return 0;

err_port_create:
        for (i--; i >= 0; i--)
                if (mlxsw_sp_port_created(mlxsw_sp, base_port + i))
                        mlxsw_sp_port_remove(mlxsw_sp, base_port + i);
        i = count;
err_port_swid_set:
        for (i--; i >= 0; i--)
                __mlxsw_sp_port_swid_set(mlxsw_sp, base_port + i,
                                         MLXSW_PORT_SWID_DISABLED_PORT);
        i = count;
err_port_module_map:
        for (i--; i >= 0; i--)
                mlxsw_sp_port_module_unmap(mlxsw_sp, base_port + i);
        return err;
}
2557
/* Re-create the original full-width port(s) covering 'count' split
 * ports starting at 'base_port'.  Each re-created port takes the full
 * module width and lane 0.  Return values are ignored: this is a
 * best-effort restoration used by unsplit and by the split error path.
 */
static void mlxsw_sp_port_unsplit_create(struct mlxsw_sp *mlxsw_sp,
                                         u8 base_port, unsigned int count)
{
        u8 local_port, module, width = MLXSW_PORT_MODULE_MAX_WIDTH;
        int i;

        /* Split by four means we need to re-create two ports, otherwise
         * only one.
         */
        count = count / 2;

        /* Map every second port back to its remembered module. */
        for (i = 0; i < count; i++) {
                local_port = base_port + i * 2;
                module = mlxsw_sp->port_to_module[local_port];

                mlxsw_sp_port_module_map(mlxsw_sp, local_port, module, width,
                                         0);
        }

        for (i = 0; i < count; i++)
                __mlxsw_sp_port_swid_set(mlxsw_sp, base_port + i * 2, 0);

        for (i = 0; i < count; i++) {
                local_port = base_port + i * 2;
                module = mlxsw_sp->port_to_module[local_port];

                mlxsw_sp_port_create(mlxsw_sp, local_port, false, module,
                                     width, 0);
        }
}
2588
/* Split a port into 2 or 4 ports (devlink port_split callback).
 *
 * Only a full-width (MLXSW_PORT_MODULE_MAX_WIDTH) port may be split,
 * and the neighboring port slots the new ports will occupy must be
 * free.  The existing port(s) are removed and re-created as split
 * ports; if that fails the original unsplit layout is restored.
 */
static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u8 local_port,
                               unsigned int count)
{
        struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
        struct mlxsw_sp_port *mlxsw_sp_port;
        u8 module, cur_width, base_port;
        int i;
        int err;

        mlxsw_sp_port = mlxsw_sp->ports[local_port];
        if (!mlxsw_sp_port) {
                dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n",
                        local_port);
                return -EINVAL;
        }

        module = mlxsw_sp_port->mapping.module;
        cur_width = mlxsw_sp_port->mapping.width;

        if (count != 2 && count != 4) {
                netdev_err(mlxsw_sp_port->dev, "Port can only be split into 2 or 4 ports\n");
                return -EINVAL;
        }

        if (cur_width != MLXSW_PORT_MODULE_MAX_WIDTH) {
                netdev_err(mlxsw_sp_port->dev, "Port cannot be split further\n");
                return -EINVAL;
        }

        /* Make sure we have enough slave (even) ports for the split. */
        if (count == 2) {
                base_port = local_port;
                if (mlxsw_sp->ports[base_port + 1]) {
                        netdev_err(mlxsw_sp_port->dev, "Invalid split configuration\n");
                        return -EINVAL;
                }
        } else {
                base_port = mlxsw_sp_cluster_base_port_get(local_port);
                if (mlxsw_sp->ports[base_port + 1] ||
                    mlxsw_sp->ports[base_port + 3]) {
                        netdev_err(mlxsw_sp_port->dev, "Invalid split configuration\n");
                        return -EINVAL;
                }
        }

        /* Remove the ports being replaced before creating the splits. */
        for (i = 0; i < count; i++)
                if (mlxsw_sp_port_created(mlxsw_sp, base_port + i))
                        mlxsw_sp_port_remove(mlxsw_sp, base_port + i);

        err = mlxsw_sp_port_split_create(mlxsw_sp, base_port, module, count);
        if (err) {
                dev_err(mlxsw_sp->bus_info->dev, "Failed to create split ports\n");
                goto err_port_split_create;
        }

        return 0;

err_port_split_create:
        /* Restore the original unsplit port(s). */
        mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, count);
        return err;
}
2650
/* Undo a previous split (devlink port_unsplit callback).
 *
 * The split count is inferred from the current lane width: width 1
 * means a 4-way split, otherwise a 2-way split.  All ports belonging
 * to the split are removed and the original full-width port(s) are
 * re-created.
 */
static int mlxsw_sp_port_unsplit(struct mlxsw_core *mlxsw_core, u8 local_port)
{
        struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
        struct mlxsw_sp_port *mlxsw_sp_port;
        u8 cur_width, base_port;
        unsigned int count;
        int i;

        mlxsw_sp_port = mlxsw_sp->ports[local_port];
        if (!mlxsw_sp_port) {
                dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n",
                        local_port);
                return -EINVAL;
        }

        if (!mlxsw_sp_port->split) {
                netdev_err(mlxsw_sp_port->dev, "Port wasn't split\n");
                return -EINVAL;
        }

        cur_width = mlxsw_sp_port->mapping.width;
        count = cur_width == 1 ? 4 : 2;

        base_port = mlxsw_sp_cluster_base_port_get(local_port);

        /* Determine which ports to remove. */
        if (count == 2 && local_port >= base_port + 2)
                base_port = base_port + 2;

        for (i = 0; i < count; i++)
                if (mlxsw_sp_port_created(mlxsw_sp, base_port + i))
                        mlxsw_sp_port_remove(mlxsw_sp, base_port + i);

        mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, count);

        return 0;
}
2688
2689static void mlxsw_sp_pude_event_func(const struct mlxsw_reg_info *reg,
2690                                     char *pude_pl, void *priv)
2691{
2692        struct mlxsw_sp *mlxsw_sp = priv;
2693        struct mlxsw_sp_port *mlxsw_sp_port;
2694        enum mlxsw_reg_pude_oper_status status;
2695        u8 local_port;
2696
2697        local_port = mlxsw_reg_pude_local_port_get(pude_pl);
2698        mlxsw_sp_port = mlxsw_sp->ports[local_port];
2699        if (!mlxsw_sp_port)
2700                return;
2701
2702        status = mlxsw_reg_pude_oper_status_get(pude_pl);
2703        if (status == MLXSW_PORT_OPER_STATUS_UP) {
2704                netdev_info(mlxsw_sp_port->dev, "link up\n");
2705                netif_carrier_on(mlxsw_sp_port->dev);
2706        } else {
2707                netdev_info(mlxsw_sp_port->dev, "link down\n");
2708                netif_carrier_off(mlxsw_sp_port->dev);
2709        }
2710}
2711
/* RX handler for trapped packets that do not get the offload
 * forwarding mark: account the packet in per-CPU stats and inject it
 * into the stack on the ingress port's netdev.
 */
static void mlxsw_sp_rx_listener_no_mark_func(struct sk_buff *skb,
                                              u8 local_port, void *priv)
{
        struct mlxsw_sp *mlxsw_sp = priv;
        struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];
        struct mlxsw_sp_port_pcpu_stats *pcpu_stats;

        if (unlikely(!mlxsw_sp_port)) {
                dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: skb received for non-existent port\n",
                                     local_port);
                return;
        }

        skb->dev = mlxsw_sp_port->dev;

        /* Account bytes before handing the skb to eth_type_trans(). */
        pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats);
        u64_stats_update_begin(&pcpu_stats->syncp);
        pcpu_stats->rx_packets++;
        pcpu_stats->rx_bytes += skb->len;
        u64_stats_update_end(&pcpu_stats->syncp);

        skb->protocol = eth_type_trans(skb, skb->dev);
        netif_receive_skb(skb);
}
2736
2737static void mlxsw_sp_rx_listener_mark_func(struct sk_buff *skb, u8 local_port,
2738                                           void *priv)
2739{
2740        skb->offload_fwd_mark = 1;
2741        return mlxsw_sp_rx_listener_no_mark_func(skb, local_port, priv);
2742}
2743
/* Helpers for building the Spectrum listener table.  They expand to
 * MLXSW_RXL() entries with the trap group name prefixed by SP_; the
 * _MARK variant uses the handler that sets skb->offload_fwd_mark.
 */
#define MLXSW_SP_RXL_NO_MARK(_trap_id, _action, _trap_group, _is_ctrl)  \
        MLXSW_RXL(mlxsw_sp_rx_listener_no_mark_func, _trap_id, _action, \
                  _is_ctrl, SP_##_trap_group, DISCARD)

#define MLXSW_SP_RXL_MARK(_trap_id, _action, _trap_group, _is_ctrl)     \
        MLXSW_RXL(mlxsw_sp_rx_listener_mark_func, _trap_id, _action,    \
                _is_ctrl, SP_##_trap_group, DISCARD)

/* Event listeners always use the SP_EVENT trap group. */
#define MLXSW_SP_EVENTL(_func, _trap_id)                \
        MLXSW_EVENTL(_func, _trap_id, SP_EVENT)
2754
/* Every trap and event this driver listens for; the whole table is
 * registered with the core in mlxsw_sp_traps_init().
 */
static const struct mlxsw_listener mlxsw_sp_listener[] = {
        /* Events */
        MLXSW_SP_EVENTL(mlxsw_sp_pude_event_func, PUDE),
        /* L2 traps */
        MLXSW_SP_RXL_NO_MARK(STP, TRAP_TO_CPU, STP, true),
        MLXSW_SP_RXL_NO_MARK(LACP, TRAP_TO_CPU, LACP, true),
        MLXSW_SP_RXL_NO_MARK(LLDP, TRAP_TO_CPU, LLDP, true),
        MLXSW_SP_RXL_MARK(DHCP, MIRROR_TO_CPU, DHCP, false),
        MLXSW_SP_RXL_MARK(IGMP_QUERY, MIRROR_TO_CPU, IGMP, false),
        MLXSW_SP_RXL_NO_MARK(IGMP_V1_REPORT, TRAP_TO_CPU, IGMP, false),
        MLXSW_SP_RXL_NO_MARK(IGMP_V2_REPORT, TRAP_TO_CPU, IGMP, false),
        MLXSW_SP_RXL_NO_MARK(IGMP_V2_LEAVE, TRAP_TO_CPU, IGMP, false),
        MLXSW_SP_RXL_NO_MARK(IGMP_V3_REPORT, TRAP_TO_CPU, IGMP, false),
        MLXSW_SP_RXL_MARK(ARPBC, MIRROR_TO_CPU, ARP, false),
        MLXSW_SP_RXL_MARK(ARPUC, MIRROR_TO_CPU, ARP, false),
        /* L3 traps */
        MLXSW_SP_RXL_NO_MARK(MTUERROR, TRAP_TO_CPU, ROUTER_EXP, false),
        MLXSW_SP_RXL_NO_MARK(TTLERROR, TRAP_TO_CPU, ROUTER_EXP, false),
        MLXSW_SP_RXL_NO_MARK(LBERROR, TRAP_TO_CPU, ROUTER_EXP, false),
        MLXSW_SP_RXL_MARK(OSPF, TRAP_TO_CPU, OSPF, false),
        MLXSW_SP_RXL_NO_MARK(IP2ME, TRAP_TO_CPU, IP2ME, false),
        MLXSW_SP_RXL_NO_MARK(RTR_INGRESS0, TRAP_TO_CPU, REMOTE_ROUTE, false),
        MLXSW_SP_RXL_NO_MARK(HOST_MISS_IPV4, TRAP_TO_CPU, ARP_MISS, false),
        MLXSW_SP_RXL_NO_MARK(BGP_IPV4, TRAP_TO_CPU, BGP_IPV4, false),
};
2780
/* Program a policer (QPCR register) for each trap group that sends
 * packets to the CPU, bounding the rate each group may generate.
 * Policer IDs iterate over the whole MAX_CPU_POLICERS range; IDs not
 * handled by the switch statement are left unconfigured.  IP2ME is the
 * only byte-based policer (is_bytes); all policers use
 * MLXSW_REG_QPCR_IR_UNITS_M rate units.
 */
static int mlxsw_sp_cpu_policers_set(struct mlxsw_core *mlxsw_core)
{
        char qpcr_pl[MLXSW_REG_QPCR_LEN];
        enum mlxsw_reg_qpcr_ir_units ir_units;
        int max_cpu_policers;
        bool is_bytes;
        u8 burst_size;
        u32 rate;
        int i, err;

        if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_CPU_POLICERS))
                return -EIO;

        max_cpu_policers = MLXSW_CORE_RES_GET(mlxsw_core, MAX_CPU_POLICERS);

        ir_units = MLXSW_REG_QPCR_IR_UNITS_M;
        for (i = 0; i < max_cpu_policers; i++) {
                is_bytes = false;
                switch (i) {
                case MLXSW_REG_HTGT_TRAP_GROUP_SP_STP:
                case MLXSW_REG_HTGT_TRAP_GROUP_SP_LACP:
                case MLXSW_REG_HTGT_TRAP_GROUP_SP_LLDP:
                case MLXSW_REG_HTGT_TRAP_GROUP_SP_OSPF:
                        rate = 128;
                        burst_size = 7;
                        break;
                case MLXSW_REG_HTGT_TRAP_GROUP_SP_IGMP:
                        rate = 16 * 1024;
                        burst_size = 10;
                        break;
                case MLXSW_REG_HTGT_TRAP_GROUP_SP_BGP_IPV4:
                case MLXSW_REG_HTGT_TRAP_GROUP_SP_ARP:
                case MLXSW_REG_HTGT_TRAP_GROUP_SP_DHCP:
                case MLXSW_REG_HTGT_TRAP_GROUP_SP_ARP_MISS:
                case MLXSW_REG_HTGT_TRAP_GROUP_SP_ROUTER_EXP:
                case MLXSW_REG_HTGT_TRAP_GROUP_SP_REMOTE_ROUTE:
                        rate = 1024;
                        burst_size = 7;
                        break;
                case MLXSW_REG_HTGT_TRAP_GROUP_SP_IP2ME:
                        is_bytes = true;
                        rate = 4 * 1024;
                        burst_size = 4;
                        break;
                default:
                        /* No policer configured for this ID. */
                        continue;
                }

                mlxsw_reg_qpcr_pack(qpcr_pl, i, ir_units, is_bytes, rate,
                                    burst_size);
                err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(qpcr), qpcr_pl);
                if (err)
                        return err;
        }

        return 0;
}
2838
/* Bind each trap group (HTGT register) to a priority, a traffic class
 * and a policer.  The policer ID equals the group ID, except for the
 * event group which gets no policer.  Returns -EIO if a required
 * policer ID exceeds the device's MAX_CPU_POLICERS resource.
 */
static int mlxsw_sp_trap_groups_set(struct mlxsw_core *mlxsw_core)
{
        char htgt_pl[MLXSW_REG_HTGT_LEN];
        enum mlxsw_reg_htgt_trap_group i;
        int max_cpu_policers;
        int max_trap_groups;
        u8 priority, tc;
        u16 policer_id;
        int err;

        if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_TRAP_GROUPS))
                return -EIO;

        max_trap_groups = MLXSW_CORE_RES_GET(mlxsw_core, MAX_TRAP_GROUPS);
        max_cpu_policers = MLXSW_CORE_RES_GET(mlxsw_core, MAX_CPU_POLICERS);

        for (i = 0; i < max_trap_groups; i++) {
                policer_id = i;
                switch (i) {
                case MLXSW_REG_HTGT_TRAP_GROUP_SP_STP:
                case MLXSW_REG_HTGT_TRAP_GROUP_SP_LACP:
                case MLXSW_REG_HTGT_TRAP_GROUP_SP_LLDP:
                case MLXSW_REG_HTGT_TRAP_GROUP_SP_OSPF:
                        priority = 5;
                        tc = 5;
                        break;
                case MLXSW_REG_HTGT_TRAP_GROUP_SP_BGP_IPV4:
                case MLXSW_REG_HTGT_TRAP_GROUP_SP_DHCP:
                        priority = 4;
                        tc = 4;
                        break;
                case MLXSW_REG_HTGT_TRAP_GROUP_SP_IGMP:
                case MLXSW_REG_HTGT_TRAP_GROUP_SP_IP2ME:
                        priority = 3;
                        tc = 3;
                        break;
                case MLXSW_REG_HTGT_TRAP_GROUP_SP_ARP:
                        priority = 2;
                        tc = 2;
                        break;
                case MLXSW_REG_HTGT_TRAP_GROUP_SP_ARP_MISS:
                case MLXSW_REG_HTGT_TRAP_GROUP_SP_ROUTER_EXP:
                case MLXSW_REG_HTGT_TRAP_GROUP_SP_REMOTE_ROUTE:
                        priority = 1;
                        tc = 1;
                        break;
                case MLXSW_REG_HTGT_TRAP_GROUP_SP_EVENT:
                        priority = MLXSW_REG_HTGT_DEFAULT_PRIORITY;
                        tc = MLXSW_REG_HTGT_DEFAULT_TC;
                        policer_id = MLXSW_REG_HTGT_INVALID_POLICER;
                        break;
                default:
                        /* Unknown group IDs are skipped. */
                        continue;
                }

                /* A policed group must have a policer the device supports. */
                if (max_cpu_policers <= policer_id &&
                    policer_id != MLXSW_REG_HTGT_INVALID_POLICER)
                        return -EIO;

                mlxsw_reg_htgt_pack(htgt_pl, i, policer_id, priority, tc);
                err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl);
                if (err)
                        return err;
        }

        return 0;
}
2906
2907static int mlxsw_sp_traps_init(struct mlxsw_sp *mlxsw_sp)
2908{
2909        int i;
2910        int err;
2911
2912        err = mlxsw_sp_cpu_policers_set(mlxsw_sp->core);
2913        if (err)
2914                return err;
2915
2916        err = mlxsw_sp_trap_groups_set(mlxsw_sp->core);
2917        if (err)
2918                return err;
2919
2920        for (i = 0; i < ARRAY_SIZE(mlxsw_sp_listener); i++) {
2921                err = mlxsw_core_trap_register(mlxsw_sp->core,
2922                                               &mlxsw_sp_listener[i],
2923                                               mlxsw_sp);
2924                if (err)
2925                        goto err_listener_register;
2926
2927        }
2928        return 0;
2929
2930err_listener_register:
2931        for (i--; i >= 0; i--) {
2932                mlxsw_core_trap_unregister(mlxsw_sp->core,
2933                                           &mlxsw_sp_listener[i],
2934                                           mlxsw_sp);
2935        }
2936        return err;
2937}
2938
2939static void mlxsw_sp_traps_fini(struct mlxsw_sp *mlxsw_sp)
2940{
2941        int i;
2942
2943        for (i = 0; i < ARRAY_SIZE(mlxsw_sp_listener); i++) {
2944                mlxsw_core_trap_unregister(mlxsw_sp->core,
2945                                           &mlxsw_sp_listener[i],
2946                                           mlxsw_sp);
2947        }
2948}
2949
2950static int __mlxsw_sp_flood_init(struct mlxsw_core *mlxsw_core,
2951                                 enum mlxsw_reg_sfgc_type type,
2952                                 enum mlxsw_reg_sfgc_bridge_type bridge_type)
2953{
2954        enum mlxsw_flood_table_type table_type;
2955        enum mlxsw_sp_flood_table flood_table;
2956        char sfgc_pl[MLXSW_REG_SFGC_LEN];
2957
2958        if (bridge_type == MLXSW_REG_SFGC_BRIDGE_TYPE_VFID)
2959                table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID;
2960        else
2961                table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFEST;
2962
2963        if (type == MLXSW_REG_SFGC_TYPE_UNKNOWN_UNICAST)
2964                flood_table = MLXSW_SP_FLOOD_TABLE_UC;
2965        else
2966                flood_table = MLXSW_SP_FLOOD_TABLE_BM;
2967
2968        mlxsw_reg_sfgc_pack(sfgc_pl, type, bridge_type, table_type,
2969                            flood_table);
2970        return mlxsw_reg_write(mlxsw_core, MLXSW_REG(sfgc), sfgc_pl);
2971}
2972
2973static int mlxsw_sp_flood_init(struct mlxsw_sp *mlxsw_sp)
2974{
2975        int type, err;
2976
2977        for (type = 0; type < MLXSW_REG_SFGC_TYPE_MAX; type++) {
2978                if (type == MLXSW_REG_SFGC_TYPE_RESERVED)
2979                        continue;
2980
2981                err = __mlxsw_sp_flood_init(mlxsw_sp->core, type,
2982                                            MLXSW_REG_SFGC_BRIDGE_TYPE_VFID);
2983                if (err)
2984                        return err;
2985
2986                err = __mlxsw_sp_flood_init(mlxsw_sp->core, type,
2987                                            MLXSW_REG_SFGC_BRIDGE_TYPE_1Q_FID);
2988                if (err)
2989                        return err;
2990        }
2991
2992        return 0;
2993}
2994
/* Configure LAG hashing (SLCR register) over the full set of L2, L3
 * and L4 fields, validate the LAG resources, and allocate the per-LAG
 * state array (struct mlxsw_sp_upper) sized by the MAX_LAG resource.
 */
static int mlxsw_sp_lag_init(struct mlxsw_sp *mlxsw_sp)
{
        char slcr_pl[MLXSW_REG_SLCR_LEN];
        int err;

        mlxsw_reg_slcr_pack(slcr_pl, MLXSW_REG_SLCR_LAG_HASH_SMAC |
                                     MLXSW_REG_SLCR_LAG_HASH_DMAC |
                                     MLXSW_REG_SLCR_LAG_HASH_ETHERTYPE |
                                     MLXSW_REG_SLCR_LAG_HASH_VLANID |
                                     MLXSW_REG_SLCR_LAG_HASH_SIP |
                                     MLXSW_REG_SLCR_LAG_HASH_DIP |
                                     MLXSW_REG_SLCR_LAG_HASH_SPORT |
                                     MLXSW_REG_SLCR_LAG_HASH_DPORT |
                                     MLXSW_REG_SLCR_LAG_HASH_IPPROTO);
        err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcr), slcr_pl);
        if (err)
                return err;

        /* Both LAG resources must be exposed by the device. */
        if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LAG) ||
            !MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LAG_MEMBERS))
                return -EIO;

        mlxsw_sp->lags = kcalloc(MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_LAG),
                                 sizeof(struct mlxsw_sp_upper),
                                 GFP_KERNEL);
        if (!mlxsw_sp->lags)
                return -ENOMEM;

        return 0;
}
3025
/* Free the per-LAG state array allocated in mlxsw_sp_lag_init(). */
static void mlxsw_sp_lag_fini(struct mlxsw_sp *mlxsw_sp)
{
        kfree(mlxsw_sp->lags);
}
3030
/* Configure the EMAD trap group (HTGT register) with default priority
 * and traffic class and no policer attached.
 */
static int mlxsw_sp_basic_trap_groups_set(struct mlxsw_core *mlxsw_core)
{
        char htgt_pl[MLXSW_REG_HTGT_LEN];

        mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_EMAD,
                            MLXSW_REG_HTGT_INVALID_POLICER,
                            MLXSW_REG_HTGT_DEFAULT_PRIORITY,
                            MLXSW_REG_HTGT_DEFAULT_TC);
        return mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl);
}
3041
/* Main init callback for the Spectrum driver.  Sub-systems are brought
 * up in dependency order (traps -> flood -> buffers -> LAG ->
 * switchdev -> router -> span -> ACL) and ports are created last; the
 * error path unwinds in exact reverse order.
 */
static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core,
                         const struct mlxsw_bus_info *mlxsw_bus_info)
{
        struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
        int err;

        mlxsw_sp->core = mlxsw_core;
        mlxsw_sp->bus_info = mlxsw_bus_info;
        INIT_LIST_HEAD(&mlxsw_sp->fids);
        INIT_LIST_HEAD(&mlxsw_sp->vfids.list);
        INIT_LIST_HEAD(&mlxsw_sp->br_mids.list);

        err = mlxsw_sp_base_mac_get(mlxsw_sp);
        if (err) {
                dev_err(mlxsw_sp->bus_info->dev, "Failed to get base mac\n");
                return err;
        }

        err = mlxsw_sp_traps_init(mlxsw_sp);
        if (err) {
                dev_err(mlxsw_sp->bus_info->dev, "Failed to set traps\n");
                return err;
        }

        err = mlxsw_sp_flood_init(mlxsw_sp);
        if (err) {
                dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize flood tables\n");
                goto err_flood_init;
        }

        err = mlxsw_sp_buffers_init(mlxsw_sp);
        if (err) {
                dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize buffers\n");
                goto err_buffers_init;
        }

        err = mlxsw_sp_lag_init(mlxsw_sp);
        if (err) {
                dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize LAG\n");
                goto err_lag_init;
        }

        err = mlxsw_sp_switchdev_init(mlxsw_sp);
        if (err) {
                dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize switchdev\n");
                goto err_switchdev_init;
        }

        err = mlxsw_sp_router_init(mlxsw_sp);
        if (err) {
                dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize router\n");
                goto err_router_init;
        }

        err = mlxsw_sp_span_init(mlxsw_sp);
        if (err) {
                dev_err(mlxsw_sp->bus_info->dev, "Failed to init span system\n");
                goto err_span_init;
        }

        err = mlxsw_sp_acl_init(mlxsw_sp);
        if (err) {
                dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize ACL\n");
                goto err_acl_init;
        }

        /* Ports come last: they rely on all the sub-systems above. */
        err = mlxsw_sp_ports_create(mlxsw_sp);
        if (err) {
                dev_err(mlxsw_sp->bus_info->dev, "Failed to create ports\n");
                goto err_ports_create;
        }

        return 0;

err_ports_create:
        mlxsw_sp_acl_fini(mlxsw_sp);
err_acl_init:
        mlxsw_sp_span_fini(mlxsw_sp);
err_span_init:
        mlxsw_sp_router_fini(mlxsw_sp);
err_router_init:
        mlxsw_sp_switchdev_fini(mlxsw_sp);
err_switchdev_init:
        mlxsw_sp_lag_fini(mlxsw_sp);
err_lag_init:
        mlxsw_sp_buffers_fini(mlxsw_sp);
err_buffers_init:
err_flood_init:
        mlxsw_sp_traps_fini(mlxsw_sp);
        return err;
}
3133
/* Teardown counterpart of mlxsw_sp_init(); sub-systems are finalized
 * in exact reverse order of initialization.  The WARN_ONs catch FIDs
 * or vFIDs leaked by earlier teardown steps.
 */
static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core)
{
        struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);

        mlxsw_sp_ports_remove(mlxsw_sp);
        mlxsw_sp_acl_fini(mlxsw_sp);
        mlxsw_sp_span_fini(mlxsw_sp);
        mlxsw_sp_router_fini(mlxsw_sp);
        mlxsw_sp_switchdev_fini(mlxsw_sp);
        mlxsw_sp_lag_fini(mlxsw_sp);
        mlxsw_sp_buffers_fini(mlxsw_sp);
        mlxsw_sp_traps_fini(mlxsw_sp);
        WARN_ON(!list_empty(&mlxsw_sp->vfids.list));
        WARN_ON(!list_empty(&mlxsw_sp->fids));
}
3149
/* Device configuration profile handed to the mlxsw core.  The used_*
 * fields presumably mark which of the paired values are valid --
 * verify against the core's profile handling.
 */
static struct mlxsw_config_profile mlxsw_sp_config_profile = {
        .used_max_vepa_channels         = 1,
        .max_vepa_channels              = 0,
        .used_max_mid                   = 1,
        .max_mid                        = MLXSW_SP_MID_MAX,
        .used_max_pgt                   = 1,
        .max_pgt                        = 0,
        .used_flood_tables              = 1,
        .used_flood_mode                = 1,
        .flood_mode                     = 3,
        .max_fid_offset_flood_tables    = 2,
        .fid_offset_flood_table_size    = VLAN_N_VID - 1,
        .max_fid_flood_tables           = 2,
        .fid_flood_table_size           = MLXSW_SP_VFID_MAX,
        .used_max_ib_mc                 = 1,
        .max_ib_mc                      = 0,
        .used_max_pkey                  = 1,
        .max_pkey                       = 0,
        .used_kvd_split_data            = 1,
        .kvd_hash_granularity           = MLXSW_SP_KVD_GRANULARITY,
        .kvd_hash_single_parts          = 2,
        .kvd_hash_double_parts          = 1,
        .kvd_linear_size                = MLXSW_SP_KVD_LINEAR_SIZE,
        .swid_config                    = {
                {
                        .used_type      = 1,
                        .type           = MLXSW_PORT_SWID_TYPE_ETH,
                }
        },
        .resource_query_enable          = 1,
};
3181
/* Spectrum driver operations registered with the mlxsw core. The sb_*
 * callbacks implement devlink shared-buffer configuration and occupancy
 * reporting.
 */
static struct mlxsw_driver mlxsw_sp_driver = {
	.kind				= mlxsw_sp_driver_name,
	.priv_size			= sizeof(struct mlxsw_sp),
	.init				= mlxsw_sp_init,
	.fini				= mlxsw_sp_fini,
	.basic_trap_groups_set		= mlxsw_sp_basic_trap_groups_set,
	.port_split			= mlxsw_sp_port_split,
	.port_unsplit			= mlxsw_sp_port_unsplit,
	.sb_pool_get			= mlxsw_sp_sb_pool_get,
	.sb_pool_set			= mlxsw_sp_sb_pool_set,
	.sb_port_pool_get		= mlxsw_sp_sb_port_pool_get,
	.sb_port_pool_set		= mlxsw_sp_sb_port_pool_set,
	.sb_tc_pool_bind_get		= mlxsw_sp_sb_tc_pool_bind_get,
	.sb_tc_pool_bind_set		= mlxsw_sp_sb_tc_pool_bind_set,
	.sb_occ_snapshot		= mlxsw_sp_sb_occ_snapshot,
	.sb_occ_max_clear		= mlxsw_sp_sb_occ_max_clear,
	.sb_occ_port_pool_get		= mlxsw_sp_sb_occ_port_pool_get,
	.sb_occ_tc_port_bind_get	= mlxsw_sp_sb_occ_tc_port_bind_get,
	.txhdr_construct		= mlxsw_sp_txhdr_construct,
	.txhdr_len			= MLXSW_TXHDR_LEN,
	.profile			= &mlxsw_sp_config_profile,
};
3204
3205bool mlxsw_sp_port_dev_check(const struct net_device *dev)
3206{
3207        return dev->netdev_ops == &mlxsw_sp_port_netdev_ops;
3208}
3209
/* netdev_walk_all_lower_dev() callback: store the first Spectrum port
 * found in *data and return non-zero to stop the walk.
 */
static int mlxsw_lower_dev_walk(struct net_device *lower_dev, void *data)
{
	struct mlxsw_sp_port **p_port = data;

	if (!mlxsw_sp_port_dev_check(lower_dev))
		return 0;

	*p_port = netdev_priv(lower_dev);
	return 1;
}
3222
3223static struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find(struct net_device *dev)
3224{
3225        struct mlxsw_sp_port *port;
3226
3227        if (mlxsw_sp_port_dev_check(dev))
3228                return netdev_priv(dev);
3229
3230        port = NULL;
3231        netdev_walk_all_lower_dev(dev, mlxsw_lower_dev_walk, &port);
3232
3233        return port;
3234}
3235
3236static struct mlxsw_sp *mlxsw_sp_lower_get(struct net_device *dev)
3237{
3238        struct mlxsw_sp_port *mlxsw_sp_port;
3239
3240        mlxsw_sp_port = mlxsw_sp_port_dev_lower_find(dev);
3241        return mlxsw_sp_port ? mlxsw_sp_port->mlxsw_sp : NULL;
3242}
3243
3244static struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find_rcu(struct net_device *dev)
3245{
3246        struct mlxsw_sp_port *port;
3247
3248        if (mlxsw_sp_port_dev_check(dev))
3249                return netdev_priv(dev);
3250
3251        port = NULL;
3252        netdev_walk_all_lower_dev_rcu(dev, mlxsw_lower_dev_walk, &port);
3253
3254        return port;
3255}
3256
3257struct mlxsw_sp_port *mlxsw_sp_port_lower_dev_hold(struct net_device *dev)
3258{
3259        struct mlxsw_sp_port *mlxsw_sp_port;
3260
3261        rcu_read_lock();
3262        mlxsw_sp_port = mlxsw_sp_port_dev_lower_find_rcu(dev);
3263        if (mlxsw_sp_port)
3264                dev_hold(mlxsw_sp_port->dev);
3265        rcu_read_unlock();
3266        return mlxsw_sp_port;
3267}
3268
/* Drop the netdev reference taken by mlxsw_sp_port_lower_dev_hold(). */
void mlxsw_sp_port_dev_put(struct mlxsw_sp_port *mlxsw_sp_port)
{
	dev_put(mlxsw_sp_port->dev);
}
3273
/* Decide whether an inetaddr event requires (de)configuring a RIF.
 * NOTE: this function also adjusts the RIF reference count as a side
 * effect, so it must be called exactly once per event.
 *
 * Returns true when a RIF must be created (first address on NETDEV_UP)
 * or destroyed (last address gone on NETDEV_DOWN).
 */
static bool mlxsw_sp_rif_should_config(struct mlxsw_sp_rif *r,
				       unsigned long event)
{
	switch (event) {
	case NETDEV_UP:
		/* No RIF yet - one must be created. */
		if (!r)
			return true;
		/* RIF exists; just account the additional address. */
		r->ref_count++;
		return false;
	case NETDEV_DOWN:
		if (r && --r->ref_count == 0)
			return true;
		/* It is possible we already removed the RIF ourselves
		 * if it was assigned to a netdev that is now a bridge
		 * or LAG slave.
		 */
		return false;
	}

	return false;
}
3295
3296static int mlxsw_sp_avail_rif_get(struct mlxsw_sp *mlxsw_sp)
3297{
3298        int i;
3299
3300        for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++)
3301                if (!mlxsw_sp->rifs[i])
3302                        return i;
3303
3304        return MLXSW_SP_INVALID_RIF;
3305}
3306
3307static void mlxsw_sp_vport_rif_sp_attr_get(struct mlxsw_sp_port *mlxsw_sp_vport,
3308                                           bool *p_lagged, u16 *p_system_port)
3309{
3310        u8 local_port = mlxsw_sp_vport->local_port;
3311
3312        *p_lagged = mlxsw_sp_vport->lagged;
3313        *p_system_port = *p_lagged ? mlxsw_sp_vport->lag_id : local_port;
3314}
3315
3316static int mlxsw_sp_vport_rif_sp_op(struct mlxsw_sp_port *mlxsw_sp_vport,
3317                                    struct net_device *l3_dev, u16 rif,
3318                                    bool create)
3319{
3320        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp;
3321        bool lagged = mlxsw_sp_vport->lagged;
3322        char ritr_pl[MLXSW_REG_RITR_LEN];
3323        u16 system_port;
3324
3325        mlxsw_reg_ritr_pack(ritr_pl, create, MLXSW_REG_RITR_SP_IF, rif,
3326                            l3_dev->mtu, l3_dev->dev_addr);
3327
3328        mlxsw_sp_vport_rif_sp_attr_get(mlxsw_sp_vport, &lagged, &system_port);
3329        mlxsw_reg_ritr_sp_if_pack(ritr_pl, lagged, system_port,
3330                                  mlxsw_sp_vport_vid_get(mlxsw_sp_vport));
3331
3332        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
3333}
3334
3335static void mlxsw_sp_vport_rif_sp_leave(struct mlxsw_sp_port *mlxsw_sp_vport);
3336
3337static struct mlxsw_sp_fid *
3338mlxsw_sp_rfid_alloc(u16 fid, struct net_device *l3_dev)
3339{
3340        struct mlxsw_sp_fid *f;
3341
3342        f = kzalloc(sizeof(*f), GFP_KERNEL);
3343        if (!f)
3344                return NULL;
3345
3346        f->leave = mlxsw_sp_vport_rif_sp_leave;
3347        f->ref_count = 0;
3348        f->dev = l3_dev;
3349        f->fid = fid;
3350
3351        return f;
3352}
3353
3354static struct mlxsw_sp_rif *
3355mlxsw_sp_rif_alloc(u16 rif, struct net_device *l3_dev, struct mlxsw_sp_fid *f)
3356{
3357        struct mlxsw_sp_rif *r;
3358
3359        r = kzalloc(sizeof(*r), GFP_KERNEL);
3360        if (!r)
3361                return NULL;
3362
3363        INIT_LIST_HEAD(&r->nexthop_list);
3364        INIT_LIST_HEAD(&r->neigh_list);
3365        ether_addr_copy(r->addr, l3_dev->dev_addr);
3366        r->mtu = l3_dev->mtu;
3367        r->ref_count = 1;
3368        r->dev = l3_dev;
3369        r->rif = rif;
3370        r->f = f;
3371
3372        return r;
3373}
3374
/* Create a Sub-port RIF for @mlxsw_sp_vport on top of @l3_dev:
 * allocate a RIF index, program the RIF in hardware, install an FDB
 * entry for the netdev's MAC, and allocate the rFID and RIF objects.
 * On failure every step is rolled back in reverse order.
 *
 * Returns the new RIF or an ERR_PTR (-ERANGE when no RIF is free).
 */
static struct mlxsw_sp_rif *
mlxsw_sp_vport_rif_sp_create(struct mlxsw_sp_port *mlxsw_sp_vport,
			     struct net_device *l3_dev)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp;
	struct mlxsw_sp_fid *f;
	struct mlxsw_sp_rif *r;
	u16 fid, rif;
	int err;

	rif = mlxsw_sp_avail_rif_get(mlxsw_sp);
	if (rif == MLXSW_SP_INVALID_RIF)
		return ERR_PTR(-ERANGE);

	err = mlxsw_sp_vport_rif_sp_op(mlxsw_sp_vport, l3_dev, rif, true);
	if (err)
		return ERR_PTR(err);

	/* rFID numbers are derived from the RIF index. */
	fid = mlxsw_sp_rif_sp_to_fid(rif);
	err = mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, fid, true);
	if (err)
		goto err_rif_fdb_op;

	f = mlxsw_sp_rfid_alloc(fid, l3_dev);
	if (!f) {
		err = -ENOMEM;
		goto err_rfid_alloc;
	}

	r = mlxsw_sp_rif_alloc(rif, l3_dev, f);
	if (!r) {
		err = -ENOMEM;
		goto err_rif_alloc;
	}

	/* Link FID and RIF and publish the RIF in the table. */
	f->r = r;
	mlxsw_sp->rifs[rif] = r;

	return r;

err_rif_alloc:
	kfree(f);
err_rfid_alloc:
	mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, fid, false);
err_rif_fdb_op:
	mlxsw_sp_vport_rif_sp_op(mlxsw_sp_vport, l3_dev, rif, false);
	return ERR_PTR(err);
}
3423
/* Destroy a Sub-port RIF: sync the router (flush neighbours/nexthops
 * using this RIF), unpublish and free the RIF and FID objects, then
 * remove the FDB entry and the hardware RIF - the reverse of
 * mlxsw_sp_vport_rif_sp_create().
 */
static void mlxsw_sp_vport_rif_sp_destroy(struct mlxsw_sp_port *mlxsw_sp_vport,
					  struct mlxsw_sp_rif *r)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp;
	struct net_device *l3_dev = r->dev;
	struct mlxsw_sp_fid *f = r->f;
	/* Snapshot fields before freeing the objects below. */
	u16 fid = f->fid;
	u16 rif = r->rif;

	mlxsw_sp_router_rif_gone_sync(mlxsw_sp, r);

	mlxsw_sp->rifs[rif] = NULL;
	f->r = NULL;

	kfree(r);

	kfree(f);

	mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, fid, false);

	mlxsw_sp_vport_rif_sp_op(mlxsw_sp_vport, l3_dev, rif, false);
}
3446
/* Attach @mlxsw_sp_vport to the RIF of @l3_dev, creating the RIF on
 * first use. The vPort is bound to the RIF's FID and takes a FID
 * reference. Returns 0 or a negative errno from RIF creation.
 */
static int mlxsw_sp_vport_rif_sp_join(struct mlxsw_sp_port *mlxsw_sp_vport,
				      struct net_device *l3_dev)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp;
	struct mlxsw_sp_rif *r;

	r = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
	if (!r) {
		r = mlxsw_sp_vport_rif_sp_create(mlxsw_sp_vport, l3_dev);
		if (IS_ERR(r))
			return PTR_ERR(r);
	}

	mlxsw_sp_vport_fid_set(mlxsw_sp_vport, r->f);
	r->f->ref_count++;

	netdev_dbg(mlxsw_sp_vport->dev, "Joined FID=%d\n", r->f->fid);

	return 0;
}
3467
/* Detach @mlxsw_sp_vport from its router FID; destroy the backing RIF
 * when the last reference is dropped. Counterpart of
 * mlxsw_sp_vport_rif_sp_join().
 */
static void mlxsw_sp_vport_rif_sp_leave(struct mlxsw_sp_port *mlxsw_sp_vport)
{
	struct mlxsw_sp_fid *f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport);

	netdev_dbg(mlxsw_sp_vport->dev, "Left FID=%d\n", f->fid);

	mlxsw_sp_vport_fid_set(mlxsw_sp_vport, NULL);
	if (--f->ref_count == 0)
		mlxsw_sp_vport_rif_sp_destroy(mlxsw_sp_vport, f->r);
}
3478
/* Handle an inetaddr event on @l3_dev for the vPort of @port_dev with
 * VLAN @vid: join the RIF on NETDEV_UP, leave it on NETDEV_DOWN.
 * Returns -EINVAL (with a WARN) if the vPort does not exist.
 */
static int mlxsw_sp_inetaddr_vport_event(struct net_device *l3_dev,
					 struct net_device *port_dev,
					 unsigned long event, u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(port_dev);
	struct mlxsw_sp_port *mlxsw_sp_vport;

	mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
	if (WARN_ON(!mlxsw_sp_vport))
		return -EINVAL;

	switch (event) {
	case NETDEV_UP:
		return mlxsw_sp_vport_rif_sp_join(mlxsw_sp_vport, l3_dev);
	case NETDEV_DOWN:
		mlxsw_sp_vport_rif_sp_leave(mlxsw_sp_vport);
		break;
	}

	return 0;
}
3500
3501static int mlxsw_sp_inetaddr_port_event(struct net_device *port_dev,
3502                                        unsigned long event)
3503{
3504        if (netif_is_bridge_port(port_dev) || netif_is_lag_port(port_dev))
3505                return 0;
3506
3507        return mlxsw_sp_inetaddr_vport_event(port_dev, port_dev, event, 1);
3508}
3509
3510static int __mlxsw_sp_inetaddr_lag_event(struct net_device *l3_dev,
3511                                         struct net_device *lag_dev,
3512                                         unsigned long event, u16 vid)
3513{
3514        struct net_device *port_dev;
3515        struct list_head *iter;
3516        int err;
3517
3518        netdev_for_each_lower_dev(lag_dev, port_dev, iter) {
3519                if (mlxsw_sp_port_dev_check(port_dev)) {
3520                        err = mlxsw_sp_inetaddr_vport_event(l3_dev, port_dev,
3521                                                            event, vid);
3522                        if (err)
3523                                return err;
3524                }
3525        }
3526
3527        return 0;
3528}
3529
/* Handle an inetaddr event on a LAG netdev. A bridged LAG is routed
 * via the bridge device instead, so the event is ignored then. The
 * untagged traffic of a free-standing LAG maps to VID 1.
 */
static int mlxsw_sp_inetaddr_lag_event(struct net_device *lag_dev,
				       unsigned long event)
{
	if (netif_is_bridge_port(lag_dev))
		return 0;

	return __mlxsw_sp_inetaddr_lag_event(lag_dev, lag_dev, event, 1);
}
3538
3539static struct mlxsw_sp_fid *mlxsw_sp_bridge_fid_get(struct mlxsw_sp *mlxsw_sp,
3540                                                    struct net_device *l3_dev)
3541{
3542        u16 fid;
3543
3544        if (is_vlan_dev(l3_dev))
3545                fid = vlan_dev_vlan_id(l3_dev);
3546        else if (mlxsw_sp->master_bridge.dev == l3_dev)
3547                fid = 1;
3548        else
3549                return mlxsw_sp_vfid_find(mlxsw_sp, l3_dev);
3550
3551        return mlxsw_sp_fid_find(mlxsw_sp, fid);
3552}
3553
3554static enum mlxsw_flood_table_type mlxsw_sp_flood_table_type_get(u16 fid)
3555{
3556        return mlxsw_sp_fid_is_vfid(fid) ? MLXSW_REG_SFGC_TABLE_TYPE_FID :
3557               MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFEST;
3558}
3559
3560static u16 mlxsw_sp_flood_table_index_get(u16 fid)
3561{
3562        return mlxsw_sp_fid_is_vfid(fid) ? mlxsw_sp_fid_to_vfid(fid) : fid;
3563}
3564
3565static int mlxsw_sp_router_port_flood_set(struct mlxsw_sp *mlxsw_sp, u16 fid,
3566                                          bool set)
3567{
3568        enum mlxsw_flood_table_type table_type;
3569        char *sftr_pl;
3570        u16 index;
3571        int err;
3572
3573        sftr_pl = kmalloc(MLXSW_REG_SFTR_LEN, GFP_KERNEL);
3574        if (!sftr_pl)
3575                return -ENOMEM;
3576
3577        table_type = mlxsw_sp_flood_table_type_get(fid);
3578        index = mlxsw_sp_flood_table_index_get(fid);
3579        mlxsw_reg_sftr_pack(sftr_pl, MLXSW_SP_FLOOD_TABLE_BM, index, table_type,
3580                            1, MLXSW_PORT_ROUTER_PORT, set);
3581        err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sftr), sftr_pl);
3582
3583        kfree(sftr_pl);
3584        return err;
3585}
3586
3587static enum mlxsw_reg_ritr_if_type mlxsw_sp_rif_type_get(u16 fid)
3588{
3589        if (mlxsw_sp_fid_is_vfid(fid))
3590                return MLXSW_REG_RITR_FID_IF;
3591        else
3592                return MLXSW_REG_RITR_VLAN_IF;
3593}
3594
3595static int mlxsw_sp_rif_bridge_op(struct mlxsw_sp *mlxsw_sp,
3596                                  struct net_device *l3_dev,
3597                                  u16 fid, u16 rif,
3598                                  bool create)
3599{
3600        enum mlxsw_reg_ritr_if_type rif_type;
3601        char ritr_pl[MLXSW_REG_RITR_LEN];
3602
3603        rif_type = mlxsw_sp_rif_type_get(fid);
3604        mlxsw_reg_ritr_pack(ritr_pl, create, rif_type, rif, l3_dev->mtu,
3605                            l3_dev->dev_addr);
3606        mlxsw_reg_ritr_fid_set(ritr_pl, rif_type, fid);
3607
3608        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
3609}
3610
/* Create a bridge RIF for @l3_dev over FID @f: allocate a RIF index,
 * flood BM traffic to the router port, program the RIF in hardware,
 * install an FDB entry for the netdev's MAC, and allocate the RIF
 * object. On failure every step is rolled back in reverse order.
 */
static int mlxsw_sp_rif_bridge_create(struct mlxsw_sp *mlxsw_sp,
				      struct net_device *l3_dev,
				      struct mlxsw_sp_fid *f)
{
	struct mlxsw_sp_rif *r;
	u16 rif;
	int err;

	rif = mlxsw_sp_avail_rif_get(mlxsw_sp);
	if (rif == MLXSW_SP_INVALID_RIF)
		return -ERANGE;

	err = mlxsw_sp_router_port_flood_set(mlxsw_sp, f->fid, true);
	if (err)
		return err;

	err = mlxsw_sp_rif_bridge_op(mlxsw_sp, l3_dev, f->fid, rif, true);
	if (err)
		goto err_rif_bridge_op;

	err = mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, f->fid, true);
	if (err)
		goto err_rif_fdb_op;

	r = mlxsw_sp_rif_alloc(rif, l3_dev, f);
	if (!r) {
		err = -ENOMEM;
		goto err_rif_alloc;
	}

	/* Link FID and RIF and publish the RIF in the table. */
	f->r = r;
	mlxsw_sp->rifs[rif] = r;

	netdev_dbg(l3_dev, "RIF=%d created\n", rif);

	return 0;

err_rif_alloc:
	mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, f->fid, false);
err_rif_fdb_op:
	mlxsw_sp_rif_bridge_op(mlxsw_sp, l3_dev, f->fid, rif, false);
err_rif_bridge_op:
	mlxsw_sp_router_port_flood_set(mlxsw_sp, f->fid, false);
	return err;
}
3656
/* Destroy a bridge RIF - the exact reverse of
 * mlxsw_sp_rif_bridge_create(): sync the router, unpublish and free
 * the RIF object, then remove the FDB entry, the hardware RIF and the
 * router-port flooding.
 */
void mlxsw_sp_rif_bridge_destroy(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_rif *r)
{
	struct net_device *l3_dev = r->dev;
	struct mlxsw_sp_fid *f = r->f;
	/* Snapshot the index before freeing the RIF object below. */
	u16 rif = r->rif;

	mlxsw_sp_router_rif_gone_sync(mlxsw_sp, r);

	mlxsw_sp->rifs[rif] = NULL;
	f->r = NULL;

	kfree(r);

	mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, f->fid, false);

	mlxsw_sp_rif_bridge_op(mlxsw_sp, l3_dev, f->fid, rif, false);

	mlxsw_sp_router_port_flood_set(mlxsw_sp, f->fid, false);

	netdev_dbg(l3_dev, "RIF=%d destroyed\n", rif);
}
3679
/* Handle an inetaddr event on a bridge (or VLAN-on-bridge) device:
 * create the bridge RIF on NETDEV_UP, destroy it on NETDEV_DOWN.
 */
static int mlxsw_sp_inetaddr_bridge_event(struct net_device *l3_dev,
					  struct net_device *br_dev,
					  unsigned long event)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(l3_dev);
	struct mlxsw_sp_fid *f;

	/* FID can either be an actual FID if the L3 device is the
	 * VLAN-aware bridge or a VLAN device on top. Otherwise, the
	 * L3 device is a VLAN-unaware bridge and we get a vFID.
	 */
	f = mlxsw_sp_bridge_fid_get(mlxsw_sp, l3_dev);
	if (WARN_ON(!f))
		return -EINVAL;

	switch (event) {
	case NETDEV_UP:
		return mlxsw_sp_rif_bridge_create(mlxsw_sp, l3_dev, f);
	case NETDEV_DOWN:
		mlxsw_sp_rif_bridge_destroy(mlxsw_sp, f->r);
		break;
	}

	return 0;
}
3705
/* Handle an inetaddr event on a VLAN device by dispatching on what it
 * sits on: a Spectrum port (vPort RIF), a LAG (one vPort RIF per
 * member) or the VLAN-aware bridge (bridge RIF). Other real devices
 * are ignored.
 */
static int mlxsw_sp_inetaddr_vlan_event(struct net_device *vlan_dev,
					unsigned long event)
{
	struct net_device *real_dev = vlan_dev_real_dev(vlan_dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(vlan_dev);
	u16 vid = vlan_dev_vlan_id(vlan_dev);

	if (mlxsw_sp_port_dev_check(real_dev))
		return mlxsw_sp_inetaddr_vport_event(vlan_dev, real_dev, event,
						     vid);
	else if (netif_is_lag_master(real_dev))
		return __mlxsw_sp_inetaddr_lag_event(vlan_dev, real_dev, event,
						     vid);
	else if (netif_is_bridge_master(real_dev) &&
		 mlxsw_sp->master_bridge.dev == real_dev)
		return mlxsw_sp_inetaddr_bridge_event(vlan_dev, real_dev,
						      event);

	return 0;
}
3726
/* inetaddr notifier: react to IPv4 address addition/removal on any
 * netdev related to this ASIC by (de)configuring the matching RIF.
 * Returns NOTIFY_* via notifier_from_errno().
 */
static int mlxsw_sp_inetaddr_event(struct notifier_block *unused,
				   unsigned long event, void *ptr)
{
	struct in_ifaddr *ifa = (struct in_ifaddr *) ptr;
	struct net_device *dev = ifa->ifa_dev->dev;
	struct mlxsw_sp *mlxsw_sp;
	struct mlxsw_sp_rif *r;
	int err = 0;

	/* Ignore devices that are not (on top of) our ports. */
	mlxsw_sp = mlxsw_sp_lower_get(dev);
	if (!mlxsw_sp)
		goto out;

	/* NOTE: adjusts the RIF reference count as a side effect. */
	r = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
	if (!mlxsw_sp_rif_should_config(r, event))
		goto out;

	if (mlxsw_sp_port_dev_check(dev))
		err = mlxsw_sp_inetaddr_port_event(dev, event);
	else if (netif_is_lag_master(dev))
		err = mlxsw_sp_inetaddr_lag_event(dev, event);
	else if (netif_is_bridge_master(dev))
		err = mlxsw_sp_inetaddr_bridge_event(dev, dev, event);
	else if (is_vlan_dev(dev))
		err = mlxsw_sp_inetaddr_vlan_event(dev, event);

out:
	return notifier_from_errno(err);
}
3756
/* Update the MAC and MTU of an existing RIF via a read-modify-write of
 * the RITR register, keeping all other fields intact.
 */
static int mlxsw_sp_rif_edit(struct mlxsw_sp *mlxsw_sp, u16 rif,
			     const char *mac, int mtu)
{
	char ritr_pl[MLXSW_REG_RITR_LEN];
	int err;

	/* Read the current RIF configuration first. */
	mlxsw_reg_ritr_rif_pack(ritr_pl, rif);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
	if (err)
		return err;

	mlxsw_reg_ritr_mtu_set(ritr_pl, mtu);
	mlxsw_reg_ritr_if_mac_memcpy_to(ritr_pl, mac);
	mlxsw_reg_ritr_op_set(ritr_pl, MLXSW_REG_RITR_RIF_CREATE);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
}
3773
/* Handle a MAC/MTU change on a routed netdev: re-install the FDB
 * entry with the new MAC, update the RIF in hardware, then refresh the
 * cached values. On failure the old configuration is restored.
 */
static int mlxsw_sp_netdevice_router_port_event(struct net_device *dev)
{
	struct mlxsw_sp *mlxsw_sp;
	struct mlxsw_sp_rif *r;
	int err;

	mlxsw_sp = mlxsw_sp_lower_get(dev);
	if (!mlxsw_sp)
		return 0;

	/* Nothing to do if the netdev has no RIF. */
	r = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
	if (!r)
		return 0;

	/* Remove the FDB entry for the old MAC before editing. */
	err = mlxsw_sp_rif_fdb_op(mlxsw_sp, r->addr, r->f->fid, false);
	if (err)
		return err;

	err = mlxsw_sp_rif_edit(mlxsw_sp, r->rif, dev->dev_addr, dev->mtu);
	if (err)
		goto err_rif_edit;

	err = mlxsw_sp_rif_fdb_op(mlxsw_sp, dev->dev_addr, r->f->fid, true);
	if (err)
		goto err_rif_fdb_op;

	/* Commit the new values to the cached RIF state. */
	ether_addr_copy(r->addr, dev->dev_addr);
	r->mtu = dev->mtu;

	netdev_dbg(dev, "Updated RIF=%d\n", r->rif);

	return 0;

err_rif_fdb_op:
	mlxsw_sp_rif_edit(mlxsw_sp, r->rif, r->addr, r->mtu);
err_rif_edit:
	mlxsw_sp_rif_fdb_op(mlxsw_sp, r->addr, r->f->fid, true);
	return err;
}
3813
3814static bool mlxsw_sp_lag_port_fid_member(struct mlxsw_sp_port *lag_port,
3815                                         u16 fid)
3816{
3817        if (mlxsw_sp_fid_is_vfid(fid))
3818                return mlxsw_sp_port_vport_find_by_fid(lag_port, fid);
3819        else
3820                return test_bit(fid, lag_port->active_vlans);
3821}
3822
/* Decide whether leaving @fid should flush the FDB. A non-LAG port
 * always flushes. A LAG member only flushes when no other member of
 * the same LAG is still in the FID, since LAG FDB entries are shared
 * by all members.
 */
static bool mlxsw_sp_port_fdb_should_flush(struct mlxsw_sp_port *mlxsw_sp_port,
					   u16 fid)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u8 local_port = mlxsw_sp_port->local_port;
	u16 lag_id = mlxsw_sp_port->lag_id;
	u64 max_lag_members;
	int i, count = 0;

	if (!mlxsw_sp_port->lagged)
		return true;

	/* Count other members of our LAG still in the FID. */
	max_lag_members = MLXSW_CORE_RES_GET(mlxsw_sp->core,
					     MAX_LAG_MEMBERS);
	for (i = 0; i < max_lag_members; i++) {
		struct mlxsw_sp_port *lag_port;

		lag_port = mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i);
		if (!lag_port || lag_port->local_port == local_port)
			continue;
		if (mlxsw_sp_lag_port_fid_member(lag_port, fid))
			count++;
	}

	return !count;
}
3849
3850static int
3851mlxsw_sp_port_fdb_flush_by_port_fid(const struct mlxsw_sp_port *mlxsw_sp_port,
3852                                    u16 fid)
3853{
3854        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
3855        char sfdf_pl[MLXSW_REG_SFDF_LEN];
3856
3857        mlxsw_reg_sfdf_pack(sfdf_pl, MLXSW_REG_SFDF_FLUSH_PER_PORT_AND_FID);
3858        mlxsw_reg_sfdf_fid_set(sfdf_pl, fid);
3859        mlxsw_reg_sfdf_port_fid_system_port_set(sfdf_pl,
3860                                                mlxsw_sp_port->local_port);
3861
3862        netdev_dbg(mlxsw_sp_port->dev, "FDB flushed using Port=%d, FID=%d\n",
3863                   mlxsw_sp_port->local_port, fid);
3864
3865        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl);
3866}
3867
3868static int
3869mlxsw_sp_port_fdb_flush_by_lag_id_fid(const struct mlxsw_sp_port *mlxsw_sp_port,
3870                                      u16 fid)
3871{
3872        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
3873        char sfdf_pl[MLXSW_REG_SFDF_LEN];
3874
3875        mlxsw_reg_sfdf_pack(sfdf_pl, MLXSW_REG_SFDF_FLUSH_PER_LAG_AND_FID);
3876        mlxsw_reg_sfdf_fid_set(sfdf_pl, fid);
3877        mlxsw_reg_sfdf_lag_fid_lag_id_set(sfdf_pl, mlxsw_sp_port->lag_id);
3878
3879        netdev_dbg(mlxsw_sp_port->dev, "FDB flushed using LAG ID=%d, FID=%d\n",
3880                   mlxsw_sp_port->lag_id, fid);
3881
3882        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl);
3883}
3884
3885int mlxsw_sp_port_fdb_flush(struct mlxsw_sp_port *mlxsw_sp_port, u16 fid)
3886{
3887        if (!mlxsw_sp_port_fdb_should_flush(mlxsw_sp_port, fid))
3888                return 0;
3889
3890        if (mlxsw_sp_port->lagged)
3891                return mlxsw_sp_port_fdb_flush_by_lag_id_fid(mlxsw_sp_port,
3892                                                             fid);
3893        else
3894                return mlxsw_sp_port_fdb_flush_by_port_fid(mlxsw_sp_port, fid);
3895}
3896
/* Called when the master bridge is gone: drop the remaining reference
 * on every FID and destroy it. At this point each FID is expected to
 * hold exactly one reference - more indicates a leak, hence the WARN.
 */
static void mlxsw_sp_master_bridge_gone_sync(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_fid *f, *tmp;

	list_for_each_entry_safe(f, tmp, &mlxsw_sp->fids, list)
		if (--f->ref_count == 0)
			mlxsw_sp_fid_destroy(mlxsw_sp, f);
		else
			WARN_ON_ONCE(1);
}
3907
3908static bool mlxsw_sp_master_bridge_check(struct mlxsw_sp *mlxsw_sp,
3909                                         struct net_device *br_dev)
3910{
3911        return !mlxsw_sp->master_bridge.dev ||
3912               mlxsw_sp->master_bridge.dev == br_dev;
3913}
3914
3915static void mlxsw_sp_master_bridge_inc(struct mlxsw_sp *mlxsw_sp,
3916                                       struct net_device *br_dev)
3917{
3918        mlxsw_sp->master_bridge.dev = br_dev;
3919        mlxsw_sp->master_bridge.ref_count++;
3920}
3921
/* Drop one reference on the master bridge; on the last one, forget the
 * bridge and release any FIDs still held by its upper devices.
 */
static void mlxsw_sp_master_bridge_dec(struct mlxsw_sp *mlxsw_sp)
{
	if (--mlxsw_sp->master_bridge.ref_count == 0) {
		mlxsw_sp->master_bridge.dev = NULL;
		/* It's possible upper VLAN devices are still holding
		 * references to underlying FIDs. Drop the reference
		 * and release the resources if it was the last one.
		 * If it wasn't, then something bad happened.
		 */
		mlxsw_sp_master_bridge_gone_sync(mlxsw_sp);
	}
}
3934
/* Enslave the port to the VLAN-aware bridge @br_dev: remove the
 * implicit VID 1 interface, account the bridge reference and enable
 * bridge-related port flags. Returns 0 or a negative errno.
 */
static int mlxsw_sp_port_bridge_join(struct mlxsw_sp_port *mlxsw_sp_port,
				     struct net_device *br_dev)
{
	struct net_device *dev = mlxsw_sp_port->dev;
	int err;

	/* When port is not bridged untagged packets are tagged with
	 * PVID=VID=1, thereby creating an implicit VLAN interface in
	 * the device. Remove it and let bridge code take care of its
	 * own VLANs.
	 */
	err = mlxsw_sp_port_kill_vid(dev, 0, 1);
	if (err)
		return err;

	mlxsw_sp_master_bridge_inc(mlxsw_sp_port->mlxsw_sp, br_dev);

	/* Enable FDB learning/sync and unknown-unicast flooding while
	 * bridged.
	 */
	mlxsw_sp_port->learning = 1;
	mlxsw_sp_port->learning_sync = 1;
	mlxsw_sp_port->uc_flood = 1;
	mlxsw_sp_port->bridged = 1;

	return 0;
}
3959
/* Undo mlxsw_sp_port_bridge_join(): restore PVID 1, drop the bridge
 * reference, clear the bridge-related flags and re-create the implicit
 * VID 1 interface.
 */
static void mlxsw_sp_port_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct net_device *dev = mlxsw_sp_port->dev;

	mlxsw_sp_port_pvid_set(mlxsw_sp_port, 1);

	mlxsw_sp_master_bridge_dec(mlxsw_sp_port->mlxsw_sp);

	mlxsw_sp_port->learning = 0;
	mlxsw_sp_port->learning_sync = 0;
	mlxsw_sp_port->uc_flood = 0;
	mlxsw_sp_port->bridged = 0;

	/* Add implicit VLAN interface in the device, so that untagged
	 * packets will be classified to the default vFID.
	 */
	mlxsw_sp_port_add_vid(dev, 0, 1);
}
3978
3979static int mlxsw_sp_lag_create(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
3980{
3981        char sldr_pl[MLXSW_REG_SLDR_LEN];
3982
3983        mlxsw_reg_sldr_lag_create_pack(sldr_pl, lag_id);
3984        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
3985}
3986
3987static int mlxsw_sp_lag_destroy(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
3988{
3989        char sldr_pl[MLXSW_REG_SLDR_LEN];
3990
3991        mlxsw_reg_sldr_lag_destroy_pack(sldr_pl, lag_id);
3992        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
3993}
3994
3995static int mlxsw_sp_lag_col_port_add(struct mlxsw_sp_port *mlxsw_sp_port,
3996                                     u16 lag_id, u8 port_index)
3997{
3998        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
3999        char slcor_pl[MLXSW_REG_SLCOR_LEN];
4000
4001        mlxsw_reg_slcor_port_add_pack(slcor_pl, mlxsw_sp_port->local_port,
4002                                      lag_id, port_index);
4003        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
4004}
4005
4006static int mlxsw_sp_lag_col_port_remove(struct mlxsw_sp_port *mlxsw_sp_port,
4007                                        u16 lag_id)
4008{
4009        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
4010        char slcor_pl[MLXSW_REG_SLCOR_LEN];
4011
4012        mlxsw_reg_slcor_port_remove_pack(slcor_pl, mlxsw_sp_port->local_port,
4013                                         lag_id);
4014        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
4015}
4016
4017static int mlxsw_sp_lag_col_port_enable(struct mlxsw_sp_port *mlxsw_sp_port,
4018                                        u16 lag_id)
4019{
4020        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
4021        char slcor_pl[MLXSW_REG_SLCOR_LEN];
4022
4023        mlxsw_reg_slcor_col_enable_pack(slcor_pl, mlxsw_sp_port->local_port,
4024                                        lag_id);
4025        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
4026}
4027
4028static int mlxsw_sp_lag_col_port_disable(struct mlxsw_sp_port *mlxsw_sp_port,
4029                                         u16 lag_id)
4030{
4031        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
4032        char slcor_pl[MLXSW_REG_SLCOR_LEN];
4033
4034        mlxsw_reg_slcor_col_disable_pack(slcor_pl, mlxsw_sp_port->local_port,
4035                                         lag_id);
4036        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
4037}
4038
4039static int mlxsw_sp_lag_index_get(struct mlxsw_sp *mlxsw_sp,
4040                                  struct net_device *lag_dev,
4041                                  u16 *p_lag_id)
4042{
4043        struct mlxsw_sp_upper *lag;
4044        int free_lag_id = -1;
4045        u64 max_lag;
4046        int i;
4047
4048        max_lag = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_LAG);
4049        for (i = 0; i < max_lag; i++) {
4050                lag = mlxsw_sp_lag_get(mlxsw_sp, i);
4051                if (lag->ref_count) {
4052                        if (lag->dev == lag_dev) {
4053                                *p_lag_id = i;
4054                                return 0;
4055                        }
4056                } else if (free_lag_id < 0) {
4057                        free_lag_id = i;
4058                }
4059        }
4060        if (free_lag_id < 0)
4061                return -EBUSY;
4062        *p_lag_id = free_lag_id;
4063        return 0;
4064}
4065
4066static bool
4067mlxsw_sp_master_lag_check(struct mlxsw_sp *mlxsw_sp,
4068                          struct net_device *lag_dev,
4069                          struct netdev_lag_upper_info *lag_upper_info)
4070{
4071        u16 lag_id;
4072
4073        if (mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id) != 0)
4074                return false;
4075        if (lag_upper_info->tx_type != NETDEV_LAG_TX_TYPE_HASH)
4076                return false;
4077        return true;
4078}
4079
4080static int mlxsw_sp_port_lag_index_get(struct mlxsw_sp *mlxsw_sp,
4081                                       u16 lag_id, u8 *p_port_index)
4082{
4083        u64 max_lag_members;
4084        int i;
4085
4086        max_lag_members = MLXSW_CORE_RES_GET(mlxsw_sp->core,
4087                                             MAX_LAG_MEMBERS);
4088        for (i = 0; i < max_lag_members; i++) {
4089                if (!mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i)) {
4090                        *p_port_index = i;
4091                        return 0;
4092                }
4093        }
4094        return -EBUSY;
4095}
4096
/* Migrate the port's PVID (VLAN 1) vPort when the port joins a LAG:
 * detach the vPort from its current FID (if any) and mark it as lagged
 * with the given LAG ID.
 */
static void
mlxsw_sp_port_pvid_vport_lag_join(struct mlxsw_sp_port *mlxsw_sp_port,
                                  u16 lag_id)
{
        struct mlxsw_sp_port *mlxsw_sp_vport;
        struct mlxsw_sp_fid *f;

        /* Every port is expected to have a PVID (VLAN 1) vPort. */
        mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, 1);
        if (WARN_ON(!mlxsw_sp_vport))
                return;

        /* If vPort is assigned a RIF, then leave it since it's no
         * longer valid.
         */
        f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport);
        if (f)
                f->leave(mlxsw_sp_vport);

        mlxsw_sp_vport->lag_id = lag_id;
        mlxsw_sp_vport->lagged = 1;
}
4118
/* Counterpart of mlxsw_sp_port_pvid_vport_lag_join(): detach the PVID
 * (VLAN 1) vPort from its current FID (if any) and clear its lagged
 * state when the port leaves the LAG.
 */
static void
mlxsw_sp_port_pvid_vport_lag_leave(struct mlxsw_sp_port *mlxsw_sp_port)
{
        struct mlxsw_sp_port *mlxsw_sp_vport;
        struct mlxsw_sp_fid *f;

        mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, 1);
        if (WARN_ON(!mlxsw_sp_vport))
                return;

        /* The FID was joined while the vPort represented the LAG, so it
         * is no longer valid and must be left.
         */
        f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport);
        if (f)
                f->leave(mlxsw_sp_vport);

        mlxsw_sp_vport->lagged = 0;
}
4135
4136static int mlxsw_sp_port_lag_join(struct mlxsw_sp_port *mlxsw_sp_port,
4137                                  struct net_device *lag_dev)
4138{
4139        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
4140        struct mlxsw_sp_upper *lag;
4141        u16 lag_id;
4142        u8 port_index;
4143        int err;
4144
4145        err = mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id);
4146        if (err)
4147                return err;
4148        lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id);
4149        if (!lag->ref_count) {
4150                err = mlxsw_sp_lag_create(mlxsw_sp, lag_id);
4151                if (err)
4152                        return err;
4153                lag->dev = lag_dev;
4154        }
4155
4156        err = mlxsw_sp_port_lag_index_get(mlxsw_sp, lag_id, &port_index);
4157        if (err)
4158                return err;
4159        err = mlxsw_sp_lag_col_port_add(mlxsw_sp_port, lag_id, port_index);
4160        if (err)
4161                goto err_col_port_add;
4162        err = mlxsw_sp_lag_col_port_enable(mlxsw_sp_port, lag_id);
4163        if (err)
4164                goto err_col_port_enable;
4165
4166        mlxsw_core_lag_mapping_set(mlxsw_sp->core, lag_id, port_index,
4167                                   mlxsw_sp_port->local_port);
4168        mlxsw_sp_port->lag_id = lag_id;
4169        mlxsw_sp_port->lagged = 1;
4170        lag->ref_count++;
4171
4172        mlxsw_sp_port_pvid_vport_lag_join(mlxsw_sp_port, lag_id);
4173
4174        return 0;
4175
4176err_col_port_enable:
4177        mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id);
4178err_col_port_add:
4179        if (!lag->ref_count)
4180                mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);
4181        return err;
4182}
4183
/* Remove a port from its LAG: disable collection, remove the port from
 * the collector list, tear down any bridge membership of the port and
 * destroy the hardware LAG when this was its last member. Finally,
 * clear the core LAG mapping and migrate the PVID vPort back.
 */
static void mlxsw_sp_port_lag_leave(struct mlxsw_sp_port *mlxsw_sp_port,
                                    struct net_device *lag_dev)
{
        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
        u16 lag_id = mlxsw_sp_port->lag_id;
        struct mlxsw_sp_upper *lag;

        if (!mlxsw_sp_port->lagged)
                return;
        lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id);
        WARN_ON(lag->ref_count == 0);

        mlxsw_sp_lag_col_port_disable(mlxsw_sp_port, lag_id);
        mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id);

        /* The LAG (and hence this port) may itself have been a bridge
         * member; leave the bridge before possibly destroying the LAG.
         */
        if (mlxsw_sp_port->bridged) {
                mlxsw_sp_port_active_vlans_del(mlxsw_sp_port);
                mlxsw_sp_port_bridge_leave(mlxsw_sp_port);
        }

        /* Last member: remove the LAG from the device. */
        if (lag->ref_count == 1)
                mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);

        mlxsw_core_lag_mapping_clear(mlxsw_sp->core, lag_id,
                                     mlxsw_sp_port->local_port);
        mlxsw_sp_port->lagged = 0;
        lag->ref_count--;

        mlxsw_sp_port_pvid_vport_lag_leave(mlxsw_sp_port);
}
4214
4215static int mlxsw_sp_lag_dist_port_add(struct mlxsw_sp_port *mlxsw_sp_port,
4216                                      u16 lag_id)
4217{
4218        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
4219        char sldr_pl[MLXSW_REG_SLDR_LEN];
4220
4221        mlxsw_reg_sldr_lag_add_port_pack(sldr_pl, lag_id,
4222                                         mlxsw_sp_port->local_port);
4223        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
4224}
4225
4226static int mlxsw_sp_lag_dist_port_remove(struct mlxsw_sp_port *mlxsw_sp_port,
4227                                         u16 lag_id)
4228{
4229        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
4230        char sldr_pl[MLXSW_REG_SLDR_LEN];
4231
4232        mlxsw_reg_sldr_lag_remove_port_pack(sldr_pl, lag_id,
4233                                            mlxsw_sp_port->local_port);
4234        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
4235}
4236
4237static int mlxsw_sp_port_lag_tx_en_set(struct mlxsw_sp_port *mlxsw_sp_port,
4238                                       bool lag_tx_enabled)
4239{
4240        if (lag_tx_enabled)
4241                return mlxsw_sp_lag_dist_port_add(mlxsw_sp_port,
4242                                                  mlxsw_sp_port->lag_id);
4243        else
4244                return mlxsw_sp_lag_dist_port_remove(mlxsw_sp_port,
4245                                                     mlxsw_sp_port->lag_id);
4246}
4247
/* Reflect a change in the bond slave's lower state to the device;
 * currently only the TX-enabled flag is propagated.
 */
static int mlxsw_sp_port_lag_changed(struct mlxsw_sp_port *mlxsw_sp_port,
                                     struct netdev_lag_lower_state_info *info)
{
        return mlxsw_sp_port_lag_tx_en_set(mlxsw_sp_port, info->tx_enabled);
}
4253
4254static int mlxsw_sp_port_vlan_link(struct mlxsw_sp_port *mlxsw_sp_port,
4255                                   struct net_device *vlan_dev)
4256{
4257        struct mlxsw_sp_port *mlxsw_sp_vport;
4258        u16 vid = vlan_dev_vlan_id(vlan_dev);
4259
4260        mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
4261        if (WARN_ON(!mlxsw_sp_vport))
4262                return -EINVAL;
4263
4264        mlxsw_sp_vport->dev = vlan_dev;
4265
4266        return 0;
4267}
4268
4269static void mlxsw_sp_port_vlan_unlink(struct mlxsw_sp_port *mlxsw_sp_port,
4270                                      struct net_device *vlan_dev)
4271{
4272        struct mlxsw_sp_port *mlxsw_sp_vport;
4273        u16 vid = vlan_dev_vlan_id(vlan_dev);
4274
4275        mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
4276        if (WARN_ON(!mlxsw_sp_vport))
4277                return;
4278
4279        mlxsw_sp_vport->dev = mlxsw_sp_port->dev;
4280}
4281
4282static int mlxsw_sp_netdevice_port_upper_event(struct net_device *dev,
4283                                               unsigned long event, void *ptr)
4284{
4285        struct netdev_notifier_changeupper_info *info;
4286        struct mlxsw_sp_port *mlxsw_sp_port;
4287        struct net_device *upper_dev;
4288        struct mlxsw_sp *mlxsw_sp;
4289        int err = 0;
4290
4291        mlxsw_sp_port = netdev_priv(dev);
4292        mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
4293        info = ptr;
4294
4295        switch (event) {
4296        case NETDEV_PRECHANGEUPPER:
4297                upper_dev = info->upper_dev;
4298                if (!is_vlan_dev(upper_dev) &&
4299                    !netif_is_lag_master(upper_dev) &&
4300                    !netif_is_bridge_master(upper_dev))
4301                        return -EINVAL;
4302                if (!info->linking)
4303                        break;
4304                /* HW limitation forbids to put ports to multiple bridges. */
4305                if (netif_is_bridge_master(upper_dev) &&
4306                    !mlxsw_sp_master_bridge_check(mlxsw_sp, upper_dev))
4307                        return -EINVAL;
4308                if (netif_is_lag_master(upper_dev) &&
4309                    !mlxsw_sp_master_lag_check(mlxsw_sp, upper_dev,
4310                                               info->upper_info))
4311                        return -EINVAL;
4312                if (netif_is_lag_master(upper_dev) && vlan_uses_dev(dev))
4313                        return -EINVAL;
4314                if (netif_is_lag_port(dev) && is_vlan_dev(upper_dev) &&
4315                    !netif_is_lag_master(vlan_dev_real_dev(upper_dev)))
4316                        return -EINVAL;
4317                break;
4318        case NETDEV_CHANGEUPPER:
4319                upper_dev = info->upper_dev;
4320                if (is_vlan_dev(upper_dev)) {
4321                        if (info->linking)
4322                                err = mlxsw_sp_port_vlan_link(mlxsw_sp_port,
4323                                                              upper_dev);
4324                        else
4325                                 mlxsw_sp_port_vlan_unlink(mlxsw_sp_port,
4326                                                           upper_dev);
4327                } else if (netif_is_bridge_master(upper_dev)) {
4328                        if (info->linking)
4329                                err = mlxsw_sp_port_bridge_join(mlxsw_sp_port,
4330                                                                upper_dev);
4331                        else
4332                                mlxsw_sp_port_bridge_leave(mlxsw_sp_port);
4333                } else if (netif_is_lag_master(upper_dev)) {
4334                        if (info->linking)
4335                                err = mlxsw_sp_port_lag_join(mlxsw_sp_port,
4336                                                             upper_dev);
4337                        else
4338                                mlxsw_sp_port_lag_leave(mlxsw_sp_port,
4339                                                        upper_dev);
4340                } else {
4341                        err = -EINVAL;
4342                        WARN_ON(1);
4343                }
4344                break;
4345        }
4346
4347        return err;
4348}
4349
/* Handle CHANGELOWERSTATE notifications on a port netdev: when the port
 * is a LAG slave known to be lagged, propagate the new lower state to
 * the device. Failures are only logged, never returned.
 */
static int mlxsw_sp_netdevice_port_lower_event(struct net_device *dev,
                                               unsigned long event, void *ptr)
{
        struct netdev_notifier_changelowerstate_info *info;
        struct mlxsw_sp_port *mlxsw_sp_port;
        int err;

        mlxsw_sp_port = netdev_priv(dev);
        info = ptr;

        switch (event) {
        case NETDEV_CHANGELOWERSTATE:
                if (netif_is_lag_port(dev) && mlxsw_sp_port->lagged) {
                        err = mlxsw_sp_port_lag_changed(mlxsw_sp_port,
                                                        info->lower_state_info);
                        if (err)
                                netdev_err(dev, "Failed to reflect link aggregation lower state change\n");
                }
                break;
        }

        return 0;
}
4373
4374static int mlxsw_sp_netdevice_port_event(struct net_device *dev,
4375                                         unsigned long event, void *ptr)
4376{
4377        switch (event) {
4378        case NETDEV_PRECHANGEUPPER:
4379        case NETDEV_CHANGEUPPER:
4380                return mlxsw_sp_netdevice_port_upper_event(dev, event, ptr);
4381        case NETDEV_CHANGELOWERSTATE:
4382                return mlxsw_sp_netdevice_port_lower_event(dev, event, ptr);
4383        }
4384
4385        return 0;
4386}
4387
4388static int mlxsw_sp_netdevice_lag_event(struct net_device *lag_dev,
4389                                        unsigned long event, void *ptr)
4390{
4391        struct net_device *dev;
4392        struct list_head *iter;
4393        int ret;
4394
4395        netdev_for_each_lower_dev(lag_dev, dev, iter) {
4396                if (mlxsw_sp_port_dev_check(dev)) {
4397                        ret = mlxsw_sp_netdevice_port_event(dev, event, ptr);
4398                        if (ret)
4399                                return ret;
4400                }
4401        }
4402
4403        return 0;
4404}
4405
4406static int mlxsw_sp_master_bridge_vlan_link(struct mlxsw_sp *mlxsw_sp,
4407                                            struct net_device *vlan_dev)
4408{
4409        u16 fid = vlan_dev_vlan_id(vlan_dev);
4410        struct mlxsw_sp_fid *f;
4411
4412        f = mlxsw_sp_fid_find(mlxsw_sp, fid);
4413        if (!f) {
4414                f = mlxsw_sp_fid_create(mlxsw_sp, fid);
4415                if (IS_ERR(f))
4416                        return PTR_ERR(f);
4417        }
4418
4419        f->ref_count++;
4420
4421        return 0;
4422}
4423
4424static void mlxsw_sp_master_bridge_vlan_unlink(struct mlxsw_sp *mlxsw_sp,
4425                                               struct net_device *vlan_dev)
4426{
4427        u16 fid = vlan_dev_vlan_id(vlan_dev);
4428        struct mlxsw_sp_fid *f;
4429
4430        f = mlxsw_sp_fid_find(mlxsw_sp, fid);
4431        if (f && f->r)
4432                mlxsw_sp_rif_bridge_destroy(mlxsw_sp, f->r);
4433        if (f && --f->ref_count == 0)
4434                mlxsw_sp_fid_destroy(mlxsw_sp, f);
4435}
4436
/* Handle CHANGEUPPER notifications on a bridge device: when a VLAN
 * upper is added to (or removed from) the master bridge, take/drop a
 * reference on the corresponding FID. Events on bridges that are not
 * our master bridge are ignored.
 */
static int mlxsw_sp_netdevice_bridge_event(struct net_device *br_dev,
                                           unsigned long event, void *ptr)
{
        struct netdev_notifier_changeupper_info *info;
        struct net_device *upper_dev;
        struct mlxsw_sp *mlxsw_sp;
        int err;

        mlxsw_sp = mlxsw_sp_lower_get(br_dev);
        if (!mlxsw_sp)
                return 0;
        if (br_dev != mlxsw_sp->master_bridge.dev)
                return 0;

        info = ptr;

        switch (event) {
        case NETDEV_CHANGEUPPER:
                upper_dev = info->upper_dev;
                if (!is_vlan_dev(upper_dev))
                        break;
                if (info->linking) {
                        err = mlxsw_sp_master_bridge_vlan_link(mlxsw_sp,
                                                               upper_dev);
                        if (err)
                                return err;
                } else {
                        mlxsw_sp_master_bridge_vlan_unlink(mlxsw_sp, upper_dev);
                }
                break;
        }

        return 0;
}
4471
/* Return the first unused vFID, or MLXSW_SP_VFID_MAX when the bitmap
 * is full.
 */
static u16 mlxsw_sp_avail_vfid_get(const struct mlxsw_sp *mlxsw_sp)
{
        return find_first_zero_bit(mlxsw_sp->vfids.mapped,
                                   MLXSW_SP_VFID_MAX);
}
4477
/* Create (@create == true) or destroy a FID in the device by writing
 * the SFMR register.
 */
static int mlxsw_sp_vfid_op(struct mlxsw_sp *mlxsw_sp, u16 fid, bool create)
{
        char sfmr_pl[MLXSW_REG_SFMR_LEN];

        mlxsw_reg_sfmr_pack(sfmr_pl, !create, fid, 0);
        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfmr), sfmr_pl);
}
4485
4486static void mlxsw_sp_vport_vfid_leave(struct mlxsw_sp_port *mlxsw_sp_vport);
4487
/* Allocate a vFID-backed FID for the bridge device @br_dev: pick a free
 * vFID, create the FID in the device (SFMR) and then set up the
 * host-side tracking structure. Returns the new FID or an ERR_PTR.
 */
static struct mlxsw_sp_fid *mlxsw_sp_vfid_create(struct mlxsw_sp *mlxsw_sp,
                                                 struct net_device *br_dev)
{
        struct device *dev = mlxsw_sp->bus_info->dev;
        struct mlxsw_sp_fid *f;
        u16 vfid, fid;
        int err;

        vfid = mlxsw_sp_avail_vfid_get(mlxsw_sp);
        if (vfid == MLXSW_SP_VFID_MAX) {
                dev_err(dev, "No available vFIDs\n");
                return ERR_PTR(-ERANGE);
        }

        /* Create the FID in the device before allocating the host-side
         * tracking structure.
         */
        fid = mlxsw_sp_vfid_to_fid(vfid);
        err = mlxsw_sp_vfid_op(mlxsw_sp, fid, true);
        if (err) {
                dev_err(dev, "Failed to create FID=%d\n", fid);
                return ERR_PTR(err);
        }

        f = kzalloc(sizeof(*f), GFP_KERNEL);
        if (!f)
                goto err_allocate_vfid;

        f->leave = mlxsw_sp_vport_vfid_leave;
        f->fid = fid;
        f->dev = br_dev;

        list_add(&f->list, &mlxsw_sp->vfids.list);
        set_bit(vfid, mlxsw_sp->vfids.mapped);

        return f;

err_allocate_vfid:
        /* Undo the hardware FID creation on allocation failure. */
        mlxsw_sp_vfid_op(mlxsw_sp, fid, false);
        return ERR_PTR(-ENOMEM);
}
4526
/* Tear down a vFID-backed FID: release the vFID bit and list entry,
 * destroy any RIF attached to the FID, free the tracking structure and
 * finally destroy the FID in the device.
 */
static void mlxsw_sp_vfid_destroy(struct mlxsw_sp *mlxsw_sp,
                                  struct mlxsw_sp_fid *f)
{
        u16 vfid = mlxsw_sp_fid_to_vfid(f->fid);
        u16 fid = f->fid;

        clear_bit(vfid, mlxsw_sp->vfids.mapped);
        list_del(&f->list);

        if (f->r)
                mlxsw_sp_rif_bridge_destroy(mlxsw_sp, f->r);

        /* f is freed before the hardware op, so the FID is saved above. */
        kfree(f);

        mlxsw_sp_vfid_op(mlxsw_sp, fid, false);
}
4543
4544static int mlxsw_sp_vport_fid_map(struct mlxsw_sp_port *mlxsw_sp_vport, u16 fid,
4545                                  bool valid)
4546{
4547        enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID;
4548        u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport);
4549
4550        return mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport, mt, valid, fid,
4551                                            vid);
4552}
4553
/* Join the vPort to the vFID associated with bridge @br_dev, creating
 * the vFID on first use: enable flooding for the FID on the vPort, map
 * the vPort's {port, VID} to the FID and take a reference.
 */
static int mlxsw_sp_vport_vfid_join(struct mlxsw_sp_port *mlxsw_sp_vport,
                                    struct net_device *br_dev)
{
        struct mlxsw_sp_fid *f;
        int err;

        f = mlxsw_sp_vfid_find(mlxsw_sp_vport->mlxsw_sp, br_dev);
        if (!f) {
                f = mlxsw_sp_vfid_create(mlxsw_sp_vport->mlxsw_sp, br_dev);
                if (IS_ERR(f))
                        return PTR_ERR(f);
        }

        err = mlxsw_sp_vport_flood_set(mlxsw_sp_vport, f->fid, true);
        if (err)
                goto err_vport_flood_set;

        err = mlxsw_sp_vport_fid_map(mlxsw_sp_vport, f->fid, true);
        if (err)
                goto err_vport_fid_map;

        mlxsw_sp_vport_fid_set(mlxsw_sp_vport, f);
        f->ref_count++;

        netdev_dbg(mlxsw_sp_vport->dev, "Joined FID=%d\n", f->fid);

        return 0;

err_vport_fid_map:
        mlxsw_sp_vport_flood_set(mlxsw_sp_vport, f->fid, false);
err_vport_flood_set:
        /* Destroy the vFID if it was created above and no one else
         * holds a reference to it.
         */
        if (!f->ref_count)
                mlxsw_sp_vfid_destroy(mlxsw_sp_vport->mlxsw_sp, f);
        return err;
}
4589
/* Counterpart of mlxsw_sp_vport_vfid_join(): unmap the vPort from its
 * FID, disable flooding, flush learned FDB entries and drop the FID
 * reference, destroying the vFID when it was the last one.
 */
static void mlxsw_sp_vport_vfid_leave(struct mlxsw_sp_port *mlxsw_sp_vport)
{
        struct mlxsw_sp_fid *f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport);

        netdev_dbg(mlxsw_sp_vport->dev, "Left FID=%d\n", f->fid);

        mlxsw_sp_vport_fid_map(mlxsw_sp_vport, f->fid, false);

        mlxsw_sp_vport_flood_set(mlxsw_sp_vport, f->fid, false);

        mlxsw_sp_port_fdb_flush(mlxsw_sp_vport, f->fid);

        mlxsw_sp_vport_fid_set(mlxsw_sp_vport, NULL);
        if (--f->ref_count == 0)
                mlxsw_sp_vfid_destroy(mlxsw_sp_vport->mlxsw_sp, f);
}
4606
/* Enslave the vPort to bridge @br_dev: leave its current FID (if any),
 * join the bridge's vFID, enable learning on the VID and flag the
 * vPort as bridged.
 */
static int mlxsw_sp_vport_bridge_join(struct mlxsw_sp_port *mlxsw_sp_vport,
                                      struct net_device *br_dev)
{
        struct mlxsw_sp_fid *f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport);
        u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport);
        struct net_device *dev = mlxsw_sp_vport->dev;
        int err;

        /* Leave the current FID first; a FID without a leave op would
         * be a driver bug.
         */
        if (f && !WARN_ON(!f->leave))
                f->leave(mlxsw_sp_vport);

        err = mlxsw_sp_vport_vfid_join(mlxsw_sp_vport, br_dev);
        if (err) {
                netdev_err(dev, "Failed to join vFID\n");
                return err;
        }

        err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, true);
        if (err) {
                netdev_err(dev, "Failed to enable learning\n");
                goto err_port_vid_learning_set;
        }

        mlxsw_sp_vport->learning = 1;
        mlxsw_sp_vport->learning_sync = 1;
        mlxsw_sp_vport->uc_flood = 1;
        mlxsw_sp_vport->bridged = 1;

        return 0;

err_port_vid_learning_set:
        mlxsw_sp_vport_vfid_leave(mlxsw_sp_vport);
        return err;
}
4641
/* Counterpart of mlxsw_sp_vport_bridge_join(): disable learning on the
 * VID, leave the bridge's vFID and clear the vPort's bridged state.
 */
static void mlxsw_sp_vport_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_vport)
{
        u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport);

        mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, false);

        mlxsw_sp_vport_vfid_leave(mlxsw_sp_vport);

        mlxsw_sp_vport->learning = 0;
        mlxsw_sp_vport->learning_sync = 0;
        mlxsw_sp_vport->uc_flood = 0;
        mlxsw_sp_vport->bridged = 0;
}
4655
4656static bool
4657mlxsw_sp_port_master_bridge_check(const struct mlxsw_sp_port *mlxsw_sp_port,
4658                                  const struct net_device *br_dev)
4659{
4660        struct mlxsw_sp_port *mlxsw_sp_vport;
4661
4662        list_for_each_entry(mlxsw_sp_vport, &mlxsw_sp_port->vports_list,
4663                            vport.list) {
4664                struct net_device *dev = mlxsw_sp_vport_dev_get(mlxsw_sp_vport);
4665
4666                if (dev && dev == br_dev)
4667                        return false;
4668        }
4669
4670        return true;
4671}
4672
/* Handle [PRE]CHANGEUPPER notifications for a VLAN interface of a port:
 * only bridge uppers are allowed, and only one VLAN interface of the
 * same port per bridge. In CHANGEUPPER the vPort joins or leaves the
 * bridge accordingly.
 */
static int mlxsw_sp_netdevice_vport_event(struct net_device *dev,
                                          unsigned long event, void *ptr,
                                          u16 vid)
{
        struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
        struct netdev_notifier_changeupper_info *info = ptr;
        struct mlxsw_sp_port *mlxsw_sp_vport;
        struct net_device *upper_dev;
        int err = 0;

        mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);

        switch (event) {
        case NETDEV_PRECHANGEUPPER:
                upper_dev = info->upper_dev;
                if (!netif_is_bridge_master(upper_dev))
                        return -EINVAL;
                if (!info->linking)
                        break;
                /* We can't have multiple VLAN interfaces configured on
                 * the same port and being members in the same bridge.
                 */
                if (!mlxsw_sp_port_master_bridge_check(mlxsw_sp_port,
                                                       upper_dev))
                        return -EINVAL;
                break;
        case NETDEV_CHANGEUPPER:
                upper_dev = info->upper_dev;
                if (info->linking) {
                        if (WARN_ON(!mlxsw_sp_vport))
                                return -EINVAL;
                        err = mlxsw_sp_vport_bridge_join(mlxsw_sp_vport,
                                                         upper_dev);
                } else {
                        /* The vPort may already be gone, e.g. when the
                         * VLAN device itself was deleted.
                         */
                        if (!mlxsw_sp_vport)
                                return 0;
                        mlxsw_sp_vport_bridge_leave(mlxsw_sp_vport);
                }
        }

        return err;
}
4715
4716static int mlxsw_sp_netdevice_lag_vport_event(struct net_device *lag_dev,
4717                                              unsigned long event, void *ptr,
4718                                              u16 vid)
4719{
4720        struct net_device *dev;
4721        struct list_head *iter;
4722        int ret;
4723
4724        netdev_for_each_lower_dev(lag_dev, dev, iter) {
4725                if (mlxsw_sp_port_dev_check(dev)) {
4726                        ret = mlxsw_sp_netdevice_vport_event(dev, event, ptr,
4727                                                             vid);
4728                        if (ret)
4729                                return ret;
4730                }
4731        }
4732
4733        return 0;
4734}
4735
4736static int mlxsw_sp_netdevice_vlan_event(struct net_device *vlan_dev,
4737                                         unsigned long event, void *ptr)
4738{
4739        struct net_device *real_dev = vlan_dev_real_dev(vlan_dev);
4740        u16 vid = vlan_dev_vlan_id(vlan_dev);
4741
4742        if (mlxsw_sp_port_dev_check(real_dev))
4743                return mlxsw_sp_netdevice_vport_event(real_dev, event, ptr,
4744                                                      vid);
4745        else if (netif_is_lag_master(real_dev))
4746                return mlxsw_sp_netdevice_lag_vport_event(real_dev, event, ptr,
4747                                                          vid);
4748
4749        return 0;
4750}
4751
/* Top-level netdev notifier callback: route the event to the handler
 * matching the device type (router port, physical port, LAG, bridge or
 * VLAN device). Exactly one handler runs per event.
 */
static int mlxsw_sp_netdevice_event(struct notifier_block *unused,
                                    unsigned long event, void *ptr)
{
        struct net_device *dev = netdev_notifier_info_to_dev(ptr);
        int err = 0;

        if (event == NETDEV_CHANGEADDR || event == NETDEV_CHANGEMTU)
                err = mlxsw_sp_netdevice_router_port_event(dev);
        else if (mlxsw_sp_port_dev_check(dev))
                err = mlxsw_sp_netdevice_port_event(dev, event, ptr);
        else if (netif_is_lag_master(dev))
                err = mlxsw_sp_netdevice_lag_event(dev, event, ptr);
        else if (netif_is_bridge_master(dev))
                err = mlxsw_sp_netdevice_bridge_event(dev, event, ptr);
        else if (is_vlan_dev(dev))
                err = mlxsw_sp_netdevice_vlan_event(dev, event, ptr);

        return notifier_from_errno(err);
}
4771
/* Notifier for netdev events (port/LAG/bridge/VLAN topology changes). */
static struct notifier_block mlxsw_sp_netdevice_nb __read_mostly = {
        .notifier_call = mlxsw_sp_netdevice_event,
};

/* Notifier for inet address events. */
static struct notifier_block mlxsw_sp_inetaddr_nb __read_mostly = {
        .notifier_call = mlxsw_sp_inetaddr_event,
        .priority = 10, /* Must be called before FIB notifier block */
};

/* Notifier for netevent events, handled by the router code. */
static struct notifier_block mlxsw_sp_router_netevent_nb __read_mostly = {
        .notifier_call = mlxsw_sp_router_netevent_event,
};
4784
/* PCI IDs of devices handled by this driver (Spectrum ASIC). */
static const struct pci_device_id mlxsw_sp_pci_id_table[] = {
        {PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM), 0},
        {0, },
};

static struct pci_driver mlxsw_sp_pci_driver = {
        .name = mlxsw_sp_driver_name,
        .id_table = mlxsw_sp_pci_id_table,
};
4794
/* Module init: register the notifiers first so no events are missed
 * once devices probe, then register the core and PCI drivers. On
 * failure everything is unwound in reverse order.
 */
static int __init mlxsw_sp_module_init(void)
{
        int err;

        register_netdevice_notifier_rh(&mlxsw_sp_netdevice_nb);
        register_inetaddr_notifier(&mlxsw_sp_inetaddr_nb);
        register_netevent_notifier(&mlxsw_sp_router_netevent_nb);

        err = mlxsw_core_driver_register(&mlxsw_sp_driver);
        if (err)
                goto err_core_driver_register;

        err = mlxsw_pci_driver_register(&mlxsw_sp_pci_driver);
        if (err)
                goto err_pci_driver_register;

        return 0;

err_pci_driver_register:
        mlxsw_core_driver_unregister(&mlxsw_sp_driver);
err_core_driver_register:
        unregister_netevent_notifier(&mlxsw_sp_router_netevent_nb);
        unregister_inetaddr_notifier(&mlxsw_sp_inetaddr_nb);
        unregister_netdevice_notifier_rh(&mlxsw_sp_netdevice_nb);
        return err;
}
4821
/* Module exit: tear down in exact reverse order of mlxsw_sp_module_init(). */
static void __exit mlxsw_sp_module_exit(void)
{
        mlxsw_pci_driver_unregister(&mlxsw_sp_pci_driver);
        mlxsw_core_driver_unregister(&mlxsw_sp_driver);
        unregister_netevent_notifier(&mlxsw_sp_router_netevent_nb);
        unregister_inetaddr_notifier(&mlxsw_sp_inetaddr_nb);
        unregister_netdevice_notifier_rh(&mlxsw_sp_netdevice_nb);
}
4830
/* Module entry points and metadata. */
module_init(mlxsw_sp_module_init);
module_exit(mlxsw_sp_module_exit);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
MODULE_DESCRIPTION("Mellanox Spectrum driver");
MODULE_DEVICE_TABLE(pci, mlxsw_sp_pci_id_table);
4838