linux/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
   1/*
   2 * drivers/net/ethernet/mellanox/mlxsw/spectrum.c
   3 * Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved.
   4 * Copyright (c) 2015-2017 Jiri Pirko <jiri@mellanox.com>
   5 * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
   6 * Copyright (c) 2015 Elad Raz <eladr@mellanox.com>
   7 *
   8 * Redistribution and use in source and binary forms, with or without
   9 * modification, are permitted provided that the following conditions are met:
  10 *
  11 * 1. Redistributions of source code must retain the above copyright
  12 *    notice, this list of conditions and the following disclaimer.
  13 * 2. Redistributions in binary form must reproduce the above copyright
  14 *    notice, this list of conditions and the following disclaimer in the
  15 *    documentation and/or other materials provided with the distribution.
  16 * 3. Neither the names of the copyright holders nor the names of its
  17 *    contributors may be used to endorse or promote products derived from
  18 *    this software without specific prior written permission.
  19 *
  20 * Alternatively, this software may be distributed under the terms of the
  21 * GNU General Public License ("GPL") version 2 as published by the Free
  22 * Software Foundation.
  23 *
  24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  27 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  28 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  34 * POSSIBILITY OF SUCH DAMAGE.
  35 */
  36
  37#include <linux/kernel.h>
  38#include <linux/module.h>
  39#include <linux/types.h>
  40#include <linux/pci.h>
  41#include <linux/netdevice.h>
  42#include <linux/etherdevice.h>
  43#include <linux/ethtool.h>
  44#include <linux/slab.h>
  45#include <linux/device.h>
  46#include <linux/skbuff.h>
  47#include <linux/if_vlan.h>
  48#include <linux/if_bridge.h>
  49#include <linux/workqueue.h>
  50#include <linux/jiffies.h>
  51#include <linux/bitops.h>
  52#include <linux/list.h>
  53#include <linux/notifier.h>
  54#include <linux/dcbnl.h>
  55#include <linux/inetdevice.h>
  56#include <linux/netlink.h>
  57#include <net/switchdev.h>
  58#include <net/pkt_cls.h>
  59#include <net/tc_act/tc_mirred.h>
  60#include <net/netevent.h>
  61#include <net/tc_act/tc_sample.h>
  62#include <net/addrconf.h>
  63
  64#include "spectrum.h"
  65#include "pci.h"
  66#include "core.h"
  67#include "reg.h"
  68#include "port.h"
  69#include "trap.h"
  70#include "txheader.h"
  71#include "spectrum_cnt.h"
  72#include "spectrum_dpipe.h"
  73#include "spectrum_acl_flex_actions.h"
  74#include "spectrum_span.h"
  75#include "../mlxfw/mlxfw.h"
  76
  77#define MLXSW_FWREV_MAJOR 13
  78#define MLXSW_FWREV_MINOR 1620
  79#define MLXSW_FWREV_SUBMINOR 192
  80#define MLXSW_FWREV_MINOR_TO_BRANCH(minor) ((minor) / 100)
  81
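/* With the constants above, MLXSW_SP_FW_FILENAME expands to
 * "mellanox/mlxsw_spectrum-13.1620.192.mfa2", and two firmware versions are
 * considered to be on the same branch when minor / 100 matches (e.g. minor
 * 1620 maps to branch 16).
 */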
  82#define MLXSW_SP_FW_FILENAME \
  83        "mellanox/mlxsw_spectrum-" __stringify(MLXSW_FWREV_MAJOR) \
  84        "." __stringify(MLXSW_FWREV_MINOR) \
  85        "." __stringify(MLXSW_FWREV_SUBMINOR) ".mfa2"
  86
  87static const char mlxsw_sp_driver_name[] = "mlxsw_spectrum";
  88static const char mlxsw_sp_driver_version[] = "1.0";
  89
  90/* tx_hdr_version
  91 * Tx header version.
  92 * Must be set to 1.
  93 */
  94MLXSW_ITEM32(tx, hdr, version, 0x00, 28, 4);
  95
  96/* tx_hdr_ctl
  97 * Packet control type.
  98 * 0 - Ethernet control (e.g. EMADs, LACP)
  99 * 1 - Ethernet data
 100 */
 101MLXSW_ITEM32(tx, hdr, ctl, 0x00, 26, 2);
 102
 103/* tx_hdr_proto
 104 * Packet protocol type. Must be set to 1 (Ethernet).
 105 */
 106MLXSW_ITEM32(tx, hdr, proto, 0x00, 21, 3);
 107
 108/* tx_hdr_rx_is_router
 109 * Packet is sent from the router. Valid for data packets only.
 110 */
 111MLXSW_ITEM32(tx, hdr, rx_is_router, 0x00, 19, 1);
 112
 113/* tx_hdr_fid_valid
 114 * Indicates if the 'fid' field is valid and should be used for
 115 * forwarding lookup. Valid for data packets only.
 116 */
 117MLXSW_ITEM32(tx, hdr, fid_valid, 0x00, 16, 1);
 118
 119/* tx_hdr_swid
 120 * Switch partition ID. Must be set to 0.
 121 */
 122MLXSW_ITEM32(tx, hdr, swid, 0x00, 12, 3);
 123
 124/* tx_hdr_control_tclass
 125 * Indicates if the packet should use the control TClass and not one
 126 * of the data TClasses.
 127 */
 128MLXSW_ITEM32(tx, hdr, control_tclass, 0x00, 6, 1);
 129
 130/* tx_hdr_etclass
 131 * Egress TClass to be used on the egress device on the egress port.
 132 */
 133MLXSW_ITEM32(tx, hdr, etclass, 0x00, 0, 4);
 134
 135/* tx_hdr_port_mid
 136 * Destination local port for unicast packets.
 137 * Destination multicast ID for multicast packets.
 138 *
 139 * Control packets are directed to a specific egress port, while data
 140 * packets are transmitted through the CPU port (0) into the switch partition,
 141 * where forwarding rules are applied.
 142 */
 143MLXSW_ITEM32(tx, hdr, port_mid, 0x04, 16, 16);
 144
 145/* tx_hdr_fid
 146 * Forwarding ID used for L2 forwarding lookup. Valid only if 'fid_valid' is
 147 * set, otherwise calculated based on the packet's VID using VID to FID mapping.
 148 * Valid for data packets only.
 149 */
 150MLXSW_ITEM32(tx, hdr, fid, 0x08, 0, 16);
 151
 152/* tx_hdr_type
 153 * 0 - Data packets
 154 * 6 - Control packets
 155 */
 156MLXSW_ITEM32(tx, hdr, type, 0x0C, 0, 4);
 157
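/* Glue between the common mlxfw flashing code and this driver: the mlxfw
 * callbacks below receive a struct mlxfw_dev and use container_of() to get
 * back to the owning mlxsw_sp instance.
 */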
 158struct mlxsw_sp_mlxfw_dev {
 159        struct mlxfw_dev mlxfw_dev;
 160        struct mlxsw_sp *mlxsw_sp;
 161};
 162
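/* Query a firmware component via the MCQI register and report its maximum
 * size, alignment and per-write limit to the mlxfw core. The alignment is
 * clamped to at least 2 bits and the write size to what a single MCDA
 * register write can carry.
 */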
 163static int mlxsw_sp_component_query(struct mlxfw_dev *mlxfw_dev,
 164                                    u16 component_index, u32 *p_max_size,
 165                                    u8 *p_align_bits, u16 *p_max_write_size)
 166{
 167        struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
 168                container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
 169        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
 170        char mcqi_pl[MLXSW_REG_MCQI_LEN];
 171        int err;
 172
 173        mlxsw_reg_mcqi_pack(mcqi_pl, component_index);
 174        err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mcqi), mcqi_pl);
 175        if (err)
 176                return err;
 177        mlxsw_reg_mcqi_unpack(mcqi_pl, p_max_size, p_align_bits,
 178                              p_max_write_size);
 179
 180        *p_align_bits = max_t(u8, *p_align_bits, 2);
 181        *p_max_write_size = min_t(u16, *p_max_write_size,
 182                                  MLXSW_REG_MCDA_MAX_DATA_LEN);
 183        return 0;
 184}
 185
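/* Take the firmware update handle: read the current FSM state via MCC, bail
 * out with -EBUSY unless the FSM is idle, and then issue the
 * LOCK_UPDATE_HANDLE instruction with the handle returned by the query.
 */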
 186static int mlxsw_sp_fsm_lock(struct mlxfw_dev *mlxfw_dev, u32 *fwhandle)
 187{
 188        struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
 189                container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
 190        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
 191        char mcc_pl[MLXSW_REG_MCC_LEN];
 192        u8 control_state;
 193        int err;
 194
 195        mlxsw_reg_mcc_pack(mcc_pl, 0, 0, 0, 0);
 196        err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
 197        if (err)
 198                return err;
 199
 200        mlxsw_reg_mcc_unpack(mcc_pl, fwhandle, NULL, &control_state);
 201        if (control_state != MLXFW_FSM_STATE_IDLE)
 202                return -EBUSY;
 203
 204        mlxsw_reg_mcc_pack(mcc_pl,
 205                           MLXSW_REG_MCC_INSTRUCTION_LOCK_UPDATE_HANDLE,
 206                           0, *fwhandle, 0);
 207        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
 208}
 209
 210static int mlxsw_sp_fsm_component_update(struct mlxfw_dev *mlxfw_dev,
 211                                         u32 fwhandle, u16 component_index,
 212                                         u32 component_size)
 213{
 214        struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
 215                container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
 216        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
 217        char mcc_pl[MLXSW_REG_MCC_LEN];
 218
 219        mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_UPDATE_COMPONENT,
 220                           component_index, fwhandle, component_size);
 221        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
 222}
 223
 224static int mlxsw_sp_fsm_block_download(struct mlxfw_dev *mlxfw_dev,
 225                                       u32 fwhandle, u8 *data, u16 size,
 226                                       u32 offset)
 227{
 228        struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
 229                container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
 230        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
 231        char mcda_pl[MLXSW_REG_MCDA_LEN];
 232
 233        mlxsw_reg_mcda_pack(mcda_pl, fwhandle, offset, size, data);
 234        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcda), mcda_pl);
 235}
 236
 237static int mlxsw_sp_fsm_component_verify(struct mlxfw_dev *mlxfw_dev,
 238                                         u32 fwhandle, u16 component_index)
 239{
 240        struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
 241                container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
 242        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
 243        char mcc_pl[MLXSW_REG_MCC_LEN];
 244
 245        mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_VERIFY_COMPONENT,
 246                           component_index, fwhandle, 0);
 247        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
 248}
 249
 250static int mlxsw_sp_fsm_activate(struct mlxfw_dev *mlxfw_dev, u32 fwhandle)
 251{
 252        struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
 253                container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
 254        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
 255        char mcc_pl[MLXSW_REG_MCC_LEN];
 256
 257        mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_ACTIVATE, 0,
 258                           fwhandle, 0);
 259        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
 260}
 261
 262static int mlxsw_sp_fsm_query_state(struct mlxfw_dev *mlxfw_dev, u32 fwhandle,
 263                                    enum mlxfw_fsm_state *fsm_state,
 264                                    enum mlxfw_fsm_state_err *fsm_state_err)
 265{
 266        struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
 267                container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
 268        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
 269        char mcc_pl[MLXSW_REG_MCC_LEN];
 270        u8 control_state;
 271        u8 error_code;
 272        int err;
 273
 274        mlxsw_reg_mcc_pack(mcc_pl, 0, 0, fwhandle, 0);
 275        err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
 276        if (err)
 277                return err;
 278
 279        mlxsw_reg_mcc_unpack(mcc_pl, NULL, &error_code, &control_state);
 280        *fsm_state = control_state;
 281        *fsm_state_err = min_t(enum mlxfw_fsm_state_err, error_code,
 282                               MLXFW_FSM_STATE_ERR_MAX);
 283        return 0;
 284}
 285
 286static void mlxsw_sp_fsm_cancel(struct mlxfw_dev *mlxfw_dev, u32 fwhandle)
 287{
 288        struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
 289                container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
 290        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
 291        char mcc_pl[MLXSW_REG_MCC_LEN];
 292
 293        mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_CANCEL, 0,
 294                           fwhandle, 0);
 295        mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
 296}
 297
 298static void mlxsw_sp_fsm_release(struct mlxfw_dev *mlxfw_dev, u32 fwhandle)
 299{
 300        struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
 301                container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
 302        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
 303        char mcc_pl[MLXSW_REG_MCC_LEN];
 304
 305        mlxsw_reg_mcc_pack(mcc_pl,
 306                           MLXSW_REG_MCC_INSTRUCTION_RELEASE_UPDATE_HANDLE, 0,
 307                           fwhandle, 0);
 308        mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
 309}
 310
 311static const struct mlxfw_dev_ops mlxsw_sp_mlxfw_dev_ops = {
 312        .component_query        = mlxsw_sp_component_query,
 313        .fsm_lock               = mlxsw_sp_fsm_lock,
 314        .fsm_component_update   = mlxsw_sp_fsm_component_update,
 315        .fsm_block_download     = mlxsw_sp_fsm_block_download,
 316        .fsm_component_verify   = mlxsw_sp_fsm_component_verify,
 317        .fsm_activate           = mlxsw_sp_fsm_activate,
 318        .fsm_query_state        = mlxsw_sp_fsm_query_state,
 319        .fsm_cancel             = mlxsw_sp_fsm_cancel,
 320        .fsm_release            = mlxsw_sp_fsm_release
 321};
 322
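/* Flash a firmware image by handing it to the common mlxfw code together
 * with an on-stack mlxsw_sp_mlxfw_dev that wires up the register-based
 * callbacks above.
 */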
 323static int mlxsw_sp_firmware_flash(struct mlxsw_sp *mlxsw_sp,
 324                                   const struct firmware *firmware)
 325{
 326        struct mlxsw_sp_mlxfw_dev mlxsw_sp_mlxfw_dev = {
 327                .mlxfw_dev = {
 328                        .ops = &mlxsw_sp_mlxfw_dev_ops,
 329                        .psid = mlxsw_sp->bus_info->psid,
 330                        .psid_size = strlen(mlxsw_sp->bus_info->psid),
 331                },
 332                .mlxsw_sp = mlxsw_sp
 333        };
 334
 335        return mlxfw_firmware_flash(&mlxsw_sp_mlxfw_dev.mlxfw_dev, firmware);
 336}
 337
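/* Validate that the running firmware is compatible with the driver: the
 * major version must match exactly, and any firmware on the same minor
 * branch (minor / 100) is accepted. Otherwise the expected image is
 * requested from user space and flashed.
 */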
 338static int mlxsw_sp_fw_rev_validate(struct mlxsw_sp *mlxsw_sp)
 339{
 340        const struct mlxsw_fw_rev *rev = &mlxsw_sp->bus_info->fw_rev;
 341        const struct firmware *firmware;
 342        int err;
 343
 344        /* Validate driver & FW are compatible */
 345        if (rev->major != MLXSW_FWREV_MAJOR) {
 346                WARN(1, "Mismatch in major FW version [%d:%d] is never expected; Please contact support\n",
 347                     rev->major, MLXSW_FWREV_MAJOR);
 348                return -EINVAL;
 349        }
 350        if (MLXSW_FWREV_MINOR_TO_BRANCH(rev->minor) ==
 351            MLXSW_FWREV_MINOR_TO_BRANCH(MLXSW_FWREV_MINOR))
 352                return 0;
 353
 354        dev_info(mlxsw_sp->bus_info->dev, "The firmware version %d.%d.%d is incompatible with the driver\n",
 355                 rev->major, rev->minor, rev->subminor);
 356        dev_info(mlxsw_sp->bus_info->dev, "Flashing firmware using file %s\n",
 357                 MLXSW_SP_FW_FILENAME);
 358
 359        err = request_firmware_direct(&firmware, MLXSW_SP_FW_FILENAME,
 360                                      mlxsw_sp->bus_info->dev);
 361        if (err) {
 362                dev_err(mlxsw_sp->bus_info->dev, "Could not request firmware file %s\n",
 363                        MLXSW_SP_FW_FILENAME);
 364                return err;
 365        }
 366
 367        err = mlxsw_sp_firmware_flash(mlxsw_sp, firmware);
 368        release_firmware(firmware);
 369        return err;
 370}
 371
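/* Flow counter helpers. mlxsw_sp_flow_counter_get() reads the packet and
 * byte counters of a flow counter via MGPC without clearing them (NOP
 * opcode); mlxsw_sp_flow_counter_alloc() takes an index from the flow
 * counter sub-pool and clears it before use; mlxsw_sp_flow_counter_free()
 * returns the index to the pool.
 *
 * Illustrative usage sketch (hypothetical caller, error handling omitted):
 *
 *	unsigned int counter_index;
 *	u64 packets, bytes;
 *
 *	mlxsw_sp_flow_counter_alloc(mlxsw_sp, &counter_index);
 *	mlxsw_sp_flow_counter_get(mlxsw_sp, counter_index, &packets, &bytes);
 *	mlxsw_sp_flow_counter_free(mlxsw_sp, counter_index);
 */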
 372int mlxsw_sp_flow_counter_get(struct mlxsw_sp *mlxsw_sp,
 373                              unsigned int counter_index, u64 *packets,
 374                              u64 *bytes)
 375{
 376        char mgpc_pl[MLXSW_REG_MGPC_LEN];
 377        int err;
 378
 379        mlxsw_reg_mgpc_pack(mgpc_pl, counter_index, MLXSW_REG_MGPC_OPCODE_NOP,
 380                            MLXSW_REG_FLOW_COUNTER_SET_TYPE_PACKETS_BYTES);
 381        err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mgpc), mgpc_pl);
 382        if (err)
 383                return err;
 384        if (packets)
 385                *packets = mlxsw_reg_mgpc_packet_counter_get(mgpc_pl);
 386        if (bytes)
 387                *bytes = mlxsw_reg_mgpc_byte_counter_get(mgpc_pl);
 388        return 0;
 389}
 390
 391static int mlxsw_sp_flow_counter_clear(struct mlxsw_sp *mlxsw_sp,
 392                                       unsigned int counter_index)
 393{
 394        char mgpc_pl[MLXSW_REG_MGPC_LEN];
 395
 396        mlxsw_reg_mgpc_pack(mgpc_pl, counter_index, MLXSW_REG_MGPC_OPCODE_CLEAR,
 397                            MLXSW_REG_FLOW_COUNTER_SET_TYPE_PACKETS_BYTES);
 398        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mgpc), mgpc_pl);
 399}
 400
 401int mlxsw_sp_flow_counter_alloc(struct mlxsw_sp *mlxsw_sp,
 402                                unsigned int *p_counter_index)
 403{
 404        int err;
 405
 406        err = mlxsw_sp_counter_alloc(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW,
 407                                     p_counter_index);
 408        if (err)
 409                return err;
 410        err = mlxsw_sp_flow_counter_clear(mlxsw_sp, *p_counter_index);
 411        if (err)
 412                goto err_counter_clear;
 413        return 0;
 414
 415err_counter_clear:
 416        mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW,
 417                              *p_counter_index);
 418        return err;
 419}
 420
 421void mlxsw_sp_flow_counter_free(struct mlxsw_sp *mlxsw_sp,
 422                                unsigned int counter_index)
 423{
 424         mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW,
 425                               counter_index);
 426}
 427
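/* Prepend the Tx header that the device expects in front of every packet
 * sent through the CPU port: a control-type header that steers the frame
 * directly to tx_info->local_port using the control traffic class.
 */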
 428static void mlxsw_sp_txhdr_construct(struct sk_buff *skb,
 429                                     const struct mlxsw_tx_info *tx_info)
 430{
 431        char *txhdr = skb_push(skb, MLXSW_TXHDR_LEN);
 432
 433        memset(txhdr, 0, MLXSW_TXHDR_LEN);
 434
 435        mlxsw_tx_hdr_version_set(txhdr, MLXSW_TXHDR_VERSION_1);
 436        mlxsw_tx_hdr_ctl_set(txhdr, MLXSW_TXHDR_ETH_CTL);
 437        mlxsw_tx_hdr_proto_set(txhdr, MLXSW_TXHDR_PROTO_ETH);
 438        mlxsw_tx_hdr_swid_set(txhdr, 0);
 439        mlxsw_tx_hdr_control_tclass_set(txhdr, 1);
 440        mlxsw_tx_hdr_port_mid_set(txhdr, tx_info->local_port);
 441        mlxsw_tx_hdr_type_set(txhdr, MLXSW_TXHDR_TYPE_CONTROL);
 442}
 443
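/* Set the per-VLAN STP state of a port by mapping the bridge state to the
 * SPMS register encoding; listening, disabled and blocking all translate to
 * the discarding state in hardware.
 */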
 444int mlxsw_sp_port_vid_stp_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid,
 445                              u8 state)
 446{
 447        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
 448        enum mlxsw_reg_spms_state spms_state;
 449        char *spms_pl;
 450        int err;
 451
 452        switch (state) {
 453        case BR_STATE_FORWARDING:
 454                spms_state = MLXSW_REG_SPMS_STATE_FORWARDING;
 455                break;
 456        case BR_STATE_LEARNING:
 457                spms_state = MLXSW_REG_SPMS_STATE_LEARNING;
 458                break;
 459        case BR_STATE_LISTENING: /* fall-through */
 460        case BR_STATE_DISABLED: /* fall-through */
 461        case BR_STATE_BLOCKING:
 462                spms_state = MLXSW_REG_SPMS_STATE_DISCARDING;
 463                break;
 464        default:
 465                BUG();
 466        }
 467
 468        spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL);
 469        if (!spms_pl)
 470                return -ENOMEM;
 471        mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port);
 472        mlxsw_reg_spms_vid_pack(spms_pl, vid, spms_state);
 473
 474        err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl);
 475        kfree(spms_pl);
 476        return err;
 477}
 478
 479static int mlxsw_sp_base_mac_get(struct mlxsw_sp *mlxsw_sp)
 480{
 481        char spad_pl[MLXSW_REG_SPAD_LEN] = {0};
 482        int err;
 483
 484        err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(spad), spad_pl);
 485        if (err)
 486                return err;
 487        mlxsw_reg_spad_base_mac_memcpy_from(spad_pl, mlxsw_sp->base_mac);
 488        return 0;
 489}
 490
 491static int mlxsw_sp_port_sample_set(struct mlxsw_sp_port *mlxsw_sp_port,
 492                                    bool enable, u32 rate)
 493{
 494        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
 495        char mpsc_pl[MLXSW_REG_MPSC_LEN];
 496
 497        mlxsw_reg_mpsc_pack(mpsc_pl, mlxsw_sp_port->local_port, enable, rate);
 498        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpsc), mpsc_pl);
 499}
 500
 501static int mlxsw_sp_port_admin_status_set(struct mlxsw_sp_port *mlxsw_sp_port,
 502                                          bool is_up)
 503{
 504        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
 505        char paos_pl[MLXSW_REG_PAOS_LEN];
 506
 507        mlxsw_reg_paos_pack(paos_pl, mlxsw_sp_port->local_port,
 508                            is_up ? MLXSW_PORT_ADMIN_STATUS_UP :
 509                            MLXSW_PORT_ADMIN_STATUS_DOWN);
 510        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(paos), paos_pl);
 511}
 512
 513static int mlxsw_sp_port_dev_addr_set(struct mlxsw_sp_port *mlxsw_sp_port,
 514                                      unsigned char *addr)
 515{
 516        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
 517        char ppad_pl[MLXSW_REG_PPAD_LEN];
 518
 519        mlxsw_reg_ppad_pack(ppad_pl, true, mlxsw_sp_port->local_port);
 520        mlxsw_reg_ppad_mac_memcpy_to(ppad_pl, addr);
 521        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ppad), ppad_pl);
 522}
 523
 524static int mlxsw_sp_port_dev_addr_init(struct mlxsw_sp_port *mlxsw_sp_port)
 525{
 526        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
 527        unsigned char *addr = mlxsw_sp_port->dev->dev_addr;
 528
 529        ether_addr_copy(addr, mlxsw_sp->base_mac);
 530        addr[ETH_ALEN - 1] += mlxsw_sp_port->local_port;
 531        return mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr);
 532}
 533
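/* Program the port MTU. The value written to hardware also accounts for the
 * Tx header and the Ethernet header, and is validated against the maximum
 * reported by the PMTU register.
 */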
 534static int mlxsw_sp_port_mtu_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 mtu)
 535{
 536        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
 537        char pmtu_pl[MLXSW_REG_PMTU_LEN];
 538        int max_mtu;
 539        int err;
 540
 541        mtu += MLXSW_TXHDR_LEN + ETH_HLEN;
 542        mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, 0);
 543        err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
 544        if (err)
 545                return err;
 546        max_mtu = mlxsw_reg_pmtu_max_mtu_get(pmtu_pl);
 547
 548        if (mtu > max_mtu)
 549                return -EINVAL;
 550
 551        mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, mtu);
 552        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
 553}
 554
 555static int mlxsw_sp_port_swid_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 swid)
 556{
 557        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
 558        char pspa_pl[MLXSW_REG_PSPA_LEN];
 559
 560        mlxsw_reg_pspa_pack(pspa_pl, swid, mlxsw_sp_port->local_port);
 561        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pspa), pspa_pl);
 562}
 563
 564int mlxsw_sp_port_vp_mode_set(struct mlxsw_sp_port *mlxsw_sp_port, bool enable)
 565{
 566        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
 567        char svpe_pl[MLXSW_REG_SVPE_LEN];
 568
 569        mlxsw_reg_svpe_pack(svpe_pl, mlxsw_sp_port->local_port, enable);
 570        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svpe), svpe_pl);
 571}
 572
 573int mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid,
 574                                   bool learn_enable)
 575{
 576        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
 577        char *spvmlr_pl;
 578        int err;
 579
 580        spvmlr_pl = kmalloc(MLXSW_REG_SPVMLR_LEN, GFP_KERNEL);
 581        if (!spvmlr_pl)
 582                return -ENOMEM;
 583        mlxsw_reg_spvmlr_pack(spvmlr_pl, mlxsw_sp_port->local_port, vid, vid,
 584                              learn_enable);
 585        err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvmlr), spvmlr_pl);
 586        kfree(spvmlr_pl);
 587        return err;
 588}
 589
 590static int __mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port,
 591                                    u16 vid)
 592{
 593        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
 594        char spvid_pl[MLXSW_REG_SPVID_LEN];
 595
 596        mlxsw_reg_spvid_pack(spvid_pl, mlxsw_sp_port->local_port, vid);
 597        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvid), spvid_pl);
 598}
 599
 600static int mlxsw_sp_port_allow_untagged_set(struct mlxsw_sp_port *mlxsw_sp_port,
 601                                            bool allow)
 602{
 603        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
 604        char spaft_pl[MLXSW_REG_SPAFT_LEN];
 605
 606        mlxsw_reg_spaft_pack(spaft_pl, mlxsw_sp_port->local_port, allow);
 607        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spaft), spaft_pl);
 608}
 609
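/* Set the port's PVID. A PVID of zero simply disables admission of untagged
 * frames; otherwise the new PVID is programmed via SPVID before untagged
 * admission is re-enabled, rolling back to the old PVID on failure.
 */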
 610int mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
 611{
 612        int err;
 613
 614        if (!vid) {
 615                err = mlxsw_sp_port_allow_untagged_set(mlxsw_sp_port, false);
 616                if (err)
 617                        return err;
 618        } else {
 619                err = __mlxsw_sp_port_pvid_set(mlxsw_sp_port, vid);
 620                if (err)
 621                        return err;
 622                err = mlxsw_sp_port_allow_untagged_set(mlxsw_sp_port, true);
 623                if (err)
 624                        goto err_port_allow_untagged_set;
 625        }
 626
 627        mlxsw_sp_port->pvid = vid;
 628        return 0;
 629
 630err_port_allow_untagged_set:
 631        __mlxsw_sp_port_pvid_set(mlxsw_sp_port, mlxsw_sp_port->pvid);
 632        return err;
 633}
 634
 635static int
 636mlxsw_sp_port_system_port_mapping_set(struct mlxsw_sp_port *mlxsw_sp_port)
 637{
 638        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
 639        char sspr_pl[MLXSW_REG_SSPR_LEN];
 640
 641        mlxsw_reg_sspr_pack(sspr_pl, mlxsw_sp_port->local_port);
 642        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sspr), sspr_pl);
 643}
 644
 645static int mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp,
 646                                         u8 local_port, u8 *p_module,
 647                                         u8 *p_width, u8 *p_lane)
 648{
 649        char pmlp_pl[MLXSW_REG_PMLP_LEN];
 650        int err;
 651
 652        mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
 653        err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
 654        if (err)
 655                return err;
 656        *p_module = mlxsw_reg_pmlp_module_get(pmlp_pl, 0);
 657        *p_width = mlxsw_reg_pmlp_width_get(pmlp_pl);
 658        *p_lane = mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, 0);
 659        return 0;
 660}
 661
 662static int mlxsw_sp_port_module_map(struct mlxsw_sp_port *mlxsw_sp_port,
 663                                    u8 module, u8 width, u8 lane)
 664{
 665        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
 666        char pmlp_pl[MLXSW_REG_PMLP_LEN];
 667        int i;
 668
 669        mlxsw_reg_pmlp_pack(pmlp_pl, mlxsw_sp_port->local_port);
 670        mlxsw_reg_pmlp_width_set(pmlp_pl, width);
 671        for (i = 0; i < width; i++) {
 672                mlxsw_reg_pmlp_module_set(pmlp_pl, i, module);
 673                mlxsw_reg_pmlp_tx_lane_set(pmlp_pl, i, lane + i);  /* Rx & Tx */
 674        }
 675
 676        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
 677}
 678
 679static int mlxsw_sp_port_module_unmap(struct mlxsw_sp_port *mlxsw_sp_port)
 680{
 681        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
 682        char pmlp_pl[MLXSW_REG_PMLP_LEN];
 683
 684        mlxsw_reg_pmlp_pack(pmlp_pl, mlxsw_sp_port->local_port);
 685        mlxsw_reg_pmlp_width_set(pmlp_pl, 0);
 686        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
 687}
 688
 689static int mlxsw_sp_port_open(struct net_device *dev)
 690{
 691        struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
 692        int err;
 693
 694        err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);
 695        if (err)
 696                return err;
 697        netif_start_queue(dev);
 698        return 0;
 699}
 700
 701static int mlxsw_sp_port_stop(struct net_device *dev)
 702{
 703        struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
 704
 705        netif_stop_queue(dev);
 706        return mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
 707}
 708
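/* Transmit path: make sure there is enough headroom for the Tx header
 * (reallocating the skb if necessary), pad the frame to the minimum Ethernet
 * length, prepend the Tx header and hand the skb to the core for
 * transmission. Byte statistics exclude the Tx header, which is consumed by
 * the device.
 */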
 709static netdev_tx_t mlxsw_sp_port_xmit(struct sk_buff *skb,
 710                                      struct net_device *dev)
 711{
 712        struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
 713        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
 714        struct mlxsw_sp_port_pcpu_stats *pcpu_stats;
 715        const struct mlxsw_tx_info tx_info = {
 716                .local_port = mlxsw_sp_port->local_port,
 717                .is_emad = false,
 718        };
 719        u64 len;
 720        int err;
 721
 722        if (mlxsw_core_skb_transmit_busy(mlxsw_sp->core, &tx_info))
 723                return NETDEV_TX_BUSY;
 724
 725        if (unlikely(skb_headroom(skb) < MLXSW_TXHDR_LEN)) {
 726                struct sk_buff *skb_orig = skb;
 727
 728                skb = skb_realloc_headroom(skb, MLXSW_TXHDR_LEN);
 729                if (!skb) {
 730                        this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
 731                        dev_kfree_skb_any(skb_orig);
 732                        return NETDEV_TX_OK;
 733                }
 734                dev_consume_skb_any(skb_orig);
 735        }
 736
 737        if (eth_skb_pad(skb)) {
 738                this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
 739                return NETDEV_TX_OK;
 740        }
 741
 742        mlxsw_sp_txhdr_construct(skb, &tx_info);
  743        /* The TX header is consumed by the HW on the way, so we should not
  744         * count its bytes as being sent.
  745         */
 746        len = skb->len - MLXSW_TXHDR_LEN;
 747
 748        /* Due to a race we might fail here because of a full queue. In that
 749         * unlikely case we simply drop the packet.
 750         */
 751        err = mlxsw_core_skb_transmit(mlxsw_sp->core, skb, &tx_info);
 752
 753        if (!err) {
 754                pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats);
 755                u64_stats_update_begin(&pcpu_stats->syncp);
 756                pcpu_stats->tx_packets++;
 757                pcpu_stats->tx_bytes += len;
 758                u64_stats_update_end(&pcpu_stats->syncp);
 759        } else {
 760                this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
 761                dev_kfree_skb_any(skb);
 762        }
 763        return NETDEV_TX_OK;
 764}
 765
 766static void mlxsw_sp_set_rx_mode(struct net_device *dev)
 767{
 768}
 769
 770static int mlxsw_sp_port_set_mac_address(struct net_device *dev, void *p)
 771{
 772        struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
 773        struct sockaddr *addr = p;
 774        int err;
 775
 776        if (!is_valid_ether_addr(addr->sa_data))
 777                return -EADDRNOTAVAIL;
 778
 779        err = mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr->sa_data);
 780        if (err)
 781                return err;
 782        memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
 783        return 0;
 784}
 785
 786static u16 mlxsw_sp_pg_buf_threshold_get(const struct mlxsw_sp *mlxsw_sp,
 787                                         int mtu)
 788{
 789        return 2 * mlxsw_sp_bytes_cells(mlxsw_sp, mtu);
 790}
 791
 792#define MLXSW_SP_CELL_FACTOR 2  /* 2 * cell_size / (IPG + cell_size + 1) */
 793
 794static u16 mlxsw_sp_pfc_delay_get(const struct mlxsw_sp *mlxsw_sp, int mtu,
 795                                  u16 delay)
 796{
 797        delay = mlxsw_sp_bytes_cells(mlxsw_sp, DIV_ROUND_UP(delay,
 798                                                            BITS_PER_BYTE));
 799        return MLXSW_SP_CELL_FACTOR * delay + mlxsw_sp_bytes_cells(mlxsw_sp,
 800                                                                   mtu);
 801}
 802
 803/* Maximum delay buffer needed in case of PAUSE frames, in bytes.
 804 * Assumes 100m cable and maximum MTU.
 805 */
 806#define MLXSW_SP_PAUSE_DELAY 58752
 807
 808static u16 mlxsw_sp_pg_buf_delay_get(const struct mlxsw_sp *mlxsw_sp, int mtu,
 809                                     u16 delay, bool pfc, bool pause)
 810{
 811        if (pfc)
 812                return mlxsw_sp_pfc_delay_get(mlxsw_sp, mtu, delay);
 813        else if (pause)
 814                return mlxsw_sp_bytes_cells(mlxsw_sp, MLXSW_SP_PAUSE_DELAY);
 815        else
 816                return 0;
 817}
 818
 819static void mlxsw_sp_pg_buf_pack(char *pbmc_pl, int index, u16 size, u16 thres,
 820                                 bool lossy)
 821{
 822        if (lossy)
 823                mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl, index, size);
 824        else
 825                mlxsw_reg_pbmc_lossless_buffer_pack(pbmc_pl, index, size,
 826                                                    thres);
 827}
 828
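/* Configure the port's headroom buffers via PBMC. Every ingress priority
 * group that has at least one priority mapped to it is sized to twice the
 * MTU (in cells) plus a delay allowance, and is configured as lossless when
 * PFC or global pause is enabled for it and as lossy otherwise.
 */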
 829int __mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port, int mtu,
 830                                 u8 *prio_tc, bool pause_en,
 831                                 struct ieee_pfc *my_pfc)
 832{
 833        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
 834        u8 pfc_en = !!my_pfc ? my_pfc->pfc_en : 0;
 835        u16 delay = !!my_pfc ? my_pfc->delay : 0;
 836        char pbmc_pl[MLXSW_REG_PBMC_LEN];
 837        int i, j, err;
 838
 839        mlxsw_reg_pbmc_pack(pbmc_pl, mlxsw_sp_port->local_port, 0, 0);
 840        err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl);
 841        if (err)
 842                return err;
 843
 844        for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
 845                bool configure = false;
 846                bool pfc = false;
 847                bool lossy;
 848                u16 thres;
 849
 850                for (j = 0; j < IEEE_8021QAZ_MAX_TCS; j++) {
 851                        if (prio_tc[j] == i) {
 852                                pfc = pfc_en & BIT(j);
 853                                configure = true;
 854                                break;
 855                        }
 856                }
 857
 858                if (!configure)
 859                        continue;
 860
 861                lossy = !(pfc || pause_en);
 862                thres = mlxsw_sp_pg_buf_threshold_get(mlxsw_sp, mtu);
 863                delay = mlxsw_sp_pg_buf_delay_get(mlxsw_sp, mtu, delay, pfc,
 864                                                  pause_en);
 865                mlxsw_sp_pg_buf_pack(pbmc_pl, i, thres + delay, thres, lossy);
 866        }
 867
 868        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl);
 869}
 870
 871static int mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port,
 872                                      int mtu, bool pause_en)
 873{
 874        u8 def_prio_tc[IEEE_8021QAZ_MAX_TCS] = {0};
 875        bool dcb_en = !!mlxsw_sp_port->dcb.ets;
 876        struct ieee_pfc *my_pfc;
 877        u8 *prio_tc;
 878
 879        prio_tc = dcb_en ? mlxsw_sp_port->dcb.ets->prio_tc : def_prio_tc;
 880        my_pfc = dcb_en ? mlxsw_sp_port->dcb.pfc : NULL;
 881
 882        return __mlxsw_sp_port_headroom_set(mlxsw_sp_port, mtu, prio_tc,
 883                                            pause_en, my_pfc);
 884}
 885
 886static int mlxsw_sp_port_change_mtu(struct net_device *dev, int mtu)
 887{
 888        struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
 889        bool pause_en = mlxsw_sp_port_is_pause_en(mlxsw_sp_port);
 890        int err;
 891
 892        err = mlxsw_sp_port_headroom_set(mlxsw_sp_port, mtu, pause_en);
 893        if (err)
 894                return err;
 895        err = mlxsw_sp_span_port_mtu_update(mlxsw_sp_port, mtu);
 896        if (err)
 897                goto err_span_port_mtu_update;
 898        err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, mtu);
 899        if (err)
 900                goto err_port_mtu_set;
 901        dev->mtu = mtu;
 902        return 0;
 903
 904err_port_mtu_set:
 905        mlxsw_sp_span_port_mtu_update(mlxsw_sp_port, dev->mtu);
 906err_span_port_mtu_update:
 907        mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en);
 908        return err;
 909}
 910
 911static int
 912mlxsw_sp_port_get_sw_stats64(const struct net_device *dev,
 913                             struct rtnl_link_stats64 *stats)
 914{
 915        struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
 916        struct mlxsw_sp_port_pcpu_stats *p;
 917        u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
 918        u32 tx_dropped = 0;
 919        unsigned int start;
 920        int i;
 921
 922        for_each_possible_cpu(i) {
 923                p = per_cpu_ptr(mlxsw_sp_port->pcpu_stats, i);
 924                do {
 925                        start = u64_stats_fetch_begin_irq(&p->syncp);
 926                        rx_packets      = p->rx_packets;
 927                        rx_bytes        = p->rx_bytes;
 928                        tx_packets      = p->tx_packets;
 929                        tx_bytes        = p->tx_bytes;
 930                } while (u64_stats_fetch_retry_irq(&p->syncp, start));
 931
 932                stats->rx_packets       += rx_packets;
 933                stats->rx_bytes         += rx_bytes;
 934                stats->tx_packets       += tx_packets;
 935                stats->tx_bytes         += tx_bytes;
 936                /* tx_dropped is u32, updated without syncp protection. */
 937                tx_dropped      += p->tx_dropped;
 938        }
 939        stats->tx_dropped       = tx_dropped;
 940        return 0;
 941}
 942
 943static bool mlxsw_sp_port_has_offload_stats(const struct net_device *dev, int attr_id)
 944{
 945        switch (attr_id) {
 946        case IFLA_OFFLOAD_XSTATS_CPU_HIT:
 947                return true;
 948        }
 949
 950        return false;
 951}
 952
 953static int mlxsw_sp_port_get_offload_stats(int attr_id, const struct net_device *dev,
 954                                           void *sp)
 955{
 956        switch (attr_id) {
 957        case IFLA_OFFLOAD_XSTATS_CPU_HIT:
 958                return mlxsw_sp_port_get_sw_stats64(dev, sp);
 959        }
 960
 961        return -EINVAL;
 962}
 963
 964static int mlxsw_sp_port_get_stats_raw(struct net_device *dev, int grp,
 965                                       int prio, char *ppcnt_pl)
 966{
 967        struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
 968        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
 969
 970        mlxsw_reg_ppcnt_pack(ppcnt_pl, mlxsw_sp_port->local_port, grp, prio);
 971        return mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ppcnt), ppcnt_pl);
 972}
 973
 974static int mlxsw_sp_port_get_hw_stats(struct net_device *dev,
 975                                      struct rtnl_link_stats64 *stats)
 976{
 977        char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
 978        int err;
 979
 980        err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_IEEE_8023_CNT,
 981                                          0, ppcnt_pl);
 982        if (err)
 983                goto out;
 984
 985        stats->tx_packets =
 986                mlxsw_reg_ppcnt_a_frames_transmitted_ok_get(ppcnt_pl);
 987        stats->rx_packets =
 988                mlxsw_reg_ppcnt_a_frames_received_ok_get(ppcnt_pl);
 989        stats->tx_bytes =
 990                mlxsw_reg_ppcnt_a_octets_transmitted_ok_get(ppcnt_pl);
 991        stats->rx_bytes =
 992                mlxsw_reg_ppcnt_a_octets_received_ok_get(ppcnt_pl);
 993        stats->multicast =
 994                mlxsw_reg_ppcnt_a_multicast_frames_received_ok_get(ppcnt_pl);
 995
 996        stats->rx_crc_errors =
 997                mlxsw_reg_ppcnt_a_frame_check_sequence_errors_get(ppcnt_pl);
 998        stats->rx_frame_errors =
 999                mlxsw_reg_ppcnt_a_alignment_errors_get(ppcnt_pl);
1000
1001        stats->rx_length_errors = (
1002                mlxsw_reg_ppcnt_a_in_range_length_errors_get(ppcnt_pl) +
1003                mlxsw_reg_ppcnt_a_out_of_range_length_field_get(ppcnt_pl) +
1004                mlxsw_reg_ppcnt_a_frame_too_long_errors_get(ppcnt_pl));
1005
1006        stats->rx_errors = (stats->rx_crc_errors +
1007                stats->rx_frame_errors + stats->rx_length_errors);
1008
1009out:
1010        return err;
1011}
1012
1013static void
1014mlxsw_sp_port_get_hw_xstats(struct net_device *dev,
1015                            struct mlxsw_sp_port_xstats *xstats)
1016{
1017        char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
1018        int err, i;
1019
1020        err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_EXT_CNT, 0,
1021                                          ppcnt_pl);
1022        if (!err)
1023                xstats->ecn = mlxsw_reg_ppcnt_ecn_marked_get(ppcnt_pl);
1024
1025        for (i = 0; i < TC_MAX_QUEUE; i++) {
1026                err = mlxsw_sp_port_get_stats_raw(dev,
1027                                                  MLXSW_REG_PPCNT_TC_CONG_TC,
1028                                                  i, ppcnt_pl);
1029                if (!err)
1030                        xstats->wred_drop[i] =
1031                                mlxsw_reg_ppcnt_wred_discard_get(ppcnt_pl);
1032
1033                err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_TC_CNT,
1034                                                  i, ppcnt_pl);
1035                if (err)
1036                        continue;
1037
1038                xstats->backlog[i] =
1039                        mlxsw_reg_ppcnt_tc_transmit_queue_get(ppcnt_pl);
1040                xstats->tail_drop[i] =
1041                        mlxsw_reg_ppcnt_tc_no_buffer_discard_uc_get(ppcnt_pl);
1042        }
1043
1044        for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
1045                err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_PRIO_CNT,
1046                                                  i, ppcnt_pl);
1047                if (err)
1048                        continue;
1049
1050                xstats->tx_packets[i] = mlxsw_reg_ppcnt_tx_frames_get(ppcnt_pl);
1051                xstats->tx_bytes[i] = mlxsw_reg_ppcnt_tx_octets_get(ppcnt_pl);
1052        }
1053}
1054
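/* Periodic work that refreshes the cached HW statistics used by
 * ndo_get_stats64. The HW is not queried while the carrier is down, but the
 * work always re-arms itself.
 */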
1055static void update_stats_cache(struct work_struct *work)
1056{
1057        struct mlxsw_sp_port *mlxsw_sp_port =
1058                container_of(work, struct mlxsw_sp_port,
1059                             periodic_hw_stats.update_dw.work);
1060
1061        if (!netif_carrier_ok(mlxsw_sp_port->dev))
1062                goto out;
1063
1064        mlxsw_sp_port_get_hw_stats(mlxsw_sp_port->dev,
1065                                   &mlxsw_sp_port->periodic_hw_stats.stats);
1066        mlxsw_sp_port_get_hw_xstats(mlxsw_sp_port->dev,
1067                                    &mlxsw_sp_port->periodic_hw_stats.xstats);
1068
1069out:
1070        mlxsw_core_schedule_dw(&mlxsw_sp_port->periodic_hw_stats.update_dw,
1071                               MLXSW_HW_STATS_UPDATE_TIME);
1072}
1073
1074/* Return the stats from a cache that is updated periodically,
1075 * as this function might get called in an atomic context.
1076 */
1077static void
1078mlxsw_sp_port_get_stats64(struct net_device *dev,
1079                          struct rtnl_link_stats64 *stats)
1080{
1081        struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1082
1083        memcpy(stats, &mlxsw_sp_port->periodic_hw_stats.stats, sizeof(*stats));
1084}
1085
1086static int __mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port,
1087                                    u16 vid_begin, u16 vid_end,
1088                                    bool is_member, bool untagged)
1089{
1090        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1091        char *spvm_pl;
1092        int err;
1093
1094        spvm_pl = kmalloc(MLXSW_REG_SPVM_LEN, GFP_KERNEL);
1095        if (!spvm_pl)
1096                return -ENOMEM;
1097
1098        mlxsw_reg_spvm_pack(spvm_pl, mlxsw_sp_port->local_port, vid_begin,
1099                            vid_end, is_member, untagged);
1100        err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvm), spvm_pl);
1101        kfree(spvm_pl);
1102        return err;
1103}
1104
1105int mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid_begin,
1106                           u16 vid_end, bool is_member, bool untagged)
1107{
1108        u16 vid, vid_e;
1109        int err;
1110
1111        for (vid = vid_begin; vid <= vid_end;
1112             vid += MLXSW_REG_SPVM_REC_MAX_COUNT) {
1113                vid_e = min((u16) (vid + MLXSW_REG_SPVM_REC_MAX_COUNT - 1),
1114                            vid_end);
1115
1116                err = __mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid_e,
1117                                               is_member, untagged);
1118                if (err)
1119                        return err;
1120        }
1121
1122        return 0;
1123}
1124
1125static void mlxsw_sp_port_vlan_flush(struct mlxsw_sp_port *mlxsw_sp_port)
1126{
1127        struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan, *tmp;
1128
1129        list_for_each_entry_safe(mlxsw_sp_port_vlan, tmp,
1130                                 &mlxsw_sp_port->vlans_list, list)
1131                mlxsw_sp_port_vlan_put(mlxsw_sp_port_vlan);
1132}
1133
1134static struct mlxsw_sp_port_vlan *
1135mlxsw_sp_port_vlan_create(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
1136{
1137        struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
1138        bool untagged = vid == 1;
1139        int err;
1140
1141        err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, true, untagged);
1142        if (err)
1143                return ERR_PTR(err);
1144
1145        mlxsw_sp_port_vlan = kzalloc(sizeof(*mlxsw_sp_port_vlan), GFP_KERNEL);
1146        if (!mlxsw_sp_port_vlan) {
1147                err = -ENOMEM;
1148                goto err_port_vlan_alloc;
1149        }
1150
1151        mlxsw_sp_port_vlan->mlxsw_sp_port = mlxsw_sp_port;
1152        mlxsw_sp_port_vlan->ref_count = 1;
1153        mlxsw_sp_port_vlan->vid = vid;
1154        list_add(&mlxsw_sp_port_vlan->list, &mlxsw_sp_port->vlans_list);
1155
1156        return mlxsw_sp_port_vlan;
1157
1158err_port_vlan_alloc:
1159        mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false);
1160        return ERR_PTR(err);
1161}
1162
1163static void
1164mlxsw_sp_port_vlan_destroy(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
1165{
1166        struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
1167        u16 vid = mlxsw_sp_port_vlan->vid;
1168
1169        list_del(&mlxsw_sp_port_vlan->list);
1170        kfree(mlxsw_sp_port_vlan);
1171        mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false);
1172}
1173
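/* {Port, VID} objects are reference counted: _get() returns an existing
 * object or creates one (which also adds the VID to the port's VLAN filter),
 * and _put() releases it, leaving any bridge or router construct and
 * removing the VID from the filter when the last reference is dropped.
 */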
1174struct mlxsw_sp_port_vlan *
1175mlxsw_sp_port_vlan_get(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
1176{
1177        struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
1178
1179        mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
1180        if (mlxsw_sp_port_vlan) {
1181                mlxsw_sp_port_vlan->ref_count++;
1182                return mlxsw_sp_port_vlan;
1183        }
1184
1185        return mlxsw_sp_port_vlan_create(mlxsw_sp_port, vid);
1186}
1187
1188void mlxsw_sp_port_vlan_put(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
1189{
1190        struct mlxsw_sp_fid *fid = mlxsw_sp_port_vlan->fid;
1191
1192        if (--mlxsw_sp_port_vlan->ref_count != 0)
1193                return;
1194
1195        if (mlxsw_sp_port_vlan->bridge_port)
1196                mlxsw_sp_port_vlan_bridge_leave(mlxsw_sp_port_vlan);
1197        else if (fid)
1198                mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan);
1199
1200        mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan);
1201}
1202
1203static int mlxsw_sp_port_add_vid(struct net_device *dev,
1204                                 __be16 __always_unused proto, u16 vid)
1205{
1206        struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1207
1208        /* VLAN 0 is added to HW filter when device goes up, but it is
1209         * reserved in our case, so simply return.
1210         */
1211        if (!vid)
1212                return 0;
1213
1214        return PTR_ERR_OR_ZERO(mlxsw_sp_port_vlan_get(mlxsw_sp_port, vid));
1215}
1216
1217static int mlxsw_sp_port_kill_vid(struct net_device *dev,
1218                                  __be16 __always_unused proto, u16 vid)
1219{
1220        struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1221        struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
1222
1223        /* VLAN 0 is removed from HW filter when device goes down, but
1224         * it is reserved in our case, so simply return.
1225         */
1226        if (!vid)
1227                return 0;
1228
1229        mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
1230        if (!mlxsw_sp_port_vlan)
1231                return 0;
1232        mlxsw_sp_port_vlan_put(mlxsw_sp_port_vlan);
1233
1234        return 0;
1235}
1236
1237static int mlxsw_sp_port_get_phys_port_name(struct net_device *dev, char *name,
1238                                            size_t len)
1239{
1240        struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1241        u8 module = mlxsw_sp_port->mapping.module;
1242        u8 width = mlxsw_sp_port->mapping.width;
1243        u8 lane = mlxsw_sp_port->mapping.lane;
1244        int err;
1245
1246        if (!mlxsw_sp_port->split)
1247                err = snprintf(name, len, "p%d", module + 1);
1248        else
1249                err = snprintf(name, len, "p%ds%d", module + 1,
1250                               lane / width);
1251
1252        if (err >= len)
1253                return -EINVAL;
1254
1255        return 0;
1256}
1257
1258static struct mlxsw_sp_port_mall_tc_entry *
1259mlxsw_sp_port_mall_tc_entry_find(struct mlxsw_sp_port *port,
1260                                 unsigned long cookie) {
1261        struct mlxsw_sp_port_mall_tc_entry *mall_tc_entry;
1262
1263        list_for_each_entry(mall_tc_entry, &port->mall_tc_list, list)
1264                if (mall_tc_entry->cookie == cookie)
1265                        return mall_tc_entry;
1266
1267        return NULL;
1268}
1269
1270static int
1271mlxsw_sp_port_add_cls_matchall_mirror(struct mlxsw_sp_port *mlxsw_sp_port,
1272                                      struct mlxsw_sp_port_mall_mirror_tc_entry *mirror,
1273                                      const struct tc_action *a,
1274                                      bool ingress)
1275{
1276        enum mlxsw_sp_span_type span_type;
1277        struct net_device *to_dev;
1278
1279        to_dev = tcf_mirred_dev(a);
1280        if (!to_dev) {
1281                netdev_err(mlxsw_sp_port->dev, "Could not find requested device\n");
1282                return -EINVAL;
1283        }
1284
1285        mirror->ingress = ingress;
1286        span_type = ingress ? MLXSW_SP_SPAN_INGRESS : MLXSW_SP_SPAN_EGRESS;
1287        return mlxsw_sp_span_mirror_add(mlxsw_sp_port, to_dev, span_type,
1288                                        true, &mirror->span_id);
1289}
1290
1291static void
1292mlxsw_sp_port_del_cls_matchall_mirror(struct mlxsw_sp_port *mlxsw_sp_port,
1293                                      struct mlxsw_sp_port_mall_mirror_tc_entry *mirror)
1294{
1295        enum mlxsw_sp_span_type span_type;
1296
1297        span_type = mirror->ingress ?
1298                        MLXSW_SP_SPAN_INGRESS : MLXSW_SP_SPAN_EGRESS;
1299        mlxsw_sp_span_mirror_del(mlxsw_sp_port, mirror->span_id,
1300                                 span_type, true);
1301}
1302
1303static int
1304mlxsw_sp_port_add_cls_matchall_sample(struct mlxsw_sp_port *mlxsw_sp_port,
1305                                      struct tc_cls_matchall_offload *cls,
1306                                      const struct tc_action *a,
1307                                      bool ingress)
1308{
1309        int err;
1310
1311        if (!mlxsw_sp_port->sample)
1312                return -EOPNOTSUPP;
1313        if (rtnl_dereference(mlxsw_sp_port->sample->psample_group)) {
1314                netdev_err(mlxsw_sp_port->dev, "sample already active\n");
1315                return -EEXIST;
1316        }
1317        if (tcf_sample_rate(a) > MLXSW_REG_MPSC_RATE_MAX) {
1318                netdev_err(mlxsw_sp_port->dev, "sample rate not supported\n");
1319                return -EOPNOTSUPP;
1320        }
1321
1322        rcu_assign_pointer(mlxsw_sp_port->sample->psample_group,
1323                           tcf_sample_psample_group(a));
1324        mlxsw_sp_port->sample->truncate = tcf_sample_truncate(a);
1325        mlxsw_sp_port->sample->trunc_size = tcf_sample_trunc_size(a);
1326        mlxsw_sp_port->sample->rate = tcf_sample_rate(a);
1327
1328        err = mlxsw_sp_port_sample_set(mlxsw_sp_port, true, tcf_sample_rate(a));
1329        if (err)
1330                goto err_port_sample_set;
1331        return 0;
1332
1333err_port_sample_set:
1334        RCU_INIT_POINTER(mlxsw_sp_port->sample->psample_group, NULL);
1335        return err;
1336}
1337
1338static void
1339mlxsw_sp_port_del_cls_matchall_sample(struct mlxsw_sp_port *mlxsw_sp_port)
1340{
1341        if (!mlxsw_sp_port->sample)
1342                return;
1343
1344        mlxsw_sp_port_sample_set(mlxsw_sp_port, false, 1);
1345        RCU_INIT_POINTER(mlxsw_sp_port->sample->psample_group, NULL);
1346}
1347
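/* Offload a matchall classifier. Only a single action with protocol "all" is
 * supported: an egress mirred mirror is translated into a SPAN agent and a
 * sample action into the per-port packet sampler; anything else is rejected
 * with -EOPNOTSUPP.
 */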
1348static int mlxsw_sp_port_add_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port,
1349                                          struct tc_cls_matchall_offload *f,
1350                                          bool ingress)
1351{
1352        struct mlxsw_sp_port_mall_tc_entry *mall_tc_entry;
1353        __be16 protocol = f->common.protocol;
1354        const struct tc_action *a;
1355        LIST_HEAD(actions);
1356        int err;
1357
1358        if (!tcf_exts_has_one_action(f->exts)) {
1359                netdev_err(mlxsw_sp_port->dev, "only singular actions are supported\n");
1360                return -EOPNOTSUPP;
1361        }
1362
1363        mall_tc_entry = kzalloc(sizeof(*mall_tc_entry), GFP_KERNEL);
1364        if (!mall_tc_entry)
1365                return -ENOMEM;
1366        mall_tc_entry->cookie = f->cookie;
1367
1368        tcf_exts_to_list(f->exts, &actions);
1369        a = list_first_entry(&actions, struct tc_action, list);
1370
1371        if (is_tcf_mirred_egress_mirror(a) && protocol == htons(ETH_P_ALL)) {
1372                struct mlxsw_sp_port_mall_mirror_tc_entry *mirror;
1373
1374                mall_tc_entry->type = MLXSW_SP_PORT_MALL_MIRROR;
1375                mirror = &mall_tc_entry->mirror;
1376                err = mlxsw_sp_port_add_cls_matchall_mirror(mlxsw_sp_port,
1377                                                            mirror, a, ingress);
1378        } else if (is_tcf_sample(a) && protocol == htons(ETH_P_ALL)) {
1379                mall_tc_entry->type = MLXSW_SP_PORT_MALL_SAMPLE;
1380                err = mlxsw_sp_port_add_cls_matchall_sample(mlxsw_sp_port, f,
1381                                                            a, ingress);
1382        } else {
1383                err = -EOPNOTSUPP;
1384        }
1385
1386        if (err)
1387                goto err_add_action;
1388
1389        list_add_tail(&mall_tc_entry->list, &mlxsw_sp_port->mall_tc_list);
1390        return 0;
1391
1392err_add_action:
1393        kfree(mall_tc_entry);
1394        return err;
1395}
1396
1397static void mlxsw_sp_port_del_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port,
1398                                           struct tc_cls_matchall_offload *f)
1399{
1400        struct mlxsw_sp_port_mall_tc_entry *mall_tc_entry;
1401
1402        mall_tc_entry = mlxsw_sp_port_mall_tc_entry_find(mlxsw_sp_port,
1403                                                         f->cookie);
1404        if (!mall_tc_entry) {
1405                netdev_dbg(mlxsw_sp_port->dev, "tc entry not found on port\n");
1406                return;
1407        }
1408        list_del(&mall_tc_entry->list);
1409
1410        switch (mall_tc_entry->type) {
1411        case MLXSW_SP_PORT_MALL_MIRROR:
1412                mlxsw_sp_port_del_cls_matchall_mirror(mlxsw_sp_port,
1413                                                      &mall_tc_entry->mirror);
1414                break;
1415        case MLXSW_SP_PORT_MALL_SAMPLE:
1416                mlxsw_sp_port_del_cls_matchall_sample(mlxsw_sp_port);
1417                break;
1418        default:
1419                WARN_ON(1);
1420        }
1421
1422        kfree(mall_tc_entry);
1423}
1424
1425static int mlxsw_sp_setup_tc_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port,
1426                                          struct tc_cls_matchall_offload *f,
1427                                          bool ingress)
1428{
1429        switch (f->command) {
1430        case TC_CLSMATCHALL_REPLACE:
1431                return mlxsw_sp_port_add_cls_matchall(mlxsw_sp_port, f,
1432                                                      ingress);
1433        case TC_CLSMATCHALL_DESTROY:
1434                mlxsw_sp_port_del_cls_matchall(mlxsw_sp_port, f);
1435                return 0;
1436        default:
1437                return -EOPNOTSUPP;
1438        }
1439}
1440
1441static int
1442mlxsw_sp_setup_tc_cls_flower(struct mlxsw_sp_acl_block *acl_block,
1443                             struct tc_cls_flower_offload *f)
1444{
1445        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_acl_block_mlxsw_sp(acl_block);
1446
1447        switch (f->command) {
1448        case TC_CLSFLOWER_REPLACE:
1449                return mlxsw_sp_flower_replace(mlxsw_sp, acl_block, f);
1450        case TC_CLSFLOWER_DESTROY:
1451                mlxsw_sp_flower_destroy(mlxsw_sp, acl_block, f);
1452                return 0;
1453        case TC_CLSFLOWER_STATS:
1454                return mlxsw_sp_flower_stats(mlxsw_sp, acl_block, f);
1455        default:
1456                return -EOPNOTSUPP;
1457        }
1458}
1459
1460static int mlxsw_sp_setup_tc_block_cb_matchall(enum tc_setup_type type,
1461                                               void *type_data,
1462                                               void *cb_priv, bool ingress)
1463{
1464        struct mlxsw_sp_port *mlxsw_sp_port = cb_priv;
1465
1466        switch (type) {
1467        case TC_SETUP_CLSMATCHALL:
1468                if (!tc_cls_can_offload_and_chain0(mlxsw_sp_port->dev,
1469                                                   type_data))
1470                        return -EOPNOTSUPP;
1471
1472                return mlxsw_sp_setup_tc_cls_matchall(mlxsw_sp_port, type_data,
1473                                                      ingress);
1474        case TC_SETUP_CLSFLOWER:
1475                return 0;
1476        default:
1477                return -EOPNOTSUPP;
1478        }
1479}
1480
1481static int mlxsw_sp_setup_tc_block_cb_matchall_ig(enum tc_setup_type type,
1482                                                  void *type_data,
1483                                                  void *cb_priv)
1484{
1485        return mlxsw_sp_setup_tc_block_cb_matchall(type, type_data,
1486                                                   cb_priv, true);
1487}
1488
1489static int mlxsw_sp_setup_tc_block_cb_matchall_eg(enum tc_setup_type type,
1490                                                  void *type_data,
1491                                                  void *cb_priv)
1492{
1493        return mlxsw_sp_setup_tc_block_cb_matchall(type, type_data,
1494                                                   cb_priv, false);
1495}
1496
1497static int mlxsw_sp_setup_tc_block_cb_flower(enum tc_setup_type type,
1498                                             void *type_data, void *cb_priv)
1499{
1500        struct mlxsw_sp_acl_block *acl_block = cb_priv;
1501
1502        switch (type) {
1503        case TC_SETUP_CLSMATCHALL:
1504                return 0;
1505        case TC_SETUP_CLSFLOWER:
1506                if (mlxsw_sp_acl_block_disabled(acl_block))
1507                        return -EOPNOTSUPP;
1508
1509                return mlxsw_sp_setup_tc_cls_flower(acl_block, type_data);
1510        default:
1511                return -EOPNOTSUPP;
1512        }
1513}
1514
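    /* Flower rules live in a per-block ACL block that can be shared by
     * several ports: the first bind on a tcf_block allocates the ACL block
     * and registers the flower callback, while later binds just look the
     * callback up. Every bind takes a reference, and the ACL block is only
     * destroyed when the last unbind drops the last reference.
     */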
1515static int
1516mlxsw_sp_setup_tc_block_flower_bind(struct mlxsw_sp_port *mlxsw_sp_port,
1517                                    struct tcf_block *block, bool ingress)
1518{
1519        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1520        struct mlxsw_sp_acl_block *acl_block;
1521        struct tcf_block_cb *block_cb;
1522        int err;
1523
1524        block_cb = tcf_block_cb_lookup(block, mlxsw_sp_setup_tc_block_cb_flower,
1525                                       mlxsw_sp);
1526        if (!block_cb) {
1527                acl_block = mlxsw_sp_acl_block_create(mlxsw_sp, block->net);
1528                if (!acl_block)
1529                        return -ENOMEM;
1530                block_cb = __tcf_block_cb_register(block,
1531                                                   mlxsw_sp_setup_tc_block_cb_flower,
1532                                                   mlxsw_sp, acl_block);
1533                if (IS_ERR(block_cb)) {
1534                        err = PTR_ERR(block_cb);
1535                        goto err_cb_register;
1536                }
1537        } else {
1538                acl_block = tcf_block_cb_priv(block_cb);
1539        }
1540        tcf_block_cb_incref(block_cb);
1541        err = mlxsw_sp_acl_block_bind(mlxsw_sp, acl_block,
1542                                      mlxsw_sp_port, ingress);
1543        if (err)
1544                goto err_block_bind;
1545
1546        if (ingress)
1547                mlxsw_sp_port->ing_acl_block = acl_block;
1548        else
1549                mlxsw_sp_port->eg_acl_block = acl_block;
1550
1551        return 0;
1552
1553err_block_bind:
1554        if (!tcf_block_cb_decref(block_cb)) {
1555                __tcf_block_cb_unregister(block_cb);
1556err_cb_register:
1557                mlxsw_sp_acl_block_destroy(acl_block);
1558        }
1559        return err;
1560}
1561
1562static void
1563mlxsw_sp_setup_tc_block_flower_unbind(struct mlxsw_sp_port *mlxsw_sp_port,
1564                                      struct tcf_block *block, bool ingress)
1565{
1566        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1567        struct mlxsw_sp_acl_block *acl_block;
1568        struct tcf_block_cb *block_cb;
1569        int err;
1570
1571        block_cb = tcf_block_cb_lookup(block, mlxsw_sp_setup_tc_block_cb_flower,
1572                                       mlxsw_sp);
1573        if (!block_cb)
1574                return;
1575
1576        if (ingress)
1577                mlxsw_sp_port->ing_acl_block = NULL;
1578        else
1579                mlxsw_sp_port->eg_acl_block = NULL;
1580
1581        acl_block = tcf_block_cb_priv(block_cb);
1582        err = mlxsw_sp_acl_block_unbind(mlxsw_sp, acl_block,
1583                                        mlxsw_sp_port, ingress);
1584        if (!err && !tcf_block_cb_decref(block_cb)) {
1585                __tcf_block_cb_unregister(block_cb);
1586                mlxsw_sp_acl_block_destroy(acl_block);
1587        }
1588}
1589
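    /* A clsact block bind registers two callbacks on the block: a matchall
     * callback whose private data is the port (ingress or egress flavour,
     * chosen by the binder type) and a flower callback bound through
     * mlxsw_sp_setup_tc_block_flower_bind() whose private data is the
     * shared ACL block.
     */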
1590static int mlxsw_sp_setup_tc_block(struct mlxsw_sp_port *mlxsw_sp_port,
1591                                   struct tc_block_offload *f)
1592{
1593        tc_setup_cb_t *cb;
1594        bool ingress;
1595        int err;
1596
1597        if (f->binder_type == TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS) {
1598                cb = mlxsw_sp_setup_tc_block_cb_matchall_ig;
1599                ingress = true;
1600        } else if (f->binder_type == TCF_BLOCK_BINDER_TYPE_CLSACT_EGRESS) {
1601                cb = mlxsw_sp_setup_tc_block_cb_matchall_eg;
1602                ingress = false;
1603        } else {
1604                return -EOPNOTSUPP;
1605        }
1606
1607        switch (f->command) {
1608        case TC_BLOCK_BIND:
1609                err = tcf_block_cb_register(f->block, cb, mlxsw_sp_port,
1610                                            mlxsw_sp_port);
1611                if (err)
1612                        return err;
1613                err = mlxsw_sp_setup_tc_block_flower_bind(mlxsw_sp_port,
1614                                                          f->block, ingress);
1615                if (err) {
1616                        tcf_block_cb_unregister(f->block, cb, mlxsw_sp_port);
1617                        return err;
1618                }
1619                return 0;
1620        case TC_BLOCK_UNBIND:
1621                mlxsw_sp_setup_tc_block_flower_unbind(mlxsw_sp_port,
1622                                                      f->block, ingress);
1623                tcf_block_cb_unregister(f->block, cb, mlxsw_sp_port);
1624                return 0;
1625        default:
1626                return -EOPNOTSUPP;
1627        }
1628}
1629
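    /* ndo_setup_tc entry point: dispatches clsact block binds as well as
     * RED and PRIO qdisc offloads. A block bind is triggered, for example,
     * by (swp1 is an illustrative port name):
     *   tc qdisc add dev swp1 clsact
     */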
1630static int mlxsw_sp_setup_tc(struct net_device *dev, enum tc_setup_type type,
1631                             void *type_data)
1632{
1633        struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1634
1635        switch (type) {
1636        case TC_SETUP_BLOCK:
1637                return mlxsw_sp_setup_tc_block(mlxsw_sp_port, type_data);
1638        case TC_SETUP_QDISC_RED:
1639                return mlxsw_sp_setup_tc_red(mlxsw_sp_port, type_data);
1640        case TC_SETUP_QDISC_PRIO:
1641                return mlxsw_sp_setup_tc_prio(mlxsw_sp_port, type_data);
1642        default:
1643                return -EOPNOTSUPP;
1644        }
1645}
1646
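    /* NETIF_F_HW_TC may only be cleared while no tc rules are offloaded on
     * the port; clearing it bumps a "disabled" count on the port's ACL
     * blocks so that new flower rules are rejected. For example
     * (illustrative port name):
     *   ethtool -K swp1 hw-tc-offload off
     */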
1648static int mlxsw_sp_feature_hw_tc(struct net_device *dev, bool enable)
1649{
1650        struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1651
1652        if (!enable) {
1653                if (mlxsw_sp_acl_block_rule_count(mlxsw_sp_port->ing_acl_block) ||
1654                    mlxsw_sp_acl_block_rule_count(mlxsw_sp_port->eg_acl_block) ||
1655                    !list_empty(&mlxsw_sp_port->mall_tc_list)) {
1656                        netdev_err(dev, "Active offloaded tc filters, can't turn hw_tc_offload off\n");
1657                        return -EINVAL;
1658                }
1659                mlxsw_sp_acl_block_disable_inc(mlxsw_sp_port->ing_acl_block);
1660                mlxsw_sp_acl_block_disable_inc(mlxsw_sp_port->eg_acl_block);
1661        } else {
1662                mlxsw_sp_acl_block_disable_dec(mlxsw_sp_port->ing_acl_block);
1663                mlxsw_sp_acl_block_disable_dec(mlxsw_sp_port->eg_acl_block);
1664        }
1665        return 0;
1666}
1667
1668typedef int (*mlxsw_sp_feature_handler)(struct net_device *dev, bool enable);
1669
1670static int mlxsw_sp_handle_feature(struct net_device *dev,
1671                                   netdev_features_t wanted_features,
1672                                   netdev_features_t feature,
1673                                   mlxsw_sp_feature_handler feature_handler)
1674{
1675        netdev_features_t changes = wanted_features ^ dev->features;
1676        bool enable = !!(wanted_features & feature);
1677        int err;
1678
1679        if (!(changes & feature))
1680                return 0;
1681
1682        err = feature_handler(dev, enable);
1683        if (err) {
1684                netdev_err(dev, "%s feature %pNF failed, err %d\n",
1685                           enable ? "Enable" : "Disable", &feature, err);
1686                return err;
1687        }
1688
1689        if (enable)
1690                dev->features |= feature;
1691        else
1692                dev->features &= ~feature;
1693
1694        return 0;
1695}

1696static int mlxsw_sp_set_features(struct net_device *dev,
1697                                 netdev_features_t features)
1698{
1699        return mlxsw_sp_handle_feature(dev, features, NETIF_F_HW_TC,
1700                                       mlxsw_sp_feature_hw_tc);
1701}
1702
1703static const struct net_device_ops mlxsw_sp_port_netdev_ops = {
1704        .ndo_open               = mlxsw_sp_port_open,
1705        .ndo_stop               = mlxsw_sp_port_stop,
1706        .ndo_start_xmit         = mlxsw_sp_port_xmit,
1707        .ndo_setup_tc           = mlxsw_sp_setup_tc,
1708        .ndo_set_rx_mode        = mlxsw_sp_set_rx_mode,
1709        .ndo_set_mac_address    = mlxsw_sp_port_set_mac_address,
1710        .ndo_change_mtu         = mlxsw_sp_port_change_mtu,
1711        .ndo_get_stats64        = mlxsw_sp_port_get_stats64,
1712        .ndo_has_offload_stats  = mlxsw_sp_port_has_offload_stats,
1713        .ndo_get_offload_stats  = mlxsw_sp_port_get_offload_stats,
1714        .ndo_vlan_rx_add_vid    = mlxsw_sp_port_add_vid,
1715        .ndo_vlan_rx_kill_vid   = mlxsw_sp_port_kill_vid,
1716        .ndo_get_phys_port_name = mlxsw_sp_port_get_phys_port_name,
1717        .ndo_set_features       = mlxsw_sp_set_features,
1718};
1719
1720static void mlxsw_sp_port_get_drvinfo(struct net_device *dev,
1721                                      struct ethtool_drvinfo *drvinfo)
1722{
1723        struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1724        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1725
1726        strlcpy(drvinfo->driver, mlxsw_sp_driver_name, sizeof(drvinfo->driver));
1727        strlcpy(drvinfo->version, mlxsw_sp_driver_version,
1728                sizeof(drvinfo->version));
1729        snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
1730                 "%d.%d.%d",
1731                 mlxsw_sp->bus_info->fw_rev.major,
1732                 mlxsw_sp->bus_info->fw_rev.minor,
1733                 mlxsw_sp->bus_info->fw_rev.subminor);
1734        strlcpy(drvinfo->bus_info, mlxsw_sp->bus_info->device_name,
1735                sizeof(drvinfo->bus_info));
1736}
1737
1738static void mlxsw_sp_port_get_pauseparam(struct net_device *dev,
1739                                         struct ethtool_pauseparam *pause)
1740{
1741        struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1742
1743        pause->rx_pause = mlxsw_sp_port->link.rx_pause;
1744        pause->tx_pause = mlxsw_sp_port->link.tx_pause;
1745}
1746
1747static int mlxsw_sp_port_pause_set(struct mlxsw_sp_port *mlxsw_sp_port,
1748                                   struct ethtool_pauseparam *pause)
1749{
1750        char pfcc_pl[MLXSW_REG_PFCC_LEN];
1751
1752        mlxsw_reg_pfcc_pack(pfcc_pl, mlxsw_sp_port->local_port);
1753        mlxsw_reg_pfcc_pprx_set(pfcc_pl, pause->rx_pause);
1754        mlxsw_reg_pfcc_pptx_set(pfcc_pl, pause->tx_pause);
1755
1756        return mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pfcc),
1757                               pfcc_pl);
1758}
1759
1760static int mlxsw_sp_port_set_pauseparam(struct net_device *dev,
1761                                        struct ethtool_pauseparam *pause)
1762{
1763        struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1764        bool pause_en = pause->tx_pause || pause->rx_pause;
1765        int err;
1766
1767        if (mlxsw_sp_port->dcb.pfc && mlxsw_sp_port->dcb.pfc->pfc_en) {
1768                netdev_err(dev, "PFC already enabled on port\n");
1769                return -EINVAL;
1770        }
1771
1772        if (pause->autoneg) {
1773                netdev_err(dev, "PAUSE frames autonegotiation isn't supported\n");
1774                return -EINVAL;
1775        }
1776
1777        err = mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en);
1778        if (err) {
1779                netdev_err(dev, "Failed to configure port's headroom\n");
1780                return err;
1781        }
1782
1783        err = mlxsw_sp_port_pause_set(mlxsw_sp_port, pause);
1784        if (err) {
1785                netdev_err(dev, "Failed to set PAUSE parameters\n");
1786                goto err_port_pause_configure;
1787        }
1788
1789        mlxsw_sp_port->link.rx_pause = pause->rx_pause;
1790        mlxsw_sp_port->link.tx_pause = pause->tx_pause;
1791
1792        return 0;
1793
1794err_port_pause_configure:
1795        pause_en = mlxsw_sp_port_is_pause_en(mlxsw_sp_port);
1796        mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en);
1797        return err;
1798}
1799
1800struct mlxsw_sp_port_hw_stats {
1801        char str[ETH_GSTRING_LEN];
1802        u64 (*getter)(const char *payload);
1803        bool cells_bytes;
1804};
1805
1806static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_stats[] = {
1807        {
1808                .str = "a_frames_transmitted_ok",
1809                .getter = mlxsw_reg_ppcnt_a_frames_transmitted_ok_get,
1810        },
1811        {
1812                .str = "a_frames_received_ok",
1813                .getter = mlxsw_reg_ppcnt_a_frames_received_ok_get,
1814        },
1815        {
1816                .str = "a_frame_check_sequence_errors",
1817                .getter = mlxsw_reg_ppcnt_a_frame_check_sequence_errors_get,
1818        },
1819        {
1820                .str = "a_alignment_errors",
1821                .getter = mlxsw_reg_ppcnt_a_alignment_errors_get,
1822        },
1823        {
1824                .str = "a_octets_transmitted_ok",
1825                .getter = mlxsw_reg_ppcnt_a_octets_transmitted_ok_get,
1826        },
1827        {
1828                .str = "a_octets_received_ok",
1829                .getter = mlxsw_reg_ppcnt_a_octets_received_ok_get,
1830        },
1831        {
1832                .str = "a_multicast_frames_xmitted_ok",
1833                .getter = mlxsw_reg_ppcnt_a_multicast_frames_xmitted_ok_get,
1834        },
1835        {
1836                .str = "a_broadcast_frames_xmitted_ok",
1837                .getter = mlxsw_reg_ppcnt_a_broadcast_frames_xmitted_ok_get,
1838        },
1839        {
1840                .str = "a_multicast_frames_received_ok",
1841                .getter = mlxsw_reg_ppcnt_a_multicast_frames_received_ok_get,
1842        },
1843        {
1844                .str = "a_broadcast_frames_received_ok",
1845                .getter = mlxsw_reg_ppcnt_a_broadcast_frames_received_ok_get,
1846        },
1847        {
1848                .str = "a_in_range_length_errors",
1849                .getter = mlxsw_reg_ppcnt_a_in_range_length_errors_get,
1850        },
1851        {
1852                .str = "a_out_of_range_length_field",
1853                .getter = mlxsw_reg_ppcnt_a_out_of_range_length_field_get,
1854        },
1855        {
1856                .str = "a_frame_too_long_errors",
1857                .getter = mlxsw_reg_ppcnt_a_frame_too_long_errors_get,
1858        },
1859        {
1860                .str = "a_symbol_error_during_carrier",
1861                .getter = mlxsw_reg_ppcnt_a_symbol_error_during_carrier_get,
1862        },
1863        {
1864                .str = "a_mac_control_frames_transmitted",
1865                .getter = mlxsw_reg_ppcnt_a_mac_control_frames_transmitted_get,
1866        },
1867        {
1868                .str = "a_mac_control_frames_received",
1869                .getter = mlxsw_reg_ppcnt_a_mac_control_frames_received_get,
1870        },
1871        {
1872                .str = "a_unsupported_opcodes_received",
1873                .getter = mlxsw_reg_ppcnt_a_unsupported_opcodes_received_get,
1874        },
1875        {
1876                .str = "a_pause_mac_ctrl_frames_received",
1877                .getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_received_get,
1878        },
1879        {
1880                .str = "a_pause_mac_ctrl_frames_xmitted",
1881                .getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_transmitted_get,
1882        },
1883};
1884
1885#define MLXSW_SP_PORT_HW_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_stats)
1886
1887static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_prio_stats[] = {
1888        {
1889                .str = "rx_octets_prio",
1890                .getter = mlxsw_reg_ppcnt_rx_octets_get,
1891        },
1892        {
1893                .str = "rx_frames_prio",
1894                .getter = mlxsw_reg_ppcnt_rx_frames_get,
1895        },
1896        {
1897                .str = "tx_octets_prio",
1898                .getter = mlxsw_reg_ppcnt_tx_octets_get,
1899        },
1900        {
1901                .str = "tx_frames_prio",
1902                .getter = mlxsw_reg_ppcnt_tx_frames_get,
1903        },
1904        {
1905                .str = "rx_pause_prio",
1906                .getter = mlxsw_reg_ppcnt_rx_pause_get,
1907        },
1908        {
1909                .str = "rx_pause_duration_prio",
1910                .getter = mlxsw_reg_ppcnt_rx_pause_duration_get,
1911        },
1912        {
1913                .str = "tx_pause_prio",
1914                .getter = mlxsw_reg_ppcnt_tx_pause_get,
1915        },
1916        {
1917                .str = "tx_pause_duration_prio",
1918                .getter = mlxsw_reg_ppcnt_tx_pause_duration_get,
1919        },
1920};
1921
1922#define MLXSW_SP_PORT_HW_PRIO_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_prio_stats)
1923
1924static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_tc_stats[] = {
1925        {
1926                .str = "tc_transmit_queue_tc",
1927                .getter = mlxsw_reg_ppcnt_tc_transmit_queue_get,
1928                .cells_bytes = true,
1929        },
1930        {
1931                .str = "tc_no_buffer_discard_uc_tc",
1932                .getter = mlxsw_reg_ppcnt_tc_no_buffer_discard_uc_get,
1933        },
1934};
1935
1936#define MLXSW_SP_PORT_HW_TC_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_tc_stats)
1937
1938#define MLXSW_SP_PORT_ETHTOOL_STATS_LEN (MLXSW_SP_PORT_HW_STATS_LEN + \
1939                                         (MLXSW_SP_PORT_HW_PRIO_STATS_LEN + \
1940                                          MLXSW_SP_PORT_HW_TC_STATS_LEN) * \
1941                                         IEEE_8021QAZ_MAX_TCS)
1942
1943static void mlxsw_sp_port_get_prio_strings(u8 **p, int prio)
1944{
1945        int i;
1946
1947        for (i = 0; i < MLXSW_SP_PORT_HW_PRIO_STATS_LEN; i++) {
1948                snprintf(*p, ETH_GSTRING_LEN, "%s_%d",
1949                         mlxsw_sp_port_hw_prio_stats[i].str, prio);
1950                *p += ETH_GSTRING_LEN;
1951        }
1952}
1953
1954static void mlxsw_sp_port_get_tc_strings(u8 **p, int tc)
1955{
1956        int i;
1957
1958        for (i = 0; i < MLXSW_SP_PORT_HW_TC_STATS_LEN; i++) {
1959                snprintf(*p, ETH_GSTRING_LEN, "%s_%d",
1960                         mlxsw_sp_port_hw_tc_stats[i].str, tc);
1961                *p += ETH_GSTRING_LEN;
1962        }
1963}
1964
1965static void mlxsw_sp_port_get_strings(struct net_device *dev,
1966                                      u32 stringset, u8 *data)
1967{
1968        u8 *p = data;
1969        int i;
1970
1971        switch (stringset) {
1972        case ETH_SS_STATS:
1973                for (i = 0; i < MLXSW_SP_PORT_HW_STATS_LEN; i++) {
1974                        memcpy(p, mlxsw_sp_port_hw_stats[i].str,
1975                               ETH_GSTRING_LEN);
1976                        p += ETH_GSTRING_LEN;
1977                }
1978
1979                for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
1980                        mlxsw_sp_port_get_prio_strings(&p, i);
1981
1982                for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
1983                        mlxsw_sp_port_get_tc_strings(&p, i);
1984
1985                break;
1986        }
1987}
1988
1989static int mlxsw_sp_port_set_phys_id(struct net_device *dev,
1990                                     enum ethtool_phys_id_state state)
1991{
1992        struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1993        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1994        char mlcr_pl[MLXSW_REG_MLCR_LEN];
1995        bool active;
1996
1997        switch (state) {
1998        case ETHTOOL_ID_ACTIVE:
1999                active = true;
2000                break;
2001        case ETHTOOL_ID_INACTIVE:
2002                active = false;
2003                break;
2004        default:
2005                return -EOPNOTSUPP;
2006        }
2007
2008        mlxsw_reg_mlcr_pack(mlcr_pl, mlxsw_sp_port->local_port, active);
2009        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mlcr), mlcr_pl);
2010}
2011
2012static int
2013mlxsw_sp_get_hw_stats_by_group(struct mlxsw_sp_port_hw_stats **p_hw_stats,
2014                               int *p_len, enum mlxsw_reg_ppcnt_grp grp)
2015{
2016        switch (grp) {
2017        case MLXSW_REG_PPCNT_IEEE_8023_CNT:
2018                *p_hw_stats = mlxsw_sp_port_hw_stats;
2019                *p_len = MLXSW_SP_PORT_HW_STATS_LEN;
2020                break;
2021        case MLXSW_REG_PPCNT_PRIO_CNT:
2022                *p_hw_stats = mlxsw_sp_port_hw_prio_stats;
2023                *p_len = MLXSW_SP_PORT_HW_PRIO_STATS_LEN;
2024                break;
2025        case MLXSW_REG_PPCNT_TC_CNT:
2026                *p_hw_stats = mlxsw_sp_port_hw_tc_stats;
2027                *p_len = MLXSW_SP_PORT_HW_TC_STATS_LEN;
2028                break;
2029        default:
2030                WARN_ON(1);
2031                return -EOPNOTSUPP;
2032        }
2033        return 0;
2034}
2035
2036static void __mlxsw_sp_port_get_stats(struct net_device *dev,
2037                                      enum mlxsw_reg_ppcnt_grp grp, int prio,
2038                                      u64 *data, int data_index)
2039{
2040        struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
2041        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2042        struct mlxsw_sp_port_hw_stats *hw_stats;
2043        char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
2044        int i, len;
2045        int err;
2046
2047        err = mlxsw_sp_get_hw_stats_by_group(&hw_stats, &len, grp);
2048        if (err)
2049                return;
2050        mlxsw_sp_port_get_stats_raw(dev, grp, prio, ppcnt_pl);
2051        for (i = 0; i < len; i++) {
2052                data[data_index + i] = hw_stats[i].getter(ppcnt_pl);
2053                if (!hw_stats[i].cells_bytes)
2054                        continue;
2055                data[data_index + i] = mlxsw_sp_cells_bytes(mlxsw_sp,
2056                                                            data[data_index + i]);
2057        }
2058}
2059
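    /* ethtool -S layout: the IEEE 802.3 counters come first, followed by
     * the per-priority and then the per-TC counter groups, each repeated
     * for all eight priorities/TCs. The order here must match the string
     * table built in mlxsw_sp_port_get_strings().
     */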
2060static void mlxsw_sp_port_get_stats(struct net_device *dev,
2061                                    struct ethtool_stats *stats, u64 *data)
2062{
2063        int i, data_index = 0;
2064
2065        /* IEEE 802.3 Counters */
2066        __mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_IEEE_8023_CNT, 0,
2067                                  data, data_index);
2068        data_index = MLXSW_SP_PORT_HW_STATS_LEN;
2069
2070        /* Per-Priority Counters */
2071        for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
2072                __mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_PRIO_CNT, i,
2073                                          data, data_index);
2074                data_index += MLXSW_SP_PORT_HW_PRIO_STATS_LEN;
2075        }
2076
2077        /* Per-TC Counters */
2078        for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
2079                __mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_TC_CNT, i,
2080                                          data, data_index);
2081                data_index += MLXSW_SP_PORT_HW_TC_STATS_LEN;
2082        }
2083}
2084
2085static int mlxsw_sp_port_get_sset_count(struct net_device *dev, int sset)
2086{
2087        switch (sset) {
2088        case ETH_SS_STATS:
2089                return MLXSW_SP_PORT_ETHTOOL_STATS_LEN;
2090        default:
2091                return -EOPNOTSUPP;
2092        }
2093}
2094
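    /* Each entry maps one or more PTYS Ethernet protocol bits to a single
     * ethtool link mode bit and its speed; the helpers below walk this
     * table in both directions when translating between the device and
     * ethtool representations.
     */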
2095struct mlxsw_sp_port_link_mode {
2096        enum ethtool_link_mode_bit_indices mask_ethtool;
2097        u32 mask;
2098        u32 speed;
2099};
2100
2101static const struct mlxsw_sp_port_link_mode mlxsw_sp_port_link_mode[] = {
2102        {
2103                .mask           = MLXSW_REG_PTYS_ETH_SPEED_100BASE_T,
2104                .mask_ethtool   = ETHTOOL_LINK_MODE_100baseT_Full_BIT,
2105                .speed          = SPEED_100,
2106        },
2107        {
2108                .mask           = MLXSW_REG_PTYS_ETH_SPEED_SGMII |
2109                                  MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX,
2110                .mask_ethtool   = ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
2111                .speed          = SPEED_1000,
2112        },
2113        {
2114                .mask           = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_T,
2115                .mask_ethtool   = ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
2116                .speed          = SPEED_10000,
2117        },
2118        {
2119                .mask           = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CX4 |
2120                                  MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4,
2121                .mask_ethtool   = ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT,
2122                .speed          = SPEED_10000,
2123        },
2124        {
2125                .mask           = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
2126                                  MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
2127                                  MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
2128                                  MLXSW_REG_PTYS_ETH_SPEED_10GBASE_ER_LR,
2129                .mask_ethtool   = ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
2130                .speed          = SPEED_10000,
2131        },
2132        {
2133                .mask           = MLXSW_REG_PTYS_ETH_SPEED_20GBASE_KR2,
2134                .mask_ethtool   = ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT,
2135                .speed          = SPEED_20000,
2136        },
2137        {
2138                .mask           = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4,
2139                .mask_ethtool   = ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
2140                .speed          = SPEED_40000,
2141        },
2142        {
2143                .mask           = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4,
2144                .mask_ethtool   = ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
2145                .speed          = SPEED_40000,
2146        },
2147        {
2148                .mask           = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4,
2149                .mask_ethtool   = ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
2150                .speed          = SPEED_40000,
2151        },
2152        {
2153                .mask           = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_LR4_ER4,
2154                .mask_ethtool   = ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
2155                .speed          = SPEED_40000,
2156        },
2157        {
2158                .mask           = MLXSW_REG_PTYS_ETH_SPEED_25GBASE_CR,
2159                .mask_ethtool   = ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
2160                .speed          = SPEED_25000,
2161        },
2162        {
2163                .mask           = MLXSW_REG_PTYS_ETH_SPEED_25GBASE_KR,
2164                .mask_ethtool   = ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
2165                .speed          = SPEED_25000,
2166        },
2167        {
2168                .mask           = MLXSW_REG_PTYS_ETH_SPEED_25GBASE_SR,
2169                .mask_ethtool   = ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
2170                .speed          = SPEED_25000,
2171        },
2177        {
2178                .mask           = MLXSW_REG_PTYS_ETH_SPEED_50GBASE_CR2,
2179                .mask_ethtool   = ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
2180                .speed          = SPEED_50000,
2181        },
2182        {
2183                .mask           = MLXSW_REG_PTYS_ETH_SPEED_50GBASE_KR2,
2184                .mask_ethtool   = ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
2185                .speed          = SPEED_50000,
2186        },
2187        {
2188                .mask           = MLXSW_REG_PTYS_ETH_SPEED_50GBASE_SR2,
2189                .mask_ethtool   = ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
2190                .speed          = SPEED_50000,
2191        },
2192        {
2193                .mask           = MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4,
2194                .mask_ethtool   = ETHTOOL_LINK_MODE_56000baseKR4_Full_BIT,
2195                .speed          = SPEED_56000,
2196        },
2197        {
2198                .mask           = MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4,
2199                .mask_ethtool   = ETHTOOL_LINK_MODE_56000baseCR4_Full_BIT,
2200                .speed          = SPEED_56000,
2201        },
2202        {
2203                .mask           = MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4,
2204                .mask_ethtool   = ETHTOOL_LINK_MODE_56000baseSR4_Full_BIT,
2205                .speed          = SPEED_56000,
2206        },
2207        {
2208                .mask           = MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4,
2209                .mask_ethtool   = ETHTOOL_LINK_MODE_56000baseLR4_Full_BIT,
2210                .speed          = SPEED_56000,
2211        },
2212        {
2213                .mask           = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4,
2214                .mask_ethtool   = ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
2215                .speed          = SPEED_100000,
2216        },
2217        {
2218                .mask           = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4,
2219                .mask_ethtool   = ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
2220                .speed          = SPEED_100000,
2221        },
2222        {
2223                .mask           = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4,
2224                .mask_ethtool   = ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
2225                .speed          = SPEED_100000,
2226        },
2227        {
2228                .mask           = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_LR4_ER4,
2229                .mask_ethtool   = ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
2230                .speed          = SPEED_100000,
2231        },
2232};
2233
2234#define MLXSW_SP_PORT_LINK_MODE_LEN ARRAY_SIZE(mlxsw_sp_port_link_mode)
2235
2236static void
2237mlxsw_sp_from_ptys_supported_port(u32 ptys_eth_proto,
2238                                  struct ethtool_link_ksettings *cmd)
2239{
2240        if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
2241                              MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
2242                              MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 |
2243                              MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4 |
2244                              MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
2245                              MLXSW_REG_PTYS_ETH_SPEED_SGMII))
2246                ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE);
2247
2248        if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
2249                              MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4 |
2250                              MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 |
2251                              MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4 |
2252                              MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX))
2253                ethtool_link_ksettings_add_link_mode(cmd, supported, Backplane);
2254}
2255
2256static void mlxsw_sp_from_ptys_link(u32 ptys_eth_proto, unsigned long *mode)
2257{
2258        int i;
2259
2260        for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
2261                if (ptys_eth_proto & mlxsw_sp_port_link_mode[i].mask)
2262                        __set_bit(mlxsw_sp_port_link_mode[i].mask_ethtool,
2263                                  mode);
2264        }
2265}
2266
2267static void mlxsw_sp_from_ptys_speed_duplex(bool carrier_ok, u32 ptys_eth_proto,
2268                                            struct ethtool_link_ksettings *cmd)
2269{
2270        u32 speed = SPEED_UNKNOWN;
2271        u8 duplex = DUPLEX_UNKNOWN;
2272        int i;
2273
2274        if (!carrier_ok)
2275                goto out;
2276
2277        for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
2278                if (ptys_eth_proto & mlxsw_sp_port_link_mode[i].mask) {
2279                        speed = mlxsw_sp_port_link_mode[i].speed;
2280                        duplex = DUPLEX_FULL;
2281                        break;
2282                }
2283        }
2284out:
2285        cmd->base.speed = speed;
2286        cmd->base.duplex = duplex;
2287}
2288
2289static u8 mlxsw_sp_port_connector_port(u32 ptys_eth_proto)
2290{
2291        if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
2292                              MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4 |
2293                              MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
2294                              MLXSW_REG_PTYS_ETH_SPEED_SGMII))
2295                return PORT_FIBRE;
2296
2297        if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
2298                              MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 |
2299                              MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4))
2300                return PORT_DA;
2301
2302        if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
2303                              MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4 |
2304                              MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 |
2305                              MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4))
2306                return PORT_NONE;
2307
2308        return PORT_OTHER;
2309}
2310
2311static u32
2312mlxsw_sp_to_ptys_advert_link(const struct ethtool_link_ksettings *cmd)
2313{
2314        u32 ptys_proto = 0;
2315        int i;
2316
2317        for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
2318                if (test_bit(mlxsw_sp_port_link_mode[i].mask_ethtool,
2319                             cmd->link_modes.advertising))
2320                        ptys_proto |= mlxsw_sp_port_link_mode[i].mask;
2321        }
2322        return ptys_proto;
2323}
2324
2325static u32 mlxsw_sp_to_ptys_speed(u32 speed)
2326{
2327        u32 ptys_proto = 0;
2328        int i;
2329
2330        for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
2331                if (speed == mlxsw_sp_port_link_mode[i].speed)
2332                        ptys_proto |= mlxsw_sp_port_link_mode[i].mask;
2333        }
2334        return ptys_proto;
2335}
2336
2337static u32 mlxsw_sp_to_ptys_upper_speed(u32 upper_speed)
2338{
2339        u32 ptys_proto = 0;
2340        int i;
2341
2342        for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
2343                if (mlxsw_sp_port_link_mode[i].speed <= upper_speed)
2344                        ptys_proto |= mlxsw_sp_port_link_mode[i].mask;
2345        }
2346        return ptys_proto;
2347}
2348
2349static void mlxsw_sp_port_get_link_supported(u32 eth_proto_cap,
2350                                             struct ethtool_link_ksettings *cmd)
2351{
2352        ethtool_link_ksettings_add_link_mode(cmd, supported, Asym_Pause);
2353        ethtool_link_ksettings_add_link_mode(cmd, supported, Autoneg);
2354        ethtool_link_ksettings_add_link_mode(cmd, supported, Pause);
2355
2356        mlxsw_sp_from_ptys_supported_port(eth_proto_cap, cmd);
2357        mlxsw_sp_from_ptys_link(eth_proto_cap, cmd->link_modes.supported);
2358}
2359
2360static void mlxsw_sp_port_get_link_advertise(u32 eth_proto_admin, bool autoneg,
2361                                             struct ethtool_link_ksettings *cmd)
2362{
2363        if (!autoneg)
2364                return;
2365
2366        ethtool_link_ksettings_add_link_mode(cmd, advertising, Autoneg);
2367        mlxsw_sp_from_ptys_link(eth_proto_admin, cmd->link_modes.advertising);
2368}
2369
2370static void
2371mlxsw_sp_port_get_link_lp_advertise(u32 eth_proto_lp, u8 autoneg_status,
2372                                    struct ethtool_link_ksettings *cmd)
2373{
2374        if (autoneg_status != MLXSW_REG_PTYS_AN_STATUS_OK || !eth_proto_lp)
2375                return;
2376
2377        ethtool_link_ksettings_add_link_mode(cmd, lp_advertising, Autoneg);
2378        mlxsw_sp_from_ptys_link(eth_proto_lp, cmd->link_modes.lp_advertising);
2379}
2380
2381static int mlxsw_sp_port_get_link_ksettings(struct net_device *dev,
2382                                            struct ethtool_link_ksettings *cmd)
2383{
2384        u32 eth_proto_cap, eth_proto_admin, eth_proto_oper, eth_proto_lp;
2385        struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
2386        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2387        char ptys_pl[MLXSW_REG_PTYS_LEN];
2388        u8 autoneg_status;
2389        bool autoneg;
2390        int err;
2391
2392        autoneg = mlxsw_sp_port->link.autoneg;
2393        mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sp_port->local_port, 0, false);
2394        err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
2395        if (err)
2396                return err;
2397        mlxsw_reg_ptys_eth_unpack(ptys_pl, &eth_proto_cap, &eth_proto_admin,
2398                                  &eth_proto_oper);
2399
2400        mlxsw_sp_port_get_link_supported(eth_proto_cap, cmd);
2401
2402        mlxsw_sp_port_get_link_advertise(eth_proto_admin, autoneg, cmd);
2403
2404        eth_proto_lp = mlxsw_reg_ptys_eth_proto_lp_advertise_get(ptys_pl);
2405        autoneg_status = mlxsw_reg_ptys_an_status_get(ptys_pl);
2406        mlxsw_sp_port_get_link_lp_advertise(eth_proto_lp, autoneg_status, cmd);
2407
2408        cmd->base.autoneg = autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE;
2409        cmd->base.port = mlxsw_sp_port_connector_port(eth_proto_oper);
2410        mlxsw_sp_from_ptys_speed_duplex(netif_carrier_ok(dev), eth_proto_oper,
2411                                        cmd);
2412
2413        return 0;
2414}
2415
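    /* Forced-speed and autoneg configuration, e.g. (illustrative port name):
     *   ethtool -s swp1 speed 25000 autoneg off
     * The requested modes are masked against the port's capabilities and,
     * if the port is up, its admin state is toggled so that the new
     * protocol mask takes effect.
     */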
2416static int
2417mlxsw_sp_port_set_link_ksettings(struct net_device *dev,
2418                                 const struct ethtool_link_ksettings *cmd)
2419{
2420        struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
2421        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2422        char ptys_pl[MLXSW_REG_PTYS_LEN];
2423        u32 eth_proto_cap, eth_proto_new;
2424        bool autoneg;
2425        int err;
2426
2427        mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sp_port->local_port, 0, false);
2428        err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
2429        if (err)
2430                return err;
2431        mlxsw_reg_ptys_eth_unpack(ptys_pl, &eth_proto_cap, NULL, NULL);
2432
2433        autoneg = cmd->base.autoneg == AUTONEG_ENABLE;
2434        eth_proto_new = autoneg ?
2435                mlxsw_sp_to_ptys_advert_link(cmd) :
2436                mlxsw_sp_to_ptys_speed(cmd->base.speed);
2437
2438        eth_proto_new = eth_proto_new & eth_proto_cap;
2439        if (!eth_proto_new) {
2440                netdev_err(dev, "No supported speed requested\n");
2441                return -EINVAL;
2442        }
2443
2444        mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sp_port->local_port,
2445                                eth_proto_new, autoneg);
2446        err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
2447        if (err)
2448                return err;
2449
2450        if (!netif_running(dev))
2451                return 0;
2452
2453        mlxsw_sp_port->link.autoneg = autoneg;
2454
2455        mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
2456        mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);
2457
2458        return 0;
2459}
2460
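    /* Firmware flashing via "ethtool -f". RTNL is dropped for the duration
     * of the (slow) firmware request and flash, with a reference held on
     * the netdev so it cannot disappear meanwhile.
     */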
2461static int mlxsw_sp_flash_device(struct net_device *dev,
2462                                 struct ethtool_flash *flash)
2463{
2464        struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
2465        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2466        const struct firmware *firmware;
2467        int err;
2468
2469        if (flash->region != ETHTOOL_FLASH_ALL_REGIONS)
2470                return -EOPNOTSUPP;
2471
2472        dev_hold(dev);
2473        rtnl_unlock();
2474
2475        err = request_firmware_direct(&firmware, flash->data, &dev->dev);
2476        if (err)
2477                goto out;
2478        err = mlxsw_sp_firmware_flash(mlxsw_sp, firmware);
2479        release_firmware(firmware);
2480out:
2481        rtnl_lock();
2482        dev_put(dev);
2483        return err;
2484}
2485
2486#define MLXSW_SP_I2C_ADDR_LOW 0x50
2487#define MLXSW_SP_I2C_ADDR_HIGH 0x51
2488#define MLXSW_SP_EEPROM_PAGE_LENGTH 256
2489
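    /* Read a chunk of the port module's EEPROM through the MCIA register.
     * At most MLXSW_SP_REG_MCIA_EEPROM_SIZE bytes are read per transaction,
     * reads never cross the 256-byte page boundary, and offsets of 256 and
     * above are addressed through the high I2C address.
     */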
2490static int mlxsw_sp_query_module_eeprom(struct mlxsw_sp_port *mlxsw_sp_port,
2491                                        u16 offset, u16 size, void *data,
2492                                        unsigned int *p_read_size)
2493{
2494        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2495        char eeprom_tmp[MLXSW_SP_REG_MCIA_EEPROM_SIZE];
2496        char mcia_pl[MLXSW_REG_MCIA_LEN];
2497        u16 i2c_addr;
2498        int status;
2499        int err;
2500
2501        size = min_t(u16, size, MLXSW_SP_REG_MCIA_EEPROM_SIZE);
2502
2503        if (offset < MLXSW_SP_EEPROM_PAGE_LENGTH &&
2504            offset + size > MLXSW_SP_EEPROM_PAGE_LENGTH)
2505                /* Cross-page read: read only up to offset 256 in the low page */
2506                size = MLXSW_SP_EEPROM_PAGE_LENGTH - offset;
2507
2508        i2c_addr = MLXSW_SP_I2C_ADDR_LOW;
2509        if (offset >= MLXSW_SP_EEPROM_PAGE_LENGTH) {
2510                i2c_addr = MLXSW_SP_I2C_ADDR_HIGH;
2511                offset -= MLXSW_SP_EEPROM_PAGE_LENGTH;
2512        }
2513
2514        mlxsw_reg_mcia_pack(mcia_pl, mlxsw_sp_port->mapping.module,
2515                            0, 0, offset, size, i2c_addr);
2516
2517        err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mcia), mcia_pl);
2518        if (err)
2519                return err;
2520
2521        status = mlxsw_reg_mcia_status_get(mcia_pl);
2522        if (status)
2523                return -EIO;
2524
2525        mlxsw_reg_mcia_eeprom_memcpy_from(mcia_pl, eeprom_tmp);
2526        memcpy(data, eeprom_tmp, size);
2527        *p_read_size = size;
2528
2529        return 0;
2530}
2531
2532enum mlxsw_sp_eeprom_module_info_rev_id {
2533        MLXSW_SP_EEPROM_MODULE_INFO_REV_ID_UNSPC      = 0x00,
2534        MLXSW_SP_EEPROM_MODULE_INFO_REV_ID_8436       = 0x01,
2535        MLXSW_SP_EEPROM_MODULE_INFO_REV_ID_8636       = 0x03,
2536};
2537
2538enum mlxsw_sp_eeprom_module_info_id {
2539        MLXSW_SP_EEPROM_MODULE_INFO_ID_SFP              = 0x03,
2540        MLXSW_SP_EEPROM_MODULE_INFO_ID_QSFP             = 0x0C,
2541        MLXSW_SP_EEPROM_MODULE_INFO_ID_QSFP_PLUS        = 0x0D,
2542        MLXSW_SP_EEPROM_MODULE_INFO_ID_QSFP28           = 0x11,
2543};
2544
2545enum mlxsw_sp_eeprom_module_info {
2546        MLXSW_SP_EEPROM_MODULE_INFO_ID,
2547        MLXSW_SP_EEPROM_MODULE_INFO_REV_ID,
2548        MLXSW_SP_EEPROM_MODULE_INFO_SIZE,
2549};
2550
2551static int mlxsw_sp_get_module_info(struct net_device *netdev,
2552                                    struct ethtool_modinfo *modinfo)
2553{
2554        struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(netdev);
2555        u8 module_info[MLXSW_SP_EEPROM_MODULE_INFO_SIZE];
2556        u8 module_rev_id, module_id;
2557        unsigned int read_size;
2558        int err;
2559
2560        err = mlxsw_sp_query_module_eeprom(mlxsw_sp_port, 0,
2561                                           MLXSW_SP_EEPROM_MODULE_INFO_SIZE,
2562                                           module_info, &read_size);
2563        if (err)
2564                return err;
2565
2566        if (read_size < MLXSW_SP_EEPROM_MODULE_INFO_SIZE)
2567                return -EIO;
2568
2569        module_rev_id = module_info[MLXSW_SP_EEPROM_MODULE_INFO_REV_ID];
2570        module_id = module_info[MLXSW_SP_EEPROM_MODULE_INFO_ID];
2571
2572        switch (module_id) {
2573        case MLXSW_SP_EEPROM_MODULE_INFO_ID_QSFP:
2574                modinfo->type       = ETH_MODULE_SFF_8436;
2575                modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN;
2576                break;
2577        case MLXSW_SP_EEPROM_MODULE_INFO_ID_QSFP_PLUS:
2578        case MLXSW_SP_EEPROM_MODULE_INFO_ID_QSFP28:
2579                if (module_id == MLXSW_SP_EEPROM_MODULE_INFO_ID_QSFP28 ||
2580                    module_rev_id >= MLXSW_SP_EEPROM_MODULE_INFO_REV_ID_8636) {
2581                        modinfo->type       = ETH_MODULE_SFF_8636;
2582                        modinfo->eeprom_len = ETH_MODULE_SFF_8636_LEN;
2583                } else {
2584                        modinfo->type       = ETH_MODULE_SFF_8436;
2585                        modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN;
2586                }
2587                break;
2588        case MLXSW_SP_EEPROM_MODULE_INFO_ID_SFP:
2589                modinfo->type       = ETH_MODULE_SFF_8472;
2590                modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
2591                break;
2592        default:
2593                return -EINVAL;
2594        }
2595
2596        return 0;
2597}
2598
2599static int mlxsw_sp_get_module_eeprom(struct net_device *netdev,
2600                                      struct ethtool_eeprom *ee,
2601                                      u8 *data)
2602{
2603        struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(netdev);
2604        int offset = ee->offset;
2605        unsigned int read_size;
2606        int i = 0;
2607        int err;
2608
2609        if (!ee->len)
2610                return -EINVAL;
2611
2612        memset(data, 0, ee->len);
2613
2614        while (i < ee->len) {
2615                err = mlxsw_sp_query_module_eeprom(mlxsw_sp_port, offset,
2616                                                   ee->len - i, data + i,
2617                                                   &read_size);
2618                if (err) {
2619                        netdev_err(mlxsw_sp_port->dev, "Eeprom query failed\n");
2620                        return err;
2621                }
2622
2623                i += read_size;
2624                offset += read_size;
2625        }
2626
2627        return 0;
2628}
2629
2630static const struct ethtool_ops mlxsw_sp_port_ethtool_ops = {
2631        .get_drvinfo            = mlxsw_sp_port_get_drvinfo,
2632        .get_link               = ethtool_op_get_link,
2633        .get_pauseparam         = mlxsw_sp_port_get_pauseparam,
2634        .set_pauseparam         = mlxsw_sp_port_set_pauseparam,
2635        .get_strings            = mlxsw_sp_port_get_strings,
2636        .set_phys_id            = mlxsw_sp_port_set_phys_id,
2637        .get_ethtool_stats      = mlxsw_sp_port_get_stats,
2638        .get_sset_count         = mlxsw_sp_port_get_sset_count,
2639        .get_link_ksettings     = mlxsw_sp_port_get_link_ksettings,
2640        .set_link_ksettings     = mlxsw_sp_port_set_link_ksettings,
2641        .flash_device           = mlxsw_sp_flash_device,
2642        .get_module_info        = mlxsw_sp_get_module_info,
2643        .get_module_eeprom      = mlxsw_sp_get_module_eeprom,
2644};
2645
2646static int
2647mlxsw_sp_port_speed_by_width_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 width)
2648{
2649        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2650        u32 upper_speed = MLXSW_SP_PORT_BASE_SPEED * width;
2651        char ptys_pl[MLXSW_REG_PTYS_LEN];
2652        u32 eth_proto_admin;
2653
2654        eth_proto_admin = mlxsw_sp_to_ptys_upper_speed(upper_speed);
2655        mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sp_port->local_port,
2656                                eth_proto_admin, mlxsw_sp_port->link.autoneg);
2657        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
2658}
2659
2660int mlxsw_sp_port_ets_set(struct mlxsw_sp_port *mlxsw_sp_port,
2661                          enum mlxsw_reg_qeec_hr hr, u8 index, u8 next_index,
2662                          bool dwrr, u8 dwrr_weight)
2663{
2664        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2665        char qeec_pl[MLXSW_REG_QEEC_LEN];
2666
2667        mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
2668                            next_index);
2669        mlxsw_reg_qeec_de_set(qeec_pl, true);
2670        mlxsw_reg_qeec_dwrr_set(qeec_pl, dwrr);
2671        mlxsw_reg_qeec_dwrr_weight_set(qeec_pl, dwrr_weight);
2672        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
2673}
2674
2675int mlxsw_sp_port_ets_maxrate_set(struct mlxsw_sp_port *mlxsw_sp_port,
2676                                  enum mlxsw_reg_qeec_hr hr, u8 index,
2677                                  u8 next_index, u32 maxrate)
2678{
2679        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2680        char qeec_pl[MLXSW_REG_QEEC_LEN];
2681
2682        mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
2683                            next_index);
2684        mlxsw_reg_qeec_mase_set(qeec_pl, true);
2685        mlxsw_reg_qeec_max_shaper_rate_set(qeec_pl, maxrate);
2686        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
2687}
2688
2689int mlxsw_sp_port_prio_tc_set(struct mlxsw_sp_port *mlxsw_sp_port,
2690                              u8 switch_prio, u8 tclass)
2691{
2692        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2693        char qtct_pl[MLXSW_REG_QTCT_LEN];
2694
2695        mlxsw_reg_qtct_pack(qtct_pl, mlxsw_sp_port->local_port, switch_prio,
2696                            tclass);
2697        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qtct), qtct_pl);
2698}
2699
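    /* Default egress scheduling: build the port's ETS element hierarchy,
     * disable the max shaper in every hierarchy level that supports it and
     * map every switch priority to traffic class 0.
     */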
2700static int mlxsw_sp_port_ets_init(struct mlxsw_sp_port *mlxsw_sp_port)
2701{
2702        int err, i;
2703
2704        /* Set up the elements hierarchy, so that each TC is linked to
2705         * one subgroup, and all subgroups are members of the same group.
2706         */
2707        err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
2708                                    MLXSW_REG_QEEC_HIERARCY_GROUP, 0, 0, false,
2709                                    0);
2710        if (err)
2711                return err;
2712        for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
2713                err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
2714                                            MLXSW_REG_QEEC_HIERARCY_SUBGROUP, i,
2715                                            0, false, 0);
2716                if (err)
2717                        return err;
2718        }
2719        for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
2720                err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
2721                                            MLXSW_REG_QEEC_HIERARCY_TC, i, i,
2722                                            false, 0);
2723                if (err)
2724                        return err;
2725        }
2726
2727        /* Make sure the max shaper is disabled in all hierarchy levels
2728         * that support it.
2729         */
2730        err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
2731                                            MLXSW_REG_QEEC_HIERARCY_PORT, 0, 0,
2732                                            MLXSW_REG_QEEC_MAS_DIS);
2733        if (err)
2734                return err;
2735        for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
2736                err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
2737                                                    MLXSW_REG_QEEC_HIERARCY_SUBGROUP,
2738                                                    i, 0,
2739                                                    MLXSW_REG_QEEC_MAS_DIS);
2740                if (err)
2741                        return err;
2742        }
2743        for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
2744                err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
2745                                                    MLXSW_REG_QEEC_HIERARCY_TC,
2746                                                    i, i,
2747                                                    MLXSW_REG_QEEC_MAS_DIS);
2748                if (err)
2749                        return err;
2750        }
2751
2752        /* Map all priorities to traffic class 0. */
2753        for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
2754                err = mlxsw_sp_port_prio_tc_set(mlxsw_sp_port, i, 0);
2755                if (err)
2756                        return err;
2757        }
2758
2759        return 0;
2760}
2761
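    /* Create the netdev for a single front-panel port: allocate it along
     * with its per-CPU stats, map the port to its module lanes and SWID,
     * initialize the MAC address, advertised speeds and MTU, and apply the
     * default scheduling configuration.
     */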
2762static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
2763                                bool split, u8 module, u8 width, u8 lane)
2764{
2765        struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
2766        struct mlxsw_sp_port *mlxsw_sp_port;
2767        struct net_device *dev;
2768        int err;
2769
2770        err = mlxsw_core_port_init(mlxsw_sp->core, local_port);
2771        if (err) {
2772                dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to init core port\n",
2773                        local_port);
2774                return err;
2775        }
2776
2777        dev = alloc_etherdev(sizeof(struct mlxsw_sp_port));
2778        if (!dev) {
2779                err = -ENOMEM;
2780                goto err_alloc_etherdev;
2781        }
2782        SET_NETDEV_DEV(dev, mlxsw_sp->bus_info->dev);
2783        mlxsw_sp_port = netdev_priv(dev);
2784        mlxsw_sp_port->dev = dev;
2785        mlxsw_sp_port->mlxsw_sp = mlxsw_sp;
2786        mlxsw_sp_port->local_port = local_port;
2787        mlxsw_sp_port->pvid = 1;
2788        mlxsw_sp_port->split = split;
2789        mlxsw_sp_port->mapping.module = module;
2790        mlxsw_sp_port->mapping.width = width;
2791        mlxsw_sp_port->mapping.lane = lane;
2792        mlxsw_sp_port->link.autoneg = 1;
2793        INIT_LIST_HEAD(&mlxsw_sp_port->vlans_list);
2794        INIT_LIST_HEAD(&mlxsw_sp_port->mall_tc_list);
2795
2796        mlxsw_sp_port->pcpu_stats =
2797                netdev_alloc_pcpu_stats(struct mlxsw_sp_port_pcpu_stats);
2798        if (!mlxsw_sp_port->pcpu_stats) {
2799                err = -ENOMEM;
2800                goto err_alloc_stats;
2801        }
2802
2803        mlxsw_sp_port->sample = kzalloc(sizeof(*mlxsw_sp_port->sample),
2804                                        GFP_KERNEL);
2805        if (!mlxsw_sp_port->sample) {
2806                err = -ENOMEM;
2807                goto err_alloc_sample;
2808        }
2809
2810        INIT_DELAYED_WORK(&mlxsw_sp_port->periodic_hw_stats.update_dw,
2811                          &update_stats_cache);
2812
2813        dev->netdev_ops = &mlxsw_sp_port_netdev_ops;
2814        dev->ethtool_ops = &mlxsw_sp_port_ethtool_ops;
2815
2816        err = mlxsw_sp_port_module_map(mlxsw_sp_port, module, width, lane);
2817        if (err) {
2818                dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to map module\n",
2819                        mlxsw_sp_port->local_port);
2820                goto err_port_module_map;
2821        }
2822
2823        err = mlxsw_sp_port_swid_set(mlxsw_sp_port, 0);
2824        if (err) {
2825                dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set SWID\n",
2826                        mlxsw_sp_port->local_port);
2827                goto err_port_swid_set;
2828        }
2829
2830        err = mlxsw_sp_port_dev_addr_init(mlxsw_sp_port);
2831        if (err) {
2832                dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unable to init port mac address\n",
2833                        mlxsw_sp_port->local_port);
2834                goto err_dev_addr_init;
2835        }
2836
2837        netif_carrier_off(dev);
2838
2839        dev->features |= NETIF_F_NETNS_LOCAL | NETIF_F_LLTX | NETIF_F_SG |
2840                         NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_TC;
2841        dev->hw_features |= NETIF_F_HW_TC;
2842
2843        dev->min_mtu = 0;
2844        dev->max_mtu = ETH_MAX_MTU;
2845
2846        /* Each packet needs to have a Tx header (metadata) on top of all other
2847         * headers.
2848         */
2849        dev->needed_headroom = MLXSW_TXHDR_LEN;
2850
2851        err = mlxsw_sp_port_system_port_mapping_set(mlxsw_sp_port);
2852        if (err) {
2853                dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set system port mapping\n",
2854                        mlxsw_sp_port->local_port);
2855                goto err_port_system_port_mapping_set;
2856        }
2857
2858        err = mlxsw_sp_port_speed_by_width_set(mlxsw_sp_port, width);
2859        if (err) {
2860                dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to enable speeds\n",
2861                        mlxsw_sp_port->local_port);
2862                goto err_port_speed_by_width_set;
2863        }
2864
2865        err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, ETH_DATA_LEN);
2866        if (err) {
2867                dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set MTU\n",
2868                        mlxsw_sp_port->local_port);
2869                goto err_port_mtu_set;
2870        }
2871
2872        err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
2873        if (err)
2874                goto err_port_admin_status_set;
2875
2876        err = mlxsw_sp_port_buffers_init(mlxsw_sp_port);
2877        if (err) {
2878                dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize buffers\n",
2879                        mlxsw_sp_port->local_port);
2880                goto err_port_buffers_init;
2881        }
2882
2883        err = mlxsw_sp_port_ets_init(mlxsw_sp_port);
2884        if (err) {
2885                dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize ETS\n",
2886                        mlxsw_sp_port->local_port);
2887                goto err_port_ets_init;
2888        }
2889
2890        /* ETS and buffers must be initialized before DCB. */
2891        err = mlxsw_sp_port_dcb_init(mlxsw_sp_port);
2892        if (err) {
2893                dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize DCB\n",
2894                        mlxsw_sp_port->local_port);
2895                goto err_port_dcb_init;
2896        }
2897
2898        err = mlxsw_sp_port_fids_init(mlxsw_sp_port);
2899        if (err) {
2900                dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize FIDs\n",
2901                        mlxsw_sp_port->local_port);
2902                goto err_port_fids_init;
2903        }
2904
2905        err = mlxsw_sp_tc_qdisc_init(mlxsw_sp_port);
2906        if (err) {
2907                dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize TC qdiscs\n",
2908                        mlxsw_sp_port->local_port);
2909                goto err_port_qdiscs_init;
2910        }
2911
2912        mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_get(mlxsw_sp_port, 1);
2913        if (IS_ERR(mlxsw_sp_port_vlan)) {
2914                dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to create VID 1\n",
2915                        mlxsw_sp_port->local_port);
2916                err = PTR_ERR(mlxsw_sp_port_vlan);
2917                goto err_port_vlan_get;
2918        }
2919
2920        mlxsw_sp_port_switchdev_init(mlxsw_sp_port);
2921        mlxsw_sp->ports[local_port] = mlxsw_sp_port;
2922        err = register_netdev(dev);
2923        if (err) {
2924                dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to register netdev\n",
2925                        mlxsw_sp_port->local_port);
2926                goto err_register_netdev;
2927        }
2928
2929        mlxsw_core_port_eth_set(mlxsw_sp->core, mlxsw_sp_port->local_port,
2930                                mlxsw_sp_port, dev, mlxsw_sp_port->split,
2931                                module);
2932        mlxsw_core_schedule_dw(&mlxsw_sp_port->periodic_hw_stats.update_dw, 0);
2933        return 0;
2934
2935err_register_netdev:
2936        mlxsw_sp->ports[local_port] = NULL;
2937        mlxsw_sp_port_switchdev_fini(mlxsw_sp_port);
2938        mlxsw_sp_port_vlan_put(mlxsw_sp_port_vlan);
2939err_port_vlan_get:
2940        mlxsw_sp_tc_qdisc_fini(mlxsw_sp_port);
2941err_port_qdiscs_init:
2942        mlxsw_sp_port_fids_fini(mlxsw_sp_port);
2943err_port_fids_init:
2944        mlxsw_sp_port_dcb_fini(mlxsw_sp_port);
2945err_port_dcb_init:
2946err_port_ets_init:
2947err_port_buffers_init:
2948err_port_admin_status_set:
2949err_port_mtu_set:
2950err_port_speed_by_width_set:
2951err_port_system_port_mapping_set:
2952err_dev_addr_init:
2953        mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT);
2954err_port_swid_set:
2955        mlxsw_sp_port_module_unmap(mlxsw_sp_port);
2956err_port_module_map:
2957        kfree(mlxsw_sp_port->sample);
2958err_alloc_sample:
2959        free_percpu(mlxsw_sp_port->pcpu_stats);
2960err_alloc_stats:
2961        free_netdev(dev);
2962err_alloc_etherdev:
2963        mlxsw_core_port_fini(mlxsw_sp->core, local_port);
2964        return err;
2965}
2966
2967static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port)
2968{
2969        struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];
2970
2971        cancel_delayed_work_sync(&mlxsw_sp_port->periodic_hw_stats.update_dw);
2972        mlxsw_core_port_clear(mlxsw_sp->core, local_port, mlxsw_sp);
2973        unregister_netdev(mlxsw_sp_port->dev); /* This calls ndo_stop */
2974        mlxsw_sp->ports[local_port] = NULL;
2975        mlxsw_sp_port_switchdev_fini(mlxsw_sp_port);
2976        mlxsw_sp_port_vlan_flush(mlxsw_sp_port);
2977        mlxsw_sp_tc_qdisc_fini(mlxsw_sp_port);
2978        mlxsw_sp_port_fids_fini(mlxsw_sp_port);
2979        mlxsw_sp_port_dcb_fini(mlxsw_sp_port);
2980        mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT);
2981        mlxsw_sp_port_module_unmap(mlxsw_sp_port);
2982        kfree(mlxsw_sp_port->sample);
2983        free_percpu(mlxsw_sp_port->pcpu_stats);
2984        WARN_ON_ONCE(!list_empty(&mlxsw_sp_port->vlans_list));
2985        free_netdev(mlxsw_sp_port->dev);
2986        mlxsw_core_port_fini(mlxsw_sp->core, local_port);
2987}
2988
2989static bool mlxsw_sp_port_created(struct mlxsw_sp *mlxsw_sp, u8 local_port)
2990{
2991        return mlxsw_sp->ports[local_port] != NULL;
2992}
2993
2994static void mlxsw_sp_ports_remove(struct mlxsw_sp *mlxsw_sp)
2995{
2996        int i;
2997
2998        for (i = 1; i < mlxsw_core_max_ports(mlxsw_sp->core); i++)
2999                if (mlxsw_sp_port_created(mlxsw_sp, i))
3000                        mlxsw_sp_port_remove(mlxsw_sp, i);
3001        kfree(mlxsw_sp->port_to_module);
3002        kfree(mlxsw_sp->ports);
3003}
3004
3005static int mlxsw_sp_ports_create(struct mlxsw_sp *mlxsw_sp)
3006{
3007        unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sp->core);
3008        u8 module, width, lane;
3009        size_t alloc_size;
3010        int i;
3011        int err;
3012
3013        alloc_size = sizeof(struct mlxsw_sp_port *) * max_ports;
3014        mlxsw_sp->ports = kzalloc(alloc_size, GFP_KERNEL);
3015        if (!mlxsw_sp->ports)
3016                return -ENOMEM;
3017
3018        mlxsw_sp->port_to_module = kmalloc_array(max_ports, sizeof(int),
3019                                                 GFP_KERNEL);
3020        if (!mlxsw_sp->port_to_module) {
3021                err = -ENOMEM;
3022                goto err_port_to_module_alloc;
3023        }
3024
3025        for (i = 1; i < max_ports; i++) {
3026                /* Mark as invalid */
3027                mlxsw_sp->port_to_module[i] = -1;
3028
3029                err = mlxsw_sp_port_module_info_get(mlxsw_sp, i, &module,
3030                                                    &width, &lane);
3031                if (err)
3032                        goto err_port_module_info_get;
3033                if (!width)
3034                        continue;
3035                mlxsw_sp->port_to_module[i] = module;
3036                err = mlxsw_sp_port_create(mlxsw_sp, i, false,
3037                                           module, width, lane);
3038                if (err)
3039                        goto err_port_create;
3040        }
3041        return 0;
3042
3043err_port_create:
3044err_port_module_info_get:
3045        for (i--; i >= 1; i--)
3046                if (mlxsw_sp_port_created(mlxsw_sp, i))
3047                        mlxsw_sp_port_remove(mlxsw_sp, i);
3048        kfree(mlxsw_sp->port_to_module);
3049err_port_to_module_alloc:
3050        kfree(mlxsw_sp->ports);
3051        return err;
3052}
3053
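/* Local ports are grouped into clusters of MLXSW_SP_PORTS_PER_CLUSTER_MAX
 * consecutive ports (1-based); return the first local port of the cluster
 * that contains local_port.
 */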
3054static u8 mlxsw_sp_cluster_base_port_get(u8 local_port)
3055{
3056        u8 offset = (local_port - 1) % MLXSW_SP_PORTS_PER_CLUSTER_MAX;
3057
3058        return local_port - offset;
3059}
3060
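/* Create the split ports: each of the 'count' new ports gets an equal share
 * of the module's lanes (MLXSW_PORT_MODULE_MAX_WIDTH / count) and consecutive
 * local port numbers starting at base_port.
 */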
3061static int mlxsw_sp_port_split_create(struct mlxsw_sp *mlxsw_sp, u8 base_port,
3062                                      u8 module, unsigned int count)
3063{
3064        u8 width = MLXSW_PORT_MODULE_MAX_WIDTH / count;
3065        int err, i;
3066
3067        for (i = 0; i < count; i++) {
3068                err = mlxsw_sp_port_create(mlxsw_sp, base_port + i, true,
3069                                           module, width, i * width);
3070                if (err)
3071                        goto err_port_create;
3072        }
3073
3074        return 0;
3075
3076err_port_create:
3077        for (i--; i >= 0; i--)
3078                if (mlxsw_sp_port_created(mlxsw_sp, base_port + i))
3079                        mlxsw_sp_port_remove(mlxsw_sp, base_port + i);
3080        return err;
3081}
3082
3083static void mlxsw_sp_port_unsplit_create(struct mlxsw_sp *mlxsw_sp,
3084                                         u8 base_port, unsigned int count)
3085{
3086        u8 local_port, module, width = MLXSW_PORT_MODULE_MAX_WIDTH;
3087        int i;
3088
3089        /* A split by four means we need to re-create two ports, otherwise
3090         * only one.
3091         */
3092        count = count / 2;
3093
3094        for (i = 0; i < count; i++) {
3095                local_port = base_port + i * 2;
3096                if (mlxsw_sp->port_to_module[local_port] < 0)
3097                        continue;
3098                module = mlxsw_sp->port_to_module[local_port];
3099
3100                mlxsw_sp_port_create(mlxsw_sp, local_port, false, module,
3101                                     width, 0);
3102        }
3103}
3104
3105static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u8 local_port,
3106                               unsigned int count)
3107{
3108        struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
3109        struct mlxsw_sp_port *mlxsw_sp_port;
3110        u8 module, cur_width, base_port;
3111        int i;
3112        int err;
3113
3114        mlxsw_sp_port = mlxsw_sp->ports[local_port];
3115        if (!mlxsw_sp_port) {
3116                dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n",
3117                        local_port);
3118                return -EINVAL;
3119        }
3120
3121        module = mlxsw_sp_port->mapping.module;
3122        cur_width = mlxsw_sp_port->mapping.width;
3123
3124        if (count != 2 && count != 4) {
3125                netdev_err(mlxsw_sp_port->dev, "Port can only be split into 2 or 4 ports\n");
3126                return -EINVAL;
3127        }
3128
3129        if (cur_width != MLXSW_PORT_MODULE_MAX_WIDTH) {
3130                netdev_err(mlxsw_sp_port->dev, "Port cannot be split further\n");
3131                return -EINVAL;
3132        }
3133
3134        /* Make sure we have enough slave (even) ports for the split. */
3135        if (count == 2) {
3136                base_port = local_port;
3137                if (mlxsw_sp->ports[base_port + 1]) {
3138                        netdev_err(mlxsw_sp_port->dev, "Invalid split configuration\n");
3139                        return -EINVAL;
3140                }
3141        } else {
3142                base_port = mlxsw_sp_cluster_base_port_get(local_port);
3143                if (mlxsw_sp->ports[base_port + 1] ||
3144                    mlxsw_sp->ports[base_port + 3]) {
3145                        netdev_err(mlxsw_sp_port->dev, "Invalid split configuration\n");
3146                        return -EINVAL;
3147                }
3148        }
3149
3150        for (i = 0; i < count; i++)
3151                if (mlxsw_sp_port_created(mlxsw_sp, base_port + i))
3152                        mlxsw_sp_port_remove(mlxsw_sp, base_port + i);
3153
3154        err = mlxsw_sp_port_split_create(mlxsw_sp, base_port, module, count);
3155        if (err) {
3156                dev_err(mlxsw_sp->bus_info->dev, "Failed to create split ports\n");
3157                goto err_port_split_create;
3158        }
3159
3160        return 0;
3161
3162err_port_split_create:
3163        mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, count);
3164        return err;
3165}
3166
3167static int mlxsw_sp_port_unsplit(struct mlxsw_core *mlxsw_core, u8 local_port)
3168{
3169        struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
3170        struct mlxsw_sp_port *mlxsw_sp_port;
3171        u8 cur_width, base_port;
3172        unsigned int count;
3173        int i;
3174
3175        mlxsw_sp_port = mlxsw_sp->ports[local_port];
3176        if (!mlxsw_sp_port) {
3177                dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n",
3178                        local_port);
3179                return -EINVAL;
3180        }
3181
3182        if (!mlxsw_sp_port->split) {
3183                netdev_err(mlxsw_sp_port->dev, "Port wasn't split\n");
3184                return -EINVAL;
3185        }
3186
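        /* A single-lane port can only be the result of a split by four;
         * otherwise the original port was split by two.
         */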
3187        cur_width = mlxsw_sp_port->mapping.width;
3188        count = cur_width == 1 ? 4 : 2;
3189
3190        base_port = mlxsw_sp_cluster_base_port_get(local_port);
3191
3192        /* Determine which ports to remove. */
3193        if (count == 2 && local_port >= base_port + 2)
3194                base_port = base_port + 2;
3195
3196        for (i = 0; i < count; i++)
3197                if (mlxsw_sp_port_created(mlxsw_sp, base_port + i))
3198                        mlxsw_sp_port_remove(mlxsw_sp, base_port + i);
3199
3200        mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, count);
3201
3202        return 0;
3203}
3204
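/* PUDE (port up/down event) handler: propagate the operational status
 * reported by the device to the netdev carrier state.
 */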
3205static void mlxsw_sp_pude_event_func(const struct mlxsw_reg_info *reg,
3206                                     char *pude_pl, void *priv)
3207{
3208        struct mlxsw_sp *mlxsw_sp = priv;
3209        struct mlxsw_sp_port *mlxsw_sp_port;
3210        enum mlxsw_reg_pude_oper_status status;
3211        u8 local_port;
3212
3213        local_port = mlxsw_reg_pude_local_port_get(pude_pl);
3214        mlxsw_sp_port = mlxsw_sp->ports[local_port];
3215        if (!mlxsw_sp_port)
3216                return;
3217
3218        status = mlxsw_reg_pude_oper_status_get(pude_pl);
3219        if (status == MLXSW_PORT_OPER_STATUS_UP) {
3220                netdev_info(mlxsw_sp_port->dev, "link up\n");
3221                netif_carrier_on(mlxsw_sp_port->dev);
3222        } else {
3223                netdev_info(mlxsw_sp_port->dev, "link down\n");
3224                netif_carrier_off(mlxsw_sp_port->dev);
3225        }
3226}
3227
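/* Base Rx handler for trapped packets: attribute the skb to the ingress
 * port's netdev, update the per-CPU Rx counters and hand the packet to the
 * networking stack. The _mark variants below additionally set the offload
 * forwarding mark(s) so the kernel does not forward the packet again in
 * software.
 */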
3228static void mlxsw_sp_rx_listener_no_mark_func(struct sk_buff *skb,
3229                                              u8 local_port, void *priv)
3230{
3231        struct mlxsw_sp *mlxsw_sp = priv;
3232        struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];
3233        struct mlxsw_sp_port_pcpu_stats *pcpu_stats;
3234
3235        if (unlikely(!mlxsw_sp_port)) {
3236                dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: skb received for non-existent port\n",
3237                                     local_port);
3238                return;
3239        }
3240
3241        skb->dev = mlxsw_sp_port->dev;
3242
3243        pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats);
3244        u64_stats_update_begin(&pcpu_stats->syncp);
3245        pcpu_stats->rx_packets++;
3246        pcpu_stats->rx_bytes += skb->len;
3247        u64_stats_update_end(&pcpu_stats->syncp);
3248
3249        skb->protocol = eth_type_trans(skb, skb->dev);
3250        netif_receive_skb(skb);
3251}
3252
3253static void mlxsw_sp_rx_listener_mark_func(struct sk_buff *skb, u8 local_port,
3254                                           void *priv)
3255{
3256        skb->offload_fwd_mark = 1;
3257        return mlxsw_sp_rx_listener_no_mark_func(skb, local_port, priv);
3258}
3259
3260static void mlxsw_sp_rx_listener_mr_mark_func(struct sk_buff *skb,
3261                                              u8 local_port, void *priv)
3262{
3263        skb->offload_mr_fwd_mark = 1;
3264        skb->offload_fwd_mark = 1;
3265        return mlxsw_sp_rx_listener_no_mark_func(skb, local_port, priv);
3266}
3267
3268static void mlxsw_sp_rx_listener_sample_func(struct sk_buff *skb, u8 local_port,
3269                                             void *priv)
3270{
3271        struct mlxsw_sp *mlxsw_sp = priv;
3272        struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];
3273        struct psample_group *psample_group;
3274        u32 size;
3275
3276        if (unlikely(!mlxsw_sp_port)) {
3277                dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: sample skb received for non-existent port\n",
3278                                     local_port);
3279                goto out;
3280        }
3281        if (unlikely(!mlxsw_sp_port->sample)) {
3282                dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: sample skb received on unsupported port\n",
3283                                     local_port);
3284                goto out;
3285        }
3286
3287        size = mlxsw_sp_port->sample->truncate ?
3288                  mlxsw_sp_port->sample->trunc_size : skb->len;
3289
3290        rcu_read_lock();
3291        psample_group = rcu_dereference(mlxsw_sp_port->sample->psample_group);
3292        if (!psample_group)
3293                goto out_unlock;
3294        psample_sample_packet(psample_group, skb, size,
3295                              mlxsw_sp_port->dev->ifindex, 0,
3296                              mlxsw_sp_port->sample->rate);
3297out_unlock:
3298        rcu_read_unlock();
3299out:
3300        consume_skb(skb);
3301}
3302
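/* Listener definition helpers: wrap MLXSW_RXL() with the matching Rx handler
 * (no-mark, mark or multicast-route mark) and prefix the trap group name with
 * SP_. MLXSW_SP_EVENTL() does the same for event listeners.
 */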
3303#define MLXSW_SP_RXL_NO_MARK(_trap_id, _action, _trap_group, _is_ctrl)  \
3304        MLXSW_RXL(mlxsw_sp_rx_listener_no_mark_func, _trap_id, _action, \
3305                  _is_ctrl, SP_##_trap_group, DISCARD)
3306
3307#define MLXSW_SP_RXL_MARK(_trap_id, _action, _trap_group, _is_ctrl)     \
3308        MLXSW_RXL(mlxsw_sp_rx_listener_mark_func, _trap_id, _action,    \
3309                _is_ctrl, SP_##_trap_group, DISCARD)
3310
3311#define MLXSW_SP_RXL_MR_MARK(_trap_id, _action, _trap_group, _is_ctrl)  \
3312        MLXSW_RXL(mlxsw_sp_rx_listener_mr_mark_func, _trap_id, _action, \
3313                _is_ctrl, SP_##_trap_group, DISCARD)
3314
3315#define MLXSW_SP_EVENTL(_func, _trap_id)                \
3316        MLXSW_EVENTL(_func, _trap_id, SP_EVENT)
3317
3318static const struct mlxsw_listener mlxsw_sp_listener[] = {
3319        /* Events */
3320        MLXSW_SP_EVENTL(mlxsw_sp_pude_event_func, PUDE),
3321        /* L2 traps */
3322        MLXSW_SP_RXL_NO_MARK(STP, TRAP_TO_CPU, STP, true),
3323        MLXSW_SP_RXL_NO_MARK(LACP, TRAP_TO_CPU, LACP, true),
3324        MLXSW_SP_RXL_NO_MARK(LLDP, TRAP_TO_CPU, LLDP, true),
3325        MLXSW_SP_RXL_MARK(DHCP, MIRROR_TO_CPU, DHCP, false),
3326        MLXSW_SP_RXL_MARK(IGMP_QUERY, MIRROR_TO_CPU, IGMP, false),
3327        MLXSW_SP_RXL_NO_MARK(IGMP_V1_REPORT, TRAP_TO_CPU, IGMP, false),
3328        MLXSW_SP_RXL_NO_MARK(IGMP_V2_REPORT, TRAP_TO_CPU, IGMP, false),
3329        MLXSW_SP_RXL_NO_MARK(IGMP_V2_LEAVE, TRAP_TO_CPU, IGMP, false),
3330        MLXSW_SP_RXL_NO_MARK(IGMP_V3_REPORT, TRAP_TO_CPU, IGMP, false),
3331        MLXSW_SP_RXL_MARK(ARPBC, MIRROR_TO_CPU, ARP, false),
3332        MLXSW_SP_RXL_MARK(ARPUC, MIRROR_TO_CPU, ARP, false),
3333        MLXSW_SP_RXL_NO_MARK(FID_MISS, TRAP_TO_CPU, IP2ME, false),
3334        MLXSW_SP_RXL_MARK(IPV6_MLDV12_LISTENER_QUERY, MIRROR_TO_CPU, IPV6_MLD,
3335                          false),
3336        MLXSW_SP_RXL_NO_MARK(IPV6_MLDV1_LISTENER_REPORT, TRAP_TO_CPU, IPV6_MLD,
3337                             false),
3338        MLXSW_SP_RXL_NO_MARK(IPV6_MLDV1_LISTENER_DONE, TRAP_TO_CPU, IPV6_MLD,
3339                             false),
3340        MLXSW_SP_RXL_NO_MARK(IPV6_MLDV2_LISTENER_REPORT, TRAP_TO_CPU, IPV6_MLD,
3341                             false),
3342        /* L3 traps */
3343        MLXSW_SP_RXL_MARK(MTUERROR, TRAP_TO_CPU, ROUTER_EXP, false),
3344        MLXSW_SP_RXL_MARK(TTLERROR, TRAP_TO_CPU, ROUTER_EXP, false),
3345        MLXSW_SP_RXL_MARK(LBERROR, TRAP_TO_CPU, ROUTER_EXP, false),
3346        MLXSW_SP_RXL_MARK(IP2ME, TRAP_TO_CPU, IP2ME, false),
3347        MLXSW_SP_RXL_MARK(IPV6_UNSPECIFIED_ADDRESS, TRAP_TO_CPU, ROUTER_EXP,
3348                          false),
3349        MLXSW_SP_RXL_MARK(IPV6_LINK_LOCAL_DEST, TRAP_TO_CPU, ROUTER_EXP, false),
3350        MLXSW_SP_RXL_MARK(IPV6_LINK_LOCAL_SRC, TRAP_TO_CPU, ROUTER_EXP, false),
3351        MLXSW_SP_RXL_MARK(IPV6_ALL_NODES_LINK, TRAP_TO_CPU, ROUTER_EXP, false),
3352        MLXSW_SP_RXL_MARK(IPV6_ALL_ROUTERS_LINK, TRAP_TO_CPU, ROUTER_EXP,
3353                          false),
3354        MLXSW_SP_RXL_MARK(IPV4_OSPF, TRAP_TO_CPU, OSPF, false),
3355        MLXSW_SP_RXL_MARK(IPV6_OSPF, TRAP_TO_CPU, OSPF, false),
3356        MLXSW_SP_RXL_MARK(IPV6_DHCP, TRAP_TO_CPU, DHCP, false),
3357        MLXSW_SP_RXL_MARK(RTR_INGRESS0, TRAP_TO_CPU, REMOTE_ROUTE, false),
3358        MLXSW_SP_RXL_MARK(IPV4_BGP, TRAP_TO_CPU, BGP, false),
3359        MLXSW_SP_RXL_MARK(IPV6_BGP, TRAP_TO_CPU, BGP, false),
3360        MLXSW_SP_RXL_MARK(L3_IPV6_ROUTER_SOLICITATION, TRAP_TO_CPU, IPV6_ND,
3361                          false),
3362        MLXSW_SP_RXL_MARK(L3_IPV6_ROUTER_ADVERTISMENT, TRAP_TO_CPU, IPV6_ND,
3363                          false),
3364        MLXSW_SP_RXL_MARK(L3_IPV6_NEIGHBOR_SOLICITATION, TRAP_TO_CPU, IPV6_ND,
3365                          false),
3366        MLXSW_SP_RXL_MARK(L3_IPV6_NEIGHBOR_ADVERTISMENT, TRAP_TO_CPU, IPV6_ND,
3367                          false),
3368        MLXSW_SP_RXL_MARK(L3_IPV6_REDIRECTION, TRAP_TO_CPU, IPV6_ND, false),
3369        MLXSW_SP_RXL_MARK(IPV6_MC_LINK_LOCAL_DEST, TRAP_TO_CPU, ROUTER_EXP,
3370                          false),
3371        MLXSW_SP_RXL_MARK(HOST_MISS_IPV4, TRAP_TO_CPU, HOST_MISS, false),
3372        MLXSW_SP_RXL_MARK(HOST_MISS_IPV6, TRAP_TO_CPU, HOST_MISS, false),
3373        MLXSW_SP_RXL_MARK(ROUTER_ALERT_IPV4, TRAP_TO_CPU, ROUTER_EXP, false),
3374        MLXSW_SP_RXL_MARK(ROUTER_ALERT_IPV6, TRAP_TO_CPU, ROUTER_EXP, false),
3375        MLXSW_SP_RXL_MARK(IPIP_DECAP_ERROR, TRAP_TO_CPU, ROUTER_EXP, false),
3376        /* PKT Sample trap */
3377        MLXSW_RXL(mlxsw_sp_rx_listener_sample_func, PKT_SAMPLE, MIRROR_TO_CPU,
3378                  false, SP_IP2ME, DISCARD),
3379        /* ACL trap */
3380        MLXSW_SP_RXL_NO_MARK(ACL0, TRAP_TO_CPU, IP2ME, false),
3381        /* Multicast Router Traps */
3382        MLXSW_SP_RXL_MARK(IPV4_PIM, TRAP_TO_CPU, PIM, false),
3383        MLXSW_SP_RXL_MARK(IPV6_PIM, TRAP_TO_CPU, PIM, false),
3384        MLXSW_SP_RXL_MARK(RPF, TRAP_TO_CPU, RPF, false),
3385        MLXSW_SP_RXL_MARK(ACL1, TRAP_TO_CPU, MULTICAST, false),
3386        MLXSW_SP_RXL_MR_MARK(ACL2, TRAP_TO_CPU, MULTICAST, false),
3387};
3388
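/* Configure a CPU policer (QPCR) per trap group: control traffic such as
 * STP/LACP/LLDP gets a low packet rate, IGMP/MLD a higher one, and the IP2ME
 * group is policed in bytes rather than packets. Groups not listed in the
 * switch statement are left untouched.
 */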
3389static int mlxsw_sp_cpu_policers_set(struct mlxsw_core *mlxsw_core)
3390{
3391        char qpcr_pl[MLXSW_REG_QPCR_LEN];
3392        enum mlxsw_reg_qpcr_ir_units ir_units;
3393        int max_cpu_policers;
3394        bool is_bytes;
3395        u8 burst_size;
3396        u32 rate;
3397        int i, err;
3398
3399        if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_CPU_POLICERS))
3400                return -EIO;
3401
3402        max_cpu_policers = MLXSW_CORE_RES_GET(mlxsw_core, MAX_CPU_POLICERS);
3403
3404        ir_units = MLXSW_REG_QPCR_IR_UNITS_M;
3405        for (i = 0; i < max_cpu_policers; i++) {
3406                is_bytes = false;
3407                switch (i) {
3408                case MLXSW_REG_HTGT_TRAP_GROUP_SP_STP:
3409                case MLXSW_REG_HTGT_TRAP_GROUP_SP_LACP:
3410                case MLXSW_REG_HTGT_TRAP_GROUP_SP_LLDP:
3411                case MLXSW_REG_HTGT_TRAP_GROUP_SP_OSPF:
3412                case MLXSW_REG_HTGT_TRAP_GROUP_SP_PIM:
3413                case MLXSW_REG_HTGT_TRAP_GROUP_SP_RPF:
3414                        rate = 128;
3415                        burst_size = 7;
3416                        break;
3417                case MLXSW_REG_HTGT_TRAP_GROUP_SP_IGMP:
3418                case MLXSW_REG_HTGT_TRAP_GROUP_SP_IPV6_MLD:
3419                        rate = 16 * 1024;
3420                        burst_size = 10;
3421                        break;
3422                case MLXSW_REG_HTGT_TRAP_GROUP_SP_BGP:
3423                case MLXSW_REG_HTGT_TRAP_GROUP_SP_ARP:
3424                case MLXSW_REG_HTGT_TRAP_GROUP_SP_DHCP:
3425                case MLXSW_REG_HTGT_TRAP_GROUP_SP_HOST_MISS:
3426                case MLXSW_REG_HTGT_TRAP_GROUP_SP_ROUTER_EXP:
3427                case MLXSW_REG_HTGT_TRAP_GROUP_SP_REMOTE_ROUTE:
3428                case MLXSW_REG_HTGT_TRAP_GROUP_SP_IPV6_ND:
3429                case MLXSW_REG_HTGT_TRAP_GROUP_SP_MULTICAST:
3430                        rate = 1024;
3431                        burst_size = 7;
3432                        break;
3433                case MLXSW_REG_HTGT_TRAP_GROUP_SP_IP2ME:
3434                        is_bytes = true;
3435                        rate = 4 * 1024;
3436                        burst_size = 4;
3437                        break;
3438                default:
3439                        continue;
3440                }
3441
3442                mlxsw_reg_qpcr_pack(qpcr_pl, i, ir_units, is_bytes, rate,
3443                                    burst_size);
3444                err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(qpcr), qpcr_pl);
3445                if (err)
3446                        return err;
3447        }
3448
3449        return 0;
3450}
3451
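/* Bind each trap group (HTGT) to a CPU traffic class and priority and to the
 * policer configured above; the policer index equals the trap group index,
 * except for the event group, which uses no policer.
 */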
3452static int mlxsw_sp_trap_groups_set(struct mlxsw_core *mlxsw_core)
3453{
3454        char htgt_pl[MLXSW_REG_HTGT_LEN];
3455        enum mlxsw_reg_htgt_trap_group i;
3456        int max_cpu_policers;
3457        int max_trap_groups;
3458        u8 priority, tc;
3459        u16 policer_id;
3460        int err;
3461
3462        if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_TRAP_GROUPS))
3463                return -EIO;
3464
3465        max_trap_groups = MLXSW_CORE_RES_GET(mlxsw_core, MAX_TRAP_GROUPS);
3466        max_cpu_policers = MLXSW_CORE_RES_GET(mlxsw_core, MAX_CPU_POLICERS);
3467
3468        for (i = 0; i < max_trap_groups; i++) {
3469                policer_id = i;
3470                switch (i) {
3471                case MLXSW_REG_HTGT_TRAP_GROUP_SP_STP:
3472                case MLXSW_REG_HTGT_TRAP_GROUP_SP_LACP:
3473                case MLXSW_REG_HTGT_TRAP_GROUP_SP_LLDP:
3474                case MLXSW_REG_HTGT_TRAP_GROUP_SP_OSPF:
3475                case MLXSW_REG_HTGT_TRAP_GROUP_SP_PIM:
3476                        priority = 5;
3477                        tc = 5;
3478                        break;
3479                case MLXSW_REG_HTGT_TRAP_GROUP_SP_BGP:
3480                case MLXSW_REG_HTGT_TRAP_GROUP_SP_DHCP:
3481                        priority = 4;
3482                        tc = 4;
3483                        break;
3484                case MLXSW_REG_HTGT_TRAP_GROUP_SP_IGMP:
3485                case MLXSW_REG_HTGT_TRAP_GROUP_SP_IP2ME:
3486                case MLXSW_REG_HTGT_TRAP_GROUP_SP_IPV6_MLD:
3487                        priority = 3;
3488                        tc = 3;
3489                        break;
3490                case MLXSW_REG_HTGT_TRAP_GROUP_SP_ARP:
3491                case MLXSW_REG_HTGT_TRAP_GROUP_SP_IPV6_ND:
3492                case MLXSW_REG_HTGT_TRAP_GROUP_SP_RPF:
3493                        priority = 2;
3494                        tc = 2;
3495                        break;
3496                case MLXSW_REG_HTGT_TRAP_GROUP_SP_HOST_MISS:
3497                case MLXSW_REG_HTGT_TRAP_GROUP_SP_ROUTER_EXP:
3498                case MLXSW_REG_HTGT_TRAP_GROUP_SP_REMOTE_ROUTE:
3499                case MLXSW_REG_HTGT_TRAP_GROUP_SP_MULTICAST:
3500                        priority = 1;
3501                        tc = 1;
3502                        break;
3503                case MLXSW_REG_HTGT_TRAP_GROUP_SP_EVENT:
3504                        priority = MLXSW_REG_HTGT_DEFAULT_PRIORITY;
3505                        tc = MLXSW_REG_HTGT_DEFAULT_TC;
3506                        policer_id = MLXSW_REG_HTGT_INVALID_POLICER;
3507                        break;
3508                default:
3509                        continue;
3510                }
3511
3512                if (max_cpu_policers <= policer_id &&
3513                    policer_id != MLXSW_REG_HTGT_INVALID_POLICER)
3514                        return -EIO;
3515
3516                mlxsw_reg_htgt_pack(htgt_pl, i, policer_id, priority, tc);
3517                err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl);
3518                if (err)
3519                        return err;
3520        }
3521
3522        return 0;
3523}
3524
3525static int mlxsw_sp_traps_init(struct mlxsw_sp *mlxsw_sp)
3526{
3527        int i;
3528        int err;
3529
3530        err = mlxsw_sp_cpu_policers_set(mlxsw_sp->core);
3531        if (err)
3532                return err;
3533
3534        err = mlxsw_sp_trap_groups_set(mlxsw_sp->core);
3535        if (err)
3536                return err;
3537
3538        for (i = 0; i < ARRAY_SIZE(mlxsw_sp_listener); i++) {
3539                err = mlxsw_core_trap_register(mlxsw_sp->core,
3540                                               &mlxsw_sp_listener[i],
3541                                               mlxsw_sp);
3542                if (err)
3543                        goto err_listener_register;
3544
3546        return 0;
3547
3548err_listener_register:
3549        for (i--; i >= 0; i--) {
3550                mlxsw_core_trap_unregister(mlxsw_sp->core,
3551                                           &mlxsw_sp_listener[i],
3552                                           mlxsw_sp);
3553        }
3554        return err;
3555}
3556
3557static void mlxsw_sp_traps_fini(struct mlxsw_sp *mlxsw_sp)
3558{
3559        int i;
3560
3561        for (i = 0; i < ARRAY_SIZE(mlxsw_sp_listener); i++) {
3562                mlxsw_core_trap_unregister(mlxsw_sp->core,
3563                                           &mlxsw_sp_listener[i],
3564                                           mlxsw_sp);
3565        }
3566}
3567
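/* Set the fields used for LAG hashing (SLCR) and allocate the LAG array,
 * sized according to the MAX_LAG resource reported by the device.
 */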
3568static int mlxsw_sp_lag_init(struct mlxsw_sp *mlxsw_sp)
3569{
3570        char slcr_pl[MLXSW_REG_SLCR_LEN];
3571        int err;
3572
3573        mlxsw_reg_slcr_pack(slcr_pl, MLXSW_REG_SLCR_LAG_HASH_SMAC |
3574                                     MLXSW_REG_SLCR_LAG_HASH_DMAC |
3575                                     MLXSW_REG_SLCR_LAG_HASH_ETHERTYPE |
3576                                     MLXSW_REG_SLCR_LAG_HASH_VLANID |
3577                                     MLXSW_REG_SLCR_LAG_HASH_SIP |
3578                                     MLXSW_REG_SLCR_LAG_HASH_DIP |
3579                                     MLXSW_REG_SLCR_LAG_HASH_SPORT |
3580                                     MLXSW_REG_SLCR_LAG_HASH_DPORT |
3581                                     MLXSW_REG_SLCR_LAG_HASH_IPPROTO);
3582        err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcr), slcr_pl);
3583        if (err)
3584                return err;
3585
3586        if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LAG) ||
3587            !MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LAG_MEMBERS))
3588                return -EIO;
3589
3590        mlxsw_sp->lags = kcalloc(MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_LAG),
3591                                 sizeof(struct mlxsw_sp_upper),
3592                                 GFP_KERNEL);
3593        if (!mlxsw_sp->lags)
3594                return -ENOMEM;
3595
3596        return 0;
3597}
3598
3599static void mlxsw_sp_lag_fini(struct mlxsw_sp *mlxsw_sp)
3600{
3601        kfree(mlxsw_sp->lags);
3602}
3603
3604static int mlxsw_sp_basic_trap_groups_set(struct mlxsw_core *mlxsw_core)
3605{
3606        char htgt_pl[MLXSW_REG_HTGT_LEN];
3607
3608        mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_EMAD,
3609                            MLXSW_REG_HTGT_INVALID_POLICER,
3610                            MLXSW_REG_HTGT_DEFAULT_PRIORITY,
3611                            MLXSW_REG_HTGT_DEFAULT_TC);
3612        return mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl);
3613}
3614
3615static int mlxsw_sp_netdevice_event(struct notifier_block *unused,
3616                                    unsigned long event, void *ptr);
3617
3618static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core,
3619                         const struct mlxsw_bus_info *mlxsw_bus_info)
3620{
3621        struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
3622        int err;
3623
3624        mlxsw_sp->core = mlxsw_core;
3625        mlxsw_sp->bus_info = mlxsw_bus_info;
3626
3627        err = mlxsw_sp_fw_rev_validate(mlxsw_sp);
3628        if (err) {
3629                dev_err(mlxsw_sp->bus_info->dev, "Could not upgrade firmware\n");
3630                return err;
3631        }
3632
3633        err = mlxsw_sp_base_mac_get(mlxsw_sp);
3634        if (err) {
3635                dev_err(mlxsw_sp->bus_info->dev, "Failed to get base mac\n");
3636                return err;
3637        }
3638
3639        err = mlxsw_sp_kvdl_init(mlxsw_sp);
3640        if (err) {
3641                dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize KVDL\n");
3642                return err;
3643        }
3644
3645        err = mlxsw_sp_fids_init(mlxsw_sp);
3646        if (err) {
3647                dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize FIDs\n");
3648                goto err_fids_init;
3649        }
3650
3651        err = mlxsw_sp_traps_init(mlxsw_sp);
3652        if (err) {
3653                dev_err(mlxsw_sp->bus_info->dev, "Failed to set traps\n");
3654                goto err_traps_init;
3655        }
3656
3657        err = mlxsw_sp_buffers_init(mlxsw_sp);
3658        if (err) {
3659                dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize buffers\n");
3660                goto err_buffers_init;
3661        }
3662
3663        err = mlxsw_sp_lag_init(mlxsw_sp);
3664        if (err) {
3665                dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize LAG\n");
3666                goto err_lag_init;
3667        }
3668
3669        err = mlxsw_sp_switchdev_init(mlxsw_sp);
3670        if (err) {
3671                dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize switchdev\n");
3672                goto err_switchdev_init;
3673        }
3674
3675        err = mlxsw_sp_counter_pool_init(mlxsw_sp);
3676        if (err) {
3677                dev_err(mlxsw_sp->bus_info->dev, "Failed to init counter pool\n");
3678                goto err_counter_pool_init;
3679        }
3680
3681        err = mlxsw_sp_afa_init(mlxsw_sp);
3682        if (err) {
3683                dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize ACL actions\n");
3684                goto err_afa_init;
3685        }
3686
3687        err = mlxsw_sp_span_init(mlxsw_sp);
3688        if (err) {
3689                dev_err(mlxsw_sp->bus_info->dev, "Failed to init span system\n");
3690                goto err_span_init;
3691        }
3692
3693        /* Initialize router after SPAN is initialized, so that the FIB and
3694         * neighbor event handlers can issue SPAN respin.
3695         */
3696        err = mlxsw_sp_router_init(mlxsw_sp);
3697        if (err) {
3698                dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize router\n");
3699                goto err_router_init;
3700        }
3701
3702        /* Initialize the netdevice notifier after router and SPAN are initialized,
3703         * so that the event handler can use router structures and call SPAN
3704         * respin.
3705         */
3706        mlxsw_sp->netdevice_nb.notifier_call = mlxsw_sp_netdevice_event;
3707        err = register_netdevice_notifier(&mlxsw_sp->netdevice_nb);
3708        if (err) {
3709                dev_err(mlxsw_sp->bus_info->dev, "Failed to register netdev notifier\n");
3710                goto err_netdev_notifier;
3711        }
3712
3713        err = mlxsw_sp_acl_init(mlxsw_sp);
3714        if (err) {
3715                dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize ACL\n");
3716                goto err_acl_init;
3717        }
3718
3719        err = mlxsw_sp_dpipe_init(mlxsw_sp);
3720        if (err) {
3721                dev_err(mlxsw_sp->bus_info->dev, "Failed to init pipeline debug\n");
3722                goto err_dpipe_init;
3723        }
3724
3725        err = mlxsw_sp_ports_create(mlxsw_sp);
3726        if (err) {
3727                dev_err(mlxsw_sp->bus_info->dev, "Failed to create ports\n");
3728                goto err_ports_create;
3729        }
3730
3731        return 0;
3732
3733err_ports_create:
3734        mlxsw_sp_dpipe_fini(mlxsw_sp);
3735err_dpipe_init:
3736        mlxsw_sp_acl_fini(mlxsw_sp);
3737err_acl_init:
3738        unregister_netdevice_notifier(&mlxsw_sp->netdevice_nb);
3739err_netdev_notifier:
3740        mlxsw_sp_router_fini(mlxsw_sp);
3741err_router_init:
3742        mlxsw_sp_span_fini(mlxsw_sp);
3743err_span_init:
3744        mlxsw_sp_afa_fini(mlxsw_sp);
3745err_afa_init:
3746        mlxsw_sp_counter_pool_fini(mlxsw_sp);
3747err_counter_pool_init:
3748        mlxsw_sp_switchdev_fini(mlxsw_sp);
3749err_switchdev_init:
3750        mlxsw_sp_lag_fini(mlxsw_sp);
3751err_lag_init:
3752        mlxsw_sp_buffers_fini(mlxsw_sp);
3753err_buffers_init:
3754        mlxsw_sp_traps_fini(mlxsw_sp);
3755err_traps_init:
3756        mlxsw_sp_fids_fini(mlxsw_sp);
3757err_fids_init:
3758        mlxsw_sp_kvdl_fini(mlxsw_sp);
3759        return err;
3760}
3761
3762static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core)
3763{
3764        struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
3765
3766        mlxsw_sp_ports_remove(mlxsw_sp);
3767        mlxsw_sp_dpipe_fini(mlxsw_sp);
3768        mlxsw_sp_acl_fini(mlxsw_sp);
3769        unregister_netdevice_notifier(&mlxsw_sp->netdevice_nb);
3770        mlxsw_sp_router_fini(mlxsw_sp);
3771        mlxsw_sp_span_fini(mlxsw_sp);
3772        mlxsw_sp_afa_fini(mlxsw_sp);
3773        mlxsw_sp_counter_pool_fini(mlxsw_sp);
3774        mlxsw_sp_switchdev_fini(mlxsw_sp);
3775        mlxsw_sp_lag_fini(mlxsw_sp);
3776        mlxsw_sp_buffers_fini(mlxsw_sp);
3777        mlxsw_sp_traps_fini(mlxsw_sp);
3778        mlxsw_sp_fids_fini(mlxsw_sp);
3779        mlxsw_sp_kvdl_fini(mlxsw_sp);
3780}
3781
3782static const struct mlxsw_config_profile mlxsw_sp_config_profile = {
3783        .used_max_mid                   = 1,
3784        .max_mid                        = MLXSW_SP_MID_MAX,
3785        .used_flood_tables              = 1,
3786        .used_flood_mode                = 1,
3787        .flood_mode                     = 3,
3788        .max_fid_offset_flood_tables    = 3,
3789        .fid_offset_flood_table_size    = VLAN_N_VID - 1,
3790        .max_fid_flood_tables           = 3,
3791        .fid_flood_table_size           = MLXSW_SP_FID_8021D_MAX,
3792        .used_max_ib_mc                 = 1,
3793        .max_ib_mc                      = 0,
3794        .used_max_pkey                  = 1,
3795        .max_pkey                       = 0,
3796        .used_kvd_sizes                 = 1,
3797        .kvd_hash_single_parts          = 59,
3798        .kvd_hash_double_parts          = 41,
3799        .kvd_linear_size                = MLXSW_SP_KVD_LINEAR_SIZE,
3800        .swid_config                    = {
3801                {
3802                        .used_type      = 1,
3803                        .type           = MLXSW_PORT_SWID_TYPE_ETH,
3804                }
3805        },
3806};
3807
3808static void
3809mlxsw_sp_resource_size_params_prepare(struct mlxsw_core *mlxsw_core,
3810                                      struct devlink_resource_size_params *kvd_size_params,
3811                                      struct devlink_resource_size_params *linear_size_params,
3812                                      struct devlink_resource_size_params *hash_double_size_params,
3813                                      struct devlink_resource_size_params *hash_single_size_params)
3814{
3815        u32 single_size_min = MLXSW_CORE_RES_GET(mlxsw_core,
3816                                                 KVD_SINGLE_MIN_SIZE);
3817        u32 double_size_min = MLXSW_CORE_RES_GET(mlxsw_core,
3818                                                 KVD_DOUBLE_MIN_SIZE);
3819        u32 kvd_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE);
3820        u32 linear_size_min = 0;
3821
3822        devlink_resource_size_params_init(kvd_size_params, kvd_size, kvd_size,
3823                                          MLXSW_SP_KVD_GRANULARITY,
3824                                          DEVLINK_RESOURCE_UNIT_ENTRY);
3825        devlink_resource_size_params_init(linear_size_params, linear_size_min,
3826                                          kvd_size - single_size_min -
3827                                          double_size_min,
3828                                          MLXSW_SP_KVD_GRANULARITY,
3829                                          DEVLINK_RESOURCE_UNIT_ENTRY);
3830        devlink_resource_size_params_init(hash_double_size_params,
3831                                          double_size_min,
3832                                          kvd_size - single_size_min -
3833                                          linear_size_min,
3834                                          MLXSW_SP_KVD_GRANULARITY,
3835                                          DEVLINK_RESOURCE_UNIT_ENTRY);
3836        devlink_resource_size_params_init(hash_single_size_params,
3837                                          single_size_min,
3838                                          kvd_size - double_size_min -
3839                                          linear_size_min,
3840                                          MLXSW_SP_KVD_GRANULARITY,
3841                                          DEVLINK_RESOURCE_UNIT_ENTRY);
3842}
3843
3844static int mlxsw_sp_resources_register(struct mlxsw_core *mlxsw_core)
3845{
3846        struct devlink *devlink = priv_to_devlink(mlxsw_core);
3847        struct devlink_resource_size_params hash_single_size_params;
3848        struct devlink_resource_size_params hash_double_size_params;
3849        struct devlink_resource_size_params linear_size_params;
3850        struct devlink_resource_size_params kvd_size_params;
3851        u32 kvd_size, single_size, double_size, linear_size;
3852        const struct mlxsw_config_profile *profile;
3853        int err;
3854
3855        profile = &mlxsw_sp_config_profile;
3856        if (!MLXSW_CORE_RES_VALID(mlxsw_core, KVD_SIZE))
3857                return -EIO;
3858
3859        mlxsw_sp_resource_size_params_prepare(mlxsw_core, &kvd_size_params,
3860                                              &linear_size_params,
3861                                              &hash_double_size_params,
3862                                              &hash_single_size_params);
3863
3864        kvd_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE);
3865        err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD,
3866                                        kvd_size, MLXSW_SP_RESOURCE_KVD,
3867                                        DEVLINK_RESOURCE_ID_PARENT_TOP,
3868                                        &kvd_size_params);
3869        if (err)
3870                return err;
3871
3872        linear_size = profile->kvd_linear_size;
3873        err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_LINEAR,
3874                                        linear_size,
3875                                        MLXSW_SP_RESOURCE_KVD_LINEAR,
3876                                        MLXSW_SP_RESOURCE_KVD,
3877                                        &linear_size_params);
3878        if (err)
3879                return err;
3880
3881        err = mlxsw_sp_kvdl_resources_register(mlxsw_core);
3882        if (err)
3883                return err;
3884
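        /* Default hash sizes: split the non-linear remainder of the KVD
         * between the double and single hash parts according to the
         * profile's parts ratio, rounded down to the KVD granularity.
         */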
3885        double_size = kvd_size - linear_size;
3886        double_size *= profile->kvd_hash_double_parts;
3887        double_size /= profile->kvd_hash_double_parts +
3888                       profile->kvd_hash_single_parts;
3889        double_size = rounddown(double_size, MLXSW_SP_KVD_GRANULARITY);
3890        err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_HASH_DOUBLE,
3891                                        double_size,
3892                                        MLXSW_SP_RESOURCE_KVD_HASH_DOUBLE,
3893                                        MLXSW_SP_RESOURCE_KVD,
3894                                        &hash_double_size_params);
3895        if (err)
3896                return err;
3897
3898        single_size = kvd_size - double_size - linear_size;
3899        err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_HASH_SINGLE,
3900                                        single_size,
3901                                        MLXSW_SP_RESOURCE_KVD_HASH_SINGLE,
3902                                        MLXSW_SP_RESOURCE_KVD,
3903                                        &hash_single_size_params);
3904        if (err)
3905                return err;
3906
3907        return 0;
3908}
3909
3910static int mlxsw_sp_kvd_sizes_get(struct mlxsw_core *mlxsw_core,
3911                                  const struct mlxsw_config_profile *profile,
3912                                  u64 *p_single_size, u64 *p_double_size,
3913                                  u64 *p_linear_size)
3914{
3915        struct devlink *devlink = priv_to_devlink(mlxsw_core);
3916        u32 double_size;
3917        int err;
3918
3919        if (!MLXSW_CORE_RES_VALID(mlxsw_core, KVD_SINGLE_MIN_SIZE) ||
3920            !MLXSW_CORE_RES_VALID(mlxsw_core, KVD_DOUBLE_MIN_SIZE))
3921                return -EIO;
3922
3923        /* The hash part is what is left of the KVD after the
3924         * linear part. It is split into the single and double
3925         * sizes according to the parts ratio from the profile.
3926         * Both sizes must be multiples of the granularity from
3927         * the profile. If the user provided the sizes, they are
3928         * obtained via devlink.
3929         */
3930        err = devlink_resource_size_get(devlink,
3931                                        MLXSW_SP_RESOURCE_KVD_LINEAR,
3932                                        p_linear_size);
3933        if (err)
3934                *p_linear_size = profile->kvd_linear_size;
3935
3936        err = devlink_resource_size_get(devlink,
3937                                        MLXSW_SP_RESOURCE_KVD_HASH_DOUBLE,
3938                                        p_double_size);
3939        if (err) {
3940                double_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) -
3941                              *p_linear_size;
3942                double_size *= profile->kvd_hash_double_parts;
3943                double_size /= profile->kvd_hash_double_parts +
3944                               profile->kvd_hash_single_parts;
3945                *p_double_size = rounddown(double_size,
3946                                           MLXSW_SP_KVD_GRANULARITY);
3947        }
3948
3949        err = devlink_resource_size_get(devlink,
3950                                        MLXSW_SP_RESOURCE_KVD_HASH_SINGLE,
3951                                        p_single_size);
3952        if (err)
3953                *p_single_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) -
3954                                 *p_double_size - *p_linear_size;
3955
3956        /* Check results are legal. */
3957        if (*p_single_size < MLXSW_CORE_RES_GET(mlxsw_core, KVD_SINGLE_MIN_SIZE) ||
3958            *p_double_size < MLXSW_CORE_RES_GET(mlxsw_core, KVD_DOUBLE_MIN_SIZE) ||
3959            MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) < *p_linear_size)
3960                return -EIO;
3961
3962        return 0;
3963}
3964
3965static struct mlxsw_driver mlxsw_sp_driver = {
3966        .kind                           = mlxsw_sp_driver_name,
3967        .priv_size                      = sizeof(struct mlxsw_sp),
3968        .init                           = mlxsw_sp_init,
3969        .fini                           = mlxsw_sp_fini,
3970        .basic_trap_groups_set          = mlxsw_sp_basic_trap_groups_set,
3971        .port_split                     = mlxsw_sp_port_split,
3972        .port_unsplit                   = mlxsw_sp_port_unsplit,
3973        .sb_pool_get                    = mlxsw_sp_sb_pool_get,
3974        .sb_pool_set                    = mlxsw_sp_sb_pool_set,
3975        .sb_port_pool_get               = mlxsw_sp_sb_port_pool_get,
3976        .sb_port_pool_set               = mlxsw_sp_sb_port_pool_set,
3977        .sb_tc_pool_bind_get            = mlxsw_sp_sb_tc_pool_bind_get,
3978        .sb_tc_pool_bind_set            = mlxsw_sp_sb_tc_pool_bind_set,
3979        .sb_occ_snapshot                = mlxsw_sp_sb_occ_snapshot,
3980        .sb_occ_max_clear               = mlxsw_sp_sb_occ_max_clear,
3981        .sb_occ_port_pool_get           = mlxsw_sp_sb_occ_port_pool_get,
3982        .sb_occ_tc_port_bind_get        = mlxsw_sp_sb_occ_tc_port_bind_get,
3983        .txhdr_construct                = mlxsw_sp_txhdr_construct,
3984        .resources_register             = mlxsw_sp_resources_register,
3985        .kvd_sizes_get                  = mlxsw_sp_kvd_sizes_get,
3986        .txhdr_len                      = MLXSW_TXHDR_LEN,
3987        .profile                        = &mlxsw_sp_config_profile,
3988        .res_query_enabled              = true,
3989};
3990
3991bool mlxsw_sp_port_dev_check(const struct net_device *dev)
3992{
3993        return dev->netdev_ops == &mlxsw_sp_port_netdev_ops;
3994}
3995
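/* netdev_walk_all_lower_dev() callback: returning a non-zero value stops the
 * walk once an mlxsw_sp port netdev has been found.
 */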
3996static int mlxsw_sp_lower_dev_walk(struct net_device *lower_dev, void *data)
3997{
3998        struct mlxsw_sp_port **p_mlxsw_sp_port = data;
3999        int ret = 0;
4000
4001        if (mlxsw_sp_port_dev_check(lower_dev)) {
4002                *p_mlxsw_sp_port = netdev_priv(lower_dev);
4003                ret = 1;
4004        }
4005
4006        return ret;
4007}
4008
4009struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find(struct net_device *dev)
4010{
4011        struct mlxsw_sp_port *mlxsw_sp_port;
4012
4013        if (mlxsw_sp_port_dev_check(dev))
4014                return netdev_priv(dev);
4015
4016        mlxsw_sp_port = NULL;
4017        netdev_walk_all_lower_dev(dev, mlxsw_sp_lower_dev_walk, &mlxsw_sp_port);
4018
4019        return mlxsw_sp_port;
4020}
4021
4022struct mlxsw_sp *mlxsw_sp_lower_get(struct net_device *dev)
4023{
4024        struct mlxsw_sp_port *mlxsw_sp_port;
4025
4026        mlxsw_sp_port = mlxsw_sp_port_dev_lower_find(dev);
4027        return mlxsw_sp_port ? mlxsw_sp_port->mlxsw_sp : NULL;
4028}
4029
4030struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find_rcu(struct net_device *dev)
4031{
4032        struct mlxsw_sp_port *mlxsw_sp_port;
4033
4034        if (mlxsw_sp_port_dev_check(dev))
4035                return netdev_priv(dev);
4036
4037        mlxsw_sp_port = NULL;
4038        netdev_walk_all_lower_dev_rcu(dev, mlxsw_sp_lower_dev_walk,
4039                                      &mlxsw_sp_port);
4040
4041        return mlxsw_sp_port;
4042}
4043
4044struct mlxsw_sp_port *mlxsw_sp_port_lower_dev_hold(struct net_device *dev)
4045{
4046        struct mlxsw_sp_port *mlxsw_sp_port;
4047
4048        rcu_read_lock();
4049        mlxsw_sp_port = mlxsw_sp_port_dev_lower_find_rcu(dev);
4050        if (mlxsw_sp_port)
4051                dev_hold(mlxsw_sp_port->dev);
4052        rcu_read_unlock();
4053        return mlxsw_sp_port;
4054}
4055
4056void mlxsw_sp_port_dev_put(struct mlxsw_sp_port *mlxsw_sp_port)
4057{
4058        dev_put(mlxsw_sp_port->dev);
4059}
4060
4061static int mlxsw_sp_lag_create(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
4062{
4063        char sldr_pl[MLXSW_REG_SLDR_LEN];
4064
4065        mlxsw_reg_sldr_lag_create_pack(sldr_pl, lag_id);
4066        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
4067}
4068
4069static int mlxsw_sp_lag_destroy(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
4070{
4071        char sldr_pl[MLXSW_REG_SLDR_LEN];
4072
4073        mlxsw_reg_sldr_lag_destroy_pack(sldr_pl, lag_id);
4074        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
4075}
4076
4077static int mlxsw_sp_lag_col_port_add(struct mlxsw_sp_port *mlxsw_sp_port,
4078                                     u16 lag_id, u8 port_index)
4079{
4080        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
4081        char slcor_pl[MLXSW_REG_SLCOR_LEN];
4082
4083        mlxsw_reg_slcor_port_add_pack(slcor_pl, mlxsw_sp_port->local_port,
4084                                      lag_id, port_index);
4085        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
4086}
4087
4088static int mlxsw_sp_lag_col_port_remove(struct mlxsw_sp_port *mlxsw_sp_port,
4089                                        u16 lag_id)
4090{
4091        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
4092        char slcor_pl[MLXSW_REG_SLCOR_LEN];
4093
4094        mlxsw_reg_slcor_port_remove_pack(slcor_pl, mlxsw_sp_port->local_port,
4095                                         lag_id);
4096        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
4097}
4098
4099static int mlxsw_sp_lag_col_port_enable(struct mlxsw_sp_port *mlxsw_sp_port,
4100                                        u16 lag_id)
4101{
4102        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
4103        char slcor_pl[MLXSW_REG_SLCOR_LEN];
4104
4105        mlxsw_reg_slcor_col_enable_pack(slcor_pl, mlxsw_sp_port->local_port,
4106                                        lag_id);
4107        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
4108}
4109
4110static int mlxsw_sp_lag_col_port_disable(struct mlxsw_sp_port *mlxsw_sp_port,
4111                                         u16 lag_id)
4112{
4113        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
4114        char slcor_pl[MLXSW_REG_SLCOR_LEN];
4115
4116        mlxsw_reg_slcor_col_disable_pack(slcor_pl, mlxsw_sp_port->local_port,
4117                                         lag_id);
4118        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
4119}
4120
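/* Map @lag_dev to a hardware LAG index: reuse the index already bound to this
 * device if it has members, otherwise hand out the first free slot. Returns
 * -EBUSY when all MAX_LAG entries are in use.
 */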
4121static int mlxsw_sp_lag_index_get(struct mlxsw_sp *mlxsw_sp,
4122                                  struct net_device *lag_dev,
4123                                  u16 *p_lag_id)
4124{
4125        struct mlxsw_sp_upper *lag;
4126        int free_lag_id = -1;
4127        u64 max_lag;
4128        int i;
4129
4130        max_lag = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_LAG);
4131        for (i = 0; i < max_lag; i++) {
4132                lag = mlxsw_sp_lag_get(mlxsw_sp, i);
4133                if (lag->ref_count) {
4134                        if (lag->dev == lag_dev) {
4135                                *p_lag_id = i;
4136                                return 0;
4137                        }
4138                } else if (free_lag_id < 0) {
4139                        free_lag_id = i;
4140                }
4141        }
4142        if (free_lag_id < 0)
4143                return -EBUSY;
4144        *p_lag_id = free_lag_id;
4145        return 0;
4146}
4147
4148static bool
4149mlxsw_sp_master_lag_check(struct mlxsw_sp *mlxsw_sp,
4150                          struct net_device *lag_dev,
4151                          struct netdev_lag_upper_info *lag_upper_info,
4152                          struct netlink_ext_ack *extack)
4153{
4154        u16 lag_id;
4155
4156        if (mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id) != 0) {
4157                NL_SET_ERR_MSG_MOD(extack, "Exceeded number of supported LAG devices");
4158                return false;
4159        }
4160        if (lag_upper_info->tx_type != NETDEV_LAG_TX_TYPE_HASH) {
4161                NL_SET_ERR_MSG_MOD(extack, "LAG device using unsupported Tx type");
4162                return false;
4163        }
4164        return true;
4165}
4166
4167static int mlxsw_sp_port_lag_index_get(struct mlxsw_sp *mlxsw_sp,
4168                                       u16 lag_id, u8 *p_port_index)
4169{
4170        u64 max_lag_members;
4171        int i;
4172
4173        max_lag_members = MLXSW_CORE_RES_GET(mlxsw_sp->core,
4174                                             MAX_LAG_MEMBERS);
4175        for (i = 0; i < max_lag_members; i++) {
4176                if (!mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i)) {
4177                        *p_port_index = i;
4178                        return 0;
4179                }
4180        }
4181        return -EBUSY;
4182}
4183
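/* Enslave a port to a LAG device: pick (or create) the hardware LAG, add the
 * port to its collector at a free member index, enable collection and record
 * the LAG-to-local-port mapping in the core.
 */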
4184static int mlxsw_sp_port_lag_join(struct mlxsw_sp_port *mlxsw_sp_port,
4185                                  struct net_device *lag_dev)
4186{
4187        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
4188        struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
4189        struct mlxsw_sp_upper *lag;
4190        u16 lag_id;
4191        u8 port_index;
4192        int err;
4193
4194        err = mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id);
4195        if (err)
4196                return err;
4197        lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id);
4198        if (!lag->ref_count) {
4199                err = mlxsw_sp_lag_create(mlxsw_sp, lag_id);
4200                if (err)
4201                        return err;
4202                lag->dev = lag_dev;
4203        }
4204
4205        err = mlxsw_sp_port_lag_index_get(mlxsw_sp, lag_id, &port_index);
4206        if (err)
4207                return err;
4208        err = mlxsw_sp_lag_col_port_add(mlxsw_sp_port, lag_id, port_index);
4209        if (err)
4210                goto err_col_port_add;
4211        err = mlxsw_sp_lag_col_port_enable(mlxsw_sp_port, lag_id);
4212        if (err)
4213                goto err_col_port_enable;
4214
4215        mlxsw_core_lag_mapping_set(mlxsw_sp->core, lag_id, port_index,
4216                                   mlxsw_sp_port->local_port);
4217        mlxsw_sp_port->lag_id = lag_id;
4218        mlxsw_sp_port->lagged = 1;
4219        lag->ref_count++;
4220
4221        /* Port is no longer usable as a router interface */
4222        mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, 1);
4223        if (mlxsw_sp_port_vlan->fid)
4224                mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan);
4225
4226        return 0;
4227
4228err_col_port_enable:
4229        mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id);
4230err_col_port_add:
4231        if (!lag->ref_count)
4232                mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);
4233        return err;
4234}
4235
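/* Undo mlxsw_sp_port_lag_join(): disable and remove the port from the LAG
 * collector, flush its VLANs, destroy the LAG if this was the last member and
 * restore VLAN 1 as PVID so untagged traffic keeps flowing.
 */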
4236static void mlxsw_sp_port_lag_leave(struct mlxsw_sp_port *mlxsw_sp_port,
4237                                    struct net_device *lag_dev)
4238{
4239        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
4240        u16 lag_id = mlxsw_sp_port->lag_id;
4241        struct mlxsw_sp_upper *lag;
4242
4243        if (!mlxsw_sp_port->lagged)
4244                return;
4245        lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id);
4246        WARN_ON(lag->ref_count == 0);
4247
4248        mlxsw_sp_lag_col_port_disable(mlxsw_sp_port, lag_id);
4249        mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id);
4250
4251        /* Any VLANs configured on the port are no longer valid */
4252        mlxsw_sp_port_vlan_flush(mlxsw_sp_port);
4253
4254        if (lag->ref_count == 1)
4255                mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);
4256
4257        mlxsw_core_lag_mapping_clear(mlxsw_sp->core, lag_id,
4258                                     mlxsw_sp_port->local_port);
4259        mlxsw_sp_port->lagged = 0;
4260        lag->ref_count--;
4261
4262        mlxsw_sp_port_vlan_get(mlxsw_sp_port, 1);
4263        /* Make sure untagged frames are allowed to ingress */
4264        mlxsw_sp_port_pvid_set(mlxsw_sp_port, 1);
4265}
4266
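/* The SLDR distributor list controls which LAG members may be selected for
 * egress. It is kept in sync with the tx_enabled state that the LAG driver
 * (bonding or team) reports through NETDEV_CHANGELOWERSTATE.
 */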
4267static int mlxsw_sp_lag_dist_port_add(struct mlxsw_sp_port *mlxsw_sp_port,
4268                                      u16 lag_id)
4269{
4270        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
4271        char sldr_pl[MLXSW_REG_SLDR_LEN];
4272
4273        mlxsw_reg_sldr_lag_add_port_pack(sldr_pl, lag_id,
4274                                         mlxsw_sp_port->local_port);
4275        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
4276}
4277
4278static int mlxsw_sp_lag_dist_port_remove(struct mlxsw_sp_port *mlxsw_sp_port,
4279                                         u16 lag_id)
4280{
4281        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
4282        char sldr_pl[MLXSW_REG_SLDR_LEN];
4283
4284        mlxsw_reg_sldr_lag_remove_port_pack(sldr_pl, lag_id,
4285                                            mlxsw_sp_port->local_port);
4286        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
4287}
4288
4289static int mlxsw_sp_port_lag_tx_en_set(struct mlxsw_sp_port *mlxsw_sp_port,
4290                                       bool lag_tx_enabled)
4291{
4292        if (lag_tx_enabled)
4293                return mlxsw_sp_lag_dist_port_add(mlxsw_sp_port,
4294                                                  mlxsw_sp_port->lag_id);
4295        else
4296                return mlxsw_sp_lag_dist_port_remove(mlxsw_sp_port,
4297                                                     mlxsw_sp_port->lag_id);
4298}
4299
4300static int mlxsw_sp_port_lag_changed(struct mlxsw_sp_port *mlxsw_sp_port,
4301                                     struct netdev_lag_lower_state_info *info)
4302{
4303        return mlxsw_sp_port_lag_tx_en_set(mlxsw_sp_port, info->tx_enabled);
4304}
4305
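/* Set the STP state of every VLAN on the port to either forwarding or
 * discarding via the SPMS register. In this file it is used only by the OVS
 * join/leave paths below.
 */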
4306static int mlxsw_sp_port_stp_set(struct mlxsw_sp_port *mlxsw_sp_port,
4307                                 bool enable)
4308{
4309        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
4310        enum mlxsw_reg_spms_state spms_state;
4311        char *spms_pl;
4312        u16 vid;
4313        int err;
4314
4315        spms_state = enable ? MLXSW_REG_SPMS_STATE_FORWARDING :
4316                              MLXSW_REG_SPMS_STATE_DISCARDING;
4317
4318        spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL);
4319        if (!spms_pl)
4320                return -ENOMEM;
4321        mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port);
4322
4323        for (vid = 0; vid < VLAN_N_VID; vid++)
4324                mlxsw_reg_spms_vid_pack(spms_pl, vid, spms_state);
4325
4326        err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl);
4327        kfree(spms_pl);
4328        return err;
4329}
4330
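/* Joining an OVS master: switch the port to virtual-port mode, set all VLANs
 * to forwarding, add membership for VIDs 2-4095 and disable learning on VIDs
 * 1-4095, leaving forwarding decisions to the OVS datapath rather than the
 * hardware FDB. mlxsw_sp_port_ovs_leave() undoes this in reverse order.
 */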
4331static int mlxsw_sp_port_ovs_join(struct mlxsw_sp_port *mlxsw_sp_port)
4332{
4333        u16 vid = 1;
4334        int err;
4335
4336        err = mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, true);
4337        if (err)
4338                return err;
4339        err = mlxsw_sp_port_stp_set(mlxsw_sp_port, true);
4340        if (err)
4341                goto err_port_stp_set;
4342        err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, 2, VLAN_N_VID - 1,
4343                                     true, false);
4344        if (err)
4345                goto err_port_vlan_set;
4346
4347        for (; vid <= VLAN_N_VID - 1; vid++) {
4348                err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_port,
4349                                                     vid, false);
4350                if (err)
4351                        goto err_vid_learning_set;
4352        }
4353
4354        return 0;
4355
4356err_vid_learning_set:
4357        for (vid--; vid >= 1; vid--)
4358                mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, true);
4359err_port_vlan_set:
4360        mlxsw_sp_port_stp_set(mlxsw_sp_port, false);
4361err_port_stp_set:
4362        mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false);
4363        return err;
4364}
4365
4366static void mlxsw_sp_port_ovs_leave(struct mlxsw_sp_port *mlxsw_sp_port)
4367{
4368        u16 vid;
4369
4370        for (vid = VLAN_N_VID - 1; vid >= 1; vid--)
4371                mlxsw_sp_port_vid_learning_set(mlxsw_sp_port,
4372                                               vid, true);
4373
4374        mlxsw_sp_port_vlan_set(mlxsw_sp_port, 2, VLAN_N_VID - 1,
4375                               false, false);
4376        mlxsw_sp_port_stp_set(mlxsw_sp_port, false);
4377        mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false);
4378}
4379
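/* Validate (NETDEV_PRECHANGEUPPER) and apply (NETDEV_CHANGEUPPER) topology
 * changes above a Spectrum port. Only bridge, LAG, OVS and VLAN uppers are
 * offloadable, and several combinations (VLANs on LAG or OVS ports, VLAN
 * uppers with VID 1, unsupported LAG Tx types) are rejected with an extack
 * message.
 */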
4380static int mlxsw_sp_netdevice_port_upper_event(struct net_device *lower_dev,
4381                                               struct net_device *dev,
4382                                               unsigned long event, void *ptr)
4383{
4384        struct netdev_notifier_changeupper_info *info;
4385        struct mlxsw_sp_port *mlxsw_sp_port;
4386        struct netlink_ext_ack *extack;
4387        struct net_device *upper_dev;
4388        struct mlxsw_sp *mlxsw_sp;
4389        int err = 0;
4390
4391        mlxsw_sp_port = netdev_priv(dev);
4392        mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
4393        info = ptr;
4394        extack = netdev_notifier_info_to_extack(&info->info);
4395
4396        switch (event) {
4397        case NETDEV_PRECHANGEUPPER:
4398                upper_dev = info->upper_dev;
4399                if (!is_vlan_dev(upper_dev) &&
4400                    !netif_is_lag_master(upper_dev) &&
4401                    !netif_is_bridge_master(upper_dev) &&
4402                    !netif_is_ovs_master(upper_dev)) {
4403                        NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
4404                        return -EINVAL;
4405                }
4406                if (!info->linking)
4407                        break;
4408                if (netdev_has_any_upper_dev(upper_dev) &&
4409                    (!netif_is_bridge_master(upper_dev) ||
4410                     !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp,
4411                                                          upper_dev))) {
4412                        NL_SET_ERR_MSG_MOD(extack, "Enslaving a port to a device that already has an upper device is not supported");
4413                        return -EINVAL;
4414                }
4415                if (netif_is_lag_master(upper_dev) &&
4416                    !mlxsw_sp_master_lag_check(mlxsw_sp, upper_dev,
4417                                               info->upper_info, extack))
4418                        return -EINVAL;
4419                if (netif_is_lag_master(upper_dev) && vlan_uses_dev(dev)) {
4420                        NL_SET_ERR_MSG_MOD(extack, "Master device is a LAG master and this device has a VLAN");
4421                        return -EINVAL;
4422                }
4423                if (netif_is_lag_port(dev) && is_vlan_dev(upper_dev) &&
4424                    !netif_is_lag_master(vlan_dev_real_dev(upper_dev))) {
4425                        NL_SET_ERR_MSG_MOD(extack, "Can not put a VLAN on a LAG port");
4426                        return -EINVAL;
4427                }
4428                if (netif_is_ovs_master(upper_dev) && vlan_uses_dev(dev)) {
4429                        NL_SET_ERR_MSG_MOD(extack, "Master device is an OVS master and this device has a VLAN");
4430                        return -EINVAL;
4431                }
4432                if (netif_is_ovs_port(dev) && is_vlan_dev(upper_dev)) {
4433                        NL_SET_ERR_MSG_MOD(extack, "Can not put a VLAN on an OVS port");
4434                        return -EINVAL;
4435                }
4436                if (is_vlan_dev(upper_dev) &&
4437                    vlan_dev_vlan_id(upper_dev) == 1) {
4438                        NL_SET_ERR_MSG_MOD(extack, "Creating a VLAN device with VID 1 is unsupported: VLAN 1 carries untagged traffic");
4439                        return -EINVAL;
4440                }
4441                break;
4442        case NETDEV_CHANGEUPPER:
4443                upper_dev = info->upper_dev;
4444                if (netif_is_bridge_master(upper_dev)) {
4445                        if (info->linking)
4446                                err = mlxsw_sp_port_bridge_join(mlxsw_sp_port,
4447                                                                lower_dev,
4448                                                                upper_dev,
4449                                                                extack);
4450                        else
4451                                mlxsw_sp_port_bridge_leave(mlxsw_sp_port,
4452                                                           lower_dev,
4453                                                           upper_dev);
4454                } else if (netif_is_lag_master(upper_dev)) {
4455                        if (info->linking)
4456                                err = mlxsw_sp_port_lag_join(mlxsw_sp_port,
4457                                                             upper_dev);
4458                        else
4459                                mlxsw_sp_port_lag_leave(mlxsw_sp_port,
4460                                                        upper_dev);
4461                } else if (netif_is_ovs_master(upper_dev)) {
4462                        if (info->linking)
4463                                err = mlxsw_sp_port_ovs_join(mlxsw_sp_port);
4464                        else
4465                                mlxsw_sp_port_ovs_leave(mlxsw_sp_port);
4466                }
4467                break;
4468        }
4469
4470        return err;
4471}
4472
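/* NETDEV_CHANGELOWERSTATE: propagate the LAG driver's tx_enabled state for
 * this member port into the hardware distributor list.
 */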
4473static int mlxsw_sp_netdevice_port_lower_event(struct net_device *dev,
4474                                               unsigned long event, void *ptr)
4475{
4476        struct netdev_notifier_changelowerstate_info *info;
4477        struct mlxsw_sp_port *mlxsw_sp_port;
4478        int err;
4479
4480        mlxsw_sp_port = netdev_priv(dev);
4481        info = ptr;
4482
4483        switch (event) {
4484        case NETDEV_CHANGELOWERSTATE:
4485                if (netif_is_lag_port(dev) && mlxsw_sp_port->lagged) {
4486                        err = mlxsw_sp_port_lag_changed(mlxsw_sp_port,
4487                                                        info->lower_state_info);
4488                        if (err)
4489                                netdev_err(dev, "Failed to reflect link aggregation lower state change\n");
4490                }
4491                break;
4492        }
4493
4494        return 0;
4495}
4496
4497static int mlxsw_sp_netdevice_port_event(struct net_device *lower_dev,
4498                                         struct net_device *port_dev,
4499                                         unsigned long event, void *ptr)
4500{
4501        switch (event) {
4502        case NETDEV_PRECHANGEUPPER:
4503        case NETDEV_CHANGEUPPER:
4504                return mlxsw_sp_netdevice_port_upper_event(lower_dev, port_dev,
4505                                                           event, ptr);
4506        case NETDEV_CHANGELOWERSTATE:
4507                return mlxsw_sp_netdevice_port_lower_event(port_dev, event,
4508                                                           ptr);
4509        }
4510
4511        return 0;
4512}
4513
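/* An event on a LAG netdev is replayed on each Spectrum port enslaved to it,
 * since the per-port handlers carry out the actual hardware updates.
 */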
4514static int mlxsw_sp_netdevice_lag_event(struct net_device *lag_dev,
4515                                        unsigned long event, void *ptr)
4516{
4517        struct net_device *dev;
4518        struct list_head *iter;
4519        int ret;
4520
4521        netdev_for_each_lower_dev(lag_dev, dev, iter) {
4522                if (mlxsw_sp_port_dev_check(dev)) {
4523                        ret = mlxsw_sp_netdevice_port_event(lag_dev, dev, event,
4524                                                            ptr);
4525                        if (ret)
4526                                return ret;
4527                }
4528        }
4529
4530        return 0;
4531}
4532
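/* Upper-device changes on a VLAN netdev whose real device is a Spectrum port.
 * Only bridge masters are handled here; VRF uppers never reach this function
 * because mlxsw_sp_netdevice_event() dispatches them to the VRF handler
 * first.
 */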
4533static int mlxsw_sp_netdevice_port_vlan_event(struct net_device *vlan_dev,
4534                                              struct net_device *dev,
4535                                              unsigned long event, void *ptr,
4536                                              u16 vid)
4537{
4538        struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
4539        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
4540        struct netdev_notifier_changeupper_info *info = ptr;
4541        struct netlink_ext_ack *extack;
4542        struct net_device *upper_dev;
4543        int err = 0;
4544
4545        extack = netdev_notifier_info_to_extack(&info->info);
4546
4547        switch (event) {
4548        case NETDEV_PRECHANGEUPPER:
4549                upper_dev = info->upper_dev;
4550                if (!netif_is_bridge_master(upper_dev)) {
4551                        NL_SET_ERR_MSG_MOD(extack, "VLAN devices only support bridge and VRF uppers");
4552                        return -EINVAL;
4553                }
4554                if (!info->linking)
4555                        break;
4556                if (netdev_has_any_upper_dev(upper_dev) &&
4557                    (!netif_is_bridge_master(upper_dev) ||
4558                     !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp,
4559                                                          upper_dev))) {
4560                        NL_SET_ERR_MSG_MOD(extack, "Enslaving a port to a device that already has an upper device is not supported");
4561                        return -EINVAL;
4562                }
4563                break;
4564        case NETDEV_CHANGEUPPER:
4565                upper_dev = info->upper_dev;
4566                if (netif_is_bridge_master(upper_dev)) {
4567                        if (info->linking)
4568                                err = mlxsw_sp_port_bridge_join(mlxsw_sp_port,
4569                                                                vlan_dev,
4570                                                                upper_dev,
4571                                                                extack);
4572                        else
4573                                mlxsw_sp_port_bridge_leave(mlxsw_sp_port,
4574                                                           vlan_dev,
4575                                                           upper_dev);
4576                } else {
4577                        err = -EINVAL;
4578                        WARN_ON(1);
4579                }
4580                break;
4581        }
4582
4583        return err;
4584}
4585
4586static int mlxsw_sp_netdevice_lag_port_vlan_event(struct net_device *vlan_dev,
4587                                                  struct net_device *lag_dev,
4588                                                  unsigned long event,
4589                                                  void *ptr, u16 vid)
4590{
4591        struct net_device *dev;
4592        struct list_head *iter;
4593        int ret;
4594
4595        netdev_for_each_lower_dev(lag_dev, dev, iter) {
4596                if (mlxsw_sp_port_dev_check(dev)) {
4597                        ret = mlxsw_sp_netdevice_port_vlan_event(vlan_dev, dev,
4598                                                                 event, ptr,
4599                                                                 vid);
4600                        if (ret)
4601                                return ret;
4602                }
4603        }
4604
4605        return 0;
4606}
4607
4608static int mlxsw_sp_netdevice_vlan_event(struct net_device *vlan_dev,
4609                                         unsigned long event, void *ptr)
4610{
4611        struct net_device *real_dev = vlan_dev_real_dev(vlan_dev);
4612        u16 vid = vlan_dev_vlan_id(vlan_dev);
4613
4614        if (mlxsw_sp_port_dev_check(real_dev))
4615                return mlxsw_sp_netdevice_port_vlan_event(vlan_dev, real_dev,
4616                                                          event, ptr, vid);
4617        else if (netif_is_lag_master(real_dev))
4618                return mlxsw_sp_netdevice_lag_port_vlan_event(vlan_dev,
4619                                                              real_dev, event,
4620                                                              ptr, vid);
4621
4622        return 0;
4623}
4624
4625static bool mlxsw_sp_is_vrf_event(unsigned long event, void *ptr)
4626{
4627        struct netdev_notifier_changeupper_info *info = ptr;
4628
4629        if (event != NETDEV_PRECHANGEUPPER && event != NETDEV_CHANGEUPPER)
4630                return false;
4631        return netif_is_l3_master(info->upper_dev);
4632}
4633
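/* Top-level netdevice notifier. A NETDEV_UNREGISTER of a mirror target
 * invalidates its SPAN entry and the SPAN state is refreshed on every event;
 * the event is then dispatched by device type: IP-in-IP overlay and underlay
 * devices, router ports (address/MTU changes), VRFs, physical ports, LAGs
 * and VLAN devices.
 */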
4634static int mlxsw_sp_netdevice_event(struct notifier_block *nb,
4635                                    unsigned long event, void *ptr)
4636{
4637        struct net_device *dev = netdev_notifier_info_to_dev(ptr);
4638        struct mlxsw_sp_span_entry *span_entry;
4639        struct mlxsw_sp *mlxsw_sp;
4640        int err = 0;
4641
4642        mlxsw_sp = container_of(nb, struct mlxsw_sp, netdevice_nb);
4643        if (event == NETDEV_UNREGISTER) {
4644                span_entry = mlxsw_sp_span_entry_find_by_port(mlxsw_sp, dev);
4645                if (span_entry)
4646                        mlxsw_sp_span_entry_invalidate(mlxsw_sp, span_entry);
4647        }
4648        mlxsw_sp_span_respin(mlxsw_sp);
4649
4650        if (mlxsw_sp_netdev_is_ipip_ol(mlxsw_sp, dev))
4651                err = mlxsw_sp_netdevice_ipip_ol_event(mlxsw_sp, dev,
4652                                                       event, ptr);
4653        else if (mlxsw_sp_netdev_is_ipip_ul(mlxsw_sp, dev))
4654                err = mlxsw_sp_netdevice_ipip_ul_event(mlxsw_sp, dev,
4655                                                       event, ptr);
4656        else if (event == NETDEV_CHANGEADDR || event == NETDEV_CHANGEMTU)
4657                err = mlxsw_sp_netdevice_router_port_event(dev);
4658        else if (mlxsw_sp_is_vrf_event(event, ptr))
4659                err = mlxsw_sp_netdevice_vrf_event(dev, event, ptr);
4660        else if (mlxsw_sp_port_dev_check(dev))
4661                err = mlxsw_sp_netdevice_port_event(dev, dev, event, ptr);
4662        else if (netif_is_lag_master(dev))
4663                err = mlxsw_sp_netdevice_lag_event(dev, event, ptr);
4664        else if (is_vlan_dev(dev))
4665                err = mlxsw_sp_netdevice_vlan_event(dev, event, ptr);
4666
4667        return notifier_from_errno(err);
4668}
4669
4670static struct notifier_block mlxsw_sp_inetaddr_valid_nb __read_mostly = {
4671        .notifier_call = mlxsw_sp_inetaddr_valid_event,
4672};
4673
4674static struct notifier_block mlxsw_sp_inetaddr_nb __read_mostly = {
4675        .notifier_call = mlxsw_sp_inetaddr_event,
4676};
4677
4678static struct notifier_block mlxsw_sp_inet6addr_valid_nb __read_mostly = {
4679        .notifier_call = mlxsw_sp_inet6addr_valid_event,
4680};
4681
4682static struct notifier_block mlxsw_sp_inet6addr_nb __read_mostly = {
4683        .notifier_call = mlxsw_sp_inet6addr_event,
4684};
4685
4686static const struct pci_device_id mlxsw_sp_pci_id_table[] = {
4687        {PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM), 0},
4688        {0, },
4689};
4690
4691static struct pci_driver mlxsw_sp_pci_driver = {
4692        .name = mlxsw_sp_driver_name,
4693        .id_table = mlxsw_sp_pci_id_table,
4694};
4695
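/* The address notifiers are registered before the core and PCI drivers,
 * presumably so that inetaddr/inet6addr events are already being tracked by
 * the time devices probe; the error path and module exit unwind everything
 * in the reverse order.
 */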
4696static int __init mlxsw_sp_module_init(void)
4697{
4698        int err;
4699
4700        register_inetaddr_validator_notifier(&mlxsw_sp_inetaddr_valid_nb);
4701        register_inetaddr_notifier(&mlxsw_sp_inetaddr_nb);
4702        register_inet6addr_validator_notifier(&mlxsw_sp_inet6addr_valid_nb);
4703        register_inet6addr_notifier(&mlxsw_sp_inet6addr_nb);
4704
4705        err = mlxsw_core_driver_register(&mlxsw_sp_driver);
4706        if (err)
4707                goto err_core_driver_register;
4708
4709        err = mlxsw_pci_driver_register(&mlxsw_sp_pci_driver);
4710        if (err)
4711                goto err_pci_driver_register;
4712
4713        return 0;
4714
4715err_pci_driver_register:
4716        mlxsw_core_driver_unregister(&mlxsw_sp_driver);
4717err_core_driver_register:
4718        unregister_inet6addr_notifier(&mlxsw_sp_inet6addr_nb);
4719        unregister_inet6addr_validator_notifier(&mlxsw_sp_inet6addr_valid_nb);
4720        unregister_inetaddr_notifier(&mlxsw_sp_inetaddr_nb);
4721        unregister_inetaddr_validator_notifier(&mlxsw_sp_inetaddr_valid_nb);
4722        return err;
4723}
4724
4725static void __exit mlxsw_sp_module_exit(void)
4726{
4727        mlxsw_pci_driver_unregister(&mlxsw_sp_pci_driver);
4728        mlxsw_core_driver_unregister(&mlxsw_sp_driver);
4729        unregister_inet6addr_notifier(&mlxsw_sp_inet6addr_nb);
4730        unregister_inet6addr_validator_notifier(&mlxsw_sp_inet6addr_valid_nb);
4731        unregister_inetaddr_notifier(&mlxsw_sp_inetaddr_nb);
4732        unregister_inetaddr_validator_notifier(&mlxsw_sp_inetaddr_valid_nb);
4733}
4734
4735module_init(mlxsw_sp_module_init);
4736module_exit(mlxsw_sp_module_exit);
4737
4738MODULE_LICENSE("Dual BSD/GPL");
4739MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
4740MODULE_DESCRIPTION("Mellanox Spectrum driver");
4741MODULE_DEVICE_TABLE(pci, mlxsw_sp_pci_id_table);
4742MODULE_FIRMWARE(MLXSW_SP_FW_FILENAME);
4743