linux/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
   1/*
   2 * drivers/net/ethernet/mellanox/mlxsw/spectrum.c
   3 * Copyright (c) 2015-2017 Mellanox Technologies. All rights reserved.
   4 * Copyright (c) 2015-2017 Jiri Pirko <jiri@mellanox.com>
   5 * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
   6 * Copyright (c) 2015 Elad Raz <eladr@mellanox.com>
   7 *
   8 * Redistribution and use in source and binary forms, with or without
   9 * modification, are permitted provided that the following conditions are met:
  10 *
  11 * 1. Redistributions of source code must retain the above copyright
  12 *    notice, this list of conditions and the following disclaimer.
  13 * 2. Redistributions in binary form must reproduce the above copyright
  14 *    notice, this list of conditions and the following disclaimer in the
  15 *    documentation and/or other materials provided with the distribution.
  16 * 3. Neither the names of the copyright holders nor the names of its
  17 *    contributors may be used to endorse or promote products derived from
  18 *    this software without specific prior written permission.
  19 *
  20 * Alternatively, this software may be distributed under the terms of the
  21 * GNU General Public License ("GPL") version 2 as published by the Free
  22 * Software Foundation.
  23 *
  24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  27 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  28 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  34 * POSSIBILITY OF SUCH DAMAGE.
  35 */
  36
  37#include <linux/kernel.h>
  38#include <linux/module.h>
  39#include <linux/types.h>
  40#include <linux/pci.h>
  41#include <linux/netdevice.h>
  42#include <linux/etherdevice.h>
  43#include <linux/ethtool.h>
  44#include <linux/slab.h>
  45#include <linux/device.h>
  46#include <linux/skbuff.h>
  47#include <linux/if_vlan.h>
  48#include <linux/if_bridge.h>
  49#include <linux/workqueue.h>
  50#include <linux/jiffies.h>
  51#include <linux/bitops.h>
  52#include <linux/list.h>
  53#include <linux/notifier.h>
  54#include <linux/dcbnl.h>
  55#include <linux/inetdevice.h>
  56#include <net/switchdev.h>
  57#include <net/pkt_cls.h>
  58#include <net/tc_act/tc_mirred.h>
  59#include <net/netevent.h>
  60#include <net/tc_act/tc_sample.h>
  61#include <net/addrconf.h>
  62
  63#include "l3mdev.h"
  64#include "spectrum.h"
  65#include "pci.h"
  66#include "core.h"
  67#include "reg.h"
  68#include "port.h"
  69#include "trap.h"
  70#include "txheader.h"
  71#include "spectrum_cnt.h"
  72#include "spectrum_dpipe.h"
  73#include "../mlxfw/mlxfw.h"
  74
  75#define MLXSW_FWREV_MAJOR 13
  76#define MLXSW_FWREV_MINOR 1420
  77#define MLXSW_FWREV_SUBMINOR 122
  78
  79static const struct mlxsw_fw_rev mlxsw_sp_supported_fw_rev = {
  80        .major = MLXSW_FWREV_MAJOR,
  81        .minor = MLXSW_FWREV_MINOR,
  82        .subminor = MLXSW_FWREV_SUBMINOR
  83};
  84
  85#define MLXSW_SP_FW_FILENAME \
  86        "mellanox/mlxsw_spectrum-" __stringify(MLXSW_FWREV_MAJOR) \
  87        "." __stringify(MLXSW_FWREV_MINOR) \
  88        "." __stringify(MLXSW_FWREV_SUBMINOR) ".mfa2"
  89
  90static const char mlxsw_sp_driver_name[] = "mlxsw_spectrum";
  91static const char mlxsw_sp_driver_version[] = "1.0";
  92
  93/* tx_hdr_version
  94 * Tx header version.
  95 * Must be set to 1.
  96 */
  97MLXSW_ITEM32(tx, hdr, version, 0x00, 28, 4);
  98
  99/* tx_hdr_ctl
 100 * Packet control type.
 101 * 0 - Ethernet control (e.g. EMADs, LACP)
 102 * 1 - Ethernet data
 103 */
 104MLXSW_ITEM32(tx, hdr, ctl, 0x00, 26, 2);
 105
 106/* tx_hdr_proto
 107 * Packet protocol type. Must be set to 1 (Ethernet).
 108 */
 109MLXSW_ITEM32(tx, hdr, proto, 0x00, 21, 3);
 110
 111/* tx_hdr_rx_is_router
 112 * Packet is sent from the router. Valid for data packets only.
 113 */
 114MLXSW_ITEM32(tx, hdr, rx_is_router, 0x00, 19, 1);
 115
 116/* tx_hdr_fid_valid
 117 * Indicates if the 'fid' field is valid and should be used for
 118 * forwarding lookup. Valid for data packets only.
 119 */
 120MLXSW_ITEM32(tx, hdr, fid_valid, 0x00, 16, 1);
 121
 122/* tx_hdr_swid
 123 * Switch partition ID. Must be set to 0.
 124 */
 125MLXSW_ITEM32(tx, hdr, swid, 0x00, 12, 3);
 126
 127/* tx_hdr_control_tclass
 128 * Indicates if the packet should use the control TClass and not one
 129 * of the data TClasses.
 130 */
 131MLXSW_ITEM32(tx, hdr, control_tclass, 0x00, 6, 1);
 132
 133/* tx_hdr_etclass
 134 * Egress TClass to be used on the egress device on the egress port.
 135 */
 136MLXSW_ITEM32(tx, hdr, etclass, 0x00, 0, 4);
 137
 138/* tx_hdr_port_mid
 139 * Destination local port for unicast packets.
 140 * Destination multicast ID for multicast packets.
 141 *
 142 * Control packets are directed to a specific egress port, while data
 143 * packets are transmitted through the CPU port (0) into the switch partition,
 144 * where forwarding rules are applied.
 145 */
 146MLXSW_ITEM32(tx, hdr, port_mid, 0x04, 16, 16);
 147
 148/* tx_hdr_fid
 149 * Forwarding ID used for L2 forwarding lookup. Valid only if 'fid_valid' is
 150 * set, otherwise calculated based on the packet's VID using VID to FID mapping.
 151 * Valid for data packets only.
 152 */
 153MLXSW_ITEM32(tx, hdr, fid, 0x08, 0, 16);
 154
 155/* tx_hdr_type
 156 * 0 - Data packets
 157 * 6 - Control packets
 158 */
 159MLXSW_ITEM32(tx, hdr, type, 0x0C, 0, 4);
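     /* Each MLXSW_ITEM32() invocation above generates packed-field helpers of
      * the form mlxsw_tx_hdr_<field>_set()/_get() (see item.h) that read or
      * write the field at the given offset/shift/width in the header buffer;
      * mlxsw_sp_txhdr_construct() below uses the setters to build the header.
      */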
 160
 161struct mlxsw_sp_mlxfw_dev {
 162        struct mlxfw_dev mlxfw_dev;
 163        struct mlxsw_sp *mlxsw_sp;
 164};
 165
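     /* The callbacks below implement the mlxfw_dev_ops interface (collected
      * in mlxsw_sp_mlxfw_dev_ops further down). mlxfw_firmware_flash()
      * invokes them to drive the firmware update state machine through the
      * MCQI, MCC and MCDA registers; each callback recovers its mlxsw_sp
      * instance with container_of() on the embedded mlxfw_dev.
      */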
 166static int mlxsw_sp_component_query(struct mlxfw_dev *mlxfw_dev,
 167                                    u16 component_index, u32 *p_max_size,
 168                                    u8 *p_align_bits, u16 *p_max_write_size)
 169{
 170        struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
 171                container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
 172        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
 173        char mcqi_pl[MLXSW_REG_MCQI_LEN];
 174        int err;
 175
 176        mlxsw_reg_mcqi_pack(mcqi_pl, component_index);
 177        err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mcqi), mcqi_pl);
 178        if (err)
 179                return err;
 180        mlxsw_reg_mcqi_unpack(mcqi_pl, p_max_size, p_align_bits,
 181                              p_max_write_size);
 182
 183        *p_align_bits = max_t(u8, *p_align_bits, 2);
 184        *p_max_write_size = min_t(u16, *p_max_write_size,
 185                                  MLXSW_REG_MCDA_MAX_DATA_LEN);
 186        return 0;
 187}
 188
 189static int mlxsw_sp_fsm_lock(struct mlxfw_dev *mlxfw_dev, u32 *fwhandle)
 190{
 191        struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
 192                container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
 193        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
 194        char mcc_pl[MLXSW_REG_MCC_LEN];
 195        u8 control_state;
 196        int err;
 197
 198        mlxsw_reg_mcc_pack(mcc_pl, 0, 0, 0, 0);
 199        err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
 200        if (err)
 201                return err;
 202
 203        mlxsw_reg_mcc_unpack(mcc_pl, fwhandle, NULL, &control_state);
 204        if (control_state != MLXFW_FSM_STATE_IDLE)
 205                return -EBUSY;
 206
 207        mlxsw_reg_mcc_pack(mcc_pl,
 208                           MLXSW_REG_MCC_INSTRUCTION_LOCK_UPDATE_HANDLE,
 209                           0, *fwhandle, 0);
 210        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
 211}
 212
 213static int mlxsw_sp_fsm_component_update(struct mlxfw_dev *mlxfw_dev,
 214                                         u32 fwhandle, u16 component_index,
 215                                         u32 component_size)
 216{
 217        struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
 218                container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
 219        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
 220        char mcc_pl[MLXSW_REG_MCC_LEN];
 221
 222        mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_UPDATE_COMPONENT,
 223                           component_index, fwhandle, component_size);
 224        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
 225}
 226
 227static int mlxsw_sp_fsm_block_download(struct mlxfw_dev *mlxfw_dev,
 228                                       u32 fwhandle, u8 *data, u16 size,
 229                                       u32 offset)
 230{
 231        struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
 232                container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
 233        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
 234        char mcda_pl[MLXSW_REG_MCDA_LEN];
 235
 236        mlxsw_reg_mcda_pack(mcda_pl, fwhandle, offset, size, data);
 237        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcda), mcda_pl);
 238}
 239
 240static int mlxsw_sp_fsm_component_verify(struct mlxfw_dev *mlxfw_dev,
 241                                         u32 fwhandle, u16 component_index)
 242{
 243        struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
 244                container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
 245        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
 246        char mcc_pl[MLXSW_REG_MCC_LEN];
 247
 248        mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_VERIFY_COMPONENT,
 249                           component_index, fwhandle, 0);
 250        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
 251}
 252
 253static int mlxsw_sp_fsm_activate(struct mlxfw_dev *mlxfw_dev, u32 fwhandle)
 254{
 255        struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
 256                container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
 257        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
 258        char mcc_pl[MLXSW_REG_MCC_LEN];
 259
 260        mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_ACTIVATE, 0,
 261                           fwhandle, 0);
 262        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
 263}
 264
 265static int mlxsw_sp_fsm_query_state(struct mlxfw_dev *mlxfw_dev, u32 fwhandle,
 266                                    enum mlxfw_fsm_state *fsm_state,
 267                                    enum mlxfw_fsm_state_err *fsm_state_err)
 268{
 269        struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
 270                container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
 271        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
 272        char mcc_pl[MLXSW_REG_MCC_LEN];
 273        u8 control_state;
 274        u8 error_code;
 275        int err;
 276
 277        mlxsw_reg_mcc_pack(mcc_pl, 0, 0, fwhandle, 0);
 278        err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
 279        if (err)
 280                return err;
 281
 282        mlxsw_reg_mcc_unpack(mcc_pl, NULL, &error_code, &control_state);
 283        *fsm_state = control_state;
 284        *fsm_state_err = min_t(enum mlxfw_fsm_state_err, error_code,
 285                               MLXFW_FSM_STATE_ERR_MAX);
 286        return 0;
 287}
 288
 289static void mlxsw_sp_fsm_cancel(struct mlxfw_dev *mlxfw_dev, u32 fwhandle)
 290{
 291        struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
 292                container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
 293        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
 294        char mcc_pl[MLXSW_REG_MCC_LEN];
 295
 296        mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_CANCEL, 0,
 297                           fwhandle, 0);
 298        mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
 299}
 300
 301static void mlxsw_sp_fsm_release(struct mlxfw_dev *mlxfw_dev, u32 fwhandle)
 302{
 303        struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
 304                container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
 305        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
 306        char mcc_pl[MLXSW_REG_MCC_LEN];
 307
 308        mlxsw_reg_mcc_pack(mcc_pl,
 309                           MLXSW_REG_MCC_INSTRUCTION_RELEASE_UPDATE_HANDLE, 0,
 310                           fwhandle, 0);
 311        mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
 312}
 313
 314static const struct mlxfw_dev_ops mlxsw_sp_mlxfw_dev_ops = {
 315        .component_query        = mlxsw_sp_component_query,
 316        .fsm_lock               = mlxsw_sp_fsm_lock,
 317        .fsm_component_update   = mlxsw_sp_fsm_component_update,
 318        .fsm_block_download     = mlxsw_sp_fsm_block_download,
 319        .fsm_component_verify   = mlxsw_sp_fsm_component_verify,
 320        .fsm_activate           = mlxsw_sp_fsm_activate,
 321        .fsm_query_state        = mlxsw_sp_fsm_query_state,
 322        .fsm_cancel             = mlxsw_sp_fsm_cancel,
 323        .fsm_release            = mlxsw_sp_fsm_release
 324};
 325
 326static int mlxsw_sp_firmware_flash(struct mlxsw_sp *mlxsw_sp,
 327                                   const struct firmware *firmware)
 328{
 329        struct mlxsw_sp_mlxfw_dev mlxsw_sp_mlxfw_dev = {
 330                .mlxfw_dev = {
 331                        .ops = &mlxsw_sp_mlxfw_dev_ops,
 332                        .psid = mlxsw_sp->bus_info->psid,
 333                        .psid_size = strlen(mlxsw_sp->bus_info->psid),
 334                },
 335                .mlxsw_sp = mlxsw_sp
 336        };
 337
 338        return mlxfw_firmware_flash(&mlxsw_sp_mlxfw_dev.mlxfw_dev, firmware);
 339}
 340
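     /* Lexicographic (major, minor, subminor) comparison: returns true when
      * revision 'a' is the same as or newer than 'b'. For instance, 13.1421.0
      * is considered newer than 13.1420.122, while 13.1420.121 is not.
      */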
 341static bool mlxsw_sp_fw_rev_ge(const struct mlxsw_fw_rev *a,
 342                               const struct mlxsw_fw_rev *b)
 343{
 344        if (a->major != b->major)
 345                return a->major > b->major;
 346        if (a->minor != b->minor)
 347                return a->minor > b->minor;
 348        return a->subminor >= b->subminor;
 349}
 350
 351static int mlxsw_sp_fw_rev_validate(struct mlxsw_sp *mlxsw_sp)
 352{
 353        const struct mlxsw_fw_rev *rev = &mlxsw_sp->bus_info->fw_rev;
 354        const struct firmware *firmware;
 355        int err;
 356
 357        if (mlxsw_sp_fw_rev_ge(rev, &mlxsw_sp_supported_fw_rev))
 358                return 0;
 359
  360        dev_info(mlxsw_sp->bus_info->dev, "The firmware version %d.%d.%d is out of date\n",
 361                 rev->major, rev->minor, rev->subminor);
 362        dev_info(mlxsw_sp->bus_info->dev, "Upgrading firmware using file %s\n",
 363                 MLXSW_SP_FW_FILENAME);
 364
 365        err = request_firmware(&firmware, MLXSW_SP_FW_FILENAME,
 366                               mlxsw_sp->bus_info->dev);
 367        if (err) {
 368                dev_err(mlxsw_sp->bus_info->dev, "Could not request firmware file %s\n",
 369                        MLXSW_SP_FW_FILENAME);
 370                return err;
 371        }
 372
 373        err = mlxsw_sp_firmware_flash(mlxsw_sp, firmware);
 374        release_firmware(firmware);
 375        return err;
 376}
 377
 378int mlxsw_sp_flow_counter_get(struct mlxsw_sp *mlxsw_sp,
 379                              unsigned int counter_index, u64 *packets,
 380                              u64 *bytes)
 381{
 382        char mgpc_pl[MLXSW_REG_MGPC_LEN];
 383        int err;
 384
 385        mlxsw_reg_mgpc_pack(mgpc_pl, counter_index, MLXSW_REG_MGPC_OPCODE_NOP,
 386                            MLXSW_REG_FLOW_COUNTER_SET_TYPE_PACKETS_BYTES);
 387        err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mgpc), mgpc_pl);
 388        if (err)
 389                return err;
 390        if (packets)
 391                *packets = mlxsw_reg_mgpc_packet_counter_get(mgpc_pl);
 392        if (bytes)
 393                *bytes = mlxsw_reg_mgpc_byte_counter_get(mgpc_pl);
 394        return 0;
 395}
 396
 397static int mlxsw_sp_flow_counter_clear(struct mlxsw_sp *mlxsw_sp,
 398                                       unsigned int counter_index)
 399{
 400        char mgpc_pl[MLXSW_REG_MGPC_LEN];
 401
 402        mlxsw_reg_mgpc_pack(mgpc_pl, counter_index, MLXSW_REG_MGPC_OPCODE_CLEAR,
 403                            MLXSW_REG_FLOW_COUNTER_SET_TYPE_PACKETS_BYTES);
 404        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mgpc), mgpc_pl);
 405}
 406
 407int mlxsw_sp_flow_counter_alloc(struct mlxsw_sp *mlxsw_sp,
 408                                unsigned int *p_counter_index)
 409{
 410        int err;
 411
 412        err = mlxsw_sp_counter_alloc(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW,
 413                                     p_counter_index);
 414        if (err)
 415                return err;
 416        err = mlxsw_sp_flow_counter_clear(mlxsw_sp, *p_counter_index);
 417        if (err)
 418                goto err_counter_clear;
 419        return 0;
 420
 421err_counter_clear:
 422        mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW,
 423                              *p_counter_index);
 424        return err;
 425}
 426
 427void mlxsw_sp_flow_counter_free(struct mlxsw_sp *mlxsw_sp,
 428                                unsigned int counter_index)
 429{
  430        mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW,
  431                              counter_index);
 432}
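     /* Illustrative usage sketch (not taken from a caller in this file) of
      * the flow counter helpers above, assuming 'mlxsw_sp' is a valid
      * instance:
      *
      *     unsigned int counter_index;
      *     u64 packets, bytes;
      *     int err;
      *
      *     err = mlxsw_sp_flow_counter_alloc(mlxsw_sp, &counter_index);
      *     if (err)
      *             return err;
      *     err = mlxsw_sp_flow_counter_get(mlxsw_sp, counter_index,
      *                                     &packets, &bytes);
      *     ...
      *     mlxsw_sp_flow_counter_free(mlxsw_sp, counter_index);
      */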
 433
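     /* Prepend the MLXSW_TXHDR_LEN byte TX header that the device strips on
      * transmit. Packets sent by the driver itself are control packets
      * directed at a specific local port, hence MLXSW_TXHDR_ETH_CTL, the
      * control TClass and MLXSW_TXHDR_TYPE_CONTROL.
      */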
 434static void mlxsw_sp_txhdr_construct(struct sk_buff *skb,
 435                                     const struct mlxsw_tx_info *tx_info)
 436{
 437        char *txhdr = skb_push(skb, MLXSW_TXHDR_LEN);
 438
 439        memset(txhdr, 0, MLXSW_TXHDR_LEN);
 440
 441        mlxsw_tx_hdr_version_set(txhdr, MLXSW_TXHDR_VERSION_1);
 442        mlxsw_tx_hdr_ctl_set(txhdr, MLXSW_TXHDR_ETH_CTL);
 443        mlxsw_tx_hdr_proto_set(txhdr, MLXSW_TXHDR_PROTO_ETH);
 444        mlxsw_tx_hdr_swid_set(txhdr, 0);
 445        mlxsw_tx_hdr_control_tclass_set(txhdr, 1);
 446        mlxsw_tx_hdr_port_mid_set(txhdr, tx_info->local_port);
 447        mlxsw_tx_hdr_type_set(txhdr, MLXSW_TXHDR_TYPE_CONTROL);
 448}
 449
 450int mlxsw_sp_port_vid_stp_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid,
 451                              u8 state)
 452{
 453        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
 454        enum mlxsw_reg_spms_state spms_state;
 455        char *spms_pl;
 456        int err;
 457
 458        switch (state) {
 459        case BR_STATE_FORWARDING:
 460                spms_state = MLXSW_REG_SPMS_STATE_FORWARDING;
 461                break;
 462        case BR_STATE_LEARNING:
 463                spms_state = MLXSW_REG_SPMS_STATE_LEARNING;
 464                break;
 465        case BR_STATE_LISTENING: /* fall-through */
 466        case BR_STATE_DISABLED: /* fall-through */
 467        case BR_STATE_BLOCKING:
 468                spms_state = MLXSW_REG_SPMS_STATE_DISCARDING;
 469                break;
 470        default:
 471                BUG();
 472        }
 473
 474        spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL);
 475        if (!spms_pl)
 476                return -ENOMEM;
 477        mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port);
 478        mlxsw_reg_spms_vid_pack(spms_pl, vid, spms_state);
 479
 480        err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl);
 481        kfree(spms_pl);
 482        return err;
 483}
 484
 485static int mlxsw_sp_base_mac_get(struct mlxsw_sp *mlxsw_sp)
 486{
 487        char spad_pl[MLXSW_REG_SPAD_LEN] = {0};
 488        int err;
 489
 490        err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(spad), spad_pl);
 491        if (err)
 492                return err;
 493        mlxsw_reg_spad_base_mac_memcpy_from(spad_pl, mlxsw_sp->base_mac);
 494        return 0;
 495}
 496
 497static int mlxsw_sp_span_init(struct mlxsw_sp *mlxsw_sp)
 498{
 499        int i;
 500
 501        if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_SPAN))
 502                return -EIO;
 503
 504        mlxsw_sp->span.entries_count = MLXSW_CORE_RES_GET(mlxsw_sp->core,
 505                                                          MAX_SPAN);
 506        mlxsw_sp->span.entries = kcalloc(mlxsw_sp->span.entries_count,
 507                                         sizeof(struct mlxsw_sp_span_entry),
 508                                         GFP_KERNEL);
 509        if (!mlxsw_sp->span.entries)
 510                return -ENOMEM;
 511
 512        for (i = 0; i < mlxsw_sp->span.entries_count; i++)
 513                INIT_LIST_HEAD(&mlxsw_sp->span.entries[i].bound_ports_list);
 514
 515        return 0;
 516}
 517
 518static void mlxsw_sp_span_fini(struct mlxsw_sp *mlxsw_sp)
 519{
 520        int i;
 521
 522        for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
 523                struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i];
 524
 525                WARN_ON_ONCE(!list_empty(&curr->bound_ports_list));
 526        }
 527        kfree(mlxsw_sp->span.entries);
 528}
 529
 530static struct mlxsw_sp_span_entry *
 531mlxsw_sp_span_entry_create(struct mlxsw_sp_port *port)
 532{
 533        struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
 534        struct mlxsw_sp_span_entry *span_entry;
 535        char mpat_pl[MLXSW_REG_MPAT_LEN];
 536        u8 local_port = port->local_port;
 537        int index;
 538        int i;
 539        int err;
 540
 541        /* find a free entry to use */
 542        index = -1;
 543        for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
 544                if (!mlxsw_sp->span.entries[i].used) {
 545                        index = i;
 546                        span_entry = &mlxsw_sp->span.entries[i];
 547                        break;
 548                }
 549        }
 550        if (index < 0)
 551                return NULL;
 552
  553        /* create a new port analyzer entry for local_port */
 554        mlxsw_reg_mpat_pack(mpat_pl, index, local_port, true);
 555        err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpat), mpat_pl);
 556        if (err)
 557                return NULL;
 558
 559        span_entry->used = true;
 560        span_entry->id = index;
 561        span_entry->ref_count = 1;
 562        span_entry->local_port = local_port;
 563        return span_entry;
 564}
 565
 566static void mlxsw_sp_span_entry_destroy(struct mlxsw_sp *mlxsw_sp,
 567                                        struct mlxsw_sp_span_entry *span_entry)
 568{
 569        u8 local_port = span_entry->local_port;
 570        char mpat_pl[MLXSW_REG_MPAT_LEN];
 571        int pa_id = span_entry->id;
 572
 573        mlxsw_reg_mpat_pack(mpat_pl, pa_id, local_port, false);
 574        mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpat), mpat_pl);
 575        span_entry->used = false;
 576}
 577
 578static struct mlxsw_sp_span_entry *
 579mlxsw_sp_span_entry_find(struct mlxsw_sp *mlxsw_sp, u8 local_port)
 580{
 581        int i;
 582
 583        for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
 584                struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i];
 585
 586                if (curr->used && curr->local_port == local_port)
 587                        return curr;
 588        }
 589        return NULL;
 590}
 591
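     /* SPAN entries are reference counted: _get() reuses an existing entry
      * for the destination port and bumps ref_count, or creates a new MPAT
      * entry via mlxsw_sp_span_entry_create(); _put() drops a reference and
      * destroys the entry once the count reaches zero.
      */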
  592static struct mlxsw_sp_span_entry *
  593mlxsw_sp_span_entry_get(struct mlxsw_sp_port *port)
 594{
 595        struct mlxsw_sp_span_entry *span_entry;
 596
 597        span_entry = mlxsw_sp_span_entry_find(port->mlxsw_sp,
 598                                              port->local_port);
 599        if (span_entry) {
 600                /* Already exists, just take a reference */
 601                span_entry->ref_count++;
 602                return span_entry;
 603        }
 604
 605        return mlxsw_sp_span_entry_create(port);
 606}
 607
 608static int mlxsw_sp_span_entry_put(struct mlxsw_sp *mlxsw_sp,
 609                                   struct mlxsw_sp_span_entry *span_entry)
 610{
 611        WARN_ON(!span_entry->ref_count);
 612        if (--span_entry->ref_count == 0)
 613                mlxsw_sp_span_entry_destroy(mlxsw_sp, span_entry);
 614        return 0;
 615}
 616
 617static bool mlxsw_sp_span_is_egress_mirror(struct mlxsw_sp_port *port)
 618{
 619        struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
 620        struct mlxsw_sp_span_inspected_port *p;
 621        int i;
 622
 623        for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
 624                struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i];
 625
 626                list_for_each_entry(p, &curr->bound_ports_list, list)
 627                        if (p->local_port == port->local_port &&
 628                            p->type == MLXSW_SP_SPAN_EGRESS)
 629                                return true;
 630        }
 631
 632        return false;
 633}
 634
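     /* The mirror (SBIB) buffer is sized to two and a half MTUs worth of
      * cells plus one extra cell; the 5/2 factor appears to be an empirical
      * headroom choice, as its derivation is not documented here.
      */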
 635static int mlxsw_sp_span_mtu_to_buffsize(const struct mlxsw_sp *mlxsw_sp,
 636                                         int mtu)
 637{
 638        return mlxsw_sp_bytes_cells(mlxsw_sp, mtu * 5 / 2) + 1;
 639}
 640
 641static int mlxsw_sp_span_port_mtu_update(struct mlxsw_sp_port *port, u16 mtu)
 642{
 643        struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
 644        char sbib_pl[MLXSW_REG_SBIB_LEN];
 645        int err;
 646
 647        /* If port is egress mirrored, the shared buffer size should be
 648         * updated according to the mtu value
 649         */
 650        if (mlxsw_sp_span_is_egress_mirror(port)) {
 651                u32 buffsize = mlxsw_sp_span_mtu_to_buffsize(mlxsw_sp, mtu);
 652
 653                mlxsw_reg_sbib_pack(sbib_pl, port->local_port, buffsize);
 654                err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
 655                if (err) {
 656                        netdev_err(port->dev, "Could not update shared buffer for mirroring\n");
 657                        return err;
 658                }
 659        }
 660
 661        return 0;
 662}
 663
 664static struct mlxsw_sp_span_inspected_port *
 665mlxsw_sp_span_entry_bound_port_find(struct mlxsw_sp_port *port,
 666                                    struct mlxsw_sp_span_entry *span_entry)
 667{
 668        struct mlxsw_sp_span_inspected_port *p;
 669
 670        list_for_each_entry(p, &span_entry->bound_ports_list, list)
 671                if (port->local_port == p->local_port)
 672                        return p;
 673        return NULL;
 674}
 675
 676static int
 677mlxsw_sp_span_inspected_port_bind(struct mlxsw_sp_port *port,
 678                                  struct mlxsw_sp_span_entry *span_entry,
 679                                  enum mlxsw_sp_span_type type)
 680{
 681        struct mlxsw_sp_span_inspected_port *inspected_port;
 682        struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
 683        char mpar_pl[MLXSW_REG_MPAR_LEN];
 684        char sbib_pl[MLXSW_REG_SBIB_LEN];
 685        int pa_id = span_entry->id;
 686        int err;
 687
 688        /* if it is an egress SPAN, bind a shared buffer to it */
 689        if (type == MLXSW_SP_SPAN_EGRESS) {
 690                u32 buffsize = mlxsw_sp_span_mtu_to_buffsize(mlxsw_sp,
 691                                                             port->dev->mtu);
 692
 693                mlxsw_reg_sbib_pack(sbib_pl, port->local_port, buffsize);
 694                err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
 695                if (err) {
 696                        netdev_err(port->dev, "Could not create shared buffer for mirroring\n");
 697                        return err;
 698                }
 699        }
 700
 701        /* bind the port to the SPAN entry */
 702        mlxsw_reg_mpar_pack(mpar_pl, port->local_port,
 703                            (enum mlxsw_reg_mpar_i_e) type, true, pa_id);
 704        err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpar), mpar_pl);
 705        if (err)
 706                goto err_mpar_reg_write;
 707
 708        inspected_port = kzalloc(sizeof(*inspected_port), GFP_KERNEL);
 709        if (!inspected_port) {
 710                err = -ENOMEM;
 711                goto err_inspected_port_alloc;
 712        }
 713        inspected_port->local_port = port->local_port;
 714        inspected_port->type = type;
 715        list_add_tail(&inspected_port->list, &span_entry->bound_ports_list);
 716
 717        return 0;
 718
 719err_mpar_reg_write:
 720err_inspected_port_alloc:
 721        if (type == MLXSW_SP_SPAN_EGRESS) {
 722                mlxsw_reg_sbib_pack(sbib_pl, port->local_port, 0);
 723                mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
 724        }
 725        return err;
 726}
 727
 728static void
 729mlxsw_sp_span_inspected_port_unbind(struct mlxsw_sp_port *port,
 730                                    struct mlxsw_sp_span_entry *span_entry,
 731                                    enum mlxsw_sp_span_type type)
 732{
 733        struct mlxsw_sp_span_inspected_port *inspected_port;
 734        struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
 735        char mpar_pl[MLXSW_REG_MPAR_LEN];
 736        char sbib_pl[MLXSW_REG_SBIB_LEN];
 737        int pa_id = span_entry->id;
 738
 739        inspected_port = mlxsw_sp_span_entry_bound_port_find(port, span_entry);
 740        if (!inspected_port)
 741                return;
 742
 743        /* remove the inspected port */
 744        mlxsw_reg_mpar_pack(mpar_pl, port->local_port,
 745                            (enum mlxsw_reg_mpar_i_e) type, false, pa_id);
 746        mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpar), mpar_pl);
 747
 748        /* remove the SBIB buffer if it was egress SPAN */
 749        if (type == MLXSW_SP_SPAN_EGRESS) {
 750                mlxsw_reg_sbib_pack(sbib_pl, port->local_port, 0);
 751                mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
 752        }
 753
 754        mlxsw_sp_span_entry_put(mlxsw_sp, span_entry);
 755
 756        list_del(&inspected_port->list);
 757        kfree(inspected_port);
 758}
 759
 760static int mlxsw_sp_span_mirror_add(struct mlxsw_sp_port *from,
 761                                    struct mlxsw_sp_port *to,
 762                                    enum mlxsw_sp_span_type type)
 763{
 764        struct mlxsw_sp *mlxsw_sp = from->mlxsw_sp;
 765        struct mlxsw_sp_span_entry *span_entry;
 766        int err;
 767
 768        span_entry = mlxsw_sp_span_entry_get(to);
 769        if (!span_entry)
 770                return -ENOENT;
 771
 772        netdev_dbg(from->dev, "Adding inspected port to SPAN entry %d\n",
 773                   span_entry->id);
 774
 775        err = mlxsw_sp_span_inspected_port_bind(from, span_entry, type);
 776        if (err)
 777                goto err_port_bind;
 778
 779        return 0;
 780
 781err_port_bind:
 782        mlxsw_sp_span_entry_put(mlxsw_sp, span_entry);
 783        return err;
 784}
 785
 786static void mlxsw_sp_span_mirror_remove(struct mlxsw_sp_port *from,
 787                                        u8 destination_port,
 788                                        enum mlxsw_sp_span_type type)
 789{
 790        struct mlxsw_sp_span_entry *span_entry;
 791
 792        span_entry = mlxsw_sp_span_entry_find(from->mlxsw_sp,
 793                                              destination_port);
 794        if (!span_entry) {
 795                netdev_err(from->dev, "no span entry found\n");
 796                return;
 797        }
 798
 799        netdev_dbg(from->dev, "removing inspected port from SPAN entry %d\n",
 800                   span_entry->id);
 801        mlxsw_sp_span_inspected_port_unbind(from, span_entry, type);
 802}
 803
 804static int mlxsw_sp_port_sample_set(struct mlxsw_sp_port *mlxsw_sp_port,
 805                                    bool enable, u32 rate)
 806{
 807        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
 808        char mpsc_pl[MLXSW_REG_MPSC_LEN];
 809
 810        mlxsw_reg_mpsc_pack(mpsc_pl, mlxsw_sp_port->local_port, enable, rate);
 811        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpsc), mpsc_pl);
 812}
 813
 814static int mlxsw_sp_port_admin_status_set(struct mlxsw_sp_port *mlxsw_sp_port,
 815                                          bool is_up)
 816{
 817        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
 818        char paos_pl[MLXSW_REG_PAOS_LEN];
 819
 820        mlxsw_reg_paos_pack(paos_pl, mlxsw_sp_port->local_port,
 821                            is_up ? MLXSW_PORT_ADMIN_STATUS_UP :
 822                            MLXSW_PORT_ADMIN_STATUS_DOWN);
 823        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(paos), paos_pl);
 824}
 825
 826static int mlxsw_sp_port_dev_addr_set(struct mlxsw_sp_port *mlxsw_sp_port,
 827                                      unsigned char *addr)
 828{
 829        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
 830        char ppad_pl[MLXSW_REG_PPAD_LEN];
 831
 832        mlxsw_reg_ppad_pack(ppad_pl, true, mlxsw_sp_port->local_port);
 833        mlxsw_reg_ppad_mac_memcpy_to(ppad_pl, addr);
 834        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ppad), ppad_pl);
 835}
 836
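     /* Derive the port MAC from the switch base MAC (read from the SPAD
      * register in mlxsw_sp_base_mac_get()) by adding the local port number
      * to its last byte.
      */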
 837static int mlxsw_sp_port_dev_addr_init(struct mlxsw_sp_port *mlxsw_sp_port)
 838{
 839        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
 840        unsigned char *addr = mlxsw_sp_port->dev->dev_addr;
 841
 842        ether_addr_copy(addr, mlxsw_sp->base_mac);
 843        addr[ETH_ALEN - 1] += mlxsw_sp_port->local_port;
 844        return mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr);
 845}
 846
 847static int mlxsw_sp_port_mtu_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 mtu)
 848{
 849        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
 850        char pmtu_pl[MLXSW_REG_PMTU_LEN];
 851        int max_mtu;
 852        int err;
 853
 854        mtu += MLXSW_TXHDR_LEN + ETH_HLEN;
 855        mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, 0);
 856        err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
 857        if (err)
 858                return err;
 859        max_mtu = mlxsw_reg_pmtu_max_mtu_get(pmtu_pl);
 860
 861        if (mtu > max_mtu)
 862                return -EINVAL;
 863
 864        mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, mtu);
 865        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
 866}
 867
 868static int mlxsw_sp_port_swid_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 swid)
 869{
 870        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
 871        char pspa_pl[MLXSW_REG_PSPA_LEN];
 872
 873        mlxsw_reg_pspa_pack(pspa_pl, swid, mlxsw_sp_port->local_port);
 874        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pspa), pspa_pl);
 875}
 876
 877int mlxsw_sp_port_vp_mode_set(struct mlxsw_sp_port *mlxsw_sp_port, bool enable)
 878{
 879        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
 880        char svpe_pl[MLXSW_REG_SVPE_LEN];
 881
 882        mlxsw_reg_svpe_pack(svpe_pl, mlxsw_sp_port->local_port, enable);
 883        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svpe), svpe_pl);
 884}
 885
 886int mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid,
 887                                   bool learn_enable)
 888{
 889        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
 890        char *spvmlr_pl;
 891        int err;
 892
 893        spvmlr_pl = kmalloc(MLXSW_REG_SPVMLR_LEN, GFP_KERNEL);
 894        if (!spvmlr_pl)
 895                return -ENOMEM;
 896        mlxsw_reg_spvmlr_pack(spvmlr_pl, mlxsw_sp_port->local_port, vid, vid,
 897                              learn_enable);
 898        err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvmlr), spvmlr_pl);
 899        kfree(spvmlr_pl);
 900        return err;
 901}
 902
 903static int __mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port,
 904                                    u16 vid)
 905{
 906        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
 907        char spvid_pl[MLXSW_REG_SPVID_LEN];
 908
 909        mlxsw_reg_spvid_pack(spvid_pl, mlxsw_sp_port->local_port, vid);
 910        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvid), spvid_pl);
 911}
 912
 913static int mlxsw_sp_port_allow_untagged_set(struct mlxsw_sp_port *mlxsw_sp_port,
 914                                            bool allow)
 915{
 916        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
 917        char spaft_pl[MLXSW_REG_SPAFT_LEN];
 918
 919        mlxsw_reg_spaft_pack(spaft_pl, mlxsw_sp_port->local_port, allow);
 920        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spaft), spaft_pl);
 921}
 922
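     /* A PVID of 0 means "no PVID": untagged admission is disabled instead
      * of mapping untagged frames to a VLAN. For a non-zero VID the PVID is
      * programmed first and untagged admission is then enabled, rolling back
      * to the previous PVID on failure.
      */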
 923int mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
 924{
 925        int err;
 926
 927        if (!vid) {
 928                err = mlxsw_sp_port_allow_untagged_set(mlxsw_sp_port, false);
 929                if (err)
 930                        return err;
 931        } else {
 932                err = __mlxsw_sp_port_pvid_set(mlxsw_sp_port, vid);
 933                if (err)
 934                        return err;
 935                err = mlxsw_sp_port_allow_untagged_set(mlxsw_sp_port, true);
 936                if (err)
 937                        goto err_port_allow_untagged_set;
 938        }
 939
 940        mlxsw_sp_port->pvid = vid;
 941        return 0;
 942
 943err_port_allow_untagged_set:
 944        __mlxsw_sp_port_pvid_set(mlxsw_sp_port, mlxsw_sp_port->pvid);
 945        return err;
 946}
 947
 948static int
 949mlxsw_sp_port_system_port_mapping_set(struct mlxsw_sp_port *mlxsw_sp_port)
 950{
 951        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
 952        char sspr_pl[MLXSW_REG_SSPR_LEN];
 953
 954        mlxsw_reg_sspr_pack(sspr_pl, mlxsw_sp_port->local_port);
 955        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sspr), sspr_pl);
 956}
 957
 958static int mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp,
 959                                         u8 local_port, u8 *p_module,
 960                                         u8 *p_width, u8 *p_lane)
 961{
 962        char pmlp_pl[MLXSW_REG_PMLP_LEN];
 963        int err;
 964
 965        mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
 966        err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
 967        if (err)
 968                return err;
 969        *p_module = mlxsw_reg_pmlp_module_get(pmlp_pl, 0);
 970        *p_width = mlxsw_reg_pmlp_width_get(pmlp_pl);
 971        *p_lane = mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, 0);
 972        return 0;
 973}
 974
 975static int mlxsw_sp_port_module_map(struct mlxsw_sp_port *mlxsw_sp_port,
 976                                    u8 module, u8 width, u8 lane)
 977{
 978        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
 979        char pmlp_pl[MLXSW_REG_PMLP_LEN];
 980        int i;
 981
 982        mlxsw_reg_pmlp_pack(pmlp_pl, mlxsw_sp_port->local_port);
 983        mlxsw_reg_pmlp_width_set(pmlp_pl, width);
 984        for (i = 0; i < width; i++) {
 985                mlxsw_reg_pmlp_module_set(pmlp_pl, i, module);
 986                mlxsw_reg_pmlp_tx_lane_set(pmlp_pl, i, lane + i);  /* Rx & Tx */
 987        }
 988
 989        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
 990}
 991
 992static int mlxsw_sp_port_module_unmap(struct mlxsw_sp_port *mlxsw_sp_port)
 993{
 994        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
 995        char pmlp_pl[MLXSW_REG_PMLP_LEN];
 996
 997        mlxsw_reg_pmlp_pack(pmlp_pl, mlxsw_sp_port->local_port);
 998        mlxsw_reg_pmlp_width_set(pmlp_pl, 0);
 999        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
1000}
1001
1002static int mlxsw_sp_port_open(struct net_device *dev)
1003{
1004        struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1005        int err;
1006
1007        err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);
1008        if (err)
1009                return err;
1010        netif_start_queue(dev);
1011        return 0;
1012}
1013
1014static int mlxsw_sp_port_stop(struct net_device *dev)
1015{
1016        struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1017
1018        netif_stop_queue(dev);
1019        return mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
1020}
1021
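     /* The transmit path guarantees MLXSW_TXHDR_LEN bytes of headroom
      * (reallocating the skb if necessary), pads the frame to the minimum
      * Ethernet length, prepends the TX header and hands the skb to
      * mlxsw_core_skb_transmit(). The header bytes are excluded from the
      * byte counters since the device strips them.
      */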
1022static netdev_tx_t mlxsw_sp_port_xmit(struct sk_buff *skb,
1023                                      struct net_device *dev)
1024{
1025        struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1026        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1027        struct mlxsw_sp_port_pcpu_stats *pcpu_stats;
1028        const struct mlxsw_tx_info tx_info = {
1029                .local_port = mlxsw_sp_port->local_port,
1030                .is_emad = false,
1031        };
1032        u64 len;
1033        int err;
1034
1035        if (mlxsw_core_skb_transmit_busy(mlxsw_sp->core, &tx_info))
1036                return NETDEV_TX_BUSY;
1037
1038        if (unlikely(skb_headroom(skb) < MLXSW_TXHDR_LEN)) {
1039                struct sk_buff *skb_orig = skb;
1040
1041                skb = skb_realloc_headroom(skb, MLXSW_TXHDR_LEN);
1042                if (!skb) {
1043                        this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
1044                        dev_kfree_skb_any(skb_orig);
1045                        return NETDEV_TX_OK;
1046                }
1047                dev_consume_skb_any(skb_orig);
1048        }
1049
1050        if (eth_skb_pad(skb)) {
1051                this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
1052                return NETDEV_TX_OK;
1053        }
1054
1055        mlxsw_sp_txhdr_construct(skb, &tx_info);
1056        /* TX header is consumed by HW on the way so we shouldn't count its
1057         * bytes as being sent.
1058         */
1059        len = skb->len - MLXSW_TXHDR_LEN;
1060
1061        /* Due to a race we might fail here because of a full queue. In that
1062         * unlikely case we simply drop the packet.
1063         */
1064        err = mlxsw_core_skb_transmit(mlxsw_sp->core, skb, &tx_info);
1065
1066        if (!err) {
1067                pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats);
1068                u64_stats_update_begin(&pcpu_stats->syncp);
1069                pcpu_stats->tx_packets++;
1070                pcpu_stats->tx_bytes += len;
1071                u64_stats_update_end(&pcpu_stats->syncp);
1072        } else {
1073                this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
1074                dev_kfree_skb_any(skb);
1075        }
1076        return NETDEV_TX_OK;
1077}
1078
1079static void mlxsw_sp_set_rx_mode(struct net_device *dev)
1080{
1081}
1082
1083static int mlxsw_sp_port_set_mac_address(struct net_device *dev, void *p)
1084{
1085        struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1086        struct sockaddr *addr = p;
1087        int err;
1088
1089        if (!is_valid_ether_addr(addr->sa_data))
1090                return -EADDRNOTAVAIL;
1091
1092        err = mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr->sa_data);
1093        if (err)
1094                return err;
1095        memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
1096        return 0;
1097}
1098
1099static u16 mlxsw_sp_pg_buf_threshold_get(const struct mlxsw_sp *mlxsw_sp,
1100                                         int mtu)
1101{
1102        return 2 * mlxsw_sp_bytes_cells(mlxsw_sp, mtu);
1103}
1104
1105#define MLXSW_SP_CELL_FACTOR 2  /* 2 * cell_size / (IPG + cell_size + 1) */
1106
1107static u16 mlxsw_sp_pfc_delay_get(const struct mlxsw_sp *mlxsw_sp, int mtu,
1108                                  u16 delay)
1109{
1110        delay = mlxsw_sp_bytes_cells(mlxsw_sp, DIV_ROUND_UP(delay,
1111                                                            BITS_PER_BYTE));
1112        return MLXSW_SP_CELL_FACTOR * delay + mlxsw_sp_bytes_cells(mlxsw_sp,
1113                                                                   mtu);
1114}
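     /* In cell units the above works out to roughly
      * 2 * cells(delay / BITS_PER_BYTE) + cells(mtu): the division suggests
      * the PFC delay is expressed in bit times, the factor of two is
      * MLXSW_SP_CELL_FACTOR, and the extra MTU presumably absorbs a
      * maximally-sized packet already in flight when PFC is asserted.
      */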
1115
1116/* Maximum delay buffer needed in case of PAUSE frames, in bytes.
1117 * Assumes 100m cable and maximum MTU.
1118 */
1119#define MLXSW_SP_PAUSE_DELAY 58752
1120
1121static u16 mlxsw_sp_pg_buf_delay_get(const struct mlxsw_sp *mlxsw_sp, int mtu,
1122                                     u16 delay, bool pfc, bool pause)
1123{
1124        if (pfc)
1125                return mlxsw_sp_pfc_delay_get(mlxsw_sp, mtu, delay);
1126        else if (pause)
1127                return mlxsw_sp_bytes_cells(mlxsw_sp, MLXSW_SP_PAUSE_DELAY);
1128        else
1129                return 0;
1130}
1131
1132static void mlxsw_sp_pg_buf_pack(char *pbmc_pl, int index, u16 size, u16 thres,
1133                                 bool lossy)
1134{
1135        if (lossy)
1136                mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl, index, size);
1137        else
1138                mlxsw_reg_pbmc_lossless_buffer_pack(pbmc_pl, index, size,
1139                                                    thres);
1140}
1141
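     /* Walk all IEEE_8021QAZ_MAX_TCS priority groups; a PG is reconfigured
      * only if at least one priority maps to it. Each configured PG gets a
      * buffer of (threshold + delay) cells and is lossless when PFC is
      * enabled for a mapped priority or global pause is on, lossy otherwise.
      */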
1142int __mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port, int mtu,
1143                                 u8 *prio_tc, bool pause_en,
1144                                 struct ieee_pfc *my_pfc)
1145{
1146        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1147        u8 pfc_en = !!my_pfc ? my_pfc->pfc_en : 0;
1148        u16 delay = !!my_pfc ? my_pfc->delay : 0;
1149        char pbmc_pl[MLXSW_REG_PBMC_LEN];
1150        int i, j, err;
1151
1152        mlxsw_reg_pbmc_pack(pbmc_pl, mlxsw_sp_port->local_port, 0, 0);
1153        err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl);
1154        if (err)
1155                return err;
1156
1157        for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
1158                bool configure = false;
1159                bool pfc = false;
1160                bool lossy;
1161                u16 thres;
1162
1163                for (j = 0; j < IEEE_8021QAZ_MAX_TCS; j++) {
1164                        if (prio_tc[j] == i) {
1165                                pfc = pfc_en & BIT(j);
1166                                configure = true;
1167                                break;
1168                        }
1169                }
1170
1171                if (!configure)
1172                        continue;
1173
1174                lossy = !(pfc || pause_en);
1175                thres = mlxsw_sp_pg_buf_threshold_get(mlxsw_sp, mtu);
1176                delay = mlxsw_sp_pg_buf_delay_get(mlxsw_sp, mtu, delay, pfc,
1177                                                  pause_en);
1178                mlxsw_sp_pg_buf_pack(pbmc_pl, i, thres + delay, thres, lossy);
1179        }
1180
1181        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl);
1182}
1183
1184static int mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port,
1185                                      int mtu, bool pause_en)
1186{
1187        u8 def_prio_tc[IEEE_8021QAZ_MAX_TCS] = {0};
1188        bool dcb_en = !!mlxsw_sp_port->dcb.ets;
1189        struct ieee_pfc *my_pfc;
1190        u8 *prio_tc;
1191
1192        prio_tc = dcb_en ? mlxsw_sp_port->dcb.ets->prio_tc : def_prio_tc;
1193        my_pfc = dcb_en ? mlxsw_sp_port->dcb.pfc : NULL;
1194
1195        return __mlxsw_sp_port_headroom_set(mlxsw_sp_port, mtu, prio_tc,
1196                                            pause_en, my_pfc);
1197}
1198
1199static int mlxsw_sp_port_change_mtu(struct net_device *dev, int mtu)
1200{
1201        struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1202        bool pause_en = mlxsw_sp_port_is_pause_en(mlxsw_sp_port);
1203        int err;
1204
1205        err = mlxsw_sp_port_headroom_set(mlxsw_sp_port, mtu, pause_en);
1206        if (err)
1207                return err;
1208        err = mlxsw_sp_span_port_mtu_update(mlxsw_sp_port, mtu);
1209        if (err)
1210                goto err_span_port_mtu_update;
1211        err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, mtu);
1212        if (err)
1213                goto err_port_mtu_set;
1214        dev->mtu = mtu;
1215        return 0;
1216
1217err_port_mtu_set:
1218        mlxsw_sp_span_port_mtu_update(mlxsw_sp_port, dev->mtu);
1219err_span_port_mtu_update:
1220        mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en);
1221        return err;
1222}
1223
1224static int
1225mlxsw_sp_port_get_sw_stats64(const struct net_device *dev,
1226                             struct rtnl_link_stats64 *stats)
1227{
1228        struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1229        struct mlxsw_sp_port_pcpu_stats *p;
1230        u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
1231        u32 tx_dropped = 0;
1232        unsigned int start;
1233        int i;
1234
1235        for_each_possible_cpu(i) {
1236                p = per_cpu_ptr(mlxsw_sp_port->pcpu_stats, i);
1237                do {
1238                        start = u64_stats_fetch_begin_irq(&p->syncp);
1239                        rx_packets      = p->rx_packets;
1240                        rx_bytes        = p->rx_bytes;
1241                        tx_packets      = p->tx_packets;
1242                        tx_bytes        = p->tx_bytes;
1243                } while (u64_stats_fetch_retry_irq(&p->syncp, start));
1244
1245                stats->rx_packets       += rx_packets;
1246                stats->rx_bytes         += rx_bytes;
1247                stats->tx_packets       += tx_packets;
1248                stats->tx_bytes         += tx_bytes;
1249                /* tx_dropped is u32, updated without syncp protection. */
1250                tx_dropped      += p->tx_dropped;
1251        }
1252        stats->tx_dropped       = tx_dropped;
1253        return 0;
1254}
1255
1256static bool mlxsw_sp_port_has_offload_stats(const struct net_device *dev, int attr_id)
1257{
1258        switch (attr_id) {
1259        case IFLA_OFFLOAD_XSTATS_CPU_HIT:
1260                return true;
1261        }
1262
1263        return false;
1264}
1265
1266static int mlxsw_sp_port_get_offload_stats(int attr_id, const struct net_device *dev,
1267                                           void *sp)
1268{
1269        switch (attr_id) {
1270        case IFLA_OFFLOAD_XSTATS_CPU_HIT:
1271                return mlxsw_sp_port_get_sw_stats64(dev, sp);
1272        }
1273
1274        return -EINVAL;
1275}
1276
1277static int mlxsw_sp_port_get_stats_raw(struct net_device *dev, int grp,
1278                                       int prio, char *ppcnt_pl)
1279{
1280        struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1281        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1282
1283        mlxsw_reg_ppcnt_pack(ppcnt_pl, mlxsw_sp_port->local_port, grp, prio);
1284        return mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ppcnt), ppcnt_pl);
1285}
1286
1287static int mlxsw_sp_port_get_hw_stats(struct net_device *dev,
1288                                      struct rtnl_link_stats64 *stats)
1289{
1290        char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
1291        int err;
1292
1293        err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_IEEE_8023_CNT,
1294                                          0, ppcnt_pl);
1295        if (err)
1296                goto out;
1297
1298        stats->tx_packets =
1299                mlxsw_reg_ppcnt_a_frames_transmitted_ok_get(ppcnt_pl);
1300        stats->rx_packets =
1301                mlxsw_reg_ppcnt_a_frames_received_ok_get(ppcnt_pl);
1302        stats->tx_bytes =
1303                mlxsw_reg_ppcnt_a_octets_transmitted_ok_get(ppcnt_pl);
1304        stats->rx_bytes =
1305                mlxsw_reg_ppcnt_a_octets_received_ok_get(ppcnt_pl);
1306        stats->multicast =
1307                mlxsw_reg_ppcnt_a_multicast_frames_received_ok_get(ppcnt_pl);
1308
1309        stats->rx_crc_errors =
1310                mlxsw_reg_ppcnt_a_frame_check_sequence_errors_get(ppcnt_pl);
1311        stats->rx_frame_errors =
1312                mlxsw_reg_ppcnt_a_alignment_errors_get(ppcnt_pl);
1313
1314        stats->rx_length_errors = (
1315                mlxsw_reg_ppcnt_a_in_range_length_errors_get(ppcnt_pl) +
1316                mlxsw_reg_ppcnt_a_out_of_range_length_field_get(ppcnt_pl) +
1317                mlxsw_reg_ppcnt_a_frame_too_long_errors_get(ppcnt_pl));
1318
1319        stats->rx_errors = (stats->rx_crc_errors +
1320                stats->rx_frame_errors + stats->rx_length_errors);
1321
1322out:
1323        return err;
1324}
1325
1326static void update_stats_cache(struct work_struct *work)
1327{
1328        struct mlxsw_sp_port *mlxsw_sp_port =
1329                container_of(work, struct mlxsw_sp_port,
1330                             hw_stats.update_dw.work);
1331
1332        if (!netif_carrier_ok(mlxsw_sp_port->dev))
1333                goto out;
1334
1335        mlxsw_sp_port_get_hw_stats(mlxsw_sp_port->dev,
1336                                   mlxsw_sp_port->hw_stats.cache);
1337
1338out:
1339        mlxsw_core_schedule_dw(&mlxsw_sp_port->hw_stats.update_dw,
1340                               MLXSW_HW_STATS_UPDATE_TIME);
1341}
1342
1343/* Return the stats from a cache that is updated periodically,
1344 * as this function might get called in an atomic context.
1345 */
1346static void
1347mlxsw_sp_port_get_stats64(struct net_device *dev,
1348                          struct rtnl_link_stats64 *stats)
1349{
1350        struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1351
1352        memcpy(stats, mlxsw_sp_port->hw_stats.cache, sizeof(*stats));
1353}
1354
1355static int __mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port,
1356                                    u16 vid_begin, u16 vid_end,
1357                                    bool is_member, bool untagged)
1358{
1359        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1360        char *spvm_pl;
1361        int err;
1362
1363        spvm_pl = kmalloc(MLXSW_REG_SPVM_LEN, GFP_KERNEL);
1364        if (!spvm_pl)
1365                return -ENOMEM;
1366
1367        mlxsw_reg_spvm_pack(spvm_pl, mlxsw_sp_port->local_port, vid_begin,
1368                            vid_end, is_member, untagged);
1369        err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvm), spvm_pl);
1370        kfree(spvm_pl);
1371        return err;
1372}
1373
1374int mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid_begin,
1375                           u16 vid_end, bool is_member, bool untagged)
1376{
1377        u16 vid, vid_e;
1378        int err;
1379
1380        for (vid = vid_begin; vid <= vid_end;
1381             vid += MLXSW_REG_SPVM_REC_MAX_COUNT) {
1382                vid_e = min((u16) (vid + MLXSW_REG_SPVM_REC_MAX_COUNT - 1),
1383                            vid_end);
1384
1385                err = __mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid_e,
1386                                               is_member, untagged);
1387                if (err)
1388                        return err;
1389        }
1390
1391        return 0;
1392}
1393
1394static void mlxsw_sp_port_vlan_flush(struct mlxsw_sp_port *mlxsw_sp_port)
1395{
1396        struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan, *tmp;
1397
1398        list_for_each_entry_safe(mlxsw_sp_port_vlan, tmp,
1399                                 &mlxsw_sp_port->vlans_list, list)
1400                mlxsw_sp_port_vlan_put(mlxsw_sp_port_vlan);
1401}
1402
1403static struct mlxsw_sp_port_vlan *
1404mlxsw_sp_port_vlan_create(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
1405{
1406        struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
1407        bool untagged = vid == 1;
1408        int err;
1409
1410        err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, true, untagged);
1411        if (err)
1412                return ERR_PTR(err);
1413
1414        mlxsw_sp_port_vlan = kzalloc(sizeof(*mlxsw_sp_port_vlan), GFP_KERNEL);
1415        if (!mlxsw_sp_port_vlan) {
1416                err = -ENOMEM;
1417                goto err_port_vlan_alloc;
1418        }
1419
1420        mlxsw_sp_port_vlan->mlxsw_sp_port = mlxsw_sp_port;
1421        mlxsw_sp_port_vlan->vid = vid;
1422        list_add(&mlxsw_sp_port_vlan->list, &mlxsw_sp_port->vlans_list);
1423
1424        return mlxsw_sp_port_vlan;
1425
1426err_port_vlan_alloc:
1427        mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false);
1428        return ERR_PTR(err);
1429}
1430
1431static void
1432mlxsw_sp_port_vlan_destroy(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
1433{
1434        struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
1435        u16 vid = mlxsw_sp_port_vlan->vid;
1436
1437        list_del(&mlxsw_sp_port_vlan->list);
1438        kfree(mlxsw_sp_port_vlan);
1439        mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false);
1440}
1441
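/* {Port, VID} entries follow a get/put convention: _get() returns the
 * existing entry for the VID or creates a new one (installing the VID in the
 * port's VLAN filter), while _put() detaches the entry from its bridge port
 * or router FID, if any, before destroying it and removing the VID from the
 * filter.
 */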
1442struct mlxsw_sp_port_vlan *
1443mlxsw_sp_port_vlan_get(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
1444{
1445        struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
1446
1447        mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
1448        if (mlxsw_sp_port_vlan)
1449                return mlxsw_sp_port_vlan;
1450
1451        return mlxsw_sp_port_vlan_create(mlxsw_sp_port, vid);
1452}
1453
1454void mlxsw_sp_port_vlan_put(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
1455{
1456        struct mlxsw_sp_fid *fid = mlxsw_sp_port_vlan->fid;
1457
1458        if (mlxsw_sp_port_vlan->bridge_port)
1459                mlxsw_sp_port_vlan_bridge_leave(mlxsw_sp_port_vlan);
1460        else if (fid)
1461                mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan);
1462
1463        mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan);
1464}
1465
1466static int mlxsw_sp_port_add_vid(struct net_device *dev,
1467                                 __be16 __always_unused proto, u16 vid)
1468{
1469        struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1470
1471        /* VLAN 0 is added to the HW filter when the device goes up, but it
1472         * is reserved in our case, so simply return.
1473         */
1474        if (!vid)
1475                return 0;
1476
1477        return PTR_ERR_OR_ZERO(mlxsw_sp_port_vlan_get(mlxsw_sp_port, vid));
1478}
1479
1480static int mlxsw_sp_port_kill_vid(struct net_device *dev,
1481                                  __be16 __always_unused proto, u16 vid)
1482{
1483        struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1484        struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
1485
1486        /* VLAN 0 is removed from the HW filter when the device goes down,
1487         * but it is reserved in our case, so simply return.
1488         */
1489        if (!vid)
1490                return 0;
1491
1492        mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
1493        if (!mlxsw_sp_port_vlan)
1494                return 0;
1495        mlxsw_sp_port_vlan_put(mlxsw_sp_port_vlan);
1496
1497        return 0;
1498}
1499
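/* Front-panel names are derived from the module (front-panel port) number:
 * "p<module + 1>" for a regular port and "p<module + 1>s<subport>" for a
 * split port, where the subport index is the lane divided by the port width.
 */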
1500static int mlxsw_sp_port_get_phys_port_name(struct net_device *dev, char *name,
1501                                            size_t len)
1502{
1503        struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1504        u8 module = mlxsw_sp_port->mapping.module;
1505        u8 width = mlxsw_sp_port->mapping.width;
1506        u8 lane = mlxsw_sp_port->mapping.lane;
1507        int err;
1508
1509        if (!mlxsw_sp_port->split)
1510                err = snprintf(name, len, "p%d", module + 1);
1511        else
1512                err = snprintf(name, len, "p%ds%d", module + 1,
1513                               lane / width);
1514
1515        if (err >= len)
1516                return -EINVAL;
1517
1518        return 0;
1519}
1520
1521static struct mlxsw_sp_port_mall_tc_entry *
1522mlxsw_sp_port_mall_tc_entry_find(struct mlxsw_sp_port *port,
1523                                 unsigned long cookie) {
1524        struct mlxsw_sp_port_mall_tc_entry *mall_tc_entry;
1525
1526        list_for_each_entry(mall_tc_entry, &port->mall_tc_list, list)
1527                if (mall_tc_entry->cookie == cookie)
1528                        return mall_tc_entry;
1529
1530        return NULL;
1531}
1532
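/* Offload a matchall mirror action: resolve the destination netdev from the
 * mirred action, make sure it is another Spectrum port and set up a SPAN
 * (port analyzer) session towards it in the requested direction.
 */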
1533static int
1534mlxsw_sp_port_add_cls_matchall_mirror(struct mlxsw_sp_port *mlxsw_sp_port,
1535                                      struct mlxsw_sp_port_mall_mirror_tc_entry *mirror,
1536                                      const struct tc_action *a,
1537                                      bool ingress)
1538{
1539        struct net *net = dev_net(mlxsw_sp_port->dev);
1540        enum mlxsw_sp_span_type span_type;
1541        struct mlxsw_sp_port *to_port;
1542        struct net_device *to_dev;
1543        int ifindex;
1544
1545        ifindex = tcf_mirred_ifindex(a);
1546        to_dev = __dev_get_by_index(net, ifindex);
1547        if (!to_dev) {
1548                netdev_err(mlxsw_sp_port->dev, "Could not find requested device\n");
1549                return -EINVAL;
1550        }
1551
1552        if (!mlxsw_sp_port_dev_check(to_dev)) {
1553                netdev_err(mlxsw_sp_port->dev, "Cannot mirror to a non-spectrum port\n");
1554                return -EOPNOTSUPP;
1555        }
1556        to_port = netdev_priv(to_dev);
1557
1558        mirror->to_local_port = to_port->local_port;
1559        mirror->ingress = ingress;
1560        span_type = ingress ? MLXSW_SP_SPAN_INGRESS : MLXSW_SP_SPAN_EGRESS;
1561        return mlxsw_sp_span_mirror_add(mlxsw_sp_port, to_port, span_type);
1562}
1563
1564static void
1565mlxsw_sp_port_del_cls_matchall_mirror(struct mlxsw_sp_port *mlxsw_sp_port,
1566                                      struct mlxsw_sp_port_mall_mirror_tc_entry *mirror)
1567{
1568        enum mlxsw_sp_span_type span_type;
1569
1570        span_type = mirror->ingress ?
1571                        MLXSW_SP_SPAN_INGRESS : MLXSW_SP_SPAN_EGRESS;
1572        mlxsw_sp_span_mirror_remove(mlxsw_sp_port, mirror->to_local_port,
1573                                    span_type);
1574}
1575
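/* Offload a matchall sample action: only one sampler may be active on a port
 * at any given time and the sampling rate is bounded by what the MPSC
 * register supports. Sampled packets are handed to the psample module for
 * delivery to user space.
 */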
1576static int
1577mlxsw_sp_port_add_cls_matchall_sample(struct mlxsw_sp_port *mlxsw_sp_port,
1578                                      struct tc_cls_matchall_offload *cls,
1579                                      const struct tc_action *a,
1580                                      bool ingress)
1581{
1582        int err;
1583
1584        if (!mlxsw_sp_port->sample)
1585                return -EOPNOTSUPP;
1586        if (rtnl_dereference(mlxsw_sp_port->sample->psample_group)) {
1587                netdev_err(mlxsw_sp_port->dev, "sample already active\n");
1588                return -EEXIST;
1589        }
1590        if (tcf_sample_rate(a) > MLXSW_REG_MPSC_RATE_MAX) {
1591                netdev_err(mlxsw_sp_port->dev, "sample rate not supported\n");
1592                return -EOPNOTSUPP;
1593        }
1594
1595        rcu_assign_pointer(mlxsw_sp_port->sample->psample_group,
1596                           tcf_sample_psample_group(a));
1597        mlxsw_sp_port->sample->truncate = tcf_sample_truncate(a);
1598        mlxsw_sp_port->sample->trunc_size = tcf_sample_trunc_size(a);
1599        mlxsw_sp_port->sample->rate = tcf_sample_rate(a);
1600
1601        err = mlxsw_sp_port_sample_set(mlxsw_sp_port, true, tcf_sample_rate(a));
1602        if (err)
1603                goto err_port_sample_set;
1604        return 0;
1605
1606err_port_sample_set:
1607        RCU_INIT_POINTER(mlxsw_sp_port->sample->psample_group, NULL);
1608        return err;
1609}
1610
1611static void
1612mlxsw_sp_port_del_cls_matchall_sample(struct mlxsw_sp_port *mlxsw_sp_port)
1613{
1614        if (!mlxsw_sp_port->sample)
1615                return;
1616
1617        mlxsw_sp_port_sample_set(mlxsw_sp_port, false, 1);
1618        RCU_INIT_POINTER(mlxsw_sp_port->sample->psample_group, NULL);
1619}
1620
1621static int mlxsw_sp_port_add_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port,
1622                                          struct tc_cls_matchall_offload *f,
1623                                          bool ingress)
1624{
1625        struct mlxsw_sp_port_mall_tc_entry *mall_tc_entry;
1626        __be16 protocol = f->common.protocol;
1627        const struct tc_action *a;
1628        LIST_HEAD(actions);
1629        int err;
1630
1631        if (!tcf_exts_has_one_action(f->exts)) {
1632                netdev_err(mlxsw_sp_port->dev, "only singular actions are supported\n");
1633                return -EOPNOTSUPP;
1634        }
1635
1636        mall_tc_entry = kzalloc(sizeof(*mall_tc_entry), GFP_KERNEL);
1637        if (!mall_tc_entry)
1638                return -ENOMEM;
1639        mall_tc_entry->cookie = f->cookie;
1640
1641        tcf_exts_to_list(f->exts, &actions);
1642        a = list_first_entry(&actions, struct tc_action, list);
1643
1644        if (is_tcf_mirred_egress_mirror(a) && protocol == htons(ETH_P_ALL)) {
1645                struct mlxsw_sp_port_mall_mirror_tc_entry *mirror;
1646
1647                mall_tc_entry->type = MLXSW_SP_PORT_MALL_MIRROR;
1648                mirror = &mall_tc_entry->mirror;
1649                err = mlxsw_sp_port_add_cls_matchall_mirror(mlxsw_sp_port,
1650                                                            mirror, a, ingress);
1651        } else if (is_tcf_sample(a) && protocol == htons(ETH_P_ALL)) {
1652                mall_tc_entry->type = MLXSW_SP_PORT_MALL_SAMPLE;
1653                err = mlxsw_sp_port_add_cls_matchall_sample(mlxsw_sp_port, f,
1654                                                            a, ingress);
1655        } else {
1656                err = -EOPNOTSUPP;
1657        }
1658
1659        if (err)
1660                goto err_add_action;
1661
1662        list_add_tail(&mall_tc_entry->list, &mlxsw_sp_port->mall_tc_list);
1663        return 0;
1664
1665err_add_action:
1666        kfree(mall_tc_entry);
1667        return err;
1668}
1669
1670static void mlxsw_sp_port_del_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port,
1671                                           struct tc_cls_matchall_offload *f)
1672{
1673        struct mlxsw_sp_port_mall_tc_entry *mall_tc_entry;
1674
1675        mall_tc_entry = mlxsw_sp_port_mall_tc_entry_find(mlxsw_sp_port,
1676                                                         f->cookie);
1677        if (!mall_tc_entry) {
1678                netdev_dbg(mlxsw_sp_port->dev, "tc entry not found on port\n");
1679                return;
1680        }
1681        list_del(&mall_tc_entry->list);
1682
1683        switch (mall_tc_entry->type) {
1684        case MLXSW_SP_PORT_MALL_MIRROR:
1685                mlxsw_sp_port_del_cls_matchall_mirror(mlxsw_sp_port,
1686                                                      &mall_tc_entry->mirror);
1687                break;
1688        case MLXSW_SP_PORT_MALL_SAMPLE:
1689                mlxsw_sp_port_del_cls_matchall_sample(mlxsw_sp_port);
1690                break;
1691        default:
1692                WARN_ON(1);
1693        }
1694
1695        kfree(mall_tc_entry);
1696}
1697
1698static int mlxsw_sp_setup_tc_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port,
1699                                          struct tc_cls_matchall_offload *f)
1700{
1701        bool ingress;
1702
1703        if (is_classid_clsact_ingress(f->common.classid))
1704                ingress = true;
1705        else if (is_classid_clsact_egress(f->common.classid))
1706                ingress = false;
1707        else
1708                return -EOPNOTSUPP;
1709
1710        if (f->common.chain_index)
1711                return -EOPNOTSUPP;
1712
1713        switch (f->command) {
1714        case TC_CLSMATCHALL_REPLACE:
1715                return mlxsw_sp_port_add_cls_matchall(mlxsw_sp_port, f,
1716                                                      ingress);
1717        case TC_CLSMATCHALL_DESTROY:
1718                mlxsw_sp_port_del_cls_matchall(mlxsw_sp_port, f);
1719                return 0;
1720        default:
1721                return -EOPNOTSUPP;
1722        }
1723}
1724
1725static int
1726mlxsw_sp_setup_tc_cls_flower(struct mlxsw_sp_port *mlxsw_sp_port,
1727                             struct tc_cls_flower_offload *f)
1728{
1729        bool ingress;
1730
1731        if (is_classid_clsact_ingress(f->common.classid))
1732                ingress = true;
1733        else if (is_classid_clsact_egress(f->common.classid))
1734                ingress = false;
1735        else
1736                return -EOPNOTSUPP;
1737
1738        switch (f->command) {
1739        case TC_CLSFLOWER_REPLACE:
1740                return mlxsw_sp_flower_replace(mlxsw_sp_port, ingress, f);
1741        case TC_CLSFLOWER_DESTROY:
1742                mlxsw_sp_flower_destroy(mlxsw_sp_port, ingress, f);
1743                return 0;
1744        case TC_CLSFLOWER_STATS:
1745                return mlxsw_sp_flower_stats(mlxsw_sp_port, ingress, f);
1746        default:
1747                return -EOPNOTSUPP;
1748        }
1749}
1750
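/* Entry point for TC offload. Matchall and flower classifiers attached to a
 * clsact ingress or egress hook are offloaded; everything else is rejected
 * with -EOPNOTSUPP.
 *
 * For illustration only (interface names are placeholders, not taken from
 * this file), a mirror or a sampler might be installed from user space
 * roughly like this:
 *
 *   tc qdisc add dev sw1p1 clsact
 *   tc filter add dev sw1p1 ingress matchall skip_sw \
 *           action mirred egress mirror dev sw1p2
 *   tc filter add dev sw1p1 ingress matchall skip_sw \
 *           action sample rate 100 group 1
 */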
1751static int mlxsw_sp_setup_tc(struct net_device *dev, enum tc_setup_type type,
1752                             void *type_data)
1753{
1754        struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1755
1756        switch (type) {
1757        case TC_SETUP_CLSMATCHALL:
1758                return mlxsw_sp_setup_tc_cls_matchall(mlxsw_sp_port, type_data);
1759        case TC_SETUP_CLSFLOWER:
1760                return mlxsw_sp_setup_tc_cls_flower(mlxsw_sp_port, type_data);
1761        default:
1762                return -EOPNOTSUPP;
1763        }
1764}
1765
1766static const struct net_device_ops mlxsw_sp_port_netdev_ops = {
1767        .ndo_size               = sizeof(struct net_device_ops),
1768        .ndo_open               = mlxsw_sp_port_open,
1769        .ndo_stop               = mlxsw_sp_port_stop,
1770        .ndo_start_xmit         = mlxsw_sp_port_xmit,
1771        .extended.ndo_setup_tc_rh = mlxsw_sp_setup_tc,
1772        .ndo_set_rx_mode        = mlxsw_sp_set_rx_mode,
1773        .ndo_set_mac_address    = mlxsw_sp_port_set_mac_address,
1774        .extended.ndo_change_mtu = mlxsw_sp_port_change_mtu,
1775        .ndo_get_stats64        = mlxsw_sp_port_get_stats64,
1776        .extended.ndo_has_offload_stats = mlxsw_sp_port_has_offload_stats,
1777        .extended.ndo_get_offload_stats = mlxsw_sp_port_get_offload_stats,
1778        .ndo_vlan_rx_add_vid    = mlxsw_sp_port_add_vid,
1779        .ndo_vlan_rx_kill_vid   = mlxsw_sp_port_kill_vid,
1780        .extended.ndo_get_phys_port_name        = mlxsw_sp_port_get_phys_port_name,
1781};
1782
1783static void mlxsw_sp_port_get_drvinfo(struct net_device *dev,
1784                                      struct ethtool_drvinfo *drvinfo)
1785{
1786        struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1787        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1788
1789        strlcpy(drvinfo->driver, mlxsw_sp_driver_name, sizeof(drvinfo->driver));
1790        strlcpy(drvinfo->version, mlxsw_sp_driver_version,
1791                sizeof(drvinfo->version));
1792        snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
1793                 "%d.%d.%d",
1794                 mlxsw_sp->bus_info->fw_rev.major,
1795                 mlxsw_sp->bus_info->fw_rev.minor,
1796                 mlxsw_sp->bus_info->fw_rev.subminor);
1797        strlcpy(drvinfo->bus_info, mlxsw_sp->bus_info->device_name,
1798                sizeof(drvinfo->bus_info));
1799}
1800
1801static void mlxsw_sp_port_get_pauseparam(struct net_device *dev,
1802                                         struct ethtool_pauseparam *pause)
1803{
1804        struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1805
1806        pause->rx_pause = mlxsw_sp_port->link.rx_pause;
1807        pause->tx_pause = mlxsw_sp_port->link.tx_pause;
1808}
1809
1810static int mlxsw_sp_port_pause_set(struct mlxsw_sp_port *mlxsw_sp_port,
1811                                   struct ethtool_pauseparam *pause)
1812{
1813        char pfcc_pl[MLXSW_REG_PFCC_LEN];
1814
1815        mlxsw_reg_pfcc_pack(pfcc_pl, mlxsw_sp_port->local_port);
1816        mlxsw_reg_pfcc_pprx_set(pfcc_pl, pause->rx_pause);
1817        mlxsw_reg_pfcc_pptx_set(pfcc_pl, pause->tx_pause);
1818
1819        return mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pfcc),
1820                               pfcc_pl);
1821}
1822
1823static int mlxsw_sp_port_set_pauseparam(struct net_device *dev,
1824                                        struct ethtool_pauseparam *pause)
1825{
1826        struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1827        bool pause_en = pause->tx_pause || pause->rx_pause;
1828        int err;
1829
1830        if (mlxsw_sp_port->dcb.pfc && mlxsw_sp_port->dcb.pfc->pfc_en) {
1831                netdev_err(dev, "PFC already enabled on port\n");
1832                return -EINVAL;
1833        }
1834
1835        if (pause->autoneg) {
1836                netdev_err(dev, "PAUSE frames autonegotiation isn't supported\n");
1837                return -EINVAL;
1838        }
1839
1840        err = mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en);
1841        if (err) {
1842                netdev_err(dev, "Failed to configure port's headroom\n");
1843                return err;
1844        }
1845
1846        err = mlxsw_sp_port_pause_set(mlxsw_sp_port, pause);
1847        if (err) {
1848                netdev_err(dev, "Failed to set PAUSE parameters\n");
1849                goto err_port_pause_configure;
1850        }
1851
1852        mlxsw_sp_port->link.rx_pause = pause->rx_pause;
1853        mlxsw_sp_port->link.tx_pause = pause->tx_pause;
1854
1855        return 0;
1856
1857err_port_pause_configure:
1858        pause_en = mlxsw_sp_port_is_pause_en(mlxsw_sp_port);
1859        mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en);
1860        return err;
1861}
1862
1863struct mlxsw_sp_port_hw_stats {
1864        char str[ETH_GSTRING_LEN];
1865        u64 (*getter)(const char *payload);
1866        bool cells_bytes;
1867};
1868
1869static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_stats[] = {
1870        {
1871                .str = "a_frames_transmitted_ok",
1872                .getter = mlxsw_reg_ppcnt_a_frames_transmitted_ok_get,
1873        },
1874        {
1875                .str = "a_frames_received_ok",
1876                .getter = mlxsw_reg_ppcnt_a_frames_received_ok_get,
1877        },
1878        {
1879                .str = "a_frame_check_sequence_errors",
1880                .getter = mlxsw_reg_ppcnt_a_frame_check_sequence_errors_get,
1881        },
1882        {
1883                .str = "a_alignment_errors",
1884                .getter = mlxsw_reg_ppcnt_a_alignment_errors_get,
1885        },
1886        {
1887                .str = "a_octets_transmitted_ok",
1888                .getter = mlxsw_reg_ppcnt_a_octets_transmitted_ok_get,
1889        },
1890        {
1891                .str = "a_octets_received_ok",
1892                .getter = mlxsw_reg_ppcnt_a_octets_received_ok_get,
1893        },
1894        {
1895                .str = "a_multicast_frames_xmitted_ok",
1896                .getter = mlxsw_reg_ppcnt_a_multicast_frames_xmitted_ok_get,
1897        },
1898        {
1899                .str = "a_broadcast_frames_xmitted_ok",
1900                .getter = mlxsw_reg_ppcnt_a_broadcast_frames_xmitted_ok_get,
1901        },
1902        {
1903                .str = "a_multicast_frames_received_ok",
1904                .getter = mlxsw_reg_ppcnt_a_multicast_frames_received_ok_get,
1905        },
1906        {
1907                .str = "a_broadcast_frames_received_ok",
1908                .getter = mlxsw_reg_ppcnt_a_broadcast_frames_received_ok_get,
1909        },
1910        {
1911                .str = "a_in_range_length_errors",
1912                .getter = mlxsw_reg_ppcnt_a_in_range_length_errors_get,
1913        },
1914        {
1915                .str = "a_out_of_range_length_field",
1916                .getter = mlxsw_reg_ppcnt_a_out_of_range_length_field_get,
1917        },
1918        {
1919                .str = "a_frame_too_long_errors",
1920                .getter = mlxsw_reg_ppcnt_a_frame_too_long_errors_get,
1921        },
1922        {
1923                .str = "a_symbol_error_during_carrier",
1924                .getter = mlxsw_reg_ppcnt_a_symbol_error_during_carrier_get,
1925        },
1926        {
1927                .str = "a_mac_control_frames_transmitted",
1928                .getter = mlxsw_reg_ppcnt_a_mac_control_frames_transmitted_get,
1929        },
1930        {
1931                .str = "a_mac_control_frames_received",
1932                .getter = mlxsw_reg_ppcnt_a_mac_control_frames_received_get,
1933        },
1934        {
1935                .str = "a_unsupported_opcodes_received",
1936                .getter = mlxsw_reg_ppcnt_a_unsupported_opcodes_received_get,
1937        },
1938        {
1939                .str = "a_pause_mac_ctrl_frames_received",
1940                .getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_received_get,
1941        },
1942        {
1943                .str = "a_pause_mac_ctrl_frames_xmitted",
1944                .getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_transmitted_get,
1945        },
1946};
1947
1948#define MLXSW_SP_PORT_HW_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_stats)
1949
1950static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_prio_stats[] = {
1951        {
1952                .str = "rx_octets_prio",
1953                .getter = mlxsw_reg_ppcnt_rx_octets_get,
1954        },
1955        {
1956                .str = "rx_frames_prio",
1957                .getter = mlxsw_reg_ppcnt_rx_frames_get,
1958        },
1959        {
1960                .str = "tx_octets_prio",
1961                .getter = mlxsw_reg_ppcnt_tx_octets_get,
1962        },
1963        {
1964                .str = "tx_frames_prio",
1965                .getter = mlxsw_reg_ppcnt_tx_frames_get,
1966        },
1967        {
1968                .str = "rx_pause_prio",
1969                .getter = mlxsw_reg_ppcnt_rx_pause_get,
1970        },
1971        {
1972                .str = "rx_pause_duration_prio",
1973                .getter = mlxsw_reg_ppcnt_rx_pause_duration_get,
1974        },
1975        {
1976                .str = "tx_pause_prio",
1977                .getter = mlxsw_reg_ppcnt_tx_pause_get,
1978        },
1979        {
1980                .str = "tx_pause_duration_prio",
1981                .getter = mlxsw_reg_ppcnt_tx_pause_duration_get,
1982        },
1983};
1984
1985#define MLXSW_SP_PORT_HW_PRIO_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_prio_stats)
1986
1987static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_tc_stats[] = {
1988        {
1989                .str = "tc_transmit_queue_tc",
1990                .getter = mlxsw_reg_ppcnt_tc_transmit_queue_get,
1991                .cells_bytes = true,
1992        },
1993        {
1994                .str = "tc_no_buffer_discard_uc_tc",
1995                .getter = mlxsw_reg_ppcnt_tc_no_buffer_discard_uc_get,
1996        },
1997};
1998
1999#define MLXSW_SP_PORT_HW_TC_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_tc_stats)
2000
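/* Total number of ethtool statistics exposed per port: the IEEE 802.3
 * counters once, plus one copy of the per-priority and per-TC counter sets
 * for each of the eight priorities/traffic classes. The string and value
 * arrays built by mlxsw_sp_port_get_strings() and mlxsw_sp_port_get_stats()
 * follow this exact layout.
 */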
2001#define MLXSW_SP_PORT_ETHTOOL_STATS_LEN (MLXSW_SP_PORT_HW_STATS_LEN + \
2002                                         (MLXSW_SP_PORT_HW_PRIO_STATS_LEN + \
2003                                          MLXSW_SP_PORT_HW_TC_STATS_LEN) * \
2004                                         IEEE_8021QAZ_MAX_TCS)
2005
2006static void mlxsw_sp_port_get_prio_strings(u8 **p, int prio)
2007{
2008        int i;
2009
2010        for (i = 0; i < MLXSW_SP_PORT_HW_PRIO_STATS_LEN; i++) {
2011                snprintf(*p, ETH_GSTRING_LEN, "%s_%d",
2012                         mlxsw_sp_port_hw_prio_stats[i].str, prio);
2013                *p += ETH_GSTRING_LEN;
2014        }
2015}
2016
2017static void mlxsw_sp_port_get_tc_strings(u8 **p, int tc)
2018{
2019        int i;
2020
2021        for (i = 0; i < MLXSW_SP_PORT_HW_TC_STATS_LEN; i++) {
2022                snprintf(*p, ETH_GSTRING_LEN, "%s_%d",
2023                         mlxsw_sp_port_hw_tc_stats[i].str, tc);
2024                *p += ETH_GSTRING_LEN;
2025        }
2026}
2027
2028static void mlxsw_sp_port_get_strings(struct net_device *dev,
2029                                      u32 stringset, u8 *data)
2030{
2031        u8 *p = data;
2032        int i;
2033
2034        switch (stringset) {
2035        case ETH_SS_STATS:
2036                for (i = 0; i < MLXSW_SP_PORT_HW_STATS_LEN; i++) {
2037                        memcpy(p, mlxsw_sp_port_hw_stats[i].str,
2038                               ETH_GSTRING_LEN);
2039                        p += ETH_GSTRING_LEN;
2040                }
2041
2042                for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
2043                        mlxsw_sp_port_get_prio_strings(&p, i);
2044
2045                for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
2046                        mlxsw_sp_port_get_tc_strings(&p, i);
2047
2048                break;
2049        }
2050}
2051
2052static int mlxsw_sp_port_set_phys_id(struct net_device *dev,
2053                                     enum ethtool_phys_id_state state)
2054{
2055        struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
2056        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2057        char mlcr_pl[MLXSW_REG_MLCR_LEN];
2058        bool active;
2059
2060        switch (state) {
2061        case ETHTOOL_ID_ACTIVE:
2062                active = true;
2063                break;
2064        case ETHTOOL_ID_INACTIVE:
2065                active = false;
2066                break;
2067        default:
2068                return -EOPNOTSUPP;
2069        }
2070
2071        mlxsw_reg_mlcr_pack(mlcr_pl, mlxsw_sp_port->local_port, active);
2072        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mlcr), mlcr_pl);
2073}
2074
2075static int
2076mlxsw_sp_get_hw_stats_by_group(struct mlxsw_sp_port_hw_stats **p_hw_stats,
2077                               int *p_len, enum mlxsw_reg_ppcnt_grp grp)
2078{
2079        switch (grp) {
2080        case MLXSW_REG_PPCNT_IEEE_8023_CNT:
2081                *p_hw_stats = mlxsw_sp_port_hw_stats;
2082                *p_len = MLXSW_SP_PORT_HW_STATS_LEN;
2083                break;
2084        case MLXSW_REG_PPCNT_PRIO_CNT:
2085                *p_hw_stats = mlxsw_sp_port_hw_prio_stats;
2086                *p_len = MLXSW_SP_PORT_HW_PRIO_STATS_LEN;
2087                break;
2088        case MLXSW_REG_PPCNT_TC_CNT:
2089                *p_hw_stats = mlxsw_sp_port_hw_tc_stats;
2090                *p_len = MLXSW_SP_PORT_HW_TC_STATS_LEN;
2091                break;
2092        default:
2093                WARN_ON(1);
2094                return -EOPNOTSUPP;
2095        }
2096        return 0;
2097}
2098
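/* Read one PPCNT counter group for the given priority/TC into the data
 * array. Counters that the hardware reports in buffer cells (cells_bytes)
 * are converted to bytes according to the ASIC's cell size.
 */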
2099static void __mlxsw_sp_port_get_stats(struct net_device *dev,
2100                                      enum mlxsw_reg_ppcnt_grp grp, int prio,
2101                                      u64 *data, int data_index)
2102{
2103        struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
2104        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2105        struct mlxsw_sp_port_hw_stats *hw_stats;
2106        char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
2107        int i, len;
2108        int err;
2109
2110        err = mlxsw_sp_get_hw_stats_by_group(&hw_stats, &len, grp);
2111        if (err)
2112                return;
2113        mlxsw_sp_port_get_stats_raw(dev, grp, prio, ppcnt_pl);
2114        for (i = 0; i < len; i++) {
2115                data[data_index + i] = hw_stats[i].getter(ppcnt_pl);
2116                if (!hw_stats[i].cells_bytes)
2117                        continue;
2118                data[data_index + i] = mlxsw_sp_cells_bytes(mlxsw_sp,
2119                                                            data[data_index + i]);
2120        }
2121}
2122
2123static void mlxsw_sp_port_get_stats(struct net_device *dev,
2124                                    struct ethtool_stats *stats, u64 *data)
2125{
2126        int i, data_index = 0;
2127
2128        /* IEEE 802.3 Counters */
2129        __mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_IEEE_8023_CNT, 0,
2130                                  data, data_index);
2131        data_index = MLXSW_SP_PORT_HW_STATS_LEN;
2132
2133        /* Per-Priority Counters */
2134        for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
2135                __mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_PRIO_CNT, i,
2136                                          data, data_index);
2137                data_index += MLXSW_SP_PORT_HW_PRIO_STATS_LEN;
2138        }
2139
2140        /* Per-TC Counters */
2141        for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
2142                __mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_TC_CNT, i,
2143                                          data, data_index);
2144                data_index += MLXSW_SP_PORT_HW_TC_STATS_LEN;
2145        }
2146}
2147
2148static int mlxsw_sp_port_get_sset_count(struct net_device *dev, int sset)
2149{
2150        switch (sset) {
2151        case ETH_SS_STATS:
2152                return MLXSW_SP_PORT_ETHTOOL_STATS_LEN;
2153        default:
2154                return -EOPNOTSUPP;
2155        }
2156}
2157
2158struct mlxsw_sp_port_link_mode {
2159        enum ethtool_link_mode_bit_indices mask_ethtool;
2160        u32 mask;
2161        u32 speed;
2162};
2163
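/* Mapping between PTYS Ethernet protocol capability bits, the corresponding
 * ethtool link mode bits and the speed they represent. Several PTYS bits may
 * map to the same ethtool mode when ethtool has no finer-grained equivalent.
 */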
2164static const struct mlxsw_sp_port_link_mode mlxsw_sp_port_link_mode[] = {
2165        {
2166                .mask           = MLXSW_REG_PTYS_ETH_SPEED_100BASE_T,
2167                .mask_ethtool   = ETHTOOL_LINK_MODE_100baseT_Full_BIT,
2168                .speed          = SPEED_100,
2169        },
2170        {
2171                .mask           = MLXSW_REG_PTYS_ETH_SPEED_SGMII |
2172                                  MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX,
2173                .mask_ethtool   = ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
2174                .speed          = SPEED_1000,
2175        },
2176        {
2177                .mask           = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_T,
2178                .mask_ethtool   = ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
2179                .speed          = SPEED_10000,
2180        },
2181        {
2182                .mask           = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CX4 |
2183                                  MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4,
2184                .mask_ethtool   = ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT,
2185                .speed          = SPEED_10000,
2186        },
2187        {
2188                .mask           = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
2189                                  MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
2190                                  MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
2191                                  MLXSW_REG_PTYS_ETH_SPEED_10GBASE_ER_LR,
2192                .mask_ethtool   = ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
2193                .speed          = SPEED_10000,
2194        },
2195        {
2196                .mask           = MLXSW_REG_PTYS_ETH_SPEED_20GBASE_KR2,
2197                .mask_ethtool   = ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT,
2198                .speed          = SPEED_20000,
2199        },
2200        {
2201                .mask           = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4,
2202                .mask_ethtool   = ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
2203                .speed          = SPEED_40000,
2204        },
2205        {
2206                .mask           = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4,
2207                .mask_ethtool   = ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
2208                .speed          = SPEED_40000,
2209        },
2210        {
2211                .mask           = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4,
2212                .mask_ethtool   = ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
2213                .speed          = SPEED_40000,
2214        },
2215        {
2216                .mask           = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_LR4_ER4,
2217                .mask_ethtool   = ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
2218                .speed          = SPEED_40000,
2219        },
2220        {
2221                .mask           = MLXSW_REG_PTYS_ETH_SPEED_25GBASE_CR,
2222                .mask_ethtool   = ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
2223                .speed          = SPEED_25000,
2224        },
2225        {
2226                .mask           = MLXSW_REG_PTYS_ETH_SPEED_25GBASE_KR,
2227                .mask_ethtool   = ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
2228                .speed          = SPEED_25000,
2229        },
2230        {
2231                .mask           = MLXSW_REG_PTYS_ETH_SPEED_25GBASE_SR,
2232                .mask_ethtool   = ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
2233                .speed          = SPEED_25000,
2234        },
2235        {
2236                .mask           = MLXSW_REG_PTYS_ETH_SPEED_25GBASE_SR,
2237                .mask_ethtool   = ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
2238                .speed          = SPEED_25000,
2239        },
2240        {
2241                .mask           = MLXSW_REG_PTYS_ETH_SPEED_50GBASE_CR2,
2242                .mask_ethtool   = ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
2243                .speed          = SPEED_50000,
2244        },
2245        {
2246                .mask           = MLXSW_REG_PTYS_ETH_SPEED_50GBASE_KR2,
2247                .mask_ethtool   = ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
2248                .speed          = SPEED_50000,
2249        },
2250        {
2251                .mask           = MLXSW_REG_PTYS_ETH_SPEED_50GBASE_SR2,
2252                .mask_ethtool   = ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
2253                .speed          = SPEED_50000,
2254        },
2255        {
2256                .mask           = MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4,
2257                .mask_ethtool   = ETHTOOL_LINK_MODE_56000baseKR4_Full_BIT,
2258                .speed          = SPEED_56000,
2259        },
2260        {
2261                .mask           = MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4,
2262                .mask_ethtool   = ETHTOOL_LINK_MODE_56000baseCR4_Full_BIT,
2263                .speed          = SPEED_56000,
2264        },
2265        {
2266                .mask           = MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4,
2267                .mask_ethtool   = ETHTOOL_LINK_MODE_56000baseSR4_Full_BIT,
2268                .speed          = SPEED_56000,
2269        },
2270        {
2271                .mask           = MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4,
2272                .mask_ethtool   = ETHTOOL_LINK_MODE_56000baseLR4_Full_BIT,
2273                .speed          = SPEED_56000,
2274        },
2275        {
2276                .mask           = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4,
2277                .mask_ethtool   = ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
2278                .speed          = SPEED_100000,
2279        },
2280        {
2281                .mask           = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4,
2282                .mask_ethtool   = ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
2283                .speed          = SPEED_100000,
2284        },
2285        {
2286                .mask           = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4,
2287                .mask_ethtool   = ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
2288                .speed          = SPEED_100000,
2289        },
2290        {
2291                .mask           = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_LR4_ER4,
2292                .mask_ethtool   = ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
2293                .speed          = SPEED_100000,
2294        },
2295};
2296
2297#define MLXSW_SP_PORT_LINK_MODE_LEN ARRAY_SIZE(mlxsw_sp_port_link_mode)
2298
2299static void
2300mlxsw_sp_from_ptys_supported_port(u32 ptys_eth_proto,
2301                                  struct ethtool_link_ksettings *cmd)
2302{
2303        if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
2304                              MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
2305                              MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 |
2306                              MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4 |
2307                              MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
2308                              MLXSW_REG_PTYS_ETH_SPEED_SGMII))
2309                ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE);
2310
2311        if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
2312                              MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4 |
2313                              MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 |
2314                              MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4 |
2315                              MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX))
2316                ethtool_link_ksettings_add_link_mode(cmd, supported, Backplane);
2317}
2318
2319static void mlxsw_sp_from_ptys_link(u32 ptys_eth_proto, unsigned long *mode)
2320{
2321        int i;
2322
2323        for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
2324                if (ptys_eth_proto & mlxsw_sp_port_link_mode[i].mask)
2325                        __set_bit(mlxsw_sp_port_link_mode[i].mask_ethtool,
2326                                  mode);
2327        }
2328}
2329
2330static void mlxsw_sp_from_ptys_speed_duplex(bool carrier_ok, u32 ptys_eth_proto,
2331                                            struct ethtool_link_ksettings *cmd)
2332{
2333        u32 speed = SPEED_UNKNOWN;
2334        u8 duplex = DUPLEX_UNKNOWN;
2335        int i;
2336
2337        if (!carrier_ok)
2338                goto out;
2339
2340        for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
2341                if (ptys_eth_proto & mlxsw_sp_port_link_mode[i].mask) {
2342                        speed = mlxsw_sp_port_link_mode[i].speed;
2343                        duplex = DUPLEX_FULL;
2344                        break;
2345                }
2346        }
2347out:
2348        cmd->base.speed = speed;
2349        cmd->base.duplex = duplex;
2350}
2351
2352static u8 mlxsw_sp_port_connector_port(u32 ptys_eth_proto)
2353{
2354        if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
2355                              MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4 |
2356                              MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
2357                              MLXSW_REG_PTYS_ETH_SPEED_SGMII))
2358                return PORT_FIBRE;
2359
2360        if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
2361                              MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 |
2362                              MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4))
2363                return PORT_DA;
2364
2365        if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
2366                              MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4 |
2367                              MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 |
2368                              MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4))
2369                return PORT_NONE;
2370
2371        return PORT_OTHER;
2372}
2373
2374static u32
2375mlxsw_sp_to_ptys_advert_link(const struct ethtool_link_ksettings *cmd)
2376{
2377        u32 ptys_proto = 0;
2378        int i;
2379
2380        for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
2381                if (test_bit(mlxsw_sp_port_link_mode[i].mask_ethtool,
2382                             cmd->link_modes.advertising))
2383                        ptys_proto |= mlxsw_sp_port_link_mode[i].mask;
2384        }
2385        return ptys_proto;
2386}
2387
2388static u32 mlxsw_sp_to_ptys_speed(u32 speed)
2389{
2390        u32 ptys_proto = 0;
2391        int i;
2392
2393        for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
2394                if (speed == mlxsw_sp_port_link_mode[i].speed)
2395                        ptys_proto |= mlxsw_sp_port_link_mode[i].mask;
2396        }
2397        return ptys_proto;
2398}
2399
2400static u32 mlxsw_sp_to_ptys_upper_speed(u32 upper_speed)
2401{
2402        u32 ptys_proto = 0;
2403        int i;
2404
2405        for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
2406                if (mlxsw_sp_port_link_mode[i].speed <= upper_speed)
2407                        ptys_proto |= mlxsw_sp_port_link_mode[i].mask;
2408        }
2409        return ptys_proto;
2410}
2411
2412static void mlxsw_sp_port_get_link_supported(u32 eth_proto_cap,
2413                                             struct ethtool_link_ksettings *cmd)
2414{
2415        ethtool_link_ksettings_add_link_mode(cmd, supported, Asym_Pause);
2416        ethtool_link_ksettings_add_link_mode(cmd, supported, Autoneg);
2417        ethtool_link_ksettings_add_link_mode(cmd, supported, Pause);
2418
2419        mlxsw_sp_from_ptys_supported_port(eth_proto_cap, cmd);
2420        mlxsw_sp_from_ptys_link(eth_proto_cap, cmd->link_modes.supported);
2421}
2422
2423static void mlxsw_sp_port_get_link_advertise(u32 eth_proto_admin, bool autoneg,
2424                                             struct ethtool_link_ksettings *cmd)
2425{
2426        if (!autoneg)
2427                return;
2428
2429        ethtool_link_ksettings_add_link_mode(cmd, advertising, Autoneg);
2430        mlxsw_sp_from_ptys_link(eth_proto_admin, cmd->link_modes.advertising);
2431}
2432
2433static void
2434mlxsw_sp_port_get_link_lp_advertise(u32 eth_proto_lp, u8 autoneg_status,
2435                                    struct ethtool_link_ksettings *cmd)
2436{
2437        if (autoneg_status != MLXSW_REG_PTYS_AN_STATUS_OK || !eth_proto_lp)
2438                return;
2439
2440        ethtool_link_ksettings_add_link_mode(cmd, lp_advertising, Autoneg);
2441        mlxsw_sp_from_ptys_link(eth_proto_lp, cmd->link_modes.lp_advertising);
2442}
2443
2444static int mlxsw_sp_port_get_link_ksettings(struct net_device *dev,
2445                                            struct ethtool_link_ksettings *cmd)
2446{
2447        u32 eth_proto_cap, eth_proto_admin, eth_proto_oper, eth_proto_lp;
2448        struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
2449        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2450        char ptys_pl[MLXSW_REG_PTYS_LEN];
2451        u8 autoneg_status;
2452        bool autoneg;
2453        int err;
2454
2455        autoneg = mlxsw_sp_port->link.autoneg;
2456        mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sp_port->local_port, 0);
2457        err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
2458        if (err)
2459                return err;
2460        mlxsw_reg_ptys_eth_unpack(ptys_pl, &eth_proto_cap, &eth_proto_admin,
2461                                  &eth_proto_oper);
2462
2463        mlxsw_sp_port_get_link_supported(eth_proto_cap, cmd);
2464
2465        mlxsw_sp_port_get_link_advertise(eth_proto_admin, autoneg, cmd);
2466
2467        eth_proto_lp = mlxsw_reg_ptys_eth_proto_lp_advertise_get(ptys_pl);
2468        autoneg_status = mlxsw_reg_ptys_an_status_get(ptys_pl);
2469        mlxsw_sp_port_get_link_lp_advertise(eth_proto_lp, autoneg_status, cmd);
2470
2471        cmd->base.autoneg = autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE;
2472        cmd->base.port = mlxsw_sp_port_connector_port(eth_proto_oper);
2473        mlxsw_sp_from_ptys_speed_duplex(netif_carrier_ok(dev), eth_proto_oper,
2474                                        cmd);
2475
2476        return 0;
2477}
2478
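/* Apply new link settings: the requested advertisement (or forced speed) is
 * intersected with the port's capability mask before being written, and if
 * the interface is running the port is toggled down and back up so that the
 * new protocol mask takes effect.
 */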
2479static int
2480mlxsw_sp_port_set_link_ksettings(struct net_device *dev,
2481                                 const struct ethtool_link_ksettings *cmd)
2482{
2483        struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
2484        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2485        char ptys_pl[MLXSW_REG_PTYS_LEN];
2486        u32 eth_proto_cap, eth_proto_new;
2487        bool autoneg;
2488        int err;
2489
2490        mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sp_port->local_port, 0);
2491        err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
2492        if (err)
2493                return err;
2494        mlxsw_reg_ptys_eth_unpack(ptys_pl, &eth_proto_cap, NULL, NULL);
2495
2496        autoneg = cmd->base.autoneg == AUTONEG_ENABLE;
2497        eth_proto_new = autoneg ?
2498                mlxsw_sp_to_ptys_advert_link(cmd) :
2499                mlxsw_sp_to_ptys_speed(cmd->base.speed);
2500
2501        eth_proto_new = eth_proto_new & eth_proto_cap;
2502        if (!eth_proto_new) {
2503                netdev_err(dev, "No supported speed requested\n");
2504                return -EINVAL;
2505        }
2506
2507        mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sp_port->local_port,
2508                                eth_proto_new);
2509        err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
2510        if (err)
2511                return err;
2512
2513        if (!netif_running(dev))
2514                return 0;
2515
2516        mlxsw_sp_port->link.autoneg = autoneg;
2517
2518        mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
2519        mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);
2520
2521        return 0;
2522}
2523
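/* ethtool flash handler: only whole-image flashing is supported. The RTNL
 * lock is dropped for the duration of the firmware request and flash, with a
 * reference held on the netdev to keep it alive in the meantime.
 */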
2524static int mlxsw_sp_flash_device(struct net_device *dev,
2525                                 struct ethtool_flash *flash)
2526{
2527        struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
2528        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2529        const struct firmware *firmware;
2530        int err;
2531
2532        if (flash->region != ETHTOOL_FLASH_ALL_REGIONS)
2533                return -EOPNOTSUPP;
2534
2535        dev_hold(dev);
2536        rtnl_unlock();
2537
2538        err = request_firmware(&firmware, flash->data, &dev->dev);
2539        if (err)
2540                goto out;
2541        err = mlxsw_sp_firmware_flash(mlxsw_sp, firmware);
2542        release_firmware(firmware);
2543out:
2544        rtnl_lock();
2545        dev_put(dev);
2546        return err;
2547}
2548
2549#define MLXSW_SP_I2C_ADDR_LOW 0x50
2550#define MLXSW_SP_I2C_ADDR_HIGH 0x51
2551#define MLXSW_SP_EEPROM_PAGE_LENGTH 256
2552
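/* Read a chunk of the module EEPROM through the MCIA register. Reads are
 * clamped so that they never cross the 256-byte page boundary; offsets
 * beyond the first page are accessed via the upper I2C address (0x51) with
 * the offset rebased to the start of that page.
 */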
2553static int mlxsw_sp_query_module_eeprom(struct mlxsw_sp_port *mlxsw_sp_port,
2554                                        u16 offset, u16 size, void *data,
2555                                        unsigned int *p_read_size)
2556{
2557        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2558        char eeprom_tmp[MLXSW_SP_REG_MCIA_EEPROM_SIZE];
2559        char mcia_pl[MLXSW_REG_MCIA_LEN];
2560        u16 i2c_addr;
2561        int status;
2562        int err;
2563
2564        size = min_t(u16, size, MLXSW_SP_REG_MCIA_EEPROM_SIZE);
2565
2566        if (offset < MLXSW_SP_EEPROM_PAGE_LENGTH &&
2567            offset + size > MLXSW_SP_EEPROM_PAGE_LENGTH)
2568                /* Cross-page read; read only up to offset 256 in the low page */
2569                size = MLXSW_SP_EEPROM_PAGE_LENGTH - offset;
2570
2571        i2c_addr = MLXSW_SP_I2C_ADDR_LOW;
2572        if (offset >= MLXSW_SP_EEPROM_PAGE_LENGTH) {
2573                i2c_addr = MLXSW_SP_I2C_ADDR_HIGH;
2574                offset -= MLXSW_SP_EEPROM_PAGE_LENGTH;
2575        }
2576
2577        mlxsw_reg_mcia_pack(mcia_pl, mlxsw_sp_port->mapping.module,
2578                            0, 0, offset, size, i2c_addr);
2579
2580        err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mcia), mcia_pl);
2581        if (err)
2582                return err;
2583
2584        status = mlxsw_reg_mcia_status_get(mcia_pl);
2585        if (status)
2586                return -EIO;
2587
2588        mlxsw_reg_mcia_eeprom_memcpy_from(mcia_pl, eeprom_tmp);
2589        memcpy(data, eeprom_tmp, size);
2590        *p_read_size = size;
2591
2592        return 0;
2593}
2594
2595enum mlxsw_sp_eeprom_module_info_rev_id {
2596        MLXSW_SP_EEPROM_MODULE_INFO_REV_ID_UNSPC      = 0x00,
2597        MLXSW_SP_EEPROM_MODULE_INFO_REV_ID_8436       = 0x01,
2598        MLXSW_SP_EEPROM_MODULE_INFO_REV_ID_8636       = 0x03,
2599};
2600
2601enum mlxsw_sp_eeprom_module_info_id {
2602        MLXSW_SP_EEPROM_MODULE_INFO_ID_SFP              = 0x03,
2603        MLXSW_SP_EEPROM_MODULE_INFO_ID_QSFP             = 0x0C,
2604        MLXSW_SP_EEPROM_MODULE_INFO_ID_QSFP_PLUS        = 0x0D,
2605        MLXSW_SP_EEPROM_MODULE_INFO_ID_QSFP28           = 0x11,
2606};
2607
2608enum mlxsw_sp_eeprom_module_info {
2609        MLXSW_SP_EEPROM_MODULE_INFO_ID,
2610        MLXSW_SP_EEPROM_MODULE_INFO_REV_ID,
2611        MLXSW_SP_EEPROM_MODULE_INFO_SIZE,
2612};
2613
2614static int mlxsw_sp_get_module_info(struct net_device *netdev,
2615                                    struct ethtool_modinfo *modinfo)
2616{
2617        struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(netdev);
2618        u8 module_info[MLXSW_SP_EEPROM_MODULE_INFO_SIZE];
2619        u8 module_rev_id, module_id;
2620        unsigned int read_size;
2621        int err;
2622
2623        err = mlxsw_sp_query_module_eeprom(mlxsw_sp_port, 0,
2624                                           MLXSW_SP_EEPROM_MODULE_INFO_SIZE,
2625                                           module_info, &read_size);
2626        if (err)
2627                return err;
2628
2629        if (read_size < MLXSW_SP_EEPROM_MODULE_INFO_SIZE)
2630                return -EIO;
2631
2632        module_rev_id = module_info[MLXSW_SP_EEPROM_MODULE_INFO_REV_ID];
2633        module_id = module_info[MLXSW_SP_EEPROM_MODULE_INFO_ID];
2634
2635        switch (module_id) {
2636        case MLXSW_SP_EEPROM_MODULE_INFO_ID_QSFP:
2637                modinfo->type       = ETH_MODULE_SFF_8436;
2638                modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN;
2639                break;
2640        case MLXSW_SP_EEPROM_MODULE_INFO_ID_QSFP_PLUS:
2641        case MLXSW_SP_EEPROM_MODULE_INFO_ID_QSFP28:
2642                if (module_id == MLXSW_SP_EEPROM_MODULE_INFO_ID_QSFP28 ||
2643                    module_rev_id >= MLXSW_SP_EEPROM_MODULE_INFO_REV_ID_8636) {
2644                        modinfo->type       = ETH_MODULE_SFF_8636;
2645                        modinfo->eeprom_len = ETH_MODULE_SFF_8636_LEN;
2646                } else {
2647                        modinfo->type       = ETH_MODULE_SFF_8436;
2648                        modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN;
2649                }
2650                break;
2651        case MLXSW_SP_EEPROM_MODULE_INFO_ID_SFP:
2652                modinfo->type       = ETH_MODULE_SFF_8472;
2653                modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
2654                break;
2655        default:
2656                return -EINVAL;
2657        }
2658
2659        return 0;
2660}
2661
2662static int mlxsw_sp_get_module_eeprom(struct net_device *netdev,
2663                                      struct ethtool_eeprom *ee,
2664                                      u8 *data)
2665{
2666        struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(netdev);
2667        int offset = ee->offset;
2668        unsigned int read_size;
2669        int i = 0;
2670        int err;
2671
2672        if (!ee->len)
2673                return -EINVAL;
2674
2675        memset(data, 0, ee->len);
2676
2677        while (i < ee->len) {
2678                err = mlxsw_sp_query_module_eeprom(mlxsw_sp_port, offset,
2679                                                   ee->len - i, data + i,
2680                                                   &read_size);
2681                if (err) {
2682                        netdev_err(mlxsw_sp_port->dev, "EEPROM query failed\n");
2683                        return err;
2684                }
2685
2686                i += read_size;
2687                offset += read_size;
2688        }
2689
2690        return 0;
2691}
2692
2693static const struct ethtool_ops mlxsw_sp_port_ethtool_ops = {
2694        .get_drvinfo            = mlxsw_sp_port_get_drvinfo,
2695        .get_link               = ethtool_op_get_link,
2696        .get_pauseparam         = mlxsw_sp_port_get_pauseparam,
2697        .set_pauseparam         = mlxsw_sp_port_set_pauseparam,
2698        .get_strings            = mlxsw_sp_port_get_strings,
2699        .set_phys_id            = mlxsw_sp_port_set_phys_id,
2700        .get_ethtool_stats      = mlxsw_sp_port_get_stats,
2701        .get_sset_count         = mlxsw_sp_port_get_sset_count,
2702        .get_link_ksettings     = mlxsw_sp_port_get_link_ksettings,
2703        .set_link_ksettings     = mlxsw_sp_port_set_link_ksettings,
2704        .flash_device           = mlxsw_sp_flash_device,
2705        .get_module_info        = mlxsw_sp_get_module_info,
2706        .get_module_eeprom      = mlxsw_sp_get_module_eeprom,
2707};
2708
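/* Set the administratively enabled protocol mask according to the port's
 * width: a port made of N lanes advertises every supported mode whose speed
 * does not exceed N times MLXSW_SP_PORT_BASE_SPEED.
 */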
2709static int
2710mlxsw_sp_port_speed_by_width_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 width)
2711{
2712        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2713        u32 upper_speed = MLXSW_SP_PORT_BASE_SPEED * width;
2714        char ptys_pl[MLXSW_REG_PTYS_LEN];
2715        u32 eth_proto_admin;
2716
2717        eth_proto_admin = mlxsw_sp_to_ptys_upper_speed(upper_speed);
2718        mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sp_port->local_port,
2719                                eth_proto_admin);
2720        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
2721}
2722
2723int mlxsw_sp_port_ets_set(struct mlxsw_sp_port *mlxsw_sp_port,
2724                          enum mlxsw_reg_qeec_hr hr, u8 index, u8 next_index,
2725                          bool dwrr, u8 dwrr_weight)
2726{
2727        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2728        char qeec_pl[MLXSW_REG_QEEC_LEN];
2729
2730        mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
2731                            next_index);
2732        mlxsw_reg_qeec_de_set(qeec_pl, true);
2733        mlxsw_reg_qeec_dwrr_set(qeec_pl, dwrr);
2734        mlxsw_reg_qeec_dwrr_weight_set(qeec_pl, dwrr_weight);
2735        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
2736}
2737
2738int mlxsw_sp_port_ets_maxrate_set(struct mlxsw_sp_port *mlxsw_sp_port,
2739                                  enum mlxsw_reg_qeec_hr hr, u8 index,
2740                                  u8 next_index, u32 maxrate)
2741{
2742        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2743        char qeec_pl[MLXSW_REG_QEEC_LEN];
2744
2745        mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
2746                            next_index);
2747        mlxsw_reg_qeec_mase_set(qeec_pl, true);
2748        mlxsw_reg_qeec_max_shaper_rate_set(qeec_pl, maxrate);
2749        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
2750}
2751
2752int mlxsw_sp_port_prio_tc_set(struct mlxsw_sp_port *mlxsw_sp_port,
2753                              u8 switch_prio, u8 tclass)
2754{
2755        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2756        char qtct_pl[MLXSW_REG_QTCT_LEN];
2757
2758        mlxsw_reg_qtct_pack(qtct_pl, mlxsw_sp_port->local_port, switch_prio,
2759                            tclass);
2760        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qtct), qtct_pl);
2761}
2762
2763static int mlxsw_sp_port_ets_init(struct mlxsw_sp_port *mlxsw_sp_port)
2764{
2765        int err, i;
2766
2767        /* Set up the element hierarchy, so that each TC is linked to
2768         * one subgroup, and all subgroups are members of the same group.
2769         */
2770        err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
2771                                    MLXSW_REG_QEEC_HIERARCY_GROUP, 0, 0, false,
2772                                    0);
2773        if (err)
2774                return err;
2775        for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
2776                err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
2777                                            MLXSW_REG_QEEC_HIERARCY_SUBGROUP, i,
2778                                            0, false, 0);
2779                if (err)
2780                        return err;
2781        }
2782        for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
2783                err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
2784                                            MLXSW_REG_QEEC_HIERARCY_TC, i, i,
2785                                            false, 0);
2786                if (err)
2787                        return err;
2788        }
2789
2790        /* Make sure the max shaper is disabled in all hierarchies that
2791         * support it.
2792         */
2793        err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
2794                                            MLXSW_REG_QEEC_HIERARCY_PORT, 0, 0,
2795                                            MLXSW_REG_QEEC_MAS_DIS);
2796        if (err)
2797                return err;
2798        for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
2799                err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
2800                                                    MLXSW_REG_QEEC_HIERARCY_SUBGROUP,
2801                                                    i, 0,
2802                                                    MLXSW_REG_QEEC_MAS_DIS);
2803                if (err)
2804                        return err;
2805        }
2806        for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
2807                err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
2808                                                    MLXSW_REG_QEEC_HIERARCY_TC,
2809                                                    i, i,
2810                                                    MLXSW_REG_QEEC_MAS_DIS);
2811                if (err)
2812                        return err;
2813        }
2814
2815        /* Map all priorities to traffic class 0. */
2816        for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
2817                err = mlxsw_sp_port_prio_tc_set(mlxsw_sp_port, i, 0);
2818                if (err)
2819                        return err;
2820        }
2821
2822        return 0;
2823}
2824
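/* Create a single front-panel port: allocate the netdev and per-port
 * state, program the hardware mappings (module, SWID, system port),
 * set up QoS defaults (buffers, ETS, DCB), instantiate the default
 * VLAN and only then register the netdev, so the port is fully
 * initialized before it becomes visible.  The error labels below
 * unwind these steps in reverse order.
 */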
2825static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
2826                                bool split, u8 module, u8 width, u8 lane)
2827{
2828        struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
2829        struct mlxsw_sp_port *mlxsw_sp_port;
2830        struct net_device *dev;
2831        int err;
2832
2833        err = mlxsw_core_port_init(mlxsw_sp->core, local_port);
2834        if (err) {
2835                dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to init core port\n",
2836                        local_port);
2837                return err;
2838        }
2839
2840        dev = alloc_etherdev(sizeof(struct mlxsw_sp_port));
2841        if (!dev) {
2842                err = -ENOMEM;
2843                goto err_alloc_etherdev;
2844        }
2845        SET_NETDEV_DEV(dev, mlxsw_sp->bus_info->dev);
2846        mlxsw_sp_port = netdev_priv(dev);
2847        mlxsw_sp_port->dev = dev;
2848        mlxsw_sp_port->mlxsw_sp = mlxsw_sp;
2849        mlxsw_sp_port->local_port = local_port;
2850        mlxsw_sp_port->pvid = 1;
2851        mlxsw_sp_port->split = split;
2852        mlxsw_sp_port->mapping.module = module;
2853        mlxsw_sp_port->mapping.width = width;
2854        mlxsw_sp_port->mapping.lane = lane;
2855        mlxsw_sp_port->link.autoneg = 1;
2856        INIT_LIST_HEAD(&mlxsw_sp_port->vlans_list);
2857        INIT_LIST_HEAD(&mlxsw_sp_port->mall_tc_list);
2858
2859        mlxsw_sp_port->pcpu_stats =
2860                netdev_alloc_pcpu_stats(struct mlxsw_sp_port_pcpu_stats);
2861        if (!mlxsw_sp_port->pcpu_stats) {
2862                err = -ENOMEM;
2863                goto err_alloc_stats;
2864        }
2865
2866        mlxsw_sp_port->sample = kzalloc(sizeof(*mlxsw_sp_port->sample),
2867                                        GFP_KERNEL);
2868        if (!mlxsw_sp_port->sample) {
2869                err = -ENOMEM;
2870                goto err_alloc_sample;
2871        }
2872
2873        mlxsw_sp_port->hw_stats.cache =
2874                kzalloc(sizeof(*mlxsw_sp_port->hw_stats.cache), GFP_KERNEL);
2875
2876        if (!mlxsw_sp_port->hw_stats.cache) {
2877                err = -ENOMEM;
2878                goto err_alloc_hw_stats;
2879        }
2880        INIT_DELAYED_WORK(&mlxsw_sp_port->hw_stats.update_dw,
2881                          &update_stats_cache);
2882
2883        dev->netdev_ops = &mlxsw_sp_port_netdev_ops;
2884        dev->ethtool_ops = &mlxsw_sp_port_ethtool_ops;
2885
2886        err = mlxsw_sp_port_module_map(mlxsw_sp_port, module, width, lane);
2887        if (err) {
2888                dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to map module\n",
2889                        mlxsw_sp_port->local_port);
2890                goto err_port_module_map;
2891        }
2892
2893        err = mlxsw_sp_port_swid_set(mlxsw_sp_port, 0);
2894        if (err) {
2895                dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set SWID\n",
2896                        mlxsw_sp_port->local_port);
2897                goto err_port_swid_set;
2898        }
2899
2900        err = mlxsw_sp_port_dev_addr_init(mlxsw_sp_port);
2901        if (err) {
2902                dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unable to init port mac address\n",
2903                        mlxsw_sp_port->local_port);
2904                goto err_dev_addr_init;
2905        }
2906
2907        netif_carrier_off(dev);
2908
2909        dev->features |= NETIF_F_NETNS_LOCAL | NETIF_F_LLTX | NETIF_F_SG |
2910                         NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_TC;
2911        dev->hw_features |= NETIF_F_HW_TC;
2912
2913        dev->extended->min_mtu = 0;
2914        dev->extended->max_mtu = ETH_MAX_MTU;
2915
2916        /* Each packet needs to have a Tx header (metadata) on top of all
2917         * other headers.
2918         */
2919        dev->needed_headroom = MLXSW_TXHDR_LEN;
2920
2921        err = mlxsw_sp_port_system_port_mapping_set(mlxsw_sp_port);
2922        if (err) {
2923                dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set system port mapping\n",
2924                        mlxsw_sp_port->local_port);
2925                goto err_port_system_port_mapping_set;
2926        }
2927
2928        err = mlxsw_sp_port_speed_by_width_set(mlxsw_sp_port, width);
2929        if (err) {
2930                dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to enable speeds\n",
2931                        mlxsw_sp_port->local_port);
2932                goto err_port_speed_by_width_set;
2933        }
2934
2935        err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, ETH_DATA_LEN);
2936        if (err) {
2937                dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set MTU\n",
2938                        mlxsw_sp_port->local_port);
2939                goto err_port_mtu_set;
2940        }
2941
2942        err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
2943        if (err)
2944                goto err_port_admin_status_set;
2945
2946        err = mlxsw_sp_port_buffers_init(mlxsw_sp_port);
2947        if (err) {
2948                dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize buffers\n",
2949                        mlxsw_sp_port->local_port);
2950                goto err_port_buffers_init;
2951        }
2952
2953        err = mlxsw_sp_port_ets_init(mlxsw_sp_port);
2954        if (err) {
2955                dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize ETS\n",
2956                        mlxsw_sp_port->local_port);
2957                goto err_port_ets_init;
2958        }
2959
2960        /* ETS and buffers must be initialized before DCB. */
2961        err = mlxsw_sp_port_dcb_init(mlxsw_sp_port);
2962        if (err) {
2963                dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize DCB\n",
2964                        mlxsw_sp_port->local_port);
2965                goto err_port_dcb_init;
2966        }
2967
2968        err = mlxsw_sp_port_fids_init(mlxsw_sp_port);
2969        if (err) {
2970                dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize FIDs\n",
2971                        mlxsw_sp_port->local_port);
2972                goto err_port_fids_init;
2973        }
2974
2975        mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_get(mlxsw_sp_port, 1);
2976        if (IS_ERR(mlxsw_sp_port_vlan)) {
2977                dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to create VID 1\n",
2978                        mlxsw_sp_port->local_port);
2979                err = PTR_ERR(mlxsw_sp_port_vlan);
2980                goto err_port_vlan_get;
2981        }
2982
2983        mlxsw_sp_port_switchdev_init(mlxsw_sp_port);
2984        mlxsw_sp->ports[local_port] = mlxsw_sp_port;
2985        err = register_netdev(dev);
2986        if (err) {
2987                dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to register netdev\n",
2988                        mlxsw_sp_port->local_port);
2989                goto err_register_netdev;
2990        }
2991
2992        mlxsw_core_port_eth_set(mlxsw_sp->core, mlxsw_sp_port->local_port,
2993                                mlxsw_sp_port, dev, mlxsw_sp_port->split,
2994                                module);
2995        mlxsw_core_schedule_dw(&mlxsw_sp_port->hw_stats.update_dw, 0);
2996        return 0;
2997
2998err_register_netdev:
2999        mlxsw_sp->ports[local_port] = NULL;
3000        mlxsw_sp_port_switchdev_fini(mlxsw_sp_port);
3001        mlxsw_sp_port_vlan_put(mlxsw_sp_port_vlan);
3002err_port_vlan_get:
3003        mlxsw_sp_port_fids_fini(mlxsw_sp_port);
3004err_port_fids_init:
3005        mlxsw_sp_port_dcb_fini(mlxsw_sp_port);
3006err_port_dcb_init:
3007err_port_ets_init:
3008err_port_buffers_init:
3009err_port_admin_status_set:
3010err_port_mtu_set:
3011err_port_speed_by_width_set:
3012err_port_system_port_mapping_set:
3013err_dev_addr_init:
3014        mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT);
3015err_port_swid_set:
3016        mlxsw_sp_port_module_unmap(mlxsw_sp_port);
3017err_port_module_map:
3018        kfree(mlxsw_sp_port->hw_stats.cache);
3019err_alloc_hw_stats:
3020        kfree(mlxsw_sp_port->sample);
3021err_alloc_sample:
3022        free_percpu(mlxsw_sp_port->pcpu_stats);
3023err_alloc_stats:
3024        free_netdev(dev);
3025err_alloc_etherdev:
3026        mlxsw_core_port_fini(mlxsw_sp->core, local_port);
3027        return err;
3028}
3029
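/* Tear a port down in the reverse order of mlxsw_sp_port_create():
 * stop the stats work and unregister the netdev first (stopping
 * traffic), then release switchdev/VLAN/FID/DCB state and finally the
 * hardware mappings and allocations.
 */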
3030static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port)
3031{
3032        struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];
3033
3034        cancel_delayed_work_sync(&mlxsw_sp_port->hw_stats.update_dw);
3035        mlxsw_core_port_clear(mlxsw_sp->core, local_port, mlxsw_sp);
3036        unregister_netdev(mlxsw_sp_port->dev); /* This calls ndo_stop */
3037        mlxsw_sp->ports[local_port] = NULL;
3038        mlxsw_sp_port_switchdev_fini(mlxsw_sp_port);
3039        mlxsw_sp_port_vlan_flush(mlxsw_sp_port);
3040        mlxsw_sp_port_fids_fini(mlxsw_sp_port);
3041        mlxsw_sp_port_dcb_fini(mlxsw_sp_port);
3042        mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT);
3043        mlxsw_sp_port_module_unmap(mlxsw_sp_port);
3044        kfree(mlxsw_sp_port->hw_stats.cache);
3045        kfree(mlxsw_sp_port->sample);
3046        free_percpu(mlxsw_sp_port->pcpu_stats);
3047        WARN_ON_ONCE(!list_empty(&mlxsw_sp_port->vlans_list));
3048        free_netdev(mlxsw_sp_port->dev);
3049        mlxsw_core_port_fini(mlxsw_sp->core, local_port);
3050}
3051
3052static bool mlxsw_sp_port_created(struct mlxsw_sp *mlxsw_sp, u8 local_port)
3053{
3054        return mlxsw_sp->ports[local_port] != NULL;
3055}
3056
3057static void mlxsw_sp_ports_remove(struct mlxsw_sp *mlxsw_sp)
3058{
3059        int i;
3060
3061        for (i = 1; i < mlxsw_core_max_ports(mlxsw_sp->core); i++)
3062                if (mlxsw_sp_port_created(mlxsw_sp, i))
3063                        mlxsw_sp_port_remove(mlxsw_sp, i);
3064        kfree(mlxsw_sp->port_to_module);
3065        kfree(mlxsw_sp->ports);
3066}
3067
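/* Walk all possible local ports (numbering starts at 1), query each
 * one's module/width/lane mapping and create a netdev for every port
 * with a non-zero width.  The module index is cached in port_to_module
 * so that ports can later be re-created after an unsplit.
 */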
3068static int mlxsw_sp_ports_create(struct mlxsw_sp *mlxsw_sp)
3069{
3070        unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sp->core);
3071        u8 module, width, lane;
3072        size_t alloc_size;
3073        int i;
3074        int err;
3075
3076        alloc_size = sizeof(struct mlxsw_sp_port *) * max_ports;
3077        mlxsw_sp->ports = kzalloc(alloc_size, GFP_KERNEL);
3078        if (!mlxsw_sp->ports)
3079                return -ENOMEM;
3080
3081        mlxsw_sp->port_to_module = kcalloc(max_ports, sizeof(u8), GFP_KERNEL);
3082        if (!mlxsw_sp->port_to_module) {
3083                err = -ENOMEM;
3084                goto err_port_to_module_alloc;
3085        }
3086
3087        for (i = 1; i < max_ports; i++) {
3088                err = mlxsw_sp_port_module_info_get(mlxsw_sp, i, &module,
3089                                                    &width, &lane);
3090                if (err)
3091                        goto err_port_module_info_get;
3092                if (!width)
3093                        continue;
3094                mlxsw_sp->port_to_module[i] = module;
3095                err = mlxsw_sp_port_create(mlxsw_sp, i, false,
3096                                           module, width, lane);
3097                if (err)
3098                        goto err_port_create;
3099        }
3100        return 0;
3101
3102err_port_create:
3103err_port_module_info_get:
3104        for (i--; i >= 1; i--)
3105                if (mlxsw_sp_port_created(mlxsw_sp, i))
3106                        mlxsw_sp_port_remove(mlxsw_sp, i);
3107        kfree(mlxsw_sp->port_to_module);
3108err_port_to_module_alloc:
3109        kfree(mlxsw_sp->ports);
3110        return err;
3111}
3112
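/* Local ports are grouped into clusters of MLXSW_SP_PORTS_PER_CLUSTER_MAX
 * consecutive ports; round down to the first port of the cluster.  For
 * example, assuming clusters of four, local port 7 gives an offset of
 * (7 - 1) % 4 = 2 and hence base port 5.
 */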
3113static u8 mlxsw_sp_cluster_base_port_get(u8 local_port)
3114{
3115        u8 offset = (local_port - 1) % MLXSW_SP_PORTS_PER_CLUSTER_MAX;
3116
3117        return local_port - offset;
3118}
3119
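/* Create the split ports: the module's lanes are divided equally, so a
 * split by two gives each new port half of MLXSW_PORT_MODULE_MAX_WIDTH
 * lanes and a split by four a quarter, with port i starting at lane
 * i * width within the module.
 */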
3120static int mlxsw_sp_port_split_create(struct mlxsw_sp *mlxsw_sp, u8 base_port,
3121                                      u8 module, unsigned int count)
3122{
3123        u8 width = MLXSW_PORT_MODULE_MAX_WIDTH / count;
3124        int err, i;
3125
3126        for (i = 0; i < count; i++) {
3127                err = mlxsw_sp_port_create(mlxsw_sp, base_port + i, true,
3128                                           module, width, i * width);
3129                if (err)
3130                        goto err_port_create;
3131        }
3132
3133        return 0;
3134
3135err_port_create:
3136        for (i--; i >= 0; i--)
3137                if (mlxsw_sp_port_created(mlxsw_sp, base_port + i))
3138                        mlxsw_sp_port_remove(mlxsw_sp, base_port + i);
3139        return err;
3140}
3141
3142static void mlxsw_sp_port_unsplit_create(struct mlxsw_sp *mlxsw_sp,
3143                                         u8 base_port, unsigned int count)
3144{
3145        u8 local_port, module, width = MLXSW_PORT_MODULE_MAX_WIDTH;
3146        int i;
3147
3148        /* A split by four means two unsplit ports need to be re-created,
3149         * a split by two means only one.
3150         */
3151        count = count / 2;
3152
3153        for (i = 0; i < count; i++) {
3154                local_port = base_port + i * 2;
3155                module = mlxsw_sp->port_to_module[local_port];
3156
3157                mlxsw_sp_port_create(mlxsw_sp, local_port, false, module,
3158                                     width, 0);
3159        }
3160}
3161
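/* Handle a port split request: validate the requested count and the
 * current width, make sure the neighbor ports are free, remove the
 * existing ports in the split range and re-create them as split ports.
 * On failure the original unsplit ports are restored.
 */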
3162static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u8 local_port,
3163                               unsigned int count)
3164{
3165        struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
3166        struct mlxsw_sp_port *mlxsw_sp_port;
3167        u8 module, cur_width, base_port;
3168        int i;
3169        int err;
3170
3171        mlxsw_sp_port = mlxsw_sp->ports[local_port];
3172        if (!mlxsw_sp_port) {
3173                dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n",
3174                        local_port);
3175                return -EINVAL;
3176        }
3177
3178        module = mlxsw_sp_port->mapping.module;
3179        cur_width = mlxsw_sp_port->mapping.width;
3180
3181        if (count != 2 && count != 4) {
3182                netdev_err(mlxsw_sp_port->dev, "Port can only be split into 2 or 4 ports\n");
3183                return -EINVAL;
3184        }
3185
3186        if (cur_width != MLXSW_PORT_MODULE_MAX_WIDTH) {
3187                netdev_err(mlxsw_sp_port->dev, "Port cannot be split further\n");
3188                return -EINVAL;
3189        }
3190
3191        /* Make sure the neighbor local ports needed for the split are free. */
3192        if (count == 2) {
3193                base_port = local_port;
3194                if (mlxsw_sp->ports[base_port + 1]) {
3195                        netdev_err(mlxsw_sp_port->dev, "Invalid split configuration\n");
3196                        return -EINVAL;
3197                }
3198        } else {
3199                base_port = mlxsw_sp_cluster_base_port_get(local_port);
3200                if (mlxsw_sp->ports[base_port + 1] ||
3201                    mlxsw_sp->ports[base_port + 3]) {
3202                        netdev_err(mlxsw_sp_port->dev, "Invalid split configuration\n");
3203                        return -EINVAL;
3204                }
3205        }
3206
3207        for (i = 0; i < count; i++)
3208                if (mlxsw_sp_port_created(mlxsw_sp, base_port + i))
3209                        mlxsw_sp_port_remove(mlxsw_sp, base_port + i);
3210
3211        err = mlxsw_sp_port_split_create(mlxsw_sp, base_port, module, count);
3212        if (err) {
3213                dev_err(mlxsw_sp->bus_info->dev, "Failed to create split ports\n");
3214                goto err_port_split_create;
3215        }
3216
3217        return 0;
3218
3219err_port_split_create:
3220        mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, count);
3221        return err;
3222}
3223
3224static int mlxsw_sp_port_unsplit(struct mlxsw_core *mlxsw_core, u8 local_port)
3225{
3226        struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
3227        struct mlxsw_sp_port *mlxsw_sp_port;
3228        u8 cur_width, base_port;
3229        unsigned int count;
3230        int i;
3231
3232        mlxsw_sp_port = mlxsw_sp->ports[local_port];
3233        if (!mlxsw_sp_port) {
3234                dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n",
3235                        local_port);
3236                return -EINVAL;
3237        }
3238
3239        if (!mlxsw_sp_port->split) {
3240                netdev_err(mlxsw_sp_port->dev, "Port wasn't split\n");
3241                return -EINVAL;
3242        }
3243
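        /* A port split by four is left with a single lane, so a current
         * width of one means four ports have to be re-created; otherwise
         * the original split was by two.
         */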
3244        cur_width = mlxsw_sp_port->mapping.width;
3245        count = cur_width == 1 ? 4 : 2;
3246
3247        base_port = mlxsw_sp_cluster_base_port_get(local_port);
3248
3249        /* Determine which ports to remove. */
3250        if (count == 2 && local_port >= base_port + 2)
3251                base_port = base_port + 2;
3252
3253        for (i = 0; i < count; i++)
3254                if (mlxsw_sp_port_created(mlxsw_sp, base_port + i))
3255                        mlxsw_sp_port_remove(mlxsw_sp, base_port + i);
3256
3257        mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, count);
3258
3259        return 0;
3260}
3261
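/* PUDE (port up/down event) handler: mirror the operational state
 * reported by the device into the netdev carrier state.
 */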
3262static void mlxsw_sp_pude_event_func(const struct mlxsw_reg_info *reg,
3263                                     char *pude_pl, void *priv)
3264{
3265        struct mlxsw_sp *mlxsw_sp = priv;
3266        struct mlxsw_sp_port *mlxsw_sp_port;
3267        enum mlxsw_reg_pude_oper_status status;
3268        u8 local_port;
3269
3270        local_port = mlxsw_reg_pude_local_port_get(pude_pl);
3271        mlxsw_sp_port = mlxsw_sp->ports[local_port];
3272        if (!mlxsw_sp_port)
3273                return;
3274
3275        status = mlxsw_reg_pude_oper_status_get(pude_pl);
3276        if (status == MLXSW_PORT_OPER_STATUS_UP) {
3277                netdev_info(mlxsw_sp_port->dev, "link up\n");
3278                netif_carrier_on(mlxsw_sp_port->dev);
3279        } else {
3280                netdev_info(mlxsw_sp_port->dev, "link down\n");
3281                netif_carrier_off(mlxsw_sp_port->dev);
3282        }
3283}
3284
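/* Default RX handler for trapped packets: attribute the skb to the
 * ingress port's netdev, update the per-CPU RX counters and hand the
 * packet to the stack without setting offload_fwd_mark.
 */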
3285static void mlxsw_sp_rx_listener_no_mark_func(struct sk_buff *skb,
3286                                              u8 local_port, void *priv)
3287{
3288        struct mlxsw_sp *mlxsw_sp = priv;
3289        struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];
3290        struct mlxsw_sp_port_pcpu_stats *pcpu_stats;
3291
3292        if (unlikely(!mlxsw_sp_port)) {
3293                dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: skb received for non-existent port\n",
3294                                     local_port);
3295                return;
3296        }
3297
3298        skb->dev = mlxsw_sp_port->dev;
3299
3300        pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats);
3301        u64_stats_update_begin(&pcpu_stats->syncp);
3302        pcpu_stats->rx_packets++;
3303        pcpu_stats->rx_bytes += skb->len;
3304        u64_stats_update_end(&pcpu_stats->syncp);
3305
3306        skb->protocol = eth_type_trans(skb, skb->dev);
3307        netif_receive_skb(skb);
3308}
3309
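/* Same as the no-mark handler, but offload_fwd_mark indicates that the
 * device already forwarded the packet in hardware, so the bridge will
 * not forward it again to ports of the same device.
 */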
3310static void mlxsw_sp_rx_listener_mark_func(struct sk_buff *skb, u8 local_port,
3311                                           void *priv)
3312{
3313        skb->offload_fwd_mark = 1;
3314        return mlxsw_sp_rx_listener_no_mark_func(skb, local_port, priv);
3315}
3316
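/* Sampled packets are handed to the psample module (the backend of the
 * tc "sample" action), optionally truncated to the configured size, and
 * then consumed; they never enter the regular RX path.
 */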
3317static void mlxsw_sp_rx_listener_sample_func(struct sk_buff *skb, u8 local_port,
3318                                             void *priv)
3319{
3320        struct mlxsw_sp *mlxsw_sp = priv;
3321        struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];
3322        struct psample_group *psample_group;
3323        u32 size;
3324
3325        if (unlikely(!mlxsw_sp_port)) {
3326                dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: sample skb received for non-existent port\n",
3327                                     local_port);
3328                goto out;
3329        }
3330        if (unlikely(!mlxsw_sp_port->sample)) {
3331                dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: sample skb received on unsupported port\n",
3332                                     local_port);
3333                goto out;
3334        }
3335
3336        size = mlxsw_sp_port->sample->truncate ?
3337                  mlxsw_sp_port->sample->trunc_size : skb->len;
3338
3339        rcu_read_lock();
3340        psample_group = rcu_dereference(mlxsw_sp_port->sample->psample_group);
3341        if (!psample_group)
3342                goto out_unlock;
3343        psample_sample_packet(psample_group, skb, size,
3344                              mlxsw_sp_port->dev->ifindex, 0,
3345                              mlxsw_sp_port->sample->rate);
3346out_unlock:
3347        rcu_read_unlock();
3348out:
3349        consume_skb(skb);
3350}
3351
3352#define MLXSW_SP_RXL_NO_MARK(_trap_id, _action, _trap_group, _is_ctrl)  \
3353        MLXSW_RXL(mlxsw_sp_rx_listener_no_mark_func, _trap_id, _action, \
3354                  _is_ctrl, SP_##_trap_group, DISCARD)
3355
3356#define MLXSW_SP_RXL_MARK(_trap_id, _action, _trap_group, _is_ctrl)     \
3357        MLXSW_RXL(mlxsw_sp_rx_listener_mark_func, _trap_id, _action,    \
3358                _is_ctrl, SP_##_trap_group, DISCARD)
3359
3360#define MLXSW_SP_EVENTL(_func, _trap_id)                \
3361        MLXSW_EVENTL(_func, _trap_id, SP_EVENT)
3362
3363static const struct mlxsw_listener mlxsw_sp_listener[] = {
3364        /* Events */
3365        MLXSW_SP_EVENTL(mlxsw_sp_pude_event_func, PUDE),
3366        /* L2 traps */
3367        MLXSW_SP_RXL_NO_MARK(STP, TRAP_TO_CPU, STP, true),
3368        MLXSW_SP_RXL_NO_MARK(LACP, TRAP_TO_CPU, LACP, true),
3369        MLXSW_SP_RXL_NO_MARK(LLDP, TRAP_TO_CPU, LLDP, true),
3370        MLXSW_SP_RXL_MARK(DHCP, MIRROR_TO_CPU, DHCP, false),
3371        MLXSW_SP_RXL_MARK(IGMP_QUERY, MIRROR_TO_CPU, IGMP, false),
3372        MLXSW_SP_RXL_NO_MARK(IGMP_V1_REPORT, TRAP_TO_CPU, IGMP, false),
3373        MLXSW_SP_RXL_NO_MARK(IGMP_V2_REPORT, TRAP_TO_CPU, IGMP, false),
3374        MLXSW_SP_RXL_NO_MARK(IGMP_V2_LEAVE, TRAP_TO_CPU, IGMP, false),
3375        MLXSW_SP_RXL_NO_MARK(IGMP_V3_REPORT, TRAP_TO_CPU, IGMP, false),
3376        MLXSW_SP_RXL_MARK(ARPBC, MIRROR_TO_CPU, ARP, false),
3377        MLXSW_SP_RXL_MARK(ARPUC, MIRROR_TO_CPU, ARP, false),
3378        MLXSW_SP_RXL_NO_MARK(FID_MISS, TRAP_TO_CPU, IP2ME, false),
3379        MLXSW_SP_RXL_MARK(IPV6_MLDV12_LISTENER_QUERY, MIRROR_TO_CPU, IPV6_MLD,
3380                          false),
3381        MLXSW_SP_RXL_NO_MARK(IPV6_MLDV1_LISTENER_REPORT, TRAP_TO_CPU, IPV6_MLD,
3382                             false),
3383        MLXSW_SP_RXL_NO_MARK(IPV6_MLDV1_LISTENER_DONE, TRAP_TO_CPU, IPV6_MLD,
3384                             false),
3385        MLXSW_SP_RXL_NO_MARK(IPV6_MLDV2_LISTENER_REPORT, TRAP_TO_CPU, IPV6_MLD,
3386                             false),
3387        /* L3 traps */
3388        MLXSW_SP_RXL_MARK(MTUERROR, TRAP_TO_CPU, ROUTER_EXP, false),
3389        MLXSW_SP_RXL_MARK(TTLERROR, TRAP_TO_CPU, ROUTER_EXP, false),
3390        MLXSW_SP_RXL_MARK(LBERROR, TRAP_TO_CPU, ROUTER_EXP, false),
3391        MLXSW_SP_RXL_MARK(IP2ME, TRAP_TO_CPU, IP2ME, false),
3392        MLXSW_SP_RXL_MARK(IPV6_UNSPECIFIED_ADDRESS, TRAP_TO_CPU, ROUTER_EXP,
3393                          false),
3394        MLXSW_SP_RXL_MARK(IPV6_LINK_LOCAL_DEST, TRAP_TO_CPU, ROUTER_EXP, false),
3395        MLXSW_SP_RXL_MARK(IPV6_LINK_LOCAL_SRC, TRAP_TO_CPU, ROUTER_EXP, false),
3396        MLXSW_SP_RXL_MARK(IPV6_ALL_NODES_LINK, TRAP_TO_CPU, ROUTER_EXP, false),
3397        MLXSW_SP_RXL_MARK(IPV6_ALL_ROUTERS_LINK, TRAP_TO_CPU, ROUTER_EXP,
3398                          false),
3399        MLXSW_SP_RXL_MARK(IPV4_OSPF, TRAP_TO_CPU, OSPF, false),
3400        MLXSW_SP_RXL_MARK(IPV6_OSPF, TRAP_TO_CPU, OSPF, false),
3401        MLXSW_SP_RXL_MARK(IPV6_DHCP, TRAP_TO_CPU, DHCP, false),
3402        MLXSW_SP_RXL_MARK(RTR_INGRESS0, TRAP_TO_CPU, REMOTE_ROUTE, false),
3403        MLXSW_SP_RXL_MARK(IPV4_BGP, TRAP_TO_CPU, BGP, false),
3404        MLXSW_SP_RXL_MARK(IPV6_BGP, TRAP_TO_CPU, BGP, false),
3405        MLXSW_SP_RXL_MARK(L3_IPV6_ROUTER_SOLICITATION, TRAP_TO_CPU, IPV6_ND,
3406                          false),
3407        MLXSW_SP_RXL_MARK(L3_IPV6_ROUTER_ADVERTISMENT, TRAP_TO_CPU, IPV6_ND,
3408                          false),
3409        MLXSW_SP_RXL_MARK(L3_IPV6_NEIGHBOR_SOLICITATION, TRAP_TO_CPU, IPV6_ND,
3410                          false),
3411        MLXSW_SP_RXL_MARK(L3_IPV6_NEIGHBOR_ADVERTISMENT, TRAP_TO_CPU, IPV6_ND,
3412                          false),
3413        MLXSW_SP_RXL_MARK(L3_IPV6_REDIRECTION, TRAP_TO_CPU, IPV6_ND, false),
3414        MLXSW_SP_RXL_MARK(IPV6_MC_LINK_LOCAL_DEST, TRAP_TO_CPU, ROUTER_EXP,
3415                          false),
3416        MLXSW_SP_RXL_MARK(HOST_MISS_IPV4, TRAP_TO_CPU, HOST_MISS, false),
3417        MLXSW_SP_RXL_MARK(HOST_MISS_IPV6, TRAP_TO_CPU, HOST_MISS, false),
3418        MLXSW_SP_RXL_MARK(ROUTER_ALERT_IPV4, TRAP_TO_CPU, ROUTER_EXP, false),
3419        MLXSW_SP_RXL_MARK(ROUTER_ALERT_IPV6, TRAP_TO_CPU, ROUTER_EXP, false),
3420        MLXSW_SP_RXL_MARK(IPIP_DECAP_ERROR, TRAP_TO_CPU, ROUTER_EXP, false),
3421        /* PKT Sample trap */
3422        MLXSW_RXL(mlxsw_sp_rx_listener_sample_func, PKT_SAMPLE, MIRROR_TO_CPU,
3423                  false, SP_IP2ME, DISCARD),
3424        /* ACL trap */
3425        MLXSW_SP_RXL_NO_MARK(ACL0, TRAP_TO_CPU, IP2ME, false),
3426};
3427
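/* Rate-limit traffic trapped to the CPU: every trap group used by the
 * driver gets its own policer, indexed by the trap group id; groups not
 * listed in the switch statement keep their defaults.
 */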
3428static int mlxsw_sp_cpu_policers_set(struct mlxsw_core *mlxsw_core)
3429{
3430        char qpcr_pl[MLXSW_REG_QPCR_LEN];
3431        enum mlxsw_reg_qpcr_ir_units ir_units;
3432        int max_cpu_policers;
3433        bool is_bytes;
3434        u8 burst_size;
3435        u32 rate;
3436        int i, err;
3437
3438        if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_CPU_POLICERS))
3439                return -EIO;
3440
3441        max_cpu_policers = MLXSW_CORE_RES_GET(mlxsw_core, MAX_CPU_POLICERS);
3442
3443        ir_units = MLXSW_REG_QPCR_IR_UNITS_M;
3444        for (i = 0; i < max_cpu_policers; i++) {
3445                is_bytes = false;
3446                switch (i) {
3447                case MLXSW_REG_HTGT_TRAP_GROUP_SP_STP:
3448                case MLXSW_REG_HTGT_TRAP_GROUP_SP_LACP:
3449                case MLXSW_REG_HTGT_TRAP_GROUP_SP_LLDP:
3450                case MLXSW_REG_HTGT_TRAP_GROUP_SP_OSPF:
3451                        rate = 128;
3452                        burst_size = 7;
3453                        break;
3454                case MLXSW_REG_HTGT_TRAP_GROUP_SP_IGMP:
3455                case MLXSW_REG_HTGT_TRAP_GROUP_SP_IPV6_MLD:
3456                        rate = 16 * 1024;
3457                        burst_size = 10;
3458                        break;
3459                case MLXSW_REG_HTGT_TRAP_GROUP_SP_BGP:
3460                case MLXSW_REG_HTGT_TRAP_GROUP_SP_ARP:
3461                case MLXSW_REG_HTGT_TRAP_GROUP_SP_DHCP:
3462                case MLXSW_REG_HTGT_TRAP_GROUP_SP_HOST_MISS:
3463                case MLXSW_REG_HTGT_TRAP_GROUP_SP_ROUTER_EXP:
3464                case MLXSW_REG_HTGT_TRAP_GROUP_SP_REMOTE_ROUTE:
3465                case MLXSW_REG_HTGT_TRAP_GROUP_SP_IPV6_ND:
3466                        rate = 1024;
3467                        burst_size = 7;
3468                        break;
3469                case MLXSW_REG_HTGT_TRAP_GROUP_SP_IP2ME:
3470                        is_bytes = true;
3471                        rate = 4 * 1024;
3472                        burst_size = 4;
3473                        break;
3474                default:
3475                        continue;
3476                }
3477
3478                mlxsw_reg_qpcr_pack(qpcr_pl, i, ir_units, is_bytes, rate,
3479                                    burst_size);
3480                err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(qpcr), qpcr_pl);
3481                if (err)
3482                        return err;
3483        }
3484
3485        return 0;
3486}
3487
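/* Map each trap group to a CPU traffic class and priority and bind it
 * to the policer configured above; control protocols (STP, LACP, LLDP,
 * OSPF) are prioritized above exception traffic such as router traps.
 */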
3488static int mlxsw_sp_trap_groups_set(struct mlxsw_core *mlxsw_core)
3489{
3490        char htgt_pl[MLXSW_REG_HTGT_LEN];
3491        enum mlxsw_reg_htgt_trap_group i;
3492        int max_cpu_policers;
3493        int max_trap_groups;
3494        u8 priority, tc;
3495        u16 policer_id;
3496        int err;
3497
3498        if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_TRAP_GROUPS))
3499                return -EIO;
3500
3501        max_trap_groups = MLXSW_CORE_RES_GET(mlxsw_core, MAX_TRAP_GROUPS);
3502        max_cpu_policers = MLXSW_CORE_RES_GET(mlxsw_core, MAX_CPU_POLICERS);
3503
3504        for (i = 0; i < max_trap_groups; i++) {
3505                policer_id = i;
3506                switch (i) {
3507                case MLXSW_REG_HTGT_TRAP_GROUP_SP_STP:
3508                case MLXSW_REG_HTGT_TRAP_GROUP_SP_LACP:
3509                case MLXSW_REG_HTGT_TRAP_GROUP_SP_LLDP:
3510                case MLXSW_REG_HTGT_TRAP_GROUP_SP_OSPF:
3511                        priority = 5;
3512                        tc = 5;
3513                        break;
3514                case MLXSW_REG_HTGT_TRAP_GROUP_SP_BGP:
3515                case MLXSW_REG_HTGT_TRAP_GROUP_SP_DHCP:
3516                        priority = 4;
3517                        tc = 4;
3518                        break;
3519                case MLXSW_REG_HTGT_TRAP_GROUP_SP_IGMP:
3520                case MLXSW_REG_HTGT_TRAP_GROUP_SP_IP2ME:
3521                case MLXSW_REG_HTGT_TRAP_GROUP_SP_IPV6_MLD:
3522                        priority = 3;
3523                        tc = 3;
3524                        break;
3525                case MLXSW_REG_HTGT_TRAP_GROUP_SP_ARP:
3526                case MLXSW_REG_HTGT_TRAP_GROUP_SP_IPV6_ND:
3527                        priority = 2;
3528                        tc = 2;
3529                        break;
3530                case MLXSW_REG_HTGT_TRAP_GROUP_SP_HOST_MISS:
3531                case MLXSW_REG_HTGT_TRAP_GROUP_SP_ROUTER_EXP:
3532                case MLXSW_REG_HTGT_TRAP_GROUP_SP_REMOTE_ROUTE:
3533                        priority = 1;
3534                        tc = 1;
3535                        break;
3536                case MLXSW_REG_HTGT_TRAP_GROUP_SP_EVENT:
3537                        priority = MLXSW_REG_HTGT_DEFAULT_PRIORITY;
3538                        tc = MLXSW_REG_HTGT_DEFAULT_TC;
3539                        policer_id = MLXSW_REG_HTGT_INVALID_POLICER;
3540                        break;
3541                default:
3542                        continue;
3543                }
3544
3545                if (max_cpu_policers <= policer_id &&
3546                    policer_id != MLXSW_REG_HTGT_INVALID_POLICER)
3547                        return -EIO;
3548
3549                mlxsw_reg_htgt_pack(htgt_pl, i, policer_id, priority, tc);
3550                err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl);
3551                if (err)
3552                        return err;
3553        }
3554
3555        return 0;
3556}
3557
3558static int mlxsw_sp_traps_init(struct mlxsw_sp *mlxsw_sp)
3559{
3560        int i;
3561        int err;
3562
3563        err = mlxsw_sp_cpu_policers_set(mlxsw_sp->core);
3564        if (err)
3565                return err;
3566
3567        err = mlxsw_sp_trap_groups_set(mlxsw_sp->core);
3568        if (err)
3569                return err;
3570
3571        for (i = 0; i < ARRAY_SIZE(mlxsw_sp_listener); i++) {
3572                err = mlxsw_core_trap_register(mlxsw_sp->core,
3573                                               &mlxsw_sp_listener[i],
3574                                               mlxsw_sp);
3575                if (err)
3576                        goto err_listener_register;
3577
3578        }
3579        return 0;
3580
3581err_listener_register:
3582        for (i--; i >= 0; i--) {
3583                mlxsw_core_trap_unregister(mlxsw_sp->core,
3584                                           &mlxsw_sp_listener[i],
3585                                           mlxsw_sp);
3586        }
3587        return err;
3588}
3589
3590static void mlxsw_sp_traps_fini(struct mlxsw_sp *mlxsw_sp)
3591{
3592        int i;
3593
3594        for (i = 0; i < ARRAY_SIZE(mlxsw_sp_listener); i++) {
3595                mlxsw_core_trap_unregister(mlxsw_sp->core,
3596                                           &mlxsw_sp_listener[i],
3597                                           mlxsw_sp);
3598        }
3599}
3600
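/* Configure the LAG hash to include L2 (MACs, ethertype, VLAN), L3
 * (IP addresses, protocol) and L4 (ports) fields, and allocate the
 * per-LAG bookkeeping according to the device's MAX_LAG resource.
 */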
3601static int mlxsw_sp_lag_init(struct mlxsw_sp *mlxsw_sp)
3602{
3603        char slcr_pl[MLXSW_REG_SLCR_LEN];
3604        int err;
3605
3606        mlxsw_reg_slcr_pack(slcr_pl, MLXSW_REG_SLCR_LAG_HASH_SMAC |
3607                                     MLXSW_REG_SLCR_LAG_HASH_DMAC |
3608                                     MLXSW_REG_SLCR_LAG_HASH_ETHERTYPE |
3609                                     MLXSW_REG_SLCR_LAG_HASH_VLANID |
3610                                     MLXSW_REG_SLCR_LAG_HASH_SIP |
3611                                     MLXSW_REG_SLCR_LAG_HASH_DIP |
3612                                     MLXSW_REG_SLCR_LAG_HASH_SPORT |
3613                                     MLXSW_REG_SLCR_LAG_HASH_DPORT |
3614                                     MLXSW_REG_SLCR_LAG_HASH_IPPROTO);
3615        err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcr), slcr_pl);
3616        if (err)
3617                return err;
3618
3619        if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LAG) ||
3620            !MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LAG_MEMBERS))
3621                return -EIO;
3622
3623        mlxsw_sp->lags = kcalloc(MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_LAG),
3624                                 sizeof(struct mlxsw_sp_upper),
3625                                 GFP_KERNEL);
3626        if (!mlxsw_sp->lags)
3627                return -ENOMEM;
3628
3629        return 0;
3630}
3631
3632static void mlxsw_sp_lag_fini(struct mlxsw_sp *mlxsw_sp)
3633{
3634        kfree(mlxsw_sp->lags);
3635}
3636
3637static int mlxsw_sp_basic_trap_groups_set(struct mlxsw_core *mlxsw_core)
3638{
3639        char htgt_pl[MLXSW_REG_HTGT_LEN];
3640
3641        mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_EMAD,
3642                            MLXSW_REG_HTGT_INVALID_POLICER,
3643                            MLXSW_REG_HTGT_DEFAULT_PRIORITY,
3644                            MLXSW_REG_HTGT_DEFAULT_TC);
3645        return mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl);
3646}
3647
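/* Main init flow: validate (and possibly upgrade) the firmware, then
 * bring up the shared subsystems - FIDs, traps, buffers, LAG,
 * switchdev, router, SPAN, ACL, counters, dpipe - before creating the
 * ports, which rely on all of them.  Errors unwind in reverse order.
 */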
3648static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core,
3649                         const struct mlxsw_bus_info *mlxsw_bus_info)
3650{
3651        struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
3652        int err;
3653
3654        mlxsw_sp->core = mlxsw_core;
3655        mlxsw_sp->bus_info = mlxsw_bus_info;
3656
3657        err = mlxsw_sp_fw_rev_validate(mlxsw_sp);
3658        if (err) {
3659                dev_err(mlxsw_sp->bus_info->dev, "Could not upgrade firmware\n");
3660                return err;
3661        }
3662
3663        err = mlxsw_sp_base_mac_get(mlxsw_sp);
3664        if (err) {
3665                dev_err(mlxsw_sp->bus_info->dev, "Failed to get base mac\n");
3666                return err;
3667        }
3668
3669        err = mlxsw_sp_fids_init(mlxsw_sp);
3670        if (err) {
3671                dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize FIDs\n");
3672                return err;
3673        }
3674
3675        err = mlxsw_sp_traps_init(mlxsw_sp);
3676        if (err) {
3677                dev_err(mlxsw_sp->bus_info->dev, "Failed to set traps\n");
3678                goto err_traps_init;
3679        }
3680
3681        err = mlxsw_sp_buffers_init(mlxsw_sp);
3682        if (err) {
3683                dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize buffers\n");
3684                goto err_buffers_init;
3685        }
3686
3687        err = mlxsw_sp_lag_init(mlxsw_sp);
3688        if (err) {
3689                dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize LAG\n");
3690                goto err_lag_init;
3691        }
3692
3693        err = mlxsw_sp_switchdev_init(mlxsw_sp);
3694        if (err) {
3695                dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize switchdev\n");
3696                goto err_switchdev_init;
3697        }
3698
3699        err = mlxsw_sp_router_init(mlxsw_sp);
3700        if (err) {
3701                dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize router\n");
3702                goto err_router_init;
3703        }
3704
3705        err = mlxsw_sp_span_init(mlxsw_sp);
3706        if (err) {
3707                dev_err(mlxsw_sp->bus_info->dev, "Failed to init span system\n");
3708                goto err_span_init;
3709        }
3710
3711        err = mlxsw_sp_acl_init(mlxsw_sp);
3712        if (err) {
3713                dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize ACL\n");
3714                goto err_acl_init;
3715        }
3716
3717        err = mlxsw_sp_counter_pool_init(mlxsw_sp);
3718        if (err) {
3719                dev_err(mlxsw_sp->bus_info->dev, "Failed to init counter pool\n");
3720                goto err_counter_pool_init;
3721        }
3722
3723        err = mlxsw_sp_dpipe_init(mlxsw_sp);
3724        if (err) {
3725                dev_err(mlxsw_sp->bus_info->dev, "Failed to init pipeline debug\n");
3726                goto err_dpipe_init;
3727        }
3728
3729        err = mlxsw_sp_ports_create(mlxsw_sp);
3730        if (err) {
3731                dev_err(mlxsw_sp->bus_info->dev, "Failed to create ports\n");
3732                goto err_ports_create;
3733        }
3734
3735        return 0;
3736
3737err_ports_create:
3738        mlxsw_sp_dpipe_fini(mlxsw_sp);
3739err_dpipe_init:
3740        mlxsw_sp_counter_pool_fini(mlxsw_sp);
3741err_counter_pool_init:
3742        mlxsw_sp_acl_fini(mlxsw_sp);
3743err_acl_init:
3744        mlxsw_sp_span_fini(mlxsw_sp);
3745err_span_init:
3746        mlxsw_sp_router_fini(mlxsw_sp);
3747err_router_init:
3748        mlxsw_sp_switchdev_fini(mlxsw_sp);
3749err_switchdev_init:
3750        mlxsw_sp_lag_fini(mlxsw_sp);
3751err_lag_init:
3752        mlxsw_sp_buffers_fini(mlxsw_sp);
3753err_buffers_init:
3754        mlxsw_sp_traps_fini(mlxsw_sp);
3755err_traps_init:
3756        mlxsw_sp_fids_fini(mlxsw_sp);
3757        return err;
3758}
3759
3760static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core)
3761{
3762        struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
3763
3764        mlxsw_sp_ports_remove(mlxsw_sp);
3765        mlxsw_sp_dpipe_fini(mlxsw_sp);
3766        mlxsw_sp_counter_pool_fini(mlxsw_sp);
3767        mlxsw_sp_acl_fini(mlxsw_sp);
3768        mlxsw_sp_span_fini(mlxsw_sp);
3769        mlxsw_sp_router_fini(mlxsw_sp);
3770        mlxsw_sp_switchdev_fini(mlxsw_sp);
3771        mlxsw_sp_lag_fini(mlxsw_sp);
3772        mlxsw_sp_buffers_fini(mlxsw_sp);
3773        mlxsw_sp_traps_fini(mlxsw_sp);
3774        mlxsw_sp_fids_fini(mlxsw_sp);
3775}
3776
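/* Static configuration profile handed to the core on init: flood table
 * layout, KVD (hash/linear) memory partitioning and a single Ethernet
 * SWID.
 */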
3777static const struct mlxsw_config_profile mlxsw_sp_config_profile = {
3778        .used_max_vepa_channels         = 1,
3779        .max_vepa_channels              = 0,
3780        .used_max_mid                   = 1,
3781        .max_mid                        = MLXSW_SP_MID_MAX,
3782        .used_max_pgt                   = 1,
3783        .max_pgt                        = 0,
3784        .used_flood_tables              = 1,
3785        .used_flood_mode                = 1,
3786        .flood_mode                     = 3,
3787        .max_fid_offset_flood_tables    = 3,
3788        .fid_offset_flood_table_size    = VLAN_N_VID - 1,
3789        .max_fid_flood_tables           = 3,
3790        .fid_flood_table_size           = MLXSW_SP_FID_8021D_MAX,
3791        .used_max_ib_mc                 = 1,
3792        .max_ib_mc                      = 0,
3793        .used_max_pkey                  = 1,
3794        .max_pkey                       = 0,
3795        .used_kvd_split_data            = 1,
3796        .kvd_hash_granularity           = MLXSW_SP_KVD_GRANULARITY,
3797        .kvd_hash_single_parts          = 2,
3798        .kvd_hash_double_parts          = 1,
3799        .kvd_linear_size                = MLXSW_SP_KVD_LINEAR_SIZE,
3800        .swid_config                    = {
3801                {
3802                        .used_type      = 1,
3803                        .type           = MLXSW_PORT_SWID_TYPE_ETH,
3804                }
3805        },
3806        .resource_query_enable          = 1,
3807};
3808
3809static struct mlxsw_driver mlxsw_sp_driver = {
3810        .kind                           = mlxsw_sp_driver_name,
3811        .priv_size                      = sizeof(struct mlxsw_sp),
3812        .init                           = mlxsw_sp_init,
3813        .fini                           = mlxsw_sp_fini,
3814        .basic_trap_groups_set          = mlxsw_sp_basic_trap_groups_set,
3815        .port_split                     = mlxsw_sp_port_split,
3816        .port_unsplit                   = mlxsw_sp_port_unsplit,
3817        .sb_pool_get                    = mlxsw_sp_sb_pool_get,
3818        .sb_pool_set                    = mlxsw_sp_sb_pool_set,
3819        .sb_port_pool_get               = mlxsw_sp_sb_port_pool_get,
3820        .sb_port_pool_set               = mlxsw_sp_sb_port_pool_set,
3821        .sb_tc_pool_bind_get            = mlxsw_sp_sb_tc_pool_bind_get,
3822        .sb_tc_pool_bind_set            = mlxsw_sp_sb_tc_pool_bind_set,
3823        .sb_occ_snapshot                = mlxsw_sp_sb_occ_snapshot,
3824        .sb_occ_max_clear               = mlxsw_sp_sb_occ_max_clear,
3825        .sb_occ_port_pool_get           = mlxsw_sp_sb_occ_port_pool_get,
3826        .sb_occ_tc_port_bind_get        = mlxsw_sp_sb_occ_tc_port_bind_get,
3827        .txhdr_construct                = mlxsw_sp_txhdr_construct,
3828        .txhdr_len                      = MLXSW_TXHDR_LEN,
3829        .profile                        = &mlxsw_sp_config_profile,
3830};
3831
3832bool mlxsw_sp_port_dev_check(const struct net_device *dev)
3833{
3834        return dev->netdev_ops == &mlxsw_sp_port_netdev_ops;
3835}
3836
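/* Helpers for resolving an upper device (bridge, LAG, VLAN) to one of
 * its mlxsw_sp lower ports; the _rcu variant is for callers that
 * already run under rcu_read_lock().
 */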
3837static int mlxsw_sp_lower_dev_walk(struct net_device *lower_dev, void *data)
3838{
3839        struct mlxsw_sp_port **p_mlxsw_sp_port = data;
3840        int ret = 0;
3841
3842        if (mlxsw_sp_port_dev_check(lower_dev)) {
3843                *p_mlxsw_sp_port = netdev_priv(lower_dev);
3844                ret = 1;
3845        }
3846
3847        return ret;
3848}
3849
3850struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find(struct net_device *dev)
3851{
3852        struct mlxsw_sp_port *mlxsw_sp_port;
3853
3854        if (mlxsw_sp_port_dev_check(dev))
3855                return netdev_priv(dev);
3856
3857        mlxsw_sp_port = NULL;
3858        netdev_walk_all_lower_dev(dev, mlxsw_sp_lower_dev_walk, &mlxsw_sp_port);
3859
3860        return mlxsw_sp_port;
3861}
3862
3863struct mlxsw_sp *mlxsw_sp_lower_get(struct net_device *dev)
3864{
3865        struct mlxsw_sp_port *mlxsw_sp_port;
3866
3867        mlxsw_sp_port = mlxsw_sp_port_dev_lower_find(dev);
3868        return mlxsw_sp_port ? mlxsw_sp_port->mlxsw_sp : NULL;
3869}
3870
3871struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find_rcu(struct net_device *dev)
3872{
3873        struct mlxsw_sp_port *mlxsw_sp_port;
3874
3875        if (mlxsw_sp_port_dev_check(dev))
3876                return netdev_priv(dev);
3877
3878        mlxsw_sp_port = NULL;
3879        netdev_walk_all_lower_dev_rcu(dev, mlxsw_sp_lower_dev_walk,
3880                                      &mlxsw_sp_port);
3881
3882        return mlxsw_sp_port;
3883}
3884
3885struct mlxsw_sp_port *mlxsw_sp_port_lower_dev_hold(struct net_device *dev)
3886{
3887        struct mlxsw_sp_port *mlxsw_sp_port;
3888
3889        rcu_read_lock();
3890        mlxsw_sp_port = mlxsw_sp_port_dev_lower_find_rcu(dev);
3891        if (mlxsw_sp_port)
3892                dev_hold(mlxsw_sp_port->dev);
3893        rcu_read_unlock();
3894        return mlxsw_sp_port;
3895}
3896
3897void mlxsw_sp_port_dev_put(struct mlxsw_sp_port *mlxsw_sp_port)
3898{
3899        dev_put(mlxsw_sp_port->dev);
3900}
3901
3902static int mlxsw_sp_lag_create(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
3903{
3904        char sldr_pl[MLXSW_REG_SLDR_LEN];
3905
3906        mlxsw_reg_sldr_lag_create_pack(sldr_pl, lag_id);
3907        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
3908}
3909
3910static int mlxsw_sp_lag_destroy(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
3911{
3912        char sldr_pl[MLXSW_REG_SLDR_LEN];
3913
3914        mlxsw_reg_sldr_lag_destroy_pack(sldr_pl, lag_id);
3915        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
3916}
3917
3918static int mlxsw_sp_lag_col_port_add(struct mlxsw_sp_port *mlxsw_sp_port,
3919                                     u16 lag_id, u8 port_index)
3920{
3921        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
3922        char slcor_pl[MLXSW_REG_SLCOR_LEN];
3923
3924        mlxsw_reg_slcor_port_add_pack(slcor_pl, mlxsw_sp_port->local_port,
3925                                      lag_id, port_index);
3926        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
3927}
3928
3929static int mlxsw_sp_lag_col_port_remove(struct mlxsw_sp_port *mlxsw_sp_port,
3930                                        u16 lag_id)
3931{
3932        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
3933        char slcor_pl[MLXSW_REG_SLCOR_LEN];
3934
3935        mlxsw_reg_slcor_port_remove_pack(slcor_pl, mlxsw_sp_port->local_port,
3936                                         lag_id);
3937        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
3938}
3939
3940static int mlxsw_sp_lag_col_port_enable(struct mlxsw_sp_port *mlxsw_sp_port,
3941                                        u16 lag_id)
3942{
3943        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
3944        char slcor_pl[MLXSW_REG_SLCOR_LEN];
3945
3946        mlxsw_reg_slcor_col_enable_pack(slcor_pl, mlxsw_sp_port->local_port,
3947                                        lag_id);
3948        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
3949}
3950
3951static int mlxsw_sp_lag_col_port_disable(struct mlxsw_sp_port *mlxsw_sp_port,
3952                                         u16 lag_id)
3953{
3954        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
3955        char slcor_pl[MLXSW_REG_SLCOR_LEN];
3956
3957        mlxsw_reg_slcor_col_disable_pack(slcor_pl, mlxsw_sp_port->local_port,
3958                                         lag_id);
3959        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
3960}
3961
3962static int mlxsw_sp_lag_index_get(struct mlxsw_sp *mlxsw_sp,
3963                                  struct net_device *lag_dev,
3964                                  u16 *p_lag_id)
3965{
3966        struct mlxsw_sp_upper *lag;
3967        int free_lag_id = -1;
3968        u64 max_lag;
3969        int i;
3970
3971        max_lag = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_LAG);
3972        for (i = 0; i < max_lag; i++) {
3973                lag = mlxsw_sp_lag_get(mlxsw_sp, i);
3974                if (lag->ref_count) {
3975                        if (lag->dev == lag_dev) {
3976                                *p_lag_id = i;
3977                                return 0;
3978                        }
3979                } else if (free_lag_id < 0) {
3980                        free_lag_id = i;
3981                }
3982        }
3983        if (free_lag_id < 0)
3984                return -EBUSY;
3985        *p_lag_id = free_lag_id;
3986        return 0;
3987}
3988
3989static bool
3990mlxsw_sp_master_lag_check(struct mlxsw_sp *mlxsw_sp,
3991                          struct net_device *lag_dev,
3992                          struct netdev_lag_upper_info *lag_upper_info)
3993{
3994        u16 lag_id;
3995
3996        if (mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id) != 0)
3997                return false;
3998        if (lag_upper_info->tx_type != NETDEV_LAG_TX_TYPE_HASH)
3999                return false;
4000        return true;
4001}
4002
4003static int mlxsw_sp_port_lag_index_get(struct mlxsw_sp *mlxsw_sp,
4004                                       u16 lag_id, u8 *p_port_index)
4005{
4006        u64 max_lag_members;
4007        int i;
4008
4009        max_lag_members = MLXSW_CORE_RES_GET(mlxsw_sp->core,
4010                                             MAX_LAG_MEMBERS);
4011        for (i = 0; i < max_lag_members; i++) {
4012                if (!mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i)) {
4013                        *p_port_index = i;
4014                        return 0;
4015                }
4016        }
4017        return -EBUSY;
4018}
4019
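/* Join a port to a LAG: find or allocate a LAG id for the upper device,
 * add the port as a collector member, enable collection and record the
 * mapping in the core.  The default VLAN stops acting as a router
 * interface once the port is lagged.
 */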
4020static int mlxsw_sp_port_lag_join(struct mlxsw_sp_port *mlxsw_sp_port,
4021                                  struct net_device *lag_dev)
4022{
4023        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
4024        struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
4025        struct mlxsw_sp_upper *lag;
4026        u16 lag_id;
4027        u8 port_index;
4028        int err;
4029
4030        err = mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id);
4031        if (err)
4032                return err;
4033        lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id);
4034        if (!lag->ref_count) {
4035                err = mlxsw_sp_lag_create(mlxsw_sp, lag_id);
4036                if (err)
4037                        return err;
4038                lag->dev = lag_dev;
4039        }
4040
4041        err = mlxsw_sp_port_lag_index_get(mlxsw_sp, lag_id, &port_index);
4042        if (err)
4043                return err;
4044        err = mlxsw_sp_lag_col_port_add(mlxsw_sp_port, lag_id, port_index);
4045        if (err)
4046                goto err_col_port_add;
4047        err = mlxsw_sp_lag_col_port_enable(mlxsw_sp_port, lag_id);
4048        if (err)
4049                goto err_col_port_enable;
4050
4051        mlxsw_core_lag_mapping_set(mlxsw_sp->core, lag_id, port_index,
4052                                   mlxsw_sp_port->local_port);
4053        mlxsw_sp_port->lag_id = lag_id;
4054        mlxsw_sp_port->lagged = 1;
4055        lag->ref_count++;
4056
4057        /* Port is no longer usable as a router interface */
4058        mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, 1);
4059        if (mlxsw_sp_port_vlan->fid)
4060                mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan);
4061
4062        return 0;
4063
4064err_col_port_enable:
4065        mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id);
4066err_col_port_add:
4067        if (!lag->ref_count)
4068                mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);
4069        return err;
4070}
4071
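/* Reverse of the LAG join: disable and remove the collector membership,
 * flush the port's VLANs (they belonged to the LAG context), destroy
 * the LAG if this was its last member and restore the default VLAN and
 * PVID so untagged traffic keeps flowing.
 */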
4072static void mlxsw_sp_port_lag_leave(struct mlxsw_sp_port *mlxsw_sp_port,
4073                                    struct net_device *lag_dev)
4074{
4075        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
4076        u16 lag_id = mlxsw_sp_port->lag_id;
4077        struct mlxsw_sp_upper *lag;
4078
4079        if (!mlxsw_sp_port->lagged)
4080                return;
4081        lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id);
4082        WARN_ON(lag->ref_count == 0);
4083
4084        mlxsw_sp_lag_col_port_disable(mlxsw_sp_port, lag_id);
4085        mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id);
4086
4087        /* Any VLANs configured on the port are no longer valid */
4088        mlxsw_sp_port_vlan_flush(mlxsw_sp_port);
4089
4090        if (lag->ref_count == 1)
4091                mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);
4092
4093        mlxsw_core_lag_mapping_clear(mlxsw_sp->core, lag_id,
4094                                     mlxsw_sp_port->local_port);
4095        mlxsw_sp_port->lagged = 0;
4096        lag->ref_count--;
4097
4098        mlxsw_sp_port_vlan_get(mlxsw_sp_port, 1);
4099        /* Make sure untagged frames are allowed to ingress */
4100        mlxsw_sp_port_pvid_set(mlxsw_sp_port, 1);
4101}
4102
4103static int mlxsw_sp_lag_dist_port_add(struct mlxsw_sp_port *mlxsw_sp_port,
4104                                      u16 lag_id)
4105{
4106        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
4107        char sldr_pl[MLXSW_REG_SLDR_LEN];
4108
4109        mlxsw_reg_sldr_lag_add_port_pack(sldr_pl, lag_id,
4110                                         mlxsw_sp_port->local_port);
4111        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
4112}
4113
4114static int mlxsw_sp_lag_dist_port_remove(struct mlxsw_sp_port *mlxsw_sp_port,
4115                                         u16 lag_id)
4116{
4117        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
4118        char sldr_pl[MLXSW_REG_SLDR_LEN];
4119
4120        mlxsw_reg_sldr_lag_remove_port_pack(sldr_pl, lag_id,
4121                                            mlxsw_sp_port->local_port);
4122        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
4123}
4124
4125static int mlxsw_sp_port_lag_tx_en_set(struct mlxsw_sp_port *mlxsw_sp_port,
4126                                       bool lag_tx_enabled)
4127{
4128        if (lag_tx_enabled)
4129                return mlxsw_sp_lag_dist_port_add(mlxsw_sp_port,
4130                                                  mlxsw_sp_port->lag_id);
4131        else
4132                return mlxsw_sp_lag_dist_port_remove(mlxsw_sp_port,
4133                                                     mlxsw_sp_port->lag_id);
4134}
4135
4136static int mlxsw_sp_port_lag_changed(struct mlxsw_sp_port *mlxsw_sp_port,
4137                                     struct netdev_lag_lower_state_info *info)
4138{
4139        return mlxsw_sp_port_lag_tx_en_set(mlxsw_sp_port, info->tx_enabled);
4140}
4141
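/* Set the STP state of every possible VID on the port in a single SPMS
 * write: forwarding when enabling, discarding when disabling.
 */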
4142static int mlxsw_sp_port_stp_set(struct mlxsw_sp_port *mlxsw_sp_port,
4143                                 bool enable)
4144{
4145        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
4146        enum mlxsw_reg_spms_state spms_state;
4147        char *spms_pl;
4148        u16 vid;
4149        int err;
4150
4151        spms_state = enable ? MLXSW_REG_SPMS_STATE_FORWARDING :
4152                              MLXSW_REG_SPMS_STATE_DISCARDING;
4153
4154        spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL);
4155        if (!spms_pl)
4156                return -ENOMEM;
4157        mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port);
4158
4159        for (vid = 0; vid < VLAN_N_VID; vid++)
4160                mlxsw_reg_spms_vid_pack(spms_pl, vid, spms_state);
4161
4162        err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl);
4163        kfree(spms_pl);
4164        return err;
4165}
4166
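/* Enslaving to Open vSwitch: put the port into virtual port mode, force
 * the forwarding STP state and add membership in VIDs 2 and up so the
 * OVS datapath can handle any VLAN in software.
 */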
4167static int mlxsw_sp_port_ovs_join(struct mlxsw_sp_port *mlxsw_sp_port)
4168{
4169        int err;
4170
4171        err = mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, true);
4172        if (err)
4173                return err;
4174        err = mlxsw_sp_port_stp_set(mlxsw_sp_port, true);
4175        if (err)
4176                goto err_port_stp_set;
4177        err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, 2, VLAN_N_VID - 1,
4178                                     true, false);
4179        if (err)
4180                goto err_port_vlan_set;
4181        return 0;
4182
4183err_port_vlan_set:
4184        mlxsw_sp_port_stp_set(mlxsw_sp_port, false);
4185err_port_stp_set:
4186        mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false);
4187        return err;
4188}
4189
4190static void mlxsw_sp_port_ovs_leave(struct mlxsw_sp_port *mlxsw_sp_port)
4191{
4192        mlxsw_sp_port_vlan_set(mlxsw_sp_port, 2, VLAN_N_VID - 1,
4193                               false, false);
4194        mlxsw_sp_port_stp_set(mlxsw_sp_port, false);
4195        mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false);
4196}
4197
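/* Notifier for upper devices of a port: PRECHANGEUPPER vetoes
 * configurations the hardware cannot offload (unsupported upper types,
 * stacked uppers, non-hash LAGs, certain VLAN combinations);
 * CHANGEUPPER performs the actual bridge, LAG or OVS join/leave.
 */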
4198static int mlxsw_sp_netdevice_port_upper_event(struct net_device *lower_dev,
4199                                               struct net_device *dev,
4200                                               unsigned long event, void *ptr)
4201{
4202        struct netdev_notifier_changeupper_info *info;
4203        struct mlxsw_sp_port *mlxsw_sp_port;
4204        struct net_device *upper_dev;
4205        struct mlxsw_sp *mlxsw_sp;
4206        int err = 0;
4207
4208        mlxsw_sp_port = netdev_priv(dev);
4209        mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
4210        info = ptr;
4211
4212        switch (event) {
4213        case NETDEV_PRECHANGEUPPER:
4214                upper_dev = info->upper_dev;
4215                if (!is_vlan_dev(upper_dev) &&
4216                    !netif_is_lag_master(upper_dev) &&
4217                    !netif_is_bridge_master(upper_dev) &&
4218                    !netif_is_ovs_master(upper_dev))
4219                        return -EINVAL;
4220                if (!info->linking)
4221                        break;
4222                if (netdev_has_any_upper_dev(upper_dev))
4223                        return -EINVAL;
4224                if (netif_is_lag_master(upper_dev) &&
4225                    !mlxsw_sp_master_lag_check(mlxsw_sp, upper_dev,
4226                                               info->upper_info))
4227                        return -EINVAL;
4228                if (netif_is_lag_master(upper_dev) && vlan_uses_dev(dev))
4229                        return -EINVAL;
4230                if (netif_is_lag_port(dev) && is_vlan_dev(upper_dev) &&
4231                    !netif_is_lag_master(vlan_dev_real_dev(upper_dev)))
4232                        return -EINVAL;
4233                if (netif_is_ovs_master(upper_dev) && vlan_uses_dev(dev))
4234                        return -EINVAL;
4235                if (netif_is_ovs_port(dev) && is_vlan_dev(upper_dev))
4236                        return -EINVAL;
4237                break;
4238        case NETDEV_CHANGEUPPER:
4239                upper_dev = info->upper_dev;
4240                if (netif_is_bridge_master(upper_dev)) {
4241                        if (info->linking)
4242                                err = mlxsw_sp_port_bridge_join(mlxsw_sp_port,
4243                                                                lower_dev,
4244                                                                upper_dev);
4245                        else
4246                                mlxsw_sp_port_bridge_leave(mlxsw_sp_port,
4247                                                           lower_dev,
4248                                                           upper_dev);
4249                } else if (netif_is_lag_master(upper_dev)) {
4250                        if (info->linking)
4251                                err = mlxsw_sp_port_lag_join(mlxsw_sp_port,
4252                                                             upper_dev);
4253                        else
4254                                mlxsw_sp_port_lag_leave(mlxsw_sp_port,
4255                                                        upper_dev);
4256                } else if (netif_is_ovs_master(upper_dev)) {
4257                        if (info->linking)
4258                                err = mlxsw_sp_port_ovs_join(mlxsw_sp_port);
4259                        else
4260                                mlxsw_sp_port_ovs_leave(mlxsw_sp_port);
4261                }
4262                break;
4263        }
4264
4265        return err;
4266}
4267
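/* Reflect a change in the bonding/team lower state (tx_enabled) of an
 * enslaved LAG member port to the device.
 */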
4268static int mlxsw_sp_netdevice_port_lower_event(struct net_device *dev,
4269                                               unsigned long event, void *ptr)
4270{
4271        struct netdev_notifier_changelowerstate_info *info;
4272        struct mlxsw_sp_port *mlxsw_sp_port;
4273        int err;
4274
4275        mlxsw_sp_port = netdev_priv(dev);
4276        info = ptr;
4277
4278        switch (event) {
4279        case NETDEV_CHANGELOWERSTATE:
4280                if (netif_is_lag_port(dev) && mlxsw_sp_port->lagged) {
4281                        err = mlxsw_sp_port_lag_changed(mlxsw_sp_port,
4282                                                        info->lower_state_info);
4283                        if (err)
4284                                netdev_err(dev, "Failed to reflect link aggregation lower state change\n");
4285                }
4286                break;
4287        }
4288
4289        return 0;
4290}
4291
4292static int mlxsw_sp_netdevice_port_event(struct net_device *lower_dev,
4293                                         struct net_device *port_dev,
4294                                         unsigned long event, void *ptr)
4295{
4296        switch (event) {
4297        case NETDEV_PRECHANGEUPPER:
4298        case NETDEV_CHANGEUPPER:
4299                return mlxsw_sp_netdevice_port_upper_event(lower_dev, port_dev,
4300                                                           event, ptr);
4301        case NETDEV_CHANGELOWERSTATE:
4302                return mlxsw_sp_netdevice_port_lower_event(port_dev, event,
4303                                                           ptr);
4304        }
4305
4306        return 0;
4307}
4308
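/* Forward a netdevice event on a LAG device to every mlxsw_sp port
 * enslaved to it.
 */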
4309static int mlxsw_sp_netdevice_lag_event(struct net_device *lag_dev,
4310                                        unsigned long event, void *ptr)
4311{
4312        struct net_device *dev;
4313        struct list_head *iter;
4314        int ret;
4315
4316        netdev_for_each_lower_dev(lag_dev, dev, iter) {
4317                if (mlxsw_sp_port_dev_check(dev)) {
4318                        ret = mlxsw_sp_netdevice_port_event(lag_dev, dev, event,
4319                                                            ptr);
4320                        if (ret)
4321                                return ret;
4322                }
4323        }
4324
4325        return 0;
4326}
4327
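/* Handle changeupper events on a VLAN device on top of an mlxsw_sp port.
 * A bridge master is the only valid upper; join or leave it accordingly.
 */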
4328static int mlxsw_sp_netdevice_port_vlan_event(struct net_device *vlan_dev,
4329                                              struct net_device *dev,
4330                                              unsigned long event, void *ptr,
4331                                              u16 vid)
4332{
4333        struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
4334        struct netdev_notifier_changeupper_info *info = ptr;
4335        struct net_device *upper_dev;
4336        int err = 0;
4337
4338        switch (event) {
4339        case NETDEV_PRECHANGEUPPER:
4340                upper_dev = info->upper_dev;
4341                if (!netif_is_bridge_master(upper_dev))
4342                        return -EINVAL;
4343                if (!info->linking)
4344                        break;
4345                if (netdev_has_any_upper_dev(upper_dev))
4346                        return -EINVAL;
4347                break;
4348        case NETDEV_CHANGEUPPER:
4349                upper_dev = info->upper_dev;
4350                if (netif_is_bridge_master(upper_dev)) {
4351                        if (info->linking)
4352                                err = mlxsw_sp_port_bridge_join(mlxsw_sp_port,
4353                                                                vlan_dev,
4354                                                                upper_dev);
4355                        else
4356                                mlxsw_sp_port_bridge_leave(mlxsw_sp_port,
4357                                                           vlan_dev,
4358                                                           upper_dev);
4359                } else {
4360                        err = -EINVAL;
4361                        WARN_ON(1);
4362                }
4363                break;
4364        }
4365
4366        return err;
4367}
4368
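/* Handle changeupper events on a VLAN device on top of a LAG device by
 * forwarding them to every mlxsw_sp port enslaved to the LAG.
 */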
4369static int mlxsw_sp_netdevice_lag_port_vlan_event(struct net_device *vlan_dev,
4370                                                  struct net_device *lag_dev,
4371                                                  unsigned long event,
4372                                                  void *ptr, u16 vid)
4373{
4374        struct net_device *dev;
4375        struct list_head *iter;
4376        int ret;
4377
4378        netdev_for_each_lower_dev(lag_dev, dev, iter) {
4379                if (mlxsw_sp_port_dev_check(dev)) {
4380                        ret = mlxsw_sp_netdevice_port_vlan_event(vlan_dev, dev,
4381                                                                 event, ptr,
4382                                                                 vid);
4383                        if (ret)
4384                                return ret;
4385                }
4386        }
4387
4388        return 0;
4389}
4390
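/* Dispatch an event on a VLAN device according to its real device: an
 * mlxsw_sp port or a LAG device.
 */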
4391static int mlxsw_sp_netdevice_vlan_event(struct net_device *vlan_dev,
4392                                         unsigned long event, void *ptr)
4393{
4394        struct net_device *real_dev = vlan_dev_real_dev(vlan_dev);
4395        u16 vid = vlan_dev_vlan_id(vlan_dev);
4396
4397        if (mlxsw_sp_port_dev_check(real_dev))
4398                return mlxsw_sp_netdevice_port_vlan_event(vlan_dev, real_dev,
4399                                                          event, ptr, vid);
4400        else if (netif_is_lag_master(real_dev))
4401                return mlxsw_sp_netdevice_lag_port_vlan_event(vlan_dev,
4402                                                              real_dev, event,
4403                                                              ptr, vid);
4404
4405        return 0;
4406}
4407
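/* Return true if the changeupper event concerns enslavement to an L3
 * master device (VRF).
 */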
4408static bool mlxsw_sp_is_vrf_event(unsigned long event, void *ptr)
4409{
4410        struct netdev_notifier_changeupper_info *info = ptr;
4411
4412        if (event != NETDEV_PRECHANGEUPPER && event != NETDEV_CHANGEUPPER)
4413                return false;
4414        return netif_is_l3_master(info->upper_dev);
4415}
4416
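/* Main netdevice notifier: dispatch the event according to its type and
 * the kind of netdevice it was generated for.
 */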
4417static int mlxsw_sp_netdevice_event(struct notifier_block *unused,
4418                                    unsigned long event, void *ptr)
4419{
4420        struct net_device *dev = netdev_notifier_info_to_dev(ptr);
4421        int err = 0;
4422
4423        if (event == NETDEV_CHANGEADDR || event == NETDEV_CHANGEMTU)
4424                err = mlxsw_sp_netdevice_router_port_event(dev);
4425        else if (mlxsw_sp_is_vrf_event(event, ptr))
4426                err = mlxsw_sp_netdevice_vrf_event(dev, event, ptr);
4427        else if (mlxsw_sp_port_dev_check(dev))
4428                err = mlxsw_sp_netdevice_port_event(dev, dev, event, ptr);
4429        else if (netif_is_lag_master(dev))
4430                err = mlxsw_sp_netdevice_lag_event(dev, event, ptr);
4431        else if (is_vlan_dev(dev))
4432                err = mlxsw_sp_netdevice_vlan_event(dev, event, ptr);
4433
4434        return notifier_from_errno(err);
4435}
4436
4437static struct notifier_block mlxsw_sp_netdevice_nb __read_mostly = {
4438        .notifier_call = mlxsw_sp_netdevice_event,
4439};
4440
4441static struct notifier_block mlxsw_sp_inetaddr_nb __read_mostly = {
4442        .notifier_call = mlxsw_sp_inetaddr_event,
4443        .priority = 10, /* Must be called before FIB notifier block */
4444};
4445
4446static struct notifier_block mlxsw_sp_inet6addr_nb __read_mostly = {
4447        .notifier_call = mlxsw_sp_inet6addr_event,
4448};
4449
4450static struct notifier_block mlxsw_sp_router_netevent_nb __read_mostly = {
4451        .notifier_call = mlxsw_sp_router_netevent_event,
4452};
4453
4454static const struct pci_device_id mlxsw_sp_pci_id_table[] = {
4455        {PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM), 0},
4456        {0, },
4457};
4458
4459static struct pci_driver mlxsw_sp_pci_driver = {
4460        .name = mlxsw_sp_driver_name,
4461        .id_table = mlxsw_sp_pci_id_table,
4462};
4463
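/* Register the notifiers first and then the core and PCI drivers; on
 * error, unwind the registrations in reverse order.
 */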
4464static int __init mlxsw_sp_module_init(void)
4465{
4466        int err;
4467
4468        register_netdevice_notifier_rh(&mlxsw_sp_netdevice_nb);
4469        register_inetaddr_notifier(&mlxsw_sp_inetaddr_nb);
4470        register_inet6addr_notifier(&mlxsw_sp_inet6addr_nb);
4471        register_netevent_notifier(&mlxsw_sp_router_netevent_nb);
4472
4473        err = mlxsw_core_driver_register(&mlxsw_sp_driver);
4474        if (err)
4475                goto err_core_driver_register;
4476
4477        err = mlxsw_pci_driver_register(&mlxsw_sp_pci_driver);
4478        if (err)
4479                goto err_pci_driver_register;
4480
4481        return 0;
4482
4483err_pci_driver_register:
4484        mlxsw_core_driver_unregister(&mlxsw_sp_driver);
4485err_core_driver_register:
4486        unregister_netevent_notifier(&mlxsw_sp_router_netevent_nb);
4487        unregister_inet6addr_notifier(&mlxsw_sp_inet6addr_nb);
4488        unregister_inetaddr_notifier(&mlxsw_sp_inetaddr_nb);
4489        unregister_netdevice_notifier_rh(&mlxsw_sp_netdevice_nb);
4490        return err;
4491}
4492
4493static void __exit mlxsw_sp_module_exit(void)
4494{
4495        mlxsw_pci_driver_unregister(&mlxsw_sp_pci_driver);
4496        mlxsw_core_driver_unregister(&mlxsw_sp_driver);
4497        unregister_netevent_notifier(&mlxsw_sp_router_netevent_nb);
4498        unregister_inet6addr_notifier(&mlxsw_sp_inet6addr_nb);
4499        unregister_inetaddr_notifier(&mlxsw_sp_inetaddr_nb);
4500        unregister_netdevice_notifier_rh(&mlxsw_sp_netdevice_nb);
4501}
4502
4503module_init(mlxsw_sp_module_init);
4504module_exit(mlxsw_sp_module_exit);
4505
4506MODULE_LICENSE("Dual BSD/GPL");
4507MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
4508MODULE_DESCRIPTION("Mellanox Spectrum driver");
4509MODULE_DEVICE_TABLE(pci, mlxsw_sp_pci_id_table);
4510MODULE_FIRMWARE(MLXSW_SP_FW_FILENAME);
4511