linux/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
   1// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
   2/* Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved */
   3
   4#include <linux/kernel.h>
   5#include <linux/module.h>
   6#include <linux/types.h>
   7#include <linux/pci.h>
   8#include <linux/netdevice.h>
   9#include <linux/etherdevice.h>
  10#include <linux/ethtool.h>
  11#include <linux/slab.h>
  12#include <linux/device.h>
  13#include <linux/skbuff.h>
  14#include <linux/if_vlan.h>
  15#include <linux/if_bridge.h>
  16#include <linux/workqueue.h>
  17#include <linux/jiffies.h>
  18#include <linux/bitops.h>
  19#include <linux/list.h>
  20#include <linux/notifier.h>
  21#include <linux/dcbnl.h>
  22#include <linux/inetdevice.h>
  23#include <linux/netlink.h>
  24#include <linux/jhash.h>
  25#include <net/switchdev.h>
  26#include <net/pkt_cls.h>
  27#include <net/tc_act/tc_mirred.h>
  28#include <net/netevent.h>
  29#include <net/tc_act/tc_sample.h>
  30#include <net/addrconf.h>
  31
  32#include "spectrum.h"
  33#include "pci.h"
  34#include "core.h"
  35#include "core_env.h"
  36#include "reg.h"
  37#include "port.h"
  38#include "trap.h"
  39#include "txheader.h"
  40#include "spectrum_cnt.h"
  41#include "spectrum_dpipe.h"
  42#include "spectrum_acl_flex_actions.h"
  43#include "spectrum_span.h"
  44#include "../mlxfw/mlxfw.h"
  45
  46#define MLXSW_SP_FWREV_MINOR_TO_BRANCH(minor) ((minor) / 100)
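/* Firmware minor numbers encode a branch in their hundreds digit, e.g.
 * minor 2000 and minor 2008 both map to branch 20 (2000 / 100). The branch
 * is what mlxsw_sp_fw_rev_validate() compares: only firmware from the same
 * branch that is the same or newer is accepted without a reflash.
 */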
  47
  48#define MLXSW_SP1_FWREV_MAJOR 13
  49#define MLXSW_SP1_FWREV_MINOR 2000
  50#define MLXSW_SP1_FWREV_SUBMINOR 1122
  51#define MLXSW_SP1_FWREV_CAN_RESET_MINOR 1702
  52
  53static const struct mlxsw_fw_rev mlxsw_sp1_fw_rev = {
  54        .major = MLXSW_SP1_FWREV_MAJOR,
  55        .minor = MLXSW_SP1_FWREV_MINOR,
  56        .subminor = MLXSW_SP1_FWREV_SUBMINOR,
  57        .can_reset_minor = MLXSW_SP1_FWREV_CAN_RESET_MINOR,
  58};
  59
  60#define MLXSW_SP1_FW_FILENAME \
  61        "mellanox/mlxsw_spectrum-" __stringify(MLXSW_SP1_FWREV_MAJOR) \
  62        "." __stringify(MLXSW_SP1_FWREV_MINOR) \
  63        "." __stringify(MLXSW_SP1_FWREV_SUBMINOR) ".mfa2"
  64
  65static const char mlxsw_sp1_driver_name[] = "mlxsw_spectrum";
  66static const char mlxsw_sp2_driver_name[] = "mlxsw_spectrum2";
  67static const char mlxsw_sp_driver_version[] = "1.0";
  68
  69static const unsigned char mlxsw_sp1_mac_mask[ETH_ALEN] = {
  70        0xff, 0xff, 0xff, 0xff, 0xfc, 0x00
  71};
  72static const unsigned char mlxsw_sp2_mac_mask[ETH_ALEN] = {
  73        0xff, 0xff, 0xff, 0xff, 0xf0, 0x00
  74};
  75
  76/* tx_hdr_version
  77 * Tx header version.
  78 * Must be set to 1.
  79 */
  80MLXSW_ITEM32(tx, hdr, version, 0x00, 28, 4);
  81
  82/* tx_hdr_ctl
  83 * Packet control type.
  84 * 0 - Ethernet control (e.g. EMADs, LACP)
  85 * 1 - Ethernet data
  86 */
  87MLXSW_ITEM32(tx, hdr, ctl, 0x00, 26, 2);
  88
  89/* tx_hdr_proto
  90 * Packet protocol type. Must be set to 1 (Ethernet).
  91 */
  92MLXSW_ITEM32(tx, hdr, proto, 0x00, 21, 3);
  93
  94/* tx_hdr_rx_is_router
  95 * Packet is sent from the router. Valid for data packets only.
  96 */
  97MLXSW_ITEM32(tx, hdr, rx_is_router, 0x00, 19, 1);
  98
  99/* tx_hdr_fid_valid
 100 * Indicates if the 'fid' field is valid and should be used for
 101 * forwarding lookup. Valid for data packets only.
 102 */
 103MLXSW_ITEM32(tx, hdr, fid_valid, 0x00, 16, 1);
 104
 105/* tx_hdr_swid
 106 * Switch partition ID. Must be set to 0.
 107 */
 108MLXSW_ITEM32(tx, hdr, swid, 0x00, 12, 3);
 109
 110/* tx_hdr_control_tclass
 111 * Indicates if the packet should use the control TClass and not one
 112 * of the data TClasses.
 113 */
 114MLXSW_ITEM32(tx, hdr, control_tclass, 0x00, 6, 1);
 115
 116/* tx_hdr_etclass
 117 * Egress TClass to be used on the egress device on the egress port.
 118 */
 119MLXSW_ITEM32(tx, hdr, etclass, 0x00, 0, 4);
 120
 121/* tx_hdr_port_mid
 122 * Destination local port for unicast packets.
 123 * Destination multicast ID for multicast packets.
 124 *
 125 * Control packets are directed to a specific egress port, while data
 126 * packets are transmitted through the CPU port (0) into the switch partition,
 127 * where forwarding rules are applied.
 128 */
 129MLXSW_ITEM32(tx, hdr, port_mid, 0x04, 16, 16);
 130
 131/* tx_hdr_fid
 132 * Forwarding ID used for L2 forwarding lookup. Valid only if 'fid_valid' is
 133 * set, otherwise calculated based on the packet's VID using VID to FID mapping.
 134 * Valid for data packets only.
 135 */
 136MLXSW_ITEM32(tx, hdr, fid, 0x08, 0, 16);
 137
 138/* tx_hdr_type
 139 * 0 - Data packets
 140 * 6 - Control packets
 141 */
 142MLXSW_ITEM32(tx, hdr, type, 0x0C, 0, 4);
 143
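/* Each MLXSW_ITEM32(tx, hdr, <field>, <offset>, <shift>, <width>) above
 * generates mlxsw_tx_hdr_<field>_set() (and _get()) helpers (see item.h)
 * that access a <width>-bit field at bit <shift> of the 32-bit word at byte
 * <offset> in the Tx header. mlxsw_sp_txhdr_construct() below uses the
 * _set() variants to fill the header pushed in front of every sent skb.
 */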
 144struct mlxsw_sp_mlxfw_dev {
 145        struct mlxfw_dev mlxfw_dev;
 146        struct mlxsw_sp *mlxsw_sp;
 147};
 148
 149static int mlxsw_sp_component_query(struct mlxfw_dev *mlxfw_dev,
 150                                    u16 component_index, u32 *p_max_size,
 151                                    u8 *p_align_bits, u16 *p_max_write_size)
 152{
 153        struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
 154                container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
 155        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
 156        char mcqi_pl[MLXSW_REG_MCQI_LEN];
 157        int err;
 158
 159        mlxsw_reg_mcqi_pack(mcqi_pl, component_index);
 160        err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mcqi), mcqi_pl);
 161        if (err)
 162                return err;
 163        mlxsw_reg_mcqi_unpack(mcqi_pl, p_max_size, p_align_bits,
 164                              p_max_write_size);
 165
 166        *p_align_bits = max_t(u8, *p_align_bits, 2);
 167        *p_max_write_size = min_t(u16, *p_max_write_size,
 168                                  MLXSW_REG_MCDA_MAX_DATA_LEN);
 169        return 0;
 170}
 171
 172static int mlxsw_sp_fsm_lock(struct mlxfw_dev *mlxfw_dev, u32 *fwhandle)
 173{
 174        struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
 175                container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
 176        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
 177        char mcc_pl[MLXSW_REG_MCC_LEN];
 178        u8 control_state;
 179        int err;
 180
 181        mlxsw_reg_mcc_pack(mcc_pl, 0, 0, 0, 0);
 182        err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
 183        if (err)
 184                return err;
 185
 186        mlxsw_reg_mcc_unpack(mcc_pl, fwhandle, NULL, &control_state);
 187        if (control_state != MLXFW_FSM_STATE_IDLE)
 188                return -EBUSY;
 189
 190        mlxsw_reg_mcc_pack(mcc_pl,
 191                           MLXSW_REG_MCC_INSTRUCTION_LOCK_UPDATE_HANDLE,
 192                           0, *fwhandle, 0);
 193        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
 194}
 195
 196static int mlxsw_sp_fsm_component_update(struct mlxfw_dev *mlxfw_dev,
 197                                         u32 fwhandle, u16 component_index,
 198                                         u32 component_size)
 199{
 200        struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
 201                container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
 202        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
 203        char mcc_pl[MLXSW_REG_MCC_LEN];
 204
 205        mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_UPDATE_COMPONENT,
 206                           component_index, fwhandle, component_size);
 207        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
 208}
 209
 210static int mlxsw_sp_fsm_block_download(struct mlxfw_dev *mlxfw_dev,
 211                                       u32 fwhandle, u8 *data, u16 size,
 212                                       u32 offset)
 213{
 214        struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
 215                container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
 216        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
 217        char mcda_pl[MLXSW_REG_MCDA_LEN];
 218
 219        mlxsw_reg_mcda_pack(mcda_pl, fwhandle, offset, size, data);
 220        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcda), mcda_pl);
 221}
 222
 223static int mlxsw_sp_fsm_component_verify(struct mlxfw_dev *mlxfw_dev,
 224                                         u32 fwhandle, u16 component_index)
 225{
 226        struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
 227                container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
 228        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
 229        char mcc_pl[MLXSW_REG_MCC_LEN];
 230
 231        mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_VERIFY_COMPONENT,
 232                           component_index, fwhandle, 0);
 233        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
 234}
 235
 236static int mlxsw_sp_fsm_activate(struct mlxfw_dev *mlxfw_dev, u32 fwhandle)
 237{
 238        struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
 239                container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
 240        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
 241        char mcc_pl[MLXSW_REG_MCC_LEN];
 242
 243        mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_ACTIVATE, 0,
 244                           fwhandle, 0);
 245        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
 246}
 247
 248static int mlxsw_sp_fsm_query_state(struct mlxfw_dev *mlxfw_dev, u32 fwhandle,
 249                                    enum mlxfw_fsm_state *fsm_state,
 250                                    enum mlxfw_fsm_state_err *fsm_state_err)
 251{
 252        struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
 253                container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
 254        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
 255        char mcc_pl[MLXSW_REG_MCC_LEN];
 256        u8 control_state;
 257        u8 error_code;
 258        int err;
 259
 260        mlxsw_reg_mcc_pack(mcc_pl, 0, 0, fwhandle, 0);
 261        err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
 262        if (err)
 263                return err;
 264
 265        mlxsw_reg_mcc_unpack(mcc_pl, NULL, &error_code, &control_state);
 266        *fsm_state = control_state;
 267        *fsm_state_err = min_t(enum mlxfw_fsm_state_err, error_code,
 268                               MLXFW_FSM_STATE_ERR_MAX);
 269        return 0;
 270}
 271
 272static void mlxsw_sp_fsm_cancel(struct mlxfw_dev *mlxfw_dev, u32 fwhandle)
 273{
 274        struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
 275                container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
 276        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
 277        char mcc_pl[MLXSW_REG_MCC_LEN];
 278
 279        mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_CANCEL, 0,
 280                           fwhandle, 0);
 281        mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
 282}
 283
 284static void mlxsw_sp_fsm_release(struct mlxfw_dev *mlxfw_dev, u32 fwhandle)
 285{
 286        struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
 287                container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
 288        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
 289        char mcc_pl[MLXSW_REG_MCC_LEN];
 290
 291        mlxsw_reg_mcc_pack(mcc_pl,
 292                           MLXSW_REG_MCC_INSTRUCTION_RELEASE_UPDATE_HANDLE, 0,
 293                           fwhandle, 0);
 294        mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
 295}
 296
 297static const struct mlxfw_dev_ops mlxsw_sp_mlxfw_dev_ops = {
 298        .component_query        = mlxsw_sp_component_query,
 299        .fsm_lock               = mlxsw_sp_fsm_lock,
 300        .fsm_component_update   = mlxsw_sp_fsm_component_update,
 301        .fsm_block_download     = mlxsw_sp_fsm_block_download,
 302        .fsm_component_verify   = mlxsw_sp_fsm_component_verify,
 303        .fsm_activate           = mlxsw_sp_fsm_activate,
 304        .fsm_query_state        = mlxsw_sp_fsm_query_state,
 305        .fsm_cancel             = mlxsw_sp_fsm_cancel,
 306        .fsm_release            = mlxsw_sp_fsm_release
 307};
 308
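/* Rough summary of how the mlxfw core is expected to drive the callbacks
 * above (not taken from this file): component_query -> fsm_lock -> per
 * component: fsm_component_update -> fsm_block_download ->
 * fsm_component_verify -> then fsm_activate -> fsm_release, with
 * fsm_query_state polled between steps and fsm_cancel used on error.
 */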
 309static int mlxsw_sp_firmware_flash(struct mlxsw_sp *mlxsw_sp,
 310                                   const struct firmware *firmware)
 311{
 312        struct mlxsw_sp_mlxfw_dev mlxsw_sp_mlxfw_dev = {
 313                .mlxfw_dev = {
 314                        .ops = &mlxsw_sp_mlxfw_dev_ops,
 315                        .psid = mlxsw_sp->bus_info->psid,
 316                        .psid_size = strlen(mlxsw_sp->bus_info->psid),
 317                },
 318                .mlxsw_sp = mlxsw_sp
 319        };
 320        int err;
 321
 322        mlxsw_core_fw_flash_start(mlxsw_sp->core);
 323        err = mlxfw_firmware_flash(&mlxsw_sp_mlxfw_dev.mlxfw_dev, firmware);
 324        mlxsw_core_fw_flash_end(mlxsw_sp->core);
 325
 326        return err;
 327}
 328
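/* Make sure the running firmware is one the driver can work with; if not,
 * try to flash the bundled .mfa2 image. Returns 0 when nothing needs to be
 * done (or when the devlink 'fw_load_policy' is 'flash'), -EAGAIN when new
 * firmware was flashed and the running firmware is recent enough to support
 * a reset, and a negative errno on failure.
 */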
 329static int mlxsw_sp_fw_rev_validate(struct mlxsw_sp *mlxsw_sp)
 330{
 331        const struct mlxsw_fw_rev *rev = &mlxsw_sp->bus_info->fw_rev;
 332        const struct mlxsw_fw_rev *req_rev = mlxsw_sp->req_rev;
 333        const char *fw_filename = mlxsw_sp->fw_filename;
 334        union devlink_param_value value;
 335        const struct firmware *firmware;
 336        int err;
 337
 338        /* Don't check if driver does not require it */
 339        if (!req_rev || !fw_filename)
 340                return 0;
 341
 342        /* Don't check if devlink 'fw_load_policy' param is 'flash' */
 343        err = devlink_param_driverinit_value_get(priv_to_devlink(mlxsw_sp->core),
 344                                                 DEVLINK_PARAM_GENERIC_ID_FW_LOAD_POLICY,
 345                                                 &value);
 346        if (err)
 347                return err;
 348        if (value.vu8 == DEVLINK_PARAM_FW_LOAD_POLICY_VALUE_FLASH)
 349                return 0;
 350
 351        /* Validate driver & FW are compatible */
 352        if (rev->major != req_rev->major) {
 353                WARN(1, "Mismatch in major FW version [%d:%d] is never expected; Please contact support\n",
 354                     rev->major, req_rev->major);
 355                return -EINVAL;
 356        }
 357        if (MLXSW_SP_FWREV_MINOR_TO_BRANCH(rev->minor) ==
 358            MLXSW_SP_FWREV_MINOR_TO_BRANCH(req_rev->minor) &&
 359            (rev->minor > req_rev->minor ||
 360             (rev->minor == req_rev->minor &&
 361              rev->subminor >= req_rev->subminor)))
 362                return 0;
 363
 364        dev_info(mlxsw_sp->bus_info->dev, "The firmware version %d.%d.%d is incompatible with the driver\n",
 365                 rev->major, rev->minor, rev->subminor);
 366        dev_info(mlxsw_sp->bus_info->dev, "Flashing firmware using file %s\n",
 367                 fw_filename);
 368
 369        err = request_firmware_direct(&firmware, fw_filename,
 370                                      mlxsw_sp->bus_info->dev);
 371        if (err) {
 372                dev_err(mlxsw_sp->bus_info->dev, "Could not request firmware file %s\n",
 373                        fw_filename);
 374                return err;
 375        }
 376
 377        err = mlxsw_sp_firmware_flash(mlxsw_sp, firmware);
 378        release_firmware(firmware);
 379        if (err)
 380                dev_err(mlxsw_sp->bus_info->dev, "Could not upgrade firmware\n");
 381
 382        /* On FW flash success, tell the caller FW reset is needed
 383         * if current FW supports it.
 384         */
 385        if (rev->minor >= req_rev->can_reset_minor)
 386                return err ? err : -EAGAIN;
 387        else
 388                return 0;
 389}
 390
 391int mlxsw_sp_flow_counter_get(struct mlxsw_sp *mlxsw_sp,
 392                              unsigned int counter_index, u64 *packets,
 393                              u64 *bytes)
 394{
 395        char mgpc_pl[MLXSW_REG_MGPC_LEN];
 396        int err;
 397
 398        mlxsw_reg_mgpc_pack(mgpc_pl, counter_index, MLXSW_REG_MGPC_OPCODE_NOP,
 399                            MLXSW_REG_FLOW_COUNTER_SET_TYPE_PACKETS_BYTES);
 400        err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mgpc), mgpc_pl);
 401        if (err)
 402                return err;
 403        if (packets)
 404                *packets = mlxsw_reg_mgpc_packet_counter_get(mgpc_pl);
 405        if (bytes)
 406                *bytes = mlxsw_reg_mgpc_byte_counter_get(mgpc_pl);
 407        return 0;
 408}
 409
 410static int mlxsw_sp_flow_counter_clear(struct mlxsw_sp *mlxsw_sp,
 411                                       unsigned int counter_index)
 412{
 413        char mgpc_pl[MLXSW_REG_MGPC_LEN];
 414
 415        mlxsw_reg_mgpc_pack(mgpc_pl, counter_index, MLXSW_REG_MGPC_OPCODE_CLEAR,
 416                            MLXSW_REG_FLOW_COUNTER_SET_TYPE_PACKETS_BYTES);
 417        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mgpc), mgpc_pl);
 418}
 419
 420int mlxsw_sp_flow_counter_alloc(struct mlxsw_sp *mlxsw_sp,
 421                                unsigned int *p_counter_index)
 422{
 423        int err;
 424
 425        err = mlxsw_sp_counter_alloc(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW,
 426                                     p_counter_index);
 427        if (err)
 428                return err;
 429        err = mlxsw_sp_flow_counter_clear(mlxsw_sp, *p_counter_index);
 430        if (err)
 431                goto err_counter_clear;
 432        return 0;
 433
 434err_counter_clear:
 435        mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW,
 436                              *p_counter_index);
 437        return err;
 438}
 439
 440void mlxsw_sp_flow_counter_free(struct mlxsw_sp *mlxsw_sp,
 441                                unsigned int counter_index)
 442{
 443         mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW,
 444                               counter_index);
 445}
 446
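/* Push an MLXSW_TXHDR_LEN byte Tx header in front of the frame. The header
 * is parsed and stripped by the device; all packets sent this way are
 * marked as control packets directed at a specific local port rather than
 * being switched by the data path.
 */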
 447static void mlxsw_sp_txhdr_construct(struct sk_buff *skb,
 448                                     const struct mlxsw_tx_info *tx_info)
 449{
 450        char *txhdr = skb_push(skb, MLXSW_TXHDR_LEN);
 451
 452        memset(txhdr, 0, MLXSW_TXHDR_LEN);
 453
 454        mlxsw_tx_hdr_version_set(txhdr, MLXSW_TXHDR_VERSION_1);
 455        mlxsw_tx_hdr_ctl_set(txhdr, MLXSW_TXHDR_ETH_CTL);
 456        mlxsw_tx_hdr_proto_set(txhdr, MLXSW_TXHDR_PROTO_ETH);
 457        mlxsw_tx_hdr_swid_set(txhdr, 0);
 458        mlxsw_tx_hdr_control_tclass_set(txhdr, 1);
 459        mlxsw_tx_hdr_port_mid_set(txhdr, tx_info->local_port);
 460        mlxsw_tx_hdr_type_set(txhdr, MLXSW_TXHDR_TYPE_CONTROL);
 461}
 462
 463enum mlxsw_reg_spms_state mlxsw_sp_stp_spms_state(u8 state)
 464{
 465        switch (state) {
 466        case BR_STATE_FORWARDING:
 467                return MLXSW_REG_SPMS_STATE_FORWARDING;
 468        case BR_STATE_LEARNING:
 469                return MLXSW_REG_SPMS_STATE_LEARNING;
 470        case BR_STATE_LISTENING: /* fall-through */
 471        case BR_STATE_DISABLED: /* fall-through */
 472        case BR_STATE_BLOCKING:
 473                return MLXSW_REG_SPMS_STATE_DISCARDING;
 474        default:
 475                BUG();
 476        }
 477}
 478
 479int mlxsw_sp_port_vid_stp_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid,
 480                              u8 state)
 481{
 482        enum mlxsw_reg_spms_state spms_state = mlxsw_sp_stp_spms_state(state);
 483        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
 484        char *spms_pl;
 485        int err;
 486
 487        spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL);
 488        if (!spms_pl)
 489                return -ENOMEM;
 490        mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port);
 491        mlxsw_reg_spms_vid_pack(spms_pl, vid, spms_state);
 492
 493        err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl);
 494        kfree(spms_pl);
 495        return err;
 496}
 497
 498static int mlxsw_sp_base_mac_get(struct mlxsw_sp *mlxsw_sp)
 499{
 500        char spad_pl[MLXSW_REG_SPAD_LEN] = {0};
 501        int err;
 502
 503        err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(spad), spad_pl);
 504        if (err)
 505                return err;
 506        mlxsw_reg_spad_base_mac_memcpy_from(spad_pl, mlxsw_sp->base_mac);
 507        return 0;
 508}
 509
 510static int mlxsw_sp_port_sample_set(struct mlxsw_sp_port *mlxsw_sp_port,
 511                                    bool enable, u32 rate)
 512{
 513        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
 514        char mpsc_pl[MLXSW_REG_MPSC_LEN];
 515
 516        mlxsw_reg_mpsc_pack(mpsc_pl, mlxsw_sp_port->local_port, enable, rate);
 517        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpsc), mpsc_pl);
 518}
 519
 520static int mlxsw_sp_port_admin_status_set(struct mlxsw_sp_port *mlxsw_sp_port,
 521                                          bool is_up)
 522{
 523        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
 524        char paos_pl[MLXSW_REG_PAOS_LEN];
 525
 526        mlxsw_reg_paos_pack(paos_pl, mlxsw_sp_port->local_port,
 527                            is_up ? MLXSW_PORT_ADMIN_STATUS_UP :
 528                            MLXSW_PORT_ADMIN_STATUS_DOWN);
 529        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(paos), paos_pl);
 530}
 531
 532static int mlxsw_sp_port_dev_addr_set(struct mlxsw_sp_port *mlxsw_sp_port,
 533                                      unsigned char *addr)
 534{
 535        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
 536        char ppad_pl[MLXSW_REG_PPAD_LEN];
 537
 538        mlxsw_reg_ppad_pack(ppad_pl, true, mlxsw_sp_port->local_port);
 539        mlxsw_reg_ppad_mac_memcpy_to(ppad_pl, addr);
 540        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ppad), ppad_pl);
 541}
 542
 543static int mlxsw_sp_port_dev_addr_init(struct mlxsw_sp_port *mlxsw_sp_port)
 544{
 545        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
 546        unsigned char *addr = mlxsw_sp_port->dev->dev_addr;
 547
 548        ether_addr_copy(addr, mlxsw_sp->base_mac);
 549        addr[ETH_ALEN - 1] += mlxsw_sp_port->local_port;
 550        return mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr);
 551}
 552
 553static int mlxsw_sp_port_mtu_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 mtu)
 554{
 555        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
 556        char pmtu_pl[MLXSW_REG_PMTU_LEN];
 557        int max_mtu;
 558        int err;
 559
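        /* The MTU programmed to the device covers the whole frame,
         * including the Ethernet header and the Tx header pushed by the
         * driver, hence the extra ETH_HLEN + MLXSW_TXHDR_LEN bytes.
         */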
 560        mtu += MLXSW_TXHDR_LEN + ETH_HLEN;
 561        mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, 0);
 562        err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
 563        if (err)
 564                return err;
 565        max_mtu = mlxsw_reg_pmtu_max_mtu_get(pmtu_pl);
 566
 567        if (mtu > max_mtu)
 568                return -EINVAL;
 569
 570        mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, mtu);
 571        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
 572}
 573
 574static int mlxsw_sp_port_swid_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 swid)
 575{
 576        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
 577        char pspa_pl[MLXSW_REG_PSPA_LEN];
 578
 579        mlxsw_reg_pspa_pack(pspa_pl, swid, mlxsw_sp_port->local_port);
 580        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pspa), pspa_pl);
 581}
 582
 583int mlxsw_sp_port_vp_mode_set(struct mlxsw_sp_port *mlxsw_sp_port, bool enable)
 584{
 585        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
 586        char svpe_pl[MLXSW_REG_SVPE_LEN];
 587
 588        mlxsw_reg_svpe_pack(svpe_pl, mlxsw_sp_port->local_port, enable);
 589        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svpe), svpe_pl);
 590}
 591
 592int mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid,
 593                                   bool learn_enable)
 594{
 595        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
 596        char *spvmlr_pl;
 597        int err;
 598
 599        spvmlr_pl = kmalloc(MLXSW_REG_SPVMLR_LEN, GFP_KERNEL);
 600        if (!spvmlr_pl)
 601                return -ENOMEM;
 602        mlxsw_reg_spvmlr_pack(spvmlr_pl, mlxsw_sp_port->local_port, vid, vid,
 603                              learn_enable);
 604        err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvmlr), spvmlr_pl);
 605        kfree(spvmlr_pl);
 606        return err;
 607}
 608
 609static int __mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port,
 610                                    u16 vid)
 611{
 612        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
 613        char spvid_pl[MLXSW_REG_SPVID_LEN];
 614
 615        mlxsw_reg_spvid_pack(spvid_pl, mlxsw_sp_port->local_port, vid);
 616        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvid), spvid_pl);
 617}
 618
 619static int mlxsw_sp_port_allow_untagged_set(struct mlxsw_sp_port *mlxsw_sp_port,
 620                                            bool allow)
 621{
 622        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
 623        char spaft_pl[MLXSW_REG_SPAFT_LEN];
 624
 625        mlxsw_reg_spaft_pack(spaft_pl, mlxsw_sp_port->local_port, allow);
 626        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spaft), spaft_pl);
 627}
 628
 629int mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
 630{
 631        int err;
 632
 633        if (!vid) {
 634                err = mlxsw_sp_port_allow_untagged_set(mlxsw_sp_port, false);
 635                if (err)
 636                        return err;
 637        } else {
 638                err = __mlxsw_sp_port_pvid_set(mlxsw_sp_port, vid);
 639                if (err)
 640                        return err;
 641                err = mlxsw_sp_port_allow_untagged_set(mlxsw_sp_port, true);
 642                if (err)
 643                        goto err_port_allow_untagged_set;
 644        }
 645
 646        mlxsw_sp_port->pvid = vid;
 647        return 0;
 648
 649err_port_allow_untagged_set:
 650        __mlxsw_sp_port_pvid_set(mlxsw_sp_port, mlxsw_sp_port->pvid);
 651        return err;
 652}
 653
 654static int
 655mlxsw_sp_port_system_port_mapping_set(struct mlxsw_sp_port *mlxsw_sp_port)
 656{
 657        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
 658        char sspr_pl[MLXSW_REG_SSPR_LEN];
 659
 660        mlxsw_reg_sspr_pack(sspr_pl, mlxsw_sp_port->local_port);
 661        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sspr), sspr_pl);
 662}
 663
 664static int mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp,
 665                                         u8 local_port, u8 *p_module,
 666                                         u8 *p_width, u8 *p_lane)
 667{
 668        char pmlp_pl[MLXSW_REG_PMLP_LEN];
 669        int err;
 670
 671        mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
 672        err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
 673        if (err)
 674                return err;
 675        *p_module = mlxsw_reg_pmlp_module_get(pmlp_pl, 0);
 676        *p_width = mlxsw_reg_pmlp_width_get(pmlp_pl);
 677        *p_lane = mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, 0);
 678        return 0;
 679}
 680
 681static int mlxsw_sp_port_module_map(struct mlxsw_sp_port *mlxsw_sp_port,
 682                                    u8 module, u8 width, u8 lane)
 683{
 684        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
 685        char pmlp_pl[MLXSW_REG_PMLP_LEN];
 686        int i;
 687
 688        mlxsw_reg_pmlp_pack(pmlp_pl, mlxsw_sp_port->local_port);
 689        mlxsw_reg_pmlp_width_set(pmlp_pl, width);
 690        for (i = 0; i < width; i++) {
 691                mlxsw_reg_pmlp_module_set(pmlp_pl, i, module);
 692                mlxsw_reg_pmlp_tx_lane_set(pmlp_pl, i, lane + i);  /* Rx & Tx */
 693        }
 694
 695        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
 696}
 697
 698static int mlxsw_sp_port_module_unmap(struct mlxsw_sp_port *mlxsw_sp_port)
 699{
 700        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
 701        char pmlp_pl[MLXSW_REG_PMLP_LEN];
 702
 703        mlxsw_reg_pmlp_pack(pmlp_pl, mlxsw_sp_port->local_port);
 704        mlxsw_reg_pmlp_width_set(pmlp_pl, 0);
 705        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
 706}
 707
 708static int mlxsw_sp_port_open(struct net_device *dev)
 709{
 710        struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
 711        int err;
 712
 713        err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);
 714        if (err)
 715                return err;
 716        netif_start_queue(dev);
 717        return 0;
 718}
 719
 720static int mlxsw_sp_port_stop(struct net_device *dev)
 721{
 722        struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
 723
 724        netif_stop_queue(dev);
 725        return mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
 726}
 727
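/* Transmit path: make room for the Tx header (reallocating headroom if
 * needed), pad short frames to the minimum Ethernet length, prepend the Tx
 * header and hand the skb to the core for transmission. Byte counters
 * exclude the Tx header since the device strips it on the way out.
 */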
 728static netdev_tx_t mlxsw_sp_port_xmit(struct sk_buff *skb,
 729                                      struct net_device *dev)
 730{
 731        struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
 732        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
 733        struct mlxsw_sp_port_pcpu_stats *pcpu_stats;
 734        const struct mlxsw_tx_info tx_info = {
 735                .local_port = mlxsw_sp_port->local_port,
 736                .is_emad = false,
 737        };
 738        u64 len;
 739        int err;
 740
 741        if (mlxsw_core_skb_transmit_busy(mlxsw_sp->core, &tx_info))
 742                return NETDEV_TX_BUSY;
 743
 744        if (unlikely(skb_headroom(skb) < MLXSW_TXHDR_LEN)) {
 745                struct sk_buff *skb_orig = skb;
 746
 747                skb = skb_realloc_headroom(skb, MLXSW_TXHDR_LEN);
 748                if (!skb) {
 749                        this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
 750                        dev_kfree_skb_any(skb_orig);
 751                        return NETDEV_TX_OK;
 752                }
 753                dev_consume_skb_any(skb_orig);
 754        }
 755
 756        if (eth_skb_pad(skb)) {
 757                this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
 758                return NETDEV_TX_OK;
 759        }
 760
 761        mlxsw_sp_txhdr_construct(skb, &tx_info);
 762        /* TX header is consumed by HW on the way so we shouldn't count its
 763         * bytes as being sent.
 764         */
 765        len = skb->len - MLXSW_TXHDR_LEN;
 766
 767        /* Due to a race we might fail here because of a full queue. In that
 768         * unlikely case we simply drop the packet.
 769         */
 770        err = mlxsw_core_skb_transmit(mlxsw_sp->core, skb, &tx_info);
 771
 772        if (!err) {
 773                pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats);
 774                u64_stats_update_begin(&pcpu_stats->syncp);
 775                pcpu_stats->tx_packets++;
 776                pcpu_stats->tx_bytes += len;
 777                u64_stats_update_end(&pcpu_stats->syncp);
 778        } else {
 779                this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
 780                dev_kfree_skb_any(skb);
 781        }
 782        return NETDEV_TX_OK;
 783}
 784
 785static void mlxsw_sp_set_rx_mode(struct net_device *dev)
 786{
 787}
 788
 789static int mlxsw_sp_port_set_mac_address(struct net_device *dev, void *p)
 790{
 791        struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
 792        struct sockaddr *addr = p;
 793        int err;
 794
 795        if (!is_valid_ether_addr(addr->sa_data))
 796                return -EADDRNOTAVAIL;
 797
 798        err = mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr->sa_data);
 799        if (err)
 800                return err;
 801        memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
 802        return 0;
 803}
 804
 805static u16 mlxsw_sp_pg_buf_threshold_get(const struct mlxsw_sp *mlxsw_sp,
 806                                         int mtu)
 807{
 808        return 2 * mlxsw_sp_bytes_cells(mlxsw_sp, mtu);
 809}
 810
 811#define MLXSW_SP_CELL_FACTOR 2  /* 2 * cell_size / (IPG + cell_size + 1) */
 812
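/* 'delay' arrives in bit times (it is divided by BITS_PER_BYTE below). The
 * result is converted to buffer cells, scaled by MLXSW_SP_CELL_FACTOR for
 * worst-case cell packing, and one MTU worth of cells is added for a packet
 * that may already be in flight:
 *   delay_cells = MLXSW_SP_CELL_FACTOR * cells(delay / 8) + cells(mtu)
 */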
 813static u16 mlxsw_sp_pfc_delay_get(const struct mlxsw_sp *mlxsw_sp, int mtu,
 814                                  u16 delay)
 815{
 816        delay = mlxsw_sp_bytes_cells(mlxsw_sp, DIV_ROUND_UP(delay,
 817                                                            BITS_PER_BYTE));
 818        return MLXSW_SP_CELL_FACTOR * delay + mlxsw_sp_bytes_cells(mlxsw_sp,
 819                                                                   mtu);
 820}
 821
 822/* Maximum delay buffer needed in case of PAUSE frames, in bytes.
 823 * Assumes 100m cable and maximum MTU.
 824 */
 825#define MLXSW_SP_PAUSE_DELAY 58752
 826
 827static u16 mlxsw_sp_pg_buf_delay_get(const struct mlxsw_sp *mlxsw_sp, int mtu,
 828                                     u16 delay, bool pfc, bool pause)
 829{
 830        if (pfc)
 831                return mlxsw_sp_pfc_delay_get(mlxsw_sp, mtu, delay);
 832        else if (pause)
 833                return mlxsw_sp_bytes_cells(mlxsw_sp, MLXSW_SP_PAUSE_DELAY);
 834        else
 835                return 0;
 836}
 837
 838static void mlxsw_sp_pg_buf_pack(char *pbmc_pl, int index, u16 size, u16 thres,
 839                                 bool lossy)
 840{
 841        if (lossy)
 842                mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl, index, size);
 843        else
 844                mlxsw_reg_pbmc_lossless_buffer_pack(pbmc_pl, index, size,
 845                                                    thres);
 846}
 847
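/* Program the port's headroom (PBMC): for every priority group that has at
 * least one priority mapped to it, reserve threshold + delay cells and mark
 * the buffer lossless when PFC or global pause is enabled for it. Fails
 * with -ENOBUFS if the total exceeds the port's maximum headroom.
 */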
 848int __mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port, int mtu,
 849                                 u8 *prio_tc, bool pause_en,
 850                                 struct ieee_pfc *my_pfc)
 851{
 852        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
 853        u8 pfc_en = !!my_pfc ? my_pfc->pfc_en : 0;
 854        u16 delay = !!my_pfc ? my_pfc->delay : 0;
 855        char pbmc_pl[MLXSW_REG_PBMC_LEN];
 856        u32 taken_headroom_cells = 0;
 857        u32 max_headroom_cells;
 858        int i, j, err;
 859
 860        max_headroom_cells = mlxsw_sp_sb_max_headroom_cells(mlxsw_sp);
 861
 862        mlxsw_reg_pbmc_pack(pbmc_pl, mlxsw_sp_port->local_port, 0, 0);
 863        err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl);
 864        if (err)
 865                return err;
 866
 867        for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
 868                bool configure = false;
 869                bool pfc = false;
 870                u16 thres_cells;
 871                u16 delay_cells;
 872                u16 total_cells;
 873                bool lossy;
 874
 875                for (j = 0; j < IEEE_8021QAZ_MAX_TCS; j++) {
 876                        if (prio_tc[j] == i) {
 877                                pfc = pfc_en & BIT(j);
 878                                configure = true;
 879                                break;
 880                        }
 881                }
 882
 883                if (!configure)
 884                        continue;
 885
 886                lossy = !(pfc || pause_en);
 887                thres_cells = mlxsw_sp_pg_buf_threshold_get(mlxsw_sp, mtu);
 888                delay_cells = mlxsw_sp_pg_buf_delay_get(mlxsw_sp, mtu, delay,
 889                                                        pfc, pause_en);
 890                total_cells = thres_cells + delay_cells;
 891
 892                taken_headroom_cells += total_cells;
 893                if (taken_headroom_cells > max_headroom_cells)
 894                        return -ENOBUFS;
 895
 896                mlxsw_sp_pg_buf_pack(pbmc_pl, i, total_cells,
 897                                     thres_cells, lossy);
 898        }
 899
 900        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl);
 901}
 902
 903static int mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port,
 904                                      int mtu, bool pause_en)
 905{
 906        u8 def_prio_tc[IEEE_8021QAZ_MAX_TCS] = {0};
 907        bool dcb_en = !!mlxsw_sp_port->dcb.ets;
 908        struct ieee_pfc *my_pfc;
 909        u8 *prio_tc;
 910
 911        prio_tc = dcb_en ? mlxsw_sp_port->dcb.ets->prio_tc : def_prio_tc;
 912        my_pfc = dcb_en ? mlxsw_sp_port->dcb.pfc : NULL;
 913
 914        return __mlxsw_sp_port_headroom_set(mlxsw_sp_port, mtu, prio_tc,
 915                                            pause_en, my_pfc);
 916}
 917
 918static int mlxsw_sp_port_change_mtu(struct net_device *dev, int mtu)
 919{
 920        struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
 921        bool pause_en = mlxsw_sp_port_is_pause_en(mlxsw_sp_port);
 922        int err;
 923
 924        err = mlxsw_sp_port_headroom_set(mlxsw_sp_port, mtu, pause_en);
 925        if (err)
 926                return err;
 927        err = mlxsw_sp_span_port_mtu_update(mlxsw_sp_port, mtu);
 928        if (err)
 929                goto err_span_port_mtu_update;
 930        err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, mtu);
 931        if (err)
 932                goto err_port_mtu_set;
 933        dev->mtu = mtu;
 934        return 0;
 935
 936err_port_mtu_set:
 937        mlxsw_sp_span_port_mtu_update(mlxsw_sp_port, dev->mtu);
 938err_span_port_mtu_update:
 939        mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en);
 940        return err;
 941}
 942
 943static int
 944mlxsw_sp_port_get_sw_stats64(const struct net_device *dev,
 945                             struct rtnl_link_stats64 *stats)
 946{
 947        struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
 948        struct mlxsw_sp_port_pcpu_stats *p;
 949        u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
 950        u32 tx_dropped = 0;
 951        unsigned int start;
 952        int i;
 953
 954        for_each_possible_cpu(i) {
 955                p = per_cpu_ptr(mlxsw_sp_port->pcpu_stats, i);
 956                do {
 957                        start = u64_stats_fetch_begin_irq(&p->syncp);
 958                        rx_packets      = p->rx_packets;
 959                        rx_bytes        = p->rx_bytes;
 960                        tx_packets      = p->tx_packets;
 961                        tx_bytes        = p->tx_bytes;
 962                } while (u64_stats_fetch_retry_irq(&p->syncp, start));
 963
 964                stats->rx_packets       += rx_packets;
 965                stats->rx_bytes         += rx_bytes;
 966                stats->tx_packets       += tx_packets;
 967                stats->tx_bytes         += tx_bytes;
 968                /* tx_dropped is u32, updated without syncp protection. */
 969                tx_dropped      += p->tx_dropped;
 970        }
 971        stats->tx_dropped       = tx_dropped;
 972        return 0;
 973}
 974
 975static bool mlxsw_sp_port_has_offload_stats(const struct net_device *dev, int attr_id)
 976{
 977        switch (attr_id) {
 978        case IFLA_OFFLOAD_XSTATS_CPU_HIT:
 979                return true;
 980        }
 981
 982        return false;
 983}
 984
 985static int mlxsw_sp_port_get_offload_stats(int attr_id, const struct net_device *dev,
 986                                           void *sp)
 987{
 988        switch (attr_id) {
 989        case IFLA_OFFLOAD_XSTATS_CPU_HIT:
 990                return mlxsw_sp_port_get_sw_stats64(dev, sp);
 991        }
 992
 993        return -EINVAL;
 994}
 995
 996static int mlxsw_sp_port_get_stats_raw(struct net_device *dev, int grp,
 997                                       int prio, char *ppcnt_pl)
 998{
 999        struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1000        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1001
1002        mlxsw_reg_ppcnt_pack(ppcnt_pl, mlxsw_sp_port->local_port, grp, prio);
1003        return mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ppcnt), ppcnt_pl);
1004}
1005
1006static int mlxsw_sp_port_get_hw_stats(struct net_device *dev,
1007                                      struct rtnl_link_stats64 *stats)
1008{
1009        char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
1010        int err;
1011
1012        err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_IEEE_8023_CNT,
1013                                          0, ppcnt_pl);
1014        if (err)
1015                goto out;
1016
1017        stats->tx_packets =
1018                mlxsw_reg_ppcnt_a_frames_transmitted_ok_get(ppcnt_pl);
1019        stats->rx_packets =
1020                mlxsw_reg_ppcnt_a_frames_received_ok_get(ppcnt_pl);
1021        stats->tx_bytes =
1022                mlxsw_reg_ppcnt_a_octets_transmitted_ok_get(ppcnt_pl);
1023        stats->rx_bytes =
1024                mlxsw_reg_ppcnt_a_octets_received_ok_get(ppcnt_pl);
1025        stats->multicast =
1026                mlxsw_reg_ppcnt_a_multicast_frames_received_ok_get(ppcnt_pl);
1027
1028        stats->rx_crc_errors =
1029                mlxsw_reg_ppcnt_a_frame_check_sequence_errors_get(ppcnt_pl);
1030        stats->rx_frame_errors =
1031                mlxsw_reg_ppcnt_a_alignment_errors_get(ppcnt_pl);
1032
1033        stats->rx_length_errors = (
1034                mlxsw_reg_ppcnt_a_in_range_length_errors_get(ppcnt_pl) +
1035                mlxsw_reg_ppcnt_a_out_of_range_length_field_get(ppcnt_pl) +
1036                mlxsw_reg_ppcnt_a_frame_too_long_errors_get(ppcnt_pl));
1037
1038        stats->rx_errors = (stats->rx_crc_errors +
1039                stats->rx_frame_errors + stats->rx_length_errors);
1040
1041out:
1042        return err;
1043}
1044
1045static void
1046mlxsw_sp_port_get_hw_xstats(struct net_device *dev,
1047                            struct mlxsw_sp_port_xstats *xstats)
1048{
1049        char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
1050        int err, i;
1051
1052        err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_EXT_CNT, 0,
1053                                          ppcnt_pl);
1054        if (!err)
1055                xstats->ecn = mlxsw_reg_ppcnt_ecn_marked_get(ppcnt_pl);
1056
1057        for (i = 0; i < TC_MAX_QUEUE; i++) {
1058                err = mlxsw_sp_port_get_stats_raw(dev,
1059                                                  MLXSW_REG_PPCNT_TC_CONG_TC,
1060                                                  i, ppcnt_pl);
1061                if (!err)
1062                        xstats->wred_drop[i] =
1063                                mlxsw_reg_ppcnt_wred_discard_get(ppcnt_pl);
1064
1065                err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_TC_CNT,
1066                                                  i, ppcnt_pl);
1067                if (err)
1068                        continue;
1069
1070                xstats->backlog[i] =
1071                        mlxsw_reg_ppcnt_tc_transmit_queue_get(ppcnt_pl);
1072                xstats->tail_drop[i] =
1073                        mlxsw_reg_ppcnt_tc_no_buffer_discard_uc_get(ppcnt_pl);
1074        }
1075
1076        for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
1077                err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_PRIO_CNT,
1078                                                  i, ppcnt_pl);
1079                if (err)
1080                        continue;
1081
1082                xstats->tx_packets[i] = mlxsw_reg_ppcnt_tx_frames_get(ppcnt_pl);
1083                xstats->tx_bytes[i] = mlxsw_reg_ppcnt_tx_octets_get(ppcnt_pl);
1084        }
1085}
1086
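/* Periodic work that refreshes the cached HW counters (PPCNT) while the
 * port carrier is up; mlxsw_sp_port_get_stats64() below serves readers from
 * this cache so it never has to query the device in atomic context.
 */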
1087static void update_stats_cache(struct work_struct *work)
1088{
1089        struct mlxsw_sp_port *mlxsw_sp_port =
1090                container_of(work, struct mlxsw_sp_port,
1091                             periodic_hw_stats.update_dw.work);
1092
1093        if (!netif_carrier_ok(mlxsw_sp_port->dev))
1094                goto out;
1095
1096        mlxsw_sp_port_get_hw_stats(mlxsw_sp_port->dev,
1097                                   &mlxsw_sp_port->periodic_hw_stats.stats);
1098        mlxsw_sp_port_get_hw_xstats(mlxsw_sp_port->dev,
1099                                    &mlxsw_sp_port->periodic_hw_stats.xstats);
1100
1101out:
1102        mlxsw_core_schedule_dw(&mlxsw_sp_port->periodic_hw_stats.update_dw,
1103                               MLXSW_HW_STATS_UPDATE_TIME);
1104}
1105
1106/* Return the stats from a cache that is updated periodically,
1107 * as this function might get called in an atomic context.
1108 */
1109static void
1110mlxsw_sp_port_get_stats64(struct net_device *dev,
1111                          struct rtnl_link_stats64 *stats)
1112{
1113        struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1114
1115        memcpy(stats, &mlxsw_sp_port->periodic_hw_stats.stats, sizeof(*stats));
1116}
1117
1118static int __mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port,
1119                                    u16 vid_begin, u16 vid_end,
1120                                    bool is_member, bool untagged)
1121{
1122        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1123        char *spvm_pl;
1124        int err;
1125
1126        spvm_pl = kmalloc(MLXSW_REG_SPVM_LEN, GFP_KERNEL);
1127        if (!spvm_pl)
1128                return -ENOMEM;
1129
1130        mlxsw_reg_spvm_pack(spvm_pl, mlxsw_sp_port->local_port, vid_begin,
1131                            vid_end, is_member, untagged);
1132        err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvm), spvm_pl);
1133        kfree(spvm_pl);
1134        return err;
1135}
1136
1137int mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid_begin,
1138                           u16 vid_end, bool is_member, bool untagged)
1139{
1140        u16 vid, vid_e;
1141        int err;
1142
1143        for (vid = vid_begin; vid <= vid_end;
1144             vid += MLXSW_REG_SPVM_REC_MAX_COUNT) {
1145                vid_e = min((u16) (vid + MLXSW_REG_SPVM_REC_MAX_COUNT - 1),
1146                            vid_end);
1147
1148                err = __mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid_e,
1149                                               is_member, untagged);
1150                if (err)
1151                        return err;
1152        }
1153
1154        return 0;
1155}
1156
1157static void mlxsw_sp_port_vlan_flush(struct mlxsw_sp_port *mlxsw_sp_port,
1158                                     bool flush_default)
1159{
1160        struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan, *tmp;
1161
1162        list_for_each_entry_safe(mlxsw_sp_port_vlan, tmp,
1163                                 &mlxsw_sp_port->vlans_list, list) {
1164                if (!flush_default &&
1165                    mlxsw_sp_port_vlan->vid == MLXSW_SP_DEFAULT_VID)
1166                        continue;
1167                mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan);
1168        }
1169}
1170
1171static void
1172mlxsw_sp_port_vlan_cleanup(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
1173{
1174        if (mlxsw_sp_port_vlan->bridge_port)
1175                mlxsw_sp_port_vlan_bridge_leave(mlxsw_sp_port_vlan);
1176        else if (mlxsw_sp_port_vlan->fid)
1177                mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan);
1178}
1179
1180struct mlxsw_sp_port_vlan *
1181mlxsw_sp_port_vlan_create(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
1182{
1183        struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
1184        bool untagged = vid == MLXSW_SP_DEFAULT_VID;
1185        int err;
1186
1187        mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
1188        if (mlxsw_sp_port_vlan)
1189                return ERR_PTR(-EEXIST);
1190
1191        err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, true, untagged);
1192        if (err)
1193                return ERR_PTR(err);
1194
1195        mlxsw_sp_port_vlan = kzalloc(sizeof(*mlxsw_sp_port_vlan), GFP_KERNEL);
1196        if (!mlxsw_sp_port_vlan) {
1197                err = -ENOMEM;
1198                goto err_port_vlan_alloc;
1199        }
1200
1201        mlxsw_sp_port_vlan->mlxsw_sp_port = mlxsw_sp_port;
1202        mlxsw_sp_port_vlan->vid = vid;
1203        list_add(&mlxsw_sp_port_vlan->list, &mlxsw_sp_port->vlans_list);
1204
1205        return mlxsw_sp_port_vlan;
1206
1207err_port_vlan_alloc:
1208        mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false);
1209        return ERR_PTR(err);
1210}
1211
1212void mlxsw_sp_port_vlan_destroy(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
1213{
1214        struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
1215        u16 vid = mlxsw_sp_port_vlan->vid;
1216
1217        mlxsw_sp_port_vlan_cleanup(mlxsw_sp_port_vlan);
1218        list_del(&mlxsw_sp_port_vlan->list);
1219        kfree(mlxsw_sp_port_vlan);
1220        mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false);
1221}
1222
1223static int mlxsw_sp_port_add_vid(struct net_device *dev,
1224                                 __be16 __always_unused proto, u16 vid)
1225{
1226        struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1227
1228        /* VLAN 0 is added to HW filter when device goes up, but it is
1229         * reserved in our case, so simply return.
1230         */
1231        if (!vid)
1232                return 0;
1233
1234        return PTR_ERR_OR_ZERO(mlxsw_sp_port_vlan_create(mlxsw_sp_port, vid));
1235}
1236
1237static int mlxsw_sp_port_kill_vid(struct net_device *dev,
1238                                  __be16 __always_unused proto, u16 vid)
1239{
1240        struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1241        struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
1242
1243        /* VLAN 0 is removed from HW filter when device goes down, but
1244         * it is reserved in our case, so simply return.
1245         */
1246        if (!vid)
1247                return 0;
1248
1249        mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
1250        if (!mlxsw_sp_port_vlan)
1251                return 0;
1252        mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan);
1253
1254        return 0;
1255}
1256
1257static struct mlxsw_sp_port_mall_tc_entry *
1258mlxsw_sp_port_mall_tc_entry_find(struct mlxsw_sp_port *port,
1259                                 unsigned long cookie) {
1260        struct mlxsw_sp_port_mall_tc_entry *mall_tc_entry;
1261
1262        list_for_each_entry(mall_tc_entry, &port->mall_tc_list, list)
1263                if (mall_tc_entry->cookie == cookie)
1264                        return mall_tc_entry;
1265
1266        return NULL;
1267}
1268
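/* Offload a matchall mirror action: bind a SPAN agent towards the
 * destination netdev in the requested direction. The agent is identified by
 * span_id, which is later used to tear the mirror down.
 */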
1269static int
1270mlxsw_sp_port_add_cls_matchall_mirror(struct mlxsw_sp_port *mlxsw_sp_port,
1271                                      struct mlxsw_sp_port_mall_mirror_tc_entry *mirror,
1272                                      const struct flow_action_entry *act,
1273                                      bool ingress)
1274{
1275        enum mlxsw_sp_span_type span_type;
1276
1277        if (!act->dev) {
1278                netdev_err(mlxsw_sp_port->dev, "Could not find requested device\n");
1279                return -EINVAL;
1280        }
1281
1282        mirror->ingress = ingress;
1283        span_type = ingress ? MLXSW_SP_SPAN_INGRESS : MLXSW_SP_SPAN_EGRESS;
1284        return mlxsw_sp_span_mirror_add(mlxsw_sp_port, act->dev, span_type,
1285                                        true, &mirror->span_id);
1286}
1287
1288static void
1289mlxsw_sp_port_del_cls_matchall_mirror(struct mlxsw_sp_port *mlxsw_sp_port,
1290                                      struct mlxsw_sp_port_mall_mirror_tc_entry *mirror)
1291{
1292        enum mlxsw_sp_span_type span_type;
1293
1294        span_type = mirror->ingress ?
1295                        MLXSW_SP_SPAN_INGRESS : MLXSW_SP_SPAN_EGRESS;
1296        mlxsw_sp_span_mirror_del(mlxsw_sp_port, mirror->span_id,
1297                                 span_type, true);
1298}
1299
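/* Offload a matchall sample action using the per-port MPSC sampler. Only
 * one sampler can be active per port and the rate is bounded by
 * MLXSW_REG_MPSC_RATE_MAX. The psample group is published via RCU so the
 * sampling Rx path (implemented elsewhere in the driver) can safely
 * dereference it.
 */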
1300static int
1301mlxsw_sp_port_add_cls_matchall_sample(struct mlxsw_sp_port *mlxsw_sp_port,
1302                                      struct tc_cls_matchall_offload *cls,
1303                                      const struct flow_action_entry *act,
1304                                      bool ingress)
1305{
1306        int err;
1307
1308        if (!mlxsw_sp_port->sample)
1309                return -EOPNOTSUPP;
1310        if (rtnl_dereference(mlxsw_sp_port->sample->psample_group)) {
1311                netdev_err(mlxsw_sp_port->dev, "sample already active\n");
1312                return -EEXIST;
1313        }
1314        if (act->sample.rate > MLXSW_REG_MPSC_RATE_MAX) {
1315                netdev_err(mlxsw_sp_port->dev, "sample rate not supported\n");
1316                return -EOPNOTSUPP;
1317        }
1318
1319        rcu_assign_pointer(mlxsw_sp_port->sample->psample_group,
1320                           act->sample.psample_group);
1321        mlxsw_sp_port->sample->truncate = act->sample.truncate;
1322        mlxsw_sp_port->sample->trunc_size = act->sample.trunc_size;
1323        mlxsw_sp_port->sample->rate = act->sample.rate;
1324
1325        err = mlxsw_sp_port_sample_set(mlxsw_sp_port, true, act->sample.rate);
1326        if (err)
1327                goto err_port_sample_set;
1328        return 0;
1329
1330err_port_sample_set:
1331        RCU_INIT_POINTER(mlxsw_sp_port->sample->psample_group, NULL);
1332        return err;
1333}
1334
1335static void
1336mlxsw_sp_port_del_cls_matchall_sample(struct mlxsw_sp_port *mlxsw_sp_port)
1337{
1338        if (!mlxsw_sp_port->sample)
1339                return;
1340
1341        mlxsw_sp_port_sample_set(mlxsw_sp_port, false, 1);
1342        RCU_INIT_POINTER(mlxsw_sp_port->sample->psample_group, NULL);
1343}
1344
1345static int mlxsw_sp_port_add_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port,
1346                                          struct tc_cls_matchall_offload *f,
1347                                          bool ingress)
1348{
1349        struct mlxsw_sp_port_mall_tc_entry *mall_tc_entry;
1350        __be16 protocol = f->common.protocol;
1351        struct flow_action_entry *act;
1352        int err;
1353
1354        if (!flow_offload_has_one_action(&f->rule->action)) {
1355                netdev_err(mlxsw_sp_port->dev, "only singular actions are supported\n");
1356                return -EOPNOTSUPP;
1357        }
1358
1359        mall_tc_entry = kzalloc(sizeof(*mall_tc_entry), GFP_KERNEL);
1360        if (!mall_tc_entry)
1361                return -ENOMEM;
1362        mall_tc_entry->cookie = f->cookie;
1363
1364        act = &f->rule->action.entries[0];
1365
1366        if (act->id == FLOW_ACTION_MIRRED && protocol == htons(ETH_P_ALL)) {
1367                struct mlxsw_sp_port_mall_mirror_tc_entry *mirror;
1368
1369                mall_tc_entry->type = MLXSW_SP_PORT_MALL_MIRROR;
1370                mirror = &mall_tc_entry->mirror;
1371                err = mlxsw_sp_port_add_cls_matchall_mirror(mlxsw_sp_port,
1372                                                            mirror, act,
1373                                                            ingress);
1374        } else if (act->id == FLOW_ACTION_SAMPLE &&
1375                   protocol == htons(ETH_P_ALL)) {
1376                mall_tc_entry->type = MLXSW_SP_PORT_MALL_SAMPLE;
1377                err = mlxsw_sp_port_add_cls_matchall_sample(mlxsw_sp_port, f,
1378                                                            act, ingress);
1379        } else {
1380                err = -EOPNOTSUPP;
1381        }
1382
1383        if (err)
1384                goto err_add_action;
1385
1386        list_add_tail(&mall_tc_entry->list, &mlxsw_sp_port->mall_tc_list);
1387        return 0;
1388
1389err_add_action:
1390        kfree(mall_tc_entry);
1391        return err;
1392}
1393
1394static void mlxsw_sp_port_del_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port,
1395                                           struct tc_cls_matchall_offload *f)
1396{
1397        struct mlxsw_sp_port_mall_tc_entry *mall_tc_entry;
1398
1399        mall_tc_entry = mlxsw_sp_port_mall_tc_entry_find(mlxsw_sp_port,
1400                                                         f->cookie);
1401        if (!mall_tc_entry) {
1402                netdev_dbg(mlxsw_sp_port->dev, "tc entry not found on port\n");
1403                return;
1404        }
1405        list_del(&mall_tc_entry->list);
1406
1407        switch (mall_tc_entry->type) {
1408        case MLXSW_SP_PORT_MALL_MIRROR:
1409                mlxsw_sp_port_del_cls_matchall_mirror(mlxsw_sp_port,
1410                                                      &mall_tc_entry->mirror);
1411                break;
1412        case MLXSW_SP_PORT_MALL_SAMPLE:
1413                mlxsw_sp_port_del_cls_matchall_sample(mlxsw_sp_port);
1414                break;
1415        default:
1416                WARN_ON(1);
1417        }
1418
1419        kfree(mall_tc_entry);
1420}
1421
1422static int mlxsw_sp_setup_tc_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port,
1423                                          struct tc_cls_matchall_offload *f,
1424                                          bool ingress)
1425{
1426        switch (f->command) {
1427        case TC_CLSMATCHALL_REPLACE:
1428                return mlxsw_sp_port_add_cls_matchall(mlxsw_sp_port, f,
1429                                                      ingress);
1430        case TC_CLSMATCHALL_DESTROY:
1431                mlxsw_sp_port_del_cls_matchall(mlxsw_sp_port, f);
1432                return 0;
1433        default:
1434                return -EOPNOTSUPP;
1435        }
1436}
1437
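    /* Dispatch flower classifier commands (rule replace/destroy, statistics
     * and template create/destroy) to the flower/ACL code for the ACL block
     * the callback was registered with.
     */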
1438static int
1439mlxsw_sp_setup_tc_cls_flower(struct mlxsw_sp_acl_block *acl_block,
1440                             struct tc_cls_flower_offload *f)
1441{
1442        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_acl_block_mlxsw_sp(acl_block);
1443
1444        switch (f->command) {
1445        case TC_CLSFLOWER_REPLACE:
1446                return mlxsw_sp_flower_replace(mlxsw_sp, acl_block, f);
1447        case TC_CLSFLOWER_DESTROY:
1448                mlxsw_sp_flower_destroy(mlxsw_sp, acl_block, f);
1449                return 0;
1450        case TC_CLSFLOWER_STATS:
1451                return mlxsw_sp_flower_stats(mlxsw_sp, acl_block, f);
1452        case TC_CLSFLOWER_TMPLT_CREATE:
1453                return mlxsw_sp_flower_tmplt_create(mlxsw_sp, acl_block, f);
1454        case TC_CLSFLOWER_TMPLT_DESTROY:
1455                mlxsw_sp_flower_tmplt_destroy(mlxsw_sp, acl_block, f);
1456                return 0;
1457        default:
1458                return -EOPNOTSUPP;
1459        }
1460}
1461
1462static int mlxsw_sp_setup_tc_block_cb_matchall(enum tc_setup_type type,
1463                                               void *type_data,
1464                                               void *cb_priv, bool ingress)
1465{
1466        struct mlxsw_sp_port *mlxsw_sp_port = cb_priv;
1467
1468        switch (type) {
1469        case TC_SETUP_CLSMATCHALL:
1470                if (!tc_cls_can_offload_and_chain0(mlxsw_sp_port->dev,
1471                                                   type_data))
1472                        return -EOPNOTSUPP;
1473
1474                return mlxsw_sp_setup_tc_cls_matchall(mlxsw_sp_port, type_data,
1475                                                      ingress);
1476        case TC_SETUP_CLSFLOWER:
1477                return 0;
1478        default:
1479                return -EOPNOTSUPP;
1480        }
1481}
1482
1483static int mlxsw_sp_setup_tc_block_cb_matchall_ig(enum tc_setup_type type,
1484                                                  void *type_data,
1485                                                  void *cb_priv)
1486{
1487        return mlxsw_sp_setup_tc_block_cb_matchall(type, type_data,
1488                                                   cb_priv, true);
1489}
1490
1491static int mlxsw_sp_setup_tc_block_cb_matchall_eg(enum tc_setup_type type,
1492                                                  void *type_data,
1493                                                  void *cb_priv)
1494{
1495        return mlxsw_sp_setup_tc_block_cb_matchall(type, type_data,
1496                                                   cb_priv, false);
1497}
1498
1499static int mlxsw_sp_setup_tc_block_cb_flower(enum tc_setup_type type,
1500                                             void *type_data, void *cb_priv)
1501{
1502        struct mlxsw_sp_acl_block *acl_block = cb_priv;
1503
1504        switch (type) {
1505        case TC_SETUP_CLSMATCHALL:
1506                return 0;
1507        case TC_SETUP_CLSFLOWER:
1508                if (mlxsw_sp_acl_block_disabled(acl_block))
1509                        return -EOPNOTSUPP;
1510
1511                return mlxsw_sp_setup_tc_cls_flower(acl_block, type_data);
1512        default:
1513                return -EOPNOTSUPP;
1514        }
1515}
1516
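    /* Flower rules live in a shared ACL block rather than on the port itself.
     * The first bind of a given tcf_block creates the ACL block and registers
     * the flower block callback for it; later binds from other ports find the
     * existing callback and only take a reference (tcf_block_cb_incref()).
     * The ACL block is destroyed once the last reference is dropped in the
     * unbind path below.
     */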
1517static int
1518mlxsw_sp_setup_tc_block_flower_bind(struct mlxsw_sp_port *mlxsw_sp_port,
1519                                    struct tcf_block *block, bool ingress,
1520                                    struct netlink_ext_ack *extack)
1521{
1522        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1523        struct mlxsw_sp_acl_block *acl_block;
1524        struct tcf_block_cb *block_cb;
1525        int err;
1526
1527        block_cb = tcf_block_cb_lookup(block, mlxsw_sp_setup_tc_block_cb_flower,
1528                                       mlxsw_sp);
1529        if (!block_cb) {
1530                acl_block = mlxsw_sp_acl_block_create(mlxsw_sp, block->net);
1531                if (!acl_block)
1532                        return -ENOMEM;
1533                block_cb = __tcf_block_cb_register(block,
1534                                                   mlxsw_sp_setup_tc_block_cb_flower,
1535                                                   mlxsw_sp, acl_block, extack);
1536                if (IS_ERR(block_cb)) {
1537                        err = PTR_ERR(block_cb);
1538                        goto err_cb_register;
1539                }
1540        } else {
1541                acl_block = tcf_block_cb_priv(block_cb);
1542        }
1543        tcf_block_cb_incref(block_cb);
1544        err = mlxsw_sp_acl_block_bind(mlxsw_sp, acl_block,
1545                                      mlxsw_sp_port, ingress);
1546        if (err)
1547                goto err_block_bind;
1548
1549        if (ingress)
1550                mlxsw_sp_port->ing_acl_block = acl_block;
1551        else
1552                mlxsw_sp_port->eg_acl_block = acl_block;
1553
1554        return 0;
1555
1556err_block_bind:
1557        if (!tcf_block_cb_decref(block_cb)) {
1558                __tcf_block_cb_unregister(block, block_cb);
1559err_cb_register:
1560                mlxsw_sp_acl_block_destroy(acl_block);
1561        }
1562        return err;
1563}
1564
1565static void
1566mlxsw_sp_setup_tc_block_flower_unbind(struct mlxsw_sp_port *mlxsw_sp_port,
1567                                      struct tcf_block *block, bool ingress)
1568{
1569        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1570        struct mlxsw_sp_acl_block *acl_block;
1571        struct tcf_block_cb *block_cb;
1572        int err;
1573
1574        block_cb = tcf_block_cb_lookup(block, mlxsw_sp_setup_tc_block_cb_flower,
1575                                       mlxsw_sp);
1576        if (!block_cb)
1577                return;
1578
1579        if (ingress)
1580                mlxsw_sp_port->ing_acl_block = NULL;
1581        else
1582                mlxsw_sp_port->eg_acl_block = NULL;
1583
1584        acl_block = tcf_block_cb_priv(block_cb);
1585        err = mlxsw_sp_acl_block_unbind(mlxsw_sp, acl_block,
1586                                        mlxsw_sp_port, ingress);
1587        if (!err && !tcf_block_cb_decref(block_cb)) {
1588                __tcf_block_cb_unregister(block, block_cb);
1589                mlxsw_sp_acl_block_destroy(acl_block);
1590        }
1591}
1592
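    /* Bind or unbind a clsact TC block. Two callbacks are involved: a
     * per-port matchall callback selected by the binder type (ingress or
     * egress), and the shared flower callback handled by
     * mlxsw_sp_setup_tc_block_flower_bind() above. If the flower part of the
     * bind fails, the matchall callback is unregistered again so both stay
     * consistent.
     */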
1593static int mlxsw_sp_setup_tc_block(struct mlxsw_sp_port *mlxsw_sp_port,
1594                                   struct tc_block_offload *f)
1595{
1596        tc_setup_cb_t *cb;
1597        bool ingress;
1598        int err;
1599
1600        if (f->binder_type == TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS) {
1601                cb = mlxsw_sp_setup_tc_block_cb_matchall_ig;
1602                ingress = true;
1603        } else if (f->binder_type == TCF_BLOCK_BINDER_TYPE_CLSACT_EGRESS) {
1604                cb = mlxsw_sp_setup_tc_block_cb_matchall_eg;
1605                ingress = false;
1606        } else {
1607                return -EOPNOTSUPP;
1608        }
1609
1610        switch (f->command) {
1611        case TC_BLOCK_BIND:
1612                err = tcf_block_cb_register(f->block, cb, mlxsw_sp_port,
1613                                            mlxsw_sp_port, f->extack);
1614                if (err)
1615                        return err;
1616                err = mlxsw_sp_setup_tc_block_flower_bind(mlxsw_sp_port,
1617                                                          f->block, ingress,
1618                                                          f->extack);
1619                if (err) {
1620                        tcf_block_cb_unregister(f->block, cb, mlxsw_sp_port);
1621                        return err;
1622                }
1623                return 0;
1624        case TC_BLOCK_UNBIND:
1625                mlxsw_sp_setup_tc_block_flower_unbind(mlxsw_sp_port,
1626                                                      f->block, ingress);
1627                tcf_block_cb_unregister(f->block, cb, mlxsw_sp_port);
1628                return 0;
1629        default:
1630                return -EOPNOTSUPP;
1631        }
1632}
1633
1634static int mlxsw_sp_setup_tc(struct net_device *dev, enum tc_setup_type type,
1635                             void *type_data)
1636{
1637        struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1638
1639        switch (type) {
1640        case TC_SETUP_BLOCK:
1641                return mlxsw_sp_setup_tc_block(mlxsw_sp_port, type_data);
1642        case TC_SETUP_QDISC_RED:
1643                return mlxsw_sp_setup_tc_red(mlxsw_sp_port, type_data);
1644        case TC_SETUP_QDISC_PRIO:
1645                return mlxsw_sp_setup_tc_prio(mlxsw_sp_port, type_data);
1646        default:
1647                return -EOPNOTSUPP;
1648        }
1649}
1650
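    /* NETIF_F_HW_TC may only be cleared while no TC rules are offloaded on
     * the port, i.e. no flower rules on its ACL blocks and no matchall
     * entries. While the feature is off, the ACL blocks are marked disabled
     * so that new flower offload requests are rejected.
     */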
1652static int mlxsw_sp_feature_hw_tc(struct net_device *dev, bool enable)
1653{
1654        struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1655
1656        if (!enable) {
1657                if (mlxsw_sp_acl_block_rule_count(mlxsw_sp_port->ing_acl_block) ||
1658                    mlxsw_sp_acl_block_rule_count(mlxsw_sp_port->eg_acl_block) ||
1659                    !list_empty(&mlxsw_sp_port->mall_tc_list)) {
1660                        netdev_err(dev, "Active offloaded tc filters, can't turn hw_tc_offload off\n");
1661                        return -EINVAL;
1662                }
1663                mlxsw_sp_acl_block_disable_inc(mlxsw_sp_port->ing_acl_block);
1664                mlxsw_sp_acl_block_disable_inc(mlxsw_sp_port->eg_acl_block);
1665        } else {
1666                mlxsw_sp_acl_block_disable_dec(mlxsw_sp_port->ing_acl_block);
1667                mlxsw_sp_acl_block_disable_dec(mlxsw_sp_port->eg_acl_block);
1668        }
1669        return 0;
1670}
1671
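    /* NETIF_F_LOOPBACK: physical loopback is controlled through the PPLR
     * register. If the netdev is running, the port is administratively taken
     * down for the duration of the register write and brought back up
     * afterwards.
     */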
1672static int mlxsw_sp_feature_loopback(struct net_device *dev, bool enable)
1673{
1674        struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1675        char pplr_pl[MLXSW_REG_PPLR_LEN];
1676        int err;
1677
1678        if (netif_running(dev))
1679                mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
1680
1681        mlxsw_reg_pplr_pack(pplr_pl, mlxsw_sp_port->local_port, enable);
1682        err = mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pplr),
1683                              pplr_pl);
1684
1685        if (netif_running(dev))
1686                mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);
1687
1688        return err;
1689}
1690
1691typedef int (*mlxsw_sp_feature_handler)(struct net_device *dev, bool enable);
1692
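    /* Apply a single feature change: call @feature_handler only if @feature
     * differs between dev->features and the wanted set, and mirror the result
     * into dev->features on success.
     */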
1693static int mlxsw_sp_handle_feature(struct net_device *dev,
1694                                   netdev_features_t wanted_features,
1695                                   netdev_features_t feature,
1696                                   mlxsw_sp_feature_handler feature_handler)
1697{
1698        netdev_features_t changes = wanted_features ^ dev->features;
1699        bool enable = !!(wanted_features & feature);
1700        int err;
1701
1702        if (!(changes & feature))
1703                return 0;
1704
1705        err = feature_handler(dev, enable);
1706        if (err) {
1707                netdev_err(dev, "%s feature %pNF failed, err %d\n",
1708                           enable ? "Enable" : "Disable", &feature, err);
1709                return err;
1710        }
1711
1712        if (enable)
1713                dev->features |= feature;
1714        else
1715                dev->features &= ~feature;
1716
1717        return 0;
1718}

1719static int mlxsw_sp_set_features(struct net_device *dev,
1720                                 netdev_features_t features)
1721{
1722        netdev_features_t oper_features = dev->features;
1723        int err = 0;
1724
1725        err |= mlxsw_sp_handle_feature(dev, features, NETIF_F_HW_TC,
1726                                       mlxsw_sp_feature_hw_tc);
1727        err |= mlxsw_sp_handle_feature(dev, features, NETIF_F_LOOPBACK,
1728                                       mlxsw_sp_feature_loopback);
1729
1730        if (err) {
1731                dev->features = oper_features;
1732                return -EINVAL;
1733        }
1734
1735        return 0;
1736}
1737
1738static struct devlink_port *
1739mlxsw_sp_port_get_devlink_port(struct net_device *dev)
1740{
1741        struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1742        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1743
1744        return mlxsw_core_port_devlink_port_get(mlxsw_sp->core,
1745                                                mlxsw_sp_port->local_port);
1746}
1747
1748static const struct net_device_ops mlxsw_sp_port_netdev_ops = {
1749        .ndo_open               = mlxsw_sp_port_open,
1750        .ndo_stop               = mlxsw_sp_port_stop,
1751        .ndo_start_xmit         = mlxsw_sp_port_xmit,
1752        .ndo_setup_tc           = mlxsw_sp_setup_tc,
1753        .ndo_set_rx_mode        = mlxsw_sp_set_rx_mode,
1754        .ndo_set_mac_address    = mlxsw_sp_port_set_mac_address,
1755        .ndo_change_mtu         = mlxsw_sp_port_change_mtu,
1756        .ndo_get_stats64        = mlxsw_sp_port_get_stats64,
1757        .ndo_has_offload_stats  = mlxsw_sp_port_has_offload_stats,
1758        .ndo_get_offload_stats  = mlxsw_sp_port_get_offload_stats,
1759        .ndo_vlan_rx_add_vid    = mlxsw_sp_port_add_vid,
1760        .ndo_vlan_rx_kill_vid   = mlxsw_sp_port_kill_vid,
1761        .ndo_set_features       = mlxsw_sp_set_features,
1762        .ndo_get_devlink_port   = mlxsw_sp_port_get_devlink_port,
1763};
1764
1765static void mlxsw_sp_port_get_drvinfo(struct net_device *dev,
1766                                      struct ethtool_drvinfo *drvinfo)
1767{
1768        struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1769        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1770
1771        strlcpy(drvinfo->driver, mlxsw_sp->bus_info->device_kind,
1772                sizeof(drvinfo->driver));
1773        strlcpy(drvinfo->version, mlxsw_sp_driver_version,
1774                sizeof(drvinfo->version));
1775        snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
1776                 "%d.%d.%d",
1777                 mlxsw_sp->bus_info->fw_rev.major,
1778                 mlxsw_sp->bus_info->fw_rev.minor,
1779                 mlxsw_sp->bus_info->fw_rev.subminor);
1780        strlcpy(drvinfo->bus_info, mlxsw_sp->bus_info->device_name,
1781                sizeof(drvinfo->bus_info));
1782}
1783
1784static void mlxsw_sp_port_get_pauseparam(struct net_device *dev,
1785                                         struct ethtool_pauseparam *pause)
1786{
1787        struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1788
1789        pause->rx_pause = mlxsw_sp_port->link.rx_pause;
1790        pause->tx_pause = mlxsw_sp_port->link.tx_pause;
1791}
1792
1793static int mlxsw_sp_port_pause_set(struct mlxsw_sp_port *mlxsw_sp_port,
1794                                   struct ethtool_pauseparam *pause)
1795{
1796        char pfcc_pl[MLXSW_REG_PFCC_LEN];
1797
1798        mlxsw_reg_pfcc_pack(pfcc_pl, mlxsw_sp_port->local_port);
1799        mlxsw_reg_pfcc_pprx_set(pfcc_pl, pause->rx_pause);
1800        mlxsw_reg_pfcc_pptx_set(pfcc_pl, pause->tx_pause);
1801
1802        return mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pfcc),
1803                               pfcc_pl);
1804}
1805
1806static int mlxsw_sp_port_set_pauseparam(struct net_device *dev,
1807                                        struct ethtool_pauseparam *pause)
1808{
1809        struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1810        bool pause_en = pause->tx_pause || pause->rx_pause;
1811        int err;
1812
1813        if (mlxsw_sp_port->dcb.pfc && mlxsw_sp_port->dcb.pfc->pfc_en) {
1814                netdev_err(dev, "PFC already enabled on port\n");
1815                return -EINVAL;
1816        }
1817
1818        if (pause->autoneg) {
1819                netdev_err(dev, "PAUSE frames autonegotiation isn't supported\n");
1820                return -EINVAL;
1821        }
1822
1823        err = mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en);
1824        if (err) {
1825                netdev_err(dev, "Failed to configure port's headroom\n");
1826                return err;
1827        }
1828
1829        err = mlxsw_sp_port_pause_set(mlxsw_sp_port, pause);
1830        if (err) {
1831                netdev_err(dev, "Failed to set PAUSE parameters\n");
1832                goto err_port_pause_configure;
1833        }
1834
1835        mlxsw_sp_port->link.rx_pause = pause->rx_pause;
1836        mlxsw_sp_port->link.tx_pause = pause->tx_pause;
1837
1838        return 0;
1839
1840err_port_pause_configure:
1841        pause_en = mlxsw_sp_port_is_pause_en(mlxsw_sp_port);
1842        mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en);
1843        return err;
1844}
1845
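    /* Descriptor for one ethtool counter: @str is the name reported via
     * "ethtool -S", @getter extracts the value from a PPCNT register payload,
     * and @cells_bytes marks counters the device reports in cells, which are
     * converted to bytes before being handed to user space (see
     * __mlxsw_sp_port_get_stats()).
     */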
1846struct mlxsw_sp_port_hw_stats {
1847        char str[ETH_GSTRING_LEN];
1848        u64 (*getter)(const char *payload);
1849        bool cells_bytes;
1850};
1851
1852static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_stats[] = {
1853        {
1854                .str = "a_frames_transmitted_ok",
1855                .getter = mlxsw_reg_ppcnt_a_frames_transmitted_ok_get,
1856        },
1857        {
1858                .str = "a_frames_received_ok",
1859                .getter = mlxsw_reg_ppcnt_a_frames_received_ok_get,
1860        },
1861        {
1862                .str = "a_frame_check_sequence_errors",
1863                .getter = mlxsw_reg_ppcnt_a_frame_check_sequence_errors_get,
1864        },
1865        {
1866                .str = "a_alignment_errors",
1867                .getter = mlxsw_reg_ppcnt_a_alignment_errors_get,
1868        },
1869        {
1870                .str = "a_octets_transmitted_ok",
1871                .getter = mlxsw_reg_ppcnt_a_octets_transmitted_ok_get,
1872        },
1873        {
1874                .str = "a_octets_received_ok",
1875                .getter = mlxsw_reg_ppcnt_a_octets_received_ok_get,
1876        },
1877        {
1878                .str = "a_multicast_frames_xmitted_ok",
1879                .getter = mlxsw_reg_ppcnt_a_multicast_frames_xmitted_ok_get,
1880        },
1881        {
1882                .str = "a_broadcast_frames_xmitted_ok",
1883                .getter = mlxsw_reg_ppcnt_a_broadcast_frames_xmitted_ok_get,
1884        },
1885        {
1886                .str = "a_multicast_frames_received_ok",
1887                .getter = mlxsw_reg_ppcnt_a_multicast_frames_received_ok_get,
1888        },
1889        {
1890                .str = "a_broadcast_frames_received_ok",
1891                .getter = mlxsw_reg_ppcnt_a_broadcast_frames_received_ok_get,
1892        },
1893        {
1894                .str = "a_in_range_length_errors",
1895                .getter = mlxsw_reg_ppcnt_a_in_range_length_errors_get,
1896        },
1897        {
1898                .str = "a_out_of_range_length_field",
1899                .getter = mlxsw_reg_ppcnt_a_out_of_range_length_field_get,
1900        },
1901        {
1902                .str = "a_frame_too_long_errors",
1903                .getter = mlxsw_reg_ppcnt_a_frame_too_long_errors_get,
1904        },
1905        {
1906                .str = "a_symbol_error_during_carrier",
1907                .getter = mlxsw_reg_ppcnt_a_symbol_error_during_carrier_get,
1908        },
1909        {
1910                .str = "a_mac_control_frames_transmitted",
1911                .getter = mlxsw_reg_ppcnt_a_mac_control_frames_transmitted_get,
1912        },
1913        {
1914                .str = "a_mac_control_frames_received",
1915                .getter = mlxsw_reg_ppcnt_a_mac_control_frames_received_get,
1916        },
1917        {
1918                .str = "a_unsupported_opcodes_received",
1919                .getter = mlxsw_reg_ppcnt_a_unsupported_opcodes_received_get,
1920        },
1921        {
1922                .str = "a_pause_mac_ctrl_frames_received",
1923                .getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_received_get,
1924        },
1925        {
1926                .str = "a_pause_mac_ctrl_frames_xmitted",
1927                .getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_transmitted_get,
1928        },
1929};
1930
1931#define MLXSW_SP_PORT_HW_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_stats)
1932
1933static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_rfc_2863_stats[] = {
1934        {
1935                .str = "if_in_discards",
1936                .getter = mlxsw_reg_ppcnt_if_in_discards_get,
1937        },
1938        {
1939                .str = "if_out_discards",
1940                .getter = mlxsw_reg_ppcnt_if_out_discards_get,
1941        },
1942        {
1943                .str = "if_out_errors",
1944                .getter = mlxsw_reg_ppcnt_if_out_errors_get,
1945        },
1946};
1947
1948#define MLXSW_SP_PORT_HW_RFC_2863_STATS_LEN \
1949        ARRAY_SIZE(mlxsw_sp_port_hw_rfc_2863_stats)
1950
1951static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_rfc_2819_stats[] = {
1952        {
1953                .str = "ether_stats_undersize_pkts",
1954                .getter = mlxsw_reg_ppcnt_ether_stats_undersize_pkts_get,
1955        },
1956        {
1957                .str = "ether_stats_oversize_pkts",
1958                .getter = mlxsw_reg_ppcnt_ether_stats_oversize_pkts_get,
1959        },
1960        {
1961                .str = "ether_stats_fragments",
1962                .getter = mlxsw_reg_ppcnt_ether_stats_fragments_get,
1963        },
1964        {
1965                .str = "ether_pkts64octets",
1966                .getter = mlxsw_reg_ppcnt_ether_stats_pkts64octets_get,
1967        },
1968        {
1969                .str = "ether_pkts65to127octets",
1970                .getter = mlxsw_reg_ppcnt_ether_stats_pkts65to127octets_get,
1971        },
1972        {
1973                .str = "ether_pkts128to255octets",
1974                .getter = mlxsw_reg_ppcnt_ether_stats_pkts128to255octets_get,
1975        },
1976        {
1977                .str = "ether_pkts256to511octets",
1978                .getter = mlxsw_reg_ppcnt_ether_stats_pkts256to511octets_get,
1979        },
1980        {
1981                .str = "ether_pkts512to1023octets",
1982                .getter = mlxsw_reg_ppcnt_ether_stats_pkts512to1023octets_get,
1983        },
1984        {
1985                .str = "ether_pkts1024to1518octets",
1986                .getter = mlxsw_reg_ppcnt_ether_stats_pkts1024to1518octets_get,
1987        },
1988        {
1989                .str = "ether_pkts1519to2047octets",
1990                .getter = mlxsw_reg_ppcnt_ether_stats_pkts1519to2047octets_get,
1991        },
1992        {
1993                .str = "ether_pkts2048to4095octets",
1994                .getter = mlxsw_reg_ppcnt_ether_stats_pkts2048to4095octets_get,
1995        },
1996        {
1997                .str = "ether_pkts4096to8191octets",
1998                .getter = mlxsw_reg_ppcnt_ether_stats_pkts4096to8191octets_get,
1999        },
2000        {
2001                .str = "ether_pkts8192to10239octets",
2002                .getter = mlxsw_reg_ppcnt_ether_stats_pkts8192to10239octets_get,
2003        },
2004};
2005
2006#define MLXSW_SP_PORT_HW_RFC_2819_STATS_LEN \
2007        ARRAY_SIZE(mlxsw_sp_port_hw_rfc_2819_stats)
2008
2009static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_rfc_3635_stats[] = {
2010        {
2011                .str = "dot3stats_fcs_errors",
2012                .getter = mlxsw_reg_ppcnt_dot3stats_fcs_errors_get,
2013        },
2014        {
2015                .str = "dot3stats_symbol_errors",
2016                .getter = mlxsw_reg_ppcnt_dot3stats_symbol_errors_get,
2017        },
2018        {
2019                .str = "dot3control_in_unknown_opcodes",
2020                .getter = mlxsw_reg_ppcnt_dot3control_in_unknown_opcodes_get,
2021        },
2022        {
2023                .str = "dot3in_pause_frames",
2024                .getter = mlxsw_reg_ppcnt_dot3in_pause_frames_get,
2025        },
2026};
2027
2028#define MLXSW_SP_PORT_HW_RFC_3635_STATS_LEN \
2029        ARRAY_SIZE(mlxsw_sp_port_hw_rfc_3635_stats)
2030
2031static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_discard_stats[] = {
2032        {
2033                .str = "discard_ingress_general",
2034                .getter = mlxsw_reg_ppcnt_ingress_general_get,
2035        },
2036        {
2037                .str = "discard_ingress_policy_engine",
2038                .getter = mlxsw_reg_ppcnt_ingress_policy_engine_get,
2039        },
2040        {
2041                .str = "discard_ingress_vlan_membership",
2042                .getter = mlxsw_reg_ppcnt_ingress_vlan_membership_get,
2043        },
2044        {
2045                .str = "discard_ingress_tag_frame_type",
2046                .getter = mlxsw_reg_ppcnt_ingress_tag_frame_type_get,
2047        },
2048        {
2049                .str = "discard_egress_vlan_membership",
2050                .getter = mlxsw_reg_ppcnt_egress_vlan_membership_get,
2051        },
2052        {
2053                .str = "discard_loopback_filter",
2054                .getter = mlxsw_reg_ppcnt_loopback_filter_get,
2055        },
2056        {
2057                .str = "discard_egress_general",
2058                .getter = mlxsw_reg_ppcnt_egress_general_get,
2059        },
2060        {
2061                .str = "discard_egress_hoq",
2062                .getter = mlxsw_reg_ppcnt_egress_hoq_get,
2063        },
2064        {
2065                .str = "discard_egress_policy_engine",
2066                .getter = mlxsw_reg_ppcnt_egress_policy_engine_get,
2067        },
2068        {
2069                .str = "discard_ingress_tx_link_down",
2070                .getter = mlxsw_reg_ppcnt_ingress_tx_link_down_get,
2071        },
2072        {
2073                .str = "discard_egress_stp_filter",
2074                .getter = mlxsw_reg_ppcnt_egress_stp_filter_get,
2075        },
2076        {
2077                .str = "discard_egress_sll",
2078                .getter = mlxsw_reg_ppcnt_egress_sll_get,
2079        },
2080};
2081
2082#define MLXSW_SP_PORT_HW_DISCARD_STATS_LEN \
2083        ARRAY_SIZE(mlxsw_sp_port_hw_discard_stats)
2084
2085static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_prio_stats[] = {
2086        {
2087                .str = "rx_octets_prio",
2088                .getter = mlxsw_reg_ppcnt_rx_octets_get,
2089        },
2090        {
2091                .str = "rx_frames_prio",
2092                .getter = mlxsw_reg_ppcnt_rx_frames_get,
2093        },
2094        {
2095                .str = "tx_octets_prio",
2096                .getter = mlxsw_reg_ppcnt_tx_octets_get,
2097        },
2098        {
2099                .str = "tx_frames_prio",
2100                .getter = mlxsw_reg_ppcnt_tx_frames_get,
2101        },
2102        {
2103                .str = "rx_pause_prio",
2104                .getter = mlxsw_reg_ppcnt_rx_pause_get,
2105        },
2106        {
2107                .str = "rx_pause_duration_prio",
2108                .getter = mlxsw_reg_ppcnt_rx_pause_duration_get,
2109        },
2110        {
2111                .str = "tx_pause_prio",
2112                .getter = mlxsw_reg_ppcnt_tx_pause_get,
2113        },
2114        {
2115                .str = "tx_pause_duration_prio",
2116                .getter = mlxsw_reg_ppcnt_tx_pause_duration_get,
2117        },
2118};
2119
2120#define MLXSW_SP_PORT_HW_PRIO_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_prio_stats)
2121
2122static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_tc_stats[] = {
2123        {
2124                .str = "tc_transmit_queue_tc",
2125                .getter = mlxsw_reg_ppcnt_tc_transmit_queue_get,
2126                .cells_bytes = true,
2127        },
2128        {
2129                .str = "tc_no_buffer_discard_uc_tc",
2130                .getter = mlxsw_reg_ppcnt_tc_no_buffer_discard_uc_get,
2131        },
2132};
2133
2134#define MLXSW_SP_PORT_HW_TC_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_tc_stats)
2135
2136#define MLXSW_SP_PORT_ETHTOOL_STATS_LEN (MLXSW_SP_PORT_HW_STATS_LEN + \
2137                                         MLXSW_SP_PORT_HW_RFC_2863_STATS_LEN + \
2138                                         MLXSW_SP_PORT_HW_RFC_2819_STATS_LEN + \
2139                                         MLXSW_SP_PORT_HW_RFC_3635_STATS_LEN + \
2140                                         MLXSW_SP_PORT_HW_DISCARD_STATS_LEN + \
2141                                         (MLXSW_SP_PORT_HW_PRIO_STATS_LEN * \
2142                                          IEEE_8021QAZ_MAX_TCS) + \
2143                                         (MLXSW_SP_PORT_HW_TC_STATS_LEN * \
2144                                          TC_MAX_QUEUE))
2145
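    /* Per-priority and per-TC counter names are built by appending the
     * priority / traffic class index to the base string; the "%.29s" format
     * keeps the result within ETH_GSTRING_LEN.
     */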
2146static void mlxsw_sp_port_get_prio_strings(u8 **p, int prio)
2147{
2148        int i;
2149
2150        for (i = 0; i < MLXSW_SP_PORT_HW_PRIO_STATS_LEN; i++) {
2151                snprintf(*p, ETH_GSTRING_LEN, "%.29s_%.1d",
2152                         mlxsw_sp_port_hw_prio_stats[i].str, prio);
2153                *p += ETH_GSTRING_LEN;
2154        }
2155}
2156
2157static void mlxsw_sp_port_get_tc_strings(u8 **p, int tc)
2158{
2159        int i;
2160
2161        for (i = 0; i < MLXSW_SP_PORT_HW_TC_STATS_LEN; i++) {
2162                snprintf(*p, ETH_GSTRING_LEN, "%.29s_%.1d",
2163                         mlxsw_sp_port_hw_tc_stats[i].str, tc);
2164                *p += ETH_GSTRING_LEN;
2165        }
2166}
2167
2168static void mlxsw_sp_port_get_strings(struct net_device *dev,
2169                                      u32 stringset, u8 *data)
2170{
2171        u8 *p = data;
2172        int i;
2173
2174        switch (stringset) {
2175        case ETH_SS_STATS:
2176                for (i = 0; i < MLXSW_SP_PORT_HW_STATS_LEN; i++) {
2177                        memcpy(p, mlxsw_sp_port_hw_stats[i].str,
2178                               ETH_GSTRING_LEN);
2179                        p += ETH_GSTRING_LEN;
2180                }
2181
2182                for (i = 0; i < MLXSW_SP_PORT_HW_RFC_2863_STATS_LEN; i++) {
2183                        memcpy(p, mlxsw_sp_port_hw_rfc_2863_stats[i].str,
2184                               ETH_GSTRING_LEN);
2185                        p += ETH_GSTRING_LEN;
2186                }
2187
2188                for (i = 0; i < MLXSW_SP_PORT_HW_RFC_2819_STATS_LEN; i++) {
2189                        memcpy(p, mlxsw_sp_port_hw_rfc_2819_stats[i].str,
2190                               ETH_GSTRING_LEN);
2191                        p += ETH_GSTRING_LEN;
2192                }
2193
2194                for (i = 0; i < MLXSW_SP_PORT_HW_RFC_3635_STATS_LEN; i++) {
2195                        memcpy(p, mlxsw_sp_port_hw_rfc_3635_stats[i].str,
2196                               ETH_GSTRING_LEN);
2197                        p += ETH_GSTRING_LEN;
2198                }
2199
2200                for (i = 0; i < MLXSW_SP_PORT_HW_DISCARD_STATS_LEN; i++) {
2201                        memcpy(p, mlxsw_sp_port_hw_discard_stats[i].str,
2202                               ETH_GSTRING_LEN);
2203                        p += ETH_GSTRING_LEN;
2204                }
2205
2206                for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
2207                        mlxsw_sp_port_get_prio_strings(&p, i);
2208
2209                for (i = 0; i < TC_MAX_QUEUE; i++)
2210                        mlxsw_sp_port_get_tc_strings(&p, i);
2211
2212                break;
2213        }
2214}
2215
2216static int mlxsw_sp_port_set_phys_id(struct net_device *dev,
2217                                     enum ethtool_phys_id_state state)
2218{
2219        struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
2220        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2221        char mlcr_pl[MLXSW_REG_MLCR_LEN];
2222        bool active;
2223
2224        switch (state) {
2225        case ETHTOOL_ID_ACTIVE:
2226                active = true;
2227                break;
2228        case ETHTOOL_ID_INACTIVE:
2229                active = false;
2230                break;
2231        default:
2232                return -EOPNOTSUPP;
2233        }
2234
2235        mlxsw_reg_mlcr_pack(mlcr_pl, mlxsw_sp_port->local_port, active);
2236        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mlcr), mlcr_pl);
2237}
2238
2239static int
2240mlxsw_sp_get_hw_stats_by_group(struct mlxsw_sp_port_hw_stats **p_hw_stats,
2241                               int *p_len, enum mlxsw_reg_ppcnt_grp grp)
2242{
2243        switch (grp) {
2244        case MLXSW_REG_PPCNT_IEEE_8023_CNT:
2245                *p_hw_stats = mlxsw_sp_port_hw_stats;
2246                *p_len = MLXSW_SP_PORT_HW_STATS_LEN;
2247                break;
2248        case MLXSW_REG_PPCNT_RFC_2863_CNT:
2249                *p_hw_stats = mlxsw_sp_port_hw_rfc_2863_stats;
2250                *p_len = MLXSW_SP_PORT_HW_RFC_2863_STATS_LEN;
2251                break;
2252        case MLXSW_REG_PPCNT_RFC_2819_CNT:
2253                *p_hw_stats = mlxsw_sp_port_hw_rfc_2819_stats;
2254                *p_len = MLXSW_SP_PORT_HW_RFC_2819_STATS_LEN;
2255                break;
2256        case MLXSW_REG_PPCNT_RFC_3635_CNT:
2257                *p_hw_stats = mlxsw_sp_port_hw_rfc_3635_stats;
2258                *p_len = MLXSW_SP_PORT_HW_RFC_3635_STATS_LEN;
2259                break;
2260        case MLXSW_REG_PPCNT_DISCARD_CNT:
2261                *p_hw_stats = mlxsw_sp_port_hw_discard_stats;
2262                *p_len = MLXSW_SP_PORT_HW_DISCARD_STATS_LEN;
2263                break;
2264        case MLXSW_REG_PPCNT_PRIO_CNT:
2265                *p_hw_stats = mlxsw_sp_port_hw_prio_stats;
2266                *p_len = MLXSW_SP_PORT_HW_PRIO_STATS_LEN;
2267                break;
2268        case MLXSW_REG_PPCNT_TC_CNT:
2269                *p_hw_stats = mlxsw_sp_port_hw_tc_stats;
2270                *p_len = MLXSW_SP_PORT_HW_TC_STATS_LEN;
2271                break;
2272        default:
2273                WARN_ON(1);
2274                return -EOPNOTSUPP;
2275        }
2276        return 0;
2277}
2278
2279static void __mlxsw_sp_port_get_stats(struct net_device *dev,
2280                                      enum mlxsw_reg_ppcnt_grp grp, int prio,
2281                                      u64 *data, int data_index)
2282{
2283        struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
2284        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2285        struct mlxsw_sp_port_hw_stats *hw_stats;
2286        char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
2287        int i, len;
2288        int err;
2289
2290        err = mlxsw_sp_get_hw_stats_by_group(&hw_stats, &len, grp);
2291        if (err)
2292                return;
2293        mlxsw_sp_port_get_stats_raw(dev, grp, prio, ppcnt_pl);
2294        for (i = 0; i < len; i++) {
2295                data[data_index + i] = hw_stats[i].getter(ppcnt_pl);
2296                if (!hw_stats[i].cells_bytes)
2297                        continue;
2298                data[data_index + i] = mlxsw_sp_cells_bytes(mlxsw_sp,
2299                                                            data[data_index + i]);
2300        }
2301}
2302
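    /* ethtool get_ethtool_stats: counters are written in the same order and
     * with the same group lengths as the strings emitted by
     * mlxsw_sp_port_get_strings(), MLXSW_SP_PORT_ETHTOOL_STATS_LEN entries in
     * total.
     */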
2303static void mlxsw_sp_port_get_stats(struct net_device *dev,
2304                                    struct ethtool_stats *stats, u64 *data)
2305{
2306        int i, data_index = 0;
2307
2308        /* IEEE 802.3 Counters */
2309        __mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_IEEE_8023_CNT, 0,
2310                                  data, data_index);
2311        data_index = MLXSW_SP_PORT_HW_STATS_LEN;
2312
2313        /* RFC 2863 Counters */
2314        __mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_RFC_2863_CNT, 0,
2315                                  data, data_index);
2316        data_index += MLXSW_SP_PORT_HW_RFC_2863_STATS_LEN;
2317
2318        /* RFC 2819 Counters */
2319        __mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_RFC_2819_CNT, 0,
2320                                  data, data_index);
2321        data_index += MLXSW_SP_PORT_HW_RFC_2819_STATS_LEN;
2322
2323        /* RFC 3635 Counters */
2324        __mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_RFC_3635_CNT, 0,
2325                                  data, data_index);
2326        data_index += MLXSW_SP_PORT_HW_RFC_3635_STATS_LEN;
2327
2328        /* Discard Counters */
2329        __mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_DISCARD_CNT, 0,
2330                                  data, data_index);
2331        data_index += MLXSW_SP_PORT_HW_DISCARD_STATS_LEN;
2332
2333        /* Per-Priority Counters */
2334        for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
2335                __mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_PRIO_CNT, i,
2336                                          data, data_index);
2337                data_index += MLXSW_SP_PORT_HW_PRIO_STATS_LEN;
2338        }
2339
2340        /* Per-TC Counters */
2341        for (i = 0; i < TC_MAX_QUEUE; i++) {
2342                __mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_TC_CNT, i,
2343                                          data, data_index);
2344                data_index += MLXSW_SP_PORT_HW_TC_STATS_LEN;
2345        }
2346}
2347
2348static int mlxsw_sp_port_get_sset_count(struct net_device *dev, int sset)
2349{
2350        switch (sset) {
2351        case ETH_SS_STATS:
2352                return MLXSW_SP_PORT_ETHTOOL_STATS_LEN;
2353        default:
2354                return -EOPNOTSUPP;
2355        }
2356}
2357
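    /* Spectrum-1 link mode table: each entry maps one or more PTYS Ethernet
     * protocol bits to a single ethtool link mode bit and the corresponding
     * speed. PTYS bits without an exact ethtool counterpart are folded into
     * the closest matching mode (e.g. SGMII is reported as 1000baseKX).
     */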
2358struct mlxsw_sp1_port_link_mode {
2359        enum ethtool_link_mode_bit_indices mask_ethtool;
2360        u32 mask;
2361        u32 speed;
2362};
2363
2364static const struct mlxsw_sp1_port_link_mode mlxsw_sp1_port_link_mode[] = {
2365        {
2366                .mask           = MLXSW_REG_PTYS_ETH_SPEED_100BASE_T,
2367                .mask_ethtool   = ETHTOOL_LINK_MODE_100baseT_Full_BIT,
2368                .speed          = SPEED_100,
2369        },
2370        {
2371                .mask           = MLXSW_REG_PTYS_ETH_SPEED_SGMII |
2372                                  MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX,
2373                .mask_ethtool   = ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
2374                .speed          = SPEED_1000,
2375        },
2376        {
2377                .mask           = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_T,
2378                .mask_ethtool   = ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
2379                .speed          = SPEED_10000,
2380        },
2381        {
2382                .mask           = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CX4 |
2383                                  MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4,
2384                .mask_ethtool   = ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT,
2385                .speed          = SPEED_10000,
2386        },
2387        {
2388                .mask           = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
2389                                  MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
2390                                  MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
2391                                  MLXSW_REG_PTYS_ETH_SPEED_10GBASE_ER_LR,
2392                .mask_ethtool   = ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
2393                .speed          = SPEED_10000,
2394        },
2395        {
2396                .mask           = MLXSW_REG_PTYS_ETH_SPEED_20GBASE_KR2,
2397                .mask_ethtool   = ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT,
2398                .speed          = SPEED_20000,
2399        },
2400        {
2401                .mask           = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4,
2402                .mask_ethtool   = ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
2403                .speed          = SPEED_40000,
2404        },
2405        {
2406                .mask           = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4,
2407                .mask_ethtool   = ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
2408                .speed          = SPEED_40000,
2409        },
2410        {
2411                .mask           = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4,
2412                .mask_ethtool   = ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
2413                .speed          = SPEED_40000,
2414        },
2415        {
2416                .mask           = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_LR4_ER4,
2417                .mask_ethtool   = ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
2418                .speed          = SPEED_40000,
2419        },
2420        {
2421                .mask           = MLXSW_REG_PTYS_ETH_SPEED_25GBASE_CR,
2422                .mask_ethtool   = ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
2423                .speed          = SPEED_25000,
2424        },
2425        {
2426                .mask           = MLXSW_REG_PTYS_ETH_SPEED_25GBASE_KR,
2427                .mask_ethtool   = ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
2428                .speed          = SPEED_25000,
2429        },
2430        {
2431                .mask           = MLXSW_REG_PTYS_ETH_SPEED_25GBASE_SR,
2432                .mask_ethtool   = ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
2433                .speed          = SPEED_25000,
2434        },
2435        {
2436                .mask           = MLXSW_REG_PTYS_ETH_SPEED_50GBASE_CR2,
2437                .mask_ethtool   = ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
2438                .speed          = SPEED_50000,
2439        },
2440        {
2441                .mask           = MLXSW_REG_PTYS_ETH_SPEED_50GBASE_KR2,
2442                .mask_ethtool   = ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
2443                .speed          = SPEED_50000,
2444        },
2445        {
2446                .mask           = MLXSW_REG_PTYS_ETH_SPEED_50GBASE_SR2,
2447                .mask_ethtool   = ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
2448                .speed          = SPEED_50000,
2449        },
2450        {
2451                .mask           = MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4,
2452                .mask_ethtool   = ETHTOOL_LINK_MODE_56000baseKR4_Full_BIT,
2453                .speed          = SPEED_56000,
2454        },
2455        {
2456                .mask           = MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4,
2457                .mask_ethtool   = ETHTOOL_LINK_MODE_56000baseCR4_Full_BIT,
2458                .speed          = SPEED_56000,
2459        },
2460        {
2461                .mask           = MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4,
2462                .mask_ethtool   = ETHTOOL_LINK_MODE_56000baseSR4_Full_BIT,
2463                .speed          = SPEED_56000,
2464        },
2465        {
2466                .mask           = MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4,
2467                .mask_ethtool   = ETHTOOL_LINK_MODE_56000baseLR4_Full_BIT,
2468                .speed          = SPEED_56000,
2469        },
2470        {
2471                .mask           = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4,
2472                .mask_ethtool   = ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
2473                .speed          = SPEED_100000,
2474        },
2475        {
2476                .mask           = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4,
2477                .mask_ethtool   = ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
2478                .speed          = SPEED_100000,
2479        },
2480        {
2481                .mask           = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4,
2482                .mask_ethtool   = ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
2483                .speed          = SPEED_100000,
2484        },
2485        {
2486                .mask           = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_LR4_ER4,
2487                .mask_ethtool   = ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
2488                .speed          = SPEED_100000,
2489        },
2490};
2491
2492#define MLXSW_SP1_PORT_LINK_MODE_LEN ARRAY_SIZE(mlxsw_sp1_port_link_mode)
2493
2494static void
2495mlxsw_sp1_from_ptys_supported_port(struct mlxsw_sp *mlxsw_sp,
2496                                   u32 ptys_eth_proto,
2497                                   struct ethtool_link_ksettings *cmd)
2498{
2499        if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
2500                              MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
2501                              MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 |
2502                              MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4 |
2503                              MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
2504                              MLXSW_REG_PTYS_ETH_SPEED_SGMII))
2505                ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE);
2506
2507        if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
2508                              MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4 |
2509                              MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 |
2510                              MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4 |
2511                              MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX))
2512                ethtool_link_ksettings_add_link_mode(cmd, supported, Backplane);
2513}
2514
2515static void
2516mlxsw_sp1_from_ptys_link(struct mlxsw_sp *mlxsw_sp, u32 ptys_eth_proto,
2517                         unsigned long *mode)
2518{
2519        int i;
2520
2521        for (i = 0; i < MLXSW_SP1_PORT_LINK_MODE_LEN; i++) {
2522                if (ptys_eth_proto & mlxsw_sp1_port_link_mode[i].mask)
2523                        __set_bit(mlxsw_sp1_port_link_mode[i].mask_ethtool,
2524                                  mode);
2525        }
2526}
2527
2528static void
2529mlxsw_sp1_from_ptys_speed_duplex(struct mlxsw_sp *mlxsw_sp, bool carrier_ok,
2530                                 u32 ptys_eth_proto,
2531                                 struct ethtool_link_ksettings *cmd)
2532{
2533        u32 speed = SPEED_UNKNOWN;
2534        u8 duplex = DUPLEX_UNKNOWN;
2535        int i;
2536
2537        if (!carrier_ok)
2538                goto out;
2539
2540        for (i = 0; i < MLXSW_SP1_PORT_LINK_MODE_LEN; i++) {
2541                if (ptys_eth_proto & mlxsw_sp1_port_link_mode[i].mask) {
2542                        speed = mlxsw_sp1_port_link_mode[i].speed;
2543                        duplex = DUPLEX_FULL;
2544                        break;
2545                }
2546        }
2547out:
2548        cmd->base.speed = speed;
2549        cmd->base.duplex = duplex;
2550}
2551
2552static u32
2553mlxsw_sp1_to_ptys_advert_link(struct mlxsw_sp *mlxsw_sp,
2554                              const struct ethtool_link_ksettings *cmd)
2555{
2556        u32 ptys_proto = 0;
2557        int i;
2558
2559        for (i = 0; i < MLXSW_SP1_PORT_LINK_MODE_LEN; i++) {
2560                if (test_bit(mlxsw_sp1_port_link_mode[i].mask_ethtool,
2561                             cmd->link_modes.advertising))
2562                        ptys_proto |= mlxsw_sp1_port_link_mode[i].mask;
2563        }
2564        return ptys_proto;
2565}
2566
2567static u32 mlxsw_sp1_to_ptys_speed(struct mlxsw_sp *mlxsw_sp, u32 speed)
2568{
2569        u32 ptys_proto = 0;
2570        int i;
2571
2572        for (i = 0; i < MLXSW_SP1_PORT_LINK_MODE_LEN; i++) {
2573                if (speed == mlxsw_sp1_port_link_mode[i].speed)
2574                        ptys_proto |= mlxsw_sp1_port_link_mode[i].mask;
2575        }
2576        return ptys_proto;
2577}
2578
2579static u32
2580mlxsw_sp1_to_ptys_upper_speed(struct mlxsw_sp *mlxsw_sp, u32 upper_speed)
2581{
2582        u32 ptys_proto = 0;
2583        int i;
2584
2585        for (i = 0; i < MLXSW_SP1_PORT_LINK_MODE_LEN; i++) {
2586                if (mlxsw_sp1_port_link_mode[i].speed <= upper_speed)
2587                        ptys_proto |= mlxsw_sp1_port_link_mode[i].mask;
2588        }
2589        return ptys_proto;
2590}
2591
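    /* Base speed reported for Spectrum-1 ports; it is a fixed 25G and is
     * presumably combined with the port's width elsewhere in the driver to
     * derive the maximum speed, cf. mlxsw_sp1_to_ptys_upper_speed() above.
     */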
2592static int
2593mlxsw_sp1_port_speed_base(struct mlxsw_sp *mlxsw_sp, u8 local_port,
2594                          u32 *base_speed)
2595{
2596        *base_speed = MLXSW_SP_PORT_BASE_SPEED_25G;
2597        return 0;
2598}
2599
2600static void
2601mlxsw_sp1_reg_ptys_eth_pack(struct mlxsw_sp *mlxsw_sp, char *payload,
2602                            u8 local_port, u32 proto_admin, bool autoneg)
2603{
2604        mlxsw_reg_ptys_eth_pack(payload, local_port, proto_admin, autoneg);
2605}
2606
2607static void
2608mlxsw_sp1_reg_ptys_eth_unpack(struct mlxsw_sp *mlxsw_sp, char *payload,
2609                              u32 *p_eth_proto_cap, u32 *p_eth_proto_admin,
2610                              u32 *p_eth_proto_oper)
2611{
2612        mlxsw_reg_ptys_eth_unpack(payload, p_eth_proto_cap, p_eth_proto_admin,
2613                                  p_eth_proto_oper);
2614}
2615
2616static const struct mlxsw_sp_port_type_speed_ops
2617mlxsw_sp1_port_type_speed_ops = {
2618        .from_ptys_supported_port       = mlxsw_sp1_from_ptys_supported_port,
2619        .from_ptys_link                 = mlxsw_sp1_from_ptys_link,
2620        .from_ptys_speed_duplex         = mlxsw_sp1_from_ptys_speed_duplex,
2621        .to_ptys_advert_link            = mlxsw_sp1_to_ptys_advert_link,
2622        .to_ptys_speed                  = mlxsw_sp1_to_ptys_speed,
2623        .to_ptys_upper_speed            = mlxsw_sp1_to_ptys_upper_speed,
2624        .port_speed_base                = mlxsw_sp1_port_speed_base,
2625        .reg_ptys_eth_pack              = mlxsw_sp1_reg_ptys_eth_pack,
2626        .reg_ptys_eth_unpack            = mlxsw_sp1_reg_ptys_eth_unpack,
2627};
2628
2629static const enum ethtool_link_mode_bit_indices
2630mlxsw_sp2_mask_ethtool_sgmii_100m[] = {
2631        ETHTOOL_LINK_MODE_100baseT_Full_BIT,
2632};
2633
2634#define MLXSW_SP2_MASK_ETHTOOL_SGMII_100M_LEN \
2635        ARRAY_SIZE(mlxsw_sp2_mask_ethtool_sgmii_100m)
2636
2637static const enum ethtool_link_mode_bit_indices
2638mlxsw_sp2_mask_ethtool_1000base_x_sgmii[] = {
2639        ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
2640        ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
2641};
2642
2643#define MLXSW_SP2_MASK_ETHTOOL_1000BASE_X_SGMII_LEN \
2644        ARRAY_SIZE(mlxsw_sp2_mask_ethtool_1000base_x_sgmii)
2645
2646static const enum ethtool_link_mode_bit_indices
2647mlxsw_sp2_mask_ethtool_2_5gbase_x_2_5gmii[] = {
2648        ETHTOOL_LINK_MODE_2500baseX_Full_BIT,
2649};
2650
2651#define MLXSW_SP2_MASK_ETHTOOL_2_5GBASE_X_2_5GMII_LEN \
2652        ARRAY_SIZE(mlxsw_sp2_mask_ethtool_2_5gbase_x_2_5gmii)
2653
2654static const enum ethtool_link_mode_bit_indices
2655mlxsw_sp2_mask_ethtool_5gbase_r[] = {
2656        ETHTOOL_LINK_MODE_5000baseT_Full_BIT,
2657};
2658
2659#define MLXSW_SP2_MASK_ETHTOOL_5GBASE_R_LEN \
2660        ARRAY_SIZE(mlxsw_sp2_mask_ethtool_5gbase_r)
2661
2662static const enum ethtool_link_mode_bit_indices
2663mlxsw_sp2_mask_ethtool_xfi_xaui_1_10g[] = {
2664        ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
2665        ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
2666        ETHTOOL_LINK_MODE_10000baseR_FEC_BIT,
2667        ETHTOOL_LINK_MODE_10000baseCR_Full_BIT,
2668        ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
2669        ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,
2670        ETHTOOL_LINK_MODE_10000baseER_Full_BIT,
2671};
2672
2673#define MLXSW_SP2_MASK_ETHTOOL_XFI_XAUI_1_10G_LEN \
2674        ARRAY_SIZE(mlxsw_sp2_mask_ethtool_xfi_xaui_1_10g)
2675
2676static const enum ethtool_link_mode_bit_indices
2677mlxsw_sp2_mask_ethtool_xlaui_4_xlppi_4_40g[] = {
2678        ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
2679        ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
2680        ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
2681        ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
2682};
2683
2684#define MLXSW_SP2_MASK_ETHTOOL_XLAUI_4_XLPPI_4_40G_LEN \
2685        ARRAY_SIZE(mlxsw_sp2_mask_ethtool_xlaui_4_xlppi_4_40g)
2686
2687static const enum ethtool_link_mode_bit_indices
2688mlxsw_sp2_mask_ethtool_25gaui_1_25gbase_cr_kr[] = {
2689        ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
2690        ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
2691        ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
2692};
2693
2694#define MLXSW_SP2_MASK_ETHTOOL_25GAUI_1_25GBASE_CR_KR_LEN \
2695        ARRAY_SIZE(mlxsw_sp2_mask_ethtool_25gaui_1_25gbase_cr_kr)
2696
2697static const enum ethtool_link_mode_bit_indices
2698mlxsw_sp2_mask_ethtool_50gaui_2_laui_2_50gbase_cr2_kr2[] = {
2699        ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
2700        ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
2701        ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
2702};
2703
2704#define MLXSW_SP2_MASK_ETHTOOL_50GAUI_2_LAUI_2_50GBASE_CR2_KR2_LEN \
2705        ARRAY_SIZE(mlxsw_sp2_mask_ethtool_50gaui_2_laui_2_50gbase_cr2_kr2)
2706
2707static const enum ethtool_link_mode_bit_indices
2708mlxsw_sp2_mask_ethtool_50gaui_1_laui_1_50gbase_cr_kr[] = {
2709        ETHTOOL_LINK_MODE_50000baseKR_Full_BIT,
2710        ETHTOOL_LINK_MODE_50000baseSR_Full_BIT,
2711        ETHTOOL_LINK_MODE_50000baseCR_Full_BIT,
2712        ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
2713        ETHTOOL_LINK_MODE_50000baseDR_Full_BIT,
2714};
2715
2716#define MLXSW_SP2_MASK_ETHTOOL_50GAUI_1_LAUI_1_50GBASE_CR_KR_LEN \
2717        ARRAY_SIZE(mlxsw_sp2_mask_ethtool_50gaui_1_laui_1_50gbase_cr_kr)
2718
2719static const enum ethtool_link_mode_bit_indices
2720mlxsw_sp2_mask_ethtool_caui_4_100gbase_cr4_kr4[] = {
2721        ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
2722        ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
2723        ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
2724        ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
2725};
2726
2727#define MLXSW_SP2_MASK_ETHTOOL_CAUI_4_100GBASE_CR4_KR4_LEN \
2728        ARRAY_SIZE(mlxsw_sp2_mask_ethtool_caui_4_100gbase_cr4_kr4)
2729
2730static const enum ethtool_link_mode_bit_indices
2731mlxsw_sp2_mask_ethtool_100gaui_2_100gbase_cr2_kr2[] = {
2732        ETHTOOL_LINK_MODE_100000baseKR2_Full_BIT,
2733        ETHTOOL_LINK_MODE_100000baseSR2_Full_BIT,
2734        ETHTOOL_LINK_MODE_100000baseCR2_Full_BIT,
2735        ETHTOOL_LINK_MODE_100000baseLR2_ER2_FR2_Full_BIT,
2736        ETHTOOL_LINK_MODE_100000baseDR2_Full_BIT,
2737};
2738
2739#define MLXSW_SP2_MASK_ETHTOOL_100GAUI_2_100GBASE_CR2_KR2_LEN \
2740        ARRAY_SIZE(mlxsw_sp2_mask_ethtool_100gaui_2_100gbase_cr2_kr2)
2741
2742static const enum ethtool_link_mode_bit_indices
2743mlxsw_sp2_mask_ethtool_200gaui_4_200gbase_cr4_kr4[] = {
2744        ETHTOOL_LINK_MODE_200000baseKR4_Full_BIT,
2745        ETHTOOL_LINK_MODE_200000baseSR4_Full_BIT,
2746        ETHTOOL_LINK_MODE_200000baseLR4_ER4_FR4_Full_BIT,
2747        ETHTOOL_LINK_MODE_200000baseDR4_Full_BIT,
2748        ETHTOOL_LINK_MODE_200000baseCR4_Full_BIT,
2749};
2750
2751#define MLXSW_SP2_MASK_ETHTOOL_200GAUI_4_200GBASE_CR4_KR4_LEN \
2752        ARRAY_SIZE(mlxsw_sp2_mask_ethtool_200gaui_4_200gbase_cr4_kr4)
2753
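    /* Spectrum-2 link mode table: a single PTYS extended protocol bit covers
     * a speed/lane combination that corresponds to several media types, so
     * each entry points at an array of ethtool link mode bits instead of a
     * single bit as on Spectrum-1.
     */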
2754struct mlxsw_sp2_port_link_mode {
2755        const enum ethtool_link_mode_bit_indices *mask_ethtool;
2756        int m_ethtool_len;
2757        u32 mask;
2758        u32 speed;
2759};
2760
2761static const struct mlxsw_sp2_port_link_mode mlxsw_sp2_port_link_mode[] = {
2762        {
2763                .mask           = MLXSW_REG_PTYS_EXT_ETH_SPEED_SGMII_100M,
2764                .mask_ethtool   = mlxsw_sp2_mask_ethtool_sgmii_100m,
2765                .m_ethtool_len  = MLXSW_SP2_MASK_ETHTOOL_SGMII_100M_LEN,
2766                .speed          = SPEED_100,
2767        },
2768        {
2769                .mask           = MLXSW_REG_PTYS_EXT_ETH_SPEED_1000BASE_X_SGMII,
2770                .mask_ethtool   = mlxsw_sp2_mask_ethtool_1000base_x_sgmii,
2771                .m_ethtool_len  = MLXSW_SP2_MASK_ETHTOOL_1000BASE_X_SGMII_LEN,
2772                .speed          = SPEED_1000,
2773        },
2774        {
2775                .mask           = MLXSW_REG_PTYS_EXT_ETH_SPEED_2_5GBASE_X_2_5GMII,
2776                .mask_ethtool   = mlxsw_sp2_mask_ethtool_2_5gbase_x_2_5gmii,
2777                .m_ethtool_len  = MLXSW_SP2_MASK_ETHTOOL_2_5GBASE_X_2_5GMII_LEN,
2778                .speed          = SPEED_2500,
2779        },
2780        {
2781                .mask           = MLXSW_REG_PTYS_EXT_ETH_SPEED_5GBASE_R,
2782                .mask_ethtool   = mlxsw_sp2_mask_ethtool_5gbase_r,
2783                .m_ethtool_len  = MLXSW_SP2_MASK_ETHTOOL_5GBASE_R_LEN,
2784                .speed          = SPEED_5000,
2785        },
2786        {
2787                .mask           = MLXSW_REG_PTYS_EXT_ETH_SPEED_XFI_XAUI_1_10G,
2788                .mask_ethtool   = mlxsw_sp2_mask_ethtool_xfi_xaui_1_10g,
2789                .m_ethtool_len  = MLXSW_SP2_MASK_ETHTOOL_XFI_XAUI_1_10G_LEN,
2790                .speed          = SPEED_10000,
2791        },
2792        {
2793                .mask           = MLXSW_REG_PTYS_EXT_ETH_SPEED_XLAUI_4_XLPPI_4_40G,
2794                .mask_ethtool   = mlxsw_sp2_mask_ethtool_xlaui_4_xlppi_4_40g,
2795                .m_ethtool_len  = MLXSW_SP2_MASK_ETHTOOL_XLAUI_4_XLPPI_4_40G_LEN,
2796                .speed          = SPEED_40000,
2797        },
2798        {
2799                .mask           = MLXSW_REG_PTYS_EXT_ETH_SPEED_25GAUI_1_25GBASE_CR_KR,
2800                .mask_ethtool   = mlxsw_sp2_mask_ethtool_25gaui_1_25gbase_cr_kr,
2801                .m_ethtool_len  = MLXSW_SP2_MASK_ETHTOOL_25GAUI_1_25GBASE_CR_KR_LEN,
2802                .speed          = SPEED_25000,
2803        },
2804        {
2805                .mask           = MLXSW_REG_PTYS_EXT_ETH_SPEED_50GAUI_2_LAUI_2_50GBASE_CR2_KR2,
2806                .mask_ethtool   = mlxsw_sp2_mask_ethtool_50gaui_2_laui_2_50gbase_cr2_kr2,
2807                .m_ethtool_len  = MLXSW_SP2_MASK_ETHTOOL_50GAUI_2_LAUI_2_50GBASE_CR2_KR2_LEN,
2808                .speed          = SPEED_50000,
2809        },
2810        {
2811                .mask           = MLXSW_REG_PTYS_EXT_ETH_SPEED_50GAUI_1_LAUI_1_50GBASE_CR_KR,
2812                .mask_ethtool   = mlxsw_sp2_mask_ethtool_50gaui_1_laui_1_50gbase_cr_kr,
2813                .m_ethtool_len  = MLXSW_SP2_MASK_ETHTOOL_50GAUI_1_LAUI_1_50GBASE_CR_KR_LEN,
2814                .speed          = SPEED_50000,
2815        },
2816        {
2817                .mask           = MLXSW_REG_PTYS_EXT_ETH_SPEED_CAUI_4_100GBASE_CR4_KR4,
2818                .mask_ethtool   = mlxsw_sp2_mask_ethtool_caui_4_100gbase_cr4_kr4,
2819                .m_ethtool_len  = MLXSW_SP2_MASK_ETHTOOL_CAUI_4_100GBASE_CR4_KR4_LEN,
2820                .speed          = SPEED_100000,
2821        },
2822        {
2823                .mask           = MLXSW_REG_PTYS_EXT_ETH_SPEED_100GAUI_2_100GBASE_CR2_KR2,
2824                .mask_ethtool   = mlxsw_sp2_mask_ethtool_100gaui_2_100gbase_cr2_kr2,
2825                .m_ethtool_len  = MLXSW_SP2_MASK_ETHTOOL_100GAUI_2_100GBASE_CR2_KR2_LEN,
2826                .speed          = SPEED_100000,
2827        },
2828        {
2829                .mask           = MLXSW_REG_PTYS_EXT_ETH_SPEED_200GAUI_4_200GBASE_CR4_KR4,
2830                .mask_ethtool   = mlxsw_sp2_mask_ethtool_200gaui_4_200gbase_cr4_kr4,
2831                .m_ethtool_len  = MLXSW_SP2_MASK_ETHTOOL_200GAUI_4_200GBASE_CR4_KR4_LEN,
2832                .speed          = SPEED_200000,
2833        },
2834};
2835
2836#define MLXSW_SP2_PORT_LINK_MODE_LEN ARRAY_SIZE(mlxsw_sp2_port_link_mode)
2837
2838static void
2839mlxsw_sp2_from_ptys_supported_port(struct mlxsw_sp *mlxsw_sp,
2840                                   u32 ptys_eth_proto,
2841                                   struct ethtool_link_ksettings *cmd)
2842{
2843        ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE);
2844        ethtool_link_ksettings_add_link_mode(cmd, supported, Backplane);
2845}
2846
2847static void
2848mlxsw_sp2_set_bit_ethtool(const struct mlxsw_sp2_port_link_mode *link_mode,
2849                          unsigned long *mode)
2850{
2851        int i;
2852
2853        for (i = 0; i < link_mode->m_ethtool_len; i++)
2854                __set_bit(link_mode->mask_ethtool[i], mode);
2855}
2856
2857static void
2858mlxsw_sp2_from_ptys_link(struct mlxsw_sp *mlxsw_sp, u32 ptys_eth_proto,
2859                         unsigned long *mode)
2860{
2861        int i;
2862
2863        for (i = 0; i < MLXSW_SP2_PORT_LINK_MODE_LEN; i++) {
2864                if (ptys_eth_proto & mlxsw_sp2_port_link_mode[i].mask)
2865                        mlxsw_sp2_set_bit_ethtool(&mlxsw_sp2_port_link_mode[i],
2866                                                  mode);
2867        }
2868}
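    /* For example, mlxsw_sp2_from_ptys_link() called with the
     * CAUI_4_100GBASE_CR4_KR4 bit set in ptys_eth_proto marks the
     * 100000baseKR4, 100000baseSR4, 100000baseCR4 and 100000baseLR4_ER4
     * link modes in the resulting ethtool bitmap, per the tables above.
     */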
2869
2870static void
2871mlxsw_sp2_from_ptys_speed_duplex(struct mlxsw_sp *mlxsw_sp, bool carrier_ok,
2872                                 u32 ptys_eth_proto,
2873                                 struct ethtool_link_ksettings *cmd)
2874{
2875        u32 speed = SPEED_UNKNOWN;
2876        u8 duplex = DUPLEX_UNKNOWN;
2877        int i;
2878
2879        if (!carrier_ok)
2880                goto out;
2881
2882        for (i = 0; i < MLXSW_SP2_PORT_LINK_MODE_LEN; i++) {
2883                if (ptys_eth_proto & mlxsw_sp2_port_link_mode[i].mask) {
2884                        speed = mlxsw_sp2_port_link_mode[i].speed;
2885                        duplex = DUPLEX_FULL;
2886                        break;
2887                }
2888        }
2889out:
2890        cmd->base.speed = speed;
2891        cmd->base.duplex = duplex;
2892}
2893
2894static bool
2895mlxsw_sp2_test_bit_ethtool(const struct mlxsw_sp2_port_link_mode *link_mode,
2896                           const unsigned long *mode)
2897{
2898        int cnt = 0;
2899        int i;
2900
2901        for (i = 0; i < link_mode->m_ethtool_len; i++) {
2902                if (test_bit(link_mode->mask_ethtool[i], mode))
2903                        cnt++;
2904        }
2905
2906        return cnt == link_mode->m_ethtool_len;
2907}
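    /* Note that a PTYS mode counts as requested only when every ethtool
     * link mode in its group is set; advertising, say, only
     * 100000baseCR4_Full without its KR4/SR4/LR4_ER4 siblings does not
     * select the CAUI_4_100GBASE_CR4_KR4 bit.
     */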
2908
2909static u32
2910mlxsw_sp2_to_ptys_advert_link(struct mlxsw_sp *mlxsw_sp,
2911                              const struct ethtool_link_ksettings *cmd)
2912{
2913        u32 ptys_proto = 0;
2914        int i;
2915
2916        for (i = 0; i < MLXSW_SP2_PORT_LINK_MODE_LEN; i++) {
2917                if (mlxsw_sp2_test_bit_ethtool(&mlxsw_sp2_port_link_mode[i],
2918                                               cmd->link_modes.advertising))
2919                        ptys_proto |= mlxsw_sp2_port_link_mode[i].mask;
2920        }
2921        return ptys_proto;
2922}
2923
2924static u32 mlxsw_sp2_to_ptys_speed(struct mlxsw_sp *mlxsw_sp, u32 speed)
2925{
2926        u32 ptys_proto = 0;
2927        int i;
2928
2929        for (i = 0; i < MLXSW_SP2_PORT_LINK_MODE_LEN; i++) {
2930                if (speed == mlxsw_sp2_port_link_mode[i].speed)
2931                        ptys_proto |= mlxsw_sp2_port_link_mode[i].mask;
2932        }
2933        return ptys_proto;
2934}
2935
2936static u32
2937mlxsw_sp2_to_ptys_upper_speed(struct mlxsw_sp *mlxsw_sp, u32 upper_speed)
2938{
2939        u32 ptys_proto = 0;
2940        int i;
2941
2942        for (i = 0; i < MLXSW_SP2_PORT_LINK_MODE_LEN; i++) {
2943                if (mlxsw_sp2_port_link_mode[i].speed <= upper_speed)
2944                        ptys_proto |= mlxsw_sp2_port_link_mode[i].mask;
2945        }
2946        return ptys_proto;
2947}
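    /* As an illustration, an upper_speed of 100000 selects every entry in
     * mlxsw_sp2_port_link_mode[], from SGMII_100M up to and including both
     * 100G modes, and leaves out only the 200G mode.
     */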
2948
2949static int
2950mlxsw_sp2_port_speed_base(struct mlxsw_sp *mlxsw_sp, u8 local_port,
2951                          u32 *base_speed)
2952{
2953        char ptys_pl[MLXSW_REG_PTYS_LEN];
2954        u32 eth_proto_cap;
2955        int err;
2956
2957        /* In Spectrum-2, the single-lane (1x) base speed can change from
2958         * port to port, so query it from firmware.
2959         */
2960        mlxsw_reg_ptys_ext_eth_pack(ptys_pl, local_port, 0, false);
2961        err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
2962        if (err)
2963                return err;
2964        mlxsw_reg_ptys_ext_eth_unpack(ptys_pl, &eth_proto_cap, NULL, NULL);
2965
2966        if (eth_proto_cap &
2967            MLXSW_REG_PTYS_EXT_ETH_SPEED_50GAUI_1_LAUI_1_50GBASE_CR_KR) {
2968                *base_speed = MLXSW_SP_PORT_BASE_SPEED_50G;
2969                return 0;
2970        }
2971
2972        if (eth_proto_cap &
2973            MLXSW_REG_PTYS_EXT_ETH_SPEED_25GAUI_1_25GBASE_CR_KR) {
2974                *base_speed = MLXSW_SP_PORT_BASE_SPEED_25G;
2975                return 0;
2976        }
2977
2978        return -EIO;
2979}
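    /* The base speed reported here is the per-lane speed: 50G when the
     * port supports the single-lane 50G mode, 25G when it only supports
     * the single-lane 25G mode. Callers multiply it by the port width to
     * derive the maximum speed to advertise.
     */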
2980
2981static void
2982mlxsw_sp2_reg_ptys_eth_pack(struct mlxsw_sp *mlxsw_sp, char *payload,
2983                            u8 local_port, u32 proto_admin,
2984                            bool autoneg)
2985{
2986        mlxsw_reg_ptys_ext_eth_pack(payload, local_port, proto_admin, autoneg);
2987}
2988
2989static void
2990mlxsw_sp2_reg_ptys_eth_unpack(struct mlxsw_sp *mlxsw_sp, char *payload,
2991                              u32 *p_eth_proto_cap, u32 *p_eth_proto_admin,
2992                              u32 *p_eth_proto_oper)
2993{
2994        mlxsw_reg_ptys_ext_eth_unpack(payload, p_eth_proto_cap,
2995                                      p_eth_proto_admin, p_eth_proto_oper);
2996}
2997
2998static const struct mlxsw_sp_port_type_speed_ops
2999mlxsw_sp2_port_type_speed_ops = {
3000        .from_ptys_supported_port       = mlxsw_sp2_from_ptys_supported_port,
3001        .from_ptys_link                 = mlxsw_sp2_from_ptys_link,
3002        .from_ptys_speed_duplex         = mlxsw_sp2_from_ptys_speed_duplex,
3003        .to_ptys_advert_link            = mlxsw_sp2_to_ptys_advert_link,
3004        .to_ptys_speed                  = mlxsw_sp2_to_ptys_speed,
3005        .to_ptys_upper_speed            = mlxsw_sp2_to_ptys_upper_speed,
3006        .port_speed_base                = mlxsw_sp2_port_speed_base,
3007        .reg_ptys_eth_pack              = mlxsw_sp2_reg_ptys_eth_pack,
3008        .reg_ptys_eth_unpack            = mlxsw_sp2_reg_ptys_eth_unpack,
3009};
3010
3011static void
3012mlxsw_sp_port_get_link_supported(struct mlxsw_sp *mlxsw_sp, u32 eth_proto_cap,
3013                                 struct ethtool_link_ksettings *cmd)
3014{
3015        const struct mlxsw_sp_port_type_speed_ops *ops;
3016
3017        ops = mlxsw_sp->port_type_speed_ops;
3018
3019        ethtool_link_ksettings_add_link_mode(cmd, supported, Asym_Pause);
3020        ethtool_link_ksettings_add_link_mode(cmd, supported, Autoneg);
3021        ethtool_link_ksettings_add_link_mode(cmd, supported, Pause);
3022
3023        ops->from_ptys_supported_port(mlxsw_sp, eth_proto_cap, cmd);
3024        ops->from_ptys_link(mlxsw_sp, eth_proto_cap, cmd->link_modes.supported);
3025}
3026
3027static void
3028mlxsw_sp_port_get_link_advertise(struct mlxsw_sp *mlxsw_sp,
3029                                 u32 eth_proto_admin, bool autoneg,
3030                                 struct ethtool_link_ksettings *cmd)
3031{
3032        const struct mlxsw_sp_port_type_speed_ops *ops;
3033
3034        ops = mlxsw_sp->port_type_speed_ops;
3035
3036        if (!autoneg)
3037                return;
3038
3039        ethtool_link_ksettings_add_link_mode(cmd, advertising, Autoneg);
3040        ops->from_ptys_link(mlxsw_sp, eth_proto_admin,
3041                            cmd->link_modes.advertising);
3042}
3043
3044static u8
3045mlxsw_sp_port_connector_port(enum mlxsw_reg_ptys_connector_type connector_type)
3046{
3047        switch (connector_type) {
3048        case MLXSW_REG_PTYS_CONNECTOR_TYPE_UNKNOWN_OR_NO_CONNECTOR:
3049                return PORT_OTHER;
3050        case MLXSW_REG_PTYS_CONNECTOR_TYPE_PORT_NONE:
3051                return PORT_NONE;
3052        case MLXSW_REG_PTYS_CONNECTOR_TYPE_PORT_TP:
3053                return PORT_TP;
3054        case MLXSW_REG_PTYS_CONNECTOR_TYPE_PORT_AUI:
3055                return PORT_AUI;
3056        case MLXSW_REG_PTYS_CONNECTOR_TYPE_PORT_BNC:
3057                return PORT_BNC;
3058        case MLXSW_REG_PTYS_CONNECTOR_TYPE_PORT_MII:
3059                return PORT_MII;
3060        case MLXSW_REG_PTYS_CONNECTOR_TYPE_PORT_FIBRE:
3061                return PORT_FIBRE;
3062        case MLXSW_REG_PTYS_CONNECTOR_TYPE_PORT_DA:
3063                return PORT_DA;
3064        case MLXSW_REG_PTYS_CONNECTOR_TYPE_PORT_OTHER:
3065                return PORT_OTHER;
3066        default:
3067                WARN_ON_ONCE(1);
3068                return PORT_OTHER;
3069        }
3070}
3071
3072static int mlxsw_sp_port_get_link_ksettings(struct net_device *dev,
3073                                            struct ethtool_link_ksettings *cmd)
3074{
3075        u32 eth_proto_cap, eth_proto_admin, eth_proto_oper;
3076        struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
3077        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
3078        const struct mlxsw_sp_port_type_speed_ops *ops;
3079        char ptys_pl[MLXSW_REG_PTYS_LEN];
3080        u8 connector_type;
3081        bool autoneg;
3082        int err;
3083
3084        ops = mlxsw_sp->port_type_speed_ops;
3085
3086        autoneg = mlxsw_sp_port->link.autoneg;
3087        ops->reg_ptys_eth_pack(mlxsw_sp, ptys_pl, mlxsw_sp_port->local_port,
3088                               0, false);
3089        err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
3090        if (err)
3091                return err;
3092        ops->reg_ptys_eth_unpack(mlxsw_sp, ptys_pl, &eth_proto_cap,
3093                                 &eth_proto_admin, &eth_proto_oper);
3094
3095        mlxsw_sp_port_get_link_supported(mlxsw_sp, eth_proto_cap, cmd);
3096
3097        mlxsw_sp_port_get_link_advertise(mlxsw_sp, eth_proto_admin, autoneg,
3098                                         cmd);
3099
3100        cmd->base.autoneg = autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE;
3101        connector_type = mlxsw_reg_ptys_connector_type_get(ptys_pl);
3102        cmd->base.port = mlxsw_sp_port_connector_port(connector_type);
3103        ops->from_ptys_speed_duplex(mlxsw_sp, netif_carrier_ok(dev),
3104                                    eth_proto_oper, cmd);
3105
3106        return 0;
3107}
3108
3109static int
3110mlxsw_sp_port_set_link_ksettings(struct net_device *dev,
3111                                 const struct ethtool_link_ksettings *cmd)
3112{
3113        struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
3114        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
3115        const struct mlxsw_sp_port_type_speed_ops *ops;
3116        char ptys_pl[MLXSW_REG_PTYS_LEN];
3117        u32 eth_proto_cap, eth_proto_new;
3118        bool autoneg;
3119        int err;
3120
3121        ops = mlxsw_sp->port_type_speed_ops;
3122
3123        ops->reg_ptys_eth_pack(mlxsw_sp, ptys_pl, mlxsw_sp_port->local_port,
3124                               0, false);
3125        err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
3126        if (err)
3127                return err;
3128        ops->reg_ptys_eth_unpack(mlxsw_sp, ptys_pl, &eth_proto_cap, NULL, NULL);
3129
3130        autoneg = cmd->base.autoneg == AUTONEG_ENABLE;
3131        if (!autoneg && cmd->base.speed == SPEED_56000) {
3132                netdev_err(dev, "56G not supported with autoneg off\n");
3133                return -EINVAL;
3134        }
3135        eth_proto_new = autoneg ?
3136                ops->to_ptys_advert_link(mlxsw_sp, cmd) :
3137                ops->to_ptys_speed(mlxsw_sp, cmd->base.speed);
3138
3139        eth_proto_new = eth_proto_new & eth_proto_cap;
3140        if (!eth_proto_new) {
3141                netdev_err(dev, "No supported speed requested\n");
3142                return -EINVAL;
3143        }
3144
3145        ops->reg_ptys_eth_pack(mlxsw_sp, ptys_pl, mlxsw_sp_port->local_port,
3146                               eth_proto_new, autoneg);
3147        err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
3148        if (err)
3149                return err;
3150
3151        mlxsw_sp_port->link.autoneg = autoneg;
3152
3153        if (!netif_running(dev))
3154                return 0;
3155
3156        mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
3157        mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);
3158
3159        return 0;
3160}
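    /* The flow above: query the port's capability mask, translate the
     * request (the advertised set when autoneg is on, a single forced
     * speed otherwise), intersect it with the capabilities, write the new
     * admin mask and, if the netdev is up, toggle the admin status so the
     * change takes effect.
     */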
3161
3162static int mlxsw_sp_flash_device(struct net_device *dev,
3163                                 struct ethtool_flash *flash)
3164{
3165        struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
3166        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
3167        const struct firmware *firmware;
3168        int err;
3169
3170        if (flash->region != ETHTOOL_FLASH_ALL_REGIONS)
3171                return -EOPNOTSUPP;
3172
3173        dev_hold(dev);
3174        rtnl_unlock();
3175
3176        err = request_firmware_direct(&firmware, flash->data, &dev->dev);
3177        if (err)
3178                goto out;
3179        err = mlxsw_sp_firmware_flash(mlxsw_sp, firmware);
3180        release_firmware(firmware);
3181out:
3182        rtnl_lock();
3183        dev_put(dev);
3184        return err;
3185}
3186
3187static int mlxsw_sp_get_module_info(struct net_device *netdev,
3188                                    struct ethtool_modinfo *modinfo)
3189{
3190        struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(netdev);
3191        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
3192        int err;
3193
3194        err = mlxsw_env_get_module_info(mlxsw_sp->core,
3195                                        mlxsw_sp_port->mapping.module,
3196                                        modinfo);
3197
3198        return err;
3199}
3200
3201static int mlxsw_sp_get_module_eeprom(struct net_device *netdev,
3202                                      struct ethtool_eeprom *ee,
3203                                      u8 *data)
3204{
3205        struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(netdev);
3206        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
3207        int err;
3208
3209        err = mlxsw_env_get_module_eeprom(netdev, mlxsw_sp->core,
3210                                          mlxsw_sp_port->mapping.module, ee,
3211                                          data);
3212
3213        return err;
3214}
3215
3216static const struct ethtool_ops mlxsw_sp_port_ethtool_ops = {
3217        .get_drvinfo            = mlxsw_sp_port_get_drvinfo,
3218        .get_link               = ethtool_op_get_link,
3219        .get_pauseparam         = mlxsw_sp_port_get_pauseparam,
3220        .set_pauseparam         = mlxsw_sp_port_set_pauseparam,
3221        .get_strings            = mlxsw_sp_port_get_strings,
3222        .set_phys_id            = mlxsw_sp_port_set_phys_id,
3223        .get_ethtool_stats      = mlxsw_sp_port_get_stats,
3224        .get_sset_count         = mlxsw_sp_port_get_sset_count,
3225        .get_link_ksettings     = mlxsw_sp_port_get_link_ksettings,
3226        .set_link_ksettings     = mlxsw_sp_port_set_link_ksettings,
3227        .flash_device           = mlxsw_sp_flash_device,
3228        .get_module_info        = mlxsw_sp_get_module_info,
3229        .get_module_eeprom      = mlxsw_sp_get_module_eeprom,
3230};
3231
3232static int
3233mlxsw_sp_port_speed_by_width_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 width)
3234{
3235        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
3236        const struct mlxsw_sp_port_type_speed_ops *ops;
3237        char ptys_pl[MLXSW_REG_PTYS_LEN];
3238        u32 eth_proto_admin;
3239        u32 upper_speed;
3240        u32 base_speed;
3241        int err;
3242
3243        ops = mlxsw_sp->port_type_speed_ops;
3244
3245        err = ops->port_speed_base(mlxsw_sp, mlxsw_sp_port->local_port,
3246                                   &base_speed);
3247        if (err)
3248                return err;
3249        upper_speed = base_speed * width;
3250
3251        eth_proto_admin = ops->to_ptys_upper_speed(mlxsw_sp, upper_speed);
3252        ops->reg_ptys_eth_pack(mlxsw_sp, ptys_pl, mlxsw_sp_port->local_port,
3253                               eth_proto_admin, mlxsw_sp_port->link.autoneg);
3254        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
3255}
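    /* For example, on a Spectrum-2 port whose base (single-lane) speed is
     * 50G, a width of 4 yields an upper speed of 200G, so every link mode
     * up to 200G is written as the admin mask.
     */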
3256
3257int mlxsw_sp_port_ets_set(struct mlxsw_sp_port *mlxsw_sp_port,
3258                          enum mlxsw_reg_qeec_hr hr, u8 index, u8 next_index,
3259                          bool dwrr, u8 dwrr_weight)
3260{
3261        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
3262        char qeec_pl[MLXSW_REG_QEEC_LEN];
3263
3264        mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
3265                            next_index);
3266        mlxsw_reg_qeec_de_set(qeec_pl, true);
3267        mlxsw_reg_qeec_dwrr_set(qeec_pl, dwrr);
3268        mlxsw_reg_qeec_dwrr_weight_set(qeec_pl, dwrr_weight);
3269        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
3270}
3271
3272int mlxsw_sp_port_ets_maxrate_set(struct mlxsw_sp_port *mlxsw_sp_port,
3273                                  enum mlxsw_reg_qeec_hr hr, u8 index,
3274                                  u8 next_index, u32 maxrate)
3275{
3276        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
3277        char qeec_pl[MLXSW_REG_QEEC_LEN];
3278
3279        mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
3280                            next_index);
3281        mlxsw_reg_qeec_mase_set(qeec_pl, true);
3282        mlxsw_reg_qeec_max_shaper_rate_set(qeec_pl, maxrate);
3283        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
3284}
3285
3286static int mlxsw_sp_port_min_bw_set(struct mlxsw_sp_port *mlxsw_sp_port,
3287                                    enum mlxsw_reg_qeec_hr hr, u8 index,
3288                                    u8 next_index, u32 minrate)
3289{
3290        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
3291        char qeec_pl[MLXSW_REG_QEEC_LEN];
3292
3293        mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
3294                            next_index);
3295        mlxsw_reg_qeec_mise_set(qeec_pl, true);
3296        mlxsw_reg_qeec_min_shaper_rate_set(qeec_pl, minrate);
3297
3298        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
3299}
3300
3301int mlxsw_sp_port_prio_tc_set(struct mlxsw_sp_port *mlxsw_sp_port,
3302                              u8 switch_prio, u8 tclass)
3303{
3304        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
3305        char qtct_pl[MLXSW_REG_QTCT_LEN];
3306
3307        mlxsw_reg_qtct_pack(qtct_pl, mlxsw_sp_port->local_port, switch_prio,
3308                            tclass);
3309        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qtct), qtct_pl);
3310}
3311
3312static int mlxsw_sp_port_ets_init(struct mlxsw_sp_port *mlxsw_sp_port)
3313{
3314        int err, i;
3315
3316        /* Set up the elements hierarchy, so that each TC is linked to
3317         * one subgroup, and all subgroups are members of the same group.
3318         */
3319        err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
3320                                    MLXSW_REG_QEEC_HIERARCY_GROUP, 0, 0, false,
3321                                    0);
3322        if (err)
3323                return err;
3324        for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
3325                err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
3326                                            MLXSW_REG_QEEC_HIERARCY_SUBGROUP, i,
3327                                            0, false, 0);
3328                if (err)
3329                        return err;
3330        }
3331        for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
3332                err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
3333                                            MLXSW_REG_QEEC_HIERARCY_TC, i, i,
3334                                            false, 0);
3335                if (err)
3336                        return err;
3337
3338                err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
3339                                            MLXSW_REG_QEEC_HIERARCY_TC,
3340                                            i + 8, i,
3341                                            true, 100);
3342                if (err)
3343                        return err;
3344        }
3345
3346        /* Make sure the max shaper is disabled in all hierarchies that
3347         * support it.
3348         */
3349        err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
3350                                            MLXSW_REG_QEEC_HIERARCY_PORT, 0, 0,
3351                                            MLXSW_REG_QEEC_MAS_DIS);
3352        if (err)
3353                return err;
3354        for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
3355                err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
3356                                                    MLXSW_REG_QEEC_HIERARCY_SUBGROUP,
3357                                                    i, 0,
3358                                                    MLXSW_REG_QEEC_MAS_DIS);
3359                if (err)
3360                        return err;
3361        }
3362        for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
3363                err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
3364                                                    MLXSW_REG_QEEC_HIERARCY_TC,
3365                                                    i, i,
3366                                                    MLXSW_REG_QEEC_MAS_DIS);
3367                if (err)
3368                        return err;
3369
3370                err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
3371                                                    MLXSW_REG_QEEC_HIERARCY_TC,
3372                                                    i + 8, i,
3373                                                    MLXSW_REG_QEEC_MAS_DIS);
3374                if (err)
3375                        return err;
3376        }
3377
3378        /* Configure the min shaper for multicast TCs. */
3379        for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
3380                err = mlxsw_sp_port_min_bw_set(mlxsw_sp_port,
3381                                               MLXSW_REG_QEEC_HIERARCY_TC,
3382                                               i + 8, i,
3383                                               MLXSW_REG_QEEC_MIS_MIN);
3384                if (err)
3385                        return err;
3386        }
3387
3388        /* Map all priorities to traffic class 0. */
3389        for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
3390                err = mlxsw_sp_port_prio_tc_set(mlxsw_sp_port, i, 0);
3391                if (err)
3392                        return err;
3393        }
3394
3395        return 0;
3396}
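    /* The resulting per-port layout: eight subgroups hang off a single
     * group, and each subgroup carries a unicast TC (i) and a multicast
     * TC (i + 8). Max shapers are disabled at every level that supports
     * one, the multicast TCs get a minimum shaper, and all switch
     * priorities initially map to TC 0.
     */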
3397
3398static int mlxsw_sp_port_tc_mc_mode_set(struct mlxsw_sp_port *mlxsw_sp_port,
3399                                        bool enable)
3400{
3401        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
3402        char qtctm_pl[MLXSW_REG_QTCTM_LEN];
3403
3404        mlxsw_reg_qtctm_pack(qtctm_pl, mlxsw_sp_port->local_port, enable);
3405        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qtctm), qtctm_pl);
3406}
3407
3408static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
3409                                bool split, u8 module, u8 width, u8 lane)
3410{
3411        struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
3412        struct mlxsw_sp_port *mlxsw_sp_port;
3413        struct net_device *dev;
3414        int err;
3415
3416        err = mlxsw_core_port_init(mlxsw_sp->core, local_port,
3417                                   module + 1, split, lane / width,
3418                                   mlxsw_sp->base_mac,
3419                                   sizeof(mlxsw_sp->base_mac));
3420        if (err) {
3421                dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to init core port\n",
3422                        local_port);
3423                return err;
3424        }
3425
3426        dev = alloc_etherdev(sizeof(struct mlxsw_sp_port));
3427        if (!dev) {
3428                err = -ENOMEM;
3429                goto err_alloc_etherdev;
3430        }
3431        SET_NETDEV_DEV(dev, mlxsw_sp->bus_info->dev);
3432        mlxsw_sp_port = netdev_priv(dev);
3433        mlxsw_sp_port->dev = dev;
3434        mlxsw_sp_port->mlxsw_sp = mlxsw_sp;
3435        mlxsw_sp_port->local_port = local_port;
3436        mlxsw_sp_port->pvid = MLXSW_SP_DEFAULT_VID;
3437        mlxsw_sp_port->split = split;
3438        mlxsw_sp_port->mapping.module = module;
3439        mlxsw_sp_port->mapping.width = width;
3440        mlxsw_sp_port->mapping.lane = lane;
3441        mlxsw_sp_port->link.autoneg = 1;
3442        INIT_LIST_HEAD(&mlxsw_sp_port->vlans_list);
3443        INIT_LIST_HEAD(&mlxsw_sp_port->mall_tc_list);
3444
3445        mlxsw_sp_port->pcpu_stats =
3446                netdev_alloc_pcpu_stats(struct mlxsw_sp_port_pcpu_stats);
3447        if (!mlxsw_sp_port->pcpu_stats) {
3448                err = -ENOMEM;
3449                goto err_alloc_stats;
3450        }
3451
3452        mlxsw_sp_port->sample = kzalloc(sizeof(*mlxsw_sp_port->sample),
3453                                        GFP_KERNEL);
3454        if (!mlxsw_sp_port->sample) {
3455                err = -ENOMEM;
3456                goto err_alloc_sample;
3457        }
3458
3459        INIT_DELAYED_WORK(&mlxsw_sp_port->periodic_hw_stats.update_dw,
3460                          &update_stats_cache);
3461
3462        dev->netdev_ops = &mlxsw_sp_port_netdev_ops;
3463        dev->ethtool_ops = &mlxsw_sp_port_ethtool_ops;
3464
3465        err = mlxsw_sp_port_module_map(mlxsw_sp_port, module, width, lane);
3466        if (err) {
3467                dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to map module\n",
3468                        mlxsw_sp_port->local_port);
3469                goto err_port_module_map;
3470        }
3471
3472        err = mlxsw_sp_port_swid_set(mlxsw_sp_port, 0);
3473        if (err) {
3474                dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set SWID\n",
3475                        mlxsw_sp_port->local_port);
3476                goto err_port_swid_set;
3477        }
3478
3479        err = mlxsw_sp_port_dev_addr_init(mlxsw_sp_port);
3480        if (err) {
3481                dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unable to init port mac address\n",
3482                        mlxsw_sp_port->local_port);
3483                goto err_dev_addr_init;
3484        }
3485
3486        netif_carrier_off(dev);
3487
3488        dev->features |= NETIF_F_NETNS_LOCAL | NETIF_F_LLTX | NETIF_F_SG |
3489                         NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_TC;
3490        dev->hw_features |= NETIF_F_HW_TC | NETIF_F_LOOPBACK;
3491
3492        dev->min_mtu = 0;
3493        dev->max_mtu = ETH_MAX_MTU;
3494
3495        /* Each packet needs to have a Tx header (metadata) on top of all
3496         * other headers.
3497         */
3498        dev->needed_headroom = MLXSW_TXHDR_LEN;
3499
3500        err = mlxsw_sp_port_system_port_mapping_set(mlxsw_sp_port);
3501        if (err) {
3502                dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set system port mapping\n",
3503                        mlxsw_sp_port->local_port);
3504                goto err_port_system_port_mapping_set;
3505        }
3506
3507        err = mlxsw_sp_port_speed_by_width_set(mlxsw_sp_port, width);
3508        if (err) {
3509                dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to enable speeds\n",
3510                        mlxsw_sp_port->local_port);
3511                goto err_port_speed_by_width_set;
3512        }
3513
3514        err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, ETH_DATA_LEN);
3515        if (err) {
3516                dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set MTU\n",
3517                        mlxsw_sp_port->local_port);
3518                goto err_port_mtu_set;
3519        }
3520
3521        err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
3522        if (err)
3523                goto err_port_admin_status_set;
3524
3525        err = mlxsw_sp_port_buffers_init(mlxsw_sp_port);
3526        if (err) {
3527                dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize buffers\n",
3528                        mlxsw_sp_port->local_port);
3529                goto err_port_buffers_init;
3530        }
3531
3532        err = mlxsw_sp_port_ets_init(mlxsw_sp_port);
3533        if (err) {
3534                dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize ETS\n",
3535                        mlxsw_sp_port->local_port);
3536                goto err_port_ets_init;
3537        }
3538
3539        err = mlxsw_sp_port_tc_mc_mode_set(mlxsw_sp_port, true);
3540        if (err) {
3541                dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize TC MC mode\n",
3542                        mlxsw_sp_port->local_port);
3543                goto err_port_tc_mc_mode;
3544        }
3545
3546        /* ETS and buffers must be initialized before DCB. */
3547        err = mlxsw_sp_port_dcb_init(mlxsw_sp_port);
3548        if (err) {
3549                dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize DCB\n",
3550                        mlxsw_sp_port->local_port);
3551                goto err_port_dcb_init;
3552        }
3553
3554        err = mlxsw_sp_port_fids_init(mlxsw_sp_port);
3555        if (err) {
3556                dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize FIDs\n",
3557                        mlxsw_sp_port->local_port);
3558                goto err_port_fids_init;
3559        }
3560
3561        err = mlxsw_sp_tc_qdisc_init(mlxsw_sp_port);
3562        if (err) {
3563                dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize TC qdiscs\n",
3564                        mlxsw_sp_port->local_port);
3565                goto err_port_qdiscs_init;
3566        }
3567
3568        err = mlxsw_sp_port_nve_init(mlxsw_sp_port);
3569        if (err) {
3570                dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize NVE\n",
3571                        mlxsw_sp_port->local_port);
3572                goto err_port_nve_init;
3573        }
3574
3575        err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, MLXSW_SP_DEFAULT_VID);
3576        if (err) {
3577                dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set PVID\n",
3578                        mlxsw_sp_port->local_port);
3579                goto err_port_pvid_set;
3580        }
3581
3582        mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_create(mlxsw_sp_port,
3583                                                       MLXSW_SP_DEFAULT_VID);
3584        if (IS_ERR(mlxsw_sp_port_vlan)) {
3585                dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to create VID 1\n",
3586                        mlxsw_sp_port->local_port);
3587                err = PTR_ERR(mlxsw_sp_port_vlan);
3588                goto err_port_vlan_create;
3589        }
3590        mlxsw_sp_port->default_vlan = mlxsw_sp_port_vlan;
3591
3592        mlxsw_sp->ports[local_port] = mlxsw_sp_port;
3593        err = register_netdev(dev);
3594        if (err) {
3595                dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to register netdev\n",
3596                        mlxsw_sp_port->local_port);
3597                goto err_register_netdev;
3598        }
3599
3600        mlxsw_core_port_eth_set(mlxsw_sp->core, mlxsw_sp_port->local_port,
3601                                mlxsw_sp_port, dev);
3602        mlxsw_core_schedule_dw(&mlxsw_sp_port->periodic_hw_stats.update_dw, 0);
3603        return 0;
3604
3605err_register_netdev:
3606        mlxsw_sp->ports[local_port] = NULL;
3607        mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan);
3608err_port_vlan_create:
3609err_port_pvid_set:
3610        mlxsw_sp_port_nve_fini(mlxsw_sp_port);
3611err_port_nve_init:
3612        mlxsw_sp_tc_qdisc_fini(mlxsw_sp_port);
3613err_port_qdiscs_init:
3614        mlxsw_sp_port_fids_fini(mlxsw_sp_port);
3615err_port_fids_init:
3616        mlxsw_sp_port_dcb_fini(mlxsw_sp_port);
3617err_port_dcb_init:
3618        mlxsw_sp_port_tc_mc_mode_set(mlxsw_sp_port, false);
3619err_port_tc_mc_mode:
3620err_port_ets_init:
3621err_port_buffers_init:
3622err_port_admin_status_set:
3623err_port_mtu_set:
3624err_port_speed_by_width_set:
3625err_port_system_port_mapping_set:
3626err_dev_addr_init:
3627        mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT);
3628err_port_swid_set:
3629        mlxsw_sp_port_module_unmap(mlxsw_sp_port);
3630err_port_module_map:
3631        kfree(mlxsw_sp_port->sample);
3632err_alloc_sample:
3633        free_percpu(mlxsw_sp_port->pcpu_stats);
3634err_alloc_stats:
3635        free_netdev(dev);
3636err_alloc_etherdev:
3637        mlxsw_core_port_fini(mlxsw_sp->core, local_port);
3638        return err;
3639}
3640
3641static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port)
3642{
3643        struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];
3644
3645        cancel_delayed_work_sync(&mlxsw_sp_port->periodic_hw_stats.update_dw);
3646        mlxsw_core_port_clear(mlxsw_sp->core, local_port, mlxsw_sp);
3647        unregister_netdev(mlxsw_sp_port->dev); /* This calls ndo_stop */
3648        mlxsw_sp->ports[local_port] = NULL;
3649        mlxsw_sp_port_vlan_flush(mlxsw_sp_port, true);
3650        mlxsw_sp_port_nve_fini(mlxsw_sp_port);
3651        mlxsw_sp_tc_qdisc_fini(mlxsw_sp_port);
3652        mlxsw_sp_port_fids_fini(mlxsw_sp_port);
3653        mlxsw_sp_port_dcb_fini(mlxsw_sp_port);
3654        mlxsw_sp_port_tc_mc_mode_set(mlxsw_sp_port, false);
3655        mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT);
3656        mlxsw_sp_port_module_unmap(mlxsw_sp_port);
3657        kfree(mlxsw_sp_port->sample);
3658        free_percpu(mlxsw_sp_port->pcpu_stats);
3659        WARN_ON_ONCE(!list_empty(&mlxsw_sp_port->vlans_list));
3660        free_netdev(mlxsw_sp_port->dev);
3661        mlxsw_core_port_fini(mlxsw_sp->core, local_port);
3662}
3663
3664static bool mlxsw_sp_port_created(struct mlxsw_sp *mlxsw_sp, u8 local_port)
3665{
3666        return mlxsw_sp->ports[local_port] != NULL;
3667}
3668
3669static void mlxsw_sp_ports_remove(struct mlxsw_sp *mlxsw_sp)
3670{
3671        int i;
3672
3673        for (i = 1; i < mlxsw_core_max_ports(mlxsw_sp->core); i++)
3674                if (mlxsw_sp_port_created(mlxsw_sp, i))
3675                        mlxsw_sp_port_remove(mlxsw_sp, i);
3676        kfree(mlxsw_sp->port_to_module);
3677        kfree(mlxsw_sp->ports);
3678}
3679
3680static int mlxsw_sp_ports_create(struct mlxsw_sp *mlxsw_sp)
3681{
3682        unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sp->core);
3683        u8 module, width, lane;
3684        size_t alloc_size;
3685        int i;
3686        int err;
3687
3688        alloc_size = sizeof(struct mlxsw_sp_port *) * max_ports;
3689        mlxsw_sp->ports = kzalloc(alloc_size, GFP_KERNEL);
3690        if (!mlxsw_sp->ports)
3691                return -ENOMEM;
3692
3693        mlxsw_sp->port_to_module = kmalloc_array(max_ports, sizeof(int),
3694                                                 GFP_KERNEL);
3695        if (!mlxsw_sp->port_to_module) {
3696                err = -ENOMEM;
3697                goto err_port_to_module_alloc;
3698        }
3699
3700        for (i = 1; i < max_ports; i++) {
3701                /* Mark as invalid */
3702                mlxsw_sp->port_to_module[i] = -1;
3703
3704                err = mlxsw_sp_port_module_info_get(mlxsw_sp, i, &module,
3705                                                    &width, &lane);
3706                if (err)
3707                        goto err_port_module_info_get;
3708                if (!width)
3709                        continue;
3710                mlxsw_sp->port_to_module[i] = module;
3711                err = mlxsw_sp_port_create(mlxsw_sp, i, false,
3712                                           module, width, lane);
3713                if (err)
3714                        goto err_port_create;
3715        }
3716        return 0;
3717
3718err_port_create:
3719err_port_module_info_get:
3720        for (i--; i >= 1; i--)
3721                if (mlxsw_sp_port_created(mlxsw_sp, i))
3722                        mlxsw_sp_port_remove(mlxsw_sp, i);
3723        kfree(mlxsw_sp->port_to_module);
3724err_port_to_module_alloc:
3725        kfree(mlxsw_sp->ports);
3726        return err;
3727}
3728
3729static u8 mlxsw_sp_cluster_base_port_get(u8 local_port)
3730{
3731        u8 offset = (local_port - 1) % MLXSW_SP_PORTS_PER_CLUSTER_MAX;
3732
3733        return local_port - offset;
3734}
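    /* Illustrative, assuming a cluster size of four: local ports 5-8 share
     * a cluster, so each of them maps back to base port 5.
     */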
3735
3736static int mlxsw_sp_port_split_create(struct mlxsw_sp *mlxsw_sp, u8 base_port,
3737                                      u8 module, unsigned int count, u8 offset)
3738{
3739        u8 width = MLXSW_PORT_MODULE_MAX_WIDTH / count;
3740        int err, i;
3741
3742        for (i = 0; i < count; i++) {
3743                err = mlxsw_sp_port_create(mlxsw_sp, base_port + i * offset,
3744                                           true, module, width, i * width);
3745                if (err)
3746                        goto err_port_create;
3747        }
3748
3749        return 0;
3750
3751err_port_create:
3752        for (i--; i >= 0; i--)
3753                if (mlxsw_sp_port_created(mlxsw_sp, base_port + i * offset))
3754                        mlxsw_sp_port_remove(mlxsw_sp, base_port + i * offset);
3755        return err;
3756}
3757
3758static void mlxsw_sp_port_unsplit_create(struct mlxsw_sp *mlxsw_sp,
3759                                         u8 base_port, unsigned int count)
3760{
3761        u8 local_port, module, width = MLXSW_PORT_MODULE_MAX_WIDTH;
3762        int i;
3763
3764        /* Splitting by four means we need to re-create two ports, otherwise
3765         * only one.
3766         */
3767        count = count / 2;
3768
3769        for (i = 0; i < count; i++) {
3770                local_port = base_port + i * 2;
3771                if (mlxsw_sp->port_to_module[local_port] < 0)
3772                        continue;
3773                module = mlxsw_sp->port_to_module[local_port];
3774
3775                mlxsw_sp_port_create(mlxsw_sp, local_port, false, module,
3776                                     width, 0);
3777        }
3778}
3779
3780static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u8 local_port,
3781                               unsigned int count,
3782                               struct netlink_ext_ack *extack)
3783{
3784        struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
3785        u8 local_ports_in_1x, local_ports_in_2x, offset;
3786        struct mlxsw_sp_port *mlxsw_sp_port;
3787        u8 module, cur_width, base_port;
3788        int i;
3789        int err;
3790
3791        if (!MLXSW_CORE_RES_VALID(mlxsw_core, LOCAL_PORTS_IN_1X) ||
3792            !MLXSW_CORE_RES_VALID(mlxsw_core, LOCAL_PORTS_IN_2X))
3793                return -EIO;
3794
3795        local_ports_in_1x = MLXSW_CORE_RES_GET(mlxsw_core, LOCAL_PORTS_IN_1X);
3796        local_ports_in_2x = MLXSW_CORE_RES_GET(mlxsw_core, LOCAL_PORTS_IN_2X);
3797
3798        mlxsw_sp_port = mlxsw_sp->ports[local_port];
3799        if (!mlxsw_sp_port) {
3800                dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n",
3801                        local_port);
3802                NL_SET_ERR_MSG_MOD(extack, "Port number does not exist");
3803                return -EINVAL;
3804        }
3805
3806        module = mlxsw_sp_port->mapping.module;
3807        cur_width = mlxsw_sp_port->mapping.width;
3808
3809        if (count != 2 && count != 4) {
3810                netdev_err(mlxsw_sp_port->dev, "Port can only be split into 2 or 4 ports\n");
3811                NL_SET_ERR_MSG_MOD(extack, "Port can only be split into 2 or 4 ports");
3812                return -EINVAL;
3813        }
3814
3815        if (cur_width != MLXSW_PORT_MODULE_MAX_WIDTH) {
3816                netdev_err(mlxsw_sp_port->dev, "Port cannot be split further\n");
3817                NL_SET_ERR_MSG_MOD(extack, "Port cannot be split further");
3818                return -EINVAL;
3819        }
3820
3821        /* Make sure the secondary (even) ports needed for the split are free. */
3822        if (count == 2) {
3823                offset = local_ports_in_2x;
3824                base_port = local_port;
3825                if (mlxsw_sp->ports[base_port + local_ports_in_2x]) {
3826                        netdev_err(mlxsw_sp_port->dev, "Invalid split configuration\n");
3827                        NL_SET_ERR_MSG_MOD(extack, "Invalid split configuration");
3828                        return -EINVAL;
3829                }
3830        } else {
3831                offset = local_ports_in_1x;
3832                base_port = mlxsw_sp_cluster_base_port_get(local_port);
3833                if (mlxsw_sp->ports[base_port + 1] ||
3834                    mlxsw_sp->ports[base_port + 3]) {
3835                        netdev_err(mlxsw_sp_port->dev, "Invalid split configuration\n");
3836                        NL_SET_ERR_MSG_MOD(extack, "Invalid split configuration");
3837                        return -EINVAL;
3838                }
3839        }
3840
3841        for (i = 0; i < count; i++)
3842                if (mlxsw_sp_port_created(mlxsw_sp, base_port + i * offset))
3843                        mlxsw_sp_port_remove(mlxsw_sp, base_port + i * offset);
3844
3845        err = mlxsw_sp_port_split_create(mlxsw_sp, base_port, module, count,
3846                                         offset);
3847        if (err) {
3848                dev_err(mlxsw_sp->bus_info->dev, "Failed to create split ports\n");
3849                goto err_port_split_create;
3850        }
3851
3852        return 0;
3853
3854err_port_split_create:
3855        mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, count);
3856        return err;
3857}
3858
3859static int mlxsw_sp_port_unsplit(struct mlxsw_core *mlxsw_core, u8 local_port,
3860                                 struct netlink_ext_ack *extack)
3861{
3862        struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
3863        u8 local_ports_in_1x, local_ports_in_2x, offset;
3864        struct mlxsw_sp_port *mlxsw_sp_port;
3865        u8 cur_width, base_port;
3866        unsigned int count;
3867        int i;
3868
3869        if (!MLXSW_CORE_RES_VALID(mlxsw_core, LOCAL_PORTS_IN_1X) ||
3870            !MLXSW_CORE_RES_VALID(mlxsw_core, LOCAL_PORTS_IN_2X))
3871                return -EIO;
3872
3873        local_ports_in_1x = MLXSW_CORE_RES_GET(mlxsw_core, LOCAL_PORTS_IN_1X);
3874        local_ports_in_2x = MLXSW_CORE_RES_GET(mlxsw_core, LOCAL_PORTS_IN_2X);
3875
3876        mlxsw_sp_port = mlxsw_sp->ports[local_port];
3877        if (!mlxsw_sp_port) {
3878                dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n",
3879                        local_port);
3880                NL_SET_ERR_MSG_MOD(extack, "Port number does not exist");
3881                return -EINVAL;
3882        }
3883
3884        if (!mlxsw_sp_port->split) {
3885                netdev_err(mlxsw_sp_port->dev, "Port was not split\n");
3886                NL_SET_ERR_MSG_MOD(extack, "Port was not split");
3887                return -EINVAL;
3888        }
3889
3890        cur_width = mlxsw_sp_port->mapping.width;
3891        count = cur_width == 1 ? 4 : 2;
3892
3893        if (count == 2)
3894                offset = local_ports_in_2x;
3895        else
3896                offset = local_ports_in_1x;
3897
3898        base_port = mlxsw_sp_cluster_base_port_get(local_port);
3899
3900        /* Determine which ports to remove. */
3901        if (count == 2 && local_port >= base_port + 2)
3902                base_port = base_port + 2;
3903
3904        for (i = 0; i < count; i++)
3905                if (mlxsw_sp_port_created(mlxsw_sp, base_port + i * offset))
3906                        mlxsw_sp_port_remove(mlxsw_sp, base_port + i * offset);
3907
3908        mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, count);
3909
3910        return 0;
3911}
3912
3913static void mlxsw_sp_pude_event_func(const struct mlxsw_reg_info *reg,
3914                                     char *pude_pl, void *priv)
3915{
3916        struct mlxsw_sp *mlxsw_sp = priv;
3917        struct mlxsw_sp_port *mlxsw_sp_port;
3918        enum mlxsw_reg_pude_oper_status status;
3919        u8 local_port;
3920
3921        local_port = mlxsw_reg_pude_local_port_get(pude_pl);
3922        mlxsw_sp_port = mlxsw_sp->ports[local_port];
3923        if (!mlxsw_sp_port)
3924                return;
3925
3926        status = mlxsw_reg_pude_oper_status_get(pude_pl);
3927        if (status == MLXSW_PORT_OPER_STATUS_UP) {
3928                netdev_info(mlxsw_sp_port->dev, "link up\n");
3929                netif_carrier_on(mlxsw_sp_port->dev);
3930        } else {
3931                netdev_info(mlxsw_sp_port->dev, "link down\n");
3932                netif_carrier_off(mlxsw_sp_port->dev);
3933        }
3934}
3935
3936static void mlxsw_sp_rx_listener_no_mark_func(struct sk_buff *skb,
3937                                              u8 local_port, void *priv)
3938{
3939        struct mlxsw_sp *mlxsw_sp = priv;
3940        struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];
3941        struct mlxsw_sp_port_pcpu_stats *pcpu_stats;
3942
3943        if (unlikely(!mlxsw_sp_port)) {
3944                dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: skb received for non-existent port\n",
3945                                     local_port);
3946                return;
3947        }
3948
3949        skb->dev = mlxsw_sp_port->dev;
3950
3951        pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats);
3952        u64_stats_update_begin(&pcpu_stats->syncp);
3953        pcpu_stats->rx_packets++;
3954        pcpu_stats->rx_bytes += skb->len;
3955        u64_stats_update_end(&pcpu_stats->syncp);
3956
3957        skb->protocol = eth_type_trans(skb, skb->dev);
3958        netif_receive_skb(skb);
3959}
3960
3961static void mlxsw_sp_rx_listener_mark_func(struct sk_buff *skb, u8 local_port,
3962                                           void *priv)
3963{
3964        skb->offload_fwd_mark = 1;
3965        return mlxsw_sp_rx_listener_no_mark_func(skb, local_port, priv);
3966}
3967
3968static void mlxsw_sp_rx_listener_l3_mark_func(struct sk_buff *skb,
3969                                              u8 local_port, void *priv)
3970{
3971        skb->offload_l3_fwd_mark = 1;
3972        skb->offload_fwd_mark = 1;
3973        return mlxsw_sp_rx_listener_no_mark_func(skb, local_port, priv);
3974}
3975
3976static void mlxsw_sp_rx_listener_sample_func(struct sk_buff *skb, u8 local_port,
3977                                             void *priv)
3978{
3979        struct mlxsw_sp *mlxsw_sp = priv;
3980        struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];
3981        struct psample_group *psample_group;
3982        u32 size;
3983
3984        if (unlikely(!mlxsw_sp_port)) {
3985                dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: sample skb received for non-existent port\n",
3986                                     local_port);
3987                goto out;
3988        }
3989        if (unlikely(!mlxsw_sp_port->sample)) {
3990                dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: sample skb received on unsupported port\n",
3991                                     local_port);
3992                goto out;
3993        }
3994
3995        size = mlxsw_sp_port->sample->truncate ?
3996                  mlxsw_sp_port->sample->trunc_size : skb->len;
3997
3998        rcu_read_lock();
3999        psample_group = rcu_dereference(mlxsw_sp_port->sample->psample_group);
4000        if (!psample_group)
4001                goto out_unlock;
4002        psample_sample_packet(psample_group, skb, size,
4003                              mlxsw_sp_port->dev->ifindex, 0,
4004                              mlxsw_sp_port->sample->rate);
4005out_unlock:
4006        rcu_read_unlock();
4007out:
4008        consume_skb(skb);
4009}
4010
4011#define MLXSW_SP_RXL_NO_MARK(_trap_id, _action, _trap_group, _is_ctrl)  \
4012        MLXSW_RXL(mlxsw_sp_rx_listener_no_mark_func, _trap_id, _action, \
4013                  _is_ctrl, SP_##_trap_group, DISCARD)
4014
4015#define MLXSW_SP_RXL_MARK(_trap_id, _action, _trap_group, _is_ctrl)     \
4016        MLXSW_RXL(mlxsw_sp_rx_listener_mark_func, _trap_id, _action,    \
4017                _is_ctrl, SP_##_trap_group, DISCARD)
4018
4019#define MLXSW_SP_RXL_L3_MARK(_trap_id, _action, _trap_group, _is_ctrl)  \
4020        MLXSW_RXL(mlxsw_sp_rx_listener_l3_mark_func, _trap_id, _action, \
4021                _is_ctrl, SP_##_trap_group, DISCARD)
4022
4023#define MLXSW_SP_EVENTL(_func, _trap_id)                \
4024        MLXSW_EVENTL(_func, _trap_id, SP_EVENT)
4025
4026static const struct mlxsw_listener mlxsw_sp_listener[] = {
4027        /* Events */
4028        MLXSW_SP_EVENTL(mlxsw_sp_pude_event_func, PUDE),
4029        /* L2 traps */
4030        MLXSW_SP_RXL_NO_MARK(STP, TRAP_TO_CPU, STP, true),
4031        MLXSW_SP_RXL_NO_MARK(LACP, TRAP_TO_CPU, LACP, true),
4032        MLXSW_SP_RXL_NO_MARK(LLDP, TRAP_TO_CPU, LLDP, true),
4033        MLXSW_SP_RXL_MARK(DHCP, MIRROR_TO_CPU, DHCP, false),
4034        MLXSW_SP_RXL_MARK(IGMP_QUERY, MIRROR_TO_CPU, IGMP, false),
4035        MLXSW_SP_RXL_NO_MARK(IGMP_V1_REPORT, TRAP_TO_CPU, IGMP, false),
4036        MLXSW_SP_RXL_NO_MARK(IGMP_V2_REPORT, TRAP_TO_CPU, IGMP, false),
4037        MLXSW_SP_RXL_NO_MARK(IGMP_V2_LEAVE, TRAP_TO_CPU, IGMP, false),
4038        MLXSW_SP_RXL_NO_MARK(IGMP_V3_REPORT, TRAP_TO_CPU, IGMP, false),
4039        MLXSW_SP_RXL_MARK(ARPBC, MIRROR_TO_CPU, ARP, false),
4040        MLXSW_SP_RXL_MARK(ARPUC, MIRROR_TO_CPU, ARP, false),
4041        MLXSW_SP_RXL_NO_MARK(FID_MISS, TRAP_TO_CPU, IP2ME, false),
4042        MLXSW_SP_RXL_MARK(IPV6_MLDV12_LISTENER_QUERY, MIRROR_TO_CPU, IPV6_MLD,
4043                          false),
4044        MLXSW_SP_RXL_NO_MARK(IPV6_MLDV1_LISTENER_REPORT, TRAP_TO_CPU, IPV6_MLD,
4045                             false),
4046        MLXSW_SP_RXL_NO_MARK(IPV6_MLDV1_LISTENER_DONE, TRAP_TO_CPU, IPV6_MLD,
4047                             false),
4048        MLXSW_SP_RXL_NO_MARK(IPV6_MLDV2_LISTENER_REPORT, TRAP_TO_CPU, IPV6_MLD,
4049                             false),
4050        /* L3 traps */
4051        MLXSW_SP_RXL_MARK(MTUERROR, TRAP_TO_CPU, ROUTER_EXP, false),
4052        MLXSW_SP_RXL_MARK(TTLERROR, TRAP_TO_CPU, ROUTER_EXP, false),
4053        MLXSW_SP_RXL_L3_MARK(LBERROR, MIRROR_TO_CPU, LBERROR, false),
4054        MLXSW_SP_RXL_MARK(IP2ME, TRAP_TO_CPU, IP2ME, false),
4055        MLXSW_SP_RXL_MARK(IPV6_UNSPECIFIED_ADDRESS, TRAP_TO_CPU, ROUTER_EXP,
4056                          false),
4057        MLXSW_SP_RXL_MARK(IPV6_LINK_LOCAL_DEST, TRAP_TO_CPU, ROUTER_EXP, false),
4058        MLXSW_SP_RXL_MARK(IPV6_LINK_LOCAL_SRC, TRAP_TO_CPU, ROUTER_EXP, false),
4059        MLXSW_SP_RXL_MARK(IPV6_ALL_NODES_LINK, TRAP_TO_CPU, ROUTER_EXP, false),
4060        MLXSW_SP_RXL_MARK(IPV6_ALL_ROUTERS_LINK, TRAP_TO_CPU, ROUTER_EXP,
4061                          false),
4062        MLXSW_SP_RXL_MARK(IPV4_OSPF, TRAP_TO_CPU, OSPF, false),
4063        MLXSW_SP_RXL_MARK(IPV6_OSPF, TRAP_TO_CPU, OSPF, false),
4064        MLXSW_SP_RXL_MARK(IPV6_DHCP, TRAP_TO_CPU, DHCP, false),
4065        MLXSW_SP_RXL_MARK(RTR_INGRESS0, TRAP_TO_CPU, REMOTE_ROUTE, false),
4066        MLXSW_SP_RXL_MARK(IPV4_BGP, TRAP_TO_CPU, BGP, false),
4067        MLXSW_SP_RXL_MARK(IPV6_BGP, TRAP_TO_CPU, BGP, false),
4068        MLXSW_SP_RXL_MARK(L3_IPV6_ROUTER_SOLICITATION, TRAP_TO_CPU, IPV6_ND,
4069                          false),
4070        MLXSW_SP_RXL_MARK(L3_IPV6_ROUTER_ADVERTISMENT, TRAP_TO_CPU, IPV6_ND,
4071                          false),
4072        MLXSW_SP_RXL_MARK(L3_IPV6_NEIGHBOR_SOLICITATION, TRAP_TO_CPU, IPV6_ND,
4073                          false),
4074        MLXSW_SP_RXL_MARK(L3_IPV6_NEIGHBOR_ADVERTISMENT, TRAP_TO_CPU, IPV6_ND,
4075                          false),
4076        MLXSW_SP_RXL_MARK(L3_IPV6_REDIRECTION, TRAP_TO_CPU, IPV6_ND, false),
4077        MLXSW_SP_RXL_MARK(IPV6_MC_LINK_LOCAL_DEST, TRAP_TO_CPU, ROUTER_EXP,
4078                          false),
4079        MLXSW_SP_RXL_MARK(HOST_MISS_IPV4, TRAP_TO_CPU, HOST_MISS, false),
4080        MLXSW_SP_RXL_MARK(HOST_MISS_IPV6, TRAP_TO_CPU, HOST_MISS, false),
4081        MLXSW_SP_RXL_MARK(ROUTER_ALERT_IPV4, TRAP_TO_CPU, ROUTER_EXP, false),
4082        MLXSW_SP_RXL_MARK(ROUTER_ALERT_IPV6, TRAP_TO_CPU, ROUTER_EXP, false),
4083        MLXSW_SP_RXL_MARK(IPIP_DECAP_ERROR, TRAP_TO_CPU, ROUTER_EXP, false),
4084        MLXSW_SP_RXL_MARK(DECAP_ECN0, TRAP_TO_CPU, ROUTER_EXP, false),
4085        MLXSW_SP_RXL_MARK(IPV4_VRRP, TRAP_TO_CPU, ROUTER_EXP, false),
4086        MLXSW_SP_RXL_MARK(IPV6_VRRP, TRAP_TO_CPU, ROUTER_EXP, false),
4087        /* PKT Sample trap */
4088        MLXSW_RXL(mlxsw_sp_rx_listener_sample_func, PKT_SAMPLE, MIRROR_TO_CPU,
4089                  false, SP_IP2ME, DISCARD),
4090        /* ACL trap */
4091        MLXSW_SP_RXL_NO_MARK(ACL0, TRAP_TO_CPU, IP2ME, false),
4092        /* Multicast Router Traps */
4093        MLXSW_SP_RXL_MARK(IPV4_PIM, TRAP_TO_CPU, PIM, false),
4094        MLXSW_SP_RXL_MARK(IPV6_PIM, TRAP_TO_CPU, PIM, false),
4095        MLXSW_SP_RXL_MARK(RPF, TRAP_TO_CPU, RPF, false),
4096        MLXSW_SP_RXL_MARK(ACL1, TRAP_TO_CPU, MULTICAST, false),
4097        MLXSW_SP_RXL_L3_MARK(ACL2, TRAP_TO_CPU, MULTICAST, false),
4098        /* NVE traps */
4099        MLXSW_SP_RXL_MARK(NVE_ENCAP_ARP, TRAP_TO_CPU, ARP, false),
4100        MLXSW_SP_RXL_NO_MARK(NVE_DECAP_ARP, TRAP_TO_CPU, ARP, false),
4101};
4102
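/* Configure a policer for every CPU trap group that should be rate
 * limited. The policers are packet based (is_bytes is false); trap
 * groups without an explicit case below are skipped and left without
 * a policer configured here.
 */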
4103static int mlxsw_sp_cpu_policers_set(struct mlxsw_core *mlxsw_core)
4104{
4105        char qpcr_pl[MLXSW_REG_QPCR_LEN];
4106        enum mlxsw_reg_qpcr_ir_units ir_units;
4107        int max_cpu_policers;
4108        bool is_bytes;
4109        u8 burst_size;
4110        u32 rate;
4111        int i, err;
4112
4113        if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_CPU_POLICERS))
4114                return -EIO;
4115
4116        max_cpu_policers = MLXSW_CORE_RES_GET(mlxsw_core, MAX_CPU_POLICERS);
4117
4118        ir_units = MLXSW_REG_QPCR_IR_UNITS_M;
4119        for (i = 0; i < max_cpu_policers; i++) {
4120                is_bytes = false;
4121                switch (i) {
4122                case MLXSW_REG_HTGT_TRAP_GROUP_SP_STP:
4123                case MLXSW_REG_HTGT_TRAP_GROUP_SP_LACP:
4124                case MLXSW_REG_HTGT_TRAP_GROUP_SP_LLDP:
4125                case MLXSW_REG_HTGT_TRAP_GROUP_SP_OSPF:
4126                case MLXSW_REG_HTGT_TRAP_GROUP_SP_PIM:
4127                case MLXSW_REG_HTGT_TRAP_GROUP_SP_RPF:
4128                case MLXSW_REG_HTGT_TRAP_GROUP_SP_LBERROR:
4129                        rate = 128;
4130                        burst_size = 7;
4131                        break;
4132                case MLXSW_REG_HTGT_TRAP_GROUP_SP_IGMP:
4133                case MLXSW_REG_HTGT_TRAP_GROUP_SP_IPV6_MLD:
4134                        rate = 16 * 1024;
4135                        burst_size = 10;
4136                        break;
4137                case MLXSW_REG_HTGT_TRAP_GROUP_SP_BGP:
4138                case MLXSW_REG_HTGT_TRAP_GROUP_SP_ARP:
4139                case MLXSW_REG_HTGT_TRAP_GROUP_SP_DHCP:
4140                case MLXSW_REG_HTGT_TRAP_GROUP_SP_HOST_MISS:
4141                case MLXSW_REG_HTGT_TRAP_GROUP_SP_ROUTER_EXP:
4142                case MLXSW_REG_HTGT_TRAP_GROUP_SP_REMOTE_ROUTE:
4143                case MLXSW_REG_HTGT_TRAP_GROUP_SP_IPV6_ND:
4144                case MLXSW_REG_HTGT_TRAP_GROUP_SP_MULTICAST:
4145                        rate = 1024;
4146                        burst_size = 7;
4147                        break;
4148                case MLXSW_REG_HTGT_TRAP_GROUP_SP_IP2ME:
4149                        rate = 1024;
4150                        burst_size = 7;
4151                        break;
4152                default:
4153                        continue;
4154                }
4155
4156                mlxsw_reg_qpcr_pack(qpcr_pl, i, ir_units, is_bytes, rate,
4157                                    burst_size);
4158                err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(qpcr), qpcr_pl);
4159                if (err)
4160                        return err;
4161        }
4162
4163        return 0;
4164}
4165
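/* Bind each trap group to a CPU priority, traffic class and policer.
 * The policer index is simply the trap group index, except for the
 * event group, which is intentionally left unpoliced.
 */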
4166static int mlxsw_sp_trap_groups_set(struct mlxsw_core *mlxsw_core)
4167{
4168        char htgt_pl[MLXSW_REG_HTGT_LEN];
4169        enum mlxsw_reg_htgt_trap_group i;
4170        int max_cpu_policers;
4171        int max_trap_groups;
4172        u8 priority, tc;
4173        u16 policer_id;
4174        int err;
4175
4176        if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_TRAP_GROUPS))
4177                return -EIO;
4178
4179        max_trap_groups = MLXSW_CORE_RES_GET(mlxsw_core, MAX_TRAP_GROUPS);
4180        max_cpu_policers = MLXSW_CORE_RES_GET(mlxsw_core, MAX_CPU_POLICERS);
4181
4182        for (i = 0; i < max_trap_groups; i++) {
4183                policer_id = i;
4184                switch (i) {
4185                case MLXSW_REG_HTGT_TRAP_GROUP_SP_STP:
4186                case MLXSW_REG_HTGT_TRAP_GROUP_SP_LACP:
4187                case MLXSW_REG_HTGT_TRAP_GROUP_SP_LLDP:
4188                case MLXSW_REG_HTGT_TRAP_GROUP_SP_OSPF:
4189                case MLXSW_REG_HTGT_TRAP_GROUP_SP_PIM:
4190                        priority = 5;
4191                        tc = 5;
4192                        break;
4193                case MLXSW_REG_HTGT_TRAP_GROUP_SP_BGP:
4194                case MLXSW_REG_HTGT_TRAP_GROUP_SP_DHCP:
4195                        priority = 4;
4196                        tc = 4;
4197                        break;
4198                case MLXSW_REG_HTGT_TRAP_GROUP_SP_IGMP:
4199                case MLXSW_REG_HTGT_TRAP_GROUP_SP_IP2ME:
4200                case MLXSW_REG_HTGT_TRAP_GROUP_SP_IPV6_MLD:
4201                        priority = 3;
4202                        tc = 3;
4203                        break;
4204                case MLXSW_REG_HTGT_TRAP_GROUP_SP_ARP:
4205                case MLXSW_REG_HTGT_TRAP_GROUP_SP_IPV6_ND:
4206                case MLXSW_REG_HTGT_TRAP_GROUP_SP_RPF:
4207                        priority = 2;
4208                        tc = 2;
4209                        break;
4210                case MLXSW_REG_HTGT_TRAP_GROUP_SP_HOST_MISS:
4211                case MLXSW_REG_HTGT_TRAP_GROUP_SP_ROUTER_EXP:
4212                case MLXSW_REG_HTGT_TRAP_GROUP_SP_REMOTE_ROUTE:
4213                case MLXSW_REG_HTGT_TRAP_GROUP_SP_MULTICAST:
4214                case MLXSW_REG_HTGT_TRAP_GROUP_SP_LBERROR:
4215                        priority = 1;
4216                        tc = 1;
4217                        break;
4218                case MLXSW_REG_HTGT_TRAP_GROUP_SP_EVENT:
4219                        priority = MLXSW_REG_HTGT_DEFAULT_PRIORITY;
4220                        tc = MLXSW_REG_HTGT_DEFAULT_TC;
4221                        policer_id = MLXSW_REG_HTGT_INVALID_POLICER;
4222                        break;
4223                default:
4224                        continue;
4225                }
4226
4227                if (max_cpu_policers <= policer_id &&
4228                    policer_id != MLXSW_REG_HTGT_INVALID_POLICER)
4229                        return -EIO;
4230
4231                mlxsw_reg_htgt_pack(htgt_pl, i, policer_id, priority, tc);
4232                err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl);
4233                if (err)
4234                        return err;
4235        }
4236
4237        return 0;
4238}
4239
4240static int mlxsw_sp_traps_init(struct mlxsw_sp *mlxsw_sp)
4241{
4242        int i;
4243        int err;
4244
4245        err = mlxsw_sp_cpu_policers_set(mlxsw_sp->core);
4246        if (err)
4247                return err;
4248
4249        err = mlxsw_sp_trap_groups_set(mlxsw_sp->core);
4250        if (err)
4251                return err;
4252
4253        for (i = 0; i < ARRAY_SIZE(mlxsw_sp_listener); i++) {
4254                err = mlxsw_core_trap_register(mlxsw_sp->core,
4255                                               &mlxsw_sp_listener[i],
4256                                               mlxsw_sp);
4257                if (err)
4258                        goto err_listener_register;
4259
4260        }
4261        return 0;
4262
4263err_listener_register:
4264        for (i--; i >= 0; i--) {
4265                mlxsw_core_trap_unregister(mlxsw_sp->core,
4266                                           &mlxsw_sp_listener[i],
4267                                           mlxsw_sp);
4268        }
4269        return err;
4270}
4271
4272static void mlxsw_sp_traps_fini(struct mlxsw_sp *mlxsw_sp)
4273{
4274        int i;
4275
4276        for (i = 0; i < ARRAY_SIZE(mlxsw_sp_listener); i++) {
4277                mlxsw_core_trap_unregister(mlxsw_sp->core,
4278                                           &mlxsw_sp_listener[i],
4279                                           mlxsw_sp);
4280        }
4281}
4282
4283#define MLXSW_SP_LAG_SEED_INIT 0xcafecafe
4284
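/* Configure the LAG hash (seeded from the base MAC) and allocate the
 * array used to track the upper device of each LAG.
 */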
4285static int mlxsw_sp_lag_init(struct mlxsw_sp *mlxsw_sp)
4286{
4287        char slcr_pl[MLXSW_REG_SLCR_LEN];
4288        u32 seed;
4289        int err;
4290
4291        seed = jhash(mlxsw_sp->base_mac, sizeof(mlxsw_sp->base_mac),
4292                     MLXSW_SP_LAG_SEED_INIT);
4293        mlxsw_reg_slcr_pack(slcr_pl, MLXSW_REG_SLCR_LAG_HASH_SMAC |
4294                                     MLXSW_REG_SLCR_LAG_HASH_DMAC |
4295                                     MLXSW_REG_SLCR_LAG_HASH_ETHERTYPE |
4296                                     MLXSW_REG_SLCR_LAG_HASH_VLANID |
4297                                     MLXSW_REG_SLCR_LAG_HASH_SIP |
4298                                     MLXSW_REG_SLCR_LAG_HASH_DIP |
4299                                     MLXSW_REG_SLCR_LAG_HASH_SPORT |
4300                                     MLXSW_REG_SLCR_LAG_HASH_DPORT |
4301                                     MLXSW_REG_SLCR_LAG_HASH_IPPROTO, seed);
4302        err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcr), slcr_pl);
4303        if (err)
4304                return err;
4305
4306        if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LAG) ||
4307            !MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LAG_MEMBERS))
4308                return -EIO;
4309
4310        mlxsw_sp->lags = kcalloc(MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_LAG),
4311                                 sizeof(struct mlxsw_sp_upper),
4312                                 GFP_KERNEL);
4313        if (!mlxsw_sp->lags)
4314                return -ENOMEM;
4315
4316        return 0;
4317}
4318
4319static void mlxsw_sp_lag_fini(struct mlxsw_sp *mlxsw_sp)
4320{
4321        kfree(mlxsw_sp->lags);
4322}
4323
4324static int mlxsw_sp_basic_trap_groups_set(struct mlxsw_core *mlxsw_core)
4325{
4326        char htgt_pl[MLXSW_REG_HTGT_LEN];
4327
4328        mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_EMAD,
4329                            MLXSW_REG_HTGT_INVALID_POLICER,
4330                            MLXSW_REG_HTGT_DEFAULT_PRIORITY,
4331                            MLXSW_REG_HTGT_DEFAULT_TC);
4332        return mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl);
4333}
4334
4335static int mlxsw_sp_netdevice_event(struct notifier_block *unused,
4336                                    unsigned long event, void *ptr);
4337
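/* Common initialization path for all Spectrum generations. The ASIC
 * specific operations (KVDL, ACL, NVE, etc.) are expected to be filled
 * in by mlxsw_sp1_init()/mlxsw_sp2_init() before this function runs.
 */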
4338static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core,
4339                         const struct mlxsw_bus_info *mlxsw_bus_info)
4340{
4341        struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
4342        int err;
4343
4344        mlxsw_sp->core = mlxsw_core;
4345        mlxsw_sp->bus_info = mlxsw_bus_info;
4346
4347        err = mlxsw_sp_fw_rev_validate(mlxsw_sp);
4348        if (err)
4349                return err;
4350
4351        err = mlxsw_sp_base_mac_get(mlxsw_sp);
4352        if (err) {
4353                dev_err(mlxsw_sp->bus_info->dev, "Failed to get base mac\n");
4354                return err;
4355        }
4356
4357        err = mlxsw_sp_kvdl_init(mlxsw_sp);
4358        if (err) {
4359                dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize KVDL\n");
4360                return err;
4361        }
4362
4363        err = mlxsw_sp_fids_init(mlxsw_sp);
4364        if (err) {
4365                dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize FIDs\n");
4366                goto err_fids_init;
4367        }
4368
4369        err = mlxsw_sp_traps_init(mlxsw_sp);
4370        if (err) {
4371                dev_err(mlxsw_sp->bus_info->dev, "Failed to set traps\n");
4372                goto err_traps_init;
4373        }
4374
4375        err = mlxsw_sp_buffers_init(mlxsw_sp);
4376        if (err) {
4377                dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize buffers\n");
4378                goto err_buffers_init;
4379        }
4380
4381        err = mlxsw_sp_lag_init(mlxsw_sp);
4382        if (err) {
4383                dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize LAG\n");
4384                goto err_lag_init;
4385        }
4386
4387        /* Initialize SPAN before router and switchdev, so that those components
4388         * can call mlxsw_sp_span_respin().
4389         */
4390        err = mlxsw_sp_span_init(mlxsw_sp);
4391        if (err) {
4392                dev_err(mlxsw_sp->bus_info->dev, "Failed to init span system\n");
4393                goto err_span_init;
4394        }
4395
4396        err = mlxsw_sp_switchdev_init(mlxsw_sp);
4397        if (err) {
4398                dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize switchdev\n");
4399                goto err_switchdev_init;
4400        }
4401
4402        err = mlxsw_sp_counter_pool_init(mlxsw_sp);
4403        if (err) {
4404                dev_err(mlxsw_sp->bus_info->dev, "Failed to init counter pool\n");
4405                goto err_counter_pool_init;
4406        }
4407
4408        err = mlxsw_sp_afa_init(mlxsw_sp);
4409        if (err) {
4410                dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize ACL actions\n");
4411                goto err_afa_init;
4412        }
4413
4414        err = mlxsw_sp_nve_init(mlxsw_sp);
4415        if (err) {
4416                dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize NVE\n");
4417                goto err_nve_init;
4418        }
4419
4420        err = mlxsw_sp_acl_init(mlxsw_sp);
4421        if (err) {
4422                dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize ACL\n");
4423                goto err_acl_init;
4424        }
4425
4426        err = mlxsw_sp_router_init(mlxsw_sp);
4427        if (err) {
4428                dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize router\n");
4429                goto err_router_init;
4430        }
4431
4432        /* Initialize the netdevice notifier after the router and SPAN are
4433         * initialized, so that the event handler can use router structures
4434         * and call SPAN respin.
4435         */
4436        mlxsw_sp->netdevice_nb.notifier_call = mlxsw_sp_netdevice_event;
4437        err = register_netdevice_notifier(&mlxsw_sp->netdevice_nb);
4438        if (err) {
4439                dev_err(mlxsw_sp->bus_info->dev, "Failed to register netdev notifier\n");
4440                goto err_netdev_notifier;
4441        }
4442
4443        err = mlxsw_sp_dpipe_init(mlxsw_sp);
4444        if (err) {
4445                dev_err(mlxsw_sp->bus_info->dev, "Failed to init pipeline debug\n");
4446                goto err_dpipe_init;
4447        }
4448
4449        err = mlxsw_sp_ports_create(mlxsw_sp);
4450        if (err) {
4451                dev_err(mlxsw_sp->bus_info->dev, "Failed to create ports\n");
4452                goto err_ports_create;
4453        }
4454
4455        return 0;
4456
4457err_ports_create:
4458        mlxsw_sp_dpipe_fini(mlxsw_sp);
4459err_dpipe_init:
4460        unregister_netdevice_notifier(&mlxsw_sp->netdevice_nb);
4461err_netdev_notifier:
4462        mlxsw_sp_router_fini(mlxsw_sp);
4463err_router_init:
4464        mlxsw_sp_acl_fini(mlxsw_sp);
4465err_acl_init:
4466        mlxsw_sp_nve_fini(mlxsw_sp);
4467err_nve_init:
4468        mlxsw_sp_afa_fini(mlxsw_sp);
4469err_afa_init:
4470        mlxsw_sp_counter_pool_fini(mlxsw_sp);
4471err_counter_pool_init:
4472        mlxsw_sp_switchdev_fini(mlxsw_sp);
4473err_switchdev_init:
4474        mlxsw_sp_span_fini(mlxsw_sp);
4475err_span_init:
4476        mlxsw_sp_lag_fini(mlxsw_sp);
4477err_lag_init:
4478        mlxsw_sp_buffers_fini(mlxsw_sp);
4479err_buffers_init:
4480        mlxsw_sp_traps_fini(mlxsw_sp);
4481err_traps_init:
4482        mlxsw_sp_fids_fini(mlxsw_sp);
4483err_fids_init:
4484        mlxsw_sp_kvdl_fini(mlxsw_sp);
4485        return err;
4486}
4487
4488static int mlxsw_sp1_init(struct mlxsw_core *mlxsw_core,
4489                          const struct mlxsw_bus_info *mlxsw_bus_info)
4490{
4491        struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
4492
4493        mlxsw_sp->req_rev = &mlxsw_sp1_fw_rev;
4494        mlxsw_sp->fw_filename = MLXSW_SP1_FW_FILENAME;
4495        mlxsw_sp->kvdl_ops = &mlxsw_sp1_kvdl_ops;
4496        mlxsw_sp->afa_ops = &mlxsw_sp1_act_afa_ops;
4497        mlxsw_sp->afk_ops = &mlxsw_sp1_afk_ops;
4498        mlxsw_sp->mr_tcam_ops = &mlxsw_sp1_mr_tcam_ops;
4499        mlxsw_sp->acl_tcam_ops = &mlxsw_sp1_acl_tcam_ops;
4500        mlxsw_sp->nve_ops_arr = mlxsw_sp1_nve_ops_arr;
4501        mlxsw_sp->mac_mask = mlxsw_sp1_mac_mask;
4502        mlxsw_sp->rif_ops_arr = mlxsw_sp1_rif_ops_arr;
4503        mlxsw_sp->sb_vals = &mlxsw_sp1_sb_vals;
4504        mlxsw_sp->port_type_speed_ops = &mlxsw_sp1_port_type_speed_ops;
4505
4506        return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info);
4507}
4508
4509static int mlxsw_sp2_init(struct mlxsw_core *mlxsw_core,
4510                          const struct mlxsw_bus_info *mlxsw_bus_info)
4511{
4512        struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
4513
4514        mlxsw_sp->kvdl_ops = &mlxsw_sp2_kvdl_ops;
4515        mlxsw_sp->afa_ops = &mlxsw_sp2_act_afa_ops;
4516        mlxsw_sp->afk_ops = &mlxsw_sp2_afk_ops;
4517        mlxsw_sp->mr_tcam_ops = &mlxsw_sp2_mr_tcam_ops;
4518        mlxsw_sp->acl_tcam_ops = &mlxsw_sp2_acl_tcam_ops;
4519        mlxsw_sp->nve_ops_arr = mlxsw_sp2_nve_ops_arr;
4520        mlxsw_sp->mac_mask = mlxsw_sp2_mac_mask;
4521        mlxsw_sp->rif_ops_arr = mlxsw_sp2_rif_ops_arr;
4522        mlxsw_sp->sb_vals = &mlxsw_sp2_sb_vals;
4523        mlxsw_sp->port_type_speed_ops = &mlxsw_sp2_port_type_speed_ops;
4524
4525        return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info);
4526}
4527
4528static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core)
4529{
4530        struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
4531
4532        mlxsw_sp_ports_remove(mlxsw_sp);
4533        mlxsw_sp_dpipe_fini(mlxsw_sp);
4534        unregister_netdevice_notifier(&mlxsw_sp->netdevice_nb);
4535        mlxsw_sp_router_fini(mlxsw_sp);
4536        mlxsw_sp_acl_fini(mlxsw_sp);
4537        mlxsw_sp_nve_fini(mlxsw_sp);
4538        mlxsw_sp_afa_fini(mlxsw_sp);
4539        mlxsw_sp_counter_pool_fini(mlxsw_sp);
4540        mlxsw_sp_switchdev_fini(mlxsw_sp);
4541        mlxsw_sp_span_fini(mlxsw_sp);
4542        mlxsw_sp_lag_fini(mlxsw_sp);
4543        mlxsw_sp_buffers_fini(mlxsw_sp);
4544        mlxsw_sp_traps_fini(mlxsw_sp);
4545        mlxsw_sp_fids_fini(mlxsw_sp);
4546        mlxsw_sp_kvdl_fini(mlxsw_sp);
4547}
4548
4549/* Per-FID flood tables are used for both "true" 802.1D FIDs and emulated
4550 * 802.1Q FIDs
4551 */
4552#define MLXSW_SP_FID_FLOOD_TABLE_SIZE   (MLXSW_SP_FID_8021D_MAX + \
4553                                         VLAN_VID_MASK - 1)
4554
4555static const struct mlxsw_config_profile mlxsw_sp1_config_profile = {
4556        .used_max_mid                   = 1,
4557        .max_mid                        = MLXSW_SP_MID_MAX,
4558        .used_flood_tables              = 1,
4559        .used_flood_mode                = 1,
4560        .flood_mode                     = 3,
4561        .max_fid_flood_tables           = 3,
4562        .fid_flood_table_size           = MLXSW_SP_FID_FLOOD_TABLE_SIZE,
4563        .used_max_ib_mc                 = 1,
4564        .max_ib_mc                      = 0,
4565        .used_max_pkey                  = 1,
4566        .max_pkey                       = 0,
4567        .used_kvd_sizes                 = 1,
4568        .kvd_hash_single_parts          = 59,
4569        .kvd_hash_double_parts          = 41,
4570        .kvd_linear_size                = MLXSW_SP_KVD_LINEAR_SIZE,
4571        .swid_config                    = {
4572                {
4573                        .used_type      = 1,
4574                        .type           = MLXSW_PORT_SWID_TYPE_ETH,
4575                }
4576        },
4577};
4578
4579static const struct mlxsw_config_profile mlxsw_sp2_config_profile = {
4580        .used_max_mid                   = 1,
4581        .max_mid                        = MLXSW_SP_MID_MAX,
4582        .used_flood_tables              = 1,
4583        .used_flood_mode                = 1,
4584        .flood_mode                     = 3,
4585        .max_fid_flood_tables           = 3,
4586        .fid_flood_table_size           = MLXSW_SP_FID_FLOOD_TABLE_SIZE,
4587        .used_max_ib_mc                 = 1,
4588        .max_ib_mc                      = 0,
4589        .used_max_pkey                  = 1,
4590        .max_pkey                       = 0,
4591        .swid_config                    = {
4592                {
4593                        .used_type      = 1,
4594                        .type           = MLXSW_PORT_SWID_TYPE_ETH,
4595                }
4596        },
4597};
4598
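/* Prepare devlink size parameters for the KVD resources. Each sub-part
 * may grow up to the total KVD size minus the minimal sizes of the
 * other two parts, in steps of MLXSW_SP_KVD_GRANULARITY entries.
 */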
4599static void
4600mlxsw_sp_resource_size_params_prepare(struct mlxsw_core *mlxsw_core,
4601                                      struct devlink_resource_size_params *kvd_size_params,
4602                                      struct devlink_resource_size_params *linear_size_params,
4603                                      struct devlink_resource_size_params *hash_double_size_params,
4604                                      struct devlink_resource_size_params *hash_single_size_params)
4605{
4606        u32 single_size_min = MLXSW_CORE_RES_GET(mlxsw_core,
4607                                                 KVD_SINGLE_MIN_SIZE);
4608        u32 double_size_min = MLXSW_CORE_RES_GET(mlxsw_core,
4609                                                 KVD_DOUBLE_MIN_SIZE);
4610        u32 kvd_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE);
4611        u32 linear_size_min = 0;
4612
4613        devlink_resource_size_params_init(kvd_size_params, kvd_size, kvd_size,
4614                                          MLXSW_SP_KVD_GRANULARITY,
4615                                          DEVLINK_RESOURCE_UNIT_ENTRY);
4616        devlink_resource_size_params_init(linear_size_params, linear_size_min,
4617                                          kvd_size - single_size_min -
4618                                          double_size_min,
4619                                          MLXSW_SP_KVD_GRANULARITY,
4620                                          DEVLINK_RESOURCE_UNIT_ENTRY);
4621        devlink_resource_size_params_init(hash_double_size_params,
4622                                          double_size_min,
4623                                          kvd_size - single_size_min -
4624                                          linear_size_min,
4625                                          MLXSW_SP_KVD_GRANULARITY,
4626                                          DEVLINK_RESOURCE_UNIT_ENTRY);
4627        devlink_resource_size_params_init(hash_single_size_params,
4628                                          single_size_min,
4629                                          kvd_size - double_size_min -
4630                                          linear_size_min,
4631                                          MLXSW_SP_KVD_GRANULARITY,
4632                                          DEVLINK_RESOURCE_UNIT_ENTRY);
4633}
4634
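/* Register the KVD resource hierarchy (linear, hash single and hash
 * double) with devlink, sized according to the Spectrum-1 profile.
 */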
4635static int mlxsw_sp1_resources_kvd_register(struct mlxsw_core *mlxsw_core)
4636{
4637        struct devlink *devlink = priv_to_devlink(mlxsw_core);
4638        struct devlink_resource_size_params hash_single_size_params;
4639        struct devlink_resource_size_params hash_double_size_params;
4640        struct devlink_resource_size_params linear_size_params;
4641        struct devlink_resource_size_params kvd_size_params;
4642        u32 kvd_size, single_size, double_size, linear_size;
4643        const struct mlxsw_config_profile *profile;
4644        int err;
4645
4646        profile = &mlxsw_sp1_config_profile;
4647        if (!MLXSW_CORE_RES_VALID(mlxsw_core, KVD_SIZE))
4648                return -EIO;
4649
4650        mlxsw_sp_resource_size_params_prepare(mlxsw_core, &kvd_size_params,
4651                                              &linear_size_params,
4652                                              &hash_double_size_params,
4653                                              &hash_single_size_params);
4654
4655        kvd_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE);
4656        err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD,
4657                                        kvd_size, MLXSW_SP_RESOURCE_KVD,
4658                                        DEVLINK_RESOURCE_ID_PARENT_TOP,
4659                                        &kvd_size_params);
4660        if (err)
4661                return err;
4662
4663        linear_size = profile->kvd_linear_size;
4664        err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_LINEAR,
4665                                        linear_size,
4666                                        MLXSW_SP_RESOURCE_KVD_LINEAR,
4667                                        MLXSW_SP_RESOURCE_KVD,
4668                                        &linear_size_params);
4669        if (err)
4670                return err;
4671
4672        err = mlxsw_sp1_kvdl_resources_register(mlxsw_core);
4673        if (err)
4674                return err;
4675
4676        double_size = kvd_size - linear_size;
4677        double_size *= profile->kvd_hash_double_parts;
4678        double_size /= profile->kvd_hash_double_parts +
4679                       profile->kvd_hash_single_parts;
4680        double_size = rounddown(double_size, MLXSW_SP_KVD_GRANULARITY);
4681        err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_HASH_DOUBLE,
4682                                        double_size,
4683                                        MLXSW_SP_RESOURCE_KVD_HASH_DOUBLE,
4684                                        MLXSW_SP_RESOURCE_KVD,
4685                                        &hash_double_size_params);
4686        if (err)
4687                return err;
4688
4689        single_size = kvd_size - double_size - linear_size;
4690        err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_HASH_SINGLE,
4691                                        single_size,
4692                                        MLXSW_SP_RESOURCE_KVD_HASH_SINGLE,
4693                                        MLXSW_SP_RESOURCE_KVD,
4694                                        &hash_single_size_params);
4695        if (err)
4696                return err;
4697
4698        return 0;
4699}
4700
4701static int mlxsw_sp1_resources_register(struct mlxsw_core *mlxsw_core)
4702{
4703        return mlxsw_sp1_resources_kvd_register(mlxsw_core);
4704}
4705
4706static int mlxsw_sp2_resources_register(struct mlxsw_core *mlxsw_core)
4707{
4708        return 0;
4709}
4710
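/* Resolve the KVD partition sizes, preferring sizes configured by the
 * user via devlink resources and falling back to the profile defaults.
 */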
4711static int mlxsw_sp_kvd_sizes_get(struct mlxsw_core *mlxsw_core,
4712                                  const struct mlxsw_config_profile *profile,
4713                                  u64 *p_single_size, u64 *p_double_size,
4714                                  u64 *p_linear_size)
4715{
4716        struct devlink *devlink = priv_to_devlink(mlxsw_core);
4717        u32 double_size;
4718        int err;
4719
4720        if (!MLXSW_CORE_RES_VALID(mlxsw_core, KVD_SINGLE_MIN_SIZE) ||
4721            !MLXSW_CORE_RES_VALID(mlxsw_core, KVD_DOUBLE_MIN_SIZE))
4722                return -EIO;
4723
4724        /* The hash part is what is left of the KVD after the
4725         * linear part. It is split into the single and double
4726         * sizes according to the parts ratio from the profile.
4727         * Both sizes must be multiples of the granularity from
4728         * the profile. If the user provided the sizes, they are
4729         * obtained via devlink.
4730         */
4731        err = devlink_resource_size_get(devlink,
4732                                        MLXSW_SP_RESOURCE_KVD_LINEAR,
4733                                        p_linear_size);
4734        if (err)
4735                *p_linear_size = profile->kvd_linear_size;
4736
4737        err = devlink_resource_size_get(devlink,
4738                                        MLXSW_SP_RESOURCE_KVD_HASH_DOUBLE,
4739                                        p_double_size);
4740        if (err) {
4741                double_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) -
4742                              *p_linear_size;
4743                double_size *= profile->kvd_hash_double_parts;
4744                double_size /= profile->kvd_hash_double_parts +
4745                               profile->kvd_hash_single_parts;
4746                *p_double_size = rounddown(double_size,
4747                                           MLXSW_SP_KVD_GRANULARITY);
4748        }
4749
4750        err = devlink_resource_size_get(devlink,
4751                                        MLXSW_SP_RESOURCE_KVD_HASH_SINGLE,
4752                                        p_single_size);
4753        if (err)
4754                *p_single_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) -
4755                                 *p_double_size - *p_linear_size;
4756
4757        /* Check results are legal. */
4758        if (*p_single_size < MLXSW_CORE_RES_GET(mlxsw_core, KVD_SINGLE_MIN_SIZE) ||
4759            *p_double_size < MLXSW_CORE_RES_GET(mlxsw_core, KVD_DOUBLE_MIN_SIZE) ||
4760            MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) < *p_linear_size)
4761                return -EIO;
4762
4763        return 0;
4764}
4765
4766static int
4767mlxsw_sp_devlink_param_fw_load_policy_validate(struct devlink *devlink, u32 id,
4768                                               union devlink_param_value val,
4769                                               struct netlink_ext_ack *extack)
4770{
4771        if ((val.vu8 != DEVLINK_PARAM_FW_LOAD_POLICY_VALUE_DRIVER) &&
4772            (val.vu8 != DEVLINK_PARAM_FW_LOAD_POLICY_VALUE_FLASH)) {
4773                NL_SET_ERR_MSG_MOD(extack, "'fw_load_policy' must be 'driver' or 'flash'");
4774                return -EINVAL;
4775        }
4776
4777        return 0;
4778}
4779
4780static const struct devlink_param mlxsw_sp_devlink_params[] = {
4781        DEVLINK_PARAM_GENERIC(FW_LOAD_POLICY,
4782                              BIT(DEVLINK_PARAM_CMODE_DRIVERINIT),
4783                              NULL, NULL,
4784                              mlxsw_sp_devlink_param_fw_load_policy_validate),
4785};
4786
4787static int mlxsw_sp_params_register(struct mlxsw_core *mlxsw_core)
4788{
4789        struct devlink *devlink = priv_to_devlink(mlxsw_core);
4790        union devlink_param_value value;
4791        int err;
4792
4793        err = devlink_params_register(devlink, mlxsw_sp_devlink_params,
4794                                      ARRAY_SIZE(mlxsw_sp_devlink_params));
4795        if (err)
4796                return err;
4797
4798        value.vu8 = DEVLINK_PARAM_FW_LOAD_POLICY_VALUE_DRIVER;
4799        devlink_param_driverinit_value_set(devlink,
4800                                           DEVLINK_PARAM_GENERIC_ID_FW_LOAD_POLICY,
4801                                           value);
4802        return 0;
4803}
4804
4805static void mlxsw_sp_params_unregister(struct mlxsw_core *mlxsw_core)
4806{
4807        devlink_params_unregister(priv_to_devlink(mlxsw_core),
4808                                  mlxsw_sp_devlink_params,
4809                                  ARRAY_SIZE(mlxsw_sp_devlink_params));
4810}
4811
4812static int
4813mlxsw_sp_params_acl_region_rehash_intrvl_get(struct devlink *devlink, u32 id,
4814                                             struct devlink_param_gset_ctx *ctx)
4815{
4816        struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
4817        struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
4818
4819        ctx->val.vu32 = mlxsw_sp_acl_region_rehash_intrvl_get(mlxsw_sp);
4820        return 0;
4821}
4822
4823static int
4824mlxsw_sp_params_acl_region_rehash_intrvl_set(struct devlink *devlink, u32 id,
4825                                             struct devlink_param_gset_ctx *ctx)
4826{
4827        struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
4828        struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
4829
4830        return mlxsw_sp_acl_region_rehash_intrvl_set(mlxsw_sp, ctx->val.vu32);
4831}
4832
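/* Spectrum-2 only: expose the ACL region rehash interval as a runtime
 * devlink parameter, on top of the common parameters registered above.
 */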
4833static const struct devlink_param mlxsw_sp2_devlink_params[] = {
4834        DEVLINK_PARAM_DRIVER(MLXSW_DEVLINK_PARAM_ID_ACL_REGION_REHASH_INTERVAL,
4835                             "acl_region_rehash_interval",
4836                             DEVLINK_PARAM_TYPE_U32,
4837                             BIT(DEVLINK_PARAM_CMODE_RUNTIME),
4838                             mlxsw_sp_params_acl_region_rehash_intrvl_get,
4839                             mlxsw_sp_params_acl_region_rehash_intrvl_set,
4840                             NULL),
4841};
4842
4843static int mlxsw_sp2_params_register(struct mlxsw_core *mlxsw_core)
4844{
4845        struct devlink *devlink = priv_to_devlink(mlxsw_core);
4846        union devlink_param_value value;
4847        int err;
4848
4849        err = mlxsw_sp_params_register(mlxsw_core);
4850        if (err)
4851                return err;
4852
4853        err = devlink_params_register(devlink, mlxsw_sp2_devlink_params,
4854                                      ARRAY_SIZE(mlxsw_sp2_devlink_params));
4855        if (err)
4856                goto err_devlink_params_register;
4857
4858        value.vu32 = 0;
4859        devlink_param_driverinit_value_set(devlink,
4860                                           MLXSW_DEVLINK_PARAM_ID_ACL_REGION_REHASH_INTERVAL,
4861                                           value);
4862        return 0;
4863
4864err_devlink_params_register:
4865        mlxsw_sp_params_unregister(mlxsw_core);
4866        return err;
4867}
4868
4869static void mlxsw_sp2_params_unregister(struct mlxsw_core *mlxsw_core)
4870{
4871        devlink_params_unregister(priv_to_devlink(mlxsw_core),
4872                                  mlxsw_sp2_devlink_params,
4873                                  ARRAY_SIZE(mlxsw_sp2_devlink_params));
4874        mlxsw_sp_params_unregister(mlxsw_core);
4875}
4876
4877static struct mlxsw_driver mlxsw_sp1_driver = {
4878        .kind                           = mlxsw_sp1_driver_name,
4879        .priv_size                      = sizeof(struct mlxsw_sp),
4880        .init                           = mlxsw_sp1_init,
4881        .fini                           = mlxsw_sp_fini,
4882        .basic_trap_groups_set          = mlxsw_sp_basic_trap_groups_set,
4883        .port_split                     = mlxsw_sp_port_split,
4884        .port_unsplit                   = mlxsw_sp_port_unsplit,
4885        .sb_pool_get                    = mlxsw_sp_sb_pool_get,
4886        .sb_pool_set                    = mlxsw_sp_sb_pool_set,
4887        .sb_port_pool_get               = mlxsw_sp_sb_port_pool_get,
4888        .sb_port_pool_set               = mlxsw_sp_sb_port_pool_set,
4889        .sb_tc_pool_bind_get            = mlxsw_sp_sb_tc_pool_bind_get,
4890        .sb_tc_pool_bind_set            = mlxsw_sp_sb_tc_pool_bind_set,
4891        .sb_occ_snapshot                = mlxsw_sp_sb_occ_snapshot,
4892        .sb_occ_max_clear               = mlxsw_sp_sb_occ_max_clear,
4893        .sb_occ_port_pool_get           = mlxsw_sp_sb_occ_port_pool_get,
4894        .sb_occ_tc_port_bind_get        = mlxsw_sp_sb_occ_tc_port_bind_get,
4895        .txhdr_construct                = mlxsw_sp_txhdr_construct,
4896        .resources_register             = mlxsw_sp1_resources_register,
4897        .kvd_sizes_get                  = mlxsw_sp_kvd_sizes_get,
4898        .params_register                = mlxsw_sp_params_register,
4899        .params_unregister              = mlxsw_sp_params_unregister,
4900        .txhdr_len                      = MLXSW_TXHDR_LEN,
4901        .profile                        = &mlxsw_sp1_config_profile,
4902        .res_query_enabled              = true,
4903};
4904
4905static struct mlxsw_driver mlxsw_sp2_driver = {
4906        .kind                           = mlxsw_sp2_driver_name,
4907        .priv_size                      = sizeof(struct mlxsw_sp),
4908        .init                           = mlxsw_sp2_init,
4909        .fini                           = mlxsw_sp_fini,
4910        .basic_trap_groups_set          = mlxsw_sp_basic_trap_groups_set,
4911        .port_split                     = mlxsw_sp_port_split,
4912        .port_unsplit                   = mlxsw_sp_port_unsplit,
4913        .sb_pool_get                    = mlxsw_sp_sb_pool_get,
4914        .sb_pool_set                    = mlxsw_sp_sb_pool_set,
4915        .sb_port_pool_get               = mlxsw_sp_sb_port_pool_get,
4916        .sb_port_pool_set               = mlxsw_sp_sb_port_pool_set,
4917        .sb_tc_pool_bind_get            = mlxsw_sp_sb_tc_pool_bind_get,
4918        .sb_tc_pool_bind_set            = mlxsw_sp_sb_tc_pool_bind_set,
4919        .sb_occ_snapshot                = mlxsw_sp_sb_occ_snapshot,
4920        .sb_occ_max_clear               = mlxsw_sp_sb_occ_max_clear,
4921        .sb_occ_port_pool_get           = mlxsw_sp_sb_occ_port_pool_get,
4922        .sb_occ_tc_port_bind_get        = mlxsw_sp_sb_occ_tc_port_bind_get,
4923        .txhdr_construct                = mlxsw_sp_txhdr_construct,
4924        .resources_register             = mlxsw_sp2_resources_register,
4925        .params_register                = mlxsw_sp2_params_register,
4926        .params_unregister              = mlxsw_sp2_params_unregister,
4927        .txhdr_len                      = MLXSW_TXHDR_LEN,
4928        .profile                        = &mlxsw_sp2_config_profile,
4929        .res_query_enabled              = true,
4930};
4931
4932bool mlxsw_sp_port_dev_check(const struct net_device *dev)
4933{
4934        return dev->netdev_ops == &mlxsw_sp_port_netdev_ops;
4935}
4936
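/* Callback for netdev_walk_all_lower_dev(). Returning 1 stops the walk
 * once a mlxsw_sp port device has been found.
 */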
4937static int mlxsw_sp_lower_dev_walk(struct net_device *lower_dev, void *data)
4938{
4939        struct mlxsw_sp_port **p_mlxsw_sp_port = data;
4940        int ret = 0;
4941
4942        if (mlxsw_sp_port_dev_check(lower_dev)) {
4943                *p_mlxsw_sp_port = netdev_priv(lower_dev);
4944                ret = 1;
4945        }
4946
4947        return ret;
4948}
4949
4950struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find(struct net_device *dev)
4951{
4952        struct mlxsw_sp_port *mlxsw_sp_port;
4953
4954        if (mlxsw_sp_port_dev_check(dev))
4955                return netdev_priv(dev);
4956
4957        mlxsw_sp_port = NULL;
4958        netdev_walk_all_lower_dev(dev, mlxsw_sp_lower_dev_walk, &mlxsw_sp_port);
4959
4960        return mlxsw_sp_port;
4961}
4962
4963struct mlxsw_sp *mlxsw_sp_lower_get(struct net_device *dev)
4964{
4965        struct mlxsw_sp_port *mlxsw_sp_port;
4966
4967        mlxsw_sp_port = mlxsw_sp_port_dev_lower_find(dev);
4968        return mlxsw_sp_port ? mlxsw_sp_port->mlxsw_sp : NULL;
4969}
4970
4971struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find_rcu(struct net_device *dev)
4972{
4973        struct mlxsw_sp_port *mlxsw_sp_port;
4974
4975        if (mlxsw_sp_port_dev_check(dev))
4976                return netdev_priv(dev);
4977
4978        mlxsw_sp_port = NULL;
4979        netdev_walk_all_lower_dev_rcu(dev, mlxsw_sp_lower_dev_walk,
4980                                      &mlxsw_sp_port);
4981
4982        return mlxsw_sp_port;
4983}
4984
4985struct mlxsw_sp_port *mlxsw_sp_port_lower_dev_hold(struct net_device *dev)
4986{
4987        struct mlxsw_sp_port *mlxsw_sp_port;
4988
4989        rcu_read_lock();
4990        mlxsw_sp_port = mlxsw_sp_port_dev_lower_find_rcu(dev);
4991        if (mlxsw_sp_port)
4992                dev_hold(mlxsw_sp_port->dev);
4993        rcu_read_unlock();
4994        return mlxsw_sp_port;
4995}
4996
4997void mlxsw_sp_port_dev_put(struct mlxsw_sp_port *mlxsw_sp_port)
4998{
4999        dev_put(mlxsw_sp_port->dev);
5000}
5001
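/* As part of unlinking a port from a LAG, make it leave any bridge the
 * LAG device or its directly linked uppers are enslaved to.
 */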
5002static void
5003mlxsw_sp_port_lag_uppers_cleanup(struct mlxsw_sp_port *mlxsw_sp_port,
5004                                 struct net_device *lag_dev)
5005{
5006        struct net_device *br_dev = netdev_master_upper_dev_get(lag_dev);
5007        struct net_device *upper_dev;
5008        struct list_head *iter;
5009
5010        if (netif_is_bridge_port(lag_dev))
5011                mlxsw_sp_port_bridge_leave(mlxsw_sp_port, lag_dev, br_dev);
5012
5013        netdev_for_each_upper_dev_rcu(lag_dev, upper_dev, iter) {
5014                if (!netif_is_bridge_port(upper_dev))
5015                        continue;
5016                br_dev = netdev_master_upper_dev_get(upper_dev);
5017                mlxsw_sp_port_bridge_leave(mlxsw_sp_port, upper_dev, br_dev);
5018        }
5019}
5020
5021static int mlxsw_sp_lag_create(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
5022{
5023        char sldr_pl[MLXSW_REG_SLDR_LEN];
5024
5025        mlxsw_reg_sldr_lag_create_pack(sldr_pl, lag_id);
5026        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
5027}
5028
5029static int mlxsw_sp_lag_destroy(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
5030{
5031        char sldr_pl[MLXSW_REG_SLDR_LEN];
5032
5033        mlxsw_reg_sldr_lag_destroy_pack(sldr_pl, lag_id);
5034        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
5035}
5036
5037static int mlxsw_sp_lag_col_port_add(struct mlxsw_sp_port *mlxsw_sp_port,
5038                                     u16 lag_id, u8 port_index)
5039{
5040        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
5041        char slcor_pl[MLXSW_REG_SLCOR_LEN];
5042
5043        mlxsw_reg_slcor_port_add_pack(slcor_pl, mlxsw_sp_port->local_port,
5044                                      lag_id, port_index);
5045        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
5046}
5047
5048static int mlxsw_sp_lag_col_port_remove(struct mlxsw_sp_port *mlxsw_sp_port,
5049                                        u16 lag_id)
5050{
5051        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
5052        char slcor_pl[MLXSW_REG_SLCOR_LEN];
5053
5054        mlxsw_reg_slcor_port_remove_pack(slcor_pl, mlxsw_sp_port->local_port,
5055                                         lag_id);
5056        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
5057}
5058
5059static int mlxsw_sp_lag_col_port_enable(struct mlxsw_sp_port *mlxsw_sp_port,
5060                                        u16 lag_id)
5061{
5062        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
5063        char slcor_pl[MLXSW_REG_SLCOR_LEN];
5064
5065        mlxsw_reg_slcor_col_enable_pack(slcor_pl, mlxsw_sp_port->local_port,
5066                                        lag_id);
5067        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
5068}
5069
5070static int mlxsw_sp_lag_col_port_disable(struct mlxsw_sp_port *mlxsw_sp_port,
5071                                         u16 lag_id)
5072{
5073        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
5074        char slcor_pl[MLXSW_REG_SLCOR_LEN];
5075
5076        mlxsw_reg_slcor_col_disable_pack(slcor_pl, mlxsw_sp_port->local_port,
5077                                         lag_id);
5078        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
5079}
5080
5081static int mlxsw_sp_lag_index_get(struct mlxsw_sp *mlxsw_sp,
5082                                  struct net_device *lag_dev,
5083                                  u16 *p_lag_id)
5084{
5085        struct mlxsw_sp_upper *lag;
5086        int free_lag_id = -1;
5087        u64 max_lag;
5088        int i;
5089
5090        max_lag = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_LAG);
5091        for (i = 0; i < max_lag; i++) {
5092                lag = mlxsw_sp_lag_get(mlxsw_sp, i);
5093                if (lag->ref_count) {
5094                        if (lag->dev == lag_dev) {
5095                                *p_lag_id = i;
5096                                return 0;
5097                        }
5098                } else if (free_lag_id < 0) {
5099                        free_lag_id = i;
5100                }
5101        }
5102        if (free_lag_id < 0)
5103                return -EBUSY;
5104        *p_lag_id = free_lag_id;
5105        return 0;
5106}
5107
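/* A LAG device can only be offloaded if a LAG index is available (or
 * already assigned to it) and the device uses hash-based Tx.
 */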
5108static bool
5109mlxsw_sp_master_lag_check(struct mlxsw_sp *mlxsw_sp,
5110                          struct net_device *lag_dev,
5111                          struct netdev_lag_upper_info *lag_upper_info,
5112                          struct netlink_ext_ack *extack)
5113{
5114        u16 lag_id;
5115
5116        if (mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id) != 0) {
5117                NL_SET_ERR_MSG_MOD(extack, "Exceeded number of supported LAG devices");
5118                return false;
5119        }
5120        if (lag_upper_info->tx_type != NETDEV_LAG_TX_TYPE_HASH) {
5121                NL_SET_ERR_MSG_MOD(extack, "LAG device using unsupported Tx type");
5122                return false;
5123        }
5124        return true;
5125}
5126
5127static int mlxsw_sp_port_lag_index_get(struct mlxsw_sp *mlxsw_sp,
5128                                       u16 lag_id, u8 *p_port_index)
5129{
5130        u64 max_lag_members;
5131        int i;
5132
5133        max_lag_members = MLXSW_CORE_RES_GET(mlxsw_sp->core,
5134                                             MAX_LAG_MEMBERS);
5135        for (i = 0; i < max_lag_members; i++) {
5136                if (!mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i)) {
5137                        *p_port_index = i;
5138                        return 0;
5139                }
5140        }
5141        return -EBUSY;
5142}
5143
5144static int mlxsw_sp_port_lag_join(struct mlxsw_sp_port *mlxsw_sp_port,
5145                                  struct net_device *lag_dev)
5146{
5147        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
5148        struct mlxsw_sp_upper *lag;
5149        u16 lag_id;
5150        u8 port_index;
5151        int err;
5152
5153        err = mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id);
5154        if (err)
5155                return err;
5156        lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id);
5157        if (!lag->ref_count) {
5158                err = mlxsw_sp_lag_create(mlxsw_sp, lag_id);
5159                if (err)
5160                        return err;
5161                lag->dev = lag_dev;
5162        }
5163
5164        err = mlxsw_sp_port_lag_index_get(mlxsw_sp, lag_id, &port_index);
5165        if (err)
5166                return err;
5167        err = mlxsw_sp_lag_col_port_add(mlxsw_sp_port, lag_id, port_index);
5168        if (err)
5169                goto err_col_port_add;
5170
5171        mlxsw_core_lag_mapping_set(mlxsw_sp->core, lag_id, port_index,
5172                                   mlxsw_sp_port->local_port);
5173        mlxsw_sp_port->lag_id = lag_id;
5174        mlxsw_sp_port->lagged = 1;
5175        lag->ref_count++;
5176
5177        /* Port is no longer usable as a router interface */
5178        if (mlxsw_sp_port->default_vlan->fid)
5179                mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port->default_vlan);
5180
5181        return 0;
5182
5183err_col_port_add:
5184        if (!lag->ref_count)
5185                mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);
5186        return err;
5187}
5188
5189static void mlxsw_sp_port_lag_leave(struct mlxsw_sp_port *mlxsw_sp_port,
5190                                    struct net_device *lag_dev)
5191{
5192        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
5193        u16 lag_id = mlxsw_sp_port->lag_id;
5194        struct mlxsw_sp_upper *lag;
5195
5196        if (!mlxsw_sp_port->lagged)
5197                return;
5198        lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id);
5199        WARN_ON(lag->ref_count == 0);
5200
5201        mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id);
5202
5203        /* Any VLANs configured on the port are no longer valid */
5204        mlxsw_sp_port_vlan_flush(mlxsw_sp_port, false);
5205        mlxsw_sp_port_vlan_cleanup(mlxsw_sp_port->default_vlan);
5206        /* Make the LAG and its directly linked uppers leave the bridges
5207         * they are members of.
5208         */
5209        mlxsw_sp_port_lag_uppers_cleanup(mlxsw_sp_port, lag_dev);
5210
5211        if (lag->ref_count == 1)
5212                mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);
5213
5214        mlxsw_core_lag_mapping_clear(mlxsw_sp->core, lag_id,
5215                                     mlxsw_sp_port->local_port);
5216        mlxsw_sp_port->lagged = 0;
5217        lag->ref_count--;
5218
5219        /* Make sure untagged frames are allowed to ingress */
5220        mlxsw_sp_port_pvid_set(mlxsw_sp_port, MLXSW_SP_DEFAULT_VID);
5221}
5222
5223static int mlxsw_sp_lag_dist_port_add(struct mlxsw_sp_port *mlxsw_sp_port,
5224                                      u16 lag_id)
5225{
5226        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
5227        char sldr_pl[MLXSW_REG_SLDR_LEN];
5228
5229        mlxsw_reg_sldr_lag_add_port_pack(sldr_pl, lag_id,
5230                                         mlxsw_sp_port->local_port);
5231        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
5232}
5233
5234static int mlxsw_sp_lag_dist_port_remove(struct mlxsw_sp_port *mlxsw_sp_port,
5235                                         u16 lag_id)
5236{
5237        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
5238        char sldr_pl[MLXSW_REG_SLDR_LEN];
5239
5240        mlxsw_reg_sldr_lag_remove_port_pack(sldr_pl, lag_id,
5241                                            mlxsw_sp_port->local_port);
5242        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
5243}
5244
5245static int
5246mlxsw_sp_port_lag_col_dist_enable(struct mlxsw_sp_port *mlxsw_sp_port)
5247{
5248        int err;
5249
5250        err = mlxsw_sp_lag_col_port_enable(mlxsw_sp_port,
5251                                           mlxsw_sp_port->lag_id);
5252        if (err)
5253                return err;
5254
5255        err = mlxsw_sp_lag_dist_port_add(mlxsw_sp_port, mlxsw_sp_port->lag_id);
5256        if (err)
5257                goto err_dist_port_add;
5258
5259        return 0;
5260
5261err_dist_port_add:
5262        mlxsw_sp_lag_col_port_disable(mlxsw_sp_port, mlxsw_sp_port->lag_id);
5263        return err;
5264}
5265
5266static int
5267mlxsw_sp_port_lag_col_dist_disable(struct mlxsw_sp_port *mlxsw_sp_port)
5268{
5269        int err;
5270
5271        err = mlxsw_sp_lag_dist_port_remove(mlxsw_sp_port,
5272                                            mlxsw_sp_port->lag_id);
5273        if (err)
5274                return err;
5275
5276        err = mlxsw_sp_lag_col_port_disable(mlxsw_sp_port,
5277                                            mlxsw_sp_port->lag_id);
5278        if (err)
5279                goto err_col_port_disable;
5280
5281        return 0;
5282
5283err_col_port_disable:
5284        mlxsw_sp_lag_dist_port_add(mlxsw_sp_port, mlxsw_sp_port->lag_id);
5285        return err;
5286}
5287
5288static int mlxsw_sp_port_lag_changed(struct mlxsw_sp_port *mlxsw_sp_port,
5289                                     struct netdev_lag_lower_state_info *info)
5290{
5291        if (info->tx_enabled)
5292                return mlxsw_sp_port_lag_col_dist_enable(mlxsw_sp_port);
5293        else
5294                return mlxsw_sp_port_lag_col_dist_disable(mlxsw_sp_port);
5295}
5296
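/* Set the STP state of all possible VLANs on the port in a single SPMS
 * write: forwarding when enabled, discarding otherwise.
 */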
5297static int mlxsw_sp_port_stp_set(struct mlxsw_sp_port *mlxsw_sp_port,
5298                                 bool enable)
5299{
5300        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
5301        enum mlxsw_reg_spms_state spms_state;
5302        char *spms_pl;
5303        u16 vid;
5304        int err;
5305
5306        spms_state = enable ? MLXSW_REG_SPMS_STATE_FORWARDING :
5307                              MLXSW_REG_SPMS_STATE_DISCARDING;
5308
5309        spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL);
5310        if (!spms_pl)
5311                return -ENOMEM;
5312        mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port);
5313
5314        for (vid = 0; vid < VLAN_N_VID; vid++)
5315                mlxsw_reg_spms_vid_pack(spms_pl, vid, spms_state);
5316
5317        err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl);
5318        kfree(spms_pl);
5319        return err;
5320}
5321
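/* Prepare a port enslaved to an OVS master: move it to virtual port
 * mode, set all VLANs to forwarding, allow VLANs 1-4094 and disable
 * learning on them.
 */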
5322static int mlxsw_sp_port_ovs_join(struct mlxsw_sp_port *mlxsw_sp_port)
5323{
5324        u16 vid = 1;
5325        int err;
5326
5327        err = mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, true);
5328        if (err)
5329                return err;
5330        err = mlxsw_sp_port_stp_set(mlxsw_sp_port, true);
5331        if (err)
5332                goto err_port_stp_set;
5333        err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, 1, VLAN_N_VID - 2,
5334                                     true, false);
5335        if (err)
5336                goto err_port_vlan_set;
5337
5338        for (; vid <= VLAN_N_VID - 1; vid++) {
5339                err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_port,
5340                                                     vid, false);
5341                if (err)
5342                        goto err_vid_learning_set;
5343        }
5344
5345        return 0;
5346
5347err_vid_learning_set:
5348        for (vid--; vid >= 1; vid--)
5349                mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, true);
5350err_port_vlan_set:
5351        mlxsw_sp_port_stp_set(mlxsw_sp_port, false);
5352err_port_stp_set:
5353        mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false);
5354        return err;
5355}
5356
5357static void mlxsw_sp_port_ovs_leave(struct mlxsw_sp_port *mlxsw_sp_port)
5358{
5359        u16 vid;
5360
5361        for (vid = VLAN_N_VID - 1; vid >= 1; vid--)
5362                mlxsw_sp_port_vid_learning_set(mlxsw_sp_port,
5363                                               vid, true);
5364
5365        mlxsw_sp_port_vlan_set(mlxsw_sp_port, 1, VLAN_N_VID - 2,
5366                               false, false);
5367        mlxsw_sp_port_stp_set(mlxsw_sp_port, false);
5368        mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false);
5369}
5370
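/* VxLAN topology checks used before offloading a bridge: a VLAN-unaware
 * bridge may hold at most one VxLAN device, and in a VLAN-aware bridge
 * no two VxLAN devices may map to the same PVID.
 */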
5371static bool mlxsw_sp_bridge_has_multiple_vxlans(struct net_device *br_dev)
5372{
5373        unsigned int num_vxlans = 0;
5374        struct net_device *dev;
5375        struct list_head *iter;
5376
5377        netdev_for_each_lower_dev(br_dev, dev, iter) {
5378                if (netif_is_vxlan(dev))
5379                        num_vxlans++;
5380        }
5381
5382        return num_vxlans > 1;
5383}
5384
5385static bool mlxsw_sp_bridge_vxlan_vlan_is_valid(struct net_device *br_dev)
5386{
5387        DECLARE_BITMAP(vlans, VLAN_N_VID) = {0};
5388        struct net_device *dev;
5389        struct list_head *iter;
5390
5391        netdev_for_each_lower_dev(br_dev, dev, iter) {
5392                u16 pvid;
5393                int err;
5394
5395                if (!netif_is_vxlan(dev))
5396                        continue;
5397
5398                err = mlxsw_sp_vxlan_mapped_vid(dev, &pvid);
5399                if (err || !pvid)
5400                        continue;
5401
5402                if (test_and_set_bit(pvid, vlans))
5403                        return false;
5404        }
5405
5406        return true;
5407}
5408
5409static bool mlxsw_sp_bridge_vxlan_is_valid(struct net_device *br_dev,
5410                                           struct netlink_ext_ack *extack)
5411{
5412        if (br_multicast_enabled(br_dev)) {
5413                NL_SET_ERR_MSG_MOD(extack, "Multicast can not be enabled on a bridge with a VxLAN device");
5414                return false;
5415        }
5416
5417        if (!br_vlan_enabled(br_dev) &&
5418            mlxsw_sp_bridge_has_multiple_vxlans(br_dev)) {
5419                NL_SET_ERR_MSG_MOD(extack, "Multiple VxLAN devices are not supported in a VLAN-unaware bridge");
5420                return false;
5421        }
5422
5423        if (br_vlan_enabled(br_dev) &&
5424            !mlxsw_sp_bridge_vxlan_vlan_is_valid(br_dev)) {
5425                NL_SET_ERR_MSG_MOD(extack, "Multiple VxLAN devices cannot have the same VLAN as PVID and egress untagged");
5426                return false;
5427        }
5428
5429        return true;
5430}
5431
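/* Handle topology changes above a front-panel port: validate the new
 * upper device on PRECHANGEUPPER and join/leave bridges, LAGs and OVS
 * masters on CHANGEUPPER.
 */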
5432static int mlxsw_sp_netdevice_port_upper_event(struct net_device *lower_dev,
5433                                               struct net_device *dev,
5434                                               unsigned long event, void *ptr)
5435{
5436        struct netdev_notifier_changeupper_info *info;
5437        struct mlxsw_sp_port *mlxsw_sp_port;
5438        struct netlink_ext_ack *extack;
5439        struct net_device *upper_dev;
5440        struct mlxsw_sp *mlxsw_sp;
5441        int err = 0;
5442
5443        mlxsw_sp_port = netdev_priv(dev);
5444        mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
5445        info = ptr;
5446        extack = netdev_notifier_info_to_extack(&info->info);
5447
5448        switch (event) {
5449        case NETDEV_PRECHANGEUPPER:
5450                upper_dev = info->upper_dev;
5451                if (!is_vlan_dev(upper_dev) &&
5452                    !netif_is_lag_master(upper_dev) &&
5453                    !netif_is_bridge_master(upper_dev) &&
5454                    !netif_is_ovs_master(upper_dev) &&
5455                    !netif_is_macvlan(upper_dev)) {
5456                        NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
5457                        return -EINVAL;
5458                }
5459                if (!info->linking)
5460                        break;
5461                if (netif_is_bridge_master(upper_dev) &&
5462                    !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp, upper_dev) &&
5463                    mlxsw_sp_bridge_has_vxlan(upper_dev) &&
5464                    !mlxsw_sp_bridge_vxlan_is_valid(upper_dev, extack))
5465                        return -EOPNOTSUPP;
5466                if (netdev_has_any_upper_dev(upper_dev) &&
5467                    (!netif_is_bridge_master(upper_dev) ||
5468                     !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp,
5469                                                          upper_dev))) {
5470                        NL_SET_ERR_MSG_MOD(extack, "Enslaving a port to a device that already has an upper device is not supported");
5471                        return -EINVAL;
5472                }
5473                if (netif_is_lag_master(upper_dev) &&
5474                    !mlxsw_sp_master_lag_check(mlxsw_sp, upper_dev,
5475                                               info->upper_info, extack))
5476                        return -EINVAL;
5477                if (netif_is_lag_master(upper_dev) && vlan_uses_dev(dev)) {
5478                        NL_SET_ERR_MSG_MOD(extack, "Master device is a LAG master and this device has a VLAN");
5479                        return -EINVAL;
5480                }
5481                if (netif_is_lag_port(dev) && is_vlan_dev(upper_dev) &&
5482                    !netif_is_lag_master(vlan_dev_real_dev(upper_dev))) {
5483                        NL_SET_ERR_MSG_MOD(extack, "Can not put a VLAN on a LAG port");
5484                        return -EINVAL;
5485                }
5486                if (netif_is_macvlan(upper_dev) &&
5487                    !mlxsw_sp_rif_find_by_dev(mlxsw_sp, lower_dev)) {
5488                        NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces");
5489                        return -EOPNOTSUPP;
5490                }
5491                if (netif_is_ovs_master(upper_dev) && vlan_uses_dev(dev)) {
5492                        NL_SET_ERR_MSG_MOD(extack, "Master device is an OVS master and this device has a VLAN");
5493                        return -EINVAL;
5494                }
5495                if (netif_is_ovs_port(dev) && is_vlan_dev(upper_dev)) {
5496                        NL_SET_ERR_MSG_MOD(extack, "Can not put a VLAN on an OVS port");
5497                        return -EINVAL;
5498                }
5499                break;
5500        case NETDEV_CHANGEUPPER:
5501                upper_dev = info->upper_dev;
5502                if (netif_is_bridge_master(upper_dev)) {
5503                        if (info->linking)
5504                                err = mlxsw_sp_port_bridge_join(mlxsw_sp_port,
5505                                                                lower_dev,
5506                                                                upper_dev,
5507                                                                extack);
5508                        else
5509                                mlxsw_sp_port_bridge_leave(mlxsw_sp_port,
5510                                                           lower_dev,
5511                                                           upper_dev);
5512                } else if (netif_is_lag_master(upper_dev)) {
5513                        if (info->linking) {
5514                                err = mlxsw_sp_port_lag_join(mlxsw_sp_port,
5515                                                             upper_dev);
5516                        } else {
5517                                mlxsw_sp_port_lag_col_dist_disable(mlxsw_sp_port);
5518                                mlxsw_sp_port_lag_leave(mlxsw_sp_port,
5519                                                        upper_dev);
5520                        }
5521                } else if (netif_is_ovs_master(upper_dev)) {
5522                        if (info->linking)
5523                                err = mlxsw_sp_port_ovs_join(mlxsw_sp_port);
5524                        else
5525                                mlxsw_sp_port_ovs_leave(mlxsw_sp_port);
5526                } else if (netif_is_macvlan(upper_dev)) {
5527                        if (!info->linking)
5528                                mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev);
5529                } else if (is_vlan_dev(upper_dev)) {
5530                        struct net_device *br_dev;
5531
5532                        if (!netif_is_bridge_port(upper_dev))
5533                                break;
5534                        if (info->linking)
5535                                break;
5536                        br_dev = netdev_master_upper_dev_get(upper_dev);
5537                        mlxsw_sp_port_bridge_leave(mlxsw_sp_port, upper_dev,
5538                                                   br_dev);
5539                }
5540                break;
5541        }
5542
5543        return err;
5544}
5545
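/* Propagate LAG lower state changes of a port that is a LAG member to the
 * device's LAG configuration.
 */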
5546static int mlxsw_sp_netdevice_port_lower_event(struct net_device *dev,
5547                                               unsigned long event, void *ptr)
5548{
5549        struct netdev_notifier_changelowerstate_info *info;
5550        struct mlxsw_sp_port *mlxsw_sp_port;
5551        int err;
5552
5553        mlxsw_sp_port = netdev_priv(dev);
5554        info = ptr;
5555
5556        switch (event) {
5557        case NETDEV_CHANGELOWERSTATE:
5558                if (netif_is_lag_port(dev) && mlxsw_sp_port->lagged) {
5559                        err = mlxsw_sp_port_lag_changed(mlxsw_sp_port,
5560                                                        info->lower_state_info);
5561                        if (err)
5562                                netdev_err(dev, "Failed to reflect link aggregation lower state change\n");
5563                }
5564                break;
5565        }
5566
5567        return 0;
5568}
5569
5570static int mlxsw_sp_netdevice_port_event(struct net_device *lower_dev,
5571                                         struct net_device *port_dev,
5572                                         unsigned long event, void *ptr)
5573{
5574        switch (event) {
5575        case NETDEV_PRECHANGEUPPER:
5576        case NETDEV_CHANGEUPPER:
5577                return mlxsw_sp_netdevice_port_upper_event(lower_dev, port_dev,
5578                                                           event, ptr);
5579        case NETDEV_CHANGELOWERSTATE:
5580                return mlxsw_sp_netdevice_port_lower_event(port_dev, event,
5581                                                           ptr);
5582        }
5583
5584        return 0;
5585}
5586
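/* An event on a LAG device is only relevant to its mlxsw member ports;
 * fan the event out to each of them.
 */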
5587static int mlxsw_sp_netdevice_lag_event(struct net_device *lag_dev,
5588                                        unsigned long event, void *ptr)
5589{
5590        struct net_device *dev;
5591        struct list_head *iter;
5592        int ret;
5593
5594        netdev_for_each_lower_dev(lag_dev, dev, iter) {
5595                if (mlxsw_sp_port_dev_check(dev)) {
5596                        ret = mlxsw_sp_netdevice_port_event(lag_dev, dev, event,
5597                                                            ptr);
5598                        if (ret)
5599                                return ret;
5600                }
5601        }
5602
5603        return 0;
5604}
5605
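/* Handle upper device events for a VLAN device configured on top of a
 * physical port: only bridge and macvlan uppers are supported.
 */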
5606static int mlxsw_sp_netdevice_port_vlan_event(struct net_device *vlan_dev,
5607                                              struct net_device *dev,
5608                                              unsigned long event, void *ptr,
5609                                              u16 vid)
5610{
5611        struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
5612        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
5613        struct netdev_notifier_changeupper_info *info = ptr;
5614        struct netlink_ext_ack *extack;
5615        struct net_device *upper_dev;
5616        int err = 0;
5617
5618        extack = netdev_notifier_info_to_extack(&info->info);
5619
5620        switch (event) {
5621        case NETDEV_PRECHANGEUPPER:
5622                upper_dev = info->upper_dev;
5623                if (!netif_is_bridge_master(upper_dev) &&
5624                    !netif_is_macvlan(upper_dev)) {
5625                        NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
5626                        return -EINVAL;
5627                }
5628                if (!info->linking)
5629                        break;
5630                if (netif_is_bridge_master(upper_dev) &&
5631                    !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp, upper_dev) &&
5632                    mlxsw_sp_bridge_has_vxlan(upper_dev) &&
5633                    !mlxsw_sp_bridge_vxlan_is_valid(upper_dev, extack))
5634                        return -EOPNOTSUPP;
5635                if (netdev_has_any_upper_dev(upper_dev) &&
5636                    (!netif_is_bridge_master(upper_dev) ||
5637                     !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp,
5638                                                          upper_dev))) {
5639                        NL_SET_ERR_MSG_MOD(extack, "Enslaving a port to a device that already has an upper device is not supported");
5640                        return -EINVAL;
5641                }
5642                if (netif_is_macvlan(upper_dev) &&
5643                    !mlxsw_sp_rif_find_by_dev(mlxsw_sp, vlan_dev)) {
5644                        NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces");
5645                        return -EOPNOTSUPP;
5646                }
5647                break;
5648        case NETDEV_CHANGEUPPER:
5649                upper_dev = info->upper_dev;
5650                if (netif_is_bridge_master(upper_dev)) {
5651                        if (info->linking)
5652                                err = mlxsw_sp_port_bridge_join(mlxsw_sp_port,
5653                                                                vlan_dev,
5654                                                                upper_dev,
5655                                                                extack);
5656                        else
5657                                mlxsw_sp_port_bridge_leave(mlxsw_sp_port,
5658                                                           vlan_dev,
5659                                                           upper_dev);
5660                } else if (netif_is_macvlan(upper_dev)) {
5661                        if (!info->linking)
5662                                mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev);
5663                } else {
5664                        err = -EINVAL;
5665                        WARN_ON(1);
5666                }
5667                break;
5668        }
5669
5670        return err;
5671}
5672
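/* Handle upper device events for a VLAN device configured on top of a LAG
 * by forwarding them to each mlxsw member port.
 */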
5673static int mlxsw_sp_netdevice_lag_port_vlan_event(struct net_device *vlan_dev,
5674                                                  struct net_device *lag_dev,
5675                                                  unsigned long event,
5676                                                  void *ptr, u16 vid)
5677{
5678        struct net_device *dev;
5679        struct list_head *iter;
5680        int ret;
5681
5682        netdev_for_each_lower_dev(lag_dev, dev, iter) {
5683                if (mlxsw_sp_port_dev_check(dev)) {
5684                        ret = mlxsw_sp_netdevice_port_vlan_event(vlan_dev, dev,
5685                                                                 event, ptr,
5686                                                                 vid);
5687                        if (ret)
5688                                return ret;
5689                }
5690        }
5691
5692        return 0;
5693}
5694
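/* Handle upper device events for a VLAN device configured on top of a
 * bridge: only macvlan uppers, on top of a router interface, are
 * supported.
 */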
5695static int mlxsw_sp_netdevice_bridge_vlan_event(struct net_device *vlan_dev,
5696                                                struct net_device *br_dev,
5697                                                unsigned long event, void *ptr,
5698                                                u16 vid)
5699{
5700        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(vlan_dev);
5701        struct netdev_notifier_changeupper_info *info = ptr;
5702        struct netlink_ext_ack *extack;
5703        struct net_device *upper_dev;
5704
5705        if (!mlxsw_sp)
5706                return 0;
5707
5708        extack = netdev_notifier_info_to_extack(&info->info);
5709
5710        switch (event) {
5711        case NETDEV_PRECHANGEUPPER:
5712                upper_dev = info->upper_dev;
5713                if (!netif_is_macvlan(upper_dev)) {
5714                        NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
5715                        return -EOPNOTSUPP;
5716                }
5717                if (!info->linking)
5718                        break;
5719                if (netif_is_macvlan(upper_dev) &&
5720                    !mlxsw_sp_rif_find_by_dev(mlxsw_sp, vlan_dev)) {
5721                        NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces");
5722                        return -EOPNOTSUPP;
5723                }
5724                break;
5725        case NETDEV_CHANGEUPPER:
5726                upper_dev = info->upper_dev;
5727                if (info->linking)
5728                        break;
5729                if (netif_is_macvlan(upper_dev))
5730                        mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev);
5731                break;
5732        }
5733
5734        return 0;
5735}
5736
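/* Dispatch an event on a VLAN device according to its real device:
 * physical port, LAG or bridge.
 */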
5737static int mlxsw_sp_netdevice_vlan_event(struct net_device *vlan_dev,
5738                                         unsigned long event, void *ptr)
5739{
5740        struct net_device *real_dev = vlan_dev_real_dev(vlan_dev);
5741        u16 vid = vlan_dev_vlan_id(vlan_dev);
5742
5743        if (mlxsw_sp_port_dev_check(real_dev))
5744                return mlxsw_sp_netdevice_port_vlan_event(vlan_dev, real_dev,
5745                                                          event, ptr, vid);
5746        else if (netif_is_lag_master(real_dev))
5747                return mlxsw_sp_netdevice_lag_port_vlan_event(vlan_dev,
5748                                                              real_dev, event,
5749                                                              ptr, vid);
5750        else if (netif_is_bridge_master(real_dev))
5751                return mlxsw_sp_netdevice_bridge_vlan_event(vlan_dev, real_dev,
5752                                                            event, ptr, vid);
5753
5754        return 0;
5755}
5756
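/* Handle upper device events for an offloaded bridge device itself: only
 * VLAN and macvlan uppers are supported.
 */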
5757static int mlxsw_sp_netdevice_bridge_event(struct net_device *br_dev,
5758                                           unsigned long event, void *ptr)
5759{
5760        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(br_dev);
5761        struct netdev_notifier_changeupper_info *info = ptr;
5762        struct netlink_ext_ack *extack;
5763        struct net_device *upper_dev;
5764
5765        if (!mlxsw_sp)
5766                return 0;
5767
5768        extack = netdev_notifier_info_to_extack(&info->info);
5769
5770        switch (event) {
5771        case NETDEV_PRECHANGEUPPER:
5772                upper_dev = info->upper_dev;
5773                if (!is_vlan_dev(upper_dev) && !netif_is_macvlan(upper_dev)) {
5774                        NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
5775                        return -EOPNOTSUPP;
5776                }
5777                if (!info->linking)
5778                        break;
5779                if (netif_is_macvlan(upper_dev) &&
5780                    !mlxsw_sp_rif_find_by_dev(mlxsw_sp, br_dev)) {
5781                        NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces");
5782                        return -EOPNOTSUPP;
5783                }
5784                break;
5785        case NETDEV_CHANGEUPPER:
5786                upper_dev = info->upper_dev;
5787                if (info->linking)
5788                        break;
5789                if (is_vlan_dev(upper_dev))
5790                        mlxsw_sp_rif_destroy_by_dev(mlxsw_sp, upper_dev);
5791                if (netif_is_macvlan(upper_dev))
5792                        mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev);
5793                break;
5794        }
5795
5796        return 0;
5797}
5798
5799static int mlxsw_sp_netdevice_macvlan_event(struct net_device *macvlan_dev,
5800                                            unsigned long event, void *ptr)
5801{
5802        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(macvlan_dev);
5803        struct netdev_notifier_changeupper_info *info = ptr;
5804        struct netlink_ext_ack *extack;
5805
5806        if (!mlxsw_sp || event != NETDEV_PRECHANGEUPPER)
5807                return 0;
5808
5809        extack = netdev_notifier_info_to_extack(&info->info);
5810
5811        /* VRF enslavement is handled in mlxsw_sp_netdevice_vrf_event() */
5812        NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
5813
5814        return -EOPNOTSUPP;
5815}
5816
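/* Return true if the event is an enslavement to, or release from, an L3
 * master (VRF) device.
 */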
5817static bool mlxsw_sp_is_vrf_event(unsigned long event, void *ptr)
5818{
5819        struct netdev_notifier_changeupper_info *info = ptr;
5820
5821        if (event != NETDEV_PRECHANGEUPPER && event != NETDEV_CHANGEUPPER)
5822                return false;
5823        return netif_is_l3_master(info->upper_dev);
5824}
5825
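/* Handle events on a VxLAN device: offload the tunnel when it is linked to
 * an offloaded bridge or brought up under one, and unoffload it when it is
 * unlinked or goes down.
 */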
5826static int mlxsw_sp_netdevice_vxlan_event(struct mlxsw_sp *mlxsw_sp,
5827                                          struct net_device *dev,
5828                                          unsigned long event, void *ptr)
5829{
5830        struct netdev_notifier_changeupper_info *cu_info;
5831        struct netdev_notifier_info *info = ptr;
5832        struct netlink_ext_ack *extack;
5833        struct net_device *upper_dev;
5834
5835        extack = netdev_notifier_info_to_extack(info);
5836
5837        switch (event) {
5838        case NETDEV_CHANGEUPPER:
5839                cu_info = container_of(info,
5840                                       struct netdev_notifier_changeupper_info,
5841                                       info);
5842                upper_dev = cu_info->upper_dev;
5843                if (!netif_is_bridge_master(upper_dev))
5844                        return 0;
5845                if (!mlxsw_sp_lower_get(upper_dev))
5846                        return 0;
5847                if (!mlxsw_sp_bridge_vxlan_is_valid(upper_dev, extack))
5848                        return -EOPNOTSUPP;
5849                if (cu_info->linking) {
5850                        if (!netif_running(dev))
5851                                return 0;
5852                        /* When the bridge is VLAN-aware, the VNI of the VxLAN
5853                         * device needs to be mapped to a VLAN, but at this
5854                         * point no VLANs are configured on the VxLAN device
5855                         */
5856                        if (br_vlan_enabled(upper_dev))
5857                                return 0;
5858                        return mlxsw_sp_bridge_vxlan_join(mlxsw_sp, upper_dev,
5859                                                          dev, 0, extack);
5860                } else {
5861                        /* VLANs were already flushed, which triggered the
5862                         * necessary cleanup
5863                         */
5864                        if (br_vlan_enabled(upper_dev))
5865                                return 0;
5866                        mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, dev);
5867                }
5868                break;
5869        case NETDEV_PRE_UP:
5870                upper_dev = netdev_master_upper_dev_get(dev);
5871                if (!upper_dev)
5872                        return 0;
5873                if (!netif_is_bridge_master(upper_dev))
5874                        return 0;
5875                if (!mlxsw_sp_lower_get(upper_dev))
5876                        return 0;
5877                return mlxsw_sp_bridge_vxlan_join(mlxsw_sp, upper_dev, dev, 0,
5878                                                  extack);
5879        case NETDEV_DOWN:
5880                upper_dev = netdev_master_upper_dev_get(dev);
5881                if (!upper_dev)
5882                        return 0;
5883                if (!netif_is_bridge_master(upper_dev))
5884                        return 0;
5885                if (!mlxsw_sp_lower_get(upper_dev))
5886                        return 0;
5887                mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, dev);
5888                break;
5889        }
5890
5891        return 0;
5892}
5893
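/* Main netdevice notifier: invalidate SPAN entries that point to an
 * unregistered port, refresh the remaining SPAN state and dispatch the
 * event according to the device type.
 */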
5894static int mlxsw_sp_netdevice_event(struct notifier_block *nb,
5895                                    unsigned long event, void *ptr)
5896{
5897        struct net_device *dev = netdev_notifier_info_to_dev(ptr);
5898        struct mlxsw_sp_span_entry *span_entry;
5899        struct mlxsw_sp *mlxsw_sp;
5900        int err = 0;
5901
5902        mlxsw_sp = container_of(nb, struct mlxsw_sp, netdevice_nb);
5903        if (event == NETDEV_UNREGISTER) {
5904                span_entry = mlxsw_sp_span_entry_find_by_port(mlxsw_sp, dev);
5905                if (span_entry)
5906                        mlxsw_sp_span_entry_invalidate(mlxsw_sp, span_entry);
5907        }
5908        mlxsw_sp_span_respin(mlxsw_sp);
5909
5910        if (netif_is_vxlan(dev))
5911                err = mlxsw_sp_netdevice_vxlan_event(mlxsw_sp, dev, event, ptr);
5912        if (mlxsw_sp_netdev_is_ipip_ol(mlxsw_sp, dev))
5913                err = mlxsw_sp_netdevice_ipip_ol_event(mlxsw_sp, dev,
5914                                                       event, ptr);
5915        else if (mlxsw_sp_netdev_is_ipip_ul(mlxsw_sp, dev))
5916                err = mlxsw_sp_netdevice_ipip_ul_event(mlxsw_sp, dev,
5917                                                       event, ptr);
5918        else if (event == NETDEV_PRE_CHANGEADDR ||
5919                 event == NETDEV_CHANGEADDR ||
5920                 event == NETDEV_CHANGEMTU)
5921                err = mlxsw_sp_netdevice_router_port_event(dev, event, ptr);
5922        else if (mlxsw_sp_is_vrf_event(event, ptr))
5923                err = mlxsw_sp_netdevice_vrf_event(dev, event, ptr);
5924        else if (mlxsw_sp_port_dev_check(dev))
5925                err = mlxsw_sp_netdevice_port_event(dev, dev, event, ptr);
5926        else if (netif_is_lag_master(dev))
5927                err = mlxsw_sp_netdevice_lag_event(dev, event, ptr);
5928        else if (is_vlan_dev(dev))
5929                err = mlxsw_sp_netdevice_vlan_event(dev, event, ptr);
5930        else if (netif_is_bridge_master(dev))
5931                err = mlxsw_sp_netdevice_bridge_event(dev, event, ptr);
5932        else if (netif_is_macvlan(dev))
5933                err = mlxsw_sp_netdevice_macvlan_event(dev, event, ptr);
5934
5935        return notifier_from_errno(err);
5936}
5937
5938static struct notifier_block mlxsw_sp_inetaddr_valid_nb __read_mostly = {
5939        .notifier_call = mlxsw_sp_inetaddr_valid_event,
5940};
5941
5942static struct notifier_block mlxsw_sp_inet6addr_valid_nb __read_mostly = {
5943        .notifier_call = mlxsw_sp_inet6addr_valid_event,
5944};
5945
5946static const struct pci_device_id mlxsw_sp1_pci_id_table[] = {
5947        {PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM), 0},
5948        {0, },
5949};
5950
5951static struct pci_driver mlxsw_sp1_pci_driver = {
5952        .name = mlxsw_sp1_driver_name,
5953        .id_table = mlxsw_sp1_pci_id_table,
5954};
5955
5956static const struct pci_device_id mlxsw_sp2_pci_id_table[] = {
5957        {PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM2), 0},
5958        {0, },
5959};
5960
5961static struct pci_driver mlxsw_sp2_pci_driver = {
5962        .name = mlxsw_sp2_driver_name,
5963        .id_table = mlxsw_sp2_pci_id_table,
5964};
5965
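/* Register the address validator notifiers and the Spectrum-1 / Spectrum-2
 * core and PCI drivers, unwinding in reverse order on failure.
 */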
5966static int __init mlxsw_sp_module_init(void)
5967{
5968        int err;
5969
5970        register_inetaddr_validator_notifier(&mlxsw_sp_inetaddr_valid_nb);
5971        register_inet6addr_validator_notifier(&mlxsw_sp_inet6addr_valid_nb);
5972
5973        err = mlxsw_core_driver_register(&mlxsw_sp1_driver);
5974        if (err)
5975                goto err_sp1_core_driver_register;
5976
5977        err = mlxsw_core_driver_register(&mlxsw_sp2_driver);
5978        if (err)
5979                goto err_sp2_core_driver_register;
5980
5981        err = mlxsw_pci_driver_register(&mlxsw_sp1_pci_driver);
5982        if (err)
5983                goto err_sp1_pci_driver_register;
5984
5985        err = mlxsw_pci_driver_register(&mlxsw_sp2_pci_driver);
5986        if (err)
5987                goto err_sp2_pci_driver_register;
5988
5989        return 0;
5990
5991err_sp2_pci_driver_register:
5992        mlxsw_pci_driver_unregister(&mlxsw_sp1_pci_driver);
5993err_sp1_pci_driver_register:
5994        mlxsw_core_driver_unregister(&mlxsw_sp2_driver);
5995err_sp2_core_driver_register:
5996        mlxsw_core_driver_unregister(&mlxsw_sp1_driver);
5997err_sp1_core_driver_register:
5998        unregister_inet6addr_validator_notifier(&mlxsw_sp_inet6addr_valid_nb);
5999        unregister_inetaddr_validator_notifier(&mlxsw_sp_inetaddr_valid_nb);
6000        return err;
6001}
6002
6003static void __exit mlxsw_sp_module_exit(void)
6004{
6005        mlxsw_pci_driver_unregister(&mlxsw_sp2_pci_driver);
6006        mlxsw_pci_driver_unregister(&mlxsw_sp1_pci_driver);
6007        mlxsw_core_driver_unregister(&mlxsw_sp2_driver);
6008        mlxsw_core_driver_unregister(&mlxsw_sp1_driver);
6009        unregister_inet6addr_validator_notifier(&mlxsw_sp_inet6addr_valid_nb);
6010        unregister_inetaddr_validator_notifier(&mlxsw_sp_inetaddr_valid_nb);
6011}
6012
6013module_init(mlxsw_sp_module_init);
6014module_exit(mlxsw_sp_module_exit);
6015
6016MODULE_LICENSE("Dual BSD/GPL");
6017MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
6018MODULE_DESCRIPTION("Mellanox Spectrum driver");
6019MODULE_DEVICE_TABLE(pci, mlxsw_sp1_pci_id_table);
6020MODULE_DEVICE_TABLE(pci, mlxsw_sp2_pci_id_table);
6021MODULE_FIRMWARE(MLXSW_SP1_FW_FILENAME);
6022