linux/drivers/net/ethernet/qlogic/qed/qed_main.c
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
/* QLogic qed NIC Driver
 * Copyright (c) 2015-2017  QLogic Corporation
 * Copyright (c) 2019-2020 Marvell International Ltd.
 */

#include <linux/stddef.h>
#include <linux/pci.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/dma-mapping.h>
#include <linux/string.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <linux/ethtool.h>
#include <linux/etherdevice.h>
#include <linux/vmalloc.h>
#include <linux/crash_dump.h>
#include <linux/crc32.h>
#include <linux/qed/qed_if.h>
#include <linux/qed/qed_ll2_if.h>
#include <net/devlink.h>
#include <linux/aer.h>
#include <linux/phylink.h>

#include "qed.h"
#include "qed_sriov.h"
#include "qed_sp.h"
#include "qed_dev_api.h"
#include "qed_ll2.h"
#include "qed_fcoe.h"
#include "qed_iscsi.h"

#include "qed_mcp.h"
#include "qed_reg_addr.h"
#include "qed_hw.h"
#include "qed_selftest.h"
#include "qed_debug.h"
#include "qed_devlink.h"

#define QED_ROCE_QPS                    (8192)
#define QED_ROCE_DPIS                   (8)
#define QED_RDMA_SRQS                   QED_ROCE_QPS
#define QED_NVM_CFG_GET_FLAGS           0xA
#define QED_NVM_CFG_GET_PF_FLAGS        0x1A
#define QED_NVM_CFG_MAX_ATTRS           50

static char version[] =
        "QLogic FastLinQ 4xxxx Core Module qed\n";

MODULE_DESCRIPTION("QLogic FastLinQ 4xxxx Core Module");
MODULE_LICENSE("GPL");

#define FW_FILE_VERSION                         \
        __stringify(FW_MAJOR_VERSION) "."       \
        __stringify(FW_MINOR_VERSION) "."       \
        __stringify(FW_REVISION_VERSION) "."    \
        __stringify(FW_ENGINEERING_VERSION)

#define QED_FW_FILE_NAME        \
        "qed/qed_init_values_zipped-" FW_FILE_VERSION ".bin"

MODULE_FIRMWARE(QED_FW_FILE_NAME);

/* MFW speed capabilities maps */

struct qed_mfw_speed_map {
        u32             mfw_val;
        __ETHTOOL_DECLARE_LINK_MODE_MASK(caps);

        const u32       *cap_arr;
        u32             arr_size;
};

#define QED_MFW_SPEED_MAP(type, arr)            \
{                                               \
        .mfw_val        = (type),               \
        .cap_arr        = (arr),                \
        .arr_size       = ARRAY_SIZE(arr),      \
}

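/* Each map pairs an MFW speed value with an __initconst array of ethtool
 * link-mode bits. qed_mfw_speed_maps_init() folds each array into the
 * 'caps' linkmode mask at module init and then drops the array reference,
 * since __initconst data is discarded once init completes.
 */
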
static const u32 qed_mfw_ext_1g[] __initconst = {
        ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
        ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
        ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
};

static const u32 qed_mfw_ext_10g[] __initconst = {
        ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
        ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
        ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT,
        ETHTOOL_LINK_MODE_10000baseR_FEC_BIT,
        ETHTOOL_LINK_MODE_10000baseCR_Full_BIT,
        ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
        ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,
        ETHTOOL_LINK_MODE_10000baseLRM_Full_BIT,
};

static const u32 qed_mfw_ext_20g[] __initconst = {
        ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT,
};

static const u32 qed_mfw_ext_25g[] __initconst = {
        ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
        ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
        ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
};

static const u32 qed_mfw_ext_40g[] __initconst = {
        ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
        ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
        ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
        ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
};

static const u32 qed_mfw_ext_50g_base_r[] __initconst = {
        ETHTOOL_LINK_MODE_50000baseKR_Full_BIT,
        ETHTOOL_LINK_MODE_50000baseCR_Full_BIT,
        ETHTOOL_LINK_MODE_50000baseSR_Full_BIT,
        ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
        ETHTOOL_LINK_MODE_50000baseDR_Full_BIT,
};

static const u32 qed_mfw_ext_50g_base_r2[] __initconst = {
        ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
        ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
        ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
};

static const u32 qed_mfw_ext_100g_base_r2[] __initconst = {
        ETHTOOL_LINK_MODE_100000baseKR2_Full_BIT,
        ETHTOOL_LINK_MODE_100000baseSR2_Full_BIT,
        ETHTOOL_LINK_MODE_100000baseCR2_Full_BIT,
        ETHTOOL_LINK_MODE_100000baseDR2_Full_BIT,
        ETHTOOL_LINK_MODE_100000baseLR2_ER2_FR2_Full_BIT,
};

static const u32 qed_mfw_ext_100g_base_r4[] __initconst = {
        ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
        ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
        ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
        ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
};

static struct qed_mfw_speed_map qed_mfw_ext_maps[] __ro_after_init = {
        QED_MFW_SPEED_MAP(ETH_EXT_ADV_SPEED_1G, qed_mfw_ext_1g),
        QED_MFW_SPEED_MAP(ETH_EXT_ADV_SPEED_10G, qed_mfw_ext_10g),
        QED_MFW_SPEED_MAP(ETH_EXT_ADV_SPEED_20G, qed_mfw_ext_20g),
        QED_MFW_SPEED_MAP(ETH_EXT_ADV_SPEED_25G, qed_mfw_ext_25g),
        QED_MFW_SPEED_MAP(ETH_EXT_ADV_SPEED_40G, qed_mfw_ext_40g),
        QED_MFW_SPEED_MAP(ETH_EXT_ADV_SPEED_50G_BASE_R,
                          qed_mfw_ext_50g_base_r),
        QED_MFW_SPEED_MAP(ETH_EXT_ADV_SPEED_50G_BASE_R2,
                          qed_mfw_ext_50g_base_r2),
        QED_MFW_SPEED_MAP(ETH_EXT_ADV_SPEED_100G_BASE_R2,
                          qed_mfw_ext_100g_base_r2),
        QED_MFW_SPEED_MAP(ETH_EXT_ADV_SPEED_100G_BASE_R4,
                          qed_mfw_ext_100g_base_r4),
};

static const u32 qed_mfw_legacy_1g[] __initconst = {
        ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
        ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
        ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
};

static const u32 qed_mfw_legacy_10g[] __initconst = {
        ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
        ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
        ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT,
        ETHTOOL_LINK_MODE_10000baseR_FEC_BIT,
        ETHTOOL_LINK_MODE_10000baseCR_Full_BIT,
        ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
        ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,
        ETHTOOL_LINK_MODE_10000baseLRM_Full_BIT,
};

static const u32 qed_mfw_legacy_20g[] __initconst = {
        ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT,
};

static const u32 qed_mfw_legacy_25g[] __initconst = {
        ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
        ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
        ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
};

static const u32 qed_mfw_legacy_40g[] __initconst = {
        ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
        ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
        ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
        ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
};

static const u32 qed_mfw_legacy_50g[] __initconst = {
        ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
        ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
        ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
};

static const u32 qed_mfw_legacy_bb_100g[] __initconst = {
        ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
        ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
        ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
        ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
};

static struct qed_mfw_speed_map qed_mfw_legacy_maps[] __ro_after_init = {
        QED_MFW_SPEED_MAP(NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G,
                          qed_mfw_legacy_1g),
        QED_MFW_SPEED_MAP(NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G,
                          qed_mfw_legacy_10g),
        QED_MFW_SPEED_MAP(NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G,
                          qed_mfw_legacy_20g),
        QED_MFW_SPEED_MAP(NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G,
                          qed_mfw_legacy_25g),
        QED_MFW_SPEED_MAP(NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G,
                          qed_mfw_legacy_40g),
        QED_MFW_SPEED_MAP(NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G,
                          qed_mfw_legacy_50g),
        QED_MFW_SPEED_MAP(NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G,
                          qed_mfw_legacy_bb_100g),
};

static void __init qed_mfw_speed_map_populate(struct qed_mfw_speed_map *map)
{
        linkmode_set_bit_array(map->cap_arr, map->arr_size, map->caps);

        map->cap_arr = NULL;
        map->arr_size = 0;
}

static void __init qed_mfw_speed_maps_init(void)
{
        u32 i;

        for (i = 0; i < ARRAY_SIZE(qed_mfw_ext_maps); i++)
                qed_mfw_speed_map_populate(qed_mfw_ext_maps + i);

        for (i = 0; i < ARRAY_SIZE(qed_mfw_legacy_maps); i++)
                qed_mfw_speed_map_populate(qed_mfw_legacy_maps + i);
}

static int __init qed_init(void)
{
        pr_info("%s", version);

        qed_mfw_speed_maps_init();

        return 0;
}
module_init(qed_init);

static void __exit qed_exit(void)
{
        /* To prevent marking this module as "permanent" */
}
module_exit(qed_exit);

/* Check if the DMA controller on the machine can properly handle the DMA
 * addressing required by the device.
 */
static int qed_set_coherency_mask(struct qed_dev *cdev)
{
        struct device *dev = &cdev->pdev->dev;

        if (dma_set_mask(dev, DMA_BIT_MASK(64)) == 0) {
                if (dma_set_coherent_mask(dev, DMA_BIT_MASK(64)) != 0) {
                        DP_NOTICE(cdev,
                                  "Can't request 64-bit consistent allocations\n");
                        return -EIO;
                }
        } else if (dma_set_mask(dev, DMA_BIT_MASK(32)) != 0) {
                DP_NOTICE(cdev, "Can't request 64b/32b DMA addresses\n");
                return -EIO;
        }

        return 0;
}

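/* Note: the two-step mask setup above could likely be collapsed into the
 * generic dma_set_mask_and_coherent() helper. A minimal sketch (illustrative
 * only; its 32-bit fallback semantics differ slightly from the code above):
 *
 *        if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)) &&
 *            dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)))
 *                return -EIO;
 */
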
static void qed_free_pci(struct qed_dev *cdev)
{
        struct pci_dev *pdev = cdev->pdev;

        pci_disable_pcie_error_reporting(pdev);

        if (cdev->doorbells && cdev->db_size)
                iounmap(cdev->doorbells);
        if (cdev->regview)
                iounmap(cdev->regview);
        if (atomic_read(&pdev->enable_cnt) == 1)
                pci_release_regions(pdev);

        pci_disable_device(pdev);
}

#define PCI_REVISION_ID_ERROR_VAL       0xff

/* Performs PCI initializations as well as initializing PCI-related parameters
 * in the device structure. Returns 0 in case of success.
 */
static int qed_init_pci(struct qed_dev *cdev, struct pci_dev *pdev)
{
        u8 rev_id;
        int rc;

        cdev->pdev = pdev;

        rc = pci_enable_device(pdev);
        if (rc) {
                DP_NOTICE(cdev, "Cannot enable PCI device\n");
                goto err0;
        }

        if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
                DP_NOTICE(cdev, "No memory region found in bar #0\n");
                rc = -EIO;
                goto err1;
        }

        if (IS_PF(cdev) && !(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
                DP_NOTICE(cdev, "No memory region found in bar #2\n");
                rc = -EIO;
                goto err1;
        }

        if (atomic_read(&pdev->enable_cnt) == 1) {
                rc = pci_request_regions(pdev, "qed");
                if (rc) {
                        DP_NOTICE(cdev,
                                  "Failed to request PCI memory resources\n");
                        goto err1;
                }
                pci_set_master(pdev);
                pci_save_state(pdev);
        }

        pci_read_config_byte(pdev, PCI_REVISION_ID, &rev_id);
        if (rev_id == PCI_REVISION_ID_ERROR_VAL) {
                DP_NOTICE(cdev,
                          "Detected PCI device error [rev_id 0x%x]. Probably due to prior indication. Aborting.\n",
                          rev_id);
                rc = -ENODEV;
                goto err2;
        }
        if (!pci_is_pcie(pdev)) {
                DP_NOTICE(cdev, "The bus is not PCI Express\n");
                rc = -EIO;
                goto err2;
        }

        cdev->pci_params.pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
        if (IS_PF(cdev) && !cdev->pci_params.pm_cap)
                DP_NOTICE(cdev, "Cannot find power management capability\n");

        rc = qed_set_coherency_mask(cdev);
        if (rc)
                goto err2;

        cdev->pci_params.mem_start = pci_resource_start(pdev, 0);
        cdev->pci_params.mem_end = pci_resource_end(pdev, 0);
        cdev->pci_params.irq = pdev->irq;

        cdev->regview = pci_ioremap_bar(pdev, 0);
        if (!cdev->regview) {
                DP_NOTICE(cdev, "Cannot map register space, aborting\n");
                rc = -ENOMEM;
                goto err2;
        }

        cdev->db_phys_addr = pci_resource_start(cdev->pdev, 2);
        cdev->db_size = pci_resource_len(cdev->pdev, 2);
        if (!cdev->db_size) {
                if (IS_PF(cdev)) {
                        DP_NOTICE(cdev, "No Doorbell bar available\n");
                        return -EINVAL;
                } else {
                        return 0;
                }
        }

        cdev->doorbells = ioremap_wc(cdev->db_phys_addr, cdev->db_size);

        if (!cdev->doorbells) {
                DP_NOTICE(cdev, "Cannot map doorbell space\n");
                return -ENOMEM;
        }

        /* AER (Advanced Error Reporting) configuration */
        rc = pci_enable_pcie_error_reporting(pdev);
        if (rc)
                DP_VERBOSE(cdev, NETIF_MSG_DRV,
                           "Failed to configure PCIe AER [%d]\n", rc);

        return 0;

err2:
        pci_release_regions(pdev);
err1:
        pci_disable_device(pdev);
err0:
        return rc;
}

int qed_fill_dev_info(struct qed_dev *cdev,
                      struct qed_dev_info *dev_info)
{
        struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
        struct qed_hw_info *hw_info = &p_hwfn->hw_info;
        struct qed_tunnel_info *tun = &cdev->tunnel;
        struct qed_ptt  *ptt;

        memset(dev_info, 0, sizeof(struct qed_dev_info));

        if (tun->vxlan.tun_cls == QED_TUNN_CLSS_MAC_VLAN &&
            tun->vxlan.b_mode_enabled)
                dev_info->vxlan_enable = true;

        if (tun->l2_gre.b_mode_enabled && tun->ip_gre.b_mode_enabled &&
            tun->l2_gre.tun_cls == QED_TUNN_CLSS_MAC_VLAN &&
            tun->ip_gre.tun_cls == QED_TUNN_CLSS_MAC_VLAN)
                dev_info->gre_enable = true;

        if (tun->l2_geneve.b_mode_enabled && tun->ip_geneve.b_mode_enabled &&
            tun->l2_geneve.tun_cls == QED_TUNN_CLSS_MAC_VLAN &&
            tun->ip_geneve.tun_cls == QED_TUNN_CLSS_MAC_VLAN)
                dev_info->geneve_enable = true;

        dev_info->num_hwfns = cdev->num_hwfns;
        dev_info->pci_mem_start = cdev->pci_params.mem_start;
        dev_info->pci_mem_end = cdev->pci_params.mem_end;
        dev_info->pci_irq = cdev->pci_params.irq;
        dev_info->rdma_supported = QED_IS_RDMA_PERSONALITY(p_hwfn);
        dev_info->dev_type = cdev->type;
        ether_addr_copy(dev_info->hw_mac, hw_info->hw_mac_addr);

        if (IS_PF(cdev)) {
                dev_info->fw_major = FW_MAJOR_VERSION;
                dev_info->fw_minor = FW_MINOR_VERSION;
                dev_info->fw_rev = FW_REVISION_VERSION;
                dev_info->fw_eng = FW_ENGINEERING_VERSION;
                dev_info->b_inter_pf_switch = test_bit(QED_MF_INTER_PF_SWITCH,
                                                       &cdev->mf_bits);
                if (!test_bit(QED_MF_DISABLE_ARFS, &cdev->mf_bits))
                        dev_info->b_arfs_capable = true;
                dev_info->tx_switching = true;

                if (hw_info->b_wol_support == QED_WOL_SUPPORT_PME)
                        dev_info->wol_support = true;

                dev_info->smart_an = qed_mcp_is_smart_an_supported(p_hwfn);

                dev_info->abs_pf_id = QED_LEADING_HWFN(cdev)->abs_pf_id;
        } else {
                qed_vf_get_fw_version(&cdev->hwfns[0], &dev_info->fw_major,
                                      &dev_info->fw_minor, &dev_info->fw_rev,
                                      &dev_info->fw_eng);
        }

        if (IS_PF(cdev)) {
                ptt = qed_ptt_acquire(QED_LEADING_HWFN(cdev));
                if (ptt) {
                        qed_mcp_get_mfw_ver(QED_LEADING_HWFN(cdev), ptt,
                                            &dev_info->mfw_rev, NULL);

                        qed_mcp_get_mbi_ver(QED_LEADING_HWFN(cdev), ptt,
                                            &dev_info->mbi_version);

                        qed_mcp_get_flash_size(QED_LEADING_HWFN(cdev), ptt,
                                               &dev_info->flash_size);

                        qed_ptt_release(QED_LEADING_HWFN(cdev), ptt);
                }
        } else {
                qed_mcp_get_mfw_ver(QED_LEADING_HWFN(cdev), NULL,
                                    &dev_info->mfw_rev, NULL);
        }

        dev_info->mtu = hw_info->mtu;
        cdev->common_dev_info = *dev_info;

        return 0;
}

static void qed_free_cdev(struct qed_dev *cdev)
{
        kfree((void *)cdev);
}

static struct qed_dev *qed_alloc_cdev(struct pci_dev *pdev)
{
        struct qed_dev *cdev;

        cdev = kzalloc(sizeof(*cdev), GFP_KERNEL);
        if (!cdev)
                return cdev;

        qed_init_struct(cdev);

        return cdev;
}

/* Sets the requested power state */
static int qed_set_power_state(struct qed_dev *cdev, pci_power_t state)
{
        if (!cdev)
                return -ENODEV;

        DP_VERBOSE(cdev, NETIF_MSG_DRV, "Omitting Power state change\n");
        return 0;
}

/* probing */
static struct qed_dev *qed_probe(struct pci_dev *pdev,
                                 struct qed_probe_params *params)
{
        struct qed_dev *cdev;
        int rc;

        cdev = qed_alloc_cdev(pdev);
        if (!cdev)
                goto err0;

        cdev->drv_type = DRV_ID_DRV_TYPE_LINUX;
        cdev->protocol = params->protocol;

        if (params->is_vf)
                cdev->b_is_vf = true;

        qed_init_dp(cdev, params->dp_module, params->dp_level);

        cdev->recov_in_prog = params->recov_in_prog;

        rc = qed_init_pci(cdev, pdev);
        if (rc) {
                DP_ERR(cdev, "init pci failed\n");
                goto err1;
        }
        DP_INFO(cdev, "PCI init completed successfully\n");

        rc = qed_hw_prepare(cdev, QED_PCI_DEFAULT);
        if (rc) {
                DP_ERR(cdev, "hw prepare failed\n");
                goto err2;
        }

        DP_INFO(cdev, "qed_probe completed successfully\n");

        return cdev;

err2:
        qed_free_pci(cdev);
err1:
        qed_free_cdev(cdev);
err0:
        return NULL;
}

static void qed_remove(struct qed_dev *cdev)
{
        if (!cdev)
                return;

        qed_hw_remove(cdev);

        qed_free_pci(cdev);

        qed_set_power_state(cdev, PCI_D3hot);

        qed_free_cdev(cdev);
}

static void qed_disable_msix(struct qed_dev *cdev)
{
        if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
                pci_disable_msix(cdev->pdev);
                kfree(cdev->int_params.msix_table);
        } else if (cdev->int_params.out.int_mode == QED_INT_MODE_MSI) {
                pci_disable_msi(cdev->pdev);
        }

        memset(&cdev->int_params.out, 0, sizeof(struct qed_int_param));
}

static int qed_enable_msix(struct qed_dev *cdev,
                           struct qed_int_params *int_params)
{
        int i, rc, cnt;

        cnt = int_params->in.num_vectors;

        for (i = 0; i < cnt; i++)
                int_params->msix_table[i].entry = i;

        rc = pci_enable_msix_range(cdev->pdev, int_params->msix_table,
                                   int_params->in.min_msix_cnt, cnt);
        if (rc < cnt && rc >= int_params->in.min_msix_cnt &&
            (rc % cdev->num_hwfns)) {
                pci_disable_msix(cdev->pdev);

                /* If fastpath is initialized, we need at least one interrupt
                 * per hwfn [and the slow path interrupts]. New requested number
                 * should be a multiple of the number of hwfns.
                 */
                cnt = (rc / cdev->num_hwfns) * cdev->num_hwfns;
                DP_NOTICE(cdev,
                          "Trying to enable MSI-X with fewer vectors (%d out of %d)\n",
                          cnt, int_params->in.num_vectors);
                rc = pci_enable_msix_exact(cdev->pdev, int_params->msix_table,
                                           cnt);
                if (!rc)
                        rc = cnt;
        }

        /* For VFs, we should return with an error in case we didn't get the
         * exact number of MSI-X vectors as we requested.
         * Not doing that will lead to a crash when starting queues for
         * this VF.
         */
        if ((IS_PF(cdev) && rc > 0) || (IS_VF(cdev) && rc == cnt)) {
                /* MSI-X configuration was achieved */
                int_params->out.int_mode = QED_INT_MODE_MSIX;
                int_params->out.num_vectors = rc;
                rc = 0;
        } else {
                DP_NOTICE(cdev,
                          "Failed to enable MSI-X [Requested %d vectors][rc %d]\n",
                          cnt, rc);
        }

        return rc;
}

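/* On CMT (two-hwfn) devices the fastpath vectors must be shared evenly
 * between the engines, which is why a partial MSI-X grant is rounded down
 * to a multiple of num_hwfns above: e.g. if 9 of 10 requested vectors are
 * granted on a 2-hwfn adapter, only 8 are kept.
 */
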
/* This function outputs the int mode and the number of enabled MSI-X vectors */
static int qed_set_int_mode(struct qed_dev *cdev, bool force_mode)
{
        struct qed_int_params *int_params = &cdev->int_params;
        struct msix_entry *tbl;
        int rc = 0, cnt;

        switch (int_params->in.int_mode) {
        case QED_INT_MODE_MSIX:
                /* Allocate MSI-X table */
                cnt = int_params->in.num_vectors;
                int_params->msix_table = kcalloc(cnt, sizeof(*tbl), GFP_KERNEL);
                if (!int_params->msix_table) {
                        rc = -ENOMEM;
                        goto out;
                }

                /* Enable MSI-X */
                rc = qed_enable_msix(cdev, int_params);
                if (!rc)
                        goto out;

                DP_NOTICE(cdev, "Failed to enable MSI-X\n");
                kfree(int_params->msix_table);
                if (force_mode)
                        goto out;
                fallthrough;

        case QED_INT_MODE_MSI:
                if (cdev->num_hwfns == 1) {
                        rc = pci_enable_msi(cdev->pdev);
                        if (!rc) {
                                int_params->out.int_mode = QED_INT_MODE_MSI;
                                goto out;
                        }

                        DP_NOTICE(cdev, "Failed to enable MSI\n");
                        if (force_mode)
                                goto out;
                }
                fallthrough;

        case QED_INT_MODE_INTA:
                int_params->out.int_mode = QED_INT_MODE_INTA;
                rc = 0;
                goto out;
        default:
                DP_NOTICE(cdev, "Unknown int_mode value %d\n",
                          int_params->in.int_mode);
                rc = -EINVAL;
        }

out:
        if (!rc)
                DP_INFO(cdev, "Using %s interrupts\n",
                        int_params->out.int_mode == QED_INT_MODE_INTA ?
                        "INTa" : int_params->out.int_mode == QED_INT_MODE_MSI ?
                        "MSI" : "MSIX");
        cdev->int_coalescing_mode = QED_COAL_MODE_ENABLE;

        return rc;
}

static void qed_simd_handler_config(struct qed_dev *cdev, void *token,
                                    int index, void(*handler)(void *))
{
        struct qed_hwfn *hwfn = &cdev->hwfns[index % cdev->num_hwfns];
        int relative_idx = index / cdev->num_hwfns;

        hwfn->simd_proto_handler[relative_idx].func = handler;
        hwfn->simd_proto_handler[relative_idx].token = token;
}

static void qed_simd_handler_clean(struct qed_dev *cdev, int index)
{
        struct qed_hwfn *hwfn = &cdev->hwfns[index % cdev->num_hwfns];
        int relative_idx = index / cdev->num_hwfns;

        memset(&hwfn->simd_proto_handler[relative_idx], 0,
               sizeof(struct qed_simd_fp_handler));
}

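/* The fastpath handler index space is interleaved across engines:
 * index % num_hwfns selects the hwfn and index / num_hwfns the per-hwfn
 * slot, so on a 2-hwfn device index 5 maps to hwfn 1, slot 2. This mirrors
 * the L2 status-block distribution in qed_sb_init() below.
 */
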
static irqreturn_t qed_msix_sp_int(int irq, void *tasklet)
{
        tasklet_schedule((struct tasklet_struct *)tasklet);
        return IRQ_HANDLED;
}

static irqreturn_t qed_single_int(int irq, void *dev_instance)
{
        struct qed_dev *cdev = (struct qed_dev *)dev_instance;
        struct qed_hwfn *hwfn;
        irqreturn_t rc = IRQ_NONE;
        u64 status;
        int i, j;

        for (i = 0; i < cdev->num_hwfns; i++) {
                status = qed_int_igu_read_sisr_reg(&cdev->hwfns[i]);

                if (!status)
                        continue;

                hwfn = &cdev->hwfns[i];

                /* Slowpath interrupt */
                if (unlikely(status & 0x1)) {
                        tasklet_schedule(&hwfn->sp_dpc);
                        status &= ~0x1;
                        rc = IRQ_HANDLED;
                }

                /* Fastpath interrupts */
                for (j = 0; j < 64; j++) {
                        if ((0x2ULL << j) & status) {
                                struct qed_simd_fp_handler *p_handler =
                                        &hwfn->simd_proto_handler[j];

                                if (p_handler->func)
                                        p_handler->func(p_handler->token);
                                else
                                        DP_NOTICE(hwfn,
                                                  "Not calling fastpath handler as it is NULL [handler #%d, status 0x%llx]\n",
                                                  j, status);

                                status &= ~(0x2ULL << j);
                                rc = IRQ_HANDLED;
                        }
                }

                if (unlikely(status))
                        DP_VERBOSE(hwfn, NETIF_MSG_INTR,
                                   "got an unknown interrupt status 0x%llx\n",
                                   status);
        }

        return rc;
}

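/* In the single-IRQ modes the IGU SISR value is treated as a 64-bit status
 * word per hwfn: bit 0 signals the slowpath DPC, and bit (j + 1), hence the
 * (0x2ULL << j) test above, belongs to fastpath handler j.
 */
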
int qed_slowpath_irq_req(struct qed_hwfn *hwfn)
{
        struct qed_dev *cdev = hwfn->cdev;
        u32 int_mode;
        int rc = 0;
        u8 id;

        int_mode = cdev->int_params.out.int_mode;
        if (int_mode == QED_INT_MODE_MSIX) {
                id = hwfn->my_id;
                snprintf(hwfn->name, NAME_SIZE, "sp-%d-%02x:%02x.%02x",
                         id, cdev->pdev->bus->number,
                         PCI_SLOT(cdev->pdev->devfn), hwfn->abs_pf_id);
                rc = request_irq(cdev->int_params.msix_table[id].vector,
                                 qed_msix_sp_int, 0, hwfn->name, &hwfn->sp_dpc);
        } else {
                unsigned long flags = 0;

                snprintf(cdev->name, NAME_SIZE, "%02x:%02x.%02x",
                         cdev->pdev->bus->number, PCI_SLOT(cdev->pdev->devfn),
                         PCI_FUNC(cdev->pdev->devfn));

                if (cdev->int_params.out.int_mode == QED_INT_MODE_INTA)
                        flags |= IRQF_SHARED;

                rc = request_irq(cdev->pdev->irq, qed_single_int,
                                 flags, cdev->name, cdev);
        }

        if (rc)
                DP_NOTICE(cdev, "request_irq failed, rc = %d\n", rc);
        else
                DP_VERBOSE(hwfn, (NETIF_MSG_INTR | QED_MSG_SP),
                           "Requested slowpath %s\n",
                           (int_mode == QED_INT_MODE_MSIX) ? "MSI-X" : "IRQ");

        return rc;
}

static void qed_slowpath_tasklet_flush(struct qed_hwfn *p_hwfn)
{
        /* Calling the disable function will make sure that any
         * currently-running function is completed. The following call to the
         * enable function makes this sequence a flush-like operation.
         */
        if (p_hwfn->b_sp_dpc_enabled) {
                tasklet_disable(&p_hwfn->sp_dpc);
                tasklet_enable(&p_hwfn->sp_dpc);
        }
}

void qed_slowpath_irq_sync(struct qed_hwfn *p_hwfn)
{
        struct qed_dev *cdev = p_hwfn->cdev;
        u8 id = p_hwfn->my_id;
        u32 int_mode;

        int_mode = cdev->int_params.out.int_mode;
        if (int_mode == QED_INT_MODE_MSIX)
                synchronize_irq(cdev->int_params.msix_table[id].vector);
        else
                synchronize_irq(cdev->pdev->irq);

        qed_slowpath_tasklet_flush(p_hwfn);
}

static void qed_slowpath_irq_free(struct qed_dev *cdev)
{
        int i;

        if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
                for_each_hwfn(cdev, i) {
                        if (!cdev->hwfns[i].b_int_requested)
                                break;
                        synchronize_irq(cdev->int_params.msix_table[i].vector);
                        free_irq(cdev->int_params.msix_table[i].vector,
                                 &cdev->hwfns[i].sp_dpc);
                }
        } else {
                if (QED_LEADING_HWFN(cdev)->b_int_requested)
                        free_irq(cdev->pdev->irq, cdev);
        }
        qed_int_disable_post_isr_release(cdev);
}

static int qed_nic_stop(struct qed_dev *cdev)
{
        int i, rc;

        rc = qed_hw_stop(cdev);

        for (i = 0; i < cdev->num_hwfns; i++) {
                struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

                if (p_hwfn->b_sp_dpc_enabled) {
                        tasklet_disable(&p_hwfn->sp_dpc);
                        p_hwfn->b_sp_dpc_enabled = false;
                        DP_VERBOSE(cdev, NETIF_MSG_IFDOWN,
                                   "Disabled sp tasklet [hwfn %d] at %p\n",
                                   i, &p_hwfn->sp_dpc);
                }
        }

        qed_dbg_pf_exit(cdev);

        return rc;
}

static int qed_nic_setup(struct qed_dev *cdev)
{
        int rc, i;

        /* Determine if interface is going to require LL2 */
        if (QED_LEADING_HWFN(cdev)->hw_info.personality != QED_PCI_ETH) {
                for (i = 0; i < cdev->num_hwfns; i++) {
                        struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

                        p_hwfn->using_ll2 = true;
                }
        }

        rc = qed_resc_alloc(cdev);
        if (rc)
                return rc;

        DP_INFO(cdev, "Allocated qed resources\n");

        qed_resc_setup(cdev);

        return rc;
}

static int qed_set_int_fp(struct qed_dev *cdev, u16 cnt)
{
        int limit = 0;

        /* Mark the fastpath as free/used */
        cdev->int_params.fp_initialized = cnt ? true : false;

        if (cdev->int_params.out.int_mode != QED_INT_MODE_MSIX)
                limit = cdev->num_hwfns * 63;
        else if (cdev->int_params.fp_msix_cnt)
                limit = cdev->int_params.fp_msix_cnt;

        if (!limit)
                return -ENOMEM;

        return min_t(int, cnt, limit);
}

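/* The non-MSI-X limit of num_hwfns * 63 matches the status layout handled
 * in qed_single_int(): of the 64 status bits per hwfn, one is reserved for
 * the slowpath, leaving 63 fastpath handlers per engine.
 */
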
static int qed_get_int_fp(struct qed_dev *cdev, struct qed_int_info *info)
{
        memset(info, 0, sizeof(struct qed_int_info));

        if (!cdev->int_params.fp_initialized) {
                DP_INFO(cdev,
                        "Protocol driver requested interrupt information, but its support is not yet configured\n");
                return -EINVAL;
        }

        /* Need to expose only MSI-X information; Single IRQ is handled solely
         * by qed.
         */
        if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
                int msix_base = cdev->int_params.fp_msix_base;

                info->msix_cnt = cdev->int_params.fp_msix_cnt;
                info->msix = &cdev->int_params.msix_table[msix_base];
        }

        return 0;
}

static int qed_slowpath_setup_int(struct qed_dev *cdev,
                                  enum qed_int_mode int_mode)
{
        struct qed_sb_cnt_info sb_cnt_info;
        int num_l2_queues = 0;
        int rc;
        int i;

        if ((int_mode == QED_INT_MODE_MSI) && (cdev->num_hwfns > 1)) {
                DP_NOTICE(cdev, "MSI mode is not supported for CMT devices\n");
                return -EINVAL;
        }

        memset(&cdev->int_params, 0, sizeof(struct qed_int_params));
        cdev->int_params.in.int_mode = int_mode;
        for_each_hwfn(cdev, i) {
                memset(&sb_cnt_info, 0, sizeof(sb_cnt_info));
                qed_int_get_num_sbs(&cdev->hwfns[i], &sb_cnt_info);
                cdev->int_params.in.num_vectors += sb_cnt_info.cnt;
                cdev->int_params.in.num_vectors++; /* slowpath */
        }

        /* We want a minimum of one slowpath and one fastpath vector per hwfn */
        cdev->int_params.in.min_msix_cnt = cdev->num_hwfns * 2;

        if (is_kdump_kernel()) {
                DP_INFO(cdev,
                        "Kdump kernel: Limit the max number of requested MSI-X vectors to %hd\n",
                        cdev->int_params.in.min_msix_cnt);
                cdev->int_params.in.num_vectors =
                        cdev->int_params.in.min_msix_cnt;
        }

        rc = qed_set_int_mode(cdev, false);
        if (rc) {
                DP_ERR(cdev, "qed_slowpath_setup_int ERR\n");
                return rc;
        }

        cdev->int_params.fp_msix_base = cdev->num_hwfns;
        cdev->int_params.fp_msix_cnt = cdev->int_params.out.num_vectors -
                                       cdev->num_hwfns;

        if (!IS_ENABLED(CONFIG_QED_RDMA) ||
            !QED_IS_RDMA_PERSONALITY(QED_LEADING_HWFN(cdev)))
                return 0;

        for_each_hwfn(cdev, i)
                num_l2_queues += FEAT_NUM(&cdev->hwfns[i], QED_PF_L2_QUE);

        DP_VERBOSE(cdev, QED_MSG_RDMA,
                   "cdev->int_params.fp_msix_cnt=%d num_l2_queues=%d\n",
                   cdev->int_params.fp_msix_cnt, num_l2_queues);

        if (cdev->int_params.fp_msix_cnt > num_l2_queues) {
                cdev->int_params.rdma_msix_cnt =
                        (cdev->int_params.fp_msix_cnt - num_l2_queues)
                        / cdev->num_hwfns;
                cdev->int_params.rdma_msix_base =
                        cdev->int_params.fp_msix_base + num_l2_queues;
                cdev->int_params.fp_msix_cnt = num_l2_queues;
        } else {
                cdev->int_params.rdma_msix_cnt = 0;
        }

        DP_VERBOSE(cdev, QED_MSG_RDMA, "roce_msix_cnt=%d roce_msix_base=%d\n",
                   cdev->int_params.rdma_msix_cnt,
                   cdev->int_params.rdma_msix_base);

        return 0;
}

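/* Resulting PF MSI-X layout: vectors [0, num_hwfns) serve the slowpath
 * DPCs and the remainder is fastpath. With an RDMA personality, the tail
 * of the fastpath range beyond the L2 queue count is handed to the RDMA
 * driver through rdma_msix_base/rdma_msix_cnt.
 */
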
static int qed_slowpath_vf_setup_int(struct qed_dev *cdev)
{
        int rc;

        memset(&cdev->int_params, 0, sizeof(struct qed_int_params));
        cdev->int_params.in.int_mode = QED_INT_MODE_MSIX;

        qed_vf_get_num_rxqs(QED_LEADING_HWFN(cdev),
                            &cdev->int_params.in.num_vectors);
        if (cdev->num_hwfns > 1) {
                u8 vectors = 0;

                qed_vf_get_num_rxqs(&cdev->hwfns[1], &vectors);
                cdev->int_params.in.num_vectors += vectors;
        }

        /* We want a minimum of one fastpath vector per vf hwfn */
        cdev->int_params.in.min_msix_cnt = cdev->num_hwfns;

        rc = qed_set_int_mode(cdev, true);
        if (rc)
                return rc;

        cdev->int_params.fp_msix_base = 0;
        cdev->int_params.fp_msix_cnt = cdev->int_params.out.num_vectors;

        return 0;
}

u32 qed_unzip_data(struct qed_hwfn *p_hwfn, u32 input_len,
                   u8 *input_buf, u32 max_size, u8 *unzip_buf)
{
        int rc;

        p_hwfn->stream->next_in = input_buf;
        p_hwfn->stream->avail_in = input_len;
        p_hwfn->stream->next_out = unzip_buf;
        p_hwfn->stream->avail_out = max_size;

        rc = zlib_inflateInit2(p_hwfn->stream, MAX_WBITS);

        if (rc != Z_OK) {
                DP_VERBOSE(p_hwfn, NETIF_MSG_DRV, "zlib init failed, rc = %d\n",
                           rc);
                return 0;
        }

        rc = zlib_inflate(p_hwfn->stream, Z_FINISH);
        zlib_inflateEnd(p_hwfn->stream);

        if (rc != Z_OK && rc != Z_STREAM_END) {
                DP_VERBOSE(p_hwfn, NETIF_MSG_DRV, "FW unzip error: %s, rc=%d\n",
                           p_hwfn->stream->msg, rc);
                return 0;
        }

        return p_hwfn->stream->total_out / 4;
}

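/* Note: on success qed_unzip_data() returns the decompressed length in
 * 32-bit dwords (total_out / 4); both init and inflate failures are
 * reported as a zero length rather than as an error code.
 */
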
static int qed_alloc_stream_mem(struct qed_dev *cdev)
{
        int i;
        void *workspace;

        for_each_hwfn(cdev, i) {
                struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

                p_hwfn->stream = kzalloc(sizeof(*p_hwfn->stream), GFP_KERNEL);
                if (!p_hwfn->stream)
                        return -ENOMEM;

                workspace = vzalloc(zlib_inflate_workspacesize());
                if (!workspace)
                        return -ENOMEM;
                p_hwfn->stream->workspace = workspace;
        }

        return 0;
}

static void qed_free_stream_mem(struct qed_dev *cdev)
{
        int i;

        for_each_hwfn(cdev, i) {
                struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

                if (!p_hwfn->stream)
                        return;

                vfree(p_hwfn->stream->workspace);
                kfree(p_hwfn->stream);
        }
}

static void qed_update_pf_params(struct qed_dev *cdev,
                                 struct qed_pf_params *params)
{
        int i;

        if (IS_ENABLED(CONFIG_QED_RDMA)) {
                params->rdma_pf_params.num_qps = QED_ROCE_QPS;
                params->rdma_pf_params.min_dpis = QED_ROCE_DPIS;
                params->rdma_pf_params.num_srqs = QED_RDMA_SRQS;
                /* divide by 3 the MRs to avoid MF ILT overflow */
                params->rdma_pf_params.gl_pi = QED_ROCE_PROTOCOL_INDEX;
        }

        if (cdev->num_hwfns > 1 || IS_VF(cdev))
                params->eth_pf_params.num_arfs_filters = 0;

        /* In case we might support RDMA, don't allow qede to be greedy
         * with the L2 contexts. Allow for 64 queues [rx, tx cos, xdp]
         * per hwfn.
         */
        if (QED_IS_RDMA_PERSONALITY(QED_LEADING_HWFN(cdev))) {
                u16 *num_cons;

                num_cons = &params->eth_pf_params.num_cons;
                *num_cons = min_t(u16, *num_cons, QED_MAX_L2_CONS);
        }

        for (i = 0; i < cdev->num_hwfns; i++) {
                struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

                p_hwfn->pf_params = *params;
        }
}

#define QED_PERIODIC_DB_REC_COUNT               10
#define QED_PERIODIC_DB_REC_INTERVAL_MS         100
#define QED_PERIODIC_DB_REC_INTERVAL \
        msecs_to_jiffies(QED_PERIODIC_DB_REC_INTERVAL_MS)

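/* Together these bound periodic doorbell recovery to roughly one second:
 * up to QED_PERIODIC_DB_REC_COUNT (10) passes spaced
 * QED_PERIODIC_DB_REC_INTERVAL_MS (100 ms) apart, rescheduled from
 * qed_slowpath_task() below.
 */
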
static int qed_slowpath_delayed_work(struct qed_hwfn *hwfn,
                                     enum qed_slowpath_wq_flag wq_flag,
                                     unsigned long delay)
{
        if (!hwfn->slowpath_wq_active)
                return -EINVAL;

        /* Memory barrier for setting atomic bit */
        smp_mb__before_atomic();
        set_bit(wq_flag, &hwfn->slowpath_task_flags);
        smp_mb__after_atomic();
        queue_delayed_work(hwfn->slowpath_wq, &hwfn->slowpath_task, delay);

        return 0;
}

void qed_periodic_db_rec_start(struct qed_hwfn *p_hwfn)
{
        /* Reset periodic Doorbell Recovery counter */
        p_hwfn->periodic_db_rec_count = QED_PERIODIC_DB_REC_COUNT;

        /* Don't schedule periodic Doorbell Recovery if already scheduled */
        if (test_bit(QED_SLOWPATH_PERIODIC_DB_REC,
                     &p_hwfn->slowpath_task_flags))
                return;

        qed_slowpath_delayed_work(p_hwfn, QED_SLOWPATH_PERIODIC_DB_REC,
                                  QED_PERIODIC_DB_REC_INTERVAL);
}

static void qed_slowpath_wq_stop(struct qed_dev *cdev)
{
        int i;

        if (IS_VF(cdev))
                return;

        for_each_hwfn(cdev, i) {
                if (!cdev->hwfns[i].slowpath_wq)
                        continue;

                /* Stop queuing new delayed works */
                cdev->hwfns[i].slowpath_wq_active = false;

                cancel_delayed_work(&cdev->hwfns[i].slowpath_task);
                destroy_workqueue(cdev->hwfns[i].slowpath_wq);
        }
}

static void qed_slowpath_task(struct work_struct *work)
{
        struct qed_hwfn *hwfn = container_of(work, struct qed_hwfn,
                                             slowpath_task.work);
        struct qed_ptt *ptt = qed_ptt_acquire(hwfn);

        if (!ptt) {
                if (hwfn->slowpath_wq_active)
                        queue_delayed_work(hwfn->slowpath_wq,
                                           &hwfn->slowpath_task, 0);

                return;
        }

        if (test_and_clear_bit(QED_SLOWPATH_MFW_TLV_REQ,
                               &hwfn->slowpath_task_flags))
                qed_mfw_process_tlv_req(hwfn, ptt);

        if (test_and_clear_bit(QED_SLOWPATH_PERIODIC_DB_REC,
                               &hwfn->slowpath_task_flags)) {
                /* skip qed_db_rec_handler during recovery/unload */
                if (hwfn->cdev->recov_in_prog || !hwfn->slowpath_wq_active)
                        goto out;

                qed_db_rec_handler(hwfn, ptt);
                if (hwfn->periodic_db_rec_count--)
                        qed_slowpath_delayed_work(hwfn,
                                                  QED_SLOWPATH_PERIODIC_DB_REC,
                                                  QED_PERIODIC_DB_REC_INTERVAL);
        }

out:
        qed_ptt_release(hwfn, ptt);
}

static int qed_slowpath_wq_start(struct qed_dev *cdev)
{
        struct qed_hwfn *hwfn;
        char name[NAME_SIZE];
        int i;

        if (IS_VF(cdev))
                return 0;

        for_each_hwfn(cdev, i) {
                hwfn = &cdev->hwfns[i];

                snprintf(name, NAME_SIZE, "slowpath-%02x:%02x.%02x",
                         cdev->pdev->bus->number,
                         PCI_SLOT(cdev->pdev->devfn), hwfn->abs_pf_id);

                hwfn->slowpath_wq = alloc_workqueue(name, 0, 0);
                if (!hwfn->slowpath_wq) {
                        DP_NOTICE(hwfn, "Cannot create slowpath workqueue\n");
                        return -ENOMEM;
                }

                INIT_DELAYED_WORK(&hwfn->slowpath_task, qed_slowpath_task);
                hwfn->slowpath_wq_active = true;
        }

        return 0;
}

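/* Each PF hwfn gets its own named workqueue; qed_slowpath_task() is the
 * single work item behind it and demultiplexes on slowpath_task_flags
 * (currently MFW TLV requests and periodic doorbell recovery).
 */
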
static int qed_slowpath_start(struct qed_dev *cdev,
                              struct qed_slowpath_params *params)
{
        struct qed_drv_load_params drv_load_params;
        struct qed_hw_init_params hw_init_params;
        struct qed_mcp_drv_version drv_version;
        struct qed_tunnel_info tunn_info;
        const u8 *data = NULL;
        struct qed_hwfn *hwfn;
        struct qed_ptt *p_ptt;
        int rc = -EINVAL;

        if (qed_iov_wq_start(cdev))
                goto err;

        if (qed_slowpath_wq_start(cdev))
                goto err;

        if (IS_PF(cdev)) {
                rc = request_firmware(&cdev->firmware, QED_FW_FILE_NAME,
                                      &cdev->pdev->dev);
                if (rc) {
                        DP_NOTICE(cdev,
                                  "Failed to find fw file - /lib/firmware/%s\n",
                                  QED_FW_FILE_NAME);
                        goto err;
                }

                if (cdev->num_hwfns == 1) {
                        p_ptt = qed_ptt_acquire(QED_LEADING_HWFN(cdev));
                        if (p_ptt) {
                                QED_LEADING_HWFN(cdev)->p_arfs_ptt = p_ptt;
                        } else {
                                DP_NOTICE(cdev,
                                          "Failed to acquire PTT for aRFS\n");
                                rc = -EINVAL;
                                goto err;
                        }
                }
        }

        cdev->rx_coalesce_usecs = QED_DEFAULT_RX_USECS;
        rc = qed_nic_setup(cdev);
        if (rc)
                goto err;

        if (IS_PF(cdev))
                rc = qed_slowpath_setup_int(cdev, params->int_mode);
        else
                rc = qed_slowpath_vf_setup_int(cdev);
        if (rc)
                goto err1;

        if (IS_PF(cdev)) {
                /* Allocate stream for unzipping */
                rc = qed_alloc_stream_mem(cdev);
                if (rc)
                        goto err2;

                /* First Dword used to differentiate between various sources */
                data = cdev->firmware->data + sizeof(u32);

                qed_dbg_pf_init(cdev);
        }

        /* Start the slowpath */
        memset(&hw_init_params, 0, sizeof(hw_init_params));
        memset(&tunn_info, 0, sizeof(tunn_info));
        tunn_info.vxlan.b_mode_enabled = true;
        tunn_info.l2_gre.b_mode_enabled = true;
        tunn_info.ip_gre.b_mode_enabled = true;
        tunn_info.l2_geneve.b_mode_enabled = true;
        tunn_info.ip_geneve.b_mode_enabled = true;
        tunn_info.vxlan.tun_cls = QED_TUNN_CLSS_MAC_VLAN;
        tunn_info.l2_gre.tun_cls = QED_TUNN_CLSS_MAC_VLAN;
        tunn_info.ip_gre.tun_cls = QED_TUNN_CLSS_MAC_VLAN;
        tunn_info.l2_geneve.tun_cls = QED_TUNN_CLSS_MAC_VLAN;
        tunn_info.ip_geneve.tun_cls = QED_TUNN_CLSS_MAC_VLAN;
        hw_init_params.p_tunn = &tunn_info;
        hw_init_params.b_hw_start = true;
        hw_init_params.int_mode = cdev->int_params.out.int_mode;
        hw_init_params.allow_npar_tx_switch = true;
        hw_init_params.bin_fw_data = data;

        memset(&drv_load_params, 0, sizeof(drv_load_params));
        drv_load_params.is_crash_kernel = is_kdump_kernel();
        drv_load_params.mfw_timeout_val = QED_LOAD_REQ_LOCK_TO_DEFAULT;
        drv_load_params.avoid_eng_reset = false;
        drv_load_params.override_force_load = QED_OVERRIDE_FORCE_LOAD_NONE;
        hw_init_params.p_drv_load_params = &drv_load_params;

        rc = qed_hw_init(cdev, &hw_init_params);
        if (rc)
                goto err2;

        DP_INFO(cdev,
                "HW initialization and function start completed successfully\n");

        if (IS_PF(cdev)) {
                cdev->tunn_feature_mask = (BIT(QED_MODE_VXLAN_TUNN) |
                                           BIT(QED_MODE_L2GENEVE_TUNN) |
                                           BIT(QED_MODE_IPGENEVE_TUNN) |
                                           BIT(QED_MODE_L2GRE_TUNN) |
                                           BIT(QED_MODE_IPGRE_TUNN));
        }

        /* Allocate LL2 interface if needed */
        if (QED_LEADING_HWFN(cdev)->using_ll2) {
                rc = qed_ll2_alloc_if(cdev);
                if (rc)
                        goto err3;
        }
        if (IS_PF(cdev)) {
                hwfn = QED_LEADING_HWFN(cdev);
                drv_version.version = (params->drv_major << 24) |
                                      (params->drv_minor << 16) |
                                      (params->drv_rev << 8) |
                                      (params->drv_eng);
                strlcpy(drv_version.name, params->name,
                        MCP_DRV_VER_STR_SIZE - 4);
                rc = qed_mcp_send_drv_version(hwfn, hwfn->p_main_ptt,
                                              &drv_version);
                if (rc) {
                        DP_NOTICE(cdev, "Failed sending drv version command\n");
                        goto err4;
                }
        }

        qed_reset_vport_stats(cdev);

        return 0;

err4:
        qed_ll2_dealloc_if(cdev);
err3:
        qed_hw_stop(cdev);
err2:
        qed_hw_timers_stop_all(cdev);
        if (IS_PF(cdev))
                qed_slowpath_irq_free(cdev);
        qed_free_stream_mem(cdev);
        qed_disable_msix(cdev);
err1:
        qed_resc_free(cdev);
err:
        if (IS_PF(cdev))
                release_firmware(cdev->firmware);

        if (IS_PF(cdev) && (cdev->num_hwfns == 1) &&
            QED_LEADING_HWFN(cdev)->p_arfs_ptt)
                qed_ptt_release(QED_LEADING_HWFN(cdev),
                                QED_LEADING_HWFN(cdev)->p_arfs_ptt);

        qed_iov_wq_stop(cdev, false);

        qed_slowpath_wq_stop(cdev);

        return rc;
}

static int qed_slowpath_stop(struct qed_dev *cdev)
{
        if (!cdev)
                return -ENODEV;

        qed_slowpath_wq_stop(cdev);

        qed_ll2_dealloc_if(cdev);

        if (IS_PF(cdev)) {
                if (cdev->num_hwfns == 1)
                        qed_ptt_release(QED_LEADING_HWFN(cdev),
                                        QED_LEADING_HWFN(cdev)->p_arfs_ptt);
                qed_free_stream_mem(cdev);
                if (IS_QED_ETH_IF(cdev))
                        qed_sriov_disable(cdev, true);
        }

        qed_nic_stop(cdev);

        if (IS_PF(cdev))
                qed_slowpath_irq_free(cdev);

        qed_disable_msix(cdev);

        qed_resc_free(cdev);

        qed_iov_wq_stop(cdev, true);

        if (IS_PF(cdev))
                release_firmware(cdev->firmware);

        return 0;
}

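/* Teardown above runs roughly in reverse of qed_slowpath_start(): stop the
 * slowpath workqueues, release LL2 and the aRFS PTT, stop the NIC and free
 * IRQs, then release resources and the firmware image.
 */
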
static void qed_set_name(struct qed_dev *cdev, char name[NAME_SIZE])
{
        int i;

        memcpy(cdev->name, name, NAME_SIZE);
        for_each_hwfn(cdev, i)
                snprintf(cdev->hwfns[i].name, NAME_SIZE, "%s-%d", name, i);
}

static u32 qed_sb_init(struct qed_dev *cdev,
                       struct qed_sb_info *sb_info,
                       void *sb_virt_addr,
                       dma_addr_t sb_phy_addr, u16 sb_id,
                       enum qed_sb_type type)
{
        struct qed_hwfn *p_hwfn;
        struct qed_ptt *p_ptt;
        u16 rel_sb_id;
        u32 rc;

        /* RoCE/Storage use a single engine in CMT mode while L2 uses both */
        if (type == QED_SB_TYPE_L2_QUEUE) {
                p_hwfn = &cdev->hwfns[sb_id % cdev->num_hwfns];
                rel_sb_id = sb_id / cdev->num_hwfns;
        } else {
                p_hwfn = QED_AFFIN_HWFN(cdev);
                rel_sb_id = sb_id;
        }

        DP_VERBOSE(cdev, NETIF_MSG_INTR,
                   "hwfn [%d] <--[init]-- SB %04x [0x%04x upper]\n",
                   IS_LEAD_HWFN(p_hwfn) ? 0 : 1, rel_sb_id, sb_id);

        if (IS_PF(p_hwfn->cdev)) {
                p_ptt = qed_ptt_acquire(p_hwfn);
                if (!p_ptt)
                        return -EBUSY;

                rc = qed_int_sb_init(p_hwfn, p_ptt, sb_info, sb_virt_addr,
                                     sb_phy_addr, rel_sb_id);
                qed_ptt_release(p_hwfn, p_ptt);
        } else {
                rc = qed_int_sb_init(p_hwfn, NULL, sb_info, sb_virt_addr,
                                     sb_phy_addr, rel_sb_id);
        }

        return rc;
}

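/* As with the fastpath handlers, L2 status blocks interleave across both
 * engines in CMT mode (sb_id % num_hwfns), while storage/RoCE status blocks
 * stay on the affinitized hwfn with the sb_id used as-is.
 */
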
1511static u32 qed_sb_release(struct qed_dev *cdev,
1512                          struct qed_sb_info *sb_info,
1513                          u16 sb_id,
1514                          enum qed_sb_type type)
1515{
1516        struct qed_hwfn *p_hwfn;
1517        u16 rel_sb_id;
1518        u32 rc;
1519
1520        /* RoCE/Storage use a single engine in CMT mode while L2 uses both */
1521        if (type == QED_SB_TYPE_L2_QUEUE) {
1522                p_hwfn = &cdev->hwfns[sb_id % cdev->num_hwfns];
1523                rel_sb_id = sb_id / cdev->num_hwfns;
1524        } else {
1525                p_hwfn = QED_AFFIN_HWFN(cdev);
1526                rel_sb_id = sb_id;
1527        }
1528
1529        DP_VERBOSE(cdev, NETIF_MSG_INTR,
1530                   "hwfn [%d] <--[release]-- SB %04x [0x%04x upper]\n",
1531                   IS_LEAD_HWFN(p_hwfn) ? 0 : 1, rel_sb_id, sb_id);
1532
1533        rc = qed_int_sb_release(p_hwfn, sb_info, rel_sb_id);
1534
1535        return rc;
1536}
1537
1538static bool qed_can_link_change(struct qed_dev *cdev)
1539{
1540        return true;
1541}
1542
1543static void qed_set_ext_speed_params(struct qed_mcp_link_params *link_params,
1544                                     const struct qed_link_params *params)
1545{
1546        struct qed_mcp_link_speed_params *ext_speed = &link_params->ext_speed;
1547        const struct qed_mfw_speed_map *map;
1548        u32 i;
1549
1550        if (params->override_flags & QED_LINK_OVERRIDE_SPEED_AUTONEG)
1551                ext_speed->autoneg = !!params->autoneg;
1552
1553        if (params->override_flags & QED_LINK_OVERRIDE_SPEED_ADV_SPEEDS) {
1554                ext_speed->advertised_speeds = 0;
1555
1556                for (i = 0; i < ARRAY_SIZE(qed_mfw_ext_maps); i++) {
1557                        map = qed_mfw_ext_maps + i;
1558
1559                        if (linkmode_intersects(params->adv_speeds, map->caps))
1560                                ext_speed->advertised_speeds |= map->mfw_val;
1561                }
1562        }
1563
1564        if (params->override_flags & QED_LINK_OVERRIDE_SPEED_FORCED_SPEED) {
1565                switch (params->forced_speed) {
1566                case SPEED_1000:
1567                        ext_speed->forced_speed = QED_EXT_SPEED_1G;
1568                        break;
1569                case SPEED_10000:
1570                        ext_speed->forced_speed = QED_EXT_SPEED_10G;
1571                        break;
1572                case SPEED_20000:
1573                        ext_speed->forced_speed = QED_EXT_SPEED_20G;
1574                        break;
1575                case SPEED_25000:
1576                        ext_speed->forced_speed = QED_EXT_SPEED_25G;
1577                        break;
1578                case SPEED_40000:
1579                        ext_speed->forced_speed = QED_EXT_SPEED_40G;
1580                        break;
1581                case SPEED_50000:
1582                        ext_speed->forced_speed = QED_EXT_SPEED_50G_R |
1583                                                  QED_EXT_SPEED_50G_R2;
1584                        break;
1585                case SPEED_100000:
1586                        ext_speed->forced_speed = QED_EXT_SPEED_100G_R2 |
1587                                                  QED_EXT_SPEED_100G_R4 |
1588                                                  QED_EXT_SPEED_100G_P4;
1589                        break;
1590                default:
1591                        break;
1592                }
1593        }
1594
1595        if (!(params->override_flags & QED_LINK_OVERRIDE_FEC_CONFIG))
1596                return;
1597
1598        switch (params->forced_speed) {
1599        case SPEED_25000:
1600                switch (params->fec) {
1601                case FEC_FORCE_MODE_NONE:
1602                        link_params->ext_fec_mode = ETH_EXT_FEC_25G_NONE;
1603                        break;
1604                case FEC_FORCE_MODE_FIRECODE:
1605                        link_params->ext_fec_mode = ETH_EXT_FEC_25G_BASE_R;
1606                        break;
1607                case FEC_FORCE_MODE_RS:
1608                        link_params->ext_fec_mode = ETH_EXT_FEC_25G_RS528;
1609                        break;
1610                case FEC_FORCE_MODE_AUTO:
1611                        link_params->ext_fec_mode = ETH_EXT_FEC_25G_RS528 |
1612                                                    ETH_EXT_FEC_25G_BASE_R |
1613                                                    ETH_EXT_FEC_25G_NONE;
1614                        break;
1615                default:
1616                        break;
1617                }
1618
1619                break;
1620        case SPEED_40000:
1621                switch (params->fec) {
1622                case FEC_FORCE_MODE_NONE:
1623                        link_params->ext_fec_mode = ETH_EXT_FEC_40G_NONE;
1624                        break;
1625                case FEC_FORCE_MODE_FIRECODE:
1626                        link_params->ext_fec_mode = ETH_EXT_FEC_40G_BASE_R;
1627                        break;
1628                case FEC_FORCE_MODE_AUTO:
1629                        link_params->ext_fec_mode = ETH_EXT_FEC_40G_BASE_R |
1630                                                    ETH_EXT_FEC_40G_NONE;
1631                        break;
1632                default:
1633                        break;
1634                }
1635
1636                break;
1637        case SPEED_50000:
1638                switch (params->fec) {
1639                case FEC_FORCE_MODE_NONE:
1640                        link_params->ext_fec_mode = ETH_EXT_FEC_50G_NONE;
1641                        break;
1642                case FEC_FORCE_MODE_FIRECODE:
1643                        link_params->ext_fec_mode = ETH_EXT_FEC_50G_BASE_R;
1644                        break;
1645                case FEC_FORCE_MODE_RS:
1646                        link_params->ext_fec_mode = ETH_EXT_FEC_50G_RS528;
1647                        break;
1648                case FEC_FORCE_MODE_AUTO:
1649                        link_params->ext_fec_mode = ETH_EXT_FEC_50G_RS528 |
1650                                                    ETH_EXT_FEC_50G_BASE_R |
1651                                                    ETH_EXT_FEC_50G_NONE;
1652                        break;
1653                default:
1654                        break;
1655                }
1656
1657                break;
1658        case SPEED_100000:
1659                switch (params->fec) {
1660                case FEC_FORCE_MODE_NONE:
1661                        link_params->ext_fec_mode = ETH_EXT_FEC_100G_NONE;
1662                        break;
1663                case FEC_FORCE_MODE_FIRECODE:
1664                        link_params->ext_fec_mode = ETH_EXT_FEC_100G_BASE_R;
1665                        break;
1666                case FEC_FORCE_MODE_RS:
1667                        link_params->ext_fec_mode = ETH_EXT_FEC_100G_RS528;
1668                        break;
1669                case FEC_FORCE_MODE_AUTO:
1670                        link_params->ext_fec_mode = ETH_EXT_FEC_100G_RS528 |
1671                                                    ETH_EXT_FEC_100G_BASE_R |
1672                                                    ETH_EXT_FEC_100G_NONE;
1673                        break;
1674                default:
1675                        break;
1676                }
1677
1678                break;
1679        default:
1680                break;
1681        }
1682}
1683
1684static int qed_set_link(struct qed_dev *cdev, struct qed_link_params *params)
1685{
1686        struct qed_mcp_link_params *link_params;
1687        struct qed_mcp_link_speed_params *speed;
1688        const struct qed_mfw_speed_map *map;
1689        struct qed_hwfn *hwfn;
1690        struct qed_ptt *ptt;
1691        int rc;
1692        u32 i;
1693
1694        if (!cdev)
1695                return -ENODEV;
1696
1697        /* The link should be set only once per PF */
1698        hwfn = &cdev->hwfns[0];
1699
1700        /* When VF wants to set link, force it to read the bulletin instead.
1701         * This mimics the PF behavior, where a notification [both immediate
1702         * and possibly later] would be generated when changing properties.
1703         */
1704        if (IS_VF(cdev)) {
1705                qed_schedule_iov(hwfn, QED_IOV_WQ_VF_FORCE_LINK_QUERY_FLAG);
1706                return 0;
1707        }
1708
1709        ptt = qed_ptt_acquire(hwfn);
1710        if (!ptt)
1711                return -EBUSY;
1712
1713        link_params = qed_mcp_get_link_params(hwfn);
1714        if (!link_params) {
1715                qed_ptt_release(hwfn, ptt);
                    return -ENODATA;
            }
1716
1717        speed = &link_params->speed;
1718
1719        if (params->override_flags & QED_LINK_OVERRIDE_SPEED_AUTONEG)
1720                speed->autoneg = !!params->autoneg;
1721
1722        if (params->override_flags & QED_LINK_OVERRIDE_SPEED_ADV_SPEEDS) {
1723                speed->advertised_speeds = 0;
1724
1725                for (i = 0; i < ARRAY_SIZE(qed_mfw_legacy_maps); i++) {
1726                        map = qed_mfw_legacy_maps + i;
1727
1728                        if (linkmode_intersects(params->adv_speeds, map->caps))
1729                                speed->advertised_speeds |= map->mfw_val;
1730                }
1731        }
1732
1733        if (params->override_flags & QED_LINK_OVERRIDE_SPEED_FORCED_SPEED)
1734                speed->forced_speed = params->forced_speed;
1735
1736        if (qed_mcp_is_ext_speed_supported(hwfn))
1737                qed_set_ext_speed_params(link_params, params);
1738
1739        if (params->override_flags & QED_LINK_OVERRIDE_PAUSE_CONFIG) {
1740                if (params->pause_config & QED_LINK_PAUSE_AUTONEG_ENABLE)
1741                        link_params->pause.autoneg = true;
1742                else
1743                        link_params->pause.autoneg = false;
1744                if (params->pause_config & QED_LINK_PAUSE_RX_ENABLE)
1745                        link_params->pause.forced_rx = true;
1746                else
1747                        link_params->pause.forced_rx = false;
1748                if (params->pause_config & QED_LINK_PAUSE_TX_ENABLE)
1749                        link_params->pause.forced_tx = true;
1750                else
1751                        link_params->pause.forced_tx = false;
1752        }
1753
1754        if (params->override_flags & QED_LINK_OVERRIDE_LOOPBACK_MODE) {
1755                switch (params->loopback_mode) {
1756                case QED_LINK_LOOPBACK_INT_PHY:
1757                        link_params->loopback_mode = ETH_LOOPBACK_INT_PHY;
1758                        break;
1759                case QED_LINK_LOOPBACK_EXT_PHY:
1760                        link_params->loopback_mode = ETH_LOOPBACK_EXT_PHY;
1761                        break;
1762                case QED_LINK_LOOPBACK_EXT:
1763                        link_params->loopback_mode = ETH_LOOPBACK_EXT;
1764                        break;
1765                case QED_LINK_LOOPBACK_MAC:
1766                        link_params->loopback_mode = ETH_LOOPBACK_MAC;
1767                        break;
1768                case QED_LINK_LOOPBACK_CNIG_AH_ONLY_0123:
1769                        link_params->loopback_mode =
1770                                ETH_LOOPBACK_CNIG_AH_ONLY_0123;
1771                        break;
1772                case QED_LINK_LOOPBACK_CNIG_AH_ONLY_2301:
1773                        link_params->loopback_mode =
1774                                ETH_LOOPBACK_CNIG_AH_ONLY_2301;
1775                        break;
1776                case QED_LINK_LOOPBACK_PCS_AH_ONLY:
1777                        link_params->loopback_mode = ETH_LOOPBACK_PCS_AH_ONLY;
1778                        break;
1779                case QED_LINK_LOOPBACK_REVERSE_MAC_AH_ONLY:
1780                        link_params->loopback_mode =
1781                                ETH_LOOPBACK_REVERSE_MAC_AH_ONLY;
1782                        break;
1783                case QED_LINK_LOOPBACK_INT_PHY_FEA_AH_ONLY:
1784                        link_params->loopback_mode =
1785                                ETH_LOOPBACK_INT_PHY_FEA_AH_ONLY;
1786                        break;
1787                default:
1788                        link_params->loopback_mode = ETH_LOOPBACK_NONE;
1789                        break;
1790                }
1791        }
1792
1793        if (params->override_flags & QED_LINK_OVERRIDE_EEE_CONFIG)
1794                memcpy(&link_params->eee, &params->eee,
1795                       sizeof(link_params->eee));
1796
1797        if (params->override_flags & QED_LINK_OVERRIDE_FEC_CONFIG)
1798                link_params->fec = params->fec;
1799
1800        rc = qed_mcp_set_link(hwfn, ptt, params->link_up);
1801
1802        qed_ptt_release(hwfn, ptt);
1803
1804        return rc;
1805}
1806
1807static int qed_get_port_type(u32 media_type)
1808{
1809        int port_type;
1810
1811        switch (media_type) {
1812        case MEDIA_SFPP_10G_FIBER:
1813        case MEDIA_SFP_1G_FIBER:
1814        case MEDIA_XFP_FIBER:
1815        case MEDIA_MODULE_FIBER:
1816                port_type = PORT_FIBRE;
1817                break;
1818        case MEDIA_DA_TWINAX:
1819                port_type = PORT_DA;
1820                break;
1821        case MEDIA_BASE_T:
1822                port_type = PORT_TP;
1823                break;
1824        case MEDIA_KR:
1825        case MEDIA_NOT_PRESENT:
1826                port_type = PORT_NONE;
1827                break;
1828        case MEDIA_UNSPECIFIED:
1829        default:
1830                port_type = PORT_OTHER;
1831                break;
1832        }
1833        return port_type;
1834}
1835
1836static int qed_get_link_data(struct qed_hwfn *hwfn,
1837                             struct qed_mcp_link_params *params,
1838                             struct qed_mcp_link_state *link,
1839                             struct qed_mcp_link_capabilities *link_caps)
1840{
1841        void *p;
1842
1843        if (!IS_PF(hwfn->cdev)) {
1844                qed_vf_get_link_params(hwfn, params);
1845                qed_vf_get_link_state(hwfn, link);
1846                qed_vf_get_link_caps(hwfn, link_caps);
1847
1848                return 0;
1849        }
1850
1851        p = qed_mcp_get_link_params(hwfn);
1852        if (!p)
1853                return -ENXIO;
1854        memcpy(params, p, sizeof(*params));
1855
1856        p = qed_mcp_get_link_state(hwfn);
1857        if (!p)
1858                return -ENXIO;
1859        memcpy(link, p, sizeof(*link));
1860
1861        p = qed_mcp_get_link_capabilities(hwfn);
1862        if (!p)
1863                return -ENXIO;
1864        memcpy(link_caps, p, sizeof(*link_caps));
1865
1866        return 0;
1867}
1868
1869static void qed_fill_link_capability(struct qed_hwfn *hwfn,
1870                                     struct qed_ptt *ptt, u32 capability,
1871                                     unsigned long *if_caps)
1872{
1873        u32 media_type, tcvr_state, tcvr_type;
1874        u32 speed_mask, board_cfg;
1875
1876        if (qed_mcp_get_media_type(hwfn, ptt, &media_type))
1877                media_type = MEDIA_UNSPECIFIED;
1878
1879        if (qed_mcp_get_transceiver_data(hwfn, ptt, &tcvr_state, &tcvr_type))
1880                tcvr_type = ETH_TRANSCEIVER_STATE_UNPLUGGED;
1881
1882        if (qed_mcp_trans_speed_mask(hwfn, ptt, &speed_mask))
1883                speed_mask = 0xFFFFFFFF;
1884
1885        if (qed_mcp_get_board_config(hwfn, ptt, &board_cfg))
1886                board_cfg = NVM_CFG1_PORT_PORT_TYPE_UNDEFINED;
1887
1888        DP_VERBOSE(hwfn->cdev, NETIF_MSG_DRV,
1889                   "Media_type = 0x%x tcvr_state = 0x%x tcvr_type = 0x%x speed_mask = 0x%x board_cfg = 0x%x\n",
1890                   media_type, tcvr_state, tcvr_type, speed_mask, board_cfg);
1891
1892        switch (media_type) {
1893        case MEDIA_DA_TWINAX:
1894                phylink_set(if_caps, FIBRE);
1895
1896                if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G)
1897                        phylink_set(if_caps, 20000baseKR2_Full);
1898
1899                /* For DAC media multiple speed capabilities are supported */
1900                capability |= speed_mask;
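                    /* e.g. a multi-rate DAC whose transceiver speed mask
                     * includes the 10G bit gets 10000baseCR_Full advertised
                     * below even when the NVM port capability alone lacks it.
                     */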
1901
1902                if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G)
1903                        phylink_set(if_caps, 1000baseKX_Full);
1904                if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G)
1905                        phylink_set(if_caps, 10000baseCR_Full);
1906
1907                if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G)
1908                        switch (tcvr_type) {
1909                        case ETH_TRANSCEIVER_TYPE_40G_CR4:
1910                        case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_CR:
1911                        case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_CR:
1912                                phylink_set(if_caps, 40000baseCR4_Full);
1913                                break;
1914                        default:
1915                                break;
1916                        }
1917
1918                if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G)
1919                        phylink_set(if_caps, 25000baseCR_Full);
1920                if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G)
1921                        phylink_set(if_caps, 50000baseCR2_Full);
1922
1923                if (capability &
1924                    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G)
1925                        switch (tcvr_type) {
1926                        case ETH_TRANSCEIVER_TYPE_100G_CR4:
1927                        case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_CR:
1928                                phylink_set(if_caps, 100000baseCR4_Full);
1929                                break;
1930                        default:
1931                                break;
1932                        }
1933
1934                break;
1935        case MEDIA_BASE_T:
1936                phylink_set(if_caps, TP);
1937
1938                if (board_cfg & NVM_CFG1_PORT_PORT_TYPE_EXT_PHY) {
1939                        if (capability &
1940                            NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G)
1941                                phylink_set(if_caps, 1000baseT_Full);
1942                        if (capability &
1943                            NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G)
1944                                phylink_set(if_caps, 10000baseT_Full);
1945                }
1946
1947                if (board_cfg & NVM_CFG1_PORT_PORT_TYPE_MODULE) {
1948                        phylink_set(if_caps, FIBRE);
1949
1950                        switch (tcvr_type) {
1951                        case ETH_TRANSCEIVER_TYPE_1000BASET:
1952                                phylink_set(if_caps, 1000baseT_Full);
1953                                break;
1954                        case ETH_TRANSCEIVER_TYPE_10G_BASET:
1955                                phylink_set(if_caps, 10000baseT_Full);
1956                                break;
1957                        default:
1958                                break;
1959                        }
1960                }
1961
1962                break;
1963        case MEDIA_SFP_1G_FIBER:
1964        case MEDIA_SFPP_10G_FIBER:
1965        case MEDIA_XFP_FIBER:
1966        case MEDIA_MODULE_FIBER:
1967                phylink_set(if_caps, FIBRE);
1968                capability |= speed_mask;
1969
1970                if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G)
1971                        switch (tcvr_type) {
1972                        case ETH_TRANSCEIVER_TYPE_1G_LX:
1973                        case ETH_TRANSCEIVER_TYPE_1G_SX:
1974                        case ETH_TRANSCEIVER_TYPE_MULTI_RATE_1G_10G_SR:
1975                        case ETH_TRANSCEIVER_TYPE_MULTI_RATE_1G_10G_LR:
1976                                phylink_set(if_caps, 1000baseKX_Full);
1977                                break;
1978                        default:
1979                                break;
1980                        }
1981
1982                if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G)
1983                        switch (tcvr_type) {
1984                        case ETH_TRANSCEIVER_TYPE_10G_SR:
1985                        case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_SR:
1986                        case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_25G_SR:
1987                        case ETH_TRANSCEIVER_TYPE_MULTI_RATE_1G_10G_SR:
1988                                phylink_set(if_caps, 10000baseSR_Full);
1989                                break;
1990                        case ETH_TRANSCEIVER_TYPE_10G_LR:
1991                        case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_LR:
1992                        case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_25G_LR:
1993                        case ETH_TRANSCEIVER_TYPE_MULTI_RATE_1G_10G_LR:
1994                                phylink_set(if_caps, 10000baseLR_Full);
1995                                break;
1996                        case ETH_TRANSCEIVER_TYPE_10G_LRM:
1997                                phylink_set(if_caps, 10000baseLRM_Full);
1998                                break;
1999                        case ETH_TRANSCEIVER_TYPE_10G_ER:
2000                                phylink_set(if_caps, 10000baseR_FEC);
2001                                break;
2002                        default:
2003                                break;
2004                        }
2005
2006                if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G)
2007                        phylink_set(if_caps, 20000baseKR2_Full);
2008
2009                if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G)
2010                        switch (tcvr_type) {
2011                        case ETH_TRANSCEIVER_TYPE_25G_SR:
2012                        case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_25G_SR:
2013                                phylink_set(if_caps, 25000baseSR_Full);
2014                                break;
2015                        default:
2016                                break;
2017                        }
2018
2019                if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G)
2020                        switch (tcvr_type) {
2021                        case ETH_TRANSCEIVER_TYPE_40G_LR4:
2022                        case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_LR:
2023                        case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_LR:
2024                                phylink_set(if_caps, 40000baseLR4_Full);
2025                                break;
2026                        case ETH_TRANSCEIVER_TYPE_40G_SR4:
2027                        case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_SR:
2028                        case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_SR:
2029                                phylink_set(if_caps, 40000baseSR4_Full);
2030                                break;
2031                        default:
2032                                break;
2033                        }
2034
2035                if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G)
2036                        phylink_set(if_caps, 50000baseKR2_Full);
2037
2038                if (capability &
2039                    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G)
2040                        switch (tcvr_type) {
2041                        case ETH_TRANSCEIVER_TYPE_100G_SR4:
2042                        case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_SR:
2043                                phylink_set(if_caps, 100000baseSR4_Full);
2044                                break;
2045                        case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_LR:
2046                                phylink_set(if_caps, 100000baseLR4_ER4_Full);
2047                                break;
2048                        default:
2049                                break;
2050                        }
2051
2052                break;
2053        case MEDIA_KR:
2054                phylink_set(if_caps, Backplane);
2055
2056                if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G)
2057                        phylink_set(if_caps, 20000baseKR2_Full);
2058                if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G)
2059                        phylink_set(if_caps, 1000baseKX_Full);
2060                if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G)
2061                        phylink_set(if_caps, 10000baseKR_Full);
2062                if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G)
2063                        phylink_set(if_caps, 25000baseKR_Full);
2064                if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G)
2065                        phylink_set(if_caps, 40000baseKR4_Full);
2066                if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G)
2067                        phylink_set(if_caps, 50000baseKR2_Full);
2068                if (capability &
2069                    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G)
2070                        phylink_set(if_caps, 100000baseKR4_Full);
2071
2072                break;
2073        case MEDIA_UNSPECIFIED:
2074        case MEDIA_NOT_PRESENT:
2075        default:
2076                DP_VERBOSE(hwfn->cdev, QED_MSG_DEBUG,
2077                           "Unknown media and transceiver type\n");
2078                break;
2079        }
2080}
2081
2082static void qed_lp_caps_to_speed_mask(u32 caps, u32 *speed_mask)
2083{
2084        *speed_mask = 0;
2085
2086        if (caps &
2087            (QED_LINK_PARTNER_SPEED_1G_FD | QED_LINK_PARTNER_SPEED_1G_HD))
2088                *speed_mask |= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G;
2089        if (caps & QED_LINK_PARTNER_SPEED_10G)
2090                *speed_mask |= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G;
2091        if (caps & QED_LINK_PARTNER_SPEED_20G)
2092                *speed_mask |= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G;
2093        if (caps & QED_LINK_PARTNER_SPEED_25G)
2094                *speed_mask |= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G;
2095        if (caps & QED_LINK_PARTNER_SPEED_40G)
2096                *speed_mask |= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G;
2097        if (caps & QED_LINK_PARTNER_SPEED_50G)
2098                *speed_mask |= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G;
2099        if (caps & QED_LINK_PARTNER_SPEED_100G)
2100                *speed_mask |= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G;
2101}
2102
2103static void qed_fill_link(struct qed_hwfn *hwfn,
2104                          struct qed_ptt *ptt,
2105                          struct qed_link_output *if_link)
2106{
2107        struct qed_mcp_link_capabilities link_caps;
2108        struct qed_mcp_link_params params;
2109        struct qed_mcp_link_state link;
2110        u32 media_type, speed_mask;
2111
2112        memset(if_link, 0, sizeof(*if_link));
2113
2114        /* Prepare source inputs */
2115        if (qed_get_link_data(hwfn, &params, &link, &link_caps)) {
2116                dev_warn(&hwfn->cdev->pdev->dev, "no link data available\n");
2117                return;
2118        }
2119
2120        /* Set the link parameters to pass to protocol driver */
2121        if (link.link_up)
2122                if_link->link_up = true;
2123
2124        if (IS_PF(hwfn->cdev) && qed_mcp_is_ext_speed_supported(hwfn)) {
2125                if (link_caps.default_ext_autoneg)
2126                        phylink_set(if_link->supported_caps, Autoneg);
2127
2128                linkmode_copy(if_link->advertised_caps, if_link->supported_caps);
2129
2130                if (params.ext_speed.autoneg)
2131                        phylink_set(if_link->advertised_caps, Autoneg);
2132                else
2133                        phylink_clear(if_link->advertised_caps, Autoneg);
2134
2135                qed_fill_link_capability(hwfn, ptt,
2136                                         params.ext_speed.advertised_speeds,
2137                                         if_link->advertised_caps);
2138        } else {
2139                if (link_caps.default_speed_autoneg)
2140                        phylink_set(if_link->supported_caps, Autoneg);
2141
2142                linkmode_copy(if_link->advertised_caps, if_link->supported_caps);
2143
2144                if (params.speed.autoneg)
2145                        phylink_set(if_link->advertised_caps, Autoneg);
2146                else
2147                        phylink_clear(if_link->advertised_caps, Autoneg);
2148        }
2149
2150        if (params.pause.autoneg ||
2151            (params.pause.forced_rx && params.pause.forced_tx))
2152                phylink_set(if_link->supported_caps, Asym_Pause);
2153        if (params.pause.autoneg || params.pause.forced_rx ||
2154            params.pause.forced_tx)
2155                phylink_set(if_link->supported_caps, Pause);
2156
2157        if_link->sup_fec = link_caps.fec_default;
2158        if_link->active_fec = params.fec;
2159
2160        /* Fill link advertised capability */
2161        qed_fill_link_capability(hwfn, ptt, params.speed.advertised_speeds,
2162                                 if_link->advertised_caps);
2163
2164        /* Fill link supported capability */
2165        qed_fill_link_capability(hwfn, ptt, link_caps.speed_capabilities,
2166                                 if_link->supported_caps);
2167
2168        /* Fill partner advertised capability */
2169        qed_lp_caps_to_speed_mask(link.partner_adv_speed, &speed_mask);
2170        qed_fill_link_capability(hwfn, ptt, speed_mask, if_link->lp_caps);
2171
2172        if (link.link_up)
2173                if_link->speed = link.speed;
2174
2175        /* TODO - fill duplex properly */
2176        if_link->duplex = DUPLEX_FULL;
2177        qed_mcp_get_media_type(hwfn, ptt, &media_type);
2178        if_link->port = qed_get_port_type(media_type);
2179
2180        if_link->autoneg = params.speed.autoneg;
2181
2182        if (params.pause.autoneg)
2183                if_link->pause_config |= QED_LINK_PAUSE_AUTONEG_ENABLE;
2184        if (params.pause.forced_rx)
2185                if_link->pause_config |= QED_LINK_PAUSE_RX_ENABLE;
2186        if (params.pause.forced_tx)
2187                if_link->pause_config |= QED_LINK_PAUSE_TX_ENABLE;
2188
2189        if (link.an_complete)
2190                phylink_set(if_link->lp_caps, Autoneg);
2191        if (link.partner_adv_pause)
2192                phylink_set(if_link->lp_caps, Pause);
2193        if (link.partner_adv_pause == QED_LINK_PARTNER_ASYMMETRIC_PAUSE ||
2194            link.partner_adv_pause == QED_LINK_PARTNER_BOTH_PAUSE)
2195                phylink_set(if_link->lp_caps, Asym_Pause);
2196
2197        if (link_caps.default_eee == QED_MCP_EEE_UNSUPPORTED) {
2198                if_link->eee_supported = false;
2199        } else {
2200                if_link->eee_supported = true;
2201                if_link->eee_active = link.eee_active;
2202                if_link->sup_caps = link_caps.eee_speed_caps;
2203                /* MFW clears adv_caps on eee disable; use configured value */
2204                if_link->eee.adv_caps = link.eee_adv_caps ? link.eee_adv_caps :
2205                                        params.eee.adv_caps;
2206                if_link->eee.lp_adv_caps = link.eee_lp_adv_caps;
2207                if_link->eee.enable = params.eee.enable;
2208                if_link->eee.tx_lpi_enable = params.eee.tx_lpi_enable;
2209                if_link->eee.tx_lpi_timer = params.eee.tx_lpi_timer;
2210        }
2211}
2212
2213static void qed_get_current_link(struct qed_dev *cdev,
2214                                 struct qed_link_output *if_link)
2215{
2216        struct qed_hwfn *hwfn;
2217        struct qed_ptt *ptt;
2218        int i;
2219
2220        hwfn = &cdev->hwfns[0];
2221        if (IS_PF(cdev)) {
2222                ptt = qed_ptt_acquire(hwfn);
2223                if (ptt) {
2224                        qed_fill_link(hwfn, ptt, if_link);
2225                        qed_ptt_release(hwfn, ptt);
2226                } else {
2227                        DP_NOTICE(hwfn, "Failed to fill link; No PTT\n");
2228                }
2229        } else {
2230                qed_fill_link(hwfn, NULL, if_link);
2231        }
2232
2233        for_each_hwfn(cdev, i)
2234                qed_inform_vf_link_state(&cdev->hwfns[i]);
2235}
2236
2237void qed_link_update(struct qed_hwfn *hwfn, struct qed_ptt *ptt)
2238{
2239        void *cookie = hwfn->cdev->ops_cookie;
2240        struct qed_common_cb_ops *op = hwfn->cdev->protocol_ops.common;
2241        struct qed_link_output if_link;
2242
2243        qed_fill_link(hwfn, ptt, &if_link);
2244        qed_inform_vf_link_state(hwfn);
2245
2246        if (IS_LEAD_HWFN(hwfn) && cookie)
2247                op->link_update(cookie, &if_link);
2248}
2249
2250void qed_bw_update(struct qed_hwfn *hwfn, struct qed_ptt *ptt)
2251{
2252        void *cookie = hwfn->cdev->ops_cookie;
2253        struct qed_common_cb_ops *op = hwfn->cdev->protocol_ops.common;
2254
2255        if (IS_LEAD_HWFN(hwfn) && cookie && op && op->bw_update)
2256                op->bw_update(cookie);
2257}
2258
2259static int qed_drain(struct qed_dev *cdev)
2260{
2261        struct qed_hwfn *hwfn;
2262        struct qed_ptt *ptt;
2263        int i, rc;
2264
2265        if (IS_VF(cdev))
2266                return 0;
2267
2268        for_each_hwfn(cdev, i) {
2269                hwfn = &cdev->hwfns[i];
2270                ptt = qed_ptt_acquire(hwfn);
2271                if (!ptt) {
2272                        DP_NOTICE(hwfn, "Failed to drain NIG; No PTT\n");
2273                        return -EBUSY;
2274                }
2275                rc = qed_mcp_drain(hwfn, ptt);
2276                qed_ptt_release(hwfn, ptt);
2277                if (rc)
2278                        return rc;
2279        }
2280
2281        return 0;
2282}
2283
2284static u32 qed_nvm_flash_image_access_crc(struct qed_dev *cdev,
2285                                          struct qed_nvm_image_att *nvm_image,
2286                                          u32 *crc)
2287{
2288        u8 *buf = NULL;
2289        int rc;
2290
2291        /* Allocate a buffer for holding the nvram image */
2292        buf = kzalloc(nvm_image->length, GFP_KERNEL);
2293        if (!buf)
2294                return -ENOMEM;
2295
2296        /* Read image into buffer */
2297        rc = qed_mcp_nvm_read(cdev, nvm_image->start_addr,
2298                              buf, nvm_image->length);
2299        if (rc) {
2300                DP_ERR(cdev, "Failed reading image from nvm\n");
2301                goto out;
2302        }
2303
2304        /* Convert the buffer into big-endian format (excluding the
2305         * trailing 4 CRC bytes).
2306         */
2307        cpu_to_be32_array((__force __be32 *)buf, (const u32 *)buf,
2308                          DIV_ROUND_UP(nvm_image->length - 4, 4));
2309
2310        /* Calc CRC for the "actual" image buffer, i.e. not including
2311         * the last 4 CRC bytes.
2312         */
2313        *crc = ~crc32(~0U, buf, nvm_image->length - 4);
2314        *crc = (__force u32)cpu_to_be32p(crc);
2315
2316out:
2317        kfree(buf);
2318
2319        return rc;
2320}
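
    /* Illustrative sketch (not part of the driver) of the CRC rule the
     * function above implements: the last 4 bytes of the image hold the
     * big-endian CRC32 of the preceding, big-endian-converted payload:
     *
     *    u32 nvm_image_crc(const u8 *buf, u32 len)  // len includes the CRC
     *    {
     *            u32 crc = ~crc32(~0U, buf, len - 4);
     *
     *            return (__force u32)cpu_to_be32(crc);  // stored big-endian
     *    }
     */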
2321
2322/* Binary file format -
2323 *     /----------------------------------------------------------------------\
2324 * 0B  |                       0x4 [command index]                            |
2325 * 4B  | image_type     | Options        |  Number of register settings       |
2326 * 8B  |                       Value                                          |
2327 * 12B |                       Mask                                           |
2328 * 16B |                       Offset                                         |
2329 *     \----------------------------------------------------------------------/
2330 * There can be several Value-Mask-Offset sets as specified by 'Number of...'.
2331 * Options - 0'b - Calculate & Update CRC for image
2332 */
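    /* Worked example (illustrative): a single Value-Mask-Offset set with
     * Value = 0x00000000, Mask = 0x00000001, Offset = 0x00000010 rewrites the
     * word at image offset 0x10 as (Value & Mask) | (cur_value & ~Mask), i.e.
     * it clears bit 0 and leaves the other 31 bits untouched.
     */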
2333static int qed_nvm_flash_image_access(struct qed_dev *cdev, const u8 **data,
2334                                      bool *check_resp)
2335{
2336        struct qed_nvm_image_att nvm_image;
2337        struct qed_hwfn *p_hwfn;
2338        bool is_crc = false;
2339        u32 image_type;
2340        int rc = 0, i;
2341        u16 len;
2342
2343        *data += 4;
2344        image_type = **data;
2345        p_hwfn = QED_LEADING_HWFN(cdev);
2346        for (i = 0; i < p_hwfn->nvm_info.num_images; i++)
2347                if (image_type == p_hwfn->nvm_info.image_att[i].image_type)
2348                        break;
2349        if (i == p_hwfn->nvm_info.num_images) {
2350                DP_ERR(cdev, "Failed to find nvram image of type %08x\n",
2351                       image_type);
2352                return -ENOENT;
2353        }
2354
2355        nvm_image.start_addr = p_hwfn->nvm_info.image_att[i].nvm_start_addr;
2356        nvm_image.length = p_hwfn->nvm_info.image_att[i].len;
2357
2358        DP_VERBOSE(cdev, NETIF_MSG_DRV,
2359                   "Read image %02x; type = %08x; NVM [%08x,...,%08x]\n",
2360                   **data, image_type, nvm_image.start_addr,
2361                   nvm_image.start_addr + nvm_image.length - 1);
2362        (*data)++;
2363        is_crc = !!(**data & BIT(0));
2364        (*data)++;
2365        len = *((u16 *)*data);
2366        *data += 2;
2367        if (is_crc) {
2368                u32 crc = 0;
2369
2370                rc = qed_nvm_flash_image_access_crc(cdev, &nvm_image, &crc);
2371                if (rc) {
2372                        DP_ERR(cdev, "Failed calculating CRC, rc = %d\n", rc);
2373                        goto exit;
2374                }
2375
2376                rc = qed_mcp_nvm_write(cdev, QED_NVM_WRITE_NVRAM,
2377                                       (nvm_image.start_addr +
2378                                        nvm_image.length - 4), (u8 *)&crc, 4);
2379                if (rc)
2380                        DP_ERR(cdev, "Failed writing to %08x, rc = %d\n",
2381                               nvm_image.start_addr + nvm_image.length - 4, rc);
2382                goto exit;
2383        }
2384
2385        /* Iterate over the values for setting */
2386        while (len) {
2387                u32 offset, mask, value, cur_value;
2388                u8 buf[4];
2389
2390                value = *((u32 *)*data);
2391                *data += 4;
2392                mask = *((u32 *)*data);
2393                *data += 4;
2394                offset = *((u32 *)*data);
2395                *data += 4;
2396
2397                rc = qed_mcp_nvm_read(cdev, nvm_image.start_addr + offset, buf,
2398                                      4);
2399                if (rc) {
2400                        DP_ERR(cdev, "Failed reading from %08x\n",
2401                               nvm_image.start_addr + offset);
2402                        goto exit;
2403                }
2404
2405                cur_value = le32_to_cpu(*((__le32 *)buf));
2406                DP_VERBOSE(cdev, NETIF_MSG_DRV,
2407                           "NVM %08x: %08x -> %08x [Value %08x Mask %08x]\n",
2408                           nvm_image.start_addr + offset, cur_value,
2409                           (cur_value & ~mask) | (value & mask), value, mask);
2410                value = (value & mask) | (cur_value & ~mask);
2411                rc = qed_mcp_nvm_write(cdev, QED_NVM_WRITE_NVRAM,
2412                                       nvm_image.start_addr + offset,
2413                                       (u8 *)&value, 4);
2414                if (rc) {
2415                        DP_ERR(cdev, "Failed writing to %08x\n",
2416                               nvm_image.start_addr + offset);
2417                        goto exit;
2418                }
2419
2420                len--;
2421        }
2422exit:
2423        return rc;
2424}
2425
2426/* Binary file format -
2427 *     /----------------------------------------------------------------------\
2428 * 0B  |                       0x3 [command index]                            |
2429 * 4B  | b'0: check_response?   | b'1-31  reserved                            |
2430 * 8B  | File-type |                   reserved                               |
2431 * 12B |                    Image length in bytes                             |
2432 *     \----------------------------------------------------------------------/
2433 *     Start a new file of the provided type
2434 */
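    /* Worked example (illustrative): starting an MBI file of 0x100 bytes with
     * a response check is parsed below as the words 0x00000003, a flags word
     * with bit 0 set, a file-type word and the 0x00000100 length; note that
     * the parser only consumes the trailing length word for the MBI file type.
     */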
2435static int qed_nvm_flash_image_file_start(struct qed_dev *cdev,
2436                                          const u8 **data, bool *check_resp)
2437{
2438        u32 file_type, file_size = 0;
2439        int rc;
2440
2441        *data += 4;
2442        *check_resp = !!(**data & BIT(0));
2443        *data += 4;
2444        file_type = **data;
2445
2446        DP_VERBOSE(cdev, NETIF_MSG_DRV,
2447                   "About to start a new file of type %02x\n", file_type);
2448        if (file_type == DRV_MB_PARAM_NVM_PUT_FILE_BEGIN_MBI) {
2449                *data += 4;
2450                file_size = *((u32 *)(*data));
2451        }
2452
2453        rc = qed_mcp_nvm_write(cdev, QED_PUT_FILE_BEGIN, file_type,
2454                               (u8 *)(&file_size), 4);
2455        *data += 4;
2456
2457        return rc;
2458}
2459
2460/* Binary file format -
2461 *     /----------------------------------------------------------------------\
2462 * 0B  |                       0x2 [command index]                            |
2463 * 4B  |                       Length in bytes                                |
2464 * 8B  | b'0: check_response?   | b'1-31  reserved                            |
2465 * 12B |                       Offset in bytes                                |
2466 * 16B |                       Data ...                                       |
2467 *     \----------------------------------------------------------------------/
2468 *     Write data as part of a file that was previously started. Data should be
2469 *     of length equal to that provided in the message
2470 */
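    /* Worked example (illustrative): writing 0x40 data bytes at file offset
     * 0x200 is encoded as the words 0x00000002, 0x00000040, a flags word
     * (bit 0 = check_response) and 0x00000200, followed by the 0x40 data
     * bytes; the parser below consumes 16 + len bytes in total.
     */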
2471static int qed_nvm_flash_image_file_data(struct qed_dev *cdev,
2472                                         const u8 **data, bool *check_resp)
2473{
2474        u32 offset, len;
2475        int rc;
2476
2477        *data += 4;
2478        len = *((u32 *)(*data));
2479        *data += 4;
2480        *check_resp = !!(**data & BIT(0));
2481        *data += 4;
2482        offset = *((u32 *)(*data));
2483        *data += 4;
2484
2485        DP_VERBOSE(cdev, NETIF_MSG_DRV,
2486                   "About to write File-data: %08x bytes to offset %08x\n",
2487                   len, offset);
2488
2489        rc = qed_mcp_nvm_write(cdev, QED_PUT_FILE_DATA, offset,
2490                               (char *)(*data), len);
2491        *data += len;
2492
2493        return rc;
2494}
2495
2496/* Binary file format [General header] -
2497 *     /----------------------------------------------------------------------\
2498 * 0B  |                       QED_NVM_SIGNATURE                              |
2499 * 4B  |                       Length in bytes                                |
2500 * 8B  | Highest command in this batchfile |          Reserved                |
2501 *     \----------------------------------------------------------------------/
2502 */
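    /* Worked example (illustrative): a 0x30-byte batchfile containing only
     * command 0x2 starts with QED_NVM_SIGNATURE, then 0x00000030, then a word
     * whose low 16 bits are 0x0002; validation below rejects the file when
     * the signature or the internal length does not match, or when that
     * highest-command value is >= QED_NVM_FLASH_CMD_NVM_MAX.
     */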
2503static int qed_nvm_flash_image_validate(struct qed_dev *cdev,
2504                                        const struct firmware *image,
2505                                        const u8 **data)
2506{
2507        u32 signature, len;
2508
2509        /* Check minimum size */
2510        if (image->size < 12) {
2511                DP_ERR(cdev, "Image is too short [%08x]\n", (u32)image->size);
2512                return -EINVAL;
2513        }
2514
2515        /* Check signature */
2516        signature = *((u32 *)(*data));
2517        if (signature != QED_NVM_SIGNATURE) {
2518                DP_ERR(cdev, "Wrong signature '%08x'\n", signature);
2519                return -EINVAL;
2520        }
2521
2522        *data += 4;
2523        /* Validate internal size equals the image-size */
2524        len = *((u32 *)(*data));
2525        if (len != image->size) {
2526                DP_ERR(cdev, "Size mismatch: internal = %08x image = %08x\n",
2527                       len, (u32)image->size);
2528                return -EINVAL;
2529        }
2530
2531        *data += 4;
2532        /* Make sure the driver is familiar with all the commands this file needs */
2533        if (*((u16 *)(*data)) >= QED_NVM_FLASH_CMD_NVM_MAX) {
2534                DP_ERR(cdev, "File contains unsupported commands [Need %04x]\n",
2535                       *((u16 *)(*data)));
2536                return -EINVAL;
2537        }
2538
2539        *data += 4;
2540
2541        return 0;
2542}
2543
2544/* Binary file format -
2545 *     /----------------------------------------------------------------------\
2546 * 0B  |                       0x5 [command index]                            |
2547 * 4B  | Number of config attributes     |          Reserved                  |
2548 * 4B  | Config ID                       | Entity ID      | Length            |
2549 * 4B  | Value                                                                |
2550 *     |                                                                      |
2551 *     \----------------------------------------------------------------------/
2552 * There can be several cfg_id-entity_id-Length-Value sets as specified by
2553 * 'Number of config attributes'.
2554 *
2555 * The API parses config attributes from the user-provided buffer and flashes
2556 * them to the respective NVM path using the Management FW interface.
2557 */
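    /* Worked example (illustrative): two attributes would be encoded as the
     * command word 0x00000005, a word whose low 16 bits hold the count (2),
     * then for each attribute a 16-bit Config ID, an 8-bit Entity ID, an
     * 8-bit Length and Length value bytes, exactly as consumed by the loop
     * below.
     */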
2558static int qed_nvm_flash_cfg_write(struct qed_dev *cdev, const u8 **data)
2559{
2560        struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
2561        u8 entity_id, len, buf[32];
2562        bool need_nvm_init = true;
2563        struct qed_ptt *ptt;
2564        u16 cfg_id, count;
2565        int rc = 0, i;
2566        u32 flags;
2567
2568        ptt = qed_ptt_acquire(hwfn);
2569        if (!ptt)
2570                return -EAGAIN;
2571
2572        /* NVM CFG ID attribute header */
2573        *data += 4;
2574        count = *((u16 *)*data);
2575        *data += 4;
2576
2577        DP_VERBOSE(cdev, NETIF_MSG_DRV,
2578                   "Read config ids: num_attrs = %d\n", count);
2579        /* NVM CFG ID attributes. Start loop index from 1 to avoid additional
2580         * arithmetic operations in the implementation.
2581         */
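            /* e.g. with count == 120 and QED_NVM_CFG_MAX_ATTRS == 50, the
             * INIT flag is sent at i == 1, 51 and 101, and COMMIT | FREE at
             * i == 50, 100 and 120 (the final, partial batch).
             */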
2582        for (i = 1; i <= count; i++) {
2583                cfg_id = *((u16 *)*data);
2584                *data += 2;
2585                entity_id = **data;
2586                (*data)++;
2587                len = **data;
2588                (*data)++;
2589                memcpy(buf, *data, len);
2590                *data += len;
2591
2592                flags = 0;
2593                if (need_nvm_init) {
2594                        flags |= QED_NVM_CFG_OPTION_INIT;
2595                        need_nvm_init = false;
2596                }
2597
2598                /* Commit to flash and free the resources */
2599                if (!(i % QED_NVM_CFG_MAX_ATTRS) || i == count) {
2600                        flags |= QED_NVM_CFG_OPTION_COMMIT |
2601                                 QED_NVM_CFG_OPTION_FREE;
2602                        need_nvm_init = true;
2603                }
2604
2605                if (entity_id)
2606                        flags |= QED_NVM_CFG_OPTION_ENTITY_SEL;
2607
2608                DP_VERBOSE(cdev, NETIF_MSG_DRV,
2609                           "cfg_id = %d entity = %d len = %d\n", cfg_id,
2610                           entity_id, len);
2611                rc = qed_mcp_nvm_set_cfg(hwfn, ptt, cfg_id, entity_id, flags,
2612                                         buf, len);
2613                if (rc) {
2614                        DP_ERR(cdev, "Error %d configuring %d\n", rc, cfg_id);
2615                        break;
2616                }
2617        }
2618
2619        qed_ptt_release(hwfn, ptt);
2620
2621        return rc;
2622}
2623
2624#define QED_MAX_NVM_BUF_LEN     32
2625static int qed_nvm_flash_cfg_len(struct qed_dev *cdev, u32 cmd)
2626{
2627        struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
2628        u8 buf[QED_MAX_NVM_BUF_LEN];
2629        struct qed_ptt *ptt;
2630        u32 len;
2631        int rc;
2632
2633        ptt = qed_ptt_acquire(hwfn);
2634        if (!ptt)
2635                return QED_MAX_NVM_BUF_LEN;
2636
2637        rc = qed_mcp_nvm_get_cfg(hwfn, ptt, cmd, 0, QED_NVM_CFG_GET_FLAGS, buf,
2638                                 &len);
2639        if (rc || !len) {
2640                DP_ERR(cdev, "Error %d reading %d\n", rc, cmd);
2641                len = QED_MAX_NVM_BUF_LEN;
2642        }
2643
2644        qed_ptt_release(hwfn, ptt);
2645
2646        return len;
2647}
2648
2649static int qed_nvm_flash_cfg_read(struct qed_dev *cdev, u8 **data,
2650                                  u32 cmd, u32 entity_id)
2651{
2652        struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
2653        struct qed_ptt *ptt;
2654        u32 flags, len;
2655        int rc = 0;
2656
2657        ptt = qed_ptt_acquire(hwfn);
2658        if (!ptt)
2659                return -EAGAIN;
2660
2661        DP_VERBOSE(cdev, NETIF_MSG_DRV,
2662                   "Read config cmd = %d entity id %d\n", cmd, entity_id);
2663        flags = entity_id ? QED_NVM_CFG_GET_PF_FLAGS : QED_NVM_CFG_GET_FLAGS;
2664        rc = qed_mcp_nvm_get_cfg(hwfn, ptt, cmd, entity_id, flags, *data, &len);
2665        if (rc)
2666                DP_ERR(cdev, "Error %d reading %d\n", rc, cmd);
2667
2668        qed_ptt_release(hwfn, ptt);
2669
2670        return rc;
2671}
2672
2673static int qed_nvm_flash(struct qed_dev *cdev, const char *name)
2674{
2675        const struct firmware *image;
2676        const u8 *data, *data_end;
2677        u32 cmd_type;
2678        int rc;
2679
2680        rc = request_firmware(&image, name, &cdev->pdev->dev);
2681        if (rc) {
2682                DP_ERR(cdev, "Failed to find '%s'\n", name);
2683                return rc;
2684        }
2685
2686        DP_VERBOSE(cdev, NETIF_MSG_DRV,
2687                   "Flashing '%s' - firmware's data at %p, size is %08x\n",
2688                   name, image->data, (u32)image->size);
2689        data = image->data;
2690        data_end = data + image->size;
2691
2692        rc = qed_nvm_flash_image_validate(cdev, image, &data);
2693        if (rc)
2694                goto exit;
2695
2696        while (data < data_end) {
2697                bool check_resp = false;
2698
2699                /* Parse the actual command */
2700                cmd_type = *((u32 *)data);
2701                switch (cmd_type) {
2702                case QED_NVM_FLASH_CMD_FILE_DATA:
2703                        rc = qed_nvm_flash_image_file_data(cdev, &data,
2704                                                           &check_resp);
2705                        break;
2706                case QED_NVM_FLASH_CMD_FILE_START:
2707                        rc = qed_nvm_flash_image_file_start(cdev, &data,
2708                                                            &check_resp);
2709                        break;
2710                case QED_NVM_FLASH_CMD_NVM_CHANGE:
2711                        rc = qed_nvm_flash_image_access(cdev, &data,
2712                                                        &check_resp);
2713                        break;
2714                case QED_NVM_FLASH_CMD_NVM_CFG_ID:
2715                        rc = qed_nvm_flash_cfg_write(cdev, &data);
2716                        break;
2717                default:
2718                        DP_ERR(cdev, "Unknown command %08x\n", cmd_type);
2719                        rc = -EINVAL;
2720                        goto exit;
2721                }
2722
2723                if (rc) {
2724                        DP_ERR(cdev, "Command %08x failed\n", cmd_type);
2725                        goto exit;
2726                }
2727
2728                /* Check response if needed */
2729                if (check_resp) {
2730                        u32 mcp_response = 0;
2731
2732                        if (qed_mcp_nvm_resp(cdev, (u8 *)&mcp_response)) {
2733                                DP_ERR(cdev, "Failed getting MCP response\n");
2734                                rc = -EINVAL;
2735                                goto exit;
2736                        }
2737
2738                        switch (mcp_response & FW_MSG_CODE_MASK) {
2739                        case FW_MSG_CODE_OK:
2740                        case FW_MSG_CODE_NVM_OK:
2741                        case FW_MSG_CODE_NVM_PUT_FILE_FINISH_OK:
2742                        case FW_MSG_CODE_PHY_OK:
2743                                break;
2744                        default:
2745                                DP_ERR(cdev, "MFW returns error: %08x\n",
2746                                       mcp_response);
2747                                rc = -EINVAL;
2748                                goto exit;
2749                        }
2750                }
2751        }
2752
2753exit:
2754        release_firmware(image);
2755
2756        return rc;
2757}
2758
2759static int qed_nvm_get_image(struct qed_dev *cdev, enum qed_nvm_images type,
2760                             u8 *buf, u16 len)
2761{
2762        struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
2763
2764        return qed_mcp_get_nvm_image(hwfn, type, buf, len);
2765}
2766
2767void qed_schedule_recovery_handler(struct qed_hwfn *p_hwfn)
2768{
2769        struct qed_common_cb_ops *ops = p_hwfn->cdev->protocol_ops.common;
2770        void *cookie = p_hwfn->cdev->ops_cookie;
2771
2772        if (ops && ops->schedule_recovery_handler)
2773                ops->schedule_recovery_handler(cookie);
2774}
2775
2776static const char * const qed_hw_err_type_descr[] = {
2777        [QED_HW_ERR_FAN_FAIL]           = "Fan Failure",
2778        [QED_HW_ERR_MFW_RESP_FAIL]      = "MFW Response Failure",
2779        [QED_HW_ERR_HW_ATTN]            = "HW Attention",
2780        [QED_HW_ERR_DMAE_FAIL]          = "DMAE Failure",
2781        [QED_HW_ERR_RAMROD_FAIL]        = "Ramrod Failure",
2782        [QED_HW_ERR_FW_ASSERT]          = "FW Assertion",
2783        [QED_HW_ERR_LAST]               = "Unknown",
2784};
2785
2786void qed_hw_error_occurred(struct qed_hwfn *p_hwfn,
2787                           enum qed_hw_err_type err_type)
2788{
2789        struct qed_common_cb_ops *ops = p_hwfn->cdev->protocol_ops.common;
2790        void *cookie = p_hwfn->cdev->ops_cookie;
2791        const char *err_str;
2792
2793        if (err_type > QED_HW_ERR_LAST)
2794                err_type = QED_HW_ERR_LAST;
2795        err_str = qed_hw_err_type_descr[err_type];
2796
2797        DP_NOTICE(p_hwfn, "HW error occurred [%s]\n", err_str);
2798
2799        /* Call the HW error handler of the protocol driver.
2800         * If it is not available - perform a minimal handling of preventing
2801         * HW attentions from being reasserted.
2802         */
2803        if (ops && ops->schedule_hw_err_handler)
2804                ops->schedule_hw_err_handler(cookie, err_type);
2805        else
2806                qed_int_attn_clr_enable(p_hwfn->cdev, true);
2807}
2808
2809static int qed_set_coalesce(struct qed_dev *cdev, u16 rx_coal, u16 tx_coal,
2810                            void *handle)
2811{
2812        return qed_set_queue_coalesce(rx_coal, tx_coal, handle);
2813}
2814
2815static int qed_set_led(struct qed_dev *cdev, enum qed_led_mode mode)
2816{
2817        struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
2818        struct qed_ptt *ptt;
2819        int status = 0;
2820
2821        ptt = qed_ptt_acquire(hwfn);
2822        if (!ptt)
2823                return -EAGAIN;
2824
2825        status = qed_mcp_set_led(hwfn, ptt, mode);
2826
2827        qed_ptt_release(hwfn, ptt);
2828
2829        return status;
2830}
2831
2832int qed_recovery_process(struct qed_dev *cdev)
2833{
2834        struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
2835        struct qed_ptt *p_ptt;
2836        int rc = 0;
2837
2838        p_ptt = qed_ptt_acquire(p_hwfn);
2839        if (!p_ptt)
2840                return -EAGAIN;
2841
2842        rc = qed_start_recovery_process(p_hwfn, p_ptt);
2843
2844        qed_ptt_release(p_hwfn, p_ptt);
2845
2846        return rc;
2847}

static int qed_update_wol(struct qed_dev *cdev, bool enabled)
{
        struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
        struct qed_ptt *ptt;
        int rc = 0;

        if (IS_VF(cdev))
                return 0;

        ptt = qed_ptt_acquire(hwfn);
        if (!ptt)
                return -EAGAIN;

        rc = qed_mcp_ov_update_wol(hwfn, ptt, enabled ? QED_OV_WOL_ENABLED
                                   : QED_OV_WOL_DISABLED);
        if (rc)
                goto out;
        rc = qed_mcp_ov_update_current_config(hwfn, ptt, QED_OV_CLIENT_DRV);

out:
        qed_ptt_release(hwfn, ptt);
        return rc;
}

static int qed_update_drv_state(struct qed_dev *cdev, bool active)
{
        struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
        struct qed_ptt *ptt;
        int status = 0;

        if (IS_VF(cdev))
                return 0;

        ptt = qed_ptt_acquire(hwfn);
        if (!ptt)
                return -EAGAIN;

        status = qed_mcp_ov_update_driver_state(hwfn, ptt, active ?
                                                QED_OV_DRIVER_STATE_ACTIVE :
                                                QED_OV_DRIVER_STATE_DISABLED);

        qed_ptt_release(hwfn, ptt);

        return status;
}

static int qed_update_mac(struct qed_dev *cdev, u8 *mac)
{
        struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
        struct qed_ptt *ptt;
        int status = 0;

        if (IS_VF(cdev))
                return 0;

        ptt = qed_ptt_acquire(hwfn);
        if (!ptt)
                return -EAGAIN;

        status = qed_mcp_ov_update_mac(hwfn, ptt, mac);
        if (status)
                goto out;

        status = qed_mcp_ov_update_current_config(hwfn, ptt, QED_OV_CLIENT_DRV);

out:
        qed_ptt_release(hwfn, ptt);
        return status;
}

static int qed_update_mtu(struct qed_dev *cdev, u16 mtu)
{
        struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
        struct qed_ptt *ptt;
        int status = 0;

        if (IS_VF(cdev))
                return 0;

        ptt = qed_ptt_acquire(hwfn);
        if (!ptt)
                return -EAGAIN;

        status = qed_mcp_ov_update_mtu(hwfn, ptt, mtu);
        if (status)
                goto out;

        status = qed_mcp_ov_update_current_config(hwfn, ptt, QED_OV_CLIENT_DRV);

out:
        qed_ptt_release(hwfn, ptt);
        return status;
}
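
/* The qed_mcp_ov_update_*() wrappers above all follow the same idiom:
 * acquire a PTT window, push the new value to the management FW, then
 * report the driver as the client of the current config.  A minimal sketch
 * of that shared shape, compiled out, assuming the MFW setters were
 * normalized to a common u32-value signature (they are not; this is purely
 * illustrative):
 */
#if 0
static int example_ov_update(struct qed_hwfn *hwfn,
                             int (*update)(struct qed_hwfn *hwfn,
                                           struct qed_ptt *ptt, u32 val),
                             u32 val)
{
        struct qed_ptt *ptt;
        int rc;

        ptt = qed_ptt_acquire(hwfn);
        if (!ptt)
                return -EAGAIN;

        rc = update(hwfn, ptt, val);
        if (!rc)
                rc = qed_mcp_ov_update_current_config(hwfn, ptt,
                                                      QED_OV_CLIENT_DRV);

        qed_ptt_release(hwfn, ptt);
        return rc;
}
#endif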

static int qed_read_module_eeprom(struct qed_dev *cdev, char *buf,
                                  u8 dev_addr, u32 offset, u32 len)
{
        struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
        struct qed_ptt *ptt;
        int rc = 0;

        if (IS_VF(cdev))
                return 0;

        ptt = qed_ptt_acquire(hwfn);
        if (!ptt)
                return -EAGAIN;

        rc = qed_mcp_phy_sfp_read(hwfn, ptt, MFW_PORT(hwfn), dev_addr,
                                  offset, len, buf);

        qed_ptt_release(hwfn, ptt);

        return rc;
}

static int qed_set_grc_config(struct qed_dev *cdev, u32 cfg_id, u32 val)
{
        struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
        struct qed_ptt *ptt;
        int rc = 0;

        if (IS_VF(cdev))
                return 0;

        ptt = qed_ptt_acquire(hwfn);
        if (!ptt)
                return -EAGAIN;

        rc = qed_dbg_grc_config(hwfn, cfg_id, val);

        qed_ptt_release(hwfn, ptt);

        return rc;
}

static u8 qed_get_affin_hwfn_idx(struct qed_dev *cdev)
{
        return QED_AFFIN_HWFN_IDX(cdev);
}

static struct qed_selftest_ops qed_selftest_ops_pass = {
        .selftest_memory = &qed_selftest_memory,
        .selftest_interrupt = &qed_selftest_interrupt,
        .selftest_register = &qed_selftest_register,
        .selftest_clock = &qed_selftest_clock,
        .selftest_nvram = &qed_selftest_nvram,
};

const struct qed_common_ops qed_common_ops_pass = {
        .selftest = &qed_selftest_ops_pass,
        .probe = &qed_probe,
        .remove = &qed_remove,
        .set_power_state = &qed_set_power_state,
        .set_name = &qed_set_name,
        .update_pf_params = &qed_update_pf_params,
        .slowpath_start = &qed_slowpath_start,
        .slowpath_stop = &qed_slowpath_stop,
        .set_fp_int = &qed_set_int_fp,
        .get_fp_int = &qed_get_int_fp,
        .sb_init = &qed_sb_init,
        .sb_release = &qed_sb_release,
        .simd_handler_config = &qed_simd_handler_config,
        .simd_handler_clean = &qed_simd_handler_clean,
        .dbg_grc = &qed_dbg_grc,
        .dbg_grc_size = &qed_dbg_grc_size,
        .can_link_change = &qed_can_link_change,
        .set_link = &qed_set_link,
        .get_link = &qed_get_current_link,
        .drain = &qed_drain,
        .update_msglvl = &qed_init_dp,
        .devlink_register = qed_devlink_register,
        .devlink_unregister = qed_devlink_unregister,
        .report_fatal_error = qed_report_fatal_error,
        .dbg_all_data = &qed_dbg_all_data,
        .dbg_all_data_size = &qed_dbg_all_data_size,
        .chain_alloc = &qed_chain_alloc,
        .chain_free = &qed_chain_free,
        .nvm_flash = &qed_nvm_flash,
        .nvm_get_image = &qed_nvm_get_image,
        .set_coalesce = &qed_set_coalesce,
        .set_led = &qed_set_led,
        .recovery_process = &qed_recovery_process,
        .recovery_prolog = &qed_recovery_prolog,
        .attn_clr_enable = &qed_int_attn_clr_enable,
        .update_drv_state = &qed_update_drv_state,
        .update_mac = &qed_update_mac,
        .update_mtu = &qed_update_mtu,
        .update_wol = &qed_update_wol,
        .db_recovery_add = &qed_db_recovery_add,
        .db_recovery_del = &qed_db_recovery_del,
        .read_module_eeprom = &qed_read_module_eeprom,
        .get_affin_hwfn_idx = &qed_get_affin_hwfn_idx,
        .read_nvm_cfg = &qed_nvm_flash_cfg_read,
        .read_nvm_cfg_len = &qed_nvm_flash_cfg_len,
        .set_grc_config = &qed_set_grc_config,
};
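
/* Sketch of how an upper-layer driver consumes this table (paraphrased
 * from the usual flow; local variable names are illustrative):
 *
 *	const struct qed_eth_ops *qed_ops = qed_get_eth_ops();
 *	struct qed_dev *cdev;
 *
 *	cdev = qed_ops->common->probe(pdev, &probe_params);
 *	qed_ops->common->update_pf_params(cdev, &pf_params);
 *	...
 *	qed_ops->common->remove(cdev);
 */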

void qed_get_protocol_stats(struct qed_dev *cdev,
                            enum qed_mcp_protocol_type type,
                            union qed_mcp_protocol_stats *stats)
{
        struct qed_eth_stats eth_stats;

        memset(stats, 0, sizeof(*stats));

        switch (type) {
        case QED_MCP_LAN_STATS:
                qed_get_vport_stats(cdev, &eth_stats);
                stats->lan_stats.ucast_rx_pkts =
                                        eth_stats.common.rx_ucast_pkts;
                stats->lan_stats.ucast_tx_pkts =
                                        eth_stats.common.tx_ucast_pkts;
                stats->lan_stats.fcs_err = -1;
                break;
        case QED_MCP_FCOE_STATS:
                qed_get_protocol_stats_fcoe(cdev, &stats->fcoe_stats);
                break;
        case QED_MCP_ISCSI_STATS:
                qed_get_protocol_stats_iscsi(cdev, &stats->iscsi_stats);
                break;
        default:
                DP_VERBOSE(cdev, QED_MSG_SP,
                           "Invalid protocol type = %d\n", type);
                return;
        }
}

int qed_mfw_tlv_req(struct qed_hwfn *hwfn)
{
        DP_VERBOSE(hwfn->cdev, NETIF_MSG_DRV,
                   "Scheduling slowpath task [Flag: %d]\n",
                   QED_SLOWPATH_MFW_TLV_REQ);
        /* set_bit() does not imply a memory barrier; the explicit barriers
         * make the flag update fully ordered with respect to queuing the
         * slowpath task that will consume it.
         */
        smp_mb__before_atomic();
        set_bit(QED_SLOWPATH_MFW_TLV_REQ, &hwfn->slowpath_task_flags);
        smp_mb__after_atomic();
        queue_delayed_work(hwfn->slowpath_wq, &hwfn->slowpath_task, 0);

        return 0;
}

static void
qed_fill_generic_tlv_data(struct qed_dev *cdev, struct qed_mfw_tlv_generic *tlv)
{
        struct qed_common_cb_ops *op = cdev->protocol_ops.common;
        struct qed_eth_stats_common *p_common;
        struct qed_generic_tlvs gen_tlvs;
        struct qed_eth_stats stats;
        int i;

        memset(&gen_tlvs, 0, sizeof(gen_tlvs));
        op->get_generic_tlv_data(cdev->ops_cookie, &gen_tlvs);

        if (gen_tlvs.feat_flags & QED_TLV_IP_CSUM)
                tlv->flags.ipv4_csum_offload = true;
        if (gen_tlvs.feat_flags & QED_TLV_LSO)
                tlv->flags.lso_supported = true;
        tlv->flags.b_set = true;

        for (i = 0; i < QED_TLV_MAC_COUNT; i++) {
                if (is_valid_ether_addr(gen_tlvs.mac[i])) {
                        ether_addr_copy(tlv->mac[i], gen_tlvs.mac[i]);
                        tlv->mac_set[i] = true;
                }
        }

        qed_get_vport_stats(cdev, &stats);
        p_common = &stats.common;
        tlv->rx_frames = p_common->rx_ucast_pkts + p_common->rx_mcast_pkts +
                         p_common->rx_bcast_pkts;
        tlv->rx_frames_set = true;
        tlv->rx_bytes = p_common->rx_ucast_bytes + p_common->rx_mcast_bytes +
                        p_common->rx_bcast_bytes;
        tlv->rx_bytes_set = true;
        tlv->tx_frames = p_common->tx_ucast_pkts + p_common->tx_mcast_pkts +
                         p_common->tx_bcast_pkts;
        tlv->tx_frames_set = true;
        tlv->tx_bytes = p_common->tx_ucast_bytes + p_common->tx_mcast_bytes +
                        p_common->tx_bcast_bytes;
        tlv->tx_bytes_set = true;
}

int qed_mfw_fill_tlv_data(struct qed_hwfn *hwfn, enum qed_mfw_tlv_type type,
                          union qed_mfw_tlv_data *tlv_buf)
{
        struct qed_dev *cdev = hwfn->cdev;
        struct qed_common_cb_ops *ops;

        ops = cdev->protocol_ops.common;
        if (!ops || !ops->get_protocol_tlv_data || !ops->get_generic_tlv_data) {
                DP_NOTICE(hwfn, "Can't collect TLV management info\n");
                return -EINVAL;
        }

        switch (type) {
        case QED_MFW_TLV_GENERIC:
                qed_fill_generic_tlv_data(cdev, &tlv_buf->generic);
                break;
        case QED_MFW_TLV_ETH:
                ops->get_protocol_tlv_data(cdev->ops_cookie, &tlv_buf->eth);
                break;
        case QED_MFW_TLV_FCOE:
                ops->get_protocol_tlv_data(cdev->ops_cookie, &tlv_buf->fcoe);
                break;
        case QED_MFW_TLV_ISCSI:
                ops->get_protocol_tlv_data(cdev->ops_cookie, &tlv_buf->iscsi);
                break;
        default:
                break;
        }

        return 0;
}
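
/* For reference, the MFW TLV request ties together roughly as follows
 * (call chain paraphrased from this driver):
 *
 *	qed_mfw_tlv_req()             - MFW asks for TLVs; the slowpath
 *	                                flag is set and the work is queued
 *	  qed_slowpath_task()         - delayed work runs
 *	    qed_mfw_process_tlv_req() - walks the requested TLV groups
 *	      qed_mfw_fill_tlv_data() - dispatches to the callbacks above
 */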