linux/drivers/net/ethernet/qlogic/qed/qed_main.c
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
/* QLogic qed NIC Driver
 * Copyright (c) 2015-2017  QLogic Corporation
 * Copyright (c) 2019-2020 Marvell International Ltd.
 */

#include <linux/stddef.h>
#include <linux/pci.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/dma-mapping.h>
#include <linux/string.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <linux/ethtool.h>
#include <linux/etherdevice.h>
#include <linux/vmalloc.h>
#include <linux/crash_dump.h>
#include <linux/crc32.h>
#include <linux/qed/qed_if.h>
#include <linux/qed/qed_ll2_if.h>
#include <net/devlink.h>
#include <linux/aer.h>
#include <linux/phylink.h>

#include "qed.h"
#include "qed_sriov.h"
#include "qed_sp.h"
#include "qed_dev_api.h"
#include "qed_ll2.h"
#include "qed_fcoe.h"
#include "qed_iscsi.h"

#include "qed_mcp.h"
#include "qed_reg_addr.h"
#include "qed_hw.h"
#include "qed_selftest.h"
#include "qed_debug.h"
#include "qed_devlink.h"

#define QED_ROCE_QPS                    (8192)
#define QED_ROCE_DPIS                   (8)
#define QED_RDMA_SRQS                   QED_ROCE_QPS
#define QED_NVM_CFG_GET_FLAGS           0xA
#define QED_NVM_CFG_GET_PF_FLAGS        0x1A
#define QED_NVM_CFG_MAX_ATTRS           50

static char version[] =
        "QLogic FastLinQ 4xxxx Core Module qed\n";

MODULE_DESCRIPTION("QLogic FastLinQ 4xxxx Core Module");
MODULE_LICENSE("GPL");

#define FW_FILE_VERSION                         \
        __stringify(FW_MAJOR_VERSION) "."       \
        __stringify(FW_MINOR_VERSION) "."       \
        __stringify(FW_REVISION_VERSION) "."    \
        __stringify(FW_ENGINEERING_VERSION)

#define QED_FW_FILE_NAME        \
        "qed/qed_init_values_zipped-" FW_FILE_VERSION ".bin"

MODULE_FIRMWARE(QED_FW_FILE_NAME);

/* MFW speed capabilities maps */

struct qed_mfw_speed_map {
        u32             mfw_val;
        __ETHTOOL_DECLARE_LINK_MODE_MASK(caps);

        const u32       *cap_arr;
        u32             arr_size;
};

#define QED_MFW_SPEED_MAP(type, arr)            \
{                                               \
        .mfw_val        = (type),               \
        .cap_arr        = (arr),                \
        .arr_size       = ARRAY_SIZE(arr),      \
}

static const u32 qed_mfw_ext_1g[] __initconst = {
        ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
        ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
        ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
};

static const u32 qed_mfw_ext_10g[] __initconst = {
        ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
        ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
        ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT,
        ETHTOOL_LINK_MODE_10000baseR_FEC_BIT,
        ETHTOOL_LINK_MODE_10000baseCR_Full_BIT,
        ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
        ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,
        ETHTOOL_LINK_MODE_10000baseLRM_Full_BIT,
};

static const u32 qed_mfw_ext_25g[] __initconst = {
        ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
        ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
        ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
};

static const u32 qed_mfw_ext_40g[] __initconst = {
        ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
        ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
        ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
        ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
};

static const u32 qed_mfw_ext_50g_base_r[] __initconst = {
        ETHTOOL_LINK_MODE_50000baseKR_Full_BIT,
        ETHTOOL_LINK_MODE_50000baseCR_Full_BIT,
        ETHTOOL_LINK_MODE_50000baseSR_Full_BIT,
        ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
        ETHTOOL_LINK_MODE_50000baseDR_Full_BIT,
};

static const u32 qed_mfw_ext_50g_base_r2[] __initconst = {
        ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
        ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
        ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
};

static const u32 qed_mfw_ext_100g_base_r2[] __initconst = {
        ETHTOOL_LINK_MODE_100000baseKR2_Full_BIT,
        ETHTOOL_LINK_MODE_100000baseSR2_Full_BIT,
        ETHTOOL_LINK_MODE_100000baseCR2_Full_BIT,
        ETHTOOL_LINK_MODE_100000baseDR2_Full_BIT,
        ETHTOOL_LINK_MODE_100000baseLR2_ER2_FR2_Full_BIT,
};

static const u32 qed_mfw_ext_100g_base_r4[] __initconst = {
        ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
        ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
        ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
        ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
};

static struct qed_mfw_speed_map qed_mfw_ext_maps[] __ro_after_init = {
        QED_MFW_SPEED_MAP(ETH_EXT_ADV_SPEED_1G, qed_mfw_ext_1g),
        QED_MFW_SPEED_MAP(ETH_EXT_ADV_SPEED_10G, qed_mfw_ext_10g),
        QED_MFW_SPEED_MAP(ETH_EXT_ADV_SPEED_25G, qed_mfw_ext_25g),
        QED_MFW_SPEED_MAP(ETH_EXT_ADV_SPEED_40G, qed_mfw_ext_40g),
        QED_MFW_SPEED_MAP(ETH_EXT_ADV_SPEED_50G_BASE_R,
                          qed_mfw_ext_50g_base_r),
        QED_MFW_SPEED_MAP(ETH_EXT_ADV_SPEED_50G_BASE_R2,
                          qed_mfw_ext_50g_base_r2),
        QED_MFW_SPEED_MAP(ETH_EXT_ADV_SPEED_100G_BASE_R2,
                          qed_mfw_ext_100g_base_r2),
        QED_MFW_SPEED_MAP(ETH_EXT_ADV_SPEED_100G_BASE_R4,
                          qed_mfw_ext_100g_base_r4),
};

static const u32 qed_mfw_legacy_1g[] __initconst = {
        ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
        ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
        ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
};

static const u32 qed_mfw_legacy_10g[] __initconst = {
        ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
        ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
        ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT,
        ETHTOOL_LINK_MODE_10000baseR_FEC_BIT,
        ETHTOOL_LINK_MODE_10000baseCR_Full_BIT,
        ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
        ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,
        ETHTOOL_LINK_MODE_10000baseLRM_Full_BIT,
};

static const u32 qed_mfw_legacy_20g[] __initconst = {
        ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT,
};

static const u32 qed_mfw_legacy_25g[] __initconst = {
        ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
        ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
        ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
};

static const u32 qed_mfw_legacy_40g[] __initconst = {
        ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
        ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
        ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
        ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
};

static const u32 qed_mfw_legacy_50g[] __initconst = {
        ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
        ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
        ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
};

static const u32 qed_mfw_legacy_bb_100g[] __initconst = {
        ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
        ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
        ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
        ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
};

static struct qed_mfw_speed_map qed_mfw_legacy_maps[] __ro_after_init = {
        QED_MFW_SPEED_MAP(NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G,
                          qed_mfw_legacy_1g),
        QED_MFW_SPEED_MAP(NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G,
                          qed_mfw_legacy_10g),
        QED_MFW_SPEED_MAP(NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G,
                          qed_mfw_legacy_20g),
        QED_MFW_SPEED_MAP(NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G,
                          qed_mfw_legacy_25g),
        QED_MFW_SPEED_MAP(NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G,
                          qed_mfw_legacy_40g),
        QED_MFW_SPEED_MAP(NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G,
                          qed_mfw_legacy_50g),
        QED_MFW_SPEED_MAP(NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G,
                          qed_mfw_legacy_bb_100g),
};

static void __init qed_mfw_speed_map_populate(struct qed_mfw_speed_map *map)
{
        linkmode_set_bit_array(map->cap_arr, map->arr_size, map->caps);

        map->cap_arr = NULL;
        map->arr_size = 0;
}

static void __init qed_mfw_speed_maps_init(void)
{
        u32 i;

        for (i = 0; i < ARRAY_SIZE(qed_mfw_ext_maps); i++)
                qed_mfw_speed_map_populate(qed_mfw_ext_maps + i);

        for (i = 0; i < ARRAY_SIZE(qed_mfw_legacy_maps); i++)
                qed_mfw_speed_map_populate(qed_mfw_legacy_maps + i);
}

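/* Note on the tables above: each entry starts life as { mfw_val, cap_arr,
 * arr_size }. At module init, qed_mfw_speed_maps_init() folds the
 * __initconst bit arrays into the 'caps' linkmode bitmap and then clears
 * cap_arr/arr_size, so the init-only arrays can be discarded while runtime
 * code (e.g. qed_set_ext_speed_params() below) consults only 'caps'.
 * Illustrative equivalent for the 1G entry after init:
 *
 *        linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, map->caps);
 *        linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseKX_Full_BIT, map->caps);
 *        linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT, map->caps);
 */
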
static int __init qed_init(void)
{
        pr_info("%s", version);

        qed_mfw_speed_maps_init();

        return 0;
}
module_init(qed_init);

static void __exit qed_exit(void)
{
        /* To prevent marking this module as "permanent" */
}
module_exit(qed_exit);

/* Check if the DMA controller on the machine can properly handle the DMA
 * addressing required by the device.
 */
static int qed_set_coherency_mask(struct qed_dev *cdev)
{
        struct device *dev = &cdev->pdev->dev;

        if (dma_set_mask(dev, DMA_BIT_MASK(64)) == 0) {
                if (dma_set_coherent_mask(dev, DMA_BIT_MASK(64)) != 0) {
                        DP_NOTICE(cdev,
                                  "Can't request 64-bit consistent allocations\n");
                        return -EIO;
                }
        } else if (dma_set_mask(dev, DMA_BIT_MASK(32)) != 0) {
                DP_NOTICE(cdev, "Can't request 64b/32b DMA addresses\n");
                return -EIO;
        }

        return 0;
}

static void qed_free_pci(struct qed_dev *cdev)
{
        struct pci_dev *pdev = cdev->pdev;

        pci_disable_pcie_error_reporting(pdev);

        if (cdev->doorbells && cdev->db_size)
                iounmap(cdev->doorbells);
        if (cdev->regview)
                iounmap(cdev->regview);
        if (atomic_read(&pdev->enable_cnt) == 1)
                pci_release_regions(pdev);

        pci_disable_device(pdev);
}

#define PCI_REVISION_ID_ERROR_VAL       0xff

/* Performs PCI initializations as well as initializing PCI-related parameters
 * in the device structure. Returns 0 in case of success.
 */
static int qed_init_pci(struct qed_dev *cdev, struct pci_dev *pdev)
{
        u8 rev_id;
        int rc;

        cdev->pdev = pdev;

        rc = pci_enable_device(pdev);
        if (rc) {
                DP_NOTICE(cdev, "Cannot enable PCI device\n");
                goto err0;
        }

        if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
                DP_NOTICE(cdev, "No memory region found in bar #0\n");
                rc = -EIO;
                goto err1;
        }

        if (IS_PF(cdev) && !(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
                DP_NOTICE(cdev, "No memory region found in bar #2\n");
                rc = -EIO;
                goto err1;
        }

        if (atomic_read(&pdev->enable_cnt) == 1) {
                rc = pci_request_regions(pdev, "qed");
                if (rc) {
                        DP_NOTICE(cdev,
                                  "Failed to request PCI memory resources\n");
                        goto err1;
                }
                pci_set_master(pdev);
                pci_save_state(pdev);
        }

        pci_read_config_byte(pdev, PCI_REVISION_ID, &rev_id);
        if (rev_id == PCI_REVISION_ID_ERROR_VAL) {
                DP_NOTICE(cdev,
                          "Detected PCI device error [rev_id 0x%x]. Probably due to prior indication. Aborting.\n",
                          rev_id);
                rc = -ENODEV;
                goto err2;
        }
        if (!pci_is_pcie(pdev)) {
                DP_NOTICE(cdev, "The bus is not PCI Express\n");
                rc = -EIO;
                goto err2;
        }

        cdev->pci_params.pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
        if (IS_PF(cdev) && !cdev->pci_params.pm_cap)
                DP_NOTICE(cdev, "Cannot find power management capability\n");

        rc = qed_set_coherency_mask(cdev);
        if (rc)
                goto err2;

        cdev->pci_params.mem_start = pci_resource_start(pdev, 0);
        cdev->pci_params.mem_end = pci_resource_end(pdev, 0);
        cdev->pci_params.irq = pdev->irq;

        cdev->regview = pci_ioremap_bar(pdev, 0);
        if (!cdev->regview) {
                DP_NOTICE(cdev, "Cannot map register space, aborting\n");
                rc = -ENOMEM;
                goto err2;
        }

        cdev->db_phys_addr = pci_resource_start(cdev->pdev, 2);
        cdev->db_size = pci_resource_len(cdev->pdev, 2);
        if (!cdev->db_size) {
                if (IS_PF(cdev)) {
                        DP_NOTICE(cdev, "No Doorbell bar available\n");
                        return -EINVAL;
                } else {
                        return 0;
                }
        }

        cdev->doorbells = ioremap_wc(cdev->db_phys_addr, cdev->db_size);

        if (!cdev->doorbells) {
                DP_NOTICE(cdev, "Cannot map doorbell space\n");
                return -ENOMEM;
        }

        /* AER (Advanced Error reporting) configuration */
        rc = pci_enable_pcie_error_reporting(pdev);
        if (rc)
                DP_VERBOSE(cdev, NETIF_MSG_DRV,
                           "Failed to configure PCIe AER [%d]\n", rc);

        return 0;

err2:
        pci_release_regions(pdev);
err1:
        pci_disable_device(pdev);
err0:
        return rc;
}

int qed_fill_dev_info(struct qed_dev *cdev,
                      struct qed_dev_info *dev_info)
{
        struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
        struct qed_hw_info *hw_info = &p_hwfn->hw_info;
        struct qed_tunnel_info *tun = &cdev->tunnel;
        struct qed_ptt  *ptt;

        memset(dev_info, 0, sizeof(struct qed_dev_info));

        if (tun->vxlan.tun_cls == QED_TUNN_CLSS_MAC_VLAN &&
            tun->vxlan.b_mode_enabled)
                dev_info->vxlan_enable = true;

        if (tun->l2_gre.b_mode_enabled && tun->ip_gre.b_mode_enabled &&
            tun->l2_gre.tun_cls == QED_TUNN_CLSS_MAC_VLAN &&
            tun->ip_gre.tun_cls == QED_TUNN_CLSS_MAC_VLAN)
                dev_info->gre_enable = true;

        if (tun->l2_geneve.b_mode_enabled && tun->ip_geneve.b_mode_enabled &&
            tun->l2_geneve.tun_cls == QED_TUNN_CLSS_MAC_VLAN &&
            tun->ip_geneve.tun_cls == QED_TUNN_CLSS_MAC_VLAN)
                dev_info->geneve_enable = true;

        dev_info->num_hwfns = cdev->num_hwfns;
        dev_info->pci_mem_start = cdev->pci_params.mem_start;
        dev_info->pci_mem_end = cdev->pci_params.mem_end;
        dev_info->pci_irq = cdev->pci_params.irq;
        dev_info->rdma_supported = QED_IS_RDMA_PERSONALITY(p_hwfn);
        dev_info->dev_type = cdev->type;
        ether_addr_copy(dev_info->hw_mac, hw_info->hw_mac_addr);

        if (IS_PF(cdev)) {
                dev_info->fw_major = FW_MAJOR_VERSION;
                dev_info->fw_minor = FW_MINOR_VERSION;
                dev_info->fw_rev = FW_REVISION_VERSION;
                dev_info->fw_eng = FW_ENGINEERING_VERSION;
                dev_info->b_inter_pf_switch = test_bit(QED_MF_INTER_PF_SWITCH,
                                                       &cdev->mf_bits);
                if (!test_bit(QED_MF_DISABLE_ARFS, &cdev->mf_bits))
                        dev_info->b_arfs_capable = true;
                dev_info->tx_switching = true;

                if (hw_info->b_wol_support == QED_WOL_SUPPORT_PME)
                        dev_info->wol_support = true;

                dev_info->smart_an = qed_mcp_is_smart_an_supported(p_hwfn);

                dev_info->abs_pf_id = QED_LEADING_HWFN(cdev)->abs_pf_id;
        } else {
                qed_vf_get_fw_version(&cdev->hwfns[0], &dev_info->fw_major,
                                      &dev_info->fw_minor, &dev_info->fw_rev,
                                      &dev_info->fw_eng);
        }

        if (IS_PF(cdev)) {
                ptt = qed_ptt_acquire(QED_LEADING_HWFN(cdev));
                if (ptt) {
                        qed_mcp_get_mfw_ver(QED_LEADING_HWFN(cdev), ptt,
                                            &dev_info->mfw_rev, NULL);

                        qed_mcp_get_mbi_ver(QED_LEADING_HWFN(cdev), ptt,
                                            &dev_info->mbi_version);

                        qed_mcp_get_flash_size(QED_LEADING_HWFN(cdev), ptt,
                                               &dev_info->flash_size);

                        qed_ptt_release(QED_LEADING_HWFN(cdev), ptt);
                }
        } else {
                qed_mcp_get_mfw_ver(QED_LEADING_HWFN(cdev), NULL,
                                    &dev_info->mfw_rev, NULL);
        }

        dev_info->mtu = hw_info->mtu;
        cdev->common_dev_info = *dev_info;

        return 0;
}

static void qed_free_cdev(struct qed_dev *cdev)
{
        kfree((void *)cdev);
}

static struct qed_dev *qed_alloc_cdev(struct pci_dev *pdev)
{
        struct qed_dev *cdev;

        cdev = kzalloc(sizeof(*cdev), GFP_KERNEL);
        if (!cdev)
                return cdev;

        qed_init_struct(cdev);

        return cdev;
}

/* Sets the requested power state */
static int qed_set_power_state(struct qed_dev *cdev, pci_power_t state)
{
        if (!cdev)
                return -ENODEV;

        DP_VERBOSE(cdev, NETIF_MSG_DRV, "Omitting Power state change\n");
        return 0;
}

/* probing */
static struct qed_dev *qed_probe(struct pci_dev *pdev,
                                 struct qed_probe_params *params)
{
        struct qed_dev *cdev;
        int rc;

        cdev = qed_alloc_cdev(pdev);
        if (!cdev)
                goto err0;

        cdev->drv_type = DRV_ID_DRV_TYPE_LINUX;
        cdev->protocol = params->protocol;

        if (params->is_vf)
                cdev->b_is_vf = true;

        qed_init_dp(cdev, params->dp_module, params->dp_level);

        cdev->recov_in_prog = params->recov_in_prog;

        rc = qed_init_pci(cdev, pdev);
        if (rc) {
                DP_ERR(cdev, "init pci failed\n");
                goto err1;
        }
        DP_INFO(cdev, "PCI init completed successfully\n");

        rc = qed_hw_prepare(cdev, QED_PCI_DEFAULT);
        if (rc) {
                DP_ERR(cdev, "hw prepare failed\n");
                goto err2;
        }

        DP_INFO(cdev, "%s completed successfully\n", __func__);

        return cdev;

err2:
        qed_free_pci(cdev);
err1:
        qed_free_cdev(cdev);
err0:
        return NULL;
}

static void qed_remove(struct qed_dev *cdev)
{
        if (!cdev)
                return;

        qed_hw_remove(cdev);

        qed_free_pci(cdev);

        qed_set_power_state(cdev, PCI_D3hot);

        qed_free_cdev(cdev);
}

static void qed_disable_msix(struct qed_dev *cdev)
{
        if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
                pci_disable_msix(cdev->pdev);
                kfree(cdev->int_params.msix_table);
        } else if (cdev->int_params.out.int_mode == QED_INT_MODE_MSI) {
                pci_disable_msi(cdev->pdev);
        }

        memset(&cdev->int_params.out, 0, sizeof(struct qed_int_param));
}

static int qed_enable_msix(struct qed_dev *cdev,
                           struct qed_int_params *int_params)
{
        int i, rc, cnt;

        cnt = int_params->in.num_vectors;

        for (i = 0; i < cnt; i++)
                int_params->msix_table[i].entry = i;

        rc = pci_enable_msix_range(cdev->pdev, int_params->msix_table,
                                   int_params->in.min_msix_cnt, cnt);
        if (rc < cnt && rc >= int_params->in.min_msix_cnt &&
            (rc % cdev->num_hwfns)) {
                pci_disable_msix(cdev->pdev);

                /* If fastpath is initialized, we need at least one interrupt
                 * per hwfn [and the slow path interrupts]. New requested number
                 * should be a multiple of the number of hwfns.
                 */
                cnt = (rc / cdev->num_hwfns) * cdev->num_hwfns;
                DP_NOTICE(cdev,
                          "Trying to enable MSI-X with less vectors (%d out of %d)\n",
                          cnt, int_params->in.num_vectors);
                rc = pci_enable_msix_exact(cdev->pdev, int_params->msix_table,
                                           cnt);
                if (!rc)
                        rc = cnt;
        }

        /* For VFs, we should return with an error in case we didn't get the
         * exact number of msix vectors as we requested.
         * Not doing that will lead to a crash when starting queues for
         * this VF.
         */
        if ((IS_PF(cdev) && rc > 0) || (IS_VF(cdev) && rc == cnt)) {
                /* MSI-x configuration was achieved */
                int_params->out.int_mode = QED_INT_MODE_MSIX;
                int_params->out.num_vectors = rc;
                rc = 0;
        } else {
                DP_NOTICE(cdev,
                          "Failed to enable MSI-X [Requested %d vectors][rc %d]\n",
                          cnt, rc);
        }

        return rc;
}

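/* Worked example for the retry path above (illustrative numbers): on a
 * 2-hwfn CMT device, a request for 16 vectors that is granted only 11 by
 * pci_enable_msix_range() fails the (rc % num_hwfns) check, so the grant
 * is rounded down to cnt = (11 / 2) * 2 = 10 and retried with
 * pci_enable_msix_exact(), keeping the fastpath vectors evenly split
 * between the two engines.
 */
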
/* This function outputs the int mode and the number of enabled MSI-X vectors */
static int qed_set_int_mode(struct qed_dev *cdev, bool force_mode)
{
        struct qed_int_params *int_params = &cdev->int_params;
        struct msix_entry *tbl;
        int rc = 0, cnt;

        switch (int_params->in.int_mode) {
        case QED_INT_MODE_MSIX:
                /* Allocate MSIX table */
                cnt = int_params->in.num_vectors;
                int_params->msix_table = kcalloc(cnt, sizeof(*tbl), GFP_KERNEL);
                if (!int_params->msix_table) {
                        rc = -ENOMEM;
                        goto out;
                }

                /* Enable MSIX */
                rc = qed_enable_msix(cdev, int_params);
                if (!rc)
                        goto out;

                DP_NOTICE(cdev, "Failed to enable MSI-X\n");
                kfree(int_params->msix_table);
                if (force_mode)
                        goto out;
                fallthrough;

        case QED_INT_MODE_MSI:
                if (cdev->num_hwfns == 1) {
                        rc = pci_enable_msi(cdev->pdev);
                        if (!rc) {
                                int_params->out.int_mode = QED_INT_MODE_MSI;
                                goto out;
                        }

                        DP_NOTICE(cdev, "Failed to enable MSI\n");
                        if (force_mode)
                                goto out;
                }
                fallthrough;

        case QED_INT_MODE_INTA:
                int_params->out.int_mode = QED_INT_MODE_INTA;
                rc = 0;
                goto out;
        default:
                DP_NOTICE(cdev, "Unknown int_mode value %d\n",
                          int_params->in.int_mode);
                rc = -EINVAL;
        }

out:
        if (!rc)
                DP_INFO(cdev, "Using %s interrupts\n",
                        int_params->out.int_mode == QED_INT_MODE_INTA ?
                        "INTa" : int_params->out.int_mode == QED_INT_MODE_MSI ?
                        "MSI" : "MSIX");
        cdev->int_coalescing_mode = QED_COAL_MODE_ENABLE;

        return rc;
}

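/* Note: the switch above intentionally degrades through its fallthrough
 * chain, MSI-X -> MSI -> INTa. Unless force_mode pins the requested mode,
 * a failure at one level simply drops to the next; MSI is only attempted
 * on single-hwfn devices, and INTa always succeeds as the final fallback.
 */
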
static void qed_simd_handler_config(struct qed_dev *cdev, void *token,
                                    int index, void(*handler)(void *))
{
        struct qed_hwfn *hwfn = &cdev->hwfns[index % cdev->num_hwfns];
        int relative_idx = index / cdev->num_hwfns;

        hwfn->simd_proto_handler[relative_idx].func = handler;
        hwfn->simd_proto_handler[relative_idx].token = token;
}

static void qed_simd_handler_clean(struct qed_dev *cdev, int index)
{
        struct qed_hwfn *hwfn = &cdev->hwfns[index % cdev->num_hwfns];
        int relative_idx = index / cdev->num_hwfns;

        memset(&hwfn->simd_proto_handler[relative_idx], 0,
               sizeof(struct qed_simd_fp_handler));
}

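/* Index mapping used by the two helpers above (illustrative): fastpath
 * vectors are interleaved across engines, so on a 2-hwfn CMT device
 * index 5 resolves to hwfn 5 % 2 = 1, relative slot 5 / 2 = 2; on a
 * single-hwfn device the mapping is 1:1.
 */
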
static irqreturn_t qed_msix_sp_int(int irq, void *tasklet)
{
        tasklet_schedule((struct tasklet_struct *)tasklet);
        return IRQ_HANDLED;
}

static irqreturn_t qed_single_int(int irq, void *dev_instance)
{
        struct qed_dev *cdev = (struct qed_dev *)dev_instance;
        struct qed_hwfn *hwfn;
        irqreturn_t rc = IRQ_NONE;
        u64 status;
        int i, j;

        for (i = 0; i < cdev->num_hwfns; i++) {
                status = qed_int_igu_read_sisr_reg(&cdev->hwfns[i]);

                if (!status)
                        continue;

                hwfn = &cdev->hwfns[i];

                /* Slowpath interrupt */
                if (unlikely(status & 0x1)) {
                        tasklet_schedule(&hwfn->sp_dpc);
                        status &= ~0x1;
                        rc = IRQ_HANDLED;
                }

                /* Fastpath interrupts */
                for (j = 0; j < 64; j++) {
                        if ((0x2ULL << j) & status) {
                                struct qed_simd_fp_handler *p_handler =
                                        &hwfn->simd_proto_handler[j];

                                if (p_handler->func)
                                        p_handler->func(p_handler->token);
                                else
                                        DP_NOTICE(hwfn,
                                                  "Not calling fastpath handler as it is NULL [handler #%d, status 0x%llx]\n",
                                                  j, status);

                                status &= ~(0x2ULL << j);
                                rc = IRQ_HANDLED;
                        }
                }

                if (unlikely(status))
                        DP_VERBOSE(hwfn, NETIF_MSG_INTR,
                                   "got an unknown interrupt status 0x%llx\n",
                                   status);
        }

        return rc;
}

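/* SISR status layout as consumed by qed_single_int() above: bit 0 of each
 * hwfn's status schedules the slowpath DPC, while a set bit at position
 * j + 1 - tested via (0x2ULL << j) - dispatches simd_proto_handler[j].
 * Any bits still set after both passes are logged as unknown.
 */
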
int qed_slowpath_irq_req(struct qed_hwfn *hwfn)
{
        struct qed_dev *cdev = hwfn->cdev;
        u32 int_mode;
        int rc = 0;
        u8 id;

        int_mode = cdev->int_params.out.int_mode;
        if (int_mode == QED_INT_MODE_MSIX) {
                id = hwfn->my_id;
                snprintf(hwfn->name, NAME_SIZE, "sp-%d-%02x:%02x.%02x",
                         id, cdev->pdev->bus->number,
                         PCI_SLOT(cdev->pdev->devfn), hwfn->abs_pf_id);
                rc = request_irq(cdev->int_params.msix_table[id].vector,
                                 qed_msix_sp_int, 0, hwfn->name, &hwfn->sp_dpc);
        } else {
                unsigned long flags = 0;

                snprintf(cdev->name, NAME_SIZE, "%02x:%02x.%02x",
                         cdev->pdev->bus->number, PCI_SLOT(cdev->pdev->devfn),
                         PCI_FUNC(cdev->pdev->devfn));

                if (cdev->int_params.out.int_mode == QED_INT_MODE_INTA)
                        flags |= IRQF_SHARED;

                rc = request_irq(cdev->pdev->irq, qed_single_int,
                                 flags, cdev->name, cdev);
        }

        if (rc)
                DP_NOTICE(cdev, "request_irq failed, rc = %d\n", rc);
        else
                DP_VERBOSE(hwfn, (NETIF_MSG_INTR | QED_MSG_SP),
                           "Requested slowpath %s\n",
                           (int_mode == QED_INT_MODE_MSIX) ? "MSI-X" : "IRQ");

        return rc;
}

static void qed_slowpath_tasklet_flush(struct qed_hwfn *p_hwfn)
{
        /* Calling the disable function will make sure that any
         * currently-running function is completed. The following call to the
         * enable function makes this sequence a flush-like operation.
         */
        if (p_hwfn->b_sp_dpc_enabled) {
                tasklet_disable(&p_hwfn->sp_dpc);
                tasklet_enable(&p_hwfn->sp_dpc);
        }
}

void qed_slowpath_irq_sync(struct qed_hwfn *p_hwfn)
{
        struct qed_dev *cdev = p_hwfn->cdev;
        u8 id = p_hwfn->my_id;
        u32 int_mode;

        int_mode = cdev->int_params.out.int_mode;
        if (int_mode == QED_INT_MODE_MSIX)
                synchronize_irq(cdev->int_params.msix_table[id].vector);
        else
                synchronize_irq(cdev->pdev->irq);

        qed_slowpath_tasklet_flush(p_hwfn);
}

static void qed_slowpath_irq_free(struct qed_dev *cdev)
{
        int i;

        if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
                for_each_hwfn(cdev, i) {
                        if (!cdev->hwfns[i].b_int_requested)
                                break;
                        synchronize_irq(cdev->int_params.msix_table[i].vector);
                        free_irq(cdev->int_params.msix_table[i].vector,
                                 &cdev->hwfns[i].sp_dpc);
                }
        } else {
                if (QED_LEADING_HWFN(cdev)->b_int_requested)
                        free_irq(cdev->pdev->irq, cdev);
        }
        qed_int_disable_post_isr_release(cdev);
}

static int qed_nic_stop(struct qed_dev *cdev)
{
        int i, rc;

        rc = qed_hw_stop(cdev);

        for (i = 0; i < cdev->num_hwfns; i++) {
                struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

                if (p_hwfn->b_sp_dpc_enabled) {
                        tasklet_disable(&p_hwfn->sp_dpc);
                        p_hwfn->b_sp_dpc_enabled = false;
                        DP_VERBOSE(cdev, NETIF_MSG_IFDOWN,
                                   "Disabled sp tasklet [hwfn %d] at %p\n",
                                   i, &p_hwfn->sp_dpc);
                }
        }

        qed_dbg_pf_exit(cdev);

        return rc;
}

static int qed_nic_setup(struct qed_dev *cdev)
{
        int rc, i;

        /* Determine if interface is going to require LL2 */
        if (QED_LEADING_HWFN(cdev)->hw_info.personality != QED_PCI_ETH) {
                for (i = 0; i < cdev->num_hwfns; i++) {
                        struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

                        p_hwfn->using_ll2 = true;
                }
        }

        rc = qed_resc_alloc(cdev);
        if (rc)
                return rc;

        DP_INFO(cdev, "Allocated qed resources\n");

        qed_resc_setup(cdev);

        return rc;
}

static int qed_set_int_fp(struct qed_dev *cdev, u16 cnt)
{
        int limit = 0;

        /* Mark the fastpath as free/used */
        cdev->int_params.fp_initialized = cnt ? true : false;

        if (cdev->int_params.out.int_mode != QED_INT_MODE_MSIX)
                limit = cdev->num_hwfns * 63;
        else if (cdev->int_params.fp_msix_cnt)
                limit = cdev->int_params.fp_msix_cnt;

        if (!limit)
                return -ENOMEM;

        return min_t(int, cnt, limit);
}

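/* Sizing note for qed_set_int_fp(): in MSI-X mode the cap is simply the
 * number of fastpath vectors actually enabled; in INTa/MSI mode the single
 * IRQ is demultiplexed by qed_single_int(), hence the "num_hwfns * 63" cap,
 * presumably 63 because bit 0 of each hwfn's 64-bit status is reserved for
 * the slowpath.
 */
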
static int qed_get_int_fp(struct qed_dev *cdev, struct qed_int_info *info)
{
        memset(info, 0, sizeof(struct qed_int_info));

        if (!cdev->int_params.fp_initialized) {
                DP_INFO(cdev,
                        "Protocol driver requested interrupt information, but its support is not yet configured\n");
                return -EINVAL;
        }

        /* Need to expose only MSI-X information; Single IRQ is handled solely
         * by qed.
         */
        if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
                int msix_base = cdev->int_params.fp_msix_base;

                info->msix_cnt = cdev->int_params.fp_msix_cnt;
                info->msix = &cdev->int_params.msix_table[msix_base];
        }

        return 0;
}

static int qed_slowpath_setup_int(struct qed_dev *cdev,
                                  enum qed_int_mode int_mode)
{
        struct qed_sb_cnt_info sb_cnt_info;
        int num_l2_queues = 0;
        int rc;
        int i;

        if ((int_mode == QED_INT_MODE_MSI) && (cdev->num_hwfns > 1)) {
                DP_NOTICE(cdev, "MSI mode is not supported for CMT devices\n");
                return -EINVAL;
        }

        memset(&cdev->int_params, 0, sizeof(struct qed_int_params));
        cdev->int_params.in.int_mode = int_mode;
        for_each_hwfn(cdev, i) {
                memset(&sb_cnt_info, 0, sizeof(sb_cnt_info));
                qed_int_get_num_sbs(&cdev->hwfns[i], &sb_cnt_info);
                cdev->int_params.in.num_vectors += sb_cnt_info.cnt;
                cdev->int_params.in.num_vectors++; /* slowpath */
        }

        /* We want a minimum of one slowpath and one fastpath vector per hwfn */
        cdev->int_params.in.min_msix_cnt = cdev->num_hwfns * 2;

        if (is_kdump_kernel()) {
                DP_INFO(cdev,
                        "Kdump kernel: Limit the max number of requested MSI-X vectors to %hd\n",
                        cdev->int_params.in.min_msix_cnt);
                cdev->int_params.in.num_vectors =
                        cdev->int_params.in.min_msix_cnt;
        }

        rc = qed_set_int_mode(cdev, false);
        if (rc) {
                DP_ERR(cdev, "%s ERR\n", __func__);
                return rc;
        }

        cdev->int_params.fp_msix_base = cdev->num_hwfns;
        cdev->int_params.fp_msix_cnt = cdev->int_params.out.num_vectors -
                                       cdev->num_hwfns;

        if (!IS_ENABLED(CONFIG_QED_RDMA) ||
            !QED_IS_RDMA_PERSONALITY(QED_LEADING_HWFN(cdev)))
                return 0;

        for_each_hwfn(cdev, i)
                num_l2_queues += FEAT_NUM(&cdev->hwfns[i], QED_PF_L2_QUE);

        DP_VERBOSE(cdev, QED_MSG_RDMA,
                   "cdev->int_params.fp_msix_cnt=%d num_l2_queues=%d\n",
                   cdev->int_params.fp_msix_cnt, num_l2_queues);

        if (cdev->int_params.fp_msix_cnt > num_l2_queues) {
                cdev->int_params.rdma_msix_cnt =
                        (cdev->int_params.fp_msix_cnt - num_l2_queues)
                        / cdev->num_hwfns;
                cdev->int_params.rdma_msix_base =
                        cdev->int_params.fp_msix_base + num_l2_queues;
                cdev->int_params.fp_msix_cnt = num_l2_queues;
        } else {
                cdev->int_params.rdma_msix_cnt = 0;
        }

        DP_VERBOSE(cdev, QED_MSG_RDMA, "roce_msix_cnt=%d roce_msix_base=%d\n",
                   cdev->int_params.rdma_msix_cnt,
                   cdev->int_params.rdma_msix_base);

        return 0;
}

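/* Worked example for the PF vector split above (illustrative numbers):
 * with 2 hwfns and 34 MSI-X vectors granted, the first two vectors serve
 * the slowpath (fp_msix_base = 2) and fp_msix_cnt = 32. If the device is
 * an RDMA personality and the hwfns expose 16 L2 queues in total, the
 * split becomes rdma_msix_cnt = (32 - 16) / 2 = 8 per hwfn,
 * rdma_msix_base = 2 + 16 = 18, and fp_msix_cnt is trimmed back to 16.
 */
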
static int qed_slowpath_vf_setup_int(struct qed_dev *cdev)
{
        int rc;

        memset(&cdev->int_params, 0, sizeof(struct qed_int_params));
        cdev->int_params.in.int_mode = QED_INT_MODE_MSIX;

        qed_vf_get_num_rxqs(QED_LEADING_HWFN(cdev),
                            &cdev->int_params.in.num_vectors);
        if (cdev->num_hwfns > 1) {
                u8 vectors = 0;

                qed_vf_get_num_rxqs(&cdev->hwfns[1], &vectors);
                cdev->int_params.in.num_vectors += vectors;
        }

        /* We want a minimum of one fastpath vector per vf hwfn */
        cdev->int_params.in.min_msix_cnt = cdev->num_hwfns;

        rc = qed_set_int_mode(cdev, true);
        if (rc)
                return rc;

        cdev->int_params.fp_msix_base = 0;
        cdev->int_params.fp_msix_cnt = cdev->int_params.out.num_vectors;

        return 0;
}

u32 qed_unzip_data(struct qed_hwfn *p_hwfn, u32 input_len,
                   u8 *input_buf, u32 max_size, u8 *unzip_buf)
{
        int rc;

        p_hwfn->stream->next_in = input_buf;
        p_hwfn->stream->avail_in = input_len;
        p_hwfn->stream->next_out = unzip_buf;
        p_hwfn->stream->avail_out = max_size;

        rc = zlib_inflateInit2(p_hwfn->stream, MAX_WBITS);

        if (rc != Z_OK) {
                DP_VERBOSE(p_hwfn, NETIF_MSG_DRV, "zlib init failed, rc = %d\n",
                           rc);
                return 0;
        }

        rc = zlib_inflate(p_hwfn->stream, Z_FINISH);
        zlib_inflateEnd(p_hwfn->stream);

        if (rc != Z_OK && rc != Z_STREAM_END) {
                DP_VERBOSE(p_hwfn, NETIF_MSG_DRV, "FW unzip error: %s, rc=%d\n",
                           p_hwfn->stream->msg, rc);
                return 0;
        }

        return p_hwfn->stream->total_out / 4;
}

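/* Note: qed_unzip_data() reports the decompressed size in dwords
 * (total_out / 4), not bytes, and returns 0 on any zlib failure, so a
 * caller must treat 0 as "nothing unzipped".
 */
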
static int qed_alloc_stream_mem(struct qed_dev *cdev)
{
        int i;
        void *workspace;

        for_each_hwfn(cdev, i) {
                struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

                p_hwfn->stream = kzalloc(sizeof(*p_hwfn->stream), GFP_KERNEL);
                if (!p_hwfn->stream)
                        return -ENOMEM;

                workspace = vzalloc(zlib_inflate_workspacesize());
                if (!workspace)
                        return -ENOMEM;
                p_hwfn->stream->workspace = workspace;
        }

        return 0;
}

static void qed_free_stream_mem(struct qed_dev *cdev)
{
        int i;

        for_each_hwfn(cdev, i) {
                struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

                if (!p_hwfn->stream)
                        return;

                vfree(p_hwfn->stream->workspace);
                kfree(p_hwfn->stream);
        }
}

static void qed_update_pf_params(struct qed_dev *cdev,
                                 struct qed_pf_params *params)
{
        int i;

        if (IS_ENABLED(CONFIG_QED_RDMA)) {
                params->rdma_pf_params.num_qps = QED_ROCE_QPS;
                params->rdma_pf_params.min_dpis = QED_ROCE_DPIS;
                params->rdma_pf_params.num_srqs = QED_RDMA_SRQS;
                /* divide by 3 the MRs to avoid MF ILT overflow */
                params->rdma_pf_params.gl_pi = QED_ROCE_PROTOCOL_INDEX;
        }

        if (cdev->num_hwfns > 1 || IS_VF(cdev))
                params->eth_pf_params.num_arfs_filters = 0;

        /* In case we might support RDMA, don't allow qede to be greedy
         * with the L2 contexts. Allow for 64 queues [rx, tx cos, xdp]
         * per hwfn.
         */
        if (QED_IS_RDMA_PERSONALITY(QED_LEADING_HWFN(cdev))) {
                u16 *num_cons;

                num_cons = &params->eth_pf_params.num_cons;
                *num_cons = min_t(u16, *num_cons, QED_MAX_L2_CONS);
        }

        for (i = 0; i < cdev->num_hwfns; i++) {
                struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

                p_hwfn->pf_params = *params;
        }
}

#define QED_PERIODIC_DB_REC_COUNT               10
#define QED_PERIODIC_DB_REC_INTERVAL_MS         100
#define QED_PERIODIC_DB_REC_INTERVAL \
        msecs_to_jiffies(QED_PERIODIC_DB_REC_INTERVAL_MS)

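/* Doorbell-recovery cadence implied by the constants above: once armed via
 * qed_periodic_db_rec_start(), the slowpath task re-queues itself up to
 * QED_PERIODIC_DB_REC_COUNT (10) times at 100 ms intervals, giving roughly
 * a one-second recovery window per trigger.
 */
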
static int qed_slowpath_delayed_work(struct qed_hwfn *hwfn,
                                     enum qed_slowpath_wq_flag wq_flag,
                                     unsigned long delay)
{
        if (!hwfn->slowpath_wq_active)
                return -EINVAL;

        /* Memory barrier for setting atomic bit */
        smp_mb__before_atomic();
        set_bit(wq_flag, &hwfn->slowpath_task_flags);
        /* Memory barrier after setting atomic bit */
        smp_mb__after_atomic();
        queue_delayed_work(hwfn->slowpath_wq, &hwfn->slowpath_task, delay);

        return 0;
}

void qed_periodic_db_rec_start(struct qed_hwfn *p_hwfn)
{
        /* Reset periodic Doorbell Recovery counter */
        p_hwfn->periodic_db_rec_count = QED_PERIODIC_DB_REC_COUNT;

        /* Don't schedule periodic Doorbell Recovery if already scheduled */
        if (test_bit(QED_SLOWPATH_PERIODIC_DB_REC,
                     &p_hwfn->slowpath_task_flags))
                return;

        qed_slowpath_delayed_work(p_hwfn, QED_SLOWPATH_PERIODIC_DB_REC,
                                  QED_PERIODIC_DB_REC_INTERVAL);
}

static void qed_slowpath_wq_stop(struct qed_dev *cdev)
{
        int i;

        if (IS_VF(cdev))
                return;

        for_each_hwfn(cdev, i) {
                if (!cdev->hwfns[i].slowpath_wq)
                        continue;

                /* Stop queuing new delayed works */
                cdev->hwfns[i].slowpath_wq_active = false;

                cancel_delayed_work(&cdev->hwfns[i].slowpath_task);
                destroy_workqueue(cdev->hwfns[i].slowpath_wq);
        }
}

static void qed_slowpath_task(struct work_struct *work)
{
        struct qed_hwfn *hwfn = container_of(work, struct qed_hwfn,
                                             slowpath_task.work);
        struct qed_ptt *ptt = qed_ptt_acquire(hwfn);

        if (!ptt) {
                if (hwfn->slowpath_wq_active)
                        queue_delayed_work(hwfn->slowpath_wq,
                                           &hwfn->slowpath_task, 0);

                return;
        }

        if (test_and_clear_bit(QED_SLOWPATH_MFW_TLV_REQ,
                               &hwfn->slowpath_task_flags))
                qed_mfw_process_tlv_req(hwfn, ptt);

        if (test_and_clear_bit(QED_SLOWPATH_PERIODIC_DB_REC,
                               &hwfn->slowpath_task_flags)) {
                /* skip qed_db_rec_handler during recovery/unload */
                if (hwfn->cdev->recov_in_prog || !hwfn->slowpath_wq_active)
                        goto out;

                qed_db_rec_handler(hwfn, ptt);
                if (hwfn->periodic_db_rec_count--)
                        qed_slowpath_delayed_work(hwfn,
                                                  QED_SLOWPATH_PERIODIC_DB_REC,
                                                  QED_PERIODIC_DB_REC_INTERVAL);
        }

out:
        qed_ptt_release(hwfn, ptt);
}

static int qed_slowpath_wq_start(struct qed_dev *cdev)
{
        struct qed_hwfn *hwfn;
        char name[NAME_SIZE];
        int i;

        if (IS_VF(cdev))
                return 0;

        for_each_hwfn(cdev, i) {
                hwfn = &cdev->hwfns[i];

                snprintf(name, NAME_SIZE, "slowpath-%02x:%02x.%02x",
                         cdev->pdev->bus->number,
                         PCI_SLOT(cdev->pdev->devfn), hwfn->abs_pf_id);

                hwfn->slowpath_wq = alloc_workqueue(name, 0, 0);
                if (!hwfn->slowpath_wq) {
                        DP_NOTICE(hwfn, "Cannot create slowpath workqueue\n");
                        return -ENOMEM;
                }

                INIT_DELAYED_WORK(&hwfn->slowpath_task, qed_slowpath_task);
                hwfn->slowpath_wq_active = true;
        }

        return 0;
}

static int qed_slowpath_start(struct qed_dev *cdev,
                              struct qed_slowpath_params *params)
{
        struct qed_drv_load_params drv_load_params;
        struct qed_hw_init_params hw_init_params;
        struct qed_mcp_drv_version drv_version;
        struct qed_tunnel_info tunn_info;
        const u8 *data = NULL;
        struct qed_hwfn *hwfn;
        struct qed_ptt *p_ptt;
        int rc = -EINVAL;

        if (qed_iov_wq_start(cdev))
                goto err;

        if (qed_slowpath_wq_start(cdev))
                goto err;

        if (IS_PF(cdev)) {
                rc = request_firmware(&cdev->firmware, QED_FW_FILE_NAME,
                                      &cdev->pdev->dev);
                if (rc) {
                        DP_NOTICE(cdev,
                                  "Failed to find fw file - /lib/firmware/%s\n",
                                  QED_FW_FILE_NAME);
                        goto err;
                }

                if (cdev->num_hwfns == 1) {
                        p_ptt = qed_ptt_acquire(QED_LEADING_HWFN(cdev));
                        if (p_ptt) {
                                QED_LEADING_HWFN(cdev)->p_arfs_ptt = p_ptt;
                        } else {
                                DP_NOTICE(cdev,
                                          "Failed to acquire PTT for aRFS\n");
                                rc = -EINVAL;
                                goto err;
                        }
                }
        }

        cdev->rx_coalesce_usecs = QED_DEFAULT_RX_USECS;
        rc = qed_nic_setup(cdev);
        if (rc)
                goto err;

        if (IS_PF(cdev))
                rc = qed_slowpath_setup_int(cdev, params->int_mode);
        else
                rc = qed_slowpath_vf_setup_int(cdev);
        if (rc)
                goto err1;

        if (IS_PF(cdev)) {
                /* Allocate stream for unzipping */
                rc = qed_alloc_stream_mem(cdev);
                if (rc)
                        goto err2;

                /* First Dword used to differentiate between various sources */
                data = cdev->firmware->data + sizeof(u32);

                qed_dbg_pf_init(cdev);
        }

        /* Start the slowpath */
        memset(&hw_init_params, 0, sizeof(hw_init_params));
        memset(&tunn_info, 0, sizeof(tunn_info));
        tunn_info.vxlan.b_mode_enabled = true;
        tunn_info.l2_gre.b_mode_enabled = true;
        tunn_info.ip_gre.b_mode_enabled = true;
        tunn_info.l2_geneve.b_mode_enabled = true;
        tunn_info.ip_geneve.b_mode_enabled = true;
        tunn_info.vxlan.tun_cls = QED_TUNN_CLSS_MAC_VLAN;
        tunn_info.l2_gre.tun_cls = QED_TUNN_CLSS_MAC_VLAN;
        tunn_info.ip_gre.tun_cls = QED_TUNN_CLSS_MAC_VLAN;
        tunn_info.l2_geneve.tun_cls = QED_TUNN_CLSS_MAC_VLAN;
        tunn_info.ip_geneve.tun_cls = QED_TUNN_CLSS_MAC_VLAN;
        hw_init_params.p_tunn = &tunn_info;
        hw_init_params.b_hw_start = true;
        hw_init_params.int_mode = cdev->int_params.out.int_mode;
        hw_init_params.allow_npar_tx_switch = true;
        hw_init_params.bin_fw_data = data;

        memset(&drv_load_params, 0, sizeof(drv_load_params));
        drv_load_params.is_crash_kernel = is_kdump_kernel();
        drv_load_params.mfw_timeout_val = QED_LOAD_REQ_LOCK_TO_DEFAULT;
        drv_load_params.avoid_eng_reset = false;
        drv_load_params.override_force_load = QED_OVERRIDE_FORCE_LOAD_NONE;
        hw_init_params.p_drv_load_params = &drv_load_params;

        rc = qed_hw_init(cdev, &hw_init_params);
        if (rc)
                goto err2;

        DP_INFO(cdev,
                "HW initialization and function start completed successfully\n");

        if (IS_PF(cdev)) {
                cdev->tunn_feature_mask = (BIT(QED_MODE_VXLAN_TUNN) |
                                           BIT(QED_MODE_L2GENEVE_TUNN) |
                                           BIT(QED_MODE_IPGENEVE_TUNN) |
                                           BIT(QED_MODE_L2GRE_TUNN) |
                                           BIT(QED_MODE_IPGRE_TUNN));
        }

        /* Allocate LL2 interface if needed */
        if (QED_LEADING_HWFN(cdev)->using_ll2) {
                rc = qed_ll2_alloc_if(cdev);
                if (rc)
                        goto err3;
        }
        if (IS_PF(cdev)) {
                hwfn = QED_LEADING_HWFN(cdev);
                drv_version.version = (params->drv_major << 24) |
                                      (params->drv_minor << 16) |
                                      (params->drv_rev << 8) |
                                      (params->drv_eng);
                strscpy(drv_version.name, params->name,
                        MCP_DRV_VER_STR_SIZE - 4);
                rc = qed_mcp_send_drv_version(hwfn, hwfn->p_main_ptt,
                                              &drv_version);
                if (rc) {
                        DP_NOTICE(cdev, "Failed sending drv version command\n");
                        goto err4;
                }
        }

        qed_reset_vport_stats(cdev);

        return 0;

err4:
        qed_ll2_dealloc_if(cdev);
err3:
        qed_hw_stop(cdev);
err2:
        qed_hw_timers_stop_all(cdev);
        if (IS_PF(cdev))
                qed_slowpath_irq_free(cdev);
        qed_free_stream_mem(cdev);
        qed_disable_msix(cdev);
err1:
        qed_resc_free(cdev);
err:
        if (IS_PF(cdev))
                release_firmware(cdev->firmware);

        if (IS_PF(cdev) && (cdev->num_hwfns == 1) &&
            QED_LEADING_HWFN(cdev)->p_arfs_ptt)
                qed_ptt_release(QED_LEADING_HWFN(cdev),
                                QED_LEADING_HWFN(cdev)->p_arfs_ptt);

        qed_iov_wq_stop(cdev, false);

        qed_slowpath_wq_stop(cdev);

        return rc;
}

static int qed_slowpath_stop(struct qed_dev *cdev)
{
        if (!cdev)
                return -ENODEV;

        qed_slowpath_wq_stop(cdev);

        qed_ll2_dealloc_if(cdev);

        if (IS_PF(cdev)) {
                if (cdev->num_hwfns == 1)
                        qed_ptt_release(QED_LEADING_HWFN(cdev),
                                        QED_LEADING_HWFN(cdev)->p_arfs_ptt);
                qed_free_stream_mem(cdev);
                if (IS_QED_ETH_IF(cdev))
                        qed_sriov_disable(cdev, true);
        }

        qed_nic_stop(cdev);

        if (IS_PF(cdev))
                qed_slowpath_irq_free(cdev);

        qed_disable_msix(cdev);

        qed_resc_free(cdev);

        qed_iov_wq_stop(cdev, true);

        if (IS_PF(cdev))
                release_firmware(cdev->firmware);

        return 0;
}

static void qed_set_name(struct qed_dev *cdev, char name[NAME_SIZE])
{
        int i;

        memcpy(cdev->name, name, NAME_SIZE);
        for_each_hwfn(cdev, i)
                snprintf(cdev->hwfns[i].name, NAME_SIZE, "%s-%d", name, i);
}

static u32 qed_sb_init(struct qed_dev *cdev,
                       struct qed_sb_info *sb_info,
                       void *sb_virt_addr,
                       dma_addr_t sb_phy_addr, u16 sb_id,
                       enum qed_sb_type type)
{
        struct qed_hwfn *p_hwfn;
        struct qed_ptt *p_ptt;
        u16 rel_sb_id;
        u32 rc;

        /* RoCE/Storage use a single engine in CMT mode while L2 uses both */
        if (type == QED_SB_TYPE_L2_QUEUE) {
                p_hwfn = &cdev->hwfns[sb_id % cdev->num_hwfns];
                rel_sb_id = sb_id / cdev->num_hwfns;
        } else {
                p_hwfn = QED_AFFIN_HWFN(cdev);
                rel_sb_id = sb_id;
        }

        DP_VERBOSE(cdev, NETIF_MSG_INTR,
                   "hwfn [%d] <--[init]-- SB %04x [0x%04x upper]\n",
                   IS_LEAD_HWFN(p_hwfn) ? 0 : 1, rel_sb_id, sb_id);

        if (IS_PF(p_hwfn->cdev)) {
                p_ptt = qed_ptt_acquire(p_hwfn);
                if (!p_ptt)
                        return -EBUSY;

                rc = qed_int_sb_init(p_hwfn, p_ptt, sb_info, sb_virt_addr,
                                     sb_phy_addr, rel_sb_id);
                qed_ptt_release(p_hwfn, p_ptt);
        } else {
                rc = qed_int_sb_init(p_hwfn, NULL, sb_info, sb_virt_addr,
                                     sb_phy_addr, rel_sb_id);
        }

        return rc;
}

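/* SB distribution recap (illustrative): L2 queues stripe their status
 * blocks across engines - with 2 hwfns, sb_id 5 resolves to hwfn 1,
 * relative SB 2 - while RoCE/storage status blocks stay on the affinity
 * hwfn with rel_sb_id == sb_id. qed_sb_release() below must apply the
 * same mapping so each SB is freed on the hwfn that owns it.
 */
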
1507static u32 qed_sb_release(struct qed_dev *cdev,
1508                          struct qed_sb_info *sb_info,
1509                          u16 sb_id,
1510                          enum qed_sb_type type)
1511{
1512        struct qed_hwfn *p_hwfn;
1513        u16 rel_sb_id;
1514        u32 rc;
1515
1516        /* RoCE/Storage use a single engine in CMT mode while L2 uses both */
1517        if (type == QED_SB_TYPE_L2_QUEUE) {
1518                p_hwfn = &cdev->hwfns[sb_id % cdev->num_hwfns];
1519                rel_sb_id = sb_id / cdev->num_hwfns;
1520        } else {
1521                p_hwfn = QED_AFFIN_HWFN(cdev);
1522                rel_sb_id = sb_id;
1523        }
1524
1525        DP_VERBOSE(cdev, NETIF_MSG_INTR,
1526                   "hwfn [%d] <--[release]-- SB %04x [0x%04x upper]\n",
1527                   IS_LEAD_HWFN(p_hwfn) ? 0 : 1, rel_sb_id, sb_id);
1528
1529        rc = qed_int_sb_release(p_hwfn, sb_info, rel_sb_id);
1530
1531        return rc;
1532}
1533
1534static bool qed_can_link_change(struct qed_dev *cdev)
1535{
1536        return true;
1537}
1538
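    /* Worked example (illustrative): forcing SPEED_50000 with
     * FEC_FORCE_MODE_RS makes the function below program
     * ext_speed->forced_speed = QED_EXT_SPEED_50G_R | QED_EXT_SPEED_50G_R2
     * (both lane configurations) and ext_fec_mode = ETH_EXT_FEC_50G_RS528;
     * FEC_FORCE_MODE_AUTO instead advertises the union of all FEC modes
     * that are valid at the forced speed.
     */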
1539static void qed_set_ext_speed_params(struct qed_mcp_link_params *link_params,
1540                                     const struct qed_link_params *params)
1541{
1542        struct qed_mcp_link_speed_params *ext_speed = &link_params->ext_speed;
1543        const struct qed_mfw_speed_map *map;
1544        u32 i;
1545
1546        if (params->override_flags & QED_LINK_OVERRIDE_SPEED_AUTONEG)
1547                ext_speed->autoneg = !!params->autoneg;
1548
1549        if (params->override_flags & QED_LINK_OVERRIDE_SPEED_ADV_SPEEDS) {
1550                ext_speed->advertised_speeds = 0;
1551
1552                for (i = 0; i < ARRAY_SIZE(qed_mfw_ext_maps); i++) {
1553                        map = qed_mfw_ext_maps + i;
1554
1555                        if (linkmode_intersects(params->adv_speeds, map->caps))
1556                                ext_speed->advertised_speeds |= map->mfw_val;
1557                }
1558        }
1559
1560        if (params->override_flags & QED_LINK_OVERRIDE_SPEED_FORCED_SPEED) {
1561                switch (params->forced_speed) {
1562                case SPEED_1000:
1563                        ext_speed->forced_speed = QED_EXT_SPEED_1G;
1564                        break;
1565                case SPEED_10000:
1566                        ext_speed->forced_speed = QED_EXT_SPEED_10G;
1567                        break;
1568                case SPEED_20000:
1569                        ext_speed->forced_speed = QED_EXT_SPEED_20G;
1570                        break;
1571                case SPEED_25000:
1572                        ext_speed->forced_speed = QED_EXT_SPEED_25G;
1573                        break;
1574                case SPEED_40000:
1575                        ext_speed->forced_speed = QED_EXT_SPEED_40G;
1576                        break;
1577                case SPEED_50000:
1578                        ext_speed->forced_speed = QED_EXT_SPEED_50G_R |
1579                                                  QED_EXT_SPEED_50G_R2;
1580                        break;
1581                case SPEED_100000:
1582                        ext_speed->forced_speed = QED_EXT_SPEED_100G_R2 |
1583                                                  QED_EXT_SPEED_100G_R4 |
1584                                                  QED_EXT_SPEED_100G_P4;
1585                        break;
1586                default:
1587                        break;
1588                }
1589        }
1590
1591        if (!(params->override_flags & QED_LINK_OVERRIDE_FEC_CONFIG))
1592                return;
1593
1594        switch (params->forced_speed) {
1595        case SPEED_25000:
1596                switch (params->fec) {
1597                case FEC_FORCE_MODE_NONE:
1598                        link_params->ext_fec_mode = ETH_EXT_FEC_25G_NONE;
1599                        break;
1600                case FEC_FORCE_MODE_FIRECODE:
1601                        link_params->ext_fec_mode = ETH_EXT_FEC_25G_BASE_R;
1602                        break;
1603                case FEC_FORCE_MODE_RS:
1604                        link_params->ext_fec_mode = ETH_EXT_FEC_25G_RS528;
1605                        break;
1606                case FEC_FORCE_MODE_AUTO:
1607                        link_params->ext_fec_mode = ETH_EXT_FEC_25G_RS528 |
1608                                                    ETH_EXT_FEC_25G_BASE_R |
1609                                                    ETH_EXT_FEC_25G_NONE;
1610                        break;
1611                default:
1612                        break;
1613                }
1614
1615                break;
1616        case SPEED_40000:
1617                switch (params->fec) {
1618                case FEC_FORCE_MODE_NONE:
1619                        link_params->ext_fec_mode = ETH_EXT_FEC_40G_NONE;
1620                        break;
1621                case FEC_FORCE_MODE_FIRECODE:
1622                        link_params->ext_fec_mode = ETH_EXT_FEC_40G_BASE_R;
1623                        break;
1624                case FEC_FORCE_MODE_AUTO:
1625                        link_params->ext_fec_mode = ETH_EXT_FEC_40G_BASE_R |
1626                                                    ETH_EXT_FEC_40G_NONE;
1627                        break;
1628                default:
1629                        break;
1630                }
1631
1632                break;
1633        case SPEED_50000:
1634                switch (params->fec) {
1635                case FEC_FORCE_MODE_NONE:
1636                        link_params->ext_fec_mode = ETH_EXT_FEC_50G_NONE;
1637                        break;
1638                case FEC_FORCE_MODE_FIRECODE:
1639                        link_params->ext_fec_mode = ETH_EXT_FEC_50G_BASE_R;
1640                        break;
1641                case FEC_FORCE_MODE_RS:
1642                        link_params->ext_fec_mode = ETH_EXT_FEC_50G_RS528;
1643                        break;
1644                case FEC_FORCE_MODE_AUTO:
1645                        link_params->ext_fec_mode = ETH_EXT_FEC_50G_RS528 |
1646                                                    ETH_EXT_FEC_50G_BASE_R |
1647                                                    ETH_EXT_FEC_50G_NONE;
1648                        break;
1649                default:
1650                        break;
1651                }
1652
1653                break;
1654        case SPEED_100000:
1655                switch (params->fec) {
1656                case FEC_FORCE_MODE_NONE:
1657                        link_params->ext_fec_mode = ETH_EXT_FEC_100G_NONE;
1658                        break;
1659                case FEC_FORCE_MODE_FIRECODE:
1660                        link_params->ext_fec_mode = ETH_EXT_FEC_100G_BASE_R;
1661                        break;
1662                case FEC_FORCE_MODE_RS:
1663                        link_params->ext_fec_mode = ETH_EXT_FEC_100G_RS528;
1664                        break;
1665                case FEC_FORCE_MODE_AUTO:
1666                        link_params->ext_fec_mode = ETH_EXT_FEC_100G_RS528 |
1667                                                    ETH_EXT_FEC_100G_BASE_R |
1668                                                    ETH_EXT_FEC_100G_NONE;
1669                        break;
1670                default:
1671                        break;
1672                }
1673
1674                break;
1675        default:
1676                break;
1677        }
1678}
1679
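    /* Minimal caller sketch (hypothetical, for illustration only): forcing
     * 25G with RS FEC via qed_set_link(). Only fields whose override bit is
     * set in override_flags are consumed below:
     *
     *        struct qed_link_params params = {};
     *
     *        params.override_flags = QED_LINK_OVERRIDE_SPEED_AUTONEG |
     *                                QED_LINK_OVERRIDE_SPEED_FORCED_SPEED |
     *                                QED_LINK_OVERRIDE_FEC_CONFIG;
     *        params.autoneg = false;
     *        params.forced_speed = SPEED_25000;
     *        params.fec = FEC_FORCE_MODE_RS;
     *        params.link_up = true;
     *        rc = qed_set_link(cdev, &params);
     */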
1680static int qed_set_link(struct qed_dev *cdev, struct qed_link_params *params)
1681{
1682        struct qed_mcp_link_params *link_params;
1683        struct qed_mcp_link_speed_params *speed;
1684        const struct qed_mfw_speed_map *map;
1685        struct qed_hwfn *hwfn;
1686        struct qed_ptt *ptt;
1687        int rc;
1688        u32 i;
1689
1690        if (!cdev)
1691                return -ENODEV;
1692
1693        /* The link should be set only once per PF */
1694        hwfn = &cdev->hwfns[0];
1695
1696        /* When VF wants to set link, force it to read the bulletin instead.
1697         * This mimics the PF behavior, where a notification [both immediate
1698         * and possibly later] would be generated when changing properties.
1699         */
1700        if (IS_VF(cdev)) {
1701                qed_schedule_iov(hwfn, QED_IOV_WQ_VF_FORCE_LINK_QUERY_FLAG);
1702                return 0;
1703        }
1704
1705        ptt = qed_ptt_acquire(hwfn);
1706        if (!ptt)
1707                return -EBUSY;
1708
1709        link_params = qed_mcp_get_link_params(hwfn);
1710        if (!link_params) {
                    qed_ptt_release(hwfn, ptt);
1711                return -ENODATA;
            }
1712
1713        speed = &link_params->speed;
1714
1715        if (params->override_flags & QED_LINK_OVERRIDE_SPEED_AUTONEG)
1716                speed->autoneg = !!params->autoneg;
1717
1718        if (params->override_flags & QED_LINK_OVERRIDE_SPEED_ADV_SPEEDS) {
1719                speed->advertised_speeds = 0;
1720
1721                for (i = 0; i < ARRAY_SIZE(qed_mfw_legacy_maps); i++) {
1722                        map = qed_mfw_legacy_maps + i;
1723
1724                        if (linkmode_intersects(params->adv_speeds, map->caps))
1725                                speed->advertised_speeds |= map->mfw_val;
1726                }
1727        }
1728
1729        if (params->override_flags & QED_LINK_OVERRIDE_SPEED_FORCED_SPEED)
1730                speed->forced_speed = params->forced_speed;
1731
1732        if (qed_mcp_is_ext_speed_supported(hwfn))
1733                qed_set_ext_speed_params(link_params, params);
1734
1735        if (params->override_flags & QED_LINK_OVERRIDE_PAUSE_CONFIG) {
1736                if (params->pause_config & QED_LINK_PAUSE_AUTONEG_ENABLE)
1737                        link_params->pause.autoneg = true;
1738                else
1739                        link_params->pause.autoneg = false;
1740                if (params->pause_config & QED_LINK_PAUSE_RX_ENABLE)
1741                        link_params->pause.forced_rx = true;
1742                else
1743                        link_params->pause.forced_rx = false;
1744                if (params->pause_config & QED_LINK_PAUSE_TX_ENABLE)
1745                        link_params->pause.forced_tx = true;
1746                else
1747                        link_params->pause.forced_tx = false;
1748        }
1749
1750        if (params->override_flags & QED_LINK_OVERRIDE_LOOPBACK_MODE) {
1751                switch (params->loopback_mode) {
1752                case QED_LINK_LOOPBACK_INT_PHY:
1753                        link_params->loopback_mode = ETH_LOOPBACK_INT_PHY;
1754                        break;
1755                case QED_LINK_LOOPBACK_EXT_PHY:
1756                        link_params->loopback_mode = ETH_LOOPBACK_EXT_PHY;
1757                        break;
1758                case QED_LINK_LOOPBACK_EXT:
1759                        link_params->loopback_mode = ETH_LOOPBACK_EXT;
1760                        break;
1761                case QED_LINK_LOOPBACK_MAC:
1762                        link_params->loopback_mode = ETH_LOOPBACK_MAC;
1763                        break;
1764                case QED_LINK_LOOPBACK_CNIG_AH_ONLY_0123:
1765                        link_params->loopback_mode =
1766                                ETH_LOOPBACK_CNIG_AH_ONLY_0123;
1767                        break;
1768                case QED_LINK_LOOPBACK_CNIG_AH_ONLY_2301:
1769                        link_params->loopback_mode =
1770                                ETH_LOOPBACK_CNIG_AH_ONLY_2301;
1771                        break;
1772                case QED_LINK_LOOPBACK_PCS_AH_ONLY:
1773                        link_params->loopback_mode = ETH_LOOPBACK_PCS_AH_ONLY;
1774                        break;
1775                case QED_LINK_LOOPBACK_REVERSE_MAC_AH_ONLY:
1776                        link_params->loopback_mode =
1777                                ETH_LOOPBACK_REVERSE_MAC_AH_ONLY;
1778                        break;
1779                case QED_LINK_LOOPBACK_INT_PHY_FEA_AH_ONLY:
1780                        link_params->loopback_mode =
1781                                ETH_LOOPBACK_INT_PHY_FEA_AH_ONLY;
1782                        break;
1783                default:
1784                        link_params->loopback_mode = ETH_LOOPBACK_NONE;
1785                        break;
1786                }
1787        }
1788
1789        if (params->override_flags & QED_LINK_OVERRIDE_EEE_CONFIG)
1790                memcpy(&link_params->eee, &params->eee,
1791                       sizeof(link_params->eee));
1792
1793        if (params->override_flags & QED_LINK_OVERRIDE_FEC_CONFIG)
1794                link_params->fec = params->fec;
1795
1796        rc = qed_mcp_set_link(hwfn, ptt, params->link_up);
1797
1798        qed_ptt_release(hwfn, ptt);
1799
1800        return rc;
1801}
1802
1803static int qed_get_port_type(u32 media_type)
1804{
1805        int port_type;
1806
1807        switch (media_type) {
1808        case MEDIA_SFPP_10G_FIBER:
1809        case MEDIA_SFP_1G_FIBER:
1810        case MEDIA_XFP_FIBER:
1811        case MEDIA_MODULE_FIBER:
1812                port_type = PORT_FIBRE;
1813                break;
1814        case MEDIA_DA_TWINAX:
1815                port_type = PORT_DA;
1816                break;
1817        case MEDIA_BASE_T:
1818                port_type = PORT_TP;
1819                break;
1820        case MEDIA_KR:
1821        case MEDIA_NOT_PRESENT:
1822                port_type = PORT_NONE;
1823                break;
1824        case MEDIA_UNSPECIFIED:
1825        default:
1826                port_type = PORT_OTHER;
1827                break;
1828        }
1829        return port_type;
1830}
1831
1832static int qed_get_link_data(struct qed_hwfn *hwfn,
1833                             struct qed_mcp_link_params *params,
1834                             struct qed_mcp_link_state *link,
1835                             struct qed_mcp_link_capabilities *link_caps)
1836{
1837        void *p;
1838
1839        if (!IS_PF(hwfn->cdev)) {
1840                qed_vf_get_link_params(hwfn, params);
1841                qed_vf_get_link_state(hwfn, link);
1842                qed_vf_get_link_caps(hwfn, link_caps);
1843
1844                return 0;
1845        }
1846
1847        p = qed_mcp_get_link_params(hwfn);
1848        if (!p)
1849                return -ENXIO;
1850        memcpy(params, p, sizeof(*params));
1851
1852        p = qed_mcp_get_link_state(hwfn);
1853        if (!p)
1854                return -ENXIO;
1855        memcpy(link, p, sizeof(*link));
1856
1857        p = qed_mcp_get_link_capabilities(hwfn);
1858        if (!p)
1859                return -ENXIO;
1860        memcpy(link_caps, p, sizeof(*link_caps));
1861
1862        return 0;
1863}
1864
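    /* Worked example (illustrative): for a 25G DAC cable the MFW reports
     * media_type == MEDIA_DA_TWINAX; the function below then marks the port
     * FIBRE and, if the 25G bit ends up set in the capability mask, adds
     * 25000baseCR_Full to *if_caps. For DAC and fiber media the transceiver's
     * own speed mask is OR-ed in first, since such modules can support
     * several rates.
     */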
1865static void qed_fill_link_capability(struct qed_hwfn *hwfn,
1866                                     struct qed_ptt *ptt, u32 capability,
1867                                     unsigned long *if_caps)
1868{
1869        u32 media_type, tcvr_state, tcvr_type;
1870        u32 speed_mask, board_cfg;
1871
1872        if (qed_mcp_get_media_type(hwfn, ptt, &media_type))
1873                media_type = MEDIA_UNSPECIFIED;
1874
1875        if (qed_mcp_get_transceiver_data(hwfn, ptt, &tcvr_state, &tcvr_type)) {
                    /* Treat a query failure as an unplugged module; type
                     * sentinel assumed from qed_hsi.h.
                     */
                    tcvr_state = ETH_TRANSCEIVER_STATE_UNPLUGGED;
1876                tcvr_type = ETH_TRANSCEIVER_TYPE_NONE;
            }
1877
1878        if (qed_mcp_trans_speed_mask(hwfn, ptt, &speed_mask))
1879                speed_mask = 0xFFFFFFFF;
1880
1881        if (qed_mcp_get_board_config(hwfn, ptt, &board_cfg))
1882                board_cfg = NVM_CFG1_PORT_PORT_TYPE_UNDEFINED;
1883
1884        DP_VERBOSE(hwfn->cdev, NETIF_MSG_DRV,
1885                   "Media_type = 0x%x tcvr_state = 0x%x tcvr_type = 0x%x speed_mask = 0x%x board_cfg = 0x%x\n",
1886                   media_type, tcvr_state, tcvr_type, speed_mask, board_cfg);
1887
1888        switch (media_type) {
1889        case MEDIA_DA_TWINAX:
1890                phylink_set(if_caps, FIBRE);
1891
1892                if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G)
1893                        phylink_set(if_caps, 20000baseKR2_Full);
1894
1895                /* For DAC media multiple speed capabilities are supported */
1896                capability |= speed_mask;
1897
1898                if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G)
1899                        phylink_set(if_caps, 1000baseKX_Full);
1900                if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G)
1901                        phylink_set(if_caps, 10000baseCR_Full);
1902
1903                if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G)
1904                        switch (tcvr_type) {
1905                        case ETH_TRANSCEIVER_TYPE_40G_CR4:
1906                        case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_CR:
1907                        case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_CR:
1908                                phylink_set(if_caps, 40000baseCR4_Full);
1909                                break;
1910                        default:
1911                                break;
1912                        }
1913
1914                if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G)
1915                        phylink_set(if_caps, 25000baseCR_Full);
1916                if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G)
1917                        phylink_set(if_caps, 50000baseCR2_Full);
1918
1919                if (capability &
1920                    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G)
1921                        switch (tcvr_type) {
1922                        case ETH_TRANSCEIVER_TYPE_100G_CR4:
1923                        case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_CR:
1924                                phylink_set(if_caps, 100000baseCR4_Full);
1925                                break;
1926                        default:
1927                                break;
1928                        }
1929
1930                break;
1931        case MEDIA_BASE_T:
1932                phylink_set(if_caps, TP);
1933
1934                if (board_cfg & NVM_CFG1_PORT_PORT_TYPE_EXT_PHY) {
1935                        if (capability &
1936                            NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G)
1937                                phylink_set(if_caps, 1000baseT_Full);
1938                        if (capability &
1939                            NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G)
1940                                phylink_set(if_caps, 10000baseT_Full);
1941                }
1942
1943                if (board_cfg & NVM_CFG1_PORT_PORT_TYPE_MODULE) {
1944                        phylink_set(if_caps, FIBRE);
1945
1946                        switch (tcvr_type) {
1947                        case ETH_TRANSCEIVER_TYPE_1000BASET:
1948                                phylink_set(if_caps, 1000baseT_Full);
1949                                break;
1950                        case ETH_TRANSCEIVER_TYPE_10G_BASET:
1951                                phylink_set(if_caps, 10000baseT_Full);
1952                                break;
1953                        default:
1954                                break;
1955                        }
1956                }
1957
1958                break;
1959        case MEDIA_SFP_1G_FIBER:
1960        case MEDIA_SFPP_10G_FIBER:
1961        case MEDIA_XFP_FIBER:
1962        case MEDIA_MODULE_FIBER:
1963                phylink_set(if_caps, FIBRE);
1964                capability |= speed_mask;
1965
1966                if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G)
1967                        switch (tcvr_type) {
1968                        case ETH_TRANSCEIVER_TYPE_1G_LX:
1969                        case ETH_TRANSCEIVER_TYPE_1G_SX:
1970                        case ETH_TRANSCEIVER_TYPE_MULTI_RATE_1G_10G_SR:
1971                        case ETH_TRANSCEIVER_TYPE_MULTI_RATE_1G_10G_LR:
1972                                phylink_set(if_caps, 1000baseKX_Full);
1973                                break;
1974                        default:
1975                                break;
1976                        }
1977
1978                if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G)
1979                        switch (tcvr_type) {
1980                        case ETH_TRANSCEIVER_TYPE_10G_SR:
1981                        case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_SR:
1982                        case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_25G_SR:
1983                        case ETH_TRANSCEIVER_TYPE_MULTI_RATE_1G_10G_SR:
1984                                phylink_set(if_caps, 10000baseSR_Full);
1985                                break;
1986                        case ETH_TRANSCEIVER_TYPE_10G_LR:
1987                        case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_LR:
1988                        case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_25G_LR:
1989                        case ETH_TRANSCEIVER_TYPE_MULTI_RATE_1G_10G_LR:
1990                                phylink_set(if_caps, 10000baseLR_Full);
1991                                break;
1992                        case ETH_TRANSCEIVER_TYPE_10G_LRM:
1993                                phylink_set(if_caps, 10000baseLRM_Full);
1994                                break;
1995                        case ETH_TRANSCEIVER_TYPE_10G_ER:
1996                                phylink_set(if_caps, 10000baseR_FEC);
1997                                break;
1998                        default:
1999                                break;
2000                        }
2001
2002                if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G)
2003                        phylink_set(if_caps, 20000baseKR2_Full);
2004
2005                if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G)
2006                        switch (tcvr_type) {
2007                        case ETH_TRANSCEIVER_TYPE_25G_SR:
2008                        case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_25G_SR:
2009                                phylink_set(if_caps, 25000baseSR_Full);
2010                                break;
2011                        default:
2012                                break;
2013                        }
2014
2015                if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G)
2016                        switch (tcvr_type) {
2017                        case ETH_TRANSCEIVER_TYPE_40G_LR4:
2018                        case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_LR:
2019                        case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_LR:
2020                                phylink_set(if_caps, 40000baseLR4_Full);
2021                                break;
2022                        case ETH_TRANSCEIVER_TYPE_40G_SR4:
2023                        case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_SR:
2024                        case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_SR:
2025                                phylink_set(if_caps, 40000baseSR4_Full);
2026                                break;
2027                        default:
2028                                break;
2029                        }
2030
2031                if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G)
2032                        phylink_set(if_caps, 50000baseKR2_Full);
2033
2034                if (capability &
2035                    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G)
2036                        switch (tcvr_type) {
2037                        case ETH_TRANSCEIVER_TYPE_100G_SR4:
2038                        case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_SR:
2039                                phylink_set(if_caps, 100000baseSR4_Full);
2040                                break;
2041                        case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_LR:
2042                                phylink_set(if_caps, 100000baseLR4_ER4_Full);
2043                                break;
2044                        default:
2045                                break;
2046                        }
2047
2048                break;
2049        case MEDIA_KR:
2050                phylink_set(if_caps, Backplane);
2051
2052                if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G)
2053                        phylink_set(if_caps, 20000baseKR2_Full);
2054                if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G)
2055                        phylink_set(if_caps, 1000baseKX_Full);
2056                if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G)
2057                        phylink_set(if_caps, 10000baseKR_Full);
2058                if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G)
2059                        phylink_set(if_caps, 25000baseKR_Full);
2060                if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G)
2061                        phylink_set(if_caps, 40000baseKR4_Full);
2062                if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G)
2063                        phylink_set(if_caps, 50000baseKR2_Full);
2064                if (capability &
2065                    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G)
2066                        phylink_set(if_caps, 100000baseKR4_Full);
2067
2068                break;
2069        case MEDIA_UNSPECIFIED:
2070        case MEDIA_NOT_PRESENT:
2071        default:
2072                DP_VERBOSE(hwfn->cdev, QED_MSG_DEBUG,
2073                   "Unknown media and transceiver type\n");
2074                break;
2075        }
2076}
2077
2078static void qed_lp_caps_to_speed_mask(u32 caps, u32 *speed_mask)
2079{
2080        *speed_mask = 0;
2081
2082        if (caps &
2083            (QED_LINK_PARTNER_SPEED_1G_FD | QED_LINK_PARTNER_SPEED_1G_HD))
2084                *speed_mask |= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G;
2085        if (caps & QED_LINK_PARTNER_SPEED_10G)
2086                *speed_mask |= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G;
2087        if (caps & QED_LINK_PARTNER_SPEED_20G)
2088                *speed_mask |= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G;
2089        if (caps & QED_LINK_PARTNER_SPEED_25G)
2090                *speed_mask |= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G;
2091        if (caps & QED_LINK_PARTNER_SPEED_40G)
2092                *speed_mask |= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G;
2093        if (caps & QED_LINK_PARTNER_SPEED_50G)
2094                *speed_mask |= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G;
2095        if (caps & QED_LINK_PARTNER_SPEED_100G)
2096                *speed_mask |= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G;
2097}
2098
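    /* Example (illustrative): a partner advertising 10G and 25G yields
     * NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G |
     * NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G from the helper above;
     * qed_fill_link() feeds that mask back through
     * qed_fill_link_capability() to build the partner bitmap in lp_caps.
     */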
2099static void qed_fill_link(struct qed_hwfn *hwfn,
2100                          struct qed_ptt *ptt,
2101                          struct qed_link_output *if_link)
2102{
2103        struct qed_mcp_link_capabilities link_caps;
2104        struct qed_mcp_link_params params;
2105        struct qed_mcp_link_state link;
2106        u32 media_type, speed_mask;
2107
2108        memset(if_link, 0, sizeof(*if_link));
2109
2110        /* Prepare source inputs */
2111        if (qed_get_link_data(hwfn, &params, &link, &link_caps)) {
2112                dev_warn(&hwfn->cdev->pdev->dev, "no link data available\n");
2113                return;
2114        }
2115
2116        /* Set the link parameters to pass to protocol driver */
2117        if (link.link_up)
2118                if_link->link_up = true;
2119
2120        if (IS_PF(hwfn->cdev) && qed_mcp_is_ext_speed_supported(hwfn)) {
2121                if (link_caps.default_ext_autoneg)
2122                        phylink_set(if_link->supported_caps, Autoneg);
2123
2124                linkmode_copy(if_link->advertised_caps, if_link->supported_caps);
2125
2126                if (params.ext_speed.autoneg)
2127                        phylink_set(if_link->advertised_caps, Autoneg);
2128                else
2129                        phylink_clear(if_link->advertised_caps, Autoneg);
2130
2131                qed_fill_link_capability(hwfn, ptt,
2132                                         params.ext_speed.advertised_speeds,
2133                                         if_link->advertised_caps);
2134        } else {
2135                if (link_caps.default_speed_autoneg)
2136                        phylink_set(if_link->supported_caps, Autoneg);
2137
2138                linkmode_copy(if_link->advertised_caps, if_link->supported_caps);
2139
2140                if (params.speed.autoneg)
2141                        phylink_set(if_link->advertised_caps, Autoneg);
2142                else
2143                        phylink_clear(if_link->advertised_caps, Autoneg);
2144        }
2145
2146        if (params.pause.autoneg ||
2147            (params.pause.forced_rx && params.pause.forced_tx))
2148                phylink_set(if_link->supported_caps, Asym_Pause);
2149        if (params.pause.autoneg || params.pause.forced_rx ||
2150            params.pause.forced_tx)
2151                phylink_set(if_link->supported_caps, Pause);
2152
2153        if_link->sup_fec = link_caps.fec_default;
2154        if_link->active_fec = params.fec;
2155
2156        /* Fill link advertised capability */
2157        qed_fill_link_capability(hwfn, ptt, params.speed.advertised_speeds,
2158                                 if_link->advertised_caps);
2159
2160        /* Fill link supported capability */
2161        qed_fill_link_capability(hwfn, ptt, link_caps.speed_capabilities,
2162                                 if_link->supported_caps);
2163
2164        /* Fill partner advertised capability */
2165        qed_lp_caps_to_speed_mask(link.partner_adv_speed, &speed_mask);
2166        qed_fill_link_capability(hwfn, ptt, speed_mask, if_link->lp_caps);
2167
2168        if (link.link_up)
2169                if_link->speed = link.speed;
2170
2171        /* TODO - fill duplex properly */
2172        if_link->duplex = DUPLEX_FULL;
2173        qed_mcp_get_media_type(hwfn, ptt, &media_type);
2174        if_link->port = qed_get_port_type(media_type);
2175
2176        if_link->autoneg = params.speed.autoneg;
2177
2178        if (params.pause.autoneg)
2179                if_link->pause_config |= QED_LINK_PAUSE_AUTONEG_ENABLE;
2180        if (params.pause.forced_rx)
2181                if_link->pause_config |= QED_LINK_PAUSE_RX_ENABLE;
2182        if (params.pause.forced_tx)
2183                if_link->pause_config |= QED_LINK_PAUSE_TX_ENABLE;
2184
2185        if (link.an_complete)
2186                phylink_set(if_link->lp_caps, Autoneg);
2187        if (link.partner_adv_pause)
2188                phylink_set(if_link->lp_caps, Pause);
2189        if (link.partner_adv_pause == QED_LINK_PARTNER_ASYMMETRIC_PAUSE ||
2190            link.partner_adv_pause == QED_LINK_PARTNER_BOTH_PAUSE)
2191                phylink_set(if_link->lp_caps, Asym_Pause);
2192
2193        if (link_caps.default_eee == QED_MCP_EEE_UNSUPPORTED) {
2194                if_link->eee_supported = false;
2195        } else {
2196                if_link->eee_supported = true;
2197                if_link->eee_active = link.eee_active;
2198                if_link->sup_caps = link_caps.eee_speed_caps;
2199                /* MFW clears adv_caps on eee disable; use configured value */
2200                if_link->eee.adv_caps = link.eee_adv_caps ? link.eee_adv_caps :
2201                                        params.eee.adv_caps;
2202                if_link->eee.lp_adv_caps = link.eee_lp_adv_caps;
2203                if_link->eee.enable = params.eee.enable;
2204                if_link->eee.tx_lpi_enable = params.eee.tx_lpi_enable;
2205                if_link->eee.tx_lpi_timer = params.eee.tx_lpi_timer;
2206        }
2207}
2208
2209static void qed_get_current_link(struct qed_dev *cdev,
2210                                 struct qed_link_output *if_link)
2211{
2212        struct qed_hwfn *hwfn;
2213        struct qed_ptt *ptt;
2214        int i;
2215
2216        hwfn = &cdev->hwfns[0];
2217        if (IS_PF(cdev)) {
2218                ptt = qed_ptt_acquire(hwfn);
2219                if (ptt) {
2220                        qed_fill_link(hwfn, ptt, if_link);
2221                        qed_ptt_release(hwfn, ptt);
2222                } else {
2223                        DP_NOTICE(hwfn, "Failed to fill link; No PTT\n");
2224                }
2225        } else {
2226                qed_fill_link(hwfn, NULL, if_link);
2227        }
2228
2229        for_each_hwfn(cdev, i)
2230                qed_inform_vf_link_state(&cdev->hwfns[i]);
2231}
2232
2233void qed_link_update(struct qed_hwfn *hwfn, struct qed_ptt *ptt)
2234{
2235        void *cookie = hwfn->cdev->ops_cookie;
2236        struct qed_common_cb_ops *op = hwfn->cdev->protocol_ops.common;
2237        struct qed_link_output if_link;
2238
2239        qed_fill_link(hwfn, ptt, &if_link);
2240        qed_inform_vf_link_state(hwfn);
2241
2242        if (IS_LEAD_HWFN(hwfn) && cookie)
2243                op->link_update(cookie, &if_link);
2244}
2245
2246void qed_bw_update(struct qed_hwfn *hwfn, struct qed_ptt *ptt)
2247{
2248        void *cookie = hwfn->cdev->ops_cookie;
2249        struct qed_common_cb_ops *op = hwfn->cdev->protocol_ops.common;
2250
2251        if (IS_LEAD_HWFN(hwfn) && cookie && op && op->bw_update)
2252                op->bw_update(cookie);
2253}
2254
2255static int qed_drain(struct qed_dev *cdev)
2256{
2257        struct qed_hwfn *hwfn;
2258        struct qed_ptt *ptt;
2259        int i, rc;
2260
2261        if (IS_VF(cdev))
2262                return 0;
2263
2264        for_each_hwfn(cdev, i) {
2265                hwfn = &cdev->hwfns[i];
2266                ptt = qed_ptt_acquire(hwfn);
2267                if (!ptt) {
2268                        DP_NOTICE(hwfn, "Failed to drain NIG; No PTT\n");
2269                        return -EBUSY;
2270                }
2271                rc = qed_mcp_drain(hwfn, ptt);
2272                qed_ptt_release(hwfn, ptt);
2273                if (rc)
2274                        return rc;
2275        }
2276
2277        return 0;
2278}
2279
2280static u32 qed_nvm_flash_image_access_crc(struct qed_dev *cdev,
2281                                          struct qed_nvm_image_att *nvm_image,
2282                                          u32 *crc)
2283{
2284        u8 *buf = NULL;
2285        int rc;
2286
2287        /* Allocate a buffer for holding the nvram image */
2288        buf = kzalloc(nvm_image->length, GFP_KERNEL);
2289        if (!buf)
2290                return -ENOMEM;
2291
2292        /* Read image into buffer */
2293        rc = qed_mcp_nvm_read(cdev, nvm_image->start_addr,
2294                              buf, nvm_image->length);
2295        if (rc) {
2296                DP_ERR(cdev, "Failed reading image from nvm\n");
2297                goto out;
2298        }
2299
2300        /* Convert the buffer into big-endian format (excluding the
2301         * closing 4 bytes of CRC).
2302         */
2303        cpu_to_be32_array((__force __be32 *)buf, (const u32 *)buf,
2304                          DIV_ROUND_UP(nvm_image->length - 4, 4));
2305
2306        /* Calc CRC for the "actual" image buffer, i.e. not including
2307         * the last 4 CRC bytes.
2308         */
2309        *crc = ~crc32(~0U, buf, nvm_image->length - 4);
2310        *crc = (__force u32)cpu_to_be32p(crc);
2311
2312out:
2313        kfree(buf);
2314
2315        return rc;
2316}
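
    /* Note (illustrative): ~crc32(~0U, buf, len) is the standard IEEE CRC32
     * (seed 0xffffffff, result inverted). Here it is computed over the
     * big-endian-converted image minus its closing 4 bytes and converted to
     * big-endian itself, ready to be written back as the image's last word.
     */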
2317
2318/* Binary file format -
2319 *     /----------------------------------------------------------------------\
2320 * 0B  |                       0x4 [command index]                            |
2321 * 4B  | image_type     | Options        |  Number of register settings       |
2322 * 8B  |                       Value                                          |
2323 * 12B |                       Mask                                           |
2324 * 16B |                       Offset                                         |
2325 *     \----------------------------------------------------------------------/
2326 * There can be several Value-Mask-Offset sets as specified by 'Number of...'.
2327 * Options - b'0 - Calculate & Update CRC for image
2328 */
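    /* Example record (illustrative, little-endian host): updating a single
     * register of a hypothetical image type 0x20, without a CRC update:
     *
     *        04 00 00 00   command index = 0x4
     *        20 00 01 00   image_type = 0x20, Options = 0, one settings set
     *        01 00 00 00   Value  = 0x00000001
     *        01 00 00 00   Mask   = 0x00000001
     *        10 00 00 00   Offset = 0x00000010
     */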
2329static int qed_nvm_flash_image_access(struct qed_dev *cdev, const u8 **data,
2330                                      bool *check_resp)
2331{
2332        struct qed_nvm_image_att nvm_image;
2333        struct qed_hwfn *p_hwfn;
2334        bool is_crc = false;
2335        u32 image_type;
2336        int rc = 0, i;
2337        u16 len;
2338
2339        *data += 4;
2340        image_type = **data;
2341        p_hwfn = QED_LEADING_HWFN(cdev);
2342        for (i = 0; i < p_hwfn->nvm_info.num_images; i++)
2343                if (image_type == p_hwfn->nvm_info.image_att[i].image_type)
2344                        break;
2345        if (i == p_hwfn->nvm_info.num_images) {
2346                DP_ERR(cdev, "Failed to find nvram image of type %08x\n",
2347                       image_type);
2348                return -ENOENT;
2349        }
2350
2351        nvm_image.start_addr = p_hwfn->nvm_info.image_att[i].nvm_start_addr;
2352        nvm_image.length = p_hwfn->nvm_info.image_att[i].len;
2353
2354        DP_VERBOSE(cdev, NETIF_MSG_DRV,
2355                   "Read image %02x; type = %08x; NVM [%08x,...,%08x]\n",
2356                   **data, image_type, nvm_image.start_addr,
2357                   nvm_image.start_addr + nvm_image.length - 1);
2358        (*data)++;
2359        is_crc = !!(**data & BIT(0));
2360        (*data)++;
2361        len = *((u16 *)*data);
2362        *data += 2;
2363        if (is_crc) {
2364                u32 crc = 0;
2365
2366                rc = qed_nvm_flash_image_access_crc(cdev, &nvm_image, &crc);
2367                if (rc) {
2368                        DP_ERR(cdev, "Failed calculating CRC, rc = %d\n", rc);
2369                        goto exit;
2370                }
2371
2372                rc = qed_mcp_nvm_write(cdev, QED_NVM_WRITE_NVRAM,
2373                                       (nvm_image.start_addr +
2374                                        nvm_image.length - 4), (u8 *)&crc, 4);
2375                if (rc)
2376                        DP_ERR(cdev, "Failed writing to %08x, rc = %d\n",
2377                               nvm_image.start_addr + nvm_image.length - 4, rc);
2378                goto exit;
2379        }
2380
2381        /* Iterate over the values for setting */
2382        while (len) {
2383                u32 offset, mask, value, cur_value;
2384                u8 buf[4];
2385
2386                value = *((u32 *)*data);
2387                *data += 4;
2388                mask = *((u32 *)*data);
2389                *data += 4;
2390                offset = *((u32 *)*data);
2391                *data += 4;
2392
2393                rc = qed_mcp_nvm_read(cdev, nvm_image.start_addr + offset, buf,
2394                                      4);
2395                if (rc) {
2396                        DP_ERR(cdev, "Failed reading from %08x\n",
2397                               nvm_image.start_addr + offset);
2398                        goto exit;
2399                }
2400
2401                cur_value = le32_to_cpu(*((__le32 *)buf));
2402                DP_VERBOSE(cdev, NETIF_MSG_DRV,
2403                           "NVM %08x: %08x -> %08x [Value %08x Mask %08x]\n",
2404                           nvm_image.start_addr + offset, cur_value,
2405                           (cur_value & ~mask) | (value & mask), value, mask);
2406                value = (value & mask) | (cur_value & ~mask);
2407                rc = qed_mcp_nvm_write(cdev, QED_NVM_WRITE_NVRAM,
2408                                       nvm_image.start_addr + offset,
2409                                       (u8 *)&value, 4);
2410                if (rc) {
2411                        DP_ERR(cdev, "Failed writing to %08x\n",
2412                               nvm_image.start_addr + offset);
2413                        goto exit;
2414                }
2415
2416                len--;
2417        }
2418exit:
2419        return rc;
2420}
2421
2422/* Binary file format -
2423 *     /----------------------------------------------------------------------\
2424 * 0B  |                       0x3 [command index]                            |
2425 * 4B  | b'0: check_response?   | b'1-31  reserved                            |
2426 * 8B  | File-type |                   reserved                               |
2427 * 12B |                    Image length in bytes                             |
2428 *     \----------------------------------------------------------------------/
2429 *     Start a new file of the provided type
2430 */
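    /* Example record (illustrative, little-endian host): starting a file of
     * a hypothetical type 0x1b with check_response requested:
     * 03 00 00 00 | 01 00 00 00 | 1b 00 00 00. Only when the file type is
     * DRV_MB_PARAM_NVM_PUT_FILE_BEGIN_MBI does a fourth dword carry the
     * image length, as parsed by the function below.
     */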
2431static int qed_nvm_flash_image_file_start(struct qed_dev *cdev,
2432                                          const u8 **data, bool *check_resp)
2433{
2434        u32 file_type, file_size = 0;
2435        int rc;
2436
2437        *data += 4;
2438        *check_resp = !!(**data & BIT(0));
2439        *data += 4;
2440        file_type = **data;
2441
2442        DP_VERBOSE(cdev, NETIF_MSG_DRV,
2443                   "About to start a new file of type %02x\n", file_type);
2444        if (file_type == DRV_MB_PARAM_NVM_PUT_FILE_BEGIN_MBI) {
2445                *data += 4;
2446                file_size = *((u32 *)(*data));
2447        }
2448
2449        rc = qed_mcp_nvm_write(cdev, QED_PUT_FILE_BEGIN, file_type,
2450                               (u8 *)(&file_size), 4);
2451        *data += 4;
2452
2453        return rc;
2454}
2455
2456/* Binary file format -
2457 *     /----------------------------------------------------------------------\
2458 * 0B  |                       0x2 [command index]                            |
2459 * 4B  |                       Length in bytes                                |
2460 * 8B  | b'0: check_response?   | b'1-31  reserved                            |
2461 * 12B |                       Offset in bytes                                |
2462 * 16B |                       Data ...                                       |
2463 *     \----------------------------------------------------------------------/
2464 *     Write data as part of a file that was previously started. Data should be
2465 *     of length equal to that provided in the message
2466 */
2467static int qed_nvm_flash_image_file_data(struct qed_dev *cdev,
2468                                         const u8 **data, bool *check_resp)
2469{
2470        u32 offset, len;
2471        int rc;
2472
2473        *data += 4;
2474        len = *((u32 *)(*data));
2475        *data += 4;
2476        *check_resp = !!(**data & BIT(0));
2477        *data += 4;
2478        offset = *((u32 *)(*data));
2479        *data += 4;
2480
2481        DP_VERBOSE(cdev, NETIF_MSG_DRV,
2482                   "About to write File-data: %08x bytes to offset %08x\n",
2483                   len, offset);
2484
2485        rc = qed_mcp_nvm_write(cdev, QED_PUT_FILE_DATA, offset,
2486                               (char *)(*data), len);
2487        *data += len;
2488
2489        return rc;
2490}
2491
2492/* Binary file format [General header] -
2493 *     /----------------------------------------------------------------------\
2494 * 0B  |                       QED_NVM_SIGNATURE                              |
2495 * 4B  |                       Length in bytes                                |
2496 * 8B  | Highest command in this batchfile |          Reserved                |
2497 *     \----------------------------------------------------------------------/
2498 */
2499static int qed_nvm_flash_image_validate(struct qed_dev *cdev,
2500                                        const struct firmware *image,
2501                                        const u8 **data)
2502{
2503        u32 signature, len;
2504
2505        /* Check minimum size */
2506        if (image->size < 12) {
2507                DP_ERR(cdev, "Image is too short [%08x]\n", (u32)image->size);
2508                return -EINVAL;
2509        }
2510
2511        /* Check signature */
2512        signature = *((u32 *)(*data));
2513        if (signature != QED_NVM_SIGNATURE) {
2514                DP_ERR(cdev, "Wrong signature '%08x'\n", signature);
2515                return -EINVAL;
2516        }
2517
2518        *data += 4;
2519        /* Validate internal size equals the image-size */
2520        len = *((u32 *)(*data));
2521        if (len != image->size) {
2522                DP_ERR(cdev, "Size mismatch: internal = %08x image = %08x\n",
2523                       len, (u32)image->size);
2524                return -EINVAL;
2525        }
2526
2527        *data += 4;
2528        /* Make sure driver familiar with all commands necessary for this */
2529        if (*((u16 *)(*data)) >= QED_NVM_FLASH_CMD_NVM_MAX) {
2530                DP_ERR(cdev, "File contains unsupported commands [Need %04x]\n",
2531                       *((u16 *)(*data)));
2532                return -EINVAL;
2533        }
2534
2535        *data += 4;
2536
2537        return 0;
2538}
2539
2540/* Binary file format -
2541 *     /----------------------------------------------------------------------\
2542 * 0B  |                       0x5 [command index]                            |
2543 * 4B  | Number of config attributes     |          Reserved                  |
2544 * 4B  | Config ID                       | Entity ID      | Length            |
2545 * 4B  | Value                                                                |
2546 *     |                                                                      |
2547 *     \----------------------------------------------------------------------/
2548 * There can be several cfg_id-entity_id-Length-Value sets as specified by
2549 * 'Number of config attributes'.
2550 *
2551 * The API parses config attributes from the user provided buffer and flashes
2552 * them to the respective NVM path using the Management FW interface.
2553 */
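    /* Batching note (illustrative): the loop below raises
     * QED_NVM_CFG_OPTION_INIT on the first attribute of each batch and adds
     * QED_NVM_CFG_OPTION_COMMIT | QED_NVM_CFG_OPTION_FREE when i reaches a
     * multiple of QED_NVM_CFG_MAX_ATTRS or the final attribute. E.g. with
     * count == QED_NVM_CFG_MAX_ATTRS + 2, commits happen at
     * i == QED_NVM_CFG_MAX_ATTRS and i == count, with INIT raised again on
     * the attribute following the first commit.
     */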
2554static int qed_nvm_flash_cfg_write(struct qed_dev *cdev, const u8 **data)
2555{
2556        struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
2557        u8 entity_id, len, buf[32];
2558        bool need_nvm_init = true;
2559        struct qed_ptt *ptt;
2560        u16 cfg_id, count;
2561        int rc = 0, i;
2562        u32 flags;
2563
2564        ptt = qed_ptt_acquire(hwfn);
2565        if (!ptt)
2566                return -EAGAIN;
2567
2568        /* NVM CFG ID attribute header */
2569        *data += 4;
2570        count = *((u16 *)*data);
2571        *data += 4;
2572
2573        DP_VERBOSE(cdev, NETIF_MSG_DRV,
2574                   "Read config ids: num_attrs = %d\n", count);
2575        /* NVM CFG ID attributes. Start loop index from 1 to avoid additional
2576         * arithmetic operations in the implementation.
2577         */
2578        for (i = 1; i <= count; i++) {
2579                cfg_id = *((u16 *)*data);
2580                *data += 2;
2581                entity_id = **data;
2582                (*data)++;
2583                len = **data;
2584                (*data)++;
2585                memcpy(buf, *data, len);
2586                *data += len;
2587
2588                flags = 0;
2589                if (need_nvm_init) {
2590                        flags |= QED_NVM_CFG_OPTION_INIT;
2591                        need_nvm_init = false;
2592                }
2593
2594                /* Commit to flash and free the resources */
2595                if (!(i % QED_NVM_CFG_MAX_ATTRS) || i == count) {
2596                        flags |= QED_NVM_CFG_OPTION_COMMIT |
2597                                 QED_NVM_CFG_OPTION_FREE;
2598                        need_nvm_init = true;
2599                }
2600
2601                if (entity_id)
2602                        flags |= QED_NVM_CFG_OPTION_ENTITY_SEL;
2603
2604                DP_VERBOSE(cdev, NETIF_MSG_DRV,
2605                           "cfg_id = %d entity = %d len = %d\n", cfg_id,
2606                           entity_id, len);
2607                rc = qed_mcp_nvm_set_cfg(hwfn, ptt, cfg_id, entity_id, flags,
2608                                         buf, len);
2609                if (rc) {
2610                        DP_ERR(cdev, "Error %d configuring %d\n", rc, cfg_id);
2611                        break;
2612                }
2613        }
2614
2615        qed_ptt_release(hwfn, ptt);
2616
2617        return rc;
2618}
2619
2620#define QED_MAX_NVM_BUF_LEN     32
2621static int qed_nvm_flash_cfg_len(struct qed_dev *cdev, u32 cmd)
2622{
2623        struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
2624        u8 buf[QED_MAX_NVM_BUF_LEN];
2625        struct qed_ptt *ptt;
2626        u32 len;
2627        int rc;
2628
2629        ptt = qed_ptt_acquire(hwfn);
2630        if (!ptt)
2631                return QED_MAX_NVM_BUF_LEN;
2632
2633        rc = qed_mcp_nvm_get_cfg(hwfn, ptt, cmd, 0, QED_NVM_CFG_GET_FLAGS, buf,
2634                                 &len);
2635        if (rc || !len) {
2636                DP_ERR(cdev, "Error %d reading %d\n", rc, cmd);
2637                len = QED_MAX_NVM_BUF_LEN;
2638        }
2639
2640        qed_ptt_release(hwfn, ptt);
2641
2642        return len;
2643}
2644
2645static int qed_nvm_flash_cfg_read(struct qed_dev *cdev, u8 **data,
2646                                  u32 cmd, u32 entity_id)
2647{
2648        struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
2649        struct qed_ptt *ptt;
2650        u32 flags, len;
2651        int rc = 0;
2652
2653        ptt = qed_ptt_acquire(hwfn);
2654        if (!ptt)
2655                return -EAGAIN;
2656
2657        DP_VERBOSE(cdev, NETIF_MSG_DRV,
2658                   "Read config cmd = %d entity id %d\n", cmd, entity_id);
2659        flags = entity_id ? QED_NVM_CFG_GET_PF_FLAGS : QED_NVM_CFG_GET_FLAGS;
2660        rc = qed_mcp_nvm_get_cfg(hwfn, ptt, cmd, entity_id, flags, *data, &len);
2661        if (rc)
2662                DP_ERR(cdev, "Error %d reading %d\n", rc, cmd);
2663
2664        qed_ptt_release(hwfn, ptt);
2665
2666        return rc;
2667}
2668
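    /* Processing sketch (illustrative): a flash batchfile is the 12-byte
     * general header checked by qed_nvm_flash_image_validate() followed by a
     * sequence of the command records documented above; a minimal image could
     * be header + one FILE_START (0x3) record + one FILE_DATA (0x2) record.
     * The loop below dispatches on each record's leading dword and, when the
     * record asked for it, verifies the MCP response before continuing.
     */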
2669static int qed_nvm_flash(struct qed_dev *cdev, const char *name)
2670{
2671        const struct firmware *image;
2672        const u8 *data, *data_end;
2673        u32 cmd_type;
2674        int rc;
2675
2676        rc = request_firmware(&image, name, &cdev->pdev->dev);
2677        if (rc) {
2678                DP_ERR(cdev, "Failed to find '%s'\n", name);
2679                return rc;
2680        }
2681
2682        DP_VERBOSE(cdev, NETIF_MSG_DRV,
2683                   "Flashing '%s' - firmware's data at %p, size is %08x\n",
2684                   name, image->data, (u32)image->size);
2685        data = image->data;
2686        data_end = data + image->size;
2687
2688        rc = qed_nvm_flash_image_validate(cdev, image, &data);
2689        if (rc)
2690                goto exit;
2691
2692        while (data < data_end) {
2693                bool check_resp = false;
2694
2695                /* Parse the actual command */
2696                cmd_type = *((u32 *)data);
2697                switch (cmd_type) {
2698                case QED_NVM_FLASH_CMD_FILE_DATA:
2699                        rc = qed_nvm_flash_image_file_data(cdev, &data,
2700                                                           &check_resp);
2701                        break;
2702                case QED_NVM_FLASH_CMD_FILE_START:
2703                        rc = qed_nvm_flash_image_file_start(cdev, &data,
2704                                                            &check_resp);
2705                        break;
2706                case QED_NVM_FLASH_CMD_NVM_CHANGE:
2707                        rc = qed_nvm_flash_image_access(cdev, &data,
2708                                                        &check_resp);
2709                        break;
2710                case QED_NVM_FLASH_CMD_NVM_CFG_ID:
2711                        rc = qed_nvm_flash_cfg_write(cdev, &data);
2712                        break;
2713                default:
2714                        DP_ERR(cdev, "Unknown command %08x\n", cmd_type);
2715                        rc = -EINVAL;
2716                        goto exit;
2717                }
2718
2719                if (rc) {
2720                        DP_ERR(cdev, "Command %08x failed\n", cmd_type);
2721                        goto exit;
2722                }
2723
2724                /* Check response if needed */
2725                if (check_resp) {
2726                        u32 mcp_response = 0;
2727
2728                        if (qed_mcp_nvm_resp(cdev, (u8 *)&mcp_response)) {
2729                                DP_ERR(cdev, "Failed getting MCP response\n");
2730                                rc = -EINVAL;
2731                                goto exit;
2732                        }
2733
2734                        switch (mcp_response & FW_MSG_CODE_MASK) {
2735                        case FW_MSG_CODE_OK:
2736                        case FW_MSG_CODE_NVM_OK:
2737                        case FW_MSG_CODE_NVM_PUT_FILE_FINISH_OK:
2738                        case FW_MSG_CODE_PHY_OK:
2739                                break;
2740                        default:
2741                                DP_ERR(cdev, "MFW returns error: %08x\n",
2742                                       mcp_response);
2743                                rc = -EINVAL;
2744                                goto exit;
2745                        }
2746                }
2747        }
2748
2749exit:
2750        release_firmware(image);
2751
2752        return rc;
2753}
2754
2755static int qed_nvm_get_image(struct qed_dev *cdev, enum qed_nvm_images type,
2756                             u8 *buf, u16 len)
2757{
2758        struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
2759
2760        return qed_mcp_get_nvm_image(hwfn, type, buf, len);
2761}
2762
2763void qed_schedule_recovery_handler(struct qed_hwfn *p_hwfn)
2764{
2765        struct qed_common_cb_ops *ops = p_hwfn->cdev->protocol_ops.common;
2766        void *cookie = p_hwfn->cdev->ops_cookie;
2767
2768        if (ops && ops->schedule_recovery_handler)
2769                ops->schedule_recovery_handler(cookie);
2770}
2771
2772static const char * const qed_hw_err_type_descr[] = {
2773        [QED_HW_ERR_FAN_FAIL]           = "Fan Failure",
2774        [QED_HW_ERR_MFW_RESP_FAIL]      = "MFW Response Failure",
2775        [QED_HW_ERR_HW_ATTN]            = "HW Attention",
2776        [QED_HW_ERR_DMAE_FAIL]          = "DMAE Failure",
2777        [QED_HW_ERR_RAMROD_FAIL]        = "Ramrod Failure",
2778        [QED_HW_ERR_FW_ASSERT]          = "FW Assertion",
2779        [QED_HW_ERR_LAST]               = "Unknown",
2780};
2781
2782void qed_hw_error_occurred(struct qed_hwfn *p_hwfn,
2783                           enum qed_hw_err_type err_type)
2784{
2785        struct qed_common_cb_ops *ops = p_hwfn->cdev->protocol_ops.common;
2786        void *cookie = p_hwfn->cdev->ops_cookie;
2787        const char *err_str;
2788
2789        if (err_type > QED_HW_ERR_LAST)
2790                err_type = QED_HW_ERR_LAST;
2791        err_str = qed_hw_err_type_descr[err_type];
2792
2793        DP_NOTICE(p_hwfn, "HW error occurred [%s]\n", err_str);
2794
2795        /* Call the HW error handler of the protocol driver.
2796         * If it is not available - perform a minimal handling of preventing
2797         * HW attentions from being reasserted.
2798         */
2799        if (ops && ops->schedule_hw_err_handler)
2800                ops->schedule_hw_err_handler(cookie, err_type);
2801        else
2802                qed_int_attn_clr_enable(p_hwfn->cdev, true);
2803}
2804
2805static int qed_set_coalesce(struct qed_dev *cdev, u16 rx_coal, u16 tx_coal,
2806                            void *handle)
2807{
2808        return qed_set_queue_coalesce(rx_coal, tx_coal, handle);
2809}
2810
2811static int qed_set_led(struct qed_dev *cdev, enum qed_led_mode mode)
2812{
2813        struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
2814        struct qed_ptt *ptt;
2815        int status = 0;
2816
2817        ptt = qed_ptt_acquire(hwfn);
2818        if (!ptt)
2819                return -EAGAIN;
2820
2821        status = qed_mcp_set_led(hwfn, ptt, mode);
2822
2823        qed_ptt_release(hwfn, ptt);
2824
2825        return status;
2826}
2827
2828int qed_recovery_process(struct qed_dev *cdev)
2829{
2830        struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
2831        struct qed_ptt *p_ptt;
2832        int rc = 0;
2833
2834        p_ptt = qed_ptt_acquire(p_hwfn);
2835        if (!p_ptt)
2836                return -EAGAIN;
2837
2838        rc = qed_start_recovery_process(p_hwfn, p_ptt);
2839
2840        qed_ptt_release(p_hwfn, p_ptt);
2841
2842        return rc;
2843}
2844
static int qed_update_wol(struct qed_dev *cdev, bool enabled)
{
        struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
        struct qed_ptt *ptt;
        int rc = 0;

        if (IS_VF(cdev))
                return 0;

        ptt = qed_ptt_acquire(hwfn);
        if (!ptt)
                return -EAGAIN;

        rc = qed_mcp_ov_update_wol(hwfn, ptt, enabled ? QED_OV_WOL_ENABLED
                                   : QED_OV_WOL_DISABLED);
        if (rc)
                goto out;
        rc = qed_mcp_ov_update_current_config(hwfn, ptt, QED_OV_CLIENT_DRV);

out:
        qed_ptt_release(hwfn, ptt);
        return rc;
}

static int qed_update_drv_state(struct qed_dev *cdev, bool active)
{
        struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
        struct qed_ptt *ptt;
        int status = 0;

        if (IS_VF(cdev))
                return 0;

        ptt = qed_ptt_acquire(hwfn);
        if (!ptt)
                return -EAGAIN;

        status = qed_mcp_ov_update_driver_state(hwfn, ptt, active ?
                                                QED_OV_DRIVER_STATE_ACTIVE :
                                                QED_OV_DRIVER_STATE_DISABLED);

        qed_ptt_release(hwfn, ptt);

        return status;
}

static int qed_update_mac(struct qed_dev *cdev, const u8 *mac)
{
        struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
        struct qed_ptt *ptt;
        int status = 0;

        if (IS_VF(cdev))
                return 0;

        ptt = qed_ptt_acquire(hwfn);
        if (!ptt)
                return -EAGAIN;

        status = qed_mcp_ov_update_mac(hwfn, ptt, mac);
        if (status)
                goto out;

        status = qed_mcp_ov_update_current_config(hwfn, ptt, QED_OV_CLIENT_DRV);

out:
        qed_ptt_release(hwfn, ptt);
        return status;
}

static int qed_update_mtu(struct qed_dev *cdev, u16 mtu)
{
        struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
        struct qed_ptt *ptt;
        int status = 0;

        if (IS_VF(cdev))
                return 0;

        ptt = qed_ptt_acquire(hwfn);
        if (!ptt)
                return -EAGAIN;

        status = qed_mcp_ov_update_mtu(hwfn, ptt, mtu);
        if (status)
                goto out;

        status = qed_mcp_ov_update_current_config(hwfn, ptt, QED_OV_CLIENT_DRV);

out:
        qed_ptt_release(hwfn, ptt);
        return status;
}

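/* Read 'len' bytes at 'offset' from the plugged-in module's EEPROM page
 * at I2C address 'dev_addr' on this function's port.
 */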
static int qed_read_module_eeprom(struct qed_dev *cdev, char *buf,
                                  u8 dev_addr, u32 offset, u32 len)
{
        struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
        struct qed_ptt *ptt;
        int rc = 0;

        if (IS_VF(cdev))
                return 0;

        ptt = qed_ptt_acquire(hwfn);
        if (!ptt)
                return -EAGAIN;

        rc = qed_mcp_phy_sfp_read(hwfn, ptt, MFW_PORT(hwfn), dev_addr,
                                  offset, len, buf);

        qed_ptt_release(hwfn, ptt);

        return rc;
}

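/* Update a single GRC debug configuration value (PF only). */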
static int qed_set_grc_config(struct qed_dev *cdev, u32 cfg_id, u32 val)
{
        struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
        struct qed_ptt *ptt;
        int rc = 0;

        if (IS_VF(cdev))
                return 0;

        ptt = qed_ptt_acquire(hwfn);
        if (!ptt)
                return -EAGAIN;

        rc = qed_dbg_grc_config(hwfn, cfg_id, val);

        qed_ptt_release(hwfn, ptt);

        return rc;
}

static u8 qed_get_affin_hwfn_idx(struct qed_dev *cdev)
{
        return QED_AFFIN_HWFN_IDX(cdev);
}

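/* Operation tables exposed to the protocol drivers via the qed_if.h API. */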
static struct qed_selftest_ops qed_selftest_ops_pass = {
        .selftest_memory = &qed_selftest_memory,
        .selftest_interrupt = &qed_selftest_interrupt,
        .selftest_register = &qed_selftest_register,
        .selftest_clock = &qed_selftest_clock,
        .selftest_nvram = &qed_selftest_nvram,
};

const struct qed_common_ops qed_common_ops_pass = {
        .selftest = &qed_selftest_ops_pass,
        .probe = &qed_probe,
        .remove = &qed_remove,
        .set_power_state = &qed_set_power_state,
        .set_name = &qed_set_name,
        .update_pf_params = &qed_update_pf_params,
        .slowpath_start = &qed_slowpath_start,
        .slowpath_stop = &qed_slowpath_stop,
        .set_fp_int = &qed_set_int_fp,
        .get_fp_int = &qed_get_int_fp,
        .sb_init = &qed_sb_init,
        .sb_release = &qed_sb_release,
        .simd_handler_config = &qed_simd_handler_config,
        .simd_handler_clean = &qed_simd_handler_clean,
        .dbg_grc = &qed_dbg_grc,
        .dbg_grc_size = &qed_dbg_grc_size,
        .can_link_change = &qed_can_link_change,
        .set_link = &qed_set_link,
        .get_link = &qed_get_current_link,
        .drain = &qed_drain,
        .update_msglvl = &qed_init_dp,
        .devlink_register = qed_devlink_register,
        .devlink_unregister = qed_devlink_unregister,
        .report_fatal_error = qed_report_fatal_error,
        .dbg_all_data = &qed_dbg_all_data,
        .dbg_all_data_size = &qed_dbg_all_data_size,
        .chain_alloc = &qed_chain_alloc,
        .chain_free = &qed_chain_free,
        .nvm_flash = &qed_nvm_flash,
        .nvm_get_image = &qed_nvm_get_image,
        .set_coalesce = &qed_set_coalesce,
        .set_led = &qed_set_led,
        .recovery_process = &qed_recovery_process,
        .recovery_prolog = &qed_recovery_prolog,
        .attn_clr_enable = &qed_int_attn_clr_enable,
        .update_drv_state = &qed_update_drv_state,
        .update_mac = &qed_update_mac,
        .update_mtu = &qed_update_mtu,
        .update_wol = &qed_update_wol,
        .db_recovery_add = &qed_db_recovery_add,
        .db_recovery_del = &qed_db_recovery_del,
        .read_module_eeprom = &qed_read_module_eeprom,
        .get_affin_hwfn_idx = &qed_get_affin_hwfn_idx,
        .read_nvm_cfg = &qed_nvm_flash_cfg_read,
        .read_nvm_cfg_len = &qed_nvm_flash_cfg_len,
        .set_grc_config = &qed_set_grc_config,
};

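/* Collect protocol statistics for the MFW. Only the LAN, FCoE and
 * iSCSI statistics types are supported; anything else is logged and
 * the (zeroed) stats buffer is returned as-is.
 */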
void qed_get_protocol_stats(struct qed_dev *cdev,
                            enum qed_mcp_protocol_type type,
                            union qed_mcp_protocol_stats *stats)
{
        struct qed_eth_stats eth_stats;

        memset(stats, 0, sizeof(*stats));

        switch (type) {
        case QED_MCP_LAN_STATS:
                qed_get_vport_stats(cdev, &eth_stats);
                stats->lan_stats.ucast_rx_pkts =
                                        eth_stats.common.rx_ucast_pkts;
                stats->lan_stats.ucast_tx_pkts =
                                        eth_stats.common.tx_ucast_pkts;
                stats->lan_stats.fcs_err = -1;
                break;
        case QED_MCP_FCOE_STATS:
                qed_get_protocol_stats_fcoe(cdev, &stats->fcoe_stats);
                break;
        case QED_MCP_ISCSI_STATS:
                qed_get_protocol_stats_iscsi(cdev, &stats->iscsi_stats);
                break;
        default:
                DP_VERBOSE(cdev, QED_MSG_SP,
                           "Invalid protocol type = %d\n", type);
                return;
        }
}

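/* MFW requested TLV data; defer the collection to the slowpath
 * workqueue instead of handling it in the current context.
 */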
int qed_mfw_tlv_req(struct qed_hwfn *hwfn)
{
        DP_VERBOSE(hwfn->cdev, NETIF_MSG_DRV,
                   "Scheduling slowpath task [Flag: %d]\n",
                   QED_SLOWPATH_MFW_TLV_REQ);
        /* Memory barrier for setting atomic bit */
        smp_mb__before_atomic();
        set_bit(QED_SLOWPATH_MFW_TLV_REQ, &hwfn->slowpath_task_flags);
        /* Memory barrier after setting atomic bit */
        smp_mb__after_atomic();
        queue_delayed_work(hwfn->slowpath_wq, &hwfn->slowpath_task, 0);

        return 0;
}

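/* Build the generic TLV: feature flags and MAC addresses come from the
 * protocol driver's callback, traffic counters from the vport stats.
 */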
static void
qed_fill_generic_tlv_data(struct qed_dev *cdev, struct qed_mfw_tlv_generic *tlv)
{
        struct qed_common_cb_ops *op = cdev->protocol_ops.common;
        struct qed_eth_stats_common *p_common;
        struct qed_generic_tlvs gen_tlvs;
        struct qed_eth_stats stats;
        int i;

        memset(&gen_tlvs, 0, sizeof(gen_tlvs));
        op->get_generic_tlv_data(cdev->ops_cookie, &gen_tlvs);

        if (gen_tlvs.feat_flags & QED_TLV_IP_CSUM)
                tlv->flags.ipv4_csum_offload = true;
        if (gen_tlvs.feat_flags & QED_TLV_LSO)
                tlv->flags.lso_supported = true;
        tlv->flags.b_set = true;

        for (i = 0; i < QED_TLV_MAC_COUNT; i++) {
                if (is_valid_ether_addr(gen_tlvs.mac[i])) {
                        ether_addr_copy(tlv->mac[i], gen_tlvs.mac[i]);
                        tlv->mac_set[i] = true;
                }
        }

        qed_get_vport_stats(cdev, &stats);
        p_common = &stats.common;
        tlv->rx_frames = p_common->rx_ucast_pkts + p_common->rx_mcast_pkts +
                         p_common->rx_bcast_pkts;
        tlv->rx_frames_set = true;
        tlv->rx_bytes = p_common->rx_ucast_bytes + p_common->rx_mcast_bytes +
                        p_common->rx_bcast_bytes;
        tlv->rx_bytes_set = true;
        tlv->tx_frames = p_common->tx_ucast_pkts + p_common->tx_mcast_pkts +
                         p_common->tx_bcast_pkts;
        tlv->tx_frames_set = true;
        tlv->tx_bytes = p_common->tx_ucast_bytes + p_common->tx_mcast_bytes +
                        p_common->tx_bcast_bytes;
        tlv->tx_bytes_set = true;
}

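/* Fill the requested TLV type for the MFW. The generic TLV is built
 * locally; protocol-specific TLVs are fetched from the protocol driver.
 */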
int qed_mfw_fill_tlv_data(struct qed_hwfn *hwfn, enum qed_mfw_tlv_type type,
                          union qed_mfw_tlv_data *tlv_buf)
{
        struct qed_dev *cdev = hwfn->cdev;
        struct qed_common_cb_ops *ops;

        ops = cdev->protocol_ops.common;
        if (!ops || !ops->get_protocol_tlv_data || !ops->get_generic_tlv_data) {
                DP_NOTICE(hwfn, "Can't collect TLV management info\n");
                return -EINVAL;
        }

        switch (type) {
        case QED_MFW_TLV_GENERIC:
                qed_fill_generic_tlv_data(hwfn->cdev, &tlv_buf->generic);
                break;
        case QED_MFW_TLV_ETH:
                ops->get_protocol_tlv_data(cdev->ops_cookie, &tlv_buf->eth);
                break;
        case QED_MFW_TLV_FCOE:
                ops->get_protocol_tlv_data(cdev->ops_cookie, &tlv_buf->fcoe);
                break;
        case QED_MFW_TLV_ISCSI:
                ops->get_protocol_tlv_data(cdev->ops_cookie, &tlv_buf->iscsi);
                break;
        default:
                break;
        }

        return 0;
}

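/* Wall-clock time in seconds since the Unix epoch. */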
unsigned long qed_get_epoch_time(void)
{
        return ktime_get_real_seconds();
}
