linux/drivers/net/ethernet/qlogic/qed/qed_main.c
/* QLogic qed NIC Driver
 * Copyright (c) 2015-2017  QLogic Corporation
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/stddef.h>
#include <linux/pci.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/dma-mapping.h>
#include <linux/string.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <linux/ethtool.h>
#include <linux/etherdevice.h>
#include <linux/vmalloc.h>
#include <linux/crash_dump.h>
#include <linux/crc32.h>
#include <linux/qed/qed_if.h>
#include <linux/qed/qed_ll2_if.h>

#include "qed.h"
#include "qed_sriov.h"
#include "qed_sp.h"
#include "qed_dev_api.h"
#include "qed_ll2.h"
#include "qed_fcoe.h"
#include "qed_iscsi.h"

#include "qed_mcp.h"
#include "qed_reg_addr.h"
#include "qed_hw.h"
#include "qed_selftest.h"
#include "qed_debug.h"

#define QED_ROCE_QPS                    (8192)
#define QED_ROCE_DPIS                   (8)
#define QED_RDMA_SRQS                   QED_ROCE_QPS

static char version[] =
        "QLogic FastLinQ 4xxxx Core Module qed " DRV_MODULE_VERSION "\n";

MODULE_DESCRIPTION("QLogic FastLinQ 4xxxx Core Module");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

#define FW_FILE_VERSION                         \
        __stringify(FW_MAJOR_VERSION) "."       \
        __stringify(FW_MINOR_VERSION) "."       \
        __stringify(FW_REVISION_VERSION) "."    \
        __stringify(FW_ENGINEERING_VERSION)

#define QED_FW_FILE_NAME        \
        "qed/qed_init_values_zipped-" FW_FILE_VERSION ".bin"

MODULE_FIRMWARE(QED_FW_FILE_NAME);

static int __init qed_init(void)
{
        pr_info("%s", version);

        return 0;
}

static void __exit qed_cleanup(void)
{
        pr_notice("qed_cleanup called\n");
}

module_init(qed_init);
module_exit(qed_cleanup);

/* Check if the DMA controller on the machine can properly handle the DMA
 * addressing required by the device.
 */
static int qed_set_coherency_mask(struct qed_dev *cdev)
{
        struct device *dev = &cdev->pdev->dev;

        if (dma_set_mask(dev, DMA_BIT_MASK(64)) == 0) {
                if (dma_set_coherent_mask(dev, DMA_BIT_MASK(64)) != 0) {
                        DP_NOTICE(cdev,
                                  "Can't request 64-bit consistent allocations\n");
                        return -EIO;
                }
        } else if (dma_set_mask(dev, DMA_BIT_MASK(32)) != 0) {
                DP_NOTICE(cdev, "Can't request 64b/32b DMA addresses\n");
                return -EIO;
        }

        return 0;
}
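
/* Illustrative sketch, not part of the driver: the same 64-bit/32-bit
 * fallback can be expressed with the combined dma_set_mask_and_coherent()
 * helper. Note the semantics differ slightly: this version also falls back
 * to 32-bit when only the 64-bit coherent mask fails, which the original
 * above treats as a hard error. Function name is hypothetical.
 */
#if 0
static int qed_set_coherency_mask_sketch(struct qed_dev *cdev)
{
        struct device *dev = &cdev->pdev->dev;

        /* Try full 64-bit streaming + coherent masks, else fall back. */
        if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)) &&
            dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32))) {
                DP_NOTICE(cdev, "Can't request 64b/32b DMA addresses\n");
                return -EIO;
        }

        return 0;
}
#endif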

static void qed_free_pci(struct qed_dev *cdev)
{
        struct pci_dev *pdev = cdev->pdev;

        if (cdev->doorbells && cdev->db_size)
                iounmap(cdev->doorbells);
        if (cdev->regview)
                iounmap(cdev->regview);
        if (atomic_read(&pdev->enable_cnt) == 1)
                pci_release_regions(pdev);

        pci_disable_device(pdev);
}

#define PCI_REVISION_ID_ERROR_VAL       0xff

/* Performs PCI initialization as well as initializing PCI-related parameters
 * in the device structure. Returns 0 in case of success.
 */
static int qed_init_pci(struct qed_dev *cdev, struct pci_dev *pdev)
{
        u8 rev_id;
        int rc;

        cdev->pdev = pdev;

        rc = pci_enable_device(pdev);
        if (rc) {
                DP_NOTICE(cdev, "Cannot enable PCI device\n");
                goto err0;
        }

        if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
                DP_NOTICE(cdev, "No memory region found in bar #0\n");
                rc = -EIO;
                goto err1;
        }

        if (IS_PF(cdev) && !(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
                DP_NOTICE(cdev, "No memory region found in bar #2\n");
                rc = -EIO;
                goto err1;
        }

        if (atomic_read(&pdev->enable_cnt) == 1) {
                rc = pci_request_regions(pdev, "qed");
                if (rc) {
                        DP_NOTICE(cdev,
                                  "Failed to request PCI memory resources\n");
                        goto err1;
                }
                pci_set_master(pdev);
                pci_save_state(pdev);
        }

        pci_read_config_byte(pdev, PCI_REVISION_ID, &rev_id);
        if (rev_id == PCI_REVISION_ID_ERROR_VAL) {
                DP_NOTICE(cdev,
                          "Detected PCI device error [rev_id 0x%x]. Probably due to prior indication. Aborting.\n",
                          rev_id);
                rc = -ENODEV;
                goto err2;
        }
        if (!pci_is_pcie(pdev)) {
                DP_NOTICE(cdev, "The bus is not PCI Express\n");
                rc = -EIO;
                goto err2;
        }

        cdev->pci_params.pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
        if (IS_PF(cdev) && !cdev->pci_params.pm_cap)
                DP_NOTICE(cdev, "Cannot find power management capability\n");

        rc = qed_set_coherency_mask(cdev);
        if (rc)
                goto err2;

        cdev->pci_params.mem_start = pci_resource_start(pdev, 0);
        cdev->pci_params.mem_end = pci_resource_end(pdev, 0);
        cdev->pci_params.irq = pdev->irq;

        cdev->regview = pci_ioremap_bar(pdev, 0);
        if (!cdev->regview) {
                DP_NOTICE(cdev, "Cannot map register space, aborting\n");
                rc = -ENOMEM;
                goto err2;
        }

        cdev->db_phys_addr = pci_resource_start(cdev->pdev, 2);
        cdev->db_size = pci_resource_len(cdev->pdev, 2);
        if (!cdev->db_size) {
                if (IS_PF(cdev)) {
                        DP_NOTICE(cdev, "No Doorbell bar available\n");
                        return -EINVAL;
                } else {
                        return 0;
                }
        }

        cdev->doorbells = ioremap_wc(cdev->db_phys_addr, cdev->db_size);

        if (!cdev->doorbells) {
                DP_NOTICE(cdev, "Cannot map doorbell space\n");
                return -ENOMEM;
        }

        return 0;

err2:
        pci_release_regions(pdev);
err1:
        pci_disable_device(pdev);
err0:
        return rc;
}

int qed_fill_dev_info(struct qed_dev *cdev,
                      struct qed_dev_info *dev_info)
{
        struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
        struct qed_hw_info *hw_info = &p_hwfn->hw_info;
        struct qed_tunnel_info *tun = &cdev->tunnel;
        struct qed_ptt *ptt;

        memset(dev_info, 0, sizeof(struct qed_dev_info));

        if (tun->vxlan.tun_cls == QED_TUNN_CLSS_MAC_VLAN &&
            tun->vxlan.b_mode_enabled)
                dev_info->vxlan_enable = true;

        if (tun->l2_gre.b_mode_enabled && tun->ip_gre.b_mode_enabled &&
            tun->l2_gre.tun_cls == QED_TUNN_CLSS_MAC_VLAN &&
            tun->ip_gre.tun_cls == QED_TUNN_CLSS_MAC_VLAN)
                dev_info->gre_enable = true;

        if (tun->l2_geneve.b_mode_enabled && tun->ip_geneve.b_mode_enabled &&
            tun->l2_geneve.tun_cls == QED_TUNN_CLSS_MAC_VLAN &&
            tun->ip_geneve.tun_cls == QED_TUNN_CLSS_MAC_VLAN)
                dev_info->geneve_enable = true;

        dev_info->num_hwfns = cdev->num_hwfns;
        dev_info->pci_mem_start = cdev->pci_params.mem_start;
        dev_info->pci_mem_end = cdev->pci_params.mem_end;
        dev_info->pci_irq = cdev->pci_params.irq;
        dev_info->rdma_supported = QED_IS_RDMA_PERSONALITY(p_hwfn);
        dev_info->dev_type = cdev->type;
        ether_addr_copy(dev_info->hw_mac, hw_info->hw_mac_addr);

        if (IS_PF(cdev)) {
                dev_info->fw_major = FW_MAJOR_VERSION;
                dev_info->fw_minor = FW_MINOR_VERSION;
                dev_info->fw_rev = FW_REVISION_VERSION;
                dev_info->fw_eng = FW_ENGINEERING_VERSION;
                dev_info->b_inter_pf_switch = test_bit(QED_MF_INTER_PF_SWITCH,
                                                       &cdev->mf_bits);
                dev_info->tx_switching = true;

                if (hw_info->b_wol_support == QED_WOL_SUPPORT_PME)
                        dev_info->wol_support = true;

                dev_info->smart_an = qed_mcp_is_smart_an_supported(p_hwfn);

                dev_info->abs_pf_id = QED_LEADING_HWFN(cdev)->abs_pf_id;
        } else {
                qed_vf_get_fw_version(&cdev->hwfns[0], &dev_info->fw_major,
                                      &dev_info->fw_minor, &dev_info->fw_rev,
                                      &dev_info->fw_eng);
        }

        if (IS_PF(cdev)) {
                ptt = qed_ptt_acquire(QED_LEADING_HWFN(cdev));
                if (ptt) {
                        qed_mcp_get_mfw_ver(QED_LEADING_HWFN(cdev), ptt,
                                            &dev_info->mfw_rev, NULL);

                        qed_mcp_get_mbi_ver(QED_LEADING_HWFN(cdev), ptt,
                                            &dev_info->mbi_version);

                        qed_mcp_get_flash_size(QED_LEADING_HWFN(cdev), ptt,
                                               &dev_info->flash_size);

                        qed_ptt_release(QED_LEADING_HWFN(cdev), ptt);
                }
        } else {
                qed_mcp_get_mfw_ver(QED_LEADING_HWFN(cdev), NULL,
                                    &dev_info->mfw_rev, NULL);
        }

        dev_info->mtu = hw_info->mtu;

        return 0;
}

static void qed_free_cdev(struct qed_dev *cdev)
{
        kfree((void *)cdev);
}

static struct qed_dev *qed_alloc_cdev(struct pci_dev *pdev)
{
        struct qed_dev *cdev;

        cdev = kzalloc(sizeof(*cdev), GFP_KERNEL);
        if (!cdev)
                return cdev;

        qed_init_struct(cdev);

        return cdev;
}

/* Sets the requested power state */
static int qed_set_power_state(struct qed_dev *cdev, pci_power_t state)
{
        if (!cdev)
                return -ENODEV;

        DP_VERBOSE(cdev, NETIF_MSG_DRV, "Omitting Power state change\n");
        return 0;
}

/* probing */
static struct qed_dev *qed_probe(struct pci_dev *pdev,
                                 struct qed_probe_params *params)
{
        struct qed_dev *cdev;
        int rc;

        cdev = qed_alloc_cdev(pdev);
        if (!cdev)
                goto err0;

        cdev->drv_type = DRV_ID_DRV_TYPE_LINUX;
        cdev->protocol = params->protocol;

        if (params->is_vf)
                cdev->b_is_vf = true;

        qed_init_dp(cdev, params->dp_module, params->dp_level);

        cdev->recov_in_prog = params->recov_in_prog;

        rc = qed_init_pci(cdev, pdev);
        if (rc) {
                DP_ERR(cdev, "init pci failed\n");
                goto err1;
        }
        DP_INFO(cdev, "PCI init completed successfully\n");

        rc = qed_hw_prepare(cdev, QED_PCI_DEFAULT);
        if (rc) {
                DP_ERR(cdev, "hw prepare failed\n");
                goto err2;
        }

        DP_INFO(cdev, "qed_probe completed successfully\n");

        return cdev;

err2:
        qed_free_pci(cdev);
err1:
        qed_free_cdev(cdev);
err0:
        return NULL;
}

static void qed_remove(struct qed_dev *cdev)
{
        if (!cdev)
                return;

        qed_hw_remove(cdev);

        qed_free_pci(cdev);

        qed_set_power_state(cdev, PCI_D3hot);

        qed_free_cdev(cdev);
}

static void qed_disable_msix(struct qed_dev *cdev)
{
        if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
                pci_disable_msix(cdev->pdev);
                kfree(cdev->int_params.msix_table);
        } else if (cdev->int_params.out.int_mode == QED_INT_MODE_MSI) {
                pci_disable_msi(cdev->pdev);
        }

        memset(&cdev->int_params.out, 0, sizeof(struct qed_int_param));
}

static int qed_enable_msix(struct qed_dev *cdev,
                           struct qed_int_params *int_params)
{
        int i, rc, cnt;

        cnt = int_params->in.num_vectors;

        for (i = 0; i < cnt; i++)
                int_params->msix_table[i].entry = i;

        rc = pci_enable_msix_range(cdev->pdev, int_params->msix_table,
                                   int_params->in.min_msix_cnt, cnt);
        if (rc < cnt && rc >= int_params->in.min_msix_cnt &&
            (rc % cdev->num_hwfns)) {
                pci_disable_msix(cdev->pdev);

                /* If fastpath is initialized, we need at least one interrupt
                 * per hwfn [and the slow path interrupts]. New requested number
                 * should be a multiple of the number of hwfns.
                 */
                cnt = (rc / cdev->num_hwfns) * cdev->num_hwfns;
                DP_NOTICE(cdev,
                          "Trying to enable MSI-X with fewer vectors (%d out of %d)\n",
                          cnt, int_params->in.num_vectors);
                rc = pci_enable_msix_exact(cdev->pdev, int_params->msix_table,
                                           cnt);
                if (!rc)
                        rc = cnt;
        }

        if (rc > 0) {
                /* MSI-X configuration was achieved */
                int_params->out.int_mode = QED_INT_MODE_MSIX;
                int_params->out.num_vectors = rc;
                rc = 0;
        } else {
                DP_NOTICE(cdev,
                          "Failed to enable MSI-X [Requested %d vectors][rc %d]\n",
                          cnt, rc);
        }

        return rc;
}
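
/* Worked example (illustrative): on a two-hwfn CMT adapter, a request for
 * 16 vectors with min_msix_cnt = 4 that is granted only 7 by the PCI core
 * is rounded down to the nearest hwfn multiple, (7 / 2) * 2 = 6, and then
 * retried with pci_enable_msix_exact().
 */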

/* Returns the interrupt mode and the number of enabled MSI-X vectors */
static int qed_set_int_mode(struct qed_dev *cdev, bool force_mode)
{
        struct qed_int_params *int_params = &cdev->int_params;
        struct msix_entry *tbl;
        int rc = 0, cnt;

        switch (int_params->in.int_mode) {
        case QED_INT_MODE_MSIX:
                /* Allocate MSIX table */
                cnt = int_params->in.num_vectors;
                int_params->msix_table = kcalloc(cnt, sizeof(*tbl), GFP_KERNEL);
                if (!int_params->msix_table) {
                        rc = -ENOMEM;
                        goto out;
                }

                /* Enable MSIX */
                rc = qed_enable_msix(cdev, int_params);
                if (!rc)
                        goto out;

                DP_NOTICE(cdev, "Failed to enable MSI-X\n");
                kfree(int_params->msix_table);
                if (force_mode)
                        goto out;
                /* Fallthrough */

        case QED_INT_MODE_MSI:
                if (cdev->num_hwfns == 1) {
                        rc = pci_enable_msi(cdev->pdev);
                        if (!rc) {
                                int_params->out.int_mode = QED_INT_MODE_MSI;
                                goto out;
                        }

                        DP_NOTICE(cdev, "Failed to enable MSI\n");
                        if (force_mode)
                                goto out;
                }
                /* Fallthrough */

        case QED_INT_MODE_INTA:
                int_params->out.int_mode = QED_INT_MODE_INTA;
                rc = 0;
                goto out;
        default:
                DP_NOTICE(cdev, "Unknown int_mode value %d\n",
                          int_params->in.int_mode);
                rc = -EINVAL;
        }

out:
        if (!rc)
                DP_INFO(cdev, "Using %s interrupts\n",
                        int_params->out.int_mode == QED_INT_MODE_INTA ?
                        "INTa" : int_params->out.int_mode == QED_INT_MODE_MSI ?
                        "MSI" : "MSIX");
        cdev->int_coalescing_mode = QED_COAL_MODE_ENABLE;

        return rc;
}

static void qed_simd_handler_config(struct qed_dev *cdev, void *token,
                                    int index, void (*handler)(void *))
{
        struct qed_hwfn *hwfn = &cdev->hwfns[index % cdev->num_hwfns];
        int relative_idx = index / cdev->num_hwfns;

        hwfn->simd_proto_handler[relative_idx].func = handler;
        hwfn->simd_proto_handler[relative_idx].token = token;
}
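
/* Worked example (illustrative): on a two-hwfn CMT device, global index 5
 * lands on hwfn 5 % 2 = 1 with relative index 5 / 2 = 2, so consecutive
 * global indices interleave across the two engines.
 */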

static void qed_simd_handler_clean(struct qed_dev *cdev, int index)
{
        struct qed_hwfn *hwfn = &cdev->hwfns[index % cdev->num_hwfns];
        int relative_idx = index / cdev->num_hwfns;

        memset(&hwfn->simd_proto_handler[relative_idx], 0,
               sizeof(struct qed_simd_fp_handler));
}

static irqreturn_t qed_msix_sp_int(int irq, void *tasklet)
{
        tasklet_schedule((struct tasklet_struct *)tasklet);
        return IRQ_HANDLED;
}

static irqreturn_t qed_single_int(int irq, void *dev_instance)
{
        struct qed_dev *cdev = (struct qed_dev *)dev_instance;
        struct qed_hwfn *hwfn;
        irqreturn_t rc = IRQ_NONE;
        u64 status;
        int i, j;

        for (i = 0; i < cdev->num_hwfns; i++) {
                status = qed_int_igu_read_sisr_reg(&cdev->hwfns[i]);

                if (!status)
                        continue;

                hwfn = &cdev->hwfns[i];

                /* Slowpath interrupt */
                if (unlikely(status & 0x1)) {
                        tasklet_schedule(hwfn->sp_dpc);
                        status &= ~0x1;
                        rc = IRQ_HANDLED;
                }

                /* Fastpath interrupts */
                for (j = 0; j < 64; j++) {
                        if ((0x2ULL << j) & status) {
                                struct qed_simd_fp_handler *p_handler =
                                        &hwfn->simd_proto_handler[j];

                                if (p_handler->func)
                                        p_handler->func(p_handler->token);
                                else
                                        DP_NOTICE(hwfn,
                                                  "Not calling fastpath handler as it is NULL [handler #%d, status 0x%llx]\n",
                                                  j, status);

                                status &= ~(0x2ULL << j);
                                rc = IRQ_HANDLED;
                        }
                }

                if (unlikely(status))
                        DP_VERBOSE(hwfn, NETIF_MSG_INTR,
                                   "got an unknown interrupt status 0x%llx\n",
                                   status);
        }

        return rc;
}
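
/* Worked example (illustrative): a SISR status of 0x5 sets bit 0 (slowpath,
 * scheduling the sp_dpc tasklet) and bit 2 = 0x2ULL << 1 (fastpath handler
 * #1). Each serviced source is cleared from the local copy, so any residue
 * left in 'status' afterwards is reported as an unknown interrupt.
 */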

int qed_slowpath_irq_req(struct qed_hwfn *hwfn)
{
        struct qed_dev *cdev = hwfn->cdev;
        u32 int_mode;
        int rc = 0;
        u8 id;

        int_mode = cdev->int_params.out.int_mode;
        if (int_mode == QED_INT_MODE_MSIX) {
                id = hwfn->my_id;
                snprintf(hwfn->name, NAME_SIZE, "sp-%d-%02x:%02x.%02x",
                         id, cdev->pdev->bus->number,
                         PCI_SLOT(cdev->pdev->devfn), hwfn->abs_pf_id);
                rc = request_irq(cdev->int_params.msix_table[id].vector,
                                 qed_msix_sp_int, 0, hwfn->name, hwfn->sp_dpc);
        } else {
                unsigned long flags = 0;

                snprintf(cdev->name, NAME_SIZE, "%02x:%02x.%02x",
                         cdev->pdev->bus->number, PCI_SLOT(cdev->pdev->devfn),
                         PCI_FUNC(cdev->pdev->devfn));

                if (cdev->int_params.out.int_mode == QED_INT_MODE_INTA)
                        flags |= IRQF_SHARED;

                rc = request_irq(cdev->pdev->irq, qed_single_int,
                                 flags, cdev->name, cdev);
        }

        if (rc)
                DP_NOTICE(cdev, "request_irq failed, rc = %d\n", rc);
        else
                DP_VERBOSE(hwfn, (NETIF_MSG_INTR | QED_MSG_SP),
                           "Requested slowpath %s\n",
                           (int_mode == QED_INT_MODE_MSIX) ? "MSI-X" : "IRQ");

        return rc;
}

static void qed_slowpath_tasklet_flush(struct qed_hwfn *p_hwfn)
{
        /* Calling the disable function will make sure that any
         * currently-running function is completed. The following call to the
         * enable function makes this sequence a flush-like operation.
         */
        if (p_hwfn->b_sp_dpc_enabled) {
                tasklet_disable(p_hwfn->sp_dpc);
                tasklet_enable(p_hwfn->sp_dpc);
        }
}

void qed_slowpath_irq_sync(struct qed_hwfn *p_hwfn)
{
        struct qed_dev *cdev = p_hwfn->cdev;
        u8 id = p_hwfn->my_id;
        u32 int_mode;

        int_mode = cdev->int_params.out.int_mode;
        if (int_mode == QED_INT_MODE_MSIX)
                synchronize_irq(cdev->int_params.msix_table[id].vector);
        else
                synchronize_irq(cdev->pdev->irq);

        qed_slowpath_tasklet_flush(p_hwfn);
}

static void qed_slowpath_irq_free(struct qed_dev *cdev)
{
        int i;

        if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
                for_each_hwfn(cdev, i) {
                        if (!cdev->hwfns[i].b_int_requested)
                                break;
                        synchronize_irq(cdev->int_params.msix_table[i].vector);
                        free_irq(cdev->int_params.msix_table[i].vector,
                                 cdev->hwfns[i].sp_dpc);
                }
        } else {
                if (QED_LEADING_HWFN(cdev)->b_int_requested)
                        free_irq(cdev->pdev->irq, cdev);
        }
        qed_int_disable_post_isr_release(cdev);
}

static int qed_nic_stop(struct qed_dev *cdev)
{
        int i, rc;

        rc = qed_hw_stop(cdev);

        for (i = 0; i < cdev->num_hwfns; i++) {
                struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

                if (p_hwfn->b_sp_dpc_enabled) {
                        tasklet_disable(p_hwfn->sp_dpc);
                        p_hwfn->b_sp_dpc_enabled = false;
                        DP_VERBOSE(cdev, NETIF_MSG_IFDOWN,
                                   "Disabled sp tasklet [hwfn %d] at %p\n",
                                   i, p_hwfn->sp_dpc);
                }
        }

        qed_dbg_pf_exit(cdev);

        return rc;
}

static int qed_nic_setup(struct qed_dev *cdev)
{
        int rc, i;

        /* Determine if interface is going to require LL2 */
        if (QED_LEADING_HWFN(cdev)->hw_info.personality != QED_PCI_ETH) {
                for (i = 0; i < cdev->num_hwfns; i++) {
                        struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

                        p_hwfn->using_ll2 = true;
                }
        }

        rc = qed_resc_alloc(cdev);
        if (rc)
                return rc;

        DP_INFO(cdev, "Allocated qed resources\n");

        qed_resc_setup(cdev);

        return rc;
}

static int qed_set_int_fp(struct qed_dev *cdev, u16 cnt)
{
        int limit = 0;

        /* Mark the fastpath as free/used */
        cdev->int_params.fp_initialized = cnt ? true : false;

        if (cdev->int_params.out.int_mode != QED_INT_MODE_MSIX)
                limit = cdev->num_hwfns * 63;
        else if (cdev->int_params.fp_msix_cnt)
                limit = cdev->int_params.fp_msix_cnt;

        if (!limit)
                return -ENOMEM;

        return min_t(int, cnt, limit);
}
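
/* Worked example (illustrative): in INTA/MSI mode the single-interrupt
 * dispatcher above supports 63 fastpath handlers per hwfn (bit 0 is the
 * slowpath), so a two-hwfn device is capped at 2 * 63 = 126 fastpath
 * vectors; in MSI-X mode the cap is whatever fp_msix_cnt was carved out.
 */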

static int qed_get_int_fp(struct qed_dev *cdev, struct qed_int_info *info)
{
        memset(info, 0, sizeof(struct qed_int_info));

        if (!cdev->int_params.fp_initialized) {
                DP_INFO(cdev,
                        "Protocol driver requested interrupt information, but its support is not yet configured\n");
                return -EINVAL;
        }

        /* Need to expose only MSI-X information; Single IRQ is handled solely
         * by qed.
         */
        if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
                int msix_base = cdev->int_params.fp_msix_base;

                info->msix_cnt = cdev->int_params.fp_msix_cnt;
                info->msix = &cdev->int_params.msix_table[msix_base];
        }

        return 0;
}

static int qed_slowpath_setup_int(struct qed_dev *cdev,
                                  enum qed_int_mode int_mode)
{
        struct qed_sb_cnt_info sb_cnt_info;
        int num_l2_queues = 0;
        int rc;
        int i;

        if ((int_mode == QED_INT_MODE_MSI) && (cdev->num_hwfns > 1)) {
                DP_NOTICE(cdev, "MSI mode is not supported for CMT devices\n");
                return -EINVAL;
        }

        memset(&cdev->int_params, 0, sizeof(struct qed_int_params));
        cdev->int_params.in.int_mode = int_mode;
        for_each_hwfn(cdev, i) {
                memset(&sb_cnt_info, 0, sizeof(sb_cnt_info));
                qed_int_get_num_sbs(&cdev->hwfns[i], &sb_cnt_info);
                cdev->int_params.in.num_vectors += sb_cnt_info.cnt;
                cdev->int_params.in.num_vectors++; /* slowpath */
        }

        /* We want a minimum of one slowpath and one fastpath vector per hwfn */
        cdev->int_params.in.min_msix_cnt = cdev->num_hwfns * 2;

        if (is_kdump_kernel()) {
                DP_INFO(cdev,
                        "Kdump kernel: Limit the max number of requested MSI-X vectors to %hd\n",
                        cdev->int_params.in.min_msix_cnt);
                cdev->int_params.in.num_vectors =
                        cdev->int_params.in.min_msix_cnt;
        }

        rc = qed_set_int_mode(cdev, false);
        if (rc) {
                DP_ERR(cdev, "qed_slowpath_setup_int ERR\n");
                return rc;
        }

        cdev->int_params.fp_msix_base = cdev->num_hwfns;
        cdev->int_params.fp_msix_cnt = cdev->int_params.out.num_vectors -
                                       cdev->num_hwfns;

        if (!IS_ENABLED(CONFIG_QED_RDMA) ||
            !QED_IS_RDMA_PERSONALITY(QED_LEADING_HWFN(cdev)))
                return 0;

        for_each_hwfn(cdev, i)
                num_l2_queues += FEAT_NUM(&cdev->hwfns[i], QED_PF_L2_QUE);

        DP_VERBOSE(cdev, QED_MSG_RDMA,
                   "cdev->int_params.fp_msix_cnt=%d num_l2_queues=%d\n",
                   cdev->int_params.fp_msix_cnt, num_l2_queues);

        if (cdev->int_params.fp_msix_cnt > num_l2_queues) {
                cdev->int_params.rdma_msix_cnt =
                        (cdev->int_params.fp_msix_cnt - num_l2_queues)
                        / cdev->num_hwfns;
                cdev->int_params.rdma_msix_base =
                        cdev->int_params.fp_msix_base + num_l2_queues;
                cdev->int_params.fp_msix_cnt = num_l2_queues;
        } else {
                cdev->int_params.rdma_msix_cnt = 0;
        }

        DP_VERBOSE(cdev, QED_MSG_RDMA, "roce_msix_cnt=%d roce_msix_base=%d\n",
                   cdev->int_params.rdma_msix_cnt,
                   cdev->int_params.rdma_msix_base);

        return 0;
}
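
/* Worked example (illustrative): a two-hwfn RDMA-personality device granted
 * 32 vectors reserves vectors 0-1 for slowpath (fp_msix_base = 2,
 * fp_msix_cnt = 30). With 16 L2 queues overall, the RDMA carve-out becomes
 * (30 - 16) / 2 = 7 vectors per hwfn starting at index 2 + 16 = 18, and
 * fp_msix_cnt is trimmed back to 16.
 */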

static int qed_slowpath_vf_setup_int(struct qed_dev *cdev)
{
        int rc;

        memset(&cdev->int_params, 0, sizeof(struct qed_int_params));
        cdev->int_params.in.int_mode = QED_INT_MODE_MSIX;

        qed_vf_get_num_rxqs(QED_LEADING_HWFN(cdev),
                            &cdev->int_params.in.num_vectors);
        if (cdev->num_hwfns > 1) {
                u8 vectors = 0;

                qed_vf_get_num_rxqs(&cdev->hwfns[1], &vectors);
                cdev->int_params.in.num_vectors += vectors;
        }

        /* We want a minimum of one fastpath vector per vf hwfn */
        cdev->int_params.in.min_msix_cnt = cdev->num_hwfns;

        rc = qed_set_int_mode(cdev, true);
        if (rc)
                return rc;

        cdev->int_params.fp_msix_base = 0;
        cdev->int_params.fp_msix_cnt = cdev->int_params.out.num_vectors;

        return 0;
}

u32 qed_unzip_data(struct qed_hwfn *p_hwfn, u32 input_len,
                   u8 *input_buf, u32 max_size, u8 *unzip_buf)
{
        int rc;

        p_hwfn->stream->next_in = input_buf;
        p_hwfn->stream->avail_in = input_len;
        p_hwfn->stream->next_out = unzip_buf;
        p_hwfn->stream->avail_out = max_size;

        rc = zlib_inflateInit2(p_hwfn->stream, MAX_WBITS);

        if (rc != Z_OK) {
                DP_VERBOSE(p_hwfn, NETIF_MSG_DRV, "zlib init failed, rc = %d\n",
                           rc);
                return 0;
        }

        rc = zlib_inflate(p_hwfn->stream, Z_FINISH);
        zlib_inflateEnd(p_hwfn->stream);

        if (rc != Z_OK && rc != Z_STREAM_END) {
                DP_VERBOSE(p_hwfn, NETIF_MSG_DRV, "FW unzip error: %s, rc=%d\n",
                           p_hwfn->stream->msg, rc);
                return 0;
        }

        return p_hwfn->stream->total_out / 4;
}
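
/* Illustrative caller sketch (hypothetical, not part of the driver): the
 * return value is a dword count (total_out / 4), so zero signals failure
 * and a byte-oriented consumer must scale back up by four.
 */
#if 0
static int qed_unzip_example(struct qed_hwfn *p_hwfn, u8 *zipped, u32 zipped_len)
{
        u32 max_size = 0x10000; /* assumed upper bound on unzipped size */
        u8 *buf = vzalloc(max_size);
        u32 dwords;

        if (!buf)
                return -ENOMEM;

        dwords = qed_unzip_data(p_hwfn, zipped_len, zipped, max_size, buf);
        if (!dwords) {
                vfree(buf);
                return -EINVAL;
        }

        /* ... consume dwords * 4 bytes from buf ... */
        vfree(buf);
        return 0;
}
#endif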

static int qed_alloc_stream_mem(struct qed_dev *cdev)
{
        int i;
        void *workspace;

        for_each_hwfn(cdev, i) {
                struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

                p_hwfn->stream = kzalloc(sizeof(*p_hwfn->stream), GFP_KERNEL);
                if (!p_hwfn->stream)
                        return -ENOMEM;

                workspace = vzalloc(zlib_inflate_workspacesize());
                if (!workspace)
                        return -ENOMEM;
                p_hwfn->stream->workspace = workspace;
        }

        return 0;
}

static void qed_free_stream_mem(struct qed_dev *cdev)
{
        int i;

        for_each_hwfn(cdev, i) {
                struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

                if (!p_hwfn->stream)
                        return;

                vfree(p_hwfn->stream->workspace);
                kfree(p_hwfn->stream);
        }
}

static void qed_update_pf_params(struct qed_dev *cdev,
                                 struct qed_pf_params *params)
{
        int i;

        if (IS_ENABLED(CONFIG_QED_RDMA)) {
                params->rdma_pf_params.num_qps = QED_ROCE_QPS;
                params->rdma_pf_params.min_dpis = QED_ROCE_DPIS;
                params->rdma_pf_params.num_srqs = QED_RDMA_SRQS;
                /* divide by 3 the MRs to avoid MF ILT overflow */
                params->rdma_pf_params.gl_pi = QED_ROCE_PROTOCOL_INDEX;
        }

        if (cdev->num_hwfns > 1 || IS_VF(cdev))
                params->eth_pf_params.num_arfs_filters = 0;

        /* In case we might support RDMA, don't allow qede to be greedy
         * with the L2 contexts. Allow for 64 queues [rx, tx cos, xdp]
         * per hwfn.
         */
        if (QED_IS_RDMA_PERSONALITY(QED_LEADING_HWFN(cdev))) {
                u16 *num_cons;

                num_cons = &params->eth_pf_params.num_cons;
                *num_cons = min_t(u16, *num_cons, QED_MAX_L2_CONS);
        }

        for (i = 0; i < cdev->num_hwfns; i++) {
                struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

                p_hwfn->pf_params = *params;
        }
}

#define QED_PERIODIC_DB_REC_COUNT               10
#define QED_PERIODIC_DB_REC_INTERVAL_MS         100
#define QED_PERIODIC_DB_REC_INTERVAL \
        msecs_to_jiffies(QED_PERIODIC_DB_REC_INTERVAL_MS)
#define QED_PERIODIC_DB_REC_WAIT_COUNT          10
#define QED_PERIODIC_DB_REC_WAIT_INTERVAL \
        (QED_PERIODIC_DB_REC_INTERVAL_MS / QED_PERIODIC_DB_REC_WAIT_COUNT)
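
/* Worked arithmetic (illustrative): recovery runs are spaced 100 ms apart
 * and rearmed up to 10 times, so a full periodic cycle spans roughly one
 * second; the stop path polls in 100 / 10 = 10 ms steps, waiting at most
 * about one interval for the last run to clear its flag.
 */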

static int qed_slowpath_delayed_work(struct qed_hwfn *hwfn,
                                     enum qed_slowpath_wq_flag wq_flag,
                                     unsigned long delay)
{
        if (!hwfn->slowpath_wq_active)
                return -EINVAL;

        /* Memory barrier for setting atomic bit */
        smp_mb__before_atomic();
        set_bit(wq_flag, &hwfn->slowpath_task_flags);
        smp_mb__after_atomic();
        queue_delayed_work(hwfn->slowpath_wq, &hwfn->slowpath_task, delay);

        return 0;
}

void qed_periodic_db_rec_start(struct qed_hwfn *p_hwfn)
{
        /* Reset periodic Doorbell Recovery counter */
        p_hwfn->periodic_db_rec_count = QED_PERIODIC_DB_REC_COUNT;

        /* Don't schedule periodic Doorbell Recovery if already scheduled */
        if (test_bit(QED_SLOWPATH_PERIODIC_DB_REC,
                     &p_hwfn->slowpath_task_flags))
                return;

        qed_slowpath_delayed_work(p_hwfn, QED_SLOWPATH_PERIODIC_DB_REC,
                                  QED_PERIODIC_DB_REC_INTERVAL);
}

static void qed_slowpath_wq_stop(struct qed_dev *cdev)
{
        int i, sleep_count = QED_PERIODIC_DB_REC_WAIT_COUNT;

        if (IS_VF(cdev))
                return;

        for_each_hwfn(cdev, i) {
                if (!cdev->hwfns[i].slowpath_wq)
                        continue;

                /* Stop queuing new delayed works */
                cdev->hwfns[i].slowpath_wq_active = false;

                /* Wait until the last periodic doorbell recovery is executed */
                while (test_bit(QED_SLOWPATH_PERIODIC_DB_REC,
                                &cdev->hwfns[i].slowpath_task_flags) &&
                       sleep_count--)
                        msleep(QED_PERIODIC_DB_REC_WAIT_INTERVAL);

                flush_workqueue(cdev->hwfns[i].slowpath_wq);
                destroy_workqueue(cdev->hwfns[i].slowpath_wq);
        }
}

static void qed_slowpath_task(struct work_struct *work)
{
        struct qed_hwfn *hwfn = container_of(work, struct qed_hwfn,
                                             slowpath_task.work);
        struct qed_ptt *ptt = qed_ptt_acquire(hwfn);

        if (!ptt) {
                if (hwfn->slowpath_wq_active)
                        queue_delayed_work(hwfn->slowpath_wq,
                                           &hwfn->slowpath_task, 0);

                return;
        }

        if (test_and_clear_bit(QED_SLOWPATH_MFW_TLV_REQ,
                               &hwfn->slowpath_task_flags))
                qed_mfw_process_tlv_req(hwfn, ptt);

        if (test_and_clear_bit(QED_SLOWPATH_PERIODIC_DB_REC,
                               &hwfn->slowpath_task_flags)) {
                qed_db_rec_handler(hwfn, ptt);
                if (hwfn->periodic_db_rec_count--)
                        qed_slowpath_delayed_work(hwfn,
                                                  QED_SLOWPATH_PERIODIC_DB_REC,
                                                  QED_PERIODIC_DB_REC_INTERVAL);
        }

        qed_ptt_release(hwfn, ptt);
}

static int qed_slowpath_wq_start(struct qed_dev *cdev)
{
        struct qed_hwfn *hwfn;
        char name[NAME_SIZE];
        int i;

        if (IS_VF(cdev))
                return 0;

        for_each_hwfn(cdev, i) {
                hwfn = &cdev->hwfns[i];

                snprintf(name, NAME_SIZE, "slowpath-%02x:%02x.%02x",
                         cdev->pdev->bus->number,
                         PCI_SLOT(cdev->pdev->devfn), hwfn->abs_pf_id);

                hwfn->slowpath_wq = alloc_workqueue(name, 0, 0);
                if (!hwfn->slowpath_wq) {
                        DP_NOTICE(hwfn, "Cannot create slowpath workqueue\n");
                        return -ENOMEM;
                }

                INIT_DELAYED_WORK(&hwfn->slowpath_task, qed_slowpath_task);
                hwfn->slowpath_wq_active = true;
        }

        return 0;
}

static int qed_slowpath_start(struct qed_dev *cdev,
                              struct qed_slowpath_params *params)
{
        struct qed_drv_load_params drv_load_params;
        struct qed_hw_init_params hw_init_params;
        struct qed_mcp_drv_version drv_version;
        struct qed_tunnel_info tunn_info;
        const u8 *data = NULL;
        struct qed_hwfn *hwfn;
        struct qed_ptt *p_ptt;
        int rc = -EINVAL;

        if (qed_iov_wq_start(cdev))
                goto err;

        if (qed_slowpath_wq_start(cdev))
                goto err;

        if (IS_PF(cdev)) {
                rc = request_firmware(&cdev->firmware, QED_FW_FILE_NAME,
                                      &cdev->pdev->dev);
                if (rc) {
                        DP_NOTICE(cdev,
                                  "Failed to find fw file - /lib/firmware/%s\n",
                                  QED_FW_FILE_NAME);
                        goto err;
                }

                if (cdev->num_hwfns == 1) {
                        p_ptt = qed_ptt_acquire(QED_LEADING_HWFN(cdev));
                        if (p_ptt) {
                                QED_LEADING_HWFN(cdev)->p_arfs_ptt = p_ptt;
                        } else {
                                DP_NOTICE(cdev,
                                          "Failed to acquire PTT for aRFS\n");
                                goto err;
                        }
                }
        }

        cdev->rx_coalesce_usecs = QED_DEFAULT_RX_USECS;
        rc = qed_nic_setup(cdev);
        if (rc)
                goto err;

        if (IS_PF(cdev))
                rc = qed_slowpath_setup_int(cdev, params->int_mode);
        else
                rc = qed_slowpath_vf_setup_int(cdev);
        if (rc)
                goto err1;

        if (IS_PF(cdev)) {
                /* Allocate stream for unzipping */
                rc = qed_alloc_stream_mem(cdev);
                if (rc)
                        goto err2;

                /* First Dword used to differentiate between various sources */
                data = cdev->firmware->data + sizeof(u32);

                qed_dbg_pf_init(cdev);
        }

        /* Start the slowpath */
        memset(&hw_init_params, 0, sizeof(hw_init_params));
        memset(&tunn_info, 0, sizeof(tunn_info));
        tunn_info.vxlan.b_mode_enabled = true;
        tunn_info.l2_gre.b_mode_enabled = true;
        tunn_info.ip_gre.b_mode_enabled = true;
        tunn_info.l2_geneve.b_mode_enabled = true;
        tunn_info.ip_geneve.b_mode_enabled = true;
        tunn_info.vxlan.tun_cls = QED_TUNN_CLSS_MAC_VLAN;
        tunn_info.l2_gre.tun_cls = QED_TUNN_CLSS_MAC_VLAN;
        tunn_info.ip_gre.tun_cls = QED_TUNN_CLSS_MAC_VLAN;
        tunn_info.l2_geneve.tun_cls = QED_TUNN_CLSS_MAC_VLAN;
        tunn_info.ip_geneve.tun_cls = QED_TUNN_CLSS_MAC_VLAN;
        hw_init_params.p_tunn = &tunn_info;
        hw_init_params.b_hw_start = true;
        hw_init_params.int_mode = cdev->int_params.out.int_mode;
        hw_init_params.allow_npar_tx_switch = true;
        hw_init_params.bin_fw_data = data;

        memset(&drv_load_params, 0, sizeof(drv_load_params));
        drv_load_params.is_crash_kernel = is_kdump_kernel();
        drv_load_params.mfw_timeout_val = QED_LOAD_REQ_LOCK_TO_DEFAULT;
        drv_load_params.avoid_eng_reset = false;
        drv_load_params.override_force_load = QED_OVERRIDE_FORCE_LOAD_NONE;
        hw_init_params.p_drv_load_params = &drv_load_params;

        rc = qed_hw_init(cdev, &hw_init_params);
        if (rc)
                goto err2;

        DP_INFO(cdev,
                "HW initialization and function start completed successfully\n");

        if (IS_PF(cdev)) {
                cdev->tunn_feature_mask = (BIT(QED_MODE_VXLAN_TUNN) |
                                           BIT(QED_MODE_L2GENEVE_TUNN) |
                                           BIT(QED_MODE_IPGENEVE_TUNN) |
                                           BIT(QED_MODE_L2GRE_TUNN) |
                                           BIT(QED_MODE_IPGRE_TUNN));
        }

        /* Allocate LL2 interface if needed */
        if (QED_LEADING_HWFN(cdev)->using_ll2) {
                rc = qed_ll2_alloc_if(cdev);
                if (rc)
                        goto err3;
        }
        if (IS_PF(cdev)) {
                hwfn = QED_LEADING_HWFN(cdev);
                drv_version.version = (params->drv_major << 24) |
                                      (params->drv_minor << 16) |
                                      (params->drv_rev << 8) |
                                      (params->drv_eng);
                strlcpy(drv_version.name, params->name,
                        MCP_DRV_VER_STR_SIZE - 4);
                rc = qed_mcp_send_drv_version(hwfn, hwfn->p_main_ptt,
                                              &drv_version);
                if (rc) {
                        DP_NOTICE(cdev, "Failed sending drv version command\n");
                        return rc;
                }
        }

        qed_reset_vport_stats(cdev);

        return 0;

err3:
        qed_hw_stop(cdev);
err2:
        qed_hw_timers_stop_all(cdev);
        if (IS_PF(cdev))
                qed_slowpath_irq_free(cdev);
        qed_free_stream_mem(cdev);
        qed_disable_msix(cdev);
err1:
        qed_resc_free(cdev);
err:
        if (IS_PF(cdev))
                release_firmware(cdev->firmware);

        if (IS_PF(cdev) && (cdev->num_hwfns == 1) &&
            QED_LEADING_HWFN(cdev)->p_arfs_ptt)
                qed_ptt_release(QED_LEADING_HWFN(cdev),
                                QED_LEADING_HWFN(cdev)->p_arfs_ptt);

        qed_iov_wq_stop(cdev, false);

        qed_slowpath_wq_stop(cdev);

        return rc;
}

static int qed_slowpath_stop(struct qed_dev *cdev)
{
        if (!cdev)
                return -ENODEV;

        qed_slowpath_wq_stop(cdev);

        qed_ll2_dealloc_if(cdev);

        if (IS_PF(cdev)) {
                if (cdev->num_hwfns == 1)
                        qed_ptt_release(QED_LEADING_HWFN(cdev),
                                        QED_LEADING_HWFN(cdev)->p_arfs_ptt);
                qed_free_stream_mem(cdev);
                if (IS_QED_ETH_IF(cdev))
                        qed_sriov_disable(cdev, true);
        }

        qed_nic_stop(cdev);

        if (IS_PF(cdev))
                qed_slowpath_irq_free(cdev);

        qed_disable_msix(cdev);

        qed_resc_free(cdev);

        qed_iov_wq_stop(cdev, true);

        if (IS_PF(cdev))
                release_firmware(cdev->firmware);

        return 0;
}

static void qed_set_name(struct qed_dev *cdev, char name[NAME_SIZE])
{
        int i;

        memcpy(cdev->name, name, NAME_SIZE);
        for_each_hwfn(cdev, i)
                snprintf(cdev->hwfns[i].name, NAME_SIZE, "%s-%d", name, i);
}

static u32 qed_sb_init(struct qed_dev *cdev,
                       struct qed_sb_info *sb_info,
                       void *sb_virt_addr,
                       dma_addr_t sb_phy_addr, u16 sb_id,
                       enum qed_sb_type type)
{
        struct qed_hwfn *p_hwfn;
        struct qed_ptt *p_ptt;
        int hwfn_index;
        u16 rel_sb_id;
        u8 n_hwfns;
        u32 rc;

        /* RoCE uses single engine and CMT uses two engines. When using both
         * we force only a single engine. Storage uses only engine 0 too.
         */
        if (type == QED_SB_TYPE_L2_QUEUE)
                n_hwfns = cdev->num_hwfns;
        else
                n_hwfns = 1;

        hwfn_index = sb_id % n_hwfns;
        p_hwfn = &cdev->hwfns[hwfn_index];
        rel_sb_id = sb_id / n_hwfns;

        DP_VERBOSE(cdev, NETIF_MSG_INTR,
                   "hwfn [%d] <--[init]-- SB %04x [0x%04x upper]\n",
                   hwfn_index, rel_sb_id, sb_id);

        if (IS_PF(p_hwfn->cdev)) {
                p_ptt = qed_ptt_acquire(p_hwfn);
                if (!p_ptt)
                        return -EBUSY;

                rc = qed_int_sb_init(p_hwfn, p_ptt, sb_info, sb_virt_addr,
                                     sb_phy_addr, rel_sb_id);
                qed_ptt_release(p_hwfn, p_ptt);
        } else {
                rc = qed_int_sb_init(p_hwfn, NULL, sb_info, sb_virt_addr,
                                     sb_phy_addr, rel_sb_id);
        }

        return rc;
}
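
/* Worked example (illustrative): on a two-hwfn device, L2 status block 7
 * maps to hwfn 7 % 2 = 1 with relative SB 7 / 2 = 3, while storage and
 * RoCE force n_hwfns = 1 so every sb_id stays on engine 0 with
 * rel_sb_id == sb_id.
 */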

static u32 qed_sb_release(struct qed_dev *cdev,
                          struct qed_sb_info *sb_info, u16 sb_id)
{
        struct qed_hwfn *p_hwfn;
        int hwfn_index;
        u16 rel_sb_id;
        u32 rc;

        hwfn_index = sb_id % cdev->num_hwfns;
        p_hwfn = &cdev->hwfns[hwfn_index];
        rel_sb_id = sb_id / cdev->num_hwfns;

        DP_VERBOSE(cdev, NETIF_MSG_INTR,
                   "hwfn [%d] <--[init]-- SB %04x [0x%04x upper]\n",
                   hwfn_index, rel_sb_id, sb_id);

        rc = qed_int_sb_release(p_hwfn, sb_info, rel_sb_id);

        return rc;
}

static bool qed_can_link_change(struct qed_dev *cdev)
{
        return true;
}

static int qed_set_link(struct qed_dev *cdev, struct qed_link_params *params)
{
        struct qed_hwfn *hwfn;
        struct qed_mcp_link_params *link_params;
        struct qed_ptt *ptt;
        u32 sup_caps;
        int rc;

        if (!cdev)
                return -ENODEV;

        /* The link should be set only once per PF */
        hwfn = &cdev->hwfns[0];

        /* When a VF wants to set the link, force it to read the bulletin
         * instead. This mimics the PF behavior, where a notification [both
         * immediate and possibly later] would be generated when changing
         * properties.
         */
1385        if (IS_VF(cdev)) {
1386                qed_schedule_iov(hwfn, QED_IOV_WQ_VF_FORCE_LINK_QUERY_FLAG);
1387                return 0;
1388        }
1389
1390        ptt = qed_ptt_acquire(hwfn);
1391        if (!ptt)
1392                return -EBUSY;
1393
1394        link_params = qed_mcp_get_link_params(hwfn);
1395        if (params->override_flags & QED_LINK_OVERRIDE_SPEED_AUTONEG)
1396                link_params->speed.autoneg = params->autoneg;
1397        if (params->override_flags & QED_LINK_OVERRIDE_SPEED_ADV_SPEEDS) {
1398                link_params->speed.advertised_speeds = 0;
1399                sup_caps = QED_LM_1000baseT_Full_BIT |
1400                           QED_LM_1000baseKX_Full_BIT |
1401                           QED_LM_1000baseX_Full_BIT;
1402                if (params->adv_speeds & sup_caps)
1403                        link_params->speed.advertised_speeds |=
1404                            NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G;
1405                sup_caps = QED_LM_10000baseT_Full_BIT |
1406                           QED_LM_10000baseKR_Full_BIT |
1407                           QED_LM_10000baseKX4_Full_BIT |
1408                           QED_LM_10000baseR_FEC_BIT |
1409                           QED_LM_10000baseCR_Full_BIT |
1410                           QED_LM_10000baseSR_Full_BIT |
1411                           QED_LM_10000baseLR_Full_BIT |
1412                           QED_LM_10000baseLRM_Full_BIT;
1413                if (params->adv_speeds & sup_caps)
1414                        link_params->speed.advertised_speeds |=
1415                            NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G;
1416                if (params->adv_speeds & QED_LM_20000baseKR2_Full_BIT)
1417                        link_params->speed.advertised_speeds |=
1418                                NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G;
1419                sup_caps = QED_LM_25000baseKR_Full_BIT |
1420                           QED_LM_25000baseCR_Full_BIT |
1421                           QED_LM_25000baseSR_Full_BIT;
1422                if (params->adv_speeds & sup_caps)
1423                        link_params->speed.advertised_speeds |=
1424                            NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G;
1425                sup_caps = QED_LM_40000baseLR4_Full_BIT |
1426                           QED_LM_40000baseKR4_Full_BIT |
1427                           QED_LM_40000baseCR4_Full_BIT |
1428                           QED_LM_40000baseSR4_Full_BIT;
1429                if (params->adv_speeds & sup_caps)
1430                        link_params->speed.advertised_speeds |=
1431                                NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G;
1432                sup_caps = QED_LM_50000baseKR2_Full_BIT |
1433                           QED_LM_50000baseCR2_Full_BIT |
1434                           QED_LM_50000baseSR2_Full_BIT;
1435                if (params->adv_speeds & sup_caps)
1436                        link_params->speed.advertised_speeds |=
1437                            NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G;
1438                sup_caps = QED_LM_100000baseKR4_Full_BIT |
1439                           QED_LM_100000baseSR4_Full_BIT |
1440                           QED_LM_100000baseCR4_Full_BIT |
1441                           QED_LM_100000baseLR4_ER4_Full_BIT;
1442                if (params->adv_speeds & sup_caps)
1443                        link_params->speed.advertised_speeds |=
1444                            NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G;
1445        }
1446        if (params->override_flags & QED_LINK_OVERRIDE_SPEED_FORCED_SPEED)
1447                link_params->speed.forced_speed = params->forced_speed;
1448        if (params->override_flags & QED_LINK_OVERRIDE_PAUSE_CONFIG) {
1449                if (params->pause_config & QED_LINK_PAUSE_AUTONEG_ENABLE)
1450                        link_params->pause.autoneg = true;
1451                else
1452                        link_params->pause.autoneg = false;
1453                if (params->pause_config & QED_LINK_PAUSE_RX_ENABLE)
1454                        link_params->pause.forced_rx = true;
1455                else
1456                        link_params->pause.forced_rx = false;
1457                if (params->pause_config & QED_LINK_PAUSE_TX_ENABLE)
1458                        link_params->pause.forced_tx = true;
1459                else
1460                        link_params->pause.forced_tx = false;
1461        }
1462        if (params->override_flags & QED_LINK_OVERRIDE_LOOPBACK_MODE) {
1463                switch (params->loopback_mode) {
1464                case QED_LINK_LOOPBACK_INT_PHY:
1465                        link_params->loopback_mode = ETH_LOOPBACK_INT_PHY;
1466                        break;
1467                case QED_LINK_LOOPBACK_EXT_PHY:
1468                        link_params->loopback_mode = ETH_LOOPBACK_EXT_PHY;
1469                        break;
1470                case QED_LINK_LOOPBACK_EXT:
1471                        link_params->loopback_mode = ETH_LOOPBACK_EXT;
1472                        break;
1473                case QED_LINK_LOOPBACK_MAC:
1474                        link_params->loopback_mode = ETH_LOOPBACK_MAC;
1475                        break;
1476                default:
1477                        link_params->loopback_mode = ETH_LOOPBACK_NONE;
1478                        break;
1479                }
1480        }
1481
1482        if (params->override_flags & QED_LINK_OVERRIDE_EEE_CONFIG)
1483                memcpy(&link_params->eee, &params->eee,
1484                       sizeof(link_params->eee));
1485
1486        rc = qed_mcp_set_link(hwfn, ptt, params->link_up);
1487
1488        qed_ptt_release(hwfn, ptt);
1489
1490        return rc;
1491}
1492
1493static int qed_get_port_type(u32 media_type)
1494{
1495        int port_type;
1496
1497        switch (media_type) {
1498        case MEDIA_SFPP_10G_FIBER:
1499        case MEDIA_SFP_1G_FIBER:
1500        case MEDIA_XFP_FIBER:
1501        case MEDIA_MODULE_FIBER:
1502        case MEDIA_KR:
1503                port_type = PORT_FIBRE;
1504                break;
1505        case MEDIA_DA_TWINAX:
1506                port_type = PORT_DA;
1507                break;
1508        case MEDIA_BASE_T:
1509                port_type = PORT_TP;
1510                break;
1511        case MEDIA_NOT_PRESENT:
1512                port_type = PORT_NONE;
1513                break;
1514        case MEDIA_UNSPECIFIED:
1515        default:
1516                port_type = PORT_OTHER;
1517                break;
1518        }
1519        return port_type;
1520}
1521
1522static int qed_get_link_data(struct qed_hwfn *hwfn,
1523                             struct qed_mcp_link_params *params,
1524                             struct qed_mcp_link_state *link,
1525                             struct qed_mcp_link_capabilities *link_caps)
1526{
1527        void *p;
1528
1529        if (!IS_PF(hwfn->cdev)) {
1530                qed_vf_get_link_params(hwfn, params);
1531                qed_vf_get_link_state(hwfn, link);
1532                qed_vf_get_link_caps(hwfn, link_caps);
1533
1534                return 0;
1535        }
1536
1537        p = qed_mcp_get_link_params(hwfn);
1538        if (!p)
1539                return -ENXIO;
1540        memcpy(params, p, sizeof(*params));
1541
1542        p = qed_mcp_get_link_state(hwfn);
1543        if (!p)
1544                return -ENXIO;
1545        memcpy(link, p, sizeof(*link));
1546
1547        p = qed_mcp_get_link_capabilities(hwfn);
1548        if (!p)
1549                return -ENXIO;
1550        memcpy(link_caps, p, sizeof(*link_caps));
1551
1552        return 0;
1553}
1554
1555static void qed_fill_link_capability(struct qed_hwfn *hwfn,
1556                                     struct qed_ptt *ptt, u32 capability,
1557                                     u32 *if_capability)
1558{
1559        u32 media_type, tcvr_state, tcvr_type;
1560        u32 speed_mask, board_cfg;
1561
1562        if (qed_mcp_get_media_type(hwfn, ptt, &media_type))
1563                media_type = MEDIA_UNSPECIFIED;
1564
1565        if (qed_mcp_get_transceiver_data(hwfn, ptt, &tcvr_state, &tcvr_type)) {
1566                /* Both outputs are consumed below; on failure treat the
                 * transceiver as unplugged/absent.
                 */
                tcvr_state = ETH_TRANSCEIVER_STATE_UNPLUGGED;
                tcvr_type = ETH_TRANSCEIVER_TYPE_NONE;
        }
1567
1568        if (qed_mcp_trans_speed_mask(hwfn, ptt, &speed_mask))
1569                speed_mask = 0xFFFFFFFF;
1570
1571        if (qed_mcp_get_board_config(hwfn, ptt, &board_cfg))
1572                board_cfg = NVM_CFG1_PORT_PORT_TYPE_UNDEFINED;
1573
1574        DP_VERBOSE(hwfn->cdev, NETIF_MSG_DRV,
1575                   "Media_type = 0x%x tcvr_state = 0x%x tcvr_type = 0x%x speed_mask = 0x%x board_cfg = 0x%x\n",
1576                   media_type, tcvr_state, tcvr_type, speed_mask, board_cfg);
1577
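        /* Worked example (illustrative, not from the driver): for a 25G DAC
         * cable, media_type is MEDIA_DA_TWINAX; once the NVM capability word
         * is gated by the transceiver's speed_mask, typically only the 25G
         * bit survives, so the switch below reports
         * QED_LM_25000baseCR_Full_BIT to ethtool.
         */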
1578        switch (media_type) {
1579        case MEDIA_DA_TWINAX:
1580                if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G)
1581                        *if_capability |= QED_LM_20000baseKR2_Full_BIT;
1582                /* For DAC media, multiple speed capabilities are supported */
1583                capability = capability & speed_mask;
1584                if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G)
1585                        *if_capability |= QED_LM_1000baseKX_Full_BIT;
1586                if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G)
1587                        *if_capability |= QED_LM_10000baseCR_Full_BIT;
1588                if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G)
1589                        *if_capability |= QED_LM_40000baseCR4_Full_BIT;
1590                if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G)
1591                        *if_capability |= QED_LM_25000baseCR_Full_BIT;
1592                if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G)
1593                        *if_capability |= QED_LM_50000baseCR2_Full_BIT;
1594                if (capability &
1595                    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G)
1596                        *if_capability |= QED_LM_100000baseCR4_Full_BIT;
1597                break;
1598        case MEDIA_BASE_T:
1599                if (board_cfg & NVM_CFG1_PORT_PORT_TYPE_EXT_PHY) {
1600                        if (capability &
1601                            NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G) {
1602                                *if_capability |= QED_LM_1000baseT_Full_BIT;
1603                        }
1604                        if (capability &
1605                            NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G) {
1606                                *if_capability |= QED_LM_10000baseT_Full_BIT;
1607                        }
1608                }
1609                if (board_cfg & NVM_CFG1_PORT_PORT_TYPE_MODULE) {
1610                        if (tcvr_type == ETH_TRANSCEIVER_TYPE_1000BASET)
1611                                *if_capability |= QED_LM_1000baseT_Full_BIT;
1612                        if (tcvr_type == ETH_TRANSCEIVER_TYPE_10G_BASET)
1613                                *if_capability |= QED_LM_10000baseT_Full_BIT;
1614                }
1615                break;
1616        case MEDIA_SFP_1G_FIBER:
1617        case MEDIA_SFPP_10G_FIBER:
1618        case MEDIA_XFP_FIBER:
1619        case MEDIA_MODULE_FIBER:
1620                if (capability &
1621                    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G) {
1622                        if ((tcvr_type == ETH_TRANSCEIVER_TYPE_1G_LX) ||
1623                            (tcvr_type == ETH_TRANSCEIVER_TYPE_1G_SX))
1624                                *if_capability |= QED_LM_1000baseKX_Full_BIT;
1625                }
1626                if (capability &
1627                    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G) {
1628                        if (tcvr_type == ETH_TRANSCEIVER_TYPE_10G_SR)
1629                                *if_capability |= QED_LM_10000baseSR_Full_BIT;
1630                        if (tcvr_type == ETH_TRANSCEIVER_TYPE_10G_LR)
1631                                *if_capability |= QED_LM_10000baseLR_Full_BIT;
1632                        if (tcvr_type == ETH_TRANSCEIVER_TYPE_10G_LRM)
1633                                *if_capability |= QED_LM_10000baseLRM_Full_BIT;
1634                        if (tcvr_type == ETH_TRANSCEIVER_TYPE_10G_ER)
1635                                *if_capability |= QED_LM_10000baseR_FEC_BIT;
1636                }
1637                if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G)
1638                        *if_capability |= QED_LM_20000baseKR2_Full_BIT;
1639                if (capability &
1640                    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G) {
1641                        if (tcvr_type == ETH_TRANSCEIVER_TYPE_25G_SR)
1642                                *if_capability |= QED_LM_25000baseSR_Full_BIT;
1643                }
1644                if (capability &
1645                    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G) {
1646                        if (tcvr_type == ETH_TRANSCEIVER_TYPE_40G_LR4)
1647                                *if_capability |= QED_LM_40000baseLR4_Full_BIT;
1648                        if (tcvr_type == ETH_TRANSCEIVER_TYPE_40G_SR4)
1649                                *if_capability |= QED_LM_40000baseSR4_Full_BIT;
1650                }
1651                if (capability &
1652                    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G)
1653                        *if_capability |= QED_LM_50000baseKR2_Full_BIT;
1654                if (capability &
1655                    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G) {
1656                        if (tcvr_type == ETH_TRANSCEIVER_TYPE_100G_SR4)
1657                                *if_capability |= QED_LM_100000baseSR4_Full_BIT;
1658                }
1659
1660                break;
1661        case MEDIA_KR:
1662                if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G)
1663                        *if_capability |= QED_LM_20000baseKR2_Full_BIT;
1664                if (capability &
1665                    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G)
1666                        *if_capability |= QED_LM_1000baseKX_Full_BIT;
1667                if (capability &
1668                    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G)
1669                        *if_capability |= QED_LM_10000baseKR_Full_BIT;
1670                if (capability &
1671                    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G)
1672                        *if_capability |= QED_LM_25000baseKR_Full_BIT;
1673                if (capability &
1674                    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G)
1675                        *if_capability |= QED_LM_40000baseKR4_Full_BIT;
1676                if (capability &
1677                    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G)
1678                        *if_capability |= QED_LM_50000baseKR2_Full_BIT;
1679                if (capability &
1680                    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G)
1681                        *if_capability |= QED_LM_100000baseKR4_Full_BIT;
1682                break;
1683        case MEDIA_UNSPECIFIED:
1684        case MEDIA_NOT_PRESENT:
1685                DP_VERBOSE(hwfn->cdev, QED_MSG_DEBUG,
1686                           "Unknown media and transceiver type\n");
1687                break;
1688        }
1689}
1690
1691static void qed_fill_link(struct qed_hwfn *hwfn,
1692                          struct qed_ptt *ptt,
1693                          struct qed_link_output *if_link)
1694{
1695        struct qed_mcp_link_capabilities link_caps;
1696        struct qed_mcp_link_params params;
1697        struct qed_mcp_link_state link;
1698        u32 media_type;
1699
1700        memset(if_link, 0, sizeof(*if_link));
1701
1702        /* Prepare source inputs */
1703        if (qed_get_link_data(hwfn, &params, &link, &link_caps)) {
1704                dev_warn(&hwfn->cdev->pdev->dev, "no link data available\n");
1705                return;
1706        }
1707
1708        /* Set the link parameters to pass to the protocol driver */
1709        if (link.link_up)
1710                if_link->link_up = true;
1711
1712        /* TODO - at the moment assume supported and advertised speed equal */
1713        if_link->supported_caps = QED_LM_FIBRE_BIT;
1714        if (link_caps.default_speed_autoneg)
1715                if_link->supported_caps |= QED_LM_Autoneg_BIT;
1716        if (params.pause.autoneg ||
1717            (params.pause.forced_rx && params.pause.forced_tx))
1718                if_link->supported_caps |= QED_LM_Asym_Pause_BIT;
1719        if (params.pause.autoneg || params.pause.forced_rx ||
1720            params.pause.forced_tx)
1721                if_link->supported_caps |= QED_LM_Pause_BIT;
1722
1723        if_link->advertised_caps = if_link->supported_caps;
1724        if (params.speed.autoneg)
1725                if_link->advertised_caps |= QED_LM_Autoneg_BIT;
1726        else
1727                if_link->advertised_caps &= ~QED_LM_Autoneg_BIT;
1728
1729        /* Fill link advertised capabilities */
1730        qed_fill_link_capability(hwfn, ptt, params.speed.advertised_speeds,
1731                                 &if_link->advertised_caps);
1732        /* Fill link supported capabilities */
1733        qed_fill_link_capability(hwfn, ptt, link_caps.speed_capabilities,
1734                                 &if_link->supported_caps);
1735
1736        if (link.link_up)
1737                if_link->speed = link.speed;
1738
1739        /* TODO - fill duplex properly */
1740        if_link->duplex = DUPLEX_FULL;
1741        qed_mcp_get_media_type(hwfn, ptt, &media_type);
1742        if_link->port = qed_get_port_type(media_type);
1743
1744        if_link->autoneg = params.speed.autoneg;
1745
1746        if (params.pause.autoneg)
1747                if_link->pause_config |= QED_LINK_PAUSE_AUTONEG_ENABLE;
1748        if (params.pause.forced_rx)
1749                if_link->pause_config |= QED_LINK_PAUSE_RX_ENABLE;
1750        if (params.pause.forced_tx)
1751                if_link->pause_config |= QED_LINK_PAUSE_TX_ENABLE;
1752
1753        /* Link partner capabilities */
1754        if (link.partner_adv_speed &
1755            QED_LINK_PARTNER_SPEED_1G_FD)
1756                if_link->lp_caps |= QED_LM_1000baseT_Full_BIT;
1757        if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_10G)
1758                if_link->lp_caps |= QED_LM_10000baseKR_Full_BIT;
1759        if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_20G)
1760                if_link->lp_caps |= QED_LM_20000baseKR2_Full_BIT;
1761        if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_25G)
1762                if_link->lp_caps |= QED_LM_25000baseKR_Full_BIT;
1763        if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_40G)
1764                if_link->lp_caps |= QED_LM_40000baseLR4_Full_BIT;
1765        if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_50G)
1766                if_link->lp_caps |= QED_LM_50000baseKR2_Full_BIT;
1767        if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_100G)
1768                if_link->lp_caps |= QED_LM_100000baseKR4_Full_BIT;
1769
1770        if (link.an_complete)
1771                if_link->lp_caps |= QED_LM_Autoneg_BIT;
1772
1773        if (link.partner_adv_pause)
1774                if_link->lp_caps |= QED_LM_Pause_BIT;
1775        if (link.partner_adv_pause == QED_LINK_PARTNER_ASYMMETRIC_PAUSE ||
1776            link.partner_adv_pause == QED_LINK_PARTNER_BOTH_PAUSE)
1777                if_link->lp_caps |= QED_LM_Asym_Pause_BIT;
1778
1779        if (link_caps.default_eee == QED_MCP_EEE_UNSUPPORTED) {
1780                if_link->eee_supported = false;
1781        } else {
1782                if_link->eee_supported = true;
1783                if_link->eee_active = link.eee_active;
1784                if_link->sup_caps = link_caps.eee_speed_caps;
1785                /* MFW clears adv_caps on eee disable; use configured value */
1786                if_link->eee.adv_caps = link.eee_adv_caps ? link.eee_adv_caps :
1787                                        params.eee.adv_caps;
1788                if_link->eee.lp_adv_caps = link.eee_lp_adv_caps;
1789                if_link->eee.enable = params.eee.enable;
1790                if_link->eee.tx_lpi_enable = params.eee.tx_lpi_enable;
1791                if_link->eee.tx_lpi_timer = params.eee.tx_lpi_timer;
1792        }
1793}
1794
1795static void qed_get_current_link(struct qed_dev *cdev,
1796                                 struct qed_link_output *if_link)
1797{
1798        struct qed_hwfn *hwfn;
1799        struct qed_ptt *ptt;
1800        int i;
1801
1802        hwfn = &cdev->hwfns[0];
1803        if (IS_PF(cdev)) {
1804                ptt = qed_ptt_acquire(hwfn);
1805                if (ptt) {
1806                        qed_fill_link(hwfn, ptt, if_link);
1807                        qed_ptt_release(hwfn, ptt);
1808                } else {
1809                        DP_NOTICE(hwfn, "Failed to fill link; No PTT\n");
1810                }
1811        } else {
1812                qed_fill_link(hwfn, NULL, if_link);
1813        }
1814
1815        for_each_hwfn(cdev, i)
1816                qed_inform_vf_link_state(&cdev->hwfns[i]);
1817}
1818
1819void qed_link_update(struct qed_hwfn *hwfn, struct qed_ptt *ptt)
1820{
1821        void *cookie = hwfn->cdev->ops_cookie;
1822        struct qed_common_cb_ops *op = hwfn->cdev->protocol_ops.common;
1823        struct qed_link_output if_link;
1824
1825        qed_fill_link(hwfn, ptt, &if_link);
1826        qed_inform_vf_link_state(hwfn);
1827
1828        if (IS_LEAD_HWFN(hwfn) && cookie)
1829                op->link_update(cookie, &if_link);
1830}
1831
1832static int qed_drain(struct qed_dev *cdev)
1833{
1834        struct qed_hwfn *hwfn;
1835        struct qed_ptt *ptt;
1836        int i, rc;
1837
1838        if (IS_VF(cdev))
1839                return 0;
1840
1841        for_each_hwfn(cdev, i) {
1842                hwfn = &cdev->hwfns[i];
1843                ptt = qed_ptt_acquire(hwfn);
1844                if (!ptt) {
1845                        DP_NOTICE(hwfn, "Failed to drain NIG; No PTT\n");
1846                        return -EBUSY;
1847                }
1848                rc = qed_mcp_drain(hwfn, ptt);
1849                qed_ptt_release(hwfn, ptt);
1850                if (rc)
1851                        return rc;
1852        }
1853
1854        return 0;
1855}
1856
1857static int qed_nvm_flash_image_access_crc(struct qed_dev *cdev,
1858                                          struct qed_nvm_image_att *nvm_image,
1859                                          u32 *crc)
1860{
1861        u8 *buf = NULL;
1862        int rc, j;
1863        __be32 val;
1864
1865        /* Allocate a buffer for holding the nvram image */
1866        buf = kzalloc(nvm_image->length, GFP_KERNEL);
1867        if (!buf)
1868                return -ENOMEM;
1869
1870        /* Read image into buffer */
1871        rc = qed_mcp_nvm_read(cdev, nvm_image->start_addr,
1872                              buf, nvm_image->length);
1873        if (rc) {
1874                DP_ERR(cdev, "Failed reading image from nvm\n");
1875                goto out;
1876        }
1877
1878        /* Convert the buffer into big-endian format (excluding the
1879         * closing 4 bytes of CRC).
1880         */
1881        for (j = 0; j < nvm_image->length - 4; j += 4) {
1882                val = cpu_to_be32(*(u32 *)&buf[j]);
1883                *(__be32 *)&buf[j] = val;
1884        }
1885
1886        /* Calc CRC for the "actual" image buffer, i.e. not including
1887         * the last 4 CRC bytes.
1888         */
1889        *crc = ~cpu_to_be32(crc32(0xffffffff, buf, nvm_image->length - 4));
1890
1891out:
1892        kfree(buf);
1893
1894        return rc;
1895}
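
/* A minimal sketch (illustrative, not part of the driver) of the inverse
 * check of the helper above: re-apply the big-endian conversion and compare
 * the recomputed CRC against the closing 4 bytes of the image.
 * qed_nvm_image_crc_matches() is a hypothetical name used only here.
 */
static bool qed_nvm_image_crc_matches(u8 *buf, u32 len)
{
        u32 crc, j;

        if (len < 8)
                return false;

        /* Same byte swap the flash convention applies before the CRC */
        for (j = 0; j < len - 4; j += 4)
                *(__be32 *)&buf[j] = cpu_to_be32(*(u32 *)&buf[j]);

        crc = ~cpu_to_be32(crc32(0xffffffff, buf, len - 4));

        /* The stored CRC occupies the untouched last 4 bytes */
        return !memcmp(&crc, &buf[len - 4], sizeof(crc));
}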
1896
1897/* Binary file format -
1898 *     /----------------------------------------------------------------------\
1899 * 0B  |                       0x4 [command index]                            |
1900 * 4B  | image_type     | Options        |  Number of register settings       |
1901 * 8B  |                       Value                                          |
1902 * 12B |                       Mask                                           |
1903 * 16B |                       Offset                                         |
1904 *     \----------------------------------------------------------------------/
1905 * There can be several Value-Mask-Offset sets as specified by 'Number of...'.
1906 * Options - b'0 - Calculate & Update CRC for image
1907 */
1908static int qed_nvm_flash_image_access(struct qed_dev *cdev, const u8 **data,
1909                                      bool *check_resp)
1910{
1911        struct qed_nvm_image_att nvm_image;
1912        struct qed_hwfn *p_hwfn;
1913        bool is_crc = false;
1914        u32 image_type;
1915        int rc = 0, i;
1916        u16 len;
1917
1918        *data += 4;
1919        image_type = **data;
1920        p_hwfn = QED_LEADING_HWFN(cdev);
1921        for (i = 0; i < p_hwfn->nvm_info.num_images; i++)
1922                if (image_type == p_hwfn->nvm_info.image_att[i].image_type)
1923                        break;
1924        if (i == p_hwfn->nvm_info.num_images) {
1925                DP_ERR(cdev, "Failed to find nvram image of type %08x\n",
1926                       image_type);
1927                return -ENOENT;
1928        }
1929
1930        nvm_image.start_addr = p_hwfn->nvm_info.image_att[i].nvm_start_addr;
1931        nvm_image.length = p_hwfn->nvm_info.image_att[i].len;
1932
1933        DP_VERBOSE(cdev, NETIF_MSG_DRV,
1934                   "Read image %02x; type = %08x; NVM [%08x,...,%08x]\n",
1935                   **data, image_type, nvm_image.start_addr,
1936                   nvm_image.start_addr + nvm_image.length - 1);
1937        (*data)++;
1938        is_crc = !!(**data & BIT(0));
1939        (*data)++;
1940        len = *((u16 *)*data);
1941        *data += 2;
1942        if (is_crc) {
1943                u32 crc = 0;
1944
1945                rc = qed_nvm_flash_image_access_crc(cdev, &nvm_image, &crc);
1946                if (rc) {
1947                        DP_ERR(cdev, "Failed calculating CRC, rc = %d\n", rc);
1948                        goto exit;
1949                }
1950
1951                rc = qed_mcp_nvm_write(cdev, QED_NVM_WRITE_NVRAM,
1952                                       (nvm_image.start_addr +
1953                                        nvm_image.length - 4), (u8 *)&crc, 4);
1954                if (rc)
1955                        DP_ERR(cdev, "Failed writing to %08x, rc = %d\n",
1956                               nvm_image.start_addr + nvm_image.length - 4, rc);
1957                goto exit;
1958        }
1959
1960        /* Iterate over the Value-Mask-Offset sets and apply each one */
1961        while (len) {
1962                u32 offset, mask, value, cur_value;
1963                u8 buf[4];
1964
1965                value = *((u32 *)*data);
1966                *data += 4;
1967                mask = *((u32 *)*data);
1968                *data += 4;
1969                offset = *((u32 *)*data);
1970                *data += 4;
1971
1972                rc = qed_mcp_nvm_read(cdev, nvm_image.start_addr + offset, buf,
1973                                      4);
1974                if (rc) {
1975                        DP_ERR(cdev, "Failed reading from %08x\n",
1976                               nvm_image.start_addr + offset);
1977                        goto exit;
1978                }
1979
1980                cur_value = le32_to_cpu(*((__le32 *)buf));
1981                DP_VERBOSE(cdev, NETIF_MSG_DRV,
1982                           "NVM %08x: %08x -> %08x [Value %08x Mask %08x]\n",
1983                           nvm_image.start_addr + offset, cur_value,
1984                           (cur_value & ~mask) | (value & mask), value, mask);
1985                value = (value & mask) | (cur_value & ~mask);
1986                rc = qed_mcp_nvm_write(cdev, QED_NVM_WRITE_NVRAM,
1987                                       nvm_image.start_addr + offset,
1988                                       (u8 *)&value, 4);
1989                if (rc) {
1990                        DP_ERR(cdev, "Failed writing to %08x\n",
1991                               nvm_image.start_addr + offset);
1992                        goto exit;
1993                }
1994
1995                len--;
1996        }
1997exit:
1998        return rc;
1999}
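
/* Packed-layout sketch of the 0x4 command documented above (illustrative
 * only; the driver parses the byte stream by hand, and these hypothetical
 * types are not part of any ABI):
 */
struct qed_nvm_cmd_nvm_change {
        u32 cmd;                /* 0x4, the command index */
        u8 image_type;          /* NVM image to patch */
        u8 options;             /* bit 0: calculate & update CRC */
        u16 num_entries;        /* number of value/mask/offset sets */
        struct {
                u32 value;
                u32 mask;
                u32 offset;     /* relative to the image start */
        } entry[];
} __packed;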
2000
2001/* Binary file format -
2002 *     /----------------------------------------------------------------------\
2003 * 0B  |                       0x3 [command index]                            |
2004 * 4B  | b'0: check_response?   | b'1-31  reserved                            |
2005 * 8B  | File-type |                   reserved                               |
2006 * 12B |                    Image length in bytes                             |
2007 *     \----------------------------------------------------------------------/
2008 *     Start a new file of the provided type
2009 */
2010static int qed_nvm_flash_image_file_start(struct qed_dev *cdev,
2011                                          const u8 **data, bool *check_resp)
2012{
2013        u32 file_type, file_size = 0;
2014        int rc;
2015
2016        *data += 4;
2017        *check_resp = !!(**data & BIT(0));
2018        *data += 4;
2019        file_type = **data;
2020
2021        DP_VERBOSE(cdev, NETIF_MSG_DRV,
2022                   "About to start a new file of type %02x\n", file_type);
2023        if (file_type == DRV_MB_PARAM_NVM_PUT_FILE_BEGIN_MBI) {
2024                *data += 4;
2025                file_size = *((u32 *)(*data));
2026        }
2027
2028        rc = qed_mcp_nvm_write(cdev, QED_PUT_FILE_BEGIN, file_type,
2029                               (u8 *)(&file_size), 4);
2030        *data += 4;
2031
2032        return rc;
2033}
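
/* Packed-layout sketch of the 0x3 command above (illustrative only); the
 * trailing image-length dword is consumed only for the MBI file type:
 */
struct qed_nvm_cmd_file_start {
        u32 cmd;                /* 0x3, the command index */
        u32 check_resp;         /* bit 0 only; rest reserved */
        u32 file_type;          /* e.g. DRV_MB_PARAM_NVM_PUT_FILE_BEGIN_MBI */
        u32 file_size;          /* present for MBI files only */
} __packed;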
2034
2035/* Binary file format -
2036 *     /----------------------------------------------------------------------\
2037 * 0B  |                       0x2 [command index]                            |
2038 * 4B  |                       Length in bytes                                |
2039 * 8B  | b'0: check_response?   | b'1-31  reserved                            |
2040 * 12B |                       Offset in bytes                                |
2041 * 16B |                       Data ...                                       |
2042 *     \----------------------------------------------------------------------/
2043 *     Write data as part of a file that was previously started. Data should be
2044 *     of length equal to that provided in the message
2045 */
2046static int qed_nvm_flash_image_file_data(struct qed_dev *cdev,
2047                                         const u8 **data, bool *check_resp)
2048{
2049        u32 offset, len;
2050        int rc;
2051
2052        *data += 4;
2053        len = *((u32 *)(*data));
2054        *data += 4;
2055        *check_resp = !!(**data & BIT(0));
2056        *data += 4;
2057        offset = *((u32 *)(*data));
2058        *data += 4;
2059
2060        DP_VERBOSE(cdev, NETIF_MSG_DRV,
2061                   "About to write File-data: %08x bytes to offset %08x\n",
2062                   len, offset);
2063
2064        rc = qed_mcp_nvm_write(cdev, QED_PUT_FILE_DATA, offset,
2065                               (char *)(*data), len);
2066        *data += len;
2067
2068        return rc;
2069}
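
/* Packed-layout sketch of the 0x2 command above (illustrative only); 'len'
 * bytes of payload follow the fixed header:
 */
struct qed_nvm_cmd_file_data {
        u32 cmd;                /* 0x2, the command index */
        u32 len;                /* length of data[] in bytes */
        u32 check_resp;         /* bit 0 only; rest reserved */
        u32 offset;             /* destination offset within the file */
        u8 data[];
} __packed;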
2070
2071/* Binary file format [General header] -
2072 *     /----------------------------------------------------------------------\
2073 * 0B  |                       QED_NVM_SIGNATURE                              |
2074 * 4B  |                       Length in bytes                                |
2075 * 8B  | Highest command in this batchfile |          Reserved                |
2076 *     \----------------------------------------------------------------------/
2077 */
2078static int qed_nvm_flash_image_validate(struct qed_dev *cdev,
2079                                        const struct firmware *image,
2080                                        const u8 **data)
2081{
2082        u32 signature, len;
2083
2084        /* Check minimum size */
2085        if (image->size < 12) {
2086                DP_ERR(cdev, "Image is too short [%08x]\n", (u32)image->size);
2087                return -EINVAL;
2088        }
2089
2090        /* Check signature */
2091        signature = *((u32 *)(*data));
2092        if (signature != QED_NVM_SIGNATURE) {
2093                DP_ERR(cdev, "Wrong signature '%08x'\n", signature);
2094                return -EINVAL;
2095        }
2096
2097        *data += 4;
2098        /* Validate that the internal size equals the image size */
2099        len = *((u32 *)(*data));
2100        if (len != image->size) {
2101                DP_ERR(cdev, "Size mismatch: internal = %08x image = %08x\n",
2102                       len, (u32)image->size);
2103                return -EINVAL;
2104        }
2105
2106        *data += 4;
2107        /* Make sure the driver is familiar with every command this file uses */
2108        if (*((u16 *)(*data)) >= QED_NVM_FLASH_CMD_NVM_MAX) {
2109                DP_ERR(cdev, "File contains unsupported commands [Need %04x]\n",
2110                       *((u16 *)(*data)));
2111                return -EINVAL;
2112        }
2113
2114        *data += 4;
2115
2116        return 0;
2117}
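
/* Worked example of the general header above: a batchfile of 0x100 bytes
 * whose highest command index is 0x4 begins with the three dwords
 * { QED_NVM_SIGNATURE, 0x00000100, 0x00000004 }; anything else fails
 * qed_nvm_flash_image_validate() before any command is parsed.
 */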
2118
2119static int qed_nvm_flash(struct qed_dev *cdev, const char *name)
2120{
2121        const struct firmware *image;
2122        const u8 *data, *data_end;
2123        u32 cmd_type;
2124        int rc;
2125
2126        rc = request_firmware(&image, name, &cdev->pdev->dev);
2127        if (rc) {
2128                DP_ERR(cdev, "Failed to find '%s'\n", name);
2129                return rc;
2130        }
2131
2132        DP_VERBOSE(cdev, NETIF_MSG_DRV,
2133                   "Flashing '%s' - firmware's data at %p, size is %08x\n",
2134                   name, image->data, (u32)image->size);
2135        data = image->data;
2136        data_end = data + image->size;
2137
2138        rc = qed_nvm_flash_image_validate(cdev, image, &data);
2139        if (rc)
2140                goto exit;
2141
2142        while (data < data_end) {
2143                bool check_resp = false;
2144
2145                /* Parse the actual command */
2146                cmd_type = *((u32 *)data);
2147                switch (cmd_type) {
2148                case QED_NVM_FLASH_CMD_FILE_DATA:
2149                        rc = qed_nvm_flash_image_file_data(cdev, &data,
2150                                                           &check_resp);
2151                        break;
2152                case QED_NVM_FLASH_CMD_FILE_START:
2153                        rc = qed_nvm_flash_image_file_start(cdev, &data,
2154                                                            &check_resp);
2155                        break;
2156                case QED_NVM_FLASH_CMD_NVM_CHANGE:
2157                        rc = qed_nvm_flash_image_access(cdev, &data,
2158                                                        &check_resp);
2159                        break;
2160                default:
2161                        DP_ERR(cdev, "Unknown command %08x\n", cmd_type);
2162                        rc = -EINVAL;
2163                        goto exit;
2164                }
2165
2166                if (rc) {
2167                        DP_ERR(cdev, "Command %08x failed\n", cmd_type);
2168                        goto exit;
2169                }
2170
2171                /* Check response if needed */
2172                if (check_resp) {
2173                        u32 mcp_response = 0;
2174
2175                        if (qed_mcp_nvm_resp(cdev, (u8 *)&mcp_response)) {
2176                                DP_ERR(cdev, "Failed getting MCP response\n");
2177                                rc = -EINVAL;
2178                                goto exit;
2179                        }
2180
2181                        switch (mcp_response & FW_MSG_CODE_MASK) {
2182                        case FW_MSG_CODE_OK:
2183                        case FW_MSG_CODE_NVM_OK:
2184                        case FW_MSG_CODE_NVM_PUT_FILE_FINISH_OK:
2185                        case FW_MSG_CODE_PHY_OK:
2186                                break;
2187                        default:
2188                                DP_ERR(cdev, "MFW returns error: %08x\n",
2189                                       mcp_response);
2190                                rc = -EINVAL;
2191                                goto exit;
2192                        }
2193                }
2194        }
2195
2196exit:
2197        release_firmware(image);
2198
2199        return rc;
2200}
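
/* qed_nvm_flash() is typically reached from user space via ethtool's flash
 * request (illustrative; the request travels through the protocol driver's
 * flash callback into the .nvm_flash common op exported below):
 *
 *   ethtool -f <ifname> <batchfile>
 */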
2201
2202static int qed_nvm_get_image(struct qed_dev *cdev, enum qed_nvm_images type,
2203                             u8 *buf, u16 len)
2204{
2205        struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
2206
2207        return qed_mcp_get_nvm_image(hwfn, type, buf, len);
2208}
2209
2210void qed_schedule_recovery_handler(struct qed_hwfn *p_hwfn)
2211{
2212        struct qed_common_cb_ops *ops = p_hwfn->cdev->protocol_ops.common;
2213        void *cookie = p_hwfn->cdev->ops_cookie;
2214
2215        if (ops && ops->schedule_recovery_handler)
2216                ops->schedule_recovery_handler(cookie);
2217}
2218
2219static int qed_set_coalesce(struct qed_dev *cdev, u16 rx_coal, u16 tx_coal,
2220                            void *handle)
2221{
2222        return qed_set_queue_coalesce(rx_coal, tx_coal, handle);
2223}
2224
2225static int qed_set_led(struct qed_dev *cdev, enum qed_led_mode mode)
2226{
2227        struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
2228        struct qed_ptt *ptt;
2229        int status = 0;
2230
2231        ptt = qed_ptt_acquire(hwfn);
2232        if (!ptt)
2233                return -EAGAIN;
2234
2235        status = qed_mcp_set_led(hwfn, ptt, mode);
2236
2237        qed_ptt_release(hwfn, ptt);
2238
2239        return status;
2240}
2241
2242static int qed_recovery_process(struct qed_dev *cdev)
2243{
2244        struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
2245        struct qed_ptt *p_ptt;
2246        int rc = 0;
2247
2248        p_ptt = qed_ptt_acquire(p_hwfn);
2249        if (!p_ptt)
2250                return -EAGAIN;
2251
2252        rc = qed_start_recovery_process(p_hwfn, p_ptt);
2253
2254        qed_ptt_release(p_hwfn, p_ptt);
2255
2256        return rc;
2257}
2258
2259static int qed_update_wol(struct qed_dev *cdev, bool enabled)
2260{
2261        struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
2262        struct qed_ptt *ptt;
2263        int rc = 0;
2264
2265        if (IS_VF(cdev))
2266                return 0;
2267
2268        ptt = qed_ptt_acquire(hwfn);
2269        if (!ptt)
2270                return -EAGAIN;
2271
2272        rc = qed_mcp_ov_update_wol(hwfn, ptt, enabled ? QED_OV_WOL_ENABLED
2273                                   : QED_OV_WOL_DISABLED);
2274        if (rc)
2275                goto out;
2276        rc = qed_mcp_ov_update_current_config(hwfn, ptt, QED_OV_CLIENT_DRV);
2277
2278out:
2279        qed_ptt_release(hwfn, ptt);
2280        return rc;
2281}
2282
2283static int qed_update_drv_state(struct qed_dev *cdev, bool active)
2284{
2285        struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
2286        struct qed_ptt *ptt;
2287        int status = 0;
2288
2289        if (IS_VF(cdev))
2290                return 0;
2291
2292        ptt = qed_ptt_acquire(hwfn);
2293        if (!ptt)
2294                return -EAGAIN;
2295
2296        status = qed_mcp_ov_update_driver_state(hwfn, ptt, active ?
2297                                                QED_OV_DRIVER_STATE_ACTIVE :
2298                                                QED_OV_DRIVER_STATE_DISABLED);
2299
2300        qed_ptt_release(hwfn, ptt);
2301
2302        return status;
2303}
2304
2305static int qed_update_mac(struct qed_dev *cdev, u8 *mac)
2306{
2307        struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
2308        struct qed_ptt *ptt;
2309        int status = 0;
2310
2311        if (IS_VF(cdev))
2312                return 0;
2313
2314        ptt = qed_ptt_acquire(hwfn);
2315        if (!ptt)
2316                return -EAGAIN;
2317
2318        status = qed_mcp_ov_update_mac(hwfn, ptt, mac);
2319        if (status)
2320                goto out;
2321
2322        status = qed_mcp_ov_update_current_config(hwfn, ptt, QED_OV_CLIENT_DRV);
2323
2324out:
2325        qed_ptt_release(hwfn, ptt);
2326        return status;
2327}
2328
2329static int qed_update_mtu(struct qed_dev *cdev, u16 mtu)
2330{
2331        struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
2332        struct qed_ptt *ptt;
2333        int status = 0;
2334
2335        if (IS_VF(cdev))
2336                return 0;
2337
2338        ptt = qed_ptt_acquire(hwfn);
2339        if (!ptt)
2340                return -EAGAIN;
2341
2342        status = qed_mcp_ov_update_mtu(hwfn, ptt, mtu);
2343        if (status)
2344                goto out;
2345
2346        status = qed_mcp_ov_update_current_config(hwfn, ptt, QED_OV_CLIENT_DRV);
2347
2348out:
2349        qed_ptt_release(hwfn, ptt);
2350        return status;
2351}
2352
2353static int qed_read_module_eeprom(struct qed_dev *cdev, char *buf,
2354                                  u8 dev_addr, u32 offset, u32 len)
2355{
2356        struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
2357        struct qed_ptt *ptt;
2358        int rc = 0;
2359
2360        if (IS_VF(cdev))
2361                return 0;
2362
2363        ptt = qed_ptt_acquire(hwfn);
2364        if (!ptt)
2365                return -EAGAIN;
2366
2367        rc = qed_mcp_phy_sfp_read(hwfn, ptt, MFW_PORT(hwfn), dev_addr,
2368                                  offset, len, buf);
2369
2370        qed_ptt_release(hwfn, ptt);
2371
2372        return rc;
2373}
2374
2375static struct qed_selftest_ops qed_selftest_ops_pass = {
2376        .selftest_memory = &qed_selftest_memory,
2377        .selftest_interrupt = &qed_selftest_interrupt,
2378        .selftest_register = &qed_selftest_register,
2379        .selftest_clock = &qed_selftest_clock,
2380        .selftest_nvram = &qed_selftest_nvram,
2381};
2382
2383const struct qed_common_ops qed_common_ops_pass = {
2384        .selftest = &qed_selftest_ops_pass,
2385        .probe = &qed_probe,
2386        .remove = &qed_remove,
2387        .set_power_state = &qed_set_power_state,
2388        .set_name = &qed_set_name,
2389        .update_pf_params = &qed_update_pf_params,
2390        .slowpath_start = &qed_slowpath_start,
2391        .slowpath_stop = &qed_slowpath_stop,
2392        .set_fp_int = &qed_set_int_fp,
2393        .get_fp_int = &qed_get_int_fp,
2394        .sb_init = &qed_sb_init,
2395        .sb_release = &qed_sb_release,
2396        .simd_handler_config = &qed_simd_handler_config,
2397        .simd_handler_clean = &qed_simd_handler_clean,
2398        .dbg_grc = &qed_dbg_grc,
2399        .dbg_grc_size = &qed_dbg_grc_size,
2400        .can_link_change = &qed_can_link_change,
2401        .set_link = &qed_set_link,
2402        .get_link = &qed_get_current_link,
2403        .drain = &qed_drain,
2404        .update_msglvl = &qed_init_dp,
2405        .dbg_all_data = &qed_dbg_all_data,
2406        .dbg_all_data_size = &qed_dbg_all_data_size,
2407        .chain_alloc = &qed_chain_alloc,
2408        .chain_free = &qed_chain_free,
2409        .nvm_flash = &qed_nvm_flash,
2410        .nvm_get_image = &qed_nvm_get_image,
2411        .set_coalesce = &qed_set_coalesce,
2412        .set_led = &qed_set_led,
2413        .recovery_process = &qed_recovery_process,
2414        .recovery_prolog = &qed_recovery_prolog,
2415        .update_drv_state = &qed_update_drv_state,
2416        .update_mac = &qed_update_mac,
2417        .update_mtu = &qed_update_mtu,
2418        .update_wol = &qed_update_wol,
2419        .db_recovery_add = &qed_db_recovery_add,
2420        .db_recovery_del = &qed_db_recovery_del,
2421        .read_module_eeprom = &qed_read_module_eeprom,
2422};
2423
2424void qed_get_protocol_stats(struct qed_dev *cdev,
2425                            enum qed_mcp_protocol_type type,
2426                            union qed_mcp_protocol_stats *stats)
2427{
2428        struct qed_eth_stats eth_stats;
2429
2430        memset(stats, 0, sizeof(*stats));
2431
2432        switch (type) {
2433        case QED_MCP_LAN_STATS:
2434                qed_get_vport_stats(cdev, &eth_stats);
2435                stats->lan_stats.ucast_rx_pkts =
2436                                        eth_stats.common.rx_ucast_pkts;
2437                stats->lan_stats.ucast_tx_pkts =
2438                                        eth_stats.common.tx_ucast_pkts;
2439                stats->lan_stats.fcs_err = -1;
2440                break;
2441        case QED_MCP_FCOE_STATS:
2442                qed_get_protocol_stats_fcoe(cdev, &stats->fcoe_stats);
2443                break;
2444        case QED_MCP_ISCSI_STATS:
2445                qed_get_protocol_stats_iscsi(cdev, &stats->iscsi_stats);
2446                break;
2447        default:
2448                DP_VERBOSE(cdev, QED_MSG_SP,
2449                           "Invalid protocol type = %d\n", type);
2450                return;
2451        }
2452}
2453
2454int qed_mfw_tlv_req(struct qed_hwfn *hwfn)
2455{
2456        DP_VERBOSE(hwfn->cdev, NETIF_MSG_DRV,
2457                   "Scheduling slowpath task [Flag: %d]\n",
2458                   QED_SLOWPATH_MFW_TLV_REQ);
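        /* Make the TLV-request flag visible before the slowpath task can
         * run and consume it (assumed to pair with the flag test in the
         * slowpath worker).
         */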
2459        smp_mb__before_atomic();
2460        set_bit(QED_SLOWPATH_MFW_TLV_REQ, &hwfn->slowpath_task_flags);
2461        smp_mb__after_atomic();
2462        queue_delayed_work(hwfn->slowpath_wq, &hwfn->slowpath_task, 0);
2463
2464        return 0;
2465}
2466
2467static void
2468qed_fill_generic_tlv_data(struct qed_dev *cdev, struct qed_mfw_tlv_generic *tlv)
2469{
2470        struct qed_common_cb_ops *op = cdev->protocol_ops.common;
2471        struct qed_eth_stats_common *p_common;
2472        struct qed_generic_tlvs gen_tlvs;
2473        struct qed_eth_stats stats;
2474        int i;
2475
2476        memset(&gen_tlvs, 0, sizeof(gen_tlvs));
2477        op->get_generic_tlv_data(cdev->ops_cookie, &gen_tlvs);
2478
2479        if (gen_tlvs.feat_flags & QED_TLV_IP_CSUM)
2480                tlv->flags.ipv4_csum_offload = true;
2481        if (gen_tlvs.feat_flags & QED_TLV_LSO)
2482                tlv->flags.lso_supported = true;
2483        tlv->flags.b_set = true;
2484
2485        for (i = 0; i < QED_TLV_MAC_COUNT; i++) {
2486                if (is_valid_ether_addr(gen_tlvs.mac[i])) {
2487                        ether_addr_copy(tlv->mac[i], gen_tlvs.mac[i]);
2488                        tlv->mac_set[i] = true;
2489                }
2490        }
2491
2492        qed_get_vport_stats(cdev, &stats);
2493        p_common = &stats.common;
2494        tlv->rx_frames = p_common->rx_ucast_pkts + p_common->rx_mcast_pkts +
2495                         p_common->rx_bcast_pkts;
2496        tlv->rx_frames_set = true;
2497        tlv->rx_bytes = p_common->rx_ucast_bytes + p_common->rx_mcast_bytes +
2498                        p_common->rx_bcast_bytes;
2499        tlv->rx_bytes_set = true;
2500        tlv->tx_frames = p_common->tx_ucast_pkts + p_common->tx_mcast_pkts +
2501                         p_common->tx_bcast_pkts;
2502        tlv->tx_frames_set = true;
2503        tlv->tx_bytes = p_common->tx_ucast_bytes + p_common->tx_mcast_bytes +
2504                        p_common->tx_bcast_bytes;
2505        tlv->tx_bytes_set = true;
2506}
2507
2508int qed_mfw_fill_tlv_data(struct qed_hwfn *hwfn, enum qed_mfw_tlv_type type,
2509                          union qed_mfw_tlv_data *tlv_buf)
2510{
2511        struct qed_dev *cdev = hwfn->cdev;
2512        struct qed_common_cb_ops *ops;
2513
2514        ops = cdev->protocol_ops.common;
2515        if (!ops || !ops->get_protocol_tlv_data || !ops->get_generic_tlv_data) {
2516                DP_NOTICE(hwfn, "Can't collect TLV management info\n");
2517                return -EINVAL;
2518        }
2519
2520        switch (type) {
2521        case QED_MFW_TLV_GENERIC:
2522                qed_fill_generic_tlv_data(hwfn->cdev, &tlv_buf->generic);
2523                break;
2524        case QED_MFW_TLV_ETH:
2525                ops->get_protocol_tlv_data(cdev->ops_cookie, &tlv_buf->eth);
2526                break;
2527        case QED_MFW_TLV_FCOE:
2528                ops->get_protocol_tlv_data(cdev->ops_cookie, &tlv_buf->fcoe);
2529                break;
2530        case QED_MFW_TLV_ISCSI:
2531                ops->get_protocol_tlv_data(cdev->ops_cookie, &tlv_buf->iscsi);
2532                break;
2533        default:
2534                break;
2535        }
2536
2537        return 0;
2538}
2539