linux/drivers/scsi/lpfc/lpfc_init.c
/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2017-2022 Broadcom. All Rights Reserved. The term *
 * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.     *
 * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.broadcom.com                                                *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/

#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/idr.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/ctype.h>
#include <linux/aer.h>
#include <linux/slab.h>
#include <linux/firmware.h>
#include <linux/miscdevice.h>
#include <linux/percpu.h>
#include <linux/msi.h>
#include <linux/irq.h>
#include <linux/bitops.h>
#include <linux/crash_dump.h>
#include <linux/cpu.h>
#include <linux/cpuhotplug.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>
#include <scsi/scsi_tcq.h>
#include <scsi/fc/fc_fs.h>

#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc.h"
#include "lpfc_scsi.h"
#include "lpfc_nvme.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_vport.h"
#include "lpfc_version.h"
#include "lpfc_ids.h"

static enum cpuhp_state lpfc_cpuhp_state;
/* Used when mapping IRQ vectors in a driver centric manner */
static uint32_t lpfc_present_cpu;
static bool lpfc_pldv_detect;

static void __lpfc_cpuhp_remove(struct lpfc_hba *phba);
static void lpfc_cpuhp_remove(struct lpfc_hba *phba);
static void lpfc_cpuhp_add(struct lpfc_hba *phba);
static void lpfc_get_hba_model_desc(struct lpfc_hba *, uint8_t *, uint8_t *);
static int lpfc_post_rcv_buf(struct lpfc_hba *);
static int lpfc_sli4_queue_verify(struct lpfc_hba *);
static int lpfc_create_bootstrap_mbox(struct lpfc_hba *);
static int lpfc_setup_endian_order(struct lpfc_hba *);
static void lpfc_destroy_bootstrap_mbox(struct lpfc_hba *);
static void lpfc_free_els_sgl_list(struct lpfc_hba *);
static void lpfc_free_nvmet_sgl_list(struct lpfc_hba *);
static void lpfc_init_sgl_list(struct lpfc_hba *);
static int lpfc_init_active_sgl_array(struct lpfc_hba *);
static void lpfc_free_active_sgl(struct lpfc_hba *);
static int lpfc_hba_down_post_s3(struct lpfc_hba *phba);
static int lpfc_hba_down_post_s4(struct lpfc_hba *phba);
static int lpfc_sli4_cq_event_pool_create(struct lpfc_hba *);
static void lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *);
static void lpfc_sli4_cq_event_release_all(struct lpfc_hba *);
static void lpfc_sli4_disable_intr(struct lpfc_hba *);
static uint32_t lpfc_sli4_enable_intr(struct lpfc_hba *, uint32_t);
static void lpfc_sli4_oas_verify(struct lpfc_hba *phba);
static uint16_t lpfc_find_cpu_handle(struct lpfc_hba *, uint16_t, int);
static void lpfc_setup_bg(struct lpfc_hba *, struct Scsi_Host *);
static int lpfc_sli4_cgn_parm_chg_evt(struct lpfc_hba *);
static void lpfc_sli4_prep_dev_for_reset(struct lpfc_hba *phba);

static struct scsi_transport_template *lpfc_transport_template = NULL;
static struct scsi_transport_template *lpfc_vport_transport_template = NULL;
static DEFINE_IDR(lpfc_hba_index);
#define LPFC_NVMET_BUF_POST 254
static int lpfc_vmid_res_alloc(struct lpfc_hba *phba, struct lpfc_vport *vport);

/**
 * lpfc_config_port_prep - Perform lpfc initialization prior to config port
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine will do LPFC initialization prior to issuing the CONFIG_PORT
 * mailbox command. It retrieves the revision information from the HBA and
 * collects the Vital Product Data (VPD) about the HBA for preparing the
 * configuration of the HBA.
 *
 * Return codes:
 *   0 - success.
 *   -ERESTART - requests the SLI layer to reset the HBA and try again.
 *   Any other value - indicates an error.
 **/
int
lpfc_config_port_prep(struct lpfc_hba *phba)
{
	lpfc_vpd_t *vp = &phba->vpd;
	int i = 0, rc;
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *mb;
	char *lpfc_vpd_data = NULL;
	uint16_t offset = 0;
	static char licensed[56] =
		    "key unlock for use with gnu public licensed code only\0";
	static int init_key = 1;

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}

	mb = &pmb->u.mb;
	phba->link_state = LPFC_INIT_MBX_CMDS;

	if (lpfc_is_LC_HBA(phba->pcidev->device)) {
		if (init_key) {
			uint32_t *ptext = (uint32_t *) licensed;

			for (i = 0; i < 56; i += sizeof(uint32_t), ptext++)
				*ptext = cpu_to_be32(*ptext);
			init_key = 0;
		}

		lpfc_read_nv(phba, pmb);
		memset((char *)mb->un.varRDnvp.rsvd3, 0,
		       sizeof(mb->un.varRDnvp.rsvd3));
		memcpy((char *)mb->un.varRDnvp.rsvd3, licensed,
		       sizeof(licensed));

		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);

		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"0324 Config Port initialization "
					"error, mbxCmd x%x READ_NVPARM, "
					"mbxStatus x%x\n",
					mb->mbxCommand, mb->mbxStatus);
			mempool_free(pmb, phba->mbox_mem_pool);
			return -ERESTART;
		}
		memcpy(phba->wwnn, (char *)mb->un.varRDnvp.nodename,
		       sizeof(phba->wwnn));
		memcpy(phba->wwpn, (char *)mb->un.varRDnvp.portname,
		       sizeof(phba->wwpn));
	}

	/*
	 * Clear all option bits except LPFC_SLI3_BG_ENABLED,
	 * which was already set in lpfc_get_cfgparam()
	 */
	phba->sli3_options &= (uint32_t)LPFC_SLI3_BG_ENABLED;

	/* Setup and issue mailbox READ REV command */
	lpfc_read_rev(phba, pmb);
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
	if (rc != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0439 Adapter failed to init, mbxCmd x%x "
				"READ_REV, mbxStatus x%x\n",
				mb->mbxCommand, mb->mbxStatus);
		mempool_free(pmb, phba->mbox_mem_pool);
		return -ERESTART;
	}

	/*
	 * The value of rr must be 1 since the driver set the cv field to 1.
	 * This setting requires the FW to set all revision fields.
	 */
	if (mb->un.varRdRev.rr == 0) {
		vp->rev.rBit = 0;
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0440 Adapter failed to init, READ_REV has "
				"missing revision information.\n");
		mempool_free(pmb, phba->mbox_mem_pool);
		return -ERESTART;
	}

	if (phba->sli_rev == 3 && !mb->un.varRdRev.v3rsp) {
		mempool_free(pmb, phba->mbox_mem_pool);
		return -EINVAL;
	}

	/* Save information as VPD data */
	vp->rev.rBit = 1;
	memcpy(&vp->sli3Feat, &mb->un.varRdRev.sli3Feat, sizeof(uint32_t));
	vp->rev.sli1FwRev = mb->un.varRdRev.sli1FwRev;
	memcpy(vp->rev.sli1FwName, (char *)mb->un.varRdRev.sli1FwName, 16);
	vp->rev.sli2FwRev = mb->un.varRdRev.sli2FwRev;
	memcpy(vp->rev.sli2FwName, (char *)mb->un.varRdRev.sli2FwName, 16);
	vp->rev.biuRev = mb->un.varRdRev.biuRev;
	vp->rev.smRev = mb->un.varRdRev.smRev;
	vp->rev.smFwRev = mb->un.varRdRev.un.smFwRev;
	vp->rev.endecRev = mb->un.varRdRev.endecRev;
	vp->rev.fcphHigh = mb->un.varRdRev.fcphHigh;
	vp->rev.fcphLow = mb->un.varRdRev.fcphLow;
	vp->rev.feaLevelHigh = mb->un.varRdRev.feaLevelHigh;
	vp->rev.feaLevelLow = mb->un.varRdRev.feaLevelLow;
	vp->rev.postKernRev = mb->un.varRdRev.postKernRev;
	vp->rev.opFwRev = mb->un.varRdRev.opFwRev;

	/* If the sli feature level is less than 9, we must
	 * tear down all RPIs and VPIs on link down if NPIV
	 * is enabled.
	 */
	if (vp->rev.feaLevelHigh < 9)
		phba->sli3_options |= LPFC_SLI3_VPORT_TEARDOWN;

	if (lpfc_is_LC_HBA(phba->pcidev->device))
		memcpy(phba->RandomData, (char *)&mb->un.varWords[24],
		       sizeof(phba->RandomData));

	/* Get adapter VPD information */
	lpfc_vpd_data = kmalloc(DMP_VPD_SIZE, GFP_KERNEL);
	if (!lpfc_vpd_data)
		goto out_free_mbox;
	do {
		lpfc_dump_mem(phba, pmb, offset, DMP_REGION_VPD);
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);

		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"0441 VPD not present on adapter, "
					"mbxCmd x%x DUMP VPD, mbxStatus x%x\n",
					mb->mbxCommand, mb->mbxStatus);
			mb->un.varDmp.word_cnt = 0;
		}
		/* Dump mem may return a zero word count when finished, or we
		 * may have hit a mailbox error; either way we are done.
		 */
		if (mb->un.varDmp.word_cnt == 0)
			break;

		if (mb->un.varDmp.word_cnt > DMP_VPD_SIZE - offset)
			mb->un.varDmp.word_cnt = DMP_VPD_SIZE - offset;
		lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
				      lpfc_vpd_data + offset,
				      mb->un.varDmp.word_cnt);
		offset += mb->un.varDmp.word_cnt;
	} while (mb->un.varDmp.word_cnt && offset < DMP_VPD_SIZE);

	lpfc_parse_vpd(phba, lpfc_vpd_data, offset);

	kfree(lpfc_vpd_data);
out_free_mbox:
	mempool_free(pmb, phba->mbox_mem_pool);
	return 0;
}
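
/*
 * Illustrative sketch (not part of the driver): the polled-mailbox
 * pattern that lpfc_config_port_prep() repeats above.  A command is
 * built into a mempool-allocated LPFC_MBOXQ_t, issued with MBX_POLL so
 * completion is synchronous, and the element is returned to the pool
 * on every exit path.  The helper name lpfc_example_poll_read_rev is
 * hypothetical; everything else uses only symbols already seen here.
 */
#if 0	/* example only */
static int
lpfc_example_poll_read_rev(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *pmb;
	int rc;

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb)
		return -ENOMEM;

	lpfc_read_rev(phba, pmb);			/* build the command */
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);	/* wait for it */
	if (rc != MBX_SUCCESS) {
		mempool_free(pmb, phba->mbox_mem_pool);
		return -ERESTART;
	}
	/* ... consume pmb->u.mb response fields here ... */
	mempool_free(pmb, phba->mbox_mem_pool);
	return 0;
}
#endif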

/**
 * lpfc_config_async_cmpl - Completion handler for config async event mbox cmd
 * @phba: pointer to lpfc hba data structure.
 * @pmboxq: pointer to the driver internal queue element for mailbox command.
 *
 * This is the completion handler for the driver's asynchronous event
 * configuration mailbox command. If the mailbox command returns successfully,
 * it sets the internal async event support flag to 1; otherwise, it clears
 * the flag to 0.
 **/
static void
lpfc_config_async_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
{
	if (pmboxq->u.mb.mbxStatus == MBX_SUCCESS)
		phba->temp_sensor_support = 1;
	else
		phba->temp_sensor_support = 0;
	mempool_free(pmboxq, phba->mbox_mem_pool);
}

/**
 * lpfc_dump_wakeup_param_cmpl - dump memory mailbox command completion handler
 * @phba: pointer to lpfc hba data structure.
 * @pmboxq: pointer to the driver internal queue element for mailbox command.
 *
 * This is the completion handler for the dump mailbox command used to get
 * wake up parameters. When this command completes, the response contains the
 * Option ROM version of the HBA. This function translates the version number
 * into a human readable string and stores it in OptionROMVersion.
 **/
static void
lpfc_dump_wakeup_param_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
{
	struct prog_id *prg;
	uint32_t prog_id_word;
	char dist = ' ';
	/* character array used for decoding dist type. */
	char dist_char[] = "nabx";

	if (pmboxq->u.mb.mbxStatus != MBX_SUCCESS) {
		mempool_free(pmboxq, phba->mbox_mem_pool);
		return;
	}

	prg = (struct prog_id *) &prog_id_word;

	/* word 7 contains the option rom version */
	prog_id_word = pmboxq->u.mb.un.varWords[7];

	/* Decode the Option rom version word to a readable string */
	if (prg->dist < 4)
		dist = dist_char[prg->dist];

	if ((prg->dist == 3) && (prg->num == 0))
		snprintf(phba->OptionROMVersion, 32, "%d.%d%d",
			 prg->ver, prg->rev, prg->lev);
	else
		snprintf(phba->OptionROMVersion, 32, "%d.%d%d%c%d",
			 prg->ver, prg->rev, prg->lev,
			 dist, prg->num);
	mempool_free(pmboxq, phba->mbox_mem_pool);
}
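
/*
 * Worked example for the decode above (hypothetical field values,
 * sketch only): a wakeup-parameter word with ver = 10, rev = 2,
 * lev = 7, dist = 2 and num = 3 maps dist 2 to 'b' via dist_char[]
 * ("nabx"), so the "%d.%d%d%c%d" format renders OptionROMVersion as
 * "10.27b3".  Only the dist == 3 && num == 0 case drops the trailing
 * "%c%d" pair.
 */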

/**
 * lpfc_update_vport_wwn - Updates the fc_nodename and fc_portname of a vport
 * @vport: pointer to lpfc vport data structure.
 *
 * Syncs the vport's node and port names with the names in its service
 * parameters, and marks a fabric-assigned WWPN change so that the login
 * can be unregistered.
 *
 * Return codes
 *   None.
 **/
void
lpfc_update_vport_wwn(struct lpfc_vport *vport)
{
	struct lpfc_hba *phba = vport->phba;

	/*
	 * If the node name is empty, or a soft name exists, copy the
	 * service params name; otherwise use the fc name.
	 */
	if (vport->fc_nodename.u.wwn[0] == 0)
		memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName,
		       sizeof(struct lpfc_name));
	else
		memcpy(&vport->fc_sparam.nodeName, &vport->fc_nodename,
		       sizeof(struct lpfc_name));

	/*
	 * If the port name has changed, then set the Param changes flag
	 * to unreg the login
	 */
	if (vport->fc_portname.u.wwn[0] != 0 &&
	    memcmp(&vport->fc_portname, &vport->fc_sparam.portName,
		   sizeof(struct lpfc_name))) {
		vport->vport_flag |= FAWWPN_PARAM_CHG;

		if (phba->sli_rev == LPFC_SLI_REV4 &&
		    vport->port_type == LPFC_PHYSICAL_PORT &&
		    phba->sli4_hba.fawwpn_flag & LPFC_FAWWPN_FABRIC) {
			lpfc_printf_log(phba, KERN_INFO,
					LOG_SLI | LOG_DISCOVERY | LOG_ELS,
					"2701 FA-PWWN change WWPN from %llx to "
					"%llx: vflag x%x fawwpn_flag x%x\n",
					wwn_to_u64(vport->fc_portname.u.wwn),
					wwn_to_u64
					   (vport->fc_sparam.portName.u.wwn),
					vport->vport_flag,
					phba->sli4_hba.fawwpn_flag);
			memcpy(&vport->fc_portname, &vport->fc_sparam.portName,
			       sizeof(struct lpfc_name));
		}
	}

	if (vport->fc_portname.u.wwn[0] == 0)
		memcpy(&vport->fc_portname, &vport->fc_sparam.portName,
		       sizeof(struct lpfc_name));
	else
		memcpy(&vport->fc_sparam.portName, &vport->fc_portname,
		       sizeof(struct lpfc_name));
}

/**
 * lpfc_config_port_post - Perform lpfc initialization after config port
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine will do LPFC initialization after the CONFIG_PORT mailbox
 * command call. It performs all internal resource and state setups on the
 * port: post IOCB buffers, enable appropriate host interrupt attentions,
 * ELS ring timers, etc.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
int
lpfc_config_port_post(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *mb;
	struct lpfc_dmabuf *mp;
	struct lpfc_sli *psli = &phba->sli;
	uint32_t status, timeout;
	int i, j;
	int rc;

	spin_lock_irq(&phba->hbalock);
	/*
	 * If the Config port completed correctly the HBA is no longer
	 * overheated.
	 */
	if (phba->over_temp_state == HBA_OVER_TEMP)
		phba->over_temp_state = HBA_NORMAL_TEMP;
	spin_unlock_irq(&phba->hbalock);

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}
	mb = &pmb->u.mb;

	/* Get login parameters for NID.  */
	rc = lpfc_read_sparam(phba, pmb, 0);
	if (rc) {
		mempool_free(pmb, phba->mbox_mem_pool);
		return -ENOMEM;
	}

	pmb->vport = vport;
	if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0448 Adapter failed init, mbxCmd x%x "
				"READ_SPARM mbxStatus x%x\n",
				mb->mbxCommand, mb->mbxStatus);
		phba->link_state = LPFC_HBA_ERROR;
		lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED);
		return -EIO;
	}

	mp = (struct lpfc_dmabuf *)pmb->ctx_buf;

	/* This dmabuf was allocated by lpfc_read_sparam. The dmabuf is no
	 * longer needed.  Prevent unintended ctx_buf access as the mbox is
	 * reused.
	 */
	memcpy(&vport->fc_sparam, mp->virt, sizeof(struct serv_parm));
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	pmb->ctx_buf = NULL;
	lpfc_update_vport_wwn(vport);

	/* Update the fc_host data structures with new wwn. */
	fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
	fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
	fc_host_max_npiv_vports(shost) = phba->max_vpi;

	/* If no serial number in VPD data, use low 6 bytes of WWNN */
	/* This should be consolidated into parse_vpd ? - mr */
	if (phba->SerialNumber[0] == 0) {
		uint8_t *outptr;

		outptr = &vport->fc_nodename.u.s.IEEE[0];
		for (i = 0; i < 12; i++) {
			status = *outptr++;
			j = ((status & 0xf0) >> 4);
			if (j <= 9)
				phba->SerialNumber[i] =
				    (char)((uint8_t) 0x30 + (uint8_t) j);
			else
				phba->SerialNumber[i] =
				    (char)((uint8_t) 0x61 + (uint8_t) (j - 10));
			i++;
			j = (status & 0xf);
			if (j <= 9)
				phba->SerialNumber[i] =
				    (char)((uint8_t) 0x30 + (uint8_t) j);
			else
				phba->SerialNumber[i] =
				    (char)((uint8_t) 0x61 + (uint8_t) (j - 10));
		}
	}

	lpfc_read_config(phba, pmb);
	pmb->vport = vport;
	if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0453 Adapter failed to init, mbxCmd x%x "
				"READ_CONFIG, mbxStatus x%x\n",
				mb->mbxCommand, mb->mbxStatus);
		phba->link_state = LPFC_HBA_ERROR;
		mempool_free(pmb, phba->mbox_mem_pool);
		return -EIO;
	}

	/* Check if the port is disabled */
	lpfc_sli_read_link_ste(phba);

	/* Reset the DFT_HBA_Q_DEPTH to the max xri  */
	if (phba->cfg_hba_queue_depth > mb->un.varRdConfig.max_xri) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"3359 HBA queue depth changed from %d to %d\n",
				phba->cfg_hba_queue_depth,
				mb->un.varRdConfig.max_xri);
		phba->cfg_hba_queue_depth = mb->un.varRdConfig.max_xri;
	}

	phba->lmt = mb->un.varRdConfig.lmt;

	/* Get the default values for Model Name and Description */
	lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);

	phba->link_state = LPFC_LINK_DOWN;

	/* Only process IOCBs on ELS ring until hba_state is READY */
	if (psli->sli3_ring[LPFC_EXTRA_RING].sli.sli3.cmdringaddr)
		psli->sli3_ring[LPFC_EXTRA_RING].flag |= LPFC_STOP_IOCB_EVENT;
	if (psli->sli3_ring[LPFC_FCP_RING].sli.sli3.cmdringaddr)
		psli->sli3_ring[LPFC_FCP_RING].flag |= LPFC_STOP_IOCB_EVENT;

	/* Post receive buffers for desired rings */
	if (phba->sli_rev != 3)
		lpfc_post_rcv_buf(phba);

	/*
	 * Configure HBA MSI-X attention conditions to messages when in
	 * MSI-X mode.
	 */
	if (phba->intr_type == MSIX) {
		rc = lpfc_config_msi(phba, pmb);
		if (rc) {
			mempool_free(pmb, phba->mbox_mem_pool);
			return -EIO;
		}
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"0352 Config MSI mailbox command "
					"failed, mbxCmd x%x, mbxStatus x%x\n",
					pmb->u.mb.mbxCommand,
					pmb->u.mb.mbxStatus);
			mempool_free(pmb, phba->mbox_mem_pool);
			return -EIO;
		}
	}

	spin_lock_irq(&phba->hbalock);
	/* Initialize ERATT handling flag */
	phba->hba_flag &= ~HBA_ERATT_HANDLED;

	/* Enable appropriate host interrupts */
	if (lpfc_readl(phba->HCregaddr, &status)) {
		spin_unlock_irq(&phba->hbalock);
		return -EIO;
	}
	status |= HC_MBINT_ENA | HC_ERINT_ENA | HC_LAINT_ENA;
	if (psli->num_rings > 0)
		status |= HC_R0INT_ENA;
	if (psli->num_rings > 1)
		status |= HC_R1INT_ENA;
	if (psli->num_rings > 2)
		status |= HC_R2INT_ENA;
	if (psli->num_rings > 3)
		status |= HC_R3INT_ENA;

	if ((phba->cfg_poll & ENABLE_FCP_RING_POLLING) &&
	    (phba->cfg_poll & DISABLE_FCP_RING_INT))
		status &= ~(HC_R0INT_ENA);

	writel(status, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */
	spin_unlock_irq(&phba->hbalock);

	/* Set up ring-0 (ELS) timer */
	timeout = phba->fc_ratov * 2;
	mod_timer(&vport->els_tmofunc,
		  jiffies + msecs_to_jiffies(1000 * timeout));
	/* Set up heart beat (HB) timer */
	mod_timer(&phba->hb_tmofunc,
		  jiffies + msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
	phba->hba_flag &= ~(HBA_HBEAT_INP | HBA_HBEAT_TMO);
	phba->last_completion_time = jiffies;
	/* Set up error attention (ERATT) polling timer */
	mod_timer(&phba->eratt_poll,
		  jiffies + msecs_to_jiffies(1000 * phba->eratt_poll_interval));

	if (phba->hba_flag & LINK_DISABLED) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2598 Adapter Link is disabled.\n");
		lpfc_down_link(phba, pmb);
		pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
		if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"2599 Adapter failed to issue DOWN_LINK"
					" mbox command rc 0x%x\n", rc);

			mempool_free(pmb, phba->mbox_mem_pool);
			return -EIO;
		}
	} else if (phba->cfg_suppress_link_up == LPFC_INITIALIZE_LINK) {
		mempool_free(pmb, phba->mbox_mem_pool);
		rc = phba->lpfc_hba_init_link(phba, MBX_NOWAIT);
		if (rc)
			return rc;
	}
	/* MBOX buffer will be freed in mbox compl */
	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}

	lpfc_config_async(phba, pmb, LPFC_ELS_RING);
	pmb->mbox_cmpl = lpfc_config_async_cmpl;
	pmb->vport = phba->pport;
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);

	if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0456 Adapter failed to issue "
				"ASYNCEVT_ENABLE mbox status x%x\n",
				rc);
		mempool_free(pmb, phba->mbox_mem_pool);
	}

	/* Get Option rom version */
	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}

	lpfc_dump_wakeup_param(phba, pmb);
	pmb->mbox_cmpl = lpfc_dump_wakeup_param_cmpl;
	pmb->vport = phba->pport;
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);

	if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0435 Adapter failed "
				"to get Option ROM version status x%x\n", rc);
		mempool_free(pmb, phba->mbox_mem_pool);
	}

	return 0;
}
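
/*
 * Sketch of the timer arming used above (values hypothetical): each
 * driver timer is armed N seconds out by converting seconds to
 * jiffies.  Assuming the common FC R_A_TOV of 10 s, the ELS ring-0
 * timer fires 2 * fc_ratov = 20 s later:
 *
 *	mod_timer(&vport->els_tmofunc,
 *		  jiffies + msecs_to_jiffies(1000 * 20));
 *
 * The same pattern drives the heartbeat (LPFC_HB_MBOX_INTERVAL) and
 * error attention (eratt_poll_interval) timers.
 */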

/**
 * lpfc_sli4_refresh_params - update driver copy of params.
 * @phba: Pointer to HBA context object.
 *
 * This is called to refresh the driver copy of dynamic fields from the
 * common_get_sli4_parameters descriptor.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
int
lpfc_sli4_refresh_params(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *mboxq;
	struct lpfc_mqe *mqe;
	struct lpfc_sli4_parameters *mbx_sli4_parameters;
	int length, rc;

	mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq)
		return -ENOMEM;

	mqe = &mboxq->u.mqe;
	/* Read the port's SLI4 Config Parameters */
	length = (sizeof(struct lpfc_mbx_get_sli4_parameters) -
		  sizeof(struct lpfc_sli4_cfg_mhdr));
	lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
			 LPFC_MBOX_OPCODE_GET_SLI4_PARAMETERS,
			 length, LPFC_SLI4_MBX_EMBED);

	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
	if (unlikely(rc)) {
		mempool_free(mboxq, phba->mbox_mem_pool);
		return rc;
	}
	mbx_sli4_parameters = &mqe->un.get_sli4_parameters.sli4_parameters;

	/* Are we forcing MI off via module parameter? */
	if (phba->cfg_enable_mi)
		phba->sli4_hba.pc_sli4_params.mi_ver =
			bf_get(cfg_mi_ver, mbx_sli4_parameters);
	else
		phba->sli4_hba.pc_sli4_params.mi_ver = 0;

	phba->sli4_hba.pc_sli4_params.cmf =
			bf_get(cfg_cmf, mbx_sli4_parameters);
	phba->sli4_hba.pc_sli4_params.pls =
			bf_get(cfg_pvl, mbx_sli4_parameters);

	mempool_free(mboxq, phba->mbox_mem_pool);
	return rc;
}
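
/*
 * Sketch (not in the driver): the request length handed to
 * lpfc_sli4_config() above excludes the common config header, which
 * lpfc_sli4_config() accounts for itself when building the embedded
 * mailbox.  The same computation applies to other embedded SLI4
 * requests; struct lpfc_mbx_example_req below is hypothetical.
 */
#if 0	/* example only */
	length = sizeof(struct lpfc_mbx_example_req) -
		 sizeof(struct lpfc_sli4_cfg_mhdr);
	lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
			 LPFC_MBOX_OPCODE_GET_SLI4_PARAMETERS,
			 length, LPFC_SLI4_MBX_EMBED);
#endif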

/**
 * lpfc_hba_init_link - Initialize the FC link
 * @phba: pointer to lpfc hba data structure.
 * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT
 *
 * This routine will issue the INIT_LINK mailbox command call.
 * It is available to other drivers through the lpfc_hba data
 * structure for use as a delayed link up mechanism with the
 * module parameter lpfc_suppress_link_up.
 *
 * Return code
 *		0 - success
 *		Any other value - error
 **/
static int
lpfc_hba_init_link(struct lpfc_hba *phba, uint32_t flag)
{
	return lpfc_hba_init_link_fc_topology(phba, phba->cfg_topology, flag);
}
 736
 737/**
 738 * lpfc_hba_init_link_fc_topology - Initialize FC link with desired topology
 739 * @phba: pointer to lpfc hba data structure.
 740 * @fc_topology: desired fc topology.
 741 * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT
 742 *
 743 * This routine will issue the INIT_LINK mailbox command call.
 744 * It is available to other drivers through the lpfc_hba data
 745 * structure for use as a delayed link up mechanism with the
 746 * module parameter lpfc_suppress_link_up.
 747 *
 748 * Return code
 749 *              0 - success
 750 *              Any other value - error
 751 **/
 752int
 753lpfc_hba_init_link_fc_topology(struct lpfc_hba *phba, uint32_t fc_topology,
 754                               uint32_t flag)
 755{
 756        struct lpfc_vport *vport = phba->pport;
 757        LPFC_MBOXQ_t *pmb;
 758        MAILBOX_t *mb;
 759        int rc;
 760
 761        pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
 762        if (!pmb) {
 763                phba->link_state = LPFC_HBA_ERROR;
 764                return -ENOMEM;
 765        }
 766        mb = &pmb->u.mb;
 767        pmb->vport = vport;
 768
 769        if ((phba->cfg_link_speed > LPFC_USER_LINK_SPEED_MAX) ||
 770            ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_1G) &&
 771             !(phba->lmt & LMT_1Gb)) ||
 772            ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_2G) &&
 773             !(phba->lmt & LMT_2Gb)) ||
 774            ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_4G) &&
 775             !(phba->lmt & LMT_4Gb)) ||
 776            ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_8G) &&
 777             !(phba->lmt & LMT_8Gb)) ||
 778            ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_10G) &&
 779             !(phba->lmt & LMT_10Gb)) ||
 780            ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_16G) &&
 781             !(phba->lmt & LMT_16Gb)) ||
 782            ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_32G) &&
 783             !(phba->lmt & LMT_32Gb)) ||
 784            ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_64G) &&
 785             !(phba->lmt & LMT_64Gb))) {
 786                /* Reset link speed to auto */
 787                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
 788                                "1302 Invalid speed for this board:%d "
 789                                "Reset link speed to auto.\n",
 790                                phba->cfg_link_speed);
 791                        phba->cfg_link_speed = LPFC_USER_LINK_SPEED_AUTO;
 792        }
 793        lpfc_init_link(phba, pmb, fc_topology, phba->cfg_link_speed);
 794        pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
 795        if (phba->sli_rev < LPFC_SLI_REV4)
 796                lpfc_set_loopback_flag(phba);
 797        rc = lpfc_sli_issue_mbox(phba, pmb, flag);
 798        if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
 799                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
 800                                "0498 Adapter failed to init, mbxCmd x%x "
 801                                "INIT_LINK, mbxStatus x%x\n",
 802                                mb->mbxCommand, mb->mbxStatus);
 803                if (phba->sli_rev <= LPFC_SLI_REV3) {
 804                        /* Clear all interrupt enable conditions */
 805                        writel(0, phba->HCregaddr);
 806                        readl(phba->HCregaddr); /* flush */
 807                        /* Clear all pending interrupts */
 808                        writel(0xffffffff, phba->HAregaddr);
 809                        readl(phba->HAregaddr); /* flush */
 810                }
 811                phba->link_state = LPFC_HBA_ERROR;
 812                if (rc != MBX_BUSY || flag == MBX_POLL)
 813                        mempool_free(pmb, phba->mbox_mem_pool);
 814                return -EIO;
 815        }
 816        phba->cfg_suppress_link_up = LPFC_INITIALIZE_LINK;
 817        if (flag == MBX_POLL)
 818                mempool_free(pmb, phba->mbox_mem_pool);
 819
 820        return 0;
 821}
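
/*
 * Sketch (not part of the driver): the speed-vs-capability check above
 * expressed as a lookup table over the LMT bits read from READ_CONFIG.
 * The array and helper names are hypothetical.
 */
#if 0	/* example only */
static const struct {
	uint32_t speed;		/* LPFC_USER_LINK_SPEED_* setting */
	uint32_t lmt_bit;	/* LMT_* capability required for it */
} lpfc_example_speed_lmt[] = {
	{ LPFC_USER_LINK_SPEED_1G,  LMT_1Gb  },
	{ LPFC_USER_LINK_SPEED_2G,  LMT_2Gb  },
	{ LPFC_USER_LINK_SPEED_4G,  LMT_4Gb  },
	{ LPFC_USER_LINK_SPEED_8G,  LMT_8Gb  },
	{ LPFC_USER_LINK_SPEED_10G, LMT_10Gb },
	{ LPFC_USER_LINK_SPEED_16G, LMT_16Gb },
	{ LPFC_USER_LINK_SPEED_32G, LMT_32Gb },
	{ LPFC_USER_LINK_SPEED_64G, LMT_64Gb },
};

static bool
lpfc_example_speed_ok(struct lpfc_hba *phba, uint32_t speed)
{
	int i;

	if (speed > LPFC_USER_LINK_SPEED_MAX)
		return false;
	for (i = 0; i < ARRAY_SIZE(lpfc_example_speed_lmt); i++)
		if (lpfc_example_speed_lmt[i].speed == speed)
			return !!(phba->lmt &
				  lpfc_example_speed_lmt[i].lmt_bit);
	return true;	/* e.g. LPFC_USER_LINK_SPEED_AUTO */
}
#endif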

/**
 * lpfc_hba_down_link - this routine downs the FC link
 * @phba: pointer to lpfc hba data structure.
 * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT
 *
 * This routine will issue the DOWN_LINK mailbox command call.
 * It is available to other drivers through the lpfc_hba data
 * structure for use to stop the link.
 *
 * Return code
 *		0 - success
 *		Any other value - error
 **/
static int
lpfc_hba_down_link(struct lpfc_hba *phba, uint32_t flag)
{
	LPFC_MBOXQ_t *pmb;
	int rc;

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}

	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
			"0491 Adapter Link is disabled.\n");
	lpfc_down_link(phba, pmb);
	pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	rc = lpfc_sli_issue_mbox(phba, pmb, flag);
	if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2522 Adapter failed to issue DOWN_LINK"
				" mbox command rc 0x%x\n", rc);

		mempool_free(pmb, phba->mbox_mem_pool);
		return -EIO;
	}
	if (flag == MBX_POLL)
		mempool_free(pmb, phba->mbox_mem_pool);

	return 0;
}

/**
 * lpfc_hba_down_prep - Perform lpfc uninitialization prior to HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will do LPFC uninitialization before the HBA is reset when
 * bringing down the SLI Layer.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
int
lpfc_hba_down_prep(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	int i;

	if (phba->sli_rev <= LPFC_SLI_REV3) {
		/* Disable interrupts */
		writel(0, phba->HCregaddr);
		readl(phba->HCregaddr); /* flush */
	}

	if (phba->pport->load_flag & FC_UNLOADING)
		lpfc_cleanup_discovery_resources(phba->pport);
	else {
		vports = lpfc_create_vport_work_array(phba);
		if (vports != NULL)
			for (i = 0; i <= phba->max_vports &&
				vports[i] != NULL; i++)
				lpfc_cleanup_discovery_resources(vports[i]);
		lpfc_destroy_vport_work_array(phba, vports);
	}
	return 0;
}
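
/*
 * Sketch of the vport work-array idiom used above (an assumption drawn
 * from the loop bounds, stated as an illustration rather than the
 * authoritative API contract): lpfc_create_vport_work_array() returns
 * a NULL-terminated snapshot sized for max_vports + 1 entries, hence
 * the "i <= phba->max_vports && vports[i]" walk, and every successful
 * call is paired with lpfc_destroy_vport_work_array().
 */
#if 0	/* example only */
	struct lpfc_vport **vports = lpfc_create_vport_work_array(phba);
	int i;

	if (vports != NULL)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
			/* per-vport work goes here */;
	lpfc_destroy_vport_work_array(phba, vports);
#endif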

/**
 * lpfc_sli4_free_sp_events - Cleanup sp_queue_events to free
 * rspiocbs which got deferred
 *
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will clean up completed slow path events after the HBA is
 * reset when bringing down the SLI Layer.
 *
 * Return codes
 *   void.
 **/
static void
lpfc_sli4_free_sp_events(struct lpfc_hba *phba)
{
	struct lpfc_iocbq *rspiocbq;
	struct hbq_dmabuf *dmabuf;
	struct lpfc_cq_event *cq_event;

	spin_lock_irq(&phba->hbalock);
	phba->hba_flag &= ~HBA_SP_QUEUE_EVT;
	spin_unlock_irq(&phba->hbalock);

	while (!list_empty(&phba->sli4_hba.sp_queue_event)) {
		/* Get the response iocb from the head of work queue */
		spin_lock_irq(&phba->hbalock);
		list_remove_head(&phba->sli4_hba.sp_queue_event,
				 cq_event, struct lpfc_cq_event, list);
		spin_unlock_irq(&phba->hbalock);

		switch (bf_get(lpfc_wcqe_c_code, &cq_event->cqe.wcqe_cmpl)) {
		case CQE_CODE_COMPL_WQE:
			rspiocbq = container_of(cq_event, struct lpfc_iocbq,
						cq_event);
			lpfc_sli_release_iocbq(phba, rspiocbq);
			break;
		case CQE_CODE_RECEIVE:
		case CQE_CODE_RECEIVE_V1:
			dmabuf = container_of(cq_event, struct hbq_dmabuf,
					      cq_event);
			lpfc_in_buf_free(phba, &dmabuf->dbuf);
		}
	}
}
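
/*
 * Sketch: why container_of() works in the dispatch above.  Both
 * struct lpfc_iocbq and struct hbq_dmabuf embed a struct lpfc_cq_event
 * member named cq_event, so one slow-path list can carry either kind
 * of deferred event; the CQE code selects which enclosing type to
 * recover.  Illustration only, with the member layout inferred from
 * the usage above:
 */
#if 0	/* example only */
	struct lpfc_iocbq *iocbq =
		container_of(cq_event, struct lpfc_iocbq, cq_event);
	struct hbq_dmabuf *buf =
		container_of(cq_event, struct hbq_dmabuf, cq_event);
#endif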

/**
 * lpfc_hba_free_post_buf - Perform lpfc uninitialization after HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will cleanup posted ELS buffers after the HBA is reset
 * when bringing down the SLI Layer.
 *
 * Return codes
 *   void.
 **/
static void
lpfc_hba_free_post_buf(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_sli_ring *pring;
	struct lpfc_dmabuf *mp, *next_mp;
	LIST_HEAD(buflist);
	int count;

	if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)
		lpfc_sli_hbqbuf_free_all(phba);
	else {
		/* Cleanup preposted buffers on the ELS ring */
		pring = &psli->sli3_ring[LPFC_ELS_RING];
		spin_lock_irq(&phba->hbalock);
		list_splice_init(&pring->postbufq, &buflist);
		spin_unlock_irq(&phba->hbalock);

		count = 0;
		list_for_each_entry_safe(mp, next_mp, &buflist, list) {
			list_del(&mp->list);
			count++;
			lpfc_mbuf_free(phba, mp->virt, mp->phys);
			kfree(mp);
		}

		spin_lock_irq(&phba->hbalock);
		pring->postbufq_cnt -= count;
		spin_unlock_irq(&phba->hbalock);
	}
}
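
/*
 * Sketch of the detach-then-free idiom used above: splice the whole
 * shared list onto a private list head under the lock, then walk and
 * free the entries with the lock dropped.  This keeps hbalock hold
 * time short and lets the count adjustment happen in one final locked
 * step.  The list head name below is local to the example.
 */
#if 0	/* example only */
	LIST_HEAD(tmplist);

	spin_lock_irq(&phba->hbalock);
	list_splice_init(&pring->postbufq, &tmplist);	/* detach */
	spin_unlock_irq(&phba->hbalock);

	list_for_each_entry_safe(mp, next_mp, &tmplist, list) {
		list_del(&mp->list);
		lpfc_mbuf_free(phba, mp->virt, mp->phys); /* no lock held */
		kfree(mp);
	}
#endif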

/**
 * lpfc_hba_clean_txcmplq - Perform lpfc uninitialization after HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will cleanup the txcmplq after the HBA is reset when bringing
 * down the SLI Layer.
 *
 * Return codes
 *   void
 **/
static void
lpfc_hba_clean_txcmplq(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_queue *qp = NULL;
	struct lpfc_sli_ring *pring;
	LIST_HEAD(completions);
	int i;
	struct lpfc_iocbq *piocb, *next_iocb;

	if (phba->sli_rev != LPFC_SLI_REV4) {
		for (i = 0; i < psli->num_rings; i++) {
			pring = &psli->sli3_ring[i];
			spin_lock_irq(&phba->hbalock);
			/* At this point in time the HBA is either reset or
			 * DOA.  Nothing should be on txcmplq, as it will
			 * NEVER complete.
			 */
			list_splice_init(&pring->txcmplq, &completions);
			pring->txcmplq_cnt = 0;
			spin_unlock_irq(&phba->hbalock);

			lpfc_sli_abort_iocb_ring(phba, pring);
		}
		/* Cancel all the IOCBs from the completions list */
		lpfc_sli_cancel_iocbs(phba, &completions,
				      IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED);
		return;
	}
	list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
		pring = qp->pring;
		if (!pring)
			continue;
		spin_lock_irq(&pring->ring_lock);
		list_for_each_entry_safe(piocb, next_iocb,
					 &pring->txcmplq, list)
			piocb->cmd_flag &= ~LPFC_IO_ON_TXCMPLQ;
		list_splice_init(&pring->txcmplq, &completions);
		pring->txcmplq_cnt = 0;
		spin_unlock_irq(&pring->ring_lock);
		lpfc_sli_abort_iocb_ring(phba, pring);
	}
	/* Cancel all the IOCBs from the completions list */
	lpfc_sli_cancel_iocbs(phba, &completions,
			      IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED);
}

/**
 * lpfc_hba_down_post_s3 - Perform lpfc uninitialization after HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will do uninitialization after the HBA is reset when bringing
 * down the SLI Layer.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
static int
lpfc_hba_down_post_s3(struct lpfc_hba *phba)
{
	lpfc_hba_free_post_buf(phba);
	lpfc_hba_clean_txcmplq(phba);
	return 0;
}

/**
 * lpfc_hba_down_post_s4 - Perform lpfc uninitialization after HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will do uninitialization after the HBA is reset when bringing
 * down the SLI Layer.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
static int
lpfc_hba_down_post_s4(struct lpfc_hba *phba)
{
	struct lpfc_io_buf *psb, *psb_next;
	struct lpfc_async_xchg_ctx *ctxp, *ctxp_next;
	struct lpfc_sli4_hdw_queue *qp;
	LIST_HEAD(aborts);
	LIST_HEAD(nvme_aborts);
	LIST_HEAD(nvmet_aborts);
	struct lpfc_sglq *sglq_entry = NULL;
	int cnt, idx;

	lpfc_sli_hbqbuf_free_all(phba);
	lpfc_hba_clean_txcmplq(phba);

	/* At this point in time the HBA is either reset or DOA. Either
	 * way, nothing should be on lpfc_abts_els_sgl_list, it needs to be
	 * on the lpfc_els_sgl_list so that it can either be freed if the
	 * driver is unloading or reposted if the driver is restarting
	 * the port.
	 */

	/* sgl_list_lock required because worker thread uses this
	 * list.
	 */
	spin_lock_irq(&phba->sli4_hba.sgl_list_lock);
	list_for_each_entry(sglq_entry,
		&phba->sli4_hba.lpfc_abts_els_sgl_list, list)
		sglq_entry->state = SGL_FREED;

	list_splice_init(&phba->sli4_hba.lpfc_abts_els_sgl_list,
			 &phba->sli4_hba.lpfc_els_sgl_list);

	spin_unlock_irq(&phba->sli4_hba.sgl_list_lock);

	/* abts_xxxx_buf_list_lock required because worker thread uses this
	 * list.
	 */
	spin_lock_irq(&phba->hbalock);
	cnt = 0;
	for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
		qp = &phba->sli4_hba.hdwq[idx];

		spin_lock(&qp->abts_io_buf_list_lock);
		list_splice_init(&qp->lpfc_abts_io_buf_list,
				 &aborts);

		list_for_each_entry_safe(psb, psb_next, &aborts, list) {
			psb->pCmd = NULL;
			psb->status = IOSTAT_SUCCESS;
			cnt++;
		}
		spin_lock(&qp->io_buf_list_put_lock);
		list_splice_init(&aborts, &qp->lpfc_io_buf_list_put);
		qp->put_io_bufs += qp->abts_scsi_io_bufs;
		qp->put_io_bufs += qp->abts_nvme_io_bufs;
		qp->abts_scsi_io_bufs = 0;
		qp->abts_nvme_io_bufs = 0;
		spin_unlock(&qp->io_buf_list_put_lock);
		spin_unlock(&qp->abts_io_buf_list_lock);
	}
	spin_unlock_irq(&phba->hbalock);

	if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
		spin_lock_irq(&phba->sli4_hba.abts_nvmet_buf_list_lock);
		list_splice_init(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list,
				 &nvmet_aborts);
		spin_unlock_irq(&phba->sli4_hba.abts_nvmet_buf_list_lock);
		list_for_each_entry_safe(ctxp, ctxp_next, &nvmet_aborts, list) {
			ctxp->flag &= ~(LPFC_NVME_XBUSY | LPFC_NVME_ABORT_OP);
			lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
		}
	}

	lpfc_sli4_free_sp_events(phba);
	return cnt;
}
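
/*
 * Note on the locking above (descriptive only): the per-hdwq cleanup
 * nests its spinlocks in one fixed order - hbalock (irq-disabling),
 * then abts_io_buf_list_lock, then io_buf_list_put_lock - and releases
 * them in reverse.  Keeping a single acquisition order across the
 * driver is what makes the nesting deadlock-free.
 */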

/**
 * lpfc_hba_down_post - Wrapper func for hba down post routine
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine wraps the actual SLI3 or SLI4 routine for performing
 * uninitialization after the HBA is reset when bringing down the SLI Layer.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
int
lpfc_hba_down_post(struct lpfc_hba *phba)
{
	return (*phba->lpfc_hba_down_post)(phba);
}
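
/*
 * Sketch (not verbatim from this file): how the indirection above is
 * typically wired.  During setup the driver points lpfc_hba_down_post
 * at the SLI3 or SLI4 variant based on the PCI device group, so the
 * wrapper stays revision-agnostic.  The switch below is illustrative;
 * the dev_grp values are assumptions about the setup-time code.
 */
#if 0	/* example only */
	switch (dev_grp) {
	case LPFC_PCI_DEV_LP:	/* SLI3-era HBA */
		phba->lpfc_hba_down_post = lpfc_hba_down_post_s3;
		break;
	case LPFC_PCI_DEV_OC:	/* SLI4 HBA */
		phba->lpfc_hba_down_post = lpfc_hba_down_post_s4;
		break;
	}
#endif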

/**
 * lpfc_hb_timeout - The HBA-timer timeout handler
 * @t: timer context used to obtain the pointer to lpfc hba data structure.
 *
 * This is the HBA-timer timeout handler registered to the lpfc driver. When
 * this timer fires, an HBA timeout event shall be posted to the lpfc driver
 * work-port-events bitmap and the worker thread is notified. This timeout
 * event will be used by the worker thread to invoke the actual timeout
 * handler routine, lpfc_hb_timeout_handler. Any periodic operations will
 * be performed in the timeout handler and the HBA timeout event bit shall
 * be cleared by the worker thread after it has taken the event bitmap out.
 **/
static void
lpfc_hb_timeout(struct timer_list *t)
{
	struct lpfc_hba *phba;
	uint32_t tmo_posted;
	unsigned long iflag;

	phba = from_timer(phba, t, hb_tmofunc);

	/* Check for heart beat timeout conditions */
	spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
	tmo_posted = phba->pport->work_port_events & WORKER_HB_TMO;
	if (!tmo_posted)
		phba->pport->work_port_events |= WORKER_HB_TMO;
	spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);

	/* Tell the worker thread there is work to do */
	if (!tmo_posted)
		lpfc_worker_wake_up(phba);
}
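
/*
 * Sketch of the test-and-post pattern above: the event bit is sampled
 * and set under work_port_lock, but the worker is only woken by the
 * caller that actually transitioned the bit, so a timer firing while
 * the event is still pending does not generate redundant wakeups.
 * Illustration only; WORKER_EXAMPLE_TMO is a hypothetical event bit.
 */
#if 0	/* example only */
	spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
	tmo_posted = phba->pport->work_port_events & WORKER_EXAMPLE_TMO;
	if (!tmo_posted)
		phba->pport->work_port_events |= WORKER_EXAMPLE_TMO;
	spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);

	if (!tmo_posted)		/* we set it; wake the worker once */
		lpfc_worker_wake_up(phba);
#endif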

/**
 * lpfc_rrq_timeout - The RRQ-timer timeout handler
 * @t: timer context used to obtain the pointer to lpfc hba data structure.
 *
 * This is the RRQ-timer timeout handler registered to the lpfc driver. When
 * this timer fires, an RRQ timeout event shall be posted to the lpfc driver
 * work-port-events bitmap and the worker thread is notified. This timeout
 * event will be used by the worker thread to invoke the actual timeout
 * handler routine, lpfc_rrq_handler. Any periodic operations will
 * be performed in the timeout handler and the RRQ timeout event bit shall
 * be cleared by the worker thread after it has taken the event bitmap out.
 **/
static void
lpfc_rrq_timeout(struct timer_list *t)
{
	struct lpfc_hba *phba;
	unsigned long iflag;

	phba = from_timer(phba, t, rrq_tmr);
	spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
	if (!(phba->pport->load_flag & FC_UNLOADING))
		phba->hba_flag |= HBA_RRQ_ACTIVE;
	else
		phba->hba_flag &= ~HBA_RRQ_ACTIVE;
	spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);

	if (!(phba->pport->load_flag & FC_UNLOADING))
		lpfc_worker_wake_up(phba);
}

/**
 * lpfc_hb_mbox_cmpl - The lpfc heart-beat mailbox command callback function
 * @phba: pointer to lpfc hba data structure.
 * @pmboxq: pointer to the driver internal queue element for mailbox command.
 *
 * This is the callback function to the lpfc heart-beat mailbox command.
 * If configured, the lpfc driver issues the heart-beat mailbox command to
 * the HBA every LPFC_HB_MBOX_INTERVAL (currently 5) seconds. At the time the
 * heart-beat mailbox command is issued, the driver shall set up the
 * heart-beat timeout timer to LPFC_HB_MBOX_TIMEOUT (currently 30) seconds
 * and mark the heart-beat outstanding state. Once the mailbox command comes
 * back and no error conditions are detected, the heart-beat mailbox command
 * timer is reset to LPFC_HB_MBOX_INTERVAL seconds and the heart-beat
 * outstanding state is cleared for the next heart-beat. If the timer expired
 * with the heart-beat outstanding state set, the driver will put the HBA
 * offline.
 **/
static void
lpfc_hb_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
{
	unsigned long drvr_flag;

	spin_lock_irqsave(&phba->hbalock, drvr_flag);
	phba->hba_flag &= ~(HBA_HBEAT_INP | HBA_HBEAT_TMO);
	spin_unlock_irqrestore(&phba->hbalock, drvr_flag);

	/* Check and reset heart-beat timer if necessary */
	mempool_free(pmboxq, phba->mbox_mem_pool);
	if (!(phba->pport->fc_flag & FC_OFFLINE_MODE) &&
	    !(phba->link_state == LPFC_HBA_ERROR) &&
	    !(phba->pport->load_flag & FC_UNLOADING))
		mod_timer(&phba->hb_tmofunc,
			  jiffies +
			  msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
}
1275
1276/*
1277 * lpfc_idle_stat_delay_work - idle_stat tracking
1278 *
1279 * This routine tracks per-cq idle_stat and determines polling decisions.
1280 *
1281 * Return codes:
1282 *   None
1283 **/
1284static void
1285lpfc_idle_stat_delay_work(struct work_struct *work)
1286{
1287        struct lpfc_hba *phba = container_of(to_delayed_work(work),
1288                                             struct lpfc_hba,
1289                                             idle_stat_delay_work);
1290        struct lpfc_queue *cq;
1291        struct lpfc_sli4_hdw_queue *hdwq;
1292        struct lpfc_idle_stat *idle_stat;
1293        u32 i, idle_percent;
1294        u64 wall, wall_idle, diff_wall, diff_idle, busy_time;
1295
1296        if (phba->pport->load_flag & FC_UNLOADING)
1297                return;
1298
1299        if (phba->link_state == LPFC_HBA_ERROR ||
1300            phba->pport->fc_flag & FC_OFFLINE_MODE ||
1301            phba->cmf_active_mode != LPFC_CFG_OFF)
1302                goto requeue;
1303
1304        for_each_present_cpu(i) {
1305                hdwq = &phba->sli4_hba.hdwq[phba->sli4_hba.cpu_map[i].hdwq];
1306                cq = hdwq->io_cq;
1307
1308                /* Skip if we've already handled this cq's primary CPU */
1309                if (cq->chann != i)
1310                        continue;
1311
1312                idle_stat = &phba->sli4_hba.idle_stat[i];
1313
1314                /* get_cpu_idle_time returns values as running counters. Thus,
1315                 * to know the amount for this period, the prior counter values
1316                 * need to be subtracted from the current counter values.
1317                 * From there, the idle time stat can be calculated as a
1318                 * percentage of 100 - the sum of the other consumption times.
1319                 */
1320                wall_idle = get_cpu_idle_time(i, &wall, 1);
1321                diff_idle = wall_idle - idle_stat->prev_idle;
1322                diff_wall = wall - idle_stat->prev_wall;
1323
1324                if (diff_wall <= diff_idle)
1325                        busy_time = 0;
1326                else
1327                        busy_time = diff_wall - diff_idle;
1328
1329                idle_percent = div64_u64(100 * busy_time, diff_wall);
1330                idle_percent = 100 - idle_percent;
1331
1332                if (idle_percent < 15)
1333                        cq->poll_mode = LPFC_QUEUE_WORK;
1334                else
1335                        cq->poll_mode = LPFC_IRQ_POLL;
1336
1337                idle_stat->prev_idle = wall_idle;
1338                idle_stat->prev_wall = wall;
1339        }
1340
1341requeue:
1342        schedule_delayed_work(&phba->idle_stat_delay_work,
1343                              msecs_to_jiffies(LPFC_IDLE_STAT_DELAY));
1344}
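
/* Editor's sketch (not driver code) of the idle-percentage math above.
 * get_cpu_idle_time() returns running totals, so the work function takes
 * differences between consecutive samples: with diff_wall = 1000 and
 * diff_idle = 850, busy = 150, idle_percent = 100 - 15 = 85, which is
 * >= 15 and selects LPFC_IRQ_POLL.
 */
static u32 example_idle_percent(u64 diff_wall, u64 diff_idle)
{
        u64 busy = (diff_wall <= diff_idle) ? 0 : diff_wall - diff_idle;

        /* guard for the sketch only; the driver's sampling period is nonzero */
        if (!diff_wall)
                return 100;

        /* busy percent first, then flipped to idle percent, as above */
        return 100 - (u32)div64_u64(100 * busy, diff_wall);
}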
1345
1346static void
1347lpfc_hb_eq_delay_work(struct work_struct *work)
1348{
1349        struct lpfc_hba *phba = container_of(to_delayed_work(work),
1350                                             struct lpfc_hba, eq_delay_work);
1351        struct lpfc_eq_intr_info *eqi, *eqi_new;
1352        struct lpfc_queue *eq, *eq_next;
1353        unsigned char *ena_delay = NULL;
1354        uint32_t usdelay;
1355        int i;
1356
1357        if (!phba->cfg_auto_imax || phba->pport->load_flag & FC_UNLOADING)
1358                return;
1359
1360        if (phba->link_state == LPFC_HBA_ERROR ||
1361            phba->pport->fc_flag & FC_OFFLINE_MODE)
1362                goto requeue;
1363
1364        ena_delay = kcalloc(phba->sli4_hba.num_possible_cpu, sizeof(*ena_delay),
1365                            GFP_KERNEL);
1366        if (!ena_delay)
1367                goto requeue;
1368
1369        for (i = 0; i < phba->cfg_irq_chann; i++) {
1370                /* Get the EQ corresponding to the IRQ vector */
1371                eq = phba->sli4_hba.hba_eq_hdl[i].eq;
1372                if (!eq)
1373                        continue;
1374                if (eq->q_mode || eq->q_flag & HBA_EQ_DELAY_CHK) {
1375                        eq->q_flag &= ~HBA_EQ_DELAY_CHK;
1376                        ena_delay[eq->last_cpu] = 1;
1377                }
1378        }
1379
1380        for_each_present_cpu(i) {
1381                eqi = per_cpu_ptr(phba->sli4_hba.eq_info, i);
1382                if (ena_delay[i]) {
1383                        usdelay = (eqi->icnt >> 10) * LPFC_EQ_DELAY_STEP;
1384                        if (usdelay > LPFC_MAX_AUTO_EQ_DELAY)
1385                                usdelay = LPFC_MAX_AUTO_EQ_DELAY;
1386                } else {
1387                        usdelay = 0;
1388                }
1389
1390                eqi->icnt = 0;
1391
1392                list_for_each_entry_safe(eq, eq_next, &eqi->list, cpu_list) {
1393                        if (unlikely(eq->last_cpu != i)) {
1394                                eqi_new = per_cpu_ptr(phba->sli4_hba.eq_info,
1395                                                      eq->last_cpu);
1396                                list_move_tail(&eq->cpu_list, &eqi_new->list);
1397                                continue;
1398                        }
1399                        if (usdelay != eq->q_mode)
1400                                lpfc_modify_hba_eq_delay(phba, eq->hdwq, 1,
1401                                                         usdelay);
1402                }
1403        }
1404
1405        kfree(ena_delay);
1406
1407requeue:
1408        queue_delayed_work(phba->wq, &phba->eq_delay_work,
1409                           msecs_to_jiffies(LPFC_EQ_DELAY_MSECS));
1410}
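
/* Editor's sketch (not driver code) of the auto-coalescing step above: the
 * per-cpu EQ interrupt count for the sampling window is divided by 1024
 * (icnt >> 10), scaled by LPFC_EQ_DELAY_STEP, and clamped to
 * LPFC_MAX_AUTO_EQ_DELAY. Parameter names here are illustrative.
 */
static u32 example_eq_usdelay(u32 icnt, u32 step_us, u32 max_us)
{
        u32 us = (icnt >> 10) * step_us;

        return us > max_us ? max_us : us;
}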
1411
1412/**
1413 * lpfc_hb_mxp_handler - Multi-XRI pools handler to adjust XRI distribution
1414 * @phba: pointer to lpfc hba data structure.
1415 *
1416 * For each heartbeat, this routine applies heuristics to adjust the XRI
1417 * distribution across hardware queues. The goal is to fully utilize free XRIs.
1418 **/
1419static void lpfc_hb_mxp_handler(struct lpfc_hba *phba)
1420{
1421        u32 i;
1422        u32 hwq_count;
1423
1424        hwq_count = phba->cfg_hdw_queue;
1425        for (i = 0; i < hwq_count; i++) {
1426                /* Adjust XRIs in private pool */
1427                lpfc_adjust_pvt_pool_count(phba, i);
1428
1429                /* Adjust high watermark */
1430                lpfc_adjust_high_watermark(phba, i);
1431
1432#ifdef LPFC_MXP_STAT
1433                /* Snapshot pbl, pvt and busy count */
1434                lpfc_snapshot_mxp(phba, i);
1435#endif
1436        }
1437}
1438
1439/**
1440 * lpfc_issue_hb_mbox - Issues heart-beat mailbox command
1441 * @phba: pointer to lpfc hba data structure.
1442 *
1443 * If a HB mbox is not already in progress, this routine will allocate
1444 * an LPFC_MBOXQ_t, populate it with a MBX_HEARTBEAT (0x31) command,
1445 * and issue it. The HBA_HBEAT_INP flag indicates the command is in progress.
1446 **/
1447int
1448lpfc_issue_hb_mbox(struct lpfc_hba *phba)
1449{
1450        LPFC_MBOXQ_t *pmboxq;
1451        int retval;
1452
1453        /* Is a Heartbeat mbox already in progress? */
1454        if (phba->hba_flag & HBA_HBEAT_INP)
1455                return 0;
1456
1457        pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1458        if (!pmboxq)
1459                return -ENOMEM;
1460
1461        lpfc_heart_beat(phba, pmboxq);
1462        pmboxq->mbox_cmpl = lpfc_hb_mbox_cmpl;
1463        pmboxq->vport = phba->pport;
1464        retval = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
1465
1466        if (retval != MBX_BUSY && retval != MBX_SUCCESS) {
1467                mempool_free(pmboxq, phba->mbox_mem_pool);
1468                return -ENXIO;
1469        }
1470        phba->hba_flag |= HBA_HBEAT_INP;
1471
1472        return 0;
1473}
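
/* Editor's note on the ownership rule visible above: with MBX_NOWAIT, both
 * MBX_SUCCESS (issued) and MBX_BUSY (queued behind an active mailbox) hand
 * pmboxq to the mailbox layer, and lpfc_hb_mbox_cmpl() frees it on
 * completion; only an outright rejection leaves the allocation with this
 * caller, hence the single mempool_free() in the error leg above.
 */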
1474
1475/**
1476 * lpfc_issue_hb_tmo - Signals heartbeat timer to issue mbox command
1477 * @phba: pointer to lpfc hba data structure.
1478 *
1479 * The heartbeat timer fires every 5 seconds. Setting the HBA_HBEAT_TMO
1480 * flag here forces the timeout routine to issue a MBX_HEARTBEAT mbox
1481 * command regardless of the value of lpfc_enable_hba_heartbeat.
1482 * If lpfc_enable_hba_heartbeat is set, the timeout routine always tries
1483 * to issue a MBX_HEARTBEAT mbox command anyway, so the flag is not set.
1484 **/
1485void
1486lpfc_issue_hb_tmo(struct lpfc_hba *phba)
1487{
1488        if (phba->cfg_enable_hba_heartbeat)
1489                return;
1490        phba->hba_flag |= HBA_HBEAT_TMO;
1491}
1492
1493/**
1494 * lpfc_hb_timeout_handler - The HBA-timer timeout handler
1495 * @phba: pointer to lpfc hba data structure.
1496 *
1497 * This is the actual HBA-timer timeout handler, invoked by the worker
1498 * thread whenever the HBA timer fires and an HBA-timeout event is posted.
1499 * This handler performs any periodic operations needed for the device. If
1500 * such a periodic event has already been attended to, either in the interrupt
1501 * handler or by processing slow-ring or fast-ring events within the HBA-timer
1502 * timeout window (LPFC_HB_MBOX_INTERVAL), this handler simply resets
1503 * the timer for the next timeout period. If the lpfc heart-beat mailbox
1504 * command is configured and there is no heart-beat mailbox command
1505 * outstanding, a heart-beat mailbox is issued and the timer is set properly.
1506 * Otherwise, if a heart-beat mailbox command has been outstanding, the HBA
1507 * is taken offline.
1508 **/
1509void
1510lpfc_hb_timeout_handler(struct lpfc_hba *phba)
1511{
1512        struct lpfc_vport **vports;
1513        struct lpfc_dmabuf *buf_ptr;
1514        int retval = 0;
1515        int i, tmo;
1516        struct lpfc_sli *psli = &phba->sli;
1517        LIST_HEAD(completions);
1518
1519        if (phba->cfg_xri_rebalancing) {
1520                /* Multi-XRI pools handler */
1521                lpfc_hb_mxp_handler(phba);
1522        }
1523
1524        vports = lpfc_create_vport_work_array(phba);
1525        if (vports != NULL)
1526                for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
1527                        lpfc_rcv_seq_check_edtov(vports[i]);
1528                        lpfc_fdmi_change_check(vports[i]);
1529                }
1530        lpfc_destroy_vport_work_array(phba, vports);
1531
1532        if ((phba->link_state == LPFC_HBA_ERROR) ||
1533                (phba->pport->load_flag & FC_UNLOADING) ||
1534                (phba->pport->fc_flag & FC_OFFLINE_MODE))
1535                return;
1536
1537        if (phba->elsbuf_cnt &&
1538                (phba->elsbuf_cnt == phba->elsbuf_prev_cnt)) {
1539                spin_lock_irq(&phba->hbalock);
1540                list_splice_init(&phba->elsbuf, &completions);
1541                phba->elsbuf_cnt = 0;
1542                phba->elsbuf_prev_cnt = 0;
1543                spin_unlock_irq(&phba->hbalock);
1544
1545                while (!list_empty(&completions)) {
1546                        list_remove_head(&completions, buf_ptr,
1547                                struct lpfc_dmabuf, list);
1548                        lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
1549                        kfree(buf_ptr);
1550                }
1551        }
1552        phba->elsbuf_prev_cnt = phba->elsbuf_cnt;
1553
1554        /* If there is no heart beat outstanding, issue a heartbeat command */
1555        if (phba->cfg_enable_hba_heartbeat) {
1556                /* If IOs are completing, no need to issue a MBX_HEARTBEAT */
1557                spin_lock_irq(&phba->pport->work_port_lock);
1558                if (time_after(phba->last_completion_time +
1559                                msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL),
1560                                jiffies)) {
1561                        spin_unlock_irq(&phba->pport->work_port_lock);
1562                        if (phba->hba_flag & HBA_HBEAT_INP)
1563                                tmo = (1000 * LPFC_HB_MBOX_TIMEOUT);
1564                        else
1565                                tmo = (1000 * LPFC_HB_MBOX_INTERVAL);
1566                        goto out;
1567                }
1568                spin_unlock_irq(&phba->pport->work_port_lock);
1569
1570                /* Check if a MBX_HEARTBEAT is already in progress */
1571                if (phba->hba_flag & HBA_HBEAT_INP) {
1572                        /*
1573                         * If the heart beat timeout is called with HBA_HBEAT_INP
1574                         * set, we need to give the hb mailbox cmd a chance to
1575                         * complete or time out.
1576                         */
1577                        lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
1578                                "0459 Adapter heartbeat still outstanding: "
1579                                "last compl time was %d ms.\n",
1580                                jiffies_to_msecs(jiffies
1581                                         - phba->last_completion_time));
1582                        tmo = (1000 * LPFC_HB_MBOX_TIMEOUT);
1583                } else {
1584                        if ((!(psli->sli_flag & LPFC_SLI_MBOX_ACTIVE)) &&
1585                                (list_empty(&psli->mboxq))) {
1587                                retval = lpfc_issue_hb_mbox(phba);
1588                                if (retval) {
1589                                        tmo = (1000 * LPFC_HB_MBOX_INTERVAL);
1590                                        goto out;
1591                                }
1592                                phba->skipped_hb = 0;
1593                        } else if (time_before_eq(phba->last_completion_time,
1594                                        phba->skipped_hb)) {
1595                                lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
1596                                        "2857 Last completion time not "
1597                                        "updated in %d ms\n",
1598                                        jiffies_to_msecs(jiffies
1599                                                 - phba->last_completion_time));
1600                        } else
1601                                phba->skipped_hb = jiffies;
1602
1603                        tmo = (1000 * LPFC_HB_MBOX_TIMEOUT);
1604                        goto out;
1605                }
1606        } else {
1607                /* Check to see if we want to force a MBX_HEARTBEAT */
1608                if (phba->hba_flag & HBA_HBEAT_TMO) {
1609                        retval = lpfc_issue_hb_mbox(phba);
1610                        if (retval)
1611                                tmo = (1000 * LPFC_HB_MBOX_INTERVAL);
1612                        else
1613                                tmo = (1000 * LPFC_HB_MBOX_TIMEOUT);
1614                        goto out;
1615                }
1616                tmo = (1000 * LPFC_HB_MBOX_INTERVAL);
1617        }
1618out:
1619        mod_timer(&phba->hb_tmofunc, jiffies + msecs_to_jiffies(tmo));
1620}
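
/* Editor's recap (no new logic) of the re-arm choices made above:
 *   - I/O completed within the last interval: skip the heartbeat and re-arm
 *     with LPFC_HB_MBOX_TIMEOUT if one is in flight, else the interval;
 *   - heartbeat already in flight: log 0459 and wait out the timeout;
 *   - mailbox layer idle: issue a heartbeat and wait LPFC_HB_MBOX_TIMEOUT;
 *   - heartbeat disabled: issue one only when HBA_HBEAT_TMO was forced.
 */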
1621
1622/**
1623 * lpfc_offline_eratt - Bring lpfc offline on hardware error attention
1624 * @phba: pointer to lpfc hba data structure.
1625 *
1626 * This routine is called to bring the HBA offline when HBA hardware error
1627 * other than Port Error 6 has been detected.
1628 **/
1629static void
1630lpfc_offline_eratt(struct lpfc_hba *phba)
1631{
1632        struct lpfc_sli   *psli = &phba->sli;
1633
1634        spin_lock_irq(&phba->hbalock);
1635        psli->sli_flag &= ~LPFC_SLI_ACTIVE;
1636        spin_unlock_irq(&phba->hbalock);
1637        lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
1638
1639        lpfc_offline(phba);
1640        lpfc_reset_barrier(phba);
1641        spin_lock_irq(&phba->hbalock);
1642        lpfc_sli_brdreset(phba);
1643        spin_unlock_irq(&phba->hbalock);
1644        lpfc_hba_down_post(phba);
1645        lpfc_sli_brdready(phba, HS_MBRDY);
1646        lpfc_unblock_mgmt_io(phba);
1647        phba->link_state = LPFC_HBA_ERROR;
1649}
1650
1651/**
1652 * lpfc_sli4_offline_eratt - Bring lpfc offline on SLI4 hardware error attention
1653 * @phba: pointer to lpfc hba data structure.
1654 *
1655 * This routine is called to bring a SLI4 HBA offline when HBA hardware error
1656 * other than Port Error 6 has been detected.
1657 **/
1658void
1659lpfc_sli4_offline_eratt(struct lpfc_hba *phba)
1660{
1661        spin_lock_irq(&phba->hbalock);
1662        if (phba->link_state == LPFC_HBA_ERROR &&
1663                test_bit(HBA_PCI_ERR, &phba->bit_flags)) {
1664                spin_unlock_irq(&phba->hbalock);
1665                return;
1666        }
1667        phba->link_state = LPFC_HBA_ERROR;
1668        spin_unlock_irq(&phba->hbalock);
1669
1670        lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
1671        lpfc_sli_flush_io_rings(phba);
1672        lpfc_offline(phba);
1673        lpfc_hba_down_post(phba);
1674        lpfc_unblock_mgmt_io(phba);
1675}
1676
1677/**
1678 * lpfc_handle_deferred_eratt - The HBA hardware deferred error handler
1679 * @phba: pointer to lpfc hba data structure.
1680 *
1681 * This routine is invoked to handle the deferred HBA hardware error
1682 * conditions. This type of error is indicated by the HBA setting ER1
1683 * and another ER bit in the host status register. The driver will
1684 * wait until the ER1 bit clears before handling the error condition.
1685 **/
1686static void
1687lpfc_handle_deferred_eratt(struct lpfc_hba *phba)
1688{
1689        uint32_t old_host_status = phba->work_hs;
1690        struct lpfc_sli *psli = &phba->sli;
1691
1692        /* If the pci channel is offline, ignore possible errors,
1693         * since we cannot communicate with the pci card anyway.
1694         */
1695        if (pci_channel_offline(phba->pcidev)) {
1696                spin_lock_irq(&phba->hbalock);
1697                phba->hba_flag &= ~DEFER_ERATT;
1698                spin_unlock_irq(&phba->hbalock);
1699                return;
1700        }
1701
1702        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1703                        "0479 Deferred Adapter Hardware Error "
1704                        "Data: x%x x%x x%x\n",
1705                        phba->work_hs, phba->work_status[0],
1706                        phba->work_status[1]);
1707
1708        spin_lock_irq(&phba->hbalock);
1709        psli->sli_flag &= ~LPFC_SLI_ACTIVE;
1710        spin_unlock_irq(&phba->hbalock);
1711
1713        /*
1714         * Firmware stops when it triggers erratt, which may cause I/Os to
1715         * be dropped by the firmware. Error out the iocbs (I/Os) on the
1716         * txcmplq and let the SCSI layer retry them after re-establishing link.
1717         */
1718        lpfc_sli_abort_fcp_rings(phba);
1719
1720        /*
1721         * There was a firmware error. Take the hba offline and then
1722         * attempt to restart it.
1723         */
1724        lpfc_offline_prep(phba, LPFC_MBX_WAIT);
1725        lpfc_offline(phba);
1726
1727        /* Wait for the ER1 bit to clear. */
1728        while (phba->work_hs & HS_FFER1) {
1729                msleep(100);
1730                if (lpfc_readl(phba->HSregaddr, &phba->work_hs)) {
1731                        phba->work_hs = UNPLUG_ERR;
1732                        break;
1733                }
1734                /* If driver is unloading let the worker thread continue */
1735                if (phba->pport->load_flag & FC_UNLOADING) {
1736                        phba->work_hs = 0;
1737                        break;
1738                }
1739        }
1740
1741        /*
1742         * This is to protect against a race condition in which the
1743         * first write to the host attention register clears the
1744         * host status register.
1745         */
1746        if ((!phba->work_hs) && (!(phba->pport->load_flag & FC_UNLOADING)))
1747                phba->work_hs = old_host_status & ~HS_FFER1;
1748
1749        spin_lock_irq(&phba->hbalock);
1750        phba->hba_flag &= ~DEFER_ERATT;
1751        spin_unlock_irq(&phba->hbalock);
1752        phba->work_status[0] = readl(phba->MBslimaddr + 0xa8);
1753        phba->work_status[1] = readl(phba->MBslimaddr + 0xac);
1754}
1755
1756static void
1757lpfc_board_errevt_to_mgmt(struct lpfc_hba *phba)
1758{
1759        struct lpfc_board_event_header board_event;
1760        struct Scsi_Host *shost;
1761
1762        board_event.event_type = FC_REG_BOARD_EVENT;
1763        board_event.subcategory = LPFC_EVENT_PORTINTERR;
1764        shost = lpfc_shost_from_vport(phba->pport);
1765        fc_host_post_vendor_event(shost, fc_get_event_number(),
1766                                  sizeof(board_event),
1767                                  (char *) &board_event,
1768                                  LPFC_NL_VENDOR_ID);
1769}
1770
1771/**
1772 * lpfc_handle_eratt_s3 - The SLI3 HBA hardware error handler
1773 * @phba: pointer to lpfc hba data structure.
1774 *
1775 * This routine is invoked to handle the following HBA hardware error
1776 * conditions:
1777 * 1 - HBA error attention interrupt
1778 * 2 - DMA ring index out of range
1779 * 3 - Mailbox command came back as unknown
1780 **/
1781static void
1782lpfc_handle_eratt_s3(struct lpfc_hba *phba)
1783{
1784        struct lpfc_vport *vport = phba->pport;
1785        struct lpfc_sli   *psli = &phba->sli;
1786        uint32_t event_data;
1787        unsigned long temperature;
1788        struct temp_event temp_event_data;
1789        struct Scsi_Host  *shost;
1790
1791        /* If the pci channel is offline, ignore possible errors,
1792         * since we cannot communicate with the pci card anyway.
1793         */
1794        if (pci_channel_offline(phba->pcidev)) {
1795                spin_lock_irq(&phba->hbalock);
1796                phba->hba_flag &= ~DEFER_ERATT;
1797                spin_unlock_irq(&phba->hbalock);
1798                return;
1799        }
1800
1801        /* If resets are disabled then leave the HBA alone and return */
1802        if (!phba->cfg_enable_hba_reset)
1803                return;
1804
1805        /* Send an internal error event to mgmt application */
1806        lpfc_board_errevt_to_mgmt(phba);
1807
1808        if (phba->hba_flag & DEFER_ERATT)
1809                lpfc_handle_deferred_eratt(phba);
1810
1811        if ((phba->work_hs & HS_FFER6) || (phba->work_hs & HS_FFER8)) {
1812                if (phba->work_hs & HS_FFER6)
1813                        /* Re-establishing Link */
1814                        lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
1815                                        "1301 Re-establishing Link "
1816                                        "Data: x%x x%x x%x\n",
1817                                        phba->work_hs, phba->work_status[0],
1818                                        phba->work_status[1]);
1819                if (phba->work_hs & HS_FFER8)
1820                        /* Device Zeroization */
1821                        lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
1822                                        "2861 Host Authentication device "
1823                                        "zeroization Data:x%x x%x x%x\n",
1824                                        phba->work_hs, phba->work_status[0],
1825                                        phba->work_status[1]);
1826
1827                spin_lock_irq(&phba->hbalock);
1828                psli->sli_flag &= ~LPFC_SLI_ACTIVE;
1829                spin_unlock_irq(&phba->hbalock);
1830
1831                /*
1832                 * Firmware stops when it triggers erratt with HS_FFER6,
1833                 * which may cause I/Os to be dropped by the firmware.
1834                 * Error out the iocbs (I/Os) on the txcmplq and let the
1835                 * SCSI layer retry them after re-establishing link.
1836                 */
1837                lpfc_sli_abort_fcp_rings(phba);
1838
1839                /*
1840                 * There was a firmware error.  Take the hba offline and then
1841                 * attempt to restart it.
1842                 */
1843                lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
1844                lpfc_offline(phba);
1845                lpfc_sli_brdrestart(phba);
1846                if (lpfc_online(phba) == 0) {   /* Initialize the HBA */
1847                        lpfc_unblock_mgmt_io(phba);
1848                        return;
1849                }
1850                lpfc_unblock_mgmt_io(phba);
1851        } else if (phba->work_hs & HS_CRIT_TEMP) {
1852                temperature = readl(phba->MBslimaddr + TEMPERATURE_OFFSET);
1853                temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
1854                temp_event_data.event_code = LPFC_CRIT_TEMP;
1855                temp_event_data.data = (uint32_t)temperature;
1856
1857                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1858                                "0406 Adapter maximum temperature exceeded "
1859                                "(%ld), taking this port offline "
1860                                "Data: x%x x%x x%x\n",
1861                                temperature, phba->work_hs,
1862                                phba->work_status[0], phba->work_status[1]);
1863
1864                shost = lpfc_shost_from_vport(phba->pport);
1865                fc_host_post_vendor_event(shost, fc_get_event_number(),
1866                                          sizeof(temp_event_data),
1867                                          (char *) &temp_event_data,
1868                                          SCSI_NL_VID_TYPE_PCI
1869                                          | PCI_VENDOR_ID_EMULEX);
1870
1871                spin_lock_irq(&phba->hbalock);
1872                phba->over_temp_state = HBA_OVER_TEMP;
1873                spin_unlock_irq(&phba->hbalock);
1874                lpfc_offline_eratt(phba);
1875
1876        } else {
1877                /* The if clauses above force this code path when the status
1878                 * failure is a value other than FFER6. Do not take the port
1879                 * offline twice. This is the adapter hardware error path.
1880                 */
1881                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1882                                "0457 Adapter Hardware Error "
1883                                "Data: x%x x%x x%x\n",
1884                                phba->work_hs,
1885                                phba->work_status[0], phba->work_status[1]);
1886
1887                event_data = FC_REG_DUMP_EVENT;
1888                shost = lpfc_shost_from_vport(vport);
1889                fc_host_post_vendor_event(shost, fc_get_event_number(),
1890                                sizeof(event_data), (char *) &event_data,
1891                                SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);
1892
1893                lpfc_offline_eratt(phba);
1894        }
1896}
1897
1898/**
1899 * lpfc_sli4_port_sta_fn_reset - The SLI4 function reset due to port status reg
1900 * @phba: pointer to lpfc hba data structure.
1901 * @mbx_action: flag for mailbox shutdown action.
1902 * @en_rn_msg: send reset/port recovery message.
1903 * This routine is invoked to perform an SLI4 port PCI function reset in
1904 * response to port status register polling attention. It waits for port
1905 * status register (ERR, RDY, RN) bits before proceeding with function reset.
1906 * During this process, interrupt vectors are freed and later requested
1907 * for handling possible port resource change.
1908 **/
1909static int
1910lpfc_sli4_port_sta_fn_reset(struct lpfc_hba *phba, int mbx_action,
1911                            bool en_rn_msg)
1912{
1913        int rc;
1914        uint32_t intr_mode;
1915        LPFC_MBOXQ_t *mboxq;
1916
1917        if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) >=
1918            LPFC_SLI_INTF_IF_TYPE_2) {
1919                /*
1920                 * On error status condition, the driver needs to wait for
1921                 * the port to be ready before performing reset.
1922                 */
1923                rc = lpfc_sli4_pdev_status_reg_wait(phba);
1924                if (rc)
1925                        return rc;
1926        }
1927
1928        /* need reset: attempt for port recovery */
1929        if (en_rn_msg)
1930                lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
1931                                "2887 Reset Needed: Attempting Port "
1932                                "Recovery...\n");
1933
1934        /* If we are in no-wait mode, the HBA has been reset and is not
1935         * functional, thus we should clear
1936         * (LPFC_SLI_ACTIVE | LPFC_SLI_MBOX_ACTIVE) flags.
1937         */
1938        if (mbx_action == LPFC_MBX_NO_WAIT) {
1939                spin_lock_irq(&phba->hbalock);
1940                phba->sli.sli_flag &= ~LPFC_SLI_ACTIVE;
1941                if (phba->sli.mbox_active) {
1942                        mboxq = phba->sli.mbox_active;
1943                        mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED;
1944                        __lpfc_mbox_cmpl_put(phba, mboxq);
1945                        phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
1946                        phba->sli.mbox_active = NULL;
1947                }
1948                spin_unlock_irq(&phba->hbalock);
1949        }
1950
1951        lpfc_offline_prep(phba, mbx_action);
1952        lpfc_sli_flush_io_rings(phba);
1953        lpfc_offline(phba);
1954        /* release interrupt for possible resource change */
1955        lpfc_sli4_disable_intr(phba);
1956        rc = lpfc_sli_brdrestart(phba);
1957        if (rc) {
1958                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1959                                "6309 Failed to restart board\n");
1960                return rc;
1961        }
1962        /* request and enable interrupt */
1963        intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
1964        if (intr_mode == LPFC_INTR_ERROR) {
1965                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1966                                "3175 Failed to enable interrupt\n");
1967                return -EIO;
1968        }
1969        phba->intr_mode = intr_mode;
1970        rc = lpfc_online(phba);
1971        if (rc == 0)
1972                lpfc_unblock_mgmt_io(phba);
1973
1974        return rc;
1975}
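
/* Editor's recap (no new logic) of the recovery sequence above: wait for the
 * port to report ready (if_type >= 2 only), fail back any active mailbox in
 * the no-wait case, offline-prep and flush the IO rings, take the port
 * offline, drop and re-request interrupt vectors around the board restart
 * (port resources may change across reset), then bring the port back online.
 */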
1976
1977/**
1978 * lpfc_handle_eratt_s4 - The SLI4 HBA hardware error handler
1979 * @phba: pointer to lpfc hba data structure.
1980 *
1981 * This routine is invoked to handle the SLI4 HBA hardware error attention
1982 * conditions.
1983 **/
1984static void
1985lpfc_handle_eratt_s4(struct lpfc_hba *phba)
1986{
1987        struct lpfc_vport *vport = phba->pport;
1988        uint32_t event_data;
1989        struct Scsi_Host *shost;
1990        uint32_t if_type;
1991        struct lpfc_register portstat_reg = {0};
1992        uint32_t reg_err1, reg_err2;
1993        uint32_t uerrlo_reg, uemasklo_reg;
1994        uint32_t smphr_port_status = 0, pci_rd_rc1, pci_rd_rc2;
1995        bool en_rn_msg = true;
1996        struct temp_event temp_event_data;
1997        struct lpfc_register portsmphr_reg;
1998        int rc, i;
1999
2000        /* If the pci channel is offline, ignore possible errors, since
2001         * we cannot communicate with the pci card anyway.
2002         */
2003        if (pci_channel_offline(phba->pcidev)) {
2004                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2005                                "3166 pci channel is offline\n");
2006                lpfc_sli_flush_io_rings(phba);
2007                return;
2008        }
2009
2010        memset(&portsmphr_reg, 0, sizeof(portsmphr_reg));
2011        if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
2012        switch (if_type) {
2013        case LPFC_SLI_INTF_IF_TYPE_0:
2014                pci_rd_rc1 = lpfc_readl(
2015                                phba->sli4_hba.u.if_type0.UERRLOregaddr,
2016                                &uerrlo_reg);
2017                pci_rd_rc2 = lpfc_readl(
2018                                phba->sli4_hba.u.if_type0.UEMASKLOregaddr,
2019                                &uemasklo_reg);
2020                /* consider PCI bus read error as pci_channel_offline */
2021                if (pci_rd_rc1 == -EIO && pci_rd_rc2 == -EIO)
2022                        return;
2023                if (!(phba->hba_flag & HBA_RECOVERABLE_UE)) {
2024                        lpfc_sli4_offline_eratt(phba);
2025                        return;
2026                }
2027                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2028                                "7623 Checking UE recoverable\n");
2029
2030                for (i = 0; i < phba->sli4_hba.ue_to_sr / 1000; i++) {
2031                        if (lpfc_readl(phba->sli4_hba.PSMPHRregaddr,
2032                                       &portsmphr_reg.word0))
2033                                continue;
2034
2035                        smphr_port_status = bf_get(lpfc_port_smphr_port_status,
2036                                                   &portsmphr_reg);
2037                        if ((smphr_port_status & LPFC_PORT_SEM_MASK) ==
2038                            LPFC_PORT_SEM_UE_RECOVERABLE)
2039                                break;
2040                        /*Sleep for 1Sec, before checking SEMAPHORE */
2041                        msleep(1000);
2042                }
2043
2044                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2045                                "4827 smphr_port_status x%x : Waited %dSec\n",
2046                                smphr_port_status, i);
2047
2048                /* Recoverable UE, reset the HBA device */
2049                if ((smphr_port_status & LPFC_PORT_SEM_MASK) ==
2050                    LPFC_PORT_SEM_UE_RECOVERABLE) {
2051                        for (i = 0; i < 20; i++) {
2052                                msleep(1000);
2053                                if (!lpfc_readl(phba->sli4_hba.PSMPHRregaddr,
2054                                    &portsmphr_reg.word0) &&
2055                                    (LPFC_POST_STAGE_PORT_READY ==
2056                                     bf_get(lpfc_port_smphr_port_status,
2057                                     &portsmphr_reg))) {
2058                                        rc = lpfc_sli4_port_sta_fn_reset(phba,
2059                                                LPFC_MBX_NO_WAIT, en_rn_msg);
2060                                        if (rc == 0)
2061                                                return;
2062                                        lpfc_printf_log(phba, KERN_ERR,
2063                                                LOG_TRACE_EVENT,
2064                                                "4215 Failed to recover UE\n");
2065                                        break;
2066                                }
2067                        }
2068                }
2069                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2070                                "7624 Firmware not ready: Failing UE recovery,"
2071                                " waited %dSec\n", i);
2072                phba->link_state = LPFC_HBA_ERROR;
2073                break;
2074
2075        case LPFC_SLI_INTF_IF_TYPE_2:
2076        case LPFC_SLI_INTF_IF_TYPE_6:
2077                pci_rd_rc1 = lpfc_readl(
2078                                phba->sli4_hba.u.if_type2.STATUSregaddr,
2079                                &portstat_reg.word0);
2080                /* consider PCI bus read error as pci_channel_offline */
2081                if (pci_rd_rc1 == -EIO) {
2082                        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2083                                "3151 PCI bus read access failure: x%x\n",
2084                                readl(phba->sli4_hba.u.if_type2.STATUSregaddr));
2085                        lpfc_sli4_offline_eratt(phba);
2086                        return;
2087                }
2088                reg_err1 = readl(phba->sli4_hba.u.if_type2.ERR1regaddr);
2089                reg_err2 = readl(phba->sli4_hba.u.if_type2.ERR2regaddr);
2090                if (bf_get(lpfc_sliport_status_oti, &portstat_reg)) {
2091                        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2092                                        "2889 Port Overtemperature event, "
2093                                        "taking port offline Data: x%x x%x\n",
2094                                        reg_err1, reg_err2);
2095
2096                        phba->sfp_alarm |= LPFC_TRANSGRESSION_HIGH_TEMPERATURE;
2097                        temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
2098                        temp_event_data.event_code = LPFC_CRIT_TEMP;
2099                        temp_event_data.data = 0xFFFFFFFF;
2100
2101                        shost = lpfc_shost_from_vport(phba->pport);
2102                        fc_host_post_vendor_event(shost, fc_get_event_number(),
2103                                                  sizeof(temp_event_data),
2104                                                  (char *)&temp_event_data,
2105                                                  SCSI_NL_VID_TYPE_PCI
2106                                                  | PCI_VENDOR_ID_EMULEX);
2107
2108                        spin_lock_irq(&phba->hbalock);
2109                        phba->over_temp_state = HBA_OVER_TEMP;
2110                        spin_unlock_irq(&phba->hbalock);
2111                        lpfc_sli4_offline_eratt(phba);
2112                        return;
2113                }
2114                if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
2115                    reg_err2 == SLIPORT_ERR2_REG_FW_RESTART) {
2116                        lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2117                                        "3143 Port Down: Firmware Update "
2118                                        "Detected\n");
2119                        en_rn_msg = false;
2120                } else if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
2121                         reg_err2 == SLIPORT_ERR2_REG_FORCED_DUMP)
2122                        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2123                                        "3144 Port Down: Debug Dump\n");
2124                else if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
2125                         reg_err2 == SLIPORT_ERR2_REG_FUNC_PROVISON)
2126                        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2127                                        "3145 Port Down: Provisioning\n");
2128
2129                /* If resets are disabled then leave the HBA alone and return */
2130                if (!phba->cfg_enable_hba_reset)
2131                        return;
2132
2133                /* Check port status register for function reset */
2134                rc = lpfc_sli4_port_sta_fn_reset(phba, LPFC_MBX_NO_WAIT,
2135                                en_rn_msg);
2136                if (rc == 0) {
2137                        /* don't report event on forced debug dump */
2138                        if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
2139                            reg_err2 == SLIPORT_ERR2_REG_FORCED_DUMP)
2140                                return;
2141                        break;
2143                }
2144                /* fall through when unable to recover */
2145                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2146                                "3152 Unrecoverable error\n");
2147                phba->link_state = LPFC_HBA_ERROR;
2148                break;
2149        case LPFC_SLI_INTF_IF_TYPE_1:
2150        default:
2151                break;
2152        }
2153        lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
2154                        "3123 Report dump event to upper layer\n");
2155        /* Send an internal error event to mgmt application */
2156        lpfc_board_errevt_to_mgmt(phba);
2157
2158        event_data = FC_REG_DUMP_EVENT;
2159        shost = lpfc_shost_from_vport(vport);
2160        fc_host_post_vendor_event(shost, fc_get_event_number(),
2161                                  sizeof(event_data), (char *) &event_data,
2162                                  SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);
2163}
2164
2165/**
2166 * lpfc_handle_eratt - Wrapper func for handling hba error attention
2167 * @phba: pointer to lpfc HBA data structure.
2168 *
2169 * This routine wraps the actual SLI3 or SLI4 hba error attention handling
2170 * routine via the API jump table function pointer in the lpfc_hba struct.
2175 **/
2176void
2177lpfc_handle_eratt(struct lpfc_hba *phba)
2178{
2179        (*phba->lpfc_handle_eratt)(phba);
2180}
2181
2182/**
2183 * lpfc_handle_latt - The HBA link event handler
2184 * @phba: pointer to lpfc hba data structure.
2185 *
2186 * This routine is invoked from the worker thread to handle a HBA host
2187 * attention link event. SLI3 only.
2188 **/
2189void
2190lpfc_handle_latt(struct lpfc_hba *phba)
2191{
2192        struct lpfc_vport *vport = phba->pport;
2193        struct lpfc_sli   *psli = &phba->sli;
2194        LPFC_MBOXQ_t *pmb;
2195        volatile uint32_t control;
2196        int rc = 0;
2197
2198        pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
2199        if (!pmb) {
2200                rc = 1;
2201                goto lpfc_handle_latt_err_exit;
2202        }
2203
2204        rc = lpfc_mbox_rsrc_prep(phba, pmb);
2205        if (rc) {
2206                rc = 2;
2207                mempool_free(pmb, phba->mbox_mem_pool);
2208                goto lpfc_handle_latt_err_exit;
2209        }
2210
2211        /* Cleanup any outstanding ELS commands */
2212        lpfc_els_flush_all_cmd(phba);
2213        psli->slistat.link_event++;
2214        lpfc_read_topology(phba, pmb, (struct lpfc_dmabuf *)pmb->ctx_buf);
2215        pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology;
2216        pmb->vport = vport;
2217        /* Block ELS IOCBs until we have processed this mbox command */
2218        phba->sli.sli3_ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT;
2219        rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
2220        if (rc == MBX_NOT_FINISHED) {
2221                rc = 4;
2222                goto lpfc_handle_latt_free_mbuf;
2223        }
2224
2225        /* Clear Link Attention in HA REG */
2226        spin_lock_irq(&phba->hbalock);
2227        writel(HA_LATT, phba->HAregaddr);
2228        readl(phba->HAregaddr); /* flush */
2229        spin_unlock_irq(&phba->hbalock);
2230
2231        return;
2232
2233lpfc_handle_latt_free_mbuf:
2234        phba->sli.sli3_ring[LPFC_ELS_RING].flag &= ~LPFC_STOP_IOCB_EVENT;
2235        lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED);
2236lpfc_handle_latt_err_exit:
2237        /* Enable Link attention interrupts */
2238        spin_lock_irq(&phba->hbalock);
2239        psli->sli_flag |= LPFC_PROCESS_LA;
2240        control = readl(phba->HCregaddr);
2241        control |= HC_LAINT_ENA;
2242        writel(control, phba->HCregaddr);
2243        readl(phba->HCregaddr); /* flush */
2244
2245        /* Clear Link Attention in HA REG */
2246        writel(HA_LATT, phba->HAregaddr);
2247        readl(phba->HAregaddr); /* flush */
2248        spin_unlock_irq(&phba->hbalock);
2249        lpfc_linkdown(phba);
2250        phba->link_state = LPFC_HBA_ERROR;
2251
2252        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2253                        "0300 LATT: Cannot issue READ_LA: Data:%d\n", rc);
2256}
2257
2258/**
2259 * lpfc_parse_vpd - Parse VPD (Vital Product Data)
2260 * @phba: pointer to lpfc hba data structure.
2261 * @vpd: pointer to the vital product data.
2262 * @len: length of the vital product data in bytes.
2263 *
2264 * This routine parses the Vital Product Data (VPD). The VPD is treated as
2265 * an array of characters. In this routine, the ModelName, ProgramType, and
2266 * ModelDesc, etc. fields of the phba data structure will be populated.
2267 *
2268 * Return codes
2269 *   0 - pointer to the VPD passed in is NULL
2270 *   1 - success
2271 **/
2272int
2273lpfc_parse_vpd(struct lpfc_hba *phba, uint8_t *vpd, int len)
2274{
2275        uint8_t lenlo, lenhi;
2276        int Length;
2277        int i, j;
2278        int finished = 0;
2279        int index = 0;
2280
2281        if (!vpd)
2282                return 0;
2283
2284        /* Vital Product */
2285        lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
2286                        "0455 Vital Product Data: x%x x%x x%x x%x\n",
2287                        (uint32_t) vpd[0], (uint32_t) vpd[1], (uint32_t) vpd[2],
2288                        (uint32_t) vpd[3]);
2289        while (!finished && (index < (len - 4))) {
2290                switch (vpd[index]) {
2291                case 0x82:
2292                case 0x91:
2293                        index += 1;
2294                        lenlo = vpd[index];
2295                        index += 1;
2296                        lenhi = vpd[index];
2297                        index += 1;
2298                        i = ((((unsigned short)lenhi) << 8) + lenlo);
2299                        index += i;
2300                        break;
2301                case 0x90:
2302                        index += 1;
2303                        lenlo = vpd[index];
2304                        index += 1;
2305                        lenhi = vpd[index];
2306                        index += 1;
2307                        Length = ((((unsigned short)lenhi) << 8) + lenlo);
2308                        if (Length > len - index)
2309                                Length = len - index;
2310                        while (Length > 0) {
2311                                /* Look for Serial Number */
2312                                if ((vpd[index] == 'S') && (vpd[index+1] == 'N')) {
2313                                        index += 2;
2314                                        i = vpd[index];
2315                                        index += 1;
2316                                        j = 0;
2317                                        Length -= (3+i);
2318                                        while (i--) {
2319                                                phba->SerialNumber[j++] = vpd[index++];
2320                                                if (j == 31)
2321                                                        break;
2322                                        }
2323                                        phba->SerialNumber[j] = 0;
2324                                        continue;
2325                                }
2326                                else if ((vpd[index] == 'V') && (vpd[index+1] == '1')) {
2327                                        phba->vpd_flag |= VPD_MODEL_DESC;
2328                                        index += 2;
2329                                        i = vpd[index];
2330                                        index += 1;
2331                                        j = 0;
2332                                        Length -= (3+i);
2333                                        while (i--) {
2334                                                phba->ModelDesc[j++] = vpd[index++];
2335                                                if (j == 255)
2336                                                        break;
2337                                        }
2338                                        phba->ModelDesc[j] = 0;
2339                                        continue;
2340                                }
2341                                else if ((vpd[index] == 'V') && (vpd[index+1] == '2')) {
2342                                        phba->vpd_flag |= VPD_MODEL_NAME;
2343                                        index += 2;
2344                                        i = vpd[index];
2345                                        index += 1;
2346                                        j = 0;
2347                                        Length -= (3+i);
2348                                        while (i--) {
2349                                                phba->ModelName[j++] = vpd[index++];
2350                                                if (j == 79)
2351                                                        break;
2352                                        }
2353                                        phba->ModelName[j] = 0;
2354                                        continue;
2355                                }
2356                                else if ((vpd[index] == 'V') && (vpd[index+1] == '3')) {
2357                                        phba->vpd_flag |= VPD_PROGRAM_TYPE;
2358                                        index += 2;
2359                                        i = vpd[index];
2360                                        index += 1;
2361                                        j = 0;
2362                                        Length -= (3+i);
2363                                        while (i--) {
2364                                                phba->ProgramType[j++] = vpd[index++];
2365                                                if (j == 255)
2366                                                        break;
2367                                        }
2368                                        phba->ProgramType[j] = 0;
2369                                        continue;
2370                                }
2371                                else if ((vpd[index] == 'V') && (vpd[index+1] == '4')) {
2372                                        phba->vpd_flag |= VPD_PORT;
2373                                        index += 2;
2374                                        i = vpd[index];
2375                                        index += 1;
2376                                        j = 0;
2377                                        Length -= (3+i);
2378                                        while (i--) {
2379                                                if ((phba->sli_rev == LPFC_SLI_REV4) &&
2380                                                    (phba->sli4_hba.pport_name_sta ==
2381                                                     LPFC_SLI4_PPNAME_GET)) {
2382                                                        j++;
2383                                                        index++;
2384                                                } else
2385                                                        phba->Port[j++] = vpd[index++];
2386                                                if (j == 19)
2387                                                        break;
2388                                        }
2389                                        if ((phba->sli_rev != LPFC_SLI_REV4) ||
2390                                            (phba->sli4_hba.pport_name_sta ==
2391                                             LPFC_SLI4_PPNAME_NON))
2392                                                phba->Port[j] = 0;
2393                                        continue;
2394                                }
2395                                else {
2396                                        index += 2;
2397                                        i = vpd[index];
2398                                        index += 1;
2399                                        index += i;
2400                                        Length -= (3 + i);
2401                                }
2402                        }
2403                        finished = 0;
2404                        break;
2405                case 0x78:
2406                        finished = 1;
2407                        break;
2408                default:
2409                        index++;
2410                        break;
2411                }
2412        }
2413
2414        return 1;
2415}
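
/* Editor's sketch (not driver code) of the VPD layout walked above, per the
 * PCI VPD resource format: large-resource tags 0x82/0x91/0x90 are followed by
 * a 16-bit little-endian length; tag 0x78 ends the data. Inside the 0x90
 * (read-only) area each field is <2-char keyword><1-byte len><data>, and the
 * parser recognizes SN and V1-V4. The bytes below are illustrative only:
 *
 *   0x90 0x0b 0x00                 read-only area, 11 bytes of fields
 *   'S' 'N' 0x03 'A' 'B' 'C'       -> phba->SerialNumber = "ABC"
 *   'V' '2' 0x02 'L' 'P'           -> phba->ModelName = "LP", VPD_MODEL_NAME
 *   0x78                           end tag, parse finishes
 */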
2416
2417/**
2418 * lpfc_get_atto_model_desc - Retrieve ATTO HBA device model name and description
2419 * @phba: pointer to lpfc hba data structure.
2420 * @mdp: pointer to the data structure to hold the derived model name.
2421 * @descp: pointer to the data structure to hold the derived description.
2422 *
2423 * This routine retrieves HBA's description based on its registered PCI device
2424 * ID. The @descp passed into this function points to an array of 256 chars. It
2425 * shall be returned with the model name, maximum speed, and the host bus type.
2426 * The @mdp passed into this function points to an array of 80 chars. When the
2427 * function returns, the @mdp will be filled with the model name.
2428 **/
2429static void
2430lpfc_get_atto_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
2431{
2432        uint16_t sub_dev_id = phba->pcidev->subsystem_device;
2433        char *model = "<Unknown>";
2434        int tbolt = 0;
2435
2436        switch (sub_dev_id) {
2437        case PCI_DEVICE_ID_CLRY_161E:
2438                model = "161E";
2439                break;
2440        case PCI_DEVICE_ID_CLRY_162E:
2441                model = "162E";
2442                break;
2443        case PCI_DEVICE_ID_CLRY_164E:
2444                model = "164E";
2445                break;
2446        case PCI_DEVICE_ID_CLRY_161P:
2447                model = "161P";
2448                break;
2449        case PCI_DEVICE_ID_CLRY_162P:
2450                model = "162P";
2451                break;
2452        case PCI_DEVICE_ID_CLRY_164P:
2453                model = "164P";
2454                break;
2455        case PCI_DEVICE_ID_CLRY_321E:
2456                model = "321E";
2457                break;
2458        case PCI_DEVICE_ID_CLRY_322E:
2459                model = "322E";
2460                break;
2461        case PCI_DEVICE_ID_CLRY_324E:
2462                model = "324E";
2463                break;
2464        case PCI_DEVICE_ID_CLRY_321P:
2465                model = "321P";
2466                break;
2467        case PCI_DEVICE_ID_CLRY_322P:
2468                model = "322P";
2469                break;
2470        case PCI_DEVICE_ID_CLRY_324P:
2471                model = "324P";
2472                break;
2473        case PCI_DEVICE_ID_TLFC_2XX2:
2474                model = "2XX2";
2475                tbolt = 1;
2476                break;
2477        case PCI_DEVICE_ID_TLFC_3162:
2478                model = "3162";
2479                tbolt = 1;
2480                break;
2481        case PCI_DEVICE_ID_TLFC_3322:
2482                model = "3322";
2483                tbolt = 1;
2484                break;
2485        default:
2486                model = "Unknown";
2487                break;
2488        }
2489
2490        if (mdp && mdp[0] == '\0')
2491                snprintf(mdp, 79, "%s", model);
2492
2493        if (descp && descp[0] == '\0')
2494                snprintf(descp, 255,
2495                         "ATTO %s%s, Fibre Channel Adapter Initiator, Port %s",
2496                         (tbolt) ? "ThunderLink FC " : "Celerity FC-",
2497                         model,
2498                         phba->Port);
2499}
2500
2501/**
2502 * lpfc_get_hba_model_desc - Retrieve HBA device model name and description
2503 * @phba: pointer to lpfc hba data structure.
2504 * @mdp: pointer to the data structure to hold the derived model name.
2505 * @descp: pointer to the data structure to hold the derived description.
2506 *
2507 * This routine retrieves HBA's description based on its registered PCI device
2508 * ID. The @descp passed into this function points to an array of 256 chars. It
2509 * shall be returned with the model name, maximum speed, and the host bus type.
2510 * The @mdp passed into this function points to an array of 80 chars. When the
2511 * function returns, the @mdp will be filled with the model name.
2512 **/
2513static void
2514lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
2515{
2516        lpfc_vpd_t *vp;
2517        uint16_t dev_id = phba->pcidev->device;
2518        int max_speed;
2519        int GE = 0;
2520        int oneConnect = 0; /* default is not a oneConnect */
2521        struct {
2522                char *name;
2523                char *bus;
2524                char *function;
2525        } m = {"<Unknown>", "", ""};
2526
2527        if (mdp && mdp[0] != '\0' &&
2528            descp && descp[0] != '\0')
2529                return;
2530
2531        if (phba->pcidev->vendor == PCI_VENDOR_ID_ATTO) {
2532                lpfc_get_atto_model_desc(phba, mdp, descp);
2533                return;
2534        }
2535
2536        if (phba->lmt & LMT_64Gb)
2537                max_speed = 64;
2538        else if (phba->lmt & LMT_32Gb)
2539                max_speed = 32;
2540        else if (phba->lmt & LMT_16Gb)
2541                max_speed = 16;
2542        else if (phba->lmt & LMT_10Gb)
2543                max_speed = 10;
2544        else if (phba->lmt & LMT_8Gb)
2545                max_speed = 8;
2546        else if (phba->lmt & LMT_4Gb)
2547                max_speed = 4;
2548        else if (phba->lmt & LMT_2Gb)
2549                max_speed = 2;
2550        else if (phba->lmt & LMT_1Gb)
2551                max_speed = 1;
2552        else
2553                max_speed = 0;
2554
2555        vp = &phba->vpd;
2556
2557        switch (dev_id) {
2558        case PCI_DEVICE_ID_FIREFLY:
2559                m = (typeof(m)){"LP6000", "PCI",
2560                                "Obsolete, Unsupported Fibre Channel Adapter"};
2561                break;
2562        case PCI_DEVICE_ID_SUPERFLY:
2563                if (vp->rev.biuRev >= 1 && vp->rev.biuRev <= 3)
2564                        m = (typeof(m)){"LP7000", "PCI", ""};
2565                else
2566                        m = (typeof(m)){"LP7000E", "PCI", ""};
2567                m.function = "Obsolete, Unsupported Fibre Channel Adapter";
2568                break;
2569        case PCI_DEVICE_ID_DRAGONFLY:
2570                m = (typeof(m)){"LP8000", "PCI",
2571                                "Obsolete, Unsupported Fibre Channel Adapter"};
2572                break;
2573        case PCI_DEVICE_ID_CENTAUR:
2574                if (FC_JEDEC_ID(vp->rev.biuRev) == CENTAUR_2G_JEDEC_ID)
2575                        m = (typeof(m)){"LP9002", "PCI", ""};
2576                else
2577                        m = (typeof(m)){"LP9000", "PCI", ""};
2578                m.function = "Obsolete, Unsupported Fibre Channel Adapter";
2579                break;
2580        case PCI_DEVICE_ID_RFLY:
2581                m = (typeof(m)){"LP952", "PCI",
2582                                "Obsolete, Unsupported Fibre Channel Adapter"};
2583                break;
2584        case PCI_DEVICE_ID_PEGASUS:
2585                m = (typeof(m)){"LP9802", "PCI-X",
2586                                "Obsolete, Unsupported Fibre Channel Adapter"};
2587                break;
2588        case PCI_DEVICE_ID_THOR:
2589                m = (typeof(m)){"LP10000", "PCI-X",
2590                                "Obsolete, Unsupported Fibre Channel Adapter"};
2591                break;
2592        case PCI_DEVICE_ID_VIPER:
2593                m = (typeof(m)){"LPX1000",  "PCI-X",
2594                                "Obsolete, Unsupported Fibre Channel Adapter"};
2595                break;
2596        case PCI_DEVICE_ID_PFLY:
2597                m = (typeof(m)){"LP982", "PCI-X",
2598                                "Obsolete, Unsupported Fibre Channel Adapter"};
2599                break;
2600        case PCI_DEVICE_ID_TFLY:
2601                m = (typeof(m)){"LP1050", "PCI-X",
2602                                "Obsolete, Unsupported Fibre Channel Adapter"};
2603                break;
2604        case PCI_DEVICE_ID_HELIOS:
2605                m = (typeof(m)){"LP11000", "PCI-X2",
2606                                "Obsolete, Unsupported Fibre Channel Adapter"};
2607                break;
2608        case PCI_DEVICE_ID_HELIOS_SCSP:
2609                m = (typeof(m)){"LP11000-SP", "PCI-X2",
2610                                "Obsolete, Unsupported Fibre Channel Adapter"};
2611                break;
2612        case PCI_DEVICE_ID_HELIOS_DCSP:
2613                m = (typeof(m)){"LP11002-SP",  "PCI-X2",
2614                                "Obsolete, Unsupported Fibre Channel Adapter"};
2615                break;
2616        case PCI_DEVICE_ID_NEPTUNE:
2617                m = (typeof(m)){"LPe1000", "PCIe",
2618                                "Obsolete, Unsupported Fibre Channel Adapter"};
2619                break;
2620        case PCI_DEVICE_ID_NEPTUNE_SCSP:
2621                m = (typeof(m)){"LPe1000-SP", "PCIe",
2622                                "Obsolete, Unsupported Fibre Channel Adapter"};
2623                break;
2624        case PCI_DEVICE_ID_NEPTUNE_DCSP:
2625                m = (typeof(m)){"LPe1002-SP", "PCIe",
2626                                "Obsolete, Unsupported Fibre Channel Adapter"};
2627                break;
2628        case PCI_DEVICE_ID_BMID:
2629                m = (typeof(m)){"LP1150", "PCI-X2", "Fibre Channel Adapter"};
2630                break;
2631        case PCI_DEVICE_ID_BSMB:
2632                m = (typeof(m)){"LP111", "PCI-X2",
2633                                "Obsolete, Unsupported Fibre Channel Adapter"};
2634                break;
2635        case PCI_DEVICE_ID_ZEPHYR:
2636                m = (typeof(m)){"LPe11000", "PCIe", "Fibre Channel Adapter"};
2637                break;
2638        case PCI_DEVICE_ID_ZEPHYR_SCSP:
2639                m = (typeof(m)){"LPe11000", "PCIe", "Fibre Channel Adapter"};
2640                break;
2641        case PCI_DEVICE_ID_ZEPHYR_DCSP:
2642                m = (typeof(m)){"LP2105", "PCIe", "FCoE Adapter"};
2643                GE = 1;
2644                break;
2645        case PCI_DEVICE_ID_ZMID:
2646                m = (typeof(m)){"LPe1150", "PCIe", "Fibre Channel Adapter"};
2647                break;
2648        case PCI_DEVICE_ID_ZSMB:
2649                m = (typeof(m)){"LPe111", "PCIe", "Fibre Channel Adapter"};
2650                break;
2651        case PCI_DEVICE_ID_LP101:
2652                m = (typeof(m)){"LP101", "PCI-X",
2653                                "Obsolete, Unsupported Fibre Channel Adapter"};
2654                break;
2655        case PCI_DEVICE_ID_LP10000S:
2656                m = (typeof(m)){"LP10000-S", "PCI",
2657                                "Obsolete, Unsupported Fibre Channel Adapter"};
2658                break;
2659        case PCI_DEVICE_ID_LP11000S:
2660                m = (typeof(m)){"LP11000-S", "PCI-X2",
2661                                "Obsolete, Unsupported Fibre Channel Adapter"};
2662                break;
2663        case PCI_DEVICE_ID_LPE11000S:
2664                m = (typeof(m)){"LPe11000-S", "PCIe",
2665                                "Obsolete, Unsupported Fibre Channel Adapter"};
2666                break;
2667        case PCI_DEVICE_ID_SAT:
2668                m = (typeof(m)){"LPe12000", "PCIe", "Fibre Channel Adapter"};
2669                break;
2670        case PCI_DEVICE_ID_SAT_MID:
2671                m = (typeof(m)){"LPe1250", "PCIe", "Fibre Channel Adapter"};
2672                break;
2673        case PCI_DEVICE_ID_SAT_SMB:
2674                m = (typeof(m)){"LPe121", "PCIe", "Fibre Channel Adapter"};
2675                break;
2676        case PCI_DEVICE_ID_SAT_DCSP:
2677                m = (typeof(m)){"LPe12002-SP", "PCIe", "Fibre Channel Adapter"};
2678                break;
2679        case PCI_DEVICE_ID_SAT_SCSP:
2680                m = (typeof(m)){"LPe12000-SP", "PCIe", "Fibre Channel Adapter"};
2681                break;
2682        case PCI_DEVICE_ID_SAT_S:
2683                m = (typeof(m)){"LPe12000-S", "PCIe", "Fibre Channel Adapter"};
2684                break;
2685        case PCI_DEVICE_ID_HORNET:
2686                m = (typeof(m)){"LP21000", "PCIe",
2687                                "Obsolete, Unsupported FCoE Adapter"};
2688                GE = 1;
2689                break;
2690        case PCI_DEVICE_ID_PROTEUS_VF:
2691                m = (typeof(m)){"LPev12000", "PCIe IOV",
2692                                "Obsolete, Unsupported Fibre Channel Adapter"};
2693                break;
2694        case PCI_DEVICE_ID_PROTEUS_PF:
2695                m = (typeof(m)){"LPev12000", "PCIe IOV",
2696                                "Obsolete, Unsupported Fibre Channel Adapter"};
2697                break;
2698        case PCI_DEVICE_ID_PROTEUS_S:
2699                m = (typeof(m)){"LPemv12002-S", "PCIe IOV",
2700                                "Obsolete, Unsupported Fibre Channel Adapter"};
2701                break;
2702        case PCI_DEVICE_ID_TIGERSHARK:
2703                oneConnect = 1;
2704                m = (typeof(m)){"OCe10100", "PCIe", "FCoE"};
2705                break;
2706        case PCI_DEVICE_ID_TOMCAT:
2707                oneConnect = 1;
2708                m = (typeof(m)){"OCe11100", "PCIe", "FCoE"};
2709                break;
2710        case PCI_DEVICE_ID_FALCON:
2711                m = (typeof(m)){"LPSe12002-ML1-E", "PCIe",
2712                                "EmulexSecure Fibre"};
2713                break;
2714        case PCI_DEVICE_ID_BALIUS:
2715                m = (typeof(m)){"LPVe12002", "PCIe Shared I/O",
2716                                "Obsolete, Unsupported Fibre Channel Adapter"};
2717                break;
2718        case PCI_DEVICE_ID_LANCER_FC:
2719                m = (typeof(m)){"LPe16000", "PCIe", "Fibre Channel Adapter"};
2720                break;
2721        case PCI_DEVICE_ID_LANCER_FC_VF:
2722                m = (typeof(m)){"LPe16000", "PCIe",
2723                                "Obsolete, Unsupported Fibre Channel Adapter"};
2724                break;
2725        case PCI_DEVICE_ID_LANCER_FCOE:
2726                oneConnect = 1;
2727                m = (typeof(m)){"OCe15100", "PCIe", "FCoE"};
2728                break;
2729        case PCI_DEVICE_ID_LANCER_FCOE_VF:
2730                oneConnect = 1;
2731                m = (typeof(m)){"OCe15100", "PCIe",
2732                                "Obsolete, Unsupported FCoE"};
2733                break;
2734        case PCI_DEVICE_ID_LANCER_G6_FC:
2735                m = (typeof(m)){"LPe32000", "PCIe", "Fibre Channel Adapter"};
2736                break;
2737        case PCI_DEVICE_ID_LANCER_G7_FC:
2738                m = (typeof(m)){"LPe36000", "PCIe", "Fibre Channel Adapter"};
2739                break;
2740        case PCI_DEVICE_ID_LANCER_G7P_FC:
2741                m = (typeof(m)){"LPe38000", "PCIe", "Fibre Channel Adapter"};
2742                break;
2743        case PCI_DEVICE_ID_SKYHAWK:
2744        case PCI_DEVICE_ID_SKYHAWK_VF:
2745                oneConnect = 1;
2746                m = (typeof(m)){"OCe14000", "PCIe", "FCoE"};
2747                break;
2748        default:
2749                m = (typeof(m)){"Unknown", "", ""};
2750                break;
2751        }
2752
2753        if (mdp && mdp[0] == '\0')
2754        if (mdp && mdp[0] == '\0')
2754        snprintf(mdp, 79, "%s", m.name);
2755        /*
2756         * oneConnect HBAs require special processing; they are all
2757         * initiators and we put the port number on the end.
2758         */
2759        if (descp && descp[0] == '\0') {
2760                if (oneConnect)
2761                        snprintf(descp, 255,
2762                                "Emulex OneConnect %s, %s Initiator %s",
2763                                m.name, m.function,
2764                                phba->Port);
2765                else if (max_speed == 0)
2766                        snprintf(descp, 255,
2767                                "Emulex %s %s %s",
2768                                m.name, m.bus, m.function);
2769                else
2770                        snprintf(descp, 255,
2771                                "Emulex %s %d%s %s %s",
2772                                m.name, max_speed, (GE) ? "GE" : "Gb",
2773                                m.bus, m.function);
2774        }
2775}
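
/*
 * Worked example (hypothetical values): for an LPe36000 (LANCER_G7) with
 * LMT_64Gb set in phba->lmt, max_speed resolves to 64 and GE stays 0, so
 * the description built above would read:
 *
 *   "Emulex LPe36000 64Gb PCIe Fibre Channel Adapter"
 */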
2776
2777/**
2778 * lpfc_sli3_post_buffer - Post IOCB(s) with DMA buffer descriptor(s) to an IOCB ring
2779 * @phba: pointer to lpfc hba data structure.
2780 * @pring: pointer to an IOCB ring.
2781 * @cnt: the number of IOCBs to be posted to the IOCB ring.
2782 *
2783 * This routine posts a given number of IOCBs with the associated DMA buffer
2784 * descriptors specified by the cnt argument to the given IOCB ring.
2785 *
2786 * Return codes
2787 *   The number of IOCBs NOT able to be posted to the IOCB ring.
2788 **/
2789int
2790lpfc_sli3_post_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, int cnt)
2791{
2792        IOCB_t *icmd;
2793        struct lpfc_iocbq *iocb;
2794        struct lpfc_dmabuf *mp1, *mp2;
2795
2796        cnt += pring->missbufcnt;
2797
2798        /* While there are buffers to post */
2799        while (cnt > 0) {
2800                /* Allocate buffer for command iocb */
2801                iocb = lpfc_sli_get_iocbq(phba);
2802                if (!iocb) {
2803                        pring->missbufcnt = cnt;
2804                        return cnt;
2805                }
2806                icmd = &iocb->iocb;
2807
2808                /* 2 buffers can be posted per command */
2809                /* Allocate buffer to post */
2810                mp1 = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
2811                if (mp1)
2812                        mp1->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &mp1->phys);
2813                if (!mp1 || !mp1->virt) {
2814                        kfree(mp1);
2815                        lpfc_sli_release_iocbq(phba, iocb);
2816                        pring->missbufcnt = cnt;
2817                        return cnt;
2818                }
2819
2820                INIT_LIST_HEAD(&mp1->list);
2821                /* Allocate buffer to post */
2822                if (cnt > 1) {
2823                        mp2 = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
2824                        if (mp2)
2825                                mp2->virt = lpfc_mbuf_alloc(phba, MEM_PRI,
2826                                                            &mp2->phys);
2827                        if (!mp2 || !mp2->virt) {
2828                                kfree(mp2);
2829                                lpfc_mbuf_free(phba, mp1->virt, mp1->phys);
2830                                kfree(mp1);
2831                                lpfc_sli_release_iocbq(phba, iocb);
2832                                pring->missbufcnt = cnt;
2833                                return cnt;
2834                        }
2835
2836                        INIT_LIST_HEAD(&mp2->list);
2837                } else {
2838                        mp2 = NULL;
2839                }
2840
2841                icmd->un.cont64[0].addrHigh = putPaddrHigh(mp1->phys);
2842                icmd->un.cont64[0].addrLow = putPaddrLow(mp1->phys);
2843                icmd->un.cont64[0].tus.f.bdeSize = FCELSSIZE;
2844                icmd->ulpBdeCount = 1;
2845                cnt--;
2846                if (mp2) {
2847                        icmd->un.cont64[1].addrHigh = putPaddrHigh(mp2->phys);
2848                        icmd->un.cont64[1].addrLow = putPaddrLow(mp2->phys);
2849                        icmd->un.cont64[1].tus.f.bdeSize = FCELSSIZE;
2850                        cnt--;
2851                        icmd->ulpBdeCount = 2;
2852                }
2853
2854                icmd->ulpCommand = CMD_QUE_RING_BUF64_CN;
2855                icmd->ulpLe = 1;
2856
2857                if (lpfc_sli_issue_iocb(phba, pring->ringno, iocb, 0) ==
2858                    IOCB_ERROR) {
2859                        lpfc_mbuf_free(phba, mp1->virt, mp1->phys);
2860                        kfree(mp1);
2861                        cnt++;
2862                        if (mp2) {
2863                                lpfc_mbuf_free(phba, mp2->virt, mp2->phys);
2864                                kfree(mp2);
2865                                cnt++;
2866                        }
2867                        lpfc_sli_release_iocbq(phba, iocb);
2868                        pring->missbufcnt = cnt;
2869                        return cnt;
2870                }
2871                lpfc_sli_ringpostbuf_put(phba, pring, mp1);
2872                if (mp2)
2873                        lpfc_sli_ringpostbuf_put(phba, pring, mp2);
2874        }
2875        pring->missbufcnt = 0;
2876        return 0;
2877}
2878
2879/**
2880 * lpfc_post_rcv_buf - Post the initial receive IOCB buffers to ELS ring
2881 * @phba: pointer to lpfc hba data structure.
2882 *
2883 * This routine posts the initial receive IOCB buffers to the ELS ring. The
2884 * number of initial IOCB buffers is specified by LPFC_BUF_RING0, currently
2885 * set to 64 IOCBs. SLI3 only.
2886 *
2887 * Return codes
2888 *   0 - success (currently always success)
2889 **/
2890static int
2891lpfc_post_rcv_buf(struct lpfc_hba *phba)
2892{
2893        struct lpfc_sli *psli = &phba->sli;
2894
2895        /* Ring 0, ELS / CT buffers */
2896        lpfc_sli3_post_buffer(phba, &psli->sli3_ring[LPFC_ELS_RING], LPFC_BUF_RING0);
2897        /* Ring 2 - FCP no buffers needed */
2898
2899        return 0;
2900}
2901
2902#define S(N,V) (((V)<<(N))|((V)>>(32-(N))))
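
/*
 * S(N, V) is SHA-1's 32-bit rotate-left, shown here with illustrative
 * values: S(1, 0x80000000) == 0x00000001 and S(5, 0x00000001) == 0x20.
 * V is always a uint32_t hash word below, so both shifts are well defined.
 */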
2903
2904/**
2905 * lpfc_sha_init - Set up initial array of hash table entries
2906 * @HashResultPointer: pointer to an array used as the hash table.
2907 *
2908 * This routine sets up the initial values in the array of hash table entries
2909 * for the LC HBAs.
2910 **/
2911static void
2912lpfc_sha_init(uint32_t *HashResultPointer)
2913{
2914        HashResultPointer[0] = 0x67452301;
2915        HashResultPointer[1] = 0xEFCDAB89;
2916        HashResultPointer[2] = 0x98BADCFE;
2917        HashResultPointer[3] = 0x10325476;
2918        HashResultPointer[4] = 0xC3D2E1F0;
2919}
2920
2921/**
2922 * lpfc_sha_iterate - Iterate initial hash table with the working hash table
2923 * @HashResultPointer: pointer to an initial/result hash table.
2924 * @HashWorkingPointer: pointer to a working hash table.
2925 *
2926 * This routine iterates an initial hash table pointed to by @HashResultPointer
2927 * with the values from the working hash table pointed to by @HashWorkingPointer.
2928 * The results are put back into the initial hash table, returned through
2929 * @HashResultPointer as the result hash table.
2930 **/
2931static void
2932lpfc_sha_iterate(uint32_t *HashResultPointer, uint32_t *HashWorkingPointer)
2933{
2934        int t;
2935        uint32_t TEMP;
2936        uint32_t A, B, C, D, E;
2937        t = 16;
2938        do {
2939                HashWorkingPointer[t] =
2940                    S(1, HashWorkingPointer[t - 3] ^
2941                         HashWorkingPointer[t - 8] ^
2942                         HashWorkingPointer[t - 14] ^
2943                         HashWorkingPointer[t - 16]);
2944        } while (++t <= 79);
2945        t = 0;
2946        A = HashResultPointer[0];
2947        B = HashResultPointer[1];
2948        C = HashResultPointer[2];
2949        D = HashResultPointer[3];
2950        E = HashResultPointer[4];
2951
2952        do {
2953                if (t < 20) {
2954                        TEMP = ((B & C) | ((~B) & D)) + 0x5A827999;
2955                } else if (t < 40) {
2956                        TEMP = (B ^ C ^ D) + 0x6ED9EBA1;
2957                } else if (t < 60) {
2958                        TEMP = ((B & C) | (B & D) | (C & D)) + 0x8F1BBCDC;
2959                } else {
2960                        TEMP = (B ^ C ^ D) + 0xCA62C1D6;
2961                }
2962                TEMP += S(5, A) + E + HashWorkingPointer[t];
2963                E = D;
2964                D = C;
2965                C = S(30, B);
2966                B = A;
2967                A = TEMP;
2968        } while (++t <= 79);
2969
2970        HashResultPointer[0] += A;
2971        HashResultPointer[1] += B;
2972        HashResultPointer[2] += C;
2973        HashResultPointer[3] += D;
2974        HashResultPointer[4] += E;
2975
2976}
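
/*
 * Note: the two loops above are the standard SHA-1 compression function
 * (FIPS 180-1): the first expands the 16-word block into an 80-word
 * schedule, then rounds 0-19 use Ch(B,C,D) with K=0x5A827999, rounds
 * 20-39 use Parity with K=0x6ED9EBA1, rounds 40-59 use Maj with
 * K=0x8F1BBCDC, and rounds 60-79 use Parity with K=0xCA62C1D6.
 */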
2977
2978/**
2979 * lpfc_challenge_key - Create challenge key based on WWPN of the HBA
2980 * @RandomChallenge: pointer to the entry of host challenge random number array.
2981 * @HashWorking: pointer to the entry of the working hash array.
2982 *
2983 * This routine calculates the working hash array referred to by @HashWorking
2984 * from the challenge random numbers associated with the host, referred to by
2985 * @RandomChallenge. The result is put into the entry of the working hash
2986 * array and returned by reference through @HashWorking.
2987 **/
2988static void
2989lpfc_challenge_key(uint32_t *RandomChallenge, uint32_t *HashWorking)
2990{
2991        *HashWorking = (*RandomChallenge ^ *HashWorking);
2992}
2993
2994/**
2995 * lpfc_hba_init - Perform special handling for LC HBA initialization
2996 * @phba: pointer to lpfc hba data structure.
2997 * @hbainit: pointer to an array of unsigned 32-bit integers.
2998 *
2999 * This routine performs the special handling for LC HBA initialization.
3000 **/
3001void
3002lpfc_hba_init(struct lpfc_hba *phba, uint32_t *hbainit)
3003{
3004        int t;
3005        uint32_t *HashWorking;
3006        uint32_t *pwwnn = (uint32_t *)phba->wwnn;
3007
3008        HashWorking = kcalloc(80, sizeof(uint32_t), GFP_KERNEL);
3009        if (!HashWorking)
3010                return;
3011
3012        HashWorking[0] = HashWorking[78] = *pwwnn++;
3013        HashWorking[1] = HashWorking[79] = *pwwnn;
3014
3015        for (t = 0; t < 7; t++)
3016                lpfc_challenge_key(phba->RandomData + t, HashWorking + t);
3017
3018        lpfc_sha_init(hbainit);
3019        lpfc_sha_iterate(hbainit, HashWorking);
3020        kfree(HashWorking);
3021}
3022
3023/**
3024 * lpfc_cleanup - Performs vport cleanups before deleting a vport
3025 * @vport: pointer to a virtual N_Port data structure.
3026 *
3027 * This routine performs the necessary cleanups before deleting the @vport.
3028 * It invokes the discovery state machine to perform necessary state
3029 * transitions and to release the ndlps associated with the @vport. Note,
3030 * the physical port is treated as @vport 0.
3031 **/
3032void
3033lpfc_cleanup(struct lpfc_vport *vport)
3034{
3035        struct lpfc_hba   *phba = vport->phba;
3036        struct lpfc_nodelist *ndlp, *next_ndlp;
3037        int i = 0;
3038
3039        if (phba->link_state > LPFC_LINK_DOWN)
3040                lpfc_port_link_failure(vport);
3041
3042        /* Clean up VMID resources */
3043        if (lpfc_is_vmid_enabled(phba))
3044                lpfc_vmid_vport_cleanup(vport);
3045
3046        list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
3047                if (vport->port_type != LPFC_PHYSICAL_PORT &&
3048                    ndlp->nlp_DID == Fabric_DID) {
3049                        /* Just free up ndlp with Fabric_DID for vports */
3050                        lpfc_nlp_put(ndlp);
3051                        continue;
3052                }
3053
3054                if (ndlp->nlp_DID == Fabric_Cntl_DID &&
3055                    ndlp->nlp_state == NLP_STE_UNUSED_NODE) {
3056                        lpfc_nlp_put(ndlp);
3057                        continue;
3058                }
3059
3060                /* Fabric Ports not in UNMAPPED state are cleaned up in the
3061                 * DEVICE_RM event.
3062                 */
3063                if (ndlp->nlp_type & NLP_FABRIC &&
3064                    ndlp->nlp_state == NLP_STE_UNMAPPED_NODE)
3065                        lpfc_disc_state_machine(vport, ndlp, NULL,
3066                                        NLP_EVT_DEVICE_RECOVERY);
3067
3068                if (!(ndlp->fc4_xpt_flags & (NVME_XPT_REGD|SCSI_XPT_REGD)))
3069                        lpfc_disc_state_machine(vport, ndlp, NULL,
3070                                        NLP_EVT_DEVICE_RM);
3071        }
3072
3073        /* This is a special case flush to return all
3074         * IOs before entering this loop. There are
3075         * two points in the code where a flush is
3076         * avoided if the FC_UNLOADING flag is set:
3077         * one is in the multipool destroy (this
3078         * prevents a crash) and the other is in the
3079         * nvme abort handler (which also prevents a
3080         * crash). Both of these exceptions are
3081         * cases where the slot is still accessible.
3082         * The flush here is done only when the pci
3083         * slot is offline.
3084         */
3085        if (vport->load_flag & FC_UNLOADING &&
3086            pci_channel_offline(phba->pcidev))
3087                lpfc_sli_flush_io_rings(vport->phba);
3088
3089        /* At this point, ALL ndlp's should be gone
3090         * because of the previous NLP_EVT_DEVICE_RM.
3091         * Let's wait for this to happen, if needed.
3092         */
3093        while (!list_empty(&vport->fc_nodes)) {
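                /* Bound the wait: 3000 iterations x 10 ms sleep ~= 30 seconds */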
3094                if (i++ > 3000) {
3095                        lpfc_printf_vlog(vport, KERN_ERR,
3096                                         LOG_TRACE_EVENT,
3097                                "0233 Nodelist not empty\n");
3098                        list_for_each_entry_safe(ndlp, next_ndlp,
3099                                                &vport->fc_nodes, nlp_listp) {
3100                                lpfc_printf_vlog(ndlp->vport, KERN_ERR,
3101                                                 LOG_DISCOVERY,
3102                                                 "0282 did:x%x ndlp:x%px "
3103                                                 "refcnt:%d xflags x%x nflag x%x\n",
3104                                                 ndlp->nlp_DID, (void *)ndlp,
3105                                                 kref_read(&ndlp->kref),
3106                                                 ndlp->fc4_xpt_flags,
3107                                                 ndlp->nlp_flag);
3108                        }
3109                        break;
3110                }
3111
3112                /* Wait for any activity on ndlps to settle */
3113                msleep(10);
3114        }
3115        lpfc_cleanup_vports_rrqs(vport, NULL);
3116}
3117
3118/**
3119 * lpfc_stop_vport_timers - Stop all the timers associated with a vport
3120 * @vport: pointer to a virtual N_Port data structure.
3121 *
3122 * This routine stops all the timers associated with a @vport. This function
3123 * is invoked before disabling or deleting a @vport. Note that the physical
3124 * port is treated as @vport 0.
3125 **/
3126void
3127lpfc_stop_vport_timers(struct lpfc_vport *vport)
3128{
3129        del_timer_sync(&vport->els_tmofunc);
3130        del_timer_sync(&vport->delayed_disc_tmo);
3131        lpfc_can_disctmo(vport);
3132        return;
3133}
3134
3135/**
3136 * __lpfc_sli4_stop_fcf_redisc_wait_timer - Stop FCF rediscovery wait timer
3137 * @phba: pointer to lpfc hba data structure.
3138 *
3139 * This routine stops the SLI4 FCF rediscovery wait timer if it's on. The
3140 * caller of this routine should already hold the host lock.
3141 **/
3142void
3143__lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba)
3144{
3145        /* Clear pending FCF rediscovery wait flag */
3146        phba->fcf.fcf_flag &= ~FCF_REDISC_PEND;
3147
3148        /* Now, try to stop the timer */
3149        del_timer(&phba->fcf.redisc_wait);
3150}
3151
3152/**
3153 * lpfc_sli4_stop_fcf_redisc_wait_timer - Stop FCF rediscovery wait timer
3154 * @phba: pointer to lpfc hba data structure.
3155 *
3156 * This routine stops the SLI4 FCF rediscovery wait timer if it's on. It
3157 * checks whether the FCF rediscovery wait timer is pending with the host
3158 * lock held before proceeding with disabling the timer and clearing the
3159 * wait timer pending flag.
3160 **/
3161void
3162lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba)
3163{
3164        spin_lock_irq(&phba->hbalock);
3165        if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND)) {
3166                /* FCF rediscovery timer already fired or stopped */
3167                spin_unlock_irq(&phba->hbalock);
3168                return;
3169        }
3170        __lpfc_sli4_stop_fcf_redisc_wait_timer(phba);
3171        /* Clear failover in progress flags */
3172        phba->fcf.fcf_flag &= ~(FCF_DEAD_DISC | FCF_ACVL_DISC);
3173        spin_unlock_irq(&phba->hbalock);
3174}
3175
3176/**
3177 * lpfc_cmf_stop - Stop CMF processing
3178 * @phba: pointer to lpfc hba data structure.
3179 *
3180 * This is called when the link goes down or if CMF mode is turned OFF.
3181 * It is also called when going offline or unloaded just before the
3182 * congestion info buffer is unregistered.
3183 **/
3184void
3185lpfc_cmf_stop(struct lpfc_hba *phba)
3186{
3187        int cpu;
3188        struct lpfc_cgn_stat *cgs;
3189
3190        /* We only do something if CMF is enabled */
3191        if (!phba->sli4_hba.pc_sli4_params.cmf)
3192                return;
3193
3194        lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
3195                        "6221 Stop CMF / Cancel Timer\n");
3196
3197        /* Cancel the CMF timer */
3198        hrtimer_cancel(&phba->cmf_timer);
3199
3200        /* Zero CMF counters */
3201        atomic_set(&phba->cmf_busy, 0);
3202        for_each_present_cpu(cpu) {
3203                cgs = per_cpu_ptr(phba->cmf_stat, cpu);
3204                atomic64_set(&cgs->total_bytes, 0);
3205                atomic64_set(&cgs->rcv_bytes, 0);
3206                atomic_set(&cgs->rx_io_cnt, 0);
3207                atomic64_set(&cgs->rx_latency, 0);
3208        }
3209        atomic_set(&phba->cmf_bw_wait, 0);
3210
3211        /* Resume any blocked IO - Queue unblock on workqueue */
3212        queue_work(phba->wq, &phba->unblock_request_work);
3213}
3214
3215static inline uint64_t
3216lpfc_get_max_line_rate(struct lpfc_hba *phba)
3217{
3218        uint64_t rate = lpfc_sli_port_speed_get(phba);
3219
3220        return ((((unsigned long)rate) * 1024 * 1024) / 10);
3221}
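
/*
 * A worked example, assuming lpfc_sli_port_speed_get() reports the link
 * speed in megabits per second: a 32Gb link (rate == 32000) yields
 * 32000 * 1024 * 1024 / 10 == 3,355,443,200, i.e. the line rate in bytes
 * per second after the 10-bits-per-byte encoding overhead.
 */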
3222
3223void
3224lpfc_cmf_signal_init(struct lpfc_hba *phba)
3225{
3226        lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
3227                        "6223 Signal CMF init\n");
3228
3229        /* Use the new fc_linkspeed to recalculate */
3230        phba->cmf_interval_rate = LPFC_CMF_INTERVAL;
3231        phba->cmf_max_line_rate = lpfc_get_max_line_rate(phba);
3232        phba->cmf_link_byte_count = div_u64(phba->cmf_max_line_rate *
3233                                            phba->cmf_interval_rate, 1000);
3234        phba->cmf_max_bytes_per_interval = phba->cmf_link_byte_count;
3235
3236        /* This is a signal to firmware to sync up CMF BW with link speed */
3237        lpfc_issue_cmf_sync_wqe(phba, 0, 0);
3238}
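
/*
 * Sizing sketch for the math above, assuming LPFC_CMF_INTERVAL is in
 * milliseconds: with cmf_max_line_rate ~= 3,355,443,200 bytes/s (32Gb
 * link) and a hypothetical 100 ms interval, cmf_link_byte_count =
 * 3,355,443,200 * 100 / 1000 ~= 335,544,320 bytes per CMF interval.
 */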
3239
3240/**
3241 * lpfc_cmf_start - Start CMF processing
3242 * @phba: pointer to lpfc hba data structure.
3243 *
3244 * This is called when the link comes up or if CMF mode is changed from
3245 * OFF to Monitor or Managed.
3246 **/
3247void
3248lpfc_cmf_start(struct lpfc_hba *phba)
3249{
3250        struct lpfc_cgn_stat *cgs;
3251        int cpu;
3252
3253        /* We only do something if CMF is enabled */
3254        if (!phba->sli4_hba.pc_sli4_params.cmf ||
3255            phba->cmf_active_mode == LPFC_CFG_OFF)
3256                return;
3257
3258        /* Reinitialize congestion buffer info */
3259        lpfc_init_congestion_buf(phba);
3260
3261        atomic_set(&phba->cgn_fabric_warn_cnt, 0);
3262        atomic_set(&phba->cgn_fabric_alarm_cnt, 0);
3263        atomic_set(&phba->cgn_sync_alarm_cnt, 0);
3264        atomic_set(&phba->cgn_sync_warn_cnt, 0);
3265
3266        atomic_set(&phba->cmf_busy, 0);
3267        for_each_present_cpu(cpu) {
3268                cgs = per_cpu_ptr(phba->cmf_stat, cpu);
3269                atomic64_set(&cgs->total_bytes, 0);
3270                atomic64_set(&cgs->rcv_bytes, 0);
3271                atomic_set(&cgs->rx_io_cnt, 0);
3272                atomic64_set(&cgs->rx_latency, 0);
3273        }
3274        phba->cmf_latency.tv_sec = 0;
3275        phba->cmf_latency.tv_nsec = 0;
3276
3277        lpfc_cmf_signal_init(phba);
3278
3279        lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
3280                        "6222 Start CMF / Timer\n");
3281
3282        phba->cmf_timer_cnt = 0;
3283        hrtimer_start(&phba->cmf_timer,
3284                      ktime_set(0, LPFC_CMF_INTERVAL * 1000000),
3285                      HRTIMER_MODE_REL);
3286        /* Setup for latency check in IO cmpl routines */
3287        ktime_get_real_ts64(&phba->cmf_latency);
3288
3289        atomic_set(&phba->cmf_bw_wait, 0);
3290        atomic_set(&phba->cmf_stop_io, 0);
3291}
3292
3293/**
3294 * lpfc_stop_hba_timers - Stop all the timers associated with an HBA
3295 * @phba: pointer to lpfc hba data structure.
3296 *
3297 * This routine stops all the timers associated with a HBA. This function is
3298 * invoked before either putting a HBA offline or unloading the driver.
3299 **/
3300void
3301lpfc_stop_hba_timers(struct lpfc_hba *phba)
3302{
3303        if (phba->pport)
3304                lpfc_stop_vport_timers(phba->pport);
3305        cancel_delayed_work_sync(&phba->eq_delay_work);
3306        cancel_delayed_work_sync(&phba->idle_stat_delay_work);
3307        del_timer_sync(&phba->sli.mbox_tmo);
3308        del_timer_sync(&phba->fabric_block_timer);
3309        del_timer_sync(&phba->eratt_poll);
3310        del_timer_sync(&phba->hb_tmofunc);
3311        if (phba->sli_rev == LPFC_SLI_REV4) {
3312                del_timer_sync(&phba->rrq_tmr);
3313                phba->hba_flag &= ~HBA_RRQ_ACTIVE;
3314        }
3315        phba->hba_flag &= ~(HBA_HBEAT_INP | HBA_HBEAT_TMO);
3316
3317        switch (phba->pci_dev_grp) {
3318        case LPFC_PCI_DEV_LP:
3319                /* Stop any LightPulse device specific driver timers */
3320                del_timer_sync(&phba->fcp_poll_timer);
3321                break;
3322        case LPFC_PCI_DEV_OC:
3323                /* Stop any OneConnect device specific driver timers */
3324                lpfc_sli4_stop_fcf_redisc_wait_timer(phba);
3325                break;
3326        default:
3327                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3328                                "0297 Invalid device group (x%x)\n",
3329                                phba->pci_dev_grp);
3330                break;
3331        }
3332        return;
3333}
3334
3335/**
3336 * lpfc_block_mgmt_io - Mark a HBA's management interface as blocked
3337 * @phba: pointer to lpfc hba data structure.
3338 * @mbx_action: flag for mailbox no wait action.
3339 *
3340 * This routine marks a HBA's management interface as blocked. Once the HBA's
3341 * management interface is marked as blocked, all user space access to
3342 * the HBA, whether from the sysfs interface or the libdfc interface, is
3343 * blocked. The HBA is set to block the management interface when the
3344 * driver prepares the HBA interface for online or offline.
3345 **/
3346static void
3347lpfc_block_mgmt_io(struct lpfc_hba *phba, int mbx_action)
3348{
3349        unsigned long iflag;
3350        uint8_t actcmd = MBX_HEARTBEAT;
3351        unsigned long timeout;
3352
3353        spin_lock_irqsave(&phba->hbalock, iflag);
3354        phba->sli.sli_flag |= LPFC_BLOCK_MGMT_IO;
3355        spin_unlock_irqrestore(&phba->hbalock, iflag);
3356        if (mbx_action == LPFC_MBX_NO_WAIT)
3357                return;
3358        timeout = msecs_to_jiffies(LPFC_MBOX_TMO * 1000) + jiffies;
3359        spin_lock_irqsave(&phba->hbalock, iflag);
3360        if (phba->sli.mbox_active) {
3361                actcmd = phba->sli.mbox_active->u.mb.mbxCommand;
3362                /* Determine how long we might wait for the active mailbox
3363                 * command to be gracefully completed by firmware.
3364                 */
3365                timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba,
3366                                phba->sli.mbox_active) * 1000) + jiffies;
3367        }
3368        spin_unlock_irqrestore(&phba->hbalock, iflag);
3369
3370        /* Wait for the outstanding mailbox command to complete */
3371        while (phba->sli.mbox_active) {
3372                /* Check active mailbox complete status every 2ms */
3373                msleep(2);
3374                if (time_after(jiffies, timeout)) {
3375                        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3376                                        "2813 Mgmt IO is Blocked %x "
3377                                        "- mbox cmd %x still active\n",
3378                                        phba->sli.sli_flag, actcmd);
3379                        break;
3380                }
3381        }
3382}
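
/*
 * Timeout sketch, assuming lpfc_mbox_tmo_val() returns seconds: for a
 * mailbox command with a 30 s limit, timeout becomes
 * msecs_to_jiffies(30 * 1000) + jiffies, and the 2 ms polling loop above
 * gives up once time_after(jiffies, timeout) is true.
 */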
3383
3384/**
3385 * lpfc_sli4_node_prep - Assign RPIs for active nodes.
3386 * @phba: pointer to lpfc hba data structure.
3387 *
3388 * Allocate RPIs for all active remote nodes. This is needed whenever
3389 * an SLI4 adapter is reset and the driver is not unloading. Its purpose
3390 * is to fix up the temporary rpi assignments.
3391 **/
3392void
3393lpfc_sli4_node_prep(struct lpfc_hba *phba)
3394{
3395        struct lpfc_nodelist  *ndlp, *next_ndlp;
3396        struct lpfc_vport **vports;
3397        int i, rpi;
3398
3399        if (phba->sli_rev != LPFC_SLI_REV4)
3400                return;
3401
3402        vports = lpfc_create_vport_work_array(phba);
3403        if (vports == NULL)
3404                return;
3405
3406        for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
3407                if (vports[i]->load_flag & FC_UNLOADING)
3408                        continue;
3409
3410                list_for_each_entry_safe(ndlp, next_ndlp,
3411                                         &vports[i]->fc_nodes,
3412                                         nlp_listp) {
3413                        rpi = lpfc_sli4_alloc_rpi(phba);
3414                        if (rpi == LPFC_RPI_ALLOC_ERROR) {
3415                                /* TODO print log? */
3416                                continue;
3417                        }
3418                        ndlp->nlp_rpi = rpi;
3419                        lpfc_printf_vlog(ndlp->vport, KERN_INFO,
3420                                         LOG_NODE | LOG_DISCOVERY,
3421                                         "0009 Assign RPI x%x to ndlp x%px "
3422                                         "DID:x%06x flg:x%x\n",
3423                                         ndlp->nlp_rpi, ndlp, ndlp->nlp_DID,
3424                                         ndlp->nlp_flag);
3425                }
3426        }
3427        lpfc_destroy_vport_work_array(phba, vports);
3428}
3429
3430/**
3431 * lpfc_create_expedite_pool - create expedite pool
3432 * @phba: pointer to lpfc hba data structure.
3433 *
3434 * This routine moves a batch of XRIs from lpfc_io_buf_list_put of HWQ 0
3435 * to the expedite pool and marks them as expedite.
3436 **/
3437static void lpfc_create_expedite_pool(struct lpfc_hba *phba)
3438{
3439        struct lpfc_sli4_hdw_queue *qp;
3440        struct lpfc_io_buf *lpfc_ncmd;
3441        struct lpfc_io_buf *lpfc_ncmd_next;
3442        struct lpfc_epd_pool *epd_pool;
3443        unsigned long iflag;
3444
3445        epd_pool = &phba->epd_pool;
3446        qp = &phba->sli4_hba.hdwq[0];
3447
3448        spin_lock_init(&epd_pool->lock);
3449        spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag);
3450        spin_lock(&epd_pool->lock);
3451        INIT_LIST_HEAD(&epd_pool->list);
3452        list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
3453                                 &qp->lpfc_io_buf_list_put, list) {
3454                list_move_tail(&lpfc_ncmd->list, &epd_pool->list);
3455                lpfc_ncmd->expedite = true;
3456                qp->put_io_bufs--;
3457                epd_pool->count++;
3458                if (epd_pool->count >= XRI_BATCH)
3459                        break;
3460        }
3461        spin_unlock(&epd_pool->lock);
3462        spin_unlock_irqrestore(&qp->io_buf_list_put_lock, iflag);
3463}
3464
3465/**
3466 * lpfc_destroy_expedite_pool - destroy expedite pool
3467 * @phba: pointer to lpfc hba data structure.
3468 *
3469 * This routine returns XRIs from the expedite pool to lpfc_io_buf_list_put
3470 * of HWQ 0 and clears the expedite mark.
3471 **/
3472static void lpfc_destroy_expedite_pool(struct lpfc_hba *phba)
3473{
3474        struct lpfc_sli4_hdw_queue *qp;
3475        struct lpfc_io_buf *lpfc_ncmd;
3476        struct lpfc_io_buf *lpfc_ncmd_next;
3477        struct lpfc_epd_pool *epd_pool;
3478        unsigned long iflag;
3479
3480        epd_pool = &phba->epd_pool;
3481        qp = &phba->sli4_hba.hdwq[0];
3482
3483        spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag);
3484        spin_lock(&epd_pool->lock);
3485        list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
3486                                 &epd_pool->list, list) {
3487                list_move_tail(&lpfc_ncmd->list,
3488                               &qp->lpfc_io_buf_list_put);
3489                lpfc_ncmd->expedite = false;
3490                qp->put_io_bufs++;
3491                epd_pool->count--;
3492        }
3493        spin_unlock(&epd_pool->lock);
3494        spin_unlock_irqrestore(&qp->io_buf_list_put_lock, iflag);
3495}
3496
3497/**
3498 * lpfc_create_multixri_pools - create multi-XRI pools
3499 * @phba: pointer to lpfc hba data structure.
3500 *
3501 * This routine initializes the public and private XRI pools per HWQ, then
3502 * moves XRIs from lpfc_io_buf_list_put to the public pool. The high and low
3503 * watermarks are also initialized.
3504 **/
3505void lpfc_create_multixri_pools(struct lpfc_hba *phba)
3506{
3507        u32 i, j;
3508        u32 hwq_count;
3509        u32 count_per_hwq;
3510        struct lpfc_io_buf *lpfc_ncmd;
3511        struct lpfc_io_buf *lpfc_ncmd_next;
3512        unsigned long iflag;
3513        struct lpfc_sli4_hdw_queue *qp;
3514        struct lpfc_multixri_pool *multixri_pool;
3515        struct lpfc_pbl_pool *pbl_pool;
3516        struct lpfc_pvt_pool *pvt_pool;
3517
3518        lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3519                        "1234 num_hdw_queue=%d num_present_cpu=%d common_xri_cnt=%d\n",
3520                        phba->cfg_hdw_queue, phba->sli4_hba.num_present_cpu,
3521                        phba->sli4_hba.io_xri_cnt);
3522
3523        if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
3524                lpfc_create_expedite_pool(phba);
3525
3526        hwq_count = phba->cfg_hdw_queue;
3527        count_per_hwq = phba->sli4_hba.io_xri_cnt / hwq_count;
3528
3529        for (i = 0; i < hwq_count; i++) {
3530                multixri_pool = kzalloc(sizeof(*multixri_pool), GFP_KERNEL);
3531
3532                if (!multixri_pool) {
3533                        lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3534                                        "1238 Failed to allocate memory for "
3535                                        "multixri_pool\n");
3536
3537                        if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
3538                                lpfc_destroy_expedite_pool(phba);
3539
3540                        j = 0;
3541                        while (j < i) {
3542                                qp = &phba->sli4_hba.hdwq[j];
3543                                kfree(qp->p_multixri_pool);
3544                                j++;
3545                        }
3546                        phba->cfg_xri_rebalancing = 0;
3547                        return;
3548                }
3549
3550                qp = &phba->sli4_hba.hdwq[i];
3551                qp->p_multixri_pool = multixri_pool;
3552
3553                multixri_pool->xri_limit = count_per_hwq;
3554                multixri_pool->rrb_next_hwqid = i;
3555
3556                /* Deal with public free xri pool */
3557                pbl_pool = &multixri_pool->pbl_pool;
3558                spin_lock_init(&pbl_pool->lock);
3559                spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag);
3560                spin_lock(&pbl_pool->lock);
3561                INIT_LIST_HEAD(&pbl_pool->list);
3562                list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
3563                                         &qp->lpfc_io_buf_list_put, list) {
3564                        list_move_tail(&lpfc_ncmd->list, &pbl_pool->list);
3565                        qp->put_io_bufs--;
3566                        pbl_pool->count++;
3567                }
3568                lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3569                                "1235 Moved %d buffers from PUT list over to pbl_pool[%d]\n",
3570                                pbl_pool->count, i);
3571                spin_unlock(&pbl_pool->lock);
3572                spin_unlock_irqrestore(&qp->io_buf_list_put_lock, iflag);
3573
3574                /* Deal with private free xri pool */
3575                pvt_pool = &multixri_pool->pvt_pool;
3576                pvt_pool->high_watermark = multixri_pool->xri_limit / 2;
3577                pvt_pool->low_watermark = XRI_BATCH;
3578                spin_lock_init(&pvt_pool->lock);
3579                spin_lock_irqsave(&pvt_pool->lock, iflag);
3580                INIT_LIST_HEAD(&pvt_pool->list);
3581                pvt_pool->count = 0;
3582                spin_unlock_irqrestore(&pvt_pool->lock, iflag);
3583        }
3584}
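
/*
 * Pool sizing sketch (hypothetical numbers): with io_xri_cnt == 2048 and
 * cfg_hdw_queue == 16, count_per_hwq == 128, so each pvt_pool starts with
 * high_watermark == 64 and low_watermark == XRI_BATCH.
 */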
3585
3586/**
3587 * lpfc_destroy_multixri_pools - destroy multi-XRI pools
3588 * @phba: pointer to lpfc hba data structure.
3589 *
3590 * This routine returns XRIs from the public/private pools to lpfc_io_buf_list_put.
3591 **/
3592static void lpfc_destroy_multixri_pools(struct lpfc_hba *phba)
3593{
3594        u32 i;
3595        u32 hwq_count;
3596        struct lpfc_io_buf *lpfc_ncmd;
3597        struct lpfc_io_buf *lpfc_ncmd_next;
3598        unsigned long iflag;
3599        struct lpfc_sli4_hdw_queue *qp;
3600        struct lpfc_multixri_pool *multixri_pool;
3601        struct lpfc_pbl_pool *pbl_pool;
3602        struct lpfc_pvt_pool *pvt_pool;
3603
3604        if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
3605                lpfc_destroy_expedite_pool(phba);
3606
3607        if (!(phba->pport->load_flag & FC_UNLOADING))
3608                lpfc_sli_flush_io_rings(phba);
3609
3610        hwq_count = phba->cfg_hdw_queue;
3611
3612        for (i = 0; i < hwq_count; i++) {
3613                qp = &phba->sli4_hba.hdwq[i];
3614                multixri_pool = qp->p_multixri_pool;
3615                if (!multixri_pool)
3616                        continue;
3617
3618                qp->p_multixri_pool = NULL;
3619
3620                spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag);
3621
3622                /* Deal with public free xri pool */
3623                pbl_pool = &multixri_pool->pbl_pool;
3624                spin_lock(&pbl_pool->lock);
3625
3626                lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3627                                "1236 Moving %d buffers from pbl_pool[%d] TO PUT list\n",
3628                                pbl_pool->count, i);
3629
3630                list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
3631                                         &pbl_pool->list, list) {
3632                        list_move_tail(&lpfc_ncmd->list,
3633                                       &qp->lpfc_io_buf_list_put);
3634                        qp->put_io_bufs++;
3635                        pbl_pool->count--;
3636                }
3637
3638                INIT_LIST_HEAD(&pbl_pool->list);
3639                pbl_pool->count = 0;
3640
3641                spin_unlock(&pbl_pool->lock);
3642
3643                /* Deal with private free xri pool */
3644                pvt_pool = &multixri_pool->pvt_pool;
3645                spin_lock(&pvt_pool->lock);
3646
3647                lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3648                                "1237 Moving %d buffers from pvt_pool[%d] TO PUT list\n",
3649                                pvt_pool->count, i);
3650
3651                list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
3652                                         &pvt_pool->list, list) {
3653                        list_move_tail(&lpfc_ncmd->list,
3654                                       &qp->lpfc_io_buf_list_put);
3655                        qp->put_io_bufs++;
3656                        pvt_pool->count--;
3657                }
3658
3659                INIT_LIST_HEAD(&pvt_pool->list);
3660                pvt_pool->count = 0;
3661
3662                spin_unlock(&pvt_pool->lock);
3663                spin_unlock_irqrestore(&qp->io_buf_list_put_lock, iflag);
3664
3665                kfree(multixri_pool);
3666        }
3667}
3668
3669/**
3670 * lpfc_online - Initialize and bring a HBA online
3671 * @phba: pointer to lpfc hba data structure.
3672 *
3673 * This routine initializes the HBA and brings a HBA online. During this
3674 * process, the management interface is blocked to prevent user space access
3675 * to the HBA interfering with the driver initialization.
3676 *
3677 * Return codes
3678 *   0 - successful
3679 *   1 - failed
3680 **/
3681int
3682lpfc_online(struct lpfc_hba *phba)
3683{
3684        struct lpfc_vport *vport;
3685        struct lpfc_vport **vports;
3686        int i, error = 0;
3687        bool vpis_cleared = false;
3688
3689        if (!phba)
3690                return 0;
3691        vport = phba->pport;
3692
3693        if (!(vport->fc_flag & FC_OFFLINE_MODE))
3694                return 0;
3695
3696        lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
3697                        "0458 Bring Adapter online\n");
3698
3699        lpfc_block_mgmt_io(phba, LPFC_MBX_WAIT);
3700
3701        if (phba->sli_rev == LPFC_SLI_REV4) {
3702                if (lpfc_sli4_hba_setup(phba)) { /* Initialize SLI4 HBA */
3703                        lpfc_unblock_mgmt_io(phba);
3704                        return 1;
3705                }
3706                spin_lock_irq(&phba->hbalock);
3707                if (!phba->sli4_hba.max_cfg_param.vpi_used)
3708                        vpis_cleared = true;
3709                spin_unlock_irq(&phba->hbalock);
3710
3711                /* Reestablish the local initiator port.
3712                 * The offline process destroyed the previous lport.
3713                 */
3714                if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME &&
3715                                !phba->nvmet_support) {
3716                        error = lpfc_nvme_create_localport(phba->pport);
3717                        if (error)
3718                                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3719                                        "6132 NVME restore reg failed "
3720                                        "on nvmei error x%x\n", error);
3721                }
3722        } else {
3723                lpfc_sli_queue_init(phba);
3724                if (lpfc_sli_hba_setup(phba)) { /* Initialize SLI2/SLI3 HBA */
3725                        lpfc_unblock_mgmt_io(phba);
3726                        return 1;
3727                }
3728        }
3729
3730        vports = lpfc_create_vport_work_array(phba);
3731        if (vports != NULL) {
3732                for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
3733                        struct Scsi_Host *shost;
3734                        shost = lpfc_shost_from_vport(vports[i]);
3735                        spin_lock_irq(shost->host_lock);
3736                        vports[i]->fc_flag &= ~FC_OFFLINE_MODE;
3737                        if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
3738                                vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
3739                        if (phba->sli_rev == LPFC_SLI_REV4) {
3740                                vports[i]->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
3741                                if ((vpis_cleared) &&
3742                                    (vports[i]->port_type !=
3743                                        LPFC_PHYSICAL_PORT))
3744                                        vports[i]->vpi = 0;
3745                        }
3746                        spin_unlock_irq(shost->host_lock);
3747                }
3748        }
3749        lpfc_destroy_vport_work_array(phba, vports);
3750
3751        if (phba->cfg_xri_rebalancing)
3752                lpfc_create_multixri_pools(phba);
3753
3754        lpfc_cpuhp_add(phba);
3755
3756        lpfc_unblock_mgmt_io(phba);
3757        return 0;
3758}
3759
3760/**
3761 * lpfc_unblock_mgmt_io - Mark a HBA's management interface to be not blocked
3762 * @phba: pointer to lpfc hba data structure.
3763 *
3764 * This routine marks a HBA's management interface as not blocked. Once the
3765 * HBA's management interface is marked as not blocked, all user space
3766 * access to the HBA, whether from the sysfs interface or the libdfc
3767 * interface, is allowed. The HBA is set to block the management interface
3768 * when the driver prepares the HBA interface for online or offline and then
3769 * set to unblock the management interface afterwards.
3770 **/
3771void
3772lpfc_unblock_mgmt_io(struct lpfc_hba * phba)
3773{
3774        unsigned long iflag;
3775
3776        spin_lock_irqsave(&phba->hbalock, iflag);
3777        phba->sli.sli_flag &= ~LPFC_BLOCK_MGMT_IO;
3778        spin_unlock_irqrestore(&phba->hbalock, iflag);
3779}
3780
3781/**
3782 * lpfc_offline_prep - Prepare a HBA to be brought offline
3783 * @phba: pointer to lpfc hba data structure.
3784 * @mbx_action: flag for mailbox shutdown action.
3785 *
3786 * This routine is invoked to prepare a HBA to be brought offline. It performs
3787 * unregistration login to all the nodes on all vports and flushes the mailbox
3788 * queue to make it ready to be brought offline.
3789 **/
3790void
3791lpfc_offline_prep(struct lpfc_hba *phba, int mbx_action)
3792{
3793        struct lpfc_vport *vport = phba->pport;
3794        struct lpfc_nodelist  *ndlp, *next_ndlp;
3795        struct lpfc_vport **vports;
3796        struct Scsi_Host *shost;
3797        int i;
3798        int offline;
3799        bool hba_pci_err;
3800
3801        if (vport->fc_flag & FC_OFFLINE_MODE)
3802                return;
3803
3804        lpfc_block_mgmt_io(phba, mbx_action);
3805
3806        lpfc_linkdown(phba);
3807
3808        offline = pci_channel_offline(phba->pcidev);
3809        hba_pci_err = test_bit(HBA_PCI_ERR, &phba->bit_flags);
3810
3811        /* Issue an unreg_login to all nodes on all vports */
3812        vports = lpfc_create_vport_work_array(phba);
3813        if (vports != NULL) {
3814                for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
3815                        if (vports[i]->load_flag & FC_UNLOADING)
3816                                continue;
3817                        shost = lpfc_shost_from_vport(vports[i]);
3818                        spin_lock_irq(shost->host_lock);
3819                        vports[i]->vpi_state &= ~LPFC_VPI_REGISTERED;
3820                        vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
3821                        vports[i]->fc_flag &= ~FC_VFI_REGISTERED;
3822                        spin_unlock_irq(shost->host_lock);
3823
3824                        shost = lpfc_shost_from_vport(vports[i]);
3825                        list_for_each_entry_safe(ndlp, next_ndlp,
3826                                                 &vports[i]->fc_nodes,
3827                                                 nlp_listp) {
3828
3829                                spin_lock_irq(&ndlp->lock);
3830                                ndlp->nlp_flag &= ~NLP_NPR_ADISC;
3831                                spin_unlock_irq(&ndlp->lock);
3832
3833                                if (offline || hba_pci_err) {
3834                                        spin_lock_irq(&ndlp->lock);
3835                                        ndlp->nlp_flag &= ~(NLP_UNREG_INP |
3836                                                            NLP_RPI_REGISTERED);
3837                                        spin_unlock_irq(&ndlp->lock);
3838                                        if (phba->sli_rev == LPFC_SLI_REV4)
3839                                                lpfc_sli_rpi_release(vports[i],
3840                                                                     ndlp);
3841                                } else {
3842                                        lpfc_unreg_rpi(vports[i], ndlp);
3843                                }
3844                                /*
3845                                 * Whenever an SLI4 port goes offline, free the
3846                                 * RPI. Get a new RPI when the adapter port
3847                                 * comes back online.
3848                                 */
3849                                if (phba->sli_rev == LPFC_SLI_REV4) {
3850                                        lpfc_printf_vlog(vports[i], KERN_INFO,
3851                                                 LOG_NODE | LOG_DISCOVERY,
3852                                                 "0011 Free RPI x%x on "
3853                                                 "ndlp: x%px did x%x\n",
3854                                                 ndlp->nlp_rpi, ndlp,
3855                                                 ndlp->nlp_DID);
3856                                        lpfc_sli4_free_rpi(phba, ndlp->nlp_rpi);
3857                                        ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR;
3858                                }
3859
3860                                if (ndlp->nlp_type & NLP_FABRIC) {
3861                                        lpfc_disc_state_machine(vports[i], ndlp,
3862                                                NULL, NLP_EVT_DEVICE_RECOVERY);
3863
3864                                        /* Don't remove the node unless the node
3865                                         * has been unregistered with the
3866                                         * transport, and we're not in recovery
3867                                         * before dev_loss_tmo triggered.
3868                                         * Otherwise, let dev_loss take care of
3869                                         * the node.
3870                                         */
3871                                        if (!(ndlp->save_flags &
3872                                              NLP_IN_RECOV_POST_DEV_LOSS) &&
3873                                            !(ndlp->fc4_xpt_flags &
3874                                              (NVME_XPT_REGD | SCSI_XPT_REGD)))
3875                                                lpfc_disc_state_machine
3876                                                        (vports[i], ndlp,
3877                                                         NULL,
3878                                                         NLP_EVT_DEVICE_RM);
3879                                }
3880                        }
3881                }
3882        }
3883        lpfc_destroy_vport_work_array(phba, vports);
3884
3885        lpfc_sli_mbox_sys_shutdown(phba, mbx_action);
3886
3887        if (phba->wq)
3888                flush_workqueue(phba->wq);
3889}
3890
3891/**
3892 * lpfc_offline - Bring a HBA offline
3893 * @phba: pointer to lpfc hba data structure.
3894 *
3895 * This routine actually brings a HBA offline. It stops all the timers
3896 * associated with the HBA, brings down the SLI layer, and eventually
3897 * marks the HBA as in offline state for the upper layer protocol.
3898 **/
3899void
3900lpfc_offline(struct lpfc_hba *phba)
3901{
3902        struct Scsi_Host  *shost;
3903        struct lpfc_vport **vports;
3904        int i;
3905
3906        if (phba->pport->fc_flag & FC_OFFLINE_MODE)
3907                return;
3908
3909        /* stop port and all timers associated with this hba */
3910        lpfc_stop_port(phba);
3911
3912        /* Tear down the local and target port registrations.  The
3913         * nvme transports need to clean up.
3914         */
3915        lpfc_nvmet_destroy_targetport(phba);
3916        lpfc_nvme_destroy_localport(phba->pport);
3917
3918        vports = lpfc_create_vport_work_array(phba);
3919        if (vports != NULL)
3920                for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
3921                        lpfc_stop_vport_timers(vports[i]);
3922        lpfc_destroy_vport_work_array(phba, vports);
3923        lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
3924                        "0460 Bring Adapter offline\n");
3925        /* Bring down the SLI Layer and cleanup. The HBA is offline now. */
3927        lpfc_sli_hba_down(phba);
3928        spin_lock_irq(&phba->hbalock);
3929        phba->work_ha = 0;
3930        spin_unlock_irq(&phba->hbalock);
3931        vports = lpfc_create_vport_work_array(phba);
3932        if (vports != NULL)
3933                for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
3934                        shost = lpfc_shost_from_vport(vports[i]);
3935                        spin_lock_irq(shost->host_lock);
3936                        vports[i]->work_port_events = 0;
3937                        vports[i]->fc_flag |= FC_OFFLINE_MODE;
3938                        spin_unlock_irq(shost->host_lock);
3939                }
3940        lpfc_destroy_vport_work_array(phba, vports);
3941        /* If the OFFLINE flag is clear (i.e. unloading), cpuhp removal is
3942         * handled in hba_unset.
3943         */
3944        if (phba->pport->fc_flag & FC_OFFLINE_MODE)
3945                __lpfc_cpuhp_remove(phba);
3946
3947        if (phba->cfg_xri_rebalancing)
3948                lpfc_destroy_multixri_pools(phba);
3949}
3950
3951/**
3952 * lpfc_scsi_free - Free all the SCSI buffers and IOCBs from driver lists
3953 * @phba: pointer to lpfc hba data structure.
3954 *
3955 * This routine is to free all the SCSI buffers and IOCBs from the driver
3956 * lists back to the kernel. It is called from lpfc_pci_remove_one to free
3957 * the internal resources before the device is removed from the system.
3958 **/
3959static void
3960lpfc_scsi_free(struct lpfc_hba *phba)
3961{
3962        struct lpfc_io_buf *sb, *sb_next;
3963
3964        if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP))
3965                return;
3966
3967        spin_lock_irq(&phba->hbalock);
3968
3969        /* Release all the lpfc_scsi_bufs maintained by this host. */
3970
3971        spin_lock(&phba->scsi_buf_list_put_lock);
3972        list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list_put,
3973                                 list) {
3974                list_del(&sb->list);
3975                dma_pool_free(phba->lpfc_sg_dma_buf_pool, sb->data,
3976                              sb->dma_handle);
3977                kfree(sb);
3978                phba->total_scsi_bufs--;
3979        }
3980        spin_unlock(&phba->scsi_buf_list_put_lock);
3981
3982        spin_lock(&phba->scsi_buf_list_get_lock);
3983        list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list_get,
3984                                 list) {
3985                list_del(&sb->list);
3986                dma_pool_free(phba->lpfc_sg_dma_buf_pool, sb->data,
3987                              sb->dma_handle);
3988                kfree(sb);
3989                phba->total_scsi_bufs--;
3990        }
3991        spin_unlock(&phba->scsi_buf_list_get_lock);
3992        spin_unlock_irq(&phba->hbalock);
3993}
3994
3995/**
3996 * lpfc_io_free - Free all the IO buffers and IOCBs from driver lists
3997 * @phba: pointer to lpfc hba data structure.
3998 *
3999 * This routine is to free all the IO buffers and IOCBs from the driver
4000 * lists back to the kernel. It is called from lpfc_pci_remove_one to free
4001 * the internal resources before the device is removed from the system.
4002 **/
4003void
4004lpfc_io_free(struct lpfc_hba *phba)
4005{
4006        struct lpfc_io_buf *lpfc_ncmd, *lpfc_ncmd_next;
4007        struct lpfc_sli4_hdw_queue *qp;
4008        int idx;
4009
4010        for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
4011                qp = &phba->sli4_hba.hdwq[idx];
4012                /* Release all the lpfc_nvme_bufs maintained by this host. */
4013                spin_lock(&qp->io_buf_list_put_lock);
4014                list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
4015                                         &qp->lpfc_io_buf_list_put,
4016                                         list) {
4017                        list_del(&lpfc_ncmd->list);
4018                        qp->put_io_bufs--;
4019                        dma_pool_free(phba->lpfc_sg_dma_buf_pool,
4020                                      lpfc_ncmd->data, lpfc_ncmd->dma_handle);
4021                        if (phba->cfg_xpsgl && !phba->nvmet_support)
4022                                lpfc_put_sgl_per_hdwq(phba, lpfc_ncmd);
4023                        lpfc_put_cmd_rsp_buf_per_hdwq(phba, lpfc_ncmd);
4024                        kfree(lpfc_ncmd);
4025                        qp->total_io_bufs--;
4026                }
4027                spin_unlock(&qp->io_buf_list_put_lock);
4028
4029                spin_lock(&qp->io_buf_list_get_lock);
4030                list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
4031                                         &qp->lpfc_io_buf_list_get,
4032                                         list) {
4033                        list_del(&lpfc_ncmd->list);
4034                        qp->get_io_bufs--;
4035                        dma_pool_free(phba->lpfc_sg_dma_buf_pool,
4036                                      lpfc_ncmd->data, lpfc_ncmd->dma_handle);
4037                        if (phba->cfg_xpsgl && !phba->nvmet_support)
4038                                lpfc_put_sgl_per_hdwq(phba, lpfc_ncmd);
4039                        lpfc_put_cmd_rsp_buf_per_hdwq(phba, lpfc_ncmd);
4040                        kfree(lpfc_ncmd);
4041                        qp->total_io_bufs--;
4042                }
4043                spin_unlock(&qp->io_buf_list_get_lock);
4044        }
4045}
4046
4047/**
4048 * lpfc_sli4_els_sgl_update - update ELS xri-sgl sizing and mapping
4049 * @phba: pointer to lpfc hba data structure.
4050 *
4051 * This routine first calculates the sizes of the current els and allocated
4052 * scsi sgl lists, and then goes through all sgls to update the physical
4053 * XRIs assigned due to port function reset. During port initialization, the
4054 * current els and allocated scsi sgl lists are 0s.
4055 *
4056 * Return codes
4057 *   0 - successful, -ENOMEM on memory allocation failure
4058 **/
4059int
4060lpfc_sli4_els_sgl_update(struct lpfc_hba *phba)
4061{
4062        struct lpfc_sglq *sglq_entry = NULL, *sglq_entry_next = NULL;
4063        uint16_t i, lxri, xri_cnt, els_xri_cnt;
4064        LIST_HEAD(els_sgl_list);
4065        int rc;
4066
4067        /*
4068         * update on pci function's els xri-sgl list
4069         */
4070        els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
4071
4072        if (els_xri_cnt > phba->sli4_hba.els_xri_cnt) {
4073                /* els xri-sgl expanded */
4074                xri_cnt = els_xri_cnt - phba->sli4_hba.els_xri_cnt;
4075                lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4076                                "3157 ELS xri-sgl count increased from "
4077                                "%d to %d\n", phba->sli4_hba.els_xri_cnt,
4078                                els_xri_cnt);
4079                /* allocate the additional els sgls */
4080                for (i = 0; i < xri_cnt; i++) {
4081                        sglq_entry = kzalloc(sizeof(struct lpfc_sglq),
4082                                             GFP_KERNEL);
4083                        if (sglq_entry == NULL) {
4084                                lpfc_printf_log(phba, KERN_ERR,
4085                                                LOG_TRACE_EVENT,
4086                                                "2562 Failure to allocate an "
4087                                                "ELS sgl entry:%d\n", i);
4088                                rc = -ENOMEM;
4089                                goto out_free_mem;
4090                        }
4091                        sglq_entry->buff_type = GEN_BUFF_TYPE;
4092                        sglq_entry->virt = lpfc_mbuf_alloc(phba, 0,
4093                                                           &sglq_entry->phys);
4094                        if (sglq_entry->virt == NULL) {
4095                                kfree(sglq_entry);
4096                                lpfc_printf_log(phba, KERN_ERR,
4097                                                LOG_TRACE_EVENT,
4098                                                "2563 Failure to allocate an "
4099                                                "ELS mbuf:%d\n", i);
4100                                rc = -ENOMEM;
4101                                goto out_free_mem;
4102                        }
4103                        sglq_entry->sgl = sglq_entry->virt;
4104                        memset(sglq_entry->sgl, 0, LPFC_BPL_SIZE);
4105                        sglq_entry->state = SGL_FREED;
4106                        list_add_tail(&sglq_entry->list, &els_sgl_list);
4107                }
4108                spin_lock_irq(&phba->sli4_hba.sgl_list_lock);
4109                list_splice_init(&els_sgl_list,
4110                                 &phba->sli4_hba.lpfc_els_sgl_list);
4111                spin_unlock_irq(&phba->sli4_hba.sgl_list_lock);
4112        } else if (els_xri_cnt < phba->sli4_hba.els_xri_cnt) {
4113                /* els xri-sgl shrunk */
4114                xri_cnt = phba->sli4_hba.els_xri_cnt - els_xri_cnt;
4115                lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4116                                "3158 ELS xri-sgl count decreased from "
4117                                "%d to %d\n", phba->sli4_hba.els_xri_cnt,
4118                                els_xri_cnt);
4119                spin_lock_irq(&phba->sli4_hba.sgl_list_lock);
4120                list_splice_init(&phba->sli4_hba.lpfc_els_sgl_list,
4121                                 &els_sgl_list);
4122                /* release extra els sgls from list */
4123                for (i = 0; i < xri_cnt; i++) {
4124                        list_remove_head(&els_sgl_list,
4125                                         sglq_entry, struct lpfc_sglq, list);
4126                        if (sglq_entry) {
4127                                __lpfc_mbuf_free(phba, sglq_entry->virt,
4128                                                 sglq_entry->phys);
4129                                kfree(sglq_entry);
4130                        }
4131                }
4132                list_splice_init(&els_sgl_list,
4133                                 &phba->sli4_hba.lpfc_els_sgl_list);
4134                spin_unlock_irq(&phba->sli4_hba.sgl_list_lock);
4135        } else
4136                lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4137                                "3163 ELS xri-sgl count unchanged: %d\n",
4138                                els_xri_cnt);
4139        phba->sli4_hba.els_xri_cnt = els_xri_cnt;
4140
4141        /* update xris to els sgls on the list */
4142        sglq_entry = NULL;
4143        sglq_entry_next = NULL;
4144        list_for_each_entry_safe(sglq_entry, sglq_entry_next,
4145                                 &phba->sli4_hba.lpfc_els_sgl_list, list) {
4146                lxri = lpfc_sli4_next_xritag(phba);
4147                if (lxri == NO_XRI) {
4148                        lpfc_printf_log(phba, KERN_ERR,
4149                                        LOG_TRACE_EVENT,
4150                                        "2400 Failed to allocate xri for "
4151                                        "ELS sgl\n");
4152                        rc = -ENOMEM;
4153                        goto out_free_mem;
4154                }
4155                sglq_entry->sli4_lxritag = lxri;
4156                sglq_entry->sli4_xritag = phba->sli4_hba.xri_ids[lxri];
4157        }
4158        return 0;
4159
4160out_free_mem:
4161        lpfc_free_els_sgl_list(phba);
4162        return rc;
4163}
4164
4165/**
4166 * lpfc_sli4_nvmet_sgl_update - update xri-sgl sizing and mapping
4167 * @phba: pointer to lpfc hba data structure.
4168 *
4169 * This routine first calculates the size of the current nvmet xri-sgl
4170 * list, and then goes through all sgls to update the physical XRIs
4171 * assigned due to port function reset. During port initialization, the
4172 * current nvmet sgl list is empty.
4173 *
4174 * Return codes
4175 *   0 - successful, -ENOMEM on memory allocation failure
4176 **/
4177int
4178lpfc_sli4_nvmet_sgl_update(struct lpfc_hba *phba)
4179{
4180        struct lpfc_sglq *sglq_entry = NULL, *sglq_entry_next = NULL;
4181        uint16_t i, lxri, xri_cnt, els_xri_cnt;
4182        uint16_t nvmet_xri_cnt;
4183        LIST_HEAD(nvmet_sgl_list);
4184        int rc;
4185
4186        /*
4187         * update on pci function's nvmet xri-sgl list
4188         */
4189        els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
4190
4191        /* For NVMET, ALL remaining XRIs are dedicated for IO processing */
4192        nvmet_xri_cnt = phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt;
4193        if (nvmet_xri_cnt > phba->sli4_hba.nvmet_xri_cnt) {
4194                /* nvmet xri-sgl expanded */
4195                xri_cnt = nvmet_xri_cnt - phba->sli4_hba.nvmet_xri_cnt;
4196                lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4197                                "6302 NVMET xri-sgl cnt grew from %d to %d\n",
4198                                phba->sli4_hba.nvmet_xri_cnt, nvmet_xri_cnt);
4199                /* allocate the additional nvmet sgls */
4200                for (i = 0; i < xri_cnt; i++) {
4201                        sglq_entry = kzalloc(sizeof(struct lpfc_sglq),
4202                                             GFP_KERNEL);
4203                        if (sglq_entry == NULL) {
4204                                lpfc_printf_log(phba, KERN_ERR,
4205                                                LOG_TRACE_EVENT,
4206                                                "6303 Failure to allocate an "
4207                                                "NVMET sgl entry:%d\n", i);
4208                                rc = -ENOMEM;
4209                                goto out_free_mem;
4210                        }
4211                        sglq_entry->buff_type = NVMET_BUFF_TYPE;
4212                        sglq_entry->virt = lpfc_nvmet_buf_alloc(phba, 0,
4213                                                           &sglq_entry->phys);
4214                        if (sglq_entry->virt == NULL) {
4215                                kfree(sglq_entry);
4216                                lpfc_printf_log(phba, KERN_ERR,
4217                                                LOG_TRACE_EVENT,
4218                                                "6304 Failure to allocate an "
4219                                                "NVMET buf:%d\n", i);
4220                                rc = -ENOMEM;
4221                                goto out_free_mem;
4222                        }
4223                        sglq_entry->sgl = sglq_entry->virt;
4224                        memset(sglq_entry->sgl, 0,
4225                               phba->cfg_sg_dma_buf_size);
4226                        sglq_entry->state = SGL_FREED;
4227                        list_add_tail(&sglq_entry->list, &nvmet_sgl_list);
4228                }
4229                spin_lock_irq(&phba->hbalock);
4230                spin_lock(&phba->sli4_hba.sgl_list_lock);
4231                list_splice_init(&nvmet_sgl_list,
4232                                 &phba->sli4_hba.lpfc_nvmet_sgl_list);
4233                spin_unlock(&phba->sli4_hba.sgl_list_lock);
4234                spin_unlock_irq(&phba->hbalock);
4235        } else if (nvmet_xri_cnt < phba->sli4_hba.nvmet_xri_cnt) {
4236                /* nvmet xri-sgl shrunk */
4237                xri_cnt = phba->sli4_hba.nvmet_xri_cnt - nvmet_xri_cnt;
4238                lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4239                                "6305 NVMET xri-sgl count decreased from "
4240                                "%d to %d\n", phba->sli4_hba.nvmet_xri_cnt,
4241                                nvmet_xri_cnt);
4242                spin_lock_irq(&phba->hbalock);
4243                spin_lock(&phba->sli4_hba.sgl_list_lock);
4244                list_splice_init(&phba->sli4_hba.lpfc_nvmet_sgl_list,
4245                                 &nvmet_sgl_list);
4246                /* release extra nvmet sgls from list */
4247                for (i = 0; i < xri_cnt; i++) {
4248                        list_remove_head(&nvmet_sgl_list,
4249                                         sglq_entry, struct lpfc_sglq, list);
4250                        if (sglq_entry) {
4251                                lpfc_nvmet_buf_free(phba, sglq_entry->virt,
4252                                                    sglq_entry->phys);
4253                                kfree(sglq_entry);
4254                        }
4255                }
4256                list_splice_init(&nvmet_sgl_list,
4257                                 &phba->sli4_hba.lpfc_nvmet_sgl_list);
4258                spin_unlock(&phba->sli4_hba.sgl_list_lock);
4259                spin_unlock_irq(&phba->hbalock);
4260        } else
4261                lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4262                                "6306 NVMET xri-sgl count unchanged: %d\n",
4263                                nvmet_xri_cnt);
4264        phba->sli4_hba.nvmet_xri_cnt = nvmet_xri_cnt;
4265
4266        /* update xris to nvmet sgls on the list */
4267        sglq_entry = NULL;
4268        sglq_entry_next = NULL;
4269        list_for_each_entry_safe(sglq_entry, sglq_entry_next,
4270                                 &phba->sli4_hba.lpfc_nvmet_sgl_list, list) {
4271                lxri = lpfc_sli4_next_xritag(phba);
4272                if (lxri == NO_XRI) {
4273                        lpfc_printf_log(phba, KERN_ERR,
4274                                        LOG_TRACE_EVENT,
4275                                        "6307 Failed to allocate xri for "
4276                                        "NVMET sgl\n");
4277                        rc = -ENOMEM;
4278                        goto out_free_mem;
4279                }
4280                sglq_entry->sli4_lxritag = lxri;
4281                sglq_entry->sli4_xritag = phba->sli4_hba.xri_ids[lxri];
4282        }
4283        return 0;
4284
4285out_free_mem:
4286        lpfc_free_nvmet_sgl_list(phba);
4287        return rc;
4288}
4289
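/**
 * lpfc_io_buf_flush - Collect all IO buffers onto a single XRI-sorted list
 * @phba: pointer to lpfc hba data structure.
 * @cbuf: pointer to the list head that receives the IO buffers.
 *
 * This routine removes every IO buffer from each hardware queue's get and
 * put lists and places them on @cbuf in ascending XRI order, since a
 * subsequent SGL block post takes a sequential range of XRIs.
 *
 * Return: the number of IO buffers collected.
 **/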
4290int
4291lpfc_io_buf_flush(struct lpfc_hba *phba, struct list_head *cbuf)
4292{
4293        LIST_HEAD(blist);
4294        struct lpfc_sli4_hdw_queue *qp;
4295        struct lpfc_io_buf *lpfc_cmd;
4296        struct lpfc_io_buf *iobufp, *prev_iobufp;
4297        int idx, cnt, xri, inserted;
4298
4299        cnt = 0;
4300        for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
4301                qp = &phba->sli4_hba.hdwq[idx];
4302                spin_lock_irq(&qp->io_buf_list_get_lock);
4303                spin_lock(&qp->io_buf_list_put_lock);
4304
4305                /* Take everything off the get and put lists */
4306                list_splice_init(&qp->lpfc_io_buf_list_get, &blist);
4307                list_splice(&qp->lpfc_io_buf_list_put, &blist);
4308                INIT_LIST_HEAD(&qp->lpfc_io_buf_list_get);
4309                INIT_LIST_HEAD(&qp->lpfc_io_buf_list_put);
4310                cnt += qp->get_io_bufs + qp->put_io_bufs;
4311                qp->get_io_bufs = 0;
4312                qp->put_io_bufs = 0;
4313                qp->total_io_bufs = 0;
4314                spin_unlock(&qp->io_buf_list_put_lock);
4315                spin_unlock_irq(&qp->io_buf_list_get_lock);
4316        }
4317
4318        /*
4319         * Take IO buffers off blist and put on cbuf sorted by XRI.
4320         * This is because POST_SGL takes a sequential range of XRIs
4321         * to post to the firmware.
4322         */
4323        for (idx = 0; idx < cnt; idx++) {
4324                list_remove_head(&blist, lpfc_cmd, struct lpfc_io_buf, list);
4325                if (!lpfc_cmd)
4326                        return cnt;
4327                if (idx == 0) {
4328                        list_add_tail(&lpfc_cmd->list, cbuf);
4329                        continue;
4330                }
4331                xri = lpfc_cmd->cur_iocbq.sli4_xritag;
4332                inserted = 0;
4333                prev_iobufp = NULL;
4334                list_for_each_entry(iobufp, cbuf, list) {
4335                        if (xri < iobufp->cur_iocbq.sli4_xritag) {
4336                                if (prev_iobufp)
4337                                        list_add(&lpfc_cmd->list,
4338                                                 &prev_iobufp->list);
4339                                else
4340                                        list_add(&lpfc_cmd->list, cbuf);
4341                                inserted = 1;
4342                                break;
4343                        }
4344                        prev_iobufp = iobufp;
4345                }
4346                if (!inserted)
4347                        list_add_tail(&lpfc_cmd->list, cbuf);
4348        }
4349        return cnt;
4350}
4351
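/**
 * lpfc_io_buf_replenish - Redistribute IO buffers across hardware queues
 * @phba: pointer to lpfc hba data structure.
 * @cbuf: pointer to the list of IO buffers to redistribute.
 *
 * This routine hands the IO buffers on @cbuf back to the hardware queues,
 * one buffer per queue in round-robin order, placing each on the queue's
 * put list and updating the per-queue counters.
 *
 * Return: the number of IO buffers redistributed.
 **/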
4352int
4353lpfc_io_buf_replenish(struct lpfc_hba *phba, struct list_head *cbuf)
4354{
4355        struct lpfc_sli4_hdw_queue *qp;
4356        struct lpfc_io_buf *lpfc_cmd;
4357        int idx, cnt;
4358
4359        qp = phba->sli4_hba.hdwq;
4360        cnt = 0;
4361        while (!list_empty(cbuf)) {
4362                for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
4363                        list_remove_head(cbuf, lpfc_cmd,
4364                                         struct lpfc_io_buf, list);
4365                        if (!lpfc_cmd)
4366                                return cnt;
4367                        cnt++;
4368                        qp = &phba->sli4_hba.hdwq[idx];
4369                        lpfc_cmd->hdwq_no = idx;
4370                        lpfc_cmd->hdwq = qp;
4371                        lpfc_cmd->cur_iocbq.cmd_cmpl = NULL;
4372                        spin_lock(&qp->io_buf_list_put_lock);
4373                        list_add_tail(&lpfc_cmd->list,
4374                                      &qp->lpfc_io_buf_list_put);
4375                        qp->put_io_bufs++;
4376                        qp->total_io_bufs++;
4377                        spin_unlock(&qp->io_buf_list_put_lock);
4378                }
4379        }
4380        return cnt;
4381}
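
/*
 * A minimal usage sketch (illustrative only, not part of the driver): a
 * caller that needs to renumber XRIs first quiesces the buffers with
 * lpfc_io_buf_flush(), retags them, then hands them back with
 * lpfc_io_buf_replenish(). lpfc_sli4_io_sgl_update() below is the real
 * caller of this pattern:
 *
 *	LIST_HEAD(io_sgl_list);
 *	int cnt;
 *
 *	cnt = lpfc_io_buf_flush(phba, &io_sgl_list);
 *	... update sli4_lxritag/sli4_xritag on each buffer ...
 *	cnt = lpfc_io_buf_replenish(phba, &io_sgl_list);
 */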
4382
4383/**
4384 * lpfc_sli4_io_sgl_update - update xri-sgl sizing and mapping
4385 * @phba: pointer to lpfc hba data structure.
4386 *
4387 * This routine first calculates the size of the currently allocated IO
4388 * xri-sgl list, and then goes through all IO buffers to update the
4389 * physical XRIs assigned due to port function reset. During port
4390 * initialization, the allocated list is empty.
4391 *
4392 * Return codes
4393 *   0 - successful, -ENOMEM on memory allocation failure
4394 **/
4395int
4396lpfc_sli4_io_sgl_update(struct lpfc_hba *phba)
4397{
4398        struct lpfc_io_buf *lpfc_ncmd = NULL, *lpfc_ncmd_next = NULL;
4399        uint16_t i, lxri, els_xri_cnt;
4400        uint16_t io_xri_cnt, io_xri_max;
4401        LIST_HEAD(io_sgl_list);
4402        int rc, cnt;
4403
4404        /*
4405         * update on pci function's allocated nvme xri-sgl list
4406         */
4407
4408        /* maximum number of xris available for nvme buffers */
4409        els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
4410        io_xri_max = phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt;
4411        phba->sli4_hba.io_xri_max = io_xri_max;
4412
4413        lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4414                        "6074 Current allocated XRI sgl count:%d, "
4415                        "maximum XRI count:%d els_xri_cnt:%d\n",
4416                        phba->sli4_hba.io_xri_cnt,
4417                        phba->sli4_hba.io_xri_max,
4418                        els_xri_cnt);
4419
4420        cnt = lpfc_io_buf_flush(phba, &io_sgl_list);
4421
4422        if (phba->sli4_hba.io_xri_cnt > phba->sli4_hba.io_xri_max) {
4423                /* max nvme xri shrunk below the allocated nvme buffers */
4424                io_xri_cnt = phba->sli4_hba.io_xri_cnt -
4425                                        phba->sli4_hba.io_xri_max;
4426                /* release the extra allocated nvme buffers */
4427                for (i = 0; i < io_xri_cnt; i++) {
4428                        list_remove_head(&io_sgl_list, lpfc_ncmd,
4429                                         struct lpfc_io_buf, list);
4430                        if (lpfc_ncmd) {
4431                                dma_pool_free(phba->lpfc_sg_dma_buf_pool,
4432                                              lpfc_ncmd->data,
4433                                              lpfc_ncmd->dma_handle);
4434                                kfree(lpfc_ncmd);
4435                        }
4436                }
4437                phba->sli4_hba.io_xri_cnt -= io_xri_cnt;
4438        }
4439
4440        /* update xris associated with remaining allocated nvme buffers */
4441        lpfc_ncmd = NULL;
4442        lpfc_ncmd_next = NULL;
4443        phba->sli4_hba.io_xri_cnt = cnt;
4444        list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
4445                                 &io_sgl_list, list) {
4446                lxri = lpfc_sli4_next_xritag(phba);
4447                if (lxri == NO_XRI) {
4448                        lpfc_printf_log(phba, KERN_ERR,
4449                                        LOG_TRACE_EVENT,
4450                                        "6075 Failed to allocate xri for "
4451                                        "nvme buffer\n");
4452                        rc = -ENOMEM;
4453                        goto out_free_mem;
4454                }
4455                lpfc_ncmd->cur_iocbq.sli4_lxritag = lxri;
4456                lpfc_ncmd->cur_iocbq.sli4_xritag = phba->sli4_hba.xri_ids[lxri];
4457        }
4458        cnt = lpfc_io_buf_replenish(phba, &io_sgl_list);
4459        return 0;
4460
4461out_free_mem:
4462        lpfc_io_free(phba);
4463        return rc;
4464}
4465
4466/**
4467 * lpfc_new_io_buf - IO buffer allocator for HBA with SLI4 IF spec
4468 * @phba: Pointer to lpfc hba data structure.
4469 * @num_to_alloc: The requested number of buffers to allocate.
4470 *
4471 * This routine allocates nvme buffers for a device with the SLI-4 interface
4472 * spec; each nvme buffer contains all the information needed to initiate
4473 * an I/O. After allocating up to @num_to_alloc IO buffers and putting
4474 * them on a list, it posts them to the port using an SGL block post.
4475 *
4476 * Return codes:
4477 *   int - number of IO buffers that were allocated and posted.
4478 *   0 = failure, less than num_to_alloc is a partial failure.
4479 **/
4480int
4481lpfc_new_io_buf(struct lpfc_hba *phba, int num_to_alloc)
4482{
4483        struct lpfc_io_buf *lpfc_ncmd;
4484        struct lpfc_iocbq *pwqeq;
4485        uint16_t iotag, lxri = 0;
4486        int bcnt, num_posted;
4487        LIST_HEAD(prep_nblist);
4488        LIST_HEAD(post_nblist);
4489        LIST_HEAD(nvme_nblist);
4490
4491        phba->sli4_hba.io_xri_cnt = 0;
4492        for (bcnt = 0; bcnt < num_to_alloc; bcnt++) {
4493                lpfc_ncmd = kzalloc(sizeof(*lpfc_ncmd), GFP_KERNEL);
4494                if (!lpfc_ncmd)
4495                        break;
4496                /*
4497                 * Get memory from the pci pool to map the virt space to
4498                 * pci bus space for an I/O. The DMA buffer includes the
4499                 * number of SGE's necessary to support the sg_tablesize.
4500                 */
4501                lpfc_ncmd->data = dma_pool_zalloc(phba->lpfc_sg_dma_buf_pool,
4502                                                  GFP_KERNEL,
4503                                                  &lpfc_ncmd->dma_handle);
4504                if (!lpfc_ncmd->data) {
4505                        kfree(lpfc_ncmd);
4506                        break;
4507                }
4508
4509                if (phba->cfg_xpsgl && !phba->nvmet_support) {
4510                        INIT_LIST_HEAD(&lpfc_ncmd->dma_sgl_xtra_list);
4511                } else {
4512                        /*
4513                         * 4K Page alignment is CRITICAL to BlockGuard, double
4514                         * check to be sure.
4515                         */
4516                        if ((phba->sli3_options & LPFC_SLI3_BG_ENABLED) &&
4517                            (((unsigned long)(lpfc_ncmd->data) &
4518                            (unsigned long)(SLI4_PAGE_SIZE - 1)) != 0)) {
4519                                lpfc_printf_log(phba, KERN_ERR,
4520                                                LOG_TRACE_EVENT,
4521                                                "3369 Memory alignment err: "
4522                                                "addr=%lx\n",
4523                                                (unsigned long)lpfc_ncmd->data);
4524                                dma_pool_free(phba->lpfc_sg_dma_buf_pool,
4525                                              lpfc_ncmd->data,
4526                                              lpfc_ncmd->dma_handle);
4527                                kfree(lpfc_ncmd);
4528                                break;
4529                        }
4530                }
4531
4532                INIT_LIST_HEAD(&lpfc_ncmd->dma_cmd_rsp_list);
4533
4534                lxri = lpfc_sli4_next_xritag(phba);
4535                if (lxri == NO_XRI) {
4536                        dma_pool_free(phba->lpfc_sg_dma_buf_pool,
4537                                      lpfc_ncmd->data, lpfc_ncmd->dma_handle);
4538                        kfree(lpfc_ncmd);
4539                        break;
4540                }
4541                pwqeq = &lpfc_ncmd->cur_iocbq;
4542
4543                /* Allocate iotag for lpfc_ncmd->cur_iocbq. */
4544                iotag = lpfc_sli_next_iotag(phba, pwqeq);
4545                if (iotag == 0) {
4546                        dma_pool_free(phba->lpfc_sg_dma_buf_pool,
4547                                      lpfc_ncmd->data, lpfc_ncmd->dma_handle);
4548                        kfree(lpfc_ncmd);
4549                        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
4550                                        "6121 Failed to allocate IOTAG for"
4551                                        " XRI:0x%x\n", lxri);
4552                        lpfc_sli4_free_xri(phba, lxri);
4553                        break;
4554                }
4555                pwqeq->sli4_lxritag = lxri;
4556                pwqeq->sli4_xritag = phba->sli4_hba.xri_ids[lxri];
4557
4558                /* Initialize local short-hand pointers. */
4559                lpfc_ncmd->dma_sgl = lpfc_ncmd->data;
4560                lpfc_ncmd->dma_phys_sgl = lpfc_ncmd->dma_handle;
4561                lpfc_ncmd->cur_iocbq.io_buf = lpfc_ncmd;
4562                spin_lock_init(&lpfc_ncmd->buf_lock);
4563
4564                /* add the nvme buffer to a post list */
4565                list_add_tail(&lpfc_ncmd->list, &post_nblist);
4566                phba->sli4_hba.io_xri_cnt++;
4567        }
4568        lpfc_printf_log(phba, KERN_INFO, LOG_NVME,
4569                        "6114 Allocated %d out of %d requested new NVME "
4570                        "buffers of size x%zu bytes\n", bcnt, num_to_alloc,
4571                        sizeof(*lpfc_ncmd));
4572
4573
4574        /* post the list of nvme buffer sgls to port if available */
4575        if (!list_empty(&post_nblist))
4576                num_posted = lpfc_sli4_post_io_sgl_list(
4577                                phba, &post_nblist, bcnt);
4578        else
4579                num_posted = 0;
4580
4581        return num_posted;
4582}
4583
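/**
 * lpfc_get_wwpn - Read the WWPN of this HBA instance
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine issues a READ_NV mailbox command in polled mode and
 * extracts the port name from the mailbox response.
 *
 * Return: the WWPN in CPU byte order, or (uint64_t)-1 on mailbox failure.
 **/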
4584static uint64_t
4585lpfc_get_wwpn(struct lpfc_hba *phba)
4586{
4587        uint64_t wwn;
4588        int rc;
4589        LPFC_MBOXQ_t *mboxq;
4590        MAILBOX_t *mb;
4591
4592        mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
4593                                                GFP_KERNEL);
4594        if (!mboxq)
4595                return (uint64_t)-1;
4596
4597        /* First get WWN of HBA instance */
4598        lpfc_read_nv(phba, mboxq);
4599        rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
4600        if (rc != MBX_SUCCESS) {
4601                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
4602                                "6019 Mailbox failed, mbxCmd x%x "
4603                                "READ_NV, mbxStatus x%x\n",
4604                                bf_get(lpfc_mqe_command, &mboxq->u.mqe),
4605                                bf_get(lpfc_mqe_status, &mboxq->u.mqe));
4606                mempool_free(mboxq, phba->mbox_mem_pool);
4607                return (uint64_t)-1;
4608        }
4609        mb = &mboxq->u.mb;
4610        memcpy(&wwn, (char *)mb->un.varRDnvp.portname, sizeof(uint64_t));
4611        /* wwn is WWPN of HBA instance */
4612        mempool_free(mboxq, phba->mbox_mem_pool);
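        /* SLI4 reports the name in big-endian order; on SLI3 the two
         * 32-bit words arrive swapped, so rotate by 32 bits to restore
         * the WWPN ordering.
         */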
4613        if (phba->sli_rev == LPFC_SLI_REV4)
4614                return be64_to_cpu(wwn);
4615        else
4616                return rol64(wwn, 32);
4617}
4618
4619/**
4620 * lpfc_vmid_res_alloc - Allocates resources for VMID
4621 * @phba: pointer to lpfc hba data structure.
4622 * @vport: pointer to vport data structure
4623 *
4624 * This routine allocates the resources needed for the VMID.
4625 *
4626 * Return codes
4627 *      0 on Success
4628 *      Non-0 on Failure
4629 */
4630static int
4631lpfc_vmid_res_alloc(struct lpfc_hba *phba, struct lpfc_vport *vport)
4632{
4633        /* VMID feature is supported only on SLI4 */
4634        if (phba->sli_rev == LPFC_SLI_REV3) {
4635                phba->cfg_vmid_app_header = 0;
4636                phba->cfg_vmid_priority_tagging = 0;
4637        }
4638
4639        if (lpfc_is_vmid_enabled(phba)) {
4640                vport->vmid =
4641                    kcalloc(phba->cfg_max_vmid, sizeof(struct lpfc_vmid),
4642                            GFP_KERNEL);
4643                if (!vport->vmid)
4644                        return -ENOMEM;
4645
4646                rwlock_init(&vport->vmid_lock);
4647
4648                /* Set the VMID parameters for the vport */
4649                vport->vmid_priority_tagging = phba->cfg_vmid_priority_tagging;
4650                vport->vmid_inactivity_timeout =
4651                    phba->cfg_vmid_inactivity_timeout;
4652                vport->max_vmid = phba->cfg_max_vmid;
4653                vport->cur_vmid_cnt = 0;
4654
4655                vport->vmid_priority_range =
4656                        bitmap_zalloc(LPFC_VMID_MAX_PRIORITY_RANGE, GFP_KERNEL);
4657
4658                if (!vport->vmid_priority_range) {
4659                        kfree(vport->vmid);
4660                        return -ENOMEM;
4661                }
4662
4663                hash_init(vport->hash_table);
4664        }
4665        return 0;
4666}
4667
4668/**
4669 * lpfc_create_port - Create an FC port
4670 * @phba: pointer to lpfc hba data structure.
4671 * @instance: a unique integer ID to this FC port.
4672 * @dev: pointer to the device data structure.
4673 *
4674 * This routine creates an FC port for the upper layer protocol. The FC port
4675 * can be created on top of either a physical port or a virtual port provided
4676 * by the HBA. This routine also allocates a SCSI host data structure (shost)
4677 * and associates it with the FC port created before adding the shost to the
4678 * SCSI layer.
4679 *
4680 * Return codes
4681 *   @vport - pointer to the virtual N_Port data structure.
4682 *   NULL - port create failed.
4683 **/
4684struct lpfc_vport *
4685lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
4686{
4687        struct lpfc_vport *vport;
4688        struct Scsi_Host  *shost = NULL;
4689        struct scsi_host_template *template;
4690        int error = 0;
4691        int i;
4692        uint64_t wwn;
4693        bool use_no_reset_hba = false;
4694        int rc;
4695
4696        if (lpfc_no_hba_reset_cnt) {
4697                if (phba->sli_rev < LPFC_SLI_REV4 &&
4698                    dev == &phba->pcidev->dev) {
4699                        /* Reset the port first */
4700                        lpfc_sli_brdrestart(phba);
4701                        rc = lpfc_sli_chipset_init(phba);
4702                        if (rc)
4703                                return NULL;
4704                }
4705                wwn = lpfc_get_wwpn(phba);
4706        }
4707
4708        for (i = 0; i < lpfc_no_hba_reset_cnt; i++) {
4709                if (wwn == lpfc_no_hba_reset[i]) {
4710                        lpfc_printf_log(phba, KERN_ERR,
4711                                        LOG_TRACE_EVENT,
4712                                        "6020 Setting use_no_reset port=%llx\n",
4713                                        wwn);
4714                        use_no_reset_hba = true;
4715                        break;
4716                }
4717        }
4718
4719        /* Seed template for SCSI host registration */
4720        if (dev == &phba->pcidev->dev) {
4721                template = &phba->port_template;
4722
4723                if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) {
4724                        /* Seed physical port template */
4725                        memcpy(template, &lpfc_template, sizeof(*template));
4726
4727                        if (use_no_reset_hba)
4728                                /* template is for a no reset SCSI Host */
4729                                template->eh_host_reset_handler = NULL;
4730
4731                        /* Template for all vports this physical port creates */
4732                        memcpy(&phba->vport_template, &lpfc_template,
4733                               sizeof(*template));
4734                        phba->vport_template.shost_groups = lpfc_vport_groups;
4735                        phba->vport_template.eh_bus_reset_handler = NULL;
4736                        phba->vport_template.eh_host_reset_handler = NULL;
4737                        phba->vport_template.vendor_id = 0;
4738
4739                        /* Initialize the host templates with updated value */
4740                        if (phba->sli_rev == LPFC_SLI_REV4) {
4741                                template->sg_tablesize = phba->cfg_scsi_seg_cnt;
4742                                phba->vport_template.sg_tablesize =
4743                                        phba->cfg_scsi_seg_cnt;
4744                        } else {
4745                                template->sg_tablesize = phba->cfg_sg_seg_cnt;
4746                                phba->vport_template.sg_tablesize =
4747                                        phba->cfg_sg_seg_cnt;
4748                        }
4749
4750                } else {
4751                        /* NVMET is for physical port only */
4752                        memcpy(template, &lpfc_template_nvme,
4753                               sizeof(*template));
4754                }
4755        } else {
4756                template = &phba->vport_template;
4757        }
4758
4759        shost = scsi_host_alloc(template, sizeof(struct lpfc_vport));
4760        if (!shost)
4761                goto out;
4762
4763        vport = (struct lpfc_vport *) shost->hostdata;
4764        vport->phba = phba;
4765        vport->load_flag |= FC_LOADING;
4766        vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
4767        vport->fc_rscn_flush = 0;
4768        lpfc_get_vport_cfgparam(vport);
4769
4770        /* Adjust value in vport */
4771        vport->cfg_enable_fc4_type = phba->cfg_enable_fc4_type;
4772
4773        shost->unique_id = instance;
4774        shost->max_id = LPFC_MAX_TARGET;
4775        shost->max_lun = vport->cfg_max_luns;
4776        shost->this_id = -1;
4777        shost->max_cmd_len = 16;
4778
4779        if (phba->sli_rev == LPFC_SLI_REV4) {
4780                if (!phba->cfg_fcp_mq_threshold ||
4781                    phba->cfg_fcp_mq_threshold > phba->cfg_hdw_queue)
4782                        phba->cfg_fcp_mq_threshold = phba->cfg_hdw_queue;
4783
4784                shost->nr_hw_queues = min_t(int, 2 * num_possible_nodes(),
4785                                            phba->cfg_fcp_mq_threshold);
4786
4787                shost->dma_boundary =
4788                        phba->sli4_hba.pc_sli4_params.sge_supp_len - 1;
4789
4790                if (phba->cfg_xpsgl && !phba->nvmet_support)
4791                        shost->sg_tablesize = LPFC_MAX_SG_TABLESIZE;
4792                else
4793                        shost->sg_tablesize = phba->cfg_scsi_seg_cnt;
4794        } else
4795                /* SLI-3 has a limited number of hardware queues (3),
4796                 * thus there is only one for FCP processing.
4797                 */
4798                shost->nr_hw_queues = 1;
4799
4800        /*
4801         * Set initial can_queue value since 0 is no longer supported and
4802         * scsi_add_host will fail. This will be adjusted later based on the
4803         * max xri value determined in hba setup.
4804         */
4805        shost->can_queue = phba->cfg_hba_queue_depth - 10;
4806        if (dev != &phba->pcidev->dev) {
4807                shost->transportt = lpfc_vport_transport_template;
4808                vport->port_type = LPFC_NPIV_PORT;
4809        } else {
4810                shost->transportt = lpfc_transport_template;
4811                vport->port_type = LPFC_PHYSICAL_PORT;
4812        }
4813
4814        lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP,
4815                        "9081 CreatePort TEMPLATE type %x TBLsize %d "
4816                        "SEGcnt %d/%d\n",
4817                        vport->port_type, shost->sg_tablesize,
4818                        phba->cfg_scsi_seg_cnt, phba->cfg_sg_seg_cnt);
4819
4820        /* Allocate the resources for VMID */
4821        rc = lpfc_vmid_res_alloc(phba, vport);
4822
4823        if (rc)
4824                goto out;
4825
4826        /* Initialize all internally managed lists. */
4827        INIT_LIST_HEAD(&vport->fc_nodes);
4828        INIT_LIST_HEAD(&vport->rcv_buffer_list);
4829        spin_lock_init(&vport->work_port_lock);
4830
4831        timer_setup(&vport->fc_disctmo, lpfc_disc_timeout, 0);
4832
4833        timer_setup(&vport->els_tmofunc, lpfc_els_timeout, 0);
4834
4835        timer_setup(&vport->delayed_disc_tmo, lpfc_delayed_disc_tmo, 0);
4836
4837        if (phba->sli3_options & LPFC_SLI3_BG_ENABLED)
4838                lpfc_setup_bg(phba, shost);
4839
4840        error = scsi_add_host_with_dma(shost, dev, &phba->pcidev->dev);
4841        if (error)
4842                goto out_put_shost;
4843
4844        spin_lock_irq(&phba->port_list_lock);
4845        list_add_tail(&vport->listentry, &phba->port_list);
4846        spin_unlock_irq(&phba->port_list_lock);
4847        return vport;
4848
4849out_put_shost:
4850        kfree(vport->vmid);
4851        bitmap_free(vport->vmid_priority_range);
4852        scsi_host_put(shost);
4853out:
4854        return NULL;
4855}
4856
4857/**
4858 * destroy_port - destroy an FC port
4859 * @vport: pointer to an lpfc virtual N_Port data structure.
4860 *
4861 * This routine destroys an FC port from the upper layer protocol. All the
4862 * resources associated with the port are released.
4863 **/
4864void
4865destroy_port(struct lpfc_vport *vport)
4866{
4867        struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
4868        struct lpfc_hba  *phba = vport->phba;
4869
4870        lpfc_debugfs_terminate(vport);
4871        fc_remove_host(shost);
4872        scsi_remove_host(shost);
4873
4874        spin_lock_irq(&phba->port_list_lock);
4875        list_del_init(&vport->listentry);
4876        spin_unlock_irq(&phba->port_list_lock);
4877
4878        lpfc_cleanup(vport);
4879        return;
4880}
4881
4882/**
4883 * lpfc_get_instance - Get a unique integer ID
4884 *
4885 * This routine allocates a unique integer ID from the lpfc_hba_index pool. It
4886 * uses the kernel idr facility to perform the task.
4887 *
4888 * Return codes:
4889 *   instance - a unique integer ID allocated as the new instance.
4890 *   -1 - lpfc get instance failed.
4891 **/
4892int
4893lpfc_get_instance(void)
4894{
4895        int ret;
4896
4897        ret = idr_alloc(&lpfc_hba_index, NULL, 0, 0, GFP_KERNEL);
4898        return ret < 0 ? -1 : ret;
4899}
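
/*
 * A sketch of the matching release (illustrative only; the actual call
 * sites live elsewhere in the driver): an instance obtained here is
 * returned to the pool with idr_remove(), e.g.:
 *
 *	int instance = lpfc_get_instance();
 *	...
 *	idr_remove(&lpfc_hba_index, instance);
 */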
4900
4901/**
4902 * lpfc_scan_finished - method for SCSI layer to detect whether scan is done
4903 * @shost: pointer to SCSI host data structure.
4904 * @time: elapsed time of the scan in jiffies.
4905 *
4906 * This routine is called by the SCSI layer with a SCSI host to determine
4907 * whether the host scan is finished.
4908 *
4909 * Note: there is no scan_start function as adapter initialization will have
4910 * asynchronously kicked off the link initialization.
4911 *
4912 * Return codes
4913 *   0 - SCSI host scan is not over yet.
4914 *   1 - SCSI host scan is over.
4915 **/
4916int lpfc_scan_finished(struct Scsi_Host *shost, unsigned long time)
4917{
4918        struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
4919        struct lpfc_hba   *phba = vport->phba;
4920        int stat = 0;
4921
4922        spin_lock_irq(shost->host_lock);
4923
4924        if (vport->load_flag & FC_UNLOADING) {
4925                stat = 1;
4926                goto finished;
4927        }
4928        if (time >= msecs_to_jiffies(30 * 1000)) {
4929                lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4930                                "0461 Scanning longer than 30 "
4931                                "seconds.  Continuing initialization\n");
4932                stat = 1;
4933                goto finished;
4934        }
4935        if (time >= msecs_to_jiffies(15 * 1000) &&
4936            phba->link_state <= LPFC_LINK_DOWN) {
4937                lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4938                                "0465 Link down longer than 15 "
4939                                "seconds.  Continuing initialization\n");
4940                stat = 1;
4941                goto finished;
4942        }
4943
4944        if (vport->port_state != LPFC_VPORT_READY)
4945                goto finished;
4946        if (vport->num_disc_nodes || vport->fc_prli_sent)
4947                goto finished;
4948        if (vport->fc_map_cnt == 0 && time < msecs_to_jiffies(2 * 1000))
4949                goto finished;
4950        if ((phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) != 0)
4951                goto finished;
4952
4953        stat = 1;
4954
4955finished:
4956        spin_unlock_irq(shost->host_lock);
4957        return stat;
4958}
4959
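/**
 * lpfc_host_supported_speeds_set - Set FC transport supported link speeds
 * @shost: pointer to SCSI host data structure.
 *
 * This routine translates the adapter's link-speed capability bits (lmt)
 * into the FC transport's supported-speeds mask. FCoE ports advertise no
 * speeds because the link speed cannot be controlled via FCoE.
 **/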
4960static void lpfc_host_supported_speeds_set(struct Scsi_Host *shost)
4961{
4962        struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
4963        struct lpfc_hba   *phba = vport->phba;
4964
4965        fc_host_supported_speeds(shost) = 0;
4966        /*
4967         * Avoid reporting supported link speed for FCoE as it can't be
4968         * controlled via FCoE.
4969         */
4970        if (phba->hba_flag & HBA_FCOE_MODE)
4971                return;
4972
4973        if (phba->lmt & LMT_256Gb)
4974                fc_host_supported_speeds(shost) |= FC_PORTSPEED_256GBIT;
4975        if (phba->lmt & LMT_128Gb)
4976                fc_host_supported_speeds(shost) |= FC_PORTSPEED_128GBIT;
4977        if (phba->lmt & LMT_64Gb)
4978                fc_host_supported_speeds(shost) |= FC_PORTSPEED_64GBIT;
4979        if (phba->lmt & LMT_32Gb)
4980                fc_host_supported_speeds(shost) |= FC_PORTSPEED_32GBIT;
4981        if (phba->lmt & LMT_16Gb)
4982                fc_host_supported_speeds(shost) |= FC_PORTSPEED_16GBIT;
4983        if (phba->lmt & LMT_10Gb)
4984                fc_host_supported_speeds(shost) |= FC_PORTSPEED_10GBIT;
4985        if (phba->lmt & LMT_8Gb)
4986                fc_host_supported_speeds(shost) |= FC_PORTSPEED_8GBIT;
4987        if (phba->lmt & LMT_4Gb)
4988                fc_host_supported_speeds(shost) |= FC_PORTSPEED_4GBIT;
4989        if (phba->lmt & LMT_2Gb)
4990                fc_host_supported_speeds(shost) |= FC_PORTSPEED_2GBIT;
4991        if (phba->lmt & LMT_1Gb)
4992                fc_host_supported_speeds(shost) |= FC_PORTSPEED_1GBIT;
4993}
4994
4995/**
4996 * lpfc_host_attrib_init - Initialize SCSI host attributes on a FC port
4997 * @shost: pointer to SCSI host data structure.
4998 *
4999 * This routine initializes the attributes of a given SCSI host on an FC
5000 * port. The SCSI host can be on top of either a physical port or a virtual port.
5001 **/
5002void lpfc_host_attrib_init(struct Scsi_Host *shost)
5003{
5004        struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
5005        struct lpfc_hba   *phba = vport->phba;
5006        /*
5007         * Set fixed host attributes.  Must be done after lpfc_sli_hba_setup().
5008         */
5009
5010        fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
5011        fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
5012        fc_host_supported_classes(shost) = FC_COS_CLASS3;
5013
5014        memset(fc_host_supported_fc4s(shost), 0,
5015               sizeof(fc_host_supported_fc4s(shost)));
5016        fc_host_supported_fc4s(shost)[2] = 1;
5017        fc_host_supported_fc4s(shost)[7] = 1;
5018
5019        lpfc_vport_symbolic_node_name(vport, fc_host_symbolic_name(shost),
5020                                 sizeof(fc_host_symbolic_name(shost)));
5021
5022        lpfc_host_supported_speeds_set(shost);
5023
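        /* The BB receive data-field size is a 12-bit value carried in the
         * low nibble of bbRcvSizeMsb and all of bbRcvSizeLsb within the
         * common service parameters.
         */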
5024        fc_host_maxframe_size(shost) =
5025                (((uint32_t) vport->fc_sparam.cmn.bbRcvSizeMsb & 0x0F) << 8) |
5026                (uint32_t) vport->fc_sparam.cmn.bbRcvSizeLsb;
5027
5028        fc_host_dev_loss_tmo(shost) = vport->cfg_devloss_tmo;
5029
5030        /* This value is also unchanging */
5031        memset(fc_host_active_fc4s(shost), 0,
5032               sizeof(fc_host_active_fc4s(shost)));
5033        fc_host_active_fc4s(shost)[2] = 1;
5034        fc_host_active_fc4s(shost)[7] = 1;
5035
5036        fc_host_max_npiv_vports(shost) = phba->max_vpi;
5037        spin_lock_irq(shost->host_lock);
5038        vport->load_flag &= ~FC_LOADING;
5039        spin_unlock_irq(shost->host_lock);
5040}
5041
5042/**
5043 * lpfc_stop_port_s3 - Stop SLI3 device port
5044 * @phba: pointer to lpfc hba data structure.
5045 *
5046 * This routine is invoked to stop an SLI3 device port; it stops the device
5047 * from generating interrupts and stops the device driver's timers for the
5048 * device.
5049 **/
5050static void
5051lpfc_stop_port_s3(struct lpfc_hba *phba)
5052{
5053        /* Clear all interrupt enable conditions */
5054        writel(0, phba->HCregaddr);
5055        readl(phba->HCregaddr); /* flush */
5056        /* Clear all pending interrupts */
5057        writel(0xffffffff, phba->HAregaddr);
5058        readl(phba->HAregaddr); /* flush */
5059
5060        /* Reset some HBA SLI setup states */
5061        lpfc_stop_hba_timers(phba);
5062        phba->pport->work_port_events = 0;
5063}
5064
5065/**
5066 * lpfc_stop_port_s4 - Stop SLI4 device port
5067 * @phba: pointer to lpfc hba data structure.
5068 *
5069 * This routine is invoked to stop an SLI4 device port; it stops the device
5070 * from generating interrupts and stops the device driver's timers for the
5071 * device.
5072 **/
5073static void
5074lpfc_stop_port_s4(struct lpfc_hba *phba)
5075{
5076        /* Reset some HBA SLI4 setup states */
5077        lpfc_stop_hba_timers(phba);
5078        if (phba->pport)
5079                phba->pport->work_port_events = 0;
5080        phba->sli4_hba.intr_enable = 0;
5081}
5082
5083/**
5084 * lpfc_stop_port - Wrapper function for stopping hba port
5085 * @phba: Pointer to HBA context object.
5086 *
5087 * This routine wraps the actual SLI3 or SLI4 hba stop port routine through
5088 * the API jump table function pointer in the lpfc_hba struct.
5089 **/
5090void
5091lpfc_stop_port(struct lpfc_hba *phba)
5092{
5093        phba->lpfc_stop_port(phba);
5094
5095        if (phba->wq)
5096                flush_workqueue(phba->wq);
5097}
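
/*
 * A sketch of how the stop-port jump table is wired up (illustrative only;
 * the assignment is made elsewhere in the driver when the API table is
 * initialized):
 *
 *	phba->lpfc_stop_port = lpfc_stop_port_s3;	(SLI-3 devices)
 *	phba->lpfc_stop_port = lpfc_stop_port_s4;	(SLI-4 devices)
 */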
5098
5099/**
5100 * lpfc_fcf_redisc_wait_start_timer - Start fcf rediscover wait timer
5101 * @phba: Pointer to hba for which this call is being executed.
5102 *
5103 * This routine starts the timer waiting for the FCF rediscovery to complete.
5104 **/
5105void
5106lpfc_fcf_redisc_wait_start_timer(struct lpfc_hba *phba)
5107{
5108        unsigned long fcf_redisc_wait_tmo =
5109                (jiffies + msecs_to_jiffies(LPFC_FCF_REDISCOVER_WAIT_TMO));
5110        /* Start fcf rediscovery wait period timer */
5111        mod_timer(&phba->fcf.redisc_wait, fcf_redisc_wait_tmo);
5112        spin_lock_irq(&phba->hbalock);
5113        /* Allow action to new fcf asynchronous event */
5114        phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_SCAN_DONE);
5115        /* Mark the FCF rediscovery pending state */
5116        phba->fcf.fcf_flag |= FCF_REDISC_PEND;
5117        spin_unlock_irq(&phba->hbalock);
5118}
5119
5120/**
5121 * lpfc_sli4_fcf_redisc_wait_tmo - FCF table rediscover wait timeout
5122 * @t: Timer context used to obtain the pointer to lpfc hba data structure.
5123 *
5124 * This routine is invoked when the wait for FCF table rediscovery has
5125 * timed out. If new FCF record(s) have been discovered during the
5126 * wait period, a new FCF event shall be added to the FCoE async event
5127 * list, and the worker thread shall be woken up to process it from the
5128 * worker thread context.
5129 **/
5130static void
5131lpfc_sli4_fcf_redisc_wait_tmo(struct timer_list *t)
5132{
5133        struct lpfc_hba *phba = from_timer(phba, t, fcf.redisc_wait);
5134
5135        /* Don't send FCF rediscovery event if timer cancelled */
5136        spin_lock_irq(&phba->hbalock);
5137        if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND)) {
5138                spin_unlock_irq(&phba->hbalock);
5139                return;
5140        }
5141        /* Clear FCF rediscovery timer pending flag */
5142        phba->fcf.fcf_flag &= ~FCF_REDISC_PEND;
5143        /* FCF rediscovery event to worker thread */
5144        phba->fcf.fcf_flag |= FCF_REDISC_EVT;
5145        spin_unlock_irq(&phba->hbalock);
5146        lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
5147                        "2776 FCF rediscover quiescent timer expired\n");
5148        /* wake up worker thread */
5149        lpfc_worker_wake_up(phba);
5150}
5151
5152/**
5153 * lpfc_vmid_poll - VMID timeout detection
5154 * @t: Timer context used to obtain the pointer to lpfc hba data structure.
5155 *
5156 * This routine is invoked when a VM has issued no I/O for the specified
5157 * amount of time. When this situation is detected, the VMID has to be
5158 * deregistered from the switch and all the local resources freed. The VMID
5159 * will be reassigned to the VM once I/O begins again.
5160 **/
5161static void
5162lpfc_vmid_poll(struct timer_list *t)
5163{
5164        struct lpfc_hba *phba = from_timer(phba, t, inactive_vmid_poll);
5165        u32 wake_up = 0;
5166
5167        /* check if there is a need to issue QFPA */
5168        if (phba->pport->vmid_priority_tagging) {
5169                wake_up = 1;
5170                phba->pport->work_port_events |= WORKER_CHECK_VMID_ISSUE_QFPA;
5171        }
5172
5173        /* Is the vmid inactivity timer enabled */
5174        if (phba->pport->vmid_inactivity_timeout ||
5175            phba->pport->load_flag & FC_DEREGISTER_ALL_APP_ID) {
5176                wake_up = 1;
5177                phba->pport->work_port_events |= WORKER_CHECK_INACTIVE_VMID;
5178        }
5179
5180        if (wake_up)
5181                lpfc_worker_wake_up(phba);
5182
5183        /* restart the timer for the next iteration */
5184        mod_timer(&phba->inactive_vmid_poll, jiffies + msecs_to_jiffies(1000 *
5185                                                        LPFC_VMID_TIMER));
5186}
5187
5188/**
5189 * lpfc_sli4_parse_latt_fault - Parse sli4 link-attention link fault code
5190 * @phba: pointer to lpfc hba data structure.
5191 * @acqe_link: pointer to the async link completion queue entry.
5192 *
5193 * This routine is to parse the SLI4 link-attention link fault code.
5194 **/
5195static void
5196lpfc_sli4_parse_latt_fault(struct lpfc_hba *phba,
5197                           struct lpfc_acqe_link *acqe_link)
5198{
5199        switch (bf_get(lpfc_acqe_link_fault, acqe_link)) {
5200        case LPFC_ASYNC_LINK_FAULT_NONE:
5201        case LPFC_ASYNC_LINK_FAULT_LOCAL:
5202        case LPFC_ASYNC_LINK_FAULT_REMOTE:
5203        case LPFC_ASYNC_LINK_FAULT_LR_LRR:
5204                break;
5205        default:
5206                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5207                                "0398 Unknown link fault code: x%x\n",
5208                                bf_get(lpfc_acqe_link_fault, acqe_link));
5209                break;
5210        }
5211}
5212
5213/**
5214 * lpfc_sli4_parse_latt_type - Parse sli4 link attention type
5215 * @phba: pointer to lpfc hba data structure.
5216 * @acqe_link: pointer to the async link completion queue entry.
5217 *
5218 * This routine is to parse the SLI4 link attention type and translate it
5219 * into the base driver's link attention type coding.
5220 *
5221 * Return: Link attention type in terms of base driver's coding.
5222 **/
5223static uint8_t
5224lpfc_sli4_parse_latt_type(struct lpfc_hba *phba,
5225                          struct lpfc_acqe_link *acqe_link)
5226{
5227        uint8_t att_type;
5228
5229        switch (bf_get(lpfc_acqe_link_status, acqe_link)) {
5230        case LPFC_ASYNC_LINK_STATUS_DOWN:
5231        case LPFC_ASYNC_LINK_STATUS_LOGICAL_DOWN:
5232                att_type = LPFC_ATT_LINK_DOWN;
5233                break;
5234        case LPFC_ASYNC_LINK_STATUS_UP:
5235                /* Ignore physical link up events - wait for logical link up */
5236                att_type = LPFC_ATT_RESERVED;
5237                break;
5238        case LPFC_ASYNC_LINK_STATUS_LOGICAL_UP:
5239                att_type = LPFC_ATT_LINK_UP;
5240                break;
5241        default:
5242                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5243                                "0399 Invalid link attention type: x%x\n",
5244                                bf_get(lpfc_acqe_link_status, acqe_link));
5245                att_type = LPFC_ATT_RESERVED;
5246                break;
5247        }
5248        return att_type;
5249}
5250
5251/**
5252 * lpfc_sli_port_speed_get - Get an FC port's link speed in Mbps
5253 * @phba: pointer to lpfc hba data structure.
5254 *
5255 * This routine returns the port's link speed in Mbps: translated from the
5256 * fc_linkspeed code on SLI3 ports, or read from the SLI4 link state.
5257 * Return: link speed in terms of Mbps; 0 if the link is down.
5258 **/
5259uint32_t
5260lpfc_sli_port_speed_get(struct lpfc_hba *phba)
5261{
5262        uint32_t link_speed;
5263
5264        if (!lpfc_is_link_up(phba))
5265                return 0;
5266
5267        if (phba->sli_rev <= LPFC_SLI_REV3) {
5268                switch (phba->fc_linkspeed) {
5269                case LPFC_LINK_SPEED_1GHZ:
5270                        link_speed = 1000;
5271                        break;
5272                case LPFC_LINK_SPEED_2GHZ:
5273                        link_speed = 2000;
5274                        break;
5275                case LPFC_LINK_SPEED_4GHZ:
5276                        link_speed = 4000;
5277                        break;
5278                case LPFC_LINK_SPEED_8GHZ:
5279                        link_speed = 8000;
5280                        break;
5281                case LPFC_LINK_SPEED_10GHZ:
5282                        link_speed = 10000;
5283                        break;
5284                case LPFC_LINK_SPEED_16GHZ:
5285                        link_speed = 16000;
5286                        break;
5287                default:
5288                        link_speed = 0;
5289                }
5290        } else {
5291                if (phba->sli4_hba.link_state.logical_speed)
5292                        link_speed =
5293                              phba->sli4_hba.link_state.logical_speed;
5294                else
5295                        link_speed = phba->sli4_hba.link_state.speed;
5296        }
5297        return link_speed;
5298}
5299
5300/**
5301 * lpfc_sli4_port_speed_parse - Parse async evt link speed code to link speed
5302 * @phba: pointer to lpfc hba data structure.
5303 * @evt_code: asynchronous event code.
5304 * @speed_code: asynchronous event link speed code.
5305 *
5306 * This routine parses the given SLI4 async event link speed code into a
5307 * link speed value in Mbps.
5308 *
5309 * Return: link speed in terms of Mbps.
5310 **/
5311static uint32_t
5312lpfc_sli4_port_speed_parse(struct lpfc_hba *phba, uint32_t evt_code,
5313                           uint8_t speed_code)
5314{
5315        uint32_t port_speed;
5316
5317        switch (evt_code) {
5318        case LPFC_TRAILER_CODE_LINK:
5319                switch (speed_code) {
5320                case LPFC_ASYNC_LINK_SPEED_ZERO:
5321                        port_speed = 0;
5322                        break;
5323                case LPFC_ASYNC_LINK_SPEED_10MBPS:
5324                        port_speed = 10;
5325                        break;
5326                case LPFC_ASYNC_LINK_SPEED_100MBPS:
5327                        port_speed = 100;
5328                        break;
5329                case LPFC_ASYNC_LINK_SPEED_1GBPS:
5330                        port_speed = 1000;
5331                        break;
5332                case LPFC_ASYNC_LINK_SPEED_10GBPS:
5333                        port_speed = 10000;
5334                        break;
5335                case LPFC_ASYNC_LINK_SPEED_20GBPS:
5336                        port_speed = 20000;
5337                        break;
5338                case LPFC_ASYNC_LINK_SPEED_25GBPS:
5339                        port_speed = 25000;
5340                        break;
5341                case LPFC_ASYNC_LINK_SPEED_40GBPS:
5342                        port_speed = 40000;
5343                        break;
5344                case LPFC_ASYNC_LINK_SPEED_100GBPS:
5345                        port_speed = 100000;
5346                        break;
5347                default:
5348                        port_speed = 0;
5349                }
5350                break;
5351        case LPFC_TRAILER_CODE_FC:
5352                switch (speed_code) {
5353                case LPFC_FC_LA_SPEED_UNKNOWN:
5354                        port_speed = 0;
5355                        break;
5356                case LPFC_FC_LA_SPEED_1G:
5357                        port_speed = 1000;
5358                        break;
5359                case LPFC_FC_LA_SPEED_2G:
5360                        port_speed = 2000;
5361                        break;
5362                case LPFC_FC_LA_SPEED_4G:
5363                        port_speed = 4000;
5364                        break;
5365                case LPFC_FC_LA_SPEED_8G:
5366                        port_speed = 8000;
5367                        break;
5368                case LPFC_FC_LA_SPEED_10G:
5369                        port_speed = 10000;
5370                        break;
5371                case LPFC_FC_LA_SPEED_16G:
5372                        port_speed = 16000;
5373                        break;
5374                case LPFC_FC_LA_SPEED_32G:
5375                        port_speed = 32000;
5376                        break;
5377                case LPFC_FC_LA_SPEED_64G:
5378                        port_speed = 64000;
5379                        break;
5380                case LPFC_FC_LA_SPEED_128G:
5381                        port_speed = 128000;
5382                        break;
5383                case LPFC_FC_LA_SPEED_256G:
5384                        port_speed = 256000;
5385                        break;
5386                default:
5387                        port_speed = 0;
5388                }
5389                break;
5390        default:
5391                port_speed = 0;
5392        }
5393        return port_speed;
5394}
5395
5396/**
5397 * lpfc_sli4_async_link_evt - Process the asynchronous FCoE link event
5398 * @phba: pointer to lpfc hba data structure.
5399 * @acqe_link: pointer to the async link completion queue entry.
5400 *
5401 * This routine is to handle the SLI4 asynchronous FCoE link event.
5402 **/
5403static void
5404lpfc_sli4_async_link_evt(struct lpfc_hba *phba,
5405                         struct lpfc_acqe_link *acqe_link)
5406{
5407        LPFC_MBOXQ_t *pmb;
5408        MAILBOX_t *mb;
5409        struct lpfc_mbx_read_top *la;
5410        uint8_t att_type;
5411        int rc;
5412
5413        att_type = lpfc_sli4_parse_latt_type(phba, acqe_link);
5414        if (att_type != LPFC_ATT_LINK_DOWN && att_type != LPFC_ATT_LINK_UP)
5415                return;
5416        phba->fcoe_eventtag = acqe_link->event_tag;
5417        pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5418        if (!pmb) {
5419                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5420                                "0395 The mboxq allocation failed\n");
5421                return;
5422        }
5423
5424        rc = lpfc_mbox_rsrc_prep(phba, pmb);
5425        if (rc) {
5426                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5427                                "0396 mailbox allocation failed\n");
5428                goto out_free_pmb;
5429        }
5430
5431        /* Cleanup any outstanding ELS commands */
5432        lpfc_els_flush_all_cmd(phba);
5433
5434        /* Block ELS IOCBs until we have processed the link event */
5435        phba->sli4_hba.els_wq->pring->flag |= LPFC_STOP_IOCB_EVENT;
5436
5437        /* Update link event statistics */
5438        phba->sli.slistat.link_event++;
5439
5440        /* Create lpfc_handle_latt mailbox command from link ACQE */
5441        lpfc_read_topology(phba, pmb, (struct lpfc_dmabuf *)pmb->ctx_buf);
5442        pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology;
5443        pmb->vport = phba->pport;
5444
5445        /* Keep the link status for extra SLI4 state machine reference */
5446        phba->sli4_hba.link_state.speed =
5447                        lpfc_sli4_port_speed_parse(phba, LPFC_TRAILER_CODE_LINK,
5448                                bf_get(lpfc_acqe_link_speed, acqe_link));
5449        phba->sli4_hba.link_state.duplex =
5450                                bf_get(lpfc_acqe_link_duplex, acqe_link);
5451        phba->sli4_hba.link_state.status =
5452                                bf_get(lpfc_acqe_link_status, acqe_link);
5453        phba->sli4_hba.link_state.type =
5454                                bf_get(lpfc_acqe_link_type, acqe_link);
5455        phba->sli4_hba.link_state.number =
5456                                bf_get(lpfc_acqe_link_number, acqe_link);
5457        phba->sli4_hba.link_state.fault =
5458                                bf_get(lpfc_acqe_link_fault, acqe_link);
5459        phba->sli4_hba.link_state.logical_speed =
5460                        bf_get(lpfc_acqe_logical_link_speed, acqe_link) * 10;
5461
5462        lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
5463                        "2900 Async FC/FCoE Link event - Speed:%dGBit "
5464                        "duplex:x%x LA Type:x%x Port Type:%d Port Number:%d "
5465                        "Logical speed:%dMbps Fault:%d\n",
5466                        phba->sli4_hba.link_state.speed,
5467                        phba->sli4_hba.link_state.duplex,
5468                        phba->sli4_hba.link_state.status,
5469                        phba->sli4_hba.link_state.type,
5470                        phba->sli4_hba.link_state.number,
5471                        phba->sli4_hba.link_state.logical_speed,
5472                        phba->sli4_hba.link_state.fault);
5473        /*
5474         * For FC Mode: issue the READ_TOPOLOGY mailbox command to fetch
5475         * topology info. Note: Optional for non FC-AL ports.
5476         */
5477        if (!(phba->hba_flag & HBA_FCOE_MODE)) {
5478                rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
5479                if (rc == MBX_NOT_FINISHED)
5480                        goto out_free_pmb;
5481                return;
5482        }
5483        /*
5484         * For FCoE Mode: fill in all the topology information we need and call
5485         * the READ_TOPOLOGY completion routine to continue without actually
5486         * sending the READ_TOPOLOGY mailbox command to the port.
5487         */
5488        /* Initialize completion status */
5489        mb = &pmb->u.mb;
5490        mb->mbxStatus = MBX_SUCCESS;
5491
5492        /* Parse port fault information field */
5493        lpfc_sli4_parse_latt_fault(phba, acqe_link);
5494
5495        /* Parse and translate link attention fields */
5496        la = (struct lpfc_mbx_read_top *) &pmb->u.mb.un.varReadTop;
5497        la->eventTag = acqe_link->event_tag;
5498        bf_set(lpfc_mbx_read_top_att_type, la, att_type);
5499        bf_set(lpfc_mbx_read_top_link_spd, la,
5500               (bf_get(lpfc_acqe_link_speed, acqe_link)));
5501
5502        /* Fake the following irrelevant fields */
5503        bf_set(lpfc_mbx_read_top_topology, la, LPFC_TOPOLOGY_PT_PT);
5504        bf_set(lpfc_mbx_read_top_alpa_granted, la, 0);
5505        bf_set(lpfc_mbx_read_top_il, la, 0);
5506        bf_set(lpfc_mbx_read_top_pb, la, 0);
5507        bf_set(lpfc_mbx_read_top_fa, la, 0);
5508        bf_set(lpfc_mbx_read_top_mm, la, 0);
5509
5510        /* Invoke the lpfc_handle_latt mailbox command callback function */
5511        lpfc_mbx_cmpl_read_topology(phba, pmb);
5512
5513        return;
5514
5515out_free_pmb:
5516        lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED);
5517}
5518
5519/**
5520 * lpfc_async_link_speed_to_read_top - Parse async evt link speed code to read
5521 * topology.
5522 * @phba: pointer to lpfc hba data structure.
5523 * @speed_code: asynchronous event link speed code.
5524 *
5525 * This routine parses the given SLI4 async event link speed code into the
5526 * READ_TOPOLOGY link speed encoding.
5527 *
5528 * Return: link speed in terms of Read topology.
5529 **/
5530static uint8_t
5531lpfc_async_link_speed_to_read_top(struct lpfc_hba *phba, uint8_t speed_code)
5532{
5533        uint8_t port_speed;
5534
5535        switch (speed_code) {
5536        case LPFC_FC_LA_SPEED_1G:
5537                port_speed = LPFC_LINK_SPEED_1GHZ;
5538                break;
5539        case LPFC_FC_LA_SPEED_2G:
5540                port_speed = LPFC_LINK_SPEED_2GHZ;
5541                break;
5542        case LPFC_FC_LA_SPEED_4G:
5543                port_speed = LPFC_LINK_SPEED_4GHZ;
5544                break;
5545        case LPFC_FC_LA_SPEED_8G:
5546                port_speed = LPFC_LINK_SPEED_8GHZ;
5547                break;
5548        case LPFC_FC_LA_SPEED_16G:
5549                port_speed = LPFC_LINK_SPEED_16GHZ;
5550                break;
5551        case LPFC_FC_LA_SPEED_32G:
5552                port_speed = LPFC_LINK_SPEED_32GHZ;
5553                break;
5554        case LPFC_FC_LA_SPEED_64G:
5555                port_speed = LPFC_LINK_SPEED_64GHZ;
5556                break;
5557        case LPFC_FC_LA_SPEED_128G:
5558                port_speed = LPFC_LINK_SPEED_128GHZ;
5559                break;
5560        case LPFC_FC_LA_SPEED_256G:
5561                port_speed = LPFC_LINK_SPEED_256GHZ;
5562                break;
5563        default:
5564                port_speed = 0;
5565                break;
5566        }
5567
5568        return port_speed;
5569}
5570
5571void
5572lpfc_cgn_dump_rxmonitor(struct lpfc_hba *phba)
5573{
5574        struct rxtable_entry *entry;
5575        int cnt = 0, head, tail, last, start;
5576
5577        head = atomic_read(&phba->rxtable_idx_head);
5578        tail = atomic_read(&phba->rxtable_idx_tail);
5579        if (!phba->rxtable || head == tail) {
5580                lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT,
5581                                "4411 Rxtable is empty\n");
5582                return;
5583        }
5584        last = tail;
5585        start = head;
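        /* head is the next slot to be written, so stepping backward (with
         * wraparound) visits the most recently written entries first,
         * stopping at tail or after LPFC_MAX_RXMONITOR_DUMP entries.
         */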
5586
5587        /* Display the last LPFC_MAX_RXMONITOR_DUMP entries from the rxtable */
5588        while (start != last) {
5589                if (start)
5590                        start--;
5591                else
5592                        start = LPFC_MAX_RXMONITOR_ENTRY - 1;
5593                entry = &phba->rxtable[start];
5594                lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
5595                                "4410 %02d: MBPI %lld Xmit %lld Cmpl %lld "
5596                                "Lat %lld ASz %lld Info %02d BWUtil %d "
5597                                "Int %d slot %d\n",
5598                                cnt, entry->max_bytes_per_interval,
5599                                entry->total_bytes, entry->rcv_bytes,
5600                                entry->avg_io_latency, entry->avg_io_size,
5601                                entry->cmf_info, entry->timer_utilization,
5602                                entry->timer_interval, start);
5603                cnt++;
5604                if (cnt >= LPFC_MAX_RXMONITOR_DUMP)
5605                        return;
5606        }
5607}
5608
5609/**
5610 * lpfc_cgn_update_stat - Save data into congestion stats buffer
5611 * @phba: pointer to lpfc hba data structure.
5612 * @dtag: FPIN descriptor tag received
5613 *
5614 * Increment the matching FPIN received counter and record the receive time.
5615 */
5616void
5617lpfc_cgn_update_stat(struct lpfc_hba *phba, uint32_t dtag)
5618{
5619        struct lpfc_cgn_info *cp;
5620        struct tm broken;
5621        struct timespec64 cur_time;
5622        u32 cnt;
5623        u32 value;
5624
5625        /* Make sure we have a congestion info buffer */
5626        if (!phba->cgn_i)
5627                return;
5628        cp = (struct lpfc_cgn_info *)phba->cgn_i->virt;
5629        ktime_get_real_ts64(&cur_time);
5630        time64_to_tm(cur_time.tv_sec, 0, &broken);
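        /* time64_to_tm() reports tm_mon as 0-11 and tm_year as years since
         * 1900, hence the tm_mon + 1 (1-12) and tm_year - 100 (years since
         * 2000) adjustments in the cases below.
         */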
5631
5632        /* Update congestion statistics */
5633        switch (dtag) {
5634        case ELS_DTAG_LNK_INTEGRITY:
5635                cnt = le32_to_cpu(cp->link_integ_notification);
5636                cnt++;
5637                cp->link_integ_notification = cpu_to_le32(cnt);
5638
5639                cp->cgn_stat_lnk_month = broken.tm_mon + 1;
5640                cp->cgn_stat_lnk_day = broken.tm_mday;
5641                cp->cgn_stat_lnk_year = broken.tm_year - 100;
5642                cp->cgn_stat_lnk_hour = broken.tm_hour;
5643                cp->cgn_stat_lnk_min = broken.tm_min;
5644                cp->cgn_stat_lnk_sec = broken.tm_sec;
5645                break;
5646        case ELS_DTAG_DELIVERY:
5647                cnt = le32_to_cpu(cp->delivery_notification);
5648                cnt++;
5649                cp->delivery_notification = cpu_to_le32(cnt);
5650
5651                cp->cgn_stat_del_month = broken.tm_mon + 1;
5652                cp->cgn_stat_del_day = broken.tm_mday;
5653                cp->cgn_stat_del_year = broken.tm_year - 100;
5654                cp->cgn_stat_del_hour = broken.tm_hour;
5655                cp->cgn_stat_del_min = broken.tm_min;
5656                cp->cgn_stat_del_sec = broken.tm_sec;
5657                break;
5658        case ELS_DTAG_PEER_CONGEST:
5659                cnt = le32_to_cpu(cp->cgn_peer_notification);
5660                cnt++;
5661                cp->cgn_peer_notification = cpu_to_le32(cnt);
5662
5663                cp->cgn_stat_peer_month = broken.tm_mon + 1;
5664                cp->cgn_stat_peer_day = broken.tm_mday;
5665                cp->cgn_stat_peer_year = broken.tm_year - 100;
5666                cp->cgn_stat_peer_hour = broken.tm_hour;
5667                cp->cgn_stat_peer_min = broken.tm_min;
5668                cp->cgn_stat_peer_sec = broken.tm_sec;
5669                break;
5670        case ELS_DTAG_CONGESTION:
5671                cnt = le32_to_cpu(cp->cgn_notification);
5672                cnt++;
5673                cp->cgn_notification = cpu_to_le32(cnt);
5674
5675                cp->cgn_stat_cgn_month = broken.tm_mon + 1;
5676                cp->cgn_stat_cgn_day = broken.tm_mday;
5677                cp->cgn_stat_cgn_year = broken.tm_year - 100;
5678                cp->cgn_stat_cgn_hour = broken.tm_hour;
5679                cp->cgn_stat_cgn_min = broken.tm_min;
5680                cp->cgn_stat_cgn_sec = broken.tm_sec;
5681        }
5682        if (phba->cgn_fpin_frequency &&
5683            phba->cgn_fpin_frequency != LPFC_FPIN_INIT_FREQ) {
5684                value = LPFC_CGN_TIMER_TO_MIN / phba->cgn_fpin_frequency;
5685                cp->cgn_stat_npm = value;
5686        }
5687        value = lpfc_cgn_calc_crc32(cp, LPFC_CGN_INFO_SZ,
5688                                    LPFC_CGN_CRC32_SEED);
5689        cp->cgn_info_crc = cpu_to_le32(value);
5690}
5691
5692/**
5693 * lpfc_cgn_save_evt_cnt - Save data into registered congestion buffer
5694 * @phba: pointer to lpfc hba data structure.
5695 *
5696 * Save the congestion event data every minute.
5697 * On the hour, collapse all the minute data into hour data; every day,
5698 * collapse all the hour data into daily data. Driver and fabric
5699 * congestion event counters are kept separate and are saved out
5700 * to the registered congestion buffer every minute.
5701 */
5702static void
5703lpfc_cgn_save_evt_cnt(struct lpfc_hba *phba)
5704{
5705        struct lpfc_cgn_info *cp;
5706        struct tm broken;
5707        struct timespec64 cur_time;
5708        uint32_t i, index;
5709        uint16_t value, mvalue;
5710        uint64_t bps;
5711        uint32_t mbps;
5712        uint32_t dvalue, wvalue, lvalue, avalue;
5713        uint64_t latsum;
5714        __le16 *ptr;
5715        __le32 *lptr;
5716        __le16 *mptr;
5717
5718        /* Make sure we have a congestion info buffer */
5719        if (!phba->cgn_i)
5720                return;
5721        cp = (struct lpfc_cgn_info *)phba->cgn_i->virt;
5722
5723        if (time_before(jiffies, phba->cgn_evt_timestamp))
5724                return;
5725        phba->cgn_evt_timestamp = jiffies +
5726                        msecs_to_jiffies(LPFC_CGN_TIMER_TO_MIN);
5727        phba->cgn_evt_minute++;
5728
5729        /* We should get to this point in the routine on 1 minute intervals */
5730
5731        ktime_get_real_ts64(&cur_time);
5732        time64_to_tm(cur_time.tv_sec, 0, &broken);
5733
5734        if (phba->cgn_fpin_frequency &&
5735            phba->cgn_fpin_frequency != LPFC_FPIN_INIT_FREQ) {
5736                value = LPFC_CGN_TIMER_TO_MIN / phba->cgn_fpin_frequency;
5737                cp->cgn_stat_npm = value;
5738        }
5739
5740        /* Read and clear the latency counters for this minute */
5741        lvalue = atomic_read(&phba->cgn_latency_evt_cnt);
5742        latsum = atomic64_read(&phba->cgn_latency_evt);
5743        atomic_set(&phba->cgn_latency_evt_cnt, 0);
5744        atomic64_set(&phba->cgn_latency_evt, 0);
5745
5746        /* We need to store MB/sec bandwidth in the congestion information.
5747         * block_cnt is count of 512 byte blocks for the entire minute,
5748         * bps will get bytes per sec before finally converting to MB/sec.
5749         */
5750        bps = div_u64(phba->rx_block_cnt, LPFC_SEC_MIN) * 512;
5751        phba->rx_block_cnt = 0;
5752        mvalue = bps / (1024 * 1024); /* convert to MB/sec */
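        /* Worked example with illustrative numbers, assuming LPFC_SEC_MIN
         * is 60: 12,288,000 blocks in the minute -> 204,800 blocks/sec,
         * times 512 = 104,857,600 bytes/sec, / (1024 * 1024) = 100 MB/sec.
         */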
5753
5754        /* Every minute */
5755        /* cgn parameters */
5756        cp->cgn_info_mode = phba->cgn_p.cgn_param_mode;
5757        cp->cgn_info_level0 = phba->cgn_p.cgn_param_level0;
5758        cp->cgn_info_level1 = phba->cgn_p.cgn_param_level1;
5759        cp->cgn_info_level2 = phba->cgn_p.cgn_param_level2;
5760
5761        /* Fill in default LUN qdepth */
5762        value = (uint16_t)(phba->pport->cfg_lun_queue_depth);
5763        cp->cgn_lunq = cpu_to_le16(value);
5764
5765        /* Record congestion buffer info - every minute
5766         * cgn_driver_evt_cnt (Driver events)
5767         * cgn_fabric_warn_cnt (Congestion Warnings)
5768         * cgn_latency_evt_cnt / cgn_latency_evt (IO Latency)
5769         * cgn_fabric_alarm_cnt (Congestion Alarms)
5770         */
5771        index = ++cp->cgn_index_minute;
5772        if (cp->cgn_index_minute == LPFC_MIN_HOUR) {
5773                cp->cgn_index_minute = 0;
5774                index = 0;
5775        }
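        /* cgn_index_minute thus advances 1, 2, ... LPFC_MIN_HOUR - 1, 0, so
         * the minute arrays always hold the latest LPFC_MIN_HOUR samples.
         */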
5776
5777        /* Get the number of driver events in this sample and reset counter */
5778        dvalue = atomic_read(&phba->cgn_driver_evt_cnt);
5779        atomic_set(&phba->cgn_driver_evt_cnt, 0);
5780
5781        /* Get the number of warning events - FPIN and Signal for this minute */
5782        wvalue = 0;
5783        if ((phba->cgn_reg_fpin & LPFC_CGN_FPIN_WARN) ||
5784            phba->cgn_reg_signal == EDC_CG_SIG_WARN_ONLY ||
5785            phba->cgn_reg_signal == EDC_CG_SIG_WARN_ALARM)
5786                wvalue = atomic_read(&phba->cgn_fabric_warn_cnt);
5787        atomic_set(&phba->cgn_fabric_warn_cnt, 0);
5788
5789        /* Get the number of alarm events - FPIN and Signal for this minute */
5790        avalue = 0;
5791        if ((phba->cgn_reg_fpin & LPFC_CGN_FPIN_ALARM) ||
5792            phba->cgn_reg_signal == EDC_CG_SIG_WARN_ALARM)
5793                avalue = atomic_read(&phba->cgn_fabric_alarm_cnt);
5794        atomic_set(&phba->cgn_fabric_alarm_cnt, 0);
5795
5796        /* Collect the driver, warning, alarm and latency counts for this
5797         * minute into the driver congestion buffer.
5798         */
5799        ptr = &cp->cgn_drvr_min[index];
5800        value = (uint16_t)dvalue;
5801        *ptr = cpu_to_le16(value);
5802
5803        ptr = &cp->cgn_warn_min[index];
5804        value = (uint16_t)wvalue;
5805        *ptr = cpu_to_le16(value);
5806
5807        ptr = &cp->cgn_alarm_min[index];
5808        value = (uint16_t)avalue;
5809        *ptr = cpu_to_le16(value);
5810
5811        lptr = &cp->cgn_latency_min[index];
5812        if (lvalue) {
5813                lvalue = (uint32_t)div_u64(latsum, lvalue);
5814                *lptr = cpu_to_le32(lvalue);
5815        } else {
5816                *lptr = 0;
5817        }
5818
5819        /* Collect the bandwidth value into the driver's congestion buffer. */
5820        mptr = &cp->cgn_bw_min[index];
5821        *mptr = cpu_to_le16(mvalue);
5822
5823        lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
5824                        "2418 Congestion Info - minute (%d): %d %d %d %d %d\n",
5825                        index, dvalue, wvalue, lvalue, mvalue, avalue);
5826
5827        /* Every hour */
5828        if ((phba->cgn_evt_minute % LPFC_MIN_HOUR) == 0) {
5829                /* Record congestion buffer info - every hour
5830                 * Collapse all minutes into an hour
5831                 */
5832                index = ++cp->cgn_index_hour;
5833                if (cp->cgn_index_hour == LPFC_HOUR_DAY) {
5834                        cp->cgn_index_hour = 0;
5835                        index = 0;
5836                }
5837
5838                dvalue = 0;
5839                wvalue = 0;
5840                lvalue = 0;
5841                avalue = 0;
5842                mvalue = 0;
5843                mbps = 0;
5844                for (i = 0; i < LPFC_MIN_HOUR; i++) {
5845                        dvalue += le16_to_cpu(cp->cgn_drvr_min[i]);
5846                        wvalue += le16_to_cpu(cp->cgn_warn_min[i]);
5847                        lvalue += le32_to_cpu(cp->cgn_latency_min[i]);
5848                        mbps += le16_to_cpu(cp->cgn_bw_min[i]);
5849                        avalue += le16_to_cpu(cp->cgn_alarm_min[i]);
5850                }
5851                if (lvalue)             /* Avg of latency averages */
5852                        lvalue /= LPFC_MIN_HOUR;
5853                if (mbps)               /* Avg of Bandwidth averages */
5854                        mvalue = mbps / LPFC_MIN_HOUR;
5855
5856                lptr = &cp->cgn_drvr_hr[index];
5857                *lptr = cpu_to_le32(dvalue);
5858                lptr = &cp->cgn_warn_hr[index];
5859                *lptr = cpu_to_le32(wvalue);
5860                lptr = &cp->cgn_latency_hr[index];
5861                *lptr = cpu_to_le32(lvalue);
5862                mptr = &cp->cgn_bw_hr[index];
5863                *mptr = cpu_to_le16(mvalue);
5864                lptr = &cp->cgn_alarm_hr[index];
5865                *lptr = cpu_to_le32(avalue);
5866
5867                lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
5868                                "2419 Congestion Info - hour "
5869                                "(%d): %d %d %d %d %d\n",
5870                                index, dvalue, wvalue, lvalue, mvalue, avalue);
5871        }
5872
5873        /* Every day */
5874        if ((phba->cgn_evt_minute % LPFC_MIN_DAY) == 0) {
5875                /* Record congestion buffer info - every day
5876                 * Collapse all hours into a day. Rotate days
5877                 * after LPFC_MAX_CGN_DAYS.
5878                 */
5879                index = ++cp->cgn_index_day;
5880                if (cp->cgn_index_day == LPFC_MAX_CGN_DAYS) {
5881                        cp->cgn_index_day = 0;
5882                        index = 0;
5883                }
5884
5885                /* Anytime we overwrite daily index 0, after we wrap,
5886                 * we will be overwriting the oldest day, so we must
5887                 * update the congestion data start time for that day.
5888                 * That start time should have previously been saved after
5889                 * we wrote the last day's worth of data.
5890                 */
5891                if ((phba->hba_flag & HBA_CGN_DAY_WRAP) && index == 0) {
5892                        time64_to_tm(phba->cgn_daily_ts.tv_sec, 0, &broken);
5893
5894                        cp->cgn_info_month = broken.tm_mon + 1;
5895                        cp->cgn_info_day = broken.tm_mday;
5896                        cp->cgn_info_year = broken.tm_year - 100;
5897                        cp->cgn_info_hour = broken.tm_hour;
5898                        cp->cgn_info_minute = broken.tm_min;
5899                        cp->cgn_info_second = broken.tm_sec;
5900
5901                        lpfc_printf_log
5902                                (phba, KERN_INFO, LOG_CGN_MGMT,
5903                                "2646 CGNInfo idx0 Start Time: "
5904                                "%d/%d/%d %d:%d:%d\n",
5905                                cp->cgn_info_day, cp->cgn_info_month,
5906                                cp->cgn_info_year, cp->cgn_info_hour,
5907                                cp->cgn_info_minute, cp->cgn_info_second);
5908                }
5909
5910                dvalue = 0;
5911                wvalue = 0;
5912                lvalue = 0;
5913                mvalue = 0;
5914                mbps = 0;
5915                avalue = 0;
5916                for (i = 0; i < LPFC_HOUR_DAY; i++) {
5917                        dvalue += le32_to_cpu(cp->cgn_drvr_hr[i]);
5918                        wvalue += le32_to_cpu(cp->cgn_warn_hr[i]);
5919                        lvalue += le32_to_cpu(cp->cgn_latency_hr[i]);
5920                        mbps += le16_to_cpu(cp->cgn_bw_hr[i]);
5921                        avalue += le32_to_cpu(cp->cgn_alarm_hr[i]);
5922                }
5923                if (lvalue)             /* Avg of latency averages */
5924                        lvalue /= LPFC_HOUR_DAY;
5925                if (mbps)               /* Avg of Bandwidth averages */
5926                        mvalue = mbps / LPFC_HOUR_DAY;
5927
5928                lptr = &cp->cgn_drvr_day[index];
5929                *lptr = cpu_to_le32(dvalue);
5930                lptr = &cp->cgn_warn_day[index];
5931                *lptr = cpu_to_le32(wvalue);
5932                lptr = &cp->cgn_latency_day[index];
5933                *lptr = cpu_to_le32(lvalue);
5934                mptr = &cp->cgn_bw_day[index];
5935                *mptr = cpu_to_le16(mvalue);
5936                lptr = &cp->cgn_alarm_day[index];
5937                *lptr = cpu_to_le32(avalue);
5938
5939                lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
5940                                "2420 Congestion Info - daily (%d): "
5941                                "%d %d %d %d %d\n",
5942                                index, dvalue, wvalue, lvalue, mvalue, avalue);
5943
5944                /* We just wrote LPFC_MAX_CGN_DAYS of data,
5945                 * so we are wrapped on any data after this.
5946                 * Save this as the start time for the next day.
5947                 */
5948                if (index == (LPFC_MAX_CGN_DAYS - 1)) {
5949                        phba->hba_flag |= HBA_CGN_DAY_WRAP;
5950                        ktime_get_real_ts64(&phba->cgn_daily_ts);
5951                }
5952        }
5953
5954        /* Use the frequency found in the last rcv'ed FPIN */
5955        value = phba->cgn_fpin_frequency;
5956        cp->cgn_warn_freq = cpu_to_le16(value);
5957        cp->cgn_alarm_freq = cpu_to_le16(value);
5958
5959        lvalue = lpfc_cgn_calc_crc32(cp, LPFC_CGN_INFO_SZ,
5960                                     LPFC_CGN_CRC32_SEED);
5961        cp->cgn_info_crc = cpu_to_le32(lvalue);
5962}
5963
5964/**
5965 * lpfc_calc_cmf_latency - latency from start of the CMF timer interval
5966 * @phba: The Hba for which this call is being executed.
5967 *
5968 * The routine calculates the latency from the beginning of the CMF timer
5969 * interval to the current point in time. It is called from IO completion
5970 * when we exceed our bandwidth limitation for the time interval.
5971 */
5972uint32_t
5973lpfc_calc_cmf_latency(struct lpfc_hba *phba)
5974{
5975        struct timespec64 cmpl_time;
5976        uint32_t msec = 0;
5977
5978        ktime_get_real_ts64(&cmpl_time);
5979
5980        /* This routine works on a ms granularity so sec and nsec are
5981         * converted accordingly.
5982         */
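        /* For example, with cmf_latency = {100, 950000000} and cmpl_time =
         * {101, 50000000}, the borrow branch below yields
         * (101 - 100 - 1) * 1000 + ((1000000000 - 950000000) + 50000000) /
         * 1000000 = 0 + 100 = 100 ms.
         */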
5983        if (cmpl_time.tv_sec == phba->cmf_latency.tv_sec) {
5984                msec = (cmpl_time.tv_nsec - phba->cmf_latency.tv_nsec) /
5985                        NSEC_PER_MSEC;
5986        } else {
5987                if (cmpl_time.tv_nsec >= phba->cmf_latency.tv_nsec) {
5988                        msec = (cmpl_time.tv_sec -
5989                                phba->cmf_latency.tv_sec) * MSEC_PER_SEC;
5990                        msec += ((cmpl_time.tv_nsec -
5991                                  phba->cmf_latency.tv_nsec) / NSEC_PER_MSEC);
5992                } else {
5993                        msec = (cmpl_time.tv_sec - phba->cmf_latency.tv_sec -
5994                                1) * MSEC_PER_SEC;
5995                        msec += (((NSEC_PER_SEC - phba->cmf_latency.tv_nsec) +
5996                                 cmpl_time.tv_nsec) / NSEC_PER_MSEC);
5997                }
5998        }
5999        return msec;
6000}
6001
6002/**
6003 * lpfc_cmf_timer - This is the timer function for one congestion
6004 * rate interval.
6005 * @timer: Pointer to the high resolution timer that expired
6006 */
6007static enum hrtimer_restart
6008lpfc_cmf_timer(struct hrtimer *timer)
6009{
6010        struct lpfc_hba *phba = container_of(timer, struct lpfc_hba,
6011                                             cmf_timer);
6012        struct rxtable_entry *entry;
6013        uint32_t io_cnt;
6014        uint32_t head, tail;
6015        uint32_t busy, max_read;
6016        uint64_t total, rcv, lat, mbpi, extra, cnt;
6017        int timer_interval = LPFC_CMF_INTERVAL;
6018        uint32_t ms;
6019        struct lpfc_cgn_stat *cgs;
6020        int cpu;
6021
6022        /* Only restart the timer if congestion mgmt is on */
6023        if (phba->cmf_active_mode == LPFC_CFG_OFF ||
6024            !phba->cmf_latency.tv_sec) {
6025                lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
6026                                "6224 CMF timer exit: %d %lld\n",
6027                                phba->cmf_active_mode,
6028                                (uint64_t)phba->cmf_latency.tv_sec);
6029                return HRTIMER_NORESTART;
6030        }
6031
6032        /* If pport is not ready yet, just exit and wait for
6033         * the next timer cycle to hit.
6034         */
6035        if (!phba->pport)
6036                goto skip;
6037
6038        /* Do not block SCSI IO while in the timer routine since
6039         * total_bytes will be cleared
6040         */
6041        atomic_set(&phba->cmf_stop_io, 1);
6042
6043        /* First we need to calculate the actual ms between
6044         * the last timer interrupt and this one. We ask for
6045         * LPFC_CMF_INTERVAL, however the actual time may
6046         * vary depending on system overhead.
6047         */
6048        ms = lpfc_calc_cmf_latency(phba);
6049
6051        /* Immediately after we calculate the time since the last
6052         * timer interrupt, set the start time for the next
6053         * interrupt
6054         */
6055        ktime_get_real_ts64(&phba->cmf_latency);
6056
6057        phba->cmf_link_byte_count =
6058                div_u64(phba->cmf_max_line_rate * LPFC_CMF_INTERVAL, 1000);
6059
6060        /* Collect all the stats from the prior timer interval */
6061        total = 0;
6062        io_cnt = 0;
6063        lat = 0;
6064        rcv = 0;
6065        for_each_present_cpu(cpu) {
6066                cgs = per_cpu_ptr(phba->cmf_stat, cpu);
6067                total += atomic64_xchg(&cgs->total_bytes, 0);
6068                io_cnt += atomic_xchg(&cgs->rx_io_cnt, 0);
6069                lat += atomic64_xchg(&cgs->rx_latency, 0);
6070                rcv += atomic64_xchg(&cgs->rcv_bytes, 0);
6071        }
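        /* Each xchg above harvests the per-cpu counter and resets it to
         * zero in one atomic step, so the next timer interval accumulates
         * from a clean slate.
         */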
6072
6073        /* Before we issue another CMF_SYNC_WQE, retrieve the BW
6074         * returned from the last CMF_SYNC_WQE issued, from
6075         * cmf_last_sync_bw. This will be the target BW for
6076         * this next timer interval.
6077         */
6078        if (phba->cmf_active_mode == LPFC_CFG_MANAGED &&
6079            phba->link_state != LPFC_LINK_DOWN &&
6080            phba->hba_flag & HBA_SETUP) {
6081                mbpi = phba->cmf_last_sync_bw;
6082                phba->cmf_last_sync_bw = 0;
6083                extra = 0;
6084
6085                /* Calculate any extra bytes needed to account for the
6086                 * timer accuracy. If we are less than LPFC_CMF_INTERVAL
6087                 * calculate the adjustment needed for total to reflect
6088                 * a full LPFC_CMF_INTERVAL.
6089                 */
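                /* Worked example with illustrative numbers: if
                 * LPFC_CMF_INTERVAL were 100 ms but only ms = 90 elapsed
                 * with total = 900000 bytes, then cnt = 10000 bytes/ms *
                 * 100 = 1000000, and extra = 100000 bytes makes up the
                 * shortfall.
                 */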
6090                if (ms && ms < LPFC_CMF_INTERVAL) {
6091                        cnt = div_u64(total, ms); /* bytes per ms */
6092                        cnt *= LPFC_CMF_INTERVAL; /* what total should be */
6093
6094                        /* If the timeout is scheduled to be shorter,
6095                         * this value may skew the data, so cap it at mbpi.
6096                         */
6097                        if ((phba->hba_flag & HBA_SHORT_CMF) && cnt > mbpi)
6098                                cnt = mbpi;
6099
6100                        extra = cnt - total;
6101                }
6102                lpfc_issue_cmf_sync_wqe(phba, LPFC_CMF_INTERVAL, total + extra);
6103        } else {
6104                /* For Monitor mode or link down we want mbpi
6105                 * to be the full link speed
6106                 */
6107                mbpi = phba->cmf_link_byte_count;
6108                extra = 0;
6109        }
6110        phba->cmf_timer_cnt++;
6111
6112        if (io_cnt) {
6113                /* Update congestion info buffer latency in us */
6114                atomic_add(io_cnt, &phba->cgn_latency_evt_cnt);
6115                atomic64_add(lat, &phba->cgn_latency_evt);
6116        }
6117        busy = atomic_xchg(&phba->cmf_busy, 0);
6118        max_read = atomic_xchg(&phba->rx_max_read_cnt, 0);
6119
6120        /* Calculate MBPI for the next timer interval */
6121        if (mbpi) {
6122                if (mbpi > phba->cmf_link_byte_count ||
6123                    phba->cmf_active_mode == LPFC_CFG_MONITOR)
6124                        mbpi = phba->cmf_link_byte_count;
6125
6126                /* Change max_bytes_per_interval to what the prior
6127                 * CMF_SYNC_WQE cmpl indicated.
6128                 */
6129                if (mbpi != phba->cmf_max_bytes_per_interval)
6130                        phba->cmf_max_bytes_per_interval = mbpi;
6131        }
6132
6133        /* Save rxmonitor information for debug */
6134        if (phba->rxtable) {
6135                head = atomic_xchg(&phba->rxtable_idx_head,
6136                                   LPFC_RXMONITOR_TABLE_IN_USE);
6137                entry = &phba->rxtable[head];
6138                entry->total_bytes = total;
6139                entry->cmf_bytes = total + extra;
6140                entry->rcv_bytes = rcv;
6141                entry->cmf_busy = busy;
6142                entry->cmf_info = phba->cmf_active_info;
6143                if (io_cnt) {
6144                        entry->avg_io_latency = div_u64(lat, io_cnt);
6145                        entry->avg_io_size = div_u64(rcv, io_cnt);
6146                } else {
6147                        entry->avg_io_latency = 0;
6148                        entry->avg_io_size = 0;
6149                }
6150                entry->max_read_cnt = max_read;
6151                entry->io_cnt = io_cnt;
6152                entry->max_bytes_per_interval = mbpi;
6153                if (phba->cmf_active_mode == LPFC_CFG_MANAGED)
6154                        entry->timer_utilization = phba->cmf_last_ts;
6155                else
6156                        entry->timer_utilization = ms;
6157                entry->timer_interval = ms;
6158                phba->cmf_last_ts = 0;
6159
6160                /* Increment rxtable index */
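                /* The ring keeps one slot open: if the new head catches up
                 * with tail, tail is bumped and the slot holding the oldest
                 * entry becomes the next write target.
                 */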
6161                head = (head + 1) % LPFC_MAX_RXMONITOR_ENTRY;
6162                tail = atomic_read(&phba->rxtable_idx_tail);
6163                if (head == tail) {
6164                        tail = (tail + 1) % LPFC_MAX_RXMONITOR_ENTRY;
6165                        atomic_set(&phba->rxtable_idx_tail, tail);
6166                }
6167                atomic_set(&phba->rxtable_idx_head, head);
6168        }
6169
6170        if (phba->cmf_active_mode == LPFC_CFG_MONITOR) {
6171                /* If Monitor mode, check if we are oversubscribed
6172                 * against the full line rate.
6173                 */
6174                if (mbpi && total > mbpi)
6175                        atomic_inc(&phba->cgn_driver_evt_cnt);
6176        }
6177        phba->rx_block_cnt += div_u64(rcv, 512);  /* save 512 byte block cnt */
6178
6179        /* Each minute save Fabric and Driver congestion information */
6180        lpfc_cgn_save_evt_cnt(phba);
6181
6182        phba->hba_flag &= ~HBA_SHORT_CMF;
6183
6184        /* Since we need to call lpfc_cgn_save_evt_cnt every minute, on the
6185         * minute, adjust our next timer interval, if needed, to ensure a
6186         * 1 minute granularity when we get the next timer interrupt.
6187         */
6188        if (time_after(jiffies + msecs_to_jiffies(LPFC_CMF_INTERVAL),
6189                       phba->cgn_evt_timestamp)) {
6190                timer_interval = jiffies_to_msecs(phba->cgn_evt_timestamp -
6191                                                  jiffies);
6192                if (timer_interval <= 0)
6193                        timer_interval = LPFC_CMF_INTERVAL;
6194                else
6195                        phba->hba_flag |= HBA_SHORT_CMF;
6196
6197                /* If we adjust timer_interval, max_bytes_per_interval
6198                 * needs to be adjusted as well.
6199                 */
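                /* cmf_max_line_rate is in bytes/sec (the same formula is
                 * used with a full LPFC_CMF_INTERVAL earlier in this
                 * routine), so scaling by timer_interval / 1000 yields the
                 * byte budget for the shortened interval.
                 */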
6200                phba->cmf_link_byte_count = div_u64(phba->cmf_max_line_rate *
6201                                                    timer_interval, 1000);
6202                if (phba->cmf_active_mode == LPFC_CFG_MONITOR)
6203                        phba->cmf_max_bytes_per_interval =
6204                                phba->cmf_link_byte_count;
6205        }
6206
6207        /* Since total_bytes has already been zeroed, it's okay to unblock
6208         * after max_bytes_per_interval is set up.
6209         */
6210        if (atomic_xchg(&phba->cmf_bw_wait, 0))
6211                queue_work(phba->wq, &phba->unblock_request_work);
6212
6213        /* SCSI IO is now unblocked */
6214        atomic_set(&phba->cmf_stop_io, 0);
6215
6216skip:
6217        hrtimer_forward_now(timer,
6218                            ktime_set(0, timer_interval * NSEC_PER_MSEC));
6219        return HRTIMER_RESTART;
6220}
6221
6222#define trunk_link_status(__idx)\
6223        bf_get(lpfc_acqe_fc_la_trunk_config_port##__idx, acqe_fc) ?\
6224               ((phba->trunk_link.link##__idx.state == LPFC_LINK_UP) ?\
6225                "Link up" : "Link down") : "NA"
6226/* Did port __idx report an error? */
6227#define trunk_port_fault(__idx)\
6228        bf_get(lpfc_acqe_fc_la_trunk_config_port##__idx, acqe_fc) ?\
6229               (port_fault & (1 << __idx) ? "YES" : "NO") : "NA"
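/* Note: both macros expand references to the acqe_fc, phba and port_fault
 * variables in the caller's scope, so they are only meaningful inside
 * lpfc_update_trunk_link_status() below.
 */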
6230
6231static void
6232lpfc_update_trunk_link_status(struct lpfc_hba *phba,
6233                              struct lpfc_acqe_fc_la *acqe_fc)
6234{
6235        uint8_t port_fault = bf_get(lpfc_acqe_fc_la_trunk_linkmask, acqe_fc);
6236        uint8_t err = bf_get(lpfc_acqe_fc_la_trunk_fault, acqe_fc);
6237
6238        phba->sli4_hba.link_state.speed =
6239                lpfc_sli4_port_speed_parse(phba, LPFC_TRAILER_CODE_FC,
6240                                bf_get(lpfc_acqe_fc_la_speed, acqe_fc));
6241
6242        phba->sli4_hba.link_state.logical_speed =
6243                                bf_get(lpfc_acqe_fc_la_llink_spd, acqe_fc) * 10;
6244        /* We got FC link speed, convert to fc_linkspeed (READ_TOPOLOGY) */
6245        phba->fc_linkspeed =
6246                 lpfc_async_link_speed_to_read_top(
6247                                phba,
6248                                bf_get(lpfc_acqe_fc_la_speed, acqe_fc));
6249
6250        if (bf_get(lpfc_acqe_fc_la_trunk_config_port0, acqe_fc)) {
6251                phba->trunk_link.link0.state =
6252                        bf_get(lpfc_acqe_fc_la_trunk_link_status_port0, acqe_fc)
6253                        ? LPFC_LINK_UP : LPFC_LINK_DOWN;
6254                phba->trunk_link.link0.fault = port_fault & 0x1 ? err : 0;
6255        }
6256        if (bf_get(lpfc_acqe_fc_la_trunk_config_port1, acqe_fc)) {
6257                phba->trunk_link.link1.state =
6258                        bf_get(lpfc_acqe_fc_la_trunk_link_status_port1, acqe_fc)
6259                        ? LPFC_LINK_UP : LPFC_LINK_DOWN;
6260                phba->trunk_link.link1.fault = port_fault & 0x2 ? err : 0;
6261        }
6262        if (bf_get(lpfc_acqe_fc_la_trunk_config_port2, acqe_fc)) {
6263                phba->trunk_link.link2.state =
6264                        bf_get(lpfc_acqe_fc_la_trunk_link_status_port2, acqe_fc)
6265                        ? LPFC_LINK_UP : LPFC_LINK_DOWN;
6266                phba->trunk_link.link2.fault = port_fault & 0x4 ? err : 0;
6267        }
6268        if (bf_get(lpfc_acqe_fc_la_trunk_config_port3, acqe_fc)) {
6269                phba->trunk_link.link3.state =
6270                        bf_get(lpfc_acqe_fc_la_trunk_link_status_port3, acqe_fc)
6271                        ? LPFC_LINK_UP : LPFC_LINK_DOWN;
6272                phba->trunk_link.link3.fault = port_fault & 0x8 ? err : 0;
6273        }
6274
6275        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6276                        "2910 Async FC Trunking Event - Speed:%d\n"
6277                        "\tLogical speed:%d "
6278                        "port0: %s port1: %s port2: %s port3: %s\n",
6279                        phba->sli4_hba.link_state.speed,
6280                        phba->sli4_hba.link_state.logical_speed,
6281                        trunk_link_status(0), trunk_link_status(1),
6282                        trunk_link_status(2), trunk_link_status(3));
6283
6284        if (phba->cmf_active_mode != LPFC_CFG_OFF)
6285                lpfc_cmf_signal_init(phba);
6286
6287        /*
6288         * SLI-4: Only error codes up to 0xA are defined as of now.
6289         * Print an appropriate message in case the driver needs to
6290         * be updated.
6291         */
6292        if (port_fault)
6293                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6294                                "3202 trunk error:0x%x (%s) seen on port0:%s "
6295                                "port1:%s port2:%s port3:%s\n", err, err > 0xA ?
6296                                "UNDEFINED. update driver." : trunk_errmsg[err],
6297                                trunk_port_fault(0), trunk_port_fault(1),
6298                                trunk_port_fault(2), trunk_port_fault(3));
6299}
6300
6301
6302/**
6303 * lpfc_sli4_async_fc_evt - Process the asynchronous FC link event
6304 * @phba: pointer to lpfc hba data structure.
6305 * @acqe_fc: pointer to the async fc completion queue entry.
6306 *
6307 * This routine is to handle the SLI4 asynchronous FC event. It will simply log
6308 * that the event was received and then issue a read_topology mailbox command so
6309 * that the rest of the driver will treat it the same as SLI3.
6310 **/
6311static void
6312lpfc_sli4_async_fc_evt(struct lpfc_hba *phba, struct lpfc_acqe_fc_la *acqe_fc)
6313{
6314        LPFC_MBOXQ_t *pmb;
6315        MAILBOX_t *mb;
6316        struct lpfc_mbx_read_top *la;
6317        int rc;
6318
6319        if (bf_get(lpfc_trailer_type, acqe_fc) !=
6320            LPFC_FC_LA_EVENT_TYPE_FC_LINK) {
6321                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6322                                "2895 Non FC link Event detected.(%d)\n",
6323                                bf_get(lpfc_trailer_type, acqe_fc));
6324                return;
6325        }
6326
6327        if (bf_get(lpfc_acqe_fc_la_att_type, acqe_fc) ==
6328            LPFC_FC_LA_TYPE_TRUNKING_EVENT) {
6329                lpfc_update_trunk_link_status(phba, acqe_fc);
6330                return;
6331        }
6332
6333        /* Keep the link status for extra SLI4 state machine reference */
6334        phba->sli4_hba.link_state.speed =
6335                        lpfc_sli4_port_speed_parse(phba, LPFC_TRAILER_CODE_FC,
6336                                bf_get(lpfc_acqe_fc_la_speed, acqe_fc));
6337        phba->sli4_hba.link_state.duplex = LPFC_ASYNC_LINK_DUPLEX_FULL;
6338        phba->sli4_hba.link_state.topology =
6339                                bf_get(lpfc_acqe_fc_la_topology, acqe_fc);
6340        phba->sli4_hba.link_state.status =
6341                                bf_get(lpfc_acqe_fc_la_att_type, acqe_fc);
6342        phba->sli4_hba.link_state.type =
6343                                bf_get(lpfc_acqe_fc_la_port_type, acqe_fc);
6344        phba->sli4_hba.link_state.number =
6345                                bf_get(lpfc_acqe_fc_la_port_number, acqe_fc);
6346        phba->sli4_hba.link_state.fault =
6347                                bf_get(lpfc_acqe_link_fault, acqe_fc);
6348
6349        if (bf_get(lpfc_acqe_fc_la_att_type, acqe_fc) ==
6350            LPFC_FC_LA_TYPE_LINK_DOWN)
6351                phba->sli4_hba.link_state.logical_speed = 0;
6352        else if (!phba->sli4_hba.conf_trunk)
6353                phba->sli4_hba.link_state.logical_speed =
6354                                bf_get(lpfc_acqe_fc_la_llink_spd, acqe_fc) * 10;
6355
6356        lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
6357                        "2896 Async FC event - Speed:%dGBaud Topology:x%x "
6358                        "LA Type:x%x Port Type:%d Port Number:%d Logical speed:"
6359                        "%dMbps Fault:%d\n",
6360                        phba->sli4_hba.link_state.speed,
6361                        phba->sli4_hba.link_state.topology,
6362                        phba->sli4_hba.link_state.status,
6363                        phba->sli4_hba.link_state.type,
6364                        phba->sli4_hba.link_state.number,
6365                        phba->sli4_hba.link_state.logical_speed,
6366                        phba->sli4_hba.link_state.fault);
6367        pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6368        if (!pmb) {
6369                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6370                                "2897 The mboxq allocation failed\n");
6371                return;
6372        }
6373        rc = lpfc_mbox_rsrc_prep(phba, pmb);
6374        if (rc) {
6375                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6376                                "2898 The mboxq prep failed\n");
6377                goto out_free_pmb;
6378        }
6379
6380        /* Cleanup any outstanding ELS commands */
6381        lpfc_els_flush_all_cmd(phba);
6382
6383        /* Block ELS IOCBs until we have processed the link event */
6384        phba->sli4_hba.els_wq->pring->flag |= LPFC_STOP_IOCB_EVENT;
6385
6386        /* Update link event statistics */
6387        phba->sli.slistat.link_event++;
6388
6389        /* Create lpfc_handle_latt mailbox command from link ACQE */
6390        lpfc_read_topology(phba, pmb, (struct lpfc_dmabuf *)pmb->ctx_buf);
6391        pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology;
6392        pmb->vport = phba->pport;
6393
6394        if (phba->sli4_hba.link_state.status != LPFC_FC_LA_TYPE_LINK_UP) {
6395                phba->link_flag &= ~(LS_MDS_LINK_DOWN | LS_MDS_LOOPBACK);
6396
6397                switch (phba->sli4_hba.link_state.status) {
6398                case LPFC_FC_LA_TYPE_MDS_LINK_DOWN:
6399                        phba->link_flag |= LS_MDS_LINK_DOWN;
6400                        break;
6401                case LPFC_FC_LA_TYPE_MDS_LOOPBACK:
6402                        phba->link_flag |= LS_MDS_LOOPBACK;
6403                        break;
6404                default:
6405                        break;
6406                }
6407
6408                /* Initialize completion status */
6409                mb = &pmb->u.mb;
6410                mb->mbxStatus = MBX_SUCCESS;
6411
6412                /* Parse port fault information field */
6413                lpfc_sli4_parse_latt_fault(phba, (void *)acqe_fc);
6414
6415                /* Parse and translate link attention fields */
6416                la = (struct lpfc_mbx_read_top *)&pmb->u.mb.un.varReadTop;
6417                la->eventTag = acqe_fc->event_tag;
6418
6419                if (phba->sli4_hba.link_state.status ==
6420                    LPFC_FC_LA_TYPE_UNEXP_WWPN) {
6421                        bf_set(lpfc_mbx_read_top_att_type, la,
6422                               LPFC_FC_LA_TYPE_UNEXP_WWPN);
6423                } else {
6424                        bf_set(lpfc_mbx_read_top_att_type, la,
6425                               LPFC_FC_LA_TYPE_LINK_DOWN);
6426                }
6427                /* Invoke the mailbox command callback function */
6428                lpfc_mbx_cmpl_read_topology(phba, pmb);
6429
6430                return;
6431        }
6432
6433        rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
6434        if (rc == MBX_NOT_FINISHED)
6435                goto out_free_pmb;
6436        return;
6437
6438out_free_pmb:
6439        lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED);
6440}
6441
6442/**
6443 * lpfc_sli4_async_sli_evt - Process the asynchronous SLI link event
6444 * @phba: pointer to lpfc hba data structure.
6445 * @acqe_sli: pointer to the async SLI completion queue entry.
6446 *
6447 * This routine is to handle the SLI4 asynchronous SLI events.
6448 **/
6449static void
6450lpfc_sli4_async_sli_evt(struct lpfc_hba *phba, struct lpfc_acqe_sli *acqe_sli)
6451{
6452        char port_name;
6453        char message[128];
6454        uint8_t status;
6455        uint8_t evt_type;
6456        uint8_t operational = 0;
6457        struct temp_event temp_event_data;
6458        struct lpfc_acqe_misconfigured_event *misconfigured;
6459        struct lpfc_acqe_cgn_signal *cgn_signal;
6460        struct Scsi_Host  *shost;
6461        struct lpfc_vport **vports;
6462        int rc, i, cnt;
6463
6464        evt_type = bf_get(lpfc_trailer_type, acqe_sli);
6465
6466        lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
6467                        "2901 Async SLI event - Type:%d, Event Data: x%08x "
6468                        "x%08x x%08x x%08x\n", evt_type,
6469                        acqe_sli->event_data1, acqe_sli->event_data2,
6470                        acqe_sli->reserved, acqe_sli->trailer);
6471
6472        port_name = phba->Port[0];
6473        if (port_name == 0x00)
6474                port_name = '?'; /* port name is empty */
6475
6476        switch (evt_type) {
6477        case LPFC_SLI_EVENT_TYPE_OVER_TEMP:
6478                temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
6479                temp_event_data.event_code = LPFC_THRESHOLD_TEMP;
6480                temp_event_data.data = (uint32_t)acqe_sli->event_data1;
6481
6482                lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
6483                                "3190 Over Temperature:%d Celsius - Port Name %c\n",
6484                                acqe_sli->event_data1, port_name);
6485
6486                phba->sfp_warning |= LPFC_TRANSGRESSION_HIGH_TEMPERATURE;
6487                shost = lpfc_shost_from_vport(phba->pport);
6488                fc_host_post_vendor_event(shost, fc_get_event_number(),
6489                                          sizeof(temp_event_data),
6490                                          (char *)&temp_event_data,
6491                                          SCSI_NL_VID_TYPE_PCI
6492                                          | PCI_VENDOR_ID_EMULEX);
6493                break;
6494        case LPFC_SLI_EVENT_TYPE_NORM_TEMP:
6495                temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
6496                temp_event_data.event_code = LPFC_NORMAL_TEMP;
6497                temp_event_data.data = (uint32_t)acqe_sli->event_data1;
6498
6499                lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
6500                                "3191 Normal Temperature:%d Celsius - Port Name %c\n",
6501                                acqe_sli->event_data1, port_name);
6502
6503                shost = lpfc_shost_from_vport(phba->pport);
6504                fc_host_post_vendor_event(shost, fc_get_event_number(),
6505                                          sizeof(temp_event_data),
6506                                          (char *)&temp_event_data,
6507                                          SCSI_NL_VID_TYPE_PCI
6508                                          | PCI_VENDOR_ID_EMULEX);
6509                break;
6510        case LPFC_SLI_EVENT_TYPE_MISCONFIGURED:
6511                misconfigured = (struct lpfc_acqe_misconfigured_event *)
6512                                        &acqe_sli->event_data1;
6513
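                /*
                 * The event payload packs the optic state and operational
                 * bits for all four possible links into a single word; the
                 * switch below extracts the fields that match this port's
                 * link number.
                 */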
6514                /* fetch the status for this port */
6515                switch (phba->sli4_hba.lnk_info.lnk_no) {
6516                case LPFC_LINK_NUMBER_0:
6517                        status = bf_get(lpfc_sli_misconfigured_port0_state,
6518                                        &misconfigured->theEvent);
6519                        operational = bf_get(lpfc_sli_misconfigured_port0_op,
6520                                        &misconfigured->theEvent);
6521                        break;
6522                case LPFC_LINK_NUMBER_1:
6523                        status = bf_get(lpfc_sli_misconfigured_port1_state,
6524                                        &misconfigured->theEvent);
6525                        operational = bf_get(lpfc_sli_misconfigured_port1_op,
6526                                        &misconfigured->theEvent);
6527                        break;
6528                case LPFC_LINK_NUMBER_2:
6529                        status = bf_get(lpfc_sli_misconfigured_port2_state,
6530                                        &misconfigured->theEvent);
6531                        operational = bf_get(lpfc_sli_misconfigured_port2_op,
6532                                        &misconfigured->theEvent);
6533                        break;
6534                case LPFC_LINK_NUMBER_3:
6535                        status = bf_get(lpfc_sli_misconfigured_port3_state,
6536                                        &misconfigured->theEvent);
6537                        operational = bf_get(lpfc_sli_misconfigured_port3_op,
6538                                        &misconfigured->theEvent);
6539                        break;
6540                default:
6541                        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6542                                        "3296 "
6543                                        "LPFC_SLI_EVENT_TYPE_MISCONFIGURED "
6544                                        "event: Invalid link %d\n",
6545                                        phba->sli4_hba.lnk_info.lnk_no);
6546                        return;
6547                }
6548
6549                /* Skip if optic state unchanged */
6550                if (phba->sli4_hba.lnk_info.optic_state == status)
6551                        return;
6552
6553                switch (status) {
6554                case LPFC_SLI_EVENT_STATUS_VALID:
6555                        sprintf(message, "Physical Link is functional");
6556                        break;
6557                case LPFC_SLI_EVENT_STATUS_NOT_PRESENT:
6558                        sprintf(message, "Optics faulted/incorrectly "
6559                                "installed/not installed - Reseat optics, "
6560                                "if issue not resolved, replace.");
6561                        break;
6562                case LPFC_SLI_EVENT_STATUS_WRONG_TYPE:
6563                        sprintf(message,
6564                                "Optics of two types installed - Remove one "
6565                                "optic or install matching pair of optics.");
6566                        break;
6567                case LPFC_SLI_EVENT_STATUS_UNSUPPORTED:
6568                        sprintf(message, "Incompatible optics - Replace with "
6569                                "compatible optics for card to function.");
6570                        break;
6571                case LPFC_SLI_EVENT_STATUS_UNQUALIFIED:
6572                        sprintf(message, "Unqualified optics - Replace with "
6573                                "Avago optics for Warranty and Technical "
6574                                "Support - Link is%s operational",
6575                                (operational) ? " not" : "");
6576                        break;
6577                case LPFC_SLI_EVENT_STATUS_UNCERTIFIED:
6578                        sprintf(message, "Uncertified optics - Replace with "
6579                                "Avago-certified optics to enable link "
6580                                "operation - Link is%s operational",
6581                                (operational) ? " not" : "");
6582                        break;
6583                default:
6584                        /* firmware is reporting a status we don't know about */
6585                        sprintf(message, "Unknown event status x%02x", status);
6586                        break;
6587                }
6588
6589                /* Issue READ_CONFIG mbox command to refresh supported speeds */
6590                rc = lpfc_sli4_read_config(phba);
6591                if (rc) {
6592                        phba->lmt = 0;
6593                        lpfc_printf_log(phba, KERN_ERR,
6594                                        LOG_TRACE_EVENT,
6595                                        "3194 Unable to retrieve supported "
6596                                        "speeds, rc = 0x%x\n", rc);
6597                }
6598                rc = lpfc_sli4_refresh_params(phba);
6599                if (rc) {
6600                        lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
6601                                        "3174 Unable to update pls support, "
6602                                        "rc x%x\n", rc);
6603                }
6604                vports = lpfc_create_vport_work_array(phba);
6605                if (vports != NULL) {
6606                        for (i = 0; i <= phba->max_vports && vports[i] != NULL;
6607                                        i++) {
6608                                shost = lpfc_shost_from_vport(vports[i]);
6609                                lpfc_host_supported_speeds_set(shost);
6610                        }
6611                }
6612                lpfc_destroy_vport_work_array(phba, vports);
6613
6614                phba->sli4_hba.lnk_info.optic_state = status;
6615                lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
6616                                "3176 Port Name %c %s\n", port_name, message);
6617                break;
6618        case LPFC_SLI_EVENT_TYPE_REMOTE_DPORT:
6619                lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
6620                                "3192 Remote DPort Test Initiated - "
6621                                "Event Data1:x%08x Event Data2: x%08x\n",
6622                                acqe_sli->event_data1, acqe_sli->event_data2);
6623                break;
6624        case LPFC_SLI_EVENT_TYPE_PORT_PARAMS_CHG:
6625                /* Call FW to obtain active parms */
6626                lpfc_sli4_cgn_parm_chg_evt(phba);
6627                break;
6628        case LPFC_SLI_EVENT_TYPE_MISCONF_FAWWN:
6629                /* Misconfigured WWN. Reports that the SLI Port is configured
6630                 * to use FA-WWN, but the attached device doesn't support it.
6631                 * Event Data1 - N/A, Event Data2 - N/A
6632                 * This event only happens on the physical port.
6633                 */
6634                lpfc_log_msg(phba, KERN_WARNING, LOG_SLI | LOG_DISCOVERY,
6635                             "2699 Misconfigured FA-PWWN - Attached device "
6636                             "does not support FA-PWWN\n");
6637                phba->sli4_hba.fawwpn_flag &= ~LPFC_FAWWPN_FABRIC;
6638                memset(phba->pport->fc_portname.u.wwn, 0,
6639                       sizeof(struct lpfc_name));
6640                break;
6641        case LPFC_SLI_EVENT_TYPE_EEPROM_FAILURE:
6642                /* EEPROM failure. No driver action is required */
6643                lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
6644                             "2518 EEPROM failure - "
6645                             "Event Data1: x%08x Event Data2: x%08x\n",
6646                             acqe_sli->event_data1, acqe_sli->event_data2);
6647                break;
6648        case LPFC_SLI_EVENT_TYPE_CGN_SIGNAL:
6649                if (phba->cmf_active_mode == LPFC_CFG_OFF)
6650                        break;
6651                cgn_signal = (struct lpfc_acqe_cgn_signal *)
6652                                        &acqe_sli->event_data1;
6653                phba->cgn_acqe_cnt++;
6654
6655                cnt = bf_get(lpfc_warn_acqe, cgn_signal);
6656                atomic64_add(cnt, &phba->cgn_acqe_stat.warn);
6657                atomic64_add(cgn_signal->alarm_cnt, &phba->cgn_acqe_stat.alarm);
6658
6659                /* no threshold for CMF, even 1 signal will trigger an event */
6660
6661                /* Alarm overrides warning, so check that first */
6662                if (cgn_signal->alarm_cnt) {
6663                        if (phba->cgn_reg_signal == EDC_CG_SIG_WARN_ALARM) {
6664                                /* Keep track of alarm cnt for CMF_SYNC_WQE */
6665                                atomic_add(cgn_signal->alarm_cnt,
6666                                           &phba->cgn_sync_alarm_cnt);
6667                        }
6668                } else if (cnt) {
6669                        /* signal action needs to be taken */
6670                        if (phba->cgn_reg_signal == EDC_CG_SIG_WARN_ONLY ||
6671                            phba->cgn_reg_signal == EDC_CG_SIG_WARN_ALARM) {
6672                                /* Keep track of warning cnt for CMF_SYNC_WQE */
6673                                atomic_add(cnt, &phba->cgn_sync_warn_cnt);
6674                        }
6675                }
6676                break;
6677        default:
6678                lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
6679                                "3193 Unrecognized SLI event, type: 0x%x\n",
6680                                evt_type);
6681                break;
6682        }
6683}
6684
6685/**
6686 * lpfc_sli4_perform_vport_cvl - Perform clear virtual link on a vport
6687 * @vport: pointer to vport data structure.
6688 *
6689 * This routine is to perform Clear Virtual Link (CVL) on a vport in
6690 * response to a CVL event.
6691 *
6692 * Return the pointer to the fabric ndlp for the vport if successful,
6693 * otherwise return NULL.
6694 **/
6695static struct lpfc_nodelist *
6696lpfc_sli4_perform_vport_cvl(struct lpfc_vport *vport)
6697{
6698        struct lpfc_nodelist *ndlp;
6699        struct Scsi_Host *shost;
6700        struct lpfc_hba *phba;
6701
6702        if (!vport)
6703                return NULL;
6704        phba = vport->phba;
6705        if (!phba)
6706                return NULL;
6707        ndlp = lpfc_findnode_did(vport, Fabric_DID);
6708        if (!ndlp) {
6709                /* Cannot find existing Fabric ndlp, so allocate a new one */
6710                ndlp = lpfc_nlp_init(vport, Fabric_DID);
6711                if (!ndlp)
6712                        return NULL;
6713                /* Set the node type */
6714                ndlp->nlp_type |= NLP_FABRIC;
6715                /* Put ndlp onto node list */
6716                lpfc_enqueue_node(vport, ndlp);
6717        }
6718        if (phba->pport->port_state < LPFC_FLOGI &&
6719            phba->pport->port_state != LPFC_VPORT_FAILED)
6720                return NULL;
6721        /* If the virtual link is not yet instantiated, ignore the CVL */
6722        if (vport != phba->pport && vport->port_state < LPFC_FDISC &&
6723            vport->port_state != LPFC_VPORT_FAILED)
6724                return NULL;
6725        shost = lpfc_shost_from_vport(vport);
6726        if (!shost)
6727                return NULL;
6728        lpfc_linkdown_port(vport);
6729        lpfc_cleanup_pending_mbox(vport);
6730        spin_lock_irq(shost->host_lock);
6731        vport->fc_flag |= FC_VPORT_CVL_RCVD;
6732        spin_unlock_irq(shost->host_lock);
6733
6734        return ndlp;
6735}
6736
6737/**
6738 * lpfc_sli4_perform_all_vport_cvl - Perform clear virtual link on all vports
6739 * @phba: pointer to lpfc hba data structure.
6740 *
6741 * This routine is to perform Clear Virtual Link (CVL) on all vports in
6742 * response to a FCF dead event.
6743 **/
6744static void
6745lpfc_sli4_perform_all_vport_cvl(struct lpfc_hba *phba)
6746{
6747        struct lpfc_vport **vports;
6748        int i;
6749
6750        vports = lpfc_create_vport_work_array(phba);
6751        if (vports)
6752                for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
6753                        lpfc_sli4_perform_vport_cvl(vports[i]);
6754        lpfc_destroy_vport_work_array(phba, vports);
6755}
6756
6757/**
6758 * lpfc_sli4_async_fip_evt - Process the asynchronous FCoE FIP event
6759 * @phba: pointer to lpfc hba data structure.
6760 * @acqe_fip: pointer to the async fcoe completion queue entry.
6761 *
6762 * This routine is to handle the SLI4 asynchronous fcoe event.
6763 **/
6764static void
6765lpfc_sli4_async_fip_evt(struct lpfc_hba *phba,
6766                        struct lpfc_acqe_fip *acqe_fip)
6767{
6768        uint8_t event_type = bf_get(lpfc_trailer_type, acqe_fip);
6769        int rc;
6770        struct lpfc_vport *vport;
6771        struct lpfc_nodelist *ndlp;
6772        int active_vlink_present;
6773        struct lpfc_vport **vports;
6774        int i;
6775
6776        phba->fc_eventTag = acqe_fip->event_tag;
6777        phba->fcoe_eventtag = acqe_fip->event_tag;
6778        switch (event_type) {
6779        case LPFC_FIP_EVENT_TYPE_NEW_FCF:
6780        case LPFC_FIP_EVENT_TYPE_FCF_PARAM_MOD:
6781                if (event_type == LPFC_FIP_EVENT_TYPE_NEW_FCF)
6782                        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6783                                        "2546 New FCF event, evt_tag:x%x, "
6784                                        "index:x%x\n",
6785                                        acqe_fip->event_tag,
6786                                        acqe_fip->index);
6787                else
6788                        lpfc_printf_log(phba, KERN_WARNING, LOG_FIP |
6789                                        LOG_DISCOVERY,
6790                                        "2788 FCF param modified event, "
6791                                        "evt_tag:x%x, index:x%x\n",
6792                                        acqe_fip->event_tag,
6793                                        acqe_fip->index);
6794                if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
6795                        /*
6796                         * During period of FCF discovery, read the FCF
6797                         * table record indexed by the event to update
6798                         * FCF roundrobin failover eligible FCF bmask.
6799                         */
6800                        lpfc_printf_log(phba, KERN_INFO, LOG_FIP |
6801                                        LOG_DISCOVERY,
6802                                        "2779 Read FCF (x%x) for updating "
6803                                        "roundrobin FCF failover bmask\n",
6804                                        acqe_fip->index);
6805                        rc = lpfc_sli4_read_fcf_rec(phba, acqe_fip->index);
6806                }
6807
6808                /* If the FCF discovery is in progress, do nothing. */
6809                spin_lock_irq(&phba->hbalock);
6810                if (phba->hba_flag & FCF_TS_INPROG) {
6811                        spin_unlock_irq(&phba->hbalock);
6812                        break;
6813                }
6814                /* If fast FCF failover rescan event is pending, do nothing */
6815                if (phba->fcf.fcf_flag & (FCF_REDISC_EVT | FCF_REDISC_PEND)) {
6816                        spin_unlock_irq(&phba->hbalock);
6817                        break;
6818                }
6819
6820                /* If the FCF has been in discovered state, do nothing. */
6821                if (phba->fcf.fcf_flag & FCF_SCAN_DONE) {
6822                        spin_unlock_irq(&phba->hbalock);
6823                        break;
6824                }
6825                spin_unlock_irq(&phba->hbalock);
6826
6827                /* Otherwise, scan the entire FCF table and re-discover SAN */
6828                lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
6829                                "2770 Start FCF table scan per async FCF "
6830                                "event, evt_tag:x%x, index:x%x\n",
6831                                acqe_fip->event_tag, acqe_fip->index);
6832                rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba,
6833                                                     LPFC_FCOE_FCF_GET_FIRST);
6834                if (rc)
6835                        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6836                                        "2547 Issue FCF scan read FCF mailbox "
6837                                        "command failed (x%x)\n", rc);
6838                break;
6839
6840        case LPFC_FIP_EVENT_TYPE_FCF_TABLE_FULL:
6841                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6842                                "2548 FCF Table full count 0x%x tag 0x%x\n",
6843                                bf_get(lpfc_acqe_fip_fcf_count, acqe_fip),
6844                                acqe_fip->event_tag);
6845                break;
6846
6847        case LPFC_FIP_EVENT_TYPE_FCF_DEAD:
6848                phba->fcoe_cvl_eventtag = acqe_fip->event_tag;
6849                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6850                                "2549 FCF (x%x) disconnected from network, "
6851                                "tag:x%x\n", acqe_fip->index,
6852                                acqe_fip->event_tag);
6853                /*
6854                 * If we are in the middle of FCF failover process, clear
6855                 * the corresponding FCF bit in the roundrobin bitmap.
6856                 */
6857                spin_lock_irq(&phba->hbalock);
6858                if ((phba->fcf.fcf_flag & FCF_DISCOVERY) &&
6859                    (phba->fcf.current_rec.fcf_indx != acqe_fip->index)) {
6860                        spin_unlock_irq(&phba->hbalock);
6861                        /* Update FLOGI FCF failover eligible FCF bmask */
6862                        lpfc_sli4_fcf_rr_index_clear(phba, acqe_fip->index);
6863                        break;
6864                }
6865                spin_unlock_irq(&phba->hbalock);
6866
6867                /* If the event is not for currently used fcf do nothing */
6868                if (phba->fcf.current_rec.fcf_indx != acqe_fip->index)
6869                        break;
6870
6871                /*
6872                 * Otherwise, request the port to rediscover the entire FCF
6873                 * table for a fast recovery from case that the current FCF
6874                 * is no longer valid as we are not in the middle of FCF
6875                 * failover process already.
6876                 */
6877                spin_lock_irq(&phba->hbalock);
6878                /* Mark the fast failover process in progress */
6879                phba->fcf.fcf_flag |= FCF_DEAD_DISC;
6880                spin_unlock_irq(&phba->hbalock);
6881
6882                lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
6883                                "2771 Start FCF fast failover process due to "
6884                                "FCF DEAD event: evt_tag:x%x, fcf_index:x%x\n",
6885                                acqe_fip->event_tag, acqe_fip->index);
6886                rc = lpfc_sli4_redisc_fcf_table(phba);
6887                if (rc) {
6888                        lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
6889                                        LOG_TRACE_EVENT,
6890                                        "2772 Issue FCF rediscover mailbox "
6891                                        "command failed, fall through to FCF "
6892                                        "dead event\n");
6893                        spin_lock_irq(&phba->hbalock);
6894                        phba->fcf.fcf_flag &= ~FCF_DEAD_DISC;
6895                        spin_unlock_irq(&phba->hbalock);
6896                        /*
6897                         * Last resort will fail over by treating this
6898                         * as a link down to FCF registration.
6899                         */
6900                        lpfc_sli4_fcf_dead_failthrough(phba);
6901                } else {
6902                        /* Reset FCF roundrobin bmask for new discovery */
6903                        lpfc_sli4_clear_fcf_rr_bmask(phba);
6904                        /*
6905                         * Handling fast FCF failover to a DEAD FCF event is
6906                         * considered equivalent to receiving CVL on all vports.
6907                         */
6908                        lpfc_sli4_perform_all_vport_cvl(phba);
6909                }
6910                break;
6911        case LPFC_FIP_EVENT_TYPE_CVL:
6912                phba->fcoe_cvl_eventtag = acqe_fip->event_tag;
6913                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6914                                "2718 Clear Virtual Link Received for VPI "
6915                                "0x%x tag 0x%x\n", acqe_fip->index,
6916                                acqe_fip->event_tag);
6917
6918                vport = lpfc_find_vport_by_vpid(phba,
6919                                                acqe_fip->index);
6920                ndlp = lpfc_sli4_perform_vport_cvl(vport);
6921                if (!ndlp)
6922                        break;
6923                active_vlink_present = 0;
6924
6925                vports = lpfc_create_vport_work_array(phba);
6926                if (vports) {
6927                        for (i = 0; i <= phba->max_vports && vports[i] != NULL;
6928                                        i++) {
6929                                if (!(vports[i]->fc_flag &
6930                                      FC_VPORT_CVL_RCVD) &&
6931                                    vports[i]->port_state > LPFC_FDISC) {
6932                                        active_vlink_present = 1;
6933                                        break;
6934                                }
6935                        }
6936                        lpfc_destroy_vport_work_array(phba, vports);
6937                }
6938
6939                /*
6940                 * Don't re-instantiate if vport is marked for deletion.
6941                 * If we are here first then vport_delete is going to wait
6942                 * for discovery to complete.
6943                 */
6944                if (!(vport->load_flag & FC_UNLOADING) &&
6945                    active_vlink_present) {
6946                        /*
6947                         * If there are other active VLinks present,
6948                         * re-instantiate the Vlink using FDISC.
6949                         */
6950                        mod_timer(&ndlp->nlp_delayfunc,
6951                                  jiffies + msecs_to_jiffies(1000));
6952                        spin_lock_irq(&ndlp->lock);
6953                        ndlp->nlp_flag |= NLP_DELAY_TMO;
6954                        spin_unlock_irq(&ndlp->lock);
6955                        ndlp->nlp_last_elscmd = ELS_CMD_FDISC;
6956                        vport->port_state = LPFC_FDISC;
6957                } else {
6958                        /*
6959                         * Otherwise, request the port to rediscover
6960                         * the entire FCF table for a fast recovery,
6961                         * in case the current FCF is no longer valid,
6962                         * provided we are not already in the FCF
6963                         * failover process.
6964                         */
6965                        spin_lock_irq(&phba->hbalock);
6966                        if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
6967                                spin_unlock_irq(&phba->hbalock);
6968                                break;
6969                        }
6970                        /* Mark the fast failover process in progress */
6971                        phba->fcf.fcf_flag |= FCF_ACVL_DISC;
6972                        spin_unlock_irq(&phba->hbalock);
6973                        lpfc_printf_log(phba, KERN_INFO, LOG_FIP |
6974                                        LOG_DISCOVERY,
6975                                        "2773 Start FCF failover per CVL, "
6976                                        "evt_tag:x%x\n", acqe_fip->event_tag);
6977                        rc = lpfc_sli4_redisc_fcf_table(phba);
6978                        if (rc) {
6979                                lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
6980                                                LOG_TRACE_EVENT,
6981                                                "2774 Issue FCF rediscover "
6982                                                "mailbox command failed, "
6983                                                "fall through to CVL event\n");
6984                                spin_lock_irq(&phba->hbalock);
6985                                phba->fcf.fcf_flag &= ~FCF_ACVL_DISC;
6986                                spin_unlock_irq(&phba->hbalock);
6987                                /*
6988                                 * Last resort will be a retry on the
6989                                 * currently registered FCF entry.
6990                                 */
6991                                lpfc_retry_pport_discovery(phba);
6992                        } else {
6993                                /* Reset FCF roundrobin bmask for new
6994                                 * discovery.
6995                                 */
6996                                lpfc_sli4_clear_fcf_rr_bmask(phba);
6997                        }
6998                }
6999                break;
7000        default:
7001                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7002                                "0288 Unknown FCoE event type 0x%x event tag "
7003                                "0x%x\n", event_type, acqe_fip->event_tag);
7004                break;
7005        }
7006}
7007
7008/**
7009 * lpfc_sli4_async_dcbx_evt - Process the asynchronous dcbx event
7010 * @phba: pointer to lpfc hba data structure.
7011 * @acqe_dcbx: pointer to the async dcbx completion queue entry.
7012 *
7013 * This routine is to handle the SLI4 asynchronous dcbx event.
7014 **/
7015static void
7016lpfc_sli4_async_dcbx_evt(struct lpfc_hba *phba,
7017                         struct lpfc_acqe_dcbx *acqe_dcbx)
7018{
7019        phba->fc_eventTag = acqe_dcbx->event_tag;
7020        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7021                        "0290 The SLI4 DCBX asynchronous event is not "
7022                        "handled yet\n");
7023}
7024
7025/**
7026 * lpfc_sli4_async_grp5_evt - Process the asynchronous group5 event
7027 * @phba: pointer to lpfc hba data structure.
7028 * @acqe_grp5: pointer to the async grp5 completion queue entry.
7029 *
7030 * This routine is to handle the SLI4 asynchronous grp5 event. A grp5 event
7031 * is an asynchronous notification of a logical link speed change.  The Port
7032 * reports the logical link speed in units of 10Mbps.
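 * For example, a raw logical link speed value of 100 read from the ACQE
 * would be stored by this routine as 1000 Mbps.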
7033 **/
7034static void
7035lpfc_sli4_async_grp5_evt(struct lpfc_hba *phba,
7036                         struct lpfc_acqe_grp5 *acqe_grp5)
7037{
7038        uint16_t prev_ll_spd;
7039
7040        phba->fc_eventTag = acqe_grp5->event_tag;
7041        phba->fcoe_eventtag = acqe_grp5->event_tag;
7042        prev_ll_spd = phba->sli4_hba.link_state.logical_speed;
7043        phba->sli4_hba.link_state.logical_speed =
7044                (bf_get(lpfc_acqe_grp5_llink_spd, acqe_grp5)) * 10;
7045        lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
7046                        "2789 GRP5 Async Event: Updating logical link speed "
7047                        "from %dMbps to %dMbps\n", prev_ll_spd,
7048                        phba->sli4_hba.link_state.logical_speed);
7049}
7050
7051/**
7052 * lpfc_sli4_async_cmstat_evt - Process the asynchronous cmstat event
7053 * @phba: pointer to lpfc hba data structure.
7054 *
7055 * This routine is to handle the SLI4 asynchronous cmstat event. A cmstat event
7056 * is an asynchronous notification of a request to reset CM stats.
7057 **/
7058static void
7059lpfc_sli4_async_cmstat_evt(struct lpfc_hba *phba)
7060{
7061        if (!phba->cgn_i)
7062                return;
7063        lpfc_init_congestion_stat(phba);
7064}
7065
7066/**
7067 * lpfc_cgn_params_val - Validate FW congestion parameters.
7068 * @phba: pointer to lpfc hba data structure.
7069 * @p_cfg_param: pointer to FW provided congestion parameters.
7070 *
7071 * This routine validates the congestion parameters passed
7072 * by the FW to the driver via an ACQE event.
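 *
 * Only the cgn_param_mode field is range checked here; an out-of-range
 * mode is corrected to LPFC_CFG_OFF rather than failing the event.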
7073 **/
7074static void
7075lpfc_cgn_params_val(struct lpfc_hba *phba, struct lpfc_cgn_param *p_cfg_param)
7076{
7077        spin_lock_irq(&phba->hbalock);
7078
7079        if (!lpfc_rangecheck(p_cfg_param->cgn_param_mode, LPFC_CFG_OFF,
7080                             LPFC_CFG_MONITOR)) {
7081                lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT,
7082                                "6225 CMF mode param out of range: %d\n",
7083                                 p_cfg_param->cgn_param_mode);
7084                p_cfg_param->cgn_param_mode = LPFC_CFG_OFF;
7085        }
7086
7087        spin_unlock_irq(&phba->hbalock);
7088}
7089
7090/**
7091 * lpfc_cgn_params_parse - Process a FW cong parm change event
7092 * @phba: pointer to lpfc hba data structure.
7093 * @p_cgn_param: pointer to a data buffer with the FW cong params.
7094 * @len: the size of @p_cgn_param in bytes.
7095 *
7096 * This routine validates the congestion management buffer signature
7097 * from the FW, validates the contents, and corrects any out-of-range
7098 * values.  If the signature magic is correct, then after parameter
7099 * validation the contents are copied to the driver's @phba
7100 * structure. If the magic is incorrect, an error message is
7101 * logged.
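 *
 * A mode change also triggers the side effects handled in the code below:
 *   OFF     -> MANAGED/MONITOR : lpfc_cmf_start(), re-issue EDC if link up
 *   MANAGED -> OFF             : lpfc_cmf_stop(), re-issue EDC if link up
 *   MANAGED -> MONITOR         : lift the bandwidth cap, unblock queued IO
 *   MONITOR -> OFF             : lpfc_cmf_stop(), re-issue EDC if link up
 *   MONITOR -> MANAGED         : lpfc_cmf_signal_init()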
7102 **/
7103static void
7104lpfc_cgn_params_parse(struct lpfc_hba *phba,
7105                      struct lpfc_cgn_param *p_cgn_param, uint32_t len)
7106{
7107        struct lpfc_cgn_info *cp;
7108        uint32_t crc, oldmode;
7109
7110        /* Make sure the FW has encoded the correct magic number to
7111         * validate the congestion parameter in FW memory.
7112         */
7113        if (p_cgn_param->cgn_param_magic == LPFC_CFG_PARAM_MAGIC_NUM) {
7114                lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT | LOG_INIT,
7115                                "4668 FW cgn parm buffer data: "
7116                                "magic 0x%x version %d mode %d "
7117                                "level0 %d level1 %d "
7118                                "level2 %d byte13 %d "
7119                                "byte14 %d byte15 %d "
7120                                "byte11 %d byte12 %d activeMode %d\n",
7121                                p_cgn_param->cgn_param_magic,
7122                                p_cgn_param->cgn_param_version,
7123                                p_cgn_param->cgn_param_mode,
7124                                p_cgn_param->cgn_param_level0,
7125                                p_cgn_param->cgn_param_level1,
7126                                p_cgn_param->cgn_param_level2,
7127                                p_cgn_param->byte13,
7128                                p_cgn_param->byte14,
7129                                p_cgn_param->byte15,
7130                                p_cgn_param->byte11,
7131                                p_cgn_param->byte12,
7132                                phba->cmf_active_mode);
7133
7134                oldmode = phba->cmf_active_mode;
7135
7136                /* Any parameters out of range are corrected to defaults
7137                 * by this routine.  No need to fail.
7138                 */
7139                lpfc_cgn_params_val(phba, p_cgn_param);
7140
7141                /* Parameters are verified, move them into driver storage */
7142                spin_lock_irq(&phba->hbalock);
7143                memcpy(&phba->cgn_p, p_cgn_param,
7144                       sizeof(struct lpfc_cgn_param));
7145
7146                /* Update parameters in congestion info buffer now */
7147                if (phba->cgn_i) {
7148                        cp = (struct lpfc_cgn_info *)phba->cgn_i->virt;
7149                        cp->cgn_info_mode = phba->cgn_p.cgn_param_mode;
7150                        cp->cgn_info_level0 = phba->cgn_p.cgn_param_level0;
7151                        cp->cgn_info_level1 = phba->cgn_p.cgn_param_level1;
7152                        cp->cgn_info_level2 = phba->cgn_p.cgn_param_level2;
7153                        crc = lpfc_cgn_calc_crc32(cp, LPFC_CGN_INFO_SZ,
7154                                                  LPFC_CGN_CRC32_SEED);
7155                        cp->cgn_info_crc = cpu_to_le32(crc);
7156                }
7157                spin_unlock_irq(&phba->hbalock);
7158
7159                phba->cmf_active_mode = phba->cgn_p.cgn_param_mode;
7160
7161                switch (oldmode) {
7162                case LPFC_CFG_OFF:
7163                        if (phba->cgn_p.cgn_param_mode != LPFC_CFG_OFF) {
7164                                /* Turning CMF on */
7165                                lpfc_cmf_start(phba);
7166
7167                                if (phba->link_state >= LPFC_LINK_UP) {
7168                                        phba->cgn_reg_fpin =
7169                                                phba->cgn_init_reg_fpin;
7170                                        phba->cgn_reg_signal =
7171                                                phba->cgn_init_reg_signal;
7172                                        lpfc_issue_els_edc(phba->pport, 0);
7173                                }
7174                        }
7175                        break;
7176                case LPFC_CFG_MANAGED:
7177                        switch (phba->cgn_p.cgn_param_mode) {
7178                        case LPFC_CFG_OFF:
7179                                /* Turning CMF off */
7180                                lpfc_cmf_stop(phba);
7181                                if (phba->link_state >= LPFC_LINK_UP)
7182                                        lpfc_issue_els_edc(phba->pport, 0);
7183                                break;
7184                        case LPFC_CFG_MONITOR:
7185                                lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
7186                                                "4661 Switch from MANAGED to "
7187                                                "MONITOR mode\n");
7188                                phba->cmf_max_bytes_per_interval =
7189                                        phba->cmf_link_byte_count;
7190
7191                                /* Resume blocked IO - unblock on workqueue */
7192                                queue_work(phba->wq,
7193                                           &phba->unblock_request_work);
7194                                break;
7195                        }
7196                        break;
7197                case LPFC_CFG_MONITOR:
7198                        switch (phba->cgn_p.cgn_param_mode) {
7199                        case LPFC_CFG_OFF:
7200                                /* Turning CMF off */
7201                                lpfc_cmf_stop(phba);
7202                                if (phba->link_state >= LPFC_LINK_UP)
7203                                        lpfc_issue_els_edc(phba->pport, 0);
7204                                break;
7205                        case LPFC_CFG_MANAGED:
7206                                lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
7207                                                "4662 Switch from MONITOR to "
7208                                                "MANAGED mode\n");
7209                                lpfc_cmf_signal_init(phba);
7210                                break;
7211                        }
7212                        break;
7213                }
7214        } else {
7215                lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT | LOG_INIT,
7216                                "4669 FW cgn parm buf wrong magic 0x%x "
7217                                "version %d\n", p_cgn_param->cgn_param_magic,
7218                                p_cgn_param->cgn_param_version);
7219        }
7220}
7221
7222/**
7223 * lpfc_sli4_cgn_params_read - Read and Validate FW congestion parameters.
7224 * @phba: pointer to lpfc hba data structure.
7225 *
7226 * This routine issues a read_object mailbox command to
7227 * get the congestion management parameters from the FW,
7228 * parses them, and updates the driver-maintained values.
7229 *
7230 * Returns
7231 *  0        if the object was empty
7232 *  negative error value if an error was encountered
7233 *  positive count of bytes read from the object
7234 **/
7235int
7236lpfc_sli4_cgn_params_read(struct lpfc_hba *phba)
7237{
7238        int ret = 0;
7239        struct lpfc_cgn_param *p_cgn_param = NULL;
7240        u32 *pdata = NULL;
7241        u32 len = 0;
7242
7243        /* Find out if the FW has a new set of congestion parameters. */
7244        len = sizeof(struct lpfc_cgn_param);
7245        pdata = kzalloc(len, GFP_KERNEL);
        if (!pdata)
                return -ENOMEM;
7246        ret = lpfc_read_object(phba, (char *)LPFC_PORT_CFG_NAME,
7247                               pdata, len);
7248
7249        /* 0 means no data.  A negative means error.  A positive means
7250         * bytes were copied.
7251         */
7252        if (!ret) {
7253                lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT | LOG_INIT,
7254                                "4670 CGN RD OBJ returns no data\n");
7255                goto rd_obj_err;
7256        } else if (ret < 0) {
7257                /* Some error.  Just exit and return it to the caller.*/
7258                goto rd_obj_err;
7259        }
7260
7261        lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT | LOG_INIT,
7262                        "6234 READ CGN PARAMS Successful %d\n", len);
7263
7264        /* Parse data pointer over len and update the phba congestion
7265         * parameters with values passed back.  The receive rate values
7266         * may have been altered in FW, but take no action here.
7267         */
7268        p_cgn_param = (struct lpfc_cgn_param *)pdata;
7269        lpfc_cgn_params_parse(phba, p_cgn_param, len);
7270
7271 rd_obj_err:
7272        kfree(pdata);
7273        return ret;
7274}
7275
7276/**
7277 * lpfc_sli4_cgn_parm_chg_evt - Process a FW congestion param change event
7278 * @phba: pointer to lpfc hba data structure.
7279 *
7280 * The FW generated Async ACQE SLI event calls this routine when
7281 * the event type is an SLI Internal Port Event and the Event Code
7282 * indicates a change to the FW maintained congestion parameters.
7283 *
7284 * This routine executes a Read_Object mailbox call to obtain the
7285 * current congestion parameters maintained in FW and corrects
7286 * the driver's active congestion parameters.
7287 *
7288 * The acqe event is not passed because there is no further data
7289 * required.
7290 *
7291 * Returns nonzero error if event processing encountered an error.
7292 * Zero otherwise for success.
7293 **/
7294static int
7295lpfc_sli4_cgn_parm_chg_evt(struct lpfc_hba *phba)
7296{
7297        int ret = 0;
7298
7299        if (!phba->sli4_hba.pc_sli4_params.cmf) {
7300                lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT | LOG_INIT,
7301                                "4664 Cgn Evt when E2E off. Drop event\n");
7302                return -EACCES;
7303        }
7304
7305        /* If the event is claiming an empty object, it's ok.  A write
7306         * could have cleared it.  Only error is a negative return
7307         * status.
7308         */
7309        ret = lpfc_sli4_cgn_params_read(phba);
7310        if (ret < 0) {
7311                lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT | LOG_INIT,
7312                                "4667 Error reading Cgn Params (%d)\n",
7313                                ret);
7314        } else if (!ret) {
7315                lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT | LOG_INIT,
7316                                "4673 CGN Event empty object.\n");
7317        }
7318        return ret;
7319}
7320
7321/**
7322 * lpfc_sli4_async_event_proc - Process all the pending asynchronous events
7323 * @phba: pointer to lpfc hba data structure.
7324 *
7325 * This routine is invoked by the worker thread to process all the pending
7326 * SLI4 asynchronous events.
7327 **/
7328void lpfc_sli4_async_event_proc(struct lpfc_hba *phba)
7329{
7330        struct lpfc_cq_event *cq_event;
7331        unsigned long iflags;
7332
7333        /* First, declare the async event has been handled */
7334        spin_lock_irqsave(&phba->hbalock, iflags);
7335        phba->hba_flag &= ~ASYNC_EVENT;
7336        spin_unlock_irqrestore(&phba->hbalock, iflags);
7337
7338        /* Now, handle all the async events */
7339        spin_lock_irqsave(&phba->sli4_hba.asynce_list_lock, iflags);
7340        while (!list_empty(&phba->sli4_hba.sp_asynce_work_queue)) {
7341                list_remove_head(&phba->sli4_hba.sp_asynce_work_queue,
7342                                 cq_event, struct lpfc_cq_event, list);
7343                spin_unlock_irqrestore(&phba->sli4_hba.asynce_list_lock,
7344                                       iflags);
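                /* The list lock is dropped while this event is handled so
                 * the handlers below can take other locks; it is re-acquired
                 * before checking the work queue for the next event.
                 */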
7345
7346                /* Process the asynchronous event */
7347                switch (bf_get(lpfc_trailer_code, &cq_event->cqe.mcqe_cmpl)) {
7348                case LPFC_TRAILER_CODE_LINK:
7349                        lpfc_sli4_async_link_evt(phba,
7350                                                 &cq_event->cqe.acqe_link);
7351                        break;
7352                case LPFC_TRAILER_CODE_FCOE:
7353                        lpfc_sli4_async_fip_evt(phba, &cq_event->cqe.acqe_fip);
7354                        break;
7355                case LPFC_TRAILER_CODE_DCBX:
7356                        lpfc_sli4_async_dcbx_evt(phba,
7357                                                 &cq_event->cqe.acqe_dcbx);
7358                        break;
7359                case LPFC_TRAILER_CODE_GRP5:
7360                        lpfc_sli4_async_grp5_evt(phba,
7361                                                 &cq_event->cqe.acqe_grp5);
7362                        break;
7363                case LPFC_TRAILER_CODE_FC:
7364                        lpfc_sli4_async_fc_evt(phba, &cq_event->cqe.acqe_fc);
7365                        break;
7366                case LPFC_TRAILER_CODE_SLI:
7367                        lpfc_sli4_async_sli_evt(phba, &cq_event->cqe.acqe_sli);
7368                        break;
7369                case LPFC_TRAILER_CODE_CMSTAT:
7370                        lpfc_sli4_async_cmstat_evt(phba);
7371                        break;
7372                default:
7373                        lpfc_printf_log(phba, KERN_ERR,
7374                                        LOG_TRACE_EVENT,
7375                                        "1804 Invalid asynchronous event code: "
7376                                        "x%x\n", bf_get(lpfc_trailer_code,
7377                                        &cq_event->cqe.mcqe_cmpl));
7378                        break;
7379                }
7380
7381                /* Free the completion event processed to the free pool */
7382                lpfc_sli4_cq_event_release(phba, cq_event);
7383                spin_lock_irqsave(&phba->sli4_hba.asynce_list_lock, iflags);
7384        }
7385        spin_unlock_irqrestore(&phba->sli4_hba.asynce_list_lock, iflags);
7386}
7387
7388/**
7389 * lpfc_sli4_fcf_redisc_event_proc - Process fcf table rediscovery event
7390 * @phba: pointer to lpfc hba data structure.
7391 *
7392 * This routine is invoked by the worker thread to process FCF table
7393 * rediscovery pending completion event.
7394 **/
7395void lpfc_sli4_fcf_redisc_event_proc(struct lpfc_hba *phba)
7396{
7397        int rc;
7398
7399        spin_lock_irq(&phba->hbalock);
7400        /* Clear FCF rediscovery timeout event */
7401        phba->fcf.fcf_flag &= ~FCF_REDISC_EVT;
7402        /* Clear driver fast failover FCF record flag */
7403        phba->fcf.failover_rec.flag = 0;
7404        /* Set state for FCF fast failover */
7405        phba->fcf.fcf_flag |= FCF_REDISC_FOV;
7406        spin_unlock_irq(&phba->hbalock);
7407
7408        /* Scan FCF table from the first entry to re-discover SAN */
7409        lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
7410                        "2777 Start post-quiescent FCF table scan\n");
7411        rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST);
7412        if (rc)
7413                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7414                                "2747 Issue FCF scan read FCF mailbox "
7415                                "command failed 0x%x\n", rc);
7416}
7417
7418/**
7419 * lpfc_api_table_setup - Set up per hba pci-device group func api jump table
7420 * @phba: pointer to lpfc hba data structure.
7421 * @dev_grp: The HBA PCI-Device group number.
7422 *
7423 * This routine is invoked to set up the per HBA PCI-Device group function
7424 * API jump table entries.
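 *
 * Each *_api_table_setup() call populates the corresponding jump table
 * with the SLI revision specific routines, so the rest of the driver
 * dispatches through function pointers rather than checking the SLI
 * revision at every call site.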
7425 *
7426 * Return: 0 if success, otherwise -ENODEV
7427 **/
7428int
7429lpfc_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
7430{
7431        int rc;
7432
7433        /* Set up lpfc PCI-device group */
7434        phba->pci_dev_grp = dev_grp;
7435
7436        /* The LPFC_PCI_DEV_OC uses SLI4 */
7437        if (dev_grp == LPFC_PCI_DEV_OC)
7438                phba->sli_rev = LPFC_SLI_REV4;
7439
7440        /* Set up device INIT API function jump table */
7441        rc = lpfc_init_api_table_setup(phba, dev_grp);
7442        if (rc)
7443                return -ENODEV;
7444        /* Set up SCSI API function jump table */
7445        rc = lpfc_scsi_api_table_setup(phba, dev_grp);
7446        if (rc)
7447                return -ENODEV;
7448        /* Set up SLI API function jump table */
7449        rc = lpfc_sli_api_table_setup(phba, dev_grp);
7450        if (rc)
7451                return -ENODEV;
7452        /* Set up MBOX API function jump table */
7453        rc = lpfc_mbox_api_table_setup(phba, dev_grp);
7454        if (rc)
7455                return -ENODEV;
7456
7457        return 0;
7458}
7459
7460/**
7461 * lpfc_log_intr_mode - Log the active interrupt mode
7462 * @phba: pointer to lpfc hba data structure.
7463 * @intr_mode: active interrupt mode adopted.
7464 *
7465 * This routine is invoked to log the active interrupt mode currently in
7466 * use for the device.
7467 **/
7468static void lpfc_log_intr_mode(struct lpfc_hba *phba, uint32_t intr_mode)
7469{
7470        switch (intr_mode) {
7471        case 0:
7472                lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7473                                "0470 Enabled INTx interrupt mode.\n");
7474                break;
7475        case 1:
7476                lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7477                                "0481 Enabled MSI interrupt mode.\n");
7478                break;
7479        case 2:
7480                lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7481                                "0480 Enabled MSI-X interrupt mode.\n");
7482                break;
7483        default:
7484                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7485                                "0482 Illegal interrupt mode.\n");
7486                break;
7487        }
7489}
7490
7491/**
7492 * lpfc_enable_pci_dev - Enable a generic PCI device.
7493 * @phba: pointer to lpfc hba data structure.
7494 *
7495 * This routine is invoked to enable the PCI device that is common to all
7496 * PCI devices.
7497 *
7498 * Return codes
7499 *      0 - successful
7500 *      other values - error
7501 **/
7502static int
7503lpfc_enable_pci_dev(struct lpfc_hba *phba)
7504{
7505        struct pci_dev *pdev;
7506
7507        /* Obtain PCI device reference */
7508        if (!phba->pcidev)
7509                goto out_error;
7510
7511        pdev = phba->pcidev;
7512        /* Enable PCI device */
7513        if (pci_enable_device_mem(pdev))
7514                goto out_error;
7515        /* Request PCI resource for the device */
7516        if (pci_request_mem_regions(pdev, LPFC_DRIVER_NAME))
7517                goto out_disable_device;
7518        /* Set up device as PCI master and save state for EEH */
7519        pci_set_master(pdev);
7520        pci_try_set_mwi(pdev);
7521        pci_save_state(pdev);
7522
7523        /* PCIe EEH recovery on powerpc platforms needs fundamental reset */
7524        if (pci_is_pcie(pdev))
7525                pdev->needs_freset = 1;
7526
7527        return 0;
7528
7529out_disable_device:
7530        pci_disable_device(pdev);
7531out_error:
7532        lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7533                        "1401 Failed to enable pci device\n");
7534        return -ENODEV;
7535}
7536
7537/**
7538 * lpfc_disable_pci_dev - Disable a generic PCI device.
7539 * @phba: pointer to lpfc hba data structure.
7540 *
7541 * This routine is invoked to disable the PCI device that is common to all
7542 * PCI devices.
7543 **/
7544static void
7545lpfc_disable_pci_dev(struct lpfc_hba *phba)
7546{
7547        struct pci_dev *pdev;
7548
7549        /* Obtain PCI device reference */
7550        if (!phba->pcidev)
7551                return;
7552
7553        pdev = phba->pcidev;
7554        /* Release PCI resource and disable PCI device */
7555        pci_release_mem_regions(pdev);
7556        pci_disable_device(pdev);
7559}
7560
7561/**
7562 * lpfc_reset_hba - Reset a hba
7563 * @phba: pointer to lpfc hba data structure.
7564 *
7565 * This routine is invoked to reset a hba device. It brings the HBA
7566 * offline, performs a board restart, and then brings the board back
7567 * online. The lpfc_offline calls lpfc_sli_hba_down which will clean up
7568 * outstanding mailbox commands.
7569 **/
7570void
7571lpfc_reset_hba(struct lpfc_hba *phba)
7572{
7573        /* If resets are disabled then set error state and return. */
7574        if (!phba->cfg_enable_hba_reset) {
7575                phba->link_state = LPFC_HBA_ERROR;
7576                return;
7577        }
7578
7579        /* If not LPFC_SLI_ACTIVE, force all IO to be flushed */
7580        if (phba->sli.sli_flag & LPFC_SLI_ACTIVE) {
7581                lpfc_offline_prep(phba, LPFC_MBX_WAIT);
7582        } else {
7583                lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
7584                lpfc_sli_flush_io_rings(phba);
7585        }
7586        lpfc_offline(phba);
7587        lpfc_sli_brdrestart(phba);
7588        lpfc_online(phba);
7589        lpfc_unblock_mgmt_io(phba);
7590}
7591
7592/**
7593 * lpfc_sli_sriov_nr_virtfn_get - Get the number of sr-iov virtual functions
7594 * @phba: pointer to lpfc hba data structure.
7595 *
7596 * This function reads the PCI SR-IOV extended capability of the
7597 * physical function to obtain the total number of virtual functions
7598 * (TotalVFs) that the device supports.
7599 *
7600 * Returns the TotalVFs value, or 0 if the device has no SR-IOV capability.
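 *
 * For example, a device whose SR-IOV capability reports TotalVFs = 8 can
 * have at most 8 virtual functions enabled through
 * lpfc_sli_probe_sriov_nr_virtfn().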
7601 **/
7602uint16_t
7603lpfc_sli_sriov_nr_virtfn_get(struct lpfc_hba *phba)
7604{
7605        struct pci_dev *pdev = phba->pcidev;
7606        uint16_t nr_virtfn;
7607        int pos;
7608
7609        pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
7610        if (pos == 0)
7611                return 0;
7612
7613        pci_read_config_word(pdev, pos + PCI_SRIOV_TOTAL_VF, &nr_virtfn);
7614        return nr_virtfn;
7615}
7616
7617/**
7618 * lpfc_sli_probe_sriov_nr_virtfn - Enable a number of sr-iov virtual functions
7619 * @phba: pointer to lpfc hba data structure.
7620 * @nr_vfn: number of virtual functions to be enabled.
7621 *
7622 * This function enables the PCI SR-IOV virtual functions to a physical
7623 * function. It invokes the PCI SR-IOV api with the @nr_vfn provided to
7624 * enable the number of virtual functions to the physical function. As
7625 * not all devices support SR-IOV, the return code from the pci_enable_sriov()
7626 * API call is not considered an error condition for most devices.
7627 **/
7628int
7629lpfc_sli_probe_sriov_nr_virtfn(struct lpfc_hba *phba, int nr_vfn)
7630{
7631        struct pci_dev *pdev = phba->pcidev;
7632        uint16_t max_nr_vfn;
7633        int rc;
7634
7635        max_nr_vfn = lpfc_sli_sriov_nr_virtfn_get(phba);
7636        if (nr_vfn > max_nr_vfn) {
7637                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7638                                "3057 Requested vfs (%d) greater than "
7639                                "supported vfs (%d)", nr_vfn, max_nr_vfn);
7640                return -EINVAL;
7641        }
7642
7643        rc = pci_enable_sriov(pdev, nr_vfn);
7644        if (rc) {
7645                lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
7646                                "2806 Failed to enable sriov on this device "
7647                                "with vfn number nr_vf:%d, rc:%d\n",
7648                                nr_vfn, rc);
7649        } else {
7650                lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
7651                                "2807 Successfully enabled sriov on this "
7652                                "device with vfn number nr_vf:%d\n", nr_vfn);
        }
7653        return rc;
7654}
7655
7656static void
7657lpfc_unblock_requests_work(struct work_struct *work)
7658{
7659        struct lpfc_hba *phba = container_of(work, struct lpfc_hba,
7660                                             unblock_request_work);
7661
7662        lpfc_unblock_requests(phba);
7663}
7664
7665/**
7666 * lpfc_setup_driver_resource_phase1 - Phase1 setup of driver internal resources.
7667 * @phba: pointer to lpfc hba data structure.
7668 *
7669 * This routine is invoked to set up the driver internal resources before the
7670 * device specific resource setup to support the HBA device it is attached to.
7671 *
7672 * Return codes
7673 *      0 - successful
7674 *      other values - error
7675 **/
7676static int
7677lpfc_setup_driver_resource_phase1(struct lpfc_hba *phba)
7678{
7679        struct lpfc_sli *psli = &phba->sli;
7680
7681        /*
7682         * Driver resources common to all SLI revisions
7683         */
7684        atomic_set(&phba->fast_event_count, 0);
7685        atomic_set(&phba->dbg_log_idx, 0);
7686        atomic_set(&phba->dbg_log_cnt, 0);
7687        atomic_set(&phba->dbg_log_dmping, 0);
7688        spin_lock_init(&phba->hbalock);
7689
7690        /* Initialize port_list spinlock */
7691        spin_lock_init(&phba->port_list_lock);
7692        INIT_LIST_HEAD(&phba->port_list);
7693
7694        INIT_LIST_HEAD(&phba->work_list);
7695        init_waitqueue_head(&phba->wait_4_mlo_m_q);
7696
7697        /* Initialize the wait queue head for the kernel thread */
7698        init_waitqueue_head(&phba->work_waitq);
7699
7700        lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7701                        "1403 Protocols supported %s %s %s\n",
7702                        ((phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) ?
7703                                "SCSI" : " "),
7704                        ((phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) ?
7705                                "NVME" : " "),
7706                        (phba->nvmet_support ? "NVMET" : " "));
7707
7708        /* Initialize the IO buffer list used by driver for SLI3 SCSI */
7709        spin_lock_init(&phba->scsi_buf_list_get_lock);
7710        INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_get);
7711        spin_lock_init(&phba->scsi_buf_list_put_lock);
7712        INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put);
7713
7714        /* Initialize the fabric iocb list */
7715        INIT_LIST_HEAD(&phba->fabric_iocb_list);
7716
7717        /* Initialize list to save ELS buffers */
7718        INIT_LIST_HEAD(&phba->elsbuf);
7719
7720        /* Initialize FCF connection rec list */
7721        INIT_LIST_HEAD(&phba->fcf_conn_rec_list);
7722
7723        /* Initialize OAS configuration list */
7724        spin_lock_init(&phba->devicelock);
7725        INIT_LIST_HEAD(&phba->luns);
7726
7727        /* MBOX heartbeat timer */
7728        timer_setup(&psli->mbox_tmo, lpfc_mbox_timeout, 0);
7729        /* Fabric block timer */
7730        timer_setup(&phba->fabric_block_timer, lpfc_fabric_block_timeout, 0);
7731        /* EA polling mode timer */
7732        timer_setup(&phba->eratt_poll, lpfc_poll_eratt, 0);
7733        /* Heartbeat timer */
7734        timer_setup(&phba->hb_tmofunc, lpfc_hb_timeout, 0);
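            /*
             * Illustrative sketch (hypothetical callback, not from this
             * driver): each timer above uses the timer_setup() pattern,
             * where the callback receives the struct timer_list pointer
             * and recovers the owning lpfc_hba with from_timer():
             *
             *	static void example_tmo(struct timer_list *t)
             *	{
             *		struct lpfc_hba *phba = from_timer(phba, t, hb_tmofunc);
             *
             *		lpfc_worker_wake_up(phba);
             *	}
             *
             * Timer context cannot sleep, so real callbacks only flag work
             * for the worker thread.
             */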
7735
7736        INIT_DELAYED_WORK(&phba->eq_delay_work, lpfc_hb_eq_delay_work);
7737
7738        INIT_DELAYED_WORK(&phba->idle_stat_delay_work,
7739                          lpfc_idle_stat_delay_work);
7740        INIT_WORK(&phba->unblock_request_work, lpfc_unblock_requests_work);
7741        return 0;
7742}
7743
7744/**
7745 * lpfc_sli_driver_resource_setup - Setup driver internal resources for SLI3 dev
7746 * @phba: pointer to lpfc hba data structure.
7747 *
7748 * This routine is invoked to set up the driver internal resources specific to
7749 * support the SLI-3 HBA device it is attached to.
7750 *
7751 * Return codes
7752 * 0 - successful
7753 * other values - error
7754 **/
7755static int
7756lpfc_sli_driver_resource_setup(struct lpfc_hba *phba)
7757{
7758        int rc, entry_sz;
7759
7760        /*
7761         * Initialize timers used by driver
7762         */
7763
7764        /* FCP polling mode timer */
7765        timer_setup(&phba->fcp_poll_timer, lpfc_poll_timeout, 0);
7766
7767        /* Host attention work mask setup */
7768        phba->work_ha_mask = (HA_ERATT | HA_MBATT | HA_LATT);
7769        phba->work_ha_mask |= (HA_RXMASK << (LPFC_ELS_RING * 4));
7770
7771        /* Get all the module params for configuring this host */
7772        lpfc_get_cfgparam(phba);
7773
7774        /* Set up phase-1 common device driver resources */
7775        rc = lpfc_setup_driver_resource_phase1(phba);
7776        if (rc)
7777                return -ENODEV;
7778
7779        if (phba->pcidev->device == PCI_DEVICE_ID_HORNET) {
7780                phba->menlo_flag |= HBA_MENLO_SUPPORT;
7781                /* check for menlo minimum sg count */
7782                if (phba->cfg_sg_seg_cnt < LPFC_DEFAULT_MENLO_SG_SEG_CNT)
7783                        phba->cfg_sg_seg_cnt = LPFC_DEFAULT_MENLO_SG_SEG_CNT;
7784        }
7785
7786        if (!phba->sli.sli3_ring)
7787                phba->sli.sli3_ring = kcalloc(LPFC_SLI3_MAX_RING,
7788                                              sizeof(struct lpfc_sli_ring),
7789                                              GFP_KERNEL);
7790        if (!phba->sli.sli3_ring)
7791                return -ENOMEM;
7792
7793        /*
7794         * Since lpfc_sg_seg_cnt is a module parameter, the sg_dma_buf_size
7795         * used to create the sg_dma_buf_pool must be dynamically calculated.
7796         */
7797
7798        if (phba->sli_rev == LPFC_SLI_REV4)
7799                entry_sz = sizeof(struct sli4_sge);
7800        else
7801                entry_sz = sizeof(struct ulp_bde64);
7802
7803        /* There are going to be 2 reserved BDEs: 1 FCP cmnd + 1 FCP rsp */
7804        if (phba->cfg_enable_bg) {
7805                /*
7806                 * The scsi_buf for a T10-DIF I/O will hold the FCP cmnd,
7807                 * the FCP rsp, and a BDE for each. Since we have no control
7808                 * over how many protection data segments the SCSI layer
7809                 * will hand us (i.e. there could be one for every block
7810                 * in the IO), we just allocate enough BDEs to accommodate
7811                 * our max amount, and we need to limit lpfc_sg_seg_cnt to
7812                 * minimize the risk of running out.
7813                 */
7814                phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
7815                        sizeof(struct fcp_rsp) +
7816                        (LPFC_MAX_SG_SEG_CNT * entry_sz);
7817
7818                if (phba->cfg_sg_seg_cnt > LPFC_MAX_SG_SEG_CNT_DIF)
7819                        phba->cfg_sg_seg_cnt = LPFC_MAX_SG_SEG_CNT_DIF;
7820
7821                /* Total BDEs in BPL for scsi_sg_list and scsi_sg_prot_list */
7822                phba->cfg_total_seg_cnt = LPFC_MAX_SG_SEG_CNT;
7823        } else {
7824                /*
7825                 * The scsi_buf for a regular I/O will hold the FCP cmnd,
7826                 * the FCP rsp, a BDE for each, and a BDE for up to
7827                 * cfg_sg_seg_cnt data segments.
7828                 */
7829                phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
7830                        sizeof(struct fcp_rsp) +
7831                        ((phba->cfg_sg_seg_cnt + 2) * entry_sz);
7832
7833                /* Total BDEs in BPL for scsi_sg_list */
7834                phba->cfg_total_seg_cnt = phba->cfg_sg_seg_cnt + 2;
7835        }
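
            /*
             * Worked example (illustrative; exact struct sizes depend on
             * the headers and architecture): for a regular I/O with the
             * common default of 64 data segments and 12-byte SLI-3
             * ulp_bde64 entries, the pool buffer must hold
             *
             *	sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp)
             *		+ (64 + 2) * 12
             *
             * bytes: the two reserved BDEs for the FCP command and
             * response plus one BDE per data segment, all carved from a
             * single DMA buffer.
             */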
7836
7837        lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP,
7838                        "9088 INIT sg_tablesize:%d dmabuf_size:%d total_bde:%d\n",
7839                        phba->cfg_sg_seg_cnt, phba->cfg_sg_dma_buf_size,
7840                        phba->cfg_total_seg_cnt);
7841
7842        phba->max_vpi = LPFC_MAX_VPI;
7843        /* This will be set to correct value after config_port mbox */
7844        phba->max_vports = 0;
7845
7846        /*
7847         * Initialize the SLI Layer to run with lpfc HBAs.
7848         */
7849        lpfc_sli_setup(phba);
7850        lpfc_sli_queue_init(phba);
7851
7852        /* Allocate device driver memory */
7853        if (lpfc_mem_alloc(phba, BPL_ALIGN_SZ))
7854                return -ENOMEM;
7855
7856        phba->lpfc_sg_dma_buf_pool =
7857                dma_pool_create("lpfc_sg_dma_buf_pool",
7858                                &phba->pcidev->dev, phba->cfg_sg_dma_buf_size,
7859                                BPL_ALIGN_SZ, 0);
7860
7861        if (!phba->lpfc_sg_dma_buf_pool)
7862                goto fail_free_mem;
7863
7864        phba->lpfc_cmd_rsp_buf_pool =
7865                        dma_pool_create("lpfc_cmd_rsp_buf_pool",
7866                                        &phba->pcidev->dev,
7867                                        sizeof(struct fcp_cmnd) +
7868                                        sizeof(struct fcp_rsp),
7869                                        BPL_ALIGN_SZ, 0);
7870
7871        if (!phba->lpfc_cmd_rsp_buf_pool)
7872                goto fail_free_dma_buf_pool;
7873
7874        /*
7875         * Enable sr-iov virtual functions if supported and configured
7876         * through the module parameter.
7877         */
7878        if (phba->cfg_sriov_nr_virtfn > 0) {
7879                rc = lpfc_sli_probe_sriov_nr_virtfn(phba,
7880                                                 phba->cfg_sriov_nr_virtfn);
7881                if (rc) {
7882                        lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
7883                                        "2808 Requested number of SR-IOV "
7884                                        "virtual functions (%d) is not "
7885                                        "supported\n",
7886                                        phba->cfg_sriov_nr_virtfn);
7887                        phba->cfg_sriov_nr_virtfn = 0;
7888                }
7889        }
7890
7891        return 0;
7892
7893fail_free_dma_buf_pool:
7894        dma_pool_destroy(phba->lpfc_sg_dma_buf_pool);
7895        phba->lpfc_sg_dma_buf_pool = NULL;
7896fail_free_mem:
7897        lpfc_mem_free(phba);
7898        return -ENOMEM;
7899}
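
/*
 * Sketch (illustrative only) of how the two pools created above are meant
 * to be consumed: buffers are carved out with the standard dma_pool API,
 * e.g.
 *
 *	dma_addr_t phys;
 *	void *buf = dma_pool_zalloc(phba->lpfc_sg_dma_buf_pool,
 *				    GFP_KERNEL, &phys);
 *
 *	if (buf)
 *		dma_pool_free(phba->lpfc_sg_dma_buf_pool, buf, phys);
 *
 * The real allocation sites live in the SCSI/SLI I/O paths, not here.
 */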
7900
7901/**
7902 * lpfc_sli_driver_resource_unset - Unset drvr internal resources for SLI3 dev
7903 * @phba: pointer to lpfc hba data structure.
7904 *
7905 * This routine is invoked to unset the driver internal resources set up
7906 * specifically for supporting the SLI-3 HBA device it is attached to.
7907 **/
7908static void
7909lpfc_sli_driver_resource_unset(struct lpfc_hba *phba)
7910{
7911        /* Free device driver memory allocated */
7912        lpfc_mem_free_all(phba);
7913
7914        return;
7915}
7916
7917/**
7918 * lpfc_sli4_driver_resource_setup - Setup drvr internal resources for SLI4 dev
7919 * @phba: pointer to lpfc hba data structure.
7920 *
7921 * This routine is invoked to set up the driver internal resources specific to
7922 * support the SLI-4 HBA device it is attached to.
7923 *
7924 * Return codes
7925 *      0 - successful
7926 *      other values - error
7927 **/
7928static int
7929lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
7930{
7931        LPFC_MBOXQ_t *mboxq;
7932        MAILBOX_t *mb;
7933        int rc, i, max_buf_size;
7934        int longs;
7935        int extra;
7936        uint64_t wwn;
7937        u32 if_type;
7938        u32 if_fam;
7939
7940        phba->sli4_hba.num_present_cpu = lpfc_present_cpu;
7941        phba->sli4_hba.num_possible_cpu = cpumask_last(cpu_possible_mask) + 1;
7942        phba->sli4_hba.curr_disp_cpu = 0;
7943
7944        /* Get all the module params for configuring this host */
7945        lpfc_get_cfgparam(phba);
7946
7947        /* Set up phase-1 common device driver resources */
7948        rc = lpfc_setup_driver_resource_phase1(phba);
7949        if (rc)
7950                return -ENODEV;
7951
7952        /* Before proceed, wait for POST done and device ready */
7953        rc = lpfc_sli4_post_status_check(phba);
7954        if (rc)
7955                return -ENODEV;
7956
7957        /* Allocate all driver workqueues here */
7958
7959        /* The lpfc_wq workqueue for deferred irq use */
7960        phba->wq = alloc_workqueue("lpfc_wq", WQ_MEM_RECLAIM, 0);
7961
7962        /*
7963         * Initialize timers used by driver
7964         */
7965
7966        timer_setup(&phba->rrq_tmr, lpfc_rrq_timeout, 0);
7967
7968        /* FCF rediscover timer */
7969        timer_setup(&phba->fcf.redisc_wait, lpfc_sli4_fcf_redisc_wait_tmo, 0);
7970
7971        /* CMF congestion timer */
7972        hrtimer_init(&phba->cmf_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
7973        phba->cmf_timer.function = lpfc_cmf_timer;
7974
7975        /*
7976         * Control structure for handling external multi-buffer mailbox
7977         * command pass-through.
7978         */
7979        memset((uint8_t *)&phba->mbox_ext_buf_ctx, 0,
7980                sizeof(struct lpfc_mbox_ext_buf_ctx));
7981        INIT_LIST_HEAD(&phba->mbox_ext_buf_ctx.ext_dmabuf_list);
7982
7983        phba->max_vpi = LPFC_MAX_VPI;
7984
7985        /* This will be set to correct value after the read_config mbox */
7986        phba->max_vports = 0;
7987
7988        /* Program the default value of vlan_id and fc_map */
7989        phba->valid_vlan = 0;
7990        phba->fc_map[0] = LPFC_FCOE_FCF_MAP0;
7991        phba->fc_map[1] = LPFC_FCOE_FCF_MAP1;
7992        phba->fc_map[2] = LPFC_FCOE_FCF_MAP2;
7993
7994        /*
7995         * For SLI4, instead of using ring 0 (LPFC_FCP_RING) for FCP commands
7996         * we will associate a new ring, for each EQ/CQ/WQ tuple.
7997         * The WQ create will allocate the ring.
7998         */
7999
8000        /* Initialize buffer queue management fields */
8001        INIT_LIST_HEAD(&phba->hbqs[LPFC_ELS_HBQ].hbq_buffer_list);
8002        phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_sli4_rb_alloc;
8003        phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_sli4_rb_free;
8004
8005        /* for VMID idle timeout if VMID is enabled */
8006        if (lpfc_is_vmid_enabled(phba))
8007                timer_setup(&phba->inactive_vmid_poll, lpfc_vmid_poll, 0);
8008
8009        /*
8010         * Initialize the SLI Layer to run with lpfc SLI4 HBAs.
8011         */
8012        /* Initialize the Abort buffer list used by driver */
8013        spin_lock_init(&phba->sli4_hba.abts_io_buf_list_lock);
8014        INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_io_buf_list);
8015
8016        if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
8017                /* Initialize the Abort nvme buffer list used by driver */
8018                spin_lock_init(&phba->sli4_hba.abts_nvmet_buf_list_lock);
8019                INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
8020                INIT_LIST_HEAD(&phba->sli4_hba.lpfc_nvmet_io_wait_list);
8021                spin_lock_init(&phba->sli4_hba.t_active_list_lock);
8022                INIT_LIST_HEAD(&phba->sli4_hba.t_active_ctx_list);
8023        }
8024
8025        /* This abort list used by worker thread */
8026        spin_lock_init(&phba->sli4_hba.sgl_list_lock);
8027        spin_lock_init(&phba->sli4_hba.nvmet_io_wait_lock);
8028        spin_lock_init(&phba->sli4_hba.asynce_list_lock);
8029        spin_lock_init(&phba->sli4_hba.els_xri_abrt_list_lock);
8030
8031        /*
8032         * Initialize driver internal slow-path work queues
8033         */
8034
8035        /* Driver internal slow-path CQ Event pool */
8036        INIT_LIST_HEAD(&phba->sli4_hba.sp_cqe_event_pool);
8037        /* Response IOCB work queue list */
8038        INIT_LIST_HEAD(&phba->sli4_hba.sp_queue_event);
8039        /* Asynchronous event CQ Event work queue list */
8040        INIT_LIST_HEAD(&phba->sli4_hba.sp_asynce_work_queue);
8041        /* Slow-path XRI aborted CQ Event work queue list */
8042        INIT_LIST_HEAD(&phba->sli4_hba.sp_els_xri_aborted_work_queue);
8043        /* Receive queue CQ Event work queue list */
8044        INIT_LIST_HEAD(&phba->sli4_hba.sp_unsol_work_queue);
8045
8046        /* Initialize extent block lists. */
8047        INIT_LIST_HEAD(&phba->sli4_hba.lpfc_rpi_blk_list);
8048        INIT_LIST_HEAD(&phba->sli4_hba.lpfc_xri_blk_list);
8049        INIT_LIST_HEAD(&phba->sli4_hba.lpfc_vfi_blk_list);
8050        INIT_LIST_HEAD(&phba->lpfc_vpi_blk_list);
8051
8052        /* Initialize mboxq lists. If the early init routines fail,
8053         * these lists must already be correctly initialized.
8054         */
8055        INIT_LIST_HEAD(&phba->sli.mboxq);
8056        INIT_LIST_HEAD(&phba->sli.mboxq_cmpl);
8057
8058        /* initialize optic_state to 0xFF */
8059        phba->sli4_hba.lnk_info.optic_state = 0xff;
8060
8061        /* Allocate device driver memory */
8062        rc = lpfc_mem_alloc(phba, SGL_ALIGN_SZ);
8063        if (rc)
8064                return -ENOMEM;
8065
8066        /* IF Type 2 ports get initialized now. */
8067        if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) >=
8068            LPFC_SLI_INTF_IF_TYPE_2) {
8069                rc = lpfc_pci_function_reset(phba);
8070                if (unlikely(rc)) {
8071                        rc = -ENODEV;
8072                        goto out_free_mem;
8073                }
8074                phba->temp_sensor_support = 1;
8075        }
8076
8077        /* Create the bootstrap mailbox command */
8078        rc = lpfc_create_bootstrap_mbox(phba);
8079        if (unlikely(rc))
8080                goto out_free_mem;
8081
8082        /* Set up the host's endian order with the device. */
8083        rc = lpfc_setup_endian_order(phba);
8084        if (unlikely(rc))
8085                goto out_free_bsmbx;
8086
8087        /* Set up the hba's configuration parameters. */
8088        rc = lpfc_sli4_read_config(phba);
8089        if (unlikely(rc))
8090                goto out_free_bsmbx;
8091
8092        if (phba->sli4_hba.fawwpn_flag & LPFC_FAWWPN_CONFIG) {
8093                /* Right now the link is down; if FA-PWWN is configured, the
8094                 * firmware will try FLOGI before the driver gets a link up.
8095                 * If it fails, the driver should get a MISCONFIGURED async
8096                 * event which will clear this flag. The driver is only
8097                 * notified on failure; on success no notification is
8098                 * given. Assume success.
8099                 */
8100                phba->sli4_hba.fawwpn_flag |= LPFC_FAWWPN_FABRIC;
8101        }
8102
8103        rc = lpfc_mem_alloc_active_rrq_pool_s4(phba);
8104        if (unlikely(rc))
8105                goto out_free_bsmbx;
8106
8107        /* IF Type 0 ports get initialized now. */
8108        if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
8109            LPFC_SLI_INTF_IF_TYPE_0) {
8110                rc = lpfc_pci_function_reset(phba);
8111                if (unlikely(rc))
8112                        goto out_free_bsmbx;
8113        }
8114
8115        mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
8116                                                       GFP_KERNEL);
8117        if (!mboxq) {
8118                rc = -ENOMEM;
8119                goto out_free_bsmbx;
8120        }
8121
8122        /* Check for NVMET being configured */
8123        phba->nvmet_support = 0;
8124        if (lpfc_enable_nvmet_cnt) {
8126                /* First get WWN of HBA instance */
8127                lpfc_read_nv(phba, mboxq);
8128                rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
8129                if (rc != MBX_SUCCESS) {
8130                        lpfc_printf_log(phba, KERN_ERR,
8131                                        LOG_TRACE_EVENT,
8132                                        "6016 Mailbox failed, mbxCmd x%x "
8133                                        "READ_NV, mbxStatus x%x\n",
8134                                        bf_get(lpfc_mqe_command, &mboxq->u.mqe),
8135                                        bf_get(lpfc_mqe_status, &mboxq->u.mqe));
8136                        mempool_free(mboxq, phba->mbox_mem_pool);
8137                        rc = -EIO;
8138                        goto out_free_bsmbx;
8139                }
8140                mb = &mboxq->u.mb;
8141                memcpy(&wwn, (char *)mb->un.varRDnvp.nodename,
8142                       sizeof(uint64_t));
8143                wwn = cpu_to_be64(wwn);
8144                phba->sli4_hba.wwnn.u.name = wwn;
8145                memcpy(&wwn, (char *)mb->un.varRDnvp.portname,
8146                       sizeof(uint64_t));
8147                /* wwn is WWPN of HBA instance */
8148                wwn = cpu_to_be64(wwn);
8149                phba->sli4_hba.wwpn.u.name = wwn;
8150
8151                /* Check to see if it matches any module parameter */
8152                for (i = 0; i < lpfc_enable_nvmet_cnt; i++) {
8153                        if (wwn == lpfc_enable_nvmet[i]) {
8154#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
8155                                if (lpfc_nvmet_mem_alloc(phba))
8156                                        break;
8157
8158                                phba->nvmet_support = 1; /* a match */
8159
8160                                lpfc_printf_log(phba, KERN_ERR,
8161                                                LOG_TRACE_EVENT,
8162                                                "6017 NVME Target %016llx\n",
8163                                                wwn);
8164#else
8165                                lpfc_printf_log(phba, KERN_ERR,
8166                                                LOG_TRACE_EVENT,
8167                                                "6021 Can't enable NVME Target."
8168                                                " NVME_TARGET_FC infrastructure"
8169                                                " is not in kernel\n");
8170#endif
8171                                /* Not supported for NVMET */
8172                                phba->cfg_xri_rebalancing = 0;
8173                                if (phba->irq_chann_mode == NHT_MODE) {
8174                                        phba->cfg_irq_chann =
8175                                                phba->sli4_hba.num_present_cpu;
8176                                        phba->cfg_hdw_queue =
8177                                                phba->sli4_hba.num_present_cpu;
8178                                        phba->irq_chann_mode = NORMAL_MODE;
8179                                }
8180                                break;
8181                        }
8182                }
8183        }
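
            /*
             * Usage note (illustrative): target mode is opted into per
             * adapter by listing WWPNs in the lpfc_enable_nvmet module
             * parameter, e.g.
             *
             *	modprobe lpfc lpfc_enable_nvmet=<wwpn>[,<wwpn>...]
             *
             * (placeholder numeric values); only an HBA whose WWPN read
             * back via READ_NV above matches an entry enables
             * nvmet_support.
             */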
8184
8185        lpfc_nvme_mod_param_dep(phba);
8186
8187        /*
8188         * Get sli4 parameters that override parameters from Port capabilities.
8189         * If this call fails, it isn't critical unless the SLI4 parameters come
8190         * back in conflict.
8191         */
8192        rc = lpfc_get_sli4_parameters(phba, mboxq);
8193        if (rc) {
8194                if_type = bf_get(lpfc_sli_intf_if_type,
8195                                 &phba->sli4_hba.sli_intf);
8196                if_fam = bf_get(lpfc_sli_intf_sli_family,
8197                                &phba->sli4_hba.sli_intf);
8198                if (phba->sli4_hba.extents_in_use &&
8199                    phba->sli4_hba.rpi_hdrs_in_use) {
8200                        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8201                                        "2999 Unsupported SLI4 Parameters: "
8202                                        "Extents and RPI headers enabled.\n");
8203                        if (if_type == LPFC_SLI_INTF_IF_TYPE_0 &&
8204                            if_fam ==  LPFC_SLI_INTF_FAMILY_BE2) {
8205                                mempool_free(mboxq, phba->mbox_mem_pool);
8206                                rc = -EIO;
8207                                goto out_free_bsmbx;
8208                        }
8209                }
8210                if (!(if_type == LPFC_SLI_INTF_IF_TYPE_0 &&
8211                      if_fam == LPFC_SLI_INTF_FAMILY_BE2)) {
8212                        mempool_free(mboxq, phba->mbox_mem_pool);
8213                        rc = -EIO;
8214                        goto out_free_bsmbx;
8215                }
8216        }
8217
8218        /*
8219         * 1 for cmd, 1 for rsp, NVME adds an extra one
8220         * for boundary conditions in its max_sgl_segment template.
8221         */
8222        extra = 2;
8223        if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
8224                extra++;
8225
8226        /*
8227         * It doesn't matter what family our adapter is in; we are
8228         * limited to 2 pages (512 SGEs) for our SGL.
8229         * There are going to be 2 reserved SGEs: 1 FCP cmnd + 1 FCP rsp
8230         */
8231        max_buf_size = (2 * SLI4_PAGE_SIZE);
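            /*
             * Illustrative arithmetic: with the usual 4096-byte
             * SLI4_PAGE_SIZE and 16-byte struct sli4_sge,
             * (2 * 4096) / 16 = 512, which is the 512-SGE limit quoted
             * above.
             */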
8232
8233        /*
8234         * Since lpfc_sg_seg_cnt is a module parameter, the sg_dma_buf_size
8235         * used to create the sg_dma_buf_pool must be calculated.
8236         */
8237        if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) {
8238                /* Both cfg_enable_bg and cfg_external_dif code paths */
8239
8240                /*
8241                 * The scsi_buf for a T10-DIF I/O holds the FCP cmnd,
8242                 * the FCP rsp, and a SGE. Since we have no control
8243                 * over how many protection segments the SCSI layer
8244                 * will hand us (i.e. there could be one for every block
8245                 * in the IO), just allocate enough SGEs to accommodate
8246                 * our max amount, and we need to limit lpfc_sg_seg_cnt
8247                 * to minimize the risk of running out.
8248                 */
8249                phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
8250                                sizeof(struct fcp_rsp) + max_buf_size;
8251
8252                /* Total SGEs for scsi_sg_list and scsi_sg_prot_list */
8253                phba->cfg_total_seg_cnt = LPFC_MAX_SGL_SEG_CNT;
8254
8255                /*
8256                 * If supporting DIF, reduce the seg count for scsi to
8257                 * allow room for the DIF sges.
8258                 */
8259                if (phba->cfg_enable_bg &&
8260                    phba->cfg_sg_seg_cnt > LPFC_MAX_BG_SLI4_SEG_CNT_DIF)
8261                        phba->cfg_scsi_seg_cnt = LPFC_MAX_BG_SLI4_SEG_CNT_DIF;
8262                else
8263                        phba->cfg_scsi_seg_cnt = phba->cfg_sg_seg_cnt;
8264
8265        } else {
8266                /*
8267                 * The scsi_buf for a regular I/O holds the FCP cmnd,
8268                 * the FCP rsp, a SGE for each, and a SGE for up to
8269                 * cfg_sg_seg_cnt data segments.
8270                 */
8271                phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
8272                                sizeof(struct fcp_rsp) +
8273                                ((phba->cfg_sg_seg_cnt + extra) *
8274                                sizeof(struct sli4_sge));
8275
8276                /* Total SGEs for scsi_sg_list */
8277                phba->cfg_total_seg_cnt = phba->cfg_sg_seg_cnt + extra;
8278                phba->cfg_scsi_seg_cnt = phba->cfg_sg_seg_cnt;
8279
8280                /*
8281                 * NOTE: if (phba->cfg_sg_seg_cnt + extra) <= 256 we only
8282                 * need to post 1 page for the SGL.
8283                 */
8284        }
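
            /*
             * The "1 page" note above is plain arithmetic: 256 SGEs at 16
             * bytes apiece fill exactly one 4096-byte SLI4 page.
             */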
8285
8286        if (phba->cfg_xpsgl && !phba->nvmet_support)
8287                phba->cfg_sg_dma_buf_size = LPFC_DEFAULT_XPSGL_SIZE;
8288        else if (phba->cfg_sg_dma_buf_size  <= LPFC_MIN_SG_SLI4_BUF_SZ)
8289                phba->cfg_sg_dma_buf_size = LPFC_MIN_SG_SLI4_BUF_SZ;
8290        else
8291                phba->cfg_sg_dma_buf_size =
8292                                SLI4_PAGE_ALIGN(phba->cfg_sg_dma_buf_size);
8293
8294        phba->border_sge_num = phba->cfg_sg_dma_buf_size /
8295                               sizeof(struct sli4_sge);
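
            /*
             * border_sge_num is simply how many SGEs fit in one
             * sg_dma_buf; for example (illustrative), a 4096-byte buffer
             * with 16-byte SGEs yields 4096 / 16 = 256.
             */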
8296
8297        /* Limit to LPFC_MAX_NVME_SEG_CNT for NVME. */
8298        if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
8299                if (phba->cfg_sg_seg_cnt > LPFC_MAX_NVME_SEG_CNT) {
8300                        lpfc_printf_log(phba, KERN_INFO, LOG_NVME | LOG_INIT,
8301                                        "6300 Reducing NVME sg segment "
8302                                        "cnt to %d\n",
8303                                        LPFC_MAX_NVME_SEG_CNT);
8304                        phba->cfg_nvme_seg_cnt = LPFC_MAX_NVME_SEG_CNT;
8305                } else
8306                        phba->cfg_nvme_seg_cnt = phba->cfg_sg_seg_cnt;
8307        }
8308
8309        lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP,
8310                        "9087 sg_seg_cnt:%d dmabuf_size:%d "
8311                        "total:%d scsi:%d nvme:%d\n",
8312                        phba->cfg_sg_seg_cnt, phba->cfg_sg_dma_buf_size,
8313                        phba->cfg_total_seg_cnt,  phba->cfg_scsi_seg_cnt,
8314                        phba->cfg_nvme_seg_cnt);
8315
8316        if (phba->cfg_sg_dma_buf_size < SLI4_PAGE_SIZE)
8317                i = phba->cfg_sg_dma_buf_size;
8318        else
8319                i = SLI4_PAGE_SIZE;
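
            /*
             * 'i' becomes the dma_pool alignment below: the smaller of the
             * buffer size and one SLI4 page. A plausible reading (an
             * inference, not stated in the source) is that this keeps each
             * pool buffer from straddling an SLI4 page boundary.
             */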
8320
8321        phba->lpfc_sg_dma_buf_pool =
8322                        dma_pool_create("lpfc_sg_dma_buf_pool",
8323                                        &phba->pcidev->dev,
8324                                        phba->cfg_sg_dma_buf_size,
8325                                        i, 0);
8326        if (!phba->lpfc_sg_dma_buf_pool)
8327                goto out_free_bsmbx;
8328
8329        phba->lpfc_cmd_rsp_buf_pool =
8330                        dma_pool_create("lpfc_cmd_rsp_buf_pool",
8331                                        &phba->pcidev->dev,
8332                                        sizeof(struct fcp_cmnd) +
8333                                        sizeof(struct fcp_rsp),
8334                                        i, 0);
8335        if (!phba->lpfc_cmd_rsp_buf_pool)
8336                goto out_free_sg_dma_buf;
8337
8338        mempool_free(mboxq, phba->mbox_mem_pool);
8339
8340        /* Verify OAS is supported */
8341        lpfc_sli4_oas_verify(phba);
8342
8343        /* Verify RAS support on adapter */
8344        lpfc_sli4_ras_init(phba);
8345
8346        /* Verify all the SLI4 queues */
8347        rc = lpfc_sli4_queue_verify(phba);
8348        if (rc)
8349                goto out_free_cmd_rsp_buf;
8350
8351        /* Create driver internal CQE event pool */
8352        rc = lpfc_sli4_cq_event_pool_create(phba);
8353        if (rc)
8354                goto out_free_cmd_rsp_buf;
8355
8356        /* Initialize sgl lists per host */
8357        lpfc_init_sgl_list(phba);
8358
8359        /* Allocate and initialize active sgl array */
8360        rc = lpfc_init_active_sgl_array(phba);
8361        if (rc) {
8362                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8363                                "1430 Failed to initialize sgl list.\n");
8364                goto out_destroy_cq_event_pool;
8365        }
8366        rc = lpfc_sli4_init_rpi_hdrs(phba);
8367        if (rc) {
8368                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8369                                "1432 Failed to initialize rpi headers.\n");
8370                goto out_free_active_sgl;
8371        }
8372
8373        /* Allocate eligible FCF bmask memory for FCF roundrobin failover */
8374        longs = (LPFC_SLI4_FCF_TBL_INDX_MAX + BITS_PER_LONG - 1)/BITS_PER_LONG;
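            /*
             * The line above is an open-coded DIV_ROUND_UP(): round the FCF
             * table index count up to whole longs (e.g. with 64-bit longs,
             * indexes 0-63 fit in a single unsigned long).
             */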
8375        phba->fcf.fcf_rr_bmask = kcalloc(longs, sizeof(unsigned long),
8376                                         GFP_KERNEL);
8377        if (!phba->fcf.fcf_rr_bmask) {
8378                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8379                                "2759 Failed allocate memory for FCF round "
8380                                "robin failover bmask\n");
8381                rc = -ENOMEM;
8382                goto out_remove_rpi_hdrs;
8383        }
8384
8385        phba->sli4_hba.hba_eq_hdl = kcalloc(phba->cfg_irq_chann,
8386                                            sizeof(struct lpfc_hba_eq_hdl),
8387                                            GFP_KERNEL);
8388        if (!phba->sli4_hba.hba_eq_hdl) {
8389                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8390                                "2572 Failed allocate memory for "
8391                                "fast-path per-EQ handle array\n");
8392                rc = -ENOMEM;
8393                goto out_free_fcf_rr_bmask;
8394        }
8395
8396        phba->sli4_hba.cpu_map = kcalloc(phba->sli4_hba.num_possible_cpu,
8397                                        sizeof(struct lpfc_vector_map_info),
8398                                        GFP_KERNEL);
8399        if (!phba->sli4_hba.cpu_map) {
8400                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8401                                "3327 Failed allocate memory for msi-x "
8402                                "interrupt vector mapping\n");
8403                rc = -ENOMEM;
8404                goto out_free_hba_eq_hdl;
8405        }
8406
8407        phba->sli4_hba.eq_info = alloc_percpu(struct lpfc_eq_intr_info);
8408        if (!phba->sli4_hba.eq_info) {
8409                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8410                                "3321 Failed allocation for per_cpu stats\n");
8411                rc = -ENOMEM;
8412                goto out_free_hba_cpu_map;
8413        }
8414
8415        phba->sli4_hba.idle_stat = kcalloc(phba->sli4_hba.num_possible_cpu,
8416                                           sizeof(*phba->sli4_hba.idle_stat),
8417                                           GFP_KERNEL);
8418        if (!phba->sli4_hba.idle_stat) {
8419                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8420                                "3390 Failed allocation for idle_stat\n");
8421                rc = -ENOMEM;
8422                goto out_free_hba_eq_info;
8423        }
8424
8425#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
8426        phba->sli4_hba.c_stat = alloc_percpu(struct lpfc_hdwq_stat);
8427        if (!phba->sli4_hba.c_stat) {
8428                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8429                                "3332 Failed allocating per cpu hdwq stats\n");
8430                rc = -ENOMEM;
8431                goto out_free_hba_idle_stat;
8432        }
8433#endif
8434
8435        phba->cmf_stat = alloc_percpu(struct lpfc_cgn_stat);
8436        if (!phba->cmf_stat) {
8437                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8438                                "3331 Failed allocating per cpu cgn stats\n");
8439                rc = -ENOMEM;
8440                goto out_free_hba_hdwq_info;
8441        }
8442
8443        /*
8444         * Enable sr-iov virtual functions if supported and configured
8445         * through the module parameter.
8446         */
8447        if (phba->cfg_sriov_nr_virtfn > 0) {
8448                rc = lpfc_sli_probe_sriov_nr_virtfn(phba,
8449                                                 phba->cfg_sriov_nr_virtfn);
8450                if (rc) {
8451                        lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
8452                                        "3020 Requested number of SR-IOV "
8453                                        "virtual functions (%d) is not "
8454                                        "supported\n",
8455                                        phba->cfg_sriov_nr_virtfn);
8456                        phba->cfg_sriov_nr_virtfn = 0;
8457                }
8458        }
8459
8460        return 0;
8461
8462out_free_hba_hdwq_info:
8463#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
8464        free_percpu(phba->sli4_hba.c_stat);
8465out_free_hba_idle_stat:
8466#endif
8467        kfree(phba->sli4_hba.idle_stat);
8468out_free_hba_eq_info:
8469        free_percpu(phba->sli4_hba.eq_info);
8470out_free_hba_cpu_map:
8471        kfree(phba->sli4_hba.cpu_map);
8472out_free_hba_eq_hdl:
8473        kfree(phba->sli4_hba.hba_eq_hdl);
8474out_free_fcf_rr_bmask:
8475        kfree(phba->fcf.fcf_rr_bmask);
8476out_remove_rpi_hdrs:
8477        lpfc_sli4_remove_rpi_hdrs(phba);
8478out_free_active_sgl:
8479        lpfc_free_active_sgl(phba);
8480out_destroy_cq_event_pool:
8481        lpfc_sli4_cq_event_pool_destroy(phba);
8482out_free_cmd_rsp_buf:
8483        dma_pool_destroy(phba->lpfc_cmd_rsp_buf_pool);
8484        phba->lpfc_cmd_rsp_buf_pool = NULL;
8485out_free_sg_dma_buf:
8486        dma_pool_destroy(phba->lpfc_sg_dma_buf_pool);
8487        phba->lpfc_sg_dma_buf_pool = NULL;
8488out_free_bsmbx:
8489        lpfc_destroy_bootstrap_mbox(phba);
8490out_free_mem:
8491        lpfc_mem_free(phba);
8492        return rc;
8493}
8494
8495/**
8496 * lpfc_sli4_driver_resource_unset - Unset drvr internal resources for SLI4 dev
8497 * @phba: pointer to lpfc hba data structure.
8498 *
8499 * This routine is invoked to unset the driver internal resources set up
8500 * specifically for supporting the SLI-4 HBA device it is attached to.
8501 **/
8502static void
8503lpfc_sli4_driver_resource_unset(struct lpfc_hba *phba)
8504{
8505        struct lpfc_fcf_conn_entry *conn_entry, *next_conn_entry;
8506
8507        free_percpu(phba->sli4_hba.eq_info);
8508#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
8509        free_percpu(phba->sli4_hba.c_stat);
8510#endif
8511        free_percpu(phba->cmf_stat);
8512        kfree(phba->sli4_hba.idle_stat);
8513
8514        /* Free memory allocated for msi-x interrupt vector to CPU mapping */
8515        kfree(phba->sli4_hba.cpu_map);
8516        phba->sli4_hba.num_possible_cpu = 0;
8517        phba->sli4_hba.num_present_cpu = 0;
8518        phba->sli4_hba.curr_disp_cpu = 0;
8519        cpumask_clear(&phba->sli4_hba.irq_aff_mask);
8520
8521        /* Free memory allocated for fast-path work queue handles */
8522        kfree(phba->sli4_hba.hba_eq_hdl);
8523
8524        /* Free the allocated rpi headers. */
8525        lpfc_sli4_remove_rpi_hdrs(phba);
8526        lpfc_sli4_remove_rpis(phba);
8527
8528        /* Free eligible FCF index bmask */
8529        kfree(phba->fcf.fcf_rr_bmask);
8530
8531        /* Free the ELS sgl list */
8532        lpfc_free_active_sgl(phba);
8533        lpfc_free_els_sgl_list(phba);
8534        lpfc_free_nvmet_sgl_list(phba);
8535
8536        /* Free the completion queue EQ event pool */
8537        lpfc_sli4_cq_event_release_all(phba);
8538        lpfc_sli4_cq_event_pool_destroy(phba);
8539
8540        /* Release resource identifiers. */
8541        lpfc_sli4_dealloc_resource_identifiers(phba);
8542
8543        /* Free the bsmbx region. */
8544        lpfc_destroy_bootstrap_mbox(phba);
8545
8546        /* Free the SLI Layer memory with SLI4 HBAs */
8547        lpfc_mem_free_all(phba);
8548
8549        /* Free the current connect table */
8550        list_for_each_entry_safe(conn_entry, next_conn_entry,
8551                &phba->fcf_conn_rec_list, list) {
8552                list_del_init(&conn_entry->list);
8553                kfree(conn_entry);
8554        }
8555
8556        return;
8557}
8558
8559/**
8560 * lpfc_init_api_table_setup - Set up init api function jump table
8561 * @phba: The hba struct for which this call is being executed.
8562 * @dev_grp: The HBA PCI-Device group number.
8563 *
8564 * This routine sets up the device INIT interface API function jump table
8565 * in @phba struct.
8566 *
8567 * Returns: 0 - success, -ENODEV - failure.
8568 **/
8569int
8570lpfc_init_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
8571{
8572        phba->lpfc_hba_init_link = lpfc_hba_init_link;
8573        phba->lpfc_hba_down_link = lpfc_hba_down_link;
8574        phba->lpfc_selective_reset = lpfc_selective_reset;
8575        switch (dev_grp) {
8576        case LPFC_PCI_DEV_LP:
8577                phba->lpfc_hba_down_post = lpfc_hba_down_post_s3;
8578                phba->lpfc_handle_eratt = lpfc_handle_eratt_s3;
8579                phba->lpfc_stop_port = lpfc_stop_port_s3;
8580                break;
8581        case LPFC_PCI_DEV_OC:
8582                phba->lpfc_hba_down_post = lpfc_hba_down_post_s4;
8583                phba->lpfc_handle_eratt = lpfc_handle_eratt_s4;
8584                phba->lpfc_stop_port = lpfc_stop_port_s4;
8585                break;
8586        default:
8587                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8588                                "1431 Invalid HBA PCI-device group: 0x%x\n",
8589                                dev_grp);
8590                return -ENODEV;
8591        }
8592        return 0;
8593}
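
/*
 * Usage sketch (illustrative): after this table is filled in, callers
 * dispatch through the function pointers rather than branching on the SLI
 * revision, e.g.
 *
 *	phba->lpfc_stop_port(phba);
 *
 * resolves to lpfc_stop_port_s3() or lpfc_stop_port_s4() depending on the
 * PCI device group selected above.
 */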
8594
8595/**
8596 * lpfc_setup_driver_resource_phase2 - Phase2 setup driver internal resources.
8597 * @phba: pointer to lpfc hba data structure.
8598 *
8599 * This routine is invoked to set up the driver internal resources after the
8600 * device-specific resource setup to support the HBA device it is attached to.
8601 *
8602 * Return codes
8603 *      0 - successful
8604 *      other values - error
8605 **/
8606static int
8607lpfc_setup_driver_resource_phase2(struct lpfc_hba *phba)
8608{
8609        int error;
8610
8611        /* Startup the kernel thread for this host adapter. */
8612        phba->worker_thread = kthread_run(lpfc_do_work, phba,
8613                                          "lpfc_worker_%d", phba->brd_no);
8614        if (IS_ERR(phba->worker_thread)) {
8615                error = PTR_ERR(phba->worker_thread);
8616                return error;
8617        }
8618
8619        return 0;
8620}
8621
8622/**
8623 * lpfc_unset_driver_resource_phase2 - Phase2 unset driver internal resources.
8624 * @phba: pointer to lpfc hba data structure.
8625 *
8626 * This routine is invoked to unset the driver internal resources set up after
8627 * the device-specific resource setup for supporting the HBA device it is
8628 * attached to.
8629 **/
8630static void
8631lpfc_unset_driver_resource_phase2(struct lpfc_hba *phba)
8632{
8633        if (phba->wq) {
8634                destroy_workqueue(phba->wq);
8635                phba->wq = NULL;
8636        }
8637
8638        /* Stop kernel worker thread */
8639        if (phba->worker_thread)
8640                kthread_stop(phba->worker_thread);
8641}
8642
8643/**
8644 * lpfc_free_iocb_list - Free iocb list.
8645 * @phba: pointer to lpfc hba data structure.
8646 *
8647 * This routine is invoked to free the driver's IOCB list and memory.
8648 **/
8649void
8650lpfc_free_iocb_list(struct lpfc_hba *phba)
8651{
8652        struct lpfc_iocbq *iocbq_entry = NULL, *iocbq_next = NULL;
8653
8654        spin_lock_irq(&phba->hbalock);
8655        list_for_each_entry_safe(iocbq_entry, iocbq_next,
8656                                 &phba->lpfc_iocb_list, list) {
8657                list_del(&iocbq_entry->list);
8658                kfree(iocbq_entry);
8659                phba->total_iocbq_bufs--;
8660        }
8661        spin_unlock_irq(&phba->hbalock);
8662
8663        return;
8664}
8665
8666/**
8667 * lpfc_init_iocb_list - Allocate and initialize iocb list.
8668 * @phba: pointer to lpfc hba data structure.
8669 * @iocb_count: number of requested iocbs
8670 *
8671 * This routine is invoked to allocate and initialize the driver's IOCB
8672 * list and set up the IOCB tag array accordingly.
8673 *
8674 * Return codes
8675 *      0 - successful
8676 *      other values - error
8677 **/
8678int
8679lpfc_init_iocb_list(struct lpfc_hba *phba, int iocb_count)
8680{
8681        struct lpfc_iocbq *iocbq_entry = NULL;
8682        uint16_t iotag;
8683        int i;
8684
8685        /* Initialize and populate the iocb list per host.  */
8686        INIT_LIST_HEAD(&phba->lpfc_iocb_list);
8687        for (i = 0; i < iocb_count; i++) {
8688                iocbq_entry = kzalloc(sizeof(struct lpfc_iocbq), GFP_KERNEL);
8689                if (iocbq_entry == NULL) {
8690                        printk(KERN_ERR "%s: only allocated %d iocbs of "
8691                                "expected %d count. Unloading driver.\n",
8692                                __func__, i, iocb_count);
8693                        goto out_free_iocbq;
8694                }
8695
8696                iotag = lpfc_sli_next_iotag(phba, iocbq_entry);
8697                if (iotag == 0) {
8698                        kfree(iocbq_entry);
8699                        printk(KERN_ERR "%s: failed to allocate IOTAG. "
8700                                "Unloading driver.\n", __func__);
8701                        goto out_free_iocbq;
8702                }
8703                iocbq_entry->sli4_lxritag = NO_XRI;
8704                iocbq_entry->sli4_xritag = NO_XRI;
8705
8706                spin_lock_irq(&phba->hbalock);
8707                list_add(&iocbq_entry->list, &phba->lpfc_iocb_list);
8708                phba->total_iocbq_bufs++;
8709                spin_unlock_irq(&phba->hbalock);
8710        }
8711
8712        return 0;
8713
8714out_free_iocbq:
8715        lpfc_free_iocb_list(phba);
8716
8717        return -ENOMEM;
8718}
8719
8720/**
8721 * lpfc_free_sgl_list - Free a given sgl list.
8722 * @phba: pointer to lpfc hba data structure.
8723 * @sglq_list: pointer to the head of sgl list.
8724 *
8725 * This routine is invoked to free a given sgl list and memory.
8726 **/
8727void
8728lpfc_free_sgl_list(struct lpfc_hba *phba, struct list_head *sglq_list)
8729{
8730        struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
8731
8732        list_for_each_entry_safe(sglq_entry, sglq_next, sglq_list, list) {
8733                list_del(&sglq_entry->list);
8734                lpfc_mbuf_free(phba, sglq_entry->virt, sglq_entry->phys);
8735                kfree(sglq_entry);
8736        }
8737}
8738
8739/**
8740 * lpfc_free_els_sgl_list - Free els sgl list.
8741 * @phba: pointer to lpfc hba data structure.
8742 *
8743 * This routine is invoked to free the driver's els sgl list and memory.
8744 **/
8745static void
8746lpfc_free_els_sgl_list(struct lpfc_hba *phba)
8747{
8748        LIST_HEAD(sglq_list);
8749
8750        /* Retrieve all els sgls from driver list */
8751        spin_lock_irq(&phba->sli4_hba.sgl_list_lock);
8752        list_splice_init(&phba->sli4_hba.lpfc_els_sgl_list, &sglq_list);
8753        spin_unlock_irq(&phba->sli4_hba.sgl_list_lock);
8754
8755        /* Now free the sgl list */
8756        lpfc_free_sgl_list(phba, &sglq_list);
8757}
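
/*
 * Design note: the splice-then-free pattern above moves the whole list to a
 * local head while sgl_list_lock is held, then walks and frees the entries
 * with the lock dropped, keeping the spinlock hold time short and constant.
 */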
8758
8759/**
8760 * lpfc_free_nvmet_sgl_list - Free nvmet sgl list.
8761 * @phba: pointer to lpfc hba data structure.
8762 *
8763 * This routine is invoked to free the driver's nvmet sgl list and memory.
8764 **/
8765static void
8766lpfc_free_nvmet_sgl_list(struct lpfc_hba *phba)
8767{
8768        struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
8769        LIST_HEAD(sglq_list);
8770
8771        /* Retrieve all nvmet sgls from driver list */
8772        spin_lock_irq(&phba->hbalock);
8773        spin_lock(&phba->sli4_hba.sgl_list_lock);
8774        list_splice_init(&phba->sli4_hba.lpfc_nvmet_sgl_list, &sglq_list);
8775        spin_unlock(&phba->sli4_hba.sgl_list_lock);
8776        spin_unlock_irq(&phba->hbalock);
8777
8778        /* Now free the sgl list */
8779        list_for_each_entry_safe(sglq_entry, sglq_next, &sglq_list, list) {
8780                list_del(&sglq_entry->list);
8781                lpfc_nvmet_buf_free(phba, sglq_entry->virt, sglq_entry->phys);
8782                kfree(sglq_entry);
8783        }
8784
8785        /* Update the nvmet_xri_cnt to reflect no current sgls.
8786         * The next initialization cycle sets the count and allocates
8787         * the sgls over again.
8788         */
8789        phba->sli4_hba.nvmet_xri_cnt = 0;
8790}
8791
8792/**
8793 * lpfc_init_active_sgl_array - Allocate the buf to track active ELS XRIs.
8794 * @phba: pointer to lpfc hba data structure.
8795 *
8796 * This routine is invoked to allocate the driver's active sgl memory.
8797 * This array will hold the sglq_entry's for active IOs.
8798 **/
8799static int
8800lpfc_init_active_sgl_array(struct lpfc_hba *phba)
8801{
8802        int size;
8803        size = sizeof(struct lpfc_sglq *);
8804        size *= phba->sli4_hba.max_cfg_param.max_xri;
8805
8806        phba->sli4_hba.lpfc_sglq_active_list =
8807                kzalloc(size, GFP_KERNEL);
8808        if (!phba->sli4_hba.lpfc_sglq_active_list)
8809                return -ENOMEM;
8810        return 0;
8811}
8812
8813/**
8814 * lpfc_free_active_sgl - Free the buf that tracks active ELS XRIs.
8815 * @phba: pointer to lpfc hba data structure.
8816 *
8817 * This routine is invoked to walk through the array of active sglq entries
8818 * and free all of the resources.
8819 * This is just a placeholder for now.
8820 **/
8821static void
8822lpfc_free_active_sgl(struct lpfc_hba *phba)
8823{
8824        kfree(phba->sli4_hba.lpfc_sglq_active_list);
8825}
8826
8827/**
8828 * lpfc_init_sgl_list - Allocate and initialize sgl list.
8829 * @phba: pointer to lpfc hba data structure.
8830 *
8831 * This routine is invoked to allocate and initialize the driver's sgl
8832 * list and set up the sgl xritag array accordingly.
8833 *
8834 **/
8835static void
8836lpfc_init_sgl_list(struct lpfc_hba *phba)
8837{
8838        /* Initialize and populate the sglq list per host/VF. */
8839        INIT_LIST_HEAD(&phba->sli4_hba.lpfc_els_sgl_list);
8840        INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_els_sgl_list);
8841        INIT_LIST_HEAD(&phba->sli4_hba.lpfc_nvmet_sgl_list);
8842        INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
8843
8844        /* els xri-sgl book keeping */
8845        phba->sli4_hba.els_xri_cnt = 0;
8846
8847        /* nvme xri-buffer book keeping */
8848        phba->sli4_hba.io_xri_cnt = 0;
8849}
8850
8851/**
8852 * lpfc_sli4_init_rpi_hdrs - Post the rpi header memory region to the port
8853 * @phba: pointer to lpfc hba data structure.
8854 *
8855 * This routine is invoked to post rpi header templates to the
8856 * port for those SLI4 ports that do not support extents.  This routine
8857 * posts a PAGE_SIZE memory region to the port to hold up to
8858 * PAGE_SIZE / 64 rpi context headers.  This is an initialization routine
8859 * and should be called only when interrupts are disabled.
8860 *
8861 * Return codes
8862 *      0 - successful
8863 *      -ERROR - otherwise.
8864 **/
8865int
8866lpfc_sli4_init_rpi_hdrs(struct lpfc_hba *phba)
8867{
8868        int rc = 0;
8869        struct lpfc_rpi_hdr *rpi_hdr;
8870
8871        INIT_LIST_HEAD(&phba->sli4_hba.lpfc_rpi_hdr_list);
8872        if (!phba->sli4_hba.rpi_hdrs_in_use)
8873                return rc;
8874        if (phba->sli4_hba.extents_in_use)
8875                return -EIO;
8876
8877        rpi_hdr = lpfc_sli4_create_rpi_hdr(phba);
8878        if (!rpi_hdr) {
8879                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8880                                "0391 Error during rpi post operation\n");
8881                lpfc_sli4_remove_rpis(phba);
8882                rc = -ENODEV;
8883        }
8884
8885        return rc;
8886}
8887
8888/**
8889 * lpfc_sli4_create_rpi_hdr - Allocate an rpi header memory region
8890 * @phba: pointer to lpfc hba data structure.
8891 *
8892 * This routine is invoked to allocate a single 4KB memory region to
8893 * support rpis and stores them in the phba.  This single region
8894 * provides support for up to 64 rpis.  The region is used globally
8895 * by the device.
8896 *
8897 * Returns:
8898 *   A valid rpi hdr on success.
8899 *   A NULL pointer on any failure.
8900 **/
8901struct lpfc_rpi_hdr *
8902lpfc_sli4_create_rpi_hdr(struct lpfc_hba *phba)
8903{
8904        uint16_t rpi_limit, curr_rpi_range;
8905        struct lpfc_dmabuf *dmabuf;
8906        struct lpfc_rpi_hdr *rpi_hdr;
8907
8908        /*
8909         * If the SLI4 port supports extents, posting the rpi header isn't
8910         * required.  Set the expected maximum count and let the actual value
8911         * get set when extents are fully allocated.
8912         */
8913        if (!phba->sli4_hba.rpi_hdrs_in_use)
8914                return NULL;
8915        if (phba->sli4_hba.extents_in_use)
8916                return NULL;
8917
8918        /* The limit on the logical index is just the max_rpi count. */
8919        rpi_limit = phba->sli4_hba.max_cfg_param.max_rpi;
8920
8921        spin_lock_irq(&phba->hbalock);
8922        /*
8923         * Establish the starting RPI in this header block.  The starting
8924         * rpi is normalized to a zero base because the physical rpi is
8925         * port based.
8926         */
8927        curr_rpi_range = phba->sli4_hba.next_rpi;
8928        spin_unlock_irq(&phba->hbalock);
8929
8930        /* Reached full RPI range */
8931        if (curr_rpi_range == rpi_limit)
8932                return NULL;
8933
8934        /*
8935         * First allocate the protocol header region for the port.  The
8936         * port expects a 4KB DMA-mapped memory region that is 4K aligned.
8937         */
8938        dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
8939        if (!dmabuf)
8940                return NULL;
8941
8942        dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
8943                                          LPFC_HDR_TEMPLATE_SIZE,
8944                                          &dmabuf->phys, GFP_KERNEL);
8945        if (!dmabuf->virt) {
8946                rpi_hdr = NULL;
8947                goto err_free_dmabuf;
8948        }
8949
8950        if (!IS_ALIGNED(dmabuf->phys, LPFC_HDR_TEMPLATE_SIZE)) {
8951                rpi_hdr = NULL;
8952                goto err_free_coherent;
8953        }
8954
8955        /* Save the rpi header data for cleanup later. */
8956        rpi_hdr = kzalloc(sizeof(struct lpfc_rpi_hdr), GFP_KERNEL);
8957        if (!rpi_hdr)
8958                goto err_free_coherent;
8959
8960        rpi_hdr->dmabuf = dmabuf;
8961        rpi_hdr->len = LPFC_HDR_TEMPLATE_SIZE;
8962        rpi_hdr->page_count = 1;
8963        spin_lock_irq(&phba->hbalock);
8964
8965        /* The rpi_hdr stores the logical index only. */
8966        rpi_hdr->start_rpi = curr_rpi_range;
8967        rpi_hdr->next_rpi = phba->sli4_hba.next_rpi + LPFC_RPI_HDR_COUNT;
8968        list_add_tail(&rpi_hdr->list, &phba->sli4_hba.lpfc_rpi_hdr_list);
8969
8970        spin_unlock_irq(&phba->hbalock);
8971        return rpi_hdr;
8972
8973 err_free_coherent:
8974        dma_free_coherent(&phba->pcidev->dev, LPFC_HDR_TEMPLATE_SIZE,
8975                          dmabuf->virt, dmabuf->phys);
8976 err_free_dmabuf:
8977        kfree(dmabuf);
8978        return NULL;
8979}
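
/*
 * Sizing note (illustrative): one LPFC_HDR_TEMPLATE_SIZE (4KB) region is
 * shared by LPFC_RPI_HDR_COUNT rpis, so with the 64-rpi count noted in the
 * kernel-doc above each rpi context gets 4096 / 64 = 64 bytes.
 */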
8980
8981/**
8982 * lpfc_sli4_remove_rpi_hdrs - Remove all rpi header memory regions
8983 * @phba: pointer to lpfc hba data structure.
8984 *
8985 * This routine is invoked to remove all memory resources allocated
8986 * to support rpis for SLI4 ports not supporting extents. This routine
8987 * presumes the caller has released all rpis consumed by fabric or port
8988 * logins and is prepared to have the header pages removed.
8989 **/
8990void
8991lpfc_sli4_remove_rpi_hdrs(struct lpfc_hba *phba)
8992{
8993        struct lpfc_rpi_hdr *rpi_hdr, *next_rpi_hdr;
8994
8995        if (!phba->sli4_hba.rpi_hdrs_in_use)
8996                goto exit;
8997
8998        list_for_each_entry_safe(rpi_hdr, next_rpi_hdr,
8999                                 &phba->sli4_hba.lpfc_rpi_hdr_list, list) {
9000                list_del(&rpi_hdr->list);
9001                dma_free_coherent(&phba->pcidev->dev, rpi_hdr->len,
9002                                  rpi_hdr->dmabuf->virt, rpi_hdr->dmabuf->phys);
9003                kfree(rpi_hdr->dmabuf);
9004                kfree(rpi_hdr);
9005        }
9006 exit:
9007        /* There are no rpis available to the port now. */
9008        phba->sli4_hba.next_rpi = 0;
9009}
9010
9011/**
9012 * lpfc_hba_alloc - Allocate driver hba data structure for a device.
9013 * @pdev: pointer to pci device data structure.
9014 *
9015 * This routine is invoked to allocate the driver hba data structure for an
9016 * HBA device. If the allocation is successful, the phba reference to the
9017 * PCI device data structure is set.
9018 *
9019 * Return codes
9020 *      pointer to @phba - successful
9021 *      NULL - error
9022 **/
9023static struct lpfc_hba *
9024lpfc_hba_alloc(struct pci_dev *pdev)
9025{
9026        struct lpfc_hba *phba;
9027
9028        /* Allocate memory for HBA structure */
9029        phba = kzalloc(sizeof(struct lpfc_hba), GFP_KERNEL);
9030        if (!phba) {
9031                dev_err(&pdev->dev, "failed to allocate hba struct\n");
9032                return NULL;
9033        }
9034
9035        /* Set reference to PCI device in HBA structure */
9036        phba->pcidev = pdev;
9037
9038        /* Assign an unused board number */
9039        phba->brd_no = lpfc_get_instance();
9040        if (phba->brd_no < 0) {
9041                kfree(phba);
9042                return NULL;
9043        }
9044        phba->eratt_poll_interval = LPFC_ERATT_POLL_INTERVAL;
9045
9046        spin_lock_init(&phba->ct_ev_lock);
9047        INIT_LIST_HEAD(&phba->ct_ev_waiters);
9048
9049        return phba;
9050}
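
/*
 * Pairing note: the board number assigned above via lpfc_get_instance()
 * comes from the driver's lpfc_hba_index idr and is released with
 * idr_remove() in lpfc_hba_free() below, so the two routines must stay
 * matched.
 */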
9051
9052/**
9053 * lpfc_hba_free - Free driver hba data structure with a device.
9054 * @phba: pointer to lpfc hba data structure.
9055 *
9056 * This routine is invoked to free the driver hba data structure with an
9057 * HBA device.
9058 **/
9059static void
9060lpfc_hba_free(struct lpfc_hba *phba)
9061{
9062        if (phba->sli_rev == LPFC_SLI_REV4)
9063                kfree(phba->sli4_hba.hdwq);
9064
9065        /* Release the driver assigned board number */
9066        idr_remove(&lpfc_hba_index, phba->brd_no);
9067
9068        /* Free memory allocated with sli3 rings */
9069        kfree(phba->sli.sli3_ring);
9070        phba->sli.sli3_ring = NULL;
9071
9072        kfree(phba);
9073        return;
9074}
9075
9076/**
9077 * lpfc_setup_fdmi_mask - Setup initial FDMI mask for HBA and Port attributes
9078 * @vport: pointer to lpfc vport data structure.
9079 *
9080 * This routine will set up the initial FDMI attribute masks for
9081 * FDMI2 or SmartSAN depending on module parameters. The driver will attempt
9082 * to get these attributes first before falling back; the attribute
9083 * fallback hierarchy is SmartSAN -> FDMI2 -> FDMI1.
9084 **/
9085void
9086lpfc_setup_fdmi_mask(struct lpfc_vport *vport)
9087{
9088        struct lpfc_hba *phba = vport->phba;
9089
9090        vport->load_flag |= FC_ALLOW_FDMI;
9091        if (phba->cfg_enable_SmartSAN ||
9092            phba->cfg_fdmi_on == LPFC_FDMI_SUPPORT) {
9093                /* Setup appropriate attribute masks */
9094                vport->fdmi_hba_mask = LPFC_FDMI2_HBA_ATTR;
9095                if (phba->cfg_enable_SmartSAN)
9096                        vport->fdmi_port_mask = LPFC_FDMI2_SMART_ATTR;
9097                else
9098                        vport->fdmi_port_mask = LPFC_FDMI2_PORT_ATTR;
9099        }
9100
9101        lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
9102                        "6077 Setup FDMI mask: hba x%x port x%x\n",
9103                        vport->fdmi_hba_mask, vport->fdmi_port_mask);
9104}
9105
9106/**
9107 * lpfc_create_shost - Create hba physical port with associated scsi host.
9108 * @phba: pointer to lpfc hba data structure.
9109 *
9110 * This routine is invoked to create HBA physical port and associate a SCSI
9111 * host with it.
9112 *
9113 * Return codes
9114 *      0 - successful
9115 *      other values - error
9116 **/
9117static int
9118lpfc_create_shost(struct lpfc_hba *phba)
9119{
9120        struct lpfc_vport *vport;
9121        struct Scsi_Host  *shost;
9122
9123        /* Initialize HBA FC structure */
9124        phba->fc_edtov = FF_DEF_EDTOV;
9125        phba->fc_ratov = FF_DEF_RATOV;
9126        phba->fc_altov = FF_DEF_ALTOV;
9127        phba->fc_arbtov = FF_DEF_ARBTOV;
9128
9129        atomic_set(&phba->sdev_cnt, 0);
9130        vport = lpfc_create_port(phba, phba->brd_no, &phba->pcidev->dev);
9131        if (!vport)
9132                return -ENODEV;
9133
9134        shost = lpfc_shost_from_vport(vport);
9135        phba->pport = vport;
9136
9137        if (phba->nvmet_support) {
9138                /* Only 1 vport (pport) will support NVME target */
9139                phba->targetport = NULL;
9140                phba->cfg_enable_fc4_type = LPFC_ENABLE_NVME;
9141                lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_NVME_DISC,
9142                                "6076 NVME Target Found\n");
9143        }
9144
9145        lpfc_debugfs_initialize(vport);
9146        /* Put reference to SCSI host to driver's device private data */
9147        pci_set_drvdata(phba->pcidev, shost);
9148
9149        lpfc_setup_fdmi_mask(vport);
9150
9151        /*
9152         * At this point we are fully registered with PSA. In addition,
9153         * any initial discovery should be completed.
9154         */
9155        return 0;
9156}
9157
9158/**
9159 * lpfc_destroy_shost - Destroy hba physical port with associated scsi host.
9160 * @phba: pointer to lpfc hba data structure.
9161 *
9162 * This routine is invoked to destroy the HBA physical port and the associated
9163 * SCSI host.
9164 **/
9165static void
9166lpfc_destroy_shost(struct lpfc_hba *phba)
9167{
9168        struct lpfc_vport *vport = phba->pport;
9169
9170        /* Destroy the physical port associated with the SCSI host */
9171        destroy_port(vport);
9172
9173        return;
9174}
9175
9176/**
9177 * lpfc_setup_bg - Setup Block guard structures and debug areas.
9178 * @phba: pointer to lpfc hba data structure.
9179 * @shost: the shost to be used to detect Block guard settings.
9180 *
9181 * This routine sets up the local Block guard protocol settings for @shost.
9182 * This routine also allocates memory for debugging bg buffers.
9183 **/
9184static void
9185lpfc_setup_bg(struct lpfc_hba *phba, struct Scsi_Host *shost)
9186{
9187        uint32_t old_mask;
9188        uint32_t old_guard;
9189
9190        if (phba->cfg_prot_mask && phba->cfg_prot_guard) {
9191                lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
9192                                "1478 Registering BlockGuard with the "
9193                                "SCSI layer\n");
9194
9195                old_mask = phba->cfg_prot_mask;
9196                old_guard = phba->cfg_prot_guard;
9197
9198                /* Only allow supported values */
9199                phba->cfg_prot_mask &= (SHOST_DIF_TYPE1_PROTECTION |
9200                        SHOST_DIX_TYPE0_PROTECTION |
9201                        SHOST_DIX_TYPE1_PROTECTION);
9202                phba->cfg_prot_guard &= (SHOST_DIX_GUARD_IP |
9203                                         SHOST_DIX_GUARD_CRC);
9204
9205                /* DIF Type 1 protection for profiles AST1/C1 is end to end */
9206                if (phba->cfg_prot_mask == SHOST_DIX_TYPE1_PROTECTION)
9207                        phba->cfg_prot_mask |= SHOST_DIF_TYPE1_PROTECTION;
9208
9209                if (phba->cfg_prot_mask && phba->cfg_prot_guard) {
9210                        if ((old_mask != phba->cfg_prot_mask) ||
9211                                (old_guard != phba->cfg_prot_guard))
9212                                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9213                                        "1475 Registering BlockGuard with the "
9214                                        "SCSI layer: mask %d  guard %d\n",
9215                                        phba->cfg_prot_mask,
9216                                        phba->cfg_prot_guard);
9217
9218                        scsi_host_set_prot(shost, phba->cfg_prot_mask);
9219                        scsi_host_set_guard(shost, phba->cfg_prot_guard);
9220                } else
9221                        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9222                                "1479 Not Registering BlockGuard with the SCSI "
9223                                "layer, Bad protection parameters: %d %d\n",
9224                                old_mask, old_guard);
9225        }
9226}
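
/*
 * Editor's note: scsi_host_set_prot() and scsi_host_set_guard() are the
 * standard midlayer hooks for advertising T10 PI support. A minimal usage
 * sketch for a host offering end-to-end Type 1 DIF with CRC guard tags:
 *
 *	scsi_host_set_prot(shost, SHOST_DIF_TYPE1_PROTECTION |
 *				  SHOST_DIX_TYPE1_PROTECTION);
 *	scsi_host_set_guard(shost, SHOST_DIX_GUARD_CRC);
 */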
9227
9228/**
9229 * lpfc_post_init_setup - Perform necessary device post initialization setup.
9230 * @phba: pointer to lpfc hba data structure.
9231 *
9232 * This routine is invoked to perform all the necessary post initialization
9233 * setup for the device.
9234 **/
9235static void
9236lpfc_post_init_setup(struct lpfc_hba *phba)
9237{
9238        struct Scsi_Host  *shost;
9239        struct lpfc_adapter_event_header adapter_event;
9240
9241        /* Get the default values for Model Name and Description */
9242        lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);
9243
9244        /*
9245         * hba setup may have changed the hba_queue_depth so we need to
9246         * adjust the value of can_queue.
9247         */
9248        shost = pci_get_drvdata(phba->pcidev);
9249        shost->can_queue = phba->cfg_hba_queue_depth - 10;
9250
9251        lpfc_host_attrib_init(shost);
9252
9253        if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
9254                spin_lock_irq(shost->host_lock);
9255                lpfc_poll_start_timer(phba);
9256                spin_unlock_irq(shost->host_lock);
9257        }
9258
9259        lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
9260                        "0428 Perform SCSI scan\n");
9261        /* Send board arrival event to upper layer */
9262        adapter_event.event_type = FC_REG_ADAPTER_EVENT;
9263        adapter_event.subcategory = LPFC_EVENT_ARRIVAL;
9264        fc_host_post_vendor_event(shost, fc_get_event_number(),
9265                                  sizeof(adapter_event),
9266                                  (char *) &adapter_event,
9267                                  LPFC_NL_VENDOR_ID);
9268        return;
9269}
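
/*
 * Editor's note: fc_host_post_vendor_event() hands the arrival event to
 * the FC transport class, which broadcasts it to userspace over the SCSI
 * transport netlink channel; LPFC_NL_VENDOR_ID lets listeners filter for
 * lpfc-originated events.
 */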
9270
9271/**
9272 * lpfc_sli_pci_mem_setup - Setup SLI3 HBA PCI memory space.
9273 * @phba: pointer to lpfc hba data structure.
9274 *
9275 * This routine is invoked to set up the PCI device memory space for device
9276 * with SLI-3 interface spec.
9277 *
9278 * Return codes
9279 *      0 - successful
9280 *      other values - error
9281 **/
9282static int
9283lpfc_sli_pci_mem_setup(struct lpfc_hba *phba)
9284{
9285        struct pci_dev *pdev = phba->pcidev;
9286        unsigned long bar0map_len, bar2map_len;
9287        int i, hbq_count;
9288        void *ptr;
9289        int error;
9290
9291        if (!pdev)
9292                return -ENODEV;
9293
9294        /* Set the device DMA mask size */
9295        error = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
9296        if (error)
9297                error = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
9298        if (error)
9299                return error;
9300        error = -ENODEV;
9301
9302        /* Get the bus address of Bar0 and Bar2 and the number of bytes
9303         * required by each mapping.
9304         */
9305        phba->pci_bar0_map = pci_resource_start(pdev, 0);
9306        bar0map_len = pci_resource_len(pdev, 0);
9307
9308        phba->pci_bar2_map = pci_resource_start(pdev, 2);
9309        bar2map_len = pci_resource_len(pdev, 2);
9310
9311        /* Map HBA SLIM to a kernel virtual address. */
9312        phba->slim_memmap_p = ioremap(phba->pci_bar0_map, bar0map_len);
9313        if (!phba->slim_memmap_p) {
9314                dev_printk(KERN_ERR, &pdev->dev,
9315                           "ioremap failed for SLIM memory.\n");
9316                goto out;
9317        }
9318
9319        /* Map HBA Control Registers to a kernel virtual address. */
9320        phba->ctrl_regs_memmap_p = ioremap(phba->pci_bar2_map, bar2map_len);
9321        if (!phba->ctrl_regs_memmap_p) {
9322                dev_printk(KERN_ERR, &pdev->dev,
9323                           "ioremap failed for HBA control registers.\n");
9324                goto out_iounmap_slim;
9325        }
9326
9327        /* Allocate memory for SLI-2 structures */
9328        phba->slim2p.virt = dma_alloc_coherent(&pdev->dev, SLI2_SLIM_SIZE,
9329                                               &phba->slim2p.phys, GFP_KERNEL);
9330        if (!phba->slim2p.virt)
9331                goto out_iounmap;
9332
9333        phba->mbox = phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, mbx);
9334        phba->mbox_ext = (phba->slim2p.virt +
9335                offsetof(struct lpfc_sli2_slim, mbx_ext_words));
9336        phba->pcb = (phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, pcb));
9337        phba->IOCBs = (phba->slim2p.virt +
9338                       offsetof(struct lpfc_sli2_slim, IOCBs));
9339
9340        phba->hbqslimp.virt = dma_alloc_coherent(&pdev->dev,
9341                                                 lpfc_sli_hbq_size(),
9342                                                 &phba->hbqslimp.phys,
9343                                                 GFP_KERNEL);
9344        if (!phba->hbqslimp.virt)
9345                goto out_free_slim;
9346
9347        hbq_count = lpfc_sli_hbq_count();
9348        ptr = phba->hbqslimp.virt;
9349        for (i = 0; i < hbq_count; ++i) {
9350                phba->hbqs[i].hbq_virt = ptr;
9351                INIT_LIST_HEAD(&phba->hbqs[i].hbq_buffer_list);
9352                ptr += (lpfc_hbq_defs[i]->entry_count *
9353                        sizeof(struct lpfc_hbq_entry));
9354        }
9355        phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_els_hbq_alloc;
9356        phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_els_hbq_free;
9357
9358        memset(phba->hbqslimp.virt, 0, lpfc_sli_hbq_size());
9359
9360        phba->MBslimaddr = phba->slim_memmap_p;
9361        phba->HAregaddr = phba->ctrl_regs_memmap_p + HA_REG_OFFSET;
9362        phba->CAregaddr = phba->ctrl_regs_memmap_p + CA_REG_OFFSET;
9363        phba->HSregaddr = phba->ctrl_regs_memmap_p + HS_REG_OFFSET;
9364        phba->HCregaddr = phba->ctrl_regs_memmap_p + HC_REG_OFFSET;
9365
9366        return 0;
9367
9368out_free_slim:
9369        dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
9370                          phba->slim2p.virt, phba->slim2p.phys);
9371out_iounmap:
9372        iounmap(phba->ctrl_regs_memmap_p);
9373out_iounmap_slim:
9374        iounmap(phba->slim_memmap_p);
9375out:
9376        return error;
9377}
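
/*
 * Editor's note: the mask setup above is the stock "prefer 64-bit DMA,
 * fall back to 32-bit" idiom, equivalent to the sketch below (the real
 * code returns the errno from the failing call). Note also that the
 * unwind labels run in exact reverse order of the allocations, so each
 * failure point jumps to the label that frees everything allocated
 * before it.
 *
 *	if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) &&
 *	    dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)))
 *		return -EIO;	// no usable DMA addressing
 */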
9378
9379/**
9380 * lpfc_sli_pci_mem_unset - Unset SLI3 HBA PCI memory space.
9381 * @phba: pointer to lpfc hba data structure.
9382 *
9383 * This routine is invoked to unset the PCI device memory space for device
9384 * with SLI-3 interface spec.
9385 **/
9386static void
9387lpfc_sli_pci_mem_unset(struct lpfc_hba *phba)
9388{
9389        struct pci_dev *pdev;
9390
9391        /* Obtain PCI device reference */
9392        if (!phba->pcidev)
9393                return;
9394        else
9395                pdev = phba->pcidev;
9396
9397        /* Free coherent DMA memory allocated */
9398        dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(),
9399                          phba->hbqslimp.virt, phba->hbqslimp.phys);
9400        dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
9401                          phba->slim2p.virt, phba->slim2p.phys);
9402
9403        /* I/O memory unmap */
9404        iounmap(phba->ctrl_regs_memmap_p);
9405        iounmap(phba->slim_memmap_p);
9406
9407        return;
9408}
9409
9410/**
9411 * lpfc_sli4_post_status_check - Wait for SLI4 POST done and check status
9412 * @phba: pointer to lpfc hba data structure.
9413 *
9414 * This routine is invoked to wait for SLI4 device Power On Self Test (POST)
9415 * done and check status.
9416 *
9417 * Return 0 if successful, otherwise -ENODEV.
9418 **/
9419int
9420lpfc_sli4_post_status_check(struct lpfc_hba *phba)
9421{
9422        struct lpfc_register portsmphr_reg, uerrlo_reg, uerrhi_reg;
9423        struct lpfc_register reg_data;
9424        int i, port_error = 0;
9425        uint32_t if_type;
9426
9427        memset(&portsmphr_reg, 0, sizeof(portsmphr_reg));
9428        memset(&reg_data, 0, sizeof(reg_data));
9429        if (!phba->sli4_hba.PSMPHRregaddr)
9430                return -ENODEV;
9431
9432        /* Wait up to 30 seconds for the SLI Port POST done and ready */
9433        for (i = 0; i < 3000; i++) {
9434                if (lpfc_readl(phba->sli4_hba.PSMPHRregaddr,
9435                        &portsmphr_reg.word0) ||
9436                        (bf_get(lpfc_port_smphr_perr, &portsmphr_reg))) {
9437                        /* Port has a fatal POST error, break out */
9438                        port_error = -ENODEV;
9439                        break;
9440                }
9441                if (LPFC_POST_STAGE_PORT_READY ==
9442                    bf_get(lpfc_port_smphr_port_status, &portsmphr_reg))
9443                        break;
9444                msleep(10);
9445        }
9446
9447        /*
9448         * If there was a port error during POST, then don't proceed with
9449         * other register reads as the data may not be valid.  Just exit.
9450         */
9451        if (port_error) {
9452                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9453                        "1408 Port Failed POST - portsmphr=0x%x, "
9454                        "perr=x%x, sfi=x%x, nip=x%x, ipc=x%x, scr1=x%x, "
9455                        "scr2=x%x, hscratch=x%x, pstatus=x%x\n",
9456                        portsmphr_reg.word0,
9457                        bf_get(lpfc_port_smphr_perr, &portsmphr_reg),
9458                        bf_get(lpfc_port_smphr_sfi, &portsmphr_reg),
9459                        bf_get(lpfc_port_smphr_nip, &portsmphr_reg),
9460                        bf_get(lpfc_port_smphr_ipc, &portsmphr_reg),
9461                        bf_get(lpfc_port_smphr_scr1, &portsmphr_reg),
9462                        bf_get(lpfc_port_smphr_scr2, &portsmphr_reg),
9463                        bf_get(lpfc_port_smphr_host_scratch, &portsmphr_reg),
9464                        bf_get(lpfc_port_smphr_port_status, &portsmphr_reg));
9465        } else {
9466                lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
9467                                "2534 Device Info: SLIFamily=0x%x, "
9468                                "SLIRev=0x%x, IFType=0x%x, SLIHint_1=0x%x, "
9469                                "SLIHint_2=0x%x, FT=0x%x\n",
9470                                bf_get(lpfc_sli_intf_sli_family,
9471                                       &phba->sli4_hba.sli_intf),
9472                                bf_get(lpfc_sli_intf_slirev,
9473                                       &phba->sli4_hba.sli_intf),
9474                                bf_get(lpfc_sli_intf_if_type,
9475                                       &phba->sli4_hba.sli_intf),
9476                                bf_get(lpfc_sli_intf_sli_hint1,
9477                                       &phba->sli4_hba.sli_intf),
9478                                bf_get(lpfc_sli_intf_sli_hint2,
9479                                       &phba->sli4_hba.sli_intf),
9480                                bf_get(lpfc_sli_intf_func_type,
9481                                       &phba->sli4_hba.sli_intf));
9482                /*
9483                 * Check for other Port errors during the initialization
9484                 * process.  Fail the load if the port did not come up
9485                 * correctly.
9486                 */
9487                if_type = bf_get(lpfc_sli_intf_if_type,
9488                                 &phba->sli4_hba.sli_intf);
9489                switch (if_type) {
9490                case LPFC_SLI_INTF_IF_TYPE_0:
9491                        phba->sli4_hba.ue_mask_lo =
9492                              readl(phba->sli4_hba.u.if_type0.UEMASKLOregaddr);
9493                        phba->sli4_hba.ue_mask_hi =
9494                              readl(phba->sli4_hba.u.if_type0.UEMASKHIregaddr);
9495                        uerrlo_reg.word0 =
9496                              readl(phba->sli4_hba.u.if_type0.UERRLOregaddr);
9497                        uerrhi_reg.word0 =
9498                                readl(phba->sli4_hba.u.if_type0.UERRHIregaddr);
9499                        if ((~phba->sli4_hba.ue_mask_lo & uerrlo_reg.word0) ||
9500                            (~phba->sli4_hba.ue_mask_hi & uerrhi_reg.word0)) {
9501                                lpfc_printf_log(phba, KERN_ERR,
9502                                                LOG_TRACE_EVENT,
9503                                                "1422 Unrecoverable Error "
9504                                                "Detected during POST "
9505                                                "uerr_lo_reg=0x%x, "
9506                                                "uerr_hi_reg=0x%x, "
9507                                                "ue_mask_lo_reg=0x%x, "
9508                                                "ue_mask_hi_reg=0x%x\n",
9509                                                uerrlo_reg.word0,
9510                                                uerrhi_reg.word0,
9511                                                phba->sli4_hba.ue_mask_lo,
9512                                                phba->sli4_hba.ue_mask_hi);
9513                                port_error = -ENODEV;
9514                        }
9515                        break;
9516                case LPFC_SLI_INTF_IF_TYPE_2:
9517                case LPFC_SLI_INTF_IF_TYPE_6:
9518                        /* Final checks.  The port status should be clean. */
9519                        if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
9520                                &reg_data.word0) ||
9521                                (bf_get(lpfc_sliport_status_err, &reg_data) &&
9522                                 !bf_get(lpfc_sliport_status_rn, &reg_data))) {
9523                                phba->work_status[0] =
9524                                        readl(phba->sli4_hba.u.if_type2.
9525                                              ERR1regaddr);
9526                                phba->work_status[1] =
9527                                        readl(phba->sli4_hba.u.if_type2.
9528                                              ERR2regaddr);
9529                                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9530                                        "2888 Unrecoverable port error "
9531                                        "following POST: port status reg "
9532                                        "0x%x, port_smphr reg 0x%x, "
9533                                        "error 1=0x%x, error 2=0x%x\n",
9534                                        reg_data.word0,
9535                                        portsmphr_reg.word0,
9536                                        phba->work_status[0],
9537                                        phba->work_status[1]);
9538                                port_error = -ENODEV;
9539                                break;
9540                        }
9541
9542                        if (lpfc_pldv_detect &&
9543                            bf_get(lpfc_sli_intf_sli_family,
9544                                   &phba->sli4_hba.sli_intf) ==
9545                                        LPFC_SLI_INTF_FAMILY_G6)
9546                                pci_write_config_byte(phba->pcidev,
9547                                                      LPFC_SLI_INTF, CFG_PLD);
9548                        break;
9549                case LPFC_SLI_INTF_IF_TYPE_1:
9550                default:
9551                        break;
9552                }
9553        }
9554        return port_error;
9555}
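
/*
 * Editor's note: the POST poll above budgets 3000 iterations x 10 ms,
 * i.e. the 30 seconds quoted in the comment. lpfc_readl() is assumed to
 * be the driver's guarded register read, roughly:
 *
 *	temp = readl(addr);
 *	if (temp == 0xffffffff)		// dead or surprise-removed device
 *		return -EIO;
 *	*data = temp;
 *	return 0;
 */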
9556
9557/**
9558 * lpfc_sli4_bar0_register_memmap - Set up SLI4 BAR0 register memory map.
9559 * @phba: pointer to lpfc hba data structure.
9560 * @if_type:  The SLI4 interface type getting configured.
9561 *
9562 * This routine is invoked to set up SLI4 BAR0 PCI config space register
9563 * memory map.
9564 **/
9565static void
9566lpfc_sli4_bar0_register_memmap(struct lpfc_hba *phba, uint32_t if_type)
9567{
9568        switch (if_type) {
9569        case LPFC_SLI_INTF_IF_TYPE_0:
9570                phba->sli4_hba.u.if_type0.UERRLOregaddr =
9571                        phba->sli4_hba.conf_regs_memmap_p + LPFC_UERR_STATUS_LO;
9572                phba->sli4_hba.u.if_type0.UERRHIregaddr =
9573                        phba->sli4_hba.conf_regs_memmap_p + LPFC_UERR_STATUS_HI;
9574                phba->sli4_hba.u.if_type0.UEMASKLOregaddr =
9575                        phba->sli4_hba.conf_regs_memmap_p + LPFC_UE_MASK_LO;
9576                phba->sli4_hba.u.if_type0.UEMASKHIregaddr =
9577                        phba->sli4_hba.conf_regs_memmap_p + LPFC_UE_MASK_HI;
9578                phba->sli4_hba.SLIINTFregaddr =
9579                        phba->sli4_hba.conf_regs_memmap_p + LPFC_SLI_INTF;
9580                break;
9581        case LPFC_SLI_INTF_IF_TYPE_2:
9582                phba->sli4_hba.u.if_type2.EQDregaddr =
9583                        phba->sli4_hba.conf_regs_memmap_p +
9584                                                LPFC_CTL_PORT_EQ_DELAY_OFFSET;
9585                phba->sli4_hba.u.if_type2.ERR1regaddr =
9586                        phba->sli4_hba.conf_regs_memmap_p +
9587                                                LPFC_CTL_PORT_ER1_OFFSET;
9588                phba->sli4_hba.u.if_type2.ERR2regaddr =
9589                        phba->sli4_hba.conf_regs_memmap_p +
9590                                                LPFC_CTL_PORT_ER2_OFFSET;
9591                phba->sli4_hba.u.if_type2.CTRLregaddr =
9592                        phba->sli4_hba.conf_regs_memmap_p +
9593                                                LPFC_CTL_PORT_CTL_OFFSET;
9594                phba->sli4_hba.u.if_type2.STATUSregaddr =
9595                        phba->sli4_hba.conf_regs_memmap_p +
9596                                                LPFC_CTL_PORT_STA_OFFSET;
9597                phba->sli4_hba.SLIINTFregaddr =
9598                        phba->sli4_hba.conf_regs_memmap_p + LPFC_SLI_INTF;
9599                phba->sli4_hba.PSMPHRregaddr =
9600                        phba->sli4_hba.conf_regs_memmap_p +
9601                                                LPFC_CTL_PORT_SEM_OFFSET;
9602                phba->sli4_hba.RQDBregaddr =
9603                        phba->sli4_hba.conf_regs_memmap_p +
9604                                                LPFC_ULP0_RQ_DOORBELL;
9605                phba->sli4_hba.WQDBregaddr =
9606                        phba->sli4_hba.conf_regs_memmap_p +
9607                                                LPFC_ULP0_WQ_DOORBELL;
9608                phba->sli4_hba.CQDBregaddr =
9609                        phba->sli4_hba.conf_regs_memmap_p + LPFC_EQCQ_DOORBELL;
9610                phba->sli4_hba.EQDBregaddr = phba->sli4_hba.CQDBregaddr;
9611                phba->sli4_hba.MQDBregaddr =
9612                        phba->sli4_hba.conf_regs_memmap_p + LPFC_MQ_DOORBELL;
9613                phba->sli4_hba.BMBXregaddr =
9614                        phba->sli4_hba.conf_regs_memmap_p + LPFC_BMBX;
9615                break;
9616        case LPFC_SLI_INTF_IF_TYPE_6:
9617                phba->sli4_hba.u.if_type2.EQDregaddr =
9618                        phba->sli4_hba.conf_regs_memmap_p +
9619                                                LPFC_CTL_PORT_EQ_DELAY_OFFSET;
9620                phba->sli4_hba.u.if_type2.ERR1regaddr =
9621                        phba->sli4_hba.conf_regs_memmap_p +
9622                                                LPFC_CTL_PORT_ER1_OFFSET;
9623                phba->sli4_hba.u.if_type2.ERR2regaddr =
9624                        phba->sli4_hba.conf_regs_memmap_p +
9625                                                LPFC_CTL_PORT_ER2_OFFSET;
9626                phba->sli4_hba.u.if_type2.CTRLregaddr =
9627                        phba->sli4_hba.conf_regs_memmap_p +
9628                                                LPFC_CTL_PORT_CTL_OFFSET;
9629                phba->sli4_hba.u.if_type2.STATUSregaddr =
9630                        phba->sli4_hba.conf_regs_memmap_p +
9631                                                LPFC_CTL_PORT_STA_OFFSET;
9632                phba->sli4_hba.PSMPHRregaddr =
9633                        phba->sli4_hba.conf_regs_memmap_p +
9634                                                LPFC_CTL_PORT_SEM_OFFSET;
9635                phba->sli4_hba.BMBXregaddr =
9636                        phba->sli4_hba.conf_regs_memmap_p + LPFC_BMBX;
9637                break;
9638        case LPFC_SLI_INTF_IF_TYPE_1:
9639        default:
9640                dev_printk(KERN_ERR, &phba->pcidev->dev,
9641                           "FATAL - unsupported SLI4 interface type - %d\n",
9642                           if_type);
9643                break;
9644        }
9645}
9646
9647/**
9648 * lpfc_sli4_bar1_register_memmap - Set up SLI4 BAR1 register memory map.
9649 * @phba: pointer to lpfc hba data structure.
9650 * @if_type: sli if type to operate on.
9651 *
9652 * This routine is invoked to set up SLI4 BAR1 register memory map.
9653 **/
9654static void
9655lpfc_sli4_bar1_register_memmap(struct lpfc_hba *phba, uint32_t if_type)
9656{
9657        switch (if_type) {
9658        case LPFC_SLI_INTF_IF_TYPE_0:
9659                phba->sli4_hba.PSMPHRregaddr =
9660                        phba->sli4_hba.ctrl_regs_memmap_p +
9661                        LPFC_SLIPORT_IF0_SMPHR;
9662                phba->sli4_hba.ISRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
9663                        LPFC_HST_ISR0;
9664                phba->sli4_hba.IMRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
9665                        LPFC_HST_IMR0;
9666                phba->sli4_hba.ISCRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
9667                        LPFC_HST_ISCR0;
9668                break;
9669        case LPFC_SLI_INTF_IF_TYPE_6:
9670                phba->sli4_hba.RQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p +
9671                        LPFC_IF6_RQ_DOORBELL;
9672                phba->sli4_hba.WQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p +
9673                        LPFC_IF6_WQ_DOORBELL;
9674                phba->sli4_hba.CQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p +
9675                        LPFC_IF6_CQ_DOORBELL;
9676                phba->sli4_hba.EQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p +
9677                        LPFC_IF6_EQ_DOORBELL;
9678                phba->sli4_hba.MQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p +
9679                        LPFC_IF6_MQ_DOORBELL;
9680                break;
9681        case LPFC_SLI_INTF_IF_TYPE_2:
9682        case LPFC_SLI_INTF_IF_TYPE_1:
9683        default:
9684                dev_err(&phba->pcidev->dev,
9685                           "FATAL - unsupported SLI4 interface type - %d\n",
9686                           if_type);
9687                break;
9688        }
9689}
9690
9691/**
9692 * lpfc_sli4_bar2_register_memmap - Set up SLI4 BAR2 register memory map.
9693 * @phba: pointer to lpfc hba data structure.
9694 * @vf: virtual function number
9695 *
9696 * This routine is invoked to set up SLI4 BAR2 doorbell register memory map
9697 * based on the given virtual function number, @vf.
9698 *
9699 * Return 0 if successful, otherwise -ENODEV.
9700 **/
9701static int
9702lpfc_sli4_bar2_register_memmap(struct lpfc_hba *phba, uint32_t vf)
9703{
9704        if (vf > LPFC_VIR_FUNC_MAX)
9705                return -ENODEV;
9706
9707        phba->sli4_hba.RQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
9708                                vf * LPFC_VFR_PAGE_SIZE +
9709                                        LPFC_ULP0_RQ_DOORBELL);
9710        phba->sli4_hba.WQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
9711                                vf * LPFC_VFR_PAGE_SIZE +
9712                                        LPFC_ULP0_WQ_DOORBELL);
9713        phba->sli4_hba.CQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
9714                                vf * LPFC_VFR_PAGE_SIZE +
9715                                        LPFC_EQCQ_DOORBELL);
9716        phba->sli4_hba.EQDBregaddr = phba->sli4_hba.CQDBregaddr;
9717        phba->sli4_hba.MQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
9718                                vf * LPFC_VFR_PAGE_SIZE + LPFC_MQ_DOORBELL);
9719        phba->sli4_hba.BMBXregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
9720                                vf * LPFC_VFR_PAGE_SIZE + LPFC_BMBX);
9721        return 0;
9722}
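
/*
 * Editor's worked example of the BAR2 layout implied above: each virtual
 * function owns one doorbell page of LPFC_VFR_PAGE_SIZE bytes, and every
 * doorbell keeps the same offset within its page. For vf == 2 the RQ
 * doorbell therefore resolves to:
 *
 *	drbl_regs_memmap_p + 2 * LPFC_VFR_PAGE_SIZE + LPFC_ULP0_RQ_DOORBELL
 */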
9723
9724/**
9725 * lpfc_create_bootstrap_mbox - Create the bootstrap mailbox
9726 * @phba: pointer to lpfc hba data structure.
9727 *
9728 * This routine is invoked to create the bootstrap mailbox
9729 * region consistent with the SLI-4 interface spec.  This
9730 * routine allocates all memory necessary to communicate
9731 * mailbox commands to the port and sets up all alignment
9732 * needs.  No locks are expected to be held when calling
9733 * this routine.
9734 *
9735 * Return codes
9736 *      0 - successful
9737 *      -ENOMEM - could not allocate memory.
9738 **/
9739static int
9740lpfc_create_bootstrap_mbox(struct lpfc_hba *phba)
9741{
9742        uint32_t bmbx_size;
9743        struct lpfc_dmabuf *dmabuf;
9744        struct dma_address *dma_address;
9745        uint32_t pa_addr;
9746        uint64_t phys_addr;
9747
9748        dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
9749        if (!dmabuf)
9750                return -ENOMEM;
9751
9752        /*
9753         * The bootstrap mailbox region consists of 2 parts
9754         * plus an alignment restriction of 16 bytes.
9755         */
9756        bmbx_size = sizeof(struct lpfc_bmbx_create) + (LPFC_ALIGN_16_BYTE - 1);
9757        dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, bmbx_size,
9758                                          &dmabuf->phys, GFP_KERNEL);
9759        if (!dmabuf->virt) {
9760                kfree(dmabuf);
9761                return -ENOMEM;
9762        }
9763
9764        /*
9765         * Initialize the bootstrap mailbox pointers now so that the register
9766         * operations are simple later.  The mailbox dma address is required
9767         * to be 16-byte aligned.  Also align the virtual memory as each
9768         * mailbox is copied into the bmbx mailbox region before issuing the
9769         * command to the port.
9770         */
9771        phba->sli4_hba.bmbx.dmabuf = dmabuf;
9772        phba->sli4_hba.bmbx.bmbx_size = bmbx_size;
9773
9774        phba->sli4_hba.bmbx.avirt = PTR_ALIGN(dmabuf->virt,
9775                                              LPFC_ALIGN_16_BYTE);
9776        phba->sli4_hba.bmbx.aphys = ALIGN(dmabuf->phys,
9777                                              LPFC_ALIGN_16_BYTE);
9778
9779        /*
9780         * Set the high and low physical addresses now.  The SLI4 alignment
9781         * requirement is 16 bytes and the mailbox is posted to the port
9782         * as two 30-bit addresses.  The other data is a bit marking whether
9783         * the 30-bit address is the high or low address.
9784         * Upcast bmbx aphys to 64 bits so the shift instruction compiles
9785         * cleanly on 32-bit machines.
9786         */
9787        dma_address = &phba->sli4_hba.bmbx.dma_address;
9788        phys_addr = (uint64_t)phba->sli4_hba.bmbx.aphys;
9789        pa_addr = (uint32_t) ((phys_addr >> 34) & 0x3fffffff);
9790        dma_address->addr_hi = (uint32_t) ((pa_addr << 2) |
9791                                           LPFC_BMBX_BIT1_ADDR_HI);
9792
9793        pa_addr = (uint32_t) ((phba->sli4_hba.bmbx.aphys >> 4) & 0x3fffffff);
9794        dma_address->addr_lo = (uint32_t) ((pa_addr << 2) |
9795                                           LPFC_BMBX_BIT1_ADDR_LO);
9796        return 0;
9797}
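
/*
 * Editor's worked example of the packing above: for a 16-byte aligned
 * physical address, bits [33:4] are posted as the low 30-bit address and
 * bits [63:34] as the high 30-bit address, each shifted left by 2 to
 * make room for the hi/lo marker bits. With a hypothetical aphys of
 * 0x0000001234567890ULL (16-byte aligned):
 *
 *	hi30 = (0x1234567890ULL >> 34) & 0x3fffffff;	// 0x00000004
 *	lo30 = (0x1234567890ULL >> 4)  & 0x3fffffff;	// 0x23456789
 */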
9798
9799/**
9800 * lpfc_destroy_bootstrap_mbox - Destroy all bootstrap mailbox resources
9801 * @phba: pointer to lpfc hba data structure.
9802 *
9803 * This routine is invoked to tear down the bootstrap mailbox
9804 * region and release all host resources. This routine requires
9805 * the caller to ensure all mailbox commands have been recovered, no
9806 * additional mailbox commands are sent, and interrupts are disabled
9807 * before calling this routine.
9808 *
9809 **/
9810static void
9811lpfc_destroy_bootstrap_mbox(struct lpfc_hba *phba)
9812{
9813        dma_free_coherent(&phba->pcidev->dev,
9814                          phba->sli4_hba.bmbx.bmbx_size,
9815                          phba->sli4_hba.bmbx.dmabuf->virt,
9816                          phba->sli4_hba.bmbx.dmabuf->phys);
9817
9818        kfree(phba->sli4_hba.bmbx.dmabuf);
9819        memset(&phba->sli4_hba.bmbx, 0, sizeof(struct lpfc_bmbx));
9820}
9821
9822static const char * const lpfc_topo_to_str[] = {
9823        "Loop then P2P",
9824        "Loopback",
9825        "P2P Only",
9826        "Unsupported",
9827        "Loop Only",
9828        "Unsupported",
9829        "P2P then Loop",
9830};
9831
9832#define LINK_FLAGS_DEF  0x0
9833#define LINK_FLAGS_P2P  0x1
9834#define LINK_FLAGS_LOOP 0x2
9835/**
9836 * lpfc_map_topology - Map the topology read from READ_CONFIG
9837 * @phba: pointer to lpfc hba data structure.
9838 * @rd_config: pointer to read config data
9839 *
9840 * This routine is invoked to map the topology values as read
9841 * from the read config mailbox command. If the persistent
9842 * topology feature is supported, the firmware will provide the
9843 * saved topology information to be used in INIT_LINK
9844 **/
9845static void
9846lpfc_map_topology(struct lpfc_hba *phba, struct lpfc_mbx_read_config *rd_config)
9847{
9848        u8 ptv, tf, pt;
9849
9850        ptv = bf_get(lpfc_mbx_rd_conf_ptv, rd_config);
9851        tf = bf_get(lpfc_mbx_rd_conf_tf, rd_config);
9852        pt = bf_get(lpfc_mbx_rd_conf_pt, rd_config);
9853
9854        lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
9855                        "2027 Read Config Data: ptv:0x%x, tf:0x%x, pt:0x%x",
9856                         ptv, tf, pt);
9857        if (!ptv) {
9858                lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
9859                                "2019 FW does not support persistent topology, "
9860                                "using driver parameter defined value [%s]",
9861                                lpfc_topo_to_str[phba->cfg_topology]);
9862                return;
9863        }
9864        /* FW supports persistent topology - override module parameter value */
9865        phba->hba_flag |= HBA_PERSISTENT_TOPO;
9866
9867        /* if ASIC_GEN_NUM >= 0xC */
9868        if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
9869                    LPFC_SLI_INTF_IF_TYPE_6) ||
9870            (bf_get(lpfc_sli_intf_sli_family, &phba->sli4_hba.sli_intf) ==
9871                    LPFC_SLI_INTF_FAMILY_G6)) {
9872                if (!tf) {
9873                        phba->cfg_topology = ((pt == LINK_FLAGS_LOOP)
9874                                        ? FLAGS_TOPOLOGY_MODE_LOOP
9875                                        : FLAGS_TOPOLOGY_MODE_PT_PT);
9876                } else {
9877                        phba->hba_flag &= ~HBA_PERSISTENT_TOPO;
9878                }
9879        } else { /* G5 */
9880                if (tf) {
9881                        /* If topology failover set - pt is '0' or '1' */
9882                        phba->cfg_topology = (pt ? FLAGS_TOPOLOGY_MODE_PT_LOOP :
9883                                              FLAGS_TOPOLOGY_MODE_LOOP_PT);
9884                } else {
9885                        phba->cfg_topology = ((pt == LINK_FLAGS_P2P)
9886                                        ? FLAGS_TOPOLOGY_MODE_PT_PT
9887                                        : FLAGS_TOPOLOGY_MODE_LOOP);
9888                }
9889        }
9890        if (phba->hba_flag & HBA_PERSISTENT_TOPO) {
9891                lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
9892                                "2020 Using persistent topology value [%s]",
9893                                lpfc_topo_to_str[phba->cfg_topology]);
9894        } else {
9895                lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
9896                                "2021 Invalid topology values from FW, "
9897                                "using driver parameter defined value [%s]",
9898                                lpfc_topo_to_str[phba->cfg_topology]);
9899        }
9900}
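
/*
 * Editor's summary of the mapping above (applies only when ptv == 1):
 *
 *	ASIC/if_type		tf	pt		cfg_topology
 *	G6 or if_type 6		0	P2P		PT_PT
 *	G6 or if_type 6		0	LOOP		LOOP
 *	G6 or if_type 6		1	-		persistent topo cleared
 *	G5			1	1		PT_LOOP
 *	G5			1	0		LOOP_PT
 *	G5			0	P2P		PT_PT
 *	G5			0	other		LOOP
 */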
9901
9902/**
9903 * lpfc_sli4_read_config - Get the config parameters.
9904 * @phba: pointer to lpfc hba data structure.
9905 *
9906 * This routine is invoked to read the configuration parameters from the HBA.
9907 * The configuration parameters are used to set the base and maximum values
9908 * for RPI's XRI's VPI's VFI's and FCFIs. These values also affect the resource
9909 * allocation for the port.
9910 *
9911 * Return codes
9912 *      0 - successful
9913 *      -ENOMEM - No available memory
9914 *      -EIO - The mailbox failed to complete successfully.
9915 **/
9916int
9917lpfc_sli4_read_config(struct lpfc_hba *phba)
9918{
9919        LPFC_MBOXQ_t *pmb;
9920        struct lpfc_mbx_read_config *rd_config;
9921        union  lpfc_sli4_cfg_shdr *shdr;
9922        uint32_t shdr_status, shdr_add_status;
9923        struct lpfc_mbx_get_func_cfg *get_func_cfg;
9924        struct lpfc_rsrc_desc_fcfcoe *desc;
9925        char *pdesc_0;
9926        uint16_t forced_link_speed;
9927        uint32_t if_type, qmin, fawwpn;
9928        int length, i, rc = 0, rc2;
9929
9930        pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
9931        if (!pmb) {
9932                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9933                                "2011 Unable to allocate memory for issuing "
9934                                "SLI_CONFIG_SPECIAL mailbox command\n");
9935                return -ENOMEM;
9936        }
9937
9938        lpfc_read_config(phba, pmb);
9939
9940        rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
9941        if (rc != MBX_SUCCESS) {
9942                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9943                                "2012 Mailbox failed, mbxCmd x%x "
9944                                "READ_CONFIG, mbxStatus x%x\n",
9945                                bf_get(lpfc_mqe_command, &pmb->u.mqe),
9946                                bf_get(lpfc_mqe_status, &pmb->u.mqe));
9947                rc = -EIO;
9948        } else {
9949                rd_config = &pmb->u.mqe.un.rd_config;
9950                if (bf_get(lpfc_mbx_rd_conf_lnk_ldv, rd_config)) {
9951                        phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_VAL;
9952                        phba->sli4_hba.lnk_info.lnk_tp =
9953                                bf_get(lpfc_mbx_rd_conf_lnk_type, rd_config);
9954                        phba->sli4_hba.lnk_info.lnk_no =
9955                                bf_get(lpfc_mbx_rd_conf_lnk_numb, rd_config);
9956                        lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
9957                                        "3081 lnk_type:%d, lnk_numb:%d\n",
9958                                        phba->sli4_hba.lnk_info.lnk_tp,
9959                                        phba->sli4_hba.lnk_info.lnk_no);
9960                } else
9961                        lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
9962                                        "3082 Mailbox (x%x) returned ldv:x0\n",
9963                                        bf_get(lpfc_mqe_command, &pmb->u.mqe));
9964                if (bf_get(lpfc_mbx_rd_conf_bbscn_def, rd_config)) {
9965                        phba->bbcredit_support = 1;
9966                        phba->sli4_hba.bbscn_params.word0 = rd_config->word8;
9967                }
9968
9969                fawwpn = bf_get(lpfc_mbx_rd_conf_fawwpn, rd_config);
9970
9971                if (fawwpn) {
9972                        lpfc_printf_log(phba, KERN_INFO,
9973                                        LOG_INIT | LOG_DISCOVERY,
9974                                        "2702 READ_CONFIG: FA-PWWN is "
9975                                        "configured on\n");
9976                        phba->sli4_hba.fawwpn_flag |= LPFC_FAWWPN_CONFIG;
9977                } else {
9978                        phba->sli4_hba.fawwpn_flag = 0;
9979                }
9980
9981                phba->sli4_hba.conf_trunk =
9982                        bf_get(lpfc_mbx_rd_conf_trunk, rd_config);
9983                phba->sli4_hba.extents_in_use =
9984                        bf_get(lpfc_mbx_rd_conf_extnts_inuse, rd_config);
9985
9986                phba->sli4_hba.max_cfg_param.max_xri =
9987                        bf_get(lpfc_mbx_rd_conf_xri_count, rd_config);
9988                /* Reduce resource usage in kdump environment */
9989                if (is_kdump_kernel() &&
9990                    phba->sli4_hba.max_cfg_param.max_xri > 512)
9991                        phba->sli4_hba.max_cfg_param.max_xri = 512;
9992                phba->sli4_hba.max_cfg_param.xri_base =
9993                        bf_get(lpfc_mbx_rd_conf_xri_base, rd_config);
9994                phba->sli4_hba.max_cfg_param.max_vpi =
9995                        bf_get(lpfc_mbx_rd_conf_vpi_count, rd_config);
9996                /* Limit the max we support */
9997                if (phba->sli4_hba.max_cfg_param.max_vpi > LPFC_MAX_VPORTS)
9998                        phba->sli4_hba.max_cfg_param.max_vpi = LPFC_MAX_VPORTS;
9999                phba->sli4_hba.max_cfg_param.vpi_base =
10000                        bf_get(lpfc_mbx_rd_conf_vpi_base, rd_config);
10001                phba->sli4_hba.max_cfg_param.max_rpi =
10002                        bf_get(lpfc_mbx_rd_conf_rpi_count, rd_config);
10003                phba->sli4_hba.max_cfg_param.rpi_base =
10004                        bf_get(lpfc_mbx_rd_conf_rpi_base, rd_config);
10005                phba->sli4_hba.max_cfg_param.max_vfi =
10006                        bf_get(lpfc_mbx_rd_conf_vfi_count, rd_config);
10007                phba->sli4_hba.max_cfg_param.vfi_base =
10008                        bf_get(lpfc_mbx_rd_conf_vfi_base, rd_config);
10009                phba->sli4_hba.max_cfg_param.max_fcfi =
10010                        bf_get(lpfc_mbx_rd_conf_fcfi_count, rd_config);
10011                phba->sli4_hba.max_cfg_param.max_eq =
10012                        bf_get(lpfc_mbx_rd_conf_eq_count, rd_config);
10013                phba->sli4_hba.max_cfg_param.max_rq =
10014                        bf_get(lpfc_mbx_rd_conf_rq_count, rd_config);
10015                phba->sli4_hba.max_cfg_param.max_wq =
10016                        bf_get(lpfc_mbx_rd_conf_wq_count, rd_config);
10017                phba->sli4_hba.max_cfg_param.max_cq =
10018                        bf_get(lpfc_mbx_rd_conf_cq_count, rd_config);
10019                phba->lmt = bf_get(lpfc_mbx_rd_conf_lmt, rd_config);
10020                phba->sli4_hba.next_xri = phba->sli4_hba.max_cfg_param.xri_base;
10021                phba->vpi_base = phba->sli4_hba.max_cfg_param.vpi_base;
10022                phba->vfi_base = phba->sli4_hba.max_cfg_param.vfi_base;
10023                phba->max_vpi = (phba->sli4_hba.max_cfg_param.max_vpi > 0) ?
10024                                (phba->sli4_hba.max_cfg_param.max_vpi - 1) : 0;
10025                phba->max_vports = phba->max_vpi;
10026
10027                /* Next decide on FPIN or Signal E2E CGN support
10028                 * For congestion alarms and warnings, the valid combinations are:
10029                 * 1. FPIN alarms / FPIN warnings
10030                 * 2. Signal alarms / Signal warnings
10031                 * 3. FPIN alarms / Signal warnings
10032                 * 4. Signal alarms / FPIN warnings
10033                 *
10034                 * Initialize the adapter frequency to 100 mSecs
10035                 */
10036                phba->cgn_reg_fpin = LPFC_CGN_FPIN_BOTH;
10037                phba->cgn_reg_signal = EDC_CG_SIG_NOTSUPPORTED;
10038                phba->cgn_sig_freq = lpfc_fabric_cgn_frequency;
10039
10040                if (lpfc_use_cgn_signal) {
10041                        if (bf_get(lpfc_mbx_rd_conf_wcs, rd_config)) {
10042                                phba->cgn_reg_signal = EDC_CG_SIG_WARN_ONLY;
10043                                phba->cgn_reg_fpin &= ~LPFC_CGN_FPIN_WARN;
10044                        }
10045                        if (bf_get(lpfc_mbx_rd_conf_acs, rd_config)) {
10046                                /* MUST support both alarm and warning
10047                                 * because EDC does not support alarm alone.
10048                                 */
10049                                if (phba->cgn_reg_signal !=
10050                                    EDC_CG_SIG_WARN_ONLY) {
10051                                        /* Must support both or none */
10052                                        phba->cgn_reg_fpin = LPFC_CGN_FPIN_BOTH;
10053                                        phba->cgn_reg_signal =
10054                                                EDC_CG_SIG_NOTSUPPORTED;
10055                                } else {
10056                                        phba->cgn_reg_signal =
10057                                                EDC_CG_SIG_WARN_ALARM;
10058                                        phba->cgn_reg_fpin =
10059                                                LPFC_CGN_FPIN_NONE;
10060                                }
10061                        }
10062                }
10063
10064                /* Set the congestion initial signal and fpin values. */
10065                phba->cgn_init_reg_fpin = phba->cgn_reg_fpin;
10066                phba->cgn_init_reg_signal = phba->cgn_reg_signal;
10067
10068                lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
10069                                "6446 READ_CONFIG reg_sig x%x reg_fpin:x%x\n",
10070                                phba->cgn_reg_signal, phba->cgn_reg_fpin);
10071
10072                lpfc_map_topology(phba, rd_config);
10073                lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
10074                                "2003 cfg params Extents? %d "
10075                                "XRI(B:%d M:%d), "
10076                                "VPI(B:%d M:%d) "
10077                                "VFI(B:%d M:%d) "
10078                                "RPI(B:%d M:%d) "
10079                                "FCFI:%d EQ:%d CQ:%d WQ:%d RQ:%d lmt:x%x\n",
10080                                phba->sli4_hba.extents_in_use,
10081                                phba->sli4_hba.max_cfg_param.xri_base,
10082                                phba->sli4_hba.max_cfg_param.max_xri,
10083                                phba->sli4_hba.max_cfg_param.vpi_base,
10084                                phba->sli4_hba.max_cfg_param.max_vpi,
10085                                phba->sli4_hba.max_cfg_param.vfi_base,
10086                                phba->sli4_hba.max_cfg_param.max_vfi,
10087                                phba->sli4_hba.max_cfg_param.rpi_base,
10088                                phba->sli4_hba.max_cfg_param.max_rpi,
10089                                phba->sli4_hba.max_cfg_param.max_fcfi,
10090                                phba->sli4_hba.max_cfg_param.max_eq,
10091                                phba->sli4_hba.max_cfg_param.max_cq,
10092                                phba->sli4_hba.max_cfg_param.max_wq,
10093                                phba->sli4_hba.max_cfg_param.max_rq,
10094                                phba->lmt);
10095
10096                /*
10097                 * Calculate queue resources based on how
10098                 * many WQ/CQ/EQs are available.
10099                 */
10100                qmin = phba->sli4_hba.max_cfg_param.max_wq;
10101                if (phba->sli4_hba.max_cfg_param.max_cq < qmin)
10102                        qmin = phba->sli4_hba.max_cfg_param.max_cq;
10103                if (phba->sli4_hba.max_cfg_param.max_eq < qmin)
10104                        qmin = phba->sli4_hba.max_cfg_param.max_eq;
10105                /*
10106                 * What's left after this can go toward NVME / FCP.
10107                 * The minus 4 accounts for ELS, NVME LS, MBOX
10108                 * plus one extra. When configured for
10109                 * NVMET, FCP io channel WQs are not created.
10110                 */
10111                qmin -= 4;
10112
10113                /* Check to see if there is enough for NVME */
10114                if ((phba->cfg_irq_chann > qmin) ||
10115                    (phba->cfg_hdw_queue > qmin)) {
10116                        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10117                                        "2005 Reducing Queues - "
10118                                        "FW resource limitation: "
10119                                        "WQ %d CQ %d EQ %d: min %d: "
10120                                        "IRQ %d HDWQ %d\n",
10121                                        phba->sli4_hba.max_cfg_param.max_wq,
10122                                        phba->sli4_hba.max_cfg_param.max_cq,
10123                                        phba->sli4_hba.max_cfg_param.max_eq,
10124                                        qmin, phba->cfg_irq_chann,
10125                                        phba->cfg_hdw_queue);
10126
10127                        if (phba->cfg_irq_chann > qmin)
10128                                phba->cfg_irq_chann = qmin;
10129                        if (phba->cfg_hdw_queue > qmin)
10130                                phba->cfg_hdw_queue = qmin;
10131                }
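                /*
                 * Editor's worked example: with max_wq = 128, max_cq = 120
                 * and max_eq = 64, qmin becomes 64 - 4 = 60, and both
                 * cfg_irq_chann and cfg_hdw_queue are clamped to 60.
                 */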
10132        }
10133
10134        if (rc)
10135                goto read_cfg_out;
10136
10137        /* Update link speed if forced link speed is supported */
10138        if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
10139        if (if_type >= LPFC_SLI_INTF_IF_TYPE_2) {
10140                forced_link_speed =
10141                        bf_get(lpfc_mbx_rd_conf_link_speed, rd_config);
10142                if (forced_link_speed) {
10143                        phba->hba_flag |= HBA_FORCED_LINK_SPEED;
10144
10145                        switch (forced_link_speed) {
10146                        case LINK_SPEED_1G:
10147                                phba->cfg_link_speed =
10148                                        LPFC_USER_LINK_SPEED_1G;
10149                                break;
10150                        case LINK_SPEED_2G:
10151                                phba->cfg_link_speed =
10152                                        LPFC_USER_LINK_SPEED_2G;
10153                                break;
10154                        case LINK_SPEED_4G:
10155                                phba->cfg_link_speed =
10156                                        LPFC_USER_LINK_SPEED_4G;
10157                                break;
10158                        case LINK_SPEED_8G:
10159                                phba->cfg_link_speed =
10160                                        LPFC_USER_LINK_SPEED_8G;
10161                                break;
10162                        case LINK_SPEED_10G:
10163                                phba->cfg_link_speed =
10164                                        LPFC_USER_LINK_SPEED_10G;
10165                                break;
10166                        case LINK_SPEED_16G:
10167                                phba->cfg_link_speed =
10168                                        LPFC_USER_LINK_SPEED_16G;
10169                                break;
10170                        case LINK_SPEED_32G:
10171                                phba->cfg_link_speed =
10172                                        LPFC_USER_LINK_SPEED_32G;
10173                                break;
10174                        case LINK_SPEED_64G:
10175                                phba->cfg_link_speed =
10176                                        LPFC_USER_LINK_SPEED_64G;
10177                                break;
10178                        case 0xffff:
10179                                phba->cfg_link_speed =
10180                                        LPFC_USER_LINK_SPEED_AUTO;
10181                                break;
10182                        default:
10183                                lpfc_printf_log(phba, KERN_ERR,
10184                                                LOG_TRACE_EVENT,
10185                                                "0047 Unrecognized link "
10186                                                "speed : %d\n",
10187                                                forced_link_speed);
10188                                phba->cfg_link_speed =
10189                                        LPFC_USER_LINK_SPEED_AUTO;
10190                        }
10191                }
10192        }
10193
10194        /* Reset the DFT_HBA_Q_DEPTH to the max xri */
10195        length = phba->sli4_hba.max_cfg_param.max_xri -
10196                        lpfc_sli4_get_els_iocb_cnt(phba);
10197        if (phba->cfg_hba_queue_depth > length) {
10198                lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
10199                                "3361 HBA queue depth changed from %d to %d\n",
10200                                phba->cfg_hba_queue_depth, length);
10201                phba->cfg_hba_queue_depth = length;
10202        }
10203
10204        if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) <
10205            LPFC_SLI_INTF_IF_TYPE_2)
10206                goto read_cfg_out;
10207
10208        /* get the pf# and vf# for SLI4 if_type 2 port */
10209        length = (sizeof(struct lpfc_mbx_get_func_cfg) -
10210                  sizeof(struct lpfc_sli4_cfg_mhdr));
10211        lpfc_sli4_config(phba, pmb, LPFC_MBOX_SUBSYSTEM_COMMON,
10212                         LPFC_MBOX_OPCODE_GET_FUNCTION_CONFIG,
10213                         length, LPFC_SLI4_MBX_EMBED);
10214
10215        rc2 = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
10216        shdr = (union lpfc_sli4_cfg_shdr *)
10217                                &pmb->u.mqe.un.sli4_config.header.cfg_shdr;
10218        shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
10219        shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
10220        if (rc2 || shdr_status || shdr_add_status) {
10221                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10222                                "3026 Mailbox failed, mbxCmd x%x "
10223                                "GET_FUNCTION_CONFIG, mbxStatus x%x\n",
10224                                bf_get(lpfc_mqe_command, &pmb->u.mqe),
10225                                bf_get(lpfc_mqe_status, &pmb->u.mqe));
10226                goto read_cfg_out;
10227        }
10228
10229        /* search for the fc_fcoe resource descriptor */
10230        get_func_cfg = &pmb->u.mqe.un.get_func_cfg;
10231
10232        pdesc_0 = (char *)&get_func_cfg->func_cfg.desc[0];
10233        desc = (struct lpfc_rsrc_desc_fcfcoe *)pdesc_0;
10234        length = bf_get(lpfc_rsrc_desc_fcfcoe_length, desc);
10235        if (length == LPFC_RSRC_DESC_TYPE_FCFCOE_V0_RSVD)
10236                length = LPFC_RSRC_DESC_TYPE_FCFCOE_V0_LENGTH;
10237        else if (length != LPFC_RSRC_DESC_TYPE_FCFCOE_V1_LENGTH)
10238                goto read_cfg_out;
10239
10240        for (i = 0; i < LPFC_RSRC_DESC_MAX_NUM; i++) {
10241                desc = (struct lpfc_rsrc_desc_fcfcoe *)(pdesc_0 + length * i);
10242                if (LPFC_RSRC_DESC_TYPE_FCFCOE ==
10243                    bf_get(lpfc_rsrc_desc_fcfcoe_type, desc)) {
10244                        phba->sli4_hba.iov.pf_number =
10245                                bf_get(lpfc_rsrc_desc_fcfcoe_pfnum, desc);
10246                        phba->sli4_hba.iov.vf_number =
10247                                bf_get(lpfc_rsrc_desc_fcfcoe_vfnum, desc);
10248                        break;
10249                }
10250        }
10251
10252        if (i < LPFC_RSRC_DESC_MAX_NUM)
10253                lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
10254                                "3027 GET_FUNCTION_CONFIG: pf_number:%d, "
10255                                "vf_number:%d\n", phba->sli4_hba.iov.pf_number,
10256                                phba->sli4_hba.iov.vf_number);
10257        else
10258                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10259                                "3028 GET_FUNCTION_CONFIG: failed to find "
10260                                "Resource Descriptor:x%x\n",
10261                                LPFC_RSRC_DESC_TYPE_FCFCOE);
10262
10263read_cfg_out:
10264        mempool_free(pmb, phba->mbox_mem_pool);
10265        return rc;
10266}
10267
10268/**
10269 * lpfc_setup_endian_order - Write endian order to an SLI4 if_type 0 port.
10270 * @phba: pointer to lpfc hba data structure.
10271 *
10272 * This routine is invoked to set up the port-side endian order when
10273 * the port if_type is 0.  It is a no-op for all other
10274 * if_types.
10275 *
10276 * Return codes
10277 *      0 - successful
10278 *      -ENOMEM - No available memory
10279 *      -EIO - The mailbox failed to complete successfully.
10280 **/
10281static int
10282lpfc_setup_endian_order(struct lpfc_hba *phba)
10283{
10284        LPFC_MBOXQ_t *mboxq;
10285        uint32_t if_type, rc = 0;
10286        uint32_t endian_mb_data[2] = {HOST_ENDIAN_LOW_WORD0,
10287                                      HOST_ENDIAN_HIGH_WORD1};
10288
10289        if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
10290        switch (if_type) {
10291        case LPFC_SLI_INTF_IF_TYPE_0:
10292                mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
10293                                                       GFP_KERNEL);
10294                if (!mboxq) {
10295                        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10296                                        "0492 Unable to allocate memory for "
10297                                        "issuing SLI_CONFIG_SPECIAL mailbox "
10298                                        "command\n");
10299                        return -ENOMEM;
10300                }
10301
10302                /*
10303                 * The SLI4_CONFIG_SPECIAL mailbox command requires the first
10304                 * two words to contain special data values and no other data.
10305                 */
10306                memset(mboxq, 0, sizeof(LPFC_MBOXQ_t));
10307                memcpy(&mboxq->u.mqe, &endian_mb_data, sizeof(endian_mb_data));
10308                rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
10309                if (rc != MBX_SUCCESS) {
10310                        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10311                                        "0493 SLI_CONFIG_SPECIAL mailbox "
10312                                        "failed with status x%x\n",
10313                                        rc);
10314                        rc = -EIO;
10315                }
10316                mempool_free(mboxq, phba->mbox_mem_pool);
10317                break;
10318        case LPFC_SLI_INTF_IF_TYPE_6:
10319        case LPFC_SLI_INTF_IF_TYPE_2:
10320        case LPFC_SLI_INTF_IF_TYPE_1:
10321        default:
10322                break;
10323        }
10324        return rc;
10325}
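
     /*
      * The HOST_ENDIAN_* words above form a fixed, byte-order-sensitive
      * pattern; a port receiving them swapped can presumably deduce the
      * host's endianness and adjust all subsequent mailbox traffic to
      * match.
      */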
10326
10327/**
10328 * lpfc_sli4_queue_verify - Verify and update EQ counts
10329 * @phba: pointer to lpfc hba data structure.
10330 *
10331 * This routine is invoked to check the user settable queue counts for EQs.
10332 * After this routine is called the counts will be set to valid values that
10333 * adhere to the constraints of the system's interrupt vectors and the port's
10334 * queue resources.
10335 *
10336 * Return codes
10337 *      0 - successful
10338 *      -ENOMEM - No available memory
10339 **/
10340static int
10341lpfc_sli4_queue_verify(struct lpfc_hba *phba)
10342{
10343        /*
10344         * Sanity check for configured queue parameters against the run-time
10345         * device parameters
10346         */
10347
10348        if (phba->nvmet_support) {
10349                if (phba->cfg_hdw_queue < phba->cfg_nvmet_mrq)
10350                        phba->cfg_nvmet_mrq = phba->cfg_hdw_queue;
10351                if (phba->cfg_nvmet_mrq > LPFC_NVMET_MRQ_MAX)
10352                        phba->cfg_nvmet_mrq = LPFC_NVMET_MRQ_MAX;
10353        }
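             /*
              * For example, a request for 8 MRQs with cfg_hdw_queue = 4 is
              * reduced to 4 here; any value above LPFC_NVMET_MRQ_MAX would
              * likewise be capped at that limit.
              */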
10354
10355        lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10356                        "2574 IO channels: hdwQ %d IRQ %d MRQ: %d\n",
10357                        phba->cfg_hdw_queue, phba->cfg_irq_chann,
10358                        phba->cfg_nvmet_mrq);
10359
10360        /* Get EQ depth from module parameter, fake the default for now */
10361        phba->sli4_hba.eq_esize = LPFC_EQE_SIZE_4B;
10362        phba->sli4_hba.eq_ecount = LPFC_EQE_DEF_COUNT;
10363
10364        /* Get CQ depth from module parameter, fake the default for now */
10365        phba->sli4_hba.cq_esize = LPFC_CQE_SIZE;
10366        phba->sli4_hba.cq_ecount = LPFC_CQE_DEF_COUNT;
10367        return 0;
10368}
10369
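     /**
      * lpfc_alloc_io_wq_cq - Allocate an IO WQ/CQ pair for one hardware queue
      * @phba: pointer to lpfc hba data structure.
      * @idx: index of the hardware queue to allocate for.
      *
      * Allocate the fast-path IO completion queue and work queue for the
      * hardware queue at @idx and attach them to phba->sli4_hba.hdwq[idx].
      *
      * Return codes
      *      0 - successful
      *      1 - failed to allocate a queue
      **/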
10370static int
10371lpfc_alloc_io_wq_cq(struct lpfc_hba *phba, int idx)
10372{
10373        struct lpfc_queue *qdesc;
10374        u32 wqesize;
10375        int cpu;
10376
10377        cpu = lpfc_find_cpu_handle(phba, idx, LPFC_FIND_BY_HDWQ);
10378        /* Create Fast Path IO CQs */
10379        if (phba->enab_exp_wqcq_pages)
10380                /* Increase the CQ size when WQEs contain an embedded cdb */
10381                qdesc = lpfc_sli4_queue_alloc(phba, LPFC_EXPANDED_PAGE_SIZE,
10382                                              phba->sli4_hba.cq_esize,
10383                                              LPFC_CQE_EXP_COUNT, cpu);
10385        else
10386                qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
10387                                              phba->sli4_hba.cq_esize,
10388                                              phba->sli4_hba.cq_ecount, cpu);
10389        if (!qdesc) {
10390                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10391                                "0499 Failed allocate fast-path IO CQ (%d)\n",
10392                                idx);
10393                return 1;
10394        }
10395        qdesc->qe_valid = 1;
10396        qdesc->hdwq = idx;
10397        qdesc->chann = cpu;
10398        phba->sli4_hba.hdwq[idx].io_cq = qdesc;
10399
10400        /* Create Fast Path IO WQs */
10401        if (phba->enab_exp_wqcq_pages) {
10402                /* Increase the WQ size when WQEs contain an embedded cdb */
10403                wqesize = (phba->fcp_embed_io) ?
10404                        LPFC_WQE128_SIZE : phba->sli4_hba.wq_esize;
10405                qdesc = lpfc_sli4_queue_alloc(phba, LPFC_EXPANDED_PAGE_SIZE,
10406                                              wqesize,
10407                                              LPFC_WQE_EXP_COUNT, cpu);
10408        } else {
10409                qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
10410                                              phba->sli4_hba.wq_esize,
10411                                              phba->sli4_hba.wq_ecount, cpu);
             }
10412
10413        if (!qdesc) {
10414                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10415                                "0503 Failed allocate fast-path IO WQ (%d)\n",
10416                                idx);
10417                return 1;
10418        }
10419        qdesc->hdwq = idx;
10420        qdesc->chann = cpu;
10421        phba->sli4_hba.hdwq[idx].io_wq = qdesc;
10422        list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list);
10423        return 0;
10424}
10425
10426/**
10427 * lpfc_sli4_queue_create - Create all the SLI4 queues
10428 * @phba: pointer to lpfc hba data structure.
10429 *
10430 * This routine is invoked to allocate all the SLI4 queues for the FCoE HBA
10431 * operation. For each SLI4 queue type, the parameters such as queue entry
10432 * count (queue depth) shall be taken from the module parameter. For now,
10433 * we just use some constant number as a placeholder.
10434 *
10435 * Return codes
10436 *      0 - successful
10437 *      -ENOMEM - No available memory
10438 *      -EIO - The mailbox failed to complete successfully.
10439 **/
10440int
10441lpfc_sli4_queue_create(struct lpfc_hba *phba)
10442{
10443        struct lpfc_queue *qdesc;
10444        int idx, cpu, eqcpu;
10445        struct lpfc_sli4_hdw_queue *qp;
10446        struct lpfc_vector_map_info *cpup;
10447        struct lpfc_vector_map_info *eqcpup;
10448        struct lpfc_eq_intr_info *eqi;
10449
10450        /*
10451         * Create HBA Record arrays.
10452 * Both NVME and FCP will share the same vectors / EQs.
10453         */
10454        phba->sli4_hba.mq_esize = LPFC_MQE_SIZE;
10455        phba->sli4_hba.mq_ecount = LPFC_MQE_DEF_COUNT;
10456        phba->sli4_hba.wq_esize = LPFC_WQE_SIZE;
10457        phba->sli4_hba.wq_ecount = LPFC_WQE_DEF_COUNT;
10458        phba->sli4_hba.rq_esize = LPFC_RQE_SIZE;
10459        phba->sli4_hba.rq_ecount = LPFC_RQE_DEF_COUNT;
10460        phba->sli4_hba.eq_esize = LPFC_EQE_SIZE_4B;
10461        phba->sli4_hba.eq_ecount = LPFC_EQE_DEF_COUNT;
10462        phba->sli4_hba.cq_esize = LPFC_CQE_SIZE;
10463        phba->sli4_hba.cq_ecount = LPFC_CQE_DEF_COUNT;
10464
10465        if (!phba->sli4_hba.hdwq) {
10466                phba->sli4_hba.hdwq = kcalloc(
10467                        phba->cfg_hdw_queue, sizeof(struct lpfc_sli4_hdw_queue),
10468                        GFP_KERNEL);
10469                if (!phba->sli4_hba.hdwq) {
10470                        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10471                                        "6427 Failed allocate memory for "
10472                                        "fast-path Hardware Queue array\n");
10473                        goto out_error;
10474                }
10475                /* Prepare hardware queues to take IO buffers */
10476                for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
10477                        qp = &phba->sli4_hba.hdwq[idx];
10478                        spin_lock_init(&qp->io_buf_list_get_lock);
10479                        spin_lock_init(&qp->io_buf_list_put_lock);
10480                        INIT_LIST_HEAD(&qp->lpfc_io_buf_list_get);
10481                        INIT_LIST_HEAD(&qp->lpfc_io_buf_list_put);
10482                        qp->get_io_bufs = 0;
10483                        qp->put_io_bufs = 0;
10484                        qp->total_io_bufs = 0;
10485                        spin_lock_init(&qp->abts_io_buf_list_lock);
10486                        INIT_LIST_HEAD(&qp->lpfc_abts_io_buf_list);
10487                        qp->abts_scsi_io_bufs = 0;
10488                        qp->abts_nvme_io_bufs = 0;
10489                        INIT_LIST_HEAD(&qp->sgl_list);
10490                        INIT_LIST_HEAD(&qp->cmd_rsp_buf_list);
10491                        spin_lock_init(&qp->hdwq_lock);
10492                }
10493        }
10494
10495        if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
10496                if (phba->nvmet_support) {
10497                        phba->sli4_hba.nvmet_cqset = kcalloc(
10498                                        phba->cfg_nvmet_mrq,
10499                                        sizeof(struct lpfc_queue *),
10500                                        GFP_KERNEL);
10501                        if (!phba->sli4_hba.nvmet_cqset) {
10502                                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10503                                        "3121 Fail allocate memory for "
10504                                        "fast-path CQ set array\n");
10505                                goto out_error;
10506                        }
10507                        phba->sli4_hba.nvmet_mrq_hdr = kcalloc(
10508                                        phba->cfg_nvmet_mrq,
10509                                        sizeof(struct lpfc_queue *),
10510                                        GFP_KERNEL);
10511                        if (!phba->sli4_hba.nvmet_mrq_hdr) {
10512                                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10513                                        "3122 Fail allocate memory for "
10514                                        "fast-path RQ set hdr array\n");
10515                                goto out_error;
10516                        }
10517                        phba->sli4_hba.nvmet_mrq_data = kcalloc(
10518                                        phba->cfg_nvmet_mrq,
10519                                        sizeof(struct lpfc_queue *),
10520                                        GFP_KERNEL);
10521                        if (!phba->sli4_hba.nvmet_mrq_data) {
10522                                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10523                                        "3124 Fail allocate memory for "
10524                                        "fast-path RQ set data array\n");
10525                                goto out_error;
10526                        }
10527                }
10528        }
10529
10530        INIT_LIST_HEAD(&phba->sli4_hba.lpfc_wq_list);
10531
10532        /* Create HBA Event Queues (EQs) */
10533        for_each_present_cpu(cpu) {
10534                /* We only want to create 1 EQ per vector, even though
10535         * multiple CPUs might be using that vector, so only
10536         * select the CPUs that are marked LPFC_CPU_FIRST_IRQ.
10537                 */
10538                cpup = &phba->sli4_hba.cpu_map[cpu];
10539                if (!(cpup->flag & LPFC_CPU_FIRST_IRQ))
10540                        continue;
10541
10542                /* Get a ptr to the Hardware Queue associated with this CPU */
10543                qp = &phba->sli4_hba.hdwq[cpup->hdwq];
10544
10545                /* Allocate an EQ */
10546                qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
10547                                              phba->sli4_hba.eq_esize,
10548                                              phba->sli4_hba.eq_ecount, cpu);
10549                if (!qdesc) {
10550                        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10551                                        "0497 Failed allocate EQ (%d)\n",
10552                                        cpup->hdwq);
10553                        goto out_error;
10554                }
10555                qdesc->qe_valid = 1;
10556                qdesc->hdwq = cpup->hdwq;
10557                qdesc->chann = cpu; /* First CPU this EQ is affinitized to */
10558                qdesc->last_cpu = qdesc->chann;
10559
10560                /* Save the allocated EQ in the Hardware Queue */
10561                qp->hba_eq = qdesc;
10562
10563                eqi = per_cpu_ptr(phba->sli4_hba.eq_info, qdesc->last_cpu);
10564                list_add(&qdesc->cpu_list, &eqi->list);
10565        }
10566
10567        /* Now we need to populate the other Hardware Queues that share
10568         * an IRQ vector with the associated EQ ptr.
10569         */
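             /* For example (hypothetical topology): with eight CPUs sharing
              * four IRQ vectors, CPUs 4-7 are not LPFC_CPU_FIRST_IRQ and got
              * no EQ of their own in the creation loop above; the loop below
              * points their hardware queues at the EQs created for CPUs 0-3,
              * found through the eq/hdwq fields of their cpu_map entries.
              */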
10570        for_each_present_cpu(cpu) {
10571                cpup = &phba->sli4_hba.cpu_map[cpu];
10572
10573                /* Check for EQ already allocated in previous loop */
10574                if (cpup->flag & LPFC_CPU_FIRST_IRQ)
10575                        continue;
10576
10577                /* Check for multiple CPUs per hdwq */
10578                qp = &phba->sli4_hba.hdwq[cpup->hdwq];
10579                if (qp->hba_eq)
10580                        continue;
10581
10582                /* We need to share an EQ for this hdwq */
10583                eqcpu = lpfc_find_cpu_handle(phba, cpup->eq, LPFC_FIND_BY_EQ);
10584                eqcpup = &phba->sli4_hba.cpu_map[eqcpu];
10585                qp->hba_eq = phba->sli4_hba.hdwq[eqcpup->hdwq].hba_eq;
10586        }
10587
10588        /* Allocate IO Path SLI4 CQ/WQs */
10589        for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
10590                if (lpfc_alloc_io_wq_cq(phba, idx))
10591                        goto out_error;
10592        }
10593
10594        if (phba->nvmet_support) {
10595                for (idx = 0; idx < phba->cfg_nvmet_mrq; idx++) {
10596                        cpu = lpfc_find_cpu_handle(phba, idx,
10597                                                   LPFC_FIND_BY_HDWQ);
10598                        qdesc = lpfc_sli4_queue_alloc(phba,
10599                                                      LPFC_DEFAULT_PAGE_SIZE,
10600                                                      phba->sli4_hba.cq_esize,
10601                                                      phba->sli4_hba.cq_ecount,
10602                                                      cpu);
10603                        if (!qdesc) {
10604                                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10605                                                "3142 Failed allocate NVME "
10606                                                "CQ Set (%d)\n", idx);
10607                                goto out_error;
10608                        }
10609                        qdesc->qe_valid = 1;
10610                        qdesc->hdwq = idx;
10611                        qdesc->chann = cpu;
10612                        phba->sli4_hba.nvmet_cqset[idx] = qdesc;
10613                }
10614        }
10615
10616        /*
10617         * Create Slow Path Completion Queues (CQs)
10618         */
10619
10620        cpu = lpfc_find_cpu_handle(phba, 0, LPFC_FIND_BY_EQ);
10621        /* Create slow-path Mailbox Command Complete Queue */
10622        qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
10623                                      phba->sli4_hba.cq_esize,
10624                                      phba->sli4_hba.cq_ecount, cpu);
10625        if (!qdesc) {
10626                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10627                                "0500 Failed allocate slow-path mailbox CQ\n");
10628                goto out_error;
10629        }
10630        qdesc->qe_valid = 1;
10631        phba->sli4_hba.mbx_cq = qdesc;
10632
10633        /* Create slow-path ELS Complete Queue */
10634        qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
10635                                      phba->sli4_hba.cq_esize,
10636                                      phba->sli4_hba.cq_ecount, cpu);
10637        if (!qdesc) {
10638                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10639                                "0501 Failed allocate slow-path ELS CQ\n");
10640                goto out_error;
10641        }
10642        qdesc->qe_valid = 1;
10643        qdesc->chann = cpu;
10644        phba->sli4_hba.els_cq = qdesc;
10645
10647        /*
10648         * Create Slow Path Work Queues (WQs)
10649         */
10650
10651        /* Create Mailbox Command Queue */
10652
10653        qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
10654                                      phba->sli4_hba.mq_esize,
10655                                      phba->sli4_hba.mq_ecount, cpu);
10656        if (!qdesc) {
10657                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10658                                "0505 Failed allocate slow-path MQ\n");
10659                goto out_error;
10660        }
10661        qdesc->chann = cpu;
10662        phba->sli4_hba.mbx_wq = qdesc;
10663
10664        /*
10665         * Create ELS Work Queues
10666         */
10667
10668        /* Create slow-path ELS Work Queue */
10669        qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
10670                                      phba->sli4_hba.wq_esize,
10671                                      phba->sli4_hba.wq_ecount, cpu);
10672        if (!qdesc) {
10673                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10674                                "0504 Failed allocate slow-path ELS WQ\n");
10675                goto out_error;
10676        }
10677        qdesc->chann = cpu;
10678        phba->sli4_hba.els_wq = qdesc;
10679        list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list);
10680
10681        if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
10682                /* Create NVME LS Complete Queue */
10683                qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
10684                                              phba->sli4_hba.cq_esize,
10685                                              phba->sli4_hba.cq_ecount, cpu);
10686                if (!qdesc) {
10687                        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10688                                        "6079 Failed allocate NVME LS CQ\n");
10689                        goto out_error;
10690                }
10691                qdesc->chann = cpu;
10692                qdesc->qe_valid = 1;
10693                phba->sli4_hba.nvmels_cq = qdesc;
10694
10695                /* Create NVME LS Work Queue */
10696                qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
10697                                              phba->sli4_hba.wq_esize,
10698                                              phba->sli4_hba.wq_ecount, cpu);
10699                if (!qdesc) {
10700                        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10701                                        "6080 Failed allocate NVME LS WQ\n");
10702                        goto out_error;
10703                }
10704                qdesc->chann = cpu;
10705                phba->sli4_hba.nvmels_wq = qdesc;
10706                list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list);
10707        }
10708
10709        /*
10710         * Create Receive Queue (RQ)
10711         */
10712
10713        /* Create Receive Queue for header */
10714        qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
10715                                      phba->sli4_hba.rq_esize,
10716                                      phba->sli4_hba.rq_ecount, cpu);
10717        if (!qdesc) {
10718                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10719                                "0506 Failed allocate receive HRQ\n");
10720                goto out_error;
10721        }
10722        phba->sli4_hba.hdr_rq = qdesc;
10723
10724        /* Create Receive Queue for data */
10725        qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
10726                                      phba->sli4_hba.rq_esize,
10727                                      phba->sli4_hba.rq_ecount, cpu);
10728        if (!qdesc) {
10729                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10730                                "0507 Failed allocate receive DRQ\n");
10731                goto out_error;
10732        }
10733        phba->sli4_hba.dat_rq = qdesc;
10734
10735        if ((phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) &&
10736            phba->nvmet_support) {
10737                for (idx = 0; idx < phba->cfg_nvmet_mrq; idx++) {
10738                        cpu = lpfc_find_cpu_handle(phba, idx,
10739                                                   LPFC_FIND_BY_HDWQ);
10740                        /* Create NVMET Receive Queue for header */
10741                        qdesc = lpfc_sli4_queue_alloc(phba,
10742                                                      LPFC_DEFAULT_PAGE_SIZE,
10743                                                      phba->sli4_hba.rq_esize,
10744                                                      LPFC_NVMET_RQE_DEF_COUNT,
10745                                                      cpu);
10746                        if (!qdesc) {
10747                                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10748                                                "3146 Failed allocate "
10749                                                "receive HRQ\n");
10750                                goto out_error;
10751                        }
10752                        qdesc->hdwq = idx;
10753                        phba->sli4_hba.nvmet_mrq_hdr[idx] = qdesc;
10754
10755                        /* Only needed for header of RQ pair */
10756                        qdesc->rqbp = kzalloc_node(sizeof(*qdesc->rqbp),
10757                                                   GFP_KERNEL,
10758                                                   cpu_to_node(cpu));
10759                        if (!qdesc->rqbp) {
10760                                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10761                                                "6131 Failed allocate "
10762                                                "Header RQBP\n");
10763                                goto out_error;
10764                        }
10765
10766                        /* Put list in known state in case driver load fails. */
10767                        INIT_LIST_HEAD(&qdesc->rqbp->rqb_buffer_list);
10768
10769                        /* Create NVMET Receive Queue for data */
10770                        qdesc = lpfc_sli4_queue_alloc(phba,
10771                                                      LPFC_DEFAULT_PAGE_SIZE,
10772                                                      phba->sli4_hba.rq_esize,
10773                                                      LPFC_NVMET_RQE_DEF_COUNT,
10774                                                      cpu);
10775                        if (!qdesc) {
10776                                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10777                                                "3156 Failed allocate "
10778                                                "receive DRQ\n");
10779                                goto out_error;
10780                        }
10781                        qdesc->hdwq = idx;
10782                        phba->sli4_hba.nvmet_mrq_data[idx] = qdesc;
10783                }
10784        }
10785
10786        /* Clear NVME stats */
10787        if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
10788                for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
10789                        memset(&phba->sli4_hba.hdwq[idx].nvme_cstat, 0,
10790                               sizeof(phba->sli4_hba.hdwq[idx].nvme_cstat));
10791                }
10792        }
10793
10794        /* Clear SCSI stats */
10795        if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) {
10796                for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
10797                        memset(&phba->sli4_hba.hdwq[idx].scsi_cstat, 0,
10798                               sizeof(phba->sli4_hba.hdwq[idx].scsi_cstat));
10799                }
10800        }
10801
10802        return 0;
10803
10804out_error:
10805        lpfc_sli4_queue_destroy(phba);
10806        return -ENOMEM;
10807}
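
     /*
      * Illustrative pairing of the queue lifecycle helpers (a sketch only;
      * the actual call sites elsewhere in the driver interleave many more
      * steps between these calls):
      *
      *	rc = lpfc_sli4_queue_create(phba);    allocate host-side queue memory
      *	if (!rc)
      *		rc = lpfc_sli4_queue_setup(phba);    create the queues on the port
      *	...
      *	lpfc_sli4_queue_unset(phba);          destroy the queues on the port
      *	lpfc_sli4_queue_destroy(phba);        free host-side queue memory
      */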
10808
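     /* Free a single queue, if it was allocated, and clear the caller's
      * pointer to it.
      */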
10809static inline void
10810__lpfc_sli4_release_queue(struct lpfc_queue **qp)
10811{
10812        if (*qp != NULL) {
10813                lpfc_sli4_queue_free(*qp);
10814                *qp = NULL;
10815        }
10816}
10817
10818static inline void
10819lpfc_sli4_release_queues(struct lpfc_queue ***qs, int max)
10820{
10821        int idx;
10822
10823        if (*qs == NULL)
10824                return;
10825
10826        for (idx = 0; idx < max; idx++)
10827                __lpfc_sli4_release_queue(&(*qs)[idx]);
10828
10829        kfree(*qs);
10830        *qs = NULL;
10831}
10832
10833static inline void
10834lpfc_sli4_release_hdwq(struct lpfc_hba *phba)
10835{
10836        struct lpfc_sli4_hdw_queue *hdwq;
10837        struct lpfc_queue *eq;
10838        uint32_t idx;
10839
10840        hdwq = phba->sli4_hba.hdwq;
10841
10842        /* Loop thru all Hardware Queues */
10843        for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
10844                /* Free the CQ/WQ corresponding to the Hardware Queue */
10845                lpfc_sli4_queue_free(hdwq[idx].io_cq);
10846                lpfc_sli4_queue_free(hdwq[idx].io_wq);
10847                hdwq[idx].hba_eq = NULL;
10848                hdwq[idx].io_cq = NULL;
10849                hdwq[idx].io_wq = NULL;
10850                if (phba->cfg_xpsgl && !phba->nvmet_support)
10851                        lpfc_free_sgl_per_hdwq(phba, &hdwq[idx]);
10852                lpfc_free_cmd_rsp_buf_per_hdwq(phba, &hdwq[idx]);
10853        }
10854        /* Loop thru all IRQ vectors */
10855        for (idx = 0; idx < phba->cfg_irq_chann; idx++) {
10856                /* Free the EQ corresponding to the IRQ vector */
10857                eq = phba->sli4_hba.hba_eq_hdl[idx].eq;
10858                lpfc_sli4_queue_free(eq);
10859                phba->sli4_hba.hba_eq_hdl[idx].eq = NULL;
10860        }
10861}
10862
10863/**
10864 * lpfc_sli4_queue_destroy - Destroy all the SLI4 queues
10865 * @phba: pointer to lpfc hba data structure.
10866 *
10867 * This routine is invoked to release all the SLI4 queues for the FCoE HBA
10868 * operation.
10869 *
10875void
10876lpfc_sli4_queue_destroy(struct lpfc_hba *phba)
10877{
10878        /*
10879         * Set FREE_INIT before beginning to free the queues.
10880         * Wait until all users of the queues have acknowledged the
10881         * release by clearing FREE_WAIT.
10882         */
10883        spin_lock_irq(&phba->hbalock);
10884        phba->sli.sli_flag |= LPFC_QUEUE_FREE_INIT;
10885        while (phba->sli.sli_flag & LPFC_QUEUE_FREE_WAIT) {
10886                spin_unlock_irq(&phba->hbalock);
10887                msleep(20);
10888                spin_lock_irq(&phba->hbalock);
10889        }
10890        spin_unlock_irq(&phba->hbalock);
10891
10892        lpfc_sli4_cleanup_poll_list(phba);
10893
10894        /* Release HBA eqs */
10895        if (phba->sli4_hba.hdwq)
10896                lpfc_sli4_release_hdwq(phba);
10897
10898        if (phba->nvmet_support) {
10899                lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_cqset,
10900                                         phba->cfg_nvmet_mrq);
10901
10902                lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_mrq_hdr,
10903                                         phba->cfg_nvmet_mrq);
10904                lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_mrq_data,
10905                                         phba->cfg_nvmet_mrq);
10906        }
10907
10908        /* Release mailbox command work queue */
10909        __lpfc_sli4_release_queue(&phba->sli4_hba.mbx_wq);
10910
10911        /* Release ELS work queue */
10912        __lpfc_sli4_release_queue(&phba->sli4_hba.els_wq);
10913
10914        /* Release NVME LS work queue */
10915        __lpfc_sli4_release_queue(&phba->sli4_hba.nvmels_wq);
10916
10917        /* Release unsolicited receive queue */
10918        __lpfc_sli4_release_queue(&phba->sli4_hba.hdr_rq);
10919        __lpfc_sli4_release_queue(&phba->sli4_hba.dat_rq);
10920
10921        /* Release ELS complete queue */
10922        __lpfc_sli4_release_queue(&phba->sli4_hba.els_cq);
10923
10924        /* Release NVME LS complete queue */
10925        __lpfc_sli4_release_queue(&phba->sli4_hba.nvmels_cq);
10926
10927        /* Release mailbox command complete queue */
10928        __lpfc_sli4_release_queue(&phba->sli4_hba.mbx_cq);
10929
10930        /* Everything on this list has been freed */
10931        INIT_LIST_HEAD(&phba->sli4_hba.lpfc_wq_list);
10932
10933        /* Done with freeing the queues */
10934        spin_lock_irq(&phba->hbalock);
10935        phba->sli.sli_flag &= ~LPFC_QUEUE_FREE_INIT;
10936        spin_unlock_irq(&phba->hbalock);
10937}
10938
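     /*
      * lpfc_free_rq_buffer - Free all buffers still posted to an RQ.
      * Walks the RQ's rqb_buffer_list, handing each posted buffer back to
      * the registered rqb_free_buffer callback.  Always returns 1.
      */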
10939int
10940lpfc_free_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *rq)
10941{
10942        struct lpfc_rqb *rqbp;
10943        struct lpfc_dmabuf *h_buf;
10944        struct rqb_dmabuf *rqb_buffer;
10945
10946        rqbp = rq->rqbp;
10947        while (!list_empty(&rqbp->rqb_buffer_list)) {
10948                list_remove_head(&rqbp->rqb_buffer_list, h_buf,
10949                                 struct lpfc_dmabuf, list);
10950
10951                rqb_buffer = container_of(h_buf, struct rqb_dmabuf, hbuf);
10952                (rqbp->rqb_free_buffer)(phba, rqb_buffer);
10953                rqbp->buffer_count--;
10954        }
10955        return 1;
10956}
10957
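     /**
      * lpfc_create_wq_cq - Create a CQ and its child WQ (or MQ) on the port
      * @phba: pointer to lpfc hba data structure.
      * @eq: parent event queue for the new completion queue.
      * @cq: completion queue to create.
      * @wq: work queue (or, for LPFC_MBOX, mailbox queue) to create.
      * @cq_map: optional output for the new CQ's queue_id.
      * @qidx: queue index, used for logging.
      * @qtype: subtype for the queue pair (LPFC_MBOX, LPFC_IO, LPFC_ELS, ...).
      *
      * Return codes
      *      0 - successful
      *      -ENOMEM - a required queue was never allocated
      *      other nonzero - a queue-create mailbox command failed
      **/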
10958static int
10959lpfc_create_wq_cq(struct lpfc_hba *phba, struct lpfc_queue *eq,
10960        struct lpfc_queue *cq, struct lpfc_queue *wq, uint16_t *cq_map,
10961        int qidx, uint32_t qtype)
10962{
10963        struct lpfc_sli_ring *pring;
10964        int rc;
10965
10966        if (!eq || !cq || !wq) {
10967                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10968                        "6085 Fast-path %s (%d) not allocated\n",
10969                        ((eq) ? ((cq) ? "WQ" : "CQ") : "EQ"), qidx);
10970                return -ENOMEM;
10971        }
10972
10973        /* create the Cq first */
10974        rc = lpfc_cq_create(phba, cq, eq,
10975                        (qtype == LPFC_MBOX) ? LPFC_MCQ : LPFC_WCQ, qtype);
10976        if (rc) {
10977                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10978                                "6086 Failed setup of CQ (%d), rc = 0x%x\n",
10979                                qidx, (uint32_t)rc);
10980                return rc;
10981        }
10982
10983        if (qtype != LPFC_MBOX) {
10984                /* Setup cq_map for fast lookup */
10985                if (cq_map)
10986                        *cq_map = cq->queue_id;
10987
10988                lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
10989                        "6087 CQ setup: cq[%d]-id=%d, parent eq[%d]-id=%d\n",
10990                        qidx, cq->queue_id, qidx, eq->queue_id);
10991
10992                /* create the wq */
10993                rc = lpfc_wq_create(phba, wq, cq, qtype);
10994                if (rc) {
10995                        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10996                                "4618 Fail setup fastpath WQ (%d), rc = 0x%x\n",
10997                                qidx, (uint32_t)rc);
10998                        /* no need to tear down cq - caller will do so */
10999                        return rc;
11000                }
11001
11002                /* Bind this CQ/WQ to the NVME ring */
11003                pring = wq->pring;
11004                pring->sli.sli4.wqp = (void *)wq;
11005                cq->pring = pring;
11006
11007                lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
11008                        "2593 WQ setup: wq[%d]-id=%d assoc=%d, cq[%d]-id=%d\n",
11009                        qidx, wq->queue_id, wq->assoc_qid, qidx, cq->queue_id);
11010        } else {
11011                rc = lpfc_mq_create(phba, wq, cq, LPFC_MBOX);
11012                if (rc) {
11013                        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11014                                        "0539 Failed setup of slow-path MQ: "
11015                                        "rc = 0x%x\n", rc);
11016                        /* no need to tear down cq - caller will do so */
11017                        return rc;
11018                }
11019
11020                lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
11021                        "2589 MBX MQ setup: wq-id=%d, parent cq-id=%d\n",
11022                        phba->sli4_hba.mbx_wq->queue_id,
11023                        phba->sli4_hba.mbx_cq->queue_id);
11024        }
11025
11026        return 0;
11027}
11028
11029/**
11030 * lpfc_setup_cq_lookup - Setup the CQ lookup table
11031 * @phba: pointer to lpfc hba data structure.
11032 *
11033 * This routine populates the cq_lookup table with all
11034 * available CQ queue_ids.
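      *
      * The table lets fast-path code (e.g. the EQ handlers in lpfc_sli.c)
      * resolve the cqid reported in an EQE directly to its CQ without
      * walking the parent EQ's child list.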
11035 **/
11036static void
11037lpfc_setup_cq_lookup(struct lpfc_hba *phba)
11038{
11039        struct lpfc_queue *eq, *childq;
11040        int qidx;
11041
11042        memset(phba->sli4_hba.cq_lookup, 0,
11043               (sizeof(struct lpfc_queue *) * (phba->sli4_hba.cq_max + 1)));
11044        /* Loop thru all IRQ vectors */
11045        for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) {
11046                /* Get the EQ corresponding to the IRQ vector */
11047                eq = phba->sli4_hba.hba_eq_hdl[qidx].eq;
11048                if (!eq)
11049                        continue;
11050                /* Loop through all CQs associated with that EQ */
11051                list_for_each_entry(childq, &eq->child_list, list) {
11052                        if (childq->queue_id > phba->sli4_hba.cq_max)
11053                                continue;
11054                        if (childq->subtype == LPFC_IO)
11055                                phba->sli4_hba.cq_lookup[childq->queue_id] =
11056                                        childq;
11057                }
11058        }
11059}
11060
11061/**
11062 * lpfc_sli4_queue_setup - Set up all the SLI4 queues
11063 * @phba: pointer to lpfc hba data structure.
11064 *
11065 * This routine is invoked to set up all the SLI4 queues for the FCoE HBA
11066 * operation.
11067 *
11068 * Return codes
11069 *      0 - successful
11070 *      -ENOMEM - No available memory
11071 *      -EIO - The mailbox failed to complete successfully.
11072 **/
11073int
11074lpfc_sli4_queue_setup(struct lpfc_hba *phba)
11075{
11076        uint32_t shdr_status, shdr_add_status;
11077        union lpfc_sli4_cfg_shdr *shdr;
11078        struct lpfc_vector_map_info *cpup;
11079        struct lpfc_sli4_hdw_queue *qp;
11080        LPFC_MBOXQ_t *mboxq;
11081        int qidx, cpu;
11082        uint32_t length, usdelay;
11083        int rc = -ENOMEM;
11084
11085        /* Check for dual-ULP support */
11086        mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
11087        if (!mboxq) {
11088                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11089                                "3249 Unable to allocate memory for "
11090                                "QUERY_FW_CFG mailbox command\n");
11091                return -ENOMEM;
11092        }
11093        length = (sizeof(struct lpfc_mbx_query_fw_config) -
11094                  sizeof(struct lpfc_sli4_cfg_mhdr));
11095        lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
11096                         LPFC_MBOX_OPCODE_QUERY_FW_CFG,
11097                         length, LPFC_SLI4_MBX_EMBED);
11098
11099        rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
11100
11101        shdr = (union lpfc_sli4_cfg_shdr *)
11102                        &mboxq->u.mqe.un.sli4_config.header.cfg_shdr;
11103        shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
11104        shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
11105        if (shdr_status || shdr_add_status || rc) {
11106                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11107                                "3250 QUERY_FW_CFG mailbox failed with status "
11108                                "x%x add_status x%x, mbx status x%x\n",
11109                                shdr_status, shdr_add_status, rc);
11110                mempool_free(mboxq, phba->mbox_mem_pool);
11111                rc = -ENXIO;
11112                goto out_error;
11113        }
11114
11115        phba->sli4_hba.fw_func_mode =
11116                        mboxq->u.mqe.un.query_fw_cfg.rsp.function_mode;
11117        phba->sli4_hba.ulp0_mode = mboxq->u.mqe.un.query_fw_cfg.rsp.ulp0_mode;
11118        phba->sli4_hba.ulp1_mode = mboxq->u.mqe.un.query_fw_cfg.rsp.ulp1_mode;
11119        phba->sli4_hba.physical_port =
11120                        mboxq->u.mqe.un.query_fw_cfg.rsp.physical_port;
11121        lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
11122                        "3251 QUERY_FW_CFG: func_mode:x%x, ulp0_mode:x%x, "
11123                        "ulp1_mode:x%x\n", phba->sli4_hba.fw_func_mode,
11124                        phba->sli4_hba.ulp0_mode, phba->sli4_hba.ulp1_mode);
11125
11126        mempool_free(mboxq, phba->mbox_mem_pool);
11127
11128        /*
11129         * Set up HBA Event Queues (EQs)
11130         */
11131        qp = phba->sli4_hba.hdwq;
11132
11133        /* Set up HBA event queue */
11134        if (!qp) {
11135                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11136                                "3147 Fast-path EQs not allocated\n");
11137                rc = -ENOMEM;
11138                goto out_error;
11139        }
11140
11141        /* Loop thru all IRQ vectors */
11142        for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) {
11143                /* Create HBA Event Queues (EQs) in order */
11144                for_each_present_cpu(cpu) {
11145                        cpup = &phba->sli4_hba.cpu_map[cpu];
11146
11147                        /* Look for the CPU that's using that vector with
11148                         * LPFC_CPU_FIRST_IRQ set.
11149                         */
11150                        if (!(cpup->flag & LPFC_CPU_FIRST_IRQ))
11151                                continue;
11152                        if (qidx != cpup->eq)
11153                                continue;
11154
11155                        /* Create an EQ for that vector */
11156                        rc = lpfc_eq_create(phba, qp[cpup->hdwq].hba_eq,
11157                                            phba->cfg_fcp_imax);
11158                        if (rc) {
11159                                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11160                                                "0523 Failed setup of fast-path"
11161                                                " EQ (%d), rc = 0x%x\n",
11162                                                cpup->eq, (uint32_t)rc);
11163                                goto out_destroy;
11164                        }
11165
11166                        /* Save the EQ for that vector in the hba_eq_hdl */
11167                        phba->sli4_hba.hba_eq_hdl[cpup->eq].eq =
11168                                qp[cpup->hdwq].hba_eq;
11169
11170                        lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
11171                                        "2584 HBA EQ setup: queue[%d]-id=%d\n",
11172                                        cpup->eq,
11173                                        qp[cpup->hdwq].hba_eq->queue_id);
11174                }
11175        }
11176
11177        /* Loop thru all Hardware Queues */
11178        for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
11179                cpu = lpfc_find_cpu_handle(phba, qidx, LPFC_FIND_BY_HDWQ);
11180                cpup = &phba->sli4_hba.cpu_map[cpu];
11181
11182                /* Create the CQ/WQ corresponding to the Hardware Queue */
11183                rc = lpfc_create_wq_cq(phba,
11184                                       phba->sli4_hba.hdwq[cpup->hdwq].hba_eq,
11185                                       qp[qidx].io_cq,
11186                                       qp[qidx].io_wq,
11187                                       &phba->sli4_hba.hdwq[qidx].io_cq_map,
11188                                       qidx,
11189                                       LPFC_IO);
11190                if (rc) {
11191                        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11192                                        "0535 Failed to setup fastpath "
11193                                        "IO WQ/CQ (%d), rc = 0x%x\n",
11194                                        qidx, (uint32_t)rc);
11195                        goto out_destroy;
11196                }
11197        }
11198
11199        /*
11200         * Set up Slow Path Complete Queues (CQs)
11201         */
11202
11203        /* Set up slow-path MBOX CQ/MQ */
11204
11205        if (!phba->sli4_hba.mbx_cq || !phba->sli4_hba.mbx_wq) {
11206                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11207                                "0528 %s not allocated\n",
11208                                phba->sli4_hba.mbx_cq ?
11209                                "Mailbox WQ" : "Mailbox CQ");
11210                rc = -ENOMEM;
11211                goto out_destroy;
11212        }
11213
11214        rc = lpfc_create_wq_cq(phba, qp[0].hba_eq,
11215                               phba->sli4_hba.mbx_cq,
11216                               phba->sli4_hba.mbx_wq,
11217                               NULL, 0, LPFC_MBOX);
11218        if (rc) {
11219                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11220                        "0529 Failed setup of mailbox WQ/CQ: rc = 0x%x\n",
11221                        (uint32_t)rc);
11222                goto out_destroy;
11223        }
11224        if (phba->nvmet_support) {
11225                if (!phba->sli4_hba.nvmet_cqset) {
11226                        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11227                                        "3165 Fast-path NVME CQ Set "
11228                                        "array not allocated\n");
11229                        rc = -ENOMEM;
11230                        goto out_destroy;
11231                }
11232                if (phba->cfg_nvmet_mrq > 1) {
11233                        rc = lpfc_cq_create_set(phba,
11234                                        phba->sli4_hba.nvmet_cqset,
11235                                        qp,
11236                                        LPFC_WCQ, LPFC_NVMET);
11237                        if (rc) {
11238                                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11239                                                "3164 Failed setup of NVME CQ "
11240                                                "Set, rc = 0x%x\n",
11241                                                (uint32_t)rc);
11242                                goto out_destroy;
11243                        }
11244                } else {
11245                        /* Set up NVMET Receive Complete Queue */
11246                        rc = lpfc_cq_create(phba, phba->sli4_hba.nvmet_cqset[0],
11247                                            qp[0].hba_eq,
11248                                            LPFC_WCQ, LPFC_NVMET);
11249                        if (rc) {
11250                                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11251                                                "6089 Failed setup NVMET CQ: "
11252                                                "rc = 0x%x\n", (uint32_t)rc);
11253                                goto out_destroy;
11254                        }
11255                        phba->sli4_hba.nvmet_cqset[0]->chann = 0;
11256
11257                        lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
11258                                        "6090 NVMET CQ setup: cq-id=%d, "
11259                                        "parent eq-id=%d\n",
11260                                        phba->sli4_hba.nvmet_cqset[0]->queue_id,
11261                                        qp[0].hba_eq->queue_id);
11262                }
11263        }
11264
11265        /* Set up slow-path ELS WQ/CQ */
11266        if (!phba->sli4_hba.els_cq || !phba->sli4_hba.els_wq) {
11267                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11268                                "0530 ELS %s not allocated\n",
11269                                phba->sli4_hba.els_cq ? "WQ" : "CQ");
11270                rc = -ENOMEM;
11271                goto out_destroy;
11272        }
11273        rc = lpfc_create_wq_cq(phba, qp[0].hba_eq,
11274                               phba->sli4_hba.els_cq,
11275                               phba->sli4_hba.els_wq,
11276                               NULL, 0, LPFC_ELS);
11277        if (rc) {
11278                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11279                                "0525 Failed setup of ELS WQ/CQ: rc = 0x%x\n",
11280                                (uint32_t)rc);
11281                goto out_destroy;
11282        }
11283        lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
11284                        "2590 ELS WQ setup: wq-id=%d, parent cq-id=%d\n",
11285                        phba->sli4_hba.els_wq->queue_id,
11286                        phba->sli4_hba.els_cq->queue_id);
11287
11288        if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
11289                /* Set up NVME LS Complete Queue */
11290                if (!phba->sli4_hba.nvmels_cq || !phba->sli4_hba.nvmels_wq) {
11291                        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11292                                        "6091 LS %s not allocated\n",
11293                                        phba->sli4_hba.nvmels_cq ? "WQ" : "CQ");
11294                        rc = -ENOMEM;
11295                        goto out_destroy;
11296                }
11297                rc = lpfc_create_wq_cq(phba, qp[0].hba_eq,
11298                                       phba->sli4_hba.nvmels_cq,
11299                                       phba->sli4_hba.nvmels_wq,
11300                                       NULL, 0, LPFC_NVME_LS);
11301                if (rc) {
11302                        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11303                                        "0526 Failed setup of NVME LS WQ/CQ: "
11304                                        "rc = 0x%x\n", (uint32_t)rc);
11305                        goto out_destroy;
11306                }
11307
11308                lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
11309                                "6096 NVME LS WQ setup: wq-id=%d, "
11310                                "parent cq-id=%d\n",
11311                                phba->sli4_hba.nvmels_wq->queue_id,
11312                                phba->sli4_hba.nvmels_cq->queue_id);
11313        }
11314
11315        /*
11316         * Create NVMET Receive Queue (RQ)
11317         */
11318        if (phba->nvmet_support) {
11319                if ((!phba->sli4_hba.nvmet_cqset) ||
11320                    (!phba->sli4_hba.nvmet_mrq_hdr) ||
11321                    (!phba->sli4_hba.nvmet_mrq_data)) {
11322                        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11323                                        "6130 MRQ CQ Queues not "
11324                                        "allocated\n");
11325                        rc = -ENOMEM;
11326                        goto out_destroy;
11327                }
11328                if (phba->cfg_nvmet_mrq > 1) {
11329                        rc = lpfc_mrq_create(phba,
11330                                             phba->sli4_hba.nvmet_mrq_hdr,
11331                                             phba->sli4_hba.nvmet_mrq_data,
11332                                             phba->sli4_hba.nvmet_cqset,
11333                                             LPFC_NVMET);
11334                        if (rc) {
11335                                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11336                                                "6098 Failed setup of NVMET "
11337                                                "MRQ: rc = 0x%x\n",
11338                                                (uint32_t)rc);
11339                                goto out_destroy;
11340                        }
11341
11342                } else {
11343                        rc = lpfc_rq_create(phba,
11344                                            phba->sli4_hba.nvmet_mrq_hdr[0],
11345                                            phba->sli4_hba.nvmet_mrq_data[0],
11346                                            phba->sli4_hba.nvmet_cqset[0],
11347                                            LPFC_NVMET);
11348                        if (rc) {
11349                                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11350                                                "6057 Failed setup of NVMET "
11351                                                "Receive Queue: rc = 0x%x\n",
11352                                                (uint32_t)rc);
11353                                goto out_destroy;
11354                        }
11355
11356                        lpfc_printf_log(
11357                                phba, KERN_INFO, LOG_INIT,
11358                                "6099 NVMET RQ setup: hdr-rq-id=%d, "
11359                                "dat-rq-id=%d parent cq-id=%d\n",
11360                                phba->sli4_hba.nvmet_mrq_hdr[0]->queue_id,
11361                                phba->sli4_hba.nvmet_mrq_data[0]->queue_id,
11362                                phba->sli4_hba.nvmet_cqset[0]->queue_id);
11363
11364                }
11365        }
11366
11367        if (!phba->sli4_hba.hdr_rq || !phba->sli4_hba.dat_rq) {
11368                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11369                                "0540 Receive Queue not allocated\n");
11370                rc = -ENOMEM;
11371                goto out_destroy;
11372        }
11373
11374        rc = lpfc_rq_create(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq,
11375                            phba->sli4_hba.els_cq, LPFC_USOL);
11376        if (rc) {
11377                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11378                                "0541 Failed setup of Receive Queue: "
11379                                "rc = 0x%x\n", (uint32_t)rc);
11380                goto out_destroy;
11381        }
11382
11383        lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
11384                        "2592 USL RQ setup: hdr-rq-id=%d, dat-rq-id=%d "
11385                        "parent cq-id=%d\n",
11386                        phba->sli4_hba.hdr_rq->queue_id,
11387                        phba->sli4_hba.dat_rq->queue_id,
11388                        phba->sli4_hba.els_cq->queue_id);
11389
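             /* Convert the interrupts-per-second cap into a per-EQ
              * coalescing delay in microseconds: with LPFC_SEC_TO_USEC
              * (1000000), e.g. cfg_fcp_imax = 50000 yields usdelay = 20.
              * A zero cfg_fcp_imax leaves the delay disabled.
              */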
11390        if (phba->cfg_fcp_imax)
11391                usdelay = LPFC_SEC_TO_USEC / phba->cfg_fcp_imax;
11392        else
11393                usdelay = 0;
11394
11395        for (qidx = 0; qidx < phba->cfg_irq_chann;
11396             qidx += LPFC_MAX_EQ_DELAY_EQID_CNT)
11397                lpfc_modify_hba_eq_delay(phba, qidx, LPFC_MAX_EQ_DELAY_EQID_CNT,
11398                                         usdelay);
11399
11400        if (phba->sli4_hba.cq_max) {
11401                kfree(phba->sli4_hba.cq_lookup);
11402                phba->sli4_hba.cq_lookup = kcalloc((phba->sli4_hba.cq_max + 1),
11403                        sizeof(struct lpfc_queue *), GFP_KERNEL);
11404                if (!phba->sli4_hba.cq_lookup) {
11405                        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11406                                        "0549 Failed setup of CQ Lookup table: "
11407                                        "size 0x%x\n", phba->sli4_hba.cq_max);
11408                        rc = -ENOMEM;
11409                        goto out_destroy;
11410                }
11411                lpfc_setup_cq_lookup(phba);
11412        }
11413        return 0;
11414
11415out_destroy:
11416        lpfc_sli4_queue_unset(phba);
11417out_error:
11418        return rc;
11419}
11420
11421/**
11422 * lpfc_sli4_queue_unset - Unset all the SLI4 queues
11423 * @phba: pointer to lpfc hba data structure.
11424 *
11425 * This routine is invoked to unset all the SLI4 queues for the FCoE HBA
11426 * operation.
11427 *
11433void
11434lpfc_sli4_queue_unset(struct lpfc_hba *phba)
11435{
11436        struct lpfc_sli4_hdw_queue *qp;
11437        struct lpfc_queue *eq;
11438        int qidx;
11439
11440        /* Unset mailbox command work queue */
11441        if (phba->sli4_hba.mbx_wq)
11442                lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq);
11443
11444        /* Unset NVME LS work queue */
11445        if (phba->sli4_hba.nvmels_wq)
11446                lpfc_wq_destroy(phba, phba->sli4_hba.nvmels_wq);
11447
11448        /* Unset ELS work queue */
11449        if (phba->sli4_hba.els_wq)
11450                lpfc_wq_destroy(phba, phba->sli4_hba.els_wq);
11451
11452        /* Unset unsolicited receive queue */
11453        if (phba->sli4_hba.hdr_rq)
11454                lpfc_rq_destroy(phba, phba->sli4_hba.hdr_rq,
11455                                phba->sli4_hba.dat_rq);
11456
11457        /* Unset mailbox command complete queue */
11458        if (phba->sli4_hba.mbx_cq)
11459                lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq);
11460
11461        /* Unset ELS complete queue */
11462        if (phba->sli4_hba.els_cq)
11463                lpfc_cq_destroy(phba, phba->sli4_hba.els_cq);
11464
11465        /* Unset NVME LS complete queue */
11466        if (phba->sli4_hba.nvmels_cq)
11467                lpfc_cq_destroy(phba, phba->sli4_hba.nvmels_cq);
11468
11469        if (phba->nvmet_support) {
11470                /* Unset NVMET MRQ queue */
11471                if (phba->sli4_hba.nvmet_mrq_hdr) {
11472                        for (qidx = 0; qidx < phba->cfg_nvmet_mrq; qidx++)
11473                                lpfc_rq_destroy(
11474                                        phba,
11475                                        phba->sli4_hba.nvmet_mrq_hdr[qidx],
11476                                        phba->sli4_hba.nvmet_mrq_data[qidx]);
11477                }
11478
11479                /* Unset NVMET CQ Set complete queue */
11480                if (phba->sli4_hba.nvmet_cqset) {
11481                        for (qidx = 0; qidx < phba->cfg_nvmet_mrq; qidx++)
11482                                lpfc_cq_destroy(
11483                                        phba, phba->sli4_hba.nvmet_cqset[qidx]);
11484                }
11485        }
11486
11487        /* Unset fast-path SLI4 queues */
11488        if (phba->sli4_hba.hdwq) {
11489                /* Loop thru all Hardware Queues */
11490                for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
11491                        /* Destroy the CQ/WQ corresponding to Hardware Queue */
11492                        qp = &phba->sli4_hba.hdwq[qidx];
11493                        lpfc_wq_destroy(phba, qp->io_wq);
11494                        lpfc_cq_destroy(phba, qp->io_cq);
11495                }
11496                /* Loop thru all IRQ vectors */
11497                for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) {
11498                        /* Destroy the EQ corresponding to the IRQ vector */
11499                        eq = phba->sli4_hba.hba_eq_hdl[qidx].eq;
11500                        lpfc_eq_destroy(phba, eq);
11501                }
11502        }
11503
11504        kfree(phba->sli4_hba.cq_lookup);
11505        phba->sli4_hba.cq_lookup = NULL;
11506        phba->sli4_hba.cq_max = 0;
11507}
11508
11509/**
11510 * lpfc_sli4_cq_event_pool_create - Create completion-queue event free pool
11511 * @phba: pointer to lpfc hba data structure.
11512 *
11513 * This routine is invoked to allocate and set up a pool of completion queue
11514 * events. The body of the completion queue event is a completion queue entry
11515 * (CQE). For now, this pool is used for the interrupt service routine to queue
11516 * the following HBA completion queue events for the worker thread to process:
11517 *   - Mailbox asynchronous events
11518 *   - Receive queue completion unsolicited events
11519 * Later, this can be used for all the slow-path events.
11520 *
11521 * Return codes
11522 *      0 - successful
11523 *      -ENOMEM - No available memory
11524 **/
11525static int
11526lpfc_sli4_cq_event_pool_create(struct lpfc_hba *phba)
11527{
11528        struct lpfc_cq_event *cq_event;
11529        int i;
11530
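             /* Seed the free pool with 4x the CQ entry count worth of
              * events so the interrupt service routine can queue slow-path
              * events to the worker thread without allocating memory.
              */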
11531        for (i = 0; i < (4 * phba->sli4_hba.cq_ecount); i++) {
11532                cq_event = kmalloc(sizeof(struct lpfc_cq_event), GFP_KERNEL);
11533                if (!cq_event)
11534                        goto out_pool_create_fail;
11535                list_add_tail(&cq_event->list,
11536                              &phba->sli4_hba.sp_cqe_event_pool);
11537        }
11538        return 0;
11539
11540out_pool_create_fail:
11541        lpfc_sli4_cq_event_pool_destroy(phba);
11542        return -ENOMEM;
11543}
11544
11545/**
11546 * lpfc_sli4_cq_event_pool_destroy - Free completion-queue event free pool
11547 * @phba: pointer to lpfc hba data structure.
11548 *
11549 * This routine is invoked to free the pool of completion queue events at
11550 * driver unload time. Note that it is the responsibility of the driver
11551 * cleanup routine to release all outstanding completion-queue events
11552 * allocated from this pool back into the pool before invoking this
11553 * routine to destroy the pool.
11554 **/
11555static void
11556lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *phba)
11557{
11558        struct lpfc_cq_event *cq_event, *next_cq_event;
11559
11560        list_for_each_entry_safe(cq_event, next_cq_event,
11561                                 &phba->sli4_hba.sp_cqe_event_pool, list) {
11562                list_del(&cq_event->list);
11563                kfree(cq_event);
11564        }
11565}
11566
11567/**
11568 * __lpfc_sli4_cq_event_alloc - Allocate a completion-queue event from free pool
11569 * @phba: pointer to lpfc hba data structure.
11570 *
11571 * This routine is the lock-free version of the API invoked to allocate a
11572 * completion-queue event from the free pool.
11573 *
11574 * Return: Pointer to the newly allocated completion-queue event if successful
11575 *         NULL otherwise.
11576 **/
11577struct lpfc_cq_event *
11578__lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba)
11579{
11580        struct lpfc_cq_event *cq_event = NULL;
11581
11582        list_remove_head(&phba->sli4_hba.sp_cqe_event_pool, cq_event,
11583                         struct lpfc_cq_event, list);
11584        return cq_event;
11585}
11586
11587/**
11588 * lpfc_sli4_cq_event_alloc - Allocate a completion-queue event from free pool
11589 * @phba: pointer to lpfc hba data structure.
11590 *
11591 * This routine is the locking version of the API invoked to allocate a
11592 * completion-queue event from the free pool.
11593 *
11594 * Return: Pointer to the newly allocated completion-queue event if successful
11595 *         NULL otherwise.
11596 **/
11597struct lpfc_cq_event *
11598lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba)
11599{
11600        struct lpfc_cq_event *cq_event;
11601        unsigned long iflags;
11602
11603        spin_lock_irqsave(&phba->hbalock, iflags);
11604        cq_event = __lpfc_sli4_cq_event_alloc(phba);
11605        spin_unlock_irqrestore(&phba->hbalock, iflags);
11606        return cq_event;
11607}
11608
11609/**
11610 * __lpfc_sli4_cq_event_release - Release a completion-queue event to free pool
11611 * @phba: pointer to lpfc hba data structure.
11612 * @cq_event: pointer to the completion queue event to be freed.
11613 *
11614 * This routine is the lock-free version of the API invoked to release a
11615 * completion-queue event back into the free pool.
11616 **/
11617void
11618__lpfc_sli4_cq_event_release(struct lpfc_hba *phba,
11619                             struct lpfc_cq_event *cq_event)
11620{
11621        list_add_tail(&cq_event->list, &phba->sli4_hba.sp_cqe_event_pool);
11622}
11623
11624/**
11625 * lpfc_sli4_cq_event_release - Release a completion-queue event to free pool
11626 * @phba: pointer to lpfc hba data structure.
11627 * @cq_event: pointer to the completion queue event to be freed.
11628 *
11629 * This routine is the locking version of the API invoked to release a
11630 * completion-queue event back into the free pool.
11631 **/
11632void
11633lpfc_sli4_cq_event_release(struct lpfc_hba *phba,
11634                           struct lpfc_cq_event *cq_event)
11635{
11636        unsigned long iflags;

11637        spin_lock_irqsave(&phba->hbalock, iflags);
11638        __lpfc_sli4_cq_event_release(phba, cq_event);
11639        spin_unlock_irqrestore(&phba->hbalock, iflags);
11640}
11641
11642/**
11643 * lpfc_sli4_cq_event_release_all - Release all cq events to the free pool
11644 * @phba: pointer to lpfc hba data structure.
11645 *
11646 * This routine is invoked to release all the pending completion-queue
11647 * events back into the free pool for device reset.
11648 **/
11649static void
11650lpfc_sli4_cq_event_release_all(struct lpfc_hba *phba)
11651{
11652        LIST_HEAD(cq_event_list);
11653        struct lpfc_cq_event *cq_event;
11654        unsigned long iflags;
11655
11656        /* Retrieve all the pending WCQEs from pending WCQE lists */
11657
11658        /* Pending ELS XRI abort events */
11659        spin_lock_irqsave(&phba->sli4_hba.els_xri_abrt_list_lock, iflags);
11660        list_splice_init(&phba->sli4_hba.sp_els_xri_aborted_work_queue,
11661                         &cq_event_list);
11662        spin_unlock_irqrestore(&phba->sli4_hba.els_xri_abrt_list_lock, iflags);
11663
11664        /* Pending async events */
11665        spin_lock_irqsave(&phba->sli4_hba.asynce_list_lock, iflags);
11666        list_splice_init(&phba->sli4_hba.sp_asynce_work_queue,
11667                         &cq_event_list);
11668        spin_unlock_irqrestore(&phba->sli4_hba.asynce_list_lock, iflags);
11669
11670        while (!list_empty(&cq_event_list)) {
11671                list_remove_head(&cq_event_list, cq_event,
11672                                 struct lpfc_cq_event, list);
11673                lpfc_sli4_cq_event_release(phba, cq_event);
11674        }
11675}
11676
11677/**
11678 * lpfc_pci_function_reset - Reset pci function.
11679 * @phba: pointer to lpfc hba data structure.
11680 *
11681 * This routine is invoked to request a PCI function reset. It destroys
11682 * all resources assigned to the PCI function that originates this request.
11683 *
11684 * Return codes
11685 *      0 - successful
11686 *      -ENOMEM - No available memory
11687 *      -ENXIO - The mailbox failed to complete successfully
      *      -ENODEV - The port did not become ready after reset
11688 **/
11689int
11690lpfc_pci_function_reset(struct lpfc_hba *phba)
11691{
11692        LPFC_MBOXQ_t *mboxq;
11693        uint32_t rc = 0, if_type;
11694        uint32_t shdr_status, shdr_add_status;
11695        uint32_t rdy_chk;
11696        uint32_t port_reset = 0;
11697        union lpfc_sli4_cfg_shdr *shdr;
11698        struct lpfc_register reg_data;
11699        uint16_t devid;
11700
11701        if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
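             /* if_type 0 resets through a SLI_FUNCTION_RESET mailbox
              * command; if_type 2/6 write the SLIPORT control register
              * and poll the port status register for RDY.
              */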
11702        switch (if_type) {
11703        case LPFC_SLI_INTF_IF_TYPE_0:
11704                mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
11705                                                       GFP_KERNEL);
11706                if (!mboxq) {
11707                        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11708                                        "0494 Unable to allocate memory for "
11709                                        "issuing SLI_FUNCTION_RESET mailbox "
11710                                        "command\n");
11711                        return -ENOMEM;
11712                }
11713
11714                /* Setup PCI function reset mailbox-ioctl command */
11715                lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
11716                                 LPFC_MBOX_OPCODE_FUNCTION_RESET, 0,
11717                                 LPFC_SLI4_MBX_EMBED);
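                     /* Issue by polling, then check both the mailbox return
                      * code and the SLI4 config header status below.
                      */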
11718                rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
11719                shdr = (union lpfc_sli4_cfg_shdr *)
11720                        &mboxq->u.mqe.un.sli4_config.header.cfg_shdr;
11721                shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
11722                shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
11723                                         &shdr->response);
11724                mempool_free(mboxq, phba->mbox_mem_pool);
11725                if (shdr_status || shdr_add_status || rc) {
11726                        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11727                                        "0495 SLI_FUNCTION_RESET mailbox "
11728                                        "failed with status x%x add_status x%x,"
11729                                        " mbx status x%x\n",
11730                                        shdr_status, shdr_add_status, rc);
11731                        rc = -ENXIO;
11732                }
11733                break;
11734        case LPFC_SLI_INTF_IF_TYPE_2:
11735        case LPFC_SLI_INTF_IF_TYPE_6:
11736wait:
11737                /*
11738                 * Poll the Port Status Register and wait for RDY for
11739                 * up to 30 seconds (1500 polls x 20 msec). If the port
11740                 * doesn't respond, treat it as an error.
11741                 */
11742                for (rdy_chk = 0; rdy_chk < 1500; rdy_chk++) {
11743                        if (lpfc_readl(phba->sli4_hba.u.if_type2.
11744                                STATUSregaddr, &reg_data.word0)) {
11745                                rc = -ENODEV;
11746                                goto out;
11747                        }
11748                        if (bf_get(lpfc_sliport_status_rdy, &reg_data))
11749                                break;
11750                        msleep(20);
11751                }
11752
11753                if (!bf_get(lpfc_sliport_status_rdy, &reg_data)) {
11754                        phba->work_status[0] = readl(
11755                                phba->sli4_hba.u.if_type2.ERR1regaddr);
11756                        phba->work_status[1] = readl(
11757                                phba->sli4_hba.u.if_type2.ERR2regaddr);
11758                        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11759                                        "2890 Port not ready, port status reg "
11760                                        "0x%x error 1=0x%x, error 2=0x%x\n",
11761                                        reg_data.word0,
11762                                        phba->work_status[0],
11763                                        phba->work_status[1]);
11764                        rc = -ENODEV;
11765                        goto out;
11766                }
11767
11768                if (bf_get(lpfc_sliport_status_pldv, &reg_data))
11769                        lpfc_pldv_detect = true;
11770
11771                if (!port_reset) {
11772                        /*
11773                         * Reset the port now
11774                         */
11775                        reg_data.word0 = 0;
11776                        bf_set(lpfc_sliport_ctrl_end, &reg_data,
11777                               LPFC_SLIPORT_LITTLE_ENDIAN);
11778                        bf_set(lpfc_sliport_ctrl_ip, &reg_data,
11779                               LPFC_SLIPORT_INIT_PORT);
11780                        writel(reg_data.word0, phba->sli4_hba.u.if_type2.
11781                               CTRLregaddr);
11782                        /* flush the posted write with a config read */
11783                        pci_read_config_word(phba->pcidev,
11784                                             PCI_DEVICE_ID, &devid);
11785
11786                        port_reset = 1;
11787                        msleep(20);
11788                        goto wait;
11789                } else if (bf_get(lpfc_sliport_status_rn, &reg_data)) {
11790                        rc = -ENODEV;
11791                        goto out;
11792                }
11793                break;
11794
11795        case LPFC_SLI_INTF_IF_TYPE_1:
11796        default:
11797                break;
11798        }
11799
11800out:
11801        /* Catch the not-ready port failure after a port reset. */
11802        if (rc) {
11803                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11804                                "3317 HBA not functional: IP Reset Failed "
11805                                "try: echo fw_reset > board_mode\n");
11806                rc = -ENODEV;
11807        }
11808
11809        return rc;
11810}
11811
11812/**
11813 * lpfc_sli4_pci_mem_setup - Setup SLI4 HBA PCI memory space.
11814 * @phba: pointer to lpfc hba data structure.
11815 *
11816 * This routine is invoked to set up the PCI device memory space for device
11817 * with SLI-4 interface spec.
11818 *
11819 * Return codes
11820 *      0 - successful
11821 *      other values - error
11822 **/
11823static int
11824lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba)
11825{
11826        struct pci_dev *pdev = phba->pcidev;
11827        unsigned long bar0map_len, bar1map_len, bar2map_len;
11828        int error;
11829        uint32_t if_type;
11830
11831        if (!pdev)
11832                return -ENODEV;
11833
11834        /* Set the device DMA mask: prefer 64-bit, fall back to 32-bit */
11835        error = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
11836        if (error)
11837                error = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
11838        if (error)
11839                return error;
11840
11841        /*
11842         * The BARs and register set definitions and offset locations are
11843         * dependent on the if_type.
11844         */
11845        if (pci_read_config_dword(pdev, LPFC_SLI_INTF,
11846                                  &phba->sli4_hba.sli_intf.word0)) {
11847                return -ENODEV;
11848        }
11849
11850        /* There is no SLI3 fallback for SLI4 devices. */
11851        if (bf_get(lpfc_sli_intf_valid, &phba->sli4_hba.sli_intf) !=
11852            LPFC_SLI_INTF_VALID) {
11853                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11854                                "2894 SLI_INTF reg contents invalid "
11855                                "sli_intf reg 0x%x\n",
11856                                phba->sli4_hba.sli_intf.word0);
11857                return -ENODEV;
11858        }
11859
11860        if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
11861        /*
11862         * Get the bus address of SLI4 device Bar regions and the
11863         * number of bytes required by each mapping. The mapping of the
11864         * particular PCI BARs regions is dependent on the type of
11865         * SLI4 device.
11866         */
11867        if (pci_resource_start(pdev, PCI_64BIT_BAR0)) {
11868                phba->pci_bar0_map = pci_resource_start(pdev, PCI_64BIT_BAR0);
11869                bar0map_len = pci_resource_len(pdev, PCI_64BIT_BAR0);
11870
11871                /*
11872                 * Map SLI4 PCI Config Space Register base to a kernel virtual
11873                 * addr
11874                 */
11875                phba->sli4_hba.conf_regs_memmap_p =
11876                        ioremap(phba->pci_bar0_map, bar0map_len);
11877                if (!phba->sli4_hba.conf_regs_memmap_p) {
11878                        dev_printk(KERN_ERR, &pdev->dev,
11879                                   "ioremap failed for SLI4 PCI config "
11880                                   "registers.\n");
11881                        return -ENODEV;
11882                }
11883                phba->pci_bar0_memmap_p = phba->sli4_hba.conf_regs_memmap_p;
11884                /* Set up BAR0 PCI config space register memory map */
11885                lpfc_sli4_bar0_register_memmap(phba, if_type);
11886        } else {
11887                phba->pci_bar0_map = pci_resource_start(pdev, 1);
11888                bar0map_len = pci_resource_len(pdev, 1);
11889                if (if_type >= LPFC_SLI_INTF_IF_TYPE_2) {
11890                        dev_printk(KERN_ERR, &pdev->dev,
11891                           "FATAL - No BAR0 mapping for SLI4, if_type 2\n");
11892                        return -ENODEV;
11893                }
11894                phba->sli4_hba.conf_regs_memmap_p =
11895                                ioremap(phba->pci_bar0_map, bar0map_len);
11896                if (!phba->sli4_hba.conf_regs_memmap_p) {
11897                        dev_printk(KERN_ERR, &pdev->dev,
11898                                "ioremap failed for SLI4 PCI config "
11899                                "registers.\n");
11900                        return -ENODEV;
11901                }
11902                lpfc_sli4_bar0_register_memmap(phba, if_type);
11903        }
11904
11905        if (if_type == LPFC_SLI_INTF_IF_TYPE_0) {
11906                if (pci_resource_start(pdev, PCI_64BIT_BAR2)) {
11907                        /*
11908                         * Map SLI4 if type 0 HBA Control Register base to a
11909                         * kernel virtual address and setup the registers.
11910                         */
11911                        phba->pci_bar1_map = pci_resource_start(pdev,
11912                                                                PCI_64BIT_BAR2);
11913                        bar1map_len = pci_resource_len(pdev, PCI_64BIT_BAR2);
11914                        phba->sli4_hba.ctrl_regs_memmap_p =
11915                                        ioremap(phba->pci_bar1_map,
11916                                                bar1map_len);
11917                        if (!phba->sli4_hba.ctrl_regs_memmap_p) {
11918                                dev_err(&pdev->dev,
11919                                           "ioremap failed for SLI4 HBA "
11920                                            "control registers.\n");
11921                                error = -ENOMEM;
11922                                goto out_iounmap_conf;
11923                        }
11924                        phba->pci_bar2_memmap_p =
11925                                         phba->sli4_hba.ctrl_regs_memmap_p;
11926                        lpfc_sli4_bar1_register_memmap(phba, if_type);
11927                } else {
11928                        error = -ENOMEM;
11929                        goto out_iounmap_conf;
11930                }
11931        }
11932
11933        if ((if_type == LPFC_SLI_INTF_IF_TYPE_6) &&
11934            (pci_resource_start(pdev, PCI_64BIT_BAR2))) {
11935                /*
11936                 * Map SLI4 if type 6 HBA Doorbell Register base to a kernel
11937                 * virtual address and setup the registers.
11938                 */
11939                phba->pci_bar1_map = pci_resource_start(pdev, PCI_64BIT_BAR2);
11940                bar1map_len = pci_resource_len(pdev, PCI_64BIT_BAR2);
11941                phba->sli4_hba.drbl_regs_memmap_p =
11942                                ioremap(phba->pci_bar1_map, bar1map_len);
11943                if (!phba->sli4_hba.drbl_regs_memmap_p) {
11944                        dev_err(&pdev->dev,
11945                           "ioremap failed for SLI4 HBA doorbell registers.\n");
11946                        error = -ENOMEM;
11947                        goto out_iounmap_conf;
11948                }
11949                phba->pci_bar2_memmap_p = phba->sli4_hba.drbl_regs_memmap_p;
11950                lpfc_sli4_bar1_register_memmap(phba, if_type);
11951        }
11952
11953        if (if_type == LPFC_SLI_INTF_IF_TYPE_0) {
11954                if (pci_resource_start(pdev, PCI_64BIT_BAR4)) {
11955                        /*
11956                         * Map SLI4 if type 0 HBA Doorbell Register base to
11957                         * a kernel virtual address and setup the registers.
11958                         */
11959                        phba->pci_bar2_map = pci_resource_start(pdev,
11960                                                                PCI_64BIT_BAR4);
11961                        bar2map_len = pci_resource_len(pdev, PCI_64BIT_BAR4);
11962                        phba->sli4_hba.drbl_regs_memmap_p =
11963                                        ioremap(phba->pci_bar2_map,
11964                                                bar2map_len);
11965                        if (!phba->sli4_hba.drbl_regs_memmap_p) {
11966                                dev_err(&pdev->dev,
11967                                           "ioremap failed for SLI4 HBA"
11968                                           " doorbell registers.\n");
11969                                error = -ENOMEM;
11970                                goto out_iounmap_ctrl;
11971                        }
11972                        phba->pci_bar4_memmap_p =
11973                                        phba->sli4_hba.drbl_regs_memmap_p;
11974                        error = lpfc_sli4_bar2_register_memmap(phba, LPFC_VF0);
11975                        if (error)
11976                                goto out_iounmap_all;
11977                } else {
11978                        error = -ENOMEM;
11979                        goto out_iounmap_all;
11980                }
11981        }
11982
11983        if (if_type == LPFC_SLI_INTF_IF_TYPE_6 &&
11984            pci_resource_start(pdev, PCI_64BIT_BAR4)) {
11985                /*
11986                 * Map SLI4 if type 6 HBA DPP Register base to a kernel
11987                 * virtual address and setup the registers.
11988                 */
11989                phba->pci_bar2_map = pci_resource_start(pdev, PCI_64BIT_BAR4);
11990                bar2map_len = pci_resource_len(pdev, PCI_64BIT_BAR4);
11991                phba->sli4_hba.dpp_regs_memmap_p =
11992                                ioremap(phba->pci_bar2_map, bar2map_len);
11993                if (!phba->sli4_hba.dpp_regs_memmap_p) {
11994                        dev_err(&pdev->dev,
11995                           "ioremap failed for SLI4 HBA dpp registers.\n");
11996                        error = -ENOMEM;
11997                        goto out_iounmap_ctrl;
11998                }
11999                phba->pci_bar4_memmap_p = phba->sli4_hba.dpp_regs_memmap_p;
12000        }
12001
12002        /* Set up the EQ/CQ register handling functions now */
12003        switch (if_type) {
12004        case LPFC_SLI_INTF_IF_TYPE_0:
12005        case LPFC_SLI_INTF_IF_TYPE_2:
12006                phba->sli4_hba.sli4_eq_clr_intr = lpfc_sli4_eq_clr_intr;
12007                phba->sli4_hba.sli4_write_eq_db = lpfc_sli4_write_eq_db;
12008                phba->sli4_hba.sli4_write_cq_db = lpfc_sli4_write_cq_db;
12009                break;
12010        case LPFC_SLI_INTF_IF_TYPE_6:
12011                phba->sli4_hba.sli4_eq_clr_intr = lpfc_sli4_if6_eq_clr_intr;
12012                phba->sli4_hba.sli4_write_eq_db = lpfc_sli4_if6_write_eq_db;
12013                phba->sli4_hba.sli4_write_cq_db = lpfc_sli4_if6_write_cq_db;
12014                break;
12015        default:
12016                break;
12017        }
12018
12019        return 0;
12020
12021out_iounmap_all:
12022        iounmap(phba->sli4_hba.drbl_regs_memmap_p);
12023out_iounmap_ctrl:
12024        iounmap(phba->sli4_hba.ctrl_regs_memmap_p);
12025out_iounmap_conf:
12026        iounmap(phba->sli4_hba.conf_regs_memmap_p);
12027
12028        return error;
12029}
12030
12031/**
12032 * lpfc_sli4_pci_mem_unset - Unset SLI4 HBA PCI memory space.
12033 * @phba: pointer to lpfc hba data structure.
12034 *
12035 * This routine is invoked to unset the PCI device memory space for device
12036 * with SLI-4 interface spec.
12037 **/
12038static void
12039lpfc_sli4_pci_mem_unset(struct lpfc_hba *phba)
12040{
12041        uint32_t if_type;

12042        if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
12043
12044        switch (if_type) {
12045        case LPFC_SLI_INTF_IF_TYPE_0:
12046                iounmap(phba->sli4_hba.drbl_regs_memmap_p);
12047                iounmap(phba->sli4_hba.ctrl_regs_memmap_p);
12048                iounmap(phba->sli4_hba.conf_regs_memmap_p);
12049                break;
12050        case LPFC_SLI_INTF_IF_TYPE_2:
12051                iounmap(phba->sli4_hba.conf_regs_memmap_p);
12052                break;
12053        case LPFC_SLI_INTF_IF_TYPE_6:
12054                iounmap(phba->sli4_hba.drbl_regs_memmap_p);
12055                iounmap(phba->sli4_hba.conf_regs_memmap_p);
12056                if (phba->sli4_hba.dpp_regs_memmap_p)
12057                        iounmap(phba->sli4_hba.dpp_regs_memmap_p);
12058                break;
12059        case LPFC_SLI_INTF_IF_TYPE_1:
12060        default:
12061                dev_printk(KERN_ERR, &phba->pcidev->dev,
12062                           "FATAL - unsupported SLI4 interface type - %d\n",
12063                           if_type);
12064                break;
12065        }
12066}
12067
12068/**
12069 * lpfc_sli_enable_msix - Enable MSI-X interrupt mode on SLI-3 device
12070 * @phba: pointer to lpfc hba data structure.
12071 *
12072 * This routine is invoked to enable the MSI-X interrupt vectors to device
12073 * with SLI-3 interface specs.
12074 *
12075 * Return codes
12076 *   0 - successful
12077 *   other values - error
12078 **/
12079static int
12080lpfc_sli_enable_msix(struct lpfc_hba *phba)
12081{
12082        int rc;
12083        LPFC_MBOXQ_t *pmb;
12084
12085        /* Set up MSI-X multi-message vectors */
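             /* min == max, so the vector allocation is all-or-nothing */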
12086        rc = pci_alloc_irq_vectors(phba->pcidev,
12087                        LPFC_MSIX_VECTORS, LPFC_MSIX_VECTORS, PCI_IRQ_MSIX);
12088        if (rc < 0) {
12089                lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
12090                                "0420 PCI enable MSI-X failed (%d)\n", rc);
12091                goto vec_fail_out;
12092        }
12093
12094        /*
12095         * Assign MSI-X vectors to interrupt handlers
12096         */
12097
12098        /* vector-0 is associated to slow-path handler */
12099        rc = request_irq(pci_irq_vector(phba->pcidev, 0),
12100                         &lpfc_sli_sp_intr_handler, 0,
12101                         LPFC_SP_DRIVER_HANDLER_NAME, phba);
12102        if (rc) {
12103                lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
12104                                "0421 MSI-X slow-path request_irq failed "
12105                                "(%d)\n", rc);
12106                goto msi_fail_out;
12107        }
12108
12109        /* vector-1 is associated to fast-path handler */
12110        rc = request_irq(pci_irq_vector(phba->pcidev, 1),
12111                         &lpfc_sli_fp_intr_handler, 0,
12112                         LPFC_FP_DRIVER_HANDLER_NAME, phba);
12113
12114        if (rc) {
12115                lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
12116                                "0429 MSI-X fast-path request_irq failed "
12117                                "(%d)\n", rc);
12118                goto irq_fail_out;
12119        }
12120
12121        /*
12122         * Configure HBA MSI-X attention conditions to messages
12123         */
12124        pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
12125
12126        if (!pmb) {
12127                rc = -ENOMEM;
12128                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
12129                                "0474 Unable to allocate memory for issuing "
12130                                "MBOX_CONFIG_MSI command\n");
12131                goto mem_fail_out;
12132        }
12133        rc = lpfc_config_msi(phba, pmb);
12134        if (rc)
12135                goto mbx_fail_out;
12136        rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
12137        if (rc != MBX_SUCCESS) {
12138                lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
12139                                "0351 Config MSI mailbox command failed, "
12140                                "mbxCmd x%x, mbxStatus x%x\n",
12141                                pmb->u.mb.mbxCommand, pmb->u.mb.mbxStatus);
12142                goto mbx_fail_out;
12143        }
12144
12145        /* Free memory allocated for mailbox command */
12146        mempool_free(pmb, phba->mbox_mem_pool);
12147        return rc;
12148
12149mbx_fail_out:
12150        /* Free memory allocated for mailbox command */
12151        mempool_free(pmb, phba->mbox_mem_pool);
12152
12153mem_fail_out:
12154        /* free the irq already requested */
12155        free_irq(pci_irq_vector(phba->pcidev, 1), phba);
12156
12157irq_fail_out:
12158        /* free the irq already requested */
12159        free_irq(pci_irq_vector(phba->pcidev, 0), phba);
12160
12161msi_fail_out:
12162        /* Unconfigure MSI-X capability structure */
12163        pci_free_irq_vectors(phba->pcidev);
12164
12165vec_fail_out:
12166        return rc;
12167}
12168
12169/**
12170 * lpfc_sli_enable_msi - Enable MSI interrupt mode on SLI-3 device.
12171 * @phba: pointer to lpfc hba data structure.
12172 *
12173 * This routine is invoked to enable the MSI interrupt mode for a device
12174 * with SLI-3 interface spec. The kernel function pci_enable_msi() is
12175 * called to enable the MSI vector. The device driver is responsible for
12176 * calling request_irq() to register the MSI vector with an interrupt
12177 * handler, which is done in this function.
12178 *
12179 * Return codes
12180 *      0 - successful
12181 *      other values - error
12182 **/
12183static int
12184lpfc_sli_enable_msi(struct lpfc_hba *phba)
12185{
12186        int rc;
12187
12188        rc = pci_enable_msi(phba->pcidev);
12189        if (!rc) {
12190                lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
12191                                "0012 PCI enable MSI mode success.\n");
12192        } else {
12193                lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
12194                                "0471 PCI enable MSI mode failed (%d)\n", rc);
12195                return rc;
12196        }
12197
12198        rc = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler,
12199                         0, LPFC_DRIVER_NAME, phba);
12200        if (rc) {
12201                pci_disable_msi(phba->pcidev);
12202                lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
12203                                "0478 MSI request_irq failed (%d)\n", rc);
12204        }
12205        return rc;
12206}
12207
12208/**
12209 * lpfc_sli_enable_intr - Enable device interrupt to SLI-3 device.
12210 * @phba: pointer to lpfc hba data structure.
12211 * @cfg_mode: Interrupt configuration mode (INTx, MSI or MSI-X).
12212 *
12213 * This routine is invoked to enable device interrupts and associate the
12214 * driver's interrupt handler(s) with interrupt vector(s) for a device with
12215 * SLI-3 interface spec. Depending on the interrupt mode configured for the
12216 * driver, it will try to fall back from the configured interrupt mode to an
12217 * interrupt mode which is supported by the platform, kernel, and device, in
12218 * the order:
12219 * MSI-X -> MSI -> INTx.
12220 *
12221 * Return codes
12222 *   0 - successful
12223 *   other values - error
12224 **/
12225static uint32_t
12226lpfc_sli_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
12227{
12228        uint32_t intr_mode = LPFC_INTR_ERROR;
12229        int retval;
12230
12231        /* Need to issue conf_port mbox cmd before conf_msi mbox cmd */
12232        retval = lpfc_sli_config_port(phba, LPFC_SLI_REV3);
12233        if (retval)
12234                return intr_mode;
12235        phba->hba_flag &= ~HBA_NEEDS_CFG_PORT;
12236
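             /* cfg_mode: 2 = MSI-X, 1 = MSI, 0 = INTx.  On failure, each
              * mode falls through to the next weaker one below.
              */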
12237        if (cfg_mode == 2) {
12238                /* Now, try to enable MSI-X interrupt mode */
12239                retval = lpfc_sli_enable_msix(phba);
12240                if (!retval) {
12241                        /* Indicate initialization to MSI-X mode */
12242                        phba->intr_type = MSIX;
12243                        intr_mode = 2;
12244                }
12245        }
12246
12247        /* Fall back to MSI if MSI-X initialization failed */
12248        if (cfg_mode >= 1 && phba->intr_type == NONE) {
12249                retval = lpfc_sli_enable_msi(phba);
12250                if (!retval) {
12251                        /* Indicate initialization to MSI mode */
12252                        phba->intr_type = MSI;
12253                        intr_mode = 1;
12254                }
12255        }
12256
12257        /* Fall back to INTx if both MSI-X and MSI initialization failed */
12258        if (phba->intr_type == NONE) {
12259                retval = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler,
12260                                     IRQF_SHARED, LPFC_DRIVER_NAME, phba);
12261                if (!retval) {
12262                        /* Indicate initialization to INTx mode */
12263                        phba->intr_type = INTx;
12264                        intr_mode = 0;
12265                }
12266        }
12267        return intr_mode;
12268}
12269
12270/**
12271 * lpfc_sli_disable_intr - Disable device interrupt to SLI-3 device.
12272 * @phba: pointer to lpfc hba data structure.
12273 *
12274 * This routine is invoked to disable device interrupt and disassociate the
12275 * driver's interrupt handler(s) from interrupt vector(s) to device with
12276 * SLI-3 interface spec. Depending on the interrupt mode, the driver will
12277 * release the interrupt vector(s) for the message signaled interrupt.
12278 **/
12279static void
12280lpfc_sli_disable_intr(struct lpfc_hba *phba)
12281{
12282        int nr_irqs, i;
12283
12284        if (phba->intr_type == MSIX)
12285                nr_irqs = LPFC_MSIX_VECTORS;
12286        else
12287                nr_irqs = 1;
12288
12289        for (i = 0; i < nr_irqs; i++)
12290                free_irq(pci_irq_vector(phba->pcidev, i), phba);
12291        pci_free_irq_vectors(phba->pcidev);
12292
12293        /* Reset interrupt management states */
12294        phba->intr_type = NONE;
12295        phba->sli.slistat.sli_intr = 0;
12296}
12297
12298/**
12299 * lpfc_find_cpu_handle - Find the CPU that corresponds to the specified Queue
12300 * @phba: pointer to lpfc hba data structure.
12301 * @id: EQ vector index or Hardware Queue index
12302 * @match: LPFC_FIND_BY_EQ = match by EQ
12303 *         LPFC_FIND_BY_HDWQ = match by Hardware Queue
12304 * Return: the CPU that matches the selection criteria
12305 */
12306static uint16_t
12307lpfc_find_cpu_handle(struct lpfc_hba *phba, uint16_t id, int match)
12308{
12309        struct lpfc_vector_map_info *cpup;
12310        int cpu;
12311
12312        /* Loop through all CPUs */
12313        for_each_present_cpu(cpu) {
12314                cpup = &phba->sli4_hba.cpu_map[cpu];
12315
12316                /* If we are matching by EQ, there may be multiple CPUs
12317                 * using the same vector, so select the one with
12318                 * LPFC_CPU_FIRST_IRQ set.
12319                 */
12320                if ((match == LPFC_FIND_BY_EQ) &&
12321                    (cpup->flag & LPFC_CPU_FIRST_IRQ) &&
12322                    (cpup->eq == id))
12323                        return cpu;
12324
12325                /* If matching by HDWQ, select the first CPU that matches */
12326                if ((match == LPFC_FIND_BY_HDWQ) && (cpup->hdwq == id))
12327                        return cpu;
12328        }
12329        return 0;
12330}
12331
12332#ifdef CONFIG_X86
12333/**
12334 * lpfc_find_hyper - Determine if the CPU map entry is hyper-threaded
12335 * @phba: pointer to lpfc hba data structure.
12336 * @cpu: CPU map index
12337 * @phys_id: CPU package physical id
12338 * @core_id: CPU core id
12339 */
12340static int
12341lpfc_find_hyper(struct lpfc_hba *phba, int cpu,
12342                uint16_t phys_id, uint16_t core_id)
12343{
12344        struct lpfc_vector_map_info *cpup;
12345        int idx;
12346
12347        for_each_present_cpu(idx) {
12348                cpup = &phba->sli4_hba.cpu_map[idx];
12349                /* Does the cpup match the one we are looking for */
12350                if ((cpup->phys_id == phys_id) &&
12351                    (cpup->core_id == core_id) &&
12352                    (cpu != idx))
12353                        return 1;
12354        }
12355        return 0;
12356}
12357#endif
12358
12359/**
12360 * lpfc_assign_eq_map_info - Assigns eq for vector_map structure
12361 * @phba: pointer to lpfc hba data structure.
12362 * @eqidx: index for eq and irq vector
12363 * @flag: flags to set for vector_map structure
12364 * @cpu: cpu used to index vector_map structure
12365 *
12366 * The routine assigns eq info into vector_map structure
12367 */
12368static inline void
12369lpfc_assign_eq_map_info(struct lpfc_hba *phba, uint16_t eqidx, uint16_t flag,
12370                        unsigned int cpu)
12371{
12372        struct lpfc_vector_map_info *cpup = &phba->sli4_hba.cpu_map[cpu];
12373        struct lpfc_hba_eq_hdl *eqhdl = lpfc_get_eq_hdl(eqidx);
12374
12375        cpup->eq = eqidx;
12376        cpup->flag |= flag;
12377
12378        lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
12379                        "3336 Set Affinity: CPU %d irq %d eq %d flag x%x\n",
12380                        cpu, eqhdl->irq, cpup->eq, cpup->flag);
12381}
12382
12383/**
12384 * lpfc_cpu_map_array_init - Initialize cpu_map structure
12385 * @phba: pointer to lpfc hba data structure.
12386 *
12387 * The routine initializes the cpu_map array structure
12388 */
12389static void
12390lpfc_cpu_map_array_init(struct lpfc_hba *phba)
12391{
12392        struct lpfc_vector_map_info *cpup;
12393        struct lpfc_eq_intr_info *eqi;
12394        int cpu;
12395
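             /* Start every possible cpu as unassigned; the affinity setup
              * in lpfc_cpu_affinity_check() fills in the real physical and
              * core ids and the eq/hdwq mappings later.
              */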
12396        for_each_possible_cpu(cpu) {
12397                cpup = &phba->sli4_hba.cpu_map[cpu];
12398                cpup->phys_id = LPFC_VECTOR_MAP_EMPTY;
12399                cpup->core_id = LPFC_VECTOR_MAP_EMPTY;
12400                cpup->hdwq = LPFC_VECTOR_MAP_EMPTY;
12401                cpup->eq = LPFC_VECTOR_MAP_EMPTY;
12402                cpup->flag = 0;
12403                eqi = per_cpu_ptr(phba->sli4_hba.eq_info, cpu);
12404                INIT_LIST_HEAD(&eqi->list);
12405                eqi->icnt = 0;
12406        }
12407}
12408
12409/**
12410 * lpfc_hba_eq_hdl_array_init - Initialize hba_eq_hdl structure
12411 * @phba: pointer to lpfc hba data structure.
12412 *
12413 * The routine initializes the hba_eq_hdl array structure
12414 */
12415static void
12416lpfc_hba_eq_hdl_array_init(struct lpfc_hba *phba)
12417{
12418        struct lpfc_hba_eq_hdl *eqhdl;
12419        int i;
12420
12421        for (i = 0; i < phba->cfg_irq_chann; i++) {
12422                eqhdl = lpfc_get_eq_hdl(i);
12423                eqhdl->irq = LPFC_VECTOR_MAP_EMPTY;
12424                eqhdl->phba = phba;
12425        }
12426}
12427
12428/**
12429 * lpfc_cpu_affinity_check - Check vector CPU affinity mappings
12430 * @phba: pointer to lpfc hba data structure.
12431 * @vectors: number of msix vectors allocated.
12432 *
12433 * The routine will figure out the CPU affinity assignment for every
12434 * MSI-X vector allocated for the HBA.
12435 * In addition, the CPU to IO channel mapping will be calculated
12436 * and the phba->sli4_hba.cpu_map array will reflect this.
12437 */
12438static void
12439lpfc_cpu_affinity_check(struct lpfc_hba *phba, int vectors)
12440{
12441        int i, cpu, idx, next_idx, new_cpu, start_cpu, first_cpu;
12442        int max_phys_id, min_phys_id;
12443        int max_core_id, min_core_id;
12444        struct lpfc_vector_map_info *cpup;
12445        struct lpfc_vector_map_info *new_cpup;
12446#ifdef CONFIG_X86
12447        struct cpuinfo_x86 *cpuinfo;
12448#endif
12449#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
12450        struct lpfc_hdwq_stat *c_stat;
12451#endif
12452
12453        max_phys_id = 0;
12454        min_phys_id = LPFC_VECTOR_MAP_EMPTY;
12455        max_core_id = 0;
12456        min_core_id = LPFC_VECTOR_MAP_EMPTY;
12457
12458        /* Update CPU map with physical id and core id of each CPU */
12459        for_each_present_cpu(cpu) {
12460                cpup = &phba->sli4_hba.cpu_map[cpu];
12461#ifdef CONFIG_X86
12462                cpuinfo = &cpu_data(cpu);
12463                cpup->phys_id = cpuinfo->phys_proc_id;
12464                cpup->core_id = cpuinfo->cpu_core_id;
12465                if (lpfc_find_hyper(phba, cpu, cpup->phys_id, cpup->core_id))
12466                        cpup->flag |= LPFC_CPU_MAP_HYPER;
12467#else
12468                /* No distinction between CPUs for other platforms */
12469                cpup->phys_id = 0;
12470                cpup->core_id = cpu;
12471#endif
12472
12473                lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
12474                                "3328 CPU %d physid %d coreid %d flag x%x\n",
12475                                cpu, cpup->phys_id, cpup->core_id, cpup->flag);
12476
12477                if (cpup->phys_id > max_phys_id)
12478                        max_phys_id = cpup->phys_id;
12479                if (cpup->phys_id < min_phys_id)
12480                        min_phys_id = cpup->phys_id;
12481
12482                if (cpup->core_id > max_core_id)
12483                        max_core_id = cpup->core_id;
12484                if (cpup->core_id < min_core_id)
12485                        min_core_id = cpup->core_id;
12486        }
12487
12488        /* After looking at each irq vector assigned to this pcidev, it's
12489         * possible to see that not ALL CPUs have been accounted for.
12490         * Next we will set any unassigned (unaffinitized) cpu map
12491         * entries to an IRQ on the same phys_id.
12492         */
12493        first_cpu = cpumask_first(cpu_present_mask);
12494        start_cpu = first_cpu;
12495
12496        for_each_present_cpu(cpu) {
12497                cpup = &phba->sli4_hba.cpu_map[cpu];
12498
12499                /* Is this CPU entry unassigned */
12500                if (cpup->eq == LPFC_VECTOR_MAP_EMPTY) {
12501                        /* Mark CPU as IRQ not assigned by the kernel */
12502                        cpup->flag |= LPFC_CPU_MAP_UNASSIGN;
12503
12504                        /* If so, find a new_cpup that's on the SAME
12505                         * phys_id as cpup. start_cpu will start where we
12506                         * left off so all unassigned entries don't get
12507                         * assigned the IRQ of the first entry.
12508                         */
12509                        new_cpu = start_cpu;
12510                        for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
12511                                new_cpup = &phba->sli4_hba.cpu_map[new_cpu];
12512                                if (!(new_cpup->flag & LPFC_CPU_MAP_UNASSIGN) &&
12513                                    (new_cpup->eq != LPFC_VECTOR_MAP_EMPTY) &&
12514                                    (new_cpup->phys_id == cpup->phys_id))
12515                                        goto found_same;
12516                                new_cpu = cpumask_next(
12517                                        new_cpu, cpu_present_mask);
12518                                if (new_cpu == nr_cpumask_bits)
12519                                        new_cpu = first_cpu;
12520                        }
12521                        /* At this point, we leave the CPU as unassigned */
12522                        continue;
12523found_same:
12524                        /* We found a matching phys_id, so copy the IRQ info */
12525                        cpup->eq = new_cpup->eq;
12526
12527                        /* Bump start_cpu to the next slot to minimize the
12528                         * chance of having multiple unassigned CPU entries
12529                         * selecting the same IRQ.
12530                         */
12531                        start_cpu = cpumask_next(new_cpu, cpu_present_mask);
12532                        if (start_cpu == nr_cpumask_bits)
12533                                start_cpu = first_cpu;
12534
12535                        lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
12536                                        "3337 Set Affinity: CPU %d "
12537                                        "eq %d from peer cpu %d same "
12538                                        "phys_id (%d)\n",
12539                                        cpu, cpup->eq, new_cpu,
12540                                        cpup->phys_id);
12541                }
12542        }
12543
12544        /* Set any unassigned cpu map entries to an IRQ on any phys_id */
12545        start_cpu = first_cpu;
12546
12547        for_each_present_cpu(cpu) {
12548                cpup = &phba->sli4_hba.cpu_map[cpu];
12549
12550                /* Is this entry unassigned */
12551                if (cpup->eq == LPFC_VECTOR_MAP_EMPTY) {
12552                        /* Mark it as IRQ not assigned by the kernel */
12553                        cpup->flag |= LPFC_CPU_MAP_UNASSIGN;
12554
12555                        /* If so, find a new_cpup that's on ANY phys_id
12556                         * as the cpup. start_cpu will start where we
12557                         * left off so all unassigned entries don't get
12558                         * assigned the IRQ of the first entry.
12559                         */
12560                        new_cpu = start_cpu;
12561                        for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
12562                                new_cpup = &phba->sli4_hba.cpu_map[new_cpu];
12563                                if (!(new_cpup->flag & LPFC_CPU_MAP_UNASSIGN) &&
12564                                    (new_cpup->eq != LPFC_VECTOR_MAP_EMPTY))
12565                                        goto found_any;
12566                                new_cpu = cpumask_next(
12567                                        new_cpu, cpu_present_mask);
12568                                if (new_cpu == nr_cpumask_bits)
12569                                        new_cpu = first_cpu;
12570                        }
12571                        /* We should never leave an entry unassigned */
12572                        lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12573                                        "3339 Set Affinity: CPU %d "
12574                                        "eq %d UNASSIGNED\n",
12575                                        cpu, cpup->eq);
12576                        continue;
12577found_any:
12578                        /* We found an available entry, copy the IRQ info */
12579                        cpup->eq = new_cpup->eq;
12580
12581                        /* Bump start_cpu to the next slot to minimize the
12582                         * chance of having multiple unassigned CPU entries
12583                         * selecting the same IRQ.
12584                         */
12585                        start_cpu = cpumask_next(new_cpu, cpu_present_mask);
12586                        if (start_cpu == nr_cpumask_bits)
12587                                start_cpu = first_cpu;
12588
12589                        lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
12590                                        "3338 Set Affinity: CPU %d "
12591                                        "eq %d from peer cpu %d (%d/%d)\n",
12592                                        cpu, cpup->eq, new_cpu,
12593                                        new_cpup->phys_id, new_cpup->core_id);
12594                }
12595        }
12596
12597        /* Assign hdwq indices that are unique across all cpus in the map
12598         * that are also FIRST_CPUs.
12599         */
12600        idx = 0;
12601        for_each_present_cpu(cpu) {
12602                cpup = &phba->sli4_hba.cpu_map[cpu];
12603
12604                /* Only FIRST IRQs get a hdwq index assignment. */
12605                if (!(cpup->flag & LPFC_CPU_FIRST_IRQ))
12606                        continue;
12607
12608                /* 1 to 1, the first LPFC_CPU_FIRST_IRQ cpus to a unique hdwq */
12609                cpup->hdwq = idx;
12610                idx++;
12611                lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
12612                                "3333 Set Affinity: CPU %d (phys %d core %d): "
12613                                "hdwq %d eq %d flg x%x\n",
12614                                cpu, cpup->phys_id, cpup->core_id,
12615                                cpup->hdwq, cpup->eq, cpup->flag);
12616        }
12617        /* Associate a hdwq with each cpu_map entry
12618         * This will be 1 to 1 - hdwq to cpu, unless there are fewer
12619         * hardware queues than CPUs. In that case we will just round-robin
12620         * the available hardware queues as they get assigned to CPUs.
12621         * The next_idx is the idx from the FIRST_CPU loop above to account
12622         * for irq_chann < hdwq.  The idx is used for round-robin assignments
12623         * and needs to start at 0.
12624         */
12625        next_idx = idx;
12626        start_cpu = 0;
12627        idx = 0;
12628        for_each_present_cpu(cpu) {
12629                cpup = &phba->sli4_hba.cpu_map[cpu];
12630
12631                /* FIRST cpus are already mapped. */
12632                if (cpup->flag & LPFC_CPU_FIRST_IRQ)
12633                        continue;
12634
12635                /* If the cfg_irq_chann < cfg_hdw_queue, set the hdwq
12636                 * of the unassigned cpus to the next idx so that all
12637                 * hdw queues are fully utilized.
12638                 */
12639                if (next_idx < phba->cfg_hdw_queue) {
12640                        cpup->hdwq = next_idx;
12641                        next_idx++;
12642                        continue;
12643                }
12644
12645                /* Not a First CPU and all hdw_queues are used.  Reuse a
12646                 * Hardware Queue for another CPU, so be smart about it
12647                 * and pick one that has its IRQ/EQ mapped to the same phys_id
12648                 * (CPU package) and core_id.
12649                 */
12650                new_cpu = start_cpu;
12651                for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
12652                        new_cpup = &phba->sli4_hba.cpu_map[new_cpu];
12653                        if (new_cpup->hdwq != LPFC_VECTOR_MAP_EMPTY &&
12654                            new_cpup->phys_id == cpup->phys_id &&
12655                            new_cpup->core_id == cpup->core_id) {
12656                                goto found_hdwq;
12657                        }
12658                        new_cpu = cpumask_next(new_cpu, cpu_present_mask);
12659                        if (new_cpu == nr_cpumask_bits)
12660                                new_cpu = first_cpu;
12661                }
12662
12663                /* If we can't match both phys_id and core_id,
12664                 * settle for just a phys_id match.
12665                 */
12666                new_cpu = start_cpu;
12667                for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
12668                        new_cpup = &phba->sli4_hba.cpu_map[new_cpu];
12669                        if (new_cpup->hdwq != LPFC_VECTOR_MAP_EMPTY &&
12670                            new_cpup->phys_id == cpup->phys_id)
12671                                goto found_hdwq;
12672
12673                        new_cpu = cpumask_next(new_cpu, cpu_present_mask);
12674                        if (new_cpu == nr_cpumask_bits)
12675                                new_cpu = first_cpu;
12676                }
12677
12678                /* Otherwise just round robin on cfg_hdw_queue */
12679                cpup->hdwq = idx % phba->cfg_hdw_queue;
12680                idx++;
12681                goto logit;
12682 found_hdwq:
12683                /* We found an available entry, copy the IRQ info */
12684                start_cpu = cpumask_next(new_cpu, cpu_present_mask);
12685                if (start_cpu == nr_cpumask_bits)
12686                        start_cpu = first_cpu;
12687                cpup->hdwq = new_cpup->hdwq;
12688 logit:
12689                lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
12690                                "3335 Set Affinity: CPU %d (phys %d core %d): "
12691                                "hdwq %d eq %d flg x%x\n",
12692                                cpu, cpup->phys_id, cpup->core_id,
12693                                cpup->hdwq, cpup->eq, cpup->flag);
12694        }
12695
12696        /*
12697         * Initialize the cpu_map slots for not-present cpus in case
12698         * a cpu is hot-added. Perform a simple hdwq round robin assignment.
12699         */
12700        idx = 0;
12701        for_each_possible_cpu(cpu) {
12702                cpup = &phba->sli4_hba.cpu_map[cpu];
12703#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
12704                c_stat = per_cpu_ptr(phba->sli4_hba.c_stat, cpu);
12705                c_stat->hdwq_no = cpup->hdwq;
12706#endif
12707                if (cpup->hdwq != LPFC_VECTOR_MAP_EMPTY)
12708                        continue;
12709
12710                cpup->hdwq = idx++ % phba->cfg_hdw_queue;
12711#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
12712                c_stat->hdwq_no = cpup->hdwq;
12713#endif
12714                lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
12715                                "3340 Set Affinity: not present "
12716                                "CPU %d hdwq %d\n",
12717                                cpu, cpup->hdwq);
12718        }
12719
12720        /* The cpu_map array will be used later during initialization
12721         * when EQ / CQ / WQs are allocated and configured.
12722         */
12723        return;
12724}
12725
12726/**
12727 * lpfc_cpuhp_get_eq - Get the eqs whose irq is serviced only by @cpu
12728 * @phba:   pointer to lpfc hba data structure.
12729 * @cpu:    cpu going offline
12730 * @eqlist: eq list to append to
12731 * Return: 0 on success, -ENOMEM if a temporary cpumask cannot be allocated
12732 */
12733static int
12734lpfc_cpuhp_get_eq(struct lpfc_hba *phba, unsigned int cpu,
12735                  struct list_head *eqlist)
12736{
12737        const struct cpumask *maskp;
12738        struct lpfc_queue *eq;
12739        struct cpumask *tmp;
12740        u16 idx;
12741
12742        tmp = kzalloc(cpumask_size(), GFP_KERNEL);
12743        if (!tmp)
12744                return -ENOMEM;
12745
12746        for (idx = 0; idx < phba->cfg_irq_chann; idx++) {
12747                maskp = pci_irq_get_affinity(phba->pcidev, idx);
12748                if (!maskp)
12749                        continue;
12750                /*
12751                 * if irq is not affinitized to the cpu going offline
12752                 * then we don't need to poll the eq attached
12753                 * to it.
12754                 */
12755                if (!cpumask_and(tmp, maskp, cpumask_of(cpu)))
12756                        continue;
12757                /* get the cpus that are online and are affinitized
12758                 * to this irq vector.  If the count is more than 1
12759                 * then cpuhp is not going to shut down this vector.
12760                 * Since this cpu has not gone offline yet, it is
12761                 * still counted, hence the > 1 test.
12762                 */
12763                cpumask_and(tmp, maskp, cpu_online_mask);
12764                if (cpumask_weight(tmp) > 1)
12765                        continue;
12766
                /* Now that we have an irq to shut down, get the eq
                 * mapped to this irq.  Note: multiple hdwq's in
                 * the software can share an eq, but eventually
                 * only one eq will be mapped to this vector.
                 */
12772                eq = phba->sli4_hba.hba_eq_hdl[idx].eq;
12773                list_add(&eq->_poll_list, eqlist);
12774        }
12775        kfree(tmp);
12776        return 0;
12777}
12778
12779static void __lpfc_cpuhp_remove(struct lpfc_hba *phba)
12780{
12781        if (phba->sli_rev != LPFC_SLI_REV4)
12782                return;
12783
12784        cpuhp_state_remove_instance_nocalls(lpfc_cpuhp_state,
12785                                            &phba->cpuhp);
12786        /*
12787         * unregistering the instance doesn't stop the polling
12788         * timer. Wait for the poll timer to retire.
12789         */
12790        synchronize_rcu();
12791        del_timer_sync(&phba->cpuhp_poll_timer);
12792}
12793
12794static void lpfc_cpuhp_remove(struct lpfc_hba *phba)
12795{
12796        if (phba->pport->fc_flag & FC_OFFLINE_MODE)
12797                return;
12798
12799        __lpfc_cpuhp_remove(phba);
12800}
12801
12802static void lpfc_cpuhp_add(struct lpfc_hba *phba)
12803{
12804        if (phba->sli_rev != LPFC_SLI_REV4)
12805                return;
12806
12807        rcu_read_lock();
12808
12809        if (!list_empty(&phba->poll_list))
12810                mod_timer(&phba->cpuhp_poll_timer,
12811                          jiffies + msecs_to_jiffies(LPFC_POLL_HB));
12812
12813        rcu_read_unlock();
12814
12815        cpuhp_state_add_instance_nocalls(lpfc_cpuhp_state,
12816                                         &phba->cpuhp);
12817}
12818
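/* Common checks for the cpu hotplug callbacks: returns true when the
 * event needs no further handling, setting *retval to -EAGAIN if the
 * driver is unloading or to 0 if the port is not SLI-4.
 */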
12819static int __lpfc_cpuhp_checks(struct lpfc_hba *phba, int *retval)
12820{
12821        if (phba->pport->load_flag & FC_UNLOADING) {
12822                *retval = -EAGAIN;
12823                return true;
12824        }
12825
12826        if (phba->sli_rev != LPFC_SLI_REV4) {
12827                *retval = 0;
12828                return true;
12829        }
12830
12831        /* proceed with the hotplug */
12832        return false;
12833}
12834
12835/**
12836 * lpfc_irq_set_aff - set IRQ affinity
12837 * @eqhdl: EQ handle
12838 * @cpu: cpu to set affinity
12839 *
12840 **/
12841static inline void
12842lpfc_irq_set_aff(struct lpfc_hba_eq_hdl *eqhdl, unsigned int cpu)
12843{
12844        cpumask_clear(&eqhdl->aff_mask);
12845        cpumask_set_cpu(cpu, &eqhdl->aff_mask);
12846        irq_set_status_flags(eqhdl->irq, IRQ_NO_BALANCING);
12847        irq_set_affinity(eqhdl->irq, &eqhdl->aff_mask);
12848}
12849
12850/**
12851 * lpfc_irq_clear_aff - clear IRQ affinity
12852 * @eqhdl: EQ handle
12853 *
12854 **/
12855static inline void
12856lpfc_irq_clear_aff(struct lpfc_hba_eq_hdl *eqhdl)
12857{
12858        cpumask_clear(&eqhdl->aff_mask);
12859        irq_clear_status_flags(eqhdl->irq, IRQ_NO_BALANCING);
12860}
12861
12862/**
12863 * lpfc_irq_rebalance - rebalances IRQ affinity according to cpuhp event
12864 * @phba: pointer to HBA context object.
12865 * @cpu: cpu going offline/online
12866 * @offline: true, cpu is going offline. false, cpu is coming online.
12867 *
 * If cpu is going offline, we'll make a best effort to find the next
12869 * online cpu on the phba's original_mask and migrate all offlining IRQ
12870 * affinities.
12871 *
12872 * If cpu is coming online, reaffinitize the IRQ back to the onlining cpu.
12873 *
12874 * Note: Call only if NUMA or NHT mode is enabled, otherwise rely on
12875 *       PCI_IRQ_AFFINITY to auto-manage IRQ affinity.
12876 *
12877 **/
12878static void
12879lpfc_irq_rebalance(struct lpfc_hba *phba, unsigned int cpu, bool offline)
12880{
12881        struct lpfc_vector_map_info *cpup;
12882        struct cpumask *aff_mask;
12883        unsigned int cpu_select, cpu_next, idx;
12884        const struct cpumask *orig_mask;
12885
12886        if (phba->irq_chann_mode == NORMAL_MODE)
12887                return;
12888
12889        orig_mask = &phba->sli4_hba.irq_aff_mask;
12890
12891        if (!cpumask_test_cpu(cpu, orig_mask))
12892                return;
12893
12894        cpup = &phba->sli4_hba.cpu_map[cpu];
12895
12896        if (!(cpup->flag & LPFC_CPU_FIRST_IRQ))
12897                return;
12898
12899        if (offline) {
12900                /* Find next online CPU on original mask */
12901                cpu_next = cpumask_next_wrap(cpu, orig_mask, cpu, true);
12902                cpu_select = lpfc_next_online_cpu(orig_mask, cpu_next);
12903
12904                /* Found a valid CPU */
12905                if ((cpu_select < nr_cpu_ids) && (cpu_select != cpu)) {
12906                        /* Go through each eqhdl and ensure offlining
12907                         * cpu aff_mask is migrated
12908                         */
12909                        for (idx = 0; idx < phba->cfg_irq_chann; idx++) {
12910                                aff_mask = lpfc_get_aff_mask(idx);
12911
12912                                /* Migrate affinity */
12913                                if (cpumask_test_cpu(cpu, aff_mask))
12914                                        lpfc_irq_set_aff(lpfc_get_eq_hdl(idx),
12915                                                         cpu_select);
12916                        }
12917                } else {
12918                        /* Rely on irqbalance if no online CPUs left on NUMA */
12919                        for (idx = 0; idx < phba->cfg_irq_chann; idx++)
12920                                lpfc_irq_clear_aff(lpfc_get_eq_hdl(idx));
12921                }
12922        } else {
12923                /* Migrate affinity back to this CPU */
12924                lpfc_irq_set_aff(lpfc_get_eq_hdl(cpup->eq), cpu);
12925        }
12926}
12927
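/* cpuhp offline callback: migrate IRQ affinity away from the offlining
 * cpu and start softirq polling of any EQ whose only online affinitized
 * cpu is the one going away, so its completions are still serviced.
 */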
12928static int lpfc_cpu_offline(unsigned int cpu, struct hlist_node *node)
12929{
12930        struct lpfc_hba *phba = hlist_entry_safe(node, struct lpfc_hba, cpuhp);
12931        struct lpfc_queue *eq, *next;
12932        LIST_HEAD(eqlist);
12933        int retval;
12934
12935        if (!phba) {
12936                WARN_ONCE(!phba, "cpu: %u. phba:NULL", raw_smp_processor_id());
12937                return 0;
12938        }
12939
12940        if (__lpfc_cpuhp_checks(phba, &retval))
12941                return retval;
12942
12943        lpfc_irq_rebalance(phba, cpu, true);
12944
12945        retval = lpfc_cpuhp_get_eq(phba, cpu, &eqlist);
12946        if (retval)
12947                return retval;
12948
12949        /* start polling on these eq's */
12950        list_for_each_entry_safe(eq, next, &eqlist, _poll_list) {
12951                list_del_init(&eq->_poll_list);
12952                lpfc_sli4_start_polling(eq);
12953        }
12954
12955        return 0;
12956}
12957
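/* cpuhp online callback: restore IRQ affinity to the onlining cpu and
 * stop polling the EQs that are serviced again by that cpu's interrupt
 * vector.
 */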
12958static int lpfc_cpu_online(unsigned int cpu, struct hlist_node *node)
12959{
12960        struct lpfc_hba *phba = hlist_entry_safe(node, struct lpfc_hba, cpuhp);
12961        struct lpfc_queue *eq, *next;
12962        unsigned int n;
12963        int retval;
12964
12965        if (!phba) {
12966                WARN_ONCE(!phba, "cpu: %u. phba:NULL", raw_smp_processor_id());
12967                return 0;
12968        }
12969
12970        if (__lpfc_cpuhp_checks(phba, &retval))
12971                return retval;
12972
12973        lpfc_irq_rebalance(phba, cpu, false);
12974
12975        list_for_each_entry_safe(eq, next, &phba->poll_list, _poll_list) {
12976                n = lpfc_find_cpu_handle(phba, eq->hdwq, LPFC_FIND_BY_HDWQ);
12977                if (n == cpu)
12978                        lpfc_sli4_stop_polling(eq);
12979        }
12980
12981        return 0;
12982}
12983
12984/**
12985 * lpfc_sli4_enable_msix - Enable MSI-X interrupt mode to SLI-4 device
12986 * @phba: pointer to lpfc hba data structure.
12987 *
12988 * This routine is invoked to enable the MSI-X interrupt vectors to device
12989 * with SLI-4 interface spec.  It also allocates MSI-X vectors and maps them
12990 * to cpus on the system.
12991 *
 * When cfg_irq_numa is enabled, the adapter will only allocate vectors for
 * the number of cpus on the same numa node as this adapter.  The vectors are
 * allocated without requesting OS affinity mapping.  A vector is allocated
 * for each cpu in the node, online or offline.  If the cpu is online,
 * affinity is set to that cpu.  If the cpu is offline, affinity is set to
 * the nearest online peer cpu within the numa node.  If there are no online
 * cpus within the numa node, affinity is not assigned and the OS may do as
 * it pleases.  This cpu-to-vector affinity mapping is consistent with the
 * way cpu online/offline events are handled when cfg_irq_numa is configured.
13002 *
13003 * If numa mode is not enabled and there is more than 1 vector allocated, then
13004 * the driver relies on the managed irq interface where the OS assigns vector to
13005 * cpu affinity.  The driver will then use that affinity mapping to setup its
13006 * cpu mapping table.
13007 *
13008 * Return codes
13009 * 0 - successful
13010 * other values - error
13011 **/
13012static int
13013lpfc_sli4_enable_msix(struct lpfc_hba *phba)
13014{
13015        int vectors, rc, index;
13016        char *name;
13017        const struct cpumask *aff_mask = NULL;
13018        unsigned int cpu = 0, cpu_cnt = 0, cpu_select = nr_cpu_ids;
13019        struct lpfc_vector_map_info *cpup;
13020        struct lpfc_hba_eq_hdl *eqhdl;
13021        const struct cpumask *maskp;
13022        unsigned int flags = PCI_IRQ_MSIX;
13023
13024        /* Set up MSI-X multi-message vectors */
13025        vectors = phba->cfg_irq_chann;
13026
13027        if (phba->irq_chann_mode != NORMAL_MODE)
13028                aff_mask = &phba->sli4_hba.irq_aff_mask;
13029
13030        if (aff_mask) {
13031                cpu_cnt = cpumask_weight(aff_mask);
13032                vectors = min(phba->cfg_irq_chann, cpu_cnt);
13033
                /* cpu: iterates over aff_mask, both offline and online cpus
                 * cpu_select: iterates over the online cpus in aff_mask to
                 * set affinity
                 */
13037                cpu = cpumask_first(aff_mask);
13038                cpu_select = lpfc_next_online_cpu(aff_mask, cpu);
13039        } else {
13040                flags |= PCI_IRQ_AFFINITY;
13041        }
13042
13043        rc = pci_alloc_irq_vectors(phba->pcidev, 1, vectors, flags);
13044        if (rc < 0) {
13045                lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
13046                                "0484 PCI enable MSI-X failed (%d)\n", rc);
13047                goto vec_fail_out;
13048        }
13049        vectors = rc;
13050
13051        /* Assign MSI-X vectors to interrupt handlers */
13052        for (index = 0; index < vectors; index++) {
13053                eqhdl = lpfc_get_eq_hdl(index);
13054                name = eqhdl->handler_name;
13055                memset(name, 0, LPFC_SLI4_HANDLER_NAME_SZ);
13056                snprintf(name, LPFC_SLI4_HANDLER_NAME_SZ,
13057                         LPFC_DRIVER_HANDLER_NAME"%d", index);
13058
13059                eqhdl->idx = index;
13060                rc = request_irq(pci_irq_vector(phba->pcidev, index),
13061                         &lpfc_sli4_hba_intr_handler, 0,
13062                         name, eqhdl);
13063                if (rc) {
13064                        lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
13065                                        "0486 MSI-X fast-path (%d) "
13066                                        "request_irq failed (%d)\n", index, rc);
13067                        goto cfg_fail_out;
13068                }
13069
13070                eqhdl->irq = pci_irq_vector(phba->pcidev, index);
13071
13072                if (aff_mask) {
13073                        /* If found a neighboring online cpu, set affinity */
13074                        if (cpu_select < nr_cpu_ids)
13075                                lpfc_irq_set_aff(eqhdl, cpu_select);
13076
13077                        /* Assign EQ to cpu_map */
13078                        lpfc_assign_eq_map_info(phba, index,
13079                                                LPFC_CPU_FIRST_IRQ,
13080                                                cpu);
13081
13082                        /* Iterate to next offline or online cpu in aff_mask */
13083                        cpu = cpumask_next(cpu, aff_mask);
13084
13085                        /* Find next online cpu in aff_mask to set affinity */
13086                        cpu_select = lpfc_next_online_cpu(aff_mask, cpu);
13087                } else if (vectors == 1) {
13088                        cpu = cpumask_first(cpu_present_mask);
13089                        lpfc_assign_eq_map_info(phba, index, LPFC_CPU_FIRST_IRQ,
13090                                                cpu);
13091                } else {
13092                        maskp = pci_irq_get_affinity(phba->pcidev, index);
13093
13094                        /* Loop through all CPUs associated with vector index */
13095                        for_each_cpu_and(cpu, maskp, cpu_present_mask) {
13096                                cpup = &phba->sli4_hba.cpu_map[cpu];
13097
                                /* If this is the first CPU that's assigned to
                                 * this vector, set LPFC_CPU_FIRST_IRQ.
                                 *
                                 * On certain platforms it's possible that irq
                                 * vectors are affinitized to all the cpus.
                                 * This can cause each cpu_map.eq to be set
                                 * to the last vector, overwriting all the
                                 * previous cpu_map.eq entries.  Ensure that
                                 * each vector receives a place in cpu_map.
                                 * A later call to lpfc_cpu_affinity_check
                                 * will ensure we are nicely balanced out.
                                 */
13110                                if (cpup->eq != LPFC_VECTOR_MAP_EMPTY)
13111                                        continue;
13112                                lpfc_assign_eq_map_info(phba, index,
13113                                                        LPFC_CPU_FIRST_IRQ,
13114                                                        cpu);
13115                                break;
13116                        }
13117                }
13118        }
13119
13120        if (vectors != phba->cfg_irq_chann) {
13121                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13122                                "3238 Reducing IO channels to match number of "
13123                                "MSI-X vectors, requested %d got %d\n",
13124                                phba->cfg_irq_chann, vectors);
13125                if (phba->cfg_irq_chann > vectors)
13126                        phba->cfg_irq_chann = vectors;
13127        }
13128
13129        return rc;
13130
13131cfg_fail_out:
        /* free the irqs already requested */
13133        for (--index; index >= 0; index--) {
13134                eqhdl = lpfc_get_eq_hdl(index);
13135                lpfc_irq_clear_aff(eqhdl);
13136                free_irq(eqhdl->irq, eqhdl);
13137        }
13138
13139        /* Unconfigure MSI-X capability structure */
13140        pci_free_irq_vectors(phba->pcidev);
13141
13142vec_fail_out:
13143        return rc;
13144}
13145
13146/**
13147 * lpfc_sli4_enable_msi - Enable MSI interrupt mode to SLI-4 device
13148 * @phba: pointer to lpfc hba data structure.
13149 *
 * This routine is invoked to enable the MSI interrupt mode for a device with
 * the SLI-4 interface spec. The kernel function pci_alloc_irq_vectors() is
 * called to enable the MSI vector. The device driver is responsible for
 * calling request_irq() to register the MSI vector with an interrupt
 * handler, which is done in this function.
13155 *
13156 * Return codes
13157 *      0 - successful
13158 *      other values - error
13159 **/
13160static int
13161lpfc_sli4_enable_msi(struct lpfc_hba *phba)
13162{
13163        int rc, index;
13164        unsigned int cpu;
13165        struct lpfc_hba_eq_hdl *eqhdl;
13166
13167        rc = pci_alloc_irq_vectors(phba->pcidev, 1, 1,
13168                                   PCI_IRQ_MSI | PCI_IRQ_AFFINITY);
        if (rc > 0) {
                lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
                                "0487 PCI enable MSI mode success.\n");
        } else {
13173                lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
13174                                "0488 PCI enable MSI mode failed (%d)\n", rc);
13175                return rc ? rc : -1;
13176        }
13177
13178        rc = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler,
13179                         0, LPFC_DRIVER_NAME, phba);
13180        if (rc) {
13181                pci_free_irq_vectors(phba->pcidev);
13182                lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
13183                                "0490 MSI request_irq failed (%d)\n", rc);
13184                return rc;
13185        }
13186
13187        eqhdl = lpfc_get_eq_hdl(0);
13188        eqhdl->irq = pci_irq_vector(phba->pcidev, 0);
13189
13190        cpu = cpumask_first(cpu_present_mask);
13191        lpfc_assign_eq_map_info(phba, 0, LPFC_CPU_FIRST_IRQ, cpu);
13192
13193        for (index = 0; index < phba->cfg_irq_chann; index++) {
13194                eqhdl = lpfc_get_eq_hdl(index);
13195                eqhdl->idx = index;
13196        }
13197
13198        return 0;
13199}
13200
13201/**
13202 * lpfc_sli4_enable_intr - Enable device interrupt to SLI-4 device
13203 * @phba: pointer to lpfc hba data structure.
13204 * @cfg_mode: Interrupt configuration mode (INTx, MSI or MSI-X).
13205 *
 * This routine is invoked to enable device interrupt and associate the
 * driver's interrupt handler(s) with interrupt vector(s) for a device with
 * the SLI-4 interface spec. Depending on the interrupt mode configured for
 * the driver, the driver will try to fall back from the configured interrupt
 * mode to an interrupt mode which is supported by the platform, kernel, and
 * device, in the order of:
 * MSI-X -> MSI -> IRQ.
13213 *
13214 * Return codes
13215 *      0 - successful
13216 *      other values - error
13217 **/
13218static uint32_t
13219lpfc_sli4_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
13220{
13221        uint32_t intr_mode = LPFC_INTR_ERROR;
13222        int retval, idx;
13223
        if (cfg_mode == 2) {
                /* Try to enable MSI-X interrupt mode first */
                retval = lpfc_sli4_enable_msix(phba);
                if (!retval) {
                        /* Indicate initialization to MSI-X mode */
                        phba->intr_type = MSIX;
                        intr_mode = 2;
                }
        }
13237
13238        /* Fallback to MSI if MSI-X initialization failed */
13239        if (cfg_mode >= 1 && phba->intr_type == NONE) {
13240                retval = lpfc_sli4_enable_msi(phba);
13241                if (!retval) {
13242                        /* Indicate initialization to MSI mode */
13243                        phba->intr_type = MSI;
13244                        intr_mode = 1;
13245                }
13246        }
13247
        /* Fallback to INTx if both MSI-X/MSI initialization failed */
13249        if (phba->intr_type == NONE) {
13250                retval = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler,
13251                                     IRQF_SHARED, LPFC_DRIVER_NAME, phba);
13252                if (!retval) {
13253                        struct lpfc_hba_eq_hdl *eqhdl;
13254                        unsigned int cpu;
13255
13256                        /* Indicate initialization to INTx mode */
13257                        phba->intr_type = INTx;
13258                        intr_mode = 0;
13259
13260                        eqhdl = lpfc_get_eq_hdl(0);
13261                        eqhdl->irq = pci_irq_vector(phba->pcidev, 0);
13262
13263                        cpu = cpumask_first(cpu_present_mask);
13264                        lpfc_assign_eq_map_info(phba, 0, LPFC_CPU_FIRST_IRQ,
13265                                                cpu);
13266                        for (idx = 0; idx < phba->cfg_irq_chann; idx++) {
13267                                eqhdl = lpfc_get_eq_hdl(idx);
13268                                eqhdl->idx = idx;
13269                        }
13270                }
13271        }
13272        return intr_mode;
13273}
13274
13275/**
13276 * lpfc_sli4_disable_intr - Disable device interrupt to SLI-4 device
13277 * @phba: pointer to lpfc hba data structure.
13278 *
 * This routine is invoked to disable device interrupt and disassociate
 * the driver's interrupt handler(s) from interrupt vector(s) for a device
 * with the SLI-4 interface spec. Depending on the interrupt mode, the
 * driver will release the interrupt vector(s) for the message signaled
 * interrupt.
13283 **/
13284static void
13285lpfc_sli4_disable_intr(struct lpfc_hba *phba)
13286{
13287        /* Disable the currently initialized interrupt mode */
13288        if (phba->intr_type == MSIX) {
13289                int index;
13290                struct lpfc_hba_eq_hdl *eqhdl;
13291
13292                /* Free up MSI-X multi-message vectors */
13293                for (index = 0; index < phba->cfg_irq_chann; index++) {
13294                        eqhdl = lpfc_get_eq_hdl(index);
13295                        lpfc_irq_clear_aff(eqhdl);
13296                        free_irq(eqhdl->irq, eqhdl);
13297                }
13298        } else {
13299                free_irq(phba->pcidev->irq, phba);
13300        }
13301
13302        pci_free_irq_vectors(phba->pcidev);
13303
13304        /* Reset interrupt management states */
13305        phba->intr_type = NONE;
13306        phba->sli.slistat.sli_intr = 0;
13307}
13308
13309/**
13310 * lpfc_unset_hba - Unset SLI3 hba device initialization
13311 * @phba: pointer to lpfc hba data structure.
13312 *
 * This routine is invoked to unset the HBA device initialization steps for
 * a device with SLI-3 interface spec.
13315 **/
13316static void
13317lpfc_unset_hba(struct lpfc_hba *phba)
13318{
13319        struct lpfc_vport *vport = phba->pport;
13320        struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
13321
13322        spin_lock_irq(shost->host_lock);
13323        vport->load_flag |= FC_UNLOADING;
13324        spin_unlock_irq(shost->host_lock);
13325
13326        kfree(phba->vpi_bmask);
13327        kfree(phba->vpi_ids);
13328
13329        lpfc_stop_hba_timers(phba);
13330
13331        phba->pport->work_port_events = 0;
13332
13333        lpfc_sli_hba_down(phba);
13334
13335        lpfc_sli_brdrestart(phba);
13336
13337        lpfc_sli_disable_intr(phba);
13338
13339        return;
13340}
13341
13342/**
13343 * lpfc_sli4_xri_exchange_busy_wait - Wait for device XRI exchange busy
13344 * @phba: Pointer to HBA context object.
13345 *
 * This function is called in the SLI4 code path to wait for completion
 * of the device's outstanding XRI exchange busy conditions. It checks
 * the XRI exchange busy on outstanding FCP and ELS I/Os every 10ms for
 * up to 10 seconds; after that, it checks every 30 seconds, logs an
 * error message, and waits forever. Only when all XRI exchange busy
 * conditions have completed does the driver unload proceed with invoking
 * the function reset ioctl mailbox command to the CNA and the rest of
 * the driver unload resource release.
13354 **/
13355static void
13356lpfc_sli4_xri_exchange_busy_wait(struct lpfc_hba *phba)
13357{
13358        struct lpfc_sli4_hdw_queue *qp;
13359        int idx, ccnt;
13360        int wait_time = 0;
13361        int io_xri_cmpl = 1;
13362        int nvmet_xri_cmpl = 1;
13363        int els_xri_cmpl = list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list);
13364
13365        /* Driver just aborted IOs during the hba_unset process.  Pause
13366         * here to give the HBA time to complete the IO and get entries
13367         * into the abts lists.
13368         */
13369        msleep(LPFC_XRI_EXCH_BUSY_WAIT_T1 * 5);
13370
13371        /* Wait for NVME pending IO to flush back to transport. */
13372        if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
13373                lpfc_nvme_wait_for_io_drain(phba);
13374
13375        ccnt = 0;
13376        for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
13377                qp = &phba->sli4_hba.hdwq[idx];
13378                io_xri_cmpl = list_empty(&qp->lpfc_abts_io_buf_list);
13379                if (!io_xri_cmpl) /* if list is NOT empty */
13380                        ccnt++;
13381        }
13382        if (ccnt)
13383                io_xri_cmpl = 0;
13384
13385        if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
13386                nvmet_xri_cmpl =
13387                        list_empty(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
13388        }
13389
13390        while (!els_xri_cmpl || !io_xri_cmpl || !nvmet_xri_cmpl) {
13391                if (wait_time > LPFC_XRI_EXCH_BUSY_WAIT_TMO) {
13392                        if (!nvmet_xri_cmpl)
13393                                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13394                                                "6424 NVMET XRI exchange busy "
13395                                                "wait time: %d seconds.\n",
13396                                                wait_time/1000);
13397                        if (!io_xri_cmpl)
13398                                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13399                                                "6100 IO XRI exchange busy "
13400                                                "wait time: %d seconds.\n",
13401                                                wait_time/1000);
13402                        if (!els_xri_cmpl)
13403                                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13404                                                "2878 ELS XRI exchange busy "
13405                                                "wait time: %d seconds.\n",
13406                                                wait_time/1000);
13407                        msleep(LPFC_XRI_EXCH_BUSY_WAIT_T2);
13408                        wait_time += LPFC_XRI_EXCH_BUSY_WAIT_T2;
13409                } else {
13410                        msleep(LPFC_XRI_EXCH_BUSY_WAIT_T1);
13411                        wait_time += LPFC_XRI_EXCH_BUSY_WAIT_T1;
13412                }
13413
13414                ccnt = 0;
13415                for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
13416                        qp = &phba->sli4_hba.hdwq[idx];
13417                        io_xri_cmpl = list_empty(
13418                            &qp->lpfc_abts_io_buf_list);
13419                        if (!io_xri_cmpl) /* if list is NOT empty */
13420                                ccnt++;
13421                }
13422                if (ccnt)
13423                        io_xri_cmpl = 0;
13424
13425                if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
13426                        nvmet_xri_cmpl = list_empty(
13427                                &phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
13428                }
13429                els_xri_cmpl =
13430                        list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list);
13431
13432        }
13433}
13434
13435/**
13436 * lpfc_sli4_hba_unset - Unset the fcoe hba
13437 * @phba: Pointer to HBA context object.
13438 *
13439 * This function is called in the SLI4 code path to reset the HBA's FCoE
13440 * function. The caller is not required to hold any lock. This routine
 * issues a PCI function reset mailbox command to reset the FCoE function.
13442 * At the end of the function, it calls lpfc_hba_down_post function to
13443 * free any pending commands.
13444 **/
13445static void
13446lpfc_sli4_hba_unset(struct lpfc_hba *phba)
13447{
13448        int wait_cnt = 0;
13449        LPFC_MBOXQ_t *mboxq;
13450        struct pci_dev *pdev = phba->pcidev;
13451
13452        lpfc_stop_hba_timers(phba);
13453        hrtimer_cancel(&phba->cmf_timer);
13454
13455        if (phba->pport)
13456                phba->sli4_hba.intr_enable = 0;
13457
13458        /*
         * Gracefully wait out any potentially outstanding asynchronous
         * mailbox command.
13461         */
13462
        /* First, block any pending async mailbox command from being posted */
13464        spin_lock_irq(&phba->hbalock);
13465        phba->sli.sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
13466        spin_unlock_irq(&phba->hbalock);
        /* Now, try to wait it out if we can */
13468        while (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) {
13469                msleep(10);
13470                if (++wait_cnt > LPFC_ACTIVE_MBOX_WAIT_CNT)
13471                        break;
13472        }
13473        /* Forcefully release the outstanding mailbox command if timed out */
13474        if (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) {
13475                spin_lock_irq(&phba->hbalock);
13476                mboxq = phba->sli.mbox_active;
13477                mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED;
13478                __lpfc_mbox_cmpl_put(phba, mboxq);
13479                phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
13480                phba->sli.mbox_active = NULL;
13481                spin_unlock_irq(&phba->hbalock);
13482        }
13483
13484        /* Abort all iocbs associated with the hba */
13485        lpfc_sli_hba_iocb_abort(phba);
13486
13487        if (!pci_channel_offline(phba->pcidev))
13488                /* Wait for completion of device XRI exchange busy */
13489                lpfc_sli4_xri_exchange_busy_wait(phba);
13490
13491        /* per-phba callback de-registration for hotplug event */
13492        if (phba->pport)
13493                lpfc_cpuhp_remove(phba);
13494
13495        /* Disable PCI subsystem interrupt */
13496        lpfc_sli4_disable_intr(phba);
13497
13498        /* Disable SR-IOV if enabled */
13499        if (phba->cfg_sriov_nr_virtfn)
13500                pci_disable_sriov(pdev);
13501
        /* The kthread stop signal shall trigger work_done one more time */
13503        kthread_stop(phba->worker_thread);
13504
13505        /* Disable FW logging to host memory */
13506        lpfc_ras_stop_fwlog(phba);
13507
13508        /* Reset SLI4 HBA FCoE function */
13509        lpfc_pci_function_reset(phba);
13510
13511        /* release all queue allocated resources. */
13512        lpfc_sli4_queue_destroy(phba);
13513
13514        /* Free RAS DMA memory */
13515        if (phba->ras_fwlog.ras_enabled)
13516                lpfc_sli4_ras_dma_free(phba);
13517
13518        /* Stop the SLI4 device port */
13519        if (phba->pport)
13520                phba->pport->work_port_events = 0;
13521}
13522
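/* Advance the CRC over one data byte, LSB first: for each of the eight
 * bits, shift the CRC left and, when the bit shifted out differs from
 * the current data bit, fold in LPFC_CGN_CRC32_MAGIC_NUMBER and set the
 * low CRC bit.
 */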
13523static uint32_t
13524lpfc_cgn_crc32(uint32_t crc, u8 byte)
13525{
13526        uint32_t msb = 0;
13527        uint32_t bit;
13528
13529        for (bit = 0; bit < 8; bit++) {
13530                msb = (crc >> 31) & 1;
13531                crc <<= 1;
13532
13533                if (msb ^ (byte & 1)) {
13534                        crc ^= LPFC_CGN_CRC32_MAGIC_NUMBER;
13535                        crc |= 1;
13536                }
13537                byte >>= 1;
13538        }
13539        return crc;
13540}
13541
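/* Reverse the bit order of a 32-bit word (bit 0 <-> bit 31).  Used to
 * produce the reflected final CRC value in lpfc_cgn_calc_crc32().
 */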
13542static uint32_t
13543lpfc_cgn_reverse_bits(uint32_t wd)
13544{
13545        uint32_t result = 0;
13546        uint32_t i;
13547
13548        for (i = 0; i < 32; i++) {
13549                result <<= 1;
13550                result |= (1 & (wd >> i));
13551        }
13552        return result;
13553}
13554
/*
 * This routine implements the same CRC algorithm the HBA firmware
 * uses to validate the data integrity of the congestion buffer.
 */
13559uint32_t
13560lpfc_cgn_calc_crc32(void *ptr, uint32_t byteLen, uint32_t crc)
13561{
13562        uint32_t  i;
13563        uint32_t result;
13564        uint8_t  *data = (uint8_t *)ptr;
13565
13566        for (i = 0; i < byteLen; ++i)
13567                crc = lpfc_cgn_crc32(crc, data[i]);
13568
13569        result = ~lpfc_cgn_reverse_bits(crc);
13570        return result;
13571}
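
/* Typical use, as in lpfc_init_congestion_buf() below: compute the CRC
 * over the whole buffer with the fixed seed and store it little-endian:
 *
 *      crc = lpfc_cgn_calc_crc32(cp, LPFC_CGN_INFO_SZ, LPFC_CGN_CRC32_SEED);
 *      cp->cgn_info_crc = cpu_to_le32(crc);
 */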
13572
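/* Initialize the congestion info buffer: reset the driver's congestion
 * counters, fill in the header (version, mode and levels, start time,
 * LUN queue depth, FPIN frequencies) and seal the buffer with the
 * firmware-compatible CRC.
 */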
13573void
13574lpfc_init_congestion_buf(struct lpfc_hba *phba)
13575{
13576        struct lpfc_cgn_info *cp;
13577        struct timespec64 cmpl_time;
13578        struct tm broken;
13579        uint16_t size;
13580        uint32_t crc;
13581
13582        lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
13583                        "6235 INIT Congestion Buffer %p\n", phba->cgn_i);
13584
13585        if (!phba->cgn_i)
13586                return;
13587        cp = (struct lpfc_cgn_info *)phba->cgn_i->virt;
13588
13589        atomic_set(&phba->cgn_fabric_warn_cnt, 0);
13590        atomic_set(&phba->cgn_fabric_alarm_cnt, 0);
13591        atomic_set(&phba->cgn_sync_alarm_cnt, 0);
13592        atomic_set(&phba->cgn_sync_warn_cnt, 0);
13593
13594        atomic_set(&phba->cgn_driver_evt_cnt, 0);
13595        atomic_set(&phba->cgn_latency_evt_cnt, 0);
13596        atomic64_set(&phba->cgn_latency_evt, 0);
13597        phba->cgn_evt_minute = 0;
13598        phba->hba_flag &= ~HBA_CGN_DAY_WRAP;
13599
13600        memset(cp, 0xff, offsetof(struct lpfc_cgn_info, cgn_stat));
13601        cp->cgn_info_size = cpu_to_le16(LPFC_CGN_INFO_SZ);
13602        cp->cgn_info_version = LPFC_CGN_INFO_V3;
13603
13604        /* cgn parameters */
13605        cp->cgn_info_mode = phba->cgn_p.cgn_param_mode;
13606        cp->cgn_info_level0 = phba->cgn_p.cgn_param_level0;
13607        cp->cgn_info_level1 = phba->cgn_p.cgn_param_level1;
13608        cp->cgn_info_level2 = phba->cgn_p.cgn_param_level2;
13609
13610        ktime_get_real_ts64(&cmpl_time);
13611        time64_to_tm(cmpl_time.tv_sec, 0, &broken);
13612
13613        cp->cgn_info_month = broken.tm_mon + 1;
13614        cp->cgn_info_day = broken.tm_mday;
13615        cp->cgn_info_year = broken.tm_year - 100; /* relative to 2000 */
13616        cp->cgn_info_hour = broken.tm_hour;
13617        cp->cgn_info_minute = broken.tm_min;
13618        cp->cgn_info_second = broken.tm_sec;
13619
13620        lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT | LOG_INIT,
13621                        "2643 CGNInfo Init: Start Time "
13622                        "%d/%d/%d %d:%d:%d\n",
13623                        cp->cgn_info_day, cp->cgn_info_month,
13624                        cp->cgn_info_year, cp->cgn_info_hour,
13625                        cp->cgn_info_minute, cp->cgn_info_second);
13626
13627        /* Fill in default LUN qdepth */
13628        if (phba->pport) {
13629                size = (uint16_t)(phba->pport->cfg_lun_queue_depth);
13630                cp->cgn_lunq = cpu_to_le16(size);
13631        }
13632
13633        /* last used Index initialized to 0xff already */
13634
13635        cp->cgn_warn_freq = cpu_to_le16(LPFC_FPIN_INIT_FREQ);
13636        cp->cgn_alarm_freq = cpu_to_le16(LPFC_FPIN_INIT_FREQ);
13637        crc = lpfc_cgn_calc_crc32(cp, LPFC_CGN_INFO_SZ, LPFC_CGN_CRC32_SEED);
13638        cp->cgn_info_crc = cpu_to_le32(crc);
13639
13640        phba->cgn_evt_timestamp = jiffies +
13641                msecs_to_jiffies(LPFC_CGN_TIMER_TO_MIN);
13642}
13643
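/* Reset the congestion statistics area of the buffer, restamp the stat
 * start time, and recompute the CRC over the whole congestion buffer.
 */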
13644void
13645lpfc_init_congestion_stat(struct lpfc_hba *phba)
13646{
13647        struct lpfc_cgn_info *cp;
13648        struct timespec64 cmpl_time;
13649        struct tm broken;
13650        uint32_t crc;
13651
13652        lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
13653                        "6236 INIT Congestion Stat %p\n", phba->cgn_i);
13654
13655        if (!phba->cgn_i)
13656                return;
13657
13658        cp = (struct lpfc_cgn_info *)phba->cgn_i->virt;
13659        memset(&cp->cgn_stat, 0, sizeof(cp->cgn_stat));
13660
13661        ktime_get_real_ts64(&cmpl_time);
13662        time64_to_tm(cmpl_time.tv_sec, 0, &broken);
13663
13664        cp->cgn_stat_month = broken.tm_mon + 1;
13665        cp->cgn_stat_day = broken.tm_mday;
13666        cp->cgn_stat_year = broken.tm_year - 100; /* relative to 2000 */
13667        cp->cgn_stat_hour = broken.tm_hour;
13668        cp->cgn_stat_minute = broken.tm_min;
13669
13670        lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT | LOG_INIT,
13671                        "2647 CGNstat Init: Start Time "
13672                        "%d/%d/%d %d:%d\n",
13673                        cp->cgn_stat_day, cp->cgn_stat_month,
13674                        cp->cgn_stat_year, cp->cgn_stat_hour,
13675                        cp->cgn_stat_minute);
13676
13677        crc = lpfc_cgn_calc_crc32(cp, LPFC_CGN_INFO_SZ, LPFC_CGN_CRC32_SEED);
13678        cp->cgn_info_crc = cpu_to_le32(crc);
13679}
13680
/**
 * __lpfc_reg_congestion_buf - register congestion info buffer with HBA
 * @phba: Pointer to hba context object.
 * @reg: flag to determine register or unregister.
 *
 * Return: 0 on success; -ENXIO if no buffer is allocated or the mailbox
 * command fails; -ENOMEM if the mailbox command cannot be allocated.
 */
13686static int
13687__lpfc_reg_congestion_buf(struct lpfc_hba *phba, int reg)
13688{
13689        struct lpfc_mbx_reg_congestion_buf *reg_congestion_buf;
13690        union  lpfc_sli4_cfg_shdr *shdr;
13691        uint32_t shdr_status, shdr_add_status;
13692        LPFC_MBOXQ_t *mboxq;
13693        int length, rc;
13694
13695        if (!phba->cgn_i)
13696                return -ENXIO;
13697
13698        mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
13699        if (!mboxq) {
13700                lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
13701                                "2641 REG_CONGESTION_BUF mbox allocation fail: "
13702                                "HBA state x%x reg %d\n",
13703                                phba->pport->port_state, reg);
13704                return -ENOMEM;
13705        }
13706
13707        length = (sizeof(struct lpfc_mbx_reg_congestion_buf) -
13708                sizeof(struct lpfc_sli4_cfg_mhdr));
13709        lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
13710                         LPFC_MBOX_OPCODE_REG_CONGESTION_BUF, length,
13711                         LPFC_SLI4_MBX_EMBED);
13712        reg_congestion_buf = &mboxq->u.mqe.un.reg_congestion_buf;
13713        bf_set(lpfc_mbx_reg_cgn_buf_type, reg_congestion_buf, 1);
13714        if (reg > 0)
13715                bf_set(lpfc_mbx_reg_cgn_buf_cnt, reg_congestion_buf, 1);
13716        else
13717                bf_set(lpfc_mbx_reg_cgn_buf_cnt, reg_congestion_buf, 0);
13718        reg_congestion_buf->length = sizeof(struct lpfc_cgn_info);
13719        reg_congestion_buf->addr_lo =
13720                putPaddrLow(phba->cgn_i->phys);
13721        reg_congestion_buf->addr_hi =
13722                putPaddrHigh(phba->cgn_i->phys);
13723
13724        rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
13725        shdr = (union lpfc_sli4_cfg_shdr *)
13726                &mboxq->u.mqe.un.sli4_config.header.cfg_shdr;
13727        shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
13728        shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
13729                                 &shdr->response);
13730        mempool_free(mboxq, phba->mbox_mem_pool);
13731        if (shdr_status || shdr_add_status || rc) {
13732                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13733                                "2642 REG_CONGESTION_BUF mailbox "
13734                                "failed with status x%x add_status x%x,"
13735                                " mbx status x%x reg %d\n",
13736                                shdr_status, shdr_add_status, rc, reg);
13737                return -ENXIO;
13738        }
13739        return 0;
13740}
13741
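/* Unregister path: quiesce congestion management via lpfc_cmf_stop()
 * before clearing the buffer registration with the HBA.
 */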
13742int
13743lpfc_unreg_congestion_buf(struct lpfc_hba *phba)
13744{
13745        lpfc_cmf_stop(phba);
13746        return __lpfc_reg_congestion_buf(phba, 0);
13747}
13748
13749int
13750lpfc_reg_congestion_buf(struct lpfc_hba *phba)
13751{
13752        return __lpfc_reg_congestion_buf(phba, 1);
13753}
13754
13755/**
13756 * lpfc_get_sli4_parameters - Get the SLI4 Config PARAMETERS.
13757 * @phba: Pointer to HBA context object.
13758 * @mboxq: Pointer to the mailboxq memory for the mailbox command response.
13759 *
13760 * This function is called in the SLI4 code path to read the port's
13761 * sli4 capabilities.
13762 *
 * This function may be called from any context that can block-wait
13764 * for the completion.  The expectation is that this routine is called
13765 * typically from probe_one or from the online routine.
13766 **/
13767int
13768lpfc_get_sli4_parameters(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
13769{
13770        int rc;
13771        struct lpfc_mqe *mqe = &mboxq->u.mqe;
13772        struct lpfc_pc_sli4_params *sli4_params;
13773        uint32_t mbox_tmo;
13774        int length;
13775        bool exp_wqcq_pages = true;
13776        struct lpfc_sli4_parameters *mbx_sli4_parameters;
13777
13778        /*
13779         * By default, the driver assumes the SLI4 port requires RPI
13780         * header postings.  The SLI4_PARAM response will correct this
13781         * assumption.
13782         */
13783        phba->sli4_hba.rpi_hdrs_in_use = 1;
13784
13785        /* Read the port's SLI4 Config Parameters */
13786        length = (sizeof(struct lpfc_mbx_get_sli4_parameters) -
13787                  sizeof(struct lpfc_sli4_cfg_mhdr));
13788        lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
13789                         LPFC_MBOX_OPCODE_GET_SLI4_PARAMETERS,
13790                         length, LPFC_SLI4_MBX_EMBED);
13791        if (!phba->sli4_hba.intr_enable)
13792                rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
13793        else {
13794                mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq);
13795                rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
13796        }
13797        if (unlikely(rc))
13798                return rc;
13799        sli4_params = &phba->sli4_hba.pc_sli4_params;
13800        mbx_sli4_parameters = &mqe->un.get_sli4_parameters.sli4_parameters;
13801        sli4_params->if_type = bf_get(cfg_if_type, mbx_sli4_parameters);
13802        sli4_params->sli_rev = bf_get(cfg_sli_rev, mbx_sli4_parameters);
13803        sli4_params->sli_family = bf_get(cfg_sli_family, mbx_sli4_parameters);
13804        sli4_params->featurelevel_1 = bf_get(cfg_sli_hint_1,
13805                                             mbx_sli4_parameters);
13806        sli4_params->featurelevel_2 = bf_get(cfg_sli_hint_2,
13807                                             mbx_sli4_parameters);
13808        if (bf_get(cfg_phwq, mbx_sli4_parameters))
13809                phba->sli3_options |= LPFC_SLI4_PHWQ_ENABLED;
13810        else
13811                phba->sli3_options &= ~LPFC_SLI4_PHWQ_ENABLED;
13812        sli4_params->sge_supp_len = mbx_sli4_parameters->sge_supp_len;
13813        sli4_params->loopbk_scope = bf_get(cfg_loopbk_scope,
13814                                           mbx_sli4_parameters);
13815        sli4_params->oas_supported = bf_get(cfg_oas, mbx_sli4_parameters);
13816        sli4_params->cqv = bf_get(cfg_cqv, mbx_sli4_parameters);
13817        sli4_params->mqv = bf_get(cfg_mqv, mbx_sli4_parameters);
13818        sli4_params->wqv = bf_get(cfg_wqv, mbx_sli4_parameters);
13819        sli4_params->rqv = bf_get(cfg_rqv, mbx_sli4_parameters);
13820        sli4_params->eqav = bf_get(cfg_eqav, mbx_sli4_parameters);
13821        sli4_params->cqav = bf_get(cfg_cqav, mbx_sli4_parameters);
13822        sli4_params->wqsize = bf_get(cfg_wqsize, mbx_sli4_parameters);
13823        sli4_params->bv1s = bf_get(cfg_bv1s, mbx_sli4_parameters);
13824        sli4_params->pls = bf_get(cfg_pvl, mbx_sli4_parameters);
13825        sli4_params->sgl_pages_max = bf_get(cfg_sgl_page_cnt,
13826                                            mbx_sli4_parameters);
13827        sli4_params->wqpcnt = bf_get(cfg_wqpcnt, mbx_sli4_parameters);
13828        sli4_params->sgl_pp_align = bf_get(cfg_sgl_pp_align,
13829                                           mbx_sli4_parameters);
13830        phba->sli4_hba.extents_in_use = bf_get(cfg_ext, mbx_sli4_parameters);
13831        phba->sli4_hba.rpi_hdrs_in_use = bf_get(cfg_hdrr, mbx_sli4_parameters);
13832
13833        /* Check for Extended Pre-Registered SGL support */
13834        phba->cfg_xpsgl = bf_get(cfg_xpsgl, mbx_sli4_parameters);
13835
13836        /* Check for firmware nvme support */
13837        rc = (bf_get(cfg_nvme, mbx_sli4_parameters) &&
13838                     bf_get(cfg_xib, mbx_sli4_parameters));
13839
13840        if (rc) {
13841                /* Save this to indicate the Firmware supports NVME */
13842                sli4_params->nvme = 1;
13843
13844                /* Firmware NVME support, check driver FC4 NVME support */
13845                if (phba->cfg_enable_fc4_type == LPFC_ENABLE_FCP) {
13846                        lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_NVME,
13847                                        "6133 Disabling NVME support: "
13848                                        "FC4 type not supported: x%x\n",
13849                                        phba->cfg_enable_fc4_type);
13850                        goto fcponly;
13851                }
13852        } else {
13853                /* No firmware NVME support, check driver FC4 NVME support */
13854                sli4_params->nvme = 0;
13855                if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
13856                        lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_NVME,
13857                                        "6101 Disabling NVME support: Not "
13858                                        "supported by firmware (%d %d) x%x\n",
13859                                        bf_get(cfg_nvme, mbx_sli4_parameters),
13860                                        bf_get(cfg_xib, mbx_sli4_parameters),
13861                                        phba->cfg_enable_fc4_type);
13862fcponly:
13863                        phba->nvmet_support = 0;
13864                        phba->cfg_nvmet_mrq = 0;
13865                        phba->cfg_nvme_seg_cnt = 0;
13866
13867                        /* If no FC4 type support, move to just SCSI support */
13868                        if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP))
13869                                return -ENODEV;
13870                        phba->cfg_enable_fc4_type = LPFC_ENABLE_FCP;
13871                }
13872        }
13873
13874        /* If the NVME FC4 type is enabled, scale the sg_seg_cnt to
13875         * accommodate 512K and 1M IOs in a single nvme buf.
13876         */
13877        if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
13878                phba->cfg_sg_seg_cnt = LPFC_MAX_NVME_SEG_CNT;
13879
13880        /* Enable embedded Payload BDE if support is indicated */
13881        if (bf_get(cfg_pbde, mbx_sli4_parameters))
13882                phba->cfg_enable_pbde = 1;
13883        else
13884                phba->cfg_enable_pbde = 0;
13885
13886        /*
13887         * To support Suppress Response feature we must satisfy 3 conditions.
13888         * lpfc_suppress_rsp module parameter must be set (default).
13889         * In SLI4-Parameters Descriptor:
13890         * Extended Inline Buffers (XIB) must be supported.
13891         * Suppress Response IU Not Supported (SRIUNS) must NOT be supported
13892         * (double negative).
13893         */
13894        if (phba->cfg_suppress_rsp && bf_get(cfg_xib, mbx_sli4_parameters) &&
13895            !(bf_get(cfg_nosr, mbx_sli4_parameters)))
13896                phba->sli.sli_flag |= LPFC_SLI_SUPPRESS_RSP;
13897        else
13898                phba->cfg_suppress_rsp = 0;
13899
13900        if (bf_get(cfg_eqdr, mbx_sli4_parameters))
13901                phba->sli.sli_flag |= LPFC_SLI_USE_EQDR;
13902
13903        /* Make sure that sge_supp_len can be handled by the driver */
13904        if (sli4_params->sge_supp_len > LPFC_MAX_SGE_SIZE)
13905                sli4_params->sge_supp_len = LPFC_MAX_SGE_SIZE;
13906
13907        /*
13908         * Check whether the adapter supports an embedded copy of the
13909         * FCP CMD IU within the WQE for FCP_Ixxx commands. In order
13910         * to use this option, 128-byte WQEs must be used.
13911         */
13912        if (bf_get(cfg_ext_embed_cb, mbx_sli4_parameters))
13913                phba->fcp_embed_io = 1;
13914        else
13915                phba->fcp_embed_io = 0;
13916
13917        lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_NVME,
13918                        "6422 XIB %d PBDE %d: FCP %d NVME %d %d %d\n",
13919                        bf_get(cfg_xib, mbx_sli4_parameters),
13920                        phba->cfg_enable_pbde,
13921                        phba->fcp_embed_io, sli4_params->nvme,
13922                        phba->cfg_nvme_embed_cmd, phba->cfg_suppress_rsp);
13923
13924        if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
13925            LPFC_SLI_INTF_IF_TYPE_2) &&
13926            (bf_get(lpfc_sli_intf_sli_family, &phba->sli4_hba.sli_intf) ==
13927                 LPFC_SLI_INTF_FAMILY_LNCR_A0))
13928                exp_wqcq_pages = false;
13929
13930        if ((bf_get(cfg_cqpsize, mbx_sli4_parameters) & LPFC_CQ_16K_PAGE_SZ) &&
13931            (bf_get(cfg_wqpsize, mbx_sli4_parameters) & LPFC_WQ_16K_PAGE_SZ) &&
13932            exp_wqcq_pages &&
13933            (sli4_params->wqsize & LPFC_WQ_SZ128_SUPPORT))
13934                phba->enab_exp_wqcq_pages = 1;
13935        else
13936                phba->enab_exp_wqcq_pages = 0;
13937        /*
13938         * Check if the SLI port supports MDS Diagnostics
13939         */
13940        if (bf_get(cfg_mds_diags, mbx_sli4_parameters))
13941                phba->mds_diags_support = 1;
13942        else
13943                phba->mds_diags_support = 0;
13944
13945        /*
13946         * Check if the SLI port supports NSLER
13947         */
13948        if (bf_get(cfg_nsler, mbx_sli4_parameters))
13949                phba->nsler = 1;
13950        else
13951                phba->nsler = 0;
13952
13953        return 0;
13954}
13955
13956/**
13957 * lpfc_pci_probe_one_s3 - PCI probe func to reg SLI-3 device to PCI subsystem.
13958 * @pdev: pointer to PCI device
13959 * @pid: pointer to PCI device identifier
13960 *
 * This routine is to be called to attach a device with SLI-3 interface spec
 * to the PCI subsystem. When an Emulex HBA with SLI-3 interface spec is
 * presented on PCI bus, the kernel PCI subsystem looks at PCI device-specific
 * information of the device and driver to see if the driver states that it
 * can support this kind of device. If the match is successful, the driver
 * core invokes this routine. If this routine determines it can claim the HBA,
 * it does all the initialization that it needs to do to handle the HBA
 * properly.
13968 *
13969 * Return code
13970 *      0 - driver can claim the device
13971 *      negative value - driver can not claim the device
13972 **/
13973static int
13974lpfc_pci_probe_one_s3(struct pci_dev *pdev, const struct pci_device_id *pid)
13975{
13976        struct lpfc_hba   *phba;
13977        struct lpfc_vport *vport = NULL;
13978        struct Scsi_Host  *shost = NULL;
13979        int error;
13980        uint32_t cfg_mode, intr_mode;
13981
13982        /* Allocate memory for HBA structure */
13983        phba = lpfc_hba_alloc(pdev);
13984        if (!phba)
13985                return -ENOMEM;
13986
13987        /* Perform generic PCI device enabling operation */
13988        error = lpfc_enable_pci_dev(phba);
13989        if (error)
13990                goto out_free_phba;
13991
13992        /* Set up SLI API function jump table for PCI-device group-0 HBAs */
13993        error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_LP);
13994        if (error)
13995                goto out_disable_pci_dev;
13996
13997        /* Set up SLI-3 specific device PCI memory space */
13998        error = lpfc_sli_pci_mem_setup(phba);
13999        if (error) {
14000                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14001                                "1402 Failed to set up pci memory space.\n");
14002                goto out_disable_pci_dev;
14003        }
14004
14005        /* Set up SLI-3 specific device driver resources */
14006        error = lpfc_sli_driver_resource_setup(phba);
14007        if (error) {
14008                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14009                                "1404 Failed to set up driver resource.\n");
14010                goto out_unset_pci_mem_s3;
14011        }
14012
14013        /* Initialize and populate the iocb list per host */
14014
14015        error = lpfc_init_iocb_list(phba, LPFC_IOCB_LIST_CNT);
14016        if (error) {
14017                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14018                                "1405 Failed to initialize iocb list.\n");
14019                goto out_unset_driver_resource_s3;
14020        }
14021
14022        /* Set up common device driver resources */
14023        error = lpfc_setup_driver_resource_phase2(phba);
14024        if (error) {
14025                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14026                                "1406 Failed to set up driver resource.\n");
14027                goto out_free_iocb_list;
14028        }
14029
14030        /* Get the default values for Model Name and Description */
14031        lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);
14032
14033        /* Create SCSI host to the physical port */
14034        error = lpfc_create_shost(phba);
14035        if (error) {
14036                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14037                                "1407 Failed to create scsi host.\n");
14038                goto out_unset_driver_resource;
14039        }
14040
14041        /* Configure sysfs attributes */
14042        vport = phba->pport;
14043        error = lpfc_alloc_sysfs_attr(vport);
14044        if (error) {
14045                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14046                                "1476 Failed to allocate sysfs attr\n");
14047                goto out_destroy_shost;
14048        }
14049
14050        shost = lpfc_shost_from_vport(vport); /* save shost for error cleanup */
        /* Now, try to enable interrupt and bring up the device */
14052        cfg_mode = phba->cfg_use_msi;
14053        while (true) {
14054                /* Put device to a known state before enabling interrupt */
14055                lpfc_stop_port(phba);
14056                /* Configure and enable interrupt */
14057                intr_mode = lpfc_sli_enable_intr(phba, cfg_mode);
14058                if (intr_mode == LPFC_INTR_ERROR) {
14059                        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14060                                        "0431 Failed to enable interrupt.\n");
14061                        error = -ENODEV;
14062                        goto out_free_sysfs_attr;
14063                }
14064                /* SLI-3 HBA setup */
14065                if (lpfc_sli_hba_setup(phba)) {
14066                        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14067                                        "1477 Failed to set up hba\n");
14068                        error = -ENODEV;
14069                        goto out_remove_device;
14070                }
14071
14072                /* Wait 50ms for the interrupts of previous mailbox commands */
14073                msleep(50);
14074                /* Check active interrupts on message signaled interrupts */
14075                if (intr_mode == 0 ||
14076                    phba->sli.slistat.sli_intr > LPFC_MSIX_VECTORS) {
14077                        /* Log the current active interrupt mode */
14078                        phba->intr_mode = intr_mode;
14079                        lpfc_log_intr_mode(phba, intr_mode);
14080                        break;
14081                } else {
14082                        lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
14083                                        "0447 Configure interrupt mode (%d) "
14084                                        "failed active interrupt test.\n",
14085                                        intr_mode);
14086                        /* Disable the current interrupt mode */
14087                        lpfc_sli_disable_intr(phba);
14088                        /* Try next level of interrupt mode */
14089                        cfg_mode = --intr_mode;
14090                }
14091        }
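        /*
         * Note: cfg_use_msi encodes the mode as 2 = MSI-X, 1 = MSI and
         * 0 = INTx, so each failed active-interrupt test above steps the
         * mode down one level until plain INTx is reached.
         */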
14092
14093        /* Perform post initialization setup */
14094        lpfc_post_init_setup(phba);
14095
14096        /* Check if there are static vports to be created. */
14097        lpfc_create_static_vport(phba);
14098
14099        return 0;
14100
14101out_remove_device:
14102        lpfc_unset_hba(phba);
14103out_free_sysfs_attr:
14104        lpfc_free_sysfs_attr(vport);
14105out_destroy_shost:
14106        lpfc_destroy_shost(phba);
14107out_unset_driver_resource:
14108        lpfc_unset_driver_resource_phase2(phba);
14109out_free_iocb_list:
14110        lpfc_free_iocb_list(phba);
14111out_unset_driver_resource_s3:
14112        lpfc_sli_driver_resource_unset(phba);
14113out_unset_pci_mem_s3:
14114        lpfc_sli_pci_mem_unset(phba);
14115out_disable_pci_dev:
14116        lpfc_disable_pci_dev(phba);
14117        if (shost)
14118                scsi_host_put(shost);
14119out_free_phba:
14120        lpfc_hba_free(phba);
14121        return error;
14122}
14123
14124/**
14125 * lpfc_pci_remove_one_s3 - PCI func to unreg SLI-3 device from PCI subsystem.
14126 * @pdev: pointer to PCI device
14127 *
14128 * This routine is called to detach a device with SLI-3 interface
14129 * spec from PCI subsystem. When an Emulex HBA with SLI-3 interface spec is
14130 * removed from PCI bus, it performs all the necessary cleanup for the HBA
14131 * device to be removed from the PCI subsystem properly.
14132 **/
14133static void
14134lpfc_pci_remove_one_s3(struct pci_dev *pdev)
14135{
14136        struct Scsi_Host  *shost = pci_get_drvdata(pdev);
14137        struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
14138        struct lpfc_vport **vports;
14139        struct lpfc_hba   *phba = vport->phba;
14140        int i;
14141
14142        spin_lock_irq(&phba->hbalock);
14143        vport->load_flag |= FC_UNLOADING;
14144        spin_unlock_irq(&phba->hbalock);
14145
14146        lpfc_free_sysfs_attr(vport);
14147
14148        /* Release all the vports against this physical port */
14149        vports = lpfc_create_vport_work_array(phba);
14150        if (vports != NULL)
14151                for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
14152                        if (vports[i]->port_type == LPFC_PHYSICAL_PORT)
14153                                continue;
14154                        fc_vport_terminate(vports[i]->fc_vport);
14155                }
14156        lpfc_destroy_vport_work_array(phba, vports);
14157
14158        /* Remove FC host with the physical port */
14159        fc_remove_host(shost);
14160        scsi_remove_host(shost);
14161
14162        /* Clean up all nodes, mailboxes and IOs. */
14163        lpfc_cleanup(vport);
14164
14165        /*
14166         * Bring down the SLI Layer. This step disables all interrupts,
14167         * clears the rings, discards all mailbox commands, and resets
14168         * the HBA.
14169         */
14170
14171        /* HBA interrupt will be disabled after this call */
14172        lpfc_sli_hba_down(phba);
14173        /* Stopping the kthread will trigger work_done one more time */
14174        kthread_stop(phba->worker_thread);
14175        /* Final cleanup of txcmplq and reset the HBA */
14176        lpfc_sli_brdrestart(phba);
14177
14178        kfree(phba->vpi_bmask);
14179        kfree(phba->vpi_ids);
14180
14181        lpfc_stop_hba_timers(phba);
14182        spin_lock_irq(&phba->port_list_lock);
14183        list_del_init(&vport->listentry);
14184        spin_unlock_irq(&phba->port_list_lock);
14185
14186        lpfc_debugfs_terminate(vport);
14187
14188        /* Disable SR-IOV if enabled */
14189        if (phba->cfg_sriov_nr_virtfn)
14190                pci_disable_sriov(pdev);
14191
14192        /* Disable interrupt */
14193        lpfc_sli_disable_intr(phba);
14194
14195        scsi_host_put(shost);
14196
14197        /*
14198         * Call scsi_free before mem_free since scsi bufs are released to their
14199         * corresponding pools here.
14200         */
14201        lpfc_scsi_free(phba);
14202        lpfc_free_iocb_list(phba);
14203
14204        lpfc_mem_free_all(phba);
14205
14206        dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(),
14207                          phba->hbqslimp.virt, phba->hbqslimp.phys);
14208
14209        /* Free resources associated with SLI2 interface */
14210        dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
14211                          phba->slim2p.virt, phba->slim2p.phys);
14212
14213        /* unmap adapter SLIM and Control Registers */
14214        iounmap(phba->ctrl_regs_memmap_p);
14215        iounmap(phba->slim_memmap_p);
14216
14217        lpfc_hba_free(phba);
14218
14219        pci_release_mem_regions(pdev);
14220        pci_disable_device(pdev);
14221}
14222
14223/**
14224 * lpfc_pci_suspend_one_s3 - PCI func to suspend SLI-3 device for power mgmnt
14225 * @dev_d: pointer to device
14226 *
14227 * This routine is called from the kernel's PCI subsystem to support
14228 * system Power Management (PM) for a device with SLI-3 interface spec.
14229 * When PM invokes this method, it quiesces the device by stopping the
14230 * driver's worker thread, turning off the device's interrupt and DMA,
14231 * and bringing the device offline. Note that the driver implements only
14232 * the minimum PM requirements of a power-aware driver: all possible PM
14233 * messages (SUSPEND, HIBERNATE, FREEZE) delivered to the suspend()
14234 * method are treated as SUSPEND, and the driver fully reinitializes
14235 * the device in its resume() method. The device is therefore set to
14236 * the PCI_D3hot state in PCI config space rather than a state derived
14237 * from the PM message.
14238 *
14239 * Return code
14240 *      0 - driver suspended the device
14241 *      Error otherwise
14242 **/
14243static int __maybe_unused
14244lpfc_pci_suspend_one_s3(struct device *dev_d)
14245{
14246        struct Scsi_Host *shost = dev_get_drvdata(dev_d);
14247        struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
14248
14249        lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
14250                        "0473 PCI device Power Management suspend.\n");
14251
14252        /* Bring down the device */
14253        lpfc_offline_prep(phba, LPFC_MBX_WAIT);
14254        lpfc_offline(phba);
14255        kthread_stop(phba->worker_thread);
14256
14257        /* Disable interrupt from device */
14258        lpfc_sli_disable_intr(phba);
14259
14260        return 0;
14261}
14262
14263/**
14264 * lpfc_pci_resume_one_s3 - PCI func to resume SLI-3 device for power mgmnt
14265 * @dev_d: pointer to device
14266 *
14267 * This routine is called from the kernel's PCI subsystem to support system
14268 * Power Management (PM) for a device with SLI-3 interface spec. When PM
14269 * invokes this method, it restores the device's PCI config space state and
14270 * fully reinitializes the device and brings it online. Note that the
14271 * driver implements only the minimum PM requirements of a power-aware
14272 * driver: all possible PM messages (SUSPEND, HIBERNATE, FREEZE) delivered
14273 * to the suspend() method are treated as SUSPEND, and the driver fully
14274 * reinitializes the device in this resume() method. The device is
14275 * therefore set directly to PCI_D0 in PCI config space before its state
14276 * is restored.
14277 *
14278 * Return code
14279 *      0 - driver resumed the device
14280 *      Error otherwise
14281 **/
14282static int __maybe_unused
14283lpfc_pci_resume_one_s3(struct device *dev_d)
14284{
14285        struct Scsi_Host *shost = dev_get_drvdata(dev_d);
14286        struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
14287        uint32_t intr_mode;
14288        int error;
14289
14290        lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
14291                        "0452 PCI device Power Management resume.\n");
14292
14293        /* Startup the kernel thread for this host adapter. */
14294        phba->worker_thread = kthread_run(lpfc_do_work, phba,
14295                                        "lpfc_worker_%d", phba->brd_no);
14296        if (IS_ERR(phba->worker_thread)) {
14297                error = PTR_ERR(phba->worker_thread);
14298                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14299                                "0434 PM resume failed to start worker "
14300                                "thread: error=x%x.\n", error);
14301                return error;
14302        }
14303
14304        /* Init cpu_map array */
14305        lpfc_cpu_map_array_init(phba);
14306        /* Init hba_eq_hdl array */
14307        lpfc_hba_eq_hdl_array_init(phba);
14308        /* Configure and enable interrupt */
14309        intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode);
14310        if (intr_mode == LPFC_INTR_ERROR) {
14311                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14312                                "0430 PM resume Failed to enable interrupt\n");
14313                return -EIO;
14314        } else
14315                phba->intr_mode = intr_mode;
14316
14317        /* Restart HBA and bring it online */
14318        lpfc_sli_brdrestart(phba);
14319        lpfc_online(phba);
14320
14321        /* Log the current active interrupt mode */
14322        lpfc_log_intr_mode(phba, phba->intr_mode);
14323
14324        return 0;
14325}
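
/*
 * A minimal sketch (illustrative only; the driver's actual PM hookup is
 * defined elsewhere in this file) of how a __maybe_unused suspend/resume
 * pair such as the one above is typically bound to the PCI core with the
 * generic dev_pm_ops helpers from <linux/pm.h>:
 *
 *	static SIMPLE_DEV_PM_OPS(lpfc_pm_ops_sketch,
 *				 lpfc_pci_suspend_one_s3,
 *				 lpfc_pci_resume_one_s3);
 *
 * which is then referenced from struct pci_driver as
 * .driver.pm = &lpfc_pm_ops_sketch.
 */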
14326
14327/**
14328 * lpfc_sli_prep_dev_for_recover - Prepare SLI3 device for pci slot recover
14329 * @phba: pointer to lpfc hba data structure.
14330 *
14331 * This routine is called to prepare the SLI3 device for PCI slot recover. It
14332 * aborts all the outstanding SCSI I/Os to the pci device.
14333 **/
14334static void
14335lpfc_sli_prep_dev_for_recover(struct lpfc_hba *phba)
14336{
14337        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14338                        "2723 PCI channel I/O abort preparing for recovery\n");
14339
14340        /*
14341         * There may be errored I/Os pending in the HBA; abort all I/Os on
14342         * the txcmplq and let the SCSI mid-layer retry them to recover.
14343         */
14344        lpfc_sli_abort_fcp_rings(phba);
14345}
14346
14347/**
14348 * lpfc_sli_prep_dev_for_reset - Prepare SLI3 device for pci slot reset
14349 * @phba: pointer to lpfc hba data structure.
14350 *
14351 * This routine is called to prepare the SLI3 device for PCI slot reset. It
14352 * disables the device interrupt and pci device, and aborts the internal FCP
14353 * pending I/Os.
14354 **/
14355static void
14356lpfc_sli_prep_dev_for_reset(struct lpfc_hba *phba)
14357{
14358        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14359                        "2710 PCI channel disable preparing for reset\n");
14360
14361        /* Block any management I/Os to the device */
14362        lpfc_block_mgmt_io(phba, LPFC_MBX_WAIT);
14363
14364        /* Block all SCSI devices' I/Os on the host */
14365        lpfc_scsi_dev_block(phba);
14366
14367        /* Flush all driver's outstanding SCSI I/Os as we are to reset */
14368        lpfc_sli_flush_io_rings(phba);
14369
14370        /* stop all timers */
14371        lpfc_stop_hba_timers(phba);
14372
14373        /* Disable interrupt and pci device */
14374        lpfc_sli_disable_intr(phba);
14375        pci_disable_device(phba->pcidev);
14376}
14377
14378/**
14379 * lpfc_sli_prep_dev_for_perm_failure - Prepare SLI3 dev for pci slot disable
14380 * @phba: pointer to lpfc hba data structure.
14381 *
14382 * This routine is called to prepare the SLI3 device for permanent PCI slot
14383 * disabling. It blocks the SCSI transport layer traffic and flushes the FCP
14384 * pending I/Os.
14385 **/
14386static void
14387lpfc_sli_prep_dev_for_perm_failure(struct lpfc_hba *phba)
14388{
14389        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14390                        "2711 PCI channel permanent disable for failure\n");
14391        /* Block all SCSI devices' I/Os on the host */
14392        lpfc_scsi_dev_block(phba);
14393        lpfc_sli4_prep_dev_for_reset(phba);
14394
14395        /* stop all timers */
14396        lpfc_stop_hba_timers(phba);
14397
14398        /* Clean up all driver's outstanding SCSI I/Os */
14399        lpfc_sli_flush_io_rings(phba);
14400}
14401
14402/**
14403 * lpfc_io_error_detected_s3 - Method for handling SLI-3 device PCI I/O error
14404 * @pdev: pointer to PCI device.
14405 * @state: the current PCI connection state.
14406 *
14407 * This routine is called from the PCI subsystem for I/O error handling to
14408 * device with SLI-3 interface spec. This function is called by the PCI
14409 * subsystem after a PCI bus error affecting this device has been detected.
14410 * When this function is invoked, it will need to stop all the I/Os and
14411 * interrupt(s) to the device. Once that is done, it will return
14412 * PCI_ERS_RESULT_NEED_RESET for the PCI subsystem to perform proper recovery
14413 * as desired.
14414 *
14415 * Return codes
14416 *      PCI_ERS_RESULT_CAN_RECOVER - can be recovered with reset_link
14417 *      PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
14418 *      PCI_ERS_RESULT_DISCONNECT - device could not be recovered
14419 **/
14420static pci_ers_result_t
14421lpfc_io_error_detected_s3(struct pci_dev *pdev, pci_channel_state_t state)
14422{
14423        struct Scsi_Host *shost = pci_get_drvdata(pdev);
14424        struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
14425
14426        switch (state) {
14427        case pci_channel_io_normal:
14428                /* Non-fatal error, prepare for recovery */
14429                lpfc_sli_prep_dev_for_recover(phba);
14430                return PCI_ERS_RESULT_CAN_RECOVER;
14431        case pci_channel_io_frozen:
14432                /* Fatal error, prepare for slot reset */
14433                lpfc_sli_prep_dev_for_reset(phba);
14434                return PCI_ERS_RESULT_NEED_RESET;
14435        case pci_channel_io_perm_failure:
14436                /* Permanent failure, prepare for device down */
14437                lpfc_sli_prep_dev_for_perm_failure(phba);
14438                return PCI_ERS_RESULT_DISCONNECT;
14439        default:
14440                /* Unknown state, prepare and request slot reset */
14441                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14442                                "0472 Unknown PCI error state: x%x\n", state);
14443                lpfc_sli_prep_dev_for_reset(phba);
14444                return PCI_ERS_RESULT_NEED_RESET;
14445        }
14446}
14447
14448/**
14449 * lpfc_io_slot_reset_s3 - Method for restarting PCI SLI-3 device from scratch.
14450 * @pdev: pointer to PCI device.
14451 *
14452 * This routine is called from the PCI subsystem for error handling to
14453 * device with SLI-3 interface spec. This is called after PCI bus has been
14454 * reset to restart the PCI card from scratch, as if from a cold-boot.
14455 * During the PCI subsystem error recovery, after the driver returns
14456 * PCI_ERS_RESULT_NEED_RESET, the PCI subsystem will perform proper error
14457 * recovery and then call this routine before calling the .resume method
14458 * to recover the device. This function will initialize the HBA device and
14459 * enable its interrupt, but it will just put the HBA into an offline state
14460 * without passing any I/O traffic.
14461 *
14462 * Return codes
14463 *      PCI_ERS_RESULT_RECOVERED - the device has been recovered
14464 *      PCI_ERS_RESULT_DISCONNECT - device could not be recovered
14465 */
14466static pci_ers_result_t
14467lpfc_io_slot_reset_s3(struct pci_dev *pdev)
14468{
14469        struct Scsi_Host *shost = pci_get_drvdata(pdev);
14470        struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
14471        struct lpfc_sli *psli = &phba->sli;
14472        uint32_t intr_mode;
14473
14474        dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n");
14475        if (pci_enable_device_mem(pdev)) {
14476                printk(KERN_ERR "lpfc: Cannot re-enable "
14477                        "PCI device after reset.\n");
14478                return PCI_ERS_RESULT_DISCONNECT;
14479        }
14480
14481        pci_restore_state(pdev);
14482
14483        /*
14484         * pci_restore_state() clears the device's saved_state flag, so the
14485         * restored state must be saved again.
14486         */
14487        pci_save_state(pdev);
14488
14489        if (pdev->is_busmaster)
14490                pci_set_master(pdev);
14491
14492        spin_lock_irq(&phba->hbalock);
14493        psli->sli_flag &= ~LPFC_SLI_ACTIVE;
14494        spin_unlock_irq(&phba->hbalock);
14495
14496        /* Configure and enable interrupt */
14497        intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode);
14498        if (intr_mode == LPFC_INTR_ERROR) {
14499                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14500                                "0427 Cannot re-enable interrupt after "
14501                                "slot reset.\n");
14502                return PCI_ERS_RESULT_DISCONNECT;
14503        } else
14504                phba->intr_mode = intr_mode;
14505
14506        /* Take device offline, it will perform cleanup */
14507        lpfc_offline_prep(phba, LPFC_MBX_WAIT);
14508        lpfc_offline(phba);
14509        lpfc_sli_brdrestart(phba);
14510
14511        /* Log the current active interrupt mode */
14512        lpfc_log_intr_mode(phba, phba->intr_mode);
14513
14514        return PCI_ERS_RESULT_RECOVERED;
14515}
14516
14517/**
14518 * lpfc_io_resume_s3 - Method for resuming PCI I/O operation on SLI-3 device.
14519 * @pdev: pointer to PCI device
14520 *
14521 * This routine is called from the PCI subsystem for error handling to device
14522 * with SLI-3 interface spec. It is called when kernel error recovery tells
14523 * the lpfc driver that it is ok to resume normal PCI operation after PCI bus
14524 * error recovery. After this call, traffic can start to flow from this device
14525 * again.
14526 */
14527static void
14528lpfc_io_resume_s3(struct pci_dev *pdev)
14529{
14530        struct Scsi_Host *shost = pci_get_drvdata(pdev);
14531        struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
14532
14533        /* Bring device online, it will be no-op for non-fatal error resume */
14534        lpfc_online(phba);
14535}
14536
14537/**
14538 * lpfc_sli4_get_els_iocb_cnt - Calculate the # of ELS IOCBs to reserve
14539 * @phba: pointer to lpfc hba data structure.
14540 *
14541 * returns the number of ELS/CT IOCBs to reserve
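 * For example, a port reporting max_xri = 1024 reserves 100 IOCBs.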
14542 **/
14543int
14544lpfc_sli4_get_els_iocb_cnt(struct lpfc_hba *phba)
14545{
14546        int max_xri = phba->sli4_hba.max_cfg_param.max_xri;
14547
14548        if (phba->sli_rev == LPFC_SLI_REV4) {
14549                if (max_xri <= 100)
14550                        return 10;
14551                else if (max_xri <= 256)
14552                        return 25;
14553                else if (max_xri <= 512)
14554                        return 50;
14555                else if (max_xri <= 1024)
14556                        return 100;
14557                else if (max_xri <= 1536)
14558                        return 150;
14559                else if (max_xri <= 2048)
14560                        return 200;
14561                else
14562                        return 250;
14563        } else
14564                return 0;
14565}
14566
14567/**
14568 * lpfc_sli4_get_iocb_cnt - Calculate the # of total IOCBs to reserve
14569 * @phba: pointer to lpfc hba data structure.
14570 *
14571 * returns the number of ELS/CT + NVMET IOCBs to reserve
14572 **/
14573int
14574lpfc_sli4_get_iocb_cnt(struct lpfc_hba *phba)
14575{
14576        int max_xri = lpfc_sli4_get_els_iocb_cnt(phba);
14577
14578        if (phba->nvmet_support)
14579                max_xri += LPFC_NVMET_BUF_POST;
14580        return max_xri;
14581}
14582
14583
14584static int
14585lpfc_log_write_firmware_error(struct lpfc_hba *phba, uint32_t offset,
14586        uint32_t magic_number, uint32_t ftype, uint32_t fid, uint32_t fsize,
14587        const struct firmware *fw)
14588{
14589        int rc;
14590        u8 sli_family;
14591
14592        sli_family = bf_get(lpfc_sli_intf_sli_family, &phba->sli4_hba.sli_intf);
14593        /* Three cases:  (1) FW was not supported on the detected adapter.
14594         * (2) FW update has been locked out administratively.
14595         * (3) Some other error during FW update.
14596         * In each case, an unmaskable message is written to the console
14597         * for admin diagnosis.
14598         */
14599        if (offset == ADD_STATUS_FW_NOT_SUPPORTED ||
14600            (sli_family == LPFC_SLI_INTF_FAMILY_G6 &&
14601             magic_number != MAGIC_NUMBER_G6) ||
14602            (sli_family == LPFC_SLI_INTF_FAMILY_G7 &&
14603             magic_number != MAGIC_NUMBER_G7) ||
14604            (sli_family == LPFC_SLI_INTF_FAMILY_G7P &&
14605             magic_number != MAGIC_NUMBER_G7P)) {
14606                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14607                                "3030 This firmware version is not supported on"
14608                                " this HBA model. Device:%x Magic:%x Type:%x "
14609                                "ID:%x Size %d %zd\n",
14610                                phba->pcidev->device, magic_number, ftype, fid,
14611                                fsize, fw->size);
14612                rc = -EINVAL;
14613        } else if (offset == ADD_STATUS_FW_DOWNLOAD_HW_DISABLED) {
14614                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14615                                "3021 Firmware downloads have been prohibited "
14616                                "by a system configuration setting on "
14617                                "Device:%x Magic:%x Type:%x ID:%x Size %d "
14618                                "%zd\n",
14619                                phba->pcidev->device, magic_number, ftype, fid,
14620                                fsize, fw->size);
14621                rc = -EACCES;
14622        } else {
14623                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14624                                "3022 FW Download failed. Add Status x%x "
14625                                "Device:%x Magic:%x Type:%x ID:%x Size %d "
14626                                "%zd\n",
14627                                offset, phba->pcidev->device, magic_number,
14628                                ftype, fid, fsize, fw->size);
14629                rc = -EIO;
14630        }
14631        return rc;
14632}
14633
14634/**
14635 * lpfc_write_firmware - attempt to write a firmware image to the port
14636 * @fw: pointer to firmware image returned from request_firmware.
14637 * @context: pointer to the lpfc hba data structure (driver context).
14638 *
14639 **/
14640static void
14641lpfc_write_firmware(const struct firmware *fw, void *context)
14642{
14643        struct lpfc_hba *phba = (struct lpfc_hba *)context;
14644        char fwrev[FW_REV_STR_SIZE];
14645        struct lpfc_grp_hdr *image;
14646        struct list_head dma_buffer_list;
14647        int i, rc = 0;
14648        struct lpfc_dmabuf *dmabuf, *next;
14649        uint32_t offset = 0, temp_offset = 0;
14650        uint32_t magic_number, ftype, fid, fsize;
14651
14652        /* fw can be NULL in no-wait mode; sanity check it */
14653        if (!fw) {
14654                rc = -ENXIO;
14655                goto out;
14656        }
14657        image = (struct lpfc_grp_hdr *)fw->data;
14658
14659        magic_number = be32_to_cpu(image->magic_number);
14660        ftype = bf_get_be32(lpfc_grp_hdr_file_type, image);
14661        fid = bf_get_be32(lpfc_grp_hdr_id, image);
14662        fsize = be32_to_cpu(image->size);
14663
14664        INIT_LIST_HEAD(&dma_buffer_list);
14665        lpfc_decode_firmware_rev(phba, fwrev, 1);
14666        if (strncmp(fwrev, image->revision, strnlen(image->revision, 16))) {
14667                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14668                                "3023 Updating Firmware, Current Version:%s "
14669                                "New Version:%s\n",
14670                                fwrev, image->revision);
14671                for (i = 0; i < LPFC_MBX_WR_CONFIG_MAX_BDE; i++) {
14672                        dmabuf = kzalloc(sizeof(struct lpfc_dmabuf),
14673                                         GFP_KERNEL);
14674                        if (!dmabuf) {
14675                                rc = -ENOMEM;
14676                                goto release_out;
14677                        }
14678                        dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
14679                                                          SLI4_PAGE_SIZE,
14680                                                          &dmabuf->phys,
14681                                                          GFP_KERNEL);
14682                        if (!dmabuf->virt) {
14683                                kfree(dmabuf);
14684                                rc = -ENOMEM;
14685                                goto release_out;
14686                        }
14687                        list_add_tail(&dmabuf->list, &dma_buffer_list);
14688                }
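                /*
                 * Walk the image in windows of LPFC_MBX_WR_CONFIG_MAX_BDE
                 * pages: each pass of the loop below refills the DMA buffer
                 * list with the next chunk, and lpfc_wr_object() advances
                 * @offset past the bytes actually written.
                 */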
14689                while (offset < fw->size) {
14690                        temp_offset = offset;
14691                        list_for_each_entry(dmabuf, &dma_buffer_list, list) {
14692                                if (temp_offset + SLI4_PAGE_SIZE > fw->size) {
14693                                        memcpy(dmabuf->virt,
14694                                               fw->data + temp_offset,
14695                                               fw->size - temp_offset);
14696                                        temp_offset = fw->size;
14697                                        break;
14698                                }
14699                                memcpy(dmabuf->virt, fw->data + temp_offset,
14700                                       SLI4_PAGE_SIZE);
14701                                temp_offset += SLI4_PAGE_SIZE;
14702                        }
14703                        rc = lpfc_wr_object(phba, &dma_buffer_list,
14704                                    (fw->size - offset), &offset);
14705                        if (rc) {
14706                                rc = lpfc_log_write_firmware_error(phba, offset,
14707                                                                   magic_number,
14708                                                                   ftype,
14709                                                                   fid,
14710                                                                   fsize,
14711                                                                   fw);
14712                                goto release_out;
14713                        }
14714                }
14715                rc = offset;
14716        } else
14717                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14718                                "3029 Skipped Firmware update, Current "
14719                                "Version:%s New Version:%s\n",
14720                                fwrev, image->revision);
14721
14722release_out:
14723        list_for_each_entry_safe(dmabuf, next, &dma_buffer_list, list) {
14724                list_del(&dmabuf->list);
14725                dma_free_coherent(&phba->pcidev->dev, SLI4_PAGE_SIZE,
14726                                  dmabuf->virt, dmabuf->phys);
14727                kfree(dmabuf);
14728        }
14729        release_firmware(fw);
14730out:
14731        if (rc < 0)
14732                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14733                                "3062 Firmware update error, status %d.\n", rc);
14734        else
14735                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14736                                "3024 Firmware update success: size %d.\n", rc);
14737}
14738
14739/**
14740 * lpfc_sli4_request_firmware_update - Request linux generic firmware upgrade
14741 * @phba: pointer to lpfc hba data structure.
14742 * @fw_upgrade: which firmware to update.
14743 *
14744 * This routine is called to perform a Linux generic firmware upgrade on a
14745 * device that supports this feature.
14746 **/
14747int
14748lpfc_sli4_request_firmware_update(struct lpfc_hba *phba, uint8_t fw_upgrade)
14749{
14750        uint8_t file_name[ELX_MODEL_NAME_SIZE];
14751        int ret;
14752        const struct firmware *fw;
14753
14754        /* Only supported on SLI4 interface type 2 for now */
14755        if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) <
14756            LPFC_SLI_INTF_IF_TYPE_2)
14757                return -EPERM;
14758
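        /* e.g. a hypothetical ModelName of "LPe32000" yields a request for
         * firmware file "LPe32000.grp" via request_firmware().
         */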
14759        snprintf(file_name, ELX_MODEL_NAME_SIZE, "%s.grp", phba->ModelName);
14760
14761        if (fw_upgrade == INT_FW_UPGRADE) {
14762                ret = request_firmware_nowait(THIS_MODULE, FW_ACTION_UEVENT,
14763                                        file_name, &phba->pcidev->dev,
14764                                        GFP_KERNEL, (void *)phba,
14765                                        lpfc_write_firmware);
14766        } else if (fw_upgrade == RUN_FW_UPGRADE) {
14767                ret = request_firmware(&fw, file_name, &phba->pcidev->dev);
14768                if (!ret)
14769                        lpfc_write_firmware(fw, (void *)phba);
14770        } else {
14771                ret = -EINVAL;
14772        }
14773
14774        return ret;
14775}
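
/*
 * Usage sketch, based on the callers in this file: the probe path requests
 * an asynchronous update with INT_FW_UPGRADE (see lpfc_pci_probe_one_s4()
 * below), while a user-triggered update would take the synchronous path:
 *
 *	rc = lpfc_sli4_request_firmware_update(phba, RUN_FW_UPGRADE);
 *
 * Any other fw_upgrade value returns -EINVAL.
 */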
14776
14777/**
14778 * lpfc_pci_probe_one_s4 - PCI probe func to reg SLI-4 device to PCI subsys
14779 * @pdev: pointer to PCI device
14780 * @pid: pointer to PCI device identifier
14781 *
14782 * This routine is called from the kernel's PCI subsystem to attach a device
14783 * with SLI-4 interface spec. When an Emulex HBA with SLI-4 interface spec is
14784 * presented on the PCI bus, the kernel PCI subsystem looks at PCI device-
14785 * specific information of the device and driver to see if the driver states
14786 * that it can support this kind of device. If the match is successful, the
14787 * driver core invokes this routine. If this routine determines it can claim
14788 * the HBA, it does all the initialization that it needs to handle the HBA
14789 * properly.
14790 *
14791 * Return code
14792 *      0 - driver can claim the device
14793 *      negative value - driver can not claim the device
14794 **/
14795static int
14796lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
14797{
14798        struct lpfc_hba   *phba;
14799        struct lpfc_vport *vport = NULL;
14800        struct Scsi_Host  *shost = NULL;
14801        int error;
14802        uint32_t cfg_mode, intr_mode;
14803
14804        /* Allocate memory for HBA structure */
14805        phba = lpfc_hba_alloc(pdev);
14806        if (!phba)
14807                return -ENOMEM;
14808
14809        INIT_LIST_HEAD(&phba->poll_list);
14810
14811        /* Perform generic PCI device enabling operation */
14812        error = lpfc_enable_pci_dev(phba);
14813        if (error)
14814                goto out_free_phba;
14815
14816        /* Set up SLI API function jump table for PCI-device group-1 HBAs */
14817        error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_OC);
14818        if (error)
14819                goto out_disable_pci_dev;
14820
14821        /* Set up SLI-4 specific device PCI memory space */
14822        error = lpfc_sli4_pci_mem_setup(phba);
14823        if (error) {
14824                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14825                                "1410 Failed to set up pci memory space.\n");
14826                goto out_disable_pci_dev;
14827        }
14828
14829        /* Set up SLI-4 Specific device driver resources */
14830        error = lpfc_sli4_driver_resource_setup(phba);
14831        if (error) {
14832                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14833                                "1412 Failed to set up driver resource.\n");
14834                goto out_unset_pci_mem_s4;
14835        }
14836
14837        INIT_LIST_HEAD(&phba->active_rrq_list);
14838        INIT_LIST_HEAD(&phba->fcf.fcf_pri_list);
14839
14840        /* Set up common device driver resources */
14841        error = lpfc_setup_driver_resource_phase2(phba);
14842        if (error) {
14843                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14844                                "1414 Failed to set up driver resource.\n");
14845                goto out_unset_driver_resource_s4;
14846        }
14847
14848        /* Get the default values for Model Name and Description */
14849        lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);
14850
14851        /* Now try to enable interrupts and bring up the device */
14852        cfg_mode = phba->cfg_use_msi;
14853
14854        /* Put device to a known state before enabling interrupt */
14855        phba->pport = NULL;
14856        lpfc_stop_port(phba);
14857
14858        /* Init cpu_map array */
14859        lpfc_cpu_map_array_init(phba);
14860
14861        /* Init hba_eq_hdl array */
14862        lpfc_hba_eq_hdl_array_init(phba);
14863
14864        /* Configure and enable interrupt */
14865        intr_mode = lpfc_sli4_enable_intr(phba, cfg_mode);
14866        if (intr_mode == LPFC_INTR_ERROR) {
14867                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14868                                "0426 Failed to enable interrupt.\n");
14869                error = -ENODEV;
14870                goto out_unset_driver_resource;
14871        }
14872        /* Default to single EQ for non-MSI-X */
14873        if (phba->intr_type != MSIX) {
14874                phba->cfg_irq_chann = 1;
14875                if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
14876                        if (phba->nvmet_support)
14877                                phba->cfg_nvmet_mrq = 1;
14878                }
14879        }
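        /*
         * A single INTx/MSI vector leaves only one EQ to service, hence
         * the IRQ channel count (and, for NVMET, the MRQ count) is
         * clamped to 1 above.
         */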
14880        lpfc_cpu_affinity_check(phba, phba->cfg_irq_chann);
14881
14882        /* Create SCSI host to the physical port */
14883        error = lpfc_create_shost(phba);
14884        if (error) {
14885                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14886                                "1415 Failed to create scsi host.\n");
14887                goto out_disable_intr;
14888        }
14889        vport = phba->pport;
14890        shost = lpfc_shost_from_vport(vport); /* save shost for error cleanup */
14891
14892        /* Configure sysfs attributes */
14893        error = lpfc_alloc_sysfs_attr(vport);
14894        if (error) {
14895                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14896                                "1416 Failed to allocate sysfs attr\n");
14897                goto out_destroy_shost;
14898        }
14899
14900        /* Set up SLI-4 HBA */
14901        if (lpfc_sli4_hba_setup(phba)) {
14902                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14903                                "1421 Failed to set up hba\n");
14904                error = -ENODEV;
14905                goto out_free_sysfs_attr;
14906        }
14907
14908        /* Log the current active interrupt mode */
14909        phba->intr_mode = intr_mode;
14910        lpfc_log_intr_mode(phba, intr_mode);
14911
14912        /* Perform post initialization setup */
14913        lpfc_post_init_setup(phba);
14914
14915        /* FW NVME support, verified earlier in the driver load, corrects the
14916         * FC4 type, making a separate check for nvme_support unnecessary.
14917         */
14918        if (phba->nvmet_support == 0) {
14919                if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
14920                        /* Create NVME binding with nvme_fc_transport. This
14921                         * ensures the vport is initialized.  If the localport
14922                         * create fails, it should not unload the driver to
14923                         * support field issues.
14924                         */
14925                        error = lpfc_nvme_create_localport(vport);
14926                        if (error) {
14927                                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14928                                                "6004 NVME registration "
14929                                                "failed, error x%x\n",
14930                                                error);
14931                        }
14932                }
14933        }
14934
14935        /* check for firmware upgrade or downgrade */
14936        if (phba->cfg_request_firmware_upgrade)
14937                lpfc_sli4_request_firmware_update(phba, INT_FW_UPGRADE);
14938
14939        /* Check if there are static vports to be created. */
14940        lpfc_create_static_vport(phba);
14941
14942        timer_setup(&phba->cpuhp_poll_timer, lpfc_sli4_poll_hbtimer, 0);
14943        cpuhp_state_add_instance_nocalls(lpfc_cpuhp_state, &phba->cpuhp);
14944
14945        return 0;
14946
14947out_free_sysfs_attr:
14948        lpfc_free_sysfs_attr(vport);
14949out_destroy_shost:
14950        lpfc_destroy_shost(phba);
14951out_disable_intr:
14952        lpfc_sli4_disable_intr(phba);
14953out_unset_driver_resource:
14954        lpfc_unset_driver_resource_phase2(phba);
14955out_unset_driver_resource_s4:
14956        lpfc_sli4_driver_resource_unset(phba);
14957out_unset_pci_mem_s4:
14958        lpfc_sli4_pci_mem_unset(phba);
14959out_disable_pci_dev:
14960        lpfc_disable_pci_dev(phba);
14961        if (shost)
14962                scsi_host_put(shost);
14963out_free_phba:
14964        lpfc_hba_free(phba);
14965        return error;
14966}
14967
14968/**
14969 * lpfc_pci_remove_one_s4 - PCI func to unreg SLI-4 device from PCI subsystem
14970 * @pdev: pointer to PCI device
14971 *
14972 * This routine is called from the kernel's PCI subsystem to detach a device
14973 * with SLI-4 interface spec. When an Emulex HBA with SLI-4 interface spec is
14974 * removed from PCI bus, it performs all the necessary cleanup for the HBA
14975 * device to be removed from the PCI subsystem properly.
14976 **/
14977static void
14978lpfc_pci_remove_one_s4(struct pci_dev *pdev)
14979{
14980        struct Scsi_Host *shost = pci_get_drvdata(pdev);
14981        struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
14982        struct lpfc_vport **vports;
14983        struct lpfc_hba *phba = vport->phba;
14984        int i;
14985
14986        /* Mark the device unloading flag */
14987        spin_lock_irq(&phba->hbalock);
14988        vport->load_flag |= FC_UNLOADING;
14989        spin_unlock_irq(&phba->hbalock);
14990        if (phba->cgn_i)
14991                lpfc_unreg_congestion_buf(phba);
14992
14993        lpfc_free_sysfs_attr(vport);
14994
14995        /* Release all the vports against this physical port */
14996        vports = lpfc_create_vport_work_array(phba);
14997        if (vports != NULL)
14998                for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
14999                        if (vports[i]->port_type == LPFC_PHYSICAL_PORT)
15000                                continue;
15001                        fc_vport_terminate(vports[i]->fc_vport);
15002                }
15003        lpfc_destroy_vport_work_array(phba, vports);
15004
15005        /* Remove FC host with the physical port */
15006        fc_remove_host(shost);
15007        scsi_remove_host(shost);
15008
15009        /* Perform ndlp cleanup on the physical port.  The nvme and nvmet
15010         * localports are destroyed afterwards to clean up all transport memory.
15011         */
15012        lpfc_cleanup(vport);
15013        lpfc_nvmet_destroy_targetport(phba);
15014        lpfc_nvme_destroy_localport(vport);
15015
15016        /* De-allocate multi-XRI pools */
15017        if (phba->cfg_xri_rebalancing)
15018                lpfc_destroy_multixri_pools(phba);
15019
15020        /*
15021         * Bring down the SLI Layer. This step disables all interrupts,
15022         * clears the rings, discards all mailbox commands, and resets
15023         * the HBA FCoE function.
15024         */
15025        lpfc_debugfs_terminate(vport);
15026
15027        lpfc_stop_hba_timers(phba);
15028        spin_lock_irq(&phba->port_list_lock);
15029        list_del_init(&vport->listentry);
15030        spin_unlock_irq(&phba->port_list_lock);
15031
15032        /* Perform scsi free before driver resource_unset since scsi
15033         * buffers are released to their corresponding pools here.
15034         */
15035        lpfc_io_free(phba);
15036        lpfc_free_iocb_list(phba);
15037        lpfc_sli4_hba_unset(phba);
15038
15039        lpfc_unset_driver_resource_phase2(phba);
15040        lpfc_sli4_driver_resource_unset(phba);
15041
15042        /* Unmap adapter Control and Doorbell registers */
15043        lpfc_sli4_pci_mem_unset(phba);
15044
15045        /* Release PCI resources and disable device's PCI function */
15046        scsi_host_put(shost);
15047        lpfc_disable_pci_dev(phba);
15048
15049        /* Finally, free the driver's device data structure */
15050        lpfc_hba_free(phba);
15051
15052        return;
15053}
15054
15055/**
15056 * lpfc_pci_suspend_one_s4 - PCI func to suspend SLI-4 device for power mgmnt
15057 * @dev_d: pointer to device
15058 *
15059 * This routine is called from the kernel's PCI subsystem to support system
15060 * Power Management (PM) for a device with SLI-4 interface spec. When PM
15061 * invokes this method, it quiesces the device by stopping the driver's
15062 * worker thread, turning off the device's interrupt and DMA, and bringing
15063 * the device offline. Note that the driver implements only the minimum
15064 * PM requirements of a power-aware driver: all possible PM messages
15065 * (SUSPEND, HIBERNATE, FREEZE) delivered to the suspend() method are
15066 * treated as SUSPEND, and the driver fully reinitializes the device in
15067 * its resume() method. The device is therefore set to the PCI_D3hot
15068 * state in PCI config space rather than a state derived from the PM
15069 * message.
15070 *
15071 * Return code
15072 *      0 - driver suspended the device
15073 *      Error otherwise
15074 **/
15075static int __maybe_unused
15076lpfc_pci_suspend_one_s4(struct device *dev_d)
15077{
15078        struct Scsi_Host *shost = dev_get_drvdata(dev_d);
15079        struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
15080
15081        lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
15082                        "2843 PCI device Power Management suspend.\n");
15083
15084        /* Bring down the device */
15085        lpfc_offline_prep(phba, LPFC_MBX_WAIT);
15086        lpfc_offline(phba);
15087        kthread_stop(phba->worker_thread);
15088
15089        /* Disable interrupt from device */
15090        lpfc_sli4_disable_intr(phba);
15091        lpfc_sli4_queue_destroy(phba);
15092
15093        return 0;
15094}
15095
15096/**
15097 * lpfc_pci_resume_one_s4 - PCI func to resume SLI-4 device for power mgmnt
15098 * @dev_d: pointer to device
15099 *
15100 * This routine is called from the kernel's PCI subsystem to support system
15101 * Power Management (PM) for a device with SLI-4 interface spec. When PM
15102 * invokes this method, it restores the device's PCI config space state and
15103 * fully reinitializes the device and brings it online. Note that the
15104 * driver implements only the minimum PM requirements of a power-aware
15105 * driver: all possible PM messages (SUSPEND, HIBERNATE, FREEZE) delivered
15106 * to the suspend() method are treated as SUSPEND, and the driver fully
15107 * reinitializes the device in this resume() method. The device is
15108 * therefore set directly to PCI_D0 in PCI config space before its state
15109 * is restored.
15110 *
15111 * Return code
15112 *      0 - driver resumed the device
15113 *      Error otherwise
15114 **/
15115static int __maybe_unused
15116lpfc_pci_resume_one_s4(struct device *dev_d)
15117{
15118        struct Scsi_Host *shost = dev_get_drvdata(dev_d);
15119        struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
15120        uint32_t intr_mode;
15121        int error;
15122
15123        lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
15124                        "0292 PCI device Power Management resume.\n");
15125
15126        /* Startup the kernel thread for this host adapter. */
15127        phba->worker_thread = kthread_run(lpfc_do_work, phba,
15128                                        "lpfc_worker_%d", phba->brd_no);
15129        if (IS_ERR(phba->worker_thread)) {
15130                error = PTR_ERR(phba->worker_thread);
15131                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15132                                "0293 PM resume failed to start worker "
15133                                "thread: error=x%x.\n", error);
15134                return error;
15135        }
15136
15137        /* Configure and enable interrupt */
15138        intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
15139        if (intr_mode == LPFC_INTR_ERROR) {
15140                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15141                                "0294 PM resume Failed to enable interrupt\n");
15142                return -EIO;
15143        } else
15144                phba->intr_mode = intr_mode;
15145
15146        /* Restart HBA and bring it online */
15147        lpfc_sli_brdrestart(phba);
15148        lpfc_online(phba);
15149
15150        /* Log the current active interrupt mode */
15151        lpfc_log_intr_mode(phba, phba->intr_mode);
15152
15153        return 0;
15154}
15155
15156/**
15157 * lpfc_sli4_prep_dev_for_recover - Prepare SLI4 device for pci slot recover
15158 * @phba: pointer to lpfc hba data structure.
15159 *
15160 * This routine is called to prepare the SLI4 device for PCI slot recover. It
15161 * aborts all the outstanding SCSI I/Os to the pci device.
15162 **/
15163static void
15164lpfc_sli4_prep_dev_for_recover(struct lpfc_hba *phba)
15165{
15166        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15167                        "2828 PCI channel I/O abort preparing for recovery\n");
15168        /*
15169         * There may be errored I/Os pending in the HBA; abort all I/Os on
15170         * the txcmplq and let the SCSI mid-layer retry them to recover.
15171         */
15172        lpfc_sli_abort_fcp_rings(phba);
15173}
15174
15175/**
15176 * lpfc_sli4_prep_dev_for_reset - Prepare SLI4 device for pci slot reset
15177 * @phba: pointer to lpfc hba data structure.
15178 *
15179 * This routine is called to prepare the SLI4 device for PCI slot reset. It
15180 * disables the device interrupt and pci device, and aborts the internal FCP
15181 * pending I/Os.
15182 **/
15183static void
15184lpfc_sli4_prep_dev_for_reset(struct lpfc_hba *phba)
15185{
15186        int offline =  pci_channel_offline(phba->pcidev);
15187
15188        lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15189                        "2826 PCI channel disable preparing for reset offline"
15190                        " %d\n", offline);
15191
15192        /* Block any management I/Os to the device */
15193        lpfc_block_mgmt_io(phba, LPFC_MBX_NO_WAIT);
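        /* Note: LPFC_MBX_NO_WAIT is used above because, with the PCI
         * channel failing, outstanding mailbox commands cannot be
         * expected to complete.
         */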
15194
15196        /* HBA_PCI_ERR was set in io_error_detected */
15197        lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
15198        /* Flush all driver's outstanding I/Os as we are to reset */
15199        lpfc_sli_flush_io_rings(phba);
15200        lpfc_offline(phba);
15201
15202        /* stop all timers */
15203        lpfc_stop_hba_timers(phba);
15204
15205        lpfc_sli4_queue_destroy(phba);
15206        /* Disable interrupt and pci device */
15207        lpfc_sli4_disable_intr(phba);
15208        pci_disable_device(phba->pcidev);
15209}
15210
15211/**
15212 * lpfc_sli4_prep_dev_for_perm_failure - Prepare SLI4 dev for pci slot disable
15213 * @phba: pointer to lpfc hba data structure.
15214 *
15215 * This routine is called to prepare the SLI4 device for permanent PCI slot
15216 * disabling. It blocks the SCSI transport layer traffic and flushes the FCP
15217 * pending I/Os.
15218 **/
15219static void
15220lpfc_sli4_prep_dev_for_perm_failure(struct lpfc_hba *phba)
15221{
15222        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15223                        "2827 PCI channel permanent disable for failure\n");
15224
15225        /* Block all SCSI devices' I/Os on the host */
15226        lpfc_scsi_dev_block(phba);
15227
15228        /* stop all timers */
15229        lpfc_stop_hba_timers(phba);
15230
15231        /* Clean up all driver's outstanding I/Os */
15232        lpfc_sli_flush_io_rings(phba);
15233}
15234
15235/**
15236 * lpfc_io_error_detected_s4 - Method for handling PCI I/O error to SLI-4 device
15237 * @pdev: pointer to PCI device.
15238 * @state: the current PCI connection state.
15239 *
15240 * This routine is called from the PCI subsystem for error handling to device
15241 * with SLI-4 interface spec. This function is called by the PCI subsystem
15242 * after a PCI bus error affecting this device has been detected. When this
15243 * function is invoked, it will need to stop all the I/Os and interrupt(s)
15244 * to the device. Once that is done, it will return PCI_ERS_RESULT_NEED_RESET
15245 * for the PCI subsystem to perform proper recovery as desired.
15246 *
15247 * Return codes
15248 *      PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
15249 *      PCI_ERS_RESULT_DISCONNECT - device could not be recovered
15250 **/
15251static pci_ers_result_t
15252lpfc_io_error_detected_s4(struct pci_dev *pdev, pci_channel_state_t state)
15253{
15254        struct Scsi_Host *shost = pci_get_drvdata(pdev);
15255        struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
15256        bool hba_pci_err;
15257
15258        switch (state) {
15259        case pci_channel_io_normal:
15260                /* Non-fatal error, prepare for recovery */
15261                lpfc_sli4_prep_dev_for_recover(phba);
15262                return PCI_ERS_RESULT_CAN_RECOVER;
15263        case pci_channel_io_frozen:
15264                hba_pci_err = test_and_set_bit(HBA_PCI_ERR, &phba->bit_flags);
15265                /* Fatal error, prepare for slot reset */
15266                if (!hba_pci_err)
15267                        lpfc_sli4_prep_dev_for_reset(phba);
15268                else
15269                        lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15270                                        "2832 Already handling PCI error "
15271                                        "state: x%x\n", state);
15272                return PCI_ERS_RESULT_NEED_RESET;
15273        case pci_channel_io_perm_failure:
15274                set_bit(HBA_PCI_ERR, &phba->bit_flags);
15275                /* Permanent failure, prepare for device down */
15276                lpfc_sli4_prep_dev_for_perm_failure(phba);
15277                return PCI_ERS_RESULT_DISCONNECT;
15278        default:
15279                hba_pci_err = test_and_set_bit(HBA_PCI_ERR, &phba->bit_flags);
15280                if (!hba_pci_err)
15281                        lpfc_sli4_prep_dev_for_reset(phba);
15282                /* Unknown state: already prepared for reset above; log and
15283                 * request slot reset. */
15284                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15285                                "2825 Unknown PCI error state: x%x\n", state);
15286                return PCI_ERS_RESULT_NEED_RESET;
15287        }
15288}
15289
15290/**
15291 * lpfc_io_slot_reset_s4 - Method for restart PCI SLI-4 device from scratch
15292 * @pdev: pointer to PCI device.
15293 *
15294 * This routine is called from the PCI subsystem for error handling to device
15295 * with SLI-4 interface spec. It is called after PCI bus has been reset to
15296 * restart the PCI card from scratch, as if from a cold-boot. During the
15297 * PCI subsystem error recovery, after the driver returns
15298 * PCI_ERS_RESULT_NEED_RESET, the PCI subsystem will perform proper error
15299 * recovery and then call this routine before calling the .resume method to
15300 * recover the device. This function will initialize the HBA device and
15301 * enable its interrupt, but it will just put the HBA into an offline state
15302 * without passing any I/O traffic.
15303 *
15304 * Return codes
15305 *      PCI_ERS_RESULT_RECOVERED - the device has been recovered
15306 *      PCI_ERS_RESULT_DISCONNECT - device could not be recovered
15307 */
15308static pci_ers_result_t
15309lpfc_io_slot_reset_s4(struct pci_dev *pdev)
15310{
15311        struct Scsi_Host *shost = pci_get_drvdata(pdev);
15312        struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
15313        struct lpfc_sli *psli = &phba->sli;
15314        uint32_t intr_mode;
15315        bool hba_pci_err;
15316
15317        dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n");
15318        if (pci_enable_device_mem(pdev)) {
15319                printk(KERN_ERR "lpfc: Cannot re-enable "
15320                       "PCI device after reset.\n");
15321                return PCI_ERS_RESULT_DISCONNECT;
15322        }
15323
15324        pci_restore_state(pdev);
15325
15326        hba_pci_err = test_and_clear_bit(HBA_PCI_ERR, &phba->bit_flags);
15327        if (!hba_pci_err)
15328                dev_info(&pdev->dev,
15329                         "hba_pci_err was not set, recovering slot reset.\n");
15330        /*
15331         * As the new kernel behavior of pci_restore_state() API call clears
15332         * device saved_state flag, need to save the restored state again.
15333         */
15334        pci_save_state(pdev);
15335
15336        if (pdev->is_busmaster)
15337                pci_set_master(pdev);
15338
15339        spin_lock_irq(&phba->hbalock);
15340        psli->sli_flag &= ~LPFC_SLI_ACTIVE;
15341        spin_unlock_irq(&phba->hbalock);
15342
15343        /* Init cpu_map array */
15344        lpfc_cpu_map_array_init(phba);
15345        /* Configure and enable interrupt */
15346        intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
15347        if (intr_mode == LPFC_INTR_ERROR) {
15348                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15349                                "2824 Cannot re-enable interrupt after "
15350                                "slot reset.\n");
15351                return PCI_ERS_RESULT_DISCONNECT;
15352        } else
15353                phba->intr_mode = intr_mode;
15354        lpfc_cpu_affinity_check(phba, phba->cfg_irq_chann);
15355
15356        /* Log the current active interrupt mode */
15357        lpfc_log_intr_mode(phba, phba->intr_mode);
15358
15359        return PCI_ERS_RESULT_RECOVERED;
15360}

/**
 * lpfc_io_resume_s4 - Method for resuming PCI I/O operation to SLI-4 device
 * @pdev: pointer to PCI device
 *
 * This routine is called from the PCI subsystem for error handling on a
 * device with the SLI-4 interface spec. It is invoked when kernel error
 * recovery determines that it is OK to resume normal PCI operation after
 * PCI bus error recovery. After this call, traffic can start to flow from
 * this device again.
 **/
static void
lpfc_io_resume_s4(struct pci_dev *pdev)
{
        struct Scsi_Host *shost = pci_get_drvdata(pdev);
        struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;

        /*
         * After a slot reset, the function reset is performed through a
         * mailbox command, which requires DMA to be enabled; the reset
         * therefore has to be deferred to the io resume phase. Taking the
         * device offline performs the necessary cleanup.
         */
        if (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE)) {
                /* Perform device reset */
                lpfc_sli_brdrestart(phba);
                /* Bring the device back online */
                lpfc_online(phba);
        }
}

/**
 * lpfc_pci_probe_one - lpfc PCI probe func to reg dev to PCI subsystem
 * @pdev: pointer to PCI device
 * @pid: pointer to PCI device identifier
 *
 * This routine is to be registered to the kernel's PCI subsystem. When an
 * Emulex HBA device is presented on the PCI bus, the kernel PCI subsystem
 * looks at the PCI device-specific information of the device and driver to
 * see whether the driver can support this kind of device. If the match is
 * successful, the driver core invokes this routine. This routine dispatches
 * the action to the proper SLI-3 or SLI-4 device probing routine, which will
 * do all the initialization that it needs to do to handle the HBA device
 * properly.
 *
 * Return code
 *      0 - driver can claim the device
 *      negative value - driver can not claim the device
 **/
static int
lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
{
        int rc;
        struct lpfc_sli_intf intf;

        if (pci_read_config_dword(pdev, LPFC_SLI_INTF, &intf.word0))
                return -ENODEV;

        if ((bf_get(lpfc_sli_intf_valid, &intf) == LPFC_SLI_INTF_VALID) &&
            (bf_get(lpfc_sli_intf_slirev, &intf) == LPFC_SLI_INTF_REV_SLI4))
                rc = lpfc_pci_probe_one_s4(pdev, pid);
        else
                rc = lpfc_pci_probe_one_s3(pdev, pid);

        return rc;
}
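
/*
 * [Editor's note] The probe above keys off bitfields in the LPFC_SLI_INTF
 * config word. lpfc's bf_get()/bf_set() accessors are built from per-field
 * _SHIFT/_MASK/_WORD constants; a minimal sketch of that convention using a
 * hypothetical field, assuming the usual lpfc-style macro shape:
 */
#if 0
struct example_reg {
        uint32_t word0;
};

/* hypothetical field occupying bits 8..11 of word0 */
#define example_field_SHIFT     8
#define example_field_MASK      0x0000000f
#define example_field_WORD      word0

#define ex_bf_get(name, ptr) \
        ((((ptr)->name##_WORD) >> name##_SHIFT) & name##_MASK)

/* ex_bf_get(example_field, &reg) extracts bits 8..11 of reg.word0 */
#endif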

/**
 * lpfc_pci_remove_one - lpfc PCI func to unreg dev from PCI subsystem
 * @pdev: pointer to PCI device
 *
 * This routine is to be registered to the kernel's PCI subsystem. When an
 * Emulex HBA is removed from the PCI bus, the driver core invokes this
 * routine. This routine dispatches the action to the proper SLI-3 or SLI-4
 * device remove routine, which will perform all the necessary cleanup for
 * the device to be removed from the PCI subsystem properly.
 **/
static void
lpfc_pci_remove_one(struct pci_dev *pdev)
{
        struct Scsi_Host *shost = pci_get_drvdata(pdev);
        struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;

        switch (phba->pci_dev_grp) {
        case LPFC_PCI_DEV_LP:
                lpfc_pci_remove_one_s3(pdev);
                break;
        case LPFC_PCI_DEV_OC:
                lpfc_pci_remove_one_s4(pdev);
                break;
        default:
                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
                                "1424 Invalid PCI device group: 0x%x\n",
                                phba->pci_dev_grp);
                break;
        }
}

/**
 * lpfc_pci_suspend_one - lpfc PCI func to suspend dev for power management
 * @dev: pointer to device
 *
 * This routine is to be registered to the kernel's PCI subsystem to support
 * system Power Management (PM). When PM invokes this method, it dispatches
 * the action to the proper SLI-3 or SLI-4 device suspend routine, which will
 * suspend the device.
 *
 * Return code
 *      0 - driver suspended the device
 *      Error otherwise
 **/
static int __maybe_unused
lpfc_pci_suspend_one(struct device *dev)
{
        struct Scsi_Host *shost = dev_get_drvdata(dev);
        struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
        int rc = -ENODEV;

        switch (phba->pci_dev_grp) {
        case LPFC_PCI_DEV_LP:
                rc = lpfc_pci_suspend_one_s3(dev);
                break;
        case LPFC_PCI_DEV_OC:
                rc = lpfc_pci_suspend_one_s4(dev);
                break;
        default:
                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
                                "1425 Invalid PCI device group: 0x%x\n",
                                phba->pci_dev_grp);
                break;
        }
        return rc;
}

/**
 * lpfc_pci_resume_one - lpfc PCI func to resume dev for power management
 * @dev: pointer to device
 *
 * This routine is to be registered to the kernel's PCI subsystem to support
 * system Power Management (PM). When PM invokes this method, it dispatches
 * the action to the proper SLI-3 or SLI-4 device resume routine, which will
 * resume the device.
 *
 * Return code
 *      0 - driver resumed the device
 *      Error otherwise
 **/
static int __maybe_unused
lpfc_pci_resume_one(struct device *dev)
{
        struct Scsi_Host *shost = dev_get_drvdata(dev);
        struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
        int rc = -ENODEV;

        switch (phba->pci_dev_grp) {
        case LPFC_PCI_DEV_LP:
                rc = lpfc_pci_resume_one_s3(dev);
                break;
        case LPFC_PCI_DEV_OC:
                rc = lpfc_pci_resume_one_s4(dev);
                break;
        default:
                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
                                "1426 Invalid PCI device group: 0x%x\n",
                                phba->pci_dev_grp);
                break;
        }
        return rc;
}

/**
 * lpfc_io_error_detected - lpfc method for handling PCI I/O error
 * @pdev: pointer to PCI device.
 * @state: the current PCI connection state.
 *
 * This routine is registered to the PCI subsystem for error handling. This
 * function is called by the PCI subsystem after a PCI bus error affecting
 * this device has been detected. When this routine is invoked, it dispatches
 * the action to the proper SLI-3 or SLI-4 device error detected handling
 * routine, which will perform the proper error detected operation.
 *
 * Return codes
 *      PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
 *      PCI_ERS_RESULT_DISCONNECT - device could not be recovered
 **/
static pci_ers_result_t
lpfc_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
{
        struct Scsi_Host *shost = pci_get_drvdata(pdev);
        struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
        pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;

        if (phba->link_state == LPFC_HBA_ERROR &&
            phba->hba_flag & HBA_IOQ_FLUSH)
                return PCI_ERS_RESULT_NEED_RESET;

        switch (phba->pci_dev_grp) {
        case LPFC_PCI_DEV_LP:
                rc = lpfc_io_error_detected_s3(pdev, state);
                break;
        case LPFC_PCI_DEV_OC:
                rc = lpfc_io_error_detected_s4(pdev, state);
                break;
        default:
                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
                                "1427 Invalid PCI device group: 0x%x\n",
                                phba->pci_dev_grp);
                break;
        }
        return rc;
}
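
/*
 * [Editor's note] An .error_detected handler typically maps the
 * pci_channel_state_t it is handed onto a recovery verdict. A minimal
 * sketch of that common pattern (illustrative only, not the driver's
 * exact SLI-3/SLI-4 logic):
 */
#if 0
static pci_ers_result_t example_error_detected(struct pci_dev *pdev,
                                               pci_channel_state_t state)
{
        switch (state) {
        case pci_channel_io_normal:
                /* transient fault: I/O may continue while we recover */
                return PCI_ERS_RESULT_CAN_RECOVER;
        case pci_channel_io_frozen:
                /* I/O is blocked: ask the PCI core for a slot reset */
                return PCI_ERS_RESULT_NEED_RESET;
        case pci_channel_io_perm_failure:
        default:
                /* unrecoverable: give up on the device */
                return PCI_ERS_RESULT_DISCONNECT;
        }
}
#endif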

/**
 * lpfc_io_slot_reset - lpfc method for restarting a PCI dev from scratch
 * @pdev: pointer to PCI device.
 *
 * This routine is registered to the PCI subsystem for error handling. This
 * function is called after the PCI bus has been reset to restart the PCI
 * card from scratch, as if from a cold-boot. When this routine is invoked,
 * it dispatches the action to the proper SLI-3 or SLI-4 device reset
 * handling routine, which will perform the proper device reset.
 *
 * Return codes
 *      PCI_ERS_RESULT_RECOVERED - the device has been recovered
 *      PCI_ERS_RESULT_DISCONNECT - device could not be recovered
 **/
static pci_ers_result_t
lpfc_io_slot_reset(struct pci_dev *pdev)
{
        struct Scsi_Host *shost = pci_get_drvdata(pdev);
        struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
        pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;

        switch (phba->pci_dev_grp) {
        case LPFC_PCI_DEV_LP:
                rc = lpfc_io_slot_reset_s3(pdev);
                break;
        case LPFC_PCI_DEV_OC:
                rc = lpfc_io_slot_reset_s4(pdev);
                break;
        default:
                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
                                "1428 Invalid PCI device group: 0x%x\n",
                                phba->pci_dev_grp);
                break;
        }
        return rc;
}

/**
 * lpfc_io_resume - lpfc method for resuming PCI I/O operation
 * @pdev: pointer to PCI device
 *
 * This routine is registered to the PCI subsystem for error handling. It
 * is called when kernel error recovery tells the lpfc driver that it is
 * OK to resume normal PCI operation after PCI bus error recovery. When
 * this routine is invoked, it dispatches the action to the proper SLI-3
 * or SLI-4 device io_resume routine, which will resume the device operation.
 **/
static void
lpfc_io_resume(struct pci_dev *pdev)
{
        struct Scsi_Host *shost = pci_get_drvdata(pdev);
        struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;

        switch (phba->pci_dev_grp) {
        case LPFC_PCI_DEV_LP:
                lpfc_io_resume_s3(pdev);
                break;
        case LPFC_PCI_DEV_OC:
                lpfc_io_resume_s4(pdev);
                break;
        default:
                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
                                "1429 Invalid PCI device group: 0x%x\n",
                                phba->pci_dev_grp);
                break;
        }
}

/**
 * lpfc_sli4_oas_verify - Verify OAS is supported by this adapter
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine checks whether OAS (Optimized Access Storage) is supported
 * by this adapter. If it is, the Flash Optimized Fabric (FOF) configuration
 * flag is set. Otherwise, the flag is cleared and the pool created for OAS
 * device data is destroyed.
 **/
static void
lpfc_sli4_oas_verify(struct lpfc_hba *phba)
{
        if (!phba->cfg_EnableXLane)
                return;

        if (phba->sli4_hba.pc_sli4_params.oas_supported) {
                phba->cfg_fof = 1;
        } else {
                phba->cfg_fof = 0;
                mempool_destroy(phba->device_data_mem_pool);
                phba->device_data_mem_pool = NULL;
        }
}
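
/*
 * [Editor's note] The teardown above relies on mempool_destroy() accepting
 * the pool pointer directly. A minimal sketch of the mempool lifecycle it
 * pairs with; "example_pool"/"struct foo" are hypothetical:
 */
#if 0
#include <linux/mempool.h>

struct foo {
        int x;
};

static mempool_t *example_pool;

static int example_pool_init(void)
{
        /* keep at least 16 objects reserved even under memory pressure */
        example_pool = mempool_create_kmalloc_pool(16, sizeof(struct foo));
        return example_pool ? 0 : -ENOMEM;
}

static void example_pool_fini(void)
{
        mempool_destroy(example_pool);  /* NULL-safe, like kfree() */
        example_pool = NULL;
}
#endif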

/**
 * lpfc_sli4_ras_init - Verify RAS-FW log is supported by this adapter
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine checks whether RAS firmware logging is supported by the
 * adapter and, if so, whether logging is to be enabled on this particular
 * PCI function.
 **/
void
lpfc_sli4_ras_init(struct lpfc_hba *phba)
{
        /* ASIC_GEN_NUM >= 0xC */
        if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
                    LPFC_SLI_INTF_IF_TYPE_6) ||
            (bf_get(lpfc_sli_intf_sli_family, &phba->sli4_hba.sli_intf) ==
                    LPFC_SLI_INTF_FAMILY_G6)) {
                phba->ras_fwlog.ras_hwsupport = true;
                if (phba->cfg_ras_fwlog_func == PCI_FUNC(phba->pcidev->devfn) &&
                    phba->cfg_ras_fwlog_buffsize)
                        phba->ras_fwlog.ras_enabled = true;
                else
                        phba->ras_fwlog.ras_enabled = false;
        } else {
                phba->ras_fwlog.ras_hwsupport = false;
        }
}
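
/*
 * [Editor's note] The ras_enabled test above compares a configured function
 * number against PCI_FUNC(devfn). devfn packs the PCI slot and function
 * numbers into one byte; a small sketch of the standard <linux/pci.h>
 * decode (hypothetical helper name):
 */
#if 0
static void example_show_devfn(struct pci_dev *pdev)
{
        /* devfn layout: bits 7..3 = slot (device), bits 2..0 = function */
        unsigned int slot = PCI_SLOT(pdev->devfn);
        unsigned int func = PCI_FUNC(pdev->devfn);

        dev_info(&pdev->dev, "slot %u, function %u\n", slot, func);
}
#endif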

MODULE_DEVICE_TABLE(pci, lpfc_id_table);

static const struct pci_error_handlers lpfc_err_handler = {
        .error_detected = lpfc_io_error_detected,
        .slot_reset = lpfc_io_slot_reset,
        .resume = lpfc_io_resume,
};

static SIMPLE_DEV_PM_OPS(lpfc_pci_pm_ops_one,
                         lpfc_pci_suspend_one,
                         lpfc_pci_resume_one);
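
/*
 * [Editor's note] SIMPLE_DEV_PM_OPS() fills in the system-sleep callbacks
 * of a struct dev_pm_ops from one suspend/resume pair. The effect is
 * roughly equivalent to the sketch below (an approximation of the macro's
 * result, not its literal expansion):
 */
#if 0
static const struct dev_pm_ops lpfc_pci_pm_ops_one = {
        .suspend  = lpfc_pci_suspend_one,       /* suspend-to-RAM entry */
        .resume   = lpfc_pci_resume_one,
        .freeze   = lpfc_pci_suspend_one,       /* hibernation image creation */
        .thaw     = lpfc_pci_resume_one,
        .poweroff = lpfc_pci_suspend_one,       /* hibernation power-down */
        .restore  = lpfc_pci_resume_one,
};
#endif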

static struct pci_driver lpfc_driver = {
        .name           = LPFC_DRIVER_NAME,
        .id_table       = lpfc_id_table,
        .probe          = lpfc_pci_probe_one,
        .remove         = lpfc_pci_remove_one,
        .shutdown       = lpfc_pci_remove_one,
        .driver.pm      = &lpfc_pci_pm_ops_one,
        .err_handler    = &lpfc_err_handler,
};

static const struct file_operations lpfc_mgmt_fop = {
        .owner = THIS_MODULE,
};

static struct miscdevice lpfc_mgmt_dev = {
        .minor = MISC_DYNAMIC_MINOR,
        .name = "lpfcmgmt",
        .fops = &lpfc_mgmt_fop,
};
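
/*
 * [Editor's note] lpfc_mgmt_dev above registers /dev/lpfcmgmt with empty
 * file operations; the node itself is what management tooling looks for.
 * For contrast, a minimal sketch of a misc device that actually services
 * read(); every name here is hypothetical:
 */
#if 0
static ssize_t example_read(struct file *file, char __user *buf,
                            size_t len, loff_t *ppos)
{
        static const char msg[] = "hello from example\n";

        return simple_read_from_buffer(buf, len, ppos, msg, sizeof(msg) - 1);
}

static const struct file_operations example_fops = {
        .owner = THIS_MODULE,
        .read  = example_read,
};

static struct miscdevice example_dev = {
        .minor = MISC_DYNAMIC_MINOR,    /* let the misc core pick a minor */
        .name  = "example",             /* node appears as /dev/example */
        .fops  = &example_fops,
};

/* misc_register(&example_dev) on init, misc_deregister() on exit */
#endif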

/**
 * lpfc_init - lpfc module initialization routine
 *
 * This routine is to be invoked when the lpfc module is loaded into the
 * kernel. The special kernel macro module_init() is used to indicate the
 * role of this routine to the kernel as lpfc module entry point.
 *
 * Return codes
 *   0 - successful
 *   -ENOMEM - FC attach transport failed
 *   all others - failed
 */
static int __init
lpfc_init(void)
{
        int error = 0;

        pr_info(LPFC_MODULE_DESC "\n");
        pr_info(LPFC_COPYRIGHT "\n");

        error = misc_register(&lpfc_mgmt_dev);
        if (error)
                printk(KERN_ERR "Could not register lpfcmgmt device, "
                        "misc_register returned with status %d\n", error);

        error = -ENOMEM;
        lpfc_transport_functions.vport_create = lpfc_vport_create;
        lpfc_transport_functions.vport_delete = lpfc_vport_delete;
        lpfc_transport_template =
                                fc_attach_transport(&lpfc_transport_functions);
        if (lpfc_transport_template == NULL)
                goto unregister;
        lpfc_vport_transport_template =
                fc_attach_transport(&lpfc_vport_transport_functions);
        if (lpfc_vport_transport_template == NULL) {
                fc_release_transport(lpfc_transport_template);
                goto unregister;
        }
        lpfc_wqe_cmd_template();
        lpfc_nvmet_cmd_template();

        /* Initialize in case vector mapping is needed */
        lpfc_present_cpu = num_present_cpus();

        lpfc_pldv_detect = false;

        error = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
                                        "lpfc/sli4:online",
                                        lpfc_cpu_online, lpfc_cpu_offline);
        if (error < 0)
                goto cpuhp_failure;
        lpfc_cpuhp_state = error;

        error = pci_register_driver(&lpfc_driver);
        if (error)
                goto unwind;

        return error;

unwind:
        cpuhp_remove_multi_state(lpfc_cpuhp_state);
cpuhp_failure:
        fc_release_transport(lpfc_transport_template);
        fc_release_transport(lpfc_vport_transport_template);
unregister:
        misc_deregister(&lpfc_mgmt_dev);

        return error;
}
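
/*
 * [Editor's note] lpfc_init() uses the kernel's usual goto-unwind idiom:
 * each failure jumps to a label that tears down exactly what was already
 * set up, in reverse order. The bare skeleton, with hypothetical step
 * names:
 */
#if 0
static int __init example_init(void)
{
        int err;

        err = setup_a();                /* hypothetical setup steps */
        if (err)
                return err;

        err = setup_b();
        if (err)
                goto undo_a;

        err = setup_c();
        if (err)
                goto undo_b;

        return 0;

undo_b:                                 /* fall through: deepest first */
        teardown_b();
undo_a:
        teardown_a();
        return err;
}
#endif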

void lpfc_dmp_dbg(struct lpfc_hba *phba)
{
        unsigned int start_idx;
        unsigned int dbg_cnt;
        unsigned int temp_idx;
        int i;
        int j = 0;
        unsigned long rem_nsec;

        if (atomic_cmpxchg(&phba->dbg_log_dmping, 0, 1) != 0)
                return;

        start_idx = (unsigned int)atomic_read(&phba->dbg_log_idx) % DBG_LOG_SZ;
        dbg_cnt = (unsigned int)atomic_read(&phba->dbg_log_cnt);
        if (!dbg_cnt)
                goto out;
        temp_idx = start_idx;
        if (dbg_cnt >= DBG_LOG_SZ) {
                dbg_cnt = DBG_LOG_SZ;
                temp_idx -= 1;
        } else {
                if ((start_idx + dbg_cnt) > (DBG_LOG_SZ - 1)) {
                        temp_idx = (start_idx + dbg_cnt) % DBG_LOG_SZ;
                } else {
                        if (start_idx < dbg_cnt)
                                start_idx = DBG_LOG_SZ - (dbg_cnt - start_idx);
                        else
                                start_idx -= dbg_cnt;
                }
        }
        dev_info(&phba->pcidev->dev, "start %d end %d cnt %d\n",
                 start_idx, temp_idx, dbg_cnt);

        for (i = 0; i < dbg_cnt; i++) {
                if ((start_idx + i) < DBG_LOG_SZ)
                        temp_idx = (start_idx + i) % DBG_LOG_SZ;
                else
                        temp_idx = j++;
                rem_nsec = do_div(phba->dbg_log[temp_idx].t_ns, NSEC_PER_SEC);
                dev_info(&phba->pcidev->dev, "%d: [%5lu.%06lu] %s",
                         temp_idx,
                         (unsigned long)phba->dbg_log[temp_idx].t_ns,
                         rem_nsec / 1000,
                         phba->dbg_log[temp_idx].log);
        }
out:
        atomic_set(&phba->dbg_log_cnt, 0);
        atomic_set(&phba->dbg_log_dmping, 0);
}
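
/*
 * [Editor's note] lpfc_dmp_dbg() walks a circular buffer backwards from the
 * next write position to print the last dbg_cnt entries. The general idea,
 * reduced to its core index arithmetic (a sketch only; the driver's own
 * computation above differs in detail, and RING_SZ stands in for
 * DBG_LOG_SZ):
 */
#if 0
#define RING_SZ 256

static void example_dump_last(const char *log[], unsigned int next,
                              unsigned int cnt)
{
        unsigned int first, i;

        if (cnt > RING_SZ)
                cnt = RING_SZ;          /* older entries were overwritten */

        /* index of the oldest entry still retained in the ring */
        first = (next + RING_SZ - cnt) % RING_SZ;

        for (i = 0; i < cnt; i++)
                pr_info("%s", log[(first + i) % RING_SZ]);
}
#endif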

__printf(2, 3)
void lpfc_dbg_print(struct lpfc_hba *phba, const char *fmt, ...)
{
        unsigned int idx;
        va_list args;
        int dbg_dmping = atomic_read(&phba->dbg_log_dmping);
        struct va_format vaf;

        va_start(args, fmt);
        if (unlikely(dbg_dmping)) {
                vaf.fmt = fmt;
                vaf.va = &args;
                dev_info(&phba->pcidev->dev, "%pV", &vaf);
                va_end(args);
                return;
        }
        idx = (unsigned int)atomic_fetch_add(1, &phba->dbg_log_idx) %
                DBG_LOG_SZ;

        atomic_inc(&phba->dbg_log_cnt);

        vscnprintf(phba->dbg_log[idx].log,
                   sizeof(phba->dbg_log[idx].log), fmt, args);
        va_end(args);

        phba->dbg_log[idx].t_ns = local_clock();
}
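
/*
 * [Editor's note] The %pV printk extension used above lets a wrapper
 * forward a (fmt, va_list) pair to another printk-family call without
 * formatting it twice. A minimal sketch of that pattern with a
 * hypothetical wrapper:
 */
#if 0
__printf(2, 3)
static void example_log(struct device *dev, const char *fmt, ...)
{
        struct va_format vaf;
        va_list args;

        va_start(args, fmt);
        vaf.fmt = fmt;
        vaf.va = &args;
        /* dev_info() formats the caller's arguments via %pV */
        dev_info(dev, "example: %pV", &vaf);
        va_end(args);
}
#endif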

/**
 * lpfc_exit - lpfc module removal routine
 *
 * This routine is invoked when the lpfc module is removed from the kernel.
 * The special kernel macro module_exit() is used to indicate the role of
 * this routine to the kernel as lpfc module exit point.
 */
static void __exit
lpfc_exit(void)
{
        misc_deregister(&lpfc_mgmt_dev);
        pci_unregister_driver(&lpfc_driver);
        cpuhp_remove_multi_state(lpfc_cpuhp_state);
        fc_release_transport(lpfc_transport_template);
        fc_release_transport(lpfc_vport_transport_template);
        idr_destroy(&lpfc_hba_index);
}

module_init(lpfc_init);
module_exit(lpfc_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION(LPFC_MODULE_DESC);
MODULE_AUTHOR("Broadcom");
MODULE_VERSION("0:" LPFC_DRIVER_VERSION);