linux/drivers/scsi/lpfc/lpfc_init.c
/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2017-2021 Broadcom. All Rights Reserved. The term *
 * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.  *
 * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.broadcom.com                                                *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/

#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/idr.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/ctype.h>
#include <linux/aer.h>
#include <linux/slab.h>
#include <linux/firmware.h>
#include <linux/miscdevice.h>
#include <linux/percpu.h>
#include <linux/msi.h>
#include <linux/irq.h>
#include <linux/bitops.h>
#include <linux/crash_dump.h>
#include <linux/cpu.h>
#include <linux/cpuhotplug.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>
#include <scsi/scsi_tcq.h>
#include <scsi/fc/fc_fs.h>

#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc.h"
#include "lpfc_scsi.h"
#include "lpfc_nvme.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_vport.h"
#include "lpfc_version.h"
#include "lpfc_ids.h"

static enum cpuhp_state lpfc_cpuhp_state;
/* Used when mapping IRQ vectors in a driver centric manner */
static uint32_t lpfc_present_cpu;

static void __lpfc_cpuhp_remove(struct lpfc_hba *phba);
static void lpfc_cpuhp_remove(struct lpfc_hba *phba);
static void lpfc_cpuhp_add(struct lpfc_hba *phba);
static void lpfc_get_hba_model_desc(struct lpfc_hba *, uint8_t *, uint8_t *);
static int lpfc_post_rcv_buf(struct lpfc_hba *);
static int lpfc_sli4_queue_verify(struct lpfc_hba *);
static int lpfc_create_bootstrap_mbox(struct lpfc_hba *);
static int lpfc_setup_endian_order(struct lpfc_hba *);
static void lpfc_destroy_bootstrap_mbox(struct lpfc_hba *);
static void lpfc_free_els_sgl_list(struct lpfc_hba *);
static void lpfc_free_nvmet_sgl_list(struct lpfc_hba *);
static void lpfc_init_sgl_list(struct lpfc_hba *);
static int lpfc_init_active_sgl_array(struct lpfc_hba *);
static void lpfc_free_active_sgl(struct lpfc_hba *);
static int lpfc_hba_down_post_s3(struct lpfc_hba *phba);
static int lpfc_hba_down_post_s4(struct lpfc_hba *phba);
static int lpfc_sli4_cq_event_pool_create(struct lpfc_hba *);
static void lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *);
static void lpfc_sli4_cq_event_release_all(struct lpfc_hba *);
static void lpfc_sli4_disable_intr(struct lpfc_hba *);
static uint32_t lpfc_sli4_enable_intr(struct lpfc_hba *, uint32_t);
static void lpfc_sli4_oas_verify(struct lpfc_hba *phba);
static uint16_t lpfc_find_cpu_handle(struct lpfc_hba *, uint16_t, int);
static void lpfc_setup_bg(struct lpfc_hba *, struct Scsi_Host *);

static struct scsi_transport_template *lpfc_transport_template = NULL;
static struct scsi_transport_template *lpfc_vport_transport_template = NULL;
static DEFINE_IDR(lpfc_hba_index);
#define LPFC_NVMET_BUF_POST 254
static int lpfc_vmid_res_alloc(struct lpfc_hba *phba, struct lpfc_vport *vport);

/**
 * lpfc_config_port_prep - Perform lpfc initialization prior to config port
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine will do LPFC initialization prior to issuing the CONFIG_PORT
 * mailbox command. It retrieves the revision information from the HBA and
 * collects the Vital Product Data (VPD) about the HBA for preparing the
 * configuration of the HBA.
 *
 * Return codes:
 *   0 - success.
 *   -ERESTART - requests the SLI layer to reset the HBA and try again.
 *   Any other value - indicates an error.
 **/
int
lpfc_config_port_prep(struct lpfc_hba *phba)
{
        lpfc_vpd_t *vp = &phba->vpd;
        int i = 0, rc;
        LPFC_MBOXQ_t *pmb;
        MAILBOX_t *mb;
        char *lpfc_vpd_data = NULL;
        uint16_t offset = 0;
        static char licensed[56] =
                    "key unlock for use with gnu public licensed code only\0";
        static int init_key = 1;

        pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
        if (!pmb) {
                phba->link_state = LPFC_HBA_ERROR;
                return -ENOMEM;
        }

        mb = &pmb->u.mb;
        phba->link_state = LPFC_INIT_MBX_CMDS;

        if (lpfc_is_LC_HBA(phba->pcidev->device)) {
                if (init_key) {
                        uint32_t *ptext = (uint32_t *) licensed;

                        for (i = 0; i < 56; i += sizeof (uint32_t), ptext++)
                                *ptext = cpu_to_be32(*ptext);
                        init_key = 0;
                }

                lpfc_read_nv(phba, pmb);
                memset((char *)mb->un.varRDnvp.rsvd3, 0,
                       sizeof(mb->un.varRDnvp.rsvd3));
                memcpy((char *)mb->un.varRDnvp.rsvd3, licensed,
                       sizeof(licensed));

                rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);

                if (rc != MBX_SUCCESS) {
                        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
                                        "0324 Config Port initialization "
                                        "error, mbxCmd x%x READ_NVPARM, "
                                        "mbxStatus x%x\n",
                                        mb->mbxCommand, mb->mbxStatus);
                        mempool_free(pmb, phba->mbox_mem_pool);
                        return -ERESTART;
                }
                memcpy(phba->wwnn, (char *)mb->un.varRDnvp.nodename,
                       sizeof(phba->wwnn));
                memcpy(phba->wwpn, (char *)mb->un.varRDnvp.portname,
                       sizeof(phba->wwpn));
        }

        /*
         * Clear all option bits except LPFC_SLI3_BG_ENABLED,
         * which was already set in lpfc_get_cfgparam()
         */
        phba->sli3_options &= (uint32_t)LPFC_SLI3_BG_ENABLED;

        /* Setup and issue mailbox READ REV command */
        lpfc_read_rev(phba, pmb);
        rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
        if (rc != MBX_SUCCESS) {
                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
                                "0439 Adapter failed to init, mbxCmd x%x "
                                "READ_REV, mbxStatus x%x\n",
                                mb->mbxCommand, mb->mbxStatus);
                mempool_free(pmb, phba->mbox_mem_pool);
                return -ERESTART;
        }

        /*
         * The value of rr must be 1 since the driver set the cv field to 1.
         * This setting requires the FW to set all revision fields.
         */
        if (mb->un.varRdRev.rr == 0) {
                vp->rev.rBit = 0;
                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
                                "0440 Adapter failed to init, READ_REV has "
                                "missing revision information.\n");
                mempool_free(pmb, phba->mbox_mem_pool);
                return -ERESTART;
        }

        if (phba->sli_rev == 3 && !mb->un.varRdRev.v3rsp) {
                mempool_free(pmb, phba->mbox_mem_pool);
                return -EINVAL;
        }

        /* Save information as VPD data */
        vp->rev.rBit = 1;
        memcpy(&vp->sli3Feat, &mb->un.varRdRev.sli3Feat, sizeof(uint32_t));
        vp->rev.sli1FwRev = mb->un.varRdRev.sli1FwRev;
        memcpy(vp->rev.sli1FwName, (char *)mb->un.varRdRev.sli1FwName, 16);
        vp->rev.sli2FwRev = mb->un.varRdRev.sli2FwRev;
        memcpy(vp->rev.sli2FwName, (char *)mb->un.varRdRev.sli2FwName, 16);
        vp->rev.biuRev = mb->un.varRdRev.biuRev;
        vp->rev.smRev = mb->un.varRdRev.smRev;
        vp->rev.smFwRev = mb->un.varRdRev.un.smFwRev;
        vp->rev.endecRev = mb->un.varRdRev.endecRev;
        vp->rev.fcphHigh = mb->un.varRdRev.fcphHigh;
        vp->rev.fcphLow = mb->un.varRdRev.fcphLow;
        vp->rev.feaLevelHigh = mb->un.varRdRev.feaLevelHigh;
        vp->rev.feaLevelLow = mb->un.varRdRev.feaLevelLow;
        vp->rev.postKernRev = mb->un.varRdRev.postKernRev;
        vp->rev.opFwRev = mb->un.varRdRev.opFwRev;

        /* If the sli feature level is less than 9, we must
         * tear down all RPIs and VPIs on link down if NPIV
         * is enabled.
         */
        if (vp->rev.feaLevelHigh < 9)
                phba->sli3_options |= LPFC_SLI3_VPORT_TEARDOWN;

        if (lpfc_is_LC_HBA(phba->pcidev->device))
                memcpy(phba->RandomData, (char *)&mb->un.varWords[24],
                       sizeof(phba->RandomData));

        /* Get adapter VPD information */
        lpfc_vpd_data = kmalloc(DMP_VPD_SIZE, GFP_KERNEL);
        if (!lpfc_vpd_data)
                goto out_free_mbox;
        do {
                lpfc_dump_mem(phba, pmb, offset, DMP_REGION_VPD);
                rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);

                if (rc != MBX_SUCCESS) {
                        lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
                                        "0441 VPD not present on adapter, "
                                        "mbxCmd x%x DUMP VPD, mbxStatus x%x\n",
                                        mb->mbxCommand, mb->mbxStatus);
                        mb->un.varDmp.word_cnt = 0;
                }
                /* dump mem may return a zero when finished or we got a
                 * mailbox error, either way we are done.
                 */
                if (mb->un.varDmp.word_cnt == 0)
                        break;

                if (mb->un.varDmp.word_cnt > DMP_VPD_SIZE - offset)
                        mb->un.varDmp.word_cnt = DMP_VPD_SIZE - offset;
                lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
                                      lpfc_vpd_data + offset,
                                      mb->un.varDmp.word_cnt);
                offset += mb->un.varDmp.word_cnt;
        } while (mb->un.varDmp.word_cnt && offset < DMP_VPD_SIZE);

        lpfc_parse_vpd(phba, lpfc_vpd_data, offset);

        kfree(lpfc_vpd_data);
out_free_mbox:
        mempool_free(pmb, phba->mbox_mem_pool);
        return 0;
}

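/*
 * Illustrative sketch (not part of the driver): lpfc_config_port_prep()
 * above follows the driver's common polled-mailbox pattern -- allocate an
 * LPFC_MBOXQ_t from phba->mbox_mem_pool, build the command, issue it with
 * MBX_POLL, check for MBX_SUCCESS, and return the element to the pool.
 * Reduced to its skeleton (the helper name is hypothetical):
 *
 *	static int example_read_rev(struct lpfc_hba *phba, uint32_t *fcph_high)
 *	{
 *		LPFC_MBOXQ_t *pmb;
 *		int rc;
 *
 *		pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
 *		if (!pmb)
 *			return -ENOMEM;
 *		lpfc_read_rev(phba, pmb);	// build READ_REV in pmb->u.mb
 *		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
 *		if (rc != MBX_SUCCESS) {	// mailbox-level failure
 *			mempool_free(pmb, phba->mbox_mem_pool);
 *			return -ERESTART;
 *		}
 *		*fcph_high = pmb->u.mb.un.varRdRev.fcphHigh; // consume reply
 *		mempool_free(pmb, phba->mbox_mem_pool);
 *		return 0;
 *	}
 */
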
/**
 * lpfc_config_async_cmpl - Completion handler for config async event mbox cmd
 * @phba: pointer to lpfc hba data structure.
 * @pmboxq: pointer to the driver internal queue element for mailbox command.
 *
 * This is the completion handler for the driver's mailbox command that
 * configures asynchronous event support on the device. If the mailbox
 * command returns successfully, it sets the internal async event support
 * flag to 1; otherwise, it sets the flag to 0.
 **/
static void
lpfc_config_async_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
{
        if (pmboxq->u.mb.mbxStatus == MBX_SUCCESS)
                phba->temp_sensor_support = 1;
        else
                phba->temp_sensor_support = 0;
        mempool_free(pmboxq, phba->mbox_mem_pool);
}

/**
 * lpfc_dump_wakeup_param_cmpl - dump memory mailbox command completion handler
 * @phba: pointer to lpfc hba data structure.
 * @pmboxq: pointer to the driver internal queue element for mailbox command.
 *
 * This is the completion handler for the dump mailbox command that retrieves
 * the wake up parameters. When this command completes, the response contains
 * the Option ROM version of the HBA. This function translates the version
 * number into a human readable string and stores it in OptionROMVersion.
 **/
static void
lpfc_dump_wakeup_param_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
{
        struct prog_id *prg;
        uint32_t prog_id_word;
        char dist = ' ';
        /* character array used for decoding dist type. */
        char dist_char[] = "nabx";

        if (pmboxq->u.mb.mbxStatus != MBX_SUCCESS) {
                mempool_free(pmboxq, phba->mbox_mem_pool);
                return;
        }

        prg = (struct prog_id *) &prog_id_word;

        /* word 7 contains the option rom version */
        prog_id_word = pmboxq->u.mb.un.varWords[7];

        /* Decode the Option rom version word to a readable string */
        if (prg->dist < 4)
                dist = dist_char[prg->dist];

        if ((prg->dist == 3) && (prg->num == 0))
                snprintf(phba->OptionROMVersion, 32, "%d.%d%d",
                         prg->ver, prg->rev, prg->lev);
        else
                snprintf(phba->OptionROMVersion, 32, "%d.%d%d%c%d",
                         prg->ver, prg->rev, prg->lev,
                         dist, prg->num);
        mempool_free(pmboxq, phba->mbox_mem_pool);
}

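/*
 * Worked example for the decode above (field values are hypothetical): a
 * prog_id_word decoding to ver=12, rev=8, lev=0, dist=1, num=3 maps dist
 * through dist_char[] ("nabx") to 'a'; since dist != 3, the second
 * snprintf() format is used, yielding "12.80a3". A word decoding to
 * dist=3 and num=0 takes the first format and drops the distribution
 * suffix, e.g. ver=12, rev=8, lev=0 -> "12.80".
 */
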
/**
 * lpfc_update_vport_wwn - Updates the fc_nodename, fc_portname,
 *      cfg_soft_wwnn, cfg_soft_wwpn
 * @vport: pointer to lpfc vport data structure.
 *
 * Return codes
 *   None.
 **/
void
lpfc_update_vport_wwn(struct lpfc_vport *vport)
{
        uint8_t vvvl = vport->fc_sparam.cmn.valid_vendor_ver_level;
        u32 *fawwpn_key = (u32 *)&vport->fc_sparam.un.vendorVersion[0];

        /* If the soft name exists then update it using the service params */
        if (vport->phba->cfg_soft_wwnn)
                u64_to_wwn(vport->phba->cfg_soft_wwnn,
                           vport->fc_sparam.nodeName.u.wwn);
        if (vport->phba->cfg_soft_wwpn)
                u64_to_wwn(vport->phba->cfg_soft_wwpn,
                           vport->fc_sparam.portName.u.wwn);

        /*
         * If the name is empty or there exists a soft name
         * then copy the service params name, otherwise use the fc name
         */
        if (vport->fc_nodename.u.wwn[0] == 0 || vport->phba->cfg_soft_wwnn)
                memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName,
                       sizeof(struct lpfc_name));
        else
                memcpy(&vport->fc_sparam.nodeName, &vport->fc_nodename,
                       sizeof(struct lpfc_name));

        /*
         * If the port name has changed, then set the Param changes flag
         * to unreg the login
         */
        if (vport->fc_portname.u.wwn[0] != 0 &&
            memcmp(&vport->fc_portname, &vport->fc_sparam.portName,
                   sizeof(struct lpfc_name)))
                vport->vport_flag |= FAWWPN_PARAM_CHG;

        if (vport->fc_portname.u.wwn[0] == 0 ||
            vport->phba->cfg_soft_wwpn ||
            (vvvl == 1 && cpu_to_be32(*fawwpn_key) == FAPWWN_KEY_VENDOR) ||
            vport->vport_flag & FAWWPN_SET) {
                memcpy(&vport->fc_portname, &vport->fc_sparam.portName,
                       sizeof(struct lpfc_name));
                vport->vport_flag &= ~FAWWPN_SET;
                if (vvvl == 1 && cpu_to_be32(*fawwpn_key) == FAPWWN_KEY_VENDOR)
                        vport->vport_flag |= FAWWPN_SET;
        } else {
                memcpy(&vport->fc_sparam.portName, &vport->fc_portname,
                       sizeof(struct lpfc_name));
        }
}

/**
 * lpfc_config_port_post - Perform lpfc initialization after config port
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine will do LPFC initialization after the CONFIG_PORT mailbox
 * command call. It performs all internal resource and state setups on the
 * port: post IOCB buffers, enable appropriate host interrupt attentions,
 * ELS ring timers, etc.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
int
lpfc_config_port_post(struct lpfc_hba *phba)
{
        struct lpfc_vport *vport = phba->pport;
        struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
        LPFC_MBOXQ_t *pmb;
        MAILBOX_t *mb;
        struct lpfc_dmabuf *mp;
        struct lpfc_sli *psli = &phba->sli;
        uint32_t status, timeout;
        int i, j;
        int rc;

        spin_lock_irq(&phba->hbalock);
        /*
         * If the Config port completed correctly the HBA is not
         * overheated any more.
         */
        if (phba->over_temp_state == HBA_OVER_TEMP)
                phba->over_temp_state = HBA_NORMAL_TEMP;
        spin_unlock_irq(&phba->hbalock);

        pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
        if (!pmb) {
                phba->link_state = LPFC_HBA_ERROR;
                return -ENOMEM;
        }
        mb = &pmb->u.mb;

        /* Get login parameters for NID.  */
        rc = lpfc_read_sparam(phba, pmb, 0);
        if (rc) {
                mempool_free(pmb, phba->mbox_mem_pool);
                return -ENOMEM;
        }

        pmb->vport = vport;
        if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
                                "0448 Adapter failed init, mbxCmd x%x "
                                "READ_SPARM mbxStatus x%x\n",
                                mb->mbxCommand, mb->mbxStatus);
                phba->link_state = LPFC_HBA_ERROR;
                mp = (struct lpfc_dmabuf *)pmb->ctx_buf;
                mempool_free(pmb, phba->mbox_mem_pool);
                lpfc_mbuf_free(phba, mp->virt, mp->phys);
                kfree(mp);
                return -EIO;
        }

        mp = (struct lpfc_dmabuf *)pmb->ctx_buf;

        memcpy(&vport->fc_sparam, mp->virt, sizeof (struct serv_parm));
        lpfc_mbuf_free(phba, mp->virt, mp->phys);
        kfree(mp);
        pmb->ctx_buf = NULL;
        lpfc_update_vport_wwn(vport);

        /* Update the fc_host data structures with new wwn. */
        fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
        fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
        fc_host_max_npiv_vports(shost) = phba->max_vpi;

        /* If no serial number in VPD data, use low 6 bytes of WWNN */
        /* This should be consolidated into parse_vpd ? - mr */
        if (phba->SerialNumber[0] == 0) {
                uint8_t *outptr;

                outptr = &vport->fc_nodename.u.s.IEEE[0];
                for (i = 0; i < 12; i++) {
                        status = *outptr++;
                        j = ((status & 0xf0) >> 4);
                        if (j <= 9)
                                phba->SerialNumber[i] =
                                    (char)((uint8_t) 0x30 + (uint8_t) j);
                        else
                                phba->SerialNumber[i] =
                                    (char)((uint8_t) 0x61 + (uint8_t) (j - 10));
                        i++;
                        j = (status & 0xf);
                        if (j <= 9)
                                phba->SerialNumber[i] =
                                    (char)((uint8_t) 0x30 + (uint8_t) j);
                        else
                                phba->SerialNumber[i] =
                                    (char)((uint8_t) 0x61 + (uint8_t) (j - 10));
                }
        }
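        /*
         * The loop above emits two serial characters per IEEE-address
         * byte: the high and low nibbles map to '0'..'9' (0x30 + j) or
         * 'a'..'f' (0x61 + j - 10), i.e. lowercase hex. Six bytes are
         * consumed (one per iteration, with i advancing by two), giving
         * a 12-character serial; e.g. (hypothetical values) IEEE bytes
         * 00 90 fa 12 34 56 yield "0090fa123456".
         */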

        lpfc_read_config(phba, pmb);
        pmb->vport = vport;
        if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
                                "0453 Adapter failed to init, mbxCmd x%x "
                                "READ_CONFIG, mbxStatus x%x\n",
                                mb->mbxCommand, mb->mbxStatus);
                phba->link_state = LPFC_HBA_ERROR;
                mempool_free(pmb, phba->mbox_mem_pool);
                return -EIO;
        }

        /* Check if the port is disabled */
        lpfc_sli_read_link_ste(phba);

        /* Reset the DFT_HBA_Q_DEPTH to the max xri  */
        if (phba->cfg_hba_queue_depth > mb->un.varRdConfig.max_xri) {
                lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
                                "3359 HBA queue depth changed from %d to %d\n",
                                phba->cfg_hba_queue_depth,
                                mb->un.varRdConfig.max_xri);
                phba->cfg_hba_queue_depth = mb->un.varRdConfig.max_xri;
        }

        phba->lmt = mb->un.varRdConfig.lmt;

        /* Get the default values for Model Name and Description */
        lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);

        phba->link_state = LPFC_LINK_DOWN;

        /* Only process IOCBs on ELS ring till hba_state is READY */
        if (psli->sli3_ring[LPFC_EXTRA_RING].sli.sli3.cmdringaddr)
                psli->sli3_ring[LPFC_EXTRA_RING].flag |= LPFC_STOP_IOCB_EVENT;
        if (psli->sli3_ring[LPFC_FCP_RING].sli.sli3.cmdringaddr)
                psli->sli3_ring[LPFC_FCP_RING].flag |= LPFC_STOP_IOCB_EVENT;

        /* Post receive buffers for desired rings */
        if (phba->sli_rev != 3)
                lpfc_post_rcv_buf(phba);

        /*
         * Configure HBA MSI-X attention conditions to messages if MSI-X mode
         */
        if (phba->intr_type == MSIX) {
                rc = lpfc_config_msi(phba, pmb);
                if (rc) {
                        mempool_free(pmb, phba->mbox_mem_pool);
                        return -EIO;
                }
                rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
                if (rc != MBX_SUCCESS) {
                        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
                                        "0352 Config MSI mailbox command "
                                        "failed, mbxCmd x%x, mbxStatus x%x\n",
                                        pmb->u.mb.mbxCommand,
                                        pmb->u.mb.mbxStatus);
                        mempool_free(pmb, phba->mbox_mem_pool);
                        return -EIO;
                }
        }

        spin_lock_irq(&phba->hbalock);
        /* Initialize ERATT handling flag */
        phba->hba_flag &= ~HBA_ERATT_HANDLED;

        /* Enable appropriate host interrupts */
        if (lpfc_readl(phba->HCregaddr, &status)) {
                spin_unlock_irq(&phba->hbalock);
                return -EIO;
        }
        status |= HC_MBINT_ENA | HC_ERINT_ENA | HC_LAINT_ENA;
        if (psli->num_rings > 0)
                status |= HC_R0INT_ENA;
        if (psli->num_rings > 1)
                status |= HC_R1INT_ENA;
        if (psli->num_rings > 2)
                status |= HC_R2INT_ENA;
        if (psli->num_rings > 3)
                status |= HC_R3INT_ENA;

        if ((phba->cfg_poll & ENABLE_FCP_RING_POLLING) &&
            (phba->cfg_poll & DISABLE_FCP_RING_INT))
                status &= ~(HC_R0INT_ENA);

        writel(status, phba->HCregaddr);
        readl(phba->HCregaddr); /* flush */
        spin_unlock_irq(&phba->hbalock);

        /* Set up ring-0 (ELS) timer */
        timeout = phba->fc_ratov * 2;
        mod_timer(&vport->els_tmofunc,
                  jiffies + msecs_to_jiffies(1000 * timeout));
        /* Set up heart beat (HB) timer */
        mod_timer(&phba->hb_tmofunc,
                  jiffies + msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
        phba->hba_flag &= ~(HBA_HBEAT_INP | HBA_HBEAT_TMO);
        phba->last_completion_time = jiffies;
        /* Set up error attention (ERATT) polling timer */
        mod_timer(&phba->eratt_poll,
                  jiffies + msecs_to_jiffies(1000 * phba->eratt_poll_interval));

        if (phba->hba_flag & LINK_DISABLED) {
                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
                                "2598 Adapter Link is disabled.\n");
                lpfc_down_link(phba, pmb);
                pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
                rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
                if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) {
                        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
                                        "2599 Adapter failed to issue DOWN_LINK"
                                        " mbox command rc 0x%x\n", rc);

                        mempool_free(pmb, phba->mbox_mem_pool);
                        return -EIO;
                }
        } else if (phba->cfg_suppress_link_up == LPFC_INITIALIZE_LINK) {
                mempool_free(pmb, phba->mbox_mem_pool);
                rc = phba->lpfc_hba_init_link(phba, MBX_NOWAIT);
                if (rc)
                        return rc;
        }
        /* MBOX buffer will be freed in mbox compl */
        pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
        if (!pmb) {
                phba->link_state = LPFC_HBA_ERROR;
                return -ENOMEM;
        }

        lpfc_config_async(phba, pmb, LPFC_ELS_RING);
        pmb->mbox_cmpl = lpfc_config_async_cmpl;
        pmb->vport = phba->pport;
        rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);

        if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
                                "0456 Adapter failed to issue "
                                "ASYNCEVT_ENABLE mbox status x%x\n",
                                rc);
                mempool_free(pmb, phba->mbox_mem_pool);
        }

        /* Get Option rom version */
        pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
        if (!pmb) {
                phba->link_state = LPFC_HBA_ERROR;
                return -ENOMEM;
        }

        lpfc_dump_wakeup_param(phba, pmb);
        pmb->mbox_cmpl = lpfc_dump_wakeup_param_cmpl;
        pmb->vport = phba->pport;
        rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);

        if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
                                "0435 Adapter failed "
                                "to get Option ROM version status x%x\n", rc);
                mempool_free(pmb, phba->mbox_mem_pool);
        }

        return 0;
}

/**
 * lpfc_hba_init_link - Initialize the FC link
 * @phba: pointer to lpfc hba data structure.
 * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT
 *
 * This routine will issue the INIT_LINK mailbox command call.
 * It is available to other drivers through the lpfc_hba data
 * structure for use as a delayed link up mechanism with the
 * module parameter lpfc_suppress_link_up.
 *
 * Return code
 *              0 - success
 *              Any other value - error
 **/
static int
lpfc_hba_init_link(struct lpfc_hba *phba, uint32_t flag)
{
        return lpfc_hba_init_link_fc_topology(phba, phba->cfg_topology, flag);
}

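/*
 * Illustrative sketch: the "delayed link up mechanism" described above is
 * exercised through the function pointer in struct lpfc_hba rather than
 * by calling this routine directly, as lpfc_config_port_post() does
 * earlier in this file:
 *
 *	} else if (phba->cfg_suppress_link_up == LPFC_INITIALIZE_LINK) {
 *		mempool_free(pmb, phba->mbox_mem_pool);
 *		rc = phba->lpfc_hba_init_link(phba, MBX_NOWAIT);
 *		if (rc)
 *			return rc;
 *	}
 *
 * When lpfc_suppress_link_up is configured, that call is skipped at init
 * time and the link is brought up later through the same entry point.
 */
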
/**
 * lpfc_hba_init_link_fc_topology - Initialize FC link with desired topology
 * @phba: pointer to lpfc hba data structure.
 * @fc_topology: desired fc topology.
 * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT
 *
 * This routine will issue the INIT_LINK mailbox command call.
 * It is available to other drivers through the lpfc_hba data
 * structure for use as a delayed link up mechanism with the
 * module parameter lpfc_suppress_link_up.
 *
 * Return code
 *              0 - success
 *              Any other value - error
 **/
int
lpfc_hba_init_link_fc_topology(struct lpfc_hba *phba, uint32_t fc_topology,
                               uint32_t flag)
{
        struct lpfc_vport *vport = phba->pport;
        LPFC_MBOXQ_t *pmb;
        MAILBOX_t *mb;
        int rc;

        pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
        if (!pmb) {
                phba->link_state = LPFC_HBA_ERROR;
                return -ENOMEM;
        }
        mb = &pmb->u.mb;
        pmb->vport = vport;

        if ((phba->cfg_link_speed > LPFC_USER_LINK_SPEED_MAX) ||
            ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_1G) &&
             !(phba->lmt & LMT_1Gb)) ||
            ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_2G) &&
             !(phba->lmt & LMT_2Gb)) ||
            ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_4G) &&
             !(phba->lmt & LMT_4Gb)) ||
            ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_8G) &&
             !(phba->lmt & LMT_8Gb)) ||
            ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_10G) &&
             !(phba->lmt & LMT_10Gb)) ||
            ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_16G) &&
             !(phba->lmt & LMT_16Gb)) ||
            ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_32G) &&
             !(phba->lmt & LMT_32Gb)) ||
            ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_64G) &&
             !(phba->lmt & LMT_64Gb))) {
                /* Reset link speed to auto */
                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
                                "1302 Invalid speed for this board:%d "
                                "Reset link speed to auto.\n",
                                phba->cfg_link_speed);
                phba->cfg_link_speed = LPFC_USER_LINK_SPEED_AUTO;
        }
        lpfc_init_link(phba, pmb, fc_topology, phba->cfg_link_speed);
        pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
        if (phba->sli_rev < LPFC_SLI_REV4)
                lpfc_set_loopback_flag(phba);
        rc = lpfc_sli_issue_mbox(phba, pmb, flag);
        if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
                                "0498 Adapter failed to init, mbxCmd x%x "
                                "INIT_LINK, mbxStatus x%x\n",
                                mb->mbxCommand, mb->mbxStatus);
                if (phba->sli_rev <= LPFC_SLI_REV3) {
                        /* Clear all interrupt enable conditions */
                        writel(0, phba->HCregaddr);
                        readl(phba->HCregaddr); /* flush */
                        /* Clear all pending interrupts */
                        writel(0xffffffff, phba->HAregaddr);
                        readl(phba->HAregaddr); /* flush */
                }
                phba->link_state = LPFC_HBA_ERROR;
                if (rc != MBX_BUSY || flag == MBX_POLL)
                        mempool_free(pmb, phba->mbox_mem_pool);
                return -EIO;
        }
        phba->cfg_suppress_link_up = LPFC_INITIALIZE_LINK;
        if (flag == MBX_POLL)
                mempool_free(pmb, phba->mbox_mem_pool);

        return 0;
}

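/*
 * Worked example for the speed check above (hypothetical configuration):
 * with lpfc_link_speed set to LPFC_USER_LINK_SPEED_16G on a board whose
 * link-speed mask lacks LMT_16Gb (say an 8G adapter with phba->lmt =
 * LMT_8Gb | LMT_4Gb | LMT_2Gb), the condition matches, message 1302 is
 * logged, and cfg_link_speed falls back to LPFC_USER_LINK_SPEED_AUTO
 * before the INIT_LINK mailbox command is built.
 */
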
/**
 * lpfc_hba_down_link - this routine downs the FC link
 * @phba: pointer to lpfc hba data structure.
 * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT
 *
 * This routine will issue the DOWN_LINK mailbox command call.
 * It is available to other drivers through the lpfc_hba data
 * structure for use to stop the link.
 *
 * Return code
 *              0 - success
 *              Any other value - error
 **/
static int
lpfc_hba_down_link(struct lpfc_hba *phba, uint32_t flag)
{
        LPFC_MBOXQ_t *pmb;
        int rc;

        pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
        if (!pmb) {
                phba->link_state = LPFC_HBA_ERROR;
                return -ENOMEM;
        }

        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
                        "0491 Adapter Link is disabled.\n");
        lpfc_down_link(phba, pmb);
        pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
        rc = lpfc_sli_issue_mbox(phba, pmb, flag);
        if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) {
                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
                                "2522 Adapter failed to issue DOWN_LINK"
                                " mbox command rc 0x%x\n", rc);

                mempool_free(pmb, phba->mbox_mem_pool);
                return -EIO;
        }
        if (flag == MBX_POLL)
                mempool_free(pmb, phba->mbox_mem_pool);

        return 0;
}

/**
 * lpfc_hba_down_prep - Perform lpfc uninitialization prior to HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will do LPFC uninitialization before the HBA is reset when
 * bringing down the SLI Layer.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
int
lpfc_hba_down_prep(struct lpfc_hba *phba)
{
        struct lpfc_vport **vports;
        int i;

        if (phba->sli_rev <= LPFC_SLI_REV3) {
                /* Disable interrupts */
                writel(0, phba->HCregaddr);
                readl(phba->HCregaddr); /* flush */
        }

        if (phba->pport->load_flag & FC_UNLOADING) {
                lpfc_cleanup_discovery_resources(phba->pport);
        } else {
                vports = lpfc_create_vport_work_array(phba);
                if (vports != NULL)
                        for (i = 0; i <= phba->max_vports &&
                                vports[i] != NULL; i++)
                                lpfc_cleanup_discovery_resources(vports[i]);
                lpfc_destroy_vport_work_array(phba, vports);
        }
        return 0;
}

/**
 * lpfc_sli4_free_sp_events - Cleanup sp_queue_events to free
 * rspiocbs which were deferred
 *
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will cleanup completed slow path events after HBA is reset
 * when bringing down the SLI Layer.
 *
 * Return codes
 *   void.
 **/
static void
lpfc_sli4_free_sp_events(struct lpfc_hba *phba)
{
        struct lpfc_iocbq *rspiocbq;
        struct hbq_dmabuf *dmabuf;
        struct lpfc_cq_event *cq_event;

        spin_lock_irq(&phba->hbalock);
        phba->hba_flag &= ~HBA_SP_QUEUE_EVT;
        spin_unlock_irq(&phba->hbalock);

        while (!list_empty(&phba->sli4_hba.sp_queue_event)) {
                /* Get the response iocb from the head of work queue */
                spin_lock_irq(&phba->hbalock);
                list_remove_head(&phba->sli4_hba.sp_queue_event,
                                 cq_event, struct lpfc_cq_event, list);
                spin_unlock_irq(&phba->hbalock);

                switch (bf_get(lpfc_wcqe_c_code, &cq_event->cqe.wcqe_cmpl)) {
                case CQE_CODE_COMPL_WQE:
                        rspiocbq = container_of(cq_event, struct lpfc_iocbq,
                                                cq_event);
                        lpfc_sli_release_iocbq(phba, rspiocbq);
                        break;
                case CQE_CODE_RECEIVE:
                case CQE_CODE_RECEIVE_V1:
                        dmabuf = container_of(cq_event, struct hbq_dmabuf,
                                              cq_event);
                        lpfc_in_buf_free(phba, &dmabuf->dbuf);
                }
        }
}

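/*
 * Illustrative sketch of the container_of() recovery used above: the
 * struct lpfc_cq_event sits embedded inside the outer object, so the
 * full structure is recovered from the queued list entry. Modeled with
 * a hypothetical outer type:
 *
 *	struct example_iocbq {			// models struct lpfc_iocbq
 *		int other_state;
 *		struct lpfc_cq_event cq_event;	// embedded member
 *	};
 *
 *	struct lpfc_cq_event *ev;		// taken off sp_queue_event
 *	struct example_iocbq *io =
 *		container_of(ev, struct example_iocbq, cq_event);
 *
 * The CQE code (lpfc_wcqe_c_code) selects which outer type the event is
 * embedded in: lpfc_iocbq for completions, hbq_dmabuf for receives.
 */
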
/**
 * lpfc_hba_free_post_buf - Perform lpfc uninitialization after HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will cleanup posted ELS buffers after the HBA is reset
 * when bringing down the SLI Layer.
 *
 * Return codes
 *   void.
 **/
static void
lpfc_hba_free_post_buf(struct lpfc_hba *phba)
{
        struct lpfc_sli *psli = &phba->sli;
        struct lpfc_sli_ring *pring;
        struct lpfc_dmabuf *mp, *next_mp;
        LIST_HEAD(buflist);
        int count;

        if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
                lpfc_sli_hbqbuf_free_all(phba);
        } else {
                /* Cleanup preposted buffers on the ELS ring */
                pring = &psli->sli3_ring[LPFC_ELS_RING];
                spin_lock_irq(&phba->hbalock);
                list_splice_init(&pring->postbufq, &buflist);
                spin_unlock_irq(&phba->hbalock);

                count = 0;
                list_for_each_entry_safe(mp, next_mp, &buflist, list) {
                        list_del(&mp->list);
                        count++;
                        lpfc_mbuf_free(phba, mp->virt, mp->phys);
                        kfree(mp);
                }

                spin_lock_irq(&phba->hbalock);
                pring->postbufq_cnt -= count;
                spin_unlock_irq(&phba->hbalock);
        }
}

/**
 * lpfc_hba_clean_txcmplq - Perform lpfc uninitialization after HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will cleanup the txcmplq after the HBA is reset when bringing
 * down the SLI Layer.
 *
 * Return codes
 *   void
 **/
static void
lpfc_hba_clean_txcmplq(struct lpfc_hba *phba)
{
        struct lpfc_sli *psli = &phba->sli;
        struct lpfc_queue *qp = NULL;
        struct lpfc_sli_ring *pring;
        LIST_HEAD(completions);
        int i;
        struct lpfc_iocbq *piocb, *next_iocb;

        if (phba->sli_rev != LPFC_SLI_REV4) {
                for (i = 0; i < psli->num_rings; i++) {
                        pring = &psli->sli3_ring[i];
                        spin_lock_irq(&phba->hbalock);
                        /* At this point in time the HBA is either reset or DOA
                         * Nothing should be on txcmplq as it will
                         * NEVER complete.
                         */
                        list_splice_init(&pring->txcmplq, &completions);
                        pring->txcmplq_cnt = 0;
                        spin_unlock_irq(&phba->hbalock);

                        lpfc_sli_abort_iocb_ring(phba, pring);
                }
                /* Cancel all the IOCBs from the completions list */
                lpfc_sli_cancel_iocbs(phba, &completions,
                                      IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED);
                return;
        }
        list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
                pring = qp->pring;
                if (!pring)
                        continue;
                spin_lock_irq(&pring->ring_lock);
                list_for_each_entry_safe(piocb, next_iocb,
                                         &pring->txcmplq, list)
                        piocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
                list_splice_init(&pring->txcmplq, &completions);
                pring->txcmplq_cnt = 0;
                spin_unlock_irq(&pring->ring_lock);
                lpfc_sli_abort_iocb_ring(phba, pring);
        }
        /* Cancel all the IOCBs from the completions list */
        lpfc_sli_cancel_iocbs(phba, &completions,
                              IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED);
}

/**
 * lpfc_hba_down_post_s3 - Perform lpfc uninitialization after HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will do uninitialization after the HBA is reset when bringing
 * down the SLI Layer.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
static int
lpfc_hba_down_post_s3(struct lpfc_hba *phba)
{
        lpfc_hba_free_post_buf(phba);
        lpfc_hba_clean_txcmplq(phba);
        return 0;
}

/**
 * lpfc_hba_down_post_s4 - Perform lpfc uninitialization after HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will do uninitialization after the HBA is reset when bringing
 * down the SLI Layer.
 *
 * Return codes
 *   the number of outstanding aborted I/O buffers recovered to the free
 *   lists.
 **/
static int
lpfc_hba_down_post_s4(struct lpfc_hba *phba)
{
        struct lpfc_io_buf *psb, *psb_next;
        struct lpfc_async_xchg_ctx *ctxp, *ctxp_next;
        struct lpfc_sli4_hdw_queue *qp;
        LIST_HEAD(aborts);
        LIST_HEAD(nvme_aborts);
        LIST_HEAD(nvmet_aborts);
        struct lpfc_sglq *sglq_entry = NULL;
        int cnt, idx;

        lpfc_sli_hbqbuf_free_all(phba);
        lpfc_hba_clean_txcmplq(phba);

        /* At this point in time the HBA is either reset or DOA. Either
         * way, nothing should be on lpfc_abts_els_sgl_list, it needs to be
         * on the lpfc_els_sgl_list so that it can either be freed if the
         * driver is unloading or reposted if the driver is restarting
         * the port.
         */

        /* sgl_list_lock required because worker thread uses this
         * list.
         */
        spin_lock_irq(&phba->sli4_hba.sgl_list_lock);
        list_for_each_entry(sglq_entry,
                &phba->sli4_hba.lpfc_abts_els_sgl_list, list)
                sglq_entry->state = SGL_FREED;

        list_splice_init(&phba->sli4_hba.lpfc_abts_els_sgl_list,
                         &phba->sli4_hba.lpfc_els_sgl_list);

        spin_unlock_irq(&phba->sli4_hba.sgl_list_lock);

        /* abts_xxxx_buf_list_lock required because worker thread uses this
         * list.
         */
        spin_lock_irq(&phba->hbalock);
        cnt = 0;
        for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
                qp = &phba->sli4_hba.hdwq[idx];

                spin_lock(&qp->abts_io_buf_list_lock);
                list_splice_init(&qp->lpfc_abts_io_buf_list,
                                 &aborts);

                list_for_each_entry_safe(psb, psb_next, &aborts, list) {
                        psb->pCmd = NULL;
                        psb->status = IOSTAT_SUCCESS;
                        cnt++;
                }
                spin_lock(&qp->io_buf_list_put_lock);
                list_splice_init(&aborts, &qp->lpfc_io_buf_list_put);
                qp->put_io_bufs += qp->abts_scsi_io_bufs;
                qp->put_io_bufs += qp->abts_nvme_io_bufs;
                qp->abts_scsi_io_bufs = 0;
                qp->abts_nvme_io_bufs = 0;
                spin_unlock(&qp->io_buf_list_put_lock);
                spin_unlock(&qp->abts_io_buf_list_lock);
        }
        spin_unlock_irq(&phba->hbalock);

        if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
                spin_lock_irq(&phba->sli4_hba.abts_nvmet_buf_list_lock);
                list_splice_init(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list,
                                 &nvmet_aborts);
                spin_unlock_irq(&phba->sli4_hba.abts_nvmet_buf_list_lock);
                list_for_each_entry_safe(ctxp, ctxp_next, &nvmet_aborts, list) {
                        ctxp->flag &= ~(LPFC_NVME_XBUSY | LPFC_NVME_ABORT_OP);
                        lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
                }
        }

        lpfc_sli4_free_sp_events(phba);
        return cnt;
}

/**
 * lpfc_hba_down_post - Wrapper func for hba down post routine
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine wraps the actual SLI3 or SLI4 routine for performing
 * uninitialization after the HBA is reset when bringing down the SLI Layer.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
int
lpfc_hba_down_post(struct lpfc_hba *phba)
{
        return (*phba->lpfc_hba_down_post)(phba);
}

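/*
 * Sketch of how this indirection is wired (reduced form; the actual
 * assignments are made during API-table setup elsewhere in this file,
 * keyed by the PCI device group):
 *
 *	switch (dev_grp) {
 *	case LPFC_PCI_DEV_LP:			// SLI-3 adapters
 *		phba->lpfc_hba_down_post = lpfc_hba_down_post_s3;
 *		break;
 *	case LPFC_PCI_DEV_OC:			// SLI-4 adapters
 *		phba->lpfc_hba_down_post = lpfc_hba_down_post_s4;
 *		break;
 *	}
 *
 * so lpfc_hba_down_post() stays SLI-revision agnostic at its call sites.
 */
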
/**
 * lpfc_hb_timeout - The HBA-timer timeout handler
 * @t: timer context used to obtain the pointer to lpfc hba data structure.
 *
 * This is the HBA-timer timeout handler registered to the lpfc driver. When
 * this timer fires, a HBA timeout event shall be posted to the lpfc driver
 * work-port-events bitmap and the worker thread is notified. This timeout
 * event will be used by the worker thread to invoke the actual timeout
 * handler routine, lpfc_hb_timeout_handler. Any periodic operations will
 * be performed in the timeout handler and the HBA timeout event bit shall
 * be cleared by the worker thread after it has taken the event bitmap out.
 **/
static void
lpfc_hb_timeout(struct timer_list *t)
{
        struct lpfc_hba *phba;
        uint32_t tmo_posted;
        unsigned long iflag;

        phba = from_timer(phba, t, hb_tmofunc);

        /* Check for heart beat timeout conditions */
        spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
        tmo_posted = phba->pport->work_port_events & WORKER_HB_TMO;
        if (!tmo_posted)
                phba->pport->work_port_events |= WORKER_HB_TMO;
        spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);

        /* Tell the worker thread there is work to do */
        if (!tmo_posted)
                lpfc_worker_wake_up(phba);
}

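/*
 * Illustrative sketch: lpfc_hb_timeout() is a timer_list callback, so it
 * recovers its lpfc_hba with from_timer() against the hb_tmofunc member
 * (from_timer() is container_of() specialized for struct timer_list
 * members). The registration side, done during driver resource setup
 * elsewhere in the driver, pairs with it like:
 *
 *	timer_setup(&phba->hb_tmofunc, lpfc_hb_timeout, 0);
 *	mod_timer(&phba->hb_tmofunc,
 *		  jiffies + msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
 */
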
/**
 * lpfc_rrq_timeout - The RRQ-timer timeout handler
 * @t: timer context used to obtain the pointer to lpfc hba data structure.
 *
 * This is the RRQ-timer timeout handler registered to the lpfc driver. When
 * this timer fires, a RRQ timeout event shall be posted to the lpfc driver
 * work-port-events bitmap and the worker thread is notified. This timeout
 * event will be used by the worker thread to invoke the actual timeout
 * handler routine, lpfc_rrq_handler. Any periodic operations will
 * be performed in the timeout handler and the RRQ timeout event bit shall
 * be cleared by the worker thread after it has taken the event bitmap out.
 **/
static void
lpfc_rrq_timeout(struct timer_list *t)
{
        struct lpfc_hba *phba;
        unsigned long iflag;

        phba = from_timer(phba, t, rrq_tmr);
        spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
        if (!(phba->pport->load_flag & FC_UNLOADING))
                phba->hba_flag |= HBA_RRQ_ACTIVE;
        else
                phba->hba_flag &= ~HBA_RRQ_ACTIVE;
        spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);

        if (!(phba->pport->load_flag & FC_UNLOADING))
                lpfc_worker_wake_up(phba);
}

/**
 * lpfc_hb_mbox_cmpl - The lpfc heart-beat mailbox command callback function
 * @phba: pointer to lpfc hba data structure.
 * @pmboxq: pointer to the driver internal queue element for mailbox command.
 *
 * This is the callback function to the lpfc heart-beat mailbox command.
 * If configured, the lpfc driver issues the heart-beat mailbox command to
 * the HBA every LPFC_HB_MBOX_INTERVAL (currently 5) seconds. At the time the
 * heart-beat mailbox command is issued, the driver shall set up the
 * heart-beat timeout timer to LPFC_HB_MBOX_TIMEOUT (currently 30) seconds
 * and mark the heart-beat outstanding state. Once the mailbox command comes
 * back and no error conditions are detected, the heart-beat mailbox command
 * timer is reset to LPFC_HB_MBOX_INTERVAL seconds and the heart-beat
 * outstanding state is cleared for the next heart-beat. If the timer
 * expired with the heart-beat outstanding state set, the driver will put
 * the HBA offline.
 **/
static void
lpfc_hb_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
{
        unsigned long drvr_flag;

        spin_lock_irqsave(&phba->hbalock, drvr_flag);
        phba->hba_flag &= ~(HBA_HBEAT_INP | HBA_HBEAT_TMO);
        spin_unlock_irqrestore(&phba->hbalock, drvr_flag);

        /* Check and reset heart-beat timer if necessary */
        mempool_free(pmboxq, phba->mbox_mem_pool);
        if (!(phba->pport->fc_flag & FC_OFFLINE_MODE) &&
            !(phba->link_state == LPFC_HBA_ERROR) &&
            !(phba->pport->load_flag & FC_UNLOADING))
                mod_timer(&phba->hb_tmofunc,
                          jiffies +
                          msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
}

/**
 * lpfc_idle_stat_delay_work - idle_stat tracking
 * @work: pointer to the delayed work embedded in lpfc hba data structure.
 *
 * This routine tracks per-cq idle_stat and determines polling decisions.
 *
 * Return codes:
 *   None
 **/
static void
lpfc_idle_stat_delay_work(struct work_struct *work)
{
        struct lpfc_hba *phba = container_of(to_delayed_work(work),
                                             struct lpfc_hba,
                                             idle_stat_delay_work);
        struct lpfc_queue *cq;
        struct lpfc_sli4_hdw_queue *hdwq;
        struct lpfc_idle_stat *idle_stat;
        u32 i, idle_percent;
        u64 wall, wall_idle, diff_wall, diff_idle, busy_time;

        if (phba->pport->load_flag & FC_UNLOADING)
                return;

        if (phba->link_state == LPFC_HBA_ERROR ||
            phba->pport->fc_flag & FC_OFFLINE_MODE)
                goto requeue;

        for_each_present_cpu(i) {
                hdwq = &phba->sli4_hba.hdwq[phba->sli4_hba.cpu_map[i].hdwq];
                cq = hdwq->io_cq;

                /* Skip if we've already handled this cq's primary CPU */
                if (cq->chann != i)
                        continue;

                idle_stat = &phba->sli4_hba.idle_stat[i];

                /* get_cpu_idle_time returns values as running counters. Thus,
                 * to know the amount for this period, the prior counter values
                 * need to be subtracted from the current counter values.
                 * From there, the idle time stat can be calculated as a
                 * percentage of 100 - the sum of the other consumption times.
                 */
                wall_idle = get_cpu_idle_time(i, &wall, 1);
                diff_idle = wall_idle - idle_stat->prev_idle;
                diff_wall = wall - idle_stat->prev_wall;

                if (diff_wall <= diff_idle)
                        busy_time = 0;
                else
                        busy_time = diff_wall - diff_idle;

                idle_percent = div64_u64(100 * busy_time, diff_wall);
                idle_percent = 100 - idle_percent;

                if (idle_percent < 15)
                        cq->poll_mode = LPFC_QUEUE_WORK;
                else
                        cq->poll_mode = LPFC_IRQ_POLL;

                idle_stat->prev_idle = wall_idle;
                idle_stat->prev_wall = wall;
        }

requeue:
        schedule_delayed_work(&phba->idle_stat_delay_work,
                              msecs_to_jiffies(LPFC_IDLE_STAT_DELAY));
}

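/*
 * Worked example for the calculation above (hypothetical counter deltas):
 * if a CPU's cumulative idle time grew by diff_idle = 900 ms while wall
 * time grew by diff_wall = 1000 ms, then busy_time = 100 ms,
 * div64_u64(100 * 100, 1000) = 10 is the busy percentage, and
 * idle_percent = 100 - 10 = 90. Since 90 >= 15, the CQ stays in
 * LPFC_IRQ_POLL; a CPU idle less than 15% of the period flips its CQ to
 * LPFC_QUEUE_WORK so completions are handled from a workqueue instead.
 */
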
1291static void
1292lpfc_hb_eq_delay_work(struct work_struct *work)
1293{
1294        struct lpfc_hba *phba = container_of(to_delayed_work(work),
1295                                             struct lpfc_hba, eq_delay_work);
1296        struct lpfc_eq_intr_info *eqi, *eqi_new;
1297        struct lpfc_queue *eq, *eq_next;
1298        unsigned char *ena_delay = NULL;
1299        uint32_t usdelay;
1300        int i;
1301
1302        if (!phba->cfg_auto_imax || phba->pport->load_flag & FC_UNLOADING)
1303                return;
1304
1305        if (phba->link_state == LPFC_HBA_ERROR ||
1306            phba->pport->fc_flag & FC_OFFLINE_MODE)
1307                goto requeue;
1308
1309        ena_delay = kcalloc(phba->sli4_hba.num_possible_cpu, sizeof(*ena_delay),
1310                            GFP_KERNEL);
1311        if (!ena_delay)
1312                goto requeue;
1313
1314        for (i = 0; i < phba->cfg_irq_chann; i++) {
1315                /* Get the EQ corresponding to the IRQ vector */
1316                eq = phba->sli4_hba.hba_eq_hdl[i].eq;
1317                if (!eq)
1318                        continue;
1319                if (eq->q_mode || eq->q_flag & HBA_EQ_DELAY_CHK) {
1320                        eq->q_flag &= ~HBA_EQ_DELAY_CHK;
1321                        ena_delay[eq->last_cpu] = 1;
1322                }
1323        }
1324
1325        for_each_present_cpu(i) {
1326                eqi = per_cpu_ptr(phba->sli4_hba.eq_info, i);
1327                if (ena_delay[i]) {
1328                        usdelay = (eqi->icnt >> 10) * LPFC_EQ_DELAY_STEP;
1329                        if (usdelay > LPFC_MAX_AUTO_EQ_DELAY)
1330                                usdelay = LPFC_MAX_AUTO_EQ_DELAY;
1331                } else {
1332                        usdelay = 0;
1333                }
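                    /* A worked reading of the step above (illustrative): the
                     * shift eqi->icnt >> 10 grants one LPFC_EQ_DELAY_STEP of
                     * coalescing delay per ~1024 interrupts counted in the
                     * sampling window, clamped at LPFC_MAX_AUTO_EQ_DELAY.
                     */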
1334
1335                eqi->icnt = 0;
1336
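                    /* EQs live on per-CPU lists. If an EQ was last serviced on
                     * a different CPU, migrate it to that CPU's list and let
                     * that CPU's pass apply the delay to it.
                     */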
1337                list_for_each_entry_safe(eq, eq_next, &eqi->list, cpu_list) {
1338                        if (unlikely(eq->last_cpu != i)) {
1339                                eqi_new = per_cpu_ptr(phba->sli4_hba.eq_info,
1340                                                      eq->last_cpu);
1341                                list_move_tail(&eq->cpu_list, &eqi_new->list);
1342                                continue;
1343                        }
1344                        if (usdelay != eq->q_mode)
1345                                lpfc_modify_hba_eq_delay(phba, eq->hdwq, 1,
1346                                                         usdelay);
1347                }
1348        }
1349
1350        kfree(ena_delay);
1351
1352requeue:
1353        queue_delayed_work(phba->wq, &phba->eq_delay_work,
1354                           msecs_to_jiffies(LPFC_EQ_DELAY_MSECS));
1355}
1356
1357/**
1358 * lpfc_hb_mxp_handler - Multi-XRI pools handler to adjust XRI distribution
1359 * @phba: pointer to lpfc hba data structure.
1360 *
1361 * For each heartbeat, this routine applies heuristics to adjust the
1362 * XRI distribution. The goal is to fully utilize free XRIs.
1363 **/
1364static void lpfc_hb_mxp_handler(struct lpfc_hba *phba)
1365{
1366        u32 i;
1367        u32 hwq_count;
1368
1369        hwq_count = phba->cfg_hdw_queue;
1370        for (i = 0; i < hwq_count; i++) {
1371                /* Adjust XRIs in private pool */
1372                lpfc_adjust_pvt_pool_count(phba, i);
1373
1374                /* Adjust high watermark */
1375                lpfc_adjust_high_watermark(phba, i);
1376
1377#ifdef LPFC_MXP_STAT
1378                /* Snapshot pbl, pvt and busy count */
1379                lpfc_snapshot_mxp(phba, i);
1380#endif
1381        }
1382}
1383
1384/**
1385 * lpfc_issue_hb_mbox - Issues heart-beat mailbox command
1386 * @phba: pointer to lpfc hba data structure.
1387 *
1388 * If an HB mbox is not already in progress, this routine will allocate
1389 * an LPFC_MBOXQ_t, populate it with a MBX_HEARTBEAT (0x31) command,
1390 * and issue it. The HBA_HBEAT_INP flag means the command is in progress.
1391 **/
1392int
1393lpfc_issue_hb_mbox(struct lpfc_hba *phba)
1394{
1395        LPFC_MBOXQ_t *pmboxq;
1396        int retval;
1397
1398        /* Is a Heartbeat mbox already in progress */
1399        if (phba->hba_flag & HBA_HBEAT_INP)
1400                return 0;
1401
1402        pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1403        if (!pmboxq)
1404                return -ENOMEM;
1405
1406        lpfc_heart_beat(phba, pmboxq);
1407        pmboxq->mbox_cmpl = lpfc_hb_mbox_cmpl;
1408        pmboxq->vport = phba->pport;
1409        retval = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
1410
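            /* MBX_BUSY means the mailbox was queued behind an active command
             * and will be issued later, so it counts as success here; only a
             * genuine issue failure frees the mailbox and errors out.
             */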
1411        if (retval != MBX_BUSY && retval != MBX_SUCCESS) {
1412                mempool_free(pmboxq, phba->mbox_mem_pool);
1413                return -ENXIO;
1414        }
1415        phba->hba_flag |= HBA_HBEAT_INP;
1416
1417        return 0;
1418}
1419
1420/**
1421 * lpfc_issue_hb_tmo - Signals heartbeat timer to issue mbox command
1422 * @phba: pointer to lpfc hba data structure.
1423 *
1424 * The heartbeat timer (every 5 sec) will fire. If the HBA_HBEAT_TMO
1425 * flag is set, it will force a MBX_HEARTBEAT mbox command, regardless
1426 * of the value of lpfc_enable_hba_heartbeat.
1427 * If lpfc_enable_hba_heartbeat is set, the timeout routine will always
1428 * try to issue a MBX_HEARTBEAT mbox command.
1429 **/
1430void
1431lpfc_issue_hb_tmo(struct lpfc_hba *phba)
1432{
1433        if (phba->cfg_enable_hba_heartbeat)
1434                return;
1435        phba->hba_flag |= HBA_HBEAT_TMO;
1436}
1437
1438/**
1439 * lpfc_hb_timeout_handler - The HBA-timer timeout handler
1440 * @phba: pointer to lpfc hba data structure.
1441 *
1442 * This is the actual HBA-timer timeout handler to be invoked by the worker
1443 * thread whenever the HBA timer fires and an HBA-timeout event is posted.
1444 * This handler performs any periodic operations needed for the device. If
1445 * such a periodic event has already been attended to, either in the
1446 * interrupt handler or by processing slow-ring or fast-ring events within
1447 * the HBA-timer timeout window (LPFC_HB_MBOX_INTERVAL), this handler simply
1448 * resets the timer for the next timeout period. If the lpfc heart-beat
1449 * mailbox command is configured and no heart-beat mailbox command is
1450 * outstanding, a heart-beat mailbox is issued and the timer is set properly.
1451 * Otherwise, if a heart-beat mailbox command has been outstanding, the HBA
1452 * is taken offline.
1453 **/
1454void
1455lpfc_hb_timeout_handler(struct lpfc_hba *phba)
1456{
1457        struct lpfc_vport **vports;
1458        struct lpfc_dmabuf *buf_ptr;
1459        int retval = 0;
1460        int i, tmo;
1461        struct lpfc_sli *psli = &phba->sli;
1462        LIST_HEAD(completions);
1463
1464        if (phba->cfg_xri_rebalancing) {
1465                /* Multi-XRI pools handler */
1466                lpfc_hb_mxp_handler(phba);
1467        }
1468
1469        vports = lpfc_create_vport_work_array(phba);
1470        if (vports != NULL)
1471                for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
1472                        lpfc_rcv_seq_check_edtov(vports[i]);
1473                        lpfc_fdmi_change_check(vports[i]);
1474                }
1475        lpfc_destroy_vport_work_array(phba, vports);
1476
1477        if ((phba->link_state == LPFC_HBA_ERROR) ||
1478                (phba->pport->load_flag & FC_UNLOADING) ||
1479                (phba->pport->fc_flag & FC_OFFLINE_MODE))
1480                return;
1481
1482        if (phba->elsbuf_cnt &&
1483                (phba->elsbuf_cnt == phba->elsbuf_prev_cnt)) {
1484                spin_lock_irq(&phba->hbalock);
1485                list_splice_init(&phba->elsbuf, &completions);
1486                phba->elsbuf_cnt = 0;
1487                phba->elsbuf_prev_cnt = 0;
1488                spin_unlock_irq(&phba->hbalock);
1489
1490                while (!list_empty(&completions)) {
1491                        list_remove_head(&completions, buf_ptr,
1492                                struct lpfc_dmabuf, list);
1493                        lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
1494                        kfree(buf_ptr);
1495                }
1496        }
1497        phba->elsbuf_prev_cnt = phba->elsbuf_cnt;
1498
1499        /* If there is no heart beat outstanding, issue a heartbeat command */
1500        if (phba->cfg_enable_hba_heartbeat) {
1501                /* If IOs are completing, no need to issue a MBX_HEARTBEAT */
1502                spin_lock_irq(&phba->pport->work_port_lock);
1503                if (time_after(phba->last_completion_time +
1504                                msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL),
1505                                jiffies)) {
1506                        spin_unlock_irq(&phba->pport->work_port_lock);
1507                        if (phba->hba_flag & HBA_HBEAT_INP)
1508                                tmo = (1000 * LPFC_HB_MBOX_TIMEOUT);
1509                        else
1510                                tmo = (1000 * LPFC_HB_MBOX_INTERVAL);
1511                        goto out;
1512                }
1513                spin_unlock_irq(&phba->pport->work_port_lock);
1514
1515                /* Check if a MBX_HEARTBEAT is already in progress */
1516                if (phba->hba_flag & HBA_HBEAT_INP) {
1517                        /*
1518                         * If the heartbeat timeout fired with HBA_HBEAT_INP
1519                         * set, give the hb mailbox cmd a chance to complete
1520                         * or time out.
1521                         */
1522                        lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
1523                                "0459 Adapter heartbeat still outstanding: "
1524                                "last compl time was %d ms.\n",
1525                                jiffies_to_msecs(jiffies
1526                                         - phba->last_completion_time));
1527                        tmo = (1000 * LPFC_HB_MBOX_TIMEOUT);
1528                } else {
1529                        if ((!(psli->sli_flag & LPFC_SLI_MBOX_ACTIVE)) &&
1530                                (list_empty(&psli->mboxq))) {
1531
1532                                retval = lpfc_issue_hb_mbox(phba);
1533                                if (retval) {
1534                                        tmo = (1000 * LPFC_HB_MBOX_INTERVAL);
1535                                        goto out;
1536                                }
1537                                phba->skipped_hb = 0;
1538                        } else if (time_before_eq(phba->last_completion_time,
1539                                        phba->skipped_hb)) {
1540                                lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
1541                                        "2857 Last completion time not "
1542                                        "updated in %d ms\n",
1543                                        jiffies_to_msecs(jiffies
1544                                                 - phba->last_completion_time));
1545                        } else
1546                                phba->skipped_hb = jiffies;
1547
1548                        tmo = (1000 * LPFC_HB_MBOX_TIMEOUT);
1549                        goto out;
1550                }
1551        } else {
1552                /* Check to see if we want to force a MBX_HEARTBEAT */
1553                if (phba->hba_flag & HBA_HBEAT_TMO) {
1554                        retval = lpfc_issue_hb_mbox(phba);
1555                        if (retval)
1556                                tmo = (1000 * LPFC_HB_MBOX_INTERVAL);
1557                        else
1558                                tmo = (1000 * LPFC_HB_MBOX_TIMEOUT);
1559                        goto out;
1560                }
1561                tmo = (1000 * LPFC_HB_MBOX_INTERVAL);
1562        }
1563out:
1564        mod_timer(&phba->hb_tmofunc, jiffies + msecs_to_jiffies(tmo));
1565}
1566
1567/**
1568 * lpfc_offline_eratt - Bring lpfc offline on hardware error attention
1569 * @phba: pointer to lpfc hba data structure.
1570 *
1571 * This routine is called to bring the HBA offline when HBA hardware error
1572 * other than Port Error 6 has been detected.
1573 **/
1574static void
1575lpfc_offline_eratt(struct lpfc_hba *phba)
1576{
1577        struct lpfc_sli   *psli = &phba->sli;
1578
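            /* Quiesce SLI, take the port offline, reset the board, and leave
             * the link state at LPFC_HBA_ERROR with management I/O unblocked
             * so recovery can be initiated.
             */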
1579        spin_lock_irq(&phba->hbalock);
1580        psli->sli_flag &= ~LPFC_SLI_ACTIVE;
1581        spin_unlock_irq(&phba->hbalock);
1582        lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
1583
1584        lpfc_offline(phba);
1585        lpfc_reset_barrier(phba);
1586        spin_lock_irq(&phba->hbalock);
1587        lpfc_sli_brdreset(phba);
1588        spin_unlock_irq(&phba->hbalock);
1589        lpfc_hba_down_post(phba);
1590        lpfc_sli_brdready(phba, HS_MBRDY);
1591        lpfc_unblock_mgmt_io(phba);
1592        phba->link_state = LPFC_HBA_ERROR;
1593        return;
1594}
1595
1596/**
1597 * lpfc_sli4_offline_eratt - Bring lpfc offline on SLI4 hardware error attention
1598 * @phba: pointer to lpfc hba data structure.
1599 *
1600 * This routine is called to bring a SLI4 HBA offline when HBA hardware error
1601 * other than Port Error 6 has been detected.
1602 **/
1603void
1604lpfc_sli4_offline_eratt(struct lpfc_hba *phba)
1605{
1606        spin_lock_irq(&phba->hbalock);
1607        phba->link_state = LPFC_HBA_ERROR;
1608        spin_unlock_irq(&phba->hbalock);
1609
1610        lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
1611        lpfc_sli_flush_io_rings(phba);
1612        lpfc_offline(phba);
1613        lpfc_hba_down_post(phba);
1614        lpfc_unblock_mgmt_io(phba);
1615}
1616
1617/**
1618 * lpfc_handle_deferred_eratt - The HBA hardware deferred error handler
1619 * @phba: pointer to lpfc hba data structure.
1620 *
1621 * This routine is invoked to handle deferred HBA hardware error
1622 * conditions. This type of error is indicated by the HBA setting the ER1
1623 * bit and another ER bit in the host status register. The driver will
1624 * wait until the ER1 bit clears before handling the error condition.
1625 **/
1626static void
1627lpfc_handle_deferred_eratt(struct lpfc_hba *phba)
1628{
1629        uint32_t old_host_status = phba->work_hs;
1630        struct lpfc_sli *psli = &phba->sli;
1631
1632        /* If the pci channel is offline, ignore possible errors,
1633         * since we cannot communicate with the pci card anyway.
1634         */
1635        if (pci_channel_offline(phba->pcidev)) {
1636                spin_lock_irq(&phba->hbalock);
1637                phba->hba_flag &= ~DEFER_ERATT;
1638                spin_unlock_irq(&phba->hbalock);
1639                return;
1640        }
1641
1642        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1643                        "0479 Deferred Adapter Hardware Error "
1644                        "Data: x%x x%x x%x\n",
1645                        phba->work_hs, phba->work_status[0],
1646                        phba->work_status[1]);
1647
1648        spin_lock_irq(&phba->hbalock);
1649        psli->sli_flag &= ~LPFC_SLI_ACTIVE;
1650        spin_unlock_irq(&phba->hbalock);
1651
1652
1653        /*
1654         * Firmware stops when it triggers an erratt. That could cause I/Os
1655         * to be dropped by the firmware. Error out the iocbs (I/Os) on the
1656         * txcmplq and let the SCSI layer retry them once the link is back.
1657         */
1658        lpfc_sli_abort_fcp_rings(phba);
1659
1660        /*
1661         * There was a firmware error. Take the hba offline and then
1662         * attempt to restart it.
1663         */
1664        lpfc_offline_prep(phba, LPFC_MBX_WAIT);
1665        lpfc_offline(phba);
1666
1667        /* Wait for the ER1 bit to clear. */
1668        while (phba->work_hs & HS_FFER1) {
1669                msleep(100);
1670                if (lpfc_readl(phba->HSregaddr, &phba->work_hs)) {
1671                        phba->work_hs = UNPLUG_ERR;
1672                        break;
1673                }
1674                /* If driver is unloading let the worker thread continue */
1675                if (phba->pport->load_flag & FC_UNLOADING) {
1676                        phba->work_hs = 0;
1677                        break;
1678                }
1679        }
1680
1681        /*
1682         * This is to protect against a race condition in which the
1683         * first write to the host attention register clears the
1684         * host status register.
1685         */
1686        if ((!phba->work_hs) && (!(phba->pport->load_flag & FC_UNLOADING)))
1687                phba->work_hs = old_host_status & ~HS_FFER1;
1688
1689        spin_lock_irq(&phba->hbalock);
1690        phba->hba_flag &= ~DEFER_ERATT;
1691        spin_unlock_irq(&phba->hbalock);
1692        phba->work_status[0] = readl(phba->MBslimaddr + 0xa8);
1693        phba->work_status[1] = readl(phba->MBslimaddr + 0xac);
1694}
1695
1696static void
1697lpfc_board_errevt_to_mgmt(struct lpfc_hba *phba)
1698{
1699        struct lpfc_board_event_header board_event;
1700        struct Scsi_Host *shost;
1701
1702        board_event.event_type = FC_REG_BOARD_EVENT;
1703        board_event.subcategory = LPFC_EVENT_PORTINTERR;
1704        shost = lpfc_shost_from_vport(phba->pport);
1705        fc_host_post_vendor_event(shost, fc_get_event_number(),
1706                                  sizeof(board_event),
1707                                  (char *) &board_event,
1708                                  LPFC_NL_VENDOR_ID);
1709}
1710
1711/**
1712 * lpfc_handle_eratt_s3 - The SLI3 HBA hardware error handler
1713 * @phba: pointer to lpfc hba data structure.
1714 *
1715 * This routine is invoked to handle the following HBA hardware error
1716 * conditions:
1717 * 1 - HBA error attention interrupt
1718 * 2 - DMA ring index out of range
1719 * 3 - Mailbox command came back as unknown
1720 **/
1721static void
1722lpfc_handle_eratt_s3(struct lpfc_hba *phba)
1723{
1724        struct lpfc_vport *vport = phba->pport;
1725        struct lpfc_sli   *psli = &phba->sli;
1726        uint32_t event_data;
1727        unsigned long temperature;
1728        struct temp_event temp_event_data;
1729        struct Scsi_Host  *shost;
1730
1731        /* If the pci channel is offline, ignore possible errors,
1732         * since we cannot communicate with the pci card anyway.
1733         */
1734        if (pci_channel_offline(phba->pcidev)) {
1735                spin_lock_irq(&phba->hbalock);
1736                phba->hba_flag &= ~DEFER_ERATT;
1737                spin_unlock_irq(&phba->hbalock);
1738                return;
1739        }
1740
1741        /* If resets are disabled then leave the HBA alone and return */
1742        if (!phba->cfg_enable_hba_reset)
1743                return;
1744
1745        /* Send an internal error event to mgmt application */
1746        lpfc_board_errevt_to_mgmt(phba);
1747
1748        if (phba->hba_flag & DEFER_ERATT)
1749                lpfc_handle_deferred_eratt(phba);
1750
1751        if ((phba->work_hs & HS_FFER6) || (phba->work_hs & HS_FFER8)) {
1752                if (phba->work_hs & HS_FFER6)
1753                        /* Re-establishing Link */
1754                        lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
1755                                        "1301 Re-establishing Link "
1756                                        "Data: x%x x%x x%x\n",
1757                                        phba->work_hs, phba->work_status[0],
1758                                        phba->work_status[1]);
1759                if (phba->work_hs & HS_FFER8)
1760                        /* Device Zeroization */
1761                        lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
1762                                        "2861 Host Authentication device "
1763                                        "zeroization Data:x%x x%x x%x\n",
1764                                        phba->work_hs, phba->work_status[0],
1765                                        phba->work_status[1]);
1766
1767                spin_lock_irq(&phba->hbalock);
1768                psli->sli_flag &= ~LPFC_SLI_ACTIVE;
1769                spin_unlock_irq(&phba->hbalock);
1770
1771                /*
1772                 * Firmware stops when it triggers an erratt with HS_FFER6.
1773                 * That could cause I/Os to be dropped by the firmware.
1774                 * Error out the iocbs (I/Os) on the txcmplq and let the
1775                 * SCSI layer retry them after re-establishing the link.
1776                 */
1777                lpfc_sli_abort_fcp_rings(phba);
1778
1779                /*
1780                 * There was a firmware error.  Take the hba offline and then
1781                 * attempt to restart it.
1782                 */
1783                lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
1784                lpfc_offline(phba);
1785                lpfc_sli_brdrestart(phba);
1786                if (lpfc_online(phba) == 0) {   /* Initialize the HBA */
1787                        lpfc_unblock_mgmt_io(phba);
1788                        return;
1789                }
1790                lpfc_unblock_mgmt_io(phba);
1791        } else if (phba->work_hs & HS_CRIT_TEMP) {
1792                temperature = readl(phba->MBslimaddr + TEMPERATURE_OFFSET);
1793                temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
1794                temp_event_data.event_code = LPFC_CRIT_TEMP;
1795                temp_event_data.data = (uint32_t)temperature;
1796
1797                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1798                                "0406 Adapter maximum temperature exceeded "
1799                                "(%ld), taking this port offline "
1800                                "Data: x%x x%x x%x\n",
1801                                temperature, phba->work_hs,
1802                                phba->work_status[0], phba->work_status[1]);
1803
1804                shost = lpfc_shost_from_vport(phba->pport);
1805                fc_host_post_vendor_event(shost, fc_get_event_number(),
1806                                          sizeof(temp_event_data),
1807                                          (char *) &temp_event_data,
1808                                          SCSI_NL_VID_TYPE_PCI
1809                                          | PCI_VENDOR_ID_EMULEX);
1810
1811                spin_lock_irq(&phba->hbalock);
1812                phba->over_temp_state = HBA_OVER_TEMP;
1813                spin_unlock_irq(&phba->hbalock);
1814                lpfc_offline_eratt(phba);
1815
1816        } else {
1817                /* The if clause above forces this code path when the status
1818                 * failure is a value other than FFER6. Do not take the port
1819                 * offline twice. This is the adapter hardware error path.
1820                 */
1821                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1822                                "0457 Adapter Hardware Error "
1823                                "Data: x%x x%x x%x\n",
1824                                phba->work_hs,
1825                                phba->work_status[0], phba->work_status[1]);
1826
1827                event_data = FC_REG_DUMP_EVENT;
1828                shost = lpfc_shost_from_vport(vport);
1829                fc_host_post_vendor_event(shost, fc_get_event_number(),
1830                                sizeof(event_data), (char *) &event_data,
1831                                SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);
1832
1833                lpfc_offline_eratt(phba);
1834        }
1835        return;
1836}
1837
1838/**
1839 * lpfc_sli4_port_sta_fn_reset - The SLI4 function reset due to port status reg
1840 * @phba: pointer to lpfc hba data structure.
1841 * @mbx_action: flag for mailbox shutdown action.
1842 * @en_rn_msg: send reset/port recovery message.
1843 * This routine is invoked to perform an SLI4 port PCI function reset in
1844 * response to port status register polling attention. It waits for port
1845 * status register (ERR, RDY, RN) bits before proceeding with function reset.
1846 * During this process, interrupt vectors are freed and later requested
1847 * for handling possible port resource change.
1848 **/
1849static int
1850lpfc_sli4_port_sta_fn_reset(struct lpfc_hba *phba, int mbx_action,
1851                            bool en_rn_msg)
1852{
1853        int rc;
1854        uint32_t intr_mode;
1855
1856        if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) >=
1857            LPFC_SLI_INTF_IF_TYPE_2) {
1858                /*
1859                 * On an error status condition, the driver needs to wait
1860                 * for the port to become ready before performing the reset.
1861                 */
1862                rc = lpfc_sli4_pdev_status_reg_wait(phba);
1863                if (rc)
1864                        return rc;
1865        }
1866
1867        /* need reset: attempt port recovery */
1868        if (en_rn_msg)
1869                lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
1870                                "2887 Reset Needed: Attempting Port "
1871                                "Recovery...\n");
1872
1873        /* In the no-wait case the HBA has already been reset and is not
1874         * functional, thus we should clear the LPFC_SLI_ACTIVE flag.
1875         */
1876        if (mbx_action == LPFC_MBX_NO_WAIT) {
1877                spin_lock_irq(&phba->hbalock);
1878                phba->sli.sli_flag &= ~LPFC_SLI_ACTIVE;
1879                spin_unlock_irq(&phba->hbalock);
1880        }
1881
1882        lpfc_offline_prep(phba, mbx_action);
1883        lpfc_sli_flush_io_rings(phba);
1884        lpfc_offline(phba);
1885        /* release interrupt for possible resource change */
1886        lpfc_sli4_disable_intr(phba);
1887        rc = lpfc_sli_brdrestart(phba);
1888        if (rc) {
1889                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1890                                "6309 Failed to restart board\n");
1891                return rc;
1892        }
1893        /* request and enable interrupt */
1894        intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
1895        if (intr_mode == LPFC_INTR_ERROR) {
1896                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1897                                "3175 Failed to enable interrupt\n");
1898                return -EIO;
1899        }
1900        phba->intr_mode = intr_mode;
1901        rc = lpfc_online(phba);
1902        if (rc == 0)
1903                lpfc_unblock_mgmt_io(phba);
1904
1905        return rc;
1906}
1907
1908/**
1909 * lpfc_handle_eratt_s4 - The SLI4 HBA hardware error handler
1910 * @phba: pointer to lpfc hba data structure.
1911 *
1912 * This routine is invoked to handle the SLI4 HBA hardware error attention
1913 * conditions.
1914 **/
1915static void
1916lpfc_handle_eratt_s4(struct lpfc_hba *phba)
1917{
1918        struct lpfc_vport *vport = phba->pport;
1919        uint32_t event_data;
1920        struct Scsi_Host *shost;
1921        uint32_t if_type;
1922        struct lpfc_register portstat_reg = {0};
1923        uint32_t reg_err1, reg_err2;
1924        uint32_t uerrlo_reg, uemasklo_reg;
1925        uint32_t smphr_port_status = 0, pci_rd_rc1, pci_rd_rc2;
1926        bool en_rn_msg = true;
1927        struct temp_event temp_event_data;
1928        struct lpfc_register portsmphr_reg;
1929        int rc, i;
1930
1931        /* If the pci channel is offline, ignore possible errors, since
1932         * we cannot communicate with the pci card anyway.
1933         */
1934        if (pci_channel_offline(phba->pcidev)) {
1935                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1936                                "3166 pci channel is offline\n");
1937                lpfc_sli4_offline_eratt(phba);
1938                return;
1939        }
1940
1941        memset(&portsmphr_reg, 0, sizeof(portsmphr_reg));
1942        if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
1943        switch (if_type) {
1944        case LPFC_SLI_INTF_IF_TYPE_0:
1945                pci_rd_rc1 = lpfc_readl(
1946                                phba->sli4_hba.u.if_type0.UERRLOregaddr,
1947                                &uerrlo_reg);
1948                pci_rd_rc2 = lpfc_readl(
1949                                phba->sli4_hba.u.if_type0.UEMASKLOregaddr,
1950                                &uemasklo_reg);
1951                /* consider PCI bus read error as pci_channel_offline */
1952                if (pci_rd_rc1 == -EIO && pci_rd_rc2 == -EIO)
1953                        return;
1954                if (!(phba->hba_flag & HBA_RECOVERABLE_UE)) {
1955                        lpfc_sli4_offline_eratt(phba);
1956                        return;
1957                }
1958                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1959                                "7623 Checking UE recoverable");
1960
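                    /* Poll the port semaphore register for the recoverable-UE
                     * indication, sleeping 1 sec per pass; ue_to_sr is assumed
                     * here to be in milliseconds, giving up to ue_to_sr / 1000
                     * one-second passes.
                     */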
1961                for (i = 0; i < phba->sli4_hba.ue_to_sr / 1000; i++) {
1962                        if (lpfc_readl(phba->sli4_hba.PSMPHRregaddr,
1963                                       &portsmphr_reg.word0))
1964                                continue;
1965
1966                        smphr_port_status = bf_get(lpfc_port_smphr_port_status,
1967                                                   &portsmphr_reg);
1968                        if ((smphr_port_status & LPFC_PORT_SEM_MASK) ==
1969                            LPFC_PORT_SEM_UE_RECOVERABLE)
1970                                break;
1971                        /* Sleep for 1 sec before checking the semaphore */
1972                        msleep(1000);
1973                }
1974
1975                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1976                                "4827 smphr_port_status x%x : Waited %dSec",
1977                                smphr_port_status, i);
1978
1979                /* Recoverable UE, reset the HBA device */
1980                if ((smphr_port_status & LPFC_PORT_SEM_MASK) ==
1981                    LPFC_PORT_SEM_UE_RECOVERABLE) {
1982                        for (i = 0; i < 20; i++) {
1983                                msleep(1000);
1984                                if (!lpfc_readl(phba->sli4_hba.PSMPHRregaddr,
1985                                    &portsmphr_reg.word0) &&
1986                                    (LPFC_POST_STAGE_PORT_READY ==
1987                                     bf_get(lpfc_port_smphr_port_status,
1988                                     &portsmphr_reg))) {
1989                                        rc = lpfc_sli4_port_sta_fn_reset(phba,
1990                                                LPFC_MBX_NO_WAIT, en_rn_msg);
1991                                        if (rc == 0)
1992                                                return;
1993                                        lpfc_printf_log(phba, KERN_ERR,
1994                                                LOG_TRACE_EVENT,
1995                                                "4215 Failed to recover UE");
1996                                        break;
1997                                }
1998                        }
1999                }
2000                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2001                                "7624 Firmware not ready: Failing UE recovery,"
2002                                " waited %dSec", i);
2003                phba->link_state = LPFC_HBA_ERROR;
2004                break;
2005
2006        case LPFC_SLI_INTF_IF_TYPE_2:
2007        case LPFC_SLI_INTF_IF_TYPE_6:
2008                pci_rd_rc1 = lpfc_readl(
2009                                phba->sli4_hba.u.if_type2.STATUSregaddr,
2010                                &portstat_reg.word0);
2011                /* consider PCI bus read error as pci_channel_offline */
2012                if (pci_rd_rc1 == -EIO) {
2013                        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2014                                "3151 PCI bus read access failure: x%x\n",
2015                                readl(phba->sli4_hba.u.if_type2.STATUSregaddr));
2016                        lpfc_sli4_offline_eratt(phba);
2017                        return;
2018                }
2019                reg_err1 = readl(phba->sli4_hba.u.if_type2.ERR1regaddr);
2020                reg_err2 = readl(phba->sli4_hba.u.if_type2.ERR2regaddr);
2021                if (bf_get(lpfc_sliport_status_oti, &portstat_reg)) {
2022                        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2023                                        "2889 Port Overtemperature event, "
2024                                        "taking port offline Data: x%x x%x\n",
2025                                        reg_err1, reg_err2);
2026
2027                        phba->sfp_alarm |= LPFC_TRANSGRESSION_HIGH_TEMPERATURE;
2028                        temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
2029                        temp_event_data.event_code = LPFC_CRIT_TEMP;
2030                        temp_event_data.data = 0xFFFFFFFF;
2031
2032                        shost = lpfc_shost_from_vport(phba->pport);
2033                        fc_host_post_vendor_event(shost, fc_get_event_number(),
2034                                                  sizeof(temp_event_data),
2035                                                  (char *)&temp_event_data,
2036                                                  SCSI_NL_VID_TYPE_PCI
2037                                                  | PCI_VENDOR_ID_EMULEX);
2038
2039                        spin_lock_irq(&phba->hbalock);
2040                        phba->over_temp_state = HBA_OVER_TEMP;
2041                        spin_unlock_irq(&phba->hbalock);
2042                        lpfc_sli4_offline_eratt(phba);
2043                        return;
2044                }
2045                if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
2046                    reg_err2 == SLIPORT_ERR2_REG_FW_RESTART) {
2047                        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2048                                        "3143 Port Down: Firmware Update "
2049                                        "Detected\n");
2050                        en_rn_msg = false;
2051                } else if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
2052                         reg_err2 == SLIPORT_ERR2_REG_FORCED_DUMP)
2053                        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2054                                        "3144 Port Down: Debug Dump\n");
2055                else if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
2056                         reg_err2 == SLIPORT_ERR2_REG_FUNC_PROVISON)
2057                        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2058                                        "3145 Port Down: Provisioning\n");
2059
2060                /* If resets are disabled then leave the HBA alone and return */
2061                if (!phba->cfg_enable_hba_reset)
2062                        return;
2063
2064                /* Check port status register for function reset */
2065                rc = lpfc_sli4_port_sta_fn_reset(phba, LPFC_MBX_NO_WAIT,
2066                                en_rn_msg);
2067                if (rc == 0) {
2068                        /* don't report event on forced debug dump */
2069                        if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
2070                            reg_err2 == SLIPORT_ERR2_REG_FORCED_DUMP)
2071                                return;
2072                        else
2073                                break;
2074                }
2075                /* fall through for not able to recover */
2076                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2077                                "3152 Unrecoverable error\n");
2078                phba->link_state = LPFC_HBA_ERROR;
2079                break;
2080        case LPFC_SLI_INTF_IF_TYPE_1:
2081        default:
2082                break;
2083        }
2084        lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
2085                        "3123 Report dump event to upper layer\n");
2086        /* Send an internal error event to mgmt application */
2087        lpfc_board_errevt_to_mgmt(phba);
2088
2089        event_data = FC_REG_DUMP_EVENT;
2090        shost = lpfc_shost_from_vport(vport);
2091        fc_host_post_vendor_event(shost, fc_get_event_number(),
2092                                  sizeof(event_data), (char *) &event_data,
2093                                  SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);
2094}
2095
2096/**
2097 * lpfc_handle_eratt - Wrapper func for handling hba error attention
2098 * @phba: pointer to lpfc HBA data structure.
2099 *
2100 * This routine wraps the actual SLI3 or SLI4 hba error attention handling
2101 * routine, dispatching through the API jump table function pointer in the
2102 * lpfc_hba struct.
2103 *
2104 * Return codes
2105 *   None - the handler returns void; recovery is performed internally.
2106 **/
2107void
2108lpfc_handle_eratt(struct lpfc_hba *phba)
2109{
2110        (*phba->lpfc_handle_eratt)(phba);
2111}
2112
2113/**
2114 * lpfc_handle_latt - The HBA link event handler
2115 * @phba: pointer to lpfc hba data structure.
2116 *
2117 * This routine is invoked from the worker thread to handle a HBA host
2118 * attention link event. SLI3 only.
2119 **/
2120void
2121lpfc_handle_latt(struct lpfc_hba *phba)
2122{
2123        struct lpfc_vport *vport = phba->pport;
2124        struct lpfc_sli   *psli = &phba->sli;
2125        LPFC_MBOXQ_t *pmb;
2126        volatile uint32_t control;
2127        struct lpfc_dmabuf *mp;
2128        int rc = 0;
2129
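            /* rc encodes the failure point for log message 0300 below:
             * 1 - mailbox alloc failed, 2 - dmabuf alloc failed,
             * 3 - mbuf alloc failed, 4 - mailbox could not be issued.
             */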
2130        pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
2131        if (!pmb) {
2132                rc = 1;
2133                goto lpfc_handle_latt_err_exit;
2134        }
2135
2136        mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
2137        if (!mp) {
2138                rc = 2;
2139                goto lpfc_handle_latt_free_pmb;
2140        }
2141
2142        mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
2143        if (!mp->virt) {
2144                rc = 3;
2145                goto lpfc_handle_latt_free_mp;
2146        }
2147
2148        /* Cleanup any outstanding ELS commands */
2149        lpfc_els_flush_all_cmd(phba);
2150
2151        psli->slistat.link_event++;
2152        lpfc_read_topology(phba, pmb, mp);
2153        pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology;
2154        pmb->vport = vport;
2155        /* Block ELS IOCBs until we have processed this mbox command */
2156        phba->sli.sli3_ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT;
2157        rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
2158        if (rc == MBX_NOT_FINISHED) {
2159                rc = 4;
2160                goto lpfc_handle_latt_free_mbuf;
2161        }
2162
2163        /* Clear Link Attention in HA REG */
2164        spin_lock_irq(&phba->hbalock);
2165        writel(HA_LATT, phba->HAregaddr);
2166        readl(phba->HAregaddr); /* flush */
2167        spin_unlock_irq(&phba->hbalock);
2168
2169        return;
2170
2171lpfc_handle_latt_free_mbuf:
2172        phba->sli.sli3_ring[LPFC_ELS_RING].flag &= ~LPFC_STOP_IOCB_EVENT;
2173        lpfc_mbuf_free(phba, mp->virt, mp->phys);
2174lpfc_handle_latt_free_mp:
2175        kfree(mp);
2176lpfc_handle_latt_free_pmb:
2177        mempool_free(pmb, phba->mbox_mem_pool);
2178lpfc_handle_latt_err_exit:
2179        /* Enable Link attention interrupts */
2180        spin_lock_irq(&phba->hbalock);
2181        psli->sli_flag |= LPFC_PROCESS_LA;
2182        control = readl(phba->HCregaddr);
2183        control |= HC_LAINT_ENA;
2184        writel(control, phba->HCregaddr);
2185        readl(phba->HCregaddr); /* flush */
2186
2187        /* Clear Link Attention in HA REG */
2188        writel(HA_LATT, phba->HAregaddr);
2189        readl(phba->HAregaddr); /* flush */
2190        spin_unlock_irq(&phba->hbalock);
2191        lpfc_linkdown(phba);
2192        phba->link_state = LPFC_HBA_ERROR;
2193
2194        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2195                        "0300 LATT: Cannot issue READ_LA: Data:%d\n", rc);
2196
2197        return;
2198}
2199
2200/**
2201 * lpfc_parse_vpd - Parse VPD (Vital Product Data)
2202 * @phba: pointer to lpfc hba data structure.
2203 * @vpd: pointer to the vital product data.
2204 * @len: length of the vital product data in bytes.
2205 *
2206 * This routine parses the Vital Product Data (VPD). The VPD is treated as
2207 * an array of characters. In this routine, the ModelName, ProgramType,
2208 * ModelDesc, and related fields of the phba data structure are populated.
2209 *
2210 * Return codes
2211 *   0 - pointer to the VPD passed in is NULL
2212 *   1 - success
2213 **/
2214int
2215lpfc_parse_vpd(struct lpfc_hba *phba, uint8_t *vpd, int len)
2216{
2217        uint8_t lenlo, lenhi;
2218        int Length;
2219        int i, j;
2220        int finished = 0;
2221        int index = 0;
2222
2223        if (!vpd)
2224                return 0;
2225
2226        /* Vital Product */
2227        lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
2228                        "0455 Vital Product Data: x%x x%x x%x x%x\n",
2229                        (uint32_t) vpd[0], (uint32_t) vpd[1], (uint32_t) vpd[2],
2230                        (uint32_t) vpd[3]);
2231        while (!finished && (index < (len - 4))) {
2232                switch (vpd[index]) {
2233                case 0x82:
2234                case 0x91:
2235                        index += 1;
2236                        lenlo = vpd[index];
2237                        index += 1;
2238                        lenhi = vpd[index];
2239                        index += 1;
2240                        i = ((((unsigned short)lenhi) << 8) + lenlo);
2241                        index += i;
2242                        break;
2243                case 0x90:
2244                        index += 1;
2245                        lenlo = vpd[index];
2246                        index += 1;
2247                        lenhi = vpd[index];
2248                        index += 1;
2249                        Length = ((((unsigned short)lenhi) << 8) + lenlo);
2250                        if (Length > len - index)
2251                                Length = len - index;
2252                        while (Length > 0) {
2253                                /* Look for Serial Number */
2254                                if ((vpd[index] == 'S') && (vpd[index+1] == 'N')) {
2255                                        index += 2;
2256                                        i = vpd[index];
2257                                        index += 1;
2258                                        j = 0;
2259                                        Length -= (3 + i);
2260                                        while (i--) {
2261                                                phba->SerialNumber[j++] = vpd[index++];
2262                                                if (j == 31)
2263                                                        break;
2264                                        }
2265                                        phba->SerialNumber[j] = 0;
2266                                        continue;
2267                                }
2268                                else if ((vpd[index] == 'V') && (vpd[index+1] == '1')) {
2269                                        phba->vpd_flag |= VPD_MODEL_DESC;
2270                                        index += 2;
2271                                        i = vpd[index];
2272                                        index += 1;
2273                                        j = 0;
2274                                        Length -= (3 + i);
2275                                        while (i--) {
2276                                                phba->ModelDesc[j++] = vpd[index++];
2277                                                if (j == 255)
2278                                                        break;
2279                                        }
2280                                        phba->ModelDesc[j] = 0;
2281                                        continue;
2282                                }
2283                                else if ((vpd[index] == 'V') && (vpd[index+1] == '2')) {
2284                                        phba->vpd_flag |= VPD_MODEL_NAME;
2285                                        index += 2;
2286                                        i = vpd[index];
2287                                        index += 1;
2288                                        j = 0;
2289                                        Length -= (3 + i);
2290                                        while (i--) {
2291                                                phba->ModelName[j++] = vpd[index++];
2292                                                if (j == 79)
2293                                                        break;
2294                                        }
2295                                        phba->ModelName[j] = 0;
2296                                        continue;
2297                                }
2298                                else if ((vpd[index] == 'V') && (vpd[index+1] == '3')) {
2299                                        phba->vpd_flag |= VPD_PROGRAM_TYPE;
2300                                        index += 2;
2301                                        i = vpd[index];
2302                                        index += 1;
2303                                        j = 0;
2304                                        Length -= (3 + i);
2305                                        while (i--) {
2306                                                phba->ProgramType[j++] = vpd[index++];
2307                                                if (j == 255)
2308                                                        break;
2309                                        }
2310                                        phba->ProgramType[j] = 0;
2311                                        continue;
2312                                }
2313                                else if ((vpd[index] == 'V') && (vpd[index+1] == '4')) {
2314                                        phba->vpd_flag |= VPD_PORT;
2315                                        index += 2;
2316                                        i = vpd[index];
2317                                        index += 1;
2318                                        j = 0;
2319                                        Length -= (3 + i);
2320                                        while (i--) {
2321                                                if ((phba->sli_rev == LPFC_SLI_REV4) &&
2322                                                    (phba->sli4_hba.pport_name_sta ==
2323                                                     LPFC_SLI4_PPNAME_GET)) {
2324                                                        j++;
2325                                                        index++;
2326                                                } else
2327                                                        phba->Port[j++] = vpd[index++];
2328                                                if (j == 19)
2329                                                        break;
2330                                        }
2331                                        if ((phba->sli_rev != LPFC_SLI_REV4) ||
2332                                            (phba->sli4_hba.pport_name_sta ==
2333                                             LPFC_SLI4_PPNAME_NON))
2334                                                phba->Port[j] = 0;
2335                                        continue;
2336                                }
2337                                else {
2338                                        index += 2;
2339                                        i = vpd[index];
2340                                        index += 1;
2341                                        index += i;
2342                                        Length -= (3 + i);
2343                                }
2344                        }
2345                        finished = 0;
2346                        break;
2347                case 0x78:
2348                        finished = 1;
2349                        break;
2350                default:
2351                        index++;
2352                        break;
2353                }
2354        }
2355
2356        return 1;
2357}
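    
    /*
     * A sketch of the VPD framing the parser above walks (illustrative
     * bytes, not from any particular adapter):
     *
     *   0x82/0x91 len_lo len_hi <string>       descriptor, skipped over
     *   0x90 len_lo len_hi                     read-only keyword section:
     *        'S' 'N' len <serial number>       -> phba->SerialNumber
     *        'V' '1' len <model description>   -> phba->ModelDesc
     *        'V' '2' len <model name>          -> phba->ModelName
     *        'V' '3' len <program type>        -> phba->ProgramType
     *        'V' '4' len <port identifier>     -> phba->Port
     *   0x78                                   end tag, stops the parse
     */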
2358
2359/**
2360 * lpfc_get_hba_model_desc - Retrieve HBA device model name and description
2361 * @phba: pointer to lpfc hba data structure.
2362 * @mdp: pointer to the data structure to hold the derived model name.
2363 * @descp: pointer to the data structure to hold the derived description.
2364 *
2365 * This routine retrieves HBA's description based on its registered PCI device
2366 * ID. The @descp passed into this function points to an array of 256 chars. It
2367 * shall be returned with the model name, maximum speed, and the host bus type.
2368 * The @mdp passed into this function points to an array of 80 chars. When the
2369 * function returns, the @mdp will be filled with the model name.
2370 **/
2371static void
2372lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
2373{
2374        lpfc_vpd_t *vp;
2375        uint16_t dev_id = phba->pcidev->device;
2376        int max_speed;
2377        int GE = 0;
2378        int oneConnect = 0; /* default is not a oneConnect */
2379        struct {
2380                char *name;
2381                char *bus;
2382                char *function;
2383        } m = {"<Unknown>", "", ""};
2384
2385        if (mdp && mdp[0] != '\0' &&
2386            descp && descp[0] != '\0')
2387                return;
2388
2389        if (phba->lmt & LMT_64Gb)
2390                max_speed = 64;
2391        else if (phba->lmt & LMT_32Gb)
2392                max_speed = 32;
2393        else if (phba->lmt & LMT_16Gb)
2394                max_speed = 16;
2395        else if (phba->lmt & LMT_10Gb)
2396                max_speed = 10;
2397        else if (phba->lmt & LMT_8Gb)
2398                max_speed = 8;
2399        else if (phba->lmt & LMT_4Gb)
2400                max_speed = 4;
2401        else if (phba->lmt & LMT_2Gb)
2402                max_speed = 2;
2403        else if (phba->lmt & LMT_1Gb)
2404                max_speed = 1;
2405        else
2406                max_speed = 0;
2407
2408        vp = &phba->vpd;
2409
2410        switch (dev_id) {
2411        case PCI_DEVICE_ID_FIREFLY:
2412                m = (typeof(m)){"LP6000", "PCI",
2413                                "Obsolete, Unsupported Fibre Channel Adapter"};
2414                break;
2415        case PCI_DEVICE_ID_SUPERFLY:
2416                if (vp->rev.biuRev >= 1 && vp->rev.biuRev <= 3)
2417                        m = (typeof(m)){"LP7000", "PCI", ""};
2418                else
2419                        m = (typeof(m)){"LP7000E", "PCI", ""};
2420                m.function = "Obsolete, Unsupported Fibre Channel Adapter";
2421                break;
2422        case PCI_DEVICE_ID_DRAGONFLY:
2423                m = (typeof(m)){"LP8000", "PCI",
2424                                "Obsolete, Unsupported Fibre Channel Adapter"};
2425                break;
2426        case PCI_DEVICE_ID_CENTAUR:
2427                if (FC_JEDEC_ID(vp->rev.biuRev) == CENTAUR_2G_JEDEC_ID)
2428                        m = (typeof(m)){"LP9002", "PCI", ""};
2429                else
2430                        m = (typeof(m)){"LP9000", "PCI", ""};
2431                m.function = "Obsolete, Unsupported Fibre Channel Adapter";
2432                break;
2433        case PCI_DEVICE_ID_RFLY:
2434                m = (typeof(m)){"LP952", "PCI",
2435                                "Obsolete, Unsupported Fibre Channel Adapter"};
2436                break;
2437        case PCI_DEVICE_ID_PEGASUS:
2438                m = (typeof(m)){"LP9802", "PCI-X",
2439                                "Obsolete, Unsupported Fibre Channel Adapter"};
2440                break;
2441        case PCI_DEVICE_ID_THOR:
2442                m = (typeof(m)){"LP10000", "PCI-X",
2443                                "Obsolete, Unsupported Fibre Channel Adapter"};
2444                break;
2445        case PCI_DEVICE_ID_VIPER:
2446                m = (typeof(m)){"LPX1000",  "PCI-X",
2447                                "Obsolete, Unsupported Fibre Channel Adapter"};
2448                break;
2449        case PCI_DEVICE_ID_PFLY:
2450                m = (typeof(m)){"LP982", "PCI-X",
2451                                "Obsolete, Unsupported Fibre Channel Adapter"};
2452                break;
2453        case PCI_DEVICE_ID_TFLY:
2454                m = (typeof(m)){"LP1050", "PCI-X",
2455                                "Obsolete, Unsupported Fibre Channel Adapter"};
2456                break;
2457        case PCI_DEVICE_ID_HELIOS:
2458                m = (typeof(m)){"LP11000", "PCI-X2",
2459                                "Obsolete, Unsupported Fibre Channel Adapter"};
2460                break;
2461        case PCI_DEVICE_ID_HELIOS_SCSP:
2462                m = (typeof(m)){"LP11000-SP", "PCI-X2",
2463                                "Obsolete, Unsupported Fibre Channel Adapter"};
2464                break;
2465        case PCI_DEVICE_ID_HELIOS_DCSP:
2466                m = (typeof(m)){"LP11002-SP",  "PCI-X2",
2467                                "Obsolete, Unsupported Fibre Channel Adapter"};
2468                break;
2469        case PCI_DEVICE_ID_NEPTUNE:
2470                m = (typeof(m)){"LPe1000", "PCIe",
2471                                "Obsolete, Unsupported Fibre Channel Adapter"};
2472                break;
2473        case PCI_DEVICE_ID_NEPTUNE_SCSP:
2474                m = (typeof(m)){"LPe1000-SP", "PCIe",
2475                                "Obsolete, Unsupported Fibre Channel Adapter"};
2476                break;
2477        case PCI_DEVICE_ID_NEPTUNE_DCSP:
2478                m = (typeof(m)){"LPe1002-SP", "PCIe",
2479                                "Obsolete, Unsupported Fibre Channel Adapter"};
2480                break;
2481        case PCI_DEVICE_ID_BMID:
2482                m = (typeof(m)){"LP1150", "PCI-X2", "Fibre Channel Adapter"};
2483                break;
2484        case PCI_DEVICE_ID_BSMB:
2485                m = (typeof(m)){"LP111", "PCI-X2",
2486                                "Obsolete, Unsupported Fibre Channel Adapter"};
2487                break;
2488        case PCI_DEVICE_ID_ZEPHYR:
2489                m = (typeof(m)){"LPe11000", "PCIe", "Fibre Channel Adapter"};
2490                break;
2491        case PCI_DEVICE_ID_ZEPHYR_SCSP:
2492                m = (typeof(m)){"LPe11000", "PCIe", "Fibre Channel Adapter"};
2493                break;
2494        case PCI_DEVICE_ID_ZEPHYR_DCSP:
2495                m = (typeof(m)){"LP2105", "PCIe", "FCoE Adapter"};
2496                GE = 1;
2497                break;
2498        case PCI_DEVICE_ID_ZMID:
2499                m = (typeof(m)){"LPe1150", "PCIe", "Fibre Channel Adapter"};
2500                break;
2501        case PCI_DEVICE_ID_ZSMB:
2502                m = (typeof(m)){"LPe111", "PCIe", "Fibre Channel Adapter"};
2503                break;
2504        case PCI_DEVICE_ID_LP101:
2505                m = (typeof(m)){"LP101", "PCI-X",
2506                                "Obsolete, Unsupported Fibre Channel Adapter"};
2507                break;
2508        case PCI_DEVICE_ID_LP10000S:
2509                m = (typeof(m)){"LP10000-S", "PCI",
2510                                "Obsolete, Unsupported Fibre Channel Adapter"};
2511                break;
2512        case PCI_DEVICE_ID_LP11000S:
2513                m = (typeof(m)){"LP11000-S", "PCI-X2",
2514                                "Obsolete, Unsupported Fibre Channel Adapter"};
2515                break;
2516        case PCI_DEVICE_ID_LPE11000S:
2517                m = (typeof(m)){"LPe11000-S", "PCIe",
2518                                "Obsolete, Unsupported Fibre Channel Adapter"};
2519                break;
2520        case PCI_DEVICE_ID_SAT:
2521                m = (typeof(m)){"LPe12000", "PCIe", "Fibre Channel Adapter"};
2522                break;
2523        case PCI_DEVICE_ID_SAT_MID:
2524                m = (typeof(m)){"LPe1250", "PCIe", "Fibre Channel Adapter"};
2525                break;
2526        case PCI_DEVICE_ID_SAT_SMB:
2527                m = (typeof(m)){"LPe121", "PCIe", "Fibre Channel Adapter"};
2528                break;
2529        case PCI_DEVICE_ID_SAT_DCSP:
2530                m = (typeof(m)){"LPe12002-SP", "PCIe", "Fibre Channel Adapter"};
2531                break;
2532        case PCI_DEVICE_ID_SAT_SCSP:
2533                m = (typeof(m)){"LPe12000-SP", "PCIe", "Fibre Channel Adapter"};
2534                break;
2535        case PCI_DEVICE_ID_SAT_S:
2536                m = (typeof(m)){"LPe12000-S", "PCIe", "Fibre Channel Adapter"};
2537                break;
2538        case PCI_DEVICE_ID_HORNET:
2539                m = (typeof(m)){"LP21000", "PCIe",
2540                                "Obsolete, Unsupported FCoE Adapter"};
2541                GE = 1;
2542                break;
2543        case PCI_DEVICE_ID_PROTEUS_VF:
2544                m = (typeof(m)){"LPev12000", "PCIe IOV",
2545                                "Obsolete, Unsupported Fibre Channel Adapter"};
2546                break;
2547        case PCI_DEVICE_ID_PROTEUS_PF:
2548                m = (typeof(m)){"LPev12000", "PCIe IOV",
2549                                "Obsolete, Unsupported Fibre Channel Adapter"};
2550                break;
2551        case PCI_DEVICE_ID_PROTEUS_S:
2552                m = (typeof(m)){"LPemv12002-S", "PCIe IOV",
2553                                "Obsolete, Unsupported Fibre Channel Adapter"};
2554                break;
2555        case PCI_DEVICE_ID_TIGERSHARK:
2556                oneConnect = 1;
2557                m = (typeof(m)){"OCe10100", "PCIe", "FCoE"};
2558                break;
2559        case PCI_DEVICE_ID_TOMCAT:
2560                oneConnect = 1;
2561                m = (typeof(m)){"OCe11100", "PCIe", "FCoE"};
2562                break;
2563        case PCI_DEVICE_ID_FALCON:
2564                m = (typeof(m)){"LPSe12002-ML1-E", "PCIe",
2565                                "EmulexSecure Fibre"};
2566                break;
2567        case PCI_DEVICE_ID_BALIUS:
2568                m = (typeof(m)){"LPVe12002", "PCIe Shared I/O",
2569                                "Obsolete, Unsupported Fibre Channel Adapter"};
2570                break;
2571        case PCI_DEVICE_ID_LANCER_FC:
2572                m = (typeof(m)){"LPe16000", "PCIe", "Fibre Channel Adapter"};
2573                break;
2574        case PCI_DEVICE_ID_LANCER_FC_VF:
2575                m = (typeof(m)){"LPe16000", "PCIe",
2576                                "Obsolete, Unsupported Fibre Channel Adapter"};
2577                break;
2578        case PCI_DEVICE_ID_LANCER_FCOE:
2579                oneConnect = 1;
2580                m = (typeof(m)){"OCe15100", "PCIe", "FCoE"};
2581                break;
2582        case PCI_DEVICE_ID_LANCER_FCOE_VF:
2583                oneConnect = 1;
2584                m = (typeof(m)){"OCe15100", "PCIe",
2585                                "Obsolete, Unsupported FCoE"};
2586                break;
2587        case PCI_DEVICE_ID_LANCER_G6_FC:
2588                m = (typeof(m)){"LPe32000", "PCIe", "Fibre Channel Adapter"};
2589                break;
2590        case PCI_DEVICE_ID_LANCER_G7_FC:
2591                m = (typeof(m)){"LPe36000", "PCIe", "Fibre Channel Adapter"};
2592                break;
2593        case PCI_DEVICE_ID_SKYHAWK:
2594        case PCI_DEVICE_ID_SKYHAWK_VF:
2595                oneConnect = 1;
2596                m = (typeof(m)){"OCe14000", "PCIe", "FCoE"};
2597                break;
2598        default:
2599                m = (typeof(m)){"Unknown", "", ""};
2600                break;
2601        }
2602
2603        if (mdp && mdp[0] == '\0')
2604                snprintf(mdp, 79, "%s", m.name);
2605        /*
2606         * OneConnect HBAs require special processing; they are all
2607         * initiators and we put the port number on the end.
2608         */
2609        if (descp && descp[0] == '\0') {
2610                if (oneConnect)
2611                        snprintf(descp, 255,
2612                                "Emulex OneConnect %s, %s Initiator %s",
2613                                m.name, m.function,
2614                                phba->Port);
2615                else if (max_speed == 0)
2616                        snprintf(descp, 255,
2617                                "Emulex %s %s %s",
2618                                m.name, m.bus, m.function);
2619                else
2620                        snprintf(descp, 255,
2621                                "Emulex %s %d%s %s %s",
2622                                m.name, max_speed, (GE) ? "GE" : "Gb",
2623                                m.bus, m.function);
2624        }
2625}
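
/*
 * Usage sketch (illustrative only; "model" and "desc" are hypothetical local
 * buffers): the routine fills a buffer only when its first byte is '\0', so
 * callers pass in zeroed storage:
 *
 *        uint8_t model[80] = "";
 *        uint8_t desc[256] = "";
 *
 *        lpfc_get_hba_model_desc(phba, model, desc);
 *        // e.g. model = "LPe32000"
 *        //      desc  = "Emulex LPe32000 32Gb PCIe Fibre Channel Adapter"
 *        // (exact strings depend on the PCI device ID and link speed)
 */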
2626
2627/**
2628 * lpfc_post_buffer - Post IOCB(s) with DMA buffer descriptor(s) to an IOCB ring
2629 * @phba: pointer to lpfc hba data structure.
2630 * @pring: pointer to an IOCB ring.
2631 * @cnt: the number of IOCBs to be posted to the IOCB ring.
2632 *
2633 * This routine posts a given number of IOCBs with the associated DMA buffer
2634 * descriptors specified by the cnt argument to the given IOCB ring.
2635 *
2636 * Return codes
2637 *   The number of IOCBs NOT able to be posted to the IOCB ring.
2638 **/
2639int
2640lpfc_post_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, int cnt)
2641{
2642        IOCB_t *icmd;
2643        struct lpfc_iocbq *iocb;
2644        struct lpfc_dmabuf *mp1, *mp2;
2645
2646        cnt += pring->missbufcnt;
2647
2648        /* While there are buffers to post */
2649        while (cnt > 0) {
2650                /* Allocate buffer for  command iocb */
2651                iocb = lpfc_sli_get_iocbq(phba);
2652                if (iocb == NULL) {
2653                        pring->missbufcnt = cnt;
2654                        return cnt;
2655                }
2656                icmd = &iocb->iocb;
2657
2658                /* 2 buffers can be posted per command */
2659                /* Allocate buffer to post */
2660                mp1 = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
2661                if (mp1)
2662                        mp1->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &mp1->phys);
2663                if (!mp1 || !mp1->virt) {
2664                        kfree(mp1);
2665                        lpfc_sli_release_iocbq(phba, iocb);
2666                        pring->missbufcnt = cnt;
2667                        return cnt;
2668                }
2669
2670                INIT_LIST_HEAD(&mp1->list);
2671                /* Allocate buffer to post */
2672                if (cnt > 1) {
2673                        mp2 = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
2674                        if (mp2)
2675                                mp2->virt = lpfc_mbuf_alloc(phba, MEM_PRI,
2676                                                            &mp2->phys);
2677                        if (!mp2 || !mp2->virt) {
2678                                kfree(mp2);
2679                                lpfc_mbuf_free(phba, mp1->virt, mp1->phys);
2680                                kfree(mp1);
2681                                lpfc_sli_release_iocbq(phba, iocb);
2682                                pring->missbufcnt = cnt;
2683                                return cnt;
2684                        }
2685
2686                        INIT_LIST_HEAD(&mp2->list);
2687                } else {
2688                        mp2 = NULL;
2689                }
2690
2691                icmd->un.cont64[0].addrHigh = putPaddrHigh(mp1->phys);
2692                icmd->un.cont64[0].addrLow = putPaddrLow(mp1->phys);
2693                icmd->un.cont64[0].tus.f.bdeSize = FCELSSIZE;
2694                icmd->ulpBdeCount = 1;
2695                cnt--;
2696                if (mp2) {
2697                        icmd->un.cont64[1].addrHigh = putPaddrHigh(mp2->phys);
2698                        icmd->un.cont64[1].addrLow = putPaddrLow(mp2->phys);
2699                        icmd->un.cont64[1].tus.f.bdeSize = FCELSSIZE;
2700                        cnt--;
2701                        icmd->ulpBdeCount = 2;
2702                }
2703
2704                icmd->ulpCommand = CMD_QUE_RING_BUF64_CN;
2705                icmd->ulpLe = 1;
2706
2707                if (lpfc_sli_issue_iocb(phba, pring->ringno, iocb, 0) ==
2708                    IOCB_ERROR) {
2709                        lpfc_mbuf_free(phba, mp1->virt, mp1->phys);
2710                        kfree(mp1);
2711                        cnt++;
2712                        if (mp2) {
2713                                lpfc_mbuf_free(phba, mp2->virt, mp2->phys);
2714                                kfree(mp2);
2715                                cnt++;
2716                        }
2717                        lpfc_sli_release_iocbq(phba, iocb);
2718                        pring->missbufcnt = cnt;
2719                        return cnt;
2720                }
2721                lpfc_sli_ringpostbuf_put(phba, pring, mp1);
2722                if (mp2)
2723                        lpfc_sli_ringpostbuf_put(phba, pring, mp2);
2724        }
2725        pring->missbufcnt = 0;
2726        return 0;
2727}
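
/*
 * Retry sketch (illustrative): the routine returns the number of buffers it
 * could NOT post and parks that same count in pring->missbufcnt, so a later
 * call transparently retries the shortfall:
 *
 *        if (lpfc_post_buffer(phba, pring, 64))
 *                ;        // out of iocbs or mbufs; the next invocation adds
 *                         // pring->missbufcnt back onto its cnt argument
 */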
2728
2729/**
2730 * lpfc_post_rcv_buf - Post the initial receive IOCB buffers to ELS ring
2731 * @phba: pointer to lpfc hba data structure.
2732 *
2733 * This routine posts initial receive IOCB buffers to the ELS ring. The
2734 * current number of initial IOCB buffers specified by LPFC_BUF_RING0 is
2735 * set to 64 IOCBs. SLI3 only.
2736 *
2737 * Return codes
2738 *   0 - success (currently always success)
2739 **/
2740static int
2741lpfc_post_rcv_buf(struct lpfc_hba *phba)
2742{
2743        struct lpfc_sli *psli = &phba->sli;
2744
2745        /* Ring 0, ELS / CT buffers */
2746        lpfc_post_buffer(phba, &psli->sli3_ring[LPFC_ELS_RING], LPFC_BUF_RING0);
2747        /* Ring 2 - FCP no buffers needed */
2748
2749        return 0;
2750}
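
/*
 * Sizing note (illustrative arithmetic): LPFC_BUF_RING0 is 64 buffers and
 * each CMD_QUE_RING_BUF64_CN iocb above carries up to two of them, so this
 * primes the SLI3 ELS ring with roughly 64 / 2 = 32 receive iocbs.
 */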
2751
2752#define S(N,V) (((V)<<(N))|((V)>>(32-(N))))        /* rotate V left by N bits */
2753
2754/**
2755 * lpfc_sha_init - Set up initial array of hash table entries
2756 * @HashResultPointer: pointer to an array used as the hash table.
2757 *
2758 * This routine sets up the initial values in the array of hash table entries
2759 * for the LC HBAs (the standard SHA-1 initialization vector).
2760 **/
2761static void
2762lpfc_sha_init(uint32_t *HashResultPointer)
2763{
2764        HashResultPointer[0] = 0x67452301;
2765        HashResultPointer[1] = 0xEFCDAB89;
2766        HashResultPointer[2] = 0x98BADCFE;
2767        HashResultPointer[3] = 0x10325476;
2768        HashResultPointer[4] = 0xC3D2E1F0;
2769}
2770
2771/**
2772 * lpfc_sha_iterate - Iterate initial hash table with the working hash table
2773 * @HashResultPointer: pointer to an initial/result hash table.
2774 * @HashWorkingPointer: pointer to a working hash table.
2775 *
2776 * This routine iterates the initial hash table pointed to by
2777 * @HashResultPointer with the values from the working hash table pointed to
2778 * by @HashWorkingPointer. The results are put back into the initial hash
2779 * table, returned through @HashResultPointer as the result hash table.
2780 **/
2781static void
2782lpfc_sha_iterate(uint32_t *HashResultPointer, uint32_t *HashWorkingPointer)
2783{
2784        int t;
2785        uint32_t TEMP;
2786        uint32_t A, B, C, D, E;
2787        t = 16;
2788        do {
2789                HashWorkingPointer[t] =
2790                        S(1, HashWorkingPointer[t - 3] ^
2791                             HashWorkingPointer[t - 8] ^
2792                             HashWorkingPointer[t - 14] ^
2793                             HashWorkingPointer[t - 16]);
2794        } while (++t <= 79);
2795        t = 0;
2796        A = HashResultPointer[0];
2797        B = HashResultPointer[1];
2798        C = HashResultPointer[2];
2799        D = HashResultPointer[3];
2800        E = HashResultPointer[4];
2801
2802        do {
2803                if (t < 20) {
2804                        TEMP = ((B & C) | ((~B) & D)) + 0x5A827999;
2805                } else if (t < 40) {
2806                        TEMP = (B ^ C ^ D) + 0x6ED9EBA1;
2807                } else if (t < 60) {
2808                        TEMP = ((B & C) | (B & D) | (C & D)) + 0x8F1BBCDC;
2809                } else {
2810                        TEMP = (B ^ C ^ D) + 0xCA62C1D6;
2811                }
2812                TEMP += S(5, A) + E + HashWorkingPointer[t];
2813                E = D;
2814                D = C;
2815                C = S(30, B);
2816                B = A;
2817                A = TEMP;
2818        } while (++t <= 79);
2819
2820        HashResultPointer[0] += A;
2821        HashResultPointer[1] += B;
2822        HashResultPointer[2] += C;
2823        HashResultPointer[3] += D;
2824        HashResultPointer[4] += E;
2825
2826}
2827
2828/**
2829 * lpfc_challenge_key - Create challenge key based on WWPN of the HBA
2830 * @RandomChallenge: pointer to the entry of host challenge random number array.
2831 * @HashWorking: pointer to the entry of the working hash array.
2832 *
2833 * This routine calculates the working hash array referred to by @HashWorking
2834 * from the challenge random numbers associated with the host, referred to by
2835 * @RandomChallenge. The result is put into the entry of the working hash
2836 * array and returned by reference through @HashWorking.
2837 **/
2838static void
2839lpfc_challenge_key(uint32_t *RandomChallenge, uint32_t *HashWorking)
2840{
2841        *HashWorking = (*RandomChallenge ^ *HashWorking);
2842}
2843
2844/**
2845 * lpfc_hba_init - Perform special handling for LC HBA initialization
2846 * @phba: pointer to lpfc hba data structure.
2847 * @hbainit: pointer to an array of unsigned 32-bit integers.
2848 *
2849 * This routine performs the special handling for LC HBA initialization.
2850 **/
2851void
2852lpfc_hba_init(struct lpfc_hba *phba, uint32_t *hbainit)
2853{
2854        int t;
2855        uint32_t *HashWorking;
2856        uint32_t *pwwnn = (uint32_t *) phba->wwnn;
2857
2858        HashWorking = kcalloc(80, sizeof(uint32_t), GFP_KERNEL);
2859        if (!HashWorking)
2860                return;
2861
2862        HashWorking[0] = HashWorking[78] = *pwwnn++;
2863        HashWorking[1] = HashWorking[79] = *pwwnn;
2864
2865        for (t = 0; t < 7; t++)
2866                lpfc_challenge_key(phba->RandomData + t, HashWorking + t);
2867
2868        lpfc_sha_init(hbainit);
2869        lpfc_sha_iterate(hbainit, HashWorking);
2870        kfree(HashWorking);
2871}
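
/*
 * Flow sketch (illustrative): the challenge/response hash for LC HBAs is one
 * SHA-1 style compression assembled from the helpers above:
 *
 *        uint32_t hbainit[5];
 *
 *        lpfc_hba_init(phba, hbainit);
 *        // lpfc_sha_init()      seeds hbainit[] with the standard SHA-1 IV
 *        // lpfc_challenge_key() folds phba->RandomData into the WWNN-seeded
 *        //                      working array
 *        // lpfc_sha_iterate()   runs the 80-round mix, leaving the 160-bit
 *        //                      result in hbainit[]
 */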
2872
2873/**
2874 * lpfc_cleanup - Performs vport cleanups before deleting a vport
2875 * @vport: pointer to a virtual N_Port data structure.
2876 *
2877 * This routine performs the necessary cleanups before deleting the @vport.
2878 * It invokes the discovery state machine to perform necessary state
2879 * transitions and to release the ndlps associated with the @vport. Note,
2880 * the physical port is treated as @vport 0.
2881 **/
2882void
2883lpfc_cleanup(struct lpfc_vport *vport)
2884{
2885        struct lpfc_hba   *phba = vport->phba;
2886        struct lpfc_nodelist *ndlp, *next_ndlp;
2887        int i = 0;
2888
2889        if (phba->link_state > LPFC_LINK_DOWN)
2890                lpfc_port_link_failure(vport);
2891
2892        /* Clean up VMID resources */
2893        if (lpfc_is_vmid_enabled(phba))
2894                lpfc_vmid_vport_cleanup(vport);
2895
2896        list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
2897                if (vport->port_type != LPFC_PHYSICAL_PORT &&
2898                    ndlp->nlp_DID == Fabric_DID) {
2899                        /* Just free up ndlp with Fabric_DID for vports */
2900                        lpfc_nlp_put(ndlp);
2901                        continue;
2902                }
2903
2904                if (ndlp->nlp_DID == Fabric_Cntl_DID &&
2905                    ndlp->nlp_state == NLP_STE_UNUSED_NODE) {
2906                        lpfc_nlp_put(ndlp);
2907                        continue;
2908                }
2909
2910                /* Fabric Ports not in UNMAPPED state are cleaned up in the
2911                 * DEVICE_RM event.
2912                 */
2913                if (ndlp->nlp_type & NLP_FABRIC &&
2914                    ndlp->nlp_state == NLP_STE_UNMAPPED_NODE)
2915                        lpfc_disc_state_machine(vport, ndlp, NULL,
2916                                        NLP_EVT_DEVICE_RECOVERY);
2917
2918                if (!(ndlp->fc4_xpt_flags & (NVME_XPT_REGD|SCSI_XPT_REGD)))
2919                        lpfc_disc_state_machine(vport, ndlp, NULL,
2920                                        NLP_EVT_DEVICE_RM);
2921        }
2922
2923        /* At this point, ALL ndlp's should be gone
2924         * because of the previous NLP_EVT_DEVICE_RM.
2925         * Let's wait for this to happen, if needed.
2926         */
2927        while (!list_empty(&vport->fc_nodes)) {
2928                if (i++ > 3000) {
2929                        lpfc_printf_vlog(vport, KERN_ERR,
2930                                         LOG_TRACE_EVENT,
2931                                "0233 Nodelist not empty\n");
2932                        list_for_each_entry_safe(ndlp, next_ndlp,
2933                                                &vport->fc_nodes, nlp_listp) {
2934                                lpfc_printf_vlog(ndlp->vport, KERN_ERR,
2935                                                 LOG_TRACE_EVENT,
2936                                                 "0282 did:x%x ndlp:x%px "
2937                                                 "refcnt:%d xflags x%x nflag x%x\n",
2938                                                 ndlp->nlp_DID, (void *)ndlp,
2939                                                 kref_read(&ndlp->kref),
2940                                                 ndlp->fc4_xpt_flags,
2941                                                 ndlp->nlp_flag);
2942                        }
2943                        break;
2944                }
2945
2946                /* Wait for any activity on ndlps to settle */
2947                msleep(10);
2948        }
2949        lpfc_cleanup_vports_rrqs(vport, NULL);
2950}
2951
2952/**
2953 * lpfc_stop_vport_timers - Stop all the timers associated with a vport
2954 * @vport: pointer to a virtual N_Port data structure.
2955 *
2956 * This routine stops all the timers associated with a @vport. This function
2957 * is invoked before disabling or deleting a @vport. Note that the physical
2958 * port is treated as @vport 0.
2959 **/
2960void
2961lpfc_stop_vport_timers(struct lpfc_vport *vport)
2962{
2963        del_timer_sync(&vport->els_tmofunc);
2964        del_timer_sync(&vport->delayed_disc_tmo);
2965        lpfc_can_disctmo(vport);
2966        return;
2967}
2968
2969/**
2970 * __lpfc_sli4_stop_fcf_redisc_wait_timer - Stop FCF rediscovery wait timer
2971 * @phba: pointer to lpfc hba data structure.
2972 *
2973 * This routine stops the SLI4 FCF rediscover wait timer if it's on. The
2974 * caller of this routine should already hold the host lock.
2975 **/
2976void
2977__lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba)
2978{
2979        /* Clear pending FCF rediscovery wait flag */
2980        phba->fcf.fcf_flag &= ~FCF_REDISC_PEND;
2981
2982        /* Now, try to stop the timer */
2983        del_timer(&phba->fcf.redisc_wait);
2984}
2985
2986/**
2987 * lpfc_sli4_stop_fcf_redisc_wait_timer - Stop FCF rediscovery wait timer
2988 * @phba: pointer to lpfc hba data structure.
2989 *
2990 * This routine stops the SLI4 FCF rediscover wait timer if it's on. It
2991 * checks whether the FCF rediscovery wait timer is pending with the host
2992 * lock held before proceeding with disabling the timer and clearing the
2993 * wait timer pending flag.
2994 **/
2995void
2996lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba)
2997{
2998        spin_lock_irq(&phba->hbalock);
2999        if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND)) {
3000                /* FCF rediscovery timer already fired or stopped */
3001                spin_unlock_irq(&phba->hbalock);
3002                return;
3003        }
3004        __lpfc_sli4_stop_fcf_redisc_wait_timer(phba);
3005        /* Clear failover in progress flags */
3006        phba->fcf.fcf_flag &= ~(FCF_DEAD_DISC | FCF_ACVL_DISC);
3007        spin_unlock_irq(&phba->hbalock);
3008}
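
/*
 * Locking sketch (illustrative): per the usual kernel convention, the
 * double-underscore variant expects the caller to hold hbalock:
 *
 *        spin_lock_irq(&phba->hbalock);
 *        __lpfc_sli4_stop_fcf_redisc_wait_timer(phba);
 *        spin_unlock_irq(&phba->hbalock);
 *
 * whereas lpfc_sli4_stop_fcf_redisc_wait_timer() takes and releases the
 * lock itself, as above.
 */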
3009
3010/**
3011 * lpfc_stop_hba_timers - Stop all the timers associated with an HBA
3012 * @phba: pointer to lpfc hba data structure.
3013 *
3014 * This routine stops all the timers associated with an HBA. This function is
3015 * invoked before either putting an HBA offline or unloading the driver.
3016 **/
3017void
3018lpfc_stop_hba_timers(struct lpfc_hba *phba)
3019{
3020        if (phba->pport)
3021                lpfc_stop_vport_timers(phba->pport);
3022        cancel_delayed_work_sync(&phba->eq_delay_work);
3023        cancel_delayed_work_sync(&phba->idle_stat_delay_work);
3024        del_timer_sync(&phba->sli.mbox_tmo);
3025        del_timer_sync(&phba->fabric_block_timer);
3026        del_timer_sync(&phba->eratt_poll);
3027        del_timer_sync(&phba->hb_tmofunc);
3028        if (phba->sli_rev == LPFC_SLI_REV4) {
3029                del_timer_sync(&phba->rrq_tmr);
3030                phba->hba_flag &= ~HBA_RRQ_ACTIVE;
3031        }
3032        phba->hba_flag &= ~(HBA_HBEAT_INP | HBA_HBEAT_TMO);
3033
3034        switch (phba->pci_dev_grp) {
3035        case LPFC_PCI_DEV_LP:
3036                /* Stop any LightPulse device specific driver timers */
3037                del_timer_sync(&phba->fcp_poll_timer);
3038                break;
3039        case LPFC_PCI_DEV_OC:
3040                /* Stop any OneConnect device specific driver timers */
3041                lpfc_sli4_stop_fcf_redisc_wait_timer(phba);
3042                break;
3043        default:
3044                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3045                                "0297 Invalid device group (x%x)\n",
3046                                phba->pci_dev_grp);
3047                break;
3048        }
3049        return;
3050}
3051
3052/**
3053 * lpfc_block_mgmt_io - Mark an HBA's management interface as blocked
3054 * @phba: pointer to lpfc hba data structure.
3055 * @mbx_action: flag for mailbox no wait action.
3056 *
3057 * This routine marks an HBA's management interface as blocked. Once the
3058 * HBA's management interface is marked as blocked, all user space access to
3059 * the HBA, whether from the sysfs interface or the libdfc interface, is
3060 * blocked. The HBA is set to block the management interface when the driver
3061 * prepares the HBA interface for online or offline.
3062 **/
3063static void
3064lpfc_block_mgmt_io(struct lpfc_hba *phba, int mbx_action)
3065{
3066        unsigned long iflag;
3067        uint8_t actcmd = MBX_HEARTBEAT;
3068        unsigned long timeout;
3069
3070        spin_lock_irqsave(&phba->hbalock, iflag);
3071        phba->sli.sli_flag |= LPFC_BLOCK_MGMT_IO;
3072        spin_unlock_irqrestore(&phba->hbalock, iflag);
3073        if (mbx_action == LPFC_MBX_NO_WAIT)
3074                return;
3075        timeout = msecs_to_jiffies(LPFC_MBOX_TMO * 1000) + jiffies;
3076        spin_lock_irqsave(&phba->hbalock, iflag);
3077        if (phba->sli.mbox_active) {
3078                actcmd = phba->sli.mbox_active->u.mb.mbxCommand;
3079                /* Determine how long we might wait for the active mailbox
3080                 * command to be gracefully completed by firmware.
3081                 */
3082                timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba,
3083                                phba->sli.mbox_active) * 1000) + jiffies;
3084        }
3085        spin_unlock_irqrestore(&phba->hbalock, iflag);
3086
3087        /* Wait for the outstanding mailbox command to complete */
3088        while (phba->sli.mbox_active) {
3089                /* Check active mailbox complete status every 2ms */
3090                msleep(2);
3091                if (time_after(jiffies, timeout)) {
3092                        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3093                                        "2813 Mgmt IO is Blocked %x "
3094                                        "- mbox cmd %x still active\n",
3095                                        phba->sli.sli_flag, actcmd);
3096                        break;
3097                }
3098        }
3099}
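
/*
 * Pairing sketch (illustrative): online/offline paths bracket HBA
 * (re)initialization with block/unblock so user space mailbox traffic
 * cannot race the setup:
 *
 *        lpfc_block_mgmt_io(phba, LPFC_MBX_WAIT);
 *        // ... bring the SLI port up or down ...
 *        lpfc_unblock_mgmt_io(phba);
 */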
3100
3101/**
3102 * lpfc_sli4_node_prep - Assign RPIs for active nodes.
3103 * @phba: pointer to lpfc hba data structure.
3104 *
3105 * Allocate RPIs for all active remote nodes. This is needed whenever
3106 * an SLI4 adapter is reset and the driver is not unloading. Its purpose
3107 * is to fix up the temporary RPI assignments.
3108 **/
3109void
3110lpfc_sli4_node_prep(struct lpfc_hba *phba)
3111{
3112        struct lpfc_nodelist  *ndlp, *next_ndlp;
3113        struct lpfc_vport **vports;
3114        int i, rpi;
3115
3116        if (phba->sli_rev != LPFC_SLI_REV4)
3117                return;
3118
3119        vports = lpfc_create_vport_work_array(phba);
3120        if (vports == NULL)
3121                return;
3122
3123        for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
3124                if (vports[i]->load_flag & FC_UNLOADING)
3125                        continue;
3126
3127                list_for_each_entry_safe(ndlp, next_ndlp,
3128                                         &vports[i]->fc_nodes,
3129                                         nlp_listp) {
3130                        rpi = lpfc_sli4_alloc_rpi(phba);
3131                        if (rpi == LPFC_RPI_ALLOC_ERROR) {
3132                                /* TODO print log? */
3133                                continue;
3134                        }
3135                        ndlp->nlp_rpi = rpi;
3136                        lpfc_printf_vlog(ndlp->vport, KERN_INFO,
3137                                         LOG_NODE | LOG_DISCOVERY,
3138                                         "0009 Assign RPI x%x to ndlp x%px "
3139                                         "DID:x%06x flg:x%x\n",
3140                                         ndlp->nlp_rpi, ndlp, ndlp->nlp_DID,
3141                                         ndlp->nlp_flag);
3142                }
3143        }
3144        lpfc_destroy_vport_work_array(phba, vports);
3145}
3146
3147/**
3148 * lpfc_create_expedite_pool - create expedite pool
3149 * @phba: pointer to lpfc hba data structure.
3150 *
3151 * This routine moves a batch of XRIs from the lpfc_io_buf_list_put list of
3152 * HWQ 0 to the expedite pool and marks them as expedite.
3153 **/
3154static void lpfc_create_expedite_pool(struct lpfc_hba *phba)
3155{
3156        struct lpfc_sli4_hdw_queue *qp;
3157        struct lpfc_io_buf *lpfc_ncmd;
3158        struct lpfc_io_buf *lpfc_ncmd_next;
3159        struct lpfc_epd_pool *epd_pool;
3160        unsigned long iflag;
3161
3162        epd_pool = &phba->epd_pool;
3163        qp = &phba->sli4_hba.hdwq[0];
3164
3165        spin_lock_init(&epd_pool->lock);
3166        spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag);
3167        spin_lock(&epd_pool->lock);
3168        INIT_LIST_HEAD(&epd_pool->list);
3169        list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
3170                                 &qp->lpfc_io_buf_list_put, list) {
3171                list_move_tail(&lpfc_ncmd->list, &epd_pool->list);
3172                lpfc_ncmd->expedite = true;
3173                qp->put_io_bufs--;
3174                epd_pool->count++;
3175                if (epd_pool->count >= XRI_BATCH)
3176                        break;
3177        }
3178        spin_unlock(&epd_pool->lock);
3179        spin_unlock_irqrestore(&qp->io_buf_list_put_lock, iflag);
3180}
3181
3182/**
3183 * lpfc_destroy_expedite_pool - destroy expedite pool
3184 * @phba: pointer to lpfc hba data structure.
3185 *
3186 * This routine returns XRIs from the expedite pool to lpfc_io_buf_list_put
3187 * of HWQ 0 and clears the expedite mark.
3188 **/
3189static void lpfc_destroy_expedite_pool(struct lpfc_hba *phba)
3190{
3191        struct lpfc_sli4_hdw_queue *qp;
3192        struct lpfc_io_buf *lpfc_ncmd;
3193        struct lpfc_io_buf *lpfc_ncmd_next;
3194        struct lpfc_epd_pool *epd_pool;
3195        unsigned long iflag;
3196
3197        epd_pool = &phba->epd_pool;
3198        qp = &phba->sli4_hba.hdwq[0];
3199
3200        spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag);
3201        spin_lock(&epd_pool->lock);
3202        list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
3203                                 &epd_pool->list, list) {
3204                list_move_tail(&lpfc_ncmd->list,
3205                               &qp->lpfc_io_buf_list_put);
3206                lpfc_ncmd->expedite = false;
3207                qp->put_io_bufs++;
3208                epd_pool->count--;
3209        }
3210        spin_unlock(&epd_pool->lock);
3211        spin_unlock_irqrestore(&qp->io_buf_list_put_lock, iflag);
3212}
3213
3214/**
3215 * lpfc_create_multixri_pools - create multi-XRI pools
3216 * @phba: pointer to lpfc hba data structure.
3217 *
3218 * This routine initializes the public and private XRI pools per HWQ, then
3219 * moves XRIs from lpfc_io_buf_list_put to the public pool. High and low
3220 * watermarks are also initialized.
3221 **/
3222void lpfc_create_multixri_pools(struct lpfc_hba *phba)
3223{
3224        u32 i, j;
3225        u32 hwq_count;
3226        u32 count_per_hwq;
3227        struct lpfc_io_buf *lpfc_ncmd;
3228        struct lpfc_io_buf *lpfc_ncmd_next;
3229        unsigned long iflag;
3230        struct lpfc_sli4_hdw_queue *qp;
3231        struct lpfc_multixri_pool *multixri_pool;
3232        struct lpfc_pbl_pool *pbl_pool;
3233        struct lpfc_pvt_pool *pvt_pool;
3234
3235        lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3236                        "1234 num_hdw_queue=%d num_present_cpu=%d common_xri_cnt=%d\n",
3237                        phba->cfg_hdw_queue, phba->sli4_hba.num_present_cpu,
3238                        phba->sli4_hba.io_xri_cnt);
3239
3240        if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
3241                lpfc_create_expedite_pool(phba);
3242
3243        hwq_count = phba->cfg_hdw_queue;
3244        count_per_hwq = phba->sli4_hba.io_xri_cnt / hwq_count;
3245
3246        for (i = 0; i < hwq_count; i++) {
3247                multixri_pool = kzalloc(sizeof(*multixri_pool), GFP_KERNEL);
3248
3249                if (!multixri_pool) {
3250                        lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3251                                        "1238 Failed to allocate memory for "
3252                                        "multixri_pool\n");
3253
3254                        if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
3255                                lpfc_destroy_expedite_pool(phba);
3256
3257                        j = 0;
3258                        while (j < i) {
3259                                qp = &phba->sli4_hba.hdwq[j];
3260                                kfree(qp->p_multixri_pool);
3261                                j++;
3262                        }
3263                        phba->cfg_xri_rebalancing = 0;
3264                        return;
3265                }
3266
3267                qp = &phba->sli4_hba.hdwq[i];
3268                qp->p_multixri_pool = multixri_pool;
3269
3270                multixri_pool->xri_limit = count_per_hwq;
3271                multixri_pool->rrb_next_hwqid = i;
3272
3273                /* Deal with public free xri pool */
3274                pbl_pool = &multixri_pool->pbl_pool;
3275                spin_lock_init(&pbl_pool->lock);
3276                spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag);
3277                spin_lock(&pbl_pool->lock);
3278                INIT_LIST_HEAD(&pbl_pool->list);
3279                list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
3280                                         &qp->lpfc_io_buf_list_put, list) {
3281                        list_move_tail(&lpfc_ncmd->list, &pbl_pool->list);
3282                        qp->put_io_bufs--;
3283                        pbl_pool->count++;
3284                }
3285                lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3286                                "1235 Moved %d buffers from PUT list over to pbl_pool[%d]\n",
3287                                pbl_pool->count, i);
3288                spin_unlock(&pbl_pool->lock);
3289                spin_unlock_irqrestore(&qp->io_buf_list_put_lock, iflag);
3290
3291                /* Deal with private free xri pool */
3292                pvt_pool = &multixri_pool->pvt_pool;
3293                pvt_pool->high_watermark = multixri_pool->xri_limit / 2;
3294                pvt_pool->low_watermark = XRI_BATCH;
3295                spin_lock_init(&pvt_pool->lock);
3296                spin_lock_irqsave(&pvt_pool->lock, iflag);
3297                INIT_LIST_HEAD(&pvt_pool->list);
3298                pvt_pool->count = 0;
3299                spin_unlock_irqrestore(&pvt_pool->lock, iflag);
3300        }
3301}
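
/*
 * Sizing sketch (illustrative numbers): XRIs are split evenly across the
 * hardware queues and each private pool's watermarks derive from that
 * share; e.g. with io_xri_cnt = 2048 and cfg_hdw_queue = 8:
 *
 *        count_per_hwq            = 2048 / 8;        // 256 XRIs per HWQ
 *        pvt_pool->high_watermark = 256 / 2;         // 128
 *        pvt_pool->low_watermark  = XRI_BATCH;       // refill threshold
 */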
3302
3303/**
3304 * lpfc_destroy_multixri_pools - destroy multi-XRI pools
3305 * @phba: pointer to lpfc hba data structure.
3306 *
3307 * This routine returns XRIs from the public and private pools to lpfc_io_buf_list_put.
3308 **/
3309static void lpfc_destroy_multixri_pools(struct lpfc_hba *phba)
3310{
3311        u32 i;
3312        u32 hwq_count;
3313        struct lpfc_io_buf *lpfc_ncmd;
3314        struct lpfc_io_buf *lpfc_ncmd_next;
3315        unsigned long iflag;
3316        struct lpfc_sli4_hdw_queue *qp;
3317        struct lpfc_multixri_pool *multixri_pool;
3318        struct lpfc_pbl_pool *pbl_pool;
3319        struct lpfc_pvt_pool *pvt_pool;
3320
3321        if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
3322                lpfc_destroy_expedite_pool(phba);
3323
3324        if (!(phba->pport->load_flag & FC_UNLOADING))
3325                lpfc_sli_flush_io_rings(phba);
3326
3327        hwq_count = phba->cfg_hdw_queue;
3328
3329        for (i = 0; i < hwq_count; i++) {
3330                qp = &phba->sli4_hba.hdwq[i];
3331                multixri_pool = qp->p_multixri_pool;
3332                if (!multixri_pool)
3333                        continue;
3334
3335                qp->p_multixri_pool = NULL;
3336
3337                spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag);
3338
3339                /* Deal with public free xri pool */
3340                pbl_pool = &multixri_pool->pbl_pool;
3341                spin_lock(&pbl_pool->lock);
3342
3343                lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3344                                "1236 Moving %d buffers from pbl_pool[%d] TO PUT list\n",
3345                                pbl_pool->count, i);
3346
3347                list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
3348                                         &pbl_pool->list, list) {
3349                        list_move_tail(&lpfc_ncmd->list,
3350                                       &qp->lpfc_io_buf_list_put);
3351                        qp->put_io_bufs++;
3352                        pbl_pool->count--;
3353                }
3354
3355                INIT_LIST_HEAD(&pbl_pool->list);
3356                pbl_pool->count = 0;
3357
3358                spin_unlock(&pbl_pool->lock);
3359
3360                /* Deal with private free xri pool */
3361                pvt_pool = &multixri_pool->pvt_pool;
3362                spin_lock(&pvt_pool->lock);
3363
3364                lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3365                                "1237 Moving %d buffers from pvt_pool[%d] TO PUT list\n",
3366                                pvt_pool->count, i);
3367
3368                list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
3369                                         &pvt_pool->list, list) {
3370                        list_move_tail(&lpfc_ncmd->list,
3371                                       &qp->lpfc_io_buf_list_put);
3372                        qp->put_io_bufs++;
3373                        pvt_pool->count--;
3374                }
3375
3376                INIT_LIST_HEAD(&pvt_pool->list);
3377                pvt_pool->count = 0;
3378
3379                spin_unlock(&pvt_pool->lock);
3380                spin_unlock_irqrestore(&qp->io_buf_list_put_lock, iflag);
3381
3382                kfree(multixri_pool);
3383        }
3384}
3385
3386/**
3387 * lpfc_online - Initialize and bring an HBA online
3388 * @phba: pointer to lpfc hba data structure.
3389 *
3390 * This routine initializes the HBA and brings it online. During this
3391 * process, the management interface is blocked to prevent user space access
3392 * to the HBA interfering with the driver initialization.
3393 *
3394 * Return codes
3395 *   0 - successful
3396 *   1 - failed
3397 **/
3398int
3399lpfc_online(struct lpfc_hba *phba)
3400{
3401        struct lpfc_vport *vport;
3402        struct lpfc_vport **vports;
3403        int i, error = 0;
3404        bool vpis_cleared = false;
3405
3406        if (!phba)
3407                return 0;
3408        vport = phba->pport;
3409
3410        if (!(vport->fc_flag & FC_OFFLINE_MODE))
3411                return 0;
3412
3413        lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
3414                        "0458 Bring Adapter online\n");
3415
3416        lpfc_block_mgmt_io(phba, LPFC_MBX_WAIT);
3417
3418        if (phba->sli_rev == LPFC_SLI_REV4) {
3419                if (lpfc_sli4_hba_setup(phba)) { /* Initialize SLI4 HBA */
3420                        lpfc_unblock_mgmt_io(phba);
3421                        return 1;
3422                }
3423                spin_lock_irq(&phba->hbalock);
3424                if (!phba->sli4_hba.max_cfg_param.vpi_used)
3425                        vpis_cleared = true;
3426                spin_unlock_irq(&phba->hbalock);
3427
3428                /* Reestablish the local initiator port.
3429                 * The offline process destroyed the previous lport.
3430                 */
3431                if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME &&
3432                                !phba->nvmet_support) {
3433                        error = lpfc_nvme_create_localport(phba->pport);
3434                        if (error)
3435                                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3436                                        "6132 NVME restore reg failed "
3437                                        "on nvmei error x%x\n", error);
3438                }
3439        } else {
3440                lpfc_sli_queue_init(phba);
3441                if (lpfc_sli_hba_setup(phba)) { /* Initialize SLI2/SLI3 HBA */
3442                        lpfc_unblock_mgmt_io(phba);
3443                        return 1;
3444                }
3445        }
3446
3447        vports = lpfc_create_vport_work_array(phba);
3448        if (vports != NULL) {
3449                for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
3450                        struct Scsi_Host *shost;
3451                        shost = lpfc_shost_from_vport(vports[i]);
3452                        spin_lock_irq(shost->host_lock);
3453                        vports[i]->fc_flag &= ~FC_OFFLINE_MODE;
3454                        if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
3455                                vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
3456                        if (phba->sli_rev == LPFC_SLI_REV4) {
3457                                vports[i]->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
3458                                if ((vpis_cleared) &&
3459                                    (vports[i]->port_type !=
3460                                        LPFC_PHYSICAL_PORT))
3461                                        vports[i]->vpi = 0;
3462                        }
3463                        spin_unlock_irq(shost->host_lock);
3464                }
3465        }
3466        lpfc_destroy_vport_work_array(phba, vports);
3467
3468        if (phba->cfg_xri_rebalancing)
3469                lpfc_create_multixri_pools(phba);
3470
3471        lpfc_cpuhp_add(phba);
3472
3473        lpfc_unblock_mgmt_io(phba);
3474        return 0;
3475}
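
/*
 * Caller sketch (illustrative): callers such as the sysfs board_mode
 * handler treat a nonzero return as "adapter failed to come online":
 *
 *        if (lpfc_online(phba))
 *                return -EIO;        // SLI setup failed; note the error path
 *                                    // has already unblocked mgmt I/O
 */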
3476
3477/**
3478 * lpfc_unblock_mgmt_io - Mark an HBA's management interface as not blocked
3479 * @phba: pointer to lpfc hba data structure.
3480 *
3481 * This routine marks an HBA's management interface as not blocked. Once the
3482 * HBA's management interface is marked as not blocked, all user space
3483 * access to the HBA, whether from the sysfs interface or the libdfc
3484 * interface, is allowed. The HBA is set to block the management interface
3485 * when the driver prepares the HBA interface for online or offline, and then
3486 * set to unblock the management interface afterwards.
3487 **/
3488void
3489lpfc_unblock_mgmt_io(struct lpfc_hba *phba)
3490{
3491        unsigned long iflag;
3492
3493        spin_lock_irqsave(&phba->hbalock, iflag);
3494        phba->sli.sli_flag &= ~LPFC_BLOCK_MGMT_IO;
3495        spin_unlock_irqrestore(&phba->hbalock, iflag);
3496}
3497
3498/**
3499 * lpfc_offline_prep - Prepare an HBA to be brought offline
3500 * @phba: pointer to lpfc hba data structure.
3501 * @mbx_action: flag for mailbox shutdown action.
3502 *
3503 * This routine is invoked to prepare an HBA to be brought offline. It performs
3504 * unregistration login to all the nodes on all vports and flushes the mailbox
3505 * queue to make it ready to be brought offline.
3506 **/
3507void
3508lpfc_offline_prep(struct lpfc_hba *phba, int mbx_action)
3509{
3510        struct lpfc_vport *vport = phba->pport;
3511        struct lpfc_nodelist  *ndlp, *next_ndlp;
3512        struct lpfc_vport **vports;
3513        struct Scsi_Host *shost;
3514        int i;
3515
3516        if (vport->fc_flag & FC_OFFLINE_MODE)
3517                return;
3518
3519        lpfc_block_mgmt_io(phba, mbx_action);
3520
3521        lpfc_linkdown(phba);
3522
3523        /* Issue an unreg_login to all nodes on all vports */
3524        vports = lpfc_create_vport_work_array(phba);
3525        if (vports != NULL) {
3526                for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
3527                        if (vports[i]->load_flag & FC_UNLOADING)
3528                                continue;
3529                        shost = lpfc_shost_from_vport(vports[i]);
3530                        spin_lock_irq(shost->host_lock);
3531                        vports[i]->vpi_state &= ~LPFC_VPI_REGISTERED;
3532                        vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
3533                        vports[i]->fc_flag &= ~FC_VFI_REGISTERED;
3534                        spin_unlock_irq(shost->host_lock);
3535
3536                        shost = lpfc_shost_from_vport(vports[i]);
3537                        list_for_each_entry_safe(ndlp, next_ndlp,
3538                                                 &vports[i]->fc_nodes,
3539                                                 nlp_listp) {
3540
3541                                spin_lock_irq(&ndlp->lock);
3542                                ndlp->nlp_flag &= ~NLP_NPR_ADISC;
3543                                spin_unlock_irq(&ndlp->lock);
3544                                /*
3545                                 * Whenever an SLI4 port goes offline, free the
3546                                 * RPI. Get a new RPI when the adapter port
3547                                 * comes back online.
3548                                 */
3549                                if (phba->sli_rev == LPFC_SLI_REV4) {
3550                                        lpfc_printf_vlog(vports[i], KERN_INFO,
3551                                                 LOG_NODE | LOG_DISCOVERY,
3552                                                 "0011 Free RPI x%x on "
3553                                                 "ndlp: x%px did x%x\n",
3554                                                 ndlp->nlp_rpi, ndlp,
3555                                                 ndlp->nlp_DID);
3556                                        lpfc_sli4_free_rpi(phba, ndlp->nlp_rpi);
3557                                        ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR;
3558                                }
3559                                lpfc_unreg_rpi(vports[i], ndlp);
3560
3561                                if (ndlp->nlp_type & NLP_FABRIC) {
3562                                        lpfc_disc_state_machine(vports[i], ndlp,
3563                                                NULL, NLP_EVT_DEVICE_RECOVERY);
3564
3565                                        /* Don't remove the node unless it
3566                                         * has been unregistered with the
3567                                         * transport; otherwise let dev_loss
3568                                         * take care of the node.
3569                                         */
3570                                        if (!(ndlp->fc4_xpt_flags &
3571                                              (NVME_XPT_REGD | SCSI_XPT_REGD)))
3572                                                lpfc_disc_state_machine
3573                                                        (vports[i], ndlp,
3574                                                         NULL,
3575                                                         NLP_EVT_DEVICE_RM);
3576                                }
3577                        }
3578                }
3579        }
3580        lpfc_destroy_vport_work_array(phba, vports);
3581
3582        lpfc_sli_mbox_sys_shutdown(phba, mbx_action);
3583
3584        if (phba->wq)
3585                flush_workqueue(phba->wq);
3586}
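
/*
 * Sequence sketch (illustrative): a full shutdown is the prep step above
 * followed by lpfc_offline(), e.g. on unload or ahead of a reset:
 *
 *        lpfc_offline_prep(phba, LPFC_MBX_WAIT);
 *        lpfc_offline(phba);
 */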
3587
3588/**
3589 * lpfc_offline - Bring an HBA offline
3590 * @phba: pointer to lpfc hba data structure.
3591 *
3592 * This routine actually brings a HBA offline. It stops all the timers
3593 * associated with the HBA, brings down the SLI layer, and eventually
3594 * marks the HBA as in offline state for the upper layer protocol.
3595 **/
3596void
3597lpfc_offline(struct lpfc_hba *phba)
3598{
3599        struct Scsi_Host  *shost;
3600        struct lpfc_vport **vports;
3601        int i;
3602
3603        if (phba->pport->fc_flag & FC_OFFLINE_MODE)
3604                return;
3605
3606        /* stop port and all timers associated with this hba */
3607        lpfc_stop_port(phba);
3608
3609        /* Tear down the local and target port registrations.  The
3610         * nvme transports need to cleanup.
3611         */
3612        lpfc_nvmet_destroy_targetport(phba);
3613        lpfc_nvme_destroy_localport(phba->pport);
3614
3615        vports = lpfc_create_vport_work_array(phba);
3616        if (vports != NULL)
3617                for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
3618                        lpfc_stop_vport_timers(vports[i]);
3619        lpfc_destroy_vport_work_array(phba, vports);
3620        lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
3621                        "0460 Bring Adapter offline\n");
3622        /* Bring down the SLI Layer and cleanup.  The HBA is offline
3623         * now. */
3624        lpfc_sli_hba_down(phba);
3625        spin_lock_irq(&phba->hbalock);
3626        phba->work_ha = 0;
3627        spin_unlock_irq(&phba->hbalock);
3628        vports = lpfc_create_vport_work_array(phba);
3629        if (vports != NULL)
3630                for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
3631                        shost = lpfc_shost_from_vport(vports[i]);
3632                        spin_lock_irq(shost->host_lock);
3633                        vports[i]->work_port_events = 0;
3634                        vports[i]->fc_flag |= FC_OFFLINE_MODE;
3635                        spin_unlock_irq(shost->host_lock);
3636                }
3637        lpfc_destroy_vport_work_array(phba, vports);
3638        /* If OFFLINE flag is clear (i.e. unloading), cpuhp removal is handled
3639         * in hba_unset
3640         */
3641        if (phba->pport->fc_flag & FC_OFFLINE_MODE)
3642                __lpfc_cpuhp_remove(phba);
3643
3644        if (phba->cfg_xri_rebalancing)
3645                lpfc_destroy_multixri_pools(phba);
3646}
3647
3648/**
3649 * lpfc_scsi_free - Free all the SCSI buffers and IOCBs from driver lists
3650 * @phba: pointer to lpfc hba data structure.
3651 *
3652 * This routine is to free all the SCSI buffers and IOCBs from the driver
3653 * list back to kernel. It is called from lpfc_pci_remove_one to free
3654 * the internal resources before the device is removed from the system.
3655 **/
3656static void
3657lpfc_scsi_free(struct lpfc_hba *phba)
3658{
3659        struct lpfc_io_buf *sb, *sb_next;
3660
3661        if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP))
3662                return;
3663
3664        spin_lock_irq(&phba->hbalock);
3665
3666        /* Release all the lpfc_scsi_bufs maintained by this host. */
3667
3668        spin_lock(&phba->scsi_buf_list_put_lock);
3669        list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list_put,
3670                                 list) {
3671                list_del(&sb->list);
3672                dma_pool_free(phba->lpfc_sg_dma_buf_pool, sb->data,
3673                              sb->dma_handle);
3674                kfree(sb);
3675                phba->total_scsi_bufs--;
3676        }
3677        spin_unlock(&phba->scsi_buf_list_put_lock);
3678
3679        spin_lock(&phba->scsi_buf_list_get_lock);
3680        list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list_get,
3681                                 list) {
3682                list_del(&sb->list);
3683                dma_pool_free(phba->lpfc_sg_dma_buf_pool, sb->data,
3684                              sb->dma_handle);
3685                kfree(sb);
3686                phba->total_scsi_bufs--;
3687        }
3688        spin_unlock(&phba->scsi_buf_list_get_lock);
3689        spin_unlock_irq(&phba->hbalock);
3690}
3691
3692/**
3693 * lpfc_io_free - Free all the IO buffers and IOCBs from driver lists
3694 * @phba: pointer to lpfc hba data structure.
3695 *
3696 * This routine is to free all the IO buffers and IOCBs from the driver
3697 * list back to kernel. It is called from lpfc_pci_remove_one to free
3698 * the internal resources before the device is removed from the system.
3699 **/
3700void
3701lpfc_io_free(struct lpfc_hba *phba)
3702{
3703        struct lpfc_io_buf *lpfc_ncmd, *lpfc_ncmd_next;
3704        struct lpfc_sli4_hdw_queue *qp;
3705        int idx;
3706
3707        for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
3708                qp = &phba->sli4_hba.hdwq[idx];
3709                /* Release all the lpfc_nvme_bufs maintained by this host. */
3710                spin_lock(&qp->io_buf_list_put_lock);
3711                list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
3712                                         &qp->lpfc_io_buf_list_put,
3713                                         list) {
3714                        list_del(&lpfc_ncmd->list);
3715                        qp->put_io_bufs--;
3716                        dma_pool_free(phba->lpfc_sg_dma_buf_pool,
3717                                      lpfc_ncmd->data, lpfc_ncmd->dma_handle);
3718                        if (phba->cfg_xpsgl && !phba->nvmet_support)
3719                                lpfc_put_sgl_per_hdwq(phba, lpfc_ncmd);
3720                        lpfc_put_cmd_rsp_buf_per_hdwq(phba, lpfc_ncmd);
3721                        kfree(lpfc_ncmd);
3722                        qp->total_io_bufs--;
3723                }
3724                spin_unlock(&qp->io_buf_list_put_lock);
3725
3726                spin_lock(&qp->io_buf_list_get_lock);
3727                list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
3728                                         &qp->lpfc_io_buf_list_get,
3729                                         list) {
3730                        list_del(&lpfc_ncmd->list);
3731                        qp->get_io_bufs--;
3732                        dma_pool_free(phba->lpfc_sg_dma_buf_pool,
3733                                      lpfc_ncmd->data, lpfc_ncmd->dma_handle);
3734                        if (phba->cfg_xpsgl && !phba->nvmet_support)
3735                                lpfc_put_sgl_per_hdwq(phba, lpfc_ncmd);
3736                        lpfc_put_cmd_rsp_buf_per_hdwq(phba, lpfc_ncmd);
3737                        kfree(lpfc_ncmd);
3738                        qp->total_io_bufs--;
3739                }
3740                spin_unlock(&qp->io_buf_list_get_lock);
3741        }
3742}
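
/*
 * Design note (illustrative): each hardware queue keeps a "get" list for
 * allocation and a "put" list for free so that the hot paths contend on
 * different locks; teardown therefore drains both sides in turn:
 *
 *        spin_lock(&qp->io_buf_list_put_lock);        // drain the free side
 *        // ...
 *        spin_lock(&qp->io_buf_list_get_lock);        // then the alloc side
 */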
3743
3744/**
3745 * lpfc_sli4_els_sgl_update - update ELS xri-sgl sizing and mapping
3746 * @phba: pointer to lpfc hba data structure.
3747 *
3748 * This routine first calculates the sizes of the current els and allocated
3749 * scsi sgl lists, and then goes through all sgls to update the physical
3750 * XRIs assigned due to port function reset. During port initialization, the
3751 * current els and allocated scsi sgl lists are 0s.
3752 *
3753 * Return codes
3754 *   0 - successful (for now, it always returns 0)
3755 **/
3756int
3757lpfc_sli4_els_sgl_update(struct lpfc_hba *phba)
3758{
3759        struct lpfc_sglq *sglq_entry = NULL, *sglq_entry_next = NULL;
3760        uint16_t i, lxri, xri_cnt, els_xri_cnt;
3761        LIST_HEAD(els_sgl_list);
3762        int rc;
3763
3764        /*
3765         * update on pci function's els xri-sgl list
3766         */
3767        els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
3768
3769        if (els_xri_cnt > phba->sli4_hba.els_xri_cnt) {
3770                /* els xri-sgl expanded */
3771                xri_cnt = els_xri_cnt - phba->sli4_hba.els_xri_cnt;
3772                lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3773                                "3157 ELS xri-sgl count increased from "
3774                                "%d to %d\n", phba->sli4_hba.els_xri_cnt,
3775                                els_xri_cnt);
3776                /* allocate the additional els sgls */
3777                for (i = 0; i < xri_cnt; i++) {
3778                        sglq_entry = kzalloc(sizeof(struct lpfc_sglq),
3779                                             GFP_KERNEL);
3780                        if (sglq_entry == NULL) {
3781                                lpfc_printf_log(phba, KERN_ERR,
3782                                                LOG_TRACE_EVENT,
3783                                                "2562 Failure to allocate an "
3784                                                "ELS sgl entry:%d\n", i);
3785                                rc = -ENOMEM;
3786                                goto out_free_mem;
3787                        }
3788                        sglq_entry->buff_type = GEN_BUFF_TYPE;
3789                        sglq_entry->virt = lpfc_mbuf_alloc(phba, 0,
3790                                                           &sglq_entry->phys);
3791                        if (sglq_entry->virt == NULL) {
3792                                kfree(sglq_entry);
3793                                lpfc_printf_log(phba, KERN_ERR,
3794                                                LOG_TRACE_EVENT,
3795                                                "2563 Failure to allocate an "
3796                                                "ELS mbuf:%d\n", i);
3797                                rc = -ENOMEM;
3798                                goto out_free_mem;
3799                        }
3800                        sglq_entry->sgl = sglq_entry->virt;
3801                        memset(sglq_entry->sgl, 0, LPFC_BPL_SIZE);
3802                        sglq_entry->state = SGL_FREED;
3803                        list_add_tail(&sglq_entry->list, &els_sgl_list);
3804                }
3805                spin_lock_irq(&phba->sli4_hba.sgl_list_lock);
3806                list_splice_init(&els_sgl_list,
3807                                 &phba->sli4_hba.lpfc_els_sgl_list);
3808                spin_unlock_irq(&phba->sli4_hba.sgl_list_lock);
3809        } else if (els_xri_cnt < phba->sli4_hba.els_xri_cnt) {
3810                /* els xri-sgl shrunk */
3811                xri_cnt = phba->sli4_hba.els_xri_cnt - els_xri_cnt;
3812                lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3813                                "3158 ELS xri-sgl count decreased from "
3814                                "%d to %d\n", phba->sli4_hba.els_xri_cnt,
3815                                els_xri_cnt);
3816                spin_lock_irq(&phba->sli4_hba.sgl_list_lock);
3817                list_splice_init(&phba->sli4_hba.lpfc_els_sgl_list,
3818                                 &els_sgl_list);
3819                /* release extra els sgls from list */
3820                for (i = 0; i < xri_cnt; i++) {
3821                        list_remove_head(&els_sgl_list,
3822                                         sglq_entry, struct lpfc_sglq, list);
3823                        if (sglq_entry) {
3824                                __lpfc_mbuf_free(phba, sglq_entry->virt,
3825                                                 sglq_entry->phys);
3826                                kfree(sglq_entry);
3827                        }
3828                }
3829                list_splice_init(&els_sgl_list,
3830                                 &phba->sli4_hba.lpfc_els_sgl_list);
3831                spin_unlock_irq(&phba->sli4_hba.sgl_list_lock);
3832        } else
3833                lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3834                                "3163 ELS xri-sgl count unchanged: %d\n",
3835                                els_xri_cnt);
3836        phba->sli4_hba.els_xri_cnt = els_xri_cnt;
3837
3838        /* update xris to els sgls on the list */
3839        sglq_entry = NULL;
3840        sglq_entry_next = NULL;
3841        list_for_each_entry_safe(sglq_entry, sglq_entry_next,
3842                                 &phba->sli4_hba.lpfc_els_sgl_list, list) {
3843                lxri = lpfc_sli4_next_xritag(phba);
3844                if (lxri == NO_XRI) {
3845                        lpfc_printf_log(phba, KERN_ERR,
3846                                        LOG_TRACE_EVENT,
3847                                        "2400 Failed to allocate xri for "
3848                                        "ELS sgl\n");
3849                        rc = -ENOMEM;
3850                        goto out_free_mem;
3851                }
3852                sglq_entry->sli4_lxritag = lxri;
3853                sglq_entry->sli4_xritag = phba->sli4_hba.xri_ids[lxri];
3854        }
3855        return 0;
3856
3857out_free_mem:
3858        lpfc_free_els_sgl_list(phba);
3859        return rc;
3860}
3861
3862/**
3863 * lpfc_sli4_nvmet_sgl_update - update xri-sgl sizing and mapping
3864 * @phba: pointer to lpfc hba data structure.
3865 *
3866 * This routine first calculates the sizes of the current els and allocated
3867 * nvmet sgl lists, and then goes through all sgls to update the physical
3868 * XRIs assigned due to port function reset. During port initialization, the
3869 * current els and allocated nvmet sgl lists are 0s.
3870 *
3871 * Return codes
3872 *   0 - successful (for now, it always returns 0)
3873 **/
3874int
3875lpfc_sli4_nvmet_sgl_update(struct lpfc_hba *phba)
3876{
3877        struct lpfc_sglq *sglq_entry = NULL, *sglq_entry_next = NULL;
3878        uint16_t i, lxri, xri_cnt, els_xri_cnt;
3879        uint16_t nvmet_xri_cnt;
3880        LIST_HEAD(nvmet_sgl_list);
3881        int rc;
3882
3883        /*
3884         * update on pci function's nvmet xri-sgl list
3885         */
3886        els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
3887
3888        /* For NVMET, ALL remaining XRIs are dedicated for IO processing */
3889        nvmet_xri_cnt = phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt;
3890        if (nvmet_xri_cnt > phba->sli4_hba.nvmet_xri_cnt) {
3891                /* nvmet xri-sgl expanded */
3892                xri_cnt = nvmet_xri_cnt - phba->sli4_hba.nvmet_xri_cnt;
3893                lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3894                                "6302 NVMET xri-sgl cnt grew from %d to %d\n",
3895                                phba->sli4_hba.nvmet_xri_cnt, nvmet_xri_cnt);
3896                /* allocate the additional nvmet sgls */
3897                for (i = 0; i < xri_cnt; i++) {
3898                        sglq_entry = kzalloc(sizeof(struct lpfc_sglq),
3899                                             GFP_KERNEL);
3900                        if (sglq_entry == NULL) {
3901                                lpfc_printf_log(phba, KERN_ERR,
3902                                                LOG_TRACE_EVENT,
3903                                                "6303 Failure to allocate an "
3904                                                "NVMET sgl entry:%d\n", i);
3905                                rc = -ENOMEM;
3906                                goto out_free_mem;
3907                        }
3908                        sglq_entry->buff_type = NVMET_BUFF_TYPE;
3909                        sglq_entry->virt = lpfc_nvmet_buf_alloc(phba, 0,
3910                                                           &sglq_entry->phys);
3911                        if (sglq_entry->virt == NULL) {
3912                                kfree(sglq_entry);
3913                                lpfc_printf_log(phba, KERN_ERR,
3914                                                LOG_TRACE_EVENT,
3915                                                "6304 Failure to allocate an "
3916                                                "NVMET buf:%d\n", i);
3917                                rc = -ENOMEM;
3918                                goto out_free_mem;
3919                        }
3920                        sglq_entry->sgl = sglq_entry->virt;
3921                        memset(sglq_entry->sgl, 0,
3922                               phba->cfg_sg_dma_buf_size);
3923                        sglq_entry->state = SGL_FREED;
3924                        list_add_tail(&sglq_entry->list, &nvmet_sgl_list);
3925                }
3926                spin_lock_irq(&phba->hbalock);
3927                spin_lock(&phba->sli4_hba.sgl_list_lock);
3928                list_splice_init(&nvmet_sgl_list,
3929                                 &phba->sli4_hba.lpfc_nvmet_sgl_list);
3930                spin_unlock(&phba->sli4_hba.sgl_list_lock);
3931                spin_unlock_irq(&phba->hbalock);
3932        } else if (nvmet_xri_cnt < phba->sli4_hba.nvmet_xri_cnt) {
3933                /* nvmet xri-sgl shrunk */
3934                xri_cnt = phba->sli4_hba.nvmet_xri_cnt - nvmet_xri_cnt;
3935                lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3936                                "6305 NVMET xri-sgl count decreased from "
3937                                "%d to %d\n", phba->sli4_hba.nvmet_xri_cnt,
3938                                nvmet_xri_cnt);
3939                spin_lock_irq(&phba->hbalock);
3940                spin_lock(&phba->sli4_hba.sgl_list_lock);
3941                list_splice_init(&phba->sli4_hba.lpfc_nvmet_sgl_list,
3942                                 &nvmet_sgl_list);
3943                /* release extra nvmet sgls from list */
3944                for (i = 0; i < xri_cnt; i++) {
3945                        list_remove_head(&nvmet_sgl_list,
3946                                         sglq_entry, struct lpfc_sglq, list);
3947                        if (sglq_entry) {
3948                                lpfc_nvmet_buf_free(phba, sglq_entry->virt,
3949                                                    sglq_entry->phys);
3950                                kfree(sglq_entry);
3951                        }
3952                }
3953                list_splice_init(&nvmet_sgl_list,
3954                                 &phba->sli4_hba.lpfc_nvmet_sgl_list);
3955                spin_unlock(&phba->sli4_hba.sgl_list_lock);
3956                spin_unlock_irq(&phba->hbalock);
3957        } else
3958                lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3959                                "6306 NVMET xri-sgl count unchanged: %d\n",
3960                                nvmet_xri_cnt);
3961        phba->sli4_hba.nvmet_xri_cnt = nvmet_xri_cnt;
3962
3963        /* update xris to nvmet sgls on the list */
3964        sglq_entry = NULL;
3965        sglq_entry_next = NULL;
3966        list_for_each_entry_safe(sglq_entry, sglq_entry_next,
3967                                 &phba->sli4_hba.lpfc_nvmet_sgl_list, list) {
3968                lxri = lpfc_sli4_next_xritag(phba);
3969                if (lxri == NO_XRI) {
3970                        lpfc_printf_log(phba, KERN_ERR,
3971                                        LOG_TRACE_EVENT,
3972                                        "6307 Failed to allocate xri for "
3973                                        "NVMET sgl\n");
3974                        rc = -ENOMEM;
3975                        goto out_free_mem;
3976                }
3977                sglq_entry->sli4_lxritag = lxri;
3978                sglq_entry->sli4_xritag = phba->sli4_hba.xri_ids[lxri];
3979        }
3980        return 0;
3981
3982out_free_mem:
3983        lpfc_free_nvmet_sgl_list(phba);
3984        return rc;
3985}
3986
3987int
3988lpfc_io_buf_flush(struct lpfc_hba *phba, struct list_head *cbuf)
3989{
3990        LIST_HEAD(blist);
3991        struct lpfc_sli4_hdw_queue *qp;
3992        struct lpfc_io_buf *lpfc_cmd;
3993        struct lpfc_io_buf *iobufp, *prev_iobufp;
3994        int idx, cnt, xri, inserted;
3995
3996        cnt = 0;
3997        for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
3998                qp = &phba->sli4_hba.hdwq[idx];
3999                spin_lock_irq(&qp->io_buf_list_get_lock);
4000                spin_lock(&qp->io_buf_list_put_lock);
4001
4002                /* Take everything off the get and put lists */
4003                list_splice_init(&qp->lpfc_io_buf_list_get, &blist);
4004                list_splice(&qp->lpfc_io_buf_list_put, &blist);
4005                INIT_LIST_HEAD(&qp->lpfc_io_buf_list_get);
4006                INIT_LIST_HEAD(&qp->lpfc_io_buf_list_put);
4007                cnt += qp->get_io_bufs + qp->put_io_bufs;
4008                qp->get_io_bufs = 0;
4009                qp->put_io_bufs = 0;
4010                qp->total_io_bufs = 0;
4011                spin_unlock(&qp->io_buf_list_put_lock);
4012                spin_unlock_irq(&qp->io_buf_list_get_lock);
4013        }
4014
4015        /*
4016         * Take IO buffers off blist and put on cbuf sorted by XRI.
4017         * This is because POST_SGL takes a sequential range of XRIs
4018         * to post to the firmware.
4019         */
4020        for (idx = 0; idx < cnt; idx++) {
4021                list_remove_head(&blist, lpfc_cmd, struct lpfc_io_buf, list);
4022                if (!lpfc_cmd)
4023                        return cnt;
4024                if (idx == 0) {
4025                        list_add_tail(&lpfc_cmd->list, cbuf);
4026                        continue;
4027                }
4028                xri = lpfc_cmd->cur_iocbq.sli4_xritag;
4029                inserted = 0;
4030                prev_iobufp = NULL;
4031                list_for_each_entry(iobufp, cbuf, list) {
4032                        if (xri < iobufp->cur_iocbq.sli4_xritag) {
4033                                if (prev_iobufp)
4034                                        list_add(&lpfc_cmd->list,
4035                                                 &prev_iobufp->list);
4036                                else
4037                                        list_add(&lpfc_cmd->list, cbuf);
4038                                inserted = 1;
4039                                break;
4040                        }
4041                        prev_iobufp = iobufp;
4042                }
4043                if (!inserted)
4044                        list_add_tail(&lpfc_cmd->list, cbuf);
4045        }
4046        return cnt;
4047}
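
/*
 * The insertion loop above is a plain insertion sort into a linked list,
 * keyed on each buffer's XRI, because the SGL block post wants the XRIs
 * handed over in ascending, sequential order. A minimal userspace sketch
 * of the same technique ('struct buf' and its 'xri' field are simplified
 * stand-ins for struct lpfc_io_buf and cur_iocbq.sli4_xritag, not driver
 * types):
 *
 *	struct buf {
 *		int xri;
 *		struct buf *next;
 *	};
 *
 *	// Insert b into the singly linked list at *head, keeping
 *	// ascending xri order.
 *	static void insert_sorted(struct buf **head, struct buf *b)
 *	{
 *		struct buf **pp = head;
 *
 *		while (*pp && (*pp)->xri < b->xri)
 *			pp = &(*pp)->next;
 *		b->next = *pp;
 *		*pp = b;
 *	}
 */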
4048
4049int
4050lpfc_io_buf_replenish(struct lpfc_hba *phba, struct list_head *cbuf)
4051{
4052        struct lpfc_sli4_hdw_queue *qp;
4053        struct lpfc_io_buf *lpfc_cmd;
4054        int idx, cnt;
4055
4056        qp = phba->sli4_hba.hdwq;
4057        cnt = 0;
4058        while (!list_empty(cbuf)) {
4059                for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
4060                        list_remove_head(cbuf, lpfc_cmd,
4061                                         struct lpfc_io_buf, list);
4062                        if (!lpfc_cmd)
4063                                return cnt;
4064                        cnt++;
4065                        qp = &phba->sli4_hba.hdwq[idx];
4066                        lpfc_cmd->hdwq_no = idx;
4067                        lpfc_cmd->hdwq = qp;
4068                        lpfc_cmd->cur_iocbq.wqe_cmpl = NULL;
4069                        lpfc_cmd->cur_iocbq.iocb_cmpl = NULL;
4070                        spin_lock(&qp->io_buf_list_put_lock);
4071                        list_add_tail(&lpfc_cmd->list,
4072                                      &qp->lpfc_io_buf_list_put);
4073                        qp->put_io_bufs++;
4074                        qp->total_io_bufs++;
4075                        spin_unlock(&qp->io_buf_list_put_lock);
4076                }
4077        }
4078        return cnt;
4079}
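
/*
 * Note the queue selection above: buffers are dealt out one per hardware
 * queue per pass of the inner loop, so the put lists come back balanced.
 * The equivalent round-robin assignment in one line (nbuf and nqueue are
 * illustrative names, not driver symbols):
 *
 *	for (i = 0; i < nbuf; i++)
 *		queue_of[i] = i % nqueue;	// deal buffer i round-robin
 */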
4080
4081/**
4082 * lpfc_sli4_io_sgl_update - update xri-sgl sizing and mapping
4083 * @phba: pointer to lpfc hba data structure.
4084 *
4085 * This routine first calculates the maximum io xri count from the els
4086 * xri count, releases any io buffers beyond that limit, and then goes
4087 * through all io sgls to update the physical XRIs assigned due to port
4088 * function reset. During port initialization, the io sgl lists are empty.
4089 *
4090 * Return codes
4091 *   0 - successful; -ENOMEM - failed to allocate an XRI
4092 **/
4093int
4094lpfc_sli4_io_sgl_update(struct lpfc_hba *phba)
4095{
4096        struct lpfc_io_buf *lpfc_ncmd = NULL, *lpfc_ncmd_next = NULL;
4097        uint16_t i, lxri, els_xri_cnt;
4098        uint16_t io_xri_cnt, io_xri_max;
4099        LIST_HEAD(io_sgl_list);
4100        int rc, cnt;
4101
4102        /*
4103         * update on pci function's allocated nvme xri-sgl list
4104         */
4105
4106        /* maximum number of xris available for nvme buffers */
4107        els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
4108        io_xri_max = phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt;
4109        phba->sli4_hba.io_xri_max = io_xri_max;
4110
4111        lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4112                        "6074 Current allocated XRI sgl count:%d, "
4113                        "maximum XRI count:%d\n",
4114                        phba->sli4_hba.io_xri_cnt,
4115                        phba->sli4_hba.io_xri_max);
4116
4117        cnt = lpfc_io_buf_flush(phba, &io_sgl_list);
4118
4119        if (phba->sli4_hba.io_xri_cnt > phba->sli4_hba.io_xri_max) {
4120                /* max nvme xri shrunk below the allocated nvme buffers */
4121                io_xri_cnt = phba->sli4_hba.io_xri_cnt -
4122                                        phba->sli4_hba.io_xri_max;
4123                /* release the extra allocated nvme buffers */
4124                for (i = 0; i < io_xri_cnt; i++) {
4125                        list_remove_head(&io_sgl_list, lpfc_ncmd,
4126                                         struct lpfc_io_buf, list);
4127                        if (lpfc_ncmd) {
4128                                dma_pool_free(phba->lpfc_sg_dma_buf_pool,
4129                                              lpfc_ncmd->data,
4130                                              lpfc_ncmd->dma_handle);
4131                                kfree(lpfc_ncmd);
4132                        }
4133                }
4134                phba->sli4_hba.io_xri_cnt -= io_xri_cnt;
4135        }
4136
4137        /* update xris associated to remaining allocated nvme buffers */
4138        lpfc_ncmd = NULL;
4139        lpfc_ncmd_next = NULL;
4140        phba->sli4_hba.io_xri_cnt = cnt;
4141        list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
4142                                 &io_sgl_list, list) {
4143                lxri = lpfc_sli4_next_xritag(phba);
4144                if (lxri == NO_XRI) {
4145                        lpfc_printf_log(phba, KERN_ERR,
4146                                        LOG_TRACE_EVENT,
4147                                        "6075 Failed to allocate xri for "
4148                                        "nvme buffer\n");
4149                        rc = -ENOMEM;
4150                        goto out_free_mem;
4151                }
4152                lpfc_ncmd->cur_iocbq.sli4_lxritag = lxri;
4153                lpfc_ncmd->cur_iocbq.sli4_xritag = phba->sli4_hba.xri_ids[lxri];
4154        }
4155        cnt = lpfc_io_buf_replenish(phba, &io_sgl_list);
4156        return 0;
4157
4158out_free_mem:
4159        lpfc_io_free(phba);
4160        return rc;
4161}
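
/*
 * Taken together, the sequence above is: flush every io buffer off the
 * hdwq get/put lists in XRI-sorted order (lpfc_io_buf_flush), free any
 * excess beyond the new io_xri_max, assign a fresh XRI to each remaining
 * buffer, then deal the buffers back across the hardware queues
 * (lpfc_io_buf_replenish).
 */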
4162
4163/**
4164 * lpfc_new_io_buf - IO buffer allocator for HBA with SLI4 IF spec
4165 * @phba: Pointer to lpfc hba data structure.
4166 * @num_to_alloc: The requested number of buffers to allocate.
4167 *
4168 * This routine allocates nvme buffers for a device with SLI-4 interface
4169 * spec; an nvme buffer contains all the information needed to initiate
4170 * an I/O. After allocating up to @num_to_alloc IO buffers and putting
4171 * them on a list, it posts them to the port using SGL block post.
4172 *
4173 * Return codes:
4174 *   int - number of IO buffers that were allocated and posted.
4175 *   0 = failure, less than num_to_alloc is a partial failure.
4176 **/
4177int
4178lpfc_new_io_buf(struct lpfc_hba *phba, int num_to_alloc)
4179{
4180        struct lpfc_io_buf *lpfc_ncmd;
4181        struct lpfc_iocbq *pwqeq;
4182        uint16_t iotag, lxri = 0;
4183        int bcnt, num_posted;
4184        LIST_HEAD(prep_nblist);
4185        LIST_HEAD(post_nblist);
4186        LIST_HEAD(nvme_nblist);
4187
4188        phba->sli4_hba.io_xri_cnt = 0;
4189        for (bcnt = 0; bcnt < num_to_alloc; bcnt++) {
4190                lpfc_ncmd = kzalloc(sizeof(*lpfc_ncmd), GFP_KERNEL);
4191                if (!lpfc_ncmd)
4192                        break;
4193                /*
4194                 * Get memory from the pci pool to map the virt space to
4195                 * pci bus space for an I/O. The DMA buffer includes the
4196                 * number of SGEs necessary to support the sg_tablesize.
4197                 */
4198                lpfc_ncmd->data = dma_pool_zalloc(phba->lpfc_sg_dma_buf_pool,
4199                                                  GFP_KERNEL,
4200                                                  &lpfc_ncmd->dma_handle);
4201                if (!lpfc_ncmd->data) {
4202                        kfree(lpfc_ncmd);
4203                        break;
4204                }
4205
4206                if (phba->cfg_xpsgl && !phba->nvmet_support) {
4207                        INIT_LIST_HEAD(&lpfc_ncmd->dma_sgl_xtra_list);
4208                } else {
4209                        /*
4210                         * 4K Page alignment is CRITICAL to BlockGuard, double
4211                         * check to be sure.
4212                         */
4213                        if ((phba->sli3_options & LPFC_SLI3_BG_ENABLED) &&
4214                            (((unsigned long)(lpfc_ncmd->data) &
4215                            (unsigned long)(SLI4_PAGE_SIZE - 1)) != 0)) {
4216                                lpfc_printf_log(phba, KERN_ERR,
4217                                                LOG_TRACE_EVENT,
4218                                                "3369 Memory alignment err: "
4219                                                "addr=%lx\n",
4220                                                (unsigned long)lpfc_ncmd->data);
4221                                dma_pool_free(phba->lpfc_sg_dma_buf_pool,
4222                                              lpfc_ncmd->data,
4223                                              lpfc_ncmd->dma_handle);
4224                                kfree(lpfc_ncmd);
4225                                break;
4226                        }
4227                }
4228
4229                INIT_LIST_HEAD(&lpfc_ncmd->dma_cmd_rsp_list);
4230
4231                lxri = lpfc_sli4_next_xritag(phba);
4232                if (lxri == NO_XRI) {
4233                        dma_pool_free(phba->lpfc_sg_dma_buf_pool,
4234                                      lpfc_ncmd->data, lpfc_ncmd->dma_handle);
4235                        kfree(lpfc_ncmd);
4236                        break;
4237                }
4238                pwqeq = &lpfc_ncmd->cur_iocbq;
4239
4240                /* Allocate iotag for lpfc_ncmd->cur_iocbq. */
4241                iotag = lpfc_sli_next_iotag(phba, pwqeq);
4242                if (iotag == 0) {
4243                        dma_pool_free(phba->lpfc_sg_dma_buf_pool,
4244                                      lpfc_ncmd->data, lpfc_ncmd->dma_handle);
4245                        kfree(lpfc_ncmd);
4246                        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
4247                                        "6121 Failed to allocate IOTAG for"
4248                                        " XRI:0x%x\n", lxri);
4249                        lpfc_sli4_free_xri(phba, lxri);
4250                        break;
4251                }
4252                pwqeq->sli4_lxritag = lxri;
4253                pwqeq->sli4_xritag = phba->sli4_hba.xri_ids[lxri];
4254                pwqeq->context1 = lpfc_ncmd;
4255
4256                /* Initialize local short-hand pointers. */
4257                lpfc_ncmd->dma_sgl = lpfc_ncmd->data;
4258                lpfc_ncmd->dma_phys_sgl = lpfc_ncmd->dma_handle;
4259                lpfc_ncmd->cur_iocbq.context1 = lpfc_ncmd;
4260                spin_lock_init(&lpfc_ncmd->buf_lock);
4261
4262                /* add the nvme buffer to a post list */
4263                list_add_tail(&lpfc_ncmd->list, &post_nblist);
4264                phba->sli4_hba.io_xri_cnt++;
4265        }
4266        lpfc_printf_log(phba, KERN_INFO, LOG_NVME,
4267                        "6114 Allocate %d out of %d requested new NVME "
4268                        "buffers\n", bcnt, num_to_alloc);
4269
4270        /* post the list of nvme buffer sgls to port if available */
4271        if (!list_empty(&post_nblist))
4272                num_posted = lpfc_sli4_post_io_sgl_list(
4273                                phba, &post_nblist, bcnt);
4274        else
4275                num_posted = 0;
4276
4277        return num_posted;
4278}
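
/*
 * The BlockGuard branch above rejects any DMA buffer that does not start
 * on an SLI4 page boundary. Masking an address with (size - 1) tests
 * alignment for any power-of-two size; a small sketch (is_aligned is an
 * illustrative helper, not a driver symbol):
 *
 *	#include <stdbool.h>
 *	#include <stdint.h>
 *
 *	// True if addr is aligned to the power-of-two boundary 'align'.
 *	static bool is_aligned(uintptr_t addr, uintptr_t align)
 *	{
 *		return (addr & (align - 1)) == 0;
 *	}
 *
 *	// is_aligned(0x1000, 4096) -> true; is_aligned(0x1010, 4096) -> false
 */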
4279
4280static uint64_t
4281lpfc_get_wwpn(struct lpfc_hba *phba)
4282{
4283        uint64_t wwn;
4284        int rc;
4285        LPFC_MBOXQ_t *mboxq;
4286        MAILBOX_t *mb;
4287
4288        mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
4289                                                GFP_KERNEL);
4290        if (!mboxq)
4291                return (uint64_t)-1;
4292
4293        /* First get WWN of HBA instance */
4294        lpfc_read_nv(phba, mboxq);
4295        rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
4296        if (rc != MBX_SUCCESS) {
4297                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
4298                                "6019 Mailbox failed, mbxCmd x%x "
4299                                "READ_NV, mbxStatus x%x\n",
4300                                bf_get(lpfc_mqe_command, &mboxq->u.mqe),
4301                                bf_get(lpfc_mqe_status, &mboxq->u.mqe));
4302                mempool_free(mboxq, phba->mbox_mem_pool);
4303                return (uint64_t) -1;
4304        }
4305        mb = &mboxq->u.mb;
4306        memcpy(&wwn, (char *)mb->un.varRDnvp.portname, sizeof(uint64_t));
4307        /* wwn is WWPN of HBA instance */
4308        mempool_free(mboxq, phba->mbox_mem_pool);
4309        if (phba->sli_rev == LPFC_SLI_REV4)
4310                return be64_to_cpu(wwn);
4311        else
4312                return rol64(wwn, 32);
4313}
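
/*
 * On the SLI4 path above, be64_to_cpu() converts the big-endian WWPN
 * straight to host order; on older revisions the driver instead swaps
 * the two 32-bit halves of the value, which is exactly what a 64-bit
 * rotate by 32 does. A sketch of that rotation (rol64_sketch is an
 * illustrative stand-in for the kernel's rol64()):
 *
 *	#include <stdint.h>
 *
 *	// Rotate a 64-bit value left by n bits (0 < n < 64).
 *	static uint64_t rol64_sketch(uint64_t v, unsigned int n)
 *	{
 *		return (v << n) | (v >> (64 - n));
 *	}
 *
 *	// rol64_sketch(0x1111111122222222ULL, 32) == 0x2222222211111111ULL
 */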
4314
4315/**
4316 * lpfc_vmid_res_alloc - Allocates resources for VMID
4317 * @phba: pointer to lpfc hba data structure.
4318 * @vport: pointer to vport data structure
4319 *
4320 * This routine allocates the resources needed for VMID support.
4321 *
4322 * Return codes
4323 *      0 on Success
4324 *      Non-0 on Failure
4325 */
4326static int
4327lpfc_vmid_res_alloc(struct lpfc_hba *phba, struct lpfc_vport *vport)
4328{
4329        /* VMID feature is supported only on SLI4 */
4330        if (phba->sli_rev == LPFC_SLI_REV3) {
4331                phba->cfg_vmid_app_header = 0;
4332                phba->cfg_vmid_priority_tagging = 0;
4333        }
4334
4335        if (lpfc_is_vmid_enabled(phba)) {
4336                vport->vmid =
4337                    kcalloc(phba->cfg_max_vmid, sizeof(struct lpfc_vmid),
4338                            GFP_KERNEL);
4339                if (!vport->vmid)
4340                        return -ENOMEM;
4341
4342                rwlock_init(&vport->vmid_lock);
4343
4344                /* Set the VMID parameters for the vport */
4345                vport->vmid_priority_tagging = phba->cfg_vmid_priority_tagging;
4346                vport->vmid_inactivity_timeout =
4347                    phba->cfg_vmid_inactivity_timeout;
4348                vport->max_vmid = phba->cfg_max_vmid;
4349                vport->cur_vmid_cnt = 0;
4350
4351                vport->vmid_priority_range = bitmap_zalloc
4352                        (LPFC_VMID_MAX_PRIORITY_RANGE, GFP_KERNEL);
4353
4354                if (!vport->vmid_priority_range) {
4355                        kfree(vport->vmid);
4356                        return -ENOMEM;
4357                }
4358
4359                hash_init(vport->hash_table);
4360        }
4361        return 0;
4362}
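
/*
 * Note the unwind above: if the second allocation (vmid_priority_range)
 * fails, the first (vmid) is freed before returning, so the caller never
 * sees a half-initialized vport. The same acquire/unwind shape in the
 * usual kernel goto style (names are illustrative):
 *
 *	a = kzalloc(sizeof(*a), GFP_KERNEL);
 *	if (!a)
 *		return -ENOMEM;
 *	b = kzalloc(sizeof(*b), GFP_KERNEL);
 *	if (!b)
 *		goto free_a;
 *	return 0;
 *
 * free_a:
 *	kfree(a);
 *	return -ENOMEM;
 */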
4363
4364/**
4365 * lpfc_create_port - Create an FC port
4366 * @phba: pointer to lpfc hba data structure.
4367 * @instance: a unique integer ID to this FC port.
4368 * @dev: pointer to the device data structure.
4369 *
4370 * This routine creates an FC port for the upper layer protocol. The FC port
4371 * can be created on top of either a physical port or a virtual port provided
4372 * by the HBA. This routine also allocates a SCSI host data structure (shost)
4373 * and associates it with the newly created FC port before adding the shost
4374 * to the SCSI layer.
4375 *
4376 * Return codes
4377 *   @vport - pointer to the virtual N_Port data structure.
4378 *   NULL - port create failed.
4379 **/
4380struct lpfc_vport *
4381lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
4382{
4383        struct lpfc_vport *vport;
4384        struct Scsi_Host  *shost = NULL;
4385        struct scsi_host_template *template;
4386        int error = 0;
4387        int i;
4388        uint64_t wwn;
4389        bool use_no_reset_hba = false;
4390        int rc;
4391
4392        if (lpfc_no_hba_reset_cnt) {
4393                if (phba->sli_rev < LPFC_SLI_REV4 &&
4394                    dev == &phba->pcidev->dev) {
4395                        /* Reset the port first */
4396                        lpfc_sli_brdrestart(phba);
4397                        rc = lpfc_sli_chipset_init(phba);
4398                        if (rc)
4399                                return NULL;
4400                }
4401                wwn = lpfc_get_wwpn(phba);
4402        }
4403
4404        for (i = 0; i < lpfc_no_hba_reset_cnt; i++) {
4405                if (wwn == lpfc_no_hba_reset[i]) {
4406                        lpfc_printf_log(phba, KERN_ERR,
4407                                        LOG_TRACE_EVENT,
4408                                        "6020 Setting use_no_reset port=%llx\n",
4409                                        wwn);
4410                        use_no_reset_hba = true;
4411                        break;
4412                }
4413        }
4414
4415        /* Seed template for SCSI host registration */
4416        if (dev == &phba->pcidev->dev) {
4417                template = &phba->port_template;
4418
4419                if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) {
4420                        /* Seed physical port template */
4421                        memcpy(template, &lpfc_template, sizeof(*template));
4422
4423                        if (use_no_reset_hba)
4424                                /* template is for a no reset SCSI Host */
4425                                template->eh_host_reset_handler = NULL;
4426
4427                        /* Template for all vports this physical port creates */
4428                        memcpy(&phba->vport_template, &lpfc_template,
4429                               sizeof(*template));
4430                        phba->vport_template.shost_attrs = lpfc_vport_attrs;
4431                        phba->vport_template.eh_bus_reset_handler = NULL;
4432                        phba->vport_template.eh_host_reset_handler = NULL;
4433                        phba->vport_template.vendor_id = 0;
4434
4435                        /* Initialize the host templates with updated value */
4436                        if (phba->sli_rev == LPFC_SLI_REV4) {
4437                                template->sg_tablesize = phba->cfg_scsi_seg_cnt;
4438                                phba->vport_template.sg_tablesize =
4439                                        phba->cfg_scsi_seg_cnt;
4440                        } else {
4441                                template->sg_tablesize = phba->cfg_sg_seg_cnt;
4442                                phba->vport_template.sg_tablesize =
4443                                        phba->cfg_sg_seg_cnt;
4444                        }
4445
4446                } else {
4447                        /* NVMET is for physical port only */
4448                        memcpy(template, &lpfc_template_nvme,
4449                               sizeof(*template));
4450                }
4451        } else {
4452                template = &phba->vport_template;
4453        }
4454
4455        shost = scsi_host_alloc(template, sizeof(struct lpfc_vport));
4456        if (!shost)
4457                goto out;
4458
4459        vport = (struct lpfc_vport *) shost->hostdata;
4460        vport->phba = phba;
4461        vport->load_flag |= FC_LOADING;
4462        vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
4463        vport->fc_rscn_flush = 0;
4464        lpfc_get_vport_cfgparam(vport);
4465
4466        /* Adjust value in vport */
4467        vport->cfg_enable_fc4_type = phba->cfg_enable_fc4_type;
4468
4469        shost->unique_id = instance;
4470        shost->max_id = LPFC_MAX_TARGET;
4471        shost->max_lun = vport->cfg_max_luns;
4472        shost->this_id = -1;
4473        shost->max_cmd_len = 16;
4474
4475        if (phba->sli_rev == LPFC_SLI_REV4) {
4476                if (!phba->cfg_fcp_mq_threshold ||
4477                    phba->cfg_fcp_mq_threshold > phba->cfg_hdw_queue)
4478                        phba->cfg_fcp_mq_threshold = phba->cfg_hdw_queue;
4479
4480                shost->nr_hw_queues = min_t(int, 2 * num_possible_nodes(),
4481                                            phba->cfg_fcp_mq_threshold);
4482
4483                shost->dma_boundary =
4484                        phba->sli4_hba.pc_sli4_params.sge_supp_len-1;
4485
4486                if (phba->cfg_xpsgl && !phba->nvmet_support)
4487                        shost->sg_tablesize = LPFC_MAX_SG_TABLESIZE;
4488                else
4489                        shost->sg_tablesize = phba->cfg_scsi_seg_cnt;
4490        } else
4491                /* SLI-3 has a limited number of hardware queues (3),
4492                 * thus there is only one for FCP processing.
4493                 */
4494                shost->nr_hw_queues = 1;
4495
4496        /*
4497         * Set initial can_queue value since 0 is no longer supported and
4498         * scsi_add_host will fail. This will be adjusted later based on the
4499         * max xri value determined in hba setup.
4500         */
4501        shost->can_queue = phba->cfg_hba_queue_depth - 10;
4502        if (dev != &phba->pcidev->dev) {
4503                shost->transportt = lpfc_vport_transport_template;
4504                vport->port_type = LPFC_NPIV_PORT;
4505        } else {
4506                shost->transportt = lpfc_transport_template;
4507                vport->port_type = LPFC_PHYSICAL_PORT;
4508        }
4509
4510        lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP,
4511                        "9081 CreatePort TMPLATE type %x TBLsize %d "
4512                        "SEGcnt %d/%d\n",
4513                        vport->port_type, shost->sg_tablesize,
4514                        phba->cfg_scsi_seg_cnt, phba->cfg_sg_seg_cnt);
4515
4516        /* Allocate the resources for VMID */
4517        rc = lpfc_vmid_res_alloc(phba, vport);
4518
4519        if (rc)
4520                goto out;
4521
4522        /* Initialize all internally managed lists. */
4523        INIT_LIST_HEAD(&vport->fc_nodes);
4524        INIT_LIST_HEAD(&vport->rcv_buffer_list);
4525        spin_lock_init(&vport->work_port_lock);
4526
4527        timer_setup(&vport->fc_disctmo, lpfc_disc_timeout, 0);
4528
4529        timer_setup(&vport->els_tmofunc, lpfc_els_timeout, 0);
4530
4531        timer_setup(&vport->delayed_disc_tmo, lpfc_delayed_disc_tmo, 0);
4532
4533        if (phba->sli3_options & LPFC_SLI3_BG_ENABLED)
4534                lpfc_setup_bg(phba, shost);
4535
4536        error = scsi_add_host_with_dma(shost, dev, &phba->pcidev->dev);
4537        if (error)
4538                goto out_put_shost;
4539
4540        spin_lock_irq(&phba->port_list_lock);
4541        list_add_tail(&vport->listentry, &phba->port_list);
4542        spin_unlock_irq(&phba->port_list_lock);
4543        return vport;
4544
4545out_put_shost:
4546        kfree(vport->vmid);
4547        bitmap_free(vport->vmid_priority_range);
4548        scsi_host_put(shost);
4549out:
4550        return NULL;
4551}
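
/*
 * A worked example of the nr_hw_queues computation in lpfc_create_port:
 * an SLI4 host exposes min(2 * num_possible_nodes(), cfg_fcp_mq_threshold)
 * queues, with the threshold itself first clamped to cfg_hdw_queue.
 *
 *	// 2 possible NUMA nodes, threshold 8:  min(4, 8)  -> 4 hw queues
 *	// 1 possible NUMA node, threshold 12:  min(2, 12) -> 2 hw queues
 */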
4552
4553/**
4554 * destroy_port -  destroy an FC port
4555 * @vport: pointer to an lpfc virtual N_Port data structure.
4556 *
4557 * This routine destroys an FC port from the upper layer protocol. All the
4558 * resources associated with the port are released.
4559 **/
4560void
4561destroy_port(struct lpfc_vport *vport)
4562{
4563        struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
4564        struct lpfc_hba  *phba = vport->phba;
4565
4566        lpfc_debugfs_terminate(vport);
4567        fc_remove_host(shost);
4568        scsi_remove_host(shost);
4569
4570        spin_lock_irq(&phba->port_list_lock);
4571        list_del_init(&vport->listentry);
4572        spin_unlock_irq(&phba->port_list_lock);
4573
4574        lpfc_cleanup(vport);
4575        return;
4576}
4577
4578/**
4579 * lpfc_get_instance - Get a unique integer ID
4580 *
4581 * This routine allocates a unique integer ID from lpfc_hba_index pool. It
4582 * uses the kernel idr facility to perform the task.
4583 *
4584 * Return codes:
4585 *   instance - a unique integer ID allocated as the new instance.
4586 *   -1 - lpfc get instance failed.
4587 **/
4588int
4589lpfc_get_instance(void)
4590{
4591        int ret;
4592
4593        ret = idr_alloc(&lpfc_hba_index, NULL, 0, 0, GFP_KERNEL);
4594        return ret < 0 ? -1 : ret;
4595}
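
/*
 * idr_alloc() with end == 0 means "no upper bound", so the call above
 * simply hands out the lowest free non-negative ID. A minimal sketch of
 * the usual allocate/free pairing (my_idr is an illustrative name):
 *
 *	static DEFINE_IDR(my_idr);
 *
 *	int id = idr_alloc(&my_idr, NULL, 0, 0, GFP_KERNEL);
 *	if (id >= 0)
 *		idr_remove(&my_idr, id);	// return the id to the pool
 */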
4596
4597/**
4598 * lpfc_scan_finished - method for SCSI layer to detect whether scan is done
4599 * @shost: pointer to SCSI host data structure.
4600 * @time: elapsed time of the scan in jiffies.
4601 *
4602 * This routine is called by the SCSI layer with a SCSI host to determine
4603 * whether the host scan is finished.
4604 *
4605 * Note: there is no scan_start function as adapter initialization will have
4606 * asynchronously kicked off the link initialization.
4607 *
4608 * Return codes
4609 *   0 - SCSI host scan is not over yet.
4610 *   1 - SCSI host scan is over.
4611 **/
4612int lpfc_scan_finished(struct Scsi_Host *shost, unsigned long time)
4613{
4614        struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
4615        struct lpfc_hba   *phba = vport->phba;
4616        int stat = 0;
4617
4618        spin_lock_irq(shost->host_lock);
4619
4620        if (vport->load_flag & FC_UNLOADING) {
4621                stat = 1;
4622                goto finished;
4623        }
4624        if (time >= msecs_to_jiffies(30 * 1000)) {
4625                lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4626                                "0461 Scanning longer than 30 "
4627                                "seconds.  Continuing initialization\n");
4628                stat = 1;
4629                goto finished;
4630        }
4631        if (time >= msecs_to_jiffies(15 * 1000) &&
4632            phba->link_state <= LPFC_LINK_DOWN) {
4633                lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4634                                "0465 Link down longer than 15 "
4635                                "seconds.  Continuing initialization\n");
4636                stat = 1;
4637                goto finished;
4638        }
4639
4640        if (vport->port_state != LPFC_VPORT_READY)
4641                goto finished;
4642        if (vport->num_disc_nodes || vport->fc_prli_sent)
4643                goto finished;
4644        if (vport->fc_map_cnt == 0 && time < msecs_to_jiffies(2 * 1000))
4645                goto finished;
4646        if ((phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) != 0)
4647                goto finished;
4648
4649        stat = 1;
4650
4651finished:
4652        spin_unlock_irq(shost->host_lock);
4653        return stat;
4654}
4655
4656static void lpfc_host_supported_speeds_set(struct Scsi_Host *shost)
4657{
4658        struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
4659        struct lpfc_hba   *phba = vport->phba;
4660
4661        fc_host_supported_speeds(shost) = 0;
4662        /*
4663         * Avoid reporting supported link speed for FCoE as it can't be
4664         * controlled via FCoE.
4665         */
4666        if (phba->hba_flag & HBA_FCOE_MODE)
4667                return;
4668
4669        if (phba->lmt & LMT_128Gb)
4670                fc_host_supported_speeds(shost) |= FC_PORTSPEED_128GBIT;
4671        if (phba->lmt & LMT_64Gb)
4672                fc_host_supported_speeds(shost) |= FC_PORTSPEED_64GBIT;
4673        if (phba->lmt & LMT_32Gb)
4674                fc_host_supported_speeds(shost) |= FC_PORTSPEED_32GBIT;
4675        if (phba->lmt & LMT_16Gb)
4676                fc_host_supported_speeds(shost) |= FC_PORTSPEED_16GBIT;
4677        if (phba->lmt & LMT_10Gb)
4678                fc_host_supported_speeds(shost) |= FC_PORTSPEED_10GBIT;
4679        if (phba->lmt & LMT_8Gb)
4680                fc_host_supported_speeds(shost) |= FC_PORTSPEED_8GBIT;
4681        if (phba->lmt & LMT_4Gb)
4682                fc_host_supported_speeds(shost) |= FC_PORTSPEED_4GBIT;
4683        if (phba->lmt & LMT_2Gb)
4684                fc_host_supported_speeds(shost) |= FC_PORTSPEED_2GBIT;
4685        if (phba->lmt & LMT_1Gb)
4686                fc_host_supported_speeds(shost) |= FC_PORTSPEED_1GBIT;
4687}
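
/*
 * The chain of tests above builds the supported-speeds mask one bit at a
 * time. The same mapping could be written table-driven; a sketch (the
 * lmt_map table is illustrative, the LMT_* and FC_PORTSPEED_* flags are
 * the real ones used above):
 *
 *	static const struct { uint32_t lmt; uint32_t portspeed; } lmt_map[] = {
 *		{ LMT_128Gb, FC_PORTSPEED_128GBIT },
 *		{ LMT_64Gb,  FC_PORTSPEED_64GBIT },
 *		// ... one entry per remaining LMT_* bit ...
 *		{ LMT_1Gb,   FC_PORTSPEED_1GBIT },
 *	};
 *
 *	for (i = 0; i < ARRAY_SIZE(lmt_map); i++)
 *		if (phba->lmt & lmt_map[i].lmt)
 *			fc_host_supported_speeds(shost) |= lmt_map[i].portspeed;
 */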
4688
4689/**
4690 * lpfc_host_attrib_init - Initialize SCSI host attributes on a FC port
4691 * @shost: pointer to SCSI host data structure.
4692 *
4693 * This routine initializes a given SCSI host attributes on a FC port. The
4694 * SCSI host can be either on top of a physical port or a virtual port.
4695 **/
4696void lpfc_host_attrib_init(struct Scsi_Host *shost)
4697{
4698        struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
4699        struct lpfc_hba   *phba = vport->phba;
4700        /*
4701         * Set fixed host attributes.  Must be done after lpfc_sli_hba_setup().
4702         */
4703
4704        fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
4705        fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
4706        fc_host_supported_classes(shost) = FC_COS_CLASS3;
4707
4708        memset(fc_host_supported_fc4s(shost), 0,
4709               sizeof(fc_host_supported_fc4s(shost)));
4710        fc_host_supported_fc4s(shost)[2] = 1;
4711        fc_host_supported_fc4s(shost)[7] = 1;
4712
4713        lpfc_vport_symbolic_node_name(vport, fc_host_symbolic_name(shost),
4714                                 sizeof fc_host_symbolic_name(shost));
4715
4716        lpfc_host_supported_speeds_set(shost);
4717
4718        fc_host_maxframe_size(shost) =
4719                (((uint32_t) vport->fc_sparam.cmn.bbRcvSizeMsb & 0x0F) << 8) |
4720                (uint32_t) vport->fc_sparam.cmn.bbRcvSizeLsb;
4721
4722        fc_host_dev_loss_tmo(shost) = vport->cfg_devloss_tmo;
4723
4724        /* This value is also unchanging */
4725        memset(fc_host_active_fc4s(shost), 0,
4726               sizeof(fc_host_active_fc4s(shost)));
4727        fc_host_active_fc4s(shost)[2] = 1;
4728        fc_host_active_fc4s(shost)[7] = 1;
4729
4730        fc_host_max_npiv_vports(shost) = phba->max_vpi;
4731        spin_lock_irq(shost->host_lock);
4732        vport->load_flag &= ~FC_LOADING;
4733        spin_unlock_irq(shost->host_lock);
4734}
4735
4736/**
4737 * lpfc_stop_port_s3 - Stop SLI3 device port
4738 * @phba: pointer to lpfc hba data structure.
4739 *
4740 * This routine is invoked to stop an SLI3 device port; it stops the device
4741 * from generating interrupts and stops the device driver's timers for the
4742 * device.
4743 **/
4744static void
4745lpfc_stop_port_s3(struct lpfc_hba *phba)
4746{
4747        /* Clear all interrupt enable conditions */
4748        writel(0, phba->HCregaddr);
4749        readl(phba->HCregaddr); /* flush */
4750        /* Clear all pending interrupts */
4751        writel(0xffffffff, phba->HAregaddr);
4752        readl(phba->HAregaddr); /* flush */
4753
4754        /* Reset some HBA SLI setup states */
4755        lpfc_stop_hba_timers(phba);
4756        phba->pport->work_port_events = 0;
4757}
4758
4759/**
4760 * lpfc_stop_port_s4 - Stop SLI4 device port
4761 * @phba: pointer to lpfc hba data structure.
4762 *
4763 * This routine is invoked to stop an SLI4 device port; it stops the device
4764 * from generating interrupts and stops the device driver's timers for the
4765 * device.
4766 **/
4767static void
4768lpfc_stop_port_s4(struct lpfc_hba *phba)
4769{
4770        /* Reset some HBA SLI4 setup states */
4771        lpfc_stop_hba_timers(phba);
4772        if (phba->pport)
4773                phba->pport->work_port_events = 0;
4774        phba->sli4_hba.intr_enable = 0;
4775}
4776
4777/**
4778 * lpfc_stop_port - Wrapper function for stopping hba port
4779 * @phba: Pointer to HBA context object.
4780 *
4781 * This routine wraps the actual SLI3 or SLI4 hba stop port routine via
4782 * the API jump table function pointer in the lpfc_hba struct.
4783 **/
4784void
4785lpfc_stop_port(struct lpfc_hba *phba)
4786{
4787        phba->lpfc_stop_port(phba);
4788
4789        if (phba->wq)
4790                flush_workqueue(phba->wq);
4791}
4792
4793/**
4794 * lpfc_fcf_redisc_wait_start_timer - Start fcf rediscover wait timer
4795 * @phba: Pointer to hba for which this call is being executed.
4796 *
4797 * This routine starts the timer waiting for the FCF rediscovery to complete.
4798 **/
4799void
4800lpfc_fcf_redisc_wait_start_timer(struct lpfc_hba *phba)
4801{
4802        unsigned long fcf_redisc_wait_tmo =
4803                (jiffies + msecs_to_jiffies(LPFC_FCF_REDISCOVER_WAIT_TMO));
4804        /* Start fcf rediscovery wait period timer */
4805        mod_timer(&phba->fcf.redisc_wait, fcf_redisc_wait_tmo);
4806        spin_lock_irq(&phba->hbalock);
4807        /* Allow action on a new fcf asynchronous event */
4808        phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_SCAN_DONE);
4809        /* Mark the FCF rediscovery pending state */
4810        phba->fcf.fcf_flag |= FCF_REDISC_PEND;
4811        spin_unlock_irq(&phba->hbalock);
4812}
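
/*
 * mod_timer() arms the timer at an absolute jiffies value, so the expiry
 * above is computed as "now + timeout", e.g.:
 *
 *	mod_timer(&timer, jiffies + msecs_to_jiffies(2000));	// fire in ~2s
 */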
4813
4814/**
4815 * lpfc_sli4_fcf_redisc_wait_tmo - FCF table rediscover wait timeout
4816 * @t: Timer context used to obtain the pointer to lpfc hba data structure.
4817 *
4818 * This routine is invoked when the wait for FCF table rediscovery has
4819 * timed out. If new FCF record(s) have been discovered during the wait
4820 * period, a new FCF event shall be added to the FCOE async event list,
4821 * and the worker thread shall be woken up for processing from the
4822 * worker thread context.
4823 **/
4824static void
4825lpfc_sli4_fcf_redisc_wait_tmo(struct timer_list *t)
4826{
4827        struct lpfc_hba *phba = from_timer(phba, t, fcf.redisc_wait);
4828
4829        /* Don't send FCF rediscovery event if timer cancelled */
4830        spin_lock_irq(&phba->hbalock);
4831        if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND)) {
4832                spin_unlock_irq(&phba->hbalock);
4833                return;
4834        }
4835        /* Clear FCF rediscovery timer pending flag */
4836        phba->fcf.fcf_flag &= ~FCF_REDISC_PEND;
4837        /* FCF rediscovery event to worker thread */
4838        phba->fcf.fcf_flag |= FCF_REDISC_EVT;
4839        spin_unlock_irq(&phba->hbalock);
4840        lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
4841                        "2776 FCF rediscover quiescent timer expired\n");
4842        /* wake up worker thread */
4843        lpfc_worker_wake_up(phba);
4844}
4845
4846/**
4847 * lpfc_vmid_poll - VMID timeout detection
4848 * @t: Timer context used to obtain the pointer to lpfc hba data structure.
4849 *
4850 * This routine is invoked when there has been no I/O from a VM for the
4851 * specified amount of time. When this situation is detected, the VMID has to be
4852 * deregistered from the switch and all the local resources freed. The VMID
4853 * will be reassigned to the VM once the I/O begins.
4854 **/
4855static void
4856lpfc_vmid_poll(struct timer_list *t)
4857{
4858        struct lpfc_hba *phba = from_timer(phba, t, inactive_vmid_poll);
4859        u32 wake_up = 0;
4860
4861        /* check if there is a need to issue QFPA */
4862        if (phba->pport->vmid_priority_tagging) {
4863                wake_up = 1;
4864                phba->pport->work_port_events |= WORKER_CHECK_VMID_ISSUE_QFPA;
4865        }
4866
4867        /* Is the vmid inactivity timer enabled */
4868        if (phba->pport->vmid_inactivity_timeout ||
4869            phba->pport->load_flag & FC_DEREGISTER_ALL_APP_ID) {
4870                wake_up = 1;
4871                phba->pport->work_port_events |= WORKER_CHECK_INACTIVE_VMID;
4872        }
4873
4874        if (wake_up)
4875                lpfc_worker_wake_up(phba);
4876
4877        /* restart the timer for the next iteration */
4878        mod_timer(&phba->inactive_vmid_poll, jiffies + msecs_to_jiffies(1000 *
4879                                                        LPFC_VMID_TIMER));
4880}
4881
4882/**
4883 * lpfc_sli4_parse_latt_fault - Parse sli4 link-attention link fault code
4884 * @phba: pointer to lpfc hba data structure.
4885 * @acqe_link: pointer to the async link completion queue entry.
4886 *
4887 * This routine is to parse the SLI4 link-attention link fault code.
4888 **/
4889static void
4890lpfc_sli4_parse_latt_fault(struct lpfc_hba *phba,
4891                           struct lpfc_acqe_link *acqe_link)
4892{
4893        switch (bf_get(lpfc_acqe_link_fault, acqe_link)) {
4894        case LPFC_ASYNC_LINK_FAULT_NONE:
4895        case LPFC_ASYNC_LINK_FAULT_LOCAL:
4896        case LPFC_ASYNC_LINK_FAULT_REMOTE:
4897        case LPFC_ASYNC_LINK_FAULT_LR_LRR:
4898                break;
4899        default:
4900                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
4901                                "0398 Unknown link fault code: x%x\n",
4902                                bf_get(lpfc_acqe_link_fault, acqe_link));
4903                break;
4904        }
4905}
4906
4907/**
4908 * lpfc_sli4_parse_latt_type - Parse sli4 link attention type
4909 * @phba: pointer to lpfc hba data structure.
4910 * @acqe_link: pointer to the async link completion queue entry.
4911 *
4912 * This routine is to parse the SLI4 link attention type and translate it
4913 * into the base driver's link attention type coding.
4914 *
4915 * Return: Link attention type in terms of base driver's coding.
4916 **/
4917static uint8_t
4918lpfc_sli4_parse_latt_type(struct lpfc_hba *phba,
4919                          struct lpfc_acqe_link *acqe_link)
4920{
4921        uint8_t att_type;
4922
4923        switch (bf_get(lpfc_acqe_link_status, acqe_link)) {
4924        case LPFC_ASYNC_LINK_STATUS_DOWN:
4925        case LPFC_ASYNC_LINK_STATUS_LOGICAL_DOWN:
4926                att_type = LPFC_ATT_LINK_DOWN;
4927                break;
4928        case LPFC_ASYNC_LINK_STATUS_UP:
4929                /* Ignore physical link up events - wait for logical link up */
4930                att_type = LPFC_ATT_RESERVED;
4931                break;
4932        case LPFC_ASYNC_LINK_STATUS_LOGICAL_UP:
4933                att_type = LPFC_ATT_LINK_UP;
4934                break;
4935        default:
4936                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
4937                                "0399 Invalid link attention type: x%x\n",
4938                                bf_get(lpfc_acqe_link_status, acqe_link));
4939                att_type = LPFC_ATT_RESERVED;
4940                break;
4941        }
4942        return att_type;
4943}
4944
4945/**
4946 * lpfc_sli_port_speed_get - Get the link speed of an FC port
4947 * @phba: pointer to lpfc hba data structure.
4948 *
4949 * This routine returns an FC port's link speed in Mbps.
4950 *
4951 * Return: link speed in terms of Mbps.
4952 **/
4953uint32_t
4954lpfc_sli_port_speed_get(struct lpfc_hba *phba)
4955{
4956        uint32_t link_speed;
4957
4958        if (!lpfc_is_link_up(phba))
4959                return 0;
4960
4961        if (phba->sli_rev <= LPFC_SLI_REV3) {
4962                switch (phba->fc_linkspeed) {
4963                case LPFC_LINK_SPEED_1GHZ:
4964                        link_speed = 1000;
4965                        break;
4966                case LPFC_LINK_SPEED_2GHZ:
4967                        link_speed = 2000;
4968                        break;
4969                case LPFC_LINK_SPEED_4GHZ:
4970                        link_speed = 4000;
4971                        break;
4972                case LPFC_LINK_SPEED_8GHZ:
4973                        link_speed = 8000;
4974                        break;
4975                case LPFC_LINK_SPEED_10GHZ:
4976                        link_speed = 10000;
4977                        break;
4978                case LPFC_LINK_SPEED_16GHZ:
4979                        link_speed = 16000;
4980                        break;
4981                default:
4982                        link_speed = 0;
4983                }
4984        } else {
4985                if (phba->sli4_hba.link_state.logical_speed)
4986                        link_speed =
4987                              phba->sli4_hba.link_state.logical_speed;
4988                else
4989                        link_speed = phba->sli4_hba.link_state.speed;
4990        }
4991        return link_speed;
4992}
4993
4994/**
4995 * lpfc_sli4_port_speed_parse - Parse async evt link speed code to link speed
4996 * @phba: pointer to lpfc hba data structure.
4997 * @evt_code: asynchronous event code.
4998 * @speed_code: asynchronous event link speed code.
4999 *
5000 * This routine parses the given SLI4 async event link speed code into a
5001 * link speed value in Mbps.
5002 *
5003 * Return: link speed in terms of Mbps.
5004 **/
5005static uint32_t
5006lpfc_sli4_port_speed_parse(struct lpfc_hba *phba, uint32_t evt_code,
5007                           uint8_t speed_code)
5008{
5009        uint32_t port_speed;
5010
5011        switch (evt_code) {
5012        case LPFC_TRAILER_CODE_LINK:
5013                switch (speed_code) {
5014                case LPFC_ASYNC_LINK_SPEED_ZERO:
5015                        port_speed = 0;
5016                        break;
5017                case LPFC_ASYNC_LINK_SPEED_10MBPS:
5018                        port_speed = 10;
5019                        break;
5020                case LPFC_ASYNC_LINK_SPEED_100MBPS:
5021                        port_speed = 100;
5022                        break;
5023                case LPFC_ASYNC_LINK_SPEED_1GBPS:
5024                        port_speed = 1000;
5025                        break;
5026                case LPFC_ASYNC_LINK_SPEED_10GBPS:
5027                        port_speed = 10000;
5028                        break;
5029                case LPFC_ASYNC_LINK_SPEED_20GBPS:
5030                        port_speed = 20000;
5031                        break;
5032                case LPFC_ASYNC_LINK_SPEED_25GBPS:
5033                        port_speed = 25000;
5034                        break;
5035                case LPFC_ASYNC_LINK_SPEED_40GBPS:
5036                        port_speed = 40000;
5037                        break;
5038                case LPFC_ASYNC_LINK_SPEED_100GBPS:
5039                        port_speed = 100000;
5040                        break;
5041                default:
5042                        port_speed = 0;
5043                }
5044                break;
5045        case LPFC_TRAILER_CODE_FC:
5046                switch (speed_code) {
5047                case LPFC_FC_LA_SPEED_UNKNOWN:
5048                        port_speed = 0;
5049                        break;
5050                case LPFC_FC_LA_SPEED_1G:
5051                        port_speed = 1000;
5052                        break;
5053                case LPFC_FC_LA_SPEED_2G:
5054                        port_speed = 2000;
5055                        break;
5056                case LPFC_FC_LA_SPEED_4G:
5057                        port_speed = 4000;
5058                        break;
5059                case LPFC_FC_LA_SPEED_8G:
5060                        port_speed = 8000;
5061                        break;
5062                case LPFC_FC_LA_SPEED_10G:
5063                        port_speed = 10000;
5064                        break;
5065                case LPFC_FC_LA_SPEED_16G:
5066                        port_speed = 16000;
5067                        break;
5068                case LPFC_FC_LA_SPEED_32G:
5069                        port_speed = 32000;
5070                        break;
5071                case LPFC_FC_LA_SPEED_64G:
5072                        port_speed = 64000;
5073                        break;
5074                case LPFC_FC_LA_SPEED_128G:
5075                        port_speed = 128000;
5076                        break;
5077                default:
5078                        port_speed = 0;
5079                }
5080                break;
5081        default:
5082                port_speed = 0;
5083        }
5084        return port_speed;
5085}
5086
5087/**
5088 * lpfc_sli4_async_link_evt - Process the asynchronous FCoE link event
5089 * @phba: pointer to lpfc hba data structure.
5090 * @acqe_link: pointer to the async link completion queue entry.
5091 *
5092 * This routine is to handle the SLI4 asynchronous FCoE link event.
5093 **/
5094static void
5095lpfc_sli4_async_link_evt(struct lpfc_hba *phba,
5096                         struct lpfc_acqe_link *acqe_link)
5097{
5098        struct lpfc_dmabuf *mp;
5099        LPFC_MBOXQ_t *pmb;
5100        MAILBOX_t *mb;
5101        struct lpfc_mbx_read_top *la;
5102        uint8_t att_type;
5103        int rc;
5104
5105        att_type = lpfc_sli4_parse_latt_type(phba, acqe_link);
5106        if (att_type != LPFC_ATT_LINK_DOWN && att_type != LPFC_ATT_LINK_UP)
5107                return;
5108        phba->fcoe_eventtag = acqe_link->event_tag;
5109        pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5110        if (!pmb) {
5111                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5112                                "0395 The mboxq allocation failed\n");
5113                return;
5114        }
5115        mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
5116        if (!mp) {
5117                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5118                                "0396 The lpfc_dmabuf allocation failed\n");
5119                goto out_free_pmb;
5120        }
5121        mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
5122        if (!mp->virt) {
5123                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5124                                "0397 The mbuf allocation failed\n");
5125                goto out_free_dmabuf;
5126        }
5127
5128        /* Cleanup any outstanding ELS commands */
5129        lpfc_els_flush_all_cmd(phba);
5130
5131        /* Block ELS IOCBs until we are done processing the link event */
5132        phba->sli4_hba.els_wq->pring->flag |= LPFC_STOP_IOCB_EVENT;
5133
5134        /* Update link event statistics */
5135        phba->sli.slistat.link_event++;
5136
5137        /* Create lpfc_handle_latt mailbox command from link ACQE */
5138        lpfc_read_topology(phba, pmb, mp);
5139        pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology;
5140        pmb->vport = phba->pport;
5141
5142        /* Keep the link status for extra SLI4 state machine reference */
5143        phba->sli4_hba.link_state.speed =
5144                        lpfc_sli4_port_speed_parse(phba, LPFC_TRAILER_CODE_LINK,
5145                                bf_get(lpfc_acqe_link_speed, acqe_link));
5146        phba->sli4_hba.link_state.duplex =
5147                                bf_get(lpfc_acqe_link_duplex, acqe_link);
5148        phba->sli4_hba.link_state.status =
5149                                bf_get(lpfc_acqe_link_status, acqe_link);
5150        phba->sli4_hba.link_state.type =
5151                                bf_get(lpfc_acqe_link_type, acqe_link);
5152        phba->sli4_hba.link_state.number =
5153                                bf_get(lpfc_acqe_link_number, acqe_link);
5154        phba->sli4_hba.link_state.fault =
5155                                bf_get(lpfc_acqe_link_fault, acqe_link);
5156        phba->sli4_hba.link_state.logical_speed =
5157                        bf_get(lpfc_acqe_logical_link_speed, acqe_link) * 10;
5158
5159        lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
5160                        "2900 Async FC/FCoE Link event - Speed:%dGBit "
5161                        "duplex:x%x LA Type:x%x Port Type:%d Port Number:%d "
5162                        "Logical speed:%dMbps Fault:%d\n",
5163                        phba->sli4_hba.link_state.speed,
5164                        phba->sli4_hba.link_state.topology,
5165                        phba->sli4_hba.link_state.status,
5166                        phba->sli4_hba.link_state.type,
5167                        phba->sli4_hba.link_state.number,
5168                        phba->sli4_hba.link_state.logical_speed,
5169                        phba->sli4_hba.link_state.fault);
5170        /*
5171         * For FC Mode: issue the READ_TOPOLOGY mailbox command to fetch
5172         * topology info. Note: Optional for non FC-AL ports.
5173         */
5174        if (!(phba->hba_flag & HBA_FCOE_MODE)) {
5175                rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
5176                if (rc == MBX_NOT_FINISHED)
5177                        goto out_free_dmabuf;
5178                return;
5179        }
5180        /*
5181         * For FCoE Mode: fill in all the topology information we need and call
5182         * the READ_TOPOLOGY completion routine to continue without actually
5183         * sending the READ_TOPOLOGY mailbox command to the port.
5184         */
5185        /* Initialize completion status */
5186        mb = &pmb->u.mb;
5187        mb->mbxStatus = MBX_SUCCESS;
5188
5189        /* Parse port fault information field */
5190        lpfc_sli4_parse_latt_fault(phba, acqe_link);
5191
5192        /* Parse and translate link attention fields */
5193        la = (struct lpfc_mbx_read_top *) &pmb->u.mb.un.varReadTop;
5194        la->eventTag = acqe_link->event_tag;
5195        bf_set(lpfc_mbx_read_top_att_type, la, att_type);
5196        bf_set(lpfc_mbx_read_top_link_spd, la,
5197               (bf_get(lpfc_acqe_link_speed, acqe_link)));
5198
5199        /* Fake the following irrelevant fields */
5200        bf_set(lpfc_mbx_read_top_topology, la, LPFC_TOPOLOGY_PT_PT);
5201        bf_set(lpfc_mbx_read_top_alpa_granted, la, 0);
5202        bf_set(lpfc_mbx_read_top_il, la, 0);
5203        bf_set(lpfc_mbx_read_top_pb, la, 0);
5204        bf_set(lpfc_mbx_read_top_fa, la, 0);
5205        bf_set(lpfc_mbx_read_top_mm, la, 0);
5206
5207        /* Invoke the lpfc_handle_latt mailbox command callback function */
5208        lpfc_mbx_cmpl_read_topology(phba, pmb);
5209
5210        return;
5211
5212out_free_dmabuf:
5213        kfree(mp);
5214out_free_pmb:
5215        mempool_free(pmb, phba->mbox_mem_pool);
5216}
5217
5218/**
5219 * lpfc_async_link_speed_to_read_top - Parse async evt link speed code to read
5220 * topology.
5221 * @phba: pointer to lpfc hba data structure.
5222 * @speed_code: asynchronous event link speed code.
5223 *
5224 * This routine parses the given SLI4 async event link speed code into the
5225 * corresponding Read topology link speed value.
5226 *
5227 * Return: link speed in terms of Read topology.
5228 **/
5229static uint8_t
5230lpfc_async_link_speed_to_read_top(struct lpfc_hba *phba, uint8_t speed_code)
5231{
5232        uint8_t port_speed;
5233
5234        switch (speed_code) {
5235        case LPFC_FC_LA_SPEED_1G:
5236                port_speed = LPFC_LINK_SPEED_1GHZ;
5237                break;
5238        case LPFC_FC_LA_SPEED_2G:
5239                port_speed = LPFC_LINK_SPEED_2GHZ;
5240                break;
5241        case LPFC_FC_LA_SPEED_4G:
5242                port_speed = LPFC_LINK_SPEED_4GHZ;
5243                break;
5244        case LPFC_FC_LA_SPEED_8G:
5245                port_speed = LPFC_LINK_SPEED_8GHZ;
5246                break;
5247        case LPFC_FC_LA_SPEED_16G:
5248                port_speed = LPFC_LINK_SPEED_16GHZ;
5249                break;
5250        case LPFC_FC_LA_SPEED_32G:
5251                port_speed = LPFC_LINK_SPEED_32GHZ;
5252                break;
5253        case LPFC_FC_LA_SPEED_64G:
5254                port_speed = LPFC_LINK_SPEED_64GHZ;
5255                break;
5256        case LPFC_FC_LA_SPEED_128G:
5257                port_speed = LPFC_LINK_SPEED_128GHZ;
5258                break;
5259        case LPFC_FC_LA_SPEED_256G:
5260                port_speed = LPFC_LINK_SPEED_256GHZ;
5261                break;
5262        default:
5263                port_speed = 0;
5264                break;
5265        }
5266
5267        return port_speed;
5268}
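
/*
 * For illustration, the trunking path below feeds the ACQE speed code
 * straight through this helper:
 *
 *	phba->fc_linkspeed = lpfc_async_link_speed_to_read_top(phba,
 *				bf_get(lpfc_acqe_fc_la_speed, acqe_fc));
 *
 * e.g. a code of LPFC_FC_LA_SPEED_32G yields LPFC_LINK_SPEED_32GHZ, so
 * READ_TOPOLOGY-based code paths see the same encoding as on SLI3.
 */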
5269
5270#define trunk_link_status(__idx)\
5271        bf_get(lpfc_acqe_fc_la_trunk_config_port##__idx, acqe_fc) ?\
5272               ((phba->trunk_link.link##__idx.state == LPFC_LINK_UP) ?\
5273                "Link up" : "Link down") : "NA"
5274/* Did port __idx report an error? */
5275#define trunk_port_fault(__idx)\
5276        bf_get(lpfc_acqe_fc_la_trunk_config_port##__idx, acqe_fc) ?\
5277               (port_fault & (1 << __idx) ? "YES" : "NO") : "NA"
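
/*
 * For reference, trunk_link_status(0) expands to roughly:
 *
 *	bf_get(lpfc_acqe_fc_la_trunk_config_port0, acqe_fc) ?
 *	       ((phba->trunk_link.link0.state == LPFC_LINK_UP) ?
 *		"Link up" : "Link down") : "NA"
 *
 * and trunk_port_fault(0) tests bit 0 of the local port_fault mask
 * (e.g. port_fault = 0x5 reports "YES" for ports 0 and 2). Both macros
 * report "NA" for any port not in the trunk configuration.
 */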
5278
5279static void
5280lpfc_update_trunk_link_status(struct lpfc_hba *phba,
5281                              struct lpfc_acqe_fc_la *acqe_fc)
5282{
5283        uint8_t port_fault = bf_get(lpfc_acqe_fc_la_trunk_linkmask, acqe_fc);
5284        uint8_t err = bf_get(lpfc_acqe_fc_la_trunk_fault, acqe_fc);
5285
5286        phba->sli4_hba.link_state.speed =
5287                lpfc_sli4_port_speed_parse(phba, LPFC_TRAILER_CODE_FC,
5288                                bf_get(lpfc_acqe_fc_la_speed, acqe_fc));
5289
5290        phba->sli4_hba.link_state.logical_speed =
5291                                bf_get(lpfc_acqe_fc_la_llink_spd, acqe_fc) * 10;
5292        /* We got FC link speed, convert to fc_linkspeed (READ_TOPOLOGY) */
5293        phba->fc_linkspeed =
5294                 lpfc_async_link_speed_to_read_top(
5295                                phba,
5296                                bf_get(lpfc_acqe_fc_la_speed, acqe_fc));
5297
5298        if (bf_get(lpfc_acqe_fc_la_trunk_config_port0, acqe_fc)) {
5299                phba->trunk_link.link0.state =
5300                        bf_get(lpfc_acqe_fc_la_trunk_link_status_port0, acqe_fc)
5301                        ? LPFC_LINK_UP : LPFC_LINK_DOWN;
5302                phba->trunk_link.link0.fault = port_fault & 0x1 ? err : 0;
5303        }
5304        if (bf_get(lpfc_acqe_fc_la_trunk_config_port1, acqe_fc)) {
5305                phba->trunk_link.link1.state =
5306                        bf_get(lpfc_acqe_fc_la_trunk_link_status_port1, acqe_fc)
5307                        ? LPFC_LINK_UP : LPFC_LINK_DOWN;
5308                phba->trunk_link.link1.fault = port_fault & 0x2 ? err : 0;
5309        }
5310        if (bf_get(lpfc_acqe_fc_la_trunk_config_port2, acqe_fc)) {
5311                phba->trunk_link.link2.state =
5312                        bf_get(lpfc_acqe_fc_la_trunk_link_status_port2, acqe_fc)
5313                        ? LPFC_LINK_UP : LPFC_LINK_DOWN;
5314                phba->trunk_link.link2.fault = port_fault & 0x4 ? err : 0;
5315        }
5316        if (bf_get(lpfc_acqe_fc_la_trunk_config_port3, acqe_fc)) {
5317                phba->trunk_link.link3.state =
5318                        bf_get(lpfc_acqe_fc_la_trunk_link_status_port3, acqe_fc)
5319                        ? LPFC_LINK_UP : LPFC_LINK_DOWN;
5320                phba->trunk_link.link3.fault = port_fault & 0x8 ? err : 0;
5321        }
5322
5323        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5324                        "2910 Async FC Trunking Event - Speed:%d\n"
5325                        "\tLogical speed:%d "
5326                        "port0: %s port1: %s port2: %s port3: %s\n",
5327                        phba->sli4_hba.link_state.speed,
5328                        phba->sli4_hba.link_state.logical_speed,
5329                        trunk_link_status(0), trunk_link_status(1),
5330                        trunk_link_status(2), trunk_link_status(3));
5331
5332        if (port_fault)
5333                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5334                                "3202 trunk error:0x%x (%s) seen on port0:%s "
5335                                /*
5336                                 * SLI-4: Only error codes up to 0xA are
5337                                 * defined as of now. Print an appropriate
5338                                 * message in case the driver needs updating.
5339                                 */
5340                                "port1:%s port2:%s port3:%s\n", err, err > 0xA ?
5341                                "UNDEFINED. update driver." : trunk_errmsg[err],
5342                                trunk_port_fault(0), trunk_port_fault(1),
5343                                trunk_port_fault(2), trunk_port_fault(3));
5344}
5345
5346
5347/**
5348 * lpfc_sli4_async_fc_evt - Process the asynchronous FC link event
5349 * @phba: pointer to lpfc hba data structure.
5350 * @acqe_fc: pointer to the async fc completion queue entry.
5351 *
5352 * This routine is to handle the SLI4 asynchronous FC event. It will simply log
5353 * that the event was received and then issue a read_topology mailbox command so
5354 * that the rest of the driver will treat it the same as SLI3.
5355 **/
5356static void
5357lpfc_sli4_async_fc_evt(struct lpfc_hba *phba, struct lpfc_acqe_fc_la *acqe_fc)
5358{
5359        struct lpfc_dmabuf *mp;
5360        LPFC_MBOXQ_t *pmb;
5361        MAILBOX_t *mb;
5362        struct lpfc_mbx_read_top *la;
5363        int rc;
5364
5365        if (bf_get(lpfc_trailer_type, acqe_fc) !=
5366            LPFC_FC_LA_EVENT_TYPE_FC_LINK) {
5367                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5368                                "2895 Non FC link Event detected.(%d)\n",
5369                                bf_get(lpfc_trailer_type, acqe_fc));
5370                return;
5371        }
5372
5373        if (bf_get(lpfc_acqe_fc_la_att_type, acqe_fc) ==
5374            LPFC_FC_LA_TYPE_TRUNKING_EVENT) {
5375                lpfc_update_trunk_link_status(phba, acqe_fc);
5376                return;
5377        }
5378
5379        /* Keep the link status for extra SLI4 state machine reference */
5380        phba->sli4_hba.link_state.speed =
5381                        lpfc_sli4_port_speed_parse(phba, LPFC_TRAILER_CODE_FC,
5382                                bf_get(lpfc_acqe_fc_la_speed, acqe_fc));
5383        phba->sli4_hba.link_state.duplex = LPFC_ASYNC_LINK_DUPLEX_FULL;
5384        phba->sli4_hba.link_state.topology =
5385                                bf_get(lpfc_acqe_fc_la_topology, acqe_fc);
5386        phba->sli4_hba.link_state.status =
5387                                bf_get(lpfc_acqe_fc_la_att_type, acqe_fc);
5388        phba->sli4_hba.link_state.type =
5389                                bf_get(lpfc_acqe_fc_la_port_type, acqe_fc);
5390        phba->sli4_hba.link_state.number =
5391                                bf_get(lpfc_acqe_fc_la_port_number, acqe_fc);
5392        phba->sli4_hba.link_state.fault =
5393                                bf_get(lpfc_acqe_link_fault, acqe_fc);
5394
5395        if (bf_get(lpfc_acqe_fc_la_att_type, acqe_fc) ==
5396            LPFC_FC_LA_TYPE_LINK_DOWN)
5397                phba->sli4_hba.link_state.logical_speed = 0;
5398        else if (!phba->sli4_hba.conf_trunk)
5399                phba->sli4_hba.link_state.logical_speed =
5400                                bf_get(lpfc_acqe_fc_la_llink_spd, acqe_fc) * 10;
5401
5402        lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
5403                        "2896 Async FC event - Speed:%dGBaud Topology:x%x "
5404                        "LA Type:x%x Port Type:%d Port Number:%d Logical speed:"
5405                        "%dMbps Fault:%d\n",
5406                        phba->sli4_hba.link_state.speed,
5407                        phba->sli4_hba.link_state.topology,
5408                        phba->sli4_hba.link_state.status,
5409                        phba->sli4_hba.link_state.type,
5410                        phba->sli4_hba.link_state.number,
5411                        phba->sli4_hba.link_state.logical_speed,
5412                        phba->sli4_hba.link_state.fault);
5413        pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5414        if (!pmb) {
5415                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5416                                "2897 The mboxq allocation failed\n");
5417                return;
5418        }
5419        mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
5420        if (!mp) {
5421                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5422                                "2898 The lpfc_dmabuf allocation failed\n");
5423                goto out_free_pmb;
5424        }
5425        mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
5426        if (!mp->virt) {
5427                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5428                                "2899 The mbuf allocation failed\n");
5429                goto out_free_dmabuf;
5430        }
5431
5432        /* Cleanup any outstanding ELS commands */
5433        lpfc_els_flush_all_cmd(phba);
5434
5435        /* Block ELS IOCBs until we have processed the link event */
5436        phba->sli4_hba.els_wq->pring->flag |= LPFC_STOP_IOCB_EVENT;
5437
5438        /* Update link event statistics */
5439        phba->sli.slistat.link_event++;
5440
5441        /* Create lpfc_handle_latt mailbox command from link ACQE */
5442        lpfc_read_topology(phba, pmb, mp);
5443        pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology;
5444        pmb->vport = phba->pport;
5445
5446        if (phba->sli4_hba.link_state.status != LPFC_FC_LA_TYPE_LINK_UP) {
5447                phba->link_flag &= ~(LS_MDS_LINK_DOWN | LS_MDS_LOOPBACK);
5448
5449                switch (phba->sli4_hba.link_state.status) {
5450                case LPFC_FC_LA_TYPE_MDS_LINK_DOWN:
5451                        phba->link_flag |= LS_MDS_LINK_DOWN;
5452                        break;
5453                case LPFC_FC_LA_TYPE_MDS_LOOPBACK:
5454                        phba->link_flag |= LS_MDS_LOOPBACK;
5455                        break;
5456                default:
5457                        break;
5458                }
5459
5460                /* Initialize completion status */
5461                mb = &pmb->u.mb;
5462                mb->mbxStatus = MBX_SUCCESS;
5463
5464                /* Parse port fault information field */
5465                lpfc_sli4_parse_latt_fault(phba, (void *)acqe_fc);
5466
5467                /* Parse and translate link attention fields */
5468                la = (struct lpfc_mbx_read_top *)&pmb->u.mb.un.varReadTop;
5469                la->eventTag = acqe_fc->event_tag;
5470
5471                if (phba->sli4_hba.link_state.status ==
5472                    LPFC_FC_LA_TYPE_UNEXP_WWPN) {
5473                        bf_set(lpfc_mbx_read_top_att_type, la,
5474                               LPFC_FC_LA_TYPE_UNEXP_WWPN);
5475                } else {
5476                        bf_set(lpfc_mbx_read_top_att_type, la,
5477                               LPFC_FC_LA_TYPE_LINK_DOWN);
5478                }
5479                /* Invoke the mailbox command callback function */
5480                lpfc_mbx_cmpl_read_topology(phba, pmb);
5481
5482                return;
5483        }
5484
5485        rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
5486        if (rc == MBX_NOT_FINISHED)
5487                goto out_free_dmabuf;
5488        return;
5489
5490out_free_dmabuf:
5491        kfree(mp);
5492out_free_pmb:
5493        mempool_free(pmb, phba->mbox_mem_pool);
5494}
5495
5496/**
5497 * lpfc_sli4_async_sli_evt - Process the asynchronous SLI link event
5498 * @phba: pointer to lpfc hba data structure.
5499 * @acqe_sli: pointer to the async SLI completion queue entry.
5500 *
5501 * This routine is to handle the SLI4 asynchronous SLI events.
5502 **/
5503static void
5504lpfc_sli4_async_sli_evt(struct lpfc_hba *phba, struct lpfc_acqe_sli *acqe_sli)
5505{
5506        char port_name;
5507        char message[128];
5508        uint8_t status;
5509        uint8_t evt_type;
5510        uint8_t operational = 0;
5511        struct temp_event temp_event_data;
5512        struct lpfc_acqe_misconfigured_event *misconfigured;
5513        struct Scsi_Host  *shost;
5514        struct lpfc_vport **vports;
5515        int rc, i;
5516
5517        evt_type = bf_get(lpfc_trailer_type, acqe_sli);
5518
5519        lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
5520                        "2901 Async SLI event - Type:%d, Event Data: x%08x "
5521                        "x%08x x%08x x%08x\n", evt_type,
5522                        acqe_sli->event_data1, acqe_sli->event_data2,
5523                        acqe_sli->reserved, acqe_sli->trailer);
5524
5525        port_name = phba->Port[0];
5526        if (port_name == 0x00)
5527                port_name = '?'; /* port name is empty */
5528
5529        switch (evt_type) {
5530        case LPFC_SLI_EVENT_TYPE_OVER_TEMP:
5531                temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
5532                temp_event_data.event_code = LPFC_THRESHOLD_TEMP;
5533                temp_event_data.data = (uint32_t)acqe_sli->event_data1;
5534
5535                lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
5536                                "3190 Over Temperature:%d Celsius- Port Name %c\n",
5537                                acqe_sli->event_data1, port_name);
5538
5539                phba->sfp_warning |= LPFC_TRANSGRESSION_HIGH_TEMPERATURE;
5540                shost = lpfc_shost_from_vport(phba->pport);
5541                fc_host_post_vendor_event(shost, fc_get_event_number(),
5542                                          sizeof(temp_event_data),
5543                                          (char *)&temp_event_data,
5544                                          SCSI_NL_VID_TYPE_PCI
5545                                          | PCI_VENDOR_ID_EMULEX);
5546                break;
5547        case LPFC_SLI_EVENT_TYPE_NORM_TEMP:
5548                temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
5549                temp_event_data.event_code = LPFC_NORMAL_TEMP;
5550                temp_event_data.data = (uint32_t)acqe_sli->event_data1;
5551
5552                lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
5553                                "3191 Normal Temperature:%d Celsius - Port Name %c\n",
5554                                acqe_sli->event_data1, port_name);
5555
5556                shost = lpfc_shost_from_vport(phba->pport);
5557                fc_host_post_vendor_event(shost, fc_get_event_number(),
5558                                          sizeof(temp_event_data),
5559                                          (char *)&temp_event_data,
5560                                          SCSI_NL_VID_TYPE_PCI
5561                                          | PCI_VENDOR_ID_EMULEX);
5562                break;
5563        case LPFC_SLI_EVENT_TYPE_MISCONFIGURED:
5564                misconfigured = (struct lpfc_acqe_misconfigured_event *)
5565                                        &acqe_sli->event_data1;
5566
5567                /* fetch the status for this port */
5568                switch (phba->sli4_hba.lnk_info.lnk_no) {
5569                case LPFC_LINK_NUMBER_0:
5570                        status = bf_get(lpfc_sli_misconfigured_port0_state,
5571                                        &misconfigured->theEvent);
5572                        operational = bf_get(lpfc_sli_misconfigured_port0_op,
5573                                        &misconfigured->theEvent);
5574                        break;
5575                case LPFC_LINK_NUMBER_1:
5576                        status = bf_get(lpfc_sli_misconfigured_port1_state,
5577                                        &misconfigured->theEvent);
5578                        operational = bf_get(lpfc_sli_misconfigured_port1_op,
5579                                        &misconfigured->theEvent);
5580                        break;
5581                case LPFC_LINK_NUMBER_2:
5582                        status = bf_get(lpfc_sli_misconfigured_port2_state,
5583                                        &misconfigured->theEvent);
5584                        operational = bf_get(lpfc_sli_misconfigured_port2_op,
5585                                        &misconfigured->theEvent);
5586                        break;
5587                case LPFC_LINK_NUMBER_3:
5588                        status = bf_get(lpfc_sli_misconfigured_port3_state,
5589                                        &misconfigured->theEvent);
5590                        operational = bf_get(lpfc_sli_misconfigured_port3_op,
5591                                        &misconfigured->theEvent);
5592                        break;
5593                default:
5594                        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5595                                        "3296 "
5596                                        "LPFC_SLI_EVENT_TYPE_MISCONFIGURED "
5597                                        "event: Invalid link %d",
5598                                        phba->sli4_hba.lnk_info.lnk_no);
5599                        return;
5600                }
5601
5602                /* Skip if optic state unchanged */
5603                if (phba->sli4_hba.lnk_info.optic_state == status)
5604                        return;
5605
5606                switch (status) {
5607                case LPFC_SLI_EVENT_STATUS_VALID:
5608                        sprintf(message, "Physical Link is functional");
5609                        break;
5610                case LPFC_SLI_EVENT_STATUS_NOT_PRESENT:
5611                        sprintf(message, "Optics faulted/incorrectly "
5612                                "installed/not installed - Reseat optics, "
5613                                "if issue not resolved, replace.");
5614                        break;
5615                case LPFC_SLI_EVENT_STATUS_WRONG_TYPE:
5616                        sprintf(message,
5617                                "Optics of two types installed - Remove one "
5618                                "optic or install matching pair of optics.");
5619                        break;
5620                case LPFC_SLI_EVENT_STATUS_UNSUPPORTED:
5621                        sprintf(message, "Incompatible optics - Replace with "
5622                                "compatible optics for card to function.");
5623                        break;
5624                case LPFC_SLI_EVENT_STATUS_UNQUALIFIED:
5625                        sprintf(message, "Unqualified optics - Replace with "
5626                                "Avago optics for Warranty and Technical "
5627                                "Support - Link is%s operational",
5628                                (operational) ? " not" : "");
5629                        break;
5630                case LPFC_SLI_EVENT_STATUS_UNCERTIFIED:
5631                        sprintf(message, "Uncertified optics - Replace with "
5632                                "Avago-certified optics to enable link "
5633                                "operation - Link is%s operational",
5634                                (operational) ? " not" : "");
5635                        break;
5636                default:
5637                        /* firmware is reporting a status we don't know about */
5638                        sprintf(message, "Unknown event status x%02x", status);
5639                        break;
5640                }
5641
5642                /* Issue READ_CONFIG mbox command to refresh supported speeds */
5643                rc = lpfc_sli4_read_config(phba);
5644                if (rc) {
5645                        phba->lmt = 0;
5646                        lpfc_printf_log(phba, KERN_ERR,
5647                                        LOG_TRACE_EVENT,
5648                                        "3194 Unable to retrieve supported "
5649                                        "speeds, rc = 0x%x\n", rc);
5650                }
5651                vports = lpfc_create_vport_work_array(phba);
5652                if (vports != NULL) {
5653                        for (i = 0; i <= phba->max_vports && vports[i] != NULL;
5654                                        i++) {
5655                                shost = lpfc_shost_from_vport(vports[i]);
5656                                lpfc_host_supported_speeds_set(shost);
5657                        }
5658                }
5659                lpfc_destroy_vport_work_array(phba, vports);
5660
5661                phba->sli4_hba.lnk_info.optic_state = status;
5662                lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
5663                                "3176 Port Name %c %s\n", port_name, message);
5664                break;
5665        case LPFC_SLI_EVENT_TYPE_REMOTE_DPORT:
5666                lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
5667                                "3192 Remote DPort Test Initiated - "
5668                                "Event Data1:x%08x Event Data2: x%08x\n",
5669                                acqe_sli->event_data1, acqe_sli->event_data2);
5670                break;
5671        case LPFC_SLI_EVENT_TYPE_MISCONF_FAWWN:
5672                /* Misconfigured WWN. Reports that the SLI Port is configured
5673                 * to use FA-WWN, but the attached device doesn't support it.
5674                 * No driver action is required.
5675                 * Event Data1 - N.A, Event Data2 - N.A
5676                 */
5677                lpfc_log_msg(phba, KERN_WARNING, LOG_SLI,
5678                             "2699 Misconfigured FA-WWN - Attached device does "
5679                             "not support FA-WWN\n");
5680                break;
5681        case LPFC_SLI_EVENT_TYPE_EEPROM_FAILURE:
5682                /* EEPROM failure. No driver action is required */
5683                lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
5684                             "2518 EEPROM failure - "
5685                             "Event Data1: x%08x Event Data2: x%08x\n",
5686                             acqe_sli->event_data1, acqe_sli->event_data2);
5687                break;
5688        default:
5689                lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
5690                                "3193 Unrecognized SLI event, type: 0x%x",
5691                                evt_type);
5692                break;
5693        }
5694}
5695
5696/**
5697 * lpfc_sli4_perform_vport_cvl - Perform clear virtual link on a vport
5698 * @vport: pointer to vport data structure.
5699 *
5700 * This routine is to perform Clear Virtual Link (CVL) on a vport in
5701 * response to a CVL event.
5702 *
5703 * Return the pointer to the ndlp associated with the vport if successful,
5704 * otherwise return NULL.
5705 **/
5706static struct lpfc_nodelist *
5707lpfc_sli4_perform_vport_cvl(struct lpfc_vport *vport)
5708{
5709        struct lpfc_nodelist *ndlp;
5710        struct Scsi_Host *shost;
5711        struct lpfc_hba *phba;
5712
5713        if (!vport)
5714                return NULL;
5715        phba = vport->phba;
5716        if (!phba)
5717                return NULL;
5718        ndlp = lpfc_findnode_did(vport, Fabric_DID);
5719        if (!ndlp) {
5720                /* Cannot find existing Fabric ndlp, so allocate a new one */
5721                ndlp = lpfc_nlp_init(vport, Fabric_DID);
5722                if (!ndlp)
5723                        return NULL;
5724                /* Set the node type */
5725                ndlp->nlp_type |= NLP_FABRIC;
5726                /* Put ndlp onto node list */
5727                lpfc_enqueue_node(vport, ndlp);
5728        }
5729        if ((phba->pport->port_state < LPFC_FLOGI) &&
5730                (phba->pport->port_state != LPFC_VPORT_FAILED))
5731                return NULL;
5732        /* If virtual link is not yet instantiated ignore CVL */
5733        if ((vport != phba->pport) && (vport->port_state < LPFC_FDISC)
5734                && (vport->port_state != LPFC_VPORT_FAILED))
5735                return NULL;
5736        shost = lpfc_shost_from_vport(vport);
5737        if (!shost)
5738                return NULL;
5739        lpfc_linkdown_port(vport);
5740        lpfc_cleanup_pending_mbox(vport);
5741        spin_lock_irq(shost->host_lock);
5742        vport->fc_flag |= FC_VPORT_CVL_RCVD;
5743        spin_unlock_irq(shost->host_lock);
5744
5745        return ndlp;
5746}
5747
5748/**
5749 * lpfc_sli4_perform_all_vport_cvl - Perform clear virtual link on all vports
5750 * @phba: pointer to lpfc hba data structure.
5751 *
5752 * This routine is to perform Clear Virtual Link (CVL) on all vports in
5753 * response to a FCF dead event.
5754 **/
5755static void
5756lpfc_sli4_perform_all_vport_cvl(struct lpfc_hba *phba)
5757{
5758        struct lpfc_vport **vports;
5759        int i;
5760
5761        vports = lpfc_create_vport_work_array(phba);
5762        if (vports)
5763                for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
5764                        lpfc_sli4_perform_vport_cvl(vports[i]);
5765        lpfc_destroy_vport_work_array(phba, vports);
5766}
5767
5768/**
5769 * lpfc_sli4_async_fip_evt - Process the asynchronous FCoE FIP event
5770 * @phba: pointer to lpfc hba data structure.
5771 * @acqe_fip: pointer to the async fcoe completion queue entry.
5772 *
5773 * This routine is to handle the SLI4 asynchronous fcoe event.
5774 **/
5775static void
5776lpfc_sli4_async_fip_evt(struct lpfc_hba *phba,
5777                        struct lpfc_acqe_fip *acqe_fip)
5778{
5779        uint8_t event_type = bf_get(lpfc_trailer_type, acqe_fip);
5780        int rc;
5781        struct lpfc_vport *vport;
5782        struct lpfc_nodelist *ndlp;
5783        int active_vlink_present;
5784        struct lpfc_vport **vports;
5785        int i;
5786
5787        phba->fc_eventTag = acqe_fip->event_tag;
5788        phba->fcoe_eventtag = acqe_fip->event_tag;
5789        switch (event_type) {
5790        case LPFC_FIP_EVENT_TYPE_NEW_FCF:
5791        case LPFC_FIP_EVENT_TYPE_FCF_PARAM_MOD:
5792                if (event_type == LPFC_FIP_EVENT_TYPE_NEW_FCF)
5793                        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5794                                        "2546 New FCF event, evt_tag:x%x, "
5795                                        "index:x%x\n",
5796                                        acqe_fip->event_tag,
5797                                        acqe_fip->index);
5798                else
5799                        lpfc_printf_log(phba, KERN_WARNING, LOG_FIP |
5800                                        LOG_DISCOVERY,
5801                                        "2788 FCF param modified event, "
5802                                        "evt_tag:x%x, index:x%x\n",
5803                                        acqe_fip->event_tag,
5804                                        acqe_fip->index);
5805                if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
5806                        /*
5807                         * During period of FCF discovery, read the FCF
5808                         * table record indexed by the event to update
5809                         * FCF roundrobin failover eligible FCF bmask.
5810                         */
5811                        lpfc_printf_log(phba, KERN_INFO, LOG_FIP |
5812                                        LOG_DISCOVERY,
5813                                        "2779 Read FCF (x%x) for updating "
5814                                        "roundrobin FCF failover bmask\n",
5815                                        acqe_fip->index);
5816                        rc = lpfc_sli4_read_fcf_rec(phba, acqe_fip->index);
5817                }
5818
5819                /* If the FCF discovery is in progress, do nothing. */
5820                spin_lock_irq(&phba->hbalock);
5821                if (phba->hba_flag & FCF_TS_INPROG) {
5822                        spin_unlock_irq(&phba->hbalock);
5823                        break;
5824                }
5825                /* If fast FCF failover rescan event is pending, do nothing */
5826                if (phba->fcf.fcf_flag & (FCF_REDISC_EVT | FCF_REDISC_PEND)) {
5827                        spin_unlock_irq(&phba->hbalock);
5828                        break;
5829                }
5830
5831                /* If the FCF has been in discovered state, do nothing. */
5832                if (phba->fcf.fcf_flag & FCF_SCAN_DONE) {
5833                        spin_unlock_irq(&phba->hbalock);
5834                        break;
5835                }
5836                spin_unlock_irq(&phba->hbalock);
5837
5838                /* Otherwise, scan the entire FCF table and re-discover SAN */
5839                lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
5840                                "2770 Start FCF table scan per async FCF "
5841                                "event, evt_tag:x%x, index:x%x\n",
5842                                acqe_fip->event_tag, acqe_fip->index);
5843                rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba,
5844                                                     LPFC_FCOE_FCF_GET_FIRST);
5845                if (rc)
5846                        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5847                                        "2547 Issue FCF scan read FCF mailbox "
5848                                        "command failed (x%x)\n", rc);
5849                break;
5850
5851        case LPFC_FIP_EVENT_TYPE_FCF_TABLE_FULL:
5852                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5853                                "2548 FCF Table full count 0x%x tag 0x%x\n",
5854                                bf_get(lpfc_acqe_fip_fcf_count, acqe_fip),
5855                                acqe_fip->event_tag);
5856                break;
5857
5858        case LPFC_FIP_EVENT_TYPE_FCF_DEAD:
5859                phba->fcoe_cvl_eventtag = acqe_fip->event_tag;
5860                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5861                                "2549 FCF (x%x) disconnected from network, "
5862                                 "tag:x%x\n", acqe_fip->index,
5863                                 acqe_fip->event_tag);
5864                /*
5865                 * If we are in the middle of FCF failover process, clear
5866                 * the corresponding FCF bit in the roundrobin bitmap.
5867                 */
5868                spin_lock_irq(&phba->hbalock);
5869                if ((phba->fcf.fcf_flag & FCF_DISCOVERY) &&
5870                    (phba->fcf.current_rec.fcf_indx != acqe_fip->index)) {
5871                        spin_unlock_irq(&phba->hbalock);
5872                        /* Update FLOGI FCF failover eligible FCF bmask */
5873                        lpfc_sli4_fcf_rr_index_clear(phba, acqe_fip->index);
5874                        break;
5875                }
5876                spin_unlock_irq(&phba->hbalock);
5877
5878                /* If the event is not for currently used fcf do nothing */
5879                if (phba->fcf.current_rec.fcf_indx != acqe_fip->index)
5880                        break;
5881
5882                /*
5883                 * Otherwise, request the port to rediscover the entire FCF
5884                 * table for a fast recovery in case the current FCF
5885                 * is no longer valid, as we are not already in the
5886                 * middle of the FCF failover process.
5887                 */
5888                spin_lock_irq(&phba->hbalock);
5889                /* Mark the fast failover process in progress */
5890                phba->fcf.fcf_flag |= FCF_DEAD_DISC;
5891                spin_unlock_irq(&phba->hbalock);
5892
5893                lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
5894                                "2771 Start FCF fast failover process due to "
5895                                "FCF DEAD event: evt_tag:x%x, fcf_index:x%x "
5896                                "\n", acqe_fip->event_tag, acqe_fip->index);
5897                rc = lpfc_sli4_redisc_fcf_table(phba);
5898                if (rc) {
5899                        lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
5900                                        LOG_TRACE_EVENT,
5901                                        "2772 Issue FCF rediscover mailbox "
5902                                        "command failed, fail through to FCF "
5903                                        "dead event\n");
5904                        spin_lock_irq(&phba->hbalock);
5905                        phba->fcf.fcf_flag &= ~FCF_DEAD_DISC;
5906                        spin_unlock_irq(&phba->hbalock);
5907                        /*
5908                         * Last resort will fail over by treating this
5909                         * as a link down to FCF registration.
5910                         */
5911                        lpfc_sli4_fcf_dead_failthrough(phba);
5912                } else {
5913                        /* Reset FCF roundrobin bmask for new discovery */
5914                        lpfc_sli4_clear_fcf_rr_bmask(phba);
5915                        /*
5916                         * Handling fast FCF failover to a DEAD FCF event is
5917                         * considered equivalent to receiving a CVL on all vports.
5918                         */
5919                        lpfc_sli4_perform_all_vport_cvl(phba);
5920                }
5921                break;
5922        case LPFC_FIP_EVENT_TYPE_CVL:
5923                phba->fcoe_cvl_eventtag = acqe_fip->event_tag;
5924                lpfc_printf_log(phba, KERN_ERR,
5925                                LOG_TRACE_EVENT,
5926                        "2718 Clear Virtual Link Received for VPI 0x%x"
5927                        " tag 0x%x\n", acqe_fip->index, acqe_fip->event_tag);
5928
5929                vport = lpfc_find_vport_by_vpid(phba,
5930                                                acqe_fip->index);
5931                ndlp = lpfc_sli4_perform_vport_cvl(vport);
5932                if (!ndlp)
5933                        break;
5934                active_vlink_present = 0;
5935
5936                vports = lpfc_create_vport_work_array(phba);
5937                if (vports) {
5938                        for (i = 0; i <= phba->max_vports && vports[i] != NULL;
5939                                        i++) {
5940                                if ((!(vports[i]->fc_flag &
5941                                        FC_VPORT_CVL_RCVD)) &&
5942                                        (vports[i]->port_state > LPFC_FDISC)) {
5943                                        active_vlink_present = 1;
5944                                        break;
5945                                }
5946                        }
5947                        lpfc_destroy_vport_work_array(phba, vports);
5948                }
5949
5950                /*
5951                 * Don't re-instantiate if vport is marked for deletion.
5952                 * If we are here first then vport_delete is going to wait
5953                 * for discovery to complete.
5954                 */
5955                if (!(vport->load_flag & FC_UNLOADING) &&
5956                                        active_vlink_present) {
5957                        /*
5958                         * If there are other active VLinks present,
5959                         * re-instantiate the Vlink using FDISC.
5960                         */
5961                        mod_timer(&ndlp->nlp_delayfunc,
5962                                  jiffies + msecs_to_jiffies(1000));
5963                        spin_lock_irq(&ndlp->lock);
5964                        ndlp->nlp_flag |= NLP_DELAY_TMO;
5965                        spin_unlock_irq(&ndlp->lock);
5966                        ndlp->nlp_last_elscmd = ELS_CMD_FDISC;
5967                        vport->port_state = LPFC_FDISC;
5968                } else {
5969                        /*
5970                         * Otherwise, we request the port to rediscover
5971                         * the entire FCF table for a fast recovery in
5972                         * case the current FCF is no longer valid, if
5973                         * we are not already in the FCF failover
5974                         * process.
5975                         */
5976                        spin_lock_irq(&phba->hbalock);
5977                        if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
5978                                spin_unlock_irq(&phba->hbalock);
5979                                break;
5980                        }
5981                        /* Mark the fast failover process in progress */
5982                        phba->fcf.fcf_flag |= FCF_ACVL_DISC;
5983                        spin_unlock_irq(&phba->hbalock);
5984                        lpfc_printf_log(phba, KERN_INFO, LOG_FIP |
5985                                        LOG_DISCOVERY,
5986                                        "2773 Start FCF failover per CVL, "
5987                                        "evt_tag:x%x\n", acqe_fip->event_tag);
5988                        rc = lpfc_sli4_redisc_fcf_table(phba);
5989                        if (rc) {
5990                                lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
5991                                                LOG_TRACE_EVENT,
5992                                                "2774 Issue FCF rediscover "
5993                                                "mailbox command failed, "
5994                                                "through to CVL event\n");
5995                                spin_lock_irq(&phba->hbalock);
5996                                phba->fcf.fcf_flag &= ~FCF_ACVL_DISC;
5997                                spin_unlock_irq(&phba->hbalock);
5998                                /*
5999                                 * Last resort will be to retry on the
6000                                 * currently registered FCF entry.
6001                                 */
6002                                lpfc_retry_pport_discovery(phba);
6003                        } else
6004                                /*
6005                                 * Reset FCF roundrobin bmask for new
6006                                 * discovery.
6007                                 */
6008                                lpfc_sli4_clear_fcf_rr_bmask(phba);
6009                }
6010                break;
6011        default:
6012                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6013                                "0288 Unknown FCoE event type 0x%x event tag "
6014                                "0x%x\n", event_type, acqe_fip->event_tag);
6015                break;
6016        }
6017}
6018
6019/**
6020 * lpfc_sli4_async_dcbx_evt - Process the asynchronous dcbx event
6021 * @phba: pointer to lpfc hba data structure.
6022 * @acqe_dcbx: pointer to the async dcbx completion queue entry.
6023 *
6024 * This routine is to handle the SLI4 asynchronous dcbx event.
6025 **/
6026static void
6027lpfc_sli4_async_dcbx_evt(struct lpfc_hba *phba,
6028                         struct lpfc_acqe_dcbx *acqe_dcbx)
6029{
6030        phba->fc_eventTag = acqe_dcbx->event_tag;
6031        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6032                        "0290 The SLI4 DCBX asynchronous event is not "
6033                        "handled yet\n");
6034}
6035
6036/**
6037 * lpfc_sli4_async_grp5_evt - Process the asynchronous group5 event
6038 * @phba: pointer to lpfc hba data structure.
6039 * @acqe_grp5: pointer to the async grp5 completion queue entry.
6040 *
6041 * This routine is to handle the SLI4 asynchronous grp5 event. A grp5 event
6042 * is an asynchronous notification of a logical link speed change.  The Port
6043 * reports the logical link speed in units of 10Mbps.
6044 **/
6045static void
6046lpfc_sli4_async_grp5_evt(struct lpfc_hba *phba,
6047                         struct lpfc_acqe_grp5 *acqe_grp5)
6048{
6049        uint16_t prev_ll_spd;
6050
6051        phba->fc_eventTag = acqe_grp5->event_tag;
6052        phba->fcoe_eventtag = acqe_grp5->event_tag;
6053        prev_ll_spd = phba->sli4_hba.link_state.logical_speed;
6054        phba->sli4_hba.link_state.logical_speed =
6055                (bf_get(lpfc_acqe_grp5_llink_spd, acqe_grp5)) * 10;
6056        lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
6057                        "2789 GRP5 Async Event: Updating logical link speed "
6058                        "from %dMbps to %dMbps\n", prev_ll_spd,
6059                        phba->sli4_hba.link_state.logical_speed);
6060}
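
/*
 * Worked example of the 10Mbps scaling above: a GRP5 ACQE logical link
 * speed field of 1000 is stored as 1000 * 10 = 10000 Mbps (10 Gbps) in
 * phba->sli4_hba.link_state.logical_speed.
 */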
6061
6062/**
6063 * lpfc_sli4_async_event_proc - Process all the pending asynchronous event
6064 * @phba: pointer to lpfc hba data structure.
6065 *
6066 * This routine is invoked by the worker thread to process all the pending
6067 * SLI4 asynchronous events.
6068 **/
6069void lpfc_sli4_async_event_proc(struct lpfc_hba *phba)
6070{
6071        struct lpfc_cq_event *cq_event;
6072        unsigned long iflags;
6073
6074        /* First, declare the async event has been handled */
6075        spin_lock_irqsave(&phba->hbalock, iflags);
6076        phba->hba_flag &= ~ASYNC_EVENT;
6077        spin_unlock_irqrestore(&phba->hbalock, iflags);
6078
6079        /* Now, handle all the async events */
6080        spin_lock_irqsave(&phba->sli4_hba.asynce_list_lock, iflags);
6081        while (!list_empty(&phba->sli4_hba.sp_asynce_work_queue)) {
6082                list_remove_head(&phba->sli4_hba.sp_asynce_work_queue,
6083                                 cq_event, struct lpfc_cq_event, list);
6084                spin_unlock_irqrestore(&phba->sli4_hba.asynce_list_lock,
6085                                       iflags);
6086
6087                /* Process the asynchronous event */
6088                switch (bf_get(lpfc_trailer_code, &cq_event->cqe.mcqe_cmpl)) {
6089                case LPFC_TRAILER_CODE_LINK:
6090                        lpfc_sli4_async_link_evt(phba,
6091                                                 &cq_event->cqe.acqe_link);
6092                        break;
6093                case LPFC_TRAILER_CODE_FCOE:
6094                        lpfc_sli4_async_fip_evt(phba, &cq_event->cqe.acqe_fip);
6095                        break;
6096                case LPFC_TRAILER_CODE_DCBX:
6097                        lpfc_sli4_async_dcbx_evt(phba,
6098                                                 &cq_event->cqe.acqe_dcbx);
6099                        break;
6100                case LPFC_TRAILER_CODE_GRP5:
6101                        lpfc_sli4_async_grp5_evt(phba,
6102                                                 &cq_event->cqe.acqe_grp5);
6103                        break;
6104                case LPFC_TRAILER_CODE_FC:
6105                        lpfc_sli4_async_fc_evt(phba, &cq_event->cqe.acqe_fc);
6106                        break;
6107                case LPFC_TRAILER_CODE_SLI:
6108                        lpfc_sli4_async_sli_evt(phba, &cq_event->cqe.acqe_sli);
6109                        break;
6110                default:
6111                        lpfc_printf_log(phba, KERN_ERR,
6112                                        LOG_TRACE_EVENT,
6113                                        "1804 Invalid asynchronous event code: "
6114                                        "x%x\n", bf_get(lpfc_trailer_code,
6115                                        &cq_event->cqe.mcqe_cmpl));
6116                        break;
6117                }
6118
6119                /* Free the completion event processed to the free pool */
6120                lpfc_sli4_cq_event_release(phba, cq_event);
6121                spin_lock_irqsave(&phba->sli4_hba.asynce_list_lock, iflags);
6122        }
6123        spin_unlock_irqrestore(&phba->sli4_hba.asynce_list_lock, iflags);
6124}
6125
6126/**
6127 * lpfc_sli4_fcf_redisc_event_proc - Process fcf table rediscovery event
6128 * @phba: pointer to lpfc hba data structure.
6129 *
6130 * This routine is invoked by the worker thread to process FCF table
6131 * rediscovery pending completion event.
6132 **/
6133void lpfc_sli4_fcf_redisc_event_proc(struct lpfc_hba *phba)
6134{
6135        int rc;
6136
6137        spin_lock_irq(&phba->hbalock);
6138        /* Clear FCF rediscovery timeout event */
6139        phba->fcf.fcf_flag &= ~FCF_REDISC_EVT;
6140        /* Clear driver fast failover FCF record flag */
6141        phba->fcf.failover_rec.flag = 0;
6142        /* Set state for FCF fast failover */
6143        phba->fcf.fcf_flag |= FCF_REDISC_FOV;
6144        spin_unlock_irq(&phba->hbalock);
6145
6146        /* Scan FCF table from the first entry to re-discover SAN */
6147        lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
6148                        "2777 Start post-quiescent FCF table scan\n");
6149        rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST);
6150        if (rc)
6151                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6152                                "2747 Issue FCF scan read FCF mailbox "
6153                                "command failed 0x%x\n", rc);
6154}
6155
6156/**
6157 * lpfc_api_table_setup - Set up per hba pci-device group func api jump table
6158 * @phba: pointer to lpfc hba data structure.
6159 * @dev_grp: The HBA PCI-Device group number.
6160 *
6161 * This routine is invoked to set up the per HBA PCI-Device group function
6162 * API jump table entries.
6163 *
6164 * Return: 0 if success, otherwise -ENODEV
6165 **/
6166int
6167lpfc_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
6168{
6169        int rc;
6170
6171        /* Set up lpfc PCI-device group */
6172        phba->pci_dev_grp = dev_grp;
6173
6174        /* The LPFC_PCI_DEV_OC uses SLI4 */
6175        if (dev_grp == LPFC_PCI_DEV_OC)
6176                phba->sli_rev = LPFC_SLI_REV4;
6177
6178        /* Set up device INIT API function jump table */
6179        rc = lpfc_init_api_table_setup(phba, dev_grp);
6180        if (rc)
6181                return -ENODEV;
6182        /* Set up SCSI API function jump table */
6183        rc = lpfc_scsi_api_table_setup(phba, dev_grp);
6184        if (rc)
6185                return -ENODEV;
6186        /* Set up SLI API function jump table */
6187        rc = lpfc_sli_api_table_setup(phba, dev_grp);
6188        if (rc)
6189                return -ENODEV;
6190        /* Set up MBOX API function jump table */
6191        rc = lpfc_mbox_api_table_setup(phba, dev_grp);
6192        if (rc)
6193                return -ENODEV;
6194
6195        return 0;
6196}
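
/*
 * A minimal caller sketch (hypothetical; device-group selection logic
 * elided):
 *
 *	if (lpfc_api_table_setup(phba, LPFC_PCI_DEV_OC))
 *		return -ENODEV;
 *
 * After a successful call for LPFC_PCI_DEV_OC, phba->sli_rev is
 * LPFC_SLI_REV4 and the INIT, SCSI, SLI and MBOX jump tables are set.
 */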
6197
6198/**
6199 * lpfc_log_intr_mode - Log the active interrupt mode
6200 * @phba: pointer to lpfc hba data structure.
6201 * @intr_mode: active interrupt mode adopted.
6202 *
6203 * This routine is invoked to log the interrupt mode currently used by
6204 * the device.
6205 **/
6206static void lpfc_log_intr_mode(struct lpfc_hba *phba, uint32_t intr_mode)
6207{
6208        switch (intr_mode) {
6209        case 0:
6210                lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6211                                "0470 Enable INTx interrupt mode.\n");
6212                break;
6213        case 1:
6214                lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6215                                "0481 Enabled MSI interrupt mode.\n");
6216                break;
6217        case 2:
6218                lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6219                                "0480 Enabled MSI-X interrupt mode.\n");
6220                break;
6221        default:
6222                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6223                                "0482 Illegal interrupt mode.\n");
6224                break;
6225        }
6226        return;
6227}
6228
6229/**
6230 * lpfc_enable_pci_dev - Enable a generic PCI device.
6231 * @phba: pointer to lpfc hba data structure.
6232 *
6233 * This routine is invoked to enable the PCI device that is common to all
6234 * PCI devices.
6235 *
6236 * Return codes
6237 *      0 - successful
6238 *      other values - error
6239 **/
6240static int
6241lpfc_enable_pci_dev(struct lpfc_hba *phba)
6242{
6243        struct pci_dev *pdev;
6244
6245        /* Obtain PCI device reference */
6246        if (!phba->pcidev)
6247                goto out_error;
6248        else
6249                pdev = phba->pcidev;
6250        /* Enable PCI device */
6251        if (pci_enable_device_mem(pdev))
6252                goto out_error;
6253        /* Request PCI resource for the device */
6254        if (pci_request_mem_regions(pdev, LPFC_DRIVER_NAME))
6255                goto out_disable_device;
6256        /* Set up device as PCI master and save state for EEH */
6257        pci_set_master(pdev);
6258        pci_try_set_mwi(pdev);
6259        pci_save_state(pdev);
6260
6261        /* PCIe EEH recovery on powerpc platforms needs fundamental reset */
6262        if (pci_is_pcie(pdev))
6263                pdev->needs_freset = 1;
6264
6265        return 0;
6266
6267out_disable_device:
6268        pci_disable_device(pdev);
6269out_error:
6270        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6271                        "1401 Failed to enable pci device\n");
6272        return -ENODEV;
6273}
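
/*
 * A minimal usage sketch (hypothetical; assumes the usual probe/remove
 * pairing):
 *
 *	rc = lpfc_enable_pci_dev(phba);
 *	if (rc)
 *		return rc;
 *
 * lpfc_disable_pci_dev() below is the matching teardown: it releases
 * the memory regions and disables the PCI device.
 */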
6274
6275/**
6276 * lpfc_disable_pci_dev - Disable a generic PCI device.
6277 * @phba: pointer to lpfc hba data structure.
6278 *
6279 * This routine is invoked to disable the PCI device that is common to all
6280 * PCI devices.
6281 **/
6282static void
6283lpfc_disable_pci_dev(struct lpfc_hba *phba)
6284{
6285        struct pci_dev *pdev;
6286
6287        /* Obtain PCI device reference */
6288        if (!phba->pcidev)
6289                return;
6290        else
6291                pdev = phba->pcidev;
6292        /* Release PCI resource and disable PCI device */
6293        pci_release_mem_regions(pdev);
6294        pci_disable_device(pdev);
6295
6296        return;
6297}
6298
6299/**
6300 * lpfc_reset_hba - Reset a hba
6301 * @phba: pointer to lpfc hba data structure.
6302 *
6303 * This routine is invoked to reset a hba device. It brings the HBA
6304 * offline, performs a board restart, and then brings the board back
6305 * online. The lpfc_offline calls lpfc_sli_hba_down which will clean up
6306 * on outstanding mailbox commands.
6307 **/
6308void
6309lpfc_reset_hba(struct lpfc_hba *phba)
6310{
6311        /* If resets are disabled then set error state and return. */
6312        if (!phba->cfg_enable_hba_reset) {
6313                phba->link_state = LPFC_HBA_ERROR;
6314                return;
6315        }
6316
6317        /* If not LPFC_SLI_ACTIVE, force all IO to be flushed */
6318        if (phba->sli.sli_flag & LPFC_SLI_ACTIVE) {
6319                lpfc_offline_prep(phba, LPFC_MBX_WAIT);
6320        } else {
6321                lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
6322                lpfc_sli_flush_io_rings(phba);
6323        }
6324        lpfc_offline(phba);
6325        lpfc_sli_brdrestart(phba);
6326        lpfc_online(phba);
6327        lpfc_unblock_mgmt_io(phba);
6328}
6329
6330/**
6331 * lpfc_sli_sriov_nr_virtfn_get - Get the number of sr-iov virtual functions
6332 * @phba: pointer to lpfc hba data structure.
6333 *
6334 * This function retrieves the maximum number of PCI SR-IOV virtual
6335 * functions supported by the physical function. It reads the TotalVFs
6336 * field from the device's SR-IOV extended capability in PCI config
6337 * space and returns 0 if the device does not advertise the SR-IOV
6338 * capability.
6339 **/
6340uint16_t
6341lpfc_sli_sriov_nr_virtfn_get(struct lpfc_hba *phba)
6342{
6343        struct pci_dev *pdev = phba->pcidev;
6344        uint16_t nr_virtfn;
6345        int pos;
6346
6347        pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
6348        if (pos == 0)
6349                return 0;
6350
6351        pci_read_config_word(pdev, pos + PCI_SRIOV_TOTAL_VF, &nr_virtfn);
6352        return nr_virtfn;
6353}
6354
6355/**
6356 * lpfc_sli_probe_sriov_nr_virtfn - Enable a number of sr-iov virtual functions
6357 * @phba: pointer to lpfc hba data structure.
6358 * @nr_vfn: number of virtual functions to be enabled.
6359 *
6360 * This function enables the PCI SR-IOV virtual functions to a physical
6361 * function. It invokes the PCI SR-IOV api with the @nr_vfn provided to
6362 * enable that number of virtual functions on the physical function. As
6363 * not all devices support SR-IOV, the return code from the pci_enable_sriov()
6364 * API call is not considered an error condition for most devices.
6365 **/
6366int
6367lpfc_sli_probe_sriov_nr_virtfn(struct lpfc_hba *phba, int nr_vfn)
6368{
6369        struct pci_dev *pdev = phba->pcidev;
6370        uint16_t max_nr_vfn;
6371        int rc;
6372
6373        max_nr_vfn = lpfc_sli_sriov_nr_virtfn_get(phba);
6374        if (nr_vfn > max_nr_vfn) {
6375                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6376                                "3057 Requested vfs (%d) greater than "
6377                                "supported vfs (%d)", nr_vfn, max_nr_vfn);
6378                return -EINVAL;
6379        }
6380
6381        rc = pci_enable_sriov(pdev, nr_vfn);
6382        if (rc) {
6383                lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6384                                "2806 Failed to enable sriov on this device "
6385                                "with vfn number nr_vf:%d, rc:%d\n",
6386                                nr_vfn, rc);
6387        } else
6388                lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6389                                "2807 Successful enable sriov on this device "
6390                                "with vfn number nr_vf:%d\n", nr_vfn);
6391        return rc;
6392}
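
/*
 * Example usage (hypothetical VF count): request four virtual
 * functions at probe time.
 *
 *	rc = lpfc_sli_probe_sriov_nr_virtfn(phba, 4);
 *
 * A nonzero return is not treated as fatal by most callers, since many
 * devices simply do not support SR-IOV.
 */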
6393
6394/**
6395 * lpfc_setup_driver_resource_phase1 - Phase1 setup of driver internal resources.
6396 * @phba: pointer to lpfc hba data structure.
6397 *
6398 * This routine is invoked to set up the driver internal resources before the
6399 * device specific resource setup to support the HBA device it is attached to.
6400 *
6401 * Return codes
6402 *      0 - successful
6403 *      other values - error
6404 **/
6405static int
6406lpfc_setup_driver_resource_phase1(struct lpfc_hba *phba)
6407{
6408        struct lpfc_sli *psli = &phba->sli;
6409
6410        /*
6411         * Driver resources common to all SLI revisions
6412         */
6413        atomic_set(&phba->fast_event_count, 0);
6414        atomic_set(&phba->dbg_log_idx, 0);
6415        atomic_set(&phba->dbg_log_cnt, 0);
6416        atomic_set(&phba->dbg_log_dmping, 0);
6417        spin_lock_init(&phba->hbalock);
6418
6419        /* Initialize port_list spinlock */
6420        spin_lock_init(&phba->port_list_lock);
6421        INIT_LIST_HEAD(&phba->port_list);
6422
6423        INIT_LIST_HEAD(&phba->work_list);
6424        init_waitqueue_head(&phba->wait_4_mlo_m_q);
6425
6426        /* Initialize the wait queue head for the kernel thread */
6427        init_waitqueue_head(&phba->work_waitq);
6428
6429        lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6430                        "1403 Protocols supported %s %s %s\n",
6431                        ((phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) ?
6432                                "SCSI" : " "),
6433                        ((phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) ?
6434                                "NVME" : " "),
6435                        (phba->nvmet_support ? "NVMET" : " "));
6436
6437        /* Initialize the IO buffer list used by driver for SLI3 SCSI */
6438        spin_lock_init(&phba->scsi_buf_list_get_lock);
6439        INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_get);
6440        spin_lock_init(&phba->scsi_buf_list_put_lock);
6441        INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put);
6442
6443        /* Initialize the fabric iocb list */
6444        INIT_LIST_HEAD(&phba->fabric_iocb_list);
6445
6446        /* Initialize list to save ELS buffers */
6447        INIT_LIST_HEAD(&phba->elsbuf);
6448
6449        /* Initialize FCF connection rec list */
6450        INIT_LIST_HEAD(&phba->fcf_conn_rec_list);
6451
6452        /* Initialize OAS configuration list */
6453        spin_lock_init(&phba->devicelock);
6454        INIT_LIST_HEAD(&phba->luns);
6455
6456        /* MBOX heartbeat timer */
6457        timer_setup(&psli->mbox_tmo, lpfc_mbox_timeout, 0);
6458        /* Fabric block timer */
6459        timer_setup(&phba->fabric_block_timer, lpfc_fabric_block_timeout, 0);
6460        /* EA polling mode timer */
6461        timer_setup(&phba->eratt_poll, lpfc_poll_eratt, 0);
6462        /* Heartbeat timer */
6463        timer_setup(&phba->hb_tmofunc, lpfc_hb_timeout, 0);
6464
6465        INIT_DELAYED_WORK(&phba->eq_delay_work, lpfc_hb_eq_delay_work);
6466
6467        INIT_DELAYED_WORK(&phba->idle_stat_delay_work,
6468                          lpfc_idle_stat_delay_work);
6469
6470        return 0;
6471}
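
/*
 * A minimal illustrative sketch (not part of the driver) of the
 * timer_setup() pattern used in phase-1 init above: the callback receives
 * the struct timer_list pointer and recovers its containing structure with
 * from_timer(). The example_ctx type and names are hypothetical.
 */
#if 0	/* example only, not compiled */
struct example_ctx {
	struct timer_list tmo;
	int expirations;
};

static void example_timeout(struct timer_list *t)
{
	/* Map the timer back to the structure that embeds it */
	struct example_ctx *ctx = from_timer(ctx, t, tmo);

	ctx->expirations++;
}

static void example_arm(struct example_ctx *ctx)
{
	timer_setup(&ctx->tmo, example_timeout, 0);
	mod_timer(&ctx->tmo, jiffies + msecs_to_jiffies(1000));
}
#endif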
6472
6473/**
6474 * lpfc_sli_driver_resource_setup - Setup driver internal resources for SLI3 dev
6475 * @phba: pointer to lpfc hba data structure.
6476 *
6477 * This routine is invoked to set up the driver internal resources specific to
6478 * support the SLI-3 HBA device it is attached to.
6479 *
6480 * Return codes
6481 * 0 - successful
6482 * other values - error
6483 **/
6484static int
6485lpfc_sli_driver_resource_setup(struct lpfc_hba *phba)
6486{
6487        int rc, entry_sz;
6488
6489        /*
6490         * Initialize timers used by driver
6491         */
6492
6493        /* FCP polling mode timer */
6494        timer_setup(&phba->fcp_poll_timer, lpfc_poll_timeout, 0);
6495
6496        /* Host attention work mask setup */
6497        phba->work_ha_mask = (HA_ERATT | HA_MBATT | HA_LATT);
6498        phba->work_ha_mask |= (HA_RXMASK << (LPFC_ELS_RING * 4));
6499
6500        /* Get all the module params for configuring this host */
6501        lpfc_get_cfgparam(phba);
6502        /* Set up phase-1 common device driver resources */
6503
6504        rc = lpfc_setup_driver_resource_phase1(phba);
6505        if (rc)
6506                return -ENODEV;
6507
6508        if (phba->pcidev->device == PCI_DEVICE_ID_HORNET) {
6509                phba->menlo_flag |= HBA_MENLO_SUPPORT;
6510                /* check for menlo minimum sg count */
6511                if (phba->cfg_sg_seg_cnt < LPFC_DEFAULT_MENLO_SG_SEG_CNT)
6512                        phba->cfg_sg_seg_cnt = LPFC_DEFAULT_MENLO_SG_SEG_CNT;
6513        }
6514
6515        if (!phba->sli.sli3_ring)
6516                phba->sli.sli3_ring = kcalloc(LPFC_SLI3_MAX_RING,
6517                                              sizeof(struct lpfc_sli_ring),
6518                                              GFP_KERNEL);
6519        if (!phba->sli.sli3_ring)
6520                return -ENOMEM;
6521
6522        /*
6523         * Since lpfc_sg_seg_cnt is a module parameter, the sg_dma_buf_size
6524         * used to create the sg_dma_buf_pool must be dynamically calculated.
6525         */
6526
6527        if (phba->sli_rev == LPFC_SLI_REV4)
6528                entry_sz = sizeof(struct sli4_sge);
6529        else
6530                entry_sz = sizeof(struct ulp_bde64);
6531
6532        /* There are going to be 2 reserved BDEs: 1 FCP cmnd + 1 FCP rsp */
6533        if (phba->cfg_enable_bg) {
6534                /*
6535                 * The scsi_buf for a T10-DIF I/O will hold the FCP cmnd,
6536                 * the FCP rsp, and a BDE for each. Since we have no control
6537                 * over how many protection data segments the SCSI Layer
6538                 * will hand us (i.e., there could be one for every block
6539                 * in the IO), we just allocate enough BDEs to accommodate
6540                 * our max amount and we need to limit lpfc_sg_seg_cnt to
6541                 * minimize the risk of running out.
6542                 */
6543                phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
6544                        sizeof(struct fcp_rsp) +
6545                        (LPFC_MAX_SG_SEG_CNT * entry_sz);
6546
6547                if (phba->cfg_sg_seg_cnt > LPFC_MAX_SG_SEG_CNT_DIF)
6548                        phba->cfg_sg_seg_cnt = LPFC_MAX_SG_SEG_CNT_DIF;
6549
6550                /* Total BDEs in BPL for scsi_sg_list and scsi_sg_prot_list */
6551                phba->cfg_total_seg_cnt = LPFC_MAX_SG_SEG_CNT;
6552        } else {
6553                /*
6554                 * The scsi_buf for a regular I/O will hold the FCP cmnd,
6555                 * the FCP rsp, a BDE for each, and a BDE for up to
6556                 * cfg_sg_seg_cnt data segments.
6557                 */
6558                phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
6559                        sizeof(struct fcp_rsp) +
6560                        ((phba->cfg_sg_seg_cnt + 2) * entry_sz);
6561
6562                /* Total BDEs in BPL for scsi_sg_list */
6563                phba->cfg_total_seg_cnt = phba->cfg_sg_seg_cnt + 2;
6564        }
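
        /*
         * Worked example of the non-BlockGuard branch above, assuming
         * illustrative sizes of 32 bytes for fcp_cmnd, 96 bytes for
         * fcp_rsp, a 12-byte ulp_bde64 entry, and cfg_sg_seg_cnt = 64:
         *
         *   cfg_sg_dma_buf_size = 32 + 96 + ((64 + 2) * 12) = 920 bytes
         *   cfg_total_seg_cnt   = 64 + 2 = 66 BDEs
         *
         * The "+ 2" accounts for the reserved FCP cmnd and FCP rsp BDEs.
         */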
6565
6566        lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP,
6567                        "9088 INIT sg_tablesize:%d dmabuf_size:%d total_bde:%d\n",
6568                        phba->cfg_sg_seg_cnt, phba->cfg_sg_dma_buf_size,
6569                        phba->cfg_total_seg_cnt);
6570
6571        phba->max_vpi = LPFC_MAX_VPI;
6572        /* This will be set to correct value after config_port mbox */
6573        phba->max_vports = 0;
6574
6575        /*
6576         * Initialize the SLI Layer to run with lpfc HBAs.
6577         */
6578        lpfc_sli_setup(phba);
6579        lpfc_sli_queue_init(phba);
6580
6581        /* Allocate device driver memory */
6582        if (lpfc_mem_alloc(phba, BPL_ALIGN_SZ))
6583                return -ENOMEM;
6584
6585        phba->lpfc_sg_dma_buf_pool =
6586                dma_pool_create("lpfc_sg_dma_buf_pool",
6587                                &phba->pcidev->dev, phba->cfg_sg_dma_buf_size,
6588                                BPL_ALIGN_SZ, 0);
6589
6590        if (!phba->lpfc_sg_dma_buf_pool)
6591                goto fail_free_mem;
6592
6593        phba->lpfc_cmd_rsp_buf_pool =
6594                        dma_pool_create("lpfc_cmd_rsp_buf_pool",
6595                                        &phba->pcidev->dev,
6596                                        sizeof(struct fcp_cmnd) +
6597                                        sizeof(struct fcp_rsp),
6598                                        BPL_ALIGN_SZ, 0);
6599
6600        if (!phba->lpfc_cmd_rsp_buf_pool)
6601                goto fail_free_dma_buf_pool;
6602
6603        /*
6604         * Enable sr-iov virtual functions if supported and configured
6605         * through the module parameter.
6606         */
6607        if (phba->cfg_sriov_nr_virtfn > 0) {
6608                rc = lpfc_sli_probe_sriov_nr_virtfn(phba,
6609                                                 phba->cfg_sriov_nr_virtfn);
6610                if (rc) {
6611                        lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6612                                        "2808 Requested number of SR-IOV "
6613                                        "virtual functions (%d) is not "
6614                                        "supported\n",
6615                                        phba->cfg_sriov_nr_virtfn);
6616                        phba->cfg_sriov_nr_virtfn = 0;
6617                }
6618        }
6619
6620        return 0;
6621
6622fail_free_dma_buf_pool:
6623        dma_pool_destroy(phba->lpfc_sg_dma_buf_pool);
6624        phba->lpfc_sg_dma_buf_pool = NULL;
6625fail_free_mem:
6626        lpfc_mem_free(phba);
6627        return -ENOMEM;
6628}
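
/*
 * A minimal illustrative sketch (not part of the driver) of the dma_pool
 * life cycle used above: create a pool of fixed-size, aligned DMA buffers,
 * carve one out per I/O, and return it when done. All names and sizes
 * below are hypothetical.
 */
#if 0	/* example only, not compiled */
static void example_dma_pool_usage(struct device *dev)
{
	struct dma_pool *pool;
	dma_addr_t handle;
	void *buf;

	/* Every allocation from the pool shares this size and alignment */
	pool = dma_pool_create("example_pool", dev, 512, 8, 0);
	if (!pool)
		return;

	buf = dma_pool_alloc(pool, GFP_KERNEL, &handle);
	if (buf)
		dma_pool_free(pool, buf, handle);

	/* All buffers must be returned before the pool is destroyed */
	dma_pool_destroy(pool);
}
#endif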
6629
6630/**
6631 * lpfc_sli_driver_resource_unset - Unset drvr internal resources for SLI3 dev
6632 * @phba: pointer to lpfc hba data structure.
6633 *
6634 * This routine is invoked to unset the driver internal resources set up
6635 * specifically for supporting the SLI-3 HBA device it is attached to.
6636 **/
6637static void
6638lpfc_sli_driver_resource_unset(struct lpfc_hba *phba)
6639{
6640        /* Free device driver memory allocated */
6641        lpfc_mem_free_all(phba);
6642
6643        return;
6644}
6645
6646/**
6647 * lpfc_sli4_driver_resource_setup - Setup drvr internal resources for SLI4 dev
6648 * @phba: pointer to lpfc hba data structure.
6649 *
6650 * This routine is invoked to set up the driver internal resources specific to
6651 * support the SLI-4 HBA device it is attached to.
6652 *
6653 * Return codes
6654 *      0 - successful
6655 *      other values - error
6656 **/
6657static int
6658lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
6659{
6660        LPFC_MBOXQ_t *mboxq;
6661        MAILBOX_t *mb;
6662        int rc, i, max_buf_size;
6663        int longs;
6664        int extra;
6665        uint64_t wwn;
6666        u32 if_type;
6667        u32 if_fam;
6668
6669        phba->sli4_hba.num_present_cpu = lpfc_present_cpu;
6670        phba->sli4_hba.num_possible_cpu = cpumask_last(cpu_possible_mask) + 1;
6671        phba->sli4_hba.curr_disp_cpu = 0;
6672
6673        /* Get all the module params for configuring this host */
6674        lpfc_get_cfgparam(phba);
6675
6676        /* Set up phase-1 common device driver resources */
6677        rc = lpfc_setup_driver_resource_phase1(phba);
6678        if (rc)
6679                return -ENODEV;
6680
6681        /* Before proceeding, wait for POST done and device ready */
6682        rc = lpfc_sli4_post_status_check(phba);
6683        if (rc)
6684                return -ENODEV;
6685
6686        /* Allocate all driver workqueues here */
6687
6688        /* The lpfc_wq workqueue for deferred irq use */
6689        phba->wq = alloc_workqueue("lpfc_wq", WQ_MEM_RECLAIM, 0);
6690
6691        /*
6692         * Initialize timers used by driver
6693         */
6694
6695        timer_setup(&phba->rrq_tmr, lpfc_rrq_timeout, 0);
6696
6697        /* FCF rediscover timer */
6698        timer_setup(&phba->fcf.redisc_wait, lpfc_sli4_fcf_redisc_wait_tmo, 0);
6699
6700        /*
6701         * Control structure for handling external multi-buffer mailbox
6702         * command pass-through.
6703         */
6704        memset((uint8_t *)&phba->mbox_ext_buf_ctx, 0,
6705                sizeof(struct lpfc_mbox_ext_buf_ctx));
6706        INIT_LIST_HEAD(&phba->mbox_ext_buf_ctx.ext_dmabuf_list);
6707
6708        phba->max_vpi = LPFC_MAX_VPI;
6709
6710        /* This will be set to correct value after the read_config mbox */
6711        phba->max_vports = 0;
6712
6713        /* Program the default value of vlan_id and fc_map */
6714        phba->valid_vlan = 0;
6715        phba->fc_map[0] = LPFC_FCOE_FCF_MAP0;
6716        phba->fc_map[1] = LPFC_FCOE_FCF_MAP1;
6717        phba->fc_map[2] = LPFC_FCOE_FCF_MAP2;
6718
6719        /*
6720         * For SLI4, instead of using ring 0 (LPFC_FCP_RING) for FCP commands
6721         * we will associate a new ring, for each EQ/CQ/WQ tuple.
6722         * The WQ create will allocate the ring.
6723         */
6724
6725        /* Initialize buffer queue management fields */
6726        INIT_LIST_HEAD(&phba->hbqs[LPFC_ELS_HBQ].hbq_buffer_list);
6727        phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_sli4_rb_alloc;
6728        phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_sli4_rb_free;
6729
6730        /* for VMID idle timeout if VMID is enabled */
6731        if (lpfc_is_vmid_enabled(phba))
6732                timer_setup(&phba->inactive_vmid_poll, lpfc_vmid_poll, 0);
6733
6734        /*
6735         * Initialize the SLI Layer to run with lpfc SLI4 HBAs.
6736         */
6737        /* Initialize the Abort buffer list used by driver */
6738        spin_lock_init(&phba->sli4_hba.abts_io_buf_list_lock);
6739        INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_io_buf_list);
6740
6741        if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
6742                /* Initialize the Abort nvme buffer list used by driver */
6743                spin_lock_init(&phba->sli4_hba.abts_nvmet_buf_list_lock);
6744                INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
6745                INIT_LIST_HEAD(&phba->sli4_hba.lpfc_nvmet_io_wait_list);
6746                spin_lock_init(&phba->sli4_hba.t_active_list_lock);
6747                INIT_LIST_HEAD(&phba->sli4_hba.t_active_ctx_list);
6748        }
6749
6750        /* This abort list used by worker thread */
6751        spin_lock_init(&phba->sli4_hba.sgl_list_lock);
6752        spin_lock_init(&phba->sli4_hba.nvmet_io_wait_lock);
6753        spin_lock_init(&phba->sli4_hba.asynce_list_lock);
6754        spin_lock_init(&phba->sli4_hba.els_xri_abrt_list_lock);
6755
6756        /*
6757         * Initialize driver internal slow-path work queues
6758         */
6759
6760        /* Driver internal slow-path CQ Event pool */
6761        INIT_LIST_HEAD(&phba->sli4_hba.sp_cqe_event_pool);
6762        /* Response IOCB work queue list */
6763        INIT_LIST_HEAD(&phba->sli4_hba.sp_queue_event);
6764        /* Asynchronous event CQ Event work queue list */
6765        INIT_LIST_HEAD(&phba->sli4_hba.sp_asynce_work_queue);
6766        /* Slow-path XRI aborted CQ Event work queue list */
6767        INIT_LIST_HEAD(&phba->sli4_hba.sp_els_xri_aborted_work_queue);
6768        /* Receive queue CQ Event work queue list */
6769        INIT_LIST_HEAD(&phba->sli4_hba.sp_unsol_work_queue);
6770
6771        /* Initialize extent block lists. */
6772        INIT_LIST_HEAD(&phba->sli4_hba.lpfc_rpi_blk_list);
6773        INIT_LIST_HEAD(&phba->sli4_hba.lpfc_xri_blk_list);
6774        INIT_LIST_HEAD(&phba->sli4_hba.lpfc_vfi_blk_list);
6775        INIT_LIST_HEAD(&phba->lpfc_vpi_blk_list);
6776
6777        /* Initialize mboxq lists. Even if the early init routines fail,
6778         * these lists need to be correctly initialized.
6779         */
6780        INIT_LIST_HEAD(&phba->sli.mboxq);
6781        INIT_LIST_HEAD(&phba->sli.mboxq_cmpl);
6782
6783        /* initialize optic_state to 0xFF */
6784        phba->sli4_hba.lnk_info.optic_state = 0xff;
6785
6786        /* Allocate device driver memory */
6787        rc = lpfc_mem_alloc(phba, SGL_ALIGN_SZ);
6788        if (rc)
6789                return -ENOMEM;
6790
6791        /* IF Type 2 ports get initialized now. */
6792        if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) >=
6793            LPFC_SLI_INTF_IF_TYPE_2) {
6794                rc = lpfc_pci_function_reset(phba);
6795                if (unlikely(rc)) {
6796                        rc = -ENODEV;
6797                        goto out_free_mem;
6798                }
6799                phba->temp_sensor_support = 1;
6800        }
6801
6802        /* Create the bootstrap mailbox command */
6803        rc = lpfc_create_bootstrap_mbox(phba);
6804        if (unlikely(rc))
6805                goto out_free_mem;
6806
6807        /* Set up the host's endian order with the device. */
6808        rc = lpfc_setup_endian_order(phba);
6809        if (unlikely(rc))
6810                goto out_free_bsmbx;
6811
6812        /* Set up the hba's configuration parameters. */
6813        rc = lpfc_sli4_read_config(phba);
6814        if (unlikely(rc))
6815                goto out_free_bsmbx;
6816        rc = lpfc_mem_alloc_active_rrq_pool_s4(phba);
6817        if (unlikely(rc))
6818                goto out_free_bsmbx;
6819
6820        /* IF Type 0 ports get initialized now. */
6821        if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
6822            LPFC_SLI_INTF_IF_TYPE_0) {
6823                rc = lpfc_pci_function_reset(phba);
6824                if (unlikely(rc))
6825                        goto out_free_bsmbx;
6826        }
6827
6828        mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
6829                                                       GFP_KERNEL);
6830        if (!mboxq) {
6831                rc = -ENOMEM;
6832                goto out_free_bsmbx;
6833        }
6834
6835        /* Check for NVMET being configured */
6836        phba->nvmet_support = 0;
6837        if (lpfc_enable_nvmet_cnt) {
6838
6839                /* First get WWN of HBA instance */
6840                lpfc_read_nv(phba, mboxq);
6841                rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
6842                if (rc != MBX_SUCCESS) {
6843                        lpfc_printf_log(phba, KERN_ERR,
6844                                        LOG_TRACE_EVENT,
6845                                        "6016 Mailbox failed, mbxCmd x%x "
6846                                        "READ_NV, mbxStatus x%x\n",
6847                                        bf_get(lpfc_mqe_command, &mboxq->u.mqe),
6848                                        bf_get(lpfc_mqe_status, &mboxq->u.mqe));
6849                        mempool_free(mboxq, phba->mbox_mem_pool);
6850                        rc = -EIO;
6851                        goto out_free_bsmbx;
6852                }
6853                mb = &mboxq->u.mb;
6854                memcpy(&wwn, (char *)mb->un.varRDnvp.nodename,
6855                       sizeof(uint64_t));
6856                wwn = cpu_to_be64(wwn);
6857                phba->sli4_hba.wwnn.u.name = wwn;
6858                memcpy(&wwn, (char *)mb->un.varRDnvp.portname,
6859                       sizeof(uint64_t));
6860                /* wwn is WWPN of HBA instance */
6861                wwn = cpu_to_be64(wwn);
6862                phba->sli4_hba.wwpn.u.name = wwn;
6863
6864                /* Check to see if it matches any module parameter */
6865                for (i = 0; i < lpfc_enable_nvmet_cnt; i++) {
6866                        if (wwn == lpfc_enable_nvmet[i]) {
6867#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
6868                                if (lpfc_nvmet_mem_alloc(phba))
6869                                        break;
6870
6871                                phba->nvmet_support = 1; /* a match */
6872
6873                                lpfc_printf_log(phba, KERN_ERR,
6874                                                LOG_TRACE_EVENT,
6875                                                "6017 NVME Target %016llx\n",
6876                                                wwn);
6877#else
6878                                lpfc_printf_log(phba, KERN_ERR,
6879                                                LOG_TRACE_EVENT,
6880                                                "6021 Can't enable NVME Target."
6881                                                " NVME_TARGET_FC infrastructure"
6882                                                " is not in kernel\n");
6883#endif
6884                                /* Not supported for NVMET */
6885                                phba->cfg_xri_rebalancing = 0;
6886                                if (phba->irq_chann_mode == NHT_MODE) {
6887                                        phba->cfg_irq_chann =
6888                                                phba->sli4_hba.num_present_cpu;
6889                                        phba->cfg_hdw_queue =
6890                                                phba->sli4_hba.num_present_cpu;
6891                                        phba->irq_chann_mode = NORMAL_MODE;
6892                                }
6893                                break;
6894                        }
6895                }
6896        }
6897
6898        lpfc_nvme_mod_param_dep(phba);
6899
6900        /*
6901         * Get sli4 parameters that override parameters from Port capabilities.
6902         * If this call fails, it isn't critical unless the SLI4 parameters come
6903         * back in conflict.
6904         */
6905        rc = lpfc_get_sli4_parameters(phba, mboxq);
6906        if (rc) {
6907                if_type = bf_get(lpfc_sli_intf_if_type,
6908                                 &phba->sli4_hba.sli_intf);
6909                if_fam = bf_get(lpfc_sli_intf_sli_family,
6910                                &phba->sli4_hba.sli_intf);
6911                if (phba->sli4_hba.extents_in_use &&
6912                    phba->sli4_hba.rpi_hdrs_in_use) {
6913                        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6914                                        "2999 Unsupported SLI4 Parameters "
6915                                        "Extents and RPI headers enabled.\n");
6916                        if (if_type == LPFC_SLI_INTF_IF_TYPE_0 &&
6917                            if_fam ==  LPFC_SLI_INTF_FAMILY_BE2) {
6918                                mempool_free(mboxq, phba->mbox_mem_pool);
6919                                rc = -EIO;
6920                                goto out_free_bsmbx;
6921                        }
6922                }
6923                if (!(if_type == LPFC_SLI_INTF_IF_TYPE_0 &&
6924                      if_fam == LPFC_SLI_INTF_FAMILY_BE2)) {
6925                        mempool_free(mboxq, phba->mbox_mem_pool);
6926                        rc = -EIO;
6927                        goto out_free_bsmbx;
6928                }
6929        }
6930
6931        /*
6932         * 1 for cmd, 1 for rsp, NVME adds an extra one
6933         * for boundary conditions in its max_sgl_segment template.
6934         */
6935        extra = 2;
6936        if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
6937                extra++;
6938
6939        /*
6940         * Regardless of which family our adapter is in, we are
6941         * limited to 2 pages (512 SGEs) for our SGL.
6942         * There are going to be 2 reserved SGEs: 1 FCP cmnd + 1 FCP rsp
6943         */
6944        max_buf_size = (2 * SLI4_PAGE_SIZE);
6945
6946        /*
6947         * Since lpfc_sg_seg_cnt is module param, the sg_dma_buf_size
6948         * used to create the sg_dma_buf_pool must be calculated.
6949         */
6950        if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) {
6951                /* Both cfg_enable_bg and cfg_external_dif code paths */
6952
6953                /*
6954                 * The scsi_buf for a T10-DIF I/O holds the FCP cmnd,
6955                 * the FCP rsp, and an SGE. Since we have no control
6956                 * over how many protection segments the SCSI Layer
6957                 * will hand us (i.e., there could be one for every block
6958                 * in the IO), just allocate enough SGEs to accommodate
6959                 * our max amount and we need to limit lpfc_sg_seg_cnt
6960                 * to minimize the risk of running out.
6961                 */
6962                phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
6963                                sizeof(struct fcp_rsp) + max_buf_size;
6964
6965                /* Total SGEs for scsi_sg_list and scsi_sg_prot_list */
6966                phba->cfg_total_seg_cnt = LPFC_MAX_SGL_SEG_CNT;
6967
6968                /*
6969                 * If supporting DIF, reduce the seg count for scsi to
6970                 * allow room for the DIF sges.
6971                 */
6972                if (phba->cfg_enable_bg &&
6973                    phba->cfg_sg_seg_cnt > LPFC_MAX_BG_SLI4_SEG_CNT_DIF)
6974                        phba->cfg_scsi_seg_cnt = LPFC_MAX_BG_SLI4_SEG_CNT_DIF;
6975                else
6976                        phba->cfg_scsi_seg_cnt = phba->cfg_sg_seg_cnt;
6977
6978        } else {
6979                /*
6980                 * The scsi_buf for a regular I/O holds the FCP cmnd,
6981                 * the FCP rsp, a SGE for each, and a SGE for up to
6982                 * cfg_sg_seg_cnt data segments.
6983                 */
6984                phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
6985                                sizeof(struct fcp_rsp) +
6986                                ((phba->cfg_sg_seg_cnt + extra) *
6987                                sizeof(struct sli4_sge));
6988
6989                /* Total SGEs for scsi_sg_list */
6990                phba->cfg_total_seg_cnt = phba->cfg_sg_seg_cnt + extra;
6991                phba->cfg_scsi_seg_cnt = phba->cfg_sg_seg_cnt;
6992
6993                /*
6994                 * NOTE: if (phba->cfg_sg_seg_cnt + extra) <= 256 we only
6995                 * need to post 1 page for the SGL.
6996                 */
6997        }
6998
6999        if (phba->cfg_xpsgl && !phba->nvmet_support)
7000                phba->cfg_sg_dma_buf_size = LPFC_DEFAULT_XPSGL_SIZE;
7001        else if (phba->cfg_sg_dma_buf_size  <= LPFC_MIN_SG_SLI4_BUF_SZ)
7002                phba->cfg_sg_dma_buf_size = LPFC_MIN_SG_SLI4_BUF_SZ;
7003        else
7004                phba->cfg_sg_dma_buf_size =
7005                                SLI4_PAGE_ALIGN(phba->cfg_sg_dma_buf_size);
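
        /*
         * Worked example of the SLI-4 sizing above, assuming illustrative
         * sizes of 32 bytes for fcp_cmnd, 96 bytes for fcp_rsp, a 16-byte
         * sli4_sge, cfg_sg_seg_cnt = 64, and extra = 3 (NVME enabled):
         *
         *   cfg_sg_dma_buf_size = 32 + 96 + ((64 + 3) * 16) = 1200 bytes
         *
         * which SLI4_PAGE_ALIGN() rounds up to one 4KB page (assuming 1200
         * exceeds LPFC_MIN_SG_SLI4_BUF_SZ), so the border_sge_num computed
         * just below becomes 4096 / 16 = 256 SGEs per buffer.
         */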
7006
7007        phba->border_sge_num = phba->cfg_sg_dma_buf_size /
7008                               sizeof(struct sli4_sge);
7009
7010        /* Limit to LPFC_MAX_NVME_SEG_CNT for NVME. */
7011        if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
7012                if (phba->cfg_sg_seg_cnt > LPFC_MAX_NVME_SEG_CNT) {
7013                        lpfc_printf_log(phba, KERN_INFO, LOG_NVME | LOG_INIT,
7014                                        "6300 Reducing NVME sg segment "
7015                                        "cnt to %d\n",
7016                                        LPFC_MAX_NVME_SEG_CNT);
7017                        phba->cfg_nvme_seg_cnt = LPFC_MAX_NVME_SEG_CNT;
7018                } else
7019                        phba->cfg_nvme_seg_cnt = phba->cfg_sg_seg_cnt;
7020        }
7021
7022        lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP,
7023                        "9087 sg_seg_cnt:%d dmabuf_size:%d "
7024                        "total:%d scsi:%d nvme:%d\n",
7025                        phba->cfg_sg_seg_cnt, phba->cfg_sg_dma_buf_size,
7026                        phba->cfg_total_seg_cnt,  phba->cfg_scsi_seg_cnt,
7027                        phba->cfg_nvme_seg_cnt);
7028
7029        if (phba->cfg_sg_dma_buf_size < SLI4_PAGE_SIZE)
7030                i = phba->cfg_sg_dma_buf_size;
7031        else
7032                i = SLI4_PAGE_SIZE;
7033
7034        phba->lpfc_sg_dma_buf_pool =
7035                        dma_pool_create("lpfc_sg_dma_buf_pool",
7036                                        &phba->pcidev->dev,
7037                                        phba->cfg_sg_dma_buf_size,
7038                                        i, 0);
7039        if (!phba->lpfc_sg_dma_buf_pool)
7040                goto out_free_bsmbx;
7041
7042        phba->lpfc_cmd_rsp_buf_pool =
7043                        dma_pool_create("lpfc_cmd_rsp_buf_pool",
7044                                        &phba->pcidev->dev,
7045                                        sizeof(struct fcp_cmnd) +
7046                                        sizeof(struct fcp_rsp),
7047                                        i, 0);
7048        if (!phba->lpfc_cmd_rsp_buf_pool)
7049                goto out_free_sg_dma_buf;
7050
7051        mempool_free(mboxq, phba->mbox_mem_pool);
7052
7053        /* Verify OAS is supported */
7054        lpfc_sli4_oas_verify(phba);
7055
7056        /* Verify RAS support on adapter */
7057        lpfc_sli4_ras_init(phba);
7058
7059        /* Verify all the SLI4 queues */
7060        rc = lpfc_sli4_queue_verify(phba);
7061        if (rc)
7062                goto out_free_cmd_rsp_buf;
7063
7064        /* Create driver internal CQE event pool */
7065        rc = lpfc_sli4_cq_event_pool_create(phba);
7066        if (rc)
7067                goto out_free_cmd_rsp_buf;
7068
7069        /* Initialize sgl lists per host */
7070        lpfc_init_sgl_list(phba);
7071
7072        /* Allocate and initialize active sgl array */
7073        rc = lpfc_init_active_sgl_array(phba);
7074        if (rc) {
7075                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7076                                "1430 Failed to initialize sgl list.\n");
7077                goto out_destroy_cq_event_pool;
7078        }
7079        rc = lpfc_sli4_init_rpi_hdrs(phba);
7080        if (rc) {
7081                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7082                                "1432 Failed to initialize rpi headers.\n");
7083                goto out_free_active_sgl;
7084        }
7085
7086        /* Allocate eligible FCF bmask memory for FCF roundrobin failover */
7087        longs = (LPFC_SLI4_FCF_TBL_INDX_MAX + BITS_PER_LONG - 1)/BITS_PER_LONG;
7088        phba->fcf.fcf_rr_bmask = kcalloc(longs, sizeof(unsigned long),
7089                                         GFP_KERNEL);
7090        if (!phba->fcf.fcf_rr_bmask) {
7091                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7092                                "2759 Failed allocate memory for FCF round "
7093                                "robin failover bmask\n");
7094                rc = -ENOMEM;
7095                goto out_remove_rpi_hdrs;
7096        }
7097
7098        phba->sli4_hba.hba_eq_hdl = kcalloc(phba->cfg_irq_chann,
7099                                            sizeof(struct lpfc_hba_eq_hdl),
7100                                            GFP_KERNEL);
7101        if (!phba->sli4_hba.hba_eq_hdl) {
7102                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7103                                "2572 Failed allocate memory for "
7104                                "fast-path per-EQ handle array\n");
7105                rc = -ENOMEM;
7106                goto out_free_fcf_rr_bmask;
7107        }
7108
7109        phba->sli4_hba.cpu_map = kcalloc(phba->sli4_hba.num_possible_cpu,
7110                                        sizeof(struct lpfc_vector_map_info),
7111                                        GFP_KERNEL);
7112        if (!phba->sli4_hba.cpu_map) {
7113                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7114                                "3327 Failed allocate memory for msi-x "
7115                                "interrupt vector mapping\n");
7116                rc = -ENOMEM;
7117                goto out_free_hba_eq_hdl;
7118        }
7119
7120        phba->sli4_hba.eq_info = alloc_percpu(struct lpfc_eq_intr_info);
7121        if (!phba->sli4_hba.eq_info) {
7122                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7123                                "3321 Failed allocation for per_cpu stats\n");
7124                rc = -ENOMEM;
7125                goto out_free_hba_cpu_map;
7126        }
7127
7128        phba->sli4_hba.idle_stat = kcalloc(phba->sli4_hba.num_possible_cpu,
7129                                           sizeof(*phba->sli4_hba.idle_stat),
7130                                           GFP_KERNEL);
7131        if (!phba->sli4_hba.idle_stat) {
7132                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7133                                "3390 Failed allocation for idle_stat\n");
7134                rc = -ENOMEM;
7135                goto out_free_hba_eq_info;
7136        }
7137
7138#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
7139        phba->sli4_hba.c_stat = alloc_percpu(struct lpfc_hdwq_stat);
7140        if (!phba->sli4_hba.c_stat) {
7141                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7142                                "3332 Failed allocating per cpu hdwq stats\n");
7143                rc = -ENOMEM;
7144                goto out_free_hba_idle_stat;
7145        }
7146#endif
7147
7148        /*
7149         * Enable sr-iov virtual functions if supported and configured
7150         * through the module parameter.
7151         */
7152        if (phba->cfg_sriov_nr_virtfn > 0) {
7153                rc = lpfc_sli_probe_sriov_nr_virtfn(phba,
7154                                                 phba->cfg_sriov_nr_virtfn);
7155                if (rc) {
7156                        lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
7157                                        "3020 Requested number of SR-IOV "
7158                                        "virtual functions (%d) is not "
7159                                        "supported\n",
7160                                        phba->cfg_sriov_nr_virtfn);
7161                        phba->cfg_sriov_nr_virtfn = 0;
7162                }
7163        }
7164
7165        return 0;
7166
7167#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
7168out_free_hba_idle_stat:
7169        kfree(phba->sli4_hba.idle_stat);
7170#endif
7171out_free_hba_eq_info:
7172        free_percpu(phba->sli4_hba.eq_info);
7173out_free_hba_cpu_map:
7174        kfree(phba->sli4_hba.cpu_map);
7175out_free_hba_eq_hdl:
7176        kfree(phba->sli4_hba.hba_eq_hdl);
7177out_free_fcf_rr_bmask:
7178        kfree(phba->fcf.fcf_rr_bmask);
7179out_remove_rpi_hdrs:
7180        lpfc_sli4_remove_rpi_hdrs(phba);
7181out_free_active_sgl:
7182        lpfc_free_active_sgl(phba);
7183out_destroy_cq_event_pool:
7184        lpfc_sli4_cq_event_pool_destroy(phba);
7185out_free_cmd_rsp_buf:
7186        dma_pool_destroy(phba->lpfc_cmd_rsp_buf_pool);
7187        phba->lpfc_cmd_rsp_buf_pool = NULL;
7188out_free_sg_dma_buf:
7189        dma_pool_destroy(phba->lpfc_sg_dma_buf_pool);
7190        phba->lpfc_sg_dma_buf_pool = NULL;
7191out_free_bsmbx:
7192        lpfc_destroy_bootstrap_mbox(phba);
7193out_free_mem:
7194        lpfc_mem_free(phba);
7195        return rc;
7196}
7197
7198/**
7199 * lpfc_sli4_driver_resource_unset - Unset drvr internal resources for SLI4 dev
7200 * @phba: pointer to lpfc hba data structure.
7201 *
7202 * This routine is invoked to unset the driver internal resources set up
7203 * specifically for supporting the SLI-4 HBA device it is attached to.
7204 **/
7205static void
7206lpfc_sli4_driver_resource_unset(struct lpfc_hba *phba)
7207{
7208        struct lpfc_fcf_conn_entry *conn_entry, *next_conn_entry;
7209
7210        free_percpu(phba->sli4_hba.eq_info);
7211#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
7212        free_percpu(phba->sli4_hba.c_stat);
7213#endif
7214        kfree(phba->sli4_hba.idle_stat);
7215
7216        /* Free memory allocated for msi-x interrupt vector to CPU mapping */
7217        kfree(phba->sli4_hba.cpu_map);
7218        phba->sli4_hba.num_possible_cpu = 0;
7219        phba->sli4_hba.num_present_cpu = 0;
7220        phba->sli4_hba.curr_disp_cpu = 0;
7221        cpumask_clear(&phba->sli4_hba.irq_aff_mask);
7222
7223        /* Free memory allocated for fast-path work queue handles */
7224        kfree(phba->sli4_hba.hba_eq_hdl);
7225
7226        /* Free the allocated rpi headers. */
7227        lpfc_sli4_remove_rpi_hdrs(phba);
7228        lpfc_sli4_remove_rpis(phba);
7229
7230        /* Free eligible FCF index bmask */
7231        kfree(phba->fcf.fcf_rr_bmask);
7232
7233        /* Free the ELS sgl list */
7234        lpfc_free_active_sgl(phba);
7235        lpfc_free_els_sgl_list(phba);
7236        lpfc_free_nvmet_sgl_list(phba);
7237
7238        /* Free the completion queue EQ event pool */
7239        lpfc_sli4_cq_event_release_all(phba);
7240        lpfc_sli4_cq_event_pool_destroy(phba);
7241
7242        /* Release resource identifiers. */
7243        lpfc_sli4_dealloc_resource_identifiers(phba);
7244
7245        /* Free the bsmbx region. */
7246        lpfc_destroy_bootstrap_mbox(phba);
7247
7248        /* Free the SLI Layer memory with SLI4 HBAs */
7249        lpfc_mem_free_all(phba);
7250
7251        /* Free the current connect table */
7252        list_for_each_entry_safe(conn_entry, next_conn_entry,
7253                &phba->fcf_conn_rec_list, list) {
7254                list_del_init(&conn_entry->list);
7255                kfree(conn_entry);
7256        }
7257
7258        return;
7259}
7260
7261/**
7262 * lpfc_init_api_table_setup - Set up init api function jump table
7263 * @phba: The hba struct for which this call is being executed.
7264 * @dev_grp: The HBA PCI-Device group number.
7265 *
7266 * This routine sets up the device INIT interface API function jump table
7267 * in @phba struct.
7268 *
7269 * Returns: 0 - success, -ENODEV - failure.
7270 **/
7271int
7272lpfc_init_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
7273{
7274        phba->lpfc_hba_init_link = lpfc_hba_init_link;
7275        phba->lpfc_hba_down_link = lpfc_hba_down_link;
7276        phba->lpfc_selective_reset = lpfc_selective_reset;
7277        switch (dev_grp) {
7278        case LPFC_PCI_DEV_LP:
7279                phba->lpfc_hba_down_post = lpfc_hba_down_post_s3;
7280                phba->lpfc_handle_eratt = lpfc_handle_eratt_s3;
7281                phba->lpfc_stop_port = lpfc_stop_port_s3;
7282                break;
7283        case LPFC_PCI_DEV_OC:
7284                phba->lpfc_hba_down_post = lpfc_hba_down_post_s4;
7285                phba->lpfc_handle_eratt = lpfc_handle_eratt_s4;
7286                phba->lpfc_stop_port = lpfc_stop_port_s4;
7287                break;
7288        default:
7289                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7290                                "1431 Invalid HBA PCI-device group: 0x%x\n",
7291                                dev_grp);
7292                return -ENODEV;
7293        }
7294        return 0;
7295}
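
/*
 * A minimal illustrative sketch (not part of the driver) of how the jump
 * table set up above decouples callers from the SLI revision: callers
 * invoke the function pointer, and whichever _s3/_s4 variant was bound for
 * this device group runs. The example_stop() helper is hypothetical.
 */
#if 0	/* example only, not compiled */
static void example_stop(struct lpfc_hba *phba)
{
	/* Dispatches to lpfc_stop_port_s3() or lpfc_stop_port_s4() */
	phba->lpfc_stop_port(phba);
}
#endif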
7296
7297/**
7298 * lpfc_setup_driver_resource_phase2 - Phase2 setup driver internal resources.
7299 * @phba: pointer to lpfc hba data structure.
7300 *
7301 * This routine is invoked to set up the driver internal resources after the
7302 * device-specific resource setup to support the HBA device it is attached to.
7303 *
7304 * Return codes
7305 *      0 - successful
7306 *      other values - error
7307 **/
7308static int
7309lpfc_setup_driver_resource_phase2(struct lpfc_hba *phba)
7310{
7311        int error;
7312
7313        /* Startup the kernel thread for this host adapter. */
7314        phba->worker_thread = kthread_run(lpfc_do_work, phba,
7315                                          "lpfc_worker_%d", phba->brd_no);
7316        if (IS_ERR(phba->worker_thread)) {
7317                error = PTR_ERR(phba->worker_thread);
7318                return error;
7319        }
7320
7321        return 0;
7322}
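
/*
 * A minimal illustrative sketch (not part of the driver) of the kthread
 * pattern started above: kthread_run() spawns the thread, and the thread
 * body loops until kthread_stop() is called from the teardown path (see
 * lpfc_unset_driver_resource_phase2() below). The example_worker() body is
 * hypothetical.
 */
#if 0	/* example only, not compiled */
static int example_worker(void *arg)
{
	while (!kthread_should_stop()) {
		/* Wait for work, handle one batch, then check again */
		schedule_timeout_interruptible(HZ);
	}
	return 0;
}
#endif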
7323
7324/**
7325 * lpfc_unset_driver_resource_phase2 - Phase2 unset driver internal resources.
7326 * @phba: pointer to lpfc hba data structure.
7327 *
7328 * This routine is invoked to unset the driver internal resources set up after
7329 * the device-specific resource setup for supporting the HBA device it is
7330 * attached to.
7331 **/
7332static void
7333lpfc_unset_driver_resource_phase2(struct lpfc_hba *phba)
7334{
7335        if (phba->wq) {
7336                flush_workqueue(phba->wq);
7337                destroy_workqueue(phba->wq);
7338                phba->wq = NULL;
7339        }
7340
7341        /* Stop kernel worker thread */
7342        if (phba->worker_thread)
7343                kthread_stop(phba->worker_thread);
7344}
7345
7346/**
7347 * lpfc_free_iocb_list - Free iocb list.
7348 * @phba: pointer to lpfc hba data structure.
7349 *
7350 * This routine is invoked to free the driver's IOCB list and memory.
7351 **/
7352void
7353lpfc_free_iocb_list(struct lpfc_hba *phba)
7354{
7355        struct lpfc_iocbq *iocbq_entry = NULL, *iocbq_next = NULL;
7356
7357        spin_lock_irq(&phba->hbalock);
7358        list_for_each_entry_safe(iocbq_entry, iocbq_next,
7359                                 &phba->lpfc_iocb_list, list) {
7360                list_del(&iocbq_entry->list);
7361                kfree(iocbq_entry);
7362                phba->total_iocbq_bufs--;
7363        }
7364        spin_unlock_irq(&phba->hbalock);
7365
7366        return;
7367}
7368
7369/**
7370 * lpfc_init_iocb_list - Allocate and initialize iocb list.
7371 * @phba: pointer to lpfc hba data structure.
7372 * @iocb_count: number of requested iocbs
7373 *
7374 * This routine is invoked to allocate and initialize the driver's IOCB
7375 * list and set up the IOCB tag array accordingly.
7376 *
7377 * Return codes
7378 *      0 - successful
7379 *      other values - error
7380 **/
7381int
7382lpfc_init_iocb_list(struct lpfc_hba *phba, int iocb_count)
7383{
7384        struct lpfc_iocbq *iocbq_entry = NULL;
7385        uint16_t iotag;
7386        int i;
7387
7388        /* Initialize and populate the iocb list per host.  */
7389        INIT_LIST_HEAD(&phba->lpfc_iocb_list);
7390        for (i = 0; i < iocb_count; i++) {
7391                iocbq_entry = kzalloc(sizeof(struct lpfc_iocbq), GFP_KERNEL);
7392                if (iocbq_entry == NULL) {
7393                        printk(KERN_ERR "%s: only allocated %d iocbs of "
7394                                "expected %d count. Unloading driver.\n",
7395                                __func__, i, iocb_count);
7396                        goto out_free_iocbq;
7397                }
7398
7399                iotag = lpfc_sli_next_iotag(phba, iocbq_entry);
7400                if (iotag == 0) {
7401                        kfree(iocbq_entry);
7402                        printk(KERN_ERR "%s: failed to allocate IOTAG. "
7403                                "Unloading driver.\n", __func__);
7404                        goto out_free_iocbq;
7405                }
7406                iocbq_entry->sli4_lxritag = NO_XRI;
7407                iocbq_entry->sli4_xritag = NO_XRI;
7408
7409                spin_lock_irq(&phba->hbalock);
7410                list_add(&iocbq_entry->list, &phba->lpfc_iocb_list);
7411                phba->total_iocbq_bufs++;
7412                spin_unlock_irq(&phba->hbalock);
7413        }
7414
7415        return 0;
7416
7417out_free_iocbq:
7418        lpfc_free_iocb_list(phba);
7419
7420        return -ENOMEM;
7421}
7422
7423/**
7424 * lpfc_free_sgl_list - Free a given sgl list.
7425 * @phba: pointer to lpfc hba data structure.
7426 * @sglq_list: pointer to the head of sgl list.
7427 *
7428 * This routine is invoked to free a given sgl list and its memory.
7429 **/
7430void
7431lpfc_free_sgl_list(struct lpfc_hba *phba, struct list_head *sglq_list)
7432{
7433        struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
7434
7435        list_for_each_entry_safe(sglq_entry, sglq_next, sglq_list, list) {
7436                list_del(&sglq_entry->list);
7437                lpfc_mbuf_free(phba, sglq_entry->virt, sglq_entry->phys);
7438                kfree(sglq_entry);
7439        }
7440}
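
/*
 * Illustrative note: list_for_each_entry_safe() is used above because each
 * entry is unlinked and freed inside the loop; the _safe variant caches
 * the next pointer before the current node is deleted, so traversal
 * survives the kfree(). A minimal sketch with a hypothetical example_node:
 */
#if 0	/* example only, not compiled */
struct example_node {
	struct list_head list;
};

static void example_drain(struct list_head *head)
{
	struct example_node *cur, *tmp;

	list_for_each_entry_safe(cur, tmp, head, list) {
		list_del(&cur->list);
		kfree(cur);	/* safe: tmp already points at the next node */
	}
}
#endif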
7441
7442/**
7443 * lpfc_free_els_sgl_list - Free els sgl list.
7444 * @phba: pointer to lpfc hba data structure.
7445 *
7446 * This routine is invoked to free the driver's els sgl list and memory.
7447 **/
7448static void
7449lpfc_free_els_sgl_list(struct lpfc_hba *phba)
7450{
7451        LIST_HEAD(sglq_list);
7452
7453        /* Retrieve all els sgls from driver list */
7454        spin_lock_irq(&phba->sli4_hba.sgl_list_lock);
7455        list_splice_init(&phba->sli4_hba.lpfc_els_sgl_list, &sglq_list);
7456        spin_unlock_irq(&phba->sli4_hba.sgl_list_lock);
7457
7458        /* Now free the sgl list */
7459        lpfc_free_sgl_list(phba, &sglq_list);
7460}
7461
7462/**
7463 * lpfc_free_nvmet_sgl_list - Free nvmet sgl list.
7464 * @phba: pointer to lpfc hba data structure.
7465 *
7466 * This routine is invoked to free the driver's nvmet sgl list and memory.
7467 **/
7468static void
7469lpfc_free_nvmet_sgl_list(struct lpfc_hba *phba)
7470{
7471        struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
7472        LIST_HEAD(sglq_list);
7473
7474        /* Retrieve all nvmet sgls from driver list */
7475        spin_lock_irq(&phba->hbalock);
7476        spin_lock(&phba->sli4_hba.sgl_list_lock);
7477        list_splice_init(&phba->sli4_hba.lpfc_nvmet_sgl_list, &sglq_list);
7478        spin_unlock(&phba->sli4_hba.sgl_list_lock);
7479        spin_unlock_irq(&phba->hbalock);
7480
7481        /* Now free the sgl list */
7482        list_for_each_entry_safe(sglq_entry, sglq_next, &sglq_list, list) {
7483                list_del(&sglq_entry->list);
7484                lpfc_nvmet_buf_free(phba, sglq_entry->virt, sglq_entry->phys);
7485                kfree(sglq_entry);
7486        }
7487
7488        /* Update the nvmet_xri_cnt to reflect no current sgls.
7489         * The next initialization cycle sets the count and allocates
7490         * the sgls over again.
7491         */
7492        phba->sli4_hba.nvmet_xri_cnt = 0;
7493}
7494
7495/**
7496 * lpfc_init_active_sgl_array - Allocate the buf to track active ELS XRIs.
7497 * @phba: pointer to lpfc hba data structure.
7498 *
7499 * This routine is invoked to allocate the driver's active sgl memory.
7500 * This array will hold the sglq_entry's for active IOs.
7501 **/
7502static int
7503lpfc_init_active_sgl_array(struct lpfc_hba *phba)
7504{
7505        int size;
7506        size = sizeof(struct lpfc_sglq *);
7507        size *= phba->sli4_hba.max_cfg_param.max_xri;
7508
7509        phba->sli4_hba.lpfc_sglq_active_list =
7510                kzalloc(size, GFP_KERNEL);
7511        if (!phba->sli4_hba.lpfc_sglq_active_list)
7512                return -ENOMEM;
7513        return 0;
7514}
7515
7516/**
7517 * lpfc_free_active_sgl - Free the buf that tracks active ELS XRIs.
7518 * @phba: pointer to lpfc hba data structure.
7519 *
7520 * This routine is invoked to walk through the array of active sglq entries
7521 * and free all of the resources.
7522 * This is just a place holder for now.
7523 **/
7524static void
7525lpfc_free_active_sgl(struct lpfc_hba *phba)
7526{
7527        kfree(phba->sli4_hba.lpfc_sglq_active_list);
7528}
7529
7530/**
7531 * lpfc_init_sgl_list - Allocate and initialize sgl list.
7532 * @phba: pointer to lpfc hba data structure.
7533 *
7534 * This routine is invoked to allocate and initialize the driver's sgl
7535 * list and set up the sgl xritag tag array accordingly.
7536 *
7537 **/
7538static void
7539lpfc_init_sgl_list(struct lpfc_hba *phba)
7540{
7541        /* Initialize and populate the sglq list per host/VF. */
7542        INIT_LIST_HEAD(&phba->sli4_hba.lpfc_els_sgl_list);
7543        INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_els_sgl_list);
7544        INIT_LIST_HEAD(&phba->sli4_hba.lpfc_nvmet_sgl_list);
7545        INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
7546
7547        /* els xri-sgl book keeping */
7548        phba->sli4_hba.els_xri_cnt = 0;
7549
7550        /* nvme xri-buffer book keeping */
7551        phba->sli4_hba.io_xri_cnt = 0;
7552}
7553
7554/**
7555 * lpfc_sli4_init_rpi_hdrs - Post the rpi header memory region to the port
7556 * @phba: pointer to lpfc hba data structure.
7557 *
7558 * This routine is invoked to post rpi header templates to the
7559 * port for those SLI4 ports that do not support extents.  This routine
7560 * posts a PAGE_SIZE memory region to the port to hold up to
7561 * PAGE_SIZE / 64 rpi context headers.  This is an initialization routine
7562 * and should be called only when interrupts are disabled.
7563 *
7564 * Return codes
7565 *      0 - successful
7566 *      -ERROR - otherwise.
7567 **/
7568int
7569lpfc_sli4_init_rpi_hdrs(struct lpfc_hba *phba)
7570{
7571        int rc = 0;
7572        struct lpfc_rpi_hdr *rpi_hdr;
7573
7574        INIT_LIST_HEAD(&phba->sli4_hba.lpfc_rpi_hdr_list);
7575        if (!phba->sli4_hba.rpi_hdrs_in_use)
7576                return rc;
7577        if (phba->sli4_hba.extents_in_use)
7578                return -EIO;
7579
7580        rpi_hdr = lpfc_sli4_create_rpi_hdr(phba);
7581        if (!rpi_hdr) {
7582                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7583                                "0391 Error during rpi post operation\n");
7584                lpfc_sli4_remove_rpis(phba);
7585                rc = -ENODEV;
7586        }
7587
7588        return rc;
7589}
7590
7591/**
7592 * lpfc_sli4_create_rpi_hdr - Allocate an rpi header memory region
7593 * @phba: pointer to lpfc hba data structure.
7594 *
7595 * This routine is invoked to allocate a single 4KB memory region to
7596 * support rpis and stores them in the phba.  This single region
7597 * provides support for up to 64 rpis.  The region is used globally
7598 * by the device.
7599 *
7600 * Returns:
7601 *   A valid rpi hdr on success.
7602 *   A NULL pointer on any failure.
7603 **/
7604struct lpfc_rpi_hdr *
7605lpfc_sli4_create_rpi_hdr(struct lpfc_hba *phba)
7606{
7607        uint16_t rpi_limit, curr_rpi_range;
7608        struct lpfc_dmabuf *dmabuf;
7609        struct lpfc_rpi_hdr *rpi_hdr;
7610
7611        /*
7612         * If the SLI4 port supports extents, posting the rpi header isn't
7613         * required.  Set the expected maximum count and let the actual value
7614         * get set when extents are fully allocated.
7615         */
7616        if (!phba->sli4_hba.rpi_hdrs_in_use)
7617                return NULL;
7618        if (phba->sli4_hba.extents_in_use)
7619                return NULL;
7620
7621        /* The limit on the logical index is just the max_rpi count. */
7622        rpi_limit = phba->sli4_hba.max_cfg_param.max_rpi;
7623
7624        spin_lock_irq(&phba->hbalock);
7625        /*
7626         * Establish the starting RPI in this header block.  The starting
7627         * rpi is normalized to a zero base because the physical rpi is
7628         * port based.
7629         */
7630        curr_rpi_range = phba->sli4_hba.next_rpi;
7631        spin_unlock_irq(&phba->hbalock);
7632
7633        /* Reached full RPI range */
7634        if (curr_rpi_range == rpi_limit)
7635                return NULL;
7636
7637        /*
7638         * First allocate the protocol header region for the port.  The
7639         * port expects a 4KB DMA-mapped memory region that is 4K aligned.
7640         */
7641        dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
7642        if (!dmabuf)
7643                return NULL;
7644
7645        dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
7646                                          LPFC_HDR_TEMPLATE_SIZE,
7647                                          &dmabuf->phys, GFP_KERNEL);
7648        if (!dmabuf->virt) {
7649                rpi_hdr = NULL;
7650                goto err_free_dmabuf;
7651        }
7652
7653        if (!IS_ALIGNED(dmabuf->phys, LPFC_HDR_TEMPLATE_SIZE)) {
7654                rpi_hdr = NULL;
7655                goto err_free_coherent;
7656        }
7657
7658        /* Save the rpi header data for cleanup later. */
7659        rpi_hdr = kzalloc(sizeof(struct lpfc_rpi_hdr), GFP_KERNEL);
7660        if (!rpi_hdr)
7661                goto err_free_coherent;
7662
7663        rpi_hdr->dmabuf = dmabuf;
7664        rpi_hdr->len = LPFC_HDR_TEMPLATE_SIZE;
7665        rpi_hdr->page_count = 1;
7666        spin_lock_irq(&phba->hbalock);
7667
7668        /* The rpi_hdr stores the logical index only. */
7669        rpi_hdr->start_rpi = curr_rpi_range;
7670        rpi_hdr->next_rpi = phba->sli4_hba.next_rpi + LPFC_RPI_HDR_COUNT;
7671        list_add_tail(&rpi_hdr->list, &phba->sli4_hba.lpfc_rpi_hdr_list);
7672
7673        spin_unlock_irq(&phba->hbalock);
7674        return rpi_hdr;
7675
7676 err_free_coherent:
7677        dma_free_coherent(&phba->pcidev->dev, LPFC_HDR_TEMPLATE_SIZE,
7678                          dmabuf->virt, dmabuf->phys);
7679 err_free_dmabuf:
7680        kfree(dmabuf);
7681        return NULL;
7682}
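
/*
 * A minimal illustrative sketch (not part of the driver) of the coherent
 * DMA allocate-and-verify pattern used above: the port requires a
 * naturally aligned region, so the IS_ALIGNED() check defensively verifies
 * the alignment of the handle dma_alloc_coherent() returned. Names are
 * hypothetical.
 */
#if 0	/* example only, not compiled */
static void *example_alloc_region(struct device *dev, size_t size,
				  dma_addr_t *phys)
{
	void *virt = dma_alloc_coherent(dev, size, phys, GFP_KERNEL);

	if (virt && !IS_ALIGNED(*phys, size)) {
		dma_free_coherent(dev, size, virt, *phys);
		return NULL;
	}
	return virt;
}
#endif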
7683
7684/**
7685 * lpfc_sli4_remove_rpi_hdrs - Remove all rpi header memory regions
7686 * @phba: pointer to lpfc hba data structure.
7687 *
7688 * This routine is invoked to remove all memory resources allocated
7689 * to support rpis for SLI4 ports not supporting extents. This routine
7690 * presumes the caller has released all rpis consumed by fabric or port
7691 * logins and is prepared to have the header pages removed.
7692 **/
7693void
7694lpfc_sli4_remove_rpi_hdrs(struct lpfc_hba *phba)
7695{
7696        struct lpfc_rpi_hdr *rpi_hdr, *next_rpi_hdr;
7697
7698        if (!phba->sli4_hba.rpi_hdrs_in_use)
7699                goto exit;
7700
7701        list_for_each_entry_safe(rpi_hdr, next_rpi_hdr,
7702                                 &phba->sli4_hba.lpfc_rpi_hdr_list, list) {
7703                list_del(&rpi_hdr->list);
7704                dma_free_coherent(&phba->pcidev->dev, rpi_hdr->len,
7705                                  rpi_hdr->dmabuf->virt, rpi_hdr->dmabuf->phys);
7706                kfree(rpi_hdr->dmabuf);
7707                kfree(rpi_hdr);
7708        }
7709 exit:
7710        /* There are no rpis available to the port now. */
7711        phba->sli4_hba.next_rpi = 0;
7712}
7713
7714/**
7715 * lpfc_hba_alloc - Allocate driver hba data structure for a device.
7716 * @pdev: pointer to pci device data structure.
7717 *
7718 * This routine is invoked to allocate the driver hba data structure for an
7719 * HBA device. If the allocation is successful, the phba reference to the
7720 * PCI device data structure is set.
7721 *
7722 * Return codes
7723 *      pointer to @phba - successful
7724 *      NULL - error
7725 **/
7726static struct lpfc_hba *
7727lpfc_hba_alloc(struct pci_dev *pdev)
7728{
7729        struct lpfc_hba *phba;
7730
7731        /* Allocate memory for HBA structure */
7732        phba = kzalloc(sizeof(struct lpfc_hba), GFP_KERNEL);
7733        if (!phba) {
7734                dev_err(&pdev->dev, "failed to allocate hba struct\n");
7735                return NULL;
7736        }
7737
7738        /* Set reference to PCI device in HBA structure */
7739        phba->pcidev = pdev;
7740
7741        /* Assign an unused board number */
7742        phba->brd_no = lpfc_get_instance();
7743        if (phba->brd_no < 0) {
7744                kfree(phba);
7745                return NULL;
7746        }
7747        phba->eratt_poll_interval = LPFC_ERATT_POLL_INTERVAL;
7748
7749        spin_lock_init(&phba->ct_ev_lock);
7750        INIT_LIST_HEAD(&phba->ct_ev_waiters);
7751
7752        return phba;
7753}
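
/*
 * A minimal illustrative sketch (not part of the driver) of the IDR
 * pattern behind the board-number assignment above: an idr hands out small
 * unique integers that map back to the owning object, and the id is
 * returned to the pool on teardown (see idr_remove() in lpfc_hba_free()
 * below). The example_idr name is hypothetical.
 */
#if 0	/* example only, not compiled */
static DEFINE_IDR(example_idr);

static int example_get_instance(void *owner)
{
	/* Returns a unique id >= 0, or a negative errno on failure */
	return idr_alloc(&example_idr, owner, 0, 0, GFP_KERNEL);
}
#endif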
7754
7755/**
7756 * lpfc_hba_free - Free driver hba data structure with a device.
7757 * @phba: pointer to lpfc hba data structure.
7758 *
7759 * This routine is invoked to free the driver hba data structure with an
7760 * HBA device.
7761 **/
7762static void
7763lpfc_hba_free(struct lpfc_hba *phba)
7764{
7765        if (phba->sli_rev == LPFC_SLI_REV4)
7766                kfree(phba->sli4_hba.hdwq);
7767
7768        /* Release the driver assigned board number */
7769        idr_remove(&lpfc_hba_index, phba->brd_no);
7770
7771        /* Free memory allocated with sli3 rings */
7772        kfree(phba->sli.sli3_ring);
7773        phba->sli.sli3_ring = NULL;
7774
7775        kfree(phba);
7776        return;
7777}
7778
7779/**
7780 * lpfc_create_shost - Create hba physical port with associated scsi host.
7781 * @phba: pointer to lpfc hba data structure.
7782 *
7783 * This routine is invoked to create HBA physical port and associate a SCSI
7784 * host with it.
7785 *
7786 * Return codes
7787 *      0 - successful
7788 *      other values - error
7789 **/
7790static int
7791lpfc_create_shost(struct lpfc_hba *phba)
7792{
7793        struct lpfc_vport *vport;
7794        struct Scsi_Host  *shost;
7795
7796        /* Initialize HBA FC structure */
7797        phba->fc_edtov = FF_DEF_EDTOV;
7798        phba->fc_ratov = FF_DEF_RATOV;
7799        phba->fc_altov = FF_DEF_ALTOV;
7800        phba->fc_arbtov = FF_DEF_ARBTOV;
7801
7802        atomic_set(&phba->sdev_cnt, 0);
7803        vport = lpfc_create_port(phba, phba->brd_no, &phba->pcidev->dev);
7804        if (!vport)
7805                return -ENODEV;
7806
7807        shost = lpfc_shost_from_vport(vport);
7808        phba->pport = vport;
7809
7810        if (phba->nvmet_support) {
7811                /* Only 1 vport (pport) will support NVME target */
7812                phba->targetport = NULL;
7813                phba->cfg_enable_fc4_type = LPFC_ENABLE_NVME;
7814                lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_NVME_DISC,
7815                                "6076 NVME Target Found\n");
7816        }
7817
7818        lpfc_debugfs_initialize(vport);
7819        /* Put reference to SCSI host to driver's device private data */
7820        pci_set_drvdata(phba->pcidev, shost);
7821
7822        /*
7823         * At this point we are fully registered with PSA. In addition,
7824         * any initial discovery should be completed.
7825         */
7826        vport->load_flag |= FC_ALLOW_FDMI;
7827        if (phba->cfg_enable_SmartSAN ||
7828            (phba->cfg_fdmi_on == LPFC_FDMI_SUPPORT)) {
7829
7830                /* Setup appropriate attribute masks */
7831                vport->fdmi_hba_mask = LPFC_FDMI2_HBA_ATTR;
7832                if (phba->cfg_enable_SmartSAN)
7833                        vport->fdmi_port_mask = LPFC_FDMI2_SMART_ATTR;
7834                else
7835                        vport->fdmi_port_mask = LPFC_FDMI2_PORT_ATTR;
7836        }
7837        return 0;
7838}
7839
7840/**
7841 * lpfc_destroy_shost - Destroy hba physical port with associated scsi host.
7842 * @phba: pointer to lpfc hba data structure.
7843 *
7844 * This routine is invoked to destroy HBA physical port and the associated
7845 * SCSI host.
7846 **/
7847static void
7848lpfc_destroy_shost(struct lpfc_hba *phba)
7849{
7850        struct lpfc_vport *vport = phba->pport;
7851
7852        /* Destroy physical port that associated with the SCSI host */
7853        destroy_port(vport);
7854
7855        return;
7856}
7857
7858/**
7859 * lpfc_setup_bg - Setup Block guard structures and debug areas.
7860 * @phba: pointer to lpfc hba data structure.
7861 * @shost: the shost to be used to detect Block guard settings.
7862 *
7863 * This routine sets up the local Block guard protocol settings for @shost.
7864 * This routine also allocates memory for debugging bg buffers.
7865 **/
7866static void
7867lpfc_setup_bg(struct lpfc_hba *phba, struct Scsi_Host *shost)
7868{
7869        uint32_t old_mask;
7870        uint32_t old_guard;
7871
7872        if (phba->cfg_prot_mask && phba->cfg_prot_guard) {
7873                lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7874                                "1478 Registering BlockGuard with the "
7875                                "SCSI layer\n");
7876
7877                old_mask = phba->cfg_prot_mask;
7878                old_guard = phba->cfg_prot_guard;
7879
7880                /* Only allow supported values */
7881                phba->cfg_prot_mask &= (SHOST_DIF_TYPE1_PROTECTION |
7882                        SHOST_DIX_TYPE0_PROTECTION |
7883                        SHOST_DIX_TYPE1_PROTECTION);
7884                phba->cfg_prot_guard &= (SHOST_DIX_GUARD_IP |
7885                                         SHOST_DIX_GUARD_CRC);
7886
7887                /* DIF Type 1 protection for profiles AST1/C1 is end to end */
7888                if (phba->cfg_prot_mask == SHOST_DIX_TYPE1_PROTECTION)
7889                        phba->cfg_prot_mask |= SHOST_DIF_TYPE1_PROTECTION;
7890
7891                if (phba->cfg_prot_mask && phba->cfg_prot_guard) {
7892                        if ((old_mask != phba->cfg_prot_mask) ||
7893                                (old_guard != phba->cfg_prot_guard))
7894                                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7895                                        "1475 Registering BlockGuard with the "
7896                                        "SCSI layer: mask %d  guard %d\n",
7897                                        phba->cfg_prot_mask,
7898                                        phba->cfg_prot_guard);
7899
7900                        scsi_host_set_prot(shost, phba->cfg_prot_mask);
7901                        scsi_host_set_guard(shost, phba->cfg_prot_guard);
7902                } else
7903                        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7904                                "1479 Not Registering BlockGuard with the SCSI "
7905                                "layer, Bad protection parameters: %d %d\n",
7906                                old_mask, old_guard);
7907        }
7908}
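
/*
 * A short sketch (not driver code) of what the registration above amounts
 * to: scsi_host_set_prot()/scsi_host_set_guard() only record capability
 * bits on the Scsi_Host, which can be read back with the matching
 * accessors, e.g.:
 *
 *	if (scsi_host_get_prot(shost) & SHOST_DIF_TYPE1_PROTECTION)
 *		pr_info("T10 DIF Type 1 enabled, guard %#x\n",
 *			scsi_host_get_guard(shost));
 */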
7909
7910/**
7911 * lpfc_post_init_setup - Perform necessary device post initialization setup.
7912 * @phba: pointer to lpfc hba data structure.
7913 *
7914 * This routine is invoked to perform all the necessary post initialization
7915 * setup for the device.
7916 **/
7917static void
7918lpfc_post_init_setup(struct lpfc_hba *phba)
7919{
7920        struct Scsi_Host  *shost;
7921        struct lpfc_adapter_event_header adapter_event;
7922
7923        /* Get the default values for Model Name and Description */
7924        lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);
7925
7926        /*
7927         * hba setup may have changed the hba_queue_depth so we need to
7928         * adjust the value of can_queue.
7929         */
7930        shost = pci_get_drvdata(phba->pcidev);
7931        shost->can_queue = phba->cfg_hba_queue_depth - 10;
7932
7933        lpfc_host_attrib_init(shost);
7934
7935        if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
7936                spin_lock_irq(shost->host_lock);
7937                lpfc_poll_start_timer(phba);
7938                spin_unlock_irq(shost->host_lock);
7939        }
7940
7941        lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7942                        "0428 Perform SCSI scan\n");
7943        /* Send board arrival event to upper layer */
7944        adapter_event.event_type = FC_REG_ADAPTER_EVENT;
7945        adapter_event.subcategory = LPFC_EVENT_ARRIVAL;
7946        fc_host_post_vendor_event(shost, fc_get_event_number(),
7947                                  sizeof(adapter_event),
7948                                  (char *) &adapter_event,
7949                                  LPFC_NL_VENDOR_ID);
7950        return;
7951}
7952
7953/**
7954 * lpfc_sli_pci_mem_setup - Setup SLI3 HBA PCI memory space.
7955 * @phba: pointer to lpfc hba data structure.
7956 *
7957 * This routine is invoked to set up the PCI device memory space for device
7958 * with SLI-3 interface spec.
7959 *
7960 * Return codes
7961 *      0 - successful
7962 *      other values - error
7963 **/
7964static int
7965lpfc_sli_pci_mem_setup(struct lpfc_hba *phba)
7966{
7967        struct pci_dev *pdev = phba->pcidev;
7968        unsigned long bar0map_len, bar2map_len;
7969        int i, hbq_count;
7970        void *ptr;
7971        int error;
7972
7973        if (!pdev)
7974                return -ENODEV;
7975
7976        /* Set the device DMA mask size */
7977        error = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
7978        if (error)
7979                error = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
7980        if (error)
7981                return error;
7982        error = -ENODEV;
7983
7984        /* Get the bus address of Bar0 and Bar2 and the number of bytes
7985         * required by each mapping.
7986         */
7987        phba->pci_bar0_map = pci_resource_start(pdev, 0);
7988        bar0map_len = pci_resource_len(pdev, 0);
7989
7990        phba->pci_bar2_map = pci_resource_start(pdev, 2);
7991        bar2map_len = pci_resource_len(pdev, 2);
7992
7993        /* Map HBA SLIM to a kernel virtual address. */
7994        phba->slim_memmap_p = ioremap(phba->pci_bar0_map, bar0map_len);
7995        if (!phba->slim_memmap_p) {
7996                dev_printk(KERN_ERR, &pdev->dev,
7997                           "ioremap failed for SLIM memory.\n");
7998                goto out;
7999        }
8000
8001        /* Map HBA Control Registers to a kernel virtual address. */
8002        phba->ctrl_regs_memmap_p = ioremap(phba->pci_bar2_map, bar2map_len);
8003        if (!phba->ctrl_regs_memmap_p) {
8004                dev_printk(KERN_ERR, &pdev->dev,
8005                           "ioremap failed for HBA control registers.\n");
8006                goto out_iounmap_slim;
8007        }
8008
8009        /* Allocate memory for SLI-2 structures */
8010        phba->slim2p.virt = dma_alloc_coherent(&pdev->dev, SLI2_SLIM_SIZE,
8011                                               &phba->slim2p.phys, GFP_KERNEL);
8012        if (!phba->slim2p.virt)
8013                goto out_iounmap;
8014
8015        phba->mbox = phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, mbx);
8016        phba->mbox_ext = (phba->slim2p.virt +
8017                offsetof(struct lpfc_sli2_slim, mbx_ext_words));
8018        phba->pcb = (phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, pcb));
8019        phba->IOCBs = (phba->slim2p.virt +
8020                       offsetof(struct lpfc_sli2_slim, IOCBs));
8021
8022        phba->hbqslimp.virt = dma_alloc_coherent(&pdev->dev,
8023                                                 lpfc_sli_hbq_size(),
8024                                                 &phba->hbqslimp.phys,
8025                                                 GFP_KERNEL);
8026        if (!phba->hbqslimp.virt)
8027                goto out_free_slim;
8028
8029        hbq_count = lpfc_sli_hbq_count();
8030        ptr = phba->hbqslimp.virt;
8031        for (i = 0; i < hbq_count; ++i) {
8032                phba->hbqs[i].hbq_virt = ptr;
8033                INIT_LIST_HEAD(&phba->hbqs[i].hbq_buffer_list);
8034                ptr += (lpfc_hbq_defs[i]->entry_count *
8035                        sizeof(struct lpfc_hbq_entry));
8036        }
8037        phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_els_hbq_alloc;
8038        phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_els_hbq_free;
8039
8040        memset(phba->hbqslimp.virt, 0, lpfc_sli_hbq_size());
8041
8042        phba->MBslimaddr = phba->slim_memmap_p;
8043        phba->HAregaddr = phba->ctrl_regs_memmap_p + HA_REG_OFFSET;
8044        phba->CAregaddr = phba->ctrl_regs_memmap_p + CA_REG_OFFSET;
8045        phba->HSregaddr = phba->ctrl_regs_memmap_p + HS_REG_OFFSET;
8046        phba->HCregaddr = phba->ctrl_regs_memmap_p + HC_REG_OFFSET;
8047
8048        return 0;
8049
8050out_free_slim:
8051        dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
8052                          phba->slim2p.virt, phba->slim2p.phys);
8053out_iounmap:
8054        iounmap(phba->ctrl_regs_memmap_p);
8055out_iounmap_slim:
8056        iounmap(phba->slim_memmap_p);
8057out:
8058        return error;
8059}
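
/*
 * The unwind labels above follow the usual goto-ladder idiom: each label
 * releases exactly what was acquired before the failing step, in reverse
 * order of acquisition. The shape of the pattern in isolation
 * (hypothetical resources a and b):
 *
 *	a = acquire_a();
 *	if (!a)
 *		goto out;
 *	b = acquire_b();
 *	if (!b)
 *		goto out_release_a;
 *	return 0;
 * out_release_a:
 *	release_a(a);
 * out:
 *	return -ENODEV;
 */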
8060
8061/**
8062 * lpfc_sli_pci_mem_unset - Unset SLI3 HBA PCI memory space.
8063 * @phba: pointer to lpfc hba data structure.
8064 *
8065 * This routine is invoked to unset the PCI device memory space for device
8066 * with SLI-3 interface spec.
8067 **/
8068static void
8069lpfc_sli_pci_mem_unset(struct lpfc_hba *phba)
8070{
8071        struct pci_dev *pdev;
8072
8073        /* Obtain PCI device reference */
8074        if (!phba->pcidev)
8075                return;
8076
8077        pdev = phba->pcidev;
8078
8079        /* Free coherent DMA memory allocated */
8080        dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(),
8081                          phba->hbqslimp.virt, phba->hbqslimp.phys);
8082        dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
8083                          phba->slim2p.virt, phba->slim2p.phys);
8084
8085        /* I/O memory unmap */
8086        iounmap(phba->ctrl_regs_memmap_p);
8087        iounmap(phba->slim_memmap_p);
8088
8089        return;
8090}
8091
8092/**
8093 * lpfc_sli4_post_status_check - Wait for SLI4 POST done and check status
8094 * @phba: pointer to lpfc hba data structure.
8095 *
8096 * This routine is invoked to wait for SLI4 device Power On Self Test (POST)
8097 * done and check status.
8098 *
8099 * Return 0 if successful, otherwise -ENODEV.
8100 **/
8101int
8102lpfc_sli4_post_status_check(struct lpfc_hba *phba)
8103{
8104        struct lpfc_register portsmphr_reg, uerrlo_reg, uerrhi_reg;
8105        struct lpfc_register reg_data;
8106        int i, port_error = 0;
8107        uint32_t if_type;
8108
8109        memset(&portsmphr_reg, 0, sizeof(portsmphr_reg));
8110        memset(&reg_data, 0, sizeof(reg_data));
8111        if (!phba->sli4_hba.PSMPHRregaddr)
8112                return -ENODEV;
8113
8114        /* Wait up to 30 seconds for the SLI Port POST done and ready */
8115        for (i = 0; i < 3000; i++) {
8116                if (lpfc_readl(phba->sli4_hba.PSMPHRregaddr,
8117                        &portsmphr_reg.word0) ||
8118                        (bf_get(lpfc_port_smphr_perr, &portsmphr_reg))) {
8119                        /* Port has a fatal POST error, break out */
8120                        port_error = -ENODEV;
8121                        break;
8122                }
8123                if (LPFC_POST_STAGE_PORT_READY ==
8124                    bf_get(lpfc_port_smphr_port_status, &portsmphr_reg))
8125                        break;
8126                msleep(10);
8127        }
8128
8129        /*
8130         * If there was a port error during POST, then don't proceed with
8131         * other register reads as the data may not be valid.  Just exit.
8132         */
8133        if (port_error) {
8134                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8135                        "1408 Port Failed POST - portsmphr=0x%x, "
8136                        "perr=x%x, sfi=x%x, nip=x%x, ipc=x%x, scr1=x%x, "
8137                        "scr2=x%x, hscratch=x%x, pstatus=x%x\n",
8138                        portsmphr_reg.word0,
8139                        bf_get(lpfc_port_smphr_perr, &portsmphr_reg),
8140                        bf_get(lpfc_port_smphr_sfi, &portsmphr_reg),
8141                        bf_get(lpfc_port_smphr_nip, &portsmphr_reg),
8142                        bf_get(lpfc_port_smphr_ipc, &portsmphr_reg),
8143                        bf_get(lpfc_port_smphr_scr1, &portsmphr_reg),
8144                        bf_get(lpfc_port_smphr_scr2, &portsmphr_reg),
8145                        bf_get(lpfc_port_smphr_host_scratch, &portsmphr_reg),
8146                        bf_get(lpfc_port_smphr_port_status, &portsmphr_reg));
8147        } else {
8148                lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
8149                                "2534 Device Info: SLIFamily=0x%x, "
8150                                "SLIRev=0x%x, IFType=0x%x, SLIHint_1=0x%x, "
8151                                "SLIHint_2=0x%x, FT=0x%x\n",
8152                                bf_get(lpfc_sli_intf_sli_family,
8153                                       &phba->sli4_hba.sli_intf),
8154                                bf_get(lpfc_sli_intf_slirev,
8155                                       &phba->sli4_hba.sli_intf),
8156                                bf_get(lpfc_sli_intf_if_type,
8157                                       &phba->sli4_hba.sli_intf),
8158                                bf_get(lpfc_sli_intf_sli_hint1,
8159                                       &phba->sli4_hba.sli_intf),
8160                                bf_get(lpfc_sli_intf_sli_hint2,
8161                                       &phba->sli4_hba.sli_intf),
8162                                bf_get(lpfc_sli_intf_func_type,
8163                                       &phba->sli4_hba.sli_intf));
8164                /*
8165                 * Check for other Port errors during the initialization
8166                 * process.  Fail the load if the port did not come up
8167                 * correctly.
8168                 */
8169                if_type = bf_get(lpfc_sli_intf_if_type,
8170                                 &phba->sli4_hba.sli_intf);
8171                switch (if_type) {
8172                case LPFC_SLI_INTF_IF_TYPE_0:
8173                        phba->sli4_hba.ue_mask_lo =
8174                              readl(phba->sli4_hba.u.if_type0.UEMASKLOregaddr);
8175                        phba->sli4_hba.ue_mask_hi =
8176                              readl(phba->sli4_hba.u.if_type0.UEMASKHIregaddr);
8177                        uerrlo_reg.word0 =
8178                              readl(phba->sli4_hba.u.if_type0.UERRLOregaddr);
8179                        uerrhi_reg.word0 =
8180                                readl(phba->sli4_hba.u.if_type0.UERRHIregaddr);
8181                        if ((~phba->sli4_hba.ue_mask_lo & uerrlo_reg.word0) ||
8182                            (~phba->sli4_hba.ue_mask_hi & uerrhi_reg.word0)) {
8183                                lpfc_printf_log(phba, KERN_ERR,
8184                                                LOG_TRACE_EVENT,
8185                                                "1422 Unrecoverable Error "
8186                                                "Detected during POST "
8187                                                "uerr_lo_reg=0x%x, "
8188                                                "uerr_hi_reg=0x%x, "
8189                                                "ue_mask_lo_reg=0x%x, "
8190                                                "ue_mask_hi_reg=0x%x\n",
8191                                                uerrlo_reg.word0,
8192                                                uerrhi_reg.word0,
8193                                                phba->sli4_hba.ue_mask_lo,
8194                                                phba->sli4_hba.ue_mask_hi);
8195                                port_error = -ENODEV;
8196                        }
8197                        break;
8198                case LPFC_SLI_INTF_IF_TYPE_2:
8199                case LPFC_SLI_INTF_IF_TYPE_6:
8200                        /* Final checks.  The port status should be clean. */
8201                        if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
8202                                &reg_data.word0) ||
8203                                (bf_get(lpfc_sliport_status_err, &reg_data) &&
8204                                 !bf_get(lpfc_sliport_status_rn, &reg_data))) {
8205                                phba->work_status[0] =
8206                                        readl(phba->sli4_hba.u.if_type2.
8207                                              ERR1regaddr);
8208                                phba->work_status[1] =
8209                                        readl(phba->sli4_hba.u.if_type2.
8210                                              ERR2regaddr);
8211                                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8212                                        "2888 Unrecoverable port error "
8213                                        "following POST: port status reg "
8214                                        "0x%x, port_smphr reg 0x%x, "
8215                                        "error 1=0x%x, error 2=0x%x\n",
8216                                        reg_data.word0,
8217                                        portsmphr_reg.word0,
8218                                        phba->work_status[0],
8219                                        phba->work_status[1]);
8220                                port_error = -ENODEV;
8221                        }
8222                        break;
8223                case LPFC_SLI_INTF_IF_TYPE_1:
8224                default:
8225                        break;
8226                }
8227        }
8228        return port_error;
8229}
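
/*
 * The POST wait above is a bounded poll: 3000 iterations around msleep(10)
 * give the 30 second budget the comment advertises. The same idiom in
 * isolation, with a hypothetical ready() predicate:
 *
 *	int i;
 *
 *	for (i = 0; i < 3000; i++) {
 *		if (ready(phba))
 *			break;
 *		msleep(10);
 *	}
 *	if (i == 3000)
 *		return -ETIMEDOUT;
 */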
8230
8231/**
8232 * lpfc_sli4_bar0_register_memmap - Set up SLI4 BAR0 register memory map.
8233 * @phba: pointer to lpfc hba data structure.
8234 * @if_type:  The SLI4 interface type getting configured.
8235 *
8236 * This routine is invoked to set up SLI4 BAR0 PCI config space register
8237 * memory map.
8238 **/
8239static void
8240lpfc_sli4_bar0_register_memmap(struct lpfc_hba *phba, uint32_t if_type)
8241{
8242        switch (if_type) {
8243        case LPFC_SLI_INTF_IF_TYPE_0:
8244                phba->sli4_hba.u.if_type0.UERRLOregaddr =
8245                        phba->sli4_hba.conf_regs_memmap_p + LPFC_UERR_STATUS_LO;
8246                phba->sli4_hba.u.if_type0.UERRHIregaddr =
8247                        phba->sli4_hba.conf_regs_memmap_p + LPFC_UERR_STATUS_HI;
8248                phba->sli4_hba.u.if_type0.UEMASKLOregaddr =
8249                        phba->sli4_hba.conf_regs_memmap_p + LPFC_UE_MASK_LO;
8250                phba->sli4_hba.u.if_type0.UEMASKHIregaddr =
8251                        phba->sli4_hba.conf_regs_memmap_p + LPFC_UE_MASK_HI;
8252                phba->sli4_hba.SLIINTFregaddr =
8253                        phba->sli4_hba.conf_regs_memmap_p + LPFC_SLI_INTF;
8254                break;
8255        case LPFC_SLI_INTF_IF_TYPE_2:
8256                phba->sli4_hba.u.if_type2.EQDregaddr =
8257                        phba->sli4_hba.conf_regs_memmap_p +
8258                                                LPFC_CTL_PORT_EQ_DELAY_OFFSET;
8259                phba->sli4_hba.u.if_type2.ERR1regaddr =
8260                        phba->sli4_hba.conf_regs_memmap_p +
8261                                                LPFC_CTL_PORT_ER1_OFFSET;
8262                phba->sli4_hba.u.if_type2.ERR2regaddr =
8263                        phba->sli4_hba.conf_regs_memmap_p +
8264                                                LPFC_CTL_PORT_ER2_OFFSET;
8265                phba->sli4_hba.u.if_type2.CTRLregaddr =
8266                        phba->sli4_hba.conf_regs_memmap_p +
8267                                                LPFC_CTL_PORT_CTL_OFFSET;
8268                phba->sli4_hba.u.if_type2.STATUSregaddr =
8269                        phba->sli4_hba.conf_regs_memmap_p +
8270                                                LPFC_CTL_PORT_STA_OFFSET;
8271                phba->sli4_hba.SLIINTFregaddr =
8272                        phba->sli4_hba.conf_regs_memmap_p + LPFC_SLI_INTF;
8273                phba->sli4_hba.PSMPHRregaddr =
8274                        phba->sli4_hba.conf_regs_memmap_p +
8275                                                LPFC_CTL_PORT_SEM_OFFSET;
8276                phba->sli4_hba.RQDBregaddr =
8277                        phba->sli4_hba.conf_regs_memmap_p +
8278                                                LPFC_ULP0_RQ_DOORBELL;
8279                phba->sli4_hba.WQDBregaddr =
8280                        phba->sli4_hba.conf_regs_memmap_p +
8281                                                LPFC_ULP0_WQ_DOORBELL;
8282                phba->sli4_hba.CQDBregaddr =
8283                        phba->sli4_hba.conf_regs_memmap_p + LPFC_EQCQ_DOORBELL;
8284                phba->sli4_hba.EQDBregaddr = phba->sli4_hba.CQDBregaddr;
8285                phba->sli4_hba.MQDBregaddr =
8286                        phba->sli4_hba.conf_regs_memmap_p + LPFC_MQ_DOORBELL;
8287                phba->sli4_hba.BMBXregaddr =
8288                        phba->sli4_hba.conf_regs_memmap_p + LPFC_BMBX;
8289                break;
8290        case LPFC_SLI_INTF_IF_TYPE_6:
8291                phba->sli4_hba.u.if_type2.EQDregaddr =
8292                        phba->sli4_hba.conf_regs_memmap_p +
8293                                                LPFC_CTL_PORT_EQ_DELAY_OFFSET;
8294                phba->sli4_hba.u.if_type2.ERR1regaddr =
8295                        phba->sli4_hba.conf_regs_memmap_p +
8296                                                LPFC_CTL_PORT_ER1_OFFSET;
8297                phba->sli4_hba.u.if_type2.ERR2regaddr =
8298                        phba->sli4_hba.conf_regs_memmap_p +
8299                                                LPFC_CTL_PORT_ER2_OFFSET;
8300                phba->sli4_hba.u.if_type2.CTRLregaddr =
8301                        phba->sli4_hba.conf_regs_memmap_p +
8302                                                LPFC_CTL_PORT_CTL_OFFSET;
8303                phba->sli4_hba.u.if_type2.STATUSregaddr =
8304                        phba->sli4_hba.conf_regs_memmap_p +
8305                                                LPFC_CTL_PORT_STA_OFFSET;
8306                phba->sli4_hba.PSMPHRregaddr =
8307                        phba->sli4_hba.conf_regs_memmap_p +
8308                                                LPFC_CTL_PORT_SEM_OFFSET;
8309                phba->sli4_hba.BMBXregaddr =
8310                        phba->sli4_hba.conf_regs_memmap_p + LPFC_BMBX;
8311                break;
8312        case LPFC_SLI_INTF_IF_TYPE_1:
8313        default:
8314                dev_printk(KERN_ERR, &phba->pcidev->dev,
8315                           "FATAL - unsupported SLI4 interface type - %d\n",
8316                           if_type);
8317                break;
8318        }
8319}
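
/*
 * Note the BAR0 layout difference encoded above: if_type 0 exposes only
 * the unrecoverable-error mask/status registers here, if_type 2 also
 * carries the RQ/WQ/CQ/EQ/MQ doorbells in BAR0, while if_type 6 moves
 * those doorbells out to BAR1 (see lpfc_sli4_bar1_register_memmap()
 * below) and keeps just the port control/status set and the bootstrap
 * mailbox in BAR0.
 */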
8320
8321/**
8322 * lpfc_sli4_bar1_register_memmap - Set up SLI4 BAR1 register memory map.
8323 * @phba: pointer to lpfc hba data structure.
8324 * @if_type: sli if type to operate on.
8325 *
8326 * This routine is invoked to set up SLI4 BAR1 register memory map.
8327 **/
8328static void
8329lpfc_sli4_bar1_register_memmap(struct lpfc_hba *phba, uint32_t if_type)
8330{
8331        switch (if_type) {
8332        case LPFC_SLI_INTF_IF_TYPE_0:
8333                phba->sli4_hba.PSMPHRregaddr =
8334                        phba->sli4_hba.ctrl_regs_memmap_p +
8335                        LPFC_SLIPORT_IF0_SMPHR;
8336                phba->sli4_hba.ISRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
8337                        LPFC_HST_ISR0;
8338                phba->sli4_hba.IMRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
8339                        LPFC_HST_IMR0;
8340                phba->sli4_hba.ISCRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
8341                        LPFC_HST_ISCR0;
8342                break;
8343        case LPFC_SLI_INTF_IF_TYPE_6:
8344                phba->sli4_hba.RQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p +
8345                        LPFC_IF6_RQ_DOORBELL;
8346                phba->sli4_hba.WQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p +
8347                        LPFC_IF6_WQ_DOORBELL;
8348                phba->sli4_hba.CQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p +
8349                        LPFC_IF6_CQ_DOORBELL;
8350                phba->sli4_hba.EQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p +
8351                        LPFC_IF6_EQ_DOORBELL;
8352                phba->sli4_hba.MQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p +
8353                        LPFC_IF6_MQ_DOORBELL;
8354                break;
8355        case LPFC_SLI_INTF_IF_TYPE_2:
8356        case LPFC_SLI_INTF_IF_TYPE_1:
8357        default:
8358                dev_err(&phba->pcidev->dev,
8359                           "FATAL - unsupported SLI4 interface type - %d\n",
8360                           if_type);
8361                break;
8362        }
8363}
8364
8365/**
8366 * lpfc_sli4_bar2_register_memmap - Set up SLI4 BAR2 register memory map.
8367 * @phba: pointer to lpfc hba data structure.
8368 * @vf: virtual function number
8369 *
8370 * This routine is invoked to set up SLI4 BAR2 doorbell register memory map
8371 * based on the given virtual function number, @vf.
8372 *
8373 * Return 0 if successful, otherwise -ENODEV.
8374 **/
8375static int
8376lpfc_sli4_bar2_register_memmap(struct lpfc_hba *phba, uint32_t vf)
8377{
8378        if (vf > LPFC_VIR_FUNC_MAX)
8379                return -ENODEV;
8380
8381        phba->sli4_hba.RQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
8382                                vf * LPFC_VFR_PAGE_SIZE +
8383                                        LPFC_ULP0_RQ_DOORBELL);
8384        phba->sli4_hba.WQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
8385                                vf * LPFC_VFR_PAGE_SIZE +
8386                                        LPFC_ULP0_WQ_DOORBELL);
8387        phba->sli4_hba.CQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
8388                                vf * LPFC_VFR_PAGE_SIZE +
8389                                        LPFC_EQCQ_DOORBELL);
8390        phba->sli4_hba.EQDBregaddr = phba->sli4_hba.CQDBregaddr;
8391        phba->sli4_hba.MQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
8392                                vf * LPFC_VFR_PAGE_SIZE + LPFC_MQ_DOORBELL);
8393        phba->sli4_hba.BMBXregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
8394                                vf * LPFC_VFR_PAGE_SIZE + LPFC_BMBX);
8395        return 0;
8396}
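
/*
 * Worked example of the arithmetic above: each virtual function owns one
 * LPFC_VFR_PAGE_SIZE-sized page of doorbells, so for vf == 2 the RQ
 * doorbell resolves to
 *
 *	drbl_regs_memmap_p + 2 * LPFC_VFR_PAGE_SIZE + LPFC_ULP0_RQ_DOORBELL
 *
 * with the WQ/CQ/MQ/BMBX registers at their fixed offsets inside the
 * same page.
 */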
8397
8398/**
8399 * lpfc_create_bootstrap_mbox - Create the bootstrap mailbox
8400 * @phba: pointer to lpfc hba data structure.
8401 *
8402 * This routine is invoked to create the bootstrap mailbox
8403 * region consistent with the SLI-4 interface spec.  This
8404 * routine allocates all memory necessary to communicate
8405 * mailbox commands to the port and sets up all alignment
8406 * needs.  No locks are expected to be held when calling
8407 * this routine.
8408 *
8409 * Return codes
8410 *      0 - successful
8411 *      -ENOMEM - could not allocate memory.
8412 **/
8413static int
8414lpfc_create_bootstrap_mbox(struct lpfc_hba *phba)
8415{
8416        uint32_t bmbx_size;
8417        struct lpfc_dmabuf *dmabuf;
8418        struct dma_address *dma_address;
8419        uint32_t pa_addr;
8420        uint64_t phys_addr;
8421
8422        dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
8423        if (!dmabuf)
8424                return -ENOMEM;
8425
8426        /*
8427         * The bootstrap mailbox region consists of two parts
8428         * plus a 16-byte alignment restriction.
8429         */
8430        bmbx_size = sizeof(struct lpfc_bmbx_create) + (LPFC_ALIGN_16_BYTE - 1);
8431        dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, bmbx_size,
8432                                          &dmabuf->phys, GFP_KERNEL);
8433        if (!dmabuf->virt) {
8434                kfree(dmabuf);
8435                return -ENOMEM;
8436        }
8437
8438        /*
8439         * Initialize the bootstrap mailbox pointers now so that the register
8440         * operations are simple later.  The mailbox dma address is required
8441         * to be 16-byte aligned.  Also align the virtual memory as each
8442         * mailbox is copied into the bmbx mailbox region before issuing the
8443         * command to the port.
8444         */
8445        phba->sli4_hba.bmbx.dmabuf = dmabuf;
8446        phba->sli4_hba.bmbx.bmbx_size = bmbx_size;
8447
8448        phba->sli4_hba.bmbx.avirt = PTR_ALIGN(dmabuf->virt,
8449                                              LPFC_ALIGN_16_BYTE);
8450        phba->sli4_hba.bmbx.aphys = ALIGN(dmabuf->phys,
8451                                              LPFC_ALIGN_16_BYTE);
8452
8453        /*
8454         * Set the high and low physical addresses now.  The SLI4 alignment
8455         * requirement is 16 bytes and the mailbox is posted to the port
8456         * as two 30-bit addresses.  The other data is a bit marking whether
8457         * the 30-bit address is the high or low address.
8458         * Upcast bmbx aphys to 64 bits so the shift instruction
8459         * compiles cleanly on 32-bit machines.
8460         */
8461        dma_address = &phba->sli4_hba.bmbx.dma_address;
8462        phys_addr = (uint64_t)phba->sli4_hba.bmbx.aphys;
8463        pa_addr = (uint32_t) ((phys_addr >> 34) & 0x3fffffff);
8464        dma_address->addr_hi = (uint32_t) ((pa_addr << 2) |
8465                                           LPFC_BMBX_BIT1_ADDR_HI);
8466
8467        pa_addr = (uint32_t) ((phba->sli4_hba.bmbx.aphys >> 4) & 0x3fffffff);
8468        dma_address->addr_lo = (uint32_t) ((pa_addr << 2) |
8469                                           LPFC_BMBX_BIT1_ADDR_LO);
8470        return 0;
8471}
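
/*
 * The alignment handling above is the classic over-allocate-and-round
 * trick: request size + (align - 1) bytes, then round the virtual and DMA
 * addresses up to the boundary. A minimal sketch (not driver code):
 *
 *	sz = needed + (LPFC_ALIGN_16_BYTE - 1);
 *	virt = dma_alloc_coherent(dev, sz, &phys, GFP_KERNEL);
 *	avirt = PTR_ALIGN(virt, LPFC_ALIGN_16_BYTE);	// pointer round-up
 *	aphys = ALIGN(phys, LPFC_ALIGN_16_BYTE);	// integer round-up
 */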
8472
8473/**
8474 * lpfc_destroy_bootstrap_mbox - Destroy all bootstrap mailbox resources
8475 * @phba: pointer to lpfc hba data structure.
8476 *
8477 * This routine is invoked to tear down the bootstrap mailbox
8478 * region and release all host resources. The caller must ensure
8479 * that all outstanding mailbox commands have completed, that no
8480 * additional mailbox commands will be issued, and that interrupts
8481 * are disabled before calling this routine.
8482 *
8483 **/
8484static void
8485lpfc_destroy_bootstrap_mbox(struct lpfc_hba *phba)
8486{
8487        dma_free_coherent(&phba->pcidev->dev,
8488                          phba->sli4_hba.bmbx.bmbx_size,
8489                          phba->sli4_hba.bmbx.dmabuf->virt,
8490                          phba->sli4_hba.bmbx.dmabuf->phys);
8491
8492        kfree(phba->sli4_hba.bmbx.dmabuf);
8493        memset(&phba->sli4_hba.bmbx, 0, sizeof(struct lpfc_bmbx));
8494}
8495
8496static const char * const lpfc_topo_to_str[] = {
8497        "Loop then P2P",
8498        "Loopback",
8499        "P2P Only",
8500        "Unsupported",
8501        "Loop Only",
8502        "Unsupported",
8503        "P2P then Loop",
8504};
8505
8506#define LINK_FLAGS_DEF  0x0
8507#define LINK_FLAGS_P2P  0x1
8508#define LINK_FLAGS_LOOP 0x2
8509/**
8510 * lpfc_map_topology - Map the topology read from READ_CONFIG
8511 * @phba: pointer to lpfc hba data structure.
8512 * @rd_config: pointer to read config data
8513 *
8514 * This routine is invoked to map the topology values as read
8515 * from the read config mailbox command. If the persistent
8516 * topology feature is supported, the firmware will provide the
8517 * saved topology information to be used in INIT_LINK.
8518 **/
8519static void
8520lpfc_map_topology(struct lpfc_hba *phba, struct lpfc_mbx_read_config *rd_config)
8521{
8522        u8 ptv, tf, pt;
8523
8524        ptv = bf_get(lpfc_mbx_rd_conf_ptv, rd_config);
8525        tf = bf_get(lpfc_mbx_rd_conf_tf, rd_config);
8526        pt = bf_get(lpfc_mbx_rd_conf_pt, rd_config);
8527
8528        lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
8529                        "2027 Read Config Data: ptv:0x%x, tf:0x%x, pt:0x%x",
8530                         ptv, tf, pt);
8531        if (!ptv) {
8532                lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
8533                                "2019 FW does not support persistent topology "
8534                                "Using driver parameter defined value [%s]",
8535                                lpfc_topo_to_str[phba->cfg_topology]);
8536                return;
8537        }
8538        /* FW supports persistent topology - override module parameter value */
8539        phba->hba_flag |= HBA_PERSISTENT_TOPO;
8540        switch (phba->pcidev->device) {
8541        case PCI_DEVICE_ID_LANCER_G7_FC:
8542        case PCI_DEVICE_ID_LANCER_G6_FC:
8543                if (!tf) {
8544                        phba->cfg_topology = ((pt == LINK_FLAGS_LOOP)
8545                                        ? FLAGS_TOPOLOGY_MODE_LOOP
8546                                        : FLAGS_TOPOLOGY_MODE_PT_PT);
8547                } else {
8548                        phba->hba_flag &= ~HBA_PERSISTENT_TOPO;
8549                }
8550                break;
8551        default:        /* G5 */
8552                if (tf) {
8553                        /* If topology failover set - pt is '0' or '1' */
8554                        phba->cfg_topology = (pt ? FLAGS_TOPOLOGY_MODE_PT_LOOP :
8555                                              FLAGS_TOPOLOGY_MODE_LOOP_PT);
8556                } else {
8557                        phba->cfg_topology = ((pt == LINK_FLAGS_P2P)
8558                                        ? FLAGS_TOPOLOGY_MODE_PT_PT
8559                                        : FLAGS_TOPOLOGY_MODE_LOOP);
8560                }
8561                break;
8562        }
8563        if (phba->hba_flag & HBA_PERSISTENT_TOPO) {
8564                lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
8565                                "2020 Using persistent topology value [%s]",
8566                                lpfc_topo_to_str[phba->cfg_topology]);
8567        } else {
8568                lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
8569                                "2021 Invalid topology values from FW "
8570                                "Using driver parameter defined value [%s]",
8571                                lpfc_topo_to_str[phba->cfg_topology]);
8572        }
8573}
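
/*
 * Summary of the persistent-topology decode above:
 *
 *	G6/G7:	tf == 0, pt == LOOP	-> FLAGS_TOPOLOGY_MODE_LOOP
 *		tf == 0, pt != LOOP	-> FLAGS_TOPOLOGY_MODE_PT_PT
 *		tf != 0			-> persistent topology dropped
 *	G5:	tf != 0, pt == 1	-> FLAGS_TOPOLOGY_MODE_PT_LOOP
 *		tf != 0, pt == 0	-> FLAGS_TOPOLOGY_MODE_LOOP_PT
 *		tf == 0, pt == P2P	-> FLAGS_TOPOLOGY_MODE_PT_PT
 *		tf == 0, pt != P2P	-> FLAGS_TOPOLOGY_MODE_LOOP
 */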
8574
8575/**
8576 * lpfc_sli4_read_config - Get the config parameters.
8577 * @phba: pointer to lpfc hba data structure.
8578 *
8579 * This routine is invoked to read the configuration parameters from the HBA.
8580 * The configuration parameters are used to set the base and maximum values
8581 * for RPIs, XRIs, VPIs, VFIs and FCFIs. These values also affect the resource
8582 * allocation for the port.
8583 *
8584 * Return codes
8585 *      0 - successful
8586 *      -ENOMEM - No available memory
8587 *      -EIO - The mailbox failed to complete successfully.
8588 **/
8589int
8590lpfc_sli4_read_config(struct lpfc_hba *phba)
8591{
8592        LPFC_MBOXQ_t *pmb;
8593        struct lpfc_mbx_read_config *rd_config;
8594        union  lpfc_sli4_cfg_shdr *shdr;
8595        uint32_t shdr_status, shdr_add_status;
8596        struct lpfc_mbx_get_func_cfg *get_func_cfg;
8597        struct lpfc_rsrc_desc_fcfcoe *desc;
8598        char *pdesc_0;
8599        uint16_t forced_link_speed;
8600        uint32_t if_type, qmin;
8601        int length, i, rc = 0, rc2;
8602
8603        pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
8604        if (!pmb) {
8605                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8606                                "2011 Unable to allocate memory for issuing "
8607                                "SLI_CONFIG_SPECIAL mailbox command\n");
8608                return -ENOMEM;
8609        }
8610
8611        lpfc_read_config(phba, pmb);
8612
8613        rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
8614        if (rc != MBX_SUCCESS) {
8615                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8616                                "2012 Mailbox failed, mbxCmd x%x "
8617                                "READ_CONFIG, mbxStatus x%x\n",
8618                                bf_get(lpfc_mqe_command, &pmb->u.mqe),
8619                                bf_get(lpfc_mqe_status, &pmb->u.mqe));
8620                rc = -EIO;
8621        } else {
8622                rd_config = &pmb->u.mqe.un.rd_config;
8623                if (bf_get(lpfc_mbx_rd_conf_lnk_ldv, rd_config)) {
8624                        phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_VAL;
8625                        phba->sli4_hba.lnk_info.lnk_tp =
8626                                bf_get(lpfc_mbx_rd_conf_lnk_type, rd_config);
8627                        phba->sli4_hba.lnk_info.lnk_no =
8628                                bf_get(lpfc_mbx_rd_conf_lnk_numb, rd_config);
8629                        lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
8630                                        "3081 lnk_type:%d, lnk_numb:%d\n",
8631                                        phba->sli4_hba.lnk_info.lnk_tp,
8632                                        phba->sli4_hba.lnk_info.lnk_no);
8633                } else
8634                        lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
8635                                        "3082 Mailbox (x%x) returned ldv:x0\n",
8636                                        bf_get(lpfc_mqe_command, &pmb->u.mqe));
8637                if (bf_get(lpfc_mbx_rd_conf_bbscn_def, rd_config)) {
8638                        phba->bbcredit_support = 1;
8639                        phba->sli4_hba.bbscn_params.word0 = rd_config->word8;
8640                }
8641
8642                phba->sli4_hba.conf_trunk =
8643                        bf_get(lpfc_mbx_rd_conf_trunk, rd_config);
8644                phba->sli4_hba.extents_in_use =
8645                        bf_get(lpfc_mbx_rd_conf_extnts_inuse, rd_config);
8646                phba->sli4_hba.max_cfg_param.max_xri =
8647                        bf_get(lpfc_mbx_rd_conf_xri_count, rd_config);
8648                /* Reduce resource usage in kdump environment */
8649                if (is_kdump_kernel() &&
8650                    phba->sli4_hba.max_cfg_param.max_xri > 512)
8651                        phba->sli4_hba.max_cfg_param.max_xri = 512;
8652                phba->sli4_hba.max_cfg_param.xri_base =
8653                        bf_get(lpfc_mbx_rd_conf_xri_base, rd_config);
8654                phba->sli4_hba.max_cfg_param.max_vpi =
8655                        bf_get(lpfc_mbx_rd_conf_vpi_count, rd_config);
8656                /* Limit the max we support */
8657                if (phba->sli4_hba.max_cfg_param.max_vpi > LPFC_MAX_VPORTS)
8658                        phba->sli4_hba.max_cfg_param.max_vpi = LPFC_MAX_VPORTS;
8659                phba->sli4_hba.max_cfg_param.vpi_base =
8660                        bf_get(lpfc_mbx_rd_conf_vpi_base, rd_config);
8661                phba->sli4_hba.max_cfg_param.max_rpi =
8662                        bf_get(lpfc_mbx_rd_conf_rpi_count, rd_config);
8663                phba->sli4_hba.max_cfg_param.rpi_base =
8664                        bf_get(lpfc_mbx_rd_conf_rpi_base, rd_config);
8665                phba->sli4_hba.max_cfg_param.max_vfi =
8666                        bf_get(lpfc_mbx_rd_conf_vfi_count, rd_config);
8667                phba->sli4_hba.max_cfg_param.vfi_base =
8668                        bf_get(lpfc_mbx_rd_conf_vfi_base, rd_config);
8669                phba->sli4_hba.max_cfg_param.max_fcfi =
8670                        bf_get(lpfc_mbx_rd_conf_fcfi_count, rd_config);
8671                phba->sli4_hba.max_cfg_param.max_eq =
8672                        bf_get(lpfc_mbx_rd_conf_eq_count, rd_config);
8673                phba->sli4_hba.max_cfg_param.max_rq =
8674                        bf_get(lpfc_mbx_rd_conf_rq_count, rd_config);
8675                phba->sli4_hba.max_cfg_param.max_wq =
8676                        bf_get(lpfc_mbx_rd_conf_wq_count, rd_config);
8677                phba->sli4_hba.max_cfg_param.max_cq =
8678                        bf_get(lpfc_mbx_rd_conf_cq_count, rd_config);
8679                phba->lmt = bf_get(lpfc_mbx_rd_conf_lmt, rd_config);
8680                phba->sli4_hba.next_xri = phba->sli4_hba.max_cfg_param.xri_base;
8681                phba->vpi_base = phba->sli4_hba.max_cfg_param.vpi_base;
8682                phba->vfi_base = phba->sli4_hba.max_cfg_param.vfi_base;
8683                phba->max_vpi = (phba->sli4_hba.max_cfg_param.max_vpi > 0) ?
8684                                (phba->sli4_hba.max_cfg_param.max_vpi - 1) : 0;
8685                phba->max_vports = phba->max_vpi;
8686                lpfc_map_topology(phba, rd_config);
8687                lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
8688                                "2003 cfg params Extents? %d "
8689                                "XRI(B:%d M:%d), "
8690                                "VPI(B:%d M:%d) "
8691                                "VFI(B:%d M:%d) "
8692                                "RPI(B:%d M:%d) "
8693                                "FCFI:%d EQ:%d CQ:%d WQ:%d RQ:%d lmt:x%x\n",
8694                                phba->sli4_hba.extents_in_use,
8695                                phba->sli4_hba.max_cfg_param.xri_base,
8696                                phba->sli4_hba.max_cfg_param.max_xri,
8697                                phba->sli4_hba.max_cfg_param.vpi_base,
8698                                phba->sli4_hba.max_cfg_param.max_vpi,
8699                                phba->sli4_hba.max_cfg_param.vfi_base,
8700                                phba->sli4_hba.max_cfg_param.max_vfi,
8701                                phba->sli4_hba.max_cfg_param.rpi_base,
8702                                phba->sli4_hba.max_cfg_param.max_rpi,
8703                                phba->sli4_hba.max_cfg_param.max_fcfi,
8704                                phba->sli4_hba.max_cfg_param.max_eq,
8705                                phba->sli4_hba.max_cfg_param.max_cq,
8706                                phba->sli4_hba.max_cfg_param.max_wq,
8707                                phba->sli4_hba.max_cfg_param.max_rq,
8708                                phba->lmt);
8709
8710                /*
8711                 * Calculate queue resources based on how
8712                 * many WQ/CQ/EQs are available.
8713                 */
8714                qmin = phba->sli4_hba.max_cfg_param.max_wq;
8715                if (phba->sli4_hba.max_cfg_param.max_cq < qmin)
8716                        qmin = phba->sli4_hba.max_cfg_param.max_cq;
8717                if (phba->sli4_hba.max_cfg_param.max_eq < qmin)
8718                        qmin = phba->sli4_hba.max_cfg_param.max_eq;
8719                /*
8720                 * What's left after this can go toward NVME / FCP.
8721                 * The minus 4 accounts for ELS, NVME LS, MBOX
8722                 * plus one extra. When configured for
8723                 * NVMET, FCP io channel WQs are not created.
8724                 */
8725                qmin -= 4;
8726
8727                /* Check to see if there is enough for NVME */
8728                if ((phba->cfg_irq_chann > qmin) ||
8729                    (phba->cfg_hdw_queue > qmin)) {
8730                        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8731                                        "2005 Reducing Queues - "
8732                                        "FW resource limitation: "
8733                                        "WQ %d CQ %d EQ %d: min %d: "
8734                                        "IRQ %d HDWQ %d\n",
8735                                        phba->sli4_hba.max_cfg_param.max_wq,
8736                                        phba->sli4_hba.max_cfg_param.max_cq,
8737                                        phba->sli4_hba.max_cfg_param.max_eq,
8738                                        qmin, phba->cfg_irq_chann,
8739                                        phba->cfg_hdw_queue);
8740
8741                        if (phba->cfg_irq_chann > qmin)
8742                                phba->cfg_irq_chann = qmin;
8743                        if (phba->cfg_hdw_queue > qmin)
8744                                phba->cfg_hdw_queue = qmin;
8745                }
8746        }
8747
8748        if (rc)
8749                goto read_cfg_out;
8750
8751        /* Update link speed if forced link speed is supported */
8752        if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
8753        if (if_type >= LPFC_SLI_INTF_IF_TYPE_2) {
8754                forced_link_speed =
8755                        bf_get(lpfc_mbx_rd_conf_link_speed, rd_config);
8756                if (forced_link_speed) {
8757                        phba->hba_flag |= HBA_FORCED_LINK_SPEED;
8758
8759                        switch (forced_link_speed) {
8760                        case LINK_SPEED_1G:
8761                                phba->cfg_link_speed =
8762                                        LPFC_USER_LINK_SPEED_1G;
8763                                break;
8764                        case LINK_SPEED_2G:
8765                                phba->cfg_link_speed =
8766                                        LPFC_USER_LINK_SPEED_2G;
8767                                break;
8768                        case LINK_SPEED_4G:
8769                                phba->cfg_link_speed =
8770                                        LPFC_USER_LINK_SPEED_4G;
8771                                break;
8772                        case LINK_SPEED_8G:
8773                                phba->cfg_link_speed =
8774                                        LPFC_USER_LINK_SPEED_8G;
8775                                break;
8776                        case LINK_SPEED_10G:
8777                                phba->cfg_link_speed =
8778                                        LPFC_USER_LINK_SPEED_10G;
8779                                break;
8780                        case LINK_SPEED_16G:
8781                                phba->cfg_link_speed =
8782                                        LPFC_USER_LINK_SPEED_16G;
8783                                break;
8784                        case LINK_SPEED_32G:
8785                                phba->cfg_link_speed =
8786                                        LPFC_USER_LINK_SPEED_32G;
8787                                break;
8788                        case LINK_SPEED_64G:
8789                                phba->cfg_link_speed =
8790                                        LPFC_USER_LINK_SPEED_64G;
8791                                break;
8792                        case 0xffff:
8793                                phba->cfg_link_speed =
8794                                        LPFC_USER_LINK_SPEED_AUTO;
8795                                break;
8796                        default:
8797                                lpfc_printf_log(phba, KERN_ERR,
8798                                                LOG_TRACE_EVENT,
8799                                                "0047 Unrecognized link "
8800                                                "speed: %d\n",
8801                                                forced_link_speed);
8802                                phba->cfg_link_speed =
8803                                        LPFC_USER_LINK_SPEED_AUTO;
8804                        }
8805                }
8806        }
8807
8808        /* Reset the DFT_HBA_Q_DEPTH to the max xri  */
8809        length = phba->sli4_hba.max_cfg_param.max_xri -
8810                        lpfc_sli4_get_els_iocb_cnt(phba);
8811        if (phba->cfg_hba_queue_depth > length) {
8812                lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
8813                                "3361 HBA queue depth changed from %d to %d\n",
8814                                phba->cfg_hba_queue_depth, length);
8815                phba->cfg_hba_queue_depth = length;
8816        }
8817
8818        if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) <
8819            LPFC_SLI_INTF_IF_TYPE_2)
8820                goto read_cfg_out;
8821
8822        /* get the pf# and vf# for SLI4 if_type 2 port */
8823        length = (sizeof(struct lpfc_mbx_get_func_cfg) -
8824                  sizeof(struct lpfc_sli4_cfg_mhdr));
8825        lpfc_sli4_config(phba, pmb, LPFC_MBOX_SUBSYSTEM_COMMON,
8826                         LPFC_MBOX_OPCODE_GET_FUNCTION_CONFIG,
8827                         length, LPFC_SLI4_MBX_EMBED);
8828
8829        rc2 = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
8830        shdr = (union lpfc_sli4_cfg_shdr *)
8831                                &pmb->u.mqe.un.sli4_config.header.cfg_shdr;
8832        shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
8833        shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
8834        if (rc2 || shdr_status || shdr_add_status) {
8835                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8836                                "3026 Mailbox failed, mbxCmd x%x "
8837                                "GET_FUNCTION_CONFIG, mbxStatus x%x\n",
8838                                bf_get(lpfc_mqe_command, &pmb->u.mqe),
8839                                bf_get(lpfc_mqe_status, &pmb->u.mqe));
8840                goto read_cfg_out;
8841        }
8842
8843        /* search for the fc_fcoe resource descriptor */
8844        get_func_cfg = &pmb->u.mqe.un.get_func_cfg;
8845
8846        pdesc_0 = (char *)&get_func_cfg->func_cfg.desc[0];
8847        desc = (struct lpfc_rsrc_desc_fcfcoe *)pdesc_0;
8848        length = bf_get(lpfc_rsrc_desc_fcfcoe_length, desc);
8849        if (length == LPFC_RSRC_DESC_TYPE_FCFCOE_V0_RSVD)
8850                length = LPFC_RSRC_DESC_TYPE_FCFCOE_V0_LENGTH;
8851        else if (length != LPFC_RSRC_DESC_TYPE_FCFCOE_V1_LENGTH)
8852                goto read_cfg_out;
8853
8854        for (i = 0; i < LPFC_RSRC_DESC_MAX_NUM; i++) {
8855                desc = (struct lpfc_rsrc_desc_fcfcoe *)(pdesc_0 + length * i);
8856                if (LPFC_RSRC_DESC_TYPE_FCFCOE ==
8857                    bf_get(lpfc_rsrc_desc_fcfcoe_type, desc)) {
8858                        phba->sli4_hba.iov.pf_number =
8859                                bf_get(lpfc_rsrc_desc_fcfcoe_pfnum, desc);
8860                        phba->sli4_hba.iov.vf_number =
8861                                bf_get(lpfc_rsrc_desc_fcfcoe_vfnum, desc);
8862                        break;
8863                }
8864        }
8865
8866        if (i < LPFC_RSRC_DESC_MAX_NUM)
8867                lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
8868                                "3027 GET_FUNCTION_CONFIG: pf_number:%d, "
8869                                "vf_number:%d\n", phba->sli4_hba.iov.pf_number,
8870                                phba->sli4_hba.iov.vf_number);
8871        else
8872                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8873                                "3028 GET_FUNCTION_CONFIG: failed to find "
8874                                "Resource Descriptor:x%x\n",
8875                                LPFC_RSRC_DESC_TYPE_FCFCOE);
8876
8877read_cfg_out:
8878        mempool_free(pmb, phba->mbox_mem_pool);
8879        return rc;
8880}
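
/*
 * Worked example of the queue clamp in lpfc_sli4_read_config() (numbers
 * illustrative, not from real hardware): if READ_CONFIG reports
 * max_wq = 64, max_cq = 80 and max_eq = 32, then qmin = min(64, 80, 32)
 * = 32; reserving 4 queues (ELS, NVME LS, MBOX plus one extra) leaves 28,
 * so cfg_irq_chann and cfg_hdw_queue are each capped at 28.
 */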
8881
8882/**
8883 * lpfc_setup_endian_order - Write endian order to an SLI4 if_type 0 port.
8884 * @phba: pointer to lpfc hba data structure.
8885 *
8886 * This routine is invoked to set up the port-side endian order when
8887 * the port if_type is 0.  This routine has no function for other
8888 * if_types.
8889 *
8890 * Return codes
8891 *      0 - successful
8892 *      -ENOMEM - No available memory
8893 *      -EIO - The mailbox failed to complete successfully.
8894 **/
8895static int
8896lpfc_setup_endian_order(struct lpfc_hba *phba)
8897{
8898        LPFC_MBOXQ_t *mboxq;
8899        uint32_t if_type, rc = 0;
8900        uint32_t endian_mb_data[2] = {HOST_ENDIAN_LOW_WORD0,
8901                                      HOST_ENDIAN_HIGH_WORD1};
8902
8903        if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
8904        switch (if_type) {
8905        case LPFC_SLI_INTF_IF_TYPE_0:
8906                mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
8907                                                       GFP_KERNEL);
8908                if (!mboxq) {
8909                        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8910                                        "0492 Unable to allocate memory for "
8911                                        "issuing SLI_CONFIG_SPECIAL mailbox "
8912                                        "command\n");
8913                        return -ENOMEM;
8914                }
8915
8916                /*
8917                 * The SLI4_CONFIG_SPECIAL mailbox command requires the first
8918                 * two words to contain special data values and no other data.
8919                 */
8920                memset(mboxq, 0, sizeof(LPFC_MBOXQ_t));
8921                memcpy(&mboxq->u.mqe, &endian_mb_data, sizeof(endian_mb_data));
8922                rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
8923                if (rc != MBX_SUCCESS) {
8924                        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8925                                        "0493 SLI_CONFIG_SPECIAL mailbox "
8926                                        "failed with status x%x\n",
8927                                        rc);
8928                        rc = -EIO;
8929                }
8930                mempool_free(mboxq, phba->mbox_mem_pool);
8931                break;
8932        case LPFC_SLI_INTF_IF_TYPE_6:
8933        case LPFC_SLI_INTF_IF_TYPE_2:
8934        case LPFC_SLI_INTF_IF_TYPE_1:
8935        default:
8936                break;
8937        }
8938        return rc;
8939}
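
/*
 * The two magic words written above act as an endianness probe: an
 * if_type 0 port presumably compares the byte pattern it receives with
 * the known HOST_ENDIAN_* constants and derives from any observed byte
 * swapping the endian order to use for subsequent mailbox traffic; later
 * interface types handle this implicitly, hence the empty cases.
 */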
8940
8941/**
8942 * lpfc_sli4_queue_verify - Verify and update EQ counts
8943 * @phba: pointer to lpfc hba data structure.
8944 *
8945 * This routine is invoked to check the user-settable queue counts for EQs.
8946 * After this routine is called the counts will be set to valid values that
8947 * adhere to the constraints of the system's interrupt vectors and the port's
8948 * queue resources.
8949 *
8950 * Return codes
8951 *      0 - successful
8952 *      -ENOMEM - No available memory
8953 **/
8954static int
8955lpfc_sli4_queue_verify(struct lpfc_hba *phba)
8956{
8957        /*
8958         * Sanity check for configured queue parameters against the run-time
8959         * device parameters
8960         */
8961
8962        if (phba->nvmet_support) {
8963                if (phba->cfg_hdw_queue < phba->cfg_nvmet_mrq)
8964                        phba->cfg_nvmet_mrq = phba->cfg_hdw_queue;
8965                if (phba->cfg_nvmet_mrq > LPFC_NVMET_MRQ_MAX)
8966                        phba->cfg_nvmet_mrq = LPFC_NVMET_MRQ_MAX;
8967        }
8968
8969        lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8970                        "2574 IO channels: hdwQ %d IRQ %d MRQ: %d\n",
8971                        phba->cfg_hdw_queue, phba->cfg_irq_chann,
8972                        phba->cfg_nvmet_mrq);
8973
8974        /* Get EQ depth from module parameter, fake the default for now */
8975        phba->sli4_hba.eq_esize = LPFC_EQE_SIZE_4B;
8976        phba->sli4_hba.eq_ecount = LPFC_EQE_DEF_COUNT;
8977
8978        /* Get CQ depth from module parameter, fake the default for now */
8979        phba->sli4_hba.cq_esize = LPFC_CQE_SIZE;
8980        phba->sli4_hba.cq_ecount = LPFC_CQE_DEF_COUNT;
8981        return 0;
8982}
8983
8984static int
8985lpfc_alloc_io_wq_cq(struct lpfc_hba *phba, int idx)
8986{
8987        struct lpfc_queue *qdesc;
8988        u32 wqesize;
8989        int cpu;
8990
8991        cpu = lpfc_find_cpu_handle(phba, idx, LPFC_FIND_BY_HDWQ);
8992        /* Create Fast Path IO CQs */
8993        if (phba->enab_exp_wqcq_pages) {
8994                /* Increase the CQ size when WQEs contain an embedded cdb */
8995                qdesc = lpfc_sli4_queue_alloc(phba, LPFC_EXPANDED_PAGE_SIZE,
8996                                              phba->sli4_hba.cq_esize,
8997                                              LPFC_CQE_EXP_COUNT, cpu);
8998        } else {
8999                qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
9000                                              phba->sli4_hba.cq_esize,
9001                                              phba->sli4_hba.cq_ecount, cpu);
9002        }
9003        if (!qdesc) {
9004                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9005                                "0499 Failed allocate fast-path IO CQ (%d)\n",
9006                                idx);
9007                return 1;
9008        }
9009        qdesc->qe_valid = 1;
9010        qdesc->hdwq = idx;
9011        qdesc->chann = cpu;
9012        phba->sli4_hba.hdwq[idx].io_cq = qdesc;
9013
9014        /* Create Fast Path IO WQs */
9015        if (phba->enab_exp_wqcq_pages) {
9016                /* Increase the WQ size when WQEs contain an embedded cdb */
9017                wqesize = (phba->fcp_embed_io) ?
9018                        LPFC_WQE128_SIZE : phba->sli4_hba.wq_esize;
9019                qdesc = lpfc_sli4_queue_alloc(phba, LPFC_EXPANDED_PAGE_SIZE,
9020                                              wqesize,
9021                                              LPFC_WQE_EXP_COUNT, cpu);
        } else {
                qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
                                              phba->sli4_hba.wq_esize,
                                              phba->sli4_hba.wq_ecount, cpu);
        }
9026
9027        if (!qdesc) {
9028                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9029                                "0503 Failed allocate fast-path IO WQ (%d)\n",
9030                                idx);
9031                return 1;
9032        }
9033        qdesc->hdwq = idx;
9034        qdesc->chann = cpu;
9035        phba->sli4_hba.hdwq[idx].io_wq = qdesc;
9036        list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list);
9037        return 0;
9038}
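
/*
 * Typical use of the helper above, as lpfc_sli4_queue_create() does below:
 * allocate one CQ/WQ pair per hardware queue and unwind everything on the
 * first failure (note the helper returns nonzero, not a -Exxx code):
 *
 *	for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
 *		if (lpfc_alloc_io_wq_cq(phba, idx))
 *			goto out_error;
 *	}
 */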
9039
9040/**
9041 * lpfc_sli4_queue_create - Create all the SLI4 queues
9042 * @phba: pointer to lpfc hba data structure.
9043 *
9044 * This routine is invoked to allocate all the SLI4 queues for the FCoE HBA
9045 * operation. For each SLI4 queue type, the parameters such as queue entry
9046 * count (queue depth) shall be taken from the module parameter. For now,
 * we just use some constant number as a placeholder.
9048 *
9049 * Return codes
9050 *      0 - successful
 *      -ENOMEM - No available memory
9053 **/
9054int
9055lpfc_sli4_queue_create(struct lpfc_hba *phba)
9056{
9057        struct lpfc_queue *qdesc;
9058        int idx, cpu, eqcpu;
9059        struct lpfc_sli4_hdw_queue *qp;
9060        struct lpfc_vector_map_info *cpup;
9061        struct lpfc_vector_map_info *eqcpup;
9062        struct lpfc_eq_intr_info *eqi;
9063
9064        /*
9065         * Create HBA Record arrays.
         * Both NVME and FCP will share the same vectors / EQs.
9067         */
9068        phba->sli4_hba.mq_esize = LPFC_MQE_SIZE;
9069        phba->sli4_hba.mq_ecount = LPFC_MQE_DEF_COUNT;
9070        phba->sli4_hba.wq_esize = LPFC_WQE_SIZE;
9071        phba->sli4_hba.wq_ecount = LPFC_WQE_DEF_COUNT;
9072        phba->sli4_hba.rq_esize = LPFC_RQE_SIZE;
9073        phba->sli4_hba.rq_ecount = LPFC_RQE_DEF_COUNT;
9074        phba->sli4_hba.eq_esize = LPFC_EQE_SIZE_4B;
9075        phba->sli4_hba.eq_ecount = LPFC_EQE_DEF_COUNT;
9076        phba->sli4_hba.cq_esize = LPFC_CQE_SIZE;
9077        phba->sli4_hba.cq_ecount = LPFC_CQE_DEF_COUNT;
9078
9079        if (!phba->sli4_hba.hdwq) {
9080                phba->sli4_hba.hdwq = kcalloc(
9081                        phba->cfg_hdw_queue, sizeof(struct lpfc_sli4_hdw_queue),
9082                        GFP_KERNEL);
9083                if (!phba->sli4_hba.hdwq) {
9084                        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9085                                        "6427 Failed allocate memory for "
9086                                        "fast-path Hardware Queue array\n");
9087                        goto out_error;
9088                }
9089                /* Prepare hardware queues to take IO buffers */
9090                for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
9091                        qp = &phba->sli4_hba.hdwq[idx];
9092                        spin_lock_init(&qp->io_buf_list_get_lock);
9093                        spin_lock_init(&qp->io_buf_list_put_lock);
9094                        INIT_LIST_HEAD(&qp->lpfc_io_buf_list_get);
9095                        INIT_LIST_HEAD(&qp->lpfc_io_buf_list_put);
9096                        qp->get_io_bufs = 0;
9097                        qp->put_io_bufs = 0;
9098                        qp->total_io_bufs = 0;
9099                        spin_lock_init(&qp->abts_io_buf_list_lock);
9100                        INIT_LIST_HEAD(&qp->lpfc_abts_io_buf_list);
9101                        qp->abts_scsi_io_bufs = 0;
9102                        qp->abts_nvme_io_bufs = 0;
9103                        INIT_LIST_HEAD(&qp->sgl_list);
9104                        INIT_LIST_HEAD(&qp->cmd_rsp_buf_list);
9105                        spin_lock_init(&qp->hdwq_lock);
9106                }
9107        }
9108
9109        if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
9110                if (phba->nvmet_support) {
9111                        phba->sli4_hba.nvmet_cqset = kcalloc(
9112                                        phba->cfg_nvmet_mrq,
9113                                        sizeof(struct lpfc_queue *),
9114                                        GFP_KERNEL);
9115                        if (!phba->sli4_hba.nvmet_cqset) {
9116                                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9117                                        "3121 Fail allocate memory for "
9118                                        "fast-path CQ set array\n");
9119                                goto out_error;
9120                        }
9121                        phba->sli4_hba.nvmet_mrq_hdr = kcalloc(
9122                                        phba->cfg_nvmet_mrq,
9123                                        sizeof(struct lpfc_queue *),
9124                                        GFP_KERNEL);
9125                        if (!phba->sli4_hba.nvmet_mrq_hdr) {
9126                                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9127                                        "3122 Fail allocate memory for "
9128                                        "fast-path RQ set hdr array\n");
9129                                goto out_error;
9130                        }
9131                        phba->sli4_hba.nvmet_mrq_data = kcalloc(
9132                                        phba->cfg_nvmet_mrq,
9133                                        sizeof(struct lpfc_queue *),
9134                                        GFP_KERNEL);
9135                        if (!phba->sli4_hba.nvmet_mrq_data) {
9136                                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9137                                        "3124 Fail allocate memory for "
9138                                        "fast-path RQ set data array\n");
9139                                goto out_error;
9140                        }
9141                }
9142        }
9143
9144        INIT_LIST_HEAD(&phba->sli4_hba.lpfc_wq_list);
9145
9146        /* Create HBA Event Queues (EQs) */
9147        for_each_present_cpu(cpu) {
9148                /* We only want to create 1 EQ per vector, even though
                 * multiple CPUs might be using that vector, so only
                 * select the CPUs that are marked LPFC_CPU_FIRST_IRQ.
9151                 */
9152                cpup = &phba->sli4_hba.cpu_map[cpu];
9153                if (!(cpup->flag & LPFC_CPU_FIRST_IRQ))
9154                        continue;
9155
9156                /* Get a ptr to the Hardware Queue associated with this CPU */
9157                qp = &phba->sli4_hba.hdwq[cpup->hdwq];
9158
9159                /* Allocate an EQ */
9160                qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
9161                                              phba->sli4_hba.eq_esize,
9162                                              phba->sli4_hba.eq_ecount, cpu);
9163                if (!qdesc) {
9164                        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9165                                        "0497 Failed allocate EQ (%d)\n",
9166                                        cpup->hdwq);
9167                        goto out_error;
9168                }
9169                qdesc->qe_valid = 1;
9170                qdesc->hdwq = cpup->hdwq;
9171                qdesc->chann = cpu; /* First CPU this EQ is affinitized to */
9172                qdesc->last_cpu = qdesc->chann;
9173
9174                /* Save the allocated EQ in the Hardware Queue */
9175                qp->hba_eq = qdesc;
9176
9177                eqi = per_cpu_ptr(phba->sli4_hba.eq_info, qdesc->last_cpu);
9178                list_add(&qdesc->cpu_list, &eqi->list);
9179        }
9180
        /* Now we need to populate the other Hardware Queues, which share
         * an IRQ vector, with the associated EQ ptr.
9183         */
9184        for_each_present_cpu(cpu) {
9185                cpup = &phba->sli4_hba.cpu_map[cpu];
9186
9187                /* Check for EQ already allocated in previous loop */
9188                if (cpup->flag & LPFC_CPU_FIRST_IRQ)
9189                        continue;
9190
9191                /* Check for multiple CPUs per hdwq */
9192                qp = &phba->sli4_hba.hdwq[cpup->hdwq];
9193                if (qp->hba_eq)
9194                        continue;
9195
9196                /* We need to share an EQ for this hdwq */
9197                eqcpu = lpfc_find_cpu_handle(phba, cpup->eq, LPFC_FIND_BY_EQ);
9198                eqcpup = &phba->sli4_hba.cpu_map[eqcpu];
9199                qp->hba_eq = phba->sli4_hba.hdwq[eqcpup->hdwq].hba_eq;
9200        }
9201
9202        /* Allocate IO Path SLI4 CQ/WQs */
9203        for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
9204                if (lpfc_alloc_io_wq_cq(phba, idx))
9205                        goto out_error;
9206        }
9207
9208        if (phba->nvmet_support) {
9209                for (idx = 0; idx < phba->cfg_nvmet_mrq; idx++) {
9210                        cpu = lpfc_find_cpu_handle(phba, idx,
9211                                                   LPFC_FIND_BY_HDWQ);
9212                        qdesc = lpfc_sli4_queue_alloc(phba,
9213                                                      LPFC_DEFAULT_PAGE_SIZE,
9214                                                      phba->sli4_hba.cq_esize,
9215                                                      phba->sli4_hba.cq_ecount,
9216                                                      cpu);
9217                        if (!qdesc) {
9218                                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9219                                                "3142 Failed allocate NVME "
9220                                                "CQ Set (%d)\n", idx);
9221                                goto out_error;
9222                        }
9223                        qdesc->qe_valid = 1;
9224                        qdesc->hdwq = idx;
9225                        qdesc->chann = cpu;
9226                        phba->sli4_hba.nvmet_cqset[idx] = qdesc;
9227                }
9228        }
9229
9230        /*
9231         * Create Slow Path Completion Queues (CQs)
9232         */
9233
9234        cpu = lpfc_find_cpu_handle(phba, 0, LPFC_FIND_BY_EQ);
9235        /* Create slow-path Mailbox Command Complete Queue */
9236        qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
9237                                      phba->sli4_hba.cq_esize,
9238                                      phba->sli4_hba.cq_ecount, cpu);
9239        if (!qdesc) {
9240                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9241                                "0500 Failed allocate slow-path mailbox CQ\n");
9242                goto out_error;
9243        }
9244        qdesc->qe_valid = 1;
9245        phba->sli4_hba.mbx_cq = qdesc;
9246
9247        /* Create slow-path ELS Complete Queue */
9248        qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
9249                                      phba->sli4_hba.cq_esize,
9250                                      phba->sli4_hba.cq_ecount, cpu);
9251        if (!qdesc) {
9252                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9253                                "0501 Failed allocate slow-path ELS CQ\n");
9254                goto out_error;
9255        }
9256        qdesc->qe_valid = 1;
9257        qdesc->chann = cpu;
9258        phba->sli4_hba.els_cq = qdesc;
9259
9260
9261        /*
9262         * Create Slow Path Work Queues (WQs)
9263         */
9264
9265        /* Create Mailbox Command Queue */
9266
9267        qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
9268                                      phba->sli4_hba.mq_esize,
9269                                      phba->sli4_hba.mq_ecount, cpu);
9270        if (!qdesc) {
9271                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9272                                "0505 Failed allocate slow-path MQ\n");
9273                goto out_error;
9274        }
9275        qdesc->chann = cpu;
9276        phba->sli4_hba.mbx_wq = qdesc;
9277
9278        /*
9279         * Create ELS Work Queues
9280         */
9281
9282        /* Create slow-path ELS Work Queue */
9283        qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
9284                                      phba->sli4_hba.wq_esize,
9285                                      phba->sli4_hba.wq_ecount, cpu);
9286        if (!qdesc) {
9287                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9288                                "0504 Failed allocate slow-path ELS WQ\n");
9289                goto out_error;
9290        }
9291        qdesc->chann = cpu;
9292        phba->sli4_hba.els_wq = qdesc;
9293        list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list);
9294
9295        if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
9296                /* Create NVME LS Complete Queue */
9297                qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
9298                                              phba->sli4_hba.cq_esize,
9299                                              phba->sli4_hba.cq_ecount, cpu);
9300                if (!qdesc) {
9301                        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9302                                        "6079 Failed allocate NVME LS CQ\n");
9303                        goto out_error;
9304                }
9305                qdesc->chann = cpu;
9306                qdesc->qe_valid = 1;
9307                phba->sli4_hba.nvmels_cq = qdesc;
9308
9309                /* Create NVME LS Work Queue */
9310                qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
9311                                              phba->sli4_hba.wq_esize,
9312                                              phba->sli4_hba.wq_ecount, cpu);
9313                if (!qdesc) {
9314                        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9315                                        "6080 Failed allocate NVME LS WQ\n");
9316                        goto out_error;
9317                }
9318                qdesc->chann = cpu;
9319                phba->sli4_hba.nvmels_wq = qdesc;
9320                list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list);
9321        }
9322
9323        /*
9324         * Create Receive Queue (RQ)
9325         */
9326
9327        /* Create Receive Queue for header */
9328        qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
9329                                      phba->sli4_hba.rq_esize,
9330                                      phba->sli4_hba.rq_ecount, cpu);
9331        if (!qdesc) {
9332                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9333                                "0506 Failed allocate receive HRQ\n");
9334                goto out_error;
9335        }
9336        phba->sli4_hba.hdr_rq = qdesc;
9337
9338        /* Create Receive Queue for data */
9339        qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
9340                                      phba->sli4_hba.rq_esize,
9341                                      phba->sli4_hba.rq_ecount, cpu);
9342        if (!qdesc) {
9343                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9344                                "0507 Failed allocate receive DRQ\n");
9345                goto out_error;
9346        }
9347        phba->sli4_hba.dat_rq = qdesc;
9348
9349        if ((phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) &&
9350            phba->nvmet_support) {
9351                for (idx = 0; idx < phba->cfg_nvmet_mrq; idx++) {
9352                        cpu = lpfc_find_cpu_handle(phba, idx,
9353                                                   LPFC_FIND_BY_HDWQ);
9354                        /* Create NVMET Receive Queue for header */
9355                        qdesc = lpfc_sli4_queue_alloc(phba,
9356                                                      LPFC_DEFAULT_PAGE_SIZE,
9357                                                      phba->sli4_hba.rq_esize,
9358                                                      LPFC_NVMET_RQE_DEF_COUNT,
9359                                                      cpu);
9360                        if (!qdesc) {
9361                                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9362                                                "3146 Failed allocate "
9363                                                "receive HRQ\n");
9364                                goto out_error;
9365                        }
9366                        qdesc->hdwq = idx;
9367                        phba->sli4_hba.nvmet_mrq_hdr[idx] = qdesc;
9368
9369                        /* Only needed for header of RQ pair */
9370                        qdesc->rqbp = kzalloc_node(sizeof(*qdesc->rqbp),
9371                                                   GFP_KERNEL,
9372                                                   cpu_to_node(cpu));
                        if (!qdesc->rqbp) {
9374                                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9375                                                "6131 Failed allocate "
9376                                                "Header RQBP\n");
9377                                goto out_error;
9378                        }
9379
9380                        /* Put list in known state in case driver load fails. */
9381                        INIT_LIST_HEAD(&qdesc->rqbp->rqb_buffer_list);
9382
9383                        /* Create NVMET Receive Queue for data */
9384                        qdesc = lpfc_sli4_queue_alloc(phba,
9385                                                      LPFC_DEFAULT_PAGE_SIZE,
9386                                                      phba->sli4_hba.rq_esize,
9387                                                      LPFC_NVMET_RQE_DEF_COUNT,
9388                                                      cpu);
9389                        if (!qdesc) {
9390                                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9391                                                "3156 Failed allocate "
9392                                                "receive DRQ\n");
9393                                goto out_error;
9394                        }
9395                        qdesc->hdwq = idx;
9396                        phba->sli4_hba.nvmet_mrq_data[idx] = qdesc;
9397                }
9398        }
9399
9400        /* Clear NVME stats */
9401        if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
9402                for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
9403                        memset(&phba->sli4_hba.hdwq[idx].nvme_cstat, 0,
9404                               sizeof(phba->sli4_hba.hdwq[idx].nvme_cstat));
9405                }
9406        }
9407
9408        /* Clear SCSI stats */
9409        if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) {
9410                for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
9411                        memset(&phba->sli4_hba.hdwq[idx].scsi_cstat, 0,
9412                               sizeof(phba->sli4_hba.hdwq[idx].scsi_cstat));
9413                }
9414        }
9415
9416        return 0;
9417
9418out_error:
9419        lpfc_sli4_queue_destroy(phba);
9420        return -ENOMEM;
9421}
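
/*
 * Queue allocation above is only half of bring-up. A minimal sketch of the
 * expected pairing with the register-level routines in this file (error
 * handling elided; the actual ordering lives in the SLI4 bring-up and
 * teardown paths):
 *
 *	rc = lpfc_sli4_queue_create(phba);	allocate queue memory
 *	if (!rc)
 *		rc = lpfc_sli4_queue_setup(phba);	create queues on the port
 *	...
 *	lpfc_sli4_queue_unset(phba);		destroy queues on the port
 *	lpfc_sli4_queue_destroy(phba);		free queue memory
 */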
9422
9423static inline void
9424__lpfc_sli4_release_queue(struct lpfc_queue **qp)
9425{
9426        if (*qp != NULL) {
9427                lpfc_sli4_queue_free(*qp);
9428                *qp = NULL;
9429        }
9430}
9431
9432static inline void
9433lpfc_sli4_release_queues(struct lpfc_queue ***qs, int max)
9434{
9435        int idx;
9436
9437        if (*qs == NULL)
9438                return;
9439
9440        for (idx = 0; idx < max; idx++)
9441                __lpfc_sli4_release_queue(&(*qs)[idx]);
9442
9443        kfree(*qs);
9444        *qs = NULL;
9445}
9446
9447static inline void
9448lpfc_sli4_release_hdwq(struct lpfc_hba *phba)
9449{
9450        struct lpfc_sli4_hdw_queue *hdwq;
9451        struct lpfc_queue *eq;
9452        uint32_t idx;
9453
9454        hdwq = phba->sli4_hba.hdwq;
9455
9456        /* Loop thru all Hardware Queues */
9457        for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
9458                /* Free the CQ/WQ corresponding to the Hardware Queue */
9459                lpfc_sli4_queue_free(hdwq[idx].io_cq);
9460                lpfc_sli4_queue_free(hdwq[idx].io_wq);
9461                hdwq[idx].hba_eq = NULL;
9462                hdwq[idx].io_cq = NULL;
9463                hdwq[idx].io_wq = NULL;
9464                if (phba->cfg_xpsgl && !phba->nvmet_support)
9465                        lpfc_free_sgl_per_hdwq(phba, &hdwq[idx]);
9466                lpfc_free_cmd_rsp_buf_per_hdwq(phba, &hdwq[idx]);
9467        }
9468        /* Loop thru all IRQ vectors */
9469        for (idx = 0; idx < phba->cfg_irq_chann; idx++) {
9470                /* Free the EQ corresponding to the IRQ vector */
9471                eq = phba->sli4_hba.hba_eq_hdl[idx].eq;
9472                lpfc_sli4_queue_free(eq);
9473                phba->sli4_hba.hba_eq_hdl[idx].eq = NULL;
9474        }
9475}
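
/*
 * Note on the second loop above: EQs are freed through hba_eq_hdl[] rather
 * than through hdwq[].hba_eq because several hardware queues may alias the
 * same EQ; the first loop only NULLs the per-hdwq aliases so nothing is
 * freed twice.
 */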
9476
9477/**
9478 * lpfc_sli4_queue_destroy - Destroy all the SLI4 queues
9479 * @phba: pointer to lpfc hba data structure.
9480 *
 * This routine is invoked to release all the SLI4 queues allocated for the
 * FCoE HBA operation.
9488 **/
9489void
9490lpfc_sli4_queue_destroy(struct lpfc_hba *phba)
9491{
9492        /*
         * Set FREE_INIT before beginning to free the queues, then wait
         * until any in-progress queue users have acknowledged the release
         * by clearing FREE_WAIT.
9496         */
9497        spin_lock_irq(&phba->hbalock);
9498        phba->sli.sli_flag |= LPFC_QUEUE_FREE_INIT;
9499        while (phba->sli.sli_flag & LPFC_QUEUE_FREE_WAIT) {
9500                spin_unlock_irq(&phba->hbalock);
9501                msleep(20);
9502                spin_lock_irq(&phba->hbalock);
9503        }
9504        spin_unlock_irq(&phba->hbalock);
9505
9506        lpfc_sli4_cleanup_poll_list(phba);
9507
9508        /* Release HBA eqs */
9509        if (phba->sli4_hba.hdwq)
9510                lpfc_sli4_release_hdwq(phba);
9511
9512        if (phba->nvmet_support) {
9513                lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_cqset,
9514                                         phba->cfg_nvmet_mrq);
9515
9516                lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_mrq_hdr,
9517                                         phba->cfg_nvmet_mrq);
9518                lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_mrq_data,
9519                                         phba->cfg_nvmet_mrq);
9520        }
9521
9522        /* Release mailbox command work queue */
9523        __lpfc_sli4_release_queue(&phba->sli4_hba.mbx_wq);
9524
9525        /* Release ELS work queue */
9526        __lpfc_sli4_release_queue(&phba->sli4_hba.els_wq);
9527
        /* Release NVME LS work queue */
9529        __lpfc_sli4_release_queue(&phba->sli4_hba.nvmels_wq);
9530
9531        /* Release unsolicited receive queue */
9532        __lpfc_sli4_release_queue(&phba->sli4_hba.hdr_rq);
9533        __lpfc_sli4_release_queue(&phba->sli4_hba.dat_rq);
9534
9535        /* Release ELS complete queue */
9536        __lpfc_sli4_release_queue(&phba->sli4_hba.els_cq);
9537
9538        /* Release NVME LS complete queue */
9539        __lpfc_sli4_release_queue(&phba->sli4_hba.nvmels_cq);
9540
9541        /* Release mailbox command complete queue */
9542        __lpfc_sli4_release_queue(&phba->sli4_hba.mbx_cq);
9543
9544        /* Everything on this list has been freed */
9545        INIT_LIST_HEAD(&phba->sli4_hba.lpfc_wq_list);
9546
9547        /* Done with freeing the queues */
9548        spin_lock_irq(&phba->hbalock);
9549        phba->sli.sli_flag &= ~LPFC_QUEUE_FREE_INIT;
9550        spin_unlock_irq(&phba->hbalock);
9551}
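
/*
 * Sketch of the consumer half of the FREE_INIT/FREE_WAIT handshake used
 * above (illustrative only; the real queue users live in lpfc_sli.c):
 *
 *	spin_lock_irq(&phba->hbalock);
 *	if (phba->sli.sli_flag & LPFC_QUEUE_FREE_INIT) {
 *		spin_unlock_irq(&phba->hbalock);
 *		return;				destroy in progress, back off
 *	}
 *	phba->sli.sli_flag |= LPFC_QUEUE_FREE_WAIT;
 *	spin_unlock_irq(&phba->hbalock);
 *	... touch the queues ...
 *	spin_lock_irq(&phba->hbalock);
 *	phba->sli.sli_flag &= ~LPFC_QUEUE_FREE_WAIT;
 *	spin_unlock_irq(&phba->hbalock);
 */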
9552
9553int
9554lpfc_free_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *rq)
9555{
9556        struct lpfc_rqb *rqbp;
9557        struct lpfc_dmabuf *h_buf;
9558        struct rqb_dmabuf *rqb_buffer;
9559
9560        rqbp = rq->rqbp;
9561        while (!list_empty(&rqbp->rqb_buffer_list)) {
9562                list_remove_head(&rqbp->rqb_buffer_list, h_buf,
9563                                 struct lpfc_dmabuf, list);
9564
9565                rqb_buffer = container_of(h_buf, struct rqb_dmabuf, hbuf);
9566                (rqbp->rqb_free_buffer)(phba, rqb_buffer);
9567                rqbp->buffer_count--;
9568        }
9569        return 1;
9570}
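
/*
 * The container_of() in lpfc_free_rq_buffer() assumes struct rqb_dmabuf
 * embeds its lpfc_dmabuf header as the 'hbuf' member, roughly:
 *
 *	struct rqb_dmabuf {
 *		struct lpfc_dmabuf hbuf;	list threaded through here
 *		...
 *	};
 *
 * so walking the header list recovers the enclosing buffer to hand to the
 * rqb_free_buffer() callback.
 */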
9571
9572static int
9573lpfc_create_wq_cq(struct lpfc_hba *phba, struct lpfc_queue *eq,
9574        struct lpfc_queue *cq, struct lpfc_queue *wq, uint16_t *cq_map,
9575        int qidx, uint32_t qtype)
9576{
9577        struct lpfc_sli_ring *pring;
9578        int rc;
9579
9580        if (!eq || !cq || !wq) {
9581                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9582                        "6085 Fast-path %s (%d) not allocated\n",
9583                        ((eq) ? ((cq) ? "WQ" : "CQ") : "EQ"), qidx);
9584                return -ENOMEM;
9585        }
9586
        /* Create the CQ first */
9588        rc = lpfc_cq_create(phba, cq, eq,
9589                        (qtype == LPFC_MBOX) ? LPFC_MCQ : LPFC_WCQ, qtype);
9590        if (rc) {
9591                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9592                                "6086 Failed setup of CQ (%d), rc = 0x%x\n",
9593                                qidx, (uint32_t)rc);
9594                return rc;
9595        }
9596
9597        if (qtype != LPFC_MBOX) {
9598                /* Setup cq_map for fast lookup */
9599                if (cq_map)
9600                        *cq_map = cq->queue_id;
9601
9602                lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
9603                        "6087 CQ setup: cq[%d]-id=%d, parent eq[%d]-id=%d\n",
9604                        qidx, cq->queue_id, qidx, eq->queue_id);
9605
                /* Create the WQ */
9607                rc = lpfc_wq_create(phba, wq, cq, qtype);
9608                if (rc) {
9609                        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9610                                "4618 Fail setup fastpath WQ (%d), rc = 0x%x\n",
9611                                qidx, (uint32_t)rc);
9612                        /* no need to tear down cq - caller will do so */
9613                        return rc;
9614                }
9615
                /* Bind this CQ/WQ to the SLI ring for this queue type */
9617                pring = wq->pring;
9618                pring->sli.sli4.wqp = (void *)wq;
9619                cq->pring = pring;
9620
9621                lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
9622                        "2593 WQ setup: wq[%d]-id=%d assoc=%d, cq[%d]-id=%d\n",
9623                        qidx, wq->queue_id, wq->assoc_qid, qidx, cq->queue_id);
9624        } else {
9625                rc = lpfc_mq_create(phba, wq, cq, LPFC_MBOX);
9626                if (rc) {
9627                        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9628                                        "0539 Failed setup of slow-path MQ: "
9629                                        "rc = 0x%x\n", rc);
9630                        /* no need to tear down cq - caller will do so */
9631                        return rc;
9632                }
9633
9634                lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
9635                        "2589 MBX MQ setup: wq-id=%d, parent cq-id=%d\n",
9636                        phba->sli4_hba.mbx_wq->queue_id,
9637                        phba->sli4_hba.mbx_cq->queue_id);
9638        }
9639
9640        return 0;
9641}
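
/*
 * The CQ is always created before the WQ/MQ above because the work queue
 * create command references its parent CQ. A typical slow-path invocation,
 * as issued from lpfc_sli4_queue_setup() below:
 *
 *	rc = lpfc_create_wq_cq(phba, qp[0].hba_eq,
 *			       phba->sli4_hba.els_cq,
 *			       phba->sli4_hba.els_wq,
 *			       NULL, 0, LPFC_ELS);
 */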
9642
9643/**
9644 * lpfc_setup_cq_lookup - Setup the CQ lookup table
9645 * @phba: pointer to lpfc hba data structure.
9646 *
 * This routine populates the cq_lookup table with all
 * available CQ queue_ids.
9649 **/
9650static void
9651lpfc_setup_cq_lookup(struct lpfc_hba *phba)
9652{
9653        struct lpfc_queue *eq, *childq;
9654        int qidx;
9655
9656        memset(phba->sli4_hba.cq_lookup, 0,
9657               (sizeof(struct lpfc_queue *) * (phba->sli4_hba.cq_max + 1)));
9658        /* Loop thru all IRQ vectors */
9659        for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) {
9660                /* Get the EQ corresponding to the IRQ vector */
9661                eq = phba->sli4_hba.hba_eq_hdl[qidx].eq;
9662                if (!eq)
9663                        continue;
9664                /* Loop through all CQs associated with that EQ */
9665                list_for_each_entry(childq, &eq->child_list, list) {
9666                        if (childq->queue_id > phba->sli4_hba.cq_max)
9667                                continue;
9668                        if (childq->subtype == LPFC_IO)
9669                                phba->sli4_hba.cq_lookup[childq->queue_id] =
9670                                        childq;
9671                }
9672        }
9673}
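
/*
 * Sketch of how the table built above is consumed (illustrative; the real
 * fast-path lookup is in lpfc_sli.c): given the CQID reported by an EQE,
 * the owning CQ is found in O(1) instead of walking the EQ's child list:
 *
 *	if (cqid <= phba->sli4_hba.cq_max)
 *		cq = phba->sli4_hba.cq_lookup[cqid];
 */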
9674
9675/**
9676 * lpfc_sli4_queue_setup - Set up all the SLI4 queues
9677 * @phba: pointer to lpfc hba data structure.
9678 *
9679 * This routine is invoked to set up all the SLI4 queues for the FCoE HBA
9680 * operation.
9681 *
9682 * Return codes
9683 *      0 - successful
9684 *      -ENOMEM - No available memory
 *      -ENXIO - The mailbox failed to complete successfully.
9686 **/
9687int
9688lpfc_sli4_queue_setup(struct lpfc_hba *phba)
9689{
9690        uint32_t shdr_status, shdr_add_status;
9691        union lpfc_sli4_cfg_shdr *shdr;
9692        struct lpfc_vector_map_info *cpup;
9693        struct lpfc_sli4_hdw_queue *qp;
9694        LPFC_MBOXQ_t *mboxq;
9695        int qidx, cpu;
9696        uint32_t length, usdelay;
9697        int rc = -ENOMEM;
9698
9699        /* Check for dual-ULP support */
9700        mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
9701        if (!mboxq) {
9702                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9703                                "3249 Unable to allocate memory for "
9704                                "QUERY_FW_CFG mailbox command\n");
9705                return -ENOMEM;
9706        }
9707        length = (sizeof(struct lpfc_mbx_query_fw_config) -
9708                  sizeof(struct lpfc_sli4_cfg_mhdr));
9709        lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
9710                         LPFC_MBOX_OPCODE_QUERY_FW_CFG,
9711                         length, LPFC_SLI4_MBX_EMBED);
9712
9713        rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
9714
9715        shdr = (union lpfc_sli4_cfg_shdr *)
9716                        &mboxq->u.mqe.un.sli4_config.header.cfg_shdr;
9717        shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
9718        shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
9719        if (shdr_status || shdr_add_status || rc) {
9720                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9721                                "3250 QUERY_FW_CFG mailbox failed with status "
9722                                "x%x add_status x%x, mbx status x%x\n",
9723                                shdr_status, shdr_add_status, rc);
9724                mempool_free(mboxq, phba->mbox_mem_pool);
9725                rc = -ENXIO;
9726                goto out_error;
9727        }
9728
9729        phba->sli4_hba.fw_func_mode =
9730                        mboxq->u.mqe.un.query_fw_cfg.rsp.function_mode;
9731        phba->sli4_hba.ulp0_mode = mboxq->u.mqe.un.query_fw_cfg.rsp.ulp0_mode;
9732        phba->sli4_hba.ulp1_mode = mboxq->u.mqe.un.query_fw_cfg.rsp.ulp1_mode;
9733        phba->sli4_hba.physical_port =
9734                        mboxq->u.mqe.un.query_fw_cfg.rsp.physical_port;
9735        lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
9736                        "3251 QUERY_FW_CFG: func_mode:x%x, ulp0_mode:x%x, "
9737                        "ulp1_mode:x%x\n", phba->sli4_hba.fw_func_mode,
9738                        phba->sli4_hba.ulp0_mode, phba->sli4_hba.ulp1_mode);
9739
9740        mempool_free(mboxq, phba->mbox_mem_pool);
9741
9742        /*
9743         * Set up HBA Event Queues (EQs)
9744         */
9745        qp = phba->sli4_hba.hdwq;
9746
        /* The hardware queue array must exist before EQs can be set up */
9748        if (!qp) {
9749                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9750                                "3147 Fast-path EQs not allocated\n");
9751                rc = -ENOMEM;
9752                goto out_error;
9753        }
9754
9755        /* Loop thru all IRQ vectors */
9756        for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) {
9757                /* Create HBA Event Queues (EQs) in order */
9758                for_each_present_cpu(cpu) {
9759                        cpup = &phba->sli4_hba.cpu_map[cpu];
9760
                        /* Look for the CPU that's using that vector with
9762                         * LPFC_CPU_FIRST_IRQ set.
9763                         */
9764                        if (!(cpup->flag & LPFC_CPU_FIRST_IRQ))
9765                                continue;
9766                        if (qidx != cpup->eq)
9767                                continue;
9768
9769                        /* Create an EQ for that vector */
9770                        rc = lpfc_eq_create(phba, qp[cpup->hdwq].hba_eq,
9771                                            phba->cfg_fcp_imax);
9772                        if (rc) {
9773                                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9774                                                "0523 Failed setup of fast-path"
9775                                                " EQ (%d), rc = 0x%x\n",
9776                                                cpup->eq, (uint32_t)rc);
9777                                goto out_destroy;
9778                        }
9779
9780                        /* Save the EQ for that vector in the hba_eq_hdl */
9781                        phba->sli4_hba.hba_eq_hdl[cpup->eq].eq =
9782                                qp[cpup->hdwq].hba_eq;
9783
9784                        lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
9785                                        "2584 HBA EQ setup: queue[%d]-id=%d\n",
9786                                        cpup->eq,
9787                                        qp[cpup->hdwq].hba_eq->queue_id);
9788                }
9789        }
9790
9791        /* Loop thru all Hardware Queues */
9792        for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
9793                cpu = lpfc_find_cpu_handle(phba, qidx, LPFC_FIND_BY_HDWQ);
9794                cpup = &phba->sli4_hba.cpu_map[cpu];
9795
9796                /* Create the CQ/WQ corresponding to the Hardware Queue */
9797                rc = lpfc_create_wq_cq(phba,
9798                                       phba->sli4_hba.hdwq[cpup->hdwq].hba_eq,
9799                                       qp[qidx].io_cq,
9800                                       qp[qidx].io_wq,
9801                                       &phba->sli4_hba.hdwq[qidx].io_cq_map,
9802                                       qidx,
9803                                       LPFC_IO);
9804                if (rc) {
9805                        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9806                                        "0535 Failed to setup fastpath "
9807                                        "IO WQ/CQ (%d), rc = 0x%x\n",
9808                                        qidx, (uint32_t)rc);
9809                        goto out_destroy;
9810                }
9811        }
9812
9813        /*
9814         * Set up Slow Path Complete Queues (CQs)
9815         */
9816
9817        /* Set up slow-path MBOX CQ/MQ */
9818
9819        if (!phba->sli4_hba.mbx_cq || !phba->sli4_hba.mbx_wq) {
9820                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9821                                "0528 %s not allocated\n",
9822                                phba->sli4_hba.mbx_cq ?
9823                                "Mailbox WQ" : "Mailbox CQ");
9824                rc = -ENOMEM;
9825                goto out_destroy;
9826        }
9827
9828        rc = lpfc_create_wq_cq(phba, qp[0].hba_eq,
9829                               phba->sli4_hba.mbx_cq,
9830                               phba->sli4_hba.mbx_wq,
9831                               NULL, 0, LPFC_MBOX);
9832        if (rc) {
9833                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9834                        "0529 Failed setup of mailbox WQ/CQ: rc = 0x%x\n",
9835                        (uint32_t)rc);
9836                goto out_destroy;
9837        }
9838        if (phba->nvmet_support) {
9839                if (!phba->sli4_hba.nvmet_cqset) {
9840                        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9841                                        "3165 Fast-path NVME CQ Set "
9842                                        "array not allocated\n");
9843                        rc = -ENOMEM;
9844                        goto out_destroy;
9845                }
9846                if (phba->cfg_nvmet_mrq > 1) {
9847                        rc = lpfc_cq_create_set(phba,
9848                                        phba->sli4_hba.nvmet_cqset,
9849                                        qp,
9850                                        LPFC_WCQ, LPFC_NVMET);
9851                        if (rc) {
9852                                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9853                                                "3164 Failed setup of NVME CQ "
9854                                                "Set, rc = 0x%x\n",
9855                                                (uint32_t)rc);
9856                                goto out_destroy;
9857                        }
9858                } else {
9859                        /* Set up NVMET Receive Complete Queue */
9860                        rc = lpfc_cq_create(phba, phba->sli4_hba.nvmet_cqset[0],
9861                                            qp[0].hba_eq,
9862                                            LPFC_WCQ, LPFC_NVMET);
9863                        if (rc) {
9864                                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9865                                                "6089 Failed setup NVMET CQ: "
9866                                                "rc = 0x%x\n", (uint32_t)rc);
9867                                goto out_destroy;
9868                        }
9869                        phba->sli4_hba.nvmet_cqset[0]->chann = 0;
9870
9871                        lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
9872                                        "6090 NVMET CQ setup: cq-id=%d, "
9873                                        "parent eq-id=%d\n",
9874                                        phba->sli4_hba.nvmet_cqset[0]->queue_id,
9875                                        qp[0].hba_eq->queue_id);
9876                }
9877        }
9878
9879        /* Set up slow-path ELS WQ/CQ */
9880        if (!phba->sli4_hba.els_cq || !phba->sli4_hba.els_wq) {
9881                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9882                                "0530 ELS %s not allocated\n",
9883                                phba->sli4_hba.els_cq ? "WQ" : "CQ");
9884                rc = -ENOMEM;
9885                goto out_destroy;
9886        }
9887        rc = lpfc_create_wq_cq(phba, qp[0].hba_eq,
9888                               phba->sli4_hba.els_cq,
9889                               phba->sli4_hba.els_wq,
9890                               NULL, 0, LPFC_ELS);
9891        if (rc) {
9892                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9893                                "0525 Failed setup of ELS WQ/CQ: rc = 0x%x\n",
9894                                (uint32_t)rc);
9895                goto out_destroy;
9896        }
9897        lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
9898                        "2590 ELS WQ setup: wq-id=%d, parent cq-id=%d\n",
9899                        phba->sli4_hba.els_wq->queue_id,
9900                        phba->sli4_hba.els_cq->queue_id);
9901
9902        if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
9903                /* Set up NVME LS Complete Queue */
9904                if (!phba->sli4_hba.nvmels_cq || !phba->sli4_hba.nvmels_wq) {
9905                        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9906                                        "6091 LS %s not allocated\n",
9907                                        phba->sli4_hba.nvmels_cq ? "WQ" : "CQ");
9908                        rc = -ENOMEM;
9909                        goto out_destroy;
9910                }
9911                rc = lpfc_create_wq_cq(phba, qp[0].hba_eq,
9912                                       phba->sli4_hba.nvmels_cq,
9913                                       phba->sli4_hba.nvmels_wq,
9914                                       NULL, 0, LPFC_NVME_LS);
9915                if (rc) {
9916                        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9917                                        "0526 Failed setup of NVVME LS WQ/CQ: "
9918                                        "rc = 0x%x\n", (uint32_t)rc);
9919                        goto out_destroy;
9920                }
9921
9922                lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
9923                                "6096 ELS WQ setup: wq-id=%d, "
9924                                "parent cq-id=%d\n",
9925                                phba->sli4_hba.nvmels_wq->queue_id,
9926                                phba->sli4_hba.nvmels_cq->queue_id);
9927        }
9928
9929        /*
9930         * Create NVMET Receive Queue (RQ)
9931         */
9932        if (phba->nvmet_support) {
9933                if ((!phba->sli4_hba.nvmet_cqset) ||
9934                    (!phba->sli4_hba.nvmet_mrq_hdr) ||
9935                    (!phba->sli4_hba.nvmet_mrq_data)) {
9936                        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9937                                        "6130 MRQ CQ Queues not "
9938                                        "allocated\n");
9939                        rc = -ENOMEM;
9940                        goto out_destroy;
9941                }
9942                if (phba->cfg_nvmet_mrq > 1) {
9943                        rc = lpfc_mrq_create(phba,
9944                                             phba->sli4_hba.nvmet_mrq_hdr,
9945                                             phba->sli4_hba.nvmet_mrq_data,
9946                                             phba->sli4_hba.nvmet_cqset,
9947                                             LPFC_NVMET);
9948                        if (rc) {
9949                                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9950                                                "6098 Failed setup of NVMET "
9951                                                "MRQ: rc = 0x%x\n",
9952                                                (uint32_t)rc);
9953                                goto out_destroy;
9954                        }
9955
9956                } else {
9957                        rc = lpfc_rq_create(phba,
9958                                            phba->sli4_hba.nvmet_mrq_hdr[0],
9959                                            phba->sli4_hba.nvmet_mrq_data[0],
9960                                            phba->sli4_hba.nvmet_cqset[0],
9961                                            LPFC_NVMET);
9962                        if (rc) {
9963                                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9964                                                "6057 Failed setup of NVMET "
9965                                                "Receive Queue: rc = 0x%x\n",
9966                                                (uint32_t)rc);
9967                                goto out_destroy;
9968                        }
9969
9970                        lpfc_printf_log(
9971                                phba, KERN_INFO, LOG_INIT,
9972                                "6099 NVMET RQ setup: hdr-rq-id=%d, "
9973                                "dat-rq-id=%d parent cq-id=%d\n",
9974                                phba->sli4_hba.nvmet_mrq_hdr[0]->queue_id,
9975                                phba->sli4_hba.nvmet_mrq_data[0]->queue_id,
9976                                phba->sli4_hba.nvmet_cqset[0]->queue_id);
9977
9978                }
9979        }
9980
9981        if (!phba->sli4_hba.hdr_rq || !phba->sli4_hba.dat_rq) {
9982                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9983                                "0540 Receive Queue not allocated\n");
9984                rc = -ENOMEM;
9985                goto out_destroy;
9986        }
9987
9988        rc = lpfc_rq_create(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq,
9989                            phba->sli4_hba.els_cq, LPFC_USOL);
9990        if (rc) {
9991                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9992                                "0541 Failed setup of Receive Queue: "
9993                                "rc = 0x%x\n", (uint32_t)rc);
9994                goto out_destroy;
9995        }
9996
9997        lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
9998                        "2592 USL RQ setup: hdr-rq-id=%d, dat-rq-id=%d "
9999                        "parent cq-id=%d\n",
10000                        phba->sli4_hba.hdr_rq->queue_id,
10001                        phba->sli4_hba.dat_rq->queue_id,
10002                        phba->sli4_hba.els_cq->queue_id);
10003
10004        if (phba->cfg_fcp_imax)
10005                usdelay = LPFC_SEC_TO_USEC / phba->cfg_fcp_imax;
10006        else
10007                usdelay = 0;
10008
10009        for (qidx = 0; qidx < phba->cfg_irq_chann;
10010             qidx += LPFC_MAX_EQ_DELAY_EQID_CNT)
10011                lpfc_modify_hba_eq_delay(phba, qidx, LPFC_MAX_EQ_DELAY_EQID_CNT,
10012                                         usdelay);
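
        /*
         * Worked example for the delay math above: assuming LPFC_SEC_TO_USEC
         * is one second in microseconds (1000000), cfg_fcp_imax = 50000
         * interrupts/sec gives usdelay = 1000000 / 50000 = 20us of EQ
         * coalescing delay; a cfg_fcp_imax of 0 disables the delay.
         */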
10013
10014        if (phba->sli4_hba.cq_max) {
10015                kfree(phba->sli4_hba.cq_lookup);
10016                phba->sli4_hba.cq_lookup = kcalloc((phba->sli4_hba.cq_max + 1),
10017                        sizeof(struct lpfc_queue *), GFP_KERNEL);
10018                if (!phba->sli4_hba.cq_lookup) {
10019                        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10020                                        "0549 Failed setup of CQ Lookup table: "
10021                                        "size 0x%x\n", phba->sli4_hba.cq_max);
10022                        rc = -ENOMEM;
10023                        goto out_destroy;
10024                }
10025                lpfc_setup_cq_lookup(phba);
10026        }
10027        return 0;
10028
10029out_destroy:
10030        lpfc_sli4_queue_unset(phba);
10031out_error:
10032        return rc;
10033}
10034
10035/**
10036 * lpfc_sli4_queue_unset - Unset all the SLI4 queues
10037 * @phba: pointer to lpfc hba data structure.
10038 *
 * This routine is invoked to unset (destroy on the port) all the SLI4 queues
 * set up for the FCoE HBA operation.
10046 **/
10047void
10048lpfc_sli4_queue_unset(struct lpfc_hba *phba)
10049{
10050        struct lpfc_sli4_hdw_queue *qp;
10051        struct lpfc_queue *eq;
10052        int qidx;
10053
10054        /* Unset mailbox command work queue */
10055        if (phba->sli4_hba.mbx_wq)
10056                lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq);
10057
10058        /* Unset NVME LS work queue */
10059        if (phba->sli4_hba.nvmels_wq)
10060                lpfc_wq_destroy(phba, phba->sli4_hba.nvmels_wq);
10061
10062        /* Unset ELS work queue */
10063        if (phba->sli4_hba.els_wq)
10064                lpfc_wq_destroy(phba, phba->sli4_hba.els_wq);
10065
10066        /* Unset unsolicited receive queue */
10067        if (phba->sli4_hba.hdr_rq)
10068                lpfc_rq_destroy(phba, phba->sli4_hba.hdr_rq,
10069                                phba->sli4_hba.dat_rq);
10070
10071        /* Unset mailbox command complete queue */
10072        if (phba->sli4_hba.mbx_cq)
10073                lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq);
10074
10075        /* Unset ELS complete queue */
10076        if (phba->sli4_hba.els_cq)
10077                lpfc_cq_destroy(phba, phba->sli4_hba.els_cq);
10078
10079        /* Unset NVME LS complete queue */
10080        if (phba->sli4_hba.nvmels_cq)
10081                lpfc_cq_destroy(phba, phba->sli4_hba.nvmels_cq);
10082
10083        if (phba->nvmet_support) {
10084                /* Unset NVMET MRQ queue */
10085                if (phba->sli4_hba.nvmet_mrq_hdr) {
10086                        for (qidx = 0; qidx < phba->cfg_nvmet_mrq; qidx++)
10087                                lpfc_rq_destroy(
10088                                        phba,
10089                                        phba->sli4_hba.nvmet_mrq_hdr[qidx],
10090                                        phba->sli4_hba.nvmet_mrq_data[qidx]);
10091                }
10092
10093                /* Unset NVMET CQ Set complete queue */
10094                if (phba->sli4_hba.nvmet_cqset) {
10095                        for (qidx = 0; qidx < phba->cfg_nvmet_mrq; qidx++)
10096                                lpfc_cq_destroy(
10097                                        phba, phba->sli4_hba.nvmet_cqset[qidx]);
10098                }
10099        }
10100
10101        /* Unset fast-path SLI4 queues */
10102        if (phba->sli4_hba.hdwq) {
10103                /* Loop thru all Hardware Queues */
10104                for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
10105                        /* Destroy the CQ/WQ corresponding to Hardware Queue */
10106                        qp = &phba->sli4_hba.hdwq[qidx];
10107                        lpfc_wq_destroy(phba, qp->io_wq);
10108                        lpfc_cq_destroy(phba, qp->io_cq);
10109                }
10110                /* Loop thru all IRQ vectors */
10111                for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) {
10112                        /* Destroy the EQ corresponding to the IRQ vector */
10113                        eq = phba->sli4_hba.hba_eq_hdl[qidx].eq;
10114                        lpfc_eq_destroy(phba, eq);
10115                }
10116        }
10117
10118        kfree(phba->sli4_hba.cq_lookup);
10119        phba->sli4_hba.cq_lookup = NULL;
10120        phba->sli4_hba.cq_max = 0;
10121}
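
/*
 * Teardown ordering note: the destroy commands above run essentially
 * leaf-first (WQs/MQs/RQs before their parent CQs, and the shared EQs
 * last), the reverse of the create order used in lpfc_sli4_queue_setup().
 */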
10122
10123/**
10124 * lpfc_sli4_cq_event_pool_create - Create completion-queue event free pool
10125 * @phba: pointer to lpfc hba data structure.
10126 *
10127 * This routine is invoked to allocate and set up a pool of completion queue
 * events. The body of the completion queue event is a completion queue entry
 * (CQE). For now, this pool is used by the interrupt service routine to queue
10130 * the following HBA completion queue events for the worker thread to process:
10131 *   - Mailbox asynchronous events
10132 *   - Receive queue completion unsolicited events
10133 * Later, this can be used for all the slow-path events.
10134 *
10135 * Return codes
10136 *      0 - successful
10137 *      -ENOMEM - No available memory
10138 **/
10139static int
10140lpfc_sli4_cq_event_pool_create(struct lpfc_hba *phba)
10141{
10142        struct lpfc_cq_event *cq_event;
10143        int i;
10144
10145        for (i = 0; i < (4 * phba->sli4_hba.cq_ecount); i++) {
10146                cq_event = kmalloc(sizeof(struct lpfc_cq_event), GFP_KERNEL);
10147                if (!cq_event)
10148                        goto out_pool_create_fail;
10149                list_add_tail(&cq_event->list,
10150                              &phba->sli4_hba.sp_cqe_event_pool);
10151        }
10152        return 0;
10153
10154out_pool_create_fail:
10155        lpfc_sli4_cq_event_pool_destroy(phba);
10156        return -ENOMEM;
10157}
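
/*
 * Pool sizing note for the loop above: the pool is seeded with
 * 4 * cq_ecount events; e.g. if LPFC_CQE_DEF_COUNT is 1024 (the usual
 * default), 4096 events are preallocated so a burst of slow-path
 * completions cannot immediately exhaust the pool.
 */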
10158
10159/**
10160 * lpfc_sli4_cq_event_pool_destroy - Free completion-queue event free pool
10161 * @phba: pointer to lpfc hba data structure.
10162 *
10163 * This routine is invoked to free the pool of completion queue events at
 * driver unload time. Note that it is the responsibility of the driver
 * cleanup routine to return all the outstanding completion-queue events
 * allocated from this pool back into the pool before invoking this routine
10167 * to destroy the pool.
10168 **/
10169static void
10170lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *phba)
10171{
10172        struct lpfc_cq_event *cq_event, *next_cq_event;
10173
10174        list_for_each_entry_safe(cq_event, next_cq_event,
10175                                 &phba->sli4_hba.sp_cqe_event_pool, list) {
10176                list_del(&cq_event->list);
10177                kfree(cq_event);
10178        }
10179}
10180
10181/**
10182 * __lpfc_sli4_cq_event_alloc - Allocate a completion-queue event from free pool
10183 * @phba: pointer to lpfc hba data structure.
10184 *
10185 * This routine is the lock free version of the API invoked to allocate a
10186 * completion-queue event from the free pool.
10187 *
10188 * Return: Pointer to the newly allocated completion-queue event if successful
10189 *         NULL otherwise.
10190 **/
10191struct lpfc_cq_event *
10192__lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba)
10193{
10194        struct lpfc_cq_event *cq_event = NULL;
10195
10196        list_remove_head(&phba->sli4_hba.sp_cqe_event_pool, cq_event,
10197                         struct lpfc_cq_event, list);
10198        return cq_event;
10199}
10200
10201/**
10202 * lpfc_sli4_cq_event_alloc - Allocate a completion-queue event from free pool
10203 * @phba: pointer to lpfc hba data structure.
10204 *
10205 * This routine is the lock version of the API invoked to allocate a
10206 * completion-queue event from the free pool.
10207 *
10208 * Return: Pointer to the newly allocated completion-queue event if successful
10209 *         NULL otherwise.
10210 **/
10211struct lpfc_cq_event *
10212lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba)
10213{
10214        struct lpfc_cq_event *cq_event;
10215        unsigned long iflags;
10216
10217        spin_lock_irqsave(&phba->hbalock, iflags);
10218        cq_event = __lpfc_sli4_cq_event_alloc(phba);
10219        spin_unlock_irqrestore(&phba->hbalock, iflags);
10220        return cq_event;
10221}
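
/*
 * The __lpfc_sli4_cq_event_alloc()/lpfc_sli4_cq_event_alloc() split is the
 * usual locked/lockless pairing: a caller already holding hbalock uses the
 * double-underscore form, everyone else takes the locking wrapper.
 * Illustrative sketch of a hypothetical caller:
 *
 *	spin_lock_irqsave(&phba->hbalock, iflags);
 *	cq_event = __lpfc_sli4_cq_event_alloc(phba);
 *	... stage cq_event under the lock ...
 *	spin_unlock_irqrestore(&phba->hbalock, iflags);
 */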
10222
10223/**
10224 * __lpfc_sli4_cq_event_release - Release a completion-queue event to free pool
10225 * @phba: pointer to lpfc hba data structure.
10226 * @cq_event: pointer to the completion queue event to be freed.
10227 *
10228 * This routine is the lock free version of the API invoked to release a
10229 * completion-queue event back into the free pool.
10230 **/
10231void
10232__lpfc_sli4_cq_event_release(struct lpfc_hba *phba,
10233                             struct lpfc_cq_event *cq_event)
10234{
10235        list_add_tail(&cq_event->list, &phba->sli4_hba.sp_cqe_event_pool);
10236}
10237
10238/**
10239 * lpfc_sli4_cq_event_release - Release a completion-queue event to free pool
10240 * @phba: pointer to lpfc hba data structure.
10241 * @cq_event: pointer to the completion queue event to be freed.
10242 *
10243 * This routine is the lock version of the API invoked to release a
10244 * completion-queue event back into the free pool.
10245 **/
10246void
10247lpfc_sli4_cq_event_release(struct lpfc_hba *phba,
10248                           struct lpfc_cq_event *cq_event)
10249{
10250        unsigned long iflags;
10251        spin_lock_irqsave(&phba->hbalock, iflags);
10252        __lpfc_sli4_cq_event_release(phba, cq_event);
10253        spin_unlock_irqrestore(&phba->hbalock, iflags);
10254}
10255
10256/**
10257 * lpfc_sli4_cq_event_release_all - Release all cq events to the free pool
10258 * @phba: pointer to lpfc hba data structure.
10259 *
 * This routine releases all the pending completion-queue events back
 * into the free pool for device reset.
10262 **/
10263static void
10264lpfc_sli4_cq_event_release_all(struct lpfc_hba *phba)
10265{
10266        LIST_HEAD(cq_event_list);
10267        struct lpfc_cq_event *cq_event;
10268        unsigned long iflags;
10269
10270        /* Retrieve all the pending WCQEs from pending WCQE lists */
10271
10272        /* Pending ELS XRI abort events */
10273        spin_lock_irqsave(&phba->sli4_hba.els_xri_abrt_list_lock, iflags);
10274        list_splice_init(&phba->sli4_hba.sp_els_xri_aborted_work_queue,
10275                         &cq_event_list);
10276        spin_unlock_irqrestore(&phba->sli4_hba.els_xri_abrt_list_lock, iflags);
10277
10278        /* Pending async events */
10279        spin_lock_irqsave(&phba->sli4_hba.asynce_list_lock, iflags);
10280        list_splice_init(&phba->sli4_hba.sp_asynce_work_queue,
10281                         &cq_event_list);
10282        spin_unlock_irqrestore(&phba->sli4_hba.asynce_list_lock, iflags);
10283
10284        while (!list_empty(&cq_event_list)) {
10285                list_remove_head(&cq_event_list, cq_event,
10286                                 struct lpfc_cq_event, list);
10287                lpfc_sli4_cq_event_release(phba, cq_event);
10288        }
10289}
10290
10291/**
10292 * lpfc_pci_function_reset - Reset pci function.
10293 * @phba: pointer to lpfc hba data structure.
10294 *
10295 * This routine is invoked to request a PCI function reset. It destroys
10296 * all resources assigned to the PCI function that originated the request.
10297 *
10298 * Return codes
10299 *      0 - successful
10300 *      -ENOMEM - No available memory
10301 *      -ENXIO - The mailbox failed to complete successfully
 *      -ENODEV - The port did not become ready after the reset
10302 **/
10303int
10304lpfc_pci_function_reset(struct lpfc_hba *phba)
10305{
10306        LPFC_MBOXQ_t *mboxq;
10307        uint32_t rc = 0, if_type;
10308        uint32_t shdr_status, shdr_add_status;
10309        uint32_t rdy_chk;
10310        uint32_t port_reset = 0;
10311        union lpfc_sli4_cfg_shdr *shdr;
10312        struct lpfc_register reg_data;
10313        uint16_t devid;
10314
10315        if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
10316        switch (if_type) {
10317        case LPFC_SLI_INTF_IF_TYPE_0:
10318                mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
10319                                                       GFP_KERNEL);
10320                if (!mboxq) {
10321                        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10322                                        "0494 Unable to allocate memory for "
10323                                        "issuing SLI_FUNCTION_RESET mailbox "
10324                                        "command\n");
10325                        return -ENOMEM;
10326                }
10327
10328                /* Setup PCI function reset mailbox-ioctl command */
10329                lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
10330                                 LPFC_MBOX_OPCODE_FUNCTION_RESET, 0,
10331                                 LPFC_SLI4_MBX_EMBED);
10332                rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
10333                shdr = (union lpfc_sli4_cfg_shdr *)
10334                        &mboxq->u.mqe.un.sli4_config.header.cfg_shdr;
10335                shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
10336                shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
10337                                         &shdr->response);
10338                mempool_free(mboxq, phba->mbox_mem_pool);
10339                if (shdr_status || shdr_add_status || rc) {
10340                        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10341                                        "0495 SLI_FUNCTION_RESET mailbox "
10342                                        "failed with status x%x add_status x%x,"
10343                                        " mbx status x%x\n",
10344                                        shdr_status, shdr_add_status, rc);
10345                        rc = -ENXIO;
10346                }
10347                break;
10348        case LPFC_SLI_INTF_IF_TYPE_2:
10349        case LPFC_SLI_INTF_IF_TYPE_6:
10350wait:
10351                /*
10352                 * Poll the Port Status Register and wait for RDY for
10353                 * up to 30 seconds. If the port doesn't respond, treat
10354                 * it as an error.
10355                 */
10356                for (rdy_chk = 0; rdy_chk < 1500; rdy_chk++) {
10357                        if (lpfc_readl(phba->sli4_hba.u.if_type2.
10358                                STATUSregaddr, &reg_data.word0)) {
10359                                rc = -ENODEV;
10360                                goto out;
10361                        }
10362                        if (bf_get(lpfc_sliport_status_rdy, &reg_data))
10363                                break;
10364                        msleep(20);
10365                }
10366
10367                if (!bf_get(lpfc_sliport_status_rdy, &reg_data)) {
10368                        phba->work_status[0] = readl(
10369                                phba->sli4_hba.u.if_type2.ERR1regaddr);
10370                        phba->work_status[1] = readl(
10371                                phba->sli4_hba.u.if_type2.ERR2regaddr);
10372                        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10373                                        "2890 Port not ready, port status reg "
10374                                        "0x%x error 1=0x%x, error 2=0x%x\n",
10375                                        reg_data.word0,
10376                                        phba->work_status[0],
10377                                        phba->work_status[1]);
10378                        rc = -ENODEV;
10379                        goto out;
10380                }
10381
10382                if (!port_reset) {
10383                        /*
10384                         * Reset the port now
10385                         */
10386                        reg_data.word0 = 0;
10387                        bf_set(lpfc_sliport_ctrl_end, &reg_data,
10388                               LPFC_SLIPORT_LITTLE_ENDIAN);
10389                        bf_set(lpfc_sliport_ctrl_ip, &reg_data,
10390                               LPFC_SLIPORT_INIT_PORT);
10391                        writel(reg_data.word0, phba->sli4_hba.u.if_type2.
10392                               CTRLregaddr);
10393                        /* flush */
10394                        pci_read_config_word(phba->pcidev,
10395                                             PCI_DEVICE_ID, &devid);
10396
10397                        port_reset = 1;
10398                        msleep(20);
10399                        goto wait;
10400                } else if (bf_get(lpfc_sliport_status_rn, &reg_data)) {
10401                        rc = -ENODEV;
10402                        goto out;
10403                }
10404                break;
10405
10406        case LPFC_SLI_INTF_IF_TYPE_1:
10407        default:
10408                break;
10409        }
10410
10411out:
10412        /* Catch the not-ready port failure after a port reset. */
10413        if (rc) {
10414                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10415                                "3317 HBA not functional: IP Reset Failed "
10416                                "try: echo fw_reset > board_mode\n");
10417                rc = -ENODEV;
10418        }
10419
10420        return rc;
10421}
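
/*
 * Editorial note on the readiness poll above: 1500 iterations with a 20 ms
 * sleep per pass gives the 30 second budget mentioned in the comment
 * (1500 * 20 ms = 30,000 ms). A probe-time caller treats any nonzero return
 * as a dead port, e.g. (illustrative only):
 *
 *	rc = lpfc_pci_function_reset(phba);
 *	if (rc)
 *		return rc;	// -ENOMEM, -ENXIO or -ENODEV; port unusable
 */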
10422
10423/**
10424 * lpfc_sli4_pci_mem_setup - Setup SLI4 HBA PCI memory space.
10425 * @phba: pointer to lpfc hba data structure.
10426 *
10427 * This routine is invoked to set up the PCI device memory space for device
10428 * with SLI-4 interface spec.
10429 *
10430 * Return codes
10431 *      0 - successful
10432 *      other values - error
10433 **/
10434static int
10435lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba)
10436{
10437        struct pci_dev *pdev = phba->pcidev;
10438        unsigned long bar0map_len, bar1map_len, bar2map_len;
10439        int error;
10440        uint32_t if_type;
10441
10442        if (!pdev)
10443                return -ENODEV;
10444
10445        /* Set the device DMA mask size */
10446        error = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
10447        if (error)
10448                error = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
10449        if (error)
10450                return error;
10451
10452        /*
10453         * The BARs and register set definitions and offset locations are
10454         * dependent on the if_type.
10455         */
10456        if (pci_read_config_dword(pdev, LPFC_SLI_INTF,
10457                                  &phba->sli4_hba.sli_intf.word0)) {
10458                return -ENODEV;
10459        }
10460
10461        /* There is no SLI3 failback for SLI4 devices. */
10462        if (bf_get(lpfc_sli_intf_valid, &phba->sli4_hba.sli_intf) !=
10463            LPFC_SLI_INTF_VALID) {
10464                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10465                                "2894 SLI_INTF reg contents invalid "
10466                                "sli_intf reg 0x%x\n",
10467                                phba->sli4_hba.sli_intf.word0);
10468                return -ENODEV;
10469        }
10470
10471        if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
10472        /*
10473         * Get the bus address of the SLI4 device BAR regions and the
10474         * number of bytes required by each mapping. The mapping of the
10475         * particular PCI BAR regions is dependent on the type of
10476         * SLI4 device.
10477         */
10478        if (pci_resource_start(pdev, PCI_64BIT_BAR0)) {
10479                phba->pci_bar0_map = pci_resource_start(pdev, PCI_64BIT_BAR0);
10480                bar0map_len = pci_resource_len(pdev, PCI_64BIT_BAR0);
10481
10482                /*
10483                 * Map SLI4 PCI Config Space Register base to a kernel virtual
10484                 * addr
10485                 */
10486                phba->sli4_hba.conf_regs_memmap_p =
10487                        ioremap(phba->pci_bar0_map, bar0map_len);
10488                if (!phba->sli4_hba.conf_regs_memmap_p) {
10489                        dev_printk(KERN_ERR, &pdev->dev,
10490                                   "ioremap failed for SLI4 PCI config "
10491                                   "registers.\n");
10492                        return -ENODEV;
10493                }
10494                phba->pci_bar0_memmap_p = phba->sli4_hba.conf_regs_memmap_p;
10495                /* Set up BAR0 PCI config space register memory map */
10496                lpfc_sli4_bar0_register_memmap(phba, if_type);
10497        } else {
10498                phba->pci_bar0_map = pci_resource_start(pdev, 1);
10499                bar0map_len = pci_resource_len(pdev, 1);
10500                if (if_type >= LPFC_SLI_INTF_IF_TYPE_2) {
10501                        dev_printk(KERN_ERR, &pdev->dev,
10502                           "FATAL - No BAR0 mapping for SLI4, if_type >= 2\n");
10503                        return -ENODEV;
10504                }
10505                phba->sli4_hba.conf_regs_memmap_p =
10506                                ioremap(phba->pci_bar0_map, bar0map_len);
10507                if (!phba->sli4_hba.conf_regs_memmap_p) {
10508                        dev_printk(KERN_ERR, &pdev->dev,
10509                                "ioremap failed for SLI4 PCI config "
10510                                "registers.\n");
10511                        return -ENODEV;
10512                }
10513                lpfc_sli4_bar0_register_memmap(phba, if_type);
10514        }
10515
10516        if (if_type == LPFC_SLI_INTF_IF_TYPE_0) {
10517                if (pci_resource_start(pdev, PCI_64BIT_BAR2)) {
10518                        /*
10519                         * Map SLI4 if type 0 HBA Control Register base to a
10520                         * kernel virtual address and setup the registers.
10521                         */
10522                        phba->pci_bar1_map = pci_resource_start(pdev,
10523                                                                PCI_64BIT_BAR2);
10524                        bar1map_len = pci_resource_len(pdev, PCI_64BIT_BAR2);
10525                        phba->sli4_hba.ctrl_regs_memmap_p =
10526                                        ioremap(phba->pci_bar1_map,
10527                                                bar1map_len);
10528                        if (!phba->sli4_hba.ctrl_regs_memmap_p) {
10529                                dev_err(&pdev->dev,
10530                                           "ioremap failed for SLI4 HBA "
10531                                            "control registers.\n");
10532                                error = -ENOMEM;
10533                                goto out_iounmap_conf;
10534                        }
10535                        phba->pci_bar2_memmap_p =
10536                                         phba->sli4_hba.ctrl_regs_memmap_p;
10537                        lpfc_sli4_bar1_register_memmap(phba, if_type);
10538                } else {
10539                        error = -ENOMEM;
10540                        goto out_iounmap_conf;
10541                }
10542        }
10543
10544        if ((if_type == LPFC_SLI_INTF_IF_TYPE_6) &&
10545            (pci_resource_start(pdev, PCI_64BIT_BAR2))) {
10546                /*
10547                 * Map SLI4 if type 6 HBA Doorbell Register base to a kernel
10548                 * virtual address and setup the registers.
10549                 */
10550                phba->pci_bar1_map = pci_resource_start(pdev, PCI_64BIT_BAR2);
10551                bar1map_len = pci_resource_len(pdev, PCI_64BIT_BAR2);
10552                phba->sli4_hba.drbl_regs_memmap_p =
10553                                ioremap(phba->pci_bar1_map, bar1map_len);
10554                if (!phba->sli4_hba.drbl_regs_memmap_p) {
10555                        dev_err(&pdev->dev,
10556                           "ioremap failed for SLI4 HBA doorbell registers.\n");
10557                        error = -ENOMEM;
10558                        goto out_iounmap_conf;
10559                }
10560                phba->pci_bar2_memmap_p = phba->sli4_hba.drbl_regs_memmap_p;
10561                lpfc_sli4_bar1_register_memmap(phba, if_type);
10562        }
10563
10564        if (if_type == LPFC_SLI_INTF_IF_TYPE_0) {
10565                if (pci_resource_start(pdev, PCI_64BIT_BAR4)) {
10566                        /*
10567                         * Map SLI4 if type 0 HBA Doorbell Register base to
10568                         * a kernel virtual address and setup the registers.
10569                         */
10570                        phba->pci_bar2_map = pci_resource_start(pdev,
10571                                                                PCI_64BIT_BAR4);
10572                        bar2map_len = pci_resource_len(pdev, PCI_64BIT_BAR4);
10573                        phba->sli4_hba.drbl_regs_memmap_p =
10574                                        ioremap(phba->pci_bar2_map,
10575                                                bar2map_len);
10576                        if (!phba->sli4_hba.drbl_regs_memmap_p) {
10577                                dev_err(&pdev->dev,
10578                                           "ioremap failed for SLI4 HBA"
10579                                           " doorbell registers.\n");
10580                                error = -ENOMEM;
10581                                goto out_iounmap_ctrl;
10582                        }
10583                        phba->pci_bar4_memmap_p =
10584                                        phba->sli4_hba.drbl_regs_memmap_p;
10585                        error = lpfc_sli4_bar2_register_memmap(phba, LPFC_VF0);
10586                        if (error)
10587                                goto out_iounmap_all;
10588                } else {
10589                        error = -ENOMEM;
10590                        goto out_iounmap_all;
10591                }
10592        }
10593
10594        if (if_type == LPFC_SLI_INTF_IF_TYPE_6 &&
10595            pci_resource_start(pdev, PCI_64BIT_BAR4)) {
10596                /*
10597                 * Map SLI4 if type 6 HBA DPP Register base to a kernel
10598                 * virtual address and setup the registers.
10599                 */
10600                phba->pci_bar2_map = pci_resource_start(pdev, PCI_64BIT_BAR4);
10601                bar2map_len = pci_resource_len(pdev, PCI_64BIT_BAR4);
10602                phba->sli4_hba.dpp_regs_memmap_p =
10603                                ioremap(phba->pci_bar2_map, bar2map_len);
10604                if (!phba->sli4_hba.dpp_regs_memmap_p) {
10605                        dev_err(&pdev->dev,
10606                           "ioremap failed for SLI4 HBA dpp registers.\n");
10607                        error = -ENOMEM;
10608                        goto out_iounmap_ctrl;
10609                }
10610                phba->pci_bar4_memmap_p = phba->sli4_hba.dpp_regs_memmap_p;
10611        }
10612
10613        /* Set up the EQ/CQ register handling functions now */
10614        switch (if_type) {
10615        case LPFC_SLI_INTF_IF_TYPE_0:
10616        case LPFC_SLI_INTF_IF_TYPE_2:
10617                phba->sli4_hba.sli4_eq_clr_intr = lpfc_sli4_eq_clr_intr;
10618                phba->sli4_hba.sli4_write_eq_db = lpfc_sli4_write_eq_db;
10619                phba->sli4_hba.sli4_write_cq_db = lpfc_sli4_write_cq_db;
10620                break;
10621        case LPFC_SLI_INTF_IF_TYPE_6:
10622                phba->sli4_hba.sli4_eq_clr_intr = lpfc_sli4_if6_eq_clr_intr;
10623                phba->sli4_hba.sli4_write_eq_db = lpfc_sli4_if6_write_eq_db;
10624                phba->sli4_hba.sli4_write_cq_db = lpfc_sli4_if6_write_cq_db;
10625                break;
10626        default:
10627                break;
10628        }
10629
10630        return 0;
10631
10632out_iounmap_all:
10633        iounmap(phba->sli4_hba.drbl_regs_memmap_p);
10634out_iounmap_ctrl:
10635        iounmap(phba->sli4_hba.ctrl_regs_memmap_p);
10636out_iounmap_conf:
10637        iounmap(phba->sli4_hba.conf_regs_memmap_p);
10638
10639        return error;
10640}
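
/*
 * Editorial sketch of the unwind idiom used above (illustrative, with
 * hypothetical names): each successfully mapped BAR adds one more iounmap()
 * to the failure path, so a late error releases exactly what was mapped:
 *
 *	a = ioremap(bar_a, len_a);
 *	if (!a)
 *		return -ENOMEM;
 *	b = ioremap(bar_b, len_b);
 *	if (!b) {
 *		error = -ENOMEM;
 *		goto unmap_a;
 *	}
 *	...
 *	return 0;
 * unmap_b:
 *	iounmap(b);
 * unmap_a:
 *	iounmap(a);
 *	return error;
 */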
10641
10642/**
10643 * lpfc_sli4_pci_mem_unset - Unset SLI4 HBA PCI memory space.
10644 * @phba: pointer to lpfc hba data structure.
10645 *
10646 * This routine is invoked to unset the PCI device memory space for device
10647 * with SLI-4 interface spec.
10648 **/
10649static void
10650lpfc_sli4_pci_mem_unset(struct lpfc_hba *phba)
10651{
10652        uint32_t if_type;
10653        if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
10654
10655        switch (if_type) {
10656        case LPFC_SLI_INTF_IF_TYPE_0:
10657                iounmap(phba->sli4_hba.drbl_regs_memmap_p);
10658                iounmap(phba->sli4_hba.ctrl_regs_memmap_p);
10659                iounmap(phba->sli4_hba.conf_regs_memmap_p);
10660                break;
10661        case LPFC_SLI_INTF_IF_TYPE_2:
10662                iounmap(phba->sli4_hba.conf_regs_memmap_p);
10663                break;
10664        case LPFC_SLI_INTF_IF_TYPE_6:
10665                iounmap(phba->sli4_hba.drbl_regs_memmap_p);
10666                iounmap(phba->sli4_hba.conf_regs_memmap_p);
10667                if (phba->sli4_hba.dpp_regs_memmap_p)
10668                        iounmap(phba->sli4_hba.dpp_regs_memmap_p);
10669                break;
10670        case LPFC_SLI_INTF_IF_TYPE_1:
10671        default:
10672                dev_printk(KERN_ERR, &phba->pcidev->dev,
10673                           "FATAL - unsupported SLI4 interface type - %d\n",
10674                           if_type);
10675                break;
10676        }
10677}
10678
10679/**
10680 * lpfc_sli_enable_msix - Enable MSI-X interrupt mode on SLI-3 device
10681 * @phba: pointer to lpfc hba data structure.
10682 *
10683 * This routine is invoked to enable the MSI-X interrupt vectors to device
10684 * with SLI-3 interface specs.
10685 *
10686 * Return codes
10687 *   0 - successful
10688 *   other values - error
10689 **/
10690static int
10691lpfc_sli_enable_msix(struct lpfc_hba *phba)
10692{
10693        int rc;
10694        LPFC_MBOXQ_t *pmb;
10695
10696        /* Set up MSI-X multi-message vectors */
10697        rc = pci_alloc_irq_vectors(phba->pcidev,
10698                        LPFC_MSIX_VECTORS, LPFC_MSIX_VECTORS, PCI_IRQ_MSIX);
10699        if (rc < 0) {
10700                lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
10701                                "0420 PCI enable MSI-X failed (%d)\n", rc);
10702                goto vec_fail_out;
10703        }
10704
10705        /*
10706         * Assign MSI-X vectors to interrupt handlers
10707         */
10708
10709        /* vector-0 is associated with the slow-path handler */
10710        rc = request_irq(pci_irq_vector(phba->pcidev, 0),
10711                         &lpfc_sli_sp_intr_handler, 0,
10712                         LPFC_SP_DRIVER_HANDLER_NAME, phba);
10713        if (rc) {
10714                lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
10715                                "0421 MSI-X slow-path request_irq failed "
10716                                "(%d)\n", rc);
10717                goto msi_fail_out;
10718        }
10719
10720        /* vector-1 is associated with the fast-path handler */
10721        rc = request_irq(pci_irq_vector(phba->pcidev, 1),
10722                         &lpfc_sli_fp_intr_handler, 0,
10723                         LPFC_FP_DRIVER_HANDLER_NAME, phba);
10724
10725        if (rc) {
10726                lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
10727                                "0429 MSI-X fast-path request_irq failed "
10728                                "(%d)\n", rc);
10729                goto irq_fail_out;
10730        }
10731
10732        /*
10733         * Configure HBA MSI-X attention conditions to messages
10734         */
10735        pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
10736
10737        if (!pmb) {
10738                rc = -ENOMEM;
10739                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10740                                "0474 Unable to allocate memory for issuing "
10741                                "MBOX_CONFIG_MSI command\n");
10742                goto mem_fail_out;
10743        }
10744        rc = lpfc_config_msi(phba, pmb);
10745        if (rc)
10746                goto mbx_fail_out;
10747        rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
10748        if (rc != MBX_SUCCESS) {
10749                lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
10750                                "0351 Config MSI mailbox command failed, "
10751                                "mbxCmd x%x, mbxStatus x%x\n",
10752                                pmb->u.mb.mbxCommand, pmb->u.mb.mbxStatus);
10753                goto mbx_fail_out;
10754        }
10755
10756        /* Free memory allocated for mailbox command */
10757        mempool_free(pmb, phba->mbox_mem_pool);
10758        return rc;
10759
10760mbx_fail_out:
10761        /* Free memory allocated for mailbox command */
10762        mempool_free(pmb, phba->mbox_mem_pool);
10763
10764mem_fail_out:
10765        /* free the irq already requested */
10766        free_irq(pci_irq_vector(phba->pcidev, 1), phba);
10767
10768irq_fail_out:
10769        /* free the irq already requested */
10770        free_irq(pci_irq_vector(phba->pcidev, 0), phba);
10771
10772msi_fail_out:
10773        /* Unconfigure MSI-X capability structure */
10774        pci_free_irq_vectors(phba->pcidev);
10775
10776vec_fail_out:
10777        return rc;
10778}
10779
10780/**
10781 * lpfc_sli_enable_msi - Enable MSI interrupt mode on SLI-3 device.
10782 * @phba: pointer to lpfc hba data structure.
10783 *
10784 * This routine is invoked to enable the MSI interrupt mode on a device
10785 * with the SLI-3 interface spec. The kernel function pci_enable_msi() is
10786 * called to enable the MSI vector. The device driver is responsible for
10787 * calling request_irq() to register the MSI vector with an interrupt
10788 * handler, which is done in this function.
10789 *
10790 * Return codes
10791 *      0 - successful
10792 *      other values - error
10793 */
10794static int
10795lpfc_sli_enable_msi(struct lpfc_hba *phba)
10796{
10797        int rc;
10798
10799        rc = pci_enable_msi(phba->pcidev);
10800        if (!rc)
10801                lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
10802                                "0462 PCI enable MSI mode success.\n");
10803        else {
10804                lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
10805                                "0471 PCI enable MSI mode failed (%d)\n", rc);
10806                return rc;
10807        }
10808
10809        rc = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler,
10810                         0, LPFC_DRIVER_NAME, phba);
10811        if (rc) {
10812                pci_disable_msi(phba->pcidev);
10813                lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
10814                                "0478 MSI request_irq failed (%d)\n", rc);
10815        }
10816        return rc;
10817}
10818
10819/**
10820 * lpfc_sli_enable_intr - Enable device interrupt to SLI-3 device.
10821 * @phba: pointer to lpfc hba data structure.
10822 * @cfg_mode: Interrupt configuration mode (INTx, MSI or MSI-X).
10823 *
10824 * This routine is invoked to enable device interrupts and associate the
10825 * driver's interrupt handler(s) with interrupt vector(s) on a device with
10826 * the SLI-3 interface spec. Depending on the interrupt mode configured for
10827 * the driver, the driver will try to fall back from the configured
10828 * interrupt mode to an interrupt mode supported by the platform, kernel,
10829 * and device, in the order:
10830 * MSI-X -> MSI -> IRQ.
10831 *
10832 * Return codes
10833 *   0 - successful
10834 *   other values - error
10835 **/
10836static uint32_t
10837lpfc_sli_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
10838{
10839        uint32_t intr_mode = LPFC_INTR_ERROR;
10840        int retval;
10841
10842        /* Need to issue conf_port mbox cmd before conf_msi mbox cmd */
10843        retval = lpfc_sli_config_port(phba, LPFC_SLI_REV3);
10844        if (retval)
10845                return intr_mode;
10846        phba->hba_flag &= ~HBA_NEEDS_CFG_PORT;
10847
10848        if (cfg_mode == 2) {
10849                /* Now, try to enable MSI-X interrupt mode */
10850                retval = lpfc_sli_enable_msix(phba);
10851                if (!retval) {
10852                        /* Indicate initialization to MSI-X mode */
10853                        phba->intr_type = MSIX;
10854                        intr_mode = 2;
10855                }
10856        }
10857
10858        /* Fallback to MSI if MSI-X initialization failed */
10859        if (cfg_mode >= 1 && phba->intr_type == NONE) {
10860                retval = lpfc_sli_enable_msi(phba);
10861                if (!retval) {
10862                        /* Indicate initialization to MSI mode */
10863                        phba->intr_type = MSI;
10864                        intr_mode = 1;
10865                }
10866        }
10867
10868        /* Fallback to INTx if both MSI-X/MSI initialization failed */
10869        if (phba->intr_type == NONE) {
10870                retval = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler,
10871                                     IRQF_SHARED, LPFC_DRIVER_NAME, phba);
10872                if (!retval) {
10873                        /* Indicate initialization to INTx mode */
10874                        phba->intr_type = INTx;
10875                        intr_mode = 0;
10876                }
10877        }
10878        return intr_mode;
10879}
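
/*
 * Usage sketch (editorial): probe-time callers pass the user-configured mode
 * (module parameter lpfc_use_msi: 2 = MSI-X, 1 = MSI, 0 = INTx) and check
 * the result, e.g.:
 *
 *	intr_mode = lpfc_sli_enable_intr(phba, phba->cfg_use_msi);
 *	if (intr_mode == LPFC_INTR_ERROR)
 *		// no interrupt mode could be enabled; fail the probe
 *
 * On success the return value records the mode actually achieved (2, 1 or
 * 0), which may be lower than the mode requested.
 */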
10880
10881/**
10882 * lpfc_sli_disable_intr - Disable device interrupt to SLI-3 device.
10883 * @phba: pointer to lpfc hba data structure.
10884 *
10885 * This routine is invoked to disable device interrupt and disassociate the
10886 * driver's interrupt handler(s) from interrupt vector(s) to device with
10887 * SLI-3 interface spec. Depending on the interrupt mode, the driver will
10888 * release the interrupt vector(s) for the message signaled interrupt.
10889 **/
10890static void
10891lpfc_sli_disable_intr(struct lpfc_hba *phba)
10892{
10893        int nr_irqs, i;
10894
10895        if (phba->intr_type == MSIX)
10896                nr_irqs = LPFC_MSIX_VECTORS;
10897        else
10898                nr_irqs = 1;
10899
10900        for (i = 0; i < nr_irqs; i++)
10901                free_irq(pci_irq_vector(phba->pcidev, i), phba);
10902        pci_free_irq_vectors(phba->pcidev);
10903
10904        /* Reset interrupt management states */
10905        phba->intr_type = NONE;
10906        phba->sli.slistat.sli_intr = 0;
10907}
10908
10909/**
10910 * lpfc_find_cpu_handle - Find the CPU that corresponds to the specified Queue
10911 * @phba: pointer to lpfc hba data structure.
10912 * @id: EQ vector index or Hardware Queue index
10913 * @match: LPFC_FIND_BY_EQ = match by EQ
10914 *         LPFC_FIND_BY_HDWQ = match by Hardware Queue
10915 * Return the CPU that matches the selection criteria
10916 */
10917static uint16_t
10918lpfc_find_cpu_handle(struct lpfc_hba *phba, uint16_t id, int match)
10919{
10920        struct lpfc_vector_map_info *cpup;
10921        int cpu;
10922
10923        /* Loop through all CPUs */
10924        for_each_present_cpu(cpu) {
10925                cpup = &phba->sli4_hba.cpu_map[cpu];
10926
10927                /* If we are matching by EQ, there may be multiple CPUs
10928                 * using the same vector, so select the one with
10929                 * LPFC_CPU_FIRST_IRQ set.
10930                 */
10931                if ((match == LPFC_FIND_BY_EQ) &&
10932                    (cpup->flag & LPFC_CPU_FIRST_IRQ) &&
10933                    (cpup->eq == id))
10934                        return cpu;
10935
10936                /* If matching by HDWQ, select the first CPU that matches */
10937                if ((match == LPFC_FIND_BY_HDWQ) && (cpup->hdwq == id))
10938                        return cpu;
10939        }
10940        return 0;
10941}
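
/*
 * Usage sketch (editorial): lpfc_cpu_online() later in this file uses the
 * HDWQ match to decide whether a polled EQ belongs to the CPU coming back
 * online:
 *
 *	n = lpfc_find_cpu_handle(phba, eq->hdwq, LPFC_FIND_BY_HDWQ);
 *	if (n == cpu)
 *		lpfc_sli4_stop_polling(eq);
 */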
10942
10943#ifdef CONFIG_X86
10944/**
10945 * lpfc_find_hyper - Determine if the CPU map entry is hyper-threaded
10946 * @phba: pointer to lpfc hba data structure.
10947 * @cpu: CPU map index
10948 * @phys_id: CPU package physical id
10949 * @core_id: CPU core id
10950 */
10951static int
10952lpfc_find_hyper(struct lpfc_hba *phba, int cpu,
10953                uint16_t phys_id, uint16_t core_id)
10954{
10955        struct lpfc_vector_map_info *cpup;
10956        int idx;
10957
10958        for_each_present_cpu(idx) {
10959                cpup = &phba->sli4_hba.cpu_map[idx];
10960                /* Does the cpup match the one we are looking for */
10961                if ((cpup->phys_id == phys_id) &&
10962                    (cpup->core_id == core_id) &&
10963                    (cpu != idx))
10964                        return 1;
10965        }
10966        return 0;
10967}
10968#endif
10969
10970/*
10971 * lpfc_assign_eq_map_info - Assigns eq for vector_map structure
10972 * @phba: pointer to lpfc hba data structure.
10973 * @eqidx: index for eq and irq vector
10974 * @flag: flags to set for vector_map structure
10975 * @cpu: cpu used to index vector_map structure
10976 *
10977 * The routine assigns eq info into vector_map structure
10978 */
10979static inline void
10980lpfc_assign_eq_map_info(struct lpfc_hba *phba, uint16_t eqidx, uint16_t flag,
10981                        unsigned int cpu)
10982{
10983        struct lpfc_vector_map_info *cpup = &phba->sli4_hba.cpu_map[cpu];
10984        struct lpfc_hba_eq_hdl *eqhdl = lpfc_get_eq_hdl(eqidx);
10985
10986        cpup->eq = eqidx;
10987        cpup->flag |= flag;
10988
10989        lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
10990                        "3336 Set Affinity: CPU %d irq %d eq %d flag x%x\n",
10991                        cpu, eqhdl->irq, cpup->eq, cpup->flag);
10992}
10993
10994/**
10995 * lpfc_cpu_map_array_init - Initialize cpu_map structure
10996 * @phba: pointer to lpfc hba data structure.
10997 *
10998 * The routine initializes the cpu_map array structure
10999 */
11000static void
11001lpfc_cpu_map_array_init(struct lpfc_hba *phba)
11002{
11003        struct lpfc_vector_map_info *cpup;
11004        struct lpfc_eq_intr_info *eqi;
11005        int cpu;
11006
11007        for_each_possible_cpu(cpu) {
11008                cpup = &phba->sli4_hba.cpu_map[cpu];
11009                cpup->phys_id = LPFC_VECTOR_MAP_EMPTY;
11010                cpup->core_id = LPFC_VECTOR_MAP_EMPTY;
11011                cpup->hdwq = LPFC_VECTOR_MAP_EMPTY;
11012                cpup->eq = LPFC_VECTOR_MAP_EMPTY;
11013                cpup->flag = 0;
11014                eqi = per_cpu_ptr(phba->sli4_hba.eq_info, cpu);
11015                INIT_LIST_HEAD(&eqi->list);
11016                eqi->icnt = 0;
11017        }
11018}
11019
11020/**
11021 * lpfc_hba_eq_hdl_array_init - Initialize hba_eq_hdl structure
11022 * @phba: pointer to lpfc hba data structure.
11023 *
11024 * The routine initializes the hba_eq_hdl array structure
11025 */
11026static void
11027lpfc_hba_eq_hdl_array_init(struct lpfc_hba *phba)
11028{
11029        struct lpfc_hba_eq_hdl *eqhdl;
11030        int i;
11031
11032        for (i = 0; i < phba->cfg_irq_chann; i++) {
11033                eqhdl = lpfc_get_eq_hdl(i);
11034                eqhdl->irq = LPFC_VECTOR_MAP_EMPTY;
11035                eqhdl->phba = phba;
11036        }
11037}
11038
11039/**
11040 * lpfc_cpu_affinity_check - Check vector CPU affinity mappings
11041 * @phba: pointer to lpfc hba data structure.
11042 * @vectors: number of msix vectors allocated.
11043 *
11044 * The routine will figure out the CPU affinity assignment for every
11045 * MSI-X vector allocated for the HBA.
11046 * In addition, the CPU to IO channel mapping will be calculated
11047 * and the phba->sli4_hba.cpu_map array will reflect this.
11048 */
11049static void
11050lpfc_cpu_affinity_check(struct lpfc_hba *phba, int vectors)
11051{
11052        int i, cpu, idx, next_idx, new_cpu, start_cpu, first_cpu;
11053        int max_phys_id, min_phys_id;
11054        int max_core_id, min_core_id;
11055        struct lpfc_vector_map_info *cpup;
11056        struct lpfc_vector_map_info *new_cpup;
11057#ifdef CONFIG_X86
11058        struct cpuinfo_x86 *cpuinfo;
11059#endif
11060#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
11061        struct lpfc_hdwq_stat *c_stat;
11062#endif
11063
11064        max_phys_id = 0;
11065        min_phys_id = LPFC_VECTOR_MAP_EMPTY;
11066        max_core_id = 0;
11067        min_core_id = LPFC_VECTOR_MAP_EMPTY;
11068
11069        /* Update CPU map with physical id and core id of each CPU */
11070        for_each_present_cpu(cpu) {
11071                cpup = &phba->sli4_hba.cpu_map[cpu];
11072#ifdef CONFIG_X86
11073                cpuinfo = &cpu_data(cpu);
11074                cpup->phys_id = cpuinfo->phys_proc_id;
11075                cpup->core_id = cpuinfo->cpu_core_id;
11076                if (lpfc_find_hyper(phba, cpu, cpup->phys_id, cpup->core_id))
11077                        cpup->flag |= LPFC_CPU_MAP_HYPER;
11078#else
11079                /* No distinction between CPUs for other platforms */
11080                cpup->phys_id = 0;
11081                cpup->core_id = cpu;
11082#endif
11083
11084                lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
11085                                "3328 CPU %d physid %d coreid %d flag x%x\n",
11086                                cpu, cpup->phys_id, cpup->core_id, cpup->flag);
11087
11088                if (cpup->phys_id > max_phys_id)
11089                        max_phys_id = cpup->phys_id;
11090                if (cpup->phys_id < min_phys_id)
11091                        min_phys_id = cpup->phys_id;
11092
11093                if (cpup->core_id > max_core_id)
11094                        max_core_id = cpup->core_id;
11095                if (cpup->core_id < min_core_id)
11096                        min_core_id = cpup->core_id;
11097        }
11098
11099        /* After looking at each irq vector assigned to this pcidev, it's
11100         * possible to see that not ALL CPUs have been accounted for.
11101         * Next we will set any unassigned (unaffinitized) cpu map
11102         * entries to an IRQ on the same phys_id.
11103         */
11104        first_cpu = cpumask_first(cpu_present_mask);
11105        start_cpu = first_cpu;
11106
11107        for_each_present_cpu(cpu) {
11108                cpup = &phba->sli4_hba.cpu_map[cpu];
11109
11110                /* Is this CPU entry unassigned */
11111                if (cpup->eq == LPFC_VECTOR_MAP_EMPTY) {
11112                        /* Mark CPU as IRQ not assigned by the kernel */
11113                        cpup->flag |= LPFC_CPU_MAP_UNASSIGN;
11114
11115                        /* If so, find a new_cpup that is on the SAME
11116                         * phys_id as cpup. start_cpu will start where we
11117                         * left off so all unassigned entries don't get assigned
11118                         * the IRQ of the first entry.
11119                         */
11120                        new_cpu = start_cpu;
11121                        for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
11122                                new_cpup = &phba->sli4_hba.cpu_map[new_cpu];
11123                                if (!(new_cpup->flag & LPFC_CPU_MAP_UNASSIGN) &&
11124                                    (new_cpup->eq != LPFC_VECTOR_MAP_EMPTY) &&
11125                                    (new_cpup->phys_id == cpup->phys_id))
11126                                        goto found_same;
11127                                new_cpu = cpumask_next(
11128                                        new_cpu, cpu_present_mask);
11129                                if (new_cpu == nr_cpumask_bits)
11130                                        new_cpu = first_cpu;
11131                        }
11132                        /* At this point, we leave the CPU as unassigned */
11133                        continue;
11134found_same:
11135                        /* We found a matching phys_id, so copy the IRQ info */
11136                        cpup->eq = new_cpup->eq;
11137
11138                        /* Bump start_cpu to the next slot to minimize the
11139                         * chance of having multiple unassigned CPU entries
11140                         * selecting the same IRQ.
11141                         */
11142                        start_cpu = cpumask_next(new_cpu, cpu_present_mask);
11143                        if (start_cpu == nr_cpumask_bits)
11144                                start_cpu = first_cpu;
11145
11146                        lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
11147                                        "3337 Set Affinity: CPU %d "
11148                                        "eq %d from peer cpu %d same "
11149                                        "phys_id (%d)\n",
11150                                        cpu, cpup->eq, new_cpu,
11151                                        cpup->phys_id);
11152                }
11153        }
11154
11155        /* Set any unassigned cpu map entries to an IRQ on any phys_id */
11156        start_cpu = first_cpu;
11157
11158        for_each_present_cpu(cpu) {
11159                cpup = &phba->sli4_hba.cpu_map[cpu];
11160
11161                /* Is this entry unassigned */
11162                if (cpup->eq == LPFC_VECTOR_MAP_EMPTY) {
11163                        /* Mark it as IRQ not assigned by the kernel */
11164                        cpup->flag |= LPFC_CPU_MAP_UNASSIGN;
11165
11166                        /* If so, find a new_cpup that is on ANY phys_id
11167                         * as the cpup. start_cpu will start where we
11168                         * left off so all unassigned entries don't get
11169                         * assigned the IRQ of the first entry.
11170                         */
11171                        new_cpu = start_cpu;
11172                        for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
11173                                new_cpup = &phba->sli4_hba.cpu_map[new_cpu];
11174                                if (!(new_cpup->flag & LPFC_CPU_MAP_UNASSIGN) &&
11175                                    (new_cpup->eq != LPFC_VECTOR_MAP_EMPTY))
11176                                        goto found_any;
11177                                new_cpu = cpumask_next(
11178                                        new_cpu, cpu_present_mask);
11179                                if (new_cpu == nr_cpumask_bits)
11180                                        new_cpu = first_cpu;
11181                        }
11182                        /* We should never leave an entry unassigned */
11183                        lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11184                                        "3339 Set Affinity: CPU %d "
11185                                        "eq %d UNASSIGNED\n",
11186                                        cpu, cpup->eq);
11187                        continue;
11188found_any:
11189                        /* We found an available entry, copy the IRQ info */
11190                        cpup->eq = new_cpup->eq;
11191
11192                        /* Bump start_cpu to the next slot to minimize the
11193                         * chance of having multiple unassigned CPU entries
11194                         * selecting the same IRQ.
11195                         */
11196                        start_cpu = cpumask_next(new_cpu, cpu_present_mask);
11197                        if (start_cpu == nr_cpumask_bits)
11198                                start_cpu = first_cpu;
11199
11200                        lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
11201                                        "3338 Set Affinity: CPU %d "
11202                                        "eq %d from peer cpu %d (%d/%d)\n",
11203                                        cpu, cpup->eq, new_cpu,
11204                                        new_cpup->phys_id, new_cpup->core_id);
11205                }
11206        }
11207
11208        /* Assign hdwq indices that are unique across all cpus in the map
11209         * that are also FIRST_CPUs.
11210         */
11211        idx = 0;
11212        for_each_present_cpu(cpu) {
11213                cpup = &phba->sli4_hba.cpu_map[cpu];
11214
11215                /* Only FIRST IRQs get a hdwq index assignment. */
11216                if (!(cpup->flag & LPFC_CPU_FIRST_IRQ))
11217                        continue;
11218
11219                /* 1 to 1, the first LPFC_CPU_FIRST_IRQ cpus to a unique hdwq */
11220                cpup->hdwq = idx;
11221                idx++;
11222                lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
11223                                "3333 Set Affinity: CPU %d (phys %d core %d): "
11224                                "hdwq %d eq %d flg x%x\n",
11225                                cpu, cpup->phys_id, cpup->core_id,
11226                                cpup->hdwq, cpup->eq, cpup->flag);
11227        }
11228        /* Associate a hdwq with each cpu_map entry
11229         * This will be 1 to 1 - hdwq to cpu, unless there are fewer
11230         * hardware queues than CPUs. In that case we will just round-robin
11231         * the available hardware queues as they get assigned to CPUs.
11232         * The next_idx is the idx from the FIRST_CPU loop above to account
11233         * for irq_chann < hdwq.  The idx is used for round-robin assignments
11234         * and needs to start at 0.
11235         */
11236        next_idx = idx;
11237        start_cpu = 0;
11238        idx = 0;
11239        for_each_present_cpu(cpu) {
11240                cpup = &phba->sli4_hba.cpu_map[cpu];
11241
11242                /* FIRST cpus are already mapped. */
11243                if (cpup->flag & LPFC_CPU_FIRST_IRQ)
11244                        continue;
11245
11246                /* If the cfg_irq_chann < cfg_hdw_queue, set the hdwq
11247                 * of the unassigned cpus to the next idx so that all
11248                 * hdw queues are fully utilized.
11249                 */
11250                if (next_idx < phba->cfg_hdw_queue) {
11251                        cpup->hdwq = next_idx;
11252                        next_idx++;
11253                        continue;
11254                }
11255
11256                /* Not a First CPU and all hdw_queues are used.  Reuse a
11257                 * Hardware Queue for another CPU, so be smart about it
11258                 * and pick one that has its IRQ/EQ mapped to the same phys_id
11259                 * (CPU package) and core_id.
11260                 */
11261                new_cpu = start_cpu;
11262                for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
11263                        new_cpup = &phba->sli4_hba.cpu_map[new_cpu];
11264                        if (new_cpup->hdwq != LPFC_VECTOR_MAP_EMPTY &&
11265                            new_cpup->phys_id == cpup->phys_id &&
11266                            new_cpup->core_id == cpup->core_id) {
11267                                goto found_hdwq;
11268                        }
11269                        new_cpu = cpumask_next(new_cpu, cpu_present_mask);
11270                        if (new_cpu == nr_cpumask_bits)
11271                                new_cpu = first_cpu;
11272                }
11273
11274                /* If we can't match both phys_id and core_id,
11275                 * settle for just a phys_id match.
11276                 */
11277                new_cpu = start_cpu;
11278                for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
11279                        new_cpup = &phba->sli4_hba.cpu_map[new_cpu];
11280                        if (new_cpup->hdwq != LPFC_VECTOR_MAP_EMPTY &&
11281                            new_cpup->phys_id == cpup->phys_id)
11282                                goto found_hdwq;
11283
11284                        new_cpu = cpumask_next(new_cpu, cpu_present_mask);
11285                        if (new_cpu == nr_cpumask_bits)
11286                                new_cpu = first_cpu;
11287                }
11288
11289                /* Otherwise just round robin on cfg_hdw_queue */
11290                cpup->hdwq = idx % phba->cfg_hdw_queue;
11291                idx++;
11292                goto logit;
11293 found_hdwq:
11294                /* We found an available entry, copy the hdwq info */
11295                start_cpu = cpumask_next(new_cpu, cpu_present_mask);
11296                if (start_cpu == nr_cpumask_bits)
11297                        start_cpu = first_cpu;
11298                cpup->hdwq = new_cpup->hdwq;
11299 logit:
11300                lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
11301                                "3335 Set Affinity: CPU %d (phys %d core %d): "
11302                                "hdwq %d eq %d flg x%x\n",
11303                                cpu, cpup->phys_id, cpup->core_id,
11304                                cpup->hdwq, cpup->eq, cpup->flag);
11305        }
11306
11307        /*
11308         * Initialize the cpu_map slots for not-present cpus in case
11309         * a cpu is hot-added. Perform a simple hdwq round robin assignment.
11310         */
11311        idx = 0;
11312        for_each_possible_cpu(cpu) {
11313                cpup = &phba->sli4_hba.cpu_map[cpu];
11314#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
11315                c_stat = per_cpu_ptr(phba->sli4_hba.c_stat, cpu);
11316                c_stat->hdwq_no = cpup->hdwq;
11317#endif
11318                if (cpup->hdwq != LPFC_VECTOR_MAP_EMPTY)
11319                        continue;
11320
11321                cpup->hdwq = idx++ % phba->cfg_hdw_queue;
11322#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
11323                c_stat->hdwq_no = cpup->hdwq;
11324#endif
11325                lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
11326                                "3340 Set Affinity: not present "
11327                                "CPU %d hdwq %d\n",
11328                                cpu, cpup->hdwq);
11329        }
11330
11331        /* The cpu_map array will be used later during initialization
11332         * when EQ / CQ / WQs are allocated and configured.
11333         */
11334        return;
11335}
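
/*
 * Worked example (editorial): with 8 present CPUs, cfg_irq_chann = 4 and
 * cfg_hdw_queue = 4, and assuming the kernel affinitized one vector to each
 * of CPUs 0-3 (so those entries carry LPFC_CPU_FIRST_IRQ):
 *
 *	CPU 0..3: eq 0..3, hdwq 0..3	(FIRST_IRQ pass, one-to-one)
 *	CPU 4..7: inherit an eq from a peer on the same phys_id, then share
 *		  hdwq 0..3 via the phys_id/core_id match or, failing that,
 *		  the idx % cfg_hdw_queue round robin
 *
 * The exact sharing depends on the topology reported for each CPU, so this
 * is only one possible outcome.
 */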
11336
11337/**
11338 * lpfc_cpuhp_get_eq - Get the eqs to poll when @cpu goes offline
11339 *
11340 * @phba:   pointer to lpfc hba data structure.
11341 * @cpu:    cpu going offline
11342 * @eqlist: eq list to append to
11343 */
11344static int
11345lpfc_cpuhp_get_eq(struct lpfc_hba *phba, unsigned int cpu,
11346                  struct list_head *eqlist)
11347{
11348        const struct cpumask *maskp;
11349        struct lpfc_queue *eq;
11350        struct cpumask *tmp;
11351        u16 idx;
11352
11353        tmp = kzalloc(cpumask_size(), GFP_KERNEL);
11354        if (!tmp)
11355                return -ENOMEM;
11356
11357        for (idx = 0; idx < phba->cfg_irq_chann; idx++) {
11358                maskp = pci_irq_get_affinity(phba->pcidev, idx);
11359                if (!maskp)
11360                        continue;
11361                /*
11362                 * if the irq is not affinitized to the cpu going
11363                 * offline, then we don't need to poll the eq attached
11364                 * to it.
11365                 */
11366                if (!cpumask_and(tmp, maskp, cpumask_of(cpu)))
11367                        continue;
11368                /* get the cpus that are online and are affinitized
11369                 * to this irq vector.  If the count is more than 1,
11370                 * then cpuhp is not going to shut down this vector.
11371                 * Since this cpu has not gone offline yet, we need a
11372                 * count > 1.
11373                 */
11374                cpumask_and(tmp, maskp, cpu_online_mask);
11375                if (cpumask_weight(tmp) > 1)
11376                        continue;
11377
11378                /* Now that we have an irq to shut down, get the eq
11379                 * mapped to this irq.  Note: multiple hdwqs in
11380                 * the software can share an eq, but eventually
11381                 * only one eq will be mapped to this vector.
11382                 */
11383                eq = phba->sli4_hba.hba_eq_hdl[idx].eq;
11384                list_add(&eq->_poll_list, eqlist);
11385        }
11386        kfree(tmp);
11387        return 0;
11388}
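
/*
 * Editorial note on the two cpumask tests above: the first AND asks "is the
 * offlining CPU in this vector's affinity mask at all?", the second asks
 * "how many online CPUs still service this vector?". For example, with an
 * affinity mask of {2,3} and CPU 3 going offline while CPU 2 remains online,
 * the online weight is 2 (> 1), so the vector keeps running and its EQ is
 * not added to eqlist.
 */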
11389
11390static void __lpfc_cpuhp_remove(struct lpfc_hba *phba)
11391{
11392        if (phba->sli_rev != LPFC_SLI_REV4)
11393                return;
11394
11395        cpuhp_state_remove_instance_nocalls(lpfc_cpuhp_state,
11396                                            &phba->cpuhp);
11397        /*
11398         * unregistering the instance doesn't stop the polling
11399         * timer. Wait for the poll timer to retire.
11400         */
11401        synchronize_rcu();
11402        del_timer_sync(&phba->cpuhp_poll_timer);
11403}
11404
11405static void lpfc_cpuhp_remove(struct lpfc_hba *phba)
11406{
11407        if (phba->pport->fc_flag & FC_OFFLINE_MODE)
11408                return;
11409
11410        __lpfc_cpuhp_remove(phba);
11411}
11412
11413static void lpfc_cpuhp_add(struct lpfc_hba *phba)
11414{
11415        if (phba->sli_rev != LPFC_SLI_REV4)
11416                return;
11417
11418        rcu_read_lock();
11419
11420        if (!list_empty(&phba->poll_list))
11421                mod_timer(&phba->cpuhp_poll_timer,
11422                          jiffies + msecs_to_jiffies(LPFC_POLL_HB));
11423
11424        rcu_read_unlock();
11425
11426        cpuhp_state_add_instance_nocalls(lpfc_cpuhp_state,
11427                                         &phba->cpuhp);
11428}
11429
11430static int __lpfc_cpuhp_checks(struct lpfc_hba *phba, int *retval)
11431{
11432        if (phba->pport->load_flag & FC_UNLOADING) {
11433                *retval = -EAGAIN;
11434                return true;
11435        }
11436
11437        if (phba->sli_rev != LPFC_SLI_REV4) {
11438                *retval = 0;
11439                return true;
11440        }
11441
11442        /* proceed with the hotplug */
11443        return false;
11444}
11445
11446/**
11447 * lpfc_irq_set_aff - set IRQ affinity
11448 * @eqhdl: EQ handle
11449 * @cpu: cpu to set affinity
11450 *
11451 **/
11452static inline void
11453lpfc_irq_set_aff(struct lpfc_hba_eq_hdl *eqhdl, unsigned int cpu)
11454{
11455        cpumask_clear(&eqhdl->aff_mask);
11456        cpumask_set_cpu(cpu, &eqhdl->aff_mask);
11457        irq_set_status_flags(eqhdl->irq, IRQ_NO_BALANCING);
11458        irq_set_affinity_hint(eqhdl->irq, &eqhdl->aff_mask);
11459}
11460
11461/**
11462 * lpfc_irq_clear_aff - clear IRQ affinity
11463 * @eqhdl: EQ handle
11464 *
11465 **/
11466static inline void
11467lpfc_irq_clear_aff(struct lpfc_hba_eq_hdl *eqhdl)
11468{
11469        cpumask_clear(&eqhdl->aff_mask);
11470        irq_clear_status_flags(eqhdl->irq, IRQ_NO_BALANCING);
11471}
11472
11473/**
11474 * lpfc_irq_rebalance - rebalances IRQ affinity according to cpuhp event
11475 * @phba: pointer to HBA context object.
11476 * @cpu: cpu going offline/online
11477 * @offline: true, cpu is going offline. false, cpu is coming online.
11478 *
11479 * If cpu is going offline, we'll make a best effort to find the next
11480 * online cpu on the phba's original_mask and migrate all offlining IRQ
11481 * affinities.
11482 *
11483 * If cpu is coming online, reaffinitize the IRQ back to the onlining cpu.
11484 *
11485 * Note: Call only if NUMA or NHT mode is enabled, otherwise rely on
11486 *       PCI_IRQ_AFFINITY to auto-manage IRQ affinity.
11487 *
11488 **/
11489static void
11490lpfc_irq_rebalance(struct lpfc_hba *phba, unsigned int cpu, bool offline)
11491{
11492        struct lpfc_vector_map_info *cpup;
11493        struct cpumask *aff_mask;
11494        unsigned int cpu_select, cpu_next, idx;
11495        const struct cpumask *orig_mask;
11496
11497        if (phba->irq_chann_mode == NORMAL_MODE)
11498                return;
11499
11500        orig_mask = &phba->sli4_hba.irq_aff_mask;
11501
11502        if (!cpumask_test_cpu(cpu, orig_mask))
11503                return;
11504
11505        cpup = &phba->sli4_hba.cpu_map[cpu];
11506
11507        if (!(cpup->flag & LPFC_CPU_FIRST_IRQ))
11508                return;
11509
11510        if (offline) {
11511                /* Find next online CPU on original mask */
11512                cpu_next = cpumask_next_wrap(cpu, orig_mask, cpu, true);
11513                cpu_select = lpfc_next_online_cpu(orig_mask, cpu_next);
11514
11515                /* Found a valid CPU */
11516                if ((cpu_select < nr_cpu_ids) && (cpu_select != cpu)) {
11517                        /* Go through each eqhdl and ensure offlining
11518                         * cpu aff_mask is migrated
11519                         */
11520                        for (idx = 0; idx < phba->cfg_irq_chann; idx++) {
11521                                aff_mask = lpfc_get_aff_mask(idx);
11522
11523                                /* Migrate affinity */
11524                                if (cpumask_test_cpu(cpu, aff_mask))
11525                                        lpfc_irq_set_aff(lpfc_get_eq_hdl(idx),
11526                                                         cpu_select);
11527                        }
11528                } else {
11529                        /* Rely on irqbalance if no online CPUs left on NUMA */
11530                        for (idx = 0; idx < phba->cfg_irq_chann; idx++)
11531                                lpfc_irq_clear_aff(lpfc_get_eq_hdl(idx));
11532                }
11533        } else {
11534                /* Migrate affinity back to this CPU */
11535                lpfc_irq_set_aff(lpfc_get_eq_hdl(cpup->eq), cpu);
11536        }
11537}
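
/*
 * Worked example (editorial): suppose irq_aff_mask = {0,2,4,6} and CPU 2,
 * which carries LPFC_CPU_FIRST_IRQ, goes offline while the rest stay online.
 * cpumask_next_wrap() yields cpu_next = 4, lpfc_next_online_cpu() confirms
 * CPU 4 is online, and every eqhdl whose aff_mask contains CPU 2 is migrated
 * via lpfc_irq_set_aff(eqhdl, 4). When CPU 2 comes back, the online path
 * reaffinitizes its EQ with lpfc_irq_set_aff(lpfc_get_eq_hdl(cpup->eq), 2).
 */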
11538
11539static int lpfc_cpu_offline(unsigned int cpu, struct hlist_node *node)
11540{
11541        struct lpfc_hba *phba = hlist_entry_safe(node, struct lpfc_hba, cpuhp);
11542        struct lpfc_queue *eq, *next;
11543        LIST_HEAD(eqlist);
11544        int retval;
11545
11546        if (!phba) {
11547                WARN_ONCE(!phba, "cpu: %u. phba:NULL", raw_smp_processor_id());
11548                return 0;
11549        }
11550
11551        if (__lpfc_cpuhp_checks(phba, &retval))
11552                return retval;
11553
11554        lpfc_irq_rebalance(phba, cpu, true);
11555
11556        retval = lpfc_cpuhp_get_eq(phba, cpu, &eqlist);
11557        if (retval)
11558                return retval;
11559
11560        /* start polling on these eq's */
11561        list_for_each_entry_safe(eq, next, &eqlist, _poll_list) {
11562                list_del_init(&eq->_poll_list);
11563                lpfc_sli4_start_polling(eq);
11564        }
11565
11566        return 0;
11567}
11568
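/**
 * lpfc_cpu_online - cpuhp callback for a cpu coming online
 * @cpu: cpu coming online.
 * @node: per-phba hlist node registered with the cpu hotplug framework.
 *
 * This routine moves IRQ affinity back to the onlining cpu and stops
 * polling on any EQ whose hardware queue is serviced by that cpu,
 * returning those EQs to interrupt-driven completion.
 *
 * Return codes
 *      0 - successful
 *      other values - error
 **/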
11569static int lpfc_cpu_online(unsigned int cpu, struct hlist_node *node)
11570{
11571        struct lpfc_hba *phba = hlist_entry_safe(node, struct lpfc_hba, cpuhp);
11572        struct lpfc_queue *eq, *next;
11573        unsigned int n;
11574        int retval;
11575
11576        if (!phba) {
11577                WARN_ONCE(!phba, "cpu: %u. phba:NULL", raw_smp_processor_id());
11578                return 0;
11579        }
11580
11581        if (__lpfc_cpuhp_checks(phba, &retval))
11582                return retval;
11583
11584        lpfc_irq_rebalance(phba, cpu, false);
11585
11586        list_for_each_entry_safe(eq, next, &phba->poll_list, _poll_list) {
11587                n = lpfc_find_cpu_handle(phba, eq->hdwq, LPFC_FIND_BY_HDWQ);
11588                if (n == cpu)
11589                        lpfc_sli4_stop_polling(eq);
11590        }
11591
11592        return 0;
11593}
11594
11595/**
11596 * lpfc_sli4_enable_msix - Enable MSI-X interrupt mode to SLI-4 device
11597 * @phba: pointer to lpfc hba data structure.
11598 *
11599 * This routine is invoked to enable the MSI-X interrupt vectors to device
11600 * with SLI-4 interface spec.  It also allocates MSI-X vectors and maps them
11601 * to cpus on the system.
11602 *
11603 * When cfg_irq_numa is enabled, the adapter will only allocate vectors for
11604 * the number of cpus on the same numa node as this adapter.  The vectors are
11605 * allocated without requesting OS affinity mapping.  A vector will be
11606 * allocated and assigned to each online and offline cpu.  If the cpu is
11607 * online, then affinity will be set to that cpu.  If the cpu is offline, then
11608 * affinity will be set to the nearest peer cpu within the numa node that is
11609 * online.  If there are no online cpus within the numa node, affinity is not
11610 * assigned and the OS may do as it pleases. Note: cpu vector affinity mapping
11611 * is consistent with the way cpu online/offline is handled when cfg_irq_numa is
11612 * configured.
11613 *
11614 * If numa mode is not enabled and there is more than 1 vector allocated, then
11615 * the driver relies on the managed irq interface, where the OS assigns
11616 * vector-to-cpu affinity.  The driver will then use that affinity mapping
11617 * to set up its cpu mapping table.
11618 *
11619 * Return codes
11620 * 0 - successful
11621 * other values - error
11622 **/
11623static int
11624lpfc_sli4_enable_msix(struct lpfc_hba *phba)
11625{
11626        int vectors, rc, index;
11627        char *name;
11628        const struct cpumask *aff_mask = NULL;
11629        unsigned int cpu = 0, cpu_cnt = 0, cpu_select = nr_cpu_ids;
11630        struct lpfc_vector_map_info *cpup;
11631        struct lpfc_hba_eq_hdl *eqhdl;
11632        const struct cpumask *maskp;
11633        unsigned int flags = PCI_IRQ_MSIX;
11634
11635        /* Set up MSI-X multi-message vectors */
11636        vectors = phba->cfg_irq_chann;
11637
11638        if (phba->irq_chann_mode != NORMAL_MODE)
11639                aff_mask = &phba->sli4_hba.irq_aff_mask;
11640
11641        if (aff_mask) {
11642                cpu_cnt = cpumask_weight(aff_mask);
11643                vectors = min(phba->cfg_irq_chann, cpu_cnt);
11644
11645                /* cpu: iterates over aff_mask including offline or online
11646                 * cpu_select: iterates over online aff_mask to set affinity
11647                 */
11648                cpu = cpumask_first(aff_mask);
11649                cpu_select = lpfc_next_online_cpu(aff_mask, cpu);
11650        } else {
11651                flags |= PCI_IRQ_AFFINITY;
11652        }
11653
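        /*
         * pci_alloc_irq_vectors() returns the number of vectors actually
         * allocated (possibly fewer than requested) or a negative errno.
         */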
11654        rc = pci_alloc_irq_vectors(phba->pcidev, 1, vectors, flags);
11655        if (rc < 0) {
11656                lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
11657                                "0484 PCI enable MSI-X failed (%d)\n", rc);
11658                goto vec_fail_out;
11659        }
11660        vectors = rc;
11661
11662        /* Assign MSI-X vectors to interrupt handlers */
11663        for (index = 0; index < vectors; index++) {
11664                eqhdl = lpfc_get_eq_hdl(index);
11665                name = eqhdl->handler_name;
11666                memset(name, 0, LPFC_SLI4_HANDLER_NAME_SZ);
11667                snprintf(name, LPFC_SLI4_HANDLER_NAME_SZ,
11668                         LPFC_DRIVER_HANDLER_NAME"%d", index);
11669
11670                eqhdl->idx = index;
11671                rc = request_irq(pci_irq_vector(phba->pcidev, index),
11672                         &lpfc_sli4_hba_intr_handler, 0,
11673                         name, eqhdl);
11674                if (rc) {
11675                        lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
11676                                        "0486 MSI-X fast-path (%d) "
11677                                        "request_irq failed (%d)\n", index, rc);
11678                        goto cfg_fail_out;
11679                }
11680
11681                eqhdl->irq = pci_irq_vector(phba->pcidev, index);
11682
11683                if (aff_mask) {
11684                        /* If found a neighboring online cpu, set affinity */
11685                        if (cpu_select < nr_cpu_ids)
11686                                lpfc_irq_set_aff(eqhdl, cpu_select);
11687
11688                        /* Assign EQ to cpu_map */
11689                        lpfc_assign_eq_map_info(phba, index,
11690                                                LPFC_CPU_FIRST_IRQ,
11691                                                cpu);
11692
11693                        /* Iterate to next offline or online cpu in aff_mask */
11694                        cpu = cpumask_next(cpu, aff_mask);
11695
11696                        /* Find next online cpu in aff_mask to set affinity */
11697                        cpu_select = lpfc_next_online_cpu(aff_mask, cpu);
11698                } else if (vectors == 1) {
11699                        cpu = cpumask_first(cpu_present_mask);
11700                        lpfc_assign_eq_map_info(phba, index, LPFC_CPU_FIRST_IRQ,
11701                                                cpu);
11702                } else {
11703                        maskp = pci_irq_get_affinity(phba->pcidev, index);
11704
11705                        /* Loop through all CPUs associated with vector index */
11706                        for_each_cpu_and(cpu, maskp, cpu_present_mask) {
11707                                cpup = &phba->sli4_hba.cpu_map[cpu];
11708
11709                                /* If this is the first CPU that's assigned to
11710                                 * this vector, set LPFC_CPU_FIRST_IRQ.
11711                                 *
11712                                 * On certain platforms it's possible that irq
11713                                 * vectors are affinitized to all the CPUs.
11714                                 * This can result in each cpu_map.eq being set
11715                                 * to the last vector, overwriting all the
11716                                 * previous cpu_map.eq entries.  Ensure that
11717                                 * each vector receives a place in cpu_map.
11718                                 * Later call to lpfc_cpu_affinity_check will
11719                                 * ensure we are nicely balanced out.
11720                                 */
11721                                if (cpup->eq != LPFC_VECTOR_MAP_EMPTY)
11722                                        continue;
11723                                lpfc_assign_eq_map_info(phba, index,
11724                                                        LPFC_CPU_FIRST_IRQ,
11725                                                        cpu);
11726                                break;
11727                        }
11728                }
11729        }
11730
11731        if (vectors != phba->cfg_irq_chann) {
11732                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11733                                "3238 Reducing IO channels to match number of "
11734                                "MSI-X vectors, requested %d got %d\n",
11735                                phba->cfg_irq_chann, vectors);
11736                if (phba->cfg_irq_chann > vectors)
11737                        phba->cfg_irq_chann = vectors;
11738        }
11739
11740        return rc;
11741
11742cfg_fail_out:
11743        /* free the irq already requested */
11744        for (--index; index >= 0; index--) {
11745                eqhdl = lpfc_get_eq_hdl(index);
11746                lpfc_irq_clear_aff(eqhdl);
11747                irq_set_affinity_hint(eqhdl->irq, NULL);
11748                free_irq(eqhdl->irq, eqhdl);
11749        }
11750
11751        /* Unconfigure MSI-X capability structure */
11752        pci_free_irq_vectors(phba->pcidev);
11753
11754vec_fail_out:
11755        return rc;
11756}
11757
11758/**
11759 * lpfc_sli4_enable_msi - Enable MSI interrupt mode to SLI-4 device
11760 * @phba: pointer to lpfc hba data structure.
11761 *
11762 * This routine is invoked to enable the MSI interrupt mode to device with
11763 * SLI-4 interface spec. The kernel function pci_alloc_irq_vectors() is
11764 * called to enable the MSI vector. The device driver is responsible for
11765 * calling request_irq() to register the MSI vector with an interrupt
11766 * handler, which is done in this function.
11767 *
11768 * Return codes
11769 *      0 - successful
11770 *      other values - error
11771 **/
11772static int
11773lpfc_sli4_enable_msi(struct lpfc_hba *phba)
11774{
11775        int rc, index;
11776        unsigned int cpu;
11777        struct lpfc_hba_eq_hdl *eqhdl;
11778
11779        rc = pci_alloc_irq_vectors(phba->pcidev, 1, 1,
11780                                   PCI_IRQ_MSI | PCI_IRQ_AFFINITY);
11781        if (rc > 0)
11782                lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
11783                                "0487 PCI enable MSI mode success.\n");
11784        else {
11785                lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
11786                                "0488 PCI enable MSI mode failed (%d)\n", rc);
11787                return rc ? rc : -1;
11788        }
11789
11790        rc = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler,
11791                         0, LPFC_DRIVER_NAME, phba);
11792        if (rc) {
11793                pci_free_irq_vectors(phba->pcidev);
11794                lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
11795                                "0490 MSI request_irq failed (%d)\n", rc);
11796                return rc;
11797        }
11798
11799        eqhdl = lpfc_get_eq_hdl(0);
11800        eqhdl->irq = pci_irq_vector(phba->pcidev, 0);
11801
11802        cpu = cpumask_first(cpu_present_mask);
11803        lpfc_assign_eq_map_info(phba, 0, LPFC_CPU_FIRST_IRQ, cpu);
11804
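        /* Single MSI vector: record the EQ index in each eq handle */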
11805        for (index = 0; index < phba->cfg_irq_chann; index++) {
11806                eqhdl = lpfc_get_eq_hdl(index);
11807                eqhdl->idx = index;
11808        }
11809
11810        return 0;
11811}
11812
11813/**
11814 * lpfc_sli4_enable_intr - Enable device interrupt to SLI-4 device
11815 * @phba: pointer to lpfc hba data structure.
11816 * @cfg_mode: Interrupt configuration mode (INTx, MSI or MSI-X).
11817 *
11818 * This routine is invoked to enable device interrupt and associate driver's
11819 * interrupt handler(s) to interrupt vector(s) to device with SLI-4
11820 * interface spec. Depending on the interrupt mode configured for the
11821 * driver, it will try to fall back from the configured interrupt mode to an
11822 * interrupt mode which is supported by the platform, kernel, and device in
11823 * the order of:
11824 * MSI-X -> MSI -> IRQ.
11825 *
11826 * Return codes
11827 *      0 - successful
11828 *      other values - error
11829 **/
11830static uint32_t
11831lpfc_sli4_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
11832{
11833        uint32_t intr_mode = LPFC_INTR_ERROR;
11834        int retval, idx;
11835
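        /* cfg_mode mirrors lpfc_use_msi: 2 = MSI-X, 1 = MSI, 0 = INTx */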
11836        if (cfg_mode == 2) {
11837                /* Try to enable MSI-X interrupt mode */
11838                retval = lpfc_sli4_enable_msix(phba);
11839                if (!retval) {
11840                        /* Indicate initialization to MSI-X mode */
11841                        phba->intr_type = MSIX;
11842                        intr_mode = 2;
11843                }
11844        }
11849
11850        /* Fallback to MSI if MSI-X initialization failed */
11851        if (cfg_mode >= 1 && phba->intr_type == NONE) {
11852                retval = lpfc_sli4_enable_msi(phba);
11853                if (!retval) {
11854                        /* Indicate initialization to MSI mode */
11855                        phba->intr_type = MSI;
11856                        intr_mode = 1;
11857                }
11858        }
11859
11860        /* Fallback to INTx if both MSI-X/MSI initialization failed */
11861        if (phba->intr_type == NONE) {
11862                retval = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler,
11863                                     IRQF_SHARED, LPFC_DRIVER_NAME, phba);
11864                if (!retval) {
11865                        struct lpfc_hba_eq_hdl *eqhdl;
11866                        unsigned int cpu;
11867
11868                        /* Indicate initialization to INTx mode */
11869                        phba->intr_type = INTx;
11870                        intr_mode = 0;
11871
11872                        eqhdl = lpfc_get_eq_hdl(0);
11873                        eqhdl->irq = pci_irq_vector(phba->pcidev, 0);
11874
11875                        cpu = cpumask_first(cpu_present_mask);
11876                        lpfc_assign_eq_map_info(phba, 0, LPFC_CPU_FIRST_IRQ,
11877                                                cpu);
11878                        for (idx = 0; idx < phba->cfg_irq_chann; idx++) {
11879                                eqhdl = lpfc_get_eq_hdl(idx);
11880                                eqhdl->idx = idx;
11881                        }
11882                }
11883        }
11884        return intr_mode;
11885}
11886
11887/**
11888 * lpfc_sli4_disable_intr - Disable device interrupt to SLI-4 device
11889 * @phba: pointer to lpfc hba data structure.
11890 *
11891 * This routine is invoked to disable device interrupt and disassociate
11892 * the driver's interrupt handler(s) from interrupt vector(s) to device
11893 * with SLI-4 interface spec. Depending on the interrupt mode, the driver
11894 * will release the interrupt vector(s) for the message signaled interrupt.
11895 **/
11896static void
11897lpfc_sli4_disable_intr(struct lpfc_hba *phba)
11898{
11899        /* Disable the currently initialized interrupt mode */
11900        if (phba->intr_type == MSIX) {
11901                int index;
11902                struct lpfc_hba_eq_hdl *eqhdl;
11903
11904                /* Free up MSI-X multi-message vectors */
11905                for (index = 0; index < phba->cfg_irq_chann; index++) {
11906                        eqhdl = lpfc_get_eq_hdl(index);
11907                        lpfc_irq_clear_aff(eqhdl);
11908                        irq_set_affinity_hint(eqhdl->irq, NULL);
11909                        free_irq(eqhdl->irq, eqhdl);
11910                }
11911        } else {
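                /* MSI or INTx mode: a single irq was requested against phba */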
11912                free_irq(phba->pcidev->irq, phba);
11913        }
11914
11915        pci_free_irq_vectors(phba->pcidev);
11916
11917        /* Reset interrupt management states */
11918        phba->intr_type = NONE;
11919        phba->sli.slistat.sli_intr = 0;
11920}
11921
11922/**
11923 * lpfc_unset_hba - Unset SLI3 hba device initialization
11924 * @phba: pointer to lpfc hba data structure.
11925 *
11926 * This routine is invoked to undo the HBA device initialization steps for
11927 * a device with SLI-3 interface spec.
11928 **/
11929static void
11930lpfc_unset_hba(struct lpfc_hba *phba)
11931{
11932        struct lpfc_vport *vport = phba->pport;
11933        struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
11934
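        /* Mark the physical port as unloading before starting teardown */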
11935        spin_lock_irq(shost->host_lock);
11936        vport->load_flag |= FC_UNLOADING;
11937        spin_unlock_irq(shost->host_lock);
11938
11939        kfree(phba->vpi_bmask);
11940        kfree(phba->vpi_ids);
11941
11942        lpfc_stop_hba_timers(phba);
11943
11944        phba->pport->work_port_events = 0;
11945
11946        lpfc_sli_hba_down(phba);
11947
11948        lpfc_sli_brdrestart(phba);
11949
11950        lpfc_sli_disable_intr(phba);
11951
11952        return;
11953}
11954
11955/**
11956 * lpfc_sli4_xri_exchange_busy_wait - Wait for device XRI exchange busy
11957 * @phba: Pointer to HBA context object.
11958 *
11959 * This function is called in the SLI4 code path to wait for completion
11960 * of device's XRIs exchange busy. It will check the XRI exchange busy
11961 * on outstanding FCP and ELS I/Os every 10ms for up to 10 seconds; after
11962 * that, it will check the XRI exchange busy on outstanding FCP and ELS
11963 * I/Os every 30 seconds, log an error message, and wait forever. Only
11964 * when all XRI exchange busy conditions complete shall the driver unload
11965 * proceed with invoking the function reset ioctl mailbox command to the
11966 * CNA and the rest of the driver unload resource release.
11967 **/
11968static void
11969lpfc_sli4_xri_exchange_busy_wait(struct lpfc_hba *phba)
11970{
11971        struct lpfc_sli4_hdw_queue *qp;
11972        int idx, ccnt;
11973        int wait_time = 0;
11974        int io_xri_cmpl = 1;
11975        int nvmet_xri_cmpl = 1;
11976        int els_xri_cmpl = list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list);
11977
11978        /* Driver just aborted IOs during the hba_unset process.  Pause
11979         * here to give the HBA time to complete the IO and get entries
11980         * into the abts lists.
11981         */
11982        msleep(LPFC_XRI_EXCH_BUSY_WAIT_T1 * 5);
11983
11984        /* Wait for NVME pending IO to flush back to transport. */
11985        if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
11986                lpfc_nvme_wait_for_io_drain(phba);
11987
11988        ccnt = 0;
11989        for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
11990                qp = &phba->sli4_hba.hdwq[idx];
11991                io_xri_cmpl = list_empty(&qp->lpfc_abts_io_buf_list);
11992                if (!io_xri_cmpl) /* if list is NOT empty */
11993                        ccnt++;
11994        }
11995        if (ccnt)
11996                io_xri_cmpl = 0;
11997
11998        if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
11999                nvmet_xri_cmpl =
12000                        list_empty(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
12001        }
12002
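        /* wait_time accumulates in milliseconds: poll every T1 until the
         * timeout expires, then every T2 while logging errors.
         */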
12003        while (!els_xri_cmpl || !io_xri_cmpl || !nvmet_xri_cmpl) {
12004                if (wait_time > LPFC_XRI_EXCH_BUSY_WAIT_TMO) {
12005                        if (!nvmet_xri_cmpl)
12006                                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
12007                                                "6424 NVMET XRI exchange busy "
12008                                                "wait time: %d seconds.\n",
12009                                                wait_time/1000);
12010                        if (!io_xri_cmpl)
12011                                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
12012                                                "6100 IO XRI exchange busy "
12013                                                "wait time: %d seconds.\n",
12014                                                wait_time/1000);
12015                        if (!els_xri_cmpl)
12016                                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
12017                                                "2878 ELS XRI exchange busy "
12018                                                "wait time: %d seconds.\n",
12019                                                wait_time/1000);
12020                        msleep(LPFC_XRI_EXCH_BUSY_WAIT_T2);
12021                        wait_time += LPFC_XRI_EXCH_BUSY_WAIT_T2;
12022                } else {
12023                        msleep(LPFC_XRI_EXCH_BUSY_WAIT_T1);
12024                        wait_time += LPFC_XRI_EXCH_BUSY_WAIT_T1;
12025                }
12026
12027                ccnt = 0;
12028                for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
12029                        qp = &phba->sli4_hba.hdwq[idx];
12030                        io_xri_cmpl = list_empty(
12031                            &qp->lpfc_abts_io_buf_list);
12032                        if (!io_xri_cmpl) /* if list is NOT empty */
12033                                ccnt++;
12034                }
12035                if (ccnt)
12036                        io_xri_cmpl = 0;
12037
12038                if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
12039                        nvmet_xri_cmpl = list_empty(
12040                                &phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
12041                }
12042                els_xri_cmpl =
12043                        list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list);
12044
12045        }
12046}
12047
12048/**
12049 * lpfc_sli4_hba_unset - Unset the fcoe hba
12050 * @phba: Pointer to HBA context object.
12051 *
12052 * This function is called in the SLI4 code path to reset the HBA's FCoE
12053 * function. The caller is not required to hold any lock. This routine
12054 * issues PCI function reset mailbox command to reset the FCoE function.
12055 * At the end of the function, it calls lpfc_hba_down_post function to
12056 * free any pending commands.
12057 **/
12058static void
12059lpfc_sli4_hba_unset(struct lpfc_hba *phba)
12060{
12061        int wait_cnt = 0;
12062        LPFC_MBOXQ_t *mboxq;
12063        struct pci_dev *pdev = phba->pcidev;
12064
12065        lpfc_stop_hba_timers(phba);
12066        if (phba->pport)
12067                phba->sli4_hba.intr_enable = 0;
12068
12069        /*
12070         * Gracefully wait out the potential current outstanding asynchronous
12071         * mailbox command.
12072         */
12073
12074        /* First, block any pending async mailbox command from being posted */
12075        spin_lock_irq(&phba->hbalock);
12076        phba->sli.sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
12077        spin_unlock_irq(&phba->hbalock);
12078        /* Now, try to wait it out if we can */
12079        while (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) {
12080                msleep(10);
12081                if (++wait_cnt > LPFC_ACTIVE_MBOX_WAIT_CNT)
12082                        break;
12083        }
12084        /* Forcefully release the outstanding mailbox command if timed out */
12085        if (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) {
12086                spin_lock_irq(&phba->hbalock);
12087                mboxq = phba->sli.mbox_active;
12088                mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED;
12089                __lpfc_mbox_cmpl_put(phba, mboxq);
12090                phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
12091                phba->sli.mbox_active = NULL;
12092                spin_unlock_irq(&phba->hbalock);
12093        }
12094
12095        /* Abort all iocbs associated with the hba */
12096        lpfc_sli_hba_iocb_abort(phba);
12097
12098        /* Wait for completion of device XRI exchange busy */
12099        lpfc_sli4_xri_exchange_busy_wait(phba);
12100
12101        /* per-phba callback de-registration for hotplug event */
12102        if (phba->pport)
12103                lpfc_cpuhp_remove(phba);
12104
12105        /* Disable PCI subsystem interrupt */
12106        lpfc_sli4_disable_intr(phba);
12107
12108        /* Disable SR-IOV if enabled */
12109        if (phba->cfg_sriov_nr_virtfn)
12110                pci_disable_sriov(pdev);
12111
12112        /* The kthread stop signal shall trigger work_done one more time */
12113        kthread_stop(phba->worker_thread);
12114
12115        /* Disable FW logging to host memory */
12116        lpfc_ras_stop_fwlog(phba);
12117
12118        /* Unset the queues shared with the hardware then release all
12119         * allocated resources.
12120         */
12121        lpfc_sli4_queue_unset(phba);
12122        lpfc_sli4_queue_destroy(phba);
12123
12124        /* Reset SLI4 HBA FCoE function */
12125        lpfc_pci_function_reset(phba);
12126
12127        /* Free RAS DMA memory */
12128        if (phba->ras_fwlog.ras_enabled)
12129                lpfc_sli4_ras_dma_free(phba);
12130
12131        /* Stop the SLI4 device port */
12132        if (phba->pport)
12133                phba->pport->work_port_events = 0;
12134}
12135
12136/**
12137 * lpfc_get_sli4_parameters - Get the SLI4 Config PARAMETERS.
12138 * @phba: Pointer to HBA context object.
12139 * @mboxq: Pointer to the mailboxq memory for the mailbox command response.
12140 *
12141 * This function is called in the SLI4 code path to read the port's
12142 * sli4 capabilities.
12143 *
12144 * This function may be called from any context that can block-wait
12145 * for the completion.  The expectation is that this routine is typically
12146 * called from probe_one or from the online routine.
12147 **/
12148int
12149lpfc_get_sli4_parameters(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
12150{
12151        int rc;
12152        struct lpfc_mqe *mqe = &mboxq->u.mqe;
12153        struct lpfc_pc_sli4_params *sli4_params;
12154        uint32_t mbox_tmo;
12155        int length;
12156        bool exp_wqcq_pages = true;
12157        struct lpfc_sli4_parameters *mbx_sli4_parameters;
12158
12159        /*
12160         * By default, the driver assumes the SLI4 port requires RPI
12161         * header postings.  The SLI4_PARAM response will correct this
12162         * assumption.
12163         */
12164        phba->sli4_hba.rpi_hdrs_in_use = 1;
12165
12166        /* Read the port's SLI4 Config Parameters */
12167        length = (sizeof(struct lpfc_mbx_get_sli4_parameters) -
12168                  sizeof(struct lpfc_sli4_cfg_mhdr));
12169        lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
12170                         LPFC_MBOX_OPCODE_GET_SLI4_PARAMETERS,
12171                         length, LPFC_SLI4_MBX_EMBED);
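        /* Issue in polled mode if interrupts are not yet enabled; otherwise
         * block-wait for the mailbox completion.
         */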
12172        if (!phba->sli4_hba.intr_enable)
12173                rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
12174        else {
12175                mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq);
12176                rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
12177        }
12178        if (unlikely(rc))
12179                return rc;
12180        sli4_params = &phba->sli4_hba.pc_sli4_params;
12181        mbx_sli4_parameters = &mqe->un.get_sli4_parameters.sli4_parameters;
12182        sli4_params->if_type = bf_get(cfg_if_type, mbx_sli4_parameters);
12183        sli4_params->sli_rev = bf_get(cfg_sli_rev, mbx_sli4_parameters);
12184        sli4_params->sli_family = bf_get(cfg_sli_family, mbx_sli4_parameters);
12185        sli4_params->featurelevel_1 = bf_get(cfg_sli_hint_1,
12186                                             mbx_sli4_parameters);
12187        sli4_params->featurelevel_2 = bf_get(cfg_sli_hint_2,
12188                                             mbx_sli4_parameters);
12189        if (bf_get(cfg_phwq, mbx_sli4_parameters))
12190                phba->sli3_options |= LPFC_SLI4_PHWQ_ENABLED;
12191        else
12192                phba->sli3_options &= ~LPFC_SLI4_PHWQ_ENABLED;
12193        sli4_params->sge_supp_len = mbx_sli4_parameters->sge_supp_len;
12194        sli4_params->loopbk_scope = bf_get(cfg_loopbk_scope,
12195                                           mbx_sli4_parameters);
12196        sli4_params->oas_supported = bf_get(cfg_oas, mbx_sli4_parameters);
12197        sli4_params->cqv = bf_get(cfg_cqv, mbx_sli4_parameters);
12198        sli4_params->mqv = bf_get(cfg_mqv, mbx_sli4_parameters);
12199        sli4_params->wqv = bf_get(cfg_wqv, mbx_sli4_parameters);
12200        sli4_params->rqv = bf_get(cfg_rqv, mbx_sli4_parameters);
12201        sli4_params->eqav = bf_get(cfg_eqav, mbx_sli4_parameters);
12202        sli4_params->cqav = bf_get(cfg_cqav, mbx_sli4_parameters);
12203        sli4_params->wqsize = bf_get(cfg_wqsize, mbx_sli4_parameters);
12204        sli4_params->bv1s = bf_get(cfg_bv1s, mbx_sli4_parameters);
12205        sli4_params->pls = bf_get(cfg_pvl, mbx_sli4_parameters);
12206        sli4_params->sgl_pages_max = bf_get(cfg_sgl_page_cnt,
12207                                            mbx_sli4_parameters);
12208        sli4_params->wqpcnt = bf_get(cfg_wqpcnt, mbx_sli4_parameters);
12209        sli4_params->sgl_pp_align = bf_get(cfg_sgl_pp_align,
12210                                           mbx_sli4_parameters);
12211        phba->sli4_hba.extents_in_use = bf_get(cfg_ext, mbx_sli4_parameters);
12212        phba->sli4_hba.rpi_hdrs_in_use = bf_get(cfg_hdrr, mbx_sli4_parameters);
12213
12214        /* Check for Extended Pre-Registered SGL support */
12215        phba->cfg_xpsgl = bf_get(cfg_xpsgl, mbx_sli4_parameters);
12216
12217        /* Check for firmware nvme support */
12218        rc = (bf_get(cfg_nvme, mbx_sli4_parameters) &&
12219                     bf_get(cfg_xib, mbx_sli4_parameters));
12220
12221        if (rc) {
12222                /* Save this to indicate the Firmware supports NVME */
12223                sli4_params->nvme = 1;
12224
12225                /* Firmware NVME support, check driver FC4 NVME support */
12226                if (phba->cfg_enable_fc4_type == LPFC_ENABLE_FCP) {
12227                        lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_NVME,
12228                                        "6133 Disabling NVME support: "
12229                                        "FC4 type not supported: x%x\n",
12230                                        phba->cfg_enable_fc4_type);
12231                        goto fcponly;
12232                }
12233        } else {
12234                /* No firmware NVME support, check driver FC4 NVME support */
12235                sli4_params->nvme = 0;
12236                if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
12237                        lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_NVME,
12238                                        "6101 Disabling NVME support: Not "
12239                                        "supported by firmware (%d %d) x%x\n",
12240                                        bf_get(cfg_nvme, mbx_sli4_parameters),
12241                                        bf_get(cfg_xib, mbx_sli4_parameters),
12242                                        phba->cfg_enable_fc4_type);
12243fcponly:
12244                        phba->nvme_support = 0;
12245                        phba->nvmet_support = 0;
12246                        phba->cfg_nvmet_mrq = 0;
12247                        phba->cfg_nvme_seg_cnt = 0;
12248
12249                        /* If no FC4 type support, move to just SCSI support */
12250                        if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP))
12251                                return -ENODEV;
12252                        phba->cfg_enable_fc4_type = LPFC_ENABLE_FCP;
12253                }
12254        }
12255
12256        /* If the NVME FC4 type is enabled, scale the sg_seg_cnt to
12257         * accommodate 512K and 1M IOs in a single nvme buf.
12258         */
12259        if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
12260                phba->cfg_sg_seg_cnt = LPFC_MAX_NVME_SEG_CNT;
12261
12262        /* Only embed PBDE for if_type 6, PBDE support requires xib be set */
12263        if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
12264            LPFC_SLI_INTF_IF_TYPE_6) || (!bf_get(cfg_xib, mbx_sli4_parameters)))
12265                phba->cfg_enable_pbde = 0;
12266
12267        /*
12268         * To support Suppress Response feature we must satisfy 3 conditions.
12269         * lpfc_suppress_rsp module parameter must be set (default).
12270         * In SLI4-Parameters Descriptor:
12271         * Extended Inline Buffers (XIB) must be supported.
12272         * Suppress Response IU Not Supported (SRIUNS) must NOT be supported
12273         * (double negative).
12274         */
12275        if (phba->cfg_suppress_rsp && bf_get(cfg_xib, mbx_sli4_parameters) &&
12276            !(bf_get(cfg_nosr, mbx_sli4_parameters)))
12277                phba->sli.sli_flag |= LPFC_SLI_SUPPRESS_RSP;
12278        else
12279                phba->cfg_suppress_rsp = 0;
12280
12281        if (bf_get(cfg_eqdr, mbx_sli4_parameters))
12282                phba->sli.sli_flag |= LPFC_SLI_USE_EQDR;
12283
12284        /* Make sure that sge_supp_len can be handled by the driver */
12285        if (sli4_params->sge_supp_len > LPFC_MAX_SGE_SIZE)
12286                sli4_params->sge_supp_len = LPFC_MAX_SGE_SIZE;
12287
12288        /*
12289         * Check whether the adapter supports an embedded copy of the
12290         * FCP CMD IU within the WQE for FCP_Ixxx commands. In order
12291         * to use this option, 128-byte WQEs must be used.
12292         */
12293        if (bf_get(cfg_ext_embed_cb, mbx_sli4_parameters))
12294                phba->fcp_embed_io = 1;
12295        else
12296                phba->fcp_embed_io = 0;
12297
12298        lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_NVME,
12299                        "6422 XIB %d PBDE %d: FCP %d NVME %d %d %d\n",
12300                        bf_get(cfg_xib, mbx_sli4_parameters),
12301                        phba->cfg_enable_pbde,
12302                        phba->fcp_embed_io, phba->nvme_support,
12303                        phba->cfg_nvme_embed_cmd, phba->cfg_suppress_rsp);
12304
12305        if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
12306            LPFC_SLI_INTF_IF_TYPE_2) &&
12307            (bf_get(lpfc_sli_intf_sli_family, &phba->sli4_hba.sli_intf) ==
12308                 LPFC_SLI_INTF_FAMILY_LNCR_A0))
12309                exp_wqcq_pages = false;
12310
12311        if ((bf_get(cfg_cqpsize, mbx_sli4_parameters) & LPFC_CQ_16K_PAGE_SZ) &&
12312            (bf_get(cfg_wqpsize, mbx_sli4_parameters) & LPFC_WQ_16K_PAGE_SZ) &&
12313            exp_wqcq_pages &&
12314            (sli4_params->wqsize & LPFC_WQ_SZ128_SUPPORT))
12315                phba->enab_exp_wqcq_pages = 1;
12316        else
12317                phba->enab_exp_wqcq_pages = 0;
12318        /*
12319         * Check if the SLI port supports MDS Diagnostics
12320         */
12321        if (bf_get(cfg_mds_diags, mbx_sli4_parameters))
12322                phba->mds_diags_support = 1;
12323        else
12324                phba->mds_diags_support = 0;
12325
12326        /*
12327         * Check if the SLI port supports NSLER
12328         */
12329        if (bf_get(cfg_nsler, mbx_sli4_parameters))
12330                phba->nsler = 1;
12331        else
12332                phba->nsler = 0;
12333
12334        /* Save PB info for use during HBA setup */
12335        sli4_params->mi_ver = bf_get(cfg_mi_ver, mbx_sli4_parameters);
12336        sli4_params->mib_bde_cnt = bf_get(cfg_mib_bde_cnt, mbx_sli4_parameters);
12337        sli4_params->mib_size = mbx_sli4_parameters->mib_size;
12338        sli4_params->mi_value = LPFC_DFLT_MIB_VAL;
12339
12340        /* Next we check for Vendor MIB support */
12341        if (sli4_params->mi_ver && phba->cfg_enable_mi)
12342                phba->cfg_fdmi_on = LPFC_FDMI_SUPPORT;
12343
12344        lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
12345                        "6461 MIB attr %d  enable %d  FDMI %d buf %d:%d\n",
12346                        sli4_params->mi_ver, phba->cfg_enable_mi,
12347                        sli4_params->mi_value, sli4_params->mib_bde_cnt,
12348                        sli4_params->mib_size);
12349        return 0;
12350}
12351
12352/**
12353 * lpfc_pci_probe_one_s3 - PCI probe func to reg SLI-3 device to PCI subsystem.
12354 * @pdev: pointer to PCI device
12355 * @pid: pointer to PCI device identifier
12356 *
12357 * This routine is to be called to attach a device with SLI-3 interface spec
12358 * to the PCI subsystem. When an Emulex HBA with SLI-3 interface spec is
12359 * presented on PCI bus, the kernel PCI subsystem looks at PCI device-specific
12360 * information of the device and driver to see if the driver states that it can
12361 * support this kind of device. If the match is successful, the driver core
12362 * invokes this routine. If this routine determines it can claim the HBA, it
12363 * does all the initialization that it needs to do to handle the HBA properly.
12364 *
12365 * Return code
12366 *      0 - driver can claim the device
12367 *      negative value - driver can not claim the device
12368 **/
12369static int
12370lpfc_pci_probe_one_s3(struct pci_dev *pdev, const struct pci_device_id *pid)
12371{
12372        struct lpfc_hba   *phba;
12373        struct lpfc_vport *vport = NULL;
12374        struct Scsi_Host  *shost = NULL;
12375        int error;
12376        uint32_t cfg_mode, intr_mode;
12377
12378        /* Allocate memory for HBA structure */
12379        phba = lpfc_hba_alloc(pdev);
12380        if (!phba)
12381                return -ENOMEM;
12382
12383        /* Perform generic PCI device enabling operation */
12384        error = lpfc_enable_pci_dev(phba);
12385        if (error)
12386                goto out_free_phba;
12387
12388        /* Set up SLI API function jump table for PCI-device group-0 HBAs */
12389        error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_LP);
12390        if (error)
12391                goto out_disable_pci_dev;
12392
12393        /* Set up SLI-3 specific device PCI memory space */
12394        error = lpfc_sli_pci_mem_setup(phba);
12395        if (error) {
12396                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12397                                "1402 Failed to set up pci memory space.\n");
12398                goto out_disable_pci_dev;
12399        }
12400
12401        /* Set up SLI-3 specific device driver resources */
12402        error = lpfc_sli_driver_resource_setup(phba);
12403        if (error) {
12404                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12405                                "1404 Failed to set up driver resource.\n");
12406                goto out_unset_pci_mem_s3;
12407        }
12408
12409        /* Initialize and populate the iocb list per host */
12410
12411        error = lpfc_init_iocb_list(phba, LPFC_IOCB_LIST_CNT);
12412        if (error) {
12413                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12414                                "1405 Failed to initialize iocb list.\n");
12415                goto out_unset_driver_resource_s3;
12416        }
12417
12418        /* Set up common device driver resources */
12419        error = lpfc_setup_driver_resource_phase2(phba);
12420        if (error) {
12421                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12422                                "1406 Failed to set up driver resource.\n");
12423                goto out_free_iocb_list;
12424        }
12425
12426        /* Get the default values for Model Name and Description */
12427        lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);
12428
12429        /* Create SCSI host to the physical port */
12430        error = lpfc_create_shost(phba);
12431        if (error) {
12432                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12433                                "1407 Failed to create scsi host.\n");
12434                goto out_unset_driver_resource;
12435        }
12436
12437        /* Configure sysfs attributes */
12438        vport = phba->pport;
12439        error = lpfc_alloc_sysfs_attr(vport);
12440        if (error) {
12441                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12442                                "1476 Failed to allocate sysfs attr\n");
12443                goto out_destroy_shost;
12444        }
12445
12446        shost = lpfc_shost_from_vport(vport); /* save shost for error cleanup */
12447        /* Now, try to enable interrupt and bring up the device */
12448        cfg_mode = phba->cfg_use_msi;
12449        while (true) {
12450                /* Put device to a known state before enabling interrupt */
12451                lpfc_stop_port(phba);
12452                /* Configure and enable interrupt */
12453                intr_mode = lpfc_sli_enable_intr(phba, cfg_mode);
12454                if (intr_mode == LPFC_INTR_ERROR) {
12455                        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
12456                                        "0431 Failed to enable interrupt.\n");
12457                        error = -ENODEV;
12458                        goto out_free_sysfs_attr;
12459                }
12460                /* SLI-3 HBA setup */
12461                if (lpfc_sli_hba_setup(phba)) {
12462                        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
12463                                        "1477 Failed to set up hba\n");
12464                        error = -ENODEV;
12465                        goto out_remove_device;
12466                }
12467
12468                /* Wait 50ms for the interrupts of previous mailbox commands */
12469                msleep(50);
12470                /* Check active interrupts on message signaled interrupts */
12471                if (intr_mode == 0 ||
12472                    phba->sli.slistat.sli_intr > LPFC_MSIX_VECTORS) {
12473                        /* Log the current active interrupt mode */
12474                        phba->intr_mode = intr_mode;
12475                        lpfc_log_intr_mode(phba, intr_mode);
12476                        break;
12477                } else {
12478                        lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
12479                                        "0447 Configure interrupt mode (%d) "
12480                                        "failed active interrupt test.\n",
12481                                        intr_mode);
12482                        /* Disable the current interrupt mode */
12483                        lpfc_sli_disable_intr(phba);
12484                        /* Try next level of interrupt mode */
12485                        cfg_mode = --intr_mode;
12486                }
12487        }
12488
12489        /* Perform post initialization setup */
12490        lpfc_post_init_setup(phba);
12491
12492        /* Check if there are static vports to be created. */
12493        lpfc_create_static_vport(phba);
12494
12495        return 0;
12496
12497out_remove_device:
12498        lpfc_unset_hba(phba);
12499out_free_sysfs_attr:
12500        lpfc_free_sysfs_attr(vport);
12501out_destroy_shost:
12502        lpfc_destroy_shost(phba);
12503out_unset_driver_resource:
12504        lpfc_unset_driver_resource_phase2(phba);
12505out_free_iocb_list:
12506        lpfc_free_iocb_list(phba);
12507out_unset_driver_resource_s3:
12508        lpfc_sli_driver_resource_unset(phba);
12509out_unset_pci_mem_s3:
12510        lpfc_sli_pci_mem_unset(phba);
12511out_disable_pci_dev:
12512        lpfc_disable_pci_dev(phba);
12513        if (shost)
12514                scsi_host_put(shost);
12515out_free_phba:
12516        lpfc_hba_free(phba);
12517        return error;
12518}
12519
12520/**
12521 * lpfc_pci_remove_one_s3 - PCI func to unreg SLI-3 device from PCI subsystem.
12522 * @pdev: pointer to PCI device
12523 *
12524 * This routine is to be called to detach a device with SLI-3 interface
12525 * spec from PCI subsystem. When an Emulex HBA with SLI-3 interface spec is
12526 * removed from PCI bus, it performs all the necessary cleanup for the HBA
12527 * device to be removed from the PCI subsystem properly.
12528 **/
12529static void
12530lpfc_pci_remove_one_s3(struct pci_dev *pdev)
12531{
12532        struct Scsi_Host  *shost = pci_get_drvdata(pdev);
12533        struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
12534        struct lpfc_vport **vports;
12535        struct lpfc_hba   *phba = vport->phba;
12536        int i;
12537
12538        spin_lock_irq(&phba->hbalock);
12539        vport->load_flag |= FC_UNLOADING;
12540        spin_unlock_irq(&phba->hbalock);
12541
12542        lpfc_free_sysfs_attr(vport);
12543
12544        /* Release all the vports against this physical port */
12545        vports = lpfc_create_vport_work_array(phba);
12546        if (vports != NULL)
12547                for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
12548                        if (vports[i]->port_type == LPFC_PHYSICAL_PORT)
12549                                continue;
12550                        fc_vport_terminate(vports[i]->fc_vport);
12551                }
12552        lpfc_destroy_vport_work_array(phba, vports);
12553
12554        /* Remove FC host with the physical port */
12555        fc_remove_host(shost);
12556        scsi_remove_host(shost);
12557
12558        /* Clean up all nodes, mailboxes and IOs. */
12559        lpfc_cleanup(vport);
12560
12561        /*
12562         * Bring down the SLI Layer. This step disables all interrupts,
12563         * clears the rings, discards all mailbox commands, and resets
12564         * the HBA.
12565         */
12566
12567        /* HBA interrupt will be disabled after this call */
12568        lpfc_sli_hba_down(phba);
12569        /* The kthread stop signal shall trigger work_done one more time */
12570        kthread_stop(phba->worker_thread);
12571        /* Final cleanup of txcmplq and reset the HBA */
12572        lpfc_sli_brdrestart(phba);
12573
12574        kfree(phba->vpi_bmask);
12575        kfree(phba->vpi_ids);
12576
12577        lpfc_stop_hba_timers(phba);
12578        spin_lock_irq(&phba->port_list_lock);
12579        list_del_init(&vport->listentry);
12580        spin_unlock_irq(&phba->port_list_lock);
12581
12582        lpfc_debugfs_terminate(vport);
12583
12584        /* Disable SR-IOV if enabled */
12585        if (phba->cfg_sriov_nr_virtfn)
12586                pci_disable_sriov(pdev);
12587
12588        /* Disable interrupt */
12589        lpfc_sli_disable_intr(phba);
12590
12591        scsi_host_put(shost);
12592
12593        /*
12594         * Call scsi_free before mem_free since scsi bufs are released to their
12595         * corresponding pools here.
12596         */
12597        lpfc_scsi_free(phba);
12598        lpfc_free_iocb_list(phba);
12599
12600        lpfc_mem_free_all(phba);
12601
12602        dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(),
12603                          phba->hbqslimp.virt, phba->hbqslimp.phys);
12604
12605        /* Free resources associated with SLI2 interface */
12606        dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
12607                          phba->slim2p.virt, phba->slim2p.phys);
12608
12609        /* unmap adapter SLIM and Control Registers */
12610        iounmap(phba->ctrl_regs_memmap_p);
12611        iounmap(phba->slim_memmap_p);
12612
12613        lpfc_hba_free(phba);
12614
12615        pci_release_mem_regions(pdev);
12616        pci_disable_device(pdev);
12617}
12618
12619/**
12620 * lpfc_pci_suspend_one_s3 - PCI func to suspend SLI-3 device for power mgmnt
12621 * @dev_d: pointer to device
12622 *
12623 * This routine is to be called from the kernel's PCI subsystem to support
12624 * system Power Management (PM) to device with SLI-3 interface spec. When
12625 * PM invokes this method, it quiesces the device by stopping the driver's
12626 * worker thread for the device, turning off the device's interrupt and DMA,
12627 * and bringing the device offline. Note that because the driver implements
12628 * only the minimum PM requirements for a power-aware driver's suspend/resume
12629 * support, all possible PM messages (SUSPEND, HIBERNATE, FREEZE) to the
12630 * suspend() method call are treated as SUSPEND and the driver fully
12631 * reinitializes its device during the resume() method call; the driver
12632 * therefore sets the device to the PCI_D3hot state in PCI config space
12633 * instead of setting it according to the @msg provided by the PM.
12634 *
12635 * Return code
12636 *      0 - driver suspended the device
12637 *      Error otherwise
12638 **/
12639static int __maybe_unused
12640lpfc_pci_suspend_one_s3(struct device *dev_d)
12641{
12642        struct Scsi_Host *shost = dev_get_drvdata(dev_d);
12643        struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
12644
12645        lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
12646                        "0473 PCI device Power Management suspend.\n");
12647
12648        /* Bring down the device */
12649        lpfc_offline_prep(phba, LPFC_MBX_WAIT);
12650        lpfc_offline(phba);
12651        kthread_stop(phba->worker_thread);
12652
12653        /* Disable interrupt from device */
12654        lpfc_sli_disable_intr(phba);
12655
12656        return 0;
12657}
12658
12659/**
12660 * lpfc_pci_resume_one_s3 - PCI func to resume SLI-3 device for power mgmnt
12661 * @dev_d: pointer to device
12662 *
12663 * This routine is to be called from the kernel's PCI subsystem to support
12664 * system Power Management (PM) to device with SLI-3 interface spec. When PM
12665 * invokes this method, it restores the device's PCI config space state and
12666 * fully reinitializes the device and brings it online. Note that because
12667 * the driver implements only the minimum PM requirements for a power-aware
12668 * driver's suspend/resume support, all possible PM messages (SUSPEND,
12669 * HIBERNATE, FREEZE) to the suspend() method call are treated as SUSPEND
12670 * and the driver fully reinitializes its device during the resume() method
12671 * call; the device is therefore set to PCI_D0 directly in PCI config space
12672 * before restoring the state.
12673 *
12674 * Return code
12675 *      0 - driver resumed the device
12676 *      Error otherwise
12677 **/
12678static int __maybe_unused
12679lpfc_pci_resume_one_s3(struct device *dev_d)
12680{
12681        struct Scsi_Host *shost = dev_get_drvdata(dev_d);
12682        struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
12683        uint32_t intr_mode;
12684        int error;
12685
12686        lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
12687                        "0452 PCI device Power Management resume.\n");
12688
12689        /* Startup the kernel thread for this host adapter. */
12690        phba->worker_thread = kthread_run(lpfc_do_work, phba,
12691                                        "lpfc_worker_%d", phba->brd_no);
12692        if (IS_ERR(phba->worker_thread)) {
12693                error = PTR_ERR(phba->worker_thread);
12694                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12695                                "0434 PM resume failed to start worker "
12696                                "thread: error=x%x.\n", error);
12697                return error;
12698        }
12699
12700        /* Configure and enable interrupt */
12701        intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode);
12702        if (intr_mode == LPFC_INTR_ERROR) {
12703                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
12704                                "0430 PM resume Failed to enable interrupt\n");
12705                return -EIO;
12706        } else
12707                phba->intr_mode = intr_mode;
12708
12709        /* Restart HBA and bring it online */
12710        lpfc_sli_brdrestart(phba);
12711        lpfc_online(phba);
12712
12713        /* Log the current active interrupt mode */
12714        lpfc_log_intr_mode(phba, phba->intr_mode);
12715
12716        return 0;
12717}
12718
12719/**
12720 * lpfc_sli_prep_dev_for_recover - Prepare SLI3 device for pci slot recovery
12721 * @phba: pointer to lpfc hba data structure.
12722 *
12723 * This routine is called to prepare the SLI3 device for PCI slot recovery. It
12724 * aborts all the outstanding SCSI I/Os to the pci device.
12725 **/
12726static void
12727lpfc_sli_prep_dev_for_recover(struct lpfc_hba *phba)
12728{
12729        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
12730                        "2723 PCI channel I/O abort preparing for recovery\n");
12731
12732        /*
12733         * There may be errored I/Os through the HBA; abort all I/Os on the
12734         * txcmplq and let the SCSI mid-layer retry them to recover.
12735         */
12736        lpfc_sli_abort_fcp_rings(phba);
12737}
12738
12739/**
12740 * lpfc_sli_prep_dev_for_reset - Prepare SLI3 device for pci slot reset
12741 * @phba: pointer to lpfc hba data structure.
12742 *
12743 * This routine is called to prepare the SLI3 device for PCI slot reset. It
12744 * disables the device interrupt and pci device, and aborts the internal FCP
12745 * pending I/Os.
12746 **/
12747static void
12748lpfc_sli_prep_dev_for_reset(struct lpfc_hba *phba)
12749{
12750        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
12751                        "2710 PCI channel disable preparing for reset\n");
12752
12753        /* Block any management I/Os to the device */
12754        lpfc_block_mgmt_io(phba, LPFC_MBX_WAIT);
12755
12756        /* Block all SCSI devices' I/Os on the host */
12757        lpfc_scsi_dev_block(phba);
12758
12759        /* Flush all driver's outstanding SCSI I/Os as we are to reset */
12760        lpfc_sli_flush_io_rings(phba);
12761
12762        /* stop all timers */
12763        lpfc_stop_hba_timers(phba);
12764
12765        /* Disable interrupt and pci device */
12766        lpfc_sli_disable_intr(phba);
12767        pci_disable_device(phba->pcidev);
12768}
12769
12770/**
12771 * lpfc_sli_prep_dev_for_perm_failure - Prepare SLI3 dev for pci slot disable
12772 * @phba: pointer to lpfc hba data structure.
12773 *
12774 * This routine is called to prepare the SLI3 device for permanent PCI slot
12775 * disabling. It blocks the SCSI transport layer traffic and flushes the FCP
12776 * pending I/Os.
12777 **/
12778static void
12779lpfc_sli_prep_dev_for_perm_failure(struct lpfc_hba *phba)
12780{
12781        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
12782                        "2711 PCI channel permanent disable for failure\n");
12783        /* Block all SCSI devices' I/Os on the host */
12784        lpfc_scsi_dev_block(phba);
12785
12786        /* stop all timers */
12787        lpfc_stop_hba_timers(phba);
12788
12789        /* Clean up all driver's outstanding SCSI I/Os */
12790        lpfc_sli_flush_io_rings(phba);
12791}
12792
12793/**
12794 * lpfc_io_error_detected_s3 - Method for handling SLI-3 device PCI I/O error
12795 * @pdev: pointer to PCI device.
12796 * @state: the current PCI connection state.
12797 *
12798 * This routine is called from the PCI subsystem for I/O error handling to
12799 * device with SLI-3 interface spec. This function is called by the PCI
12800 * subsystem after a PCI bus error affecting this device has been detected.
12801 * When this function is invoked, it will need to stop all the I/Os and
12802 * interrupt(s) to the device. Once that is done, it will return
12803 * PCI_ERS_RESULT_NEED_RESET for the PCI subsystem to perform proper recovery
12804 * as desired.
12805 *
12806 * Return codes
12807 *      PCI_ERS_RESULT_CAN_RECOVER - can be recovered with reset_link
12808 *      PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
12809 *      PCI_ERS_RESULT_DISCONNECT - device could not be recovered
12810 **/
12811static pci_ers_result_t
12812lpfc_io_error_detected_s3(struct pci_dev *pdev, pci_channel_state_t state)
12813{
12814        struct Scsi_Host *shost = pci_get_drvdata(pdev);
12815        struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
12816
12817        switch (state) {
12818        case pci_channel_io_normal:
12819                /* Non-fatal error, prepare for recovery */
12820                lpfc_sli_prep_dev_for_recover(phba);
12821                return PCI_ERS_RESULT_CAN_RECOVER;
12822        case pci_channel_io_frozen:
12823                /* Fatal error, prepare for slot reset */
12824                lpfc_sli_prep_dev_for_reset(phba);
12825                return PCI_ERS_RESULT_NEED_RESET;
12826        case pci_channel_io_perm_failure:
12827                /* Permanent failure, prepare for device down */
12828                lpfc_sli_prep_dev_for_perm_failure(phba);
12829                return PCI_ERS_RESULT_DISCONNECT;
12830        default:
12831                /* Unknown state, prepare and request slot reset */
12832                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
12833                                "0472 Unknown PCI error state: x%x\n", state);
12834                lpfc_sli_prep_dev_for_reset(phba);
12835                return PCI_ERS_RESULT_NEED_RESET;
12836        }
12837}
12838
12839/**
12840 * lpfc_io_slot_reset_s3 - Method for restarting PCI SLI-3 device from scratch.
12841 * @pdev: pointer to PCI device.
12842 *
12843 * This routine is called from the PCI subsystem for error handling on a
12844 * device with the SLI-3 interface spec. It is called after the PCI bus has been
12845 * reset to restart the PCI card from scratch, as if from a cold-boot.
12846 * During the PCI subsystem error recovery, after driver returns
12847 * PCI_ERS_RESULT_NEED_RESET, the PCI subsystem will perform proper error
12848 * recovery and then call this routine before calling the .resume method
12849 * to recover the device. This function will initialize the HBA device,
12850 * enable the interrupt, but it will just put the HBA to offline state
12851 * without passing any I/O traffic.
12852 *
12853 * Return codes
12854 *      PCI_ERS_RESULT_RECOVERED - the device has been recovered
12855 *      PCI_ERS_RESULT_DISCONNECT - device could not be recovered
12856 */
12857static pci_ers_result_t
12858lpfc_io_slot_reset_s3(struct pci_dev *pdev)
12859{
12860        struct Scsi_Host *shost = pci_get_drvdata(pdev);
12861        struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
12862        struct lpfc_sli *psli = &phba->sli;
12863        uint32_t intr_mode;
12864
12865        dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n");
12866        if (pci_enable_device_mem(pdev)) {
12867                printk(KERN_ERR "lpfc: Cannot re-enable "
12868                        "PCI device after reset.\n");
12869                return PCI_ERS_RESULT_DISCONNECT;
12870        }
12871
12872        pci_restore_state(pdev);
12873
12874        /*
12875         * pci_restore_state() clears the device's saved_state flag, so the
12876         * restored state must be saved again.
12877         */
12878        pci_save_state(pdev);
12879
12880        if (pdev->is_busmaster)
12881                pci_set_master(pdev);
12882
12883        spin_lock_irq(&phba->hbalock);
12884        psli->sli_flag &= ~LPFC_SLI_ACTIVE;
12885        spin_unlock_irq(&phba->hbalock);
12886
12887        /* Configure and enable interrupt */
12888        intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode);
12889        if (intr_mode == LPFC_INTR_ERROR) {
12890                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
12891                                "0427 Cannot re-enable interrupt after "
12892                                "slot reset.\n");
12893                return PCI_ERS_RESULT_DISCONNECT;
12894        } else
12895                phba->intr_mode = intr_mode;
12896
12897        /* Take device offline, it will perform cleanup */
12898        lpfc_offline_prep(phba, LPFC_MBX_WAIT);
12899        lpfc_offline(phba);
12900        lpfc_sli_brdrestart(phba);
12901
12902        /* Log the current active interrupt mode */
12903        lpfc_log_intr_mode(phba, phba->intr_mode);
12904
12905        return PCI_ERS_RESULT_RECOVERED;
12906}
12907
12908/**
12909 * lpfc_io_resume_s3 - Method for resuming PCI I/O operation on SLI-3 device.
12910 * @pdev: pointer to PCI device
12911 *
12912 * This routine is called from the PCI subsystem for error handling on a device
12913 * with the SLI-3 interface spec. It is called when kernel error recovery tells
12914 * the lpfc driver that it is ok to resume normal PCI operation after PCI bus
12915 * error recovery. After this call, traffic can start to flow from this device
12916 * again.
12917 */
12918static void
12919lpfc_io_resume_s3(struct pci_dev *pdev)
12920{
12921        struct Scsi_Host *shost = pci_get_drvdata(pdev);
12922        struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
12923
12924        /* Bring device online; this is a no-op for non-fatal error resume */
12925        lpfc_online(phba);
12926}
12927
12928/**
12929 * lpfc_sli4_get_els_iocb_cnt - Calculate the # of ELS IOCBs to reserve
12930 * @phba: pointer to lpfc hba data structure.
12931 *
12932 * returns the number of ELS/CT IOCBs to reserve
12933 **/
12934int
12935lpfc_sli4_get_els_iocb_cnt(struct lpfc_hba *phba)
12936{
12937        int max_xri = phba->sli4_hba.max_cfg_param.max_xri;
12938
12939        if (phba->sli_rev == LPFC_SLI_REV4) {
12940                if (max_xri <= 100)
12941                        return 10;
12942                else if (max_xri <= 256)
12943                        return 25;
12944                else if (max_xri <= 512)
12945                        return 50;
12946                else if (max_xri <= 1024)
12947                        return 100;
12948                else if (max_xri <= 1536)
12949                        return 150;
12950                else if (max_xri <= 2048)
12951                        return 200;
12952                else
12953                        return 250;
12954        } else
12955                return 0;
12956}
12957
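/*
 * Worked example (editorial note, not part of the driver source): the
 * reservation above scales in steps of roughly ten percent of max_xri
 * and is capped at 250.  An SLI-4 port reporting max_xri = 1024 thus
 * reserves 100 ELS/CT IOCBs, while any port reporting more than 2048
 * XRIs reserves the 250-IOCB maximum.
 */
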
12958/**
12959 * lpfc_sli4_get_iocb_cnt - Calculate the # of total IOCBs to reserve
12960 * @phba: pointer to lpfc hba data structure.
12961 *
12962 * returns the number of ELS/CT + NVMET IOCBs to reserve
12963 **/
12964int
12965lpfc_sli4_get_iocb_cnt(struct lpfc_hba *phba)
12966{
12967        int max_xri = lpfc_sli4_get_els_iocb_cnt(phba);
12968
12969        if (phba->nvmet_support)
12970                max_xri += LPFC_NVMET_BUF_POST;
12971        return max_xri;
12972}
12973
12974
12975static int
12976lpfc_log_write_firmware_error(struct lpfc_hba *phba, uint32_t offset,
12977        uint32_t magic_number, uint32_t ftype, uint32_t fid, uint32_t fsize,
12978        const struct firmware *fw)
12979{
12980        int rc;
12981
12982        /* Three cases:  (1) FW was not supported on the detected adapter.
12983         * (2) FW update has been locked out administratively.
12984         * (3) Some other error during FW update.
12985         * In each case, an unmaskable message is written to the console
12986         * for admin diagnosis.
12987         */
12988        if (offset == ADD_STATUS_FW_NOT_SUPPORTED ||
12989            (phba->pcidev->device == PCI_DEVICE_ID_LANCER_G6_FC &&
12990             magic_number != MAGIC_NUMBER_G6) ||
12991            (phba->pcidev->device == PCI_DEVICE_ID_LANCER_G7_FC &&
12992             magic_number != MAGIC_NUMBER_G7)) {
12993                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
12994                                "3030 This firmware version is not supported on"
12995                                " this HBA model. Device:%x Magic:%x Type:%x "
12996                                "ID:%x Size %d %zd\n",
12997                                phba->pcidev->device, magic_number, ftype, fid,
12998                                fsize, fw->size);
12999                rc = -EINVAL;
13000        } else if (offset == ADD_STATUS_FW_DOWNLOAD_HW_DISABLED) {
13001                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13002                                "3021 Firmware downloads have been prohibited "
13003                                "by a system configuration setting on "
13004                                "Device:%x Magic:%x Type:%x ID:%x Size %d "
13005                                "%zd\n",
13006                                phba->pcidev->device, magic_number, ftype, fid,
13007                                fsize, fw->size);
13008                rc = -EACCES;
13009        } else {
13010                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13011                                "3022 FW Download failed. Add Status x%x "
13012                                "Device:%x Magic:%x Type:%x ID:%x Size %d "
13013                                "%zd\n",
13014                                offset, phba->pcidev->device, magic_number,
13015                                ftype, fid, fsize, fw->size);
13016                rc = -EIO;
13017        }
13018        return rc;
13019}
13020
13021/**
13022 * lpfc_write_firmware - attempt to write a firmware image to the port
13023 * @fw: pointer to firmware image returned from request_firmware.
13024 * @context: pointer to the lpfc hba data structure (the driver context).
13025 *
13026 **/
13027static void
13028lpfc_write_firmware(const struct firmware *fw, void *context)
13029{
13030        struct lpfc_hba *phba = (struct lpfc_hba *)context;
13031        char fwrev[FW_REV_STR_SIZE];
13032        struct lpfc_grp_hdr *image;
13033        struct list_head dma_buffer_list;
13034        int i, rc = 0;
13035        struct lpfc_dmabuf *dmabuf, *next;
13036        uint32_t offset = 0, temp_offset = 0;
13037        uint32_t magic_number, ftype, fid, fsize;
13038
13039        /* The firmware image can be NULL in no-wait mode; sanity check */
13040        if (!fw) {
13041                rc = -ENXIO;
13042                goto out;
13043        }
13044        image = (struct lpfc_grp_hdr *)fw->data;
13045
13046        magic_number = be32_to_cpu(image->magic_number);
13047        ftype = bf_get_be32(lpfc_grp_hdr_file_type, image);
13048        fid = bf_get_be32(lpfc_grp_hdr_id, image);
13049        fsize = be32_to_cpu(image->size);
13050
13051        INIT_LIST_HEAD(&dma_buffer_list);
13052        lpfc_decode_firmware_rev(phba, fwrev, 1);
13053        if (strncmp(fwrev, image->revision, strnlen(image->revision, 16))) {
13054                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13055                                "3023 Updating Firmware, Current Version:%s "
13056                                "New Version:%s\n",
13057                                fwrev, image->revision);
13058                for (i = 0; i < LPFC_MBX_WR_CONFIG_MAX_BDE; i++) {
13059                        dmabuf = kzalloc(sizeof(struct lpfc_dmabuf),
13060                                         GFP_KERNEL);
13061                        if (!dmabuf) {
13062                                rc = -ENOMEM;
13063                                goto release_out;
13064                        }
13065                        dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
13066                                                          SLI4_PAGE_SIZE,
13067                                                          &dmabuf->phys,
13068                                                          GFP_KERNEL);
13069                        if (!dmabuf->virt) {
13070                                kfree(dmabuf);
13071                                rc = -ENOMEM;
13072                                goto release_out;
13073                        }
13074                        list_add_tail(&dmabuf->list, &dma_buffer_list);
13075                }
13076                while (offset < fw->size) {
13077                        temp_offset = offset;
13078                        list_for_each_entry(dmabuf, &dma_buffer_list, list) {
13079                                if (temp_offset + SLI4_PAGE_SIZE > fw->size) {
13080                                        memcpy(dmabuf->virt,
13081                                               fw->data + temp_offset,
13082                                               fw->size - temp_offset);
13083                                        temp_offset = fw->size;
13084                                        break;
13085                                }
13086                                memcpy(dmabuf->virt, fw->data + temp_offset,
13087                                       SLI4_PAGE_SIZE);
13088                                temp_offset += SLI4_PAGE_SIZE;
13089                        }
13090                        rc = lpfc_wr_object(phba, &dma_buffer_list,
13091                                    (fw->size - offset), &offset);
13092                        if (rc) {
13093                                rc = lpfc_log_write_firmware_error(phba, offset,
13094                                                                   magic_number,
13095                                                                   ftype,
13096                                                                   fid,
13097                                                                   fsize,
13098                                                                   fw);
13099                                goto release_out;
13100                        }
13101                }
13102                rc = offset;
13103        } else
13104                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13105                                "3029 Skipped Firmware update, Current "
13106                                "Version:%s New Version:%s\n",
13107                                fwrev, image->revision);
13108
13109release_out:
13110        list_for_each_entry_safe(dmabuf, next, &dma_buffer_list, list) {
13111                list_del(&dmabuf->list);
13112                dma_free_coherent(&phba->pcidev->dev, SLI4_PAGE_SIZE,
13113                                  dmabuf->virt, dmabuf->phys);
13114                kfree(dmabuf);
13115        }
13116        release_firmware(fw);
13117out:
13118        if (rc < 0)
13119                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13120                                "3062 Firmware update error, status %d.\n", rc);
13121        else
13122                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13123                                "3024 Firmware update success: size %d.\n", rc);
13124}
13125
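/*
 * Editorial sketch of the chunking scheme used above, as self-contained
 * user-space C.  PAGE_SZ, NBUFS, copy_chunks() and pool[] are
 * illustrative stand-ins for SLI4_PAGE_SIZE, LPFC_MBX_WR_CONFIG_MAX_BDE,
 * lpfc_write_firmware() and the DMA buffer list; they are not driver
 * API.  Each pass fills the buffer pool from the image at "offset", and
 * only the last buffer of the last pass may be partially filled.
 */
#include <stdio.h>
#include <string.h>

#define PAGE_SZ 4096
#define NBUFS   4

static void copy_chunks(const unsigned char *img, size_t size)
{
	static unsigned char pool[NBUFS][PAGE_SZ];
	size_t offset = 0;

	while (offset < size) {
		size_t temp = offset;
		int i;

		for (i = 0; i < NBUFS && temp < size; i++) {
			size_t len = size - temp;

			if (len > PAGE_SZ)
				len = PAGE_SZ;
			memcpy(pool[i], img + temp, len);
			temp += len;
		}
		/* the driver would now hand pool[0..i-1] to the port via
		 * lpfc_wr_object(), which advances "offset" for it
		 */
		printf("staged %zu bytes at offset %zu\n", temp - offset,
		       offset);
		offset = temp;
	}
}

int main(void)
{
	static unsigned char img[9000];

	/* one pass: 4096 + 4096 + 808 bytes across three buffers */
	copy_chunks(img, sizeof(img));
	return 0;
}
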
13126/**
13127 * lpfc_sli4_request_firmware_update - Request linux generic firmware upgrade
13128 * @phba: pointer to lpfc hba data structure.
13129 * @fw_upgrade: which firmware to update.
13130 *
13131 * This routine is called to perform a Linux generic firmware upgrade on a
13132 * device that supports this feature.
13133 **/
13134int
13135lpfc_sli4_request_firmware_update(struct lpfc_hba *phba, uint8_t fw_upgrade)
13136{
13137        char file_name[ELX_MODEL_NAME_SIZE];
13138        int ret;
13139        const struct firmware *fw;
13140
13141        /* Only supported on SLI4 interface type 2 for now */
13142        if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) <
13143            LPFC_SLI_INTF_IF_TYPE_2)
13144                return -EPERM;
13145
13146        snprintf(file_name, ELX_MODEL_NAME_SIZE, "%s.grp", phba->ModelName);
13147
13148        if (fw_upgrade == INT_FW_UPGRADE) {
13149                ret = request_firmware_nowait(THIS_MODULE, FW_ACTION_UEVENT,
13150                                        file_name, &phba->pcidev->dev,
13151                                        GFP_KERNEL, (void *)phba,
13152                                        lpfc_write_firmware);
13153        } else if (fw_upgrade == RUN_FW_UPGRADE) {
13154                ret = request_firmware(&fw, file_name, &phba->pcidev->dev);
13155                if (!ret)
13156                        lpfc_write_firmware(fw, (void *)phba);
13157        } else {
13158                ret = -EINVAL;
13159        }
13160
13161        return ret;
13162}
13163
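/*
 * Example (editorial note): for a port whose ModelName were "LPe32000",
 * the routine above would ask the kernel firmware loader for
 * "LPe32000.grp", either asynchronously (INT_FW_UPGRADE, through
 * request_firmware_nowait() with lpfc_write_firmware() as the
 * completion callback) or synchronously (RUN_FW_UPGRADE, through
 * request_firmware() followed by a direct lpfc_write_firmware() call).
 */
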
13164/**
13165 * lpfc_pci_probe_one_s4 - PCI probe func to reg SLI-4 device to PCI subsys
13166 * @pdev: pointer to PCI device
13167 * @pid: pointer to PCI device identifier
13168 *
13169 * This routine is called from the kernel's PCI subsystem for a device with
13170 * the SLI-4 interface spec. When an Emulex HBA with the SLI-4 interface spec
13171 * is presented on the PCI bus, the kernel PCI subsystem looks at the PCI
13172 * device-specific information of the device and driver to see whether the
13173 * driver can support this kind of device. If the match is successful, the driver
13174 * core invokes this routine. If this routine determines it can claim the HBA,
13175 * it does all the initialization that it needs to do to handle the HBA
13176 * properly.
13177 *
13178 * Return code
13179 *      0 - driver can claim the device
13180 *      negative value - driver can not claim the device
13181 **/
13182static int
13183lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
13184{
13185        struct lpfc_hba   *phba;
13186        struct lpfc_vport *vport = NULL;
13187        struct Scsi_Host  *shost = NULL;
13188        int error;
13189        uint32_t cfg_mode, intr_mode;
13190
13191        /* Allocate memory for HBA structure */
13192        phba = lpfc_hba_alloc(pdev);
13193        if (!phba)
13194                return -ENOMEM;
13195
13196        INIT_LIST_HEAD(&phba->poll_list);
13197
13198        /* Perform generic PCI device enabling operation */
13199        error = lpfc_enable_pci_dev(phba);
13200        if (error)
13201                goto out_free_phba;
13202
13203        /* Set up SLI API function jump table for PCI-device group-1 HBAs */
13204        error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_OC);
13205        if (error)
13206                goto out_disable_pci_dev;
13207
13208        /* Set up SLI-4 specific device PCI memory space */
13209        error = lpfc_sli4_pci_mem_setup(phba);
13210        if (error) {
13211                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13212                                "1410 Failed to set up pci memory space.\n");
13213                goto out_disable_pci_dev;
13214        }
13215
13216        /* Set up SLI-4 Specific device driver resources */
13217        error = lpfc_sli4_driver_resource_setup(phba);
13218        if (error) {
13219                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13220                                "1412 Failed to set up driver resource.\n");
13221                goto out_unset_pci_mem_s4;
13222        }
13223
13224        INIT_LIST_HEAD(&phba->active_rrq_list);
13225        INIT_LIST_HEAD(&phba->fcf.fcf_pri_list);
13226
13227        /* Set up common device driver resources */
13228        error = lpfc_setup_driver_resource_phase2(phba);
13229        if (error) {
13230                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13231                                "1414 Failed to set up driver resource.\n");
13232                goto out_unset_driver_resource_s4;
13233        }
13234
13235        /* Get the default values for Model Name and Description */
13236        lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);
13237
13238        /* Now, trying to enable interrupt and bring up the device */
13239        cfg_mode = phba->cfg_use_msi;
13240
13241        /* Put device to a known state before enabling interrupt */
13242        phba->pport = NULL;
13243        lpfc_stop_port(phba);
13244
13245        /* Init cpu_map array */
13246        lpfc_cpu_map_array_init(phba);
13247
13248        /* Init hba_eq_hdl array */
13249        lpfc_hba_eq_hdl_array_init(phba);
13250
13251        /* Configure and enable interrupt */
13252        intr_mode = lpfc_sli4_enable_intr(phba, cfg_mode);
13253        if (intr_mode == LPFC_INTR_ERROR) {
13254                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13255                                "0426 Failed to enable interrupt.\n");
13256                error = -ENODEV;
13257                goto out_unset_driver_resource;
13258        }
13259        /* Default to single EQ for non-MSI-X */
13260        if (phba->intr_type != MSIX) {
13261                phba->cfg_irq_chann = 1;
13262                if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
13263                        if (phba->nvmet_support)
13264                                phba->cfg_nvmet_mrq = 1;
13265                }
13266        }
13267        lpfc_cpu_affinity_check(phba, phba->cfg_irq_chann);
13268
13269        /* Create SCSI host to the physical port */
13270        error = lpfc_create_shost(phba);
13271        if (error) {
13272                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13273                                "1415 Failed to create scsi host.\n");
13274                goto out_disable_intr;
13275        }
13276        vport = phba->pport;
13277        shost = lpfc_shost_from_vport(vport); /* save shost for error cleanup */
13278
13279        /* Configure sysfs attributes */
13280        error = lpfc_alloc_sysfs_attr(vport);
13281        if (error) {
13282                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13283                                "1416 Failed to allocate sysfs attr\n");
13284                goto out_destroy_shost;
13285        }
13286
13287        /* Set up SLI-4 HBA */
13288        if (lpfc_sli4_hba_setup(phba)) {
13289                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13290                                "1421 Failed to set up hba\n");
13291                error = -ENODEV;
13292                goto out_free_sysfs_attr;
13293        }
13294
13295        /* Log the current active interrupt mode */
13296        phba->intr_mode = intr_mode;
13297        lpfc_log_intr_mode(phba, intr_mode);
13298
13299        /* Perform post initialization setup */
13300        lpfc_post_init_setup(phba);
13301
13302        /* NVME support in FW earlier in the driver load corrects the
13303         * FC4 type, making a check for nvme_support unnecessary.
13304         */
13305        if (phba->nvmet_support == 0) {
13306                if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
13307                        /* Create NVME binding with nvme_fc_transport. This
13308                         * ensures the vport is initialized.  If the localport
13309                         * create fails, it should not unload the driver to
13310                         * support field issues.
13311                         */
13312                        error = lpfc_nvme_create_localport(vport);
13313                        if (error) {
13314                                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13315                                                "6004 NVME registration "
13316                                                "failed, error x%x\n",
13317                                                error);
13318                        }
13319                }
13320        }
13321
13322        /* check for firmware upgrade or downgrade */
13323        if (phba->cfg_request_firmware_upgrade)
13324                lpfc_sli4_request_firmware_update(phba, INT_FW_UPGRADE);
13325
13326        /* Check if there are static vports to be created. */
13327        lpfc_create_static_vport(phba);
13328
13329        /* Enable RAS FW log support */
13330        lpfc_sli4_ras_setup(phba);
13331
13332        timer_setup(&phba->cpuhp_poll_timer, lpfc_sli4_poll_hbtimer, 0);
13333        cpuhp_state_add_instance_nocalls(lpfc_cpuhp_state, &phba->cpuhp);
13334
13335        return 0;
13336
13337out_free_sysfs_attr:
13338        lpfc_free_sysfs_attr(vport);
13339out_destroy_shost:
13340        lpfc_destroy_shost(phba);
13341out_disable_intr:
13342        lpfc_sli4_disable_intr(phba);
13343out_unset_driver_resource:
13344        lpfc_unset_driver_resource_phase2(phba);
13345out_unset_driver_resource_s4:
13346        lpfc_sli4_driver_resource_unset(phba);
13347out_unset_pci_mem_s4:
13348        lpfc_sli4_pci_mem_unset(phba);
13349out_disable_pci_dev:
13350        lpfc_disable_pci_dev(phba);
13351        if (shost)
13352                scsi_host_put(shost);
13353out_free_phba:
13354        lpfc_hba_free(phba);
13355        return error;
13356}
13357
13358/**
13359 * lpfc_pci_remove_one_s4 - PCI func to unreg SLI-4 device from PCI subsystem
13360 * @pdev: pointer to PCI device
13361 *
13362 * This routine is called from the kernel's PCI subsystem for a device with
13363 * the SLI-4 interface spec. When an Emulex HBA with the SLI-4 interface spec is
13364 * removed from PCI bus, it performs all the necessary cleanup for the HBA
13365 * device to be removed from the PCI subsystem properly.
13366 **/
13367static void
13368lpfc_pci_remove_one_s4(struct pci_dev *pdev)
13369{
13370        struct Scsi_Host *shost = pci_get_drvdata(pdev);
13371        struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
13372        struct lpfc_vport **vports;
13373        struct lpfc_hba *phba = vport->phba;
13374        int i;
13375
13376        /* Mark the device unloading flag */
13377        spin_lock_irq(&phba->hbalock);
13378        vport->load_flag |= FC_UNLOADING;
13379        spin_unlock_irq(&phba->hbalock);
13380
13381        lpfc_free_sysfs_attr(vport);
13382
13383        /* Release all the vports against this physical port */
13384        vports = lpfc_create_vport_work_array(phba);
13385        if (vports != NULL)
13386                for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
13387                        if (vports[i]->port_type == LPFC_PHYSICAL_PORT)
13388                                continue;
13389                        fc_vport_terminate(vports[i]->fc_vport);
13390                }
13391        lpfc_destroy_vport_work_array(phba, vports);
13392
13393        /* Remove FC host with the physical port */
13394        fc_remove_host(shost);
13395        scsi_remove_host(shost);
13396
13397        /* Perform ndlp cleanup on the physical port.  The nvme and nvmet
13398         * localports are destroyed afterwards to clean up all transport memory.
13399         */
13400        lpfc_cleanup(vport);
13401        lpfc_nvmet_destroy_targetport(phba);
13402        lpfc_nvme_destroy_localport(vport);
13403
13404        /* De-allocate multi-XRI pools */
13405        if (phba->cfg_xri_rebalancing)
13406                lpfc_destroy_multixri_pools(phba);
13407
13408        /*
13409         * Bring down the SLI Layer. This step disables all interrupts,
13410         * clears the rings, discards all mailbox commands, and resets
13411         * the HBA FCoE function.
13412         */
13413        lpfc_debugfs_terminate(vport);
13414
13415        lpfc_stop_hba_timers(phba);
13416        spin_lock_irq(&phba->port_list_lock);
13417        list_del_init(&vport->listentry);
13418        spin_unlock_irq(&phba->port_list_lock);
13419
13420        /* Perform scsi free before driver resource_unset since scsi
13421         * buffers are released to their corresponding pools here.
13422         */
13423        lpfc_io_free(phba);
13424        lpfc_free_iocb_list(phba);
13425        lpfc_sli4_hba_unset(phba);
13426
13427        lpfc_unset_driver_resource_phase2(phba);
13428        lpfc_sli4_driver_resource_unset(phba);
13429
13430        /* Unmap adapter Control and Doorbell registers */
13431        lpfc_sli4_pci_mem_unset(phba);
13432
13433        /* Release PCI resources and disable device's PCI function */
13434        scsi_host_put(shost);
13435        lpfc_disable_pci_dev(phba);
13436
13437        /* Finally, free the driver's device data structure */
13438        lpfc_hba_free(phba);
13439
13440        return;
13441}
13442
13443/**
13444 * lpfc_pci_suspend_one_s4 - PCI func to suspend SLI-4 device for power mgmnt
13445 * @dev_d: pointer to device
13446 *
13447 * This routine is called from the kernel's PCI subsystem to support system
13448 * Power Management (PM) for a device with the SLI-4 interface spec. When PM
13449 * invokes this method, it quiesces the device by stopping the driver's worker
13450 * thread for the device, turning off the device's interrupt and DMA, and
13451 * bringing the device offline. Note that the driver implements only the
13452 * minimum PM requirements of a power-aware driver for suspend/resume: all
13453 * possible PM messages (SUSPEND, HIBERNATE, FREEZE) delivered to the
13454 * suspend() method are treated as SUSPEND, and the driver fully
13455 * reinitializes the device during the resume() call. The driver therefore
13456 * sets the device to the PCI_D3hot state in PCI config space instead of
13457 * setting it according to the specific PM message.
13458 *
13459 * Return code
13460 *      0 - driver suspended the device
13461 *      Error otherwise
13462 **/
13463static int __maybe_unused
13464lpfc_pci_suspend_one_s4(struct device *dev_d)
13465{
13466        struct Scsi_Host *shost = dev_get_drvdata(dev_d);
13467        struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
13468
13469        lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
13470                        "2843 PCI device Power Management suspend.\n");
13471
13472        /* Bring down the device */
13473        lpfc_offline_prep(phba, LPFC_MBX_WAIT);
13474        lpfc_offline(phba);
13475        kthread_stop(phba->worker_thread);
13476
13477        /* Disable interrupt from device */
13478        lpfc_sli4_disable_intr(phba);
13479        lpfc_sli4_queue_destroy(phba);
13480
13481        return 0;
13482}
13483
13484/**
13485 * lpfc_pci_resume_one_s4 - PCI func to resume SLI-4 device for power mgmnt
13486 * @dev_d: pointer to device
13487 *
13488 * This routine is called from the kernel's PCI subsystem to support system
13489 * Power Management (PM) for a device with the SLI-4 interface spec. When PM
13490 * invokes this method, it restores the device's PCI config space state and
13491 * fully reinitializes the device and brings it online. Note that the driver
13492 * implements only the minimum PM requirements of a power-aware driver for
13493 * suspend/resume: all possible PM messages (SUSPEND, HIBERNATE, FREEZE)
13494 * delivered to the suspend() method are treated as SUSPEND, and the driver
13495 * fully reinitializes the device during the resume() call. The device is
13496 * therefore set to PCI_D0 directly in PCI config space before restoring the
13497 * state.
13498 *
13499 * Return code
13500 *      0 - driver resumed the device
13501 *      Error otherwise
13502 **/
13503static int __maybe_unused
13504lpfc_pci_resume_one_s4(struct device *dev_d)
13505{
13506        struct Scsi_Host *shost = dev_get_drvdata(dev_d);
13507        struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
13508        uint32_t intr_mode;
13509        int error;
13510
13511        lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
13512                        "0292 PCI device Power Management resume.\n");
13513
13514        /* Start up the kernel thread for this host adapter. */
13515        phba->worker_thread = kthread_run(lpfc_do_work, phba,
13516                                        "lpfc_worker_%d", phba->brd_no);
13517        if (IS_ERR(phba->worker_thread)) {
13518                error = PTR_ERR(phba->worker_thread);
13519                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13520                                "0293 PM resume failed to start worker "
13521                                "thread: error=x%x.\n", error);
13522                return error;
13523        }
13524
13525        /* Configure and enable interrupt */
13526        intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
13527        if (intr_mode == LPFC_INTR_ERROR) {
13528                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13529                                "0294 PM resume Failed to enable interrupt\n");
13530                return -EIO;
13531        } else
13532                phba->intr_mode = intr_mode;
13533
13534        /* Restart HBA and bring it online */
13535        lpfc_sli_brdrestart(phba);
13536        lpfc_online(phba);
13537
13538        /* Log the current active interrupt mode */
13539        lpfc_log_intr_mode(phba, phba->intr_mode);
13540
13541        return 0;
13542}
13543
13544/**
13545 * lpfc_sli4_prep_dev_for_recover - Prepare SLI4 device for pci slot recover
13546 * @phba: pointer to lpfc hba data structure.
13547 *
13548 * This routine is called to prepare the SLI-4 device for PCI slot recovery. It
13549 * aborts all the outstanding SCSI I/Os to the PCI device.
13550 **/
13551static void
13552lpfc_sli4_prep_dev_for_recover(struct lpfc_hba *phba)
13553{
13554        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13555                        "2828 PCI channel I/O abort preparing for recovery\n");
13556        /*
13557         * There may be errored I/Os through the HBA; abort all I/Os on the
13558         * txcmplq and let the SCSI mid-layer retry them to recover.
13559         */
13560        lpfc_sli_abort_fcp_rings(phba);
13561}
13562
13563/**
13564 * lpfc_sli4_prep_dev_for_reset - Prepare SLI4 device for pci slot reset
13565 * @phba: pointer to lpfc hba data structure.
13566 *
13567 * This routine is called to prepare the SLI-4 device for a PCI slot reset. It
13568 * disables the device interrupt and the PCI device, and aborts the pending
13569 * internal FCP I/Os.
13570 **/
13571static void
13572lpfc_sli4_prep_dev_for_reset(struct lpfc_hba *phba)
13573{
13574        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13575                        "2826 PCI channel disable preparing for reset\n");
13576
13577        /* Block any management I/Os to the device */
13578        lpfc_block_mgmt_io(phba, LPFC_MBX_NO_WAIT);
13579
13580        /* Block all SCSI devices' I/Os on the host */
13581        lpfc_scsi_dev_block(phba);
13582
13583        /* Flush all driver's outstanding I/Os as we are to reset */
13584        lpfc_sli_flush_io_rings(phba);
13585
13586        /* stop all timers */
13587        lpfc_stop_hba_timers(phba);
13588
13589        /* Disable interrupt and pci device */
13590        lpfc_sli4_disable_intr(phba);
13591        lpfc_sli4_queue_destroy(phba);
13592        pci_disable_device(phba->pcidev);
13593}
13594
13595/**
13596 * lpfc_sli4_prep_dev_for_perm_failure - Prepare SLI4 dev for pci slot disable
13597 * @phba: pointer to lpfc hba data structure.
13598 *
13599 * This routine is called to prepare the SLI-4 device for the PCI slot being
13600 * permanently disabled. It blocks SCSI transport layer traffic and flushes
13601 * the pending FCP I/Os.
13602 **/
13603static void
13604lpfc_sli4_prep_dev_for_perm_failure(struct lpfc_hba *phba)
13605{
13606        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13607                        "2827 PCI channel permanent disable for failure\n");
13608
13609        /* Block all SCSI devices' I/Os on the host */
13610        lpfc_scsi_dev_block(phba);
13611
13612        /* stop all timers */
13613        lpfc_stop_hba_timers(phba);
13614
13615        /* Clean up all driver's outstanding I/Os */
13616        lpfc_sli_flush_io_rings(phba);
13617}
13618
13619/**
13620 * lpfc_io_error_detected_s4 - Method for handling PCI I/O error to SLI-4 device
13621 * @pdev: pointer to PCI device.
13622 * @state: the current PCI connection state.
13623 *
13624 * This routine is called from the PCI subsystem for error handling on a device
13625 * with the SLI-4 interface spec. This function is called by the PCI subsystem
13626 * after a PCI bus error affecting this device has been detected. When this
13627 * function is invoked, it will need to stop all the I/Os and interrupt(s)
13628 * to the device. Once that is done, it will return PCI_ERS_RESULT_NEED_RESET
13629 * for the PCI subsystem to perform proper recovery as desired.
13630 *
13631 * Return codes
13632 *      PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
13633 *      PCI_ERS_RESULT_DISCONNECT - device could not be recovered
13634 **/
13635static pci_ers_result_t
13636lpfc_io_error_detected_s4(struct pci_dev *pdev, pci_channel_state_t state)
13637{
13638        struct Scsi_Host *shost = pci_get_drvdata(pdev);
13639        struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
13640
13641        switch (state) {
13642        case pci_channel_io_normal:
13643                /* Non-fatal error, prepare for recovery */
13644                lpfc_sli4_prep_dev_for_recover(phba);
13645                return PCI_ERS_RESULT_CAN_RECOVER;
13646        case pci_channel_io_frozen:
13647                /* Fatal error, prepare for slot reset */
13648                lpfc_sli4_prep_dev_for_reset(phba);
13649                return PCI_ERS_RESULT_NEED_RESET;
13650        case pci_channel_io_perm_failure:
13651                /* Permanent failure, prepare for device down */
13652                lpfc_sli4_prep_dev_for_perm_failure(phba);
13653                return PCI_ERS_RESULT_DISCONNECT;
13654        default:
13655                /* Unknown state, prepare and request slot reset */
13656                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13657                                "2825 Unknown PCI error state: x%x\n", state);
13658                lpfc_sli4_prep_dev_for_reset(phba);
13659                return PCI_ERS_RESULT_NEED_RESET;
13660        }
13661}
13662
13663/**
13664 * lpfc_io_slot_reset_s4 - Method for restart PCI SLI-4 device from scratch
13665 * @pdev: pointer to PCI device.
13666 *
13667 * This routine is called from the PCI subsystem for error handling on a device
13668 * with the SLI-4 interface spec. It is called after the PCI bus has been reset to
13669 * restart the PCI card from scratch, as if from a cold-boot. During the
13670 * PCI subsystem error recovery, after the driver returns
13671 * PCI_ERS_RESULT_NEED_RESET, the PCI subsystem will perform proper error
13672 * recovery and then call this routine before calling the .resume method to
13673 * recover the device. This function will initialize the HBA device, enable
13674 * the interrupt, but it will just put the HBA to offline state without
13675 * passing any I/O traffic.
13676 *
13677 * Return codes
13678 *      PCI_ERS_RESULT_RECOVERED - the device has been recovered
13679 *      PCI_ERS_RESULT_DISCONNECT - device could not be recovered
13680 */
13681static pci_ers_result_t
13682lpfc_io_slot_reset_s4(struct pci_dev *pdev)
13683{
13684        struct Scsi_Host *shost = pci_get_drvdata(pdev);
13685        struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
13686        struct lpfc_sli *psli = &phba->sli;
13687        uint32_t intr_mode;
13688
13689        dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n");
13690        if (pci_enable_device_mem(pdev)) {
13691                printk(KERN_ERR "lpfc: Cannot re-enable "
13692                        "PCI device after reset.\n");
13693                return PCI_ERS_RESULT_DISCONNECT;
13694        }
13695
13696        pci_restore_state(pdev);
13697
13698        /*
13699         * pci_restore_state() clears the device's saved_state flag, so the
13700         * restored state must be saved again.
13701         */
13702        pci_save_state(pdev);
13703
13704        if (pdev->is_busmaster)
13705                pci_set_master(pdev);
13706
13707        spin_lock_irq(&phba->hbalock);
13708        psli->sli_flag &= ~LPFC_SLI_ACTIVE;
13709        spin_unlock_irq(&phba->hbalock);
13710
13711        /* Configure and enable interrupt */
13712        intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
13713        if (intr_mode == LPFC_INTR_ERROR) {
13714                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13715                                "2824 Cannot re-enable interrupt after "
13716                                "slot reset.\n");
13717                return PCI_ERS_RESULT_DISCONNECT;
13718        } else
13719                phba->intr_mode = intr_mode;
13720
13721        /* Log the current active interrupt mode */
13722        lpfc_log_intr_mode(phba, phba->intr_mode);
13723
13724        return PCI_ERS_RESULT_RECOVERED;
13725}
13726
13727/**
13728 * lpfc_io_resume_s4 - Method for resuming PCI I/O operation to SLI-4 device
13729 * @pdev: pointer to PCI device
13730 *
13731 * This routine is called from the PCI subsystem for error handling on a device
13732 * with the SLI-4 interface spec. It is called when kernel error recovery tells
13733 * the lpfc driver that it is ok to resume normal PCI operation after PCI bus
13734 * error recovery. After this call, traffic can start to flow from this device
13735 * again.
13736 **/
13737static void
13738lpfc_io_resume_s4(struct pci_dev *pdev)
13739{
13740        struct Scsi_Host *shost = pci_get_drvdata(pdev);
13741        struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
13742
13743        /*
13744         * In case of a slot reset, the function reset is performed through
13745         * a mailbox command, which needs DMA to be enabled, so this
13746         * operation has to happen in the io resume phase. Taking the
13747         * device offline performs the necessary cleanup.
13748         */
13749        if (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE)) {
13750                /* Perform device reset */
13751                lpfc_offline_prep(phba, LPFC_MBX_WAIT);
13752                lpfc_offline(phba);
13753                lpfc_sli_brdrestart(phba);
13754                /* Bring the device back online */
13755                lpfc_online(phba);
13756        }
13757}
13758
13759/**
13760 * lpfc_pci_probe_one - lpfc PCI probe func to reg dev to PCI subsystem
13761 * @pdev: pointer to PCI device
13762 * @pid: pointer to PCI device identifier
13763 *
13764 * This routine is to be registered to the kernel's PCI subsystem. When an
13765 * Emulex HBA device is presented on PCI bus, the kernel PCI subsystem looks
13766 * at the PCI device-specific information of the device and driver to see
13767 * whether the driver can support this kind of device. If the match is
13768 * successful, the driver core invokes this routine. This routine dispatches
13769 * the action to the proper SLI-3 or SLI-4 device probing routine, which will
13770 * do all the initialization that it needs to do to handle the HBA device
13771 * properly.
13772 *
13773 * Return code
13774 *      0 - driver can claim the device
13775 *      negative value - driver can not claim the device
13776 **/
13777static int
13778lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
13779{
13780        int rc;
13781        struct lpfc_sli_intf intf;
13782
13783        if (pci_read_config_dword(pdev, LPFC_SLI_INTF, &intf.word0))
13784                return -ENODEV;
13785
13786        if ((bf_get(lpfc_sli_intf_valid, &intf) == LPFC_SLI_INTF_VALID) &&
13787            (bf_get(lpfc_sli_intf_slirev, &intf) == LPFC_SLI_INTF_REV_SLI4))
13788                rc = lpfc_pci_probe_one_s4(pdev, pid);
13789        else
13790                rc = lpfc_pci_probe_one_s3(pdev, pid);
13791
13792        return rc;
13793}
13794
13795/**
13796 * lpfc_pci_remove_one - lpfc PCI func to unreg dev from PCI subsystem
13797 * @pdev: pointer to PCI device
13798 *
13799 * This routine is to be registered to the kernel's PCI subsystem. When an
13800 * Emulex HBA is removed from PCI bus, the driver core invokes this routine.
13801 * This routine dispatches the action to the proper SLI-3 or SLI-4 device
13802 * remove routine, which will perform all the necessary cleanup for the
13803 * device to be removed from the PCI subsystem properly.
13804 **/
13805static void
13806lpfc_pci_remove_one(struct pci_dev *pdev)
13807{
13808        struct Scsi_Host *shost = pci_get_drvdata(pdev);
13809        struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
13810
13811        switch (phba->pci_dev_grp) {
13812        case LPFC_PCI_DEV_LP:
13813                lpfc_pci_remove_one_s3(pdev);
13814                break;
13815        case LPFC_PCI_DEV_OC:
13816                lpfc_pci_remove_one_s4(pdev);
13817                break;
13818        default:
13819                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13820                                "1424 Invalid PCI device group: 0x%x\n",
13821                                phba->pci_dev_grp);
13822                break;
13823        }
13824        return;
13825}
13826
13827/**
13828 * lpfc_pci_suspend_one - lpfc PCI func to suspend dev for power management
13829 * @dev: pointer to device
13830 *
13831 * This routine is to be registered to the kernel's PCI subsystem to support
13832 * system Power Management (PM). When PM invokes this method, it dispatches
13833 * the action to the proper SLI-3 or SLI-4 device suspend routine, which will
13834 * suspend the device.
13835 *
13836 * Return code
13837 *      0 - driver suspended the device
13838 *      Error otherwise
13839 **/
13840static int __maybe_unused
13841lpfc_pci_suspend_one(struct device *dev)
13842{
13843        struct Scsi_Host *shost = dev_get_drvdata(dev);
13844        struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
13845        int rc = -ENODEV;
13846
13847        switch (phba->pci_dev_grp) {
13848        case LPFC_PCI_DEV_LP:
13849                rc = lpfc_pci_suspend_one_s3(dev);
13850                break;
13851        case LPFC_PCI_DEV_OC:
13852                rc = lpfc_pci_suspend_one_s4(dev);
13853                break;
13854        default:
13855                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13856                                "1425 Invalid PCI device group: 0x%x\n",
13857                                phba->pci_dev_grp);
13858                break;
13859        }
13860        return rc;
13861}
13862
13863/**
13864 * lpfc_pci_resume_one - lpfc PCI func to resume dev for power management
13865 * @dev: pointer to device
13866 *
13867 * This routine is to be registered to the kernel's PCI subsystem to support
13868 * system Power Management (PM). When PM invokes this method, it dispatches
13869 * the action to the proper SLI-3 or SLI-4 device resume routine, which will
13870 * resume the device.
13871 *
13872 * Return code
13873 *      0 - driver resumed the device
13874 *      Error otherwise
13875 **/
13876static int __maybe_unused
13877lpfc_pci_resume_one(struct device *dev)
13878{
13879        struct Scsi_Host *shost = dev_get_drvdata(dev);
13880        struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
13881        int rc = -ENODEV;
13882
13883        switch (phba->pci_dev_grp) {
13884        case LPFC_PCI_DEV_LP:
13885                rc = lpfc_pci_resume_one_s3(dev);
13886                break;
13887        case LPFC_PCI_DEV_OC:
13888                rc = lpfc_pci_resume_one_s4(dev);
13889                break;
13890        default:
13891                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13892                                "1426 Invalid PCI device group: 0x%x\n",
13893                                phba->pci_dev_grp);
13894                break;
13895        }
13896        return rc;
13897}
13898
13899/**
13900 * lpfc_io_error_detected - lpfc method for handling PCI I/O error
13901 * @pdev: pointer to PCI device.
13902 * @state: the current PCI connection state.
13903 *
13904 * This routine is registered to the PCI subsystem for error handling. This
13905 * function is called by the PCI subsystem after a PCI bus error affecting
13906 * this device has been detected. When this routine is invoked, it dispatches
13907 * the action to the proper SLI-3 or SLI-4 device error detected handling
13908 * routine, which will perform the proper error detected operation.
13909 *
13910 * Return codes
13911 *      PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
13912 *      PCI_ERS_RESULT_DISCONNECT - device could not be recovered
13913 **/
13914static pci_ers_result_t
13915lpfc_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
13916{
13917        struct Scsi_Host *shost = pci_get_drvdata(pdev);
13918        struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
13919        pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
13920
13921        switch (phba->pci_dev_grp) {
13922        case LPFC_PCI_DEV_LP:
13923                rc = lpfc_io_error_detected_s3(pdev, state);
13924                break;
13925        case LPFC_PCI_DEV_OC:
13926                rc = lpfc_io_error_detected_s4(pdev, state);
13927                break;
13928        default:
13929                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13930                                "1427 Invalid PCI device group: 0x%x\n",
13931                                phba->pci_dev_grp);
13932                break;
13933        }
13934        return rc;
13935}
13936
13937/**
13938 * lpfc_io_slot_reset - lpfc method for restart PCI dev from scratch
13939 * @pdev: pointer to PCI device.
13940 *
13941 * This routine is registered to the PCI subsystem for error handling. This
13942 * function is called after PCI bus has been reset to restart the PCI card
13943 * from scratch, as if from a cold-boot. When this routine is invoked, it
13944 * dispatches the action to the proper SLI-3 or SLI-4 device reset handling
13945 * routine, which will perform the proper device reset.
13946 *
13947 * Return codes
13948 *      PCI_ERS_RESULT_RECOVERED - the device has been recovered
13949 *      PCI_ERS_RESULT_DISCONNECT - device could not be recovered
13950 **/
13951static pci_ers_result_t
13952lpfc_io_slot_reset(struct pci_dev *pdev)
13953{
13954        struct Scsi_Host *shost = pci_get_drvdata(pdev);
13955        struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
13956        pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
13957
13958        switch (phba->pci_dev_grp) {
13959        case LPFC_PCI_DEV_LP:
13960                rc = lpfc_io_slot_reset_s3(pdev);
13961                break;
13962        case LPFC_PCI_DEV_OC:
13963                rc = lpfc_io_slot_reset_s4(pdev);
13964                break;
13965        default:
13966                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13967                                "1428 Invalid PCI device group: 0x%x\n",
13968                                phba->pci_dev_grp);
13969                break;
13970        }
13971        return rc;
13972}
13973
13974/**
13975 * lpfc_io_resume - lpfc method for resuming PCI I/O operation
13976 * @pdev: pointer to PCI device
13977 *
13978 * This routine is registered to the PCI subsystem for error handling. It
13979 * is called when kernel error recovery tells the lpfc driver that it is
13980 * OK to resume normal PCI operation after PCI bus error recovery. When
13981 * this routine is invoked, it dispatches the action to the proper SLI-3
13982 * or SLI-4 device io_resume routine, which will resume the device operation.
13983 **/
13984static void
13985lpfc_io_resume(struct pci_dev *pdev)
13986{
13987        struct Scsi_Host *shost = pci_get_drvdata(pdev);
13988        struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
13989
13990        switch (phba->pci_dev_grp) {
13991        case LPFC_PCI_DEV_LP:
13992                lpfc_io_resume_s3(pdev);
13993                break;
13994        case LPFC_PCI_DEV_OC:
13995                lpfc_io_resume_s4(pdev);
13996                break;
13997        default:
13998                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13999                                "1429 Invalid PCI device group: 0x%x\n",
14000                                phba->pci_dev_grp);
14001                break;
14002        }
14003        return;
14004}
14005
14006/**
14007 * lpfc_sli4_oas_verify - Verify OAS is supported by this adapter
14008 * @phba: pointer to lpfc hba data structure.
14009 *
14010 * This routine checks to see if OAS is supported by this adapter. If
14011 * supported, the Flash Optimized Fabric (FOF) configuration flag is set.
14012 * Otherwise, the flag is cleared and the pool created for OAS device
14013 * data is destroyed.
14014 *
14015 **/
14016static void
14017lpfc_sli4_oas_verify(struct lpfc_hba *phba)
14018{
14019
14020        if (!phba->cfg_EnableXLane)
14021                return;
14022
14023        if (phba->sli4_hba.pc_sli4_params.oas_supported) {
14024                phba->cfg_fof = 1;
14025        } else {
14026                phba->cfg_fof = 0;
14027                mempool_destroy(phba->device_data_mem_pool);
14028                phba->device_data_mem_pool = NULL;
14029        }
14030
14031        return;
14032}
14033
14034/**
14035 * lpfc_sli4_ras_init - Verify RAS-FW log is supported by this adapter
14036 * @phba: pointer to lpfc hba data structure.
14037 *
14038 * This routine checks to see if RAS is supported by the adapter, and whether
14039 * firmware logging is enabled on the PCI function designated for RAS support.
14040 **/
14041void
14042lpfc_sli4_ras_init(struct lpfc_hba *phba)
14043{
14044        switch (phba->pcidev->device) {
14045        case PCI_DEVICE_ID_LANCER_G6_FC:
14046        case PCI_DEVICE_ID_LANCER_G7_FC:
14047                phba->ras_fwlog.ras_hwsupport = true;
14048                if (phba->cfg_ras_fwlog_func == PCI_FUNC(phba->pcidev->devfn) &&
14049                    phba->cfg_ras_fwlog_buffsize)
14050                        phba->ras_fwlog.ras_enabled = true;
14051                else
14052                        phba->ras_fwlog.ras_enabled = false;
14053                break;
14054        default:
14055                phba->ras_fwlog.ras_hwsupport = false;
14056        }
14057}
14058
14059
14060MODULE_DEVICE_TABLE(pci, lpfc_id_table);
14061
14062static const struct pci_error_handlers lpfc_err_handler = {
14063        .error_detected = lpfc_io_error_detected,
14064        .slot_reset = lpfc_io_slot_reset,
14065        .resume = lpfc_io_resume,
14066};
14067
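/*
 * Recovery flow (editorial summary, per the kernel's PCI error recovery
 * documentation): on a reported channel error the PCI core first calls
 * .error_detected().  If the driver returns PCI_ERS_RESULT_NEED_RESET,
 * the core resets the slot and calls .slot_reset(); once that returns
 * PCI_ERS_RESULT_RECOVERED, the core calls .resume() and I/O may flow
 * again.
 */
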
14068static SIMPLE_DEV_PM_OPS(lpfc_pci_pm_ops_one,
14069                         lpfc_pci_suspend_one,
14070                         lpfc_pci_resume_one);
14071
14072static struct pci_driver lpfc_driver = {
14073        .name           = LPFC_DRIVER_NAME,
14074        .id_table       = lpfc_id_table,
14075        .probe          = lpfc_pci_probe_one,
14076        .remove         = lpfc_pci_remove_one,
14077        .shutdown       = lpfc_pci_remove_one,
14078        .driver.pm      = &lpfc_pci_pm_ops_one,
14079        .err_handler    = &lpfc_err_handler,
14080};
14081
14082static const struct file_operations lpfc_mgmt_fop = {
14083        .owner = THIS_MODULE,
14084};
14085
14086static struct miscdevice lpfc_mgmt_dev = {
14087        .minor = MISC_DYNAMIC_MINOR,
14088        .name = "lpfcmgmt",
14089        .fops = &lpfc_mgmt_fop,
14090};
14091
14092/**
14093 * lpfc_init - lpfc module initialization routine
14094 *
14095 * This routine is to be invoked when the lpfc module is loaded into the
14096 * kernel. The special kernel macro module_init() is used to indicate the
14097 * role of this routine to the kernel as lpfc module entry point.
14098 *
14099 * Return codes
14100 *   0 - successful
14101 *   -ENOMEM - FC attach transport failed
14102 *   all others - failed
14103 */
14104static int __init
14105lpfc_init(void)
14106{
14107        int error = 0;
14108
14109        pr_info(LPFC_MODULE_DESC "\n");
14110        pr_info(LPFC_COPYRIGHT "\n");
14111
14112        error = misc_register(&lpfc_mgmt_dev);
14113        if (error)
14114                printk(KERN_ERR "Could not register lpfcmgmt device, "
14115                        "misc_register returned with status %d\n", error);
14116
14117        error = -ENOMEM;
14118        lpfc_transport_functions.vport_create = lpfc_vport_create;
14119        lpfc_transport_functions.vport_delete = lpfc_vport_delete;
14120        lpfc_transport_template =
14121                                fc_attach_transport(&lpfc_transport_functions);
14122        if (lpfc_transport_template == NULL)
14123                goto unregister;
14124        lpfc_vport_transport_template =
14125                fc_attach_transport(&lpfc_vport_transport_functions);
14126        if (lpfc_vport_transport_template == NULL) {
14127                fc_release_transport(lpfc_transport_template);
14128                goto unregister;
14129        }
14130        lpfc_wqe_cmd_template();
14131        lpfc_nvmet_cmd_template();
14132
14133        /* Initialize in case vector mapping is needed */
14134        lpfc_present_cpu = num_present_cpus();
14135
14136        error = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
14137                                        "lpfc/sli4:online",
14138                                        lpfc_cpu_online, lpfc_cpu_offline);
14139        if (error < 0)
14140                goto cpuhp_failure;
14141        lpfc_cpuhp_state = error;
14142
14143        error = pci_register_driver(&lpfc_driver);
14144        if (error)
14145                goto unwind;
14146
14147        return error;
14148
14149unwind:
14150        cpuhp_remove_multi_state(lpfc_cpuhp_state);
14151cpuhp_failure:
14152        fc_release_transport(lpfc_transport_template);
14153        fc_release_transport(lpfc_vport_transport_template);
14154unregister:
14155        misc_deregister(&lpfc_mgmt_dev);
14156
14157        return error;
14158}
14159
void lpfc_dmp_dbg(struct lpfc_hba *phba)
{
        unsigned int start_idx;
        unsigned int dbg_cnt;
        unsigned int temp_idx;
        int i;
        int j = 0;
        unsigned long rem_nsec;
        struct lpfc_vport **vports;

        /* Don't dump messages if we explicitly set log_verbose for the
         * physical port or any vport.
         */
        if (phba->cfg_log_verbose)
                return;

        vports = lpfc_create_vport_work_array(phba);
        if (vports != NULL) {
                for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
                        if (vports[i]->cfg_log_verbose) {
                                lpfc_destroy_vport_work_array(phba, vports);
                                return;
                        }
                }
        }
        lpfc_destroy_vport_work_array(phba, vports);

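        /*
         * Allow only one dumper at a time. While dbg_log_dmping is
         * set, lpfc_dbg_print() bypasses the ring and prints directly
         * so that no message is lost during the replay.
         */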
        if (atomic_cmpxchg(&phba->dbg_log_dmping, 0, 1) != 0)
                return;

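        /*
         * dbg_log_idx holds the index of the next slot to be written,
         * so the newest message sits just before it; walk back dbg_cnt
         * entries (wrapping as needed) to find the oldest one.
         */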
        start_idx = (unsigned int)atomic_read(&phba->dbg_log_idx) % DBG_LOG_SZ;
        dbg_cnt = (unsigned int)atomic_read(&phba->dbg_log_cnt);
        if (!dbg_cnt)
                goto out;
        temp_idx = start_idx;
        if (dbg_cnt >= DBG_LOG_SZ) {
                dbg_cnt = DBG_LOG_SZ;
                temp_idx -= 1;
        } else {
                if (start_idx < dbg_cnt)
                        start_idx = DBG_LOG_SZ - (dbg_cnt - start_idx);
                else
                        start_idx -= dbg_cnt;
        }
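        /*
         * Worked example (with an illustrative DBG_LOG_SZ of 16): if
         * five messages are buffered and the next write slot is 3,
         * they live in slots 14, 15, 0, 1 and 2, and start_idx becomes
         * 16 - (5 - 3) = 14. With next write slot 10 instead, they
         * live in slots 5..9 and start_idx is simply 10 - 5 = 5.
         */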
        dev_info(&phba->pcidev->dev, "start %u end %u cnt %u\n",
                 start_idx, temp_idx, dbg_cnt);

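        /*
         * Replay oldest to newest; j restarts the index at slot 0 once
         * the walk runs past the end of the ring.
         */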
        for (i = 0; i < dbg_cnt; i++) {
                if ((start_idx + i) < DBG_LOG_SZ)
                        temp_idx = start_idx + i;
                else
                        temp_idx = j++;
                rem_nsec = do_div(phba->dbg_log[temp_idx].t_ns, NSEC_PER_SEC);
                dev_info(&phba->pcidev->dev, "%u: [%5lu.%06lu] %s",
                         temp_idx,
                         (unsigned long)phba->dbg_log[temp_idx].t_ns,
                         rem_nsec / 1000,
                         phba->dbg_log[temp_idx].log);
        }
out:
        atomic_set(&phba->dbg_log_cnt, 0);
        atomic_set(&phba->dbg_log_dmping, 0);
}
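
/**
 * lpfc_dbg_print - save a driver log message in the debug ring buffer
 * @phba: pointer to lpfc hba data structure.
 * @fmt: printf-style format string.
 * @...: arguments for the format string.
 *
 * Locklessly claims the next slot in phba->dbg_log and records the
 * formatted message together with a local_clock() timestamp. If a dump
 * is in progress, the message is printed directly instead so that it
 * is not lost while the ring is being replayed.
 */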
__printf(2, 3)
void lpfc_dbg_print(struct lpfc_hba *phba, const char *fmt, ...)
{
        unsigned int idx;
        va_list args;
        int dbg_dmping = atomic_read(&phba->dbg_log_dmping);
        struct va_format vaf;

        va_start(args, fmt);
        if (unlikely(dbg_dmping)) {
                vaf.fmt = fmt;
                vaf.va = &args;
                dev_info(&phba->pcidev->dev, "%pV", &vaf);
                va_end(args);
                return;
        }
        idx = (unsigned int)atomic_fetch_add(1, &phba->dbg_log_idx) %
                DBG_LOG_SZ;

        atomic_inc(&phba->dbg_log_cnt);

        vscnprintf(phba->dbg_log[idx].log,
                   sizeof(phba->dbg_log[idx].log), fmt, args);
        va_end(args);

        phba->dbg_log[idx].t_ns = local_clock();
}
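
/*
 * Illustrative usage (a sketch, not code from the driver): messages
 * typically reach lpfc_dbg_print() through the lpfc_printf_log() and
 * lpfc_printf_vlog() macros when verbose logging is disabled, and an
 * error path then replays them, e.g.:
 *
 *      lpfc_dbg_print(phba, "io ttag x%x complete\n", tag);
 *      ...
 *      lpfc_dmp_dbg(phba);
 */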

/**
 * lpfc_exit - lpfc module removal routine
 *
 * This routine is invoked when the lpfc module is removed from the
 * kernel. The kernel macro module_exit() designates this routine as
 * the lpfc module exit point.
 */
static void __exit
lpfc_exit(void)
{
        misc_deregister(&lpfc_mgmt_dev);
        pci_unregister_driver(&lpfc_driver);
        cpuhp_remove_multi_state(lpfc_cpuhp_state);
        fc_release_transport(lpfc_transport_template);
        fc_release_transport(lpfc_vport_transport_template);
        idr_destroy(&lpfc_hba_index);
}

module_init(lpfc_init);
module_exit(lpfc_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION(LPFC_MODULE_DESC);
MODULE_AUTHOR("Broadcom");
MODULE_VERSION("0:" LPFC_DRIVER_VERSION);