linux/drivers/scsi/lpfc/lpfc_init.c
/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2017-2021 Broadcom. All Rights Reserved. The term *
 * "Broadcom" refers to Broadcom Inc. and/or its subsidiaries.     *
 * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.broadcom.com                                                *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/

#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/idr.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/ctype.h>
#include <linux/aer.h>
#include <linux/slab.h>
#include <linux/firmware.h>
#include <linux/miscdevice.h>
#include <linux/percpu.h>
#include <linux/msi.h>
#include <linux/irq.h>
#include <linux/bitops.h>
#include <linux/crash_dump.h>
#include <linux/cpu.h>
#include <linux/cpuhotplug.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>
#include <scsi/scsi_tcq.h>
#include <scsi/fc/fc_fs.h>

#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc.h"
#include "lpfc_scsi.h"
#include "lpfc_nvme.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_vport.h"
#include "lpfc_version.h"
#include "lpfc_ids.h"

static enum cpuhp_state lpfc_cpuhp_state;
/* Used when mapping IRQ vectors in a driver centric manner */
static uint32_t lpfc_present_cpu;

static void __lpfc_cpuhp_remove(struct lpfc_hba *phba);
static void lpfc_cpuhp_remove(struct lpfc_hba *phba);
static void lpfc_cpuhp_add(struct lpfc_hba *phba);
static void lpfc_get_hba_model_desc(struct lpfc_hba *, uint8_t *, uint8_t *);
static int lpfc_post_rcv_buf(struct lpfc_hba *);
static int lpfc_sli4_queue_verify(struct lpfc_hba *);
static int lpfc_create_bootstrap_mbox(struct lpfc_hba *);
static int lpfc_setup_endian_order(struct lpfc_hba *);
static void lpfc_destroy_bootstrap_mbox(struct lpfc_hba *);
static void lpfc_free_els_sgl_list(struct lpfc_hba *);
static void lpfc_free_nvmet_sgl_list(struct lpfc_hba *);
static void lpfc_init_sgl_list(struct lpfc_hba *);
static int lpfc_init_active_sgl_array(struct lpfc_hba *);
static void lpfc_free_active_sgl(struct lpfc_hba *);
static int lpfc_hba_down_post_s3(struct lpfc_hba *phba);
static int lpfc_hba_down_post_s4(struct lpfc_hba *phba);
static int lpfc_sli4_cq_event_pool_create(struct lpfc_hba *);
static void lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *);
static void lpfc_sli4_cq_event_release_all(struct lpfc_hba *);
static void lpfc_sli4_disable_intr(struct lpfc_hba *);
static uint32_t lpfc_sli4_enable_intr(struct lpfc_hba *, uint32_t);
static void lpfc_sli4_oas_verify(struct lpfc_hba *phba);
static uint16_t lpfc_find_cpu_handle(struct lpfc_hba *, uint16_t, int);
static void lpfc_setup_bg(struct lpfc_hba *, struct Scsi_Host *);
static int lpfc_sli4_cgn_parm_chg_evt(struct lpfc_hba *);

static struct scsi_transport_template *lpfc_transport_template = NULL;
static struct scsi_transport_template *lpfc_vport_transport_template = NULL;
static DEFINE_IDR(lpfc_hba_index);
#define LPFC_NVMET_BUF_POST 254
static int lpfc_vmid_res_alloc(struct lpfc_hba *phba, struct lpfc_vport *vport);

/**
 * lpfc_config_port_prep - Perform lpfc initialization prior to config port
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine will do LPFC initialization prior to issuing the CONFIG_PORT
 * mailbox command. It retrieves the revision information from the HBA and
 * collects the Vital Product Data (VPD) about the HBA for preparing the
 * configuration of the HBA.
 *
 * Return codes:
 *   0 - success.
 *   -ERESTART - requests the SLI layer to reset the HBA and try again.
 *   Any other value - indicates an error.
 **/
int
lpfc_config_port_prep(struct lpfc_hba *phba)
{
        lpfc_vpd_t *vp = &phba->vpd;
        int i = 0, rc;
        LPFC_MBOXQ_t *pmb;
        MAILBOX_t *mb;
        char *lpfc_vpd_data = NULL;
        uint16_t offset = 0;
        static char licensed[56] =
                    "key unlock for use with gnu public licensed code only\0";
        static int init_key = 1;

        pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
        if (!pmb) {
                phba->link_state = LPFC_HBA_ERROR;
                return -ENOMEM;
        }

        mb = &pmb->u.mb;
        phba->link_state = LPFC_INIT_MBX_CMDS;

        if (lpfc_is_LC_HBA(phba->pcidev->device)) {
                if (init_key) {
                        uint32_t *ptext = (uint32_t *) licensed;

                        for (i = 0; i < 56; i += sizeof (uint32_t), ptext++)
                                *ptext = cpu_to_be32(*ptext);
                        init_key = 0;
                }

                lpfc_read_nv(phba, pmb);
                memset((char*)mb->un.varRDnvp.rsvd3, 0,
                        sizeof (mb->un.varRDnvp.rsvd3));
                memcpy((char*)mb->un.varRDnvp.rsvd3, licensed,
                         sizeof (licensed));

                rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);

                if (rc != MBX_SUCCESS) {
                        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
                                        "0324 Config Port initialization "
                                        "error, mbxCmd x%x READ_NVPARM, "
                                        "mbxStatus x%x\n",
                                        mb->mbxCommand, mb->mbxStatus);
                        mempool_free(pmb, phba->mbox_mem_pool);
                        return -ERESTART;
                }
                memcpy(phba->wwnn, (char *)mb->un.varRDnvp.nodename,
                       sizeof(phba->wwnn));
                memcpy(phba->wwpn, (char *)mb->un.varRDnvp.portname,
                       sizeof(phba->wwpn));
        }

        /*
         * Clear all option bits except LPFC_SLI3_BG_ENABLED,
         * which was already set in lpfc_get_cfgparam()
         */
        phba->sli3_options &= (uint32_t)LPFC_SLI3_BG_ENABLED;

        /* Setup and issue mailbox READ REV command */
        lpfc_read_rev(phba, pmb);
        rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
        if (rc != MBX_SUCCESS) {
                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
                                "0439 Adapter failed to init, mbxCmd x%x "
                                "READ_REV, mbxStatus x%x\n",
                                mb->mbxCommand, mb->mbxStatus);
                mempool_free(pmb, phba->mbox_mem_pool);
                return -ERESTART;
        }

        /*
         * The value of rr must be 1 since the driver set the cv field to 1.
         * This setting requires the FW to set all revision fields.
         */
        if (mb->un.varRdRev.rr == 0) {
                vp->rev.rBit = 0;
                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
                                "0440 Adapter failed to init, READ_REV has "
                                "missing revision information.\n");
                mempool_free(pmb, phba->mbox_mem_pool);
                return -ERESTART;
        }

        if (phba->sli_rev == 3 && !mb->un.varRdRev.v3rsp) {
                mempool_free(pmb, phba->mbox_mem_pool);
                return -EINVAL;
        }

        /* Save information as VPD data */
        vp->rev.rBit = 1;
        memcpy(&vp->sli3Feat, &mb->un.varRdRev.sli3Feat, sizeof(uint32_t));
        vp->rev.sli1FwRev = mb->un.varRdRev.sli1FwRev;
        memcpy(vp->rev.sli1FwName, (char*) mb->un.varRdRev.sli1FwName, 16);
        vp->rev.sli2FwRev = mb->un.varRdRev.sli2FwRev;
        memcpy(vp->rev.sli2FwName, (char *) mb->un.varRdRev.sli2FwName, 16);
        vp->rev.biuRev = mb->un.varRdRev.biuRev;
        vp->rev.smRev = mb->un.varRdRev.smRev;
        vp->rev.smFwRev = mb->un.varRdRev.un.smFwRev;
        vp->rev.endecRev = mb->un.varRdRev.endecRev;
        vp->rev.fcphHigh = mb->un.varRdRev.fcphHigh;
        vp->rev.fcphLow = mb->un.varRdRev.fcphLow;
        vp->rev.feaLevelHigh = mb->un.varRdRev.feaLevelHigh;
        vp->rev.feaLevelLow = mb->un.varRdRev.feaLevelLow;
        vp->rev.postKernRev = mb->un.varRdRev.postKernRev;
        vp->rev.opFwRev = mb->un.varRdRev.opFwRev;

        /* If the sli feature level is less than 9, we must
         * tear down all RPIs and VPIs on link down if NPIV
         * is enabled.
         */
        if (vp->rev.feaLevelHigh < 9)
                phba->sli3_options |= LPFC_SLI3_VPORT_TEARDOWN;

        if (lpfc_is_LC_HBA(phba->pcidev->device))
                memcpy(phba->RandomData, (char *)&mb->un.varWords[24],
                                                sizeof (phba->RandomData));

        /* Get adapter VPD information */
        lpfc_vpd_data = kmalloc(DMP_VPD_SIZE, GFP_KERNEL);
        if (!lpfc_vpd_data)
                goto out_free_mbox;
        do {
                lpfc_dump_mem(phba, pmb, offset, DMP_REGION_VPD);
                rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);

                if (rc != MBX_SUCCESS) {
                        lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
                                        "0441 VPD not present on adapter, "
                                        "mbxCmd x%x DUMP VPD, mbxStatus x%x\n",
                                        mb->mbxCommand, mb->mbxStatus);
                        mb->un.varDmp.word_cnt = 0;
                }
                /* dump mem may return a zero when finished or we got a
                 * mailbox error, either way we are done.
                 */
                if (mb->un.varDmp.word_cnt == 0)
                        break;

                if (mb->un.varDmp.word_cnt > DMP_VPD_SIZE - offset)
                        mb->un.varDmp.word_cnt = DMP_VPD_SIZE - offset;
                lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
                                      lpfc_vpd_data + offset,
                                      mb->un.varDmp.word_cnt);
                offset += mb->un.varDmp.word_cnt;
        } while (mb->un.varDmp.word_cnt && offset < DMP_VPD_SIZE);

        lpfc_parse_vpd(phba, lpfc_vpd_data, offset);

        kfree(lpfc_vpd_data);
out_free_mbox:
        mempool_free(pmb, phba->mbox_mem_pool);
        return 0;
}

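/*
 * Illustrative sketch (not part of the driver): the polled-mailbox pattern
 * used by lpfc_config_port_prep() above. A command buffer is borrowed from
 * the HBA's mailbox mempool, a command is built into it, the command is
 * issued synchronously with MBX_POLL, and the buffer is returned to the
 * pool on every exit path. The helper name is hypothetical.
 */
static int __maybe_unused
lpfc_example_read_rev_polled(struct lpfc_hba *phba)
{
        LPFC_MBOXQ_t *pmb;
        int rc;

        pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
        if (!pmb)
                return -ENOMEM;

        /* Build the READ_REV command into the borrowed buffer */
        lpfc_read_rev(phba, pmb);

        /* MBX_POLL blocks until the mailbox completes or times out */
        rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);

        /* With MBX_POLL the caller still owns the buffer afterwards */
        mempool_free(pmb, phba->mbox_mem_pool);
        return (rc == MBX_SUCCESS) ? 0 : -EIO;
}
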
/**
 * lpfc_config_async_cmpl - Completion handler for config async event mbox cmd
 * @phba: pointer to lpfc hba data structure.
 * @pmboxq: pointer to the driver internal queue element for mailbox command.
 *
 * This is the completion handler for the driver's Config Async Event mailbox
 * command to the device. If the mailbox command returns successfully, the
 * internal async event support flag is set to 1; otherwise it is set to 0.
 **/
static void
lpfc_config_async_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq)
{
        if (pmboxq->u.mb.mbxStatus == MBX_SUCCESS)
                phba->temp_sensor_support = 1;
        else
                phba->temp_sensor_support = 0;
        mempool_free(pmboxq, phba->mbox_mem_pool);
        return;
}

/**
 * lpfc_dump_wakeup_param_cmpl - dump memory mailbox command completion handler
 * @phba: pointer to lpfc hba data structure.
 * @pmboxq: pointer to the driver internal queue element for mailbox command.
 *
 * This is the completion handler for the dump mailbox command used to get
 * wake-up parameters. When this command completes, the response contains the
 * Option ROM version of the HBA. This function translates the version number
 * into a human-readable string and stores it in OptionROMVersion.
 **/
static void
lpfc_dump_wakeup_param_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
{
        struct prog_id *prg;
        uint32_t prog_id_word;
        char dist = ' ';
        /* character array used for decoding dist type. */
        char dist_char[] = "nabx";

        if (pmboxq->u.mb.mbxStatus != MBX_SUCCESS) {
                mempool_free(pmboxq, phba->mbox_mem_pool);
                return;
        }

        prg = (struct prog_id *) &prog_id_word;

        /* word 7 contains the option rom version */
        prog_id_word = pmboxq->u.mb.un.varWords[7];

        /* Decode the Option rom version word to a readable string */
        if (prg->dist < 4)
                dist = dist_char[prg->dist];

        if ((prg->dist == 3) && (prg->num == 0))
                snprintf(phba->OptionROMVersion, 32, "%d.%d%d",
                        prg->ver, prg->rev, prg->lev);
        else
                snprintf(phba->OptionROMVersion, 32, "%d.%d%d%c%d",
                        prg->ver, prg->rev, prg->lev,
                        dist, prg->num);
        mempool_free(pmboxq, phba->mbox_mem_pool);
        return;
}

/**
 * lpfc_update_vport_wwn - Updates the fc_nodename, fc_portname,
 *      cfg_soft_wwnn, cfg_soft_wwpn
 * @vport: pointer to lpfc vport data structure.
 *
 * Return codes
 *   None.
 **/
void
lpfc_update_vport_wwn(struct lpfc_vport *vport)
{
        uint8_t vvvl = vport->fc_sparam.cmn.valid_vendor_ver_level;
        u32 *fawwpn_key = (u32 *)&vport->fc_sparam.un.vendorVersion[0];

        /* If the soft name exists then update it using the service params */
        if (vport->phba->cfg_soft_wwnn)
                u64_to_wwn(vport->phba->cfg_soft_wwnn,
                           vport->fc_sparam.nodeName.u.wwn);
        if (vport->phba->cfg_soft_wwpn)
                u64_to_wwn(vport->phba->cfg_soft_wwpn,
                           vport->fc_sparam.portName.u.wwn);

        /*
         * If the name is empty or there exists a soft name
         * then copy the service params name, otherwise use the fc name
         */
        if (vport->fc_nodename.u.wwn[0] == 0 || vport->phba->cfg_soft_wwnn)
                memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName,
                        sizeof(struct lpfc_name));
        else
                memcpy(&vport->fc_sparam.nodeName, &vport->fc_nodename,
                        sizeof(struct lpfc_name));

        /*
         * If the port name has changed, then set the Param changes flag
         * to unreg the login
         */
        if (vport->fc_portname.u.wwn[0] != 0 &&
                memcmp(&vport->fc_portname, &vport->fc_sparam.portName,
                        sizeof(struct lpfc_name)))
                vport->vport_flag |= FAWWPN_PARAM_CHG;

        if (vport->fc_portname.u.wwn[0] == 0 ||
            vport->phba->cfg_soft_wwpn ||
            (vvvl == 1 && cpu_to_be32(*fawwpn_key) == FAPWWN_KEY_VENDOR) ||
            vport->vport_flag & FAWWPN_SET) {
                memcpy(&vport->fc_portname, &vport->fc_sparam.portName,
                        sizeof(struct lpfc_name));
                vport->vport_flag &= ~FAWWPN_SET;
                if (vvvl == 1 && cpu_to_be32(*fawwpn_key) == FAPWWN_KEY_VENDOR)
                        vport->vport_flag |= FAWWPN_SET;
        } else
                memcpy(&vport->fc_sparam.portName, &vport->fc_portname,
                        sizeof(struct lpfc_name));
}

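/*
 * Illustrative sketch (not part of the driver): lpfc_update_vport_wwn()
 * above relies on the u64_to_wwn()/wwn_to_u64() helpers from
 * <scsi/scsi_transport_fc.h> to convert between the 8-byte big-endian
 * wire format kept in struct lpfc_name and the u64 form the fc_host
 * attributes expect. A round-trip, with a hypothetical function name:
 */
static u64 __maybe_unused
lpfc_example_wwn_roundtrip(u64 wwpn)
{
        uint8_t wire[8];        /* big-endian on-the-wire form */

        u64_to_wwn(wwpn, wire);
        return wwn_to_u64(wire);        /* yields the original wwpn */
}
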
/**
 * lpfc_config_port_post - Perform lpfc initialization after config port
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine will do LPFC initialization after the CONFIG_PORT mailbox
 * command call. It performs all internal resource and state setups on the
 * port: post IOCB buffers, enable appropriate host interrupt attentions,
 * ELS ring timers, etc.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
int
lpfc_config_port_post(struct lpfc_hba *phba)
{
        struct lpfc_vport *vport = phba->pport;
        struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
        LPFC_MBOXQ_t *pmb;
        MAILBOX_t *mb;
        struct lpfc_dmabuf *mp;
        struct lpfc_sli *psli = &phba->sli;
        uint32_t status, timeout;
        int i, j;
        int rc;

        spin_lock_irq(&phba->hbalock);
        /*
         * If the CONFIG_PORT command completed correctly, the HBA is no
         * longer overheated.
         */
        if (phba->over_temp_state == HBA_OVER_TEMP)
                phba->over_temp_state = HBA_NORMAL_TEMP;
        spin_unlock_irq(&phba->hbalock);

        pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
        if (!pmb) {
                phba->link_state = LPFC_HBA_ERROR;
                return -ENOMEM;
        }
        mb = &pmb->u.mb;

        /* Get login parameters for NID.  */
        rc = lpfc_read_sparam(phba, pmb, 0);
        if (rc) {
                mempool_free(pmb, phba->mbox_mem_pool);
                return -ENOMEM;
        }

        pmb->vport = vport;
        if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
                                "0448 Adapter failed init, mbxCmd x%x "
                                "READ_SPARM mbxStatus x%x\n",
                                mb->mbxCommand, mb->mbxStatus);
                phba->link_state = LPFC_HBA_ERROR;
                mp = (struct lpfc_dmabuf *)pmb->ctx_buf;
                mempool_free(pmb, phba->mbox_mem_pool);
                lpfc_mbuf_free(phba, mp->virt, mp->phys);
                kfree(mp);
                return -EIO;
        }

        mp = (struct lpfc_dmabuf *)pmb->ctx_buf;

        memcpy(&vport->fc_sparam, mp->virt, sizeof (struct serv_parm));
        lpfc_mbuf_free(phba, mp->virt, mp->phys);
        kfree(mp);
        pmb->ctx_buf = NULL;
        lpfc_update_vport_wwn(vport);

        /* Update the fc_host data structures with new wwn. */
        fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
        fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
        fc_host_max_npiv_vports(shost) = phba->max_vpi;

        /* If no serial number in VPD data, use low 6 bytes of WWNN */
        /* This should be consolidated into parse_vpd ? - mr */
        if (phba->SerialNumber[0] == 0) {
                uint8_t *outptr;

                outptr = &vport->fc_nodename.u.s.IEEE[0];
                for (i = 0; i < 12; i++) {
                        status = *outptr++;
                        j = ((status & 0xf0) >> 4);
                        if (j <= 9)
                                phba->SerialNumber[i] =
                                    (char)((uint8_t) 0x30 + (uint8_t) j);
                        else
                                phba->SerialNumber[i] =
                                    (char)((uint8_t) 0x61 + (uint8_t) (j - 10));
                        i++;
                        j = (status & 0xf);
                        if (j <= 9)
                                phba->SerialNumber[i] =
                                    (char)((uint8_t) 0x30 + (uint8_t) j);
                        else
                                phba->SerialNumber[i] =
                                    (char)((uint8_t) 0x61 + (uint8_t) (j - 10));
                }
        }

        lpfc_read_config(phba, pmb);
        pmb->vport = vport;
        if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
                                "0453 Adapter failed to init, mbxCmd x%x "
                                "READ_CONFIG, mbxStatus x%x\n",
                                mb->mbxCommand, mb->mbxStatus);
                phba->link_state = LPFC_HBA_ERROR;
                mempool_free(pmb, phba->mbox_mem_pool);
                return -EIO;
        }

        /* Check if the port is disabled */
        lpfc_sli_read_link_ste(phba);

        /* Reset the DFT_HBA_Q_DEPTH to the max xri  */
        if (phba->cfg_hba_queue_depth > mb->un.varRdConfig.max_xri) {
                lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
                                "3359 HBA queue depth changed from %d to %d\n",
                                phba->cfg_hba_queue_depth,
                                mb->un.varRdConfig.max_xri);
                phba->cfg_hba_queue_depth = mb->un.varRdConfig.max_xri;
        }

        phba->lmt = mb->un.varRdConfig.lmt;

        /* Get the default values for Model Name and Description */
        lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);

        phba->link_state = LPFC_LINK_DOWN;

        /* Only process IOCBs on ELS ring until hba_state is READY */
        if (psli->sli3_ring[LPFC_EXTRA_RING].sli.sli3.cmdringaddr)
                psli->sli3_ring[LPFC_EXTRA_RING].flag |= LPFC_STOP_IOCB_EVENT;
        if (psli->sli3_ring[LPFC_FCP_RING].sli.sli3.cmdringaddr)
                psli->sli3_ring[LPFC_FCP_RING].flag |= LPFC_STOP_IOCB_EVENT;

        /* Post receive buffers for desired rings */
        if (phba->sli_rev != 3)
                lpfc_post_rcv_buf(phba);

        /*
         * Configure HBA MSI-X attention conditions to messages if MSI-X
         * mode is enabled
         */
        if (phba->intr_type == MSIX) {
                rc = lpfc_config_msi(phba, pmb);
                if (rc) {
                        mempool_free(pmb, phba->mbox_mem_pool);
                        return -EIO;
                }
                rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
                if (rc != MBX_SUCCESS) {
                        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
                                        "0352 Config MSI mailbox command "
                                        "failed, mbxCmd x%x, mbxStatus x%x\n",
                                        pmb->u.mb.mbxCommand,
                                        pmb->u.mb.mbxStatus);
                        mempool_free(pmb, phba->mbox_mem_pool);
                        return -EIO;
                }
        }

        spin_lock_irq(&phba->hbalock);
        /* Initialize ERATT handling flag */
        phba->hba_flag &= ~HBA_ERATT_HANDLED;

        /* Enable appropriate host interrupts */
        if (lpfc_readl(phba->HCregaddr, &status)) {
                spin_unlock_irq(&phba->hbalock);
                return -EIO;
        }
        status |= HC_MBINT_ENA | HC_ERINT_ENA | HC_LAINT_ENA;
        if (psli->num_rings > 0)
                status |= HC_R0INT_ENA;
        if (psli->num_rings > 1)
                status |= HC_R1INT_ENA;
        if (psli->num_rings > 2)
                status |= HC_R2INT_ENA;
        if (psli->num_rings > 3)
                status |= HC_R3INT_ENA;

        if ((phba->cfg_poll & ENABLE_FCP_RING_POLLING) &&
            (phba->cfg_poll & DISABLE_FCP_RING_INT))
                status &= ~(HC_R0INT_ENA);

        writel(status, phba->HCregaddr);
        readl(phba->HCregaddr); /* flush */
        spin_unlock_irq(&phba->hbalock);

        /* Set up ring-0 (ELS) timer */
        timeout = phba->fc_ratov * 2;
        mod_timer(&vport->els_tmofunc,
                  jiffies + msecs_to_jiffies(1000 * timeout));
        /* Set up heart beat (HB) timer */
        mod_timer(&phba->hb_tmofunc,
                  jiffies + msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
        phba->hba_flag &= ~(HBA_HBEAT_INP | HBA_HBEAT_TMO);
        phba->last_completion_time = jiffies;
        /* Set up error attention (ERATT) polling timer */
        mod_timer(&phba->eratt_poll,
                  jiffies + msecs_to_jiffies(1000 * phba->eratt_poll_interval));

        if (phba->hba_flag & LINK_DISABLED) {
                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
                                "2598 Adapter Link is disabled.\n");
                lpfc_down_link(phba, pmb);
                pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
                rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
                if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) {
                        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
                                        "2599 Adapter failed to issue DOWN_LINK"
                                        " mbox command rc 0x%x\n", rc);

                        mempool_free(pmb, phba->mbox_mem_pool);
                        return -EIO;
                }
        } else if (phba->cfg_suppress_link_up == LPFC_INITIALIZE_LINK) {
                mempool_free(pmb, phba->mbox_mem_pool);
                rc = phba->lpfc_hba_init_link(phba, MBX_NOWAIT);
                if (rc)
                        return rc;
        }
        /* MBOX buffer will be freed in mbox compl */
        pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
        if (!pmb) {
                phba->link_state = LPFC_HBA_ERROR;
                return -ENOMEM;
        }

        lpfc_config_async(phba, pmb, LPFC_ELS_RING);
        pmb->mbox_cmpl = lpfc_config_async_cmpl;
        pmb->vport = phba->pport;
        rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);

        if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
                                "0456 Adapter failed to issue "
                                "ASYNCEVT_ENABLE mbox status x%x\n",
                                rc);
                mempool_free(pmb, phba->mbox_mem_pool);
        }

        /* Get Option rom version */
        pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
        if (!pmb) {
                phba->link_state = LPFC_HBA_ERROR;
                return -ENOMEM;
        }

        lpfc_dump_wakeup_param(phba, pmb);
        pmb->mbox_cmpl = lpfc_dump_wakeup_param_cmpl;
        pmb->vport = phba->pport;
        rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);

        if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
                                "0435 Adapter failed "
                                "to get Option ROM version status x%x\n", rc);
                mempool_free(pmb, phba->mbox_mem_pool);
        }

        return 0;
}

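/*
 * Illustrative sketch (not part of the driver): the serial-number fallback
 * in lpfc_config_port_post() above encodes each nibble of the low 6 bytes
 * of the WWNN as a character, 0-9 as '0'..'9' (0x30 + nibble) and 10-15 as
 * 'a'..'f' (0x61 + nibble - 10). A compact equivalent of that loop, with
 * hypothetical names:
 */
static void __maybe_unused
lpfc_example_encode_serial(const uint8_t *ieee, char *out, int nbytes)
{
        static const char hex[] = "0123456789abcdef";
        int i;

        for (i = 0; i < nbytes; i++) {
                *out++ = hex[(ieee[i] & 0xf0) >> 4];    /* high nibble */
                *out++ = hex[ieee[i] & 0x0f];           /* low nibble */
        }
}
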
/**
 * lpfc_hba_init_link - Initialize the FC link
 * @phba: pointer to lpfc hba data structure.
 * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT
 *
 * This routine will issue the INIT_LINK mailbox command call.
 * It is available to other drivers through the lpfc_hba data
 * structure for use as a delayed link up mechanism with the
 * module parameter lpfc_suppress_link_up.
 *
 * Return code
 *              0 - success
 *              Any other value - error
 **/
static int
lpfc_hba_init_link(struct lpfc_hba *phba, uint32_t flag)
{
        return lpfc_hba_init_link_fc_topology(phba, phba->cfg_topology, flag);
}

/**
 * lpfc_hba_init_link_fc_topology - Initialize FC link with desired topology
 * @phba: pointer to lpfc hba data structure.
 * @fc_topology: desired fc topology.
 * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT
 *
 * This routine will issue the INIT_LINK mailbox command call.
 * It is available to other drivers through the lpfc_hba data
 * structure for use as a delayed link up mechanism with the
 * module parameter lpfc_suppress_link_up.
 *
 * Return code
 *              0 - success
 *              Any other value - error
 **/
int
lpfc_hba_init_link_fc_topology(struct lpfc_hba *phba, uint32_t fc_topology,
                               uint32_t flag)
{
        struct lpfc_vport *vport = phba->pport;
        LPFC_MBOXQ_t *pmb;
        MAILBOX_t *mb;
        int rc;

        pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
        if (!pmb) {
                phba->link_state = LPFC_HBA_ERROR;
                return -ENOMEM;
        }
        mb = &pmb->u.mb;
        pmb->vport = vport;

        if ((phba->cfg_link_speed > LPFC_USER_LINK_SPEED_MAX) ||
            ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_1G) &&
             !(phba->lmt & LMT_1Gb)) ||
            ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_2G) &&
             !(phba->lmt & LMT_2Gb)) ||
            ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_4G) &&
             !(phba->lmt & LMT_4Gb)) ||
            ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_8G) &&
             !(phba->lmt & LMT_8Gb)) ||
            ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_10G) &&
             !(phba->lmt & LMT_10Gb)) ||
            ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_16G) &&
             !(phba->lmt & LMT_16Gb)) ||
            ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_32G) &&
             !(phba->lmt & LMT_32Gb)) ||
            ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_64G) &&
             !(phba->lmt & LMT_64Gb))) {
                /* Reset link speed to auto */
                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
                                "1302 Invalid speed for this board:%d "
                                "Reset link speed to auto.\n",
                                phba->cfg_link_speed);
                phba->cfg_link_speed = LPFC_USER_LINK_SPEED_AUTO;
        }
        lpfc_init_link(phba, pmb, fc_topology, phba->cfg_link_speed);
        pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
        if (phba->sli_rev < LPFC_SLI_REV4)
                lpfc_set_loopback_flag(phba);
        rc = lpfc_sli_issue_mbox(phba, pmb, flag);
        if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
                                "0498 Adapter failed to init, mbxCmd x%x "
                                "INIT_LINK, mbxStatus x%x\n",
                                mb->mbxCommand, mb->mbxStatus);
                if (phba->sli_rev <= LPFC_SLI_REV3) {
                        /* Clear all interrupt enable conditions */
                        writel(0, phba->HCregaddr);
                        readl(phba->HCregaddr); /* flush */
                        /* Clear all pending interrupts */
                        writel(0xffffffff, phba->HAregaddr);
                        readl(phba->HAregaddr); /* flush */
                }
                phba->link_state = LPFC_HBA_ERROR;
                if (rc != MBX_BUSY || flag == MBX_POLL)
                        mempool_free(pmb, phba->mbox_mem_pool);
                return -EIO;
        }
        phba->cfg_suppress_link_up = LPFC_INITIALIZE_LINK;
        if (flag == MBX_POLL)
                mempool_free(pmb, phba->mbox_mem_pool);

        return 0;
}

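/*
 * Illustrative sketch (not part of the driver): the long if-chain in
 * lpfc_hba_init_link_fc_topology() above pairs each fixed user link-speed
 * setting with the LMT_* capability bit the adapter must advertise in
 * phba->lmt. A table-driven equivalent, with a hypothetical helper name:
 */
static bool __maybe_unused
lpfc_example_speed_supported(struct lpfc_hba *phba, uint32_t cfg_speed)
{
        static const struct {
                uint32_t speed; /* LPFC_USER_LINK_SPEED_* value */
                uint32_t lmt;   /* required link-media-type bit */
        } map[] = {
                { LPFC_USER_LINK_SPEED_1G,  LMT_1Gb  },
                { LPFC_USER_LINK_SPEED_2G,  LMT_2Gb  },
                { LPFC_USER_LINK_SPEED_4G,  LMT_4Gb  },
                { LPFC_USER_LINK_SPEED_8G,  LMT_8Gb  },
                { LPFC_USER_LINK_SPEED_10G, LMT_10Gb },
                { LPFC_USER_LINK_SPEED_16G, LMT_16Gb },
                { LPFC_USER_LINK_SPEED_32G, LMT_32Gb },
                { LPFC_USER_LINK_SPEED_64G, LMT_64Gb },
        };
        int i;

        if (cfg_speed > LPFC_USER_LINK_SPEED_MAX)
                return false;
        for (i = 0; i < ARRAY_SIZE(map); i++)
                if (map[i].speed == cfg_speed)
                        return (phba->lmt & map[i].lmt) != 0;
        /* AUTO and other non-fixed settings need no capability bit */
        return true;
}
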
/**
 * lpfc_hba_down_link - Bring down the FC link
 * @phba: pointer to lpfc hba data structure.
 * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT
 *
 * This routine will issue the DOWN_LINK mailbox command call.
 * It is available to other drivers through the lpfc_hba data
 * structure for use in stopping the link.
 *
 * Return code
 *              0 - success
 *              Any other value - error
 **/
static int
lpfc_hba_down_link(struct lpfc_hba *phba, uint32_t flag)
{
        LPFC_MBOXQ_t *pmb;
        int rc;

        pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
        if (!pmb) {
                phba->link_state = LPFC_HBA_ERROR;
                return -ENOMEM;
        }

        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
                        "0491 Adapter Link is disabled.\n");
        lpfc_down_link(phba, pmb);
        pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
        rc = lpfc_sli_issue_mbox(phba, pmb, flag);
        if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) {
                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
                                "2522 Adapter failed to issue DOWN_LINK"
                                " mbox command rc 0x%x\n", rc);

                mempool_free(pmb, phba->mbox_mem_pool);
                return -EIO;
        }
        if (flag == MBX_POLL)
                mempool_free(pmb, phba->mbox_mem_pool);

        return 0;
}

/**
 * lpfc_hba_down_prep - Perform lpfc uninitialization prior to HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will do LPFC uninitialization before the HBA is reset when
 * bringing down the SLI Layer.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
int
lpfc_hba_down_prep(struct lpfc_hba *phba)
{
        struct lpfc_vport **vports;
        int i;

        if (phba->sli_rev <= LPFC_SLI_REV3) {
                /* Disable interrupts */
                writel(0, phba->HCregaddr);
                readl(phba->HCregaddr); /* flush */
        }

        if (phba->pport->load_flag & FC_UNLOADING)
                lpfc_cleanup_discovery_resources(phba->pport);
        else {
                vports = lpfc_create_vport_work_array(phba);
                if (vports != NULL)
                        for (i = 0; i <= phba->max_vports &&
                                vports[i] != NULL; i++)
                                lpfc_cleanup_discovery_resources(vports[i]);
                lpfc_destroy_vport_work_array(phba, vports);
        }
        return 0;
}

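/*
 * Illustrative sketch (not part of the driver): the vport work-array
 * pattern used by lpfc_hba_down_prep() above. The array returned by
 * lpfc_create_vport_work_array() holds up to max_vports entries and is
 * terminated by a NULL entry, so iteration stops at the first NULL and
 * the array is always handed back to lpfc_destroy_vport_work_array().
 * The callback parameter is hypothetical.
 */
static void __maybe_unused
lpfc_example_for_each_vport(struct lpfc_hba *phba,
                            void (*fn)(struct lpfc_vport *))
{
        struct lpfc_vport **vports;
        int i;

        vports = lpfc_create_vport_work_array(phba);
        if (vports != NULL)
                for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
                        fn(vports[i]);
        lpfc_destroy_vport_work_array(phba, vports);
}
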
/**
 * lpfc_sli4_free_sp_events - Clean up sp_queue_event entries to free
 * deferred rspiocbs
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will clean up completed slow path events after the HBA is
 * reset when bringing down the SLI Layer.
 *
 * Return codes
 *   void.
 **/
static void
lpfc_sli4_free_sp_events(struct lpfc_hba *phba)
{
        struct lpfc_iocbq *rspiocbq;
        struct hbq_dmabuf *dmabuf;
        struct lpfc_cq_event *cq_event;

        spin_lock_irq(&phba->hbalock);
        phba->hba_flag &= ~HBA_SP_QUEUE_EVT;
        spin_unlock_irq(&phba->hbalock);

        while (!list_empty(&phba->sli4_hba.sp_queue_event)) {
                /* Get the response iocb from the head of work queue */
                spin_lock_irq(&phba->hbalock);
                list_remove_head(&phba->sli4_hba.sp_queue_event,
                                 cq_event, struct lpfc_cq_event, list);
                spin_unlock_irq(&phba->hbalock);

                switch (bf_get(lpfc_wcqe_c_code, &cq_event->cqe.wcqe_cmpl)) {
                case CQE_CODE_COMPL_WQE:
                        rspiocbq = container_of(cq_event, struct lpfc_iocbq,
                                                 cq_event);
                        lpfc_sli_release_iocbq(phba, rspiocbq);
                        break;
                case CQE_CODE_RECEIVE:
                case CQE_CODE_RECEIVE_V1:
                        dmabuf = container_of(cq_event, struct hbq_dmabuf,
                                              cq_event);
                        lpfc_in_buf_free(phba, &dmabuf->dbuf);
                }
        }
}

/**
 * lpfc_hba_free_post_buf - Perform lpfc uninitialization after HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will clean up posted ELS buffers after the HBA is reset
 * when bringing down the SLI Layer.
 *
 * Return codes
 *   void.
 **/
static void
lpfc_hba_free_post_buf(struct lpfc_hba *phba)
{
        struct lpfc_sli *psli = &phba->sli;
        struct lpfc_sli_ring *pring;
        struct lpfc_dmabuf *mp, *next_mp;
        LIST_HEAD(buflist);
        int count;

        if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)
                lpfc_sli_hbqbuf_free_all(phba);
        else {
                /* Cleanup preposted buffers on the ELS ring */
                pring = &psli->sli3_ring[LPFC_ELS_RING];
                spin_lock_irq(&phba->hbalock);
                list_splice_init(&pring->postbufq, &buflist);
                spin_unlock_irq(&phba->hbalock);

                count = 0;
                list_for_each_entry_safe(mp, next_mp, &buflist, list) {
                        list_del(&mp->list);
                        count++;
                        lpfc_mbuf_free(phba, mp->virt, mp->phys);
                        kfree(mp);
                }

                spin_lock_irq(&phba->hbalock);
                pring->postbufq_cnt -= count;
                spin_unlock_irq(&phba->hbalock);
        }
}

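/*
 * Illustrative sketch (not part of the driver): the drain pattern used by
 * lpfc_hba_free_post_buf() above. Entries are spliced onto a private list
 * while the lock is held, then freed with the lock dropped, so the
 * (potentially slow) free path never runs under the spinlock. Names are
 * hypothetical; only the shape of the pattern matters.
 */
static void __maybe_unused
lpfc_example_drain_dmabuf_list(struct lpfc_hba *phba, struct list_head *src)
{
        struct lpfc_dmabuf *mp, *next_mp;
        LIST_HEAD(buflist);

        spin_lock_irq(&phba->hbalock);
        list_splice_init(src, &buflist); /* src is now empty */
        spin_unlock_irq(&phba->hbalock);

        list_for_each_entry_safe(mp, next_mp, &buflist, list) {
                list_del(&mp->list);
                lpfc_mbuf_free(phba, mp->virt, mp->phys);
                kfree(mp);
        }
}
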
/**
 * lpfc_hba_clean_txcmplq - Perform lpfc uninitialization after HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will clean up the txcmplq after the HBA is reset when bringing
 * down the SLI Layer.
 *
 * Return codes
 *   void
 **/
static void
lpfc_hba_clean_txcmplq(struct lpfc_hba *phba)
{
        struct lpfc_sli *psli = &phba->sli;
        struct lpfc_queue *qp = NULL;
        struct lpfc_sli_ring *pring;
        LIST_HEAD(completions);
        int i;
        struct lpfc_iocbq *piocb, *next_iocb;

        if (phba->sli_rev != LPFC_SLI_REV4) {
                for (i = 0; i < psli->num_rings; i++) {
                        pring = &psli->sli3_ring[i];
                        spin_lock_irq(&phba->hbalock);
                        /* At this point in time the HBA is either reset or DOA
                         * Nothing should be on txcmplq as it will
                         * NEVER complete.
                         */
                        list_splice_init(&pring->txcmplq, &completions);
                        pring->txcmplq_cnt = 0;
                        spin_unlock_irq(&phba->hbalock);

                        lpfc_sli_abort_iocb_ring(phba, pring);
                }
                /* Cancel all the IOCBs from the completions list */
                lpfc_sli_cancel_iocbs(phba, &completions,
                                      IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED);
                return;
        }
        list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
                pring = qp->pring;
                if (!pring)
                        continue;
                spin_lock_irq(&pring->ring_lock);
                list_for_each_entry_safe(piocb, next_iocb,
                                         &pring->txcmplq, list)
                        piocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
                list_splice_init(&pring->txcmplq, &completions);
                pring->txcmplq_cnt = 0;
                spin_unlock_irq(&pring->ring_lock);
                lpfc_sli_abort_iocb_ring(phba, pring);
        }
        /* Cancel all the IOCBs from the completions list */
        lpfc_sli_cancel_iocbs(phba, &completions,
                              IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED);
}

/**
 * lpfc_hba_down_post_s3 - Perform lpfc uninitialization after HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will do uninitialization after the HBA is reset when bringing
 * down the SLI Layer.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
static int
lpfc_hba_down_post_s3(struct lpfc_hba *phba)
{
        lpfc_hba_free_post_buf(phba);
        lpfc_hba_clean_txcmplq(phba);
        return 0;
}

/**
 * lpfc_hba_down_post_s4 - Perform lpfc uninitialization after HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will do uninitialization after the HBA is reset when bringing
 * down the SLI Layer.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
static int
lpfc_hba_down_post_s4(struct lpfc_hba *phba)
{
        struct lpfc_io_buf *psb, *psb_next;
        struct lpfc_async_xchg_ctx *ctxp, *ctxp_next;
        struct lpfc_sli4_hdw_queue *qp;
        LIST_HEAD(aborts);
        LIST_HEAD(nvme_aborts);
        LIST_HEAD(nvmet_aborts);
        struct lpfc_sglq *sglq_entry = NULL;
        int cnt, idx;

        lpfc_sli_hbqbuf_free_all(phba);
        lpfc_hba_clean_txcmplq(phba);

        /* At this point in time the HBA is either reset or DOA. Either
         * way, nothing should be on lpfc_abts_els_sgl_list, it needs to be
         * on the lpfc_els_sgl_list so that it can either be freed if the
         * driver is unloading or reposted if the driver is restarting
         * the port.
         */

        /* sgl_list_lock required because worker thread uses this
         * list.
         */
        spin_lock_irq(&phba->sli4_hba.sgl_list_lock);
        list_for_each_entry(sglq_entry,
                &phba->sli4_hba.lpfc_abts_els_sgl_list, list)
                sglq_entry->state = SGL_FREED;

        list_splice_init(&phba->sli4_hba.lpfc_abts_els_sgl_list,
                        &phba->sli4_hba.lpfc_els_sgl_list);

        spin_unlock_irq(&phba->sli4_hba.sgl_list_lock);

        /* abts_xxxx_buf_list_lock required because worker thread uses this
         * list.
         */
        spin_lock_irq(&phba->hbalock);
        cnt = 0;
        for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
                qp = &phba->sli4_hba.hdwq[idx];

                spin_lock(&qp->abts_io_buf_list_lock);
                list_splice_init(&qp->lpfc_abts_io_buf_list,
                                 &aborts);

                list_for_each_entry_safe(psb, psb_next, &aborts, list) {
                        psb->pCmd = NULL;
                        psb->status = IOSTAT_SUCCESS;
                        cnt++;
                }
                spin_lock(&qp->io_buf_list_put_lock);
                list_splice_init(&aborts, &qp->lpfc_io_buf_list_put);
                qp->put_io_bufs += qp->abts_scsi_io_bufs;
                qp->put_io_bufs += qp->abts_nvme_io_bufs;
                qp->abts_scsi_io_bufs = 0;
                qp->abts_nvme_io_bufs = 0;
                spin_unlock(&qp->io_buf_list_put_lock);
                spin_unlock(&qp->abts_io_buf_list_lock);
        }
        spin_unlock_irq(&phba->hbalock);

        if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
                spin_lock_irq(&phba->sli4_hba.abts_nvmet_buf_list_lock);
                list_splice_init(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list,
                                 &nvmet_aborts);
                spin_unlock_irq(&phba->sli4_hba.abts_nvmet_buf_list_lock);
                list_for_each_entry_safe(ctxp, ctxp_next, &nvmet_aborts, list) {
                        ctxp->flag &= ~(LPFC_NVME_XBUSY | LPFC_NVME_ABORT_OP);
                        lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
                }
        }

        lpfc_sli4_free_sp_events(phba);
        return cnt;
}

/**
 * lpfc_hba_down_post - Wrapper func for hba down post routine
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine wraps the actual SLI3 or SLI4 routine for performing
 * uninitialization after the HBA is reset when bringing down the SLI Layer.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
int
lpfc_hba_down_post(struct lpfc_hba *phba)
{
        return (*phba->lpfc_hba_down_post)(phba);
}

/**
 * lpfc_hb_timeout - The HBA-timer timeout handler
 * @t: timer context used to obtain the pointer to lpfc hba data structure.
 *
 * This is the HBA-timer timeout handler registered to the lpfc driver. When
 * this timer fires, an HBA timeout event shall be posted to the lpfc driver
 * work-port-events bitmap and the worker thread is notified. This timeout
 * event will be used by the worker thread to invoke the actual timeout
 * handler routine, lpfc_hb_timeout_handler. Any periodic operations will
 * be performed in the timeout handler and the HBA timeout event bit shall
 * be cleared by the worker thread after it has taken the event bitmap out.
 **/
static void
lpfc_hb_timeout(struct timer_list *t)
{
        struct lpfc_hba *phba;
        uint32_t tmo_posted;
        unsigned long iflag;

        phba = from_timer(phba, t, hb_tmofunc);

        /* Check for heart beat timeout conditions */
        spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
        tmo_posted = phba->pport->work_port_events & WORKER_HB_TMO;
        if (!tmo_posted)
                phba->pport->work_port_events |= WORKER_HB_TMO;
        spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);

        /* Tell the worker thread there is work to do */
        if (!tmo_posted)
                lpfc_worker_wake_up(phba);
        return;
}

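/*
 * Illustrative sketch (not part of the driver): the struct timer_list
 * pattern behind lpfc_hb_timeout() above. The owner embeds the timer,
 * registers the callback with timer_setup(), and the callback recovers
 * its owner with from_timer(). The structure and names are hypothetical;
 * the caller is assumed to have filled in interval_ms.
 */
struct lpfc_example_owner {
        struct timer_list tmr;
        unsigned long interval_ms;
};

static void lpfc_example_timer_fn(struct timer_list *t)
{
        struct lpfc_example_owner *own = from_timer(own, t, tmr);

        /* do periodic work here, then re-arm for the next period */
        mod_timer(&own->tmr, jiffies + msecs_to_jiffies(own->interval_ms));
}

static void __maybe_unused
lpfc_example_timer_start(struct lpfc_example_owner *own)
{
        timer_setup(&own->tmr, lpfc_example_timer_fn, 0);
        mod_timer(&own->tmr, jiffies + msecs_to_jiffies(own->interval_ms));
}
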
/**
 * lpfc_rrq_timeout - The RRQ-timer timeout handler
 * @t: timer context used to obtain the pointer to lpfc hba data structure.
 *
 * This is the RRQ-timer timeout handler registered to the lpfc driver. When
 * this timer fires, an RRQ timeout event shall be posted to the lpfc driver
 * work-port-events bitmap and the worker thread is notified. This timeout
 * event will be used by the worker thread to invoke the actual timeout
 * handler routine, lpfc_rrq_handler. Any periodic operations will
 * be performed in the timeout handler and the RRQ timeout event bit shall
 * be cleared by the worker thread after it has taken the event bitmap out.
 **/
static void
lpfc_rrq_timeout(struct timer_list *t)
{
        struct lpfc_hba *phba;
        unsigned long iflag;

        phba = from_timer(phba, t, rrq_tmr);
        spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
        if (!(phba->pport->load_flag & FC_UNLOADING))
                phba->hba_flag |= HBA_RRQ_ACTIVE;
        else
                phba->hba_flag &= ~HBA_RRQ_ACTIVE;
        spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);

        if (!(phba->pport->load_flag & FC_UNLOADING))
                lpfc_worker_wake_up(phba);
}

/**
 * lpfc_hb_mbox_cmpl - The lpfc heart-beat mailbox command callback function
 * @phba: pointer to lpfc hba data structure.
 * @pmboxq: pointer to the driver internal queue element for mailbox command.
 *
 * This is the callback function to the lpfc heart-beat mailbox command.
 * If configured, the lpfc driver issues the heart-beat mailbox command to
 * the HBA every LPFC_HB_MBOX_INTERVAL (currently 5) seconds. At the time the
 * heart-beat mailbox command is issued, the driver shall set up heart-beat
 * timeout timer to LPFC_HB_MBOX_TIMEOUT (currently 30) seconds and marks
 * heart-beat outstanding state. Once the mailbox command comes back and
 * no error conditions detected, the heart-beat mailbox command timer is
 * reset to LPFC_HB_MBOX_INTERVAL seconds and the heart-beat outstanding
 * state is cleared for the next heart-beat. If the timer expired with the
 * heart-beat outstanding state set, the driver will put the HBA offline.
 **/
static void
lpfc_hb_mbox_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq)
{
        unsigned long drvr_flag;

        spin_lock_irqsave(&phba->hbalock, drvr_flag);
        phba->hba_flag &= ~(HBA_HBEAT_INP | HBA_HBEAT_TMO);
        spin_unlock_irqrestore(&phba->hbalock, drvr_flag);

        /* Check and reset heart-beat timer if necessary */
        mempool_free(pmboxq, phba->mbox_mem_pool);
        if (!(phba->pport->fc_flag & FC_OFFLINE_MODE) &&
                !(phba->link_state == LPFC_HBA_ERROR) &&
                !(phba->pport->load_flag & FC_UNLOADING))
                mod_timer(&phba->hb_tmofunc,
                          jiffies +
                          msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
        return;
}

/**
 * lpfc_idle_stat_delay_work - idle_stat tracking
 * @work: pointer to the work_struct embedded in the lpfc hba structure.
 *
 * This routine tracks per-cq idle_stat and determines polling decisions.
 *
 * Return codes:
 *   None
 **/
static void
lpfc_idle_stat_delay_work(struct work_struct *work)
{
        struct lpfc_hba *phba = container_of(to_delayed_work(work),
                                             struct lpfc_hba,
                                             idle_stat_delay_work);
        struct lpfc_queue *cq;
        struct lpfc_sli4_hdw_queue *hdwq;
        struct lpfc_idle_stat *idle_stat;
        u32 i, idle_percent;
        u64 wall, wall_idle, diff_wall, diff_idle, busy_time;

        if (phba->pport->load_flag & FC_UNLOADING)
                return;

        if (phba->link_state == LPFC_HBA_ERROR ||
            phba->pport->fc_flag & FC_OFFLINE_MODE ||
            phba->cmf_active_mode != LPFC_CFG_OFF)
                goto requeue;

        for_each_present_cpu(i) {
                hdwq = &phba->sli4_hba.hdwq[phba->sli4_hba.cpu_map[i].hdwq];
                cq = hdwq->io_cq;

                /* Skip if we've already handled this cq's primary CPU */
                if (cq->chann != i)
                        continue;

                idle_stat = &phba->sli4_hba.idle_stat[i];

                /* get_cpu_idle_time returns values as running counters. Thus,
                 * to know the amount for this period, the prior counter values
                 * need to be subtracted from the current counter values.
                 * From there, the idle time stat can be calculated as a
                 * percentage of 100 - the sum of the other consumption times.
                 */
                wall_idle = get_cpu_idle_time(i, &wall, 1);
                diff_idle = wall_idle - idle_stat->prev_idle;
                diff_wall = wall - idle_stat->prev_wall;

                if (diff_wall <= diff_idle)
                        busy_time = 0;
                else
                        busy_time = diff_wall - diff_idle;

                idle_percent = div64_u64(100 * busy_time, diff_wall);
                idle_percent = 100 - idle_percent;

                if (idle_percent < 15)
                        cq->poll_mode = LPFC_QUEUE_WORK;
                else
                        cq->poll_mode = LPFC_IRQ_POLL;

                idle_stat->prev_idle = wall_idle;
                idle_stat->prev_wall = wall;
        }

requeue:
        schedule_delayed_work(&phba->idle_stat_delay_work,
                              msecs_to_jiffies(LPFC_IDLE_STAT_DELAY));
}

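/*
 * Illustrative sketch (not part of the driver): the counter-delta
 * arithmetic in lpfc_idle_stat_delay_work() above. Both inputs are
 * running counters, so the per-period figures are the differences of
 * successive samples, and the idle percentage is 100 minus the busy
 * share of the wall-clock delta. The helper name is hypothetical.
 */
static u32 __maybe_unused
lpfc_example_idle_percent(u64 prev_wall, u64 prev_idle,
                          u64 cur_wall, u64 cur_idle)
{
        u64 diff_wall = cur_wall - prev_wall;
        u64 diff_idle = cur_idle - prev_idle;
        u64 busy_time;

        if (!diff_wall)
                return 100;     /* no elapsed time; treat as fully idle */

        busy_time = (diff_wall > diff_idle) ? diff_wall - diff_idle : 0;
        return 100 - div64_u64(100 * busy_time, diff_wall);
}
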
1293static void
1294lpfc_hb_eq_delay_work(struct work_struct *work)
1295{
1296        struct lpfc_hba *phba = container_of(to_delayed_work(work),
1297                                             struct lpfc_hba, eq_delay_work);
1298        struct lpfc_eq_intr_info *eqi, *eqi_new;
1299        struct lpfc_queue *eq, *eq_next;
1300        unsigned char *ena_delay = NULL;
1301        uint32_t usdelay;
1302        int i;
1303
1304        if (!phba->cfg_auto_imax || phba->pport->load_flag & FC_UNLOADING)
1305                return;
1306
1307        if (phba->link_state == LPFC_HBA_ERROR ||
1308            phba->pport->fc_flag & FC_OFFLINE_MODE)
1309                goto requeue;
1310
1311        ena_delay = kcalloc(phba->sli4_hba.num_possible_cpu, sizeof(*ena_delay),
1312                            GFP_KERNEL);
1313        if (!ena_delay)
1314                goto requeue;
1315
1316        for (i = 0; i < phba->cfg_irq_chann; i++) {
1317                /* Get the EQ corresponding to the IRQ vector */
1318                eq = phba->sli4_hba.hba_eq_hdl[i].eq;
1319                if (!eq)
1320                        continue;
1321                if (eq->q_mode || eq->q_flag & HBA_EQ_DELAY_CHK) {
1322                        eq->q_flag &= ~HBA_EQ_DELAY_CHK;
1323                        ena_delay[eq->last_cpu] = 1;
1324                }
1325        }
1326
1327        for_each_present_cpu(i) {
1328                eqi = per_cpu_ptr(phba->sli4_hba.eq_info, i);
1329                if (ena_delay[i]) {
1330                        usdelay = (eqi->icnt >> 10) * LPFC_EQ_DELAY_STEP;
1331                        if (usdelay > LPFC_MAX_AUTO_EQ_DELAY)
1332                                usdelay = LPFC_MAX_AUTO_EQ_DELAY;
1333                } else {
1334                        usdelay = 0;
1335                }
1336
1337                eqi->icnt = 0;
1338
1339                list_for_each_entry_safe(eq, eq_next, &eqi->list, cpu_list) {
1340                        if (unlikely(eq->last_cpu != i)) {
1341                                eqi_new = per_cpu_ptr(phba->sli4_hba.eq_info,
1342                                                      eq->last_cpu);
1343                                list_move_tail(&eq->cpu_list, &eqi_new->list);
1344                                continue;
1345                        }
1346                        if (usdelay != eq->q_mode)
1347                                lpfc_modify_hba_eq_delay(phba, eq->hdwq, 1,
1348                                                         usdelay);
1349                }
1350        }
1351
1352        kfree(ena_delay);
1353
1354requeue:
1355        queue_delayed_work(phba->wq, &phba->eq_delay_work,
1356                           msecs_to_jiffies(LPFC_EQ_DELAY_MSECS));
1357}
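/*
 * Illustrative delay calculation (hypothetical numbers): with
 * eqi->icnt = 4096 interrupts in the last period, usdelay becomes
 * (4096 >> 10) * LPFC_EQ_DELAY_STEP = 4 * LPFC_EQ_DELAY_STEP, clamped to
 * LPFC_MAX_AUTO_EQ_DELAY before being programmed through
 * lpfc_modify_hba_eq_delay().
 */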
1358
1359/**
1360 * lpfc_hb_mxp_handler - Multi-XRI pools handler to adjust XRI distribution
1361 * @phba: pointer to lpfc hba data structure.
1362 *
1363 * For each heartbeat, this routine applies heuristics to adjust the
1364 * XRI distribution. The goal is to fully utilize free XRIs.
1365 **/
1366static void lpfc_hb_mxp_handler(struct lpfc_hba *phba)
1367{
1368        u32 i;
1369        u32 hwq_count;
1370
1371        hwq_count = phba->cfg_hdw_queue;
1372        for (i = 0; i < hwq_count; i++) {
1373                /* Adjust XRIs in private pool */
1374                lpfc_adjust_pvt_pool_count(phba, i);
1375
1376                /* Adjust high watermark */
1377                lpfc_adjust_high_watermark(phba, i);
1378
1379#ifdef LPFC_MXP_STAT
1380                /* Snapshot pbl, pvt and busy count */
1381                lpfc_snapshot_mxp(phba, i);
1382#endif
1383        }
1384}
1385
1386/**
1387 * lpfc_issue_hb_mbox - Issues heart-beat mailbox command
1388 * @phba: pointer to lpfc hba data structure.
1389 *
1390 * If a HB mbox is not already in progress, this routine will allocate
1391 * an LPFC_MBOXQ_t, populate it with a MBX_HEARTBEAT (0x31) command,
1392 * and issue it. The HBA_HBEAT_INP flag means the command is in progress.
1393 **/
1394int
1395lpfc_issue_hb_mbox(struct lpfc_hba *phba)
1396{
1397        LPFC_MBOXQ_t *pmboxq;
1398        int retval;
1399
1400        /* Is a Heartbeat mbox already in progress */
1401        if (phba->hba_flag & HBA_HBEAT_INP)
1402                return 0;
1403
1404        pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1405        if (!pmboxq)
1406                return -ENOMEM;
1407
1408        lpfc_heart_beat(phba, pmboxq);
1409        pmboxq->mbox_cmpl = lpfc_hb_mbox_cmpl;
1410        pmboxq->vport = phba->pport;
1411        retval = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
1412
1413        if (retval != MBX_BUSY && retval != MBX_SUCCESS) {
1414                mempool_free(pmboxq, phba->mbox_mem_pool);
1415                return -ENXIO;
1416        }
1417        phba->hba_flag |= HBA_HBEAT_INP;
1418
1419        return 0;
1420}
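/*
 * Sketch of how a caller might act on this routine's return codes
 * (illustrative only): 0 means the heartbeat was issued or one is
 * already in progress, -ENOMEM means no mailbox memory was available,
 * and -ENXIO means the mailbox command was rejected.
 *
 *	if (lpfc_issue_hb_mbox(phba))
 *		tmo = 1000 * LPFC_HB_MBOX_INTERVAL;
 *	else
 *		tmo = 1000 * LPFC_HB_MBOX_TIMEOUT;
 */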
1421
1422/**
1423 * lpfc_issue_hb_tmo - Signals heartbeat timer to issue mbox command
1424 * @phba: pointer to lpfc hba data structure.
1425 *
1426 * The heartbeat timer (every 5 sec) will fire. If the HBA_HBEAT_TMO
1427 * flag is set, it will force a MBX_HEARTBEAT mbox command, regardless
1428 * of the value of lpfc_enable_hba_heartbeat.
1429 * If lpfc_enable_hba_heartbeat is set, the timeout routine will always
1430 * try to issue a MBX_HEARTBEAT mbox command.
1431 **/
1432void
1433lpfc_issue_hb_tmo(struct lpfc_hba *phba)
1434{
1435        if (phba->cfg_enable_hba_heartbeat)
1436                return;
1437        phba->hba_flag |= HBA_HBEAT_TMO;
1438}
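/*
 * Note: with lpfc_enable_hba_heartbeat disabled, callers use this routine
 * to request a single forced MBX_HEARTBEAT; the flag is acted on by
 * lpfc_hb_timeout_handler() at the next heartbeat timer pop.
 */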
1439
1440/**
1441 * lpfc_hb_timeout_handler - The HBA-timer timeout handler
1442 * @phba: pointer to lpfc hba data structure.
1443 *
1444 * This is the actual HBA-timer timeout handler to be invoked by the worker
1445 * thread whenever the HBA timer fires and an HBA-timeout event is posted.
1446 * This handler performs any periodic operations needed for the device. If
1447 * such a periodic event has already been attended to, either in the
1448 * interrupt handler or by processing slow-ring or fast-ring events within
1449 * the HBA-timer timeout window (LPFC_HB_MBOX_INTERVAL), this handler simply
1450 * resets the timer for the next timeout period. If the lpfc heart-beat
1451 * mailbox command is configured and no heart-beat mailbox command is
1452 * outstanding, a heart-beat mailbox is issued and the timer is set
1453 * properly. Otherwise, if a heart-beat mailbox command has been
1454 * outstanding, the HBA is taken offline.
1455 **/
1456void
1457lpfc_hb_timeout_handler(struct lpfc_hba *phba)
1458{
1459        struct lpfc_vport **vports;
1460        struct lpfc_dmabuf *buf_ptr;
1461        int retval = 0;
1462        int i, tmo;
1463        struct lpfc_sli *psli = &phba->sli;
1464        LIST_HEAD(completions);
1465
1466        if (phba->cfg_xri_rebalancing) {
1467                /* Multi-XRI pools handler */
1468                lpfc_hb_mxp_handler(phba);
1469        }
1470
1471        vports = lpfc_create_vport_work_array(phba);
1472        if (vports != NULL)
1473                for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
1474                        lpfc_rcv_seq_check_edtov(vports[i]);
1475                        lpfc_fdmi_change_check(vports[i]);
1476                }
1477        lpfc_destroy_vport_work_array(phba, vports);
1478
1479        if ((phba->link_state == LPFC_HBA_ERROR) ||
1480                (phba->pport->load_flag & FC_UNLOADING) ||
1481                (phba->pport->fc_flag & FC_OFFLINE_MODE))
1482                return;
1483
1484        if (phba->elsbuf_cnt &&
1485                (phba->elsbuf_cnt == phba->elsbuf_prev_cnt)) {
1486                spin_lock_irq(&phba->hbalock);
1487                list_splice_init(&phba->elsbuf, &completions);
1488                phba->elsbuf_cnt = 0;
1489                phba->elsbuf_prev_cnt = 0;
1490                spin_unlock_irq(&phba->hbalock);
1491
1492                while (!list_empty(&completions)) {
1493                        list_remove_head(&completions, buf_ptr,
1494                                struct lpfc_dmabuf, list);
1495                        lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
1496                        kfree(buf_ptr);
1497                }
1498        }
1499        phba->elsbuf_prev_cnt = phba->elsbuf_cnt;
1500
1501        /* If there is no heart beat outstanding, issue a heartbeat command */
1502        if (phba->cfg_enable_hba_heartbeat) {
1503                /* If IOs are completing, no need to issue a MBX_HEARTBEAT */
1504                spin_lock_irq(&phba->pport->work_port_lock);
1505                if (time_after(phba->last_completion_time +
1506                                msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL),
1507                                jiffies)) {
1508                        spin_unlock_irq(&phba->pport->work_port_lock);
1509                        if (phba->hba_flag & HBA_HBEAT_INP)
1510                                tmo = (1000 * LPFC_HB_MBOX_TIMEOUT);
1511                        else
1512                                tmo = (1000 * LPFC_HB_MBOX_INTERVAL);
1513                        goto out;
1514                }
1515                spin_unlock_irq(&phba->pport->work_port_lock);
1516
1517                /* Check if a MBX_HEARTBEAT is already in progress */
1518                if (phba->hba_flag & HBA_HBEAT_INP) {
1519                        /*
1520                         * If heart beat timeout called with HBA_HBEAT_INP set
1521                         * we need to give the hb mailbox cmd a chance to
1522                         * complete or TMO.
1523                         */
1524                        lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
1525                                "0459 Adapter heartbeat still outstanding: "
1526                                "last compl time was %d ms.\n",
1527                                jiffies_to_msecs(jiffies
1528                                         - phba->last_completion_time));
1529                        tmo = (1000 * LPFC_HB_MBOX_TIMEOUT);
1530                } else {
1531                        if ((!(psli->sli_flag & LPFC_SLI_MBOX_ACTIVE)) &&
1532                                (list_empty(&psli->mboxq))) {
1533
1534                                retval = lpfc_issue_hb_mbox(phba);
1535                                if (retval) {
1536                                        tmo = (1000 * LPFC_HB_MBOX_INTERVAL);
1537                                        goto out;
1538                                }
1539                                phba->skipped_hb = 0;
1540                        } else if (time_before_eq(phba->last_completion_time,
1541                                        phba->skipped_hb)) {
1542                                lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
1543                                        "2857 Last completion time not "
1544                                        "updated in %d ms\n",
1545                                        jiffies_to_msecs(jiffies
1546                                                 - phba->last_completion_time));
1547                        } else
1548                                phba->skipped_hb = jiffies;
1549
1550                        tmo = (1000 * LPFC_HB_MBOX_TIMEOUT);
1551                        goto out;
1552                }
1553        } else {
1554                /* Check to see if we want to force a MBX_HEARTBEAT */
1555                if (phba->hba_flag & HBA_HBEAT_TMO) {
1556                        retval = lpfc_issue_hb_mbox(phba);
1557                        if (retval)
1558                                tmo = (1000 * LPFC_HB_MBOX_INTERVAL);
1559                        else
1560                                tmo = (1000 * LPFC_HB_MBOX_TIMEOUT);
1561                        goto out;
1562                }
1563                tmo = (1000 * LPFC_HB_MBOX_INTERVAL);
1564        }
1565out:
1566        mod_timer(&phba->hb_tmofunc, jiffies + msecs_to_jiffies(tmo));
1567}
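/*
 * Summary of the re-arm intervals chosen above: LPFC_HB_MBOX_INTERVAL
 * seconds while waiting to issue the next heartbeat (or after a failed
 * issue attempt), and LPFC_HB_MBOX_TIMEOUT seconds while a heartbeat
 * mailbox is outstanding and must be given time to complete or time out.
 */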
1568
1569/**
1570 * lpfc_offline_eratt - Bring lpfc offline on hardware error attention
1571 * @phba: pointer to lpfc hba data structure.
1572 *
1573 * This routine is called to bring the HBA offline when an HBA hardware
1574 * error other than Port Error 6 has been detected.
1575 **/
1576static void
1577lpfc_offline_eratt(struct lpfc_hba *phba)
1578{
1579        struct lpfc_sli   *psli = &phba->sli;
1580
1581        spin_lock_irq(&phba->hbalock);
1582        psli->sli_flag &= ~LPFC_SLI_ACTIVE;
1583        spin_unlock_irq(&phba->hbalock);
1584        lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
1585
1586        lpfc_offline(phba);
1587        lpfc_reset_barrier(phba);
1588        spin_lock_irq(&phba->hbalock);
1589        lpfc_sli_brdreset(phba);
1590        spin_unlock_irq(&phba->hbalock);
1591        lpfc_hba_down_post(phba);
1592        lpfc_sli_brdready(phba, HS_MBRDY);
1593        lpfc_unblock_mgmt_io(phba);
1594        phba->link_state = LPFC_HBA_ERROR;
1595        return;
1596}
1597
1598/**
1599 * lpfc_sli4_offline_eratt - Bring lpfc offline on SLI4 hardware error attention
1600 * @phba: pointer to lpfc hba data structure.
1601 *
1602 * This routine is called to bring an SLI4 HBA offline when an HBA hardware
1603 * error other than Port Error 6 has been detected.
1604 **/
1605void
1606lpfc_sli4_offline_eratt(struct lpfc_hba *phba)
1607{
1608        spin_lock_irq(&phba->hbalock);
1609        phba->link_state = LPFC_HBA_ERROR;
1610        spin_unlock_irq(&phba->hbalock);
1611
1612        lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
1613        lpfc_sli_flush_io_rings(phba);
1614        lpfc_offline(phba);
1615        lpfc_hba_down_post(phba);
1616        lpfc_unblock_mgmt_io(phba);
1617}
1618
1619/**
1620 * lpfc_handle_deferred_eratt - The HBA hardware deferred error handler
1621 * @phba: pointer to lpfc hba data structure.
1622 *
1623 * This routine is invoked to handle deferred HBA hardware error
1624 * conditions. This type of error is indicated by the HBA setting ER1
1625 * and another ER bit in the host status register. The driver will
1626 * wait until the ER1 bit clears before handling the error condition.
1627 **/
1628static void
1629lpfc_handle_deferred_eratt(struct lpfc_hba *phba)
1630{
1631        uint32_t old_host_status = phba->work_hs;
1632        struct lpfc_sli *psli = &phba->sli;
1633
1634        /* If the pci channel is offline, ignore possible errors,
1635         * since we cannot communicate with the pci card anyway.
1636         */
1637        if (pci_channel_offline(phba->pcidev)) {
1638                spin_lock_irq(&phba->hbalock);
1639                phba->hba_flag &= ~DEFER_ERATT;
1640                spin_unlock_irq(&phba->hbalock);
1641                return;
1642        }
1643
1644        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1645                        "0479 Deferred Adapter Hardware Error "
1646                        "Data: x%x x%x x%x\n",
1647                        phba->work_hs, phba->work_status[0],
1648                        phba->work_status[1]);
1649
1650        spin_lock_irq(&phba->hbalock);
1651        psli->sli_flag &= ~LPFC_SLI_ACTIVE;
1652        spin_unlock_irq(&phba->hbalock);
1653
1654
1655        /*
1656         * Firmware stops when it triggers erratt. That could cause I/Os to
1657         * be dropped by the firmware. Error out the iocbs (I/Os) on the
1658         * txcmplq and let the SCSI layer retry them after re-establishing link.
1659         */
1660        lpfc_sli_abort_fcp_rings(phba);
1661
1662        /*
1663         * There was a firmware error. Take the hba offline and then
1664         * attempt to restart it.
1665         */
1666        lpfc_offline_prep(phba, LPFC_MBX_WAIT);
1667        lpfc_offline(phba);
1668
1669        /* Wait for the ER1 bit to clear. */
1670        while (phba->work_hs & HS_FFER1) {
1671                msleep(100);
1672                if (lpfc_readl(phba->HSregaddr, &phba->work_hs)) {
1673                        phba->work_hs = UNPLUG_ERR;
1674                        break;
1675                }
1676                /* If driver is unloading let the worker thread continue */
1677                if (phba->pport->load_flag & FC_UNLOADING) {
1678                        phba->work_hs = 0;
1679                        break;
1680                }
1681        }
1682
1683        /*
1684         * This is to protect against a race condition in which the
1685         * first write to the host attention register clears the
1686         * host status register.
1687         */
1688        if ((!phba->work_hs) && (!(phba->pport->load_flag & FC_UNLOADING)))
1689                phba->work_hs = old_host_status & ~HS_FFER1;
1690
1691        spin_lock_irq(&phba->hbalock);
1692        phba->hba_flag &= ~DEFER_ERATT;
1693        spin_unlock_irq(&phba->hbalock);
1694        phba->work_status[0] = readl(phba->MBslimaddr + 0xa8);
1695        phba->work_status[1] = readl(phba->MBslimaddr + 0xac);
1696}
1697
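/**
 * lpfc_board_errevt_to_mgmt - Post a board error event to management
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine builds an FC_REG_BOARD_EVENT / LPFC_EVENT_PORTINTERR event
 * and posts it as a vendor-unique event on the FC transport so management
 * applications can consume it.
 **/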
1698static void
1699lpfc_board_errevt_to_mgmt(struct lpfc_hba *phba)
1700{
1701        struct lpfc_board_event_header board_event;
1702        struct Scsi_Host *shost;
1703
1704        board_event.event_type = FC_REG_BOARD_EVENT;
1705        board_event.subcategory = LPFC_EVENT_PORTINTERR;
1706        shost = lpfc_shost_from_vport(phba->pport);
1707        fc_host_post_vendor_event(shost, fc_get_event_number(),
1708                                  sizeof(board_event),
1709                                  (char *) &board_event,
1710                                  LPFC_NL_VENDOR_ID);
1711}
1712
1713/**
1714 * lpfc_handle_eratt_s3 - The SLI3 HBA hardware error handler
1715 * @phba: pointer to lpfc hba data structure.
1716 *
1717 * This routine is invoked to handle the following HBA hardware error
1718 * conditions:
1719 * 1 - HBA error attention interrupt
1720 * 2 - DMA ring index out of range
1721 * 3 - Mailbox command came back as unknown
1722 **/
1723static void
1724lpfc_handle_eratt_s3(struct lpfc_hba *phba)
1725{
1726        struct lpfc_vport *vport = phba->pport;
1727        struct lpfc_sli   *psli = &phba->sli;
1728        uint32_t event_data;
1729        unsigned long temperature;
1730        struct temp_event temp_event_data;
1731        struct Scsi_Host  *shost;
1732
1733        /* If the pci channel is offline, ignore possible errors,
1734         * since we cannot communicate with the pci card anyway.
1735         */
1736        if (pci_channel_offline(phba->pcidev)) {
1737                spin_lock_irq(&phba->hbalock);
1738                phba->hba_flag &= ~DEFER_ERATT;
1739                spin_unlock_irq(&phba->hbalock);
1740                return;
1741        }
1742
1743        /* If resets are disabled then leave the HBA alone and return */
1744        if (!phba->cfg_enable_hba_reset)
1745                return;
1746
1747        /* Send an internal error event to mgmt application */
1748        lpfc_board_errevt_to_mgmt(phba);
1749
1750        if (phba->hba_flag & DEFER_ERATT)
1751                lpfc_handle_deferred_eratt(phba);
1752
1753        if ((phba->work_hs & HS_FFER6) || (phba->work_hs & HS_FFER8)) {
1754                if (phba->work_hs & HS_FFER6)
1755                        /* Re-establishing Link */
1756                        lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
1757                                        "1301 Re-establishing Link "
1758                                        "Data: x%x x%x x%x\n",
1759                                        phba->work_hs, phba->work_status[0],
1760                                        phba->work_status[1]);
1761                if (phba->work_hs & HS_FFER8)
1762                        /* Device Zeroization */
1763                        lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
1764                                        "2861 Host Authentication device "
1765                                        "zeroization Data:x%x x%x x%x\n",
1766                                        phba->work_hs, phba->work_status[0],
1767                                        phba->work_status[1]);
1768
1769                spin_lock_irq(&phba->hbalock);
1770                psli->sli_flag &= ~LPFC_SLI_ACTIVE;
1771                spin_unlock_irq(&phba->hbalock);
1772
1773                /*
1774                 * Firmware stops when it triggers erratt with HS_FFER6.
1775                 * That could cause I/Os to be dropped by the firmware.
1776                 * Error out the iocbs (I/Os) on the txcmplq and let the
1777                 * SCSI layer retry them after re-establishing link.
1778                 */
1779                lpfc_sli_abort_fcp_rings(phba);
1780
1781                /*
1782                 * There was a firmware error.  Take the hba offline and then
1783                 * attempt to restart it.
1784                 */
1785                lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
1786                lpfc_offline(phba);
1787                lpfc_sli_brdrestart(phba);
1788                if (lpfc_online(phba) == 0) {   /* Initialize the HBA */
1789                        lpfc_unblock_mgmt_io(phba);
1790                        return;
1791                }
1792                lpfc_unblock_mgmt_io(phba);
1793        } else if (phba->work_hs & HS_CRIT_TEMP) {
1794                temperature = readl(phba->MBslimaddr + TEMPERATURE_OFFSET);
1795                temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
1796                temp_event_data.event_code = LPFC_CRIT_TEMP;
1797                temp_event_data.data = (uint32_t)temperature;
1798
1799                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1800                                "0406 Adapter maximum temperature exceeded "
1801                                "(%ld), taking this port offline "
1802                                "Data: x%x x%x x%x\n",
1803                                temperature, phba->work_hs,
1804                                phba->work_status[0], phba->work_status[1]);
1805
1806                shost = lpfc_shost_from_vport(phba->pport);
1807                fc_host_post_vendor_event(shost, fc_get_event_number(),
1808                                          sizeof(temp_event_data),
1809                                          (char *) &temp_event_data,
1810                                          SCSI_NL_VID_TYPE_PCI
1811                                          | PCI_VENDOR_ID_EMULEX);
1812
1813                spin_lock_irq(&phba->hbalock);
1814                phba->over_temp_state = HBA_OVER_TEMP;
1815                spin_unlock_irq(&phba->hbalock);
1816                lpfc_offline_eratt(phba);
1817
1818        } else {
1819                /* The if clause above forces this code path when the status
1820                 * failure is a value other than FFER6, so the adapter is not
1821                 * taken offline twice. This is the adapter hardware error path.
1822                 */
1823                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1824                                "0457 Adapter Hardware Error "
1825                                "Data: x%x x%x x%x\n",
1826                                phba->work_hs,
1827                                phba->work_status[0], phba->work_status[1]);
1828
1829                event_data = FC_REG_DUMP_EVENT;
1830                shost = lpfc_shost_from_vport(vport);
1831                fc_host_post_vendor_event(shost, fc_get_event_number(),
1832                                sizeof(event_data), (char *) &event_data,
1833                                SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);
1834
1835                lpfc_offline_eratt(phba);
1836        }
1837        return;
1838}
1839
1840/**
1841 * lpfc_sli4_port_sta_fn_reset - The SLI4 function reset due to port status reg
1842 * @phba: pointer to lpfc hba data structure.
1843 * @mbx_action: flag for mailbox shutdown action.
1844 * @en_rn_msg: send reset/port recovery message.
1845 * This routine is invoked to perform an SLI4 port PCI function reset in
1846 * response to port status register polling attention. It waits for port
1847 * status register (ERR, RDY, RN) bits before proceeding with function reset.
1848 * During this process, interrupt vectors are freed and later requested
1849 * for handling possible port resource change.
1850 **/
1851static int
1852lpfc_sli4_port_sta_fn_reset(struct lpfc_hba *phba, int mbx_action,
1853                            bool en_rn_msg)
1854{
1855        int rc;
1856        uint32_t intr_mode;
1857        LPFC_MBOXQ_t *mboxq;
1858
1859        if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) >=
1860            LPFC_SLI_INTF_IF_TYPE_2) {
1861                /*
1862                 * On an error status condition, the driver needs to wait for
1863                 * the port to be ready before performing the reset.
1864                 */
1865                rc = lpfc_sli4_pdev_status_reg_wait(phba);
1866                if (rc)
1867                        return rc;
1868        }
1869
1870        /* need reset: attempt port recovery */
1871        if (en_rn_msg)
1872                lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
1873                                "2887 Reset Needed: Attempting Port "
1874                                "Recovery...\n");
1875
1876        /* If mbx_action is LPFC_MBX_NO_WAIT, the HBA has been reset and is
1877         * not functional, thus we should clear the
1878         * (LPFC_SLI_ACTIVE | LPFC_SLI_MBOX_ACTIVE) flags.
1879         */
1880        if (mbx_action == LPFC_MBX_NO_WAIT) {
1881                spin_lock_irq(&phba->hbalock);
1882                phba->sli.sli_flag &= ~LPFC_SLI_ACTIVE;
1883                if (phba->sli.mbox_active) {
1884                        mboxq = phba->sli.mbox_active;
1885                        mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED;
1886                        __lpfc_mbox_cmpl_put(phba, mboxq);
1887                        phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
1888                        phba->sli.mbox_active = NULL;
1889                }
1890                spin_unlock_irq(&phba->hbalock);
1891        }
1892
1893        lpfc_offline_prep(phba, mbx_action);
1894        lpfc_sli_flush_io_rings(phba);
1895        lpfc_offline(phba);
1896        /* release interrupt for possible resource change */
1897        lpfc_sli4_disable_intr(phba);
1898        rc = lpfc_sli_brdrestart(phba);
1899        if (rc) {
1900                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1901                                "6309 Failed to restart board\n");
1902                return rc;
1903        }
1904        /* request and enable interrupt */
1905        intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
1906        if (intr_mode == LPFC_INTR_ERROR) {
1907                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1908                                "3175 Failed to enable interrupt\n");
1909                return -EIO;
1910        }
1911        phba->intr_mode = intr_mode;
1912        rc = lpfc_online(phba);
1913        if (rc == 0)
1914                lpfc_unblock_mgmt_io(phba);
1915
1916        return rc;
1917}
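/*
 * Typical invocation, as seen in the SLI4 error attention handler below
 * (illustrative of the calling convention only):
 *
 *	rc = lpfc_sli4_port_sta_fn_reset(phba, LPFC_MBX_NO_WAIT, en_rn_msg);
 *
 * A return of 0 means the port was reset and brought back online; any
 * other value leaves the caller responsible for marking the HBA in error.
 */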
1918
1919/**
1920 * lpfc_handle_eratt_s4 - The SLI4 HBA hardware error handler
1921 * @phba: pointer to lpfc hba data structure.
1922 *
1923 * This routine is invoked to handle the SLI4 HBA hardware error attention
1924 * conditions.
1925 **/
1926static void
1927lpfc_handle_eratt_s4(struct lpfc_hba *phba)
1928{
1929        struct lpfc_vport *vport = phba->pport;
1930        uint32_t event_data;
1931        struct Scsi_Host *shost;
1932        uint32_t if_type;
1933        struct lpfc_register portstat_reg = {0};
1934        uint32_t reg_err1, reg_err2;
1935        uint32_t uerrlo_reg, uemasklo_reg;
1936        uint32_t smphr_port_status = 0, pci_rd_rc1, pci_rd_rc2;
1937        bool en_rn_msg = true;
1938        struct temp_event temp_event_data;
1939        struct lpfc_register portsmphr_reg;
1940        int rc, i;
1941
1942        /* If the pci channel is offline, ignore possible errors, since
1943         * we cannot communicate with the pci card anyway.
1944         */
1945        if (pci_channel_offline(phba->pcidev)) {
1946                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1947                                "3166 pci channel is offline\n");
1948                lpfc_sli4_offline_eratt(phba);
1949                return;
1950        }
1951
1952        memset(&portsmphr_reg, 0, sizeof(portsmphr_reg));
1953        if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
1954        switch (if_type) {
1955        case LPFC_SLI_INTF_IF_TYPE_0:
1956                pci_rd_rc1 = lpfc_readl(
1957                                phba->sli4_hba.u.if_type0.UERRLOregaddr,
1958                                &uerrlo_reg);
1959                pci_rd_rc2 = lpfc_readl(
1960                                phba->sli4_hba.u.if_type0.UEMASKLOregaddr,
1961                                &uemasklo_reg);
1962                /* consider PCI bus read error as pci_channel_offline */
1963                if (pci_rd_rc1 == -EIO && pci_rd_rc2 == -EIO)
1964                        return;
1965                if (!(phba->hba_flag & HBA_RECOVERABLE_UE)) {
1966                        lpfc_sli4_offline_eratt(phba);
1967                        return;
1968                }
1969                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1970                                "7623 Checking UE recoverable");
1971
1972                for (i = 0; i < phba->sli4_hba.ue_to_sr / 1000; i++) {
1973                        if (lpfc_readl(phba->sli4_hba.PSMPHRregaddr,
1974                                       &portsmphr_reg.word0))
1975                                continue;
1976
1977                        smphr_port_status = bf_get(lpfc_port_smphr_port_status,
1978                                                   &portsmphr_reg);
1979                        if ((smphr_port_status & LPFC_PORT_SEM_MASK) ==
1980                            LPFC_PORT_SEM_UE_RECOVERABLE)
1981                                break;
1982                        /* Sleep for 1 sec before checking the SEMAPHORE */
1983                        msleep(1000);
1984                }
1985
1986                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1987                                "4827 smphr_port_status x%x : Waited %dSec",
1988                                smphr_port_status, i);
1989
1990                /* Recoverable UE, reset the HBA device */
1991                if ((smphr_port_status & LPFC_PORT_SEM_MASK) ==
1992                    LPFC_PORT_SEM_UE_RECOVERABLE) {
1993                        for (i = 0; i < 20; i++) {
1994                                msleep(1000);
1995                                if (!lpfc_readl(phba->sli4_hba.PSMPHRregaddr,
1996                                    &portsmphr_reg.word0) &&
1997                                    (LPFC_POST_STAGE_PORT_READY ==
1998                                     bf_get(lpfc_port_smphr_port_status,
1999                                     &portsmphr_reg))) {
2000                                        rc = lpfc_sli4_port_sta_fn_reset(phba,
2001                                                LPFC_MBX_NO_WAIT, en_rn_msg);
2002                                        if (rc == 0)
2003                                                return;
2004                                        lpfc_printf_log(phba, KERN_ERR,
2005                                                LOG_TRACE_EVENT,
2006                                                "4215 Failed to recover UE");
2007                                        break;
2008                                }
2009                        }
2010                }
2011                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2012                                "7624 Firmware not ready: Failing UE recovery,"
2013                                " waited %dSec", i);
2014                phba->link_state = LPFC_HBA_ERROR;
2015                break;
2016
2017        case LPFC_SLI_INTF_IF_TYPE_2:
2018        case LPFC_SLI_INTF_IF_TYPE_6:
2019                pci_rd_rc1 = lpfc_readl(
2020                                phba->sli4_hba.u.if_type2.STATUSregaddr,
2021                                &portstat_reg.word0);
2022                /* consider PCI bus read error as pci_channel_offline */
2023                if (pci_rd_rc1 == -EIO) {
2024                        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2025                                "3151 PCI bus read access failure: x%x\n",
2026                                readl(phba->sli4_hba.u.if_type2.STATUSregaddr));
2027                        lpfc_sli4_offline_eratt(phba);
2028                        return;
2029                }
2030                reg_err1 = readl(phba->sli4_hba.u.if_type2.ERR1regaddr);
2031                reg_err2 = readl(phba->sli4_hba.u.if_type2.ERR2regaddr);
2032                if (bf_get(lpfc_sliport_status_oti, &portstat_reg)) {
2033                        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2034                                        "2889 Port Overtemperature event, "
2035                                        "taking port offline Data: x%x x%x\n",
2036                                        reg_err1, reg_err2);
2037
2038                        phba->sfp_alarm |= LPFC_TRANSGRESSION_HIGH_TEMPERATURE;
2039                        temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
2040                        temp_event_data.event_code = LPFC_CRIT_TEMP;
2041                        temp_event_data.data = 0xFFFFFFFF;
2042
2043                        shost = lpfc_shost_from_vport(phba->pport);
2044                        fc_host_post_vendor_event(shost, fc_get_event_number(),
2045                                                  sizeof(temp_event_data),
2046                                                  (char *)&temp_event_data,
2047                                                  SCSI_NL_VID_TYPE_PCI
2048                                                  | PCI_VENDOR_ID_EMULEX);
2049
2050                        spin_lock_irq(&phba->hbalock);
2051                        phba->over_temp_state = HBA_OVER_TEMP;
2052                        spin_unlock_irq(&phba->hbalock);
2053                        lpfc_sli4_offline_eratt(phba);
2054                        return;
2055                }
2056                if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
2057                    reg_err2 == SLIPORT_ERR2_REG_FW_RESTART) {
2058                        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2059                                        "3143 Port Down: Firmware Update "
2060                                        "Detected\n");
2061                        en_rn_msg = false;
2062                } else if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
2063                         reg_err2 == SLIPORT_ERR2_REG_FORCED_DUMP)
2064                        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2065                                        "3144 Port Down: Debug Dump\n");
2066                else if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
2067                         reg_err2 == SLIPORT_ERR2_REG_FUNC_PROVISON)
2068                        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2069                                        "3145 Port Down: Provisioning\n");
2070
2071                /* If resets are disabled then leave the HBA alone and return */
2072                if (!phba->cfg_enable_hba_reset)
2073                        return;
2074
2075                /* Check port status register for function reset */
2076                rc = lpfc_sli4_port_sta_fn_reset(phba, LPFC_MBX_NO_WAIT,
2077                                en_rn_msg);
2078                if (rc == 0) {
2079                        /* don't report event on forced debug dump */
2080                        if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
2081                            reg_err2 == SLIPORT_ERR2_REG_FORCED_DUMP)
2082                                return;
2083                        else
2084                                break;
2085                }
2086                /* fall through for not able to recover */
2087                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2088                                "3152 Unrecoverable error\n");
2089                phba->link_state = LPFC_HBA_ERROR;
2090                break;
2091        case LPFC_SLI_INTF_IF_TYPE_1:
2092        default:
2093                break;
2094        }
2095        lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
2096                        "3123 Report dump event to upper layer\n");
2097        /* Send an internal error event to mgmt application */
2098        lpfc_board_errevt_to_mgmt(phba);
2099
2100        event_data = FC_REG_DUMP_EVENT;
2101        shost = lpfc_shost_from_vport(vport);
2102        fc_host_post_vendor_event(shost, fc_get_event_number(),
2103                                  sizeof(event_data), (char *) &event_data,
2104                                  SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);
2105}
2106
2107/**
2108 * lpfc_handle_eratt - Wrapper func for handling hba error attention
2109 * @phba: pointer to lpfc HBA data structure.
2110 *
2111 * This routine wraps the actual SLI3 or SLI4 hba error attention handling
2112 * routine from the API jump table function pointer from the lpfc_hba struct.
2113 *
2114 * This routine has no return value; any error recovery is performed
2115 * directly by the dispatched SLI3 or SLI4 handler, which updates the
2116 * HBA state itself.
2117 **/
2118void
2119lpfc_handle_eratt(struct lpfc_hba *phba)
2120{
2121        (*phba->lpfc_handle_eratt)(phba);
2122}
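/*
 * The lpfc_handle_eratt member is expected to be bound to
 * lpfc_handle_eratt_s3 or lpfc_handle_eratt_s4 during HBA setup,
 * according to the SLI revision; the assignment itself happens outside
 * this section.
 */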
2123
2124/**
2125 * lpfc_handle_latt - The HBA link event handler
2126 * @phba: pointer to lpfc hba data structure.
2127 *
2128 * This routine is invoked from the worker thread to handle a HBA host
2129 * attention link event. SLI3 only.
2130 **/
2131void
2132lpfc_handle_latt(struct lpfc_hba *phba)
2133{
2134        struct lpfc_vport *vport = phba->pport;
2135        struct lpfc_sli   *psli = &phba->sli;
2136        LPFC_MBOXQ_t *pmb;
2137        volatile uint32_t control;
2138        struct lpfc_dmabuf *mp;
2139        int rc = 0;
2140
2141        pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
2142        if (!pmb) {
2143                rc = 1;
2144                goto lpfc_handle_latt_err_exit;
2145        }
2146
2147        mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
2148        if (!mp) {
2149                rc = 2;
2150                goto lpfc_handle_latt_free_pmb;
2151        }
2152
2153        mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
2154        if (!mp->virt) {
2155                rc = 3;
2156                goto lpfc_handle_latt_free_mp;
2157        }
2158
2159        /* Cleanup any outstanding ELS commands */
2160        lpfc_els_flush_all_cmd(phba);
2161
2162        psli->slistat.link_event++;
2163        lpfc_read_topology(phba, pmb, mp);
2164        pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology;
2165        pmb->vport = vport;
2166        /* Block ELS IOCBs until we have processed this mbox command */
2167        phba->sli.sli3_ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT;
2168        rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
2169        if (rc == MBX_NOT_FINISHED) {
2170                rc = 4;
2171                goto lpfc_handle_latt_free_mbuf;
2172        }
2173
2174        /* Clear Link Attention in HA REG */
2175        spin_lock_irq(&phba->hbalock);
2176        writel(HA_LATT, phba->HAregaddr);
2177        readl(phba->HAregaddr); /* flush */
2178        spin_unlock_irq(&phba->hbalock);
2179
2180        return;
2181
2182lpfc_handle_latt_free_mbuf:
2183        phba->sli.sli3_ring[LPFC_ELS_RING].flag &= ~LPFC_STOP_IOCB_EVENT;
2184        lpfc_mbuf_free(phba, mp->virt, mp->phys);
2185lpfc_handle_latt_free_mp:
2186        kfree(mp);
2187lpfc_handle_latt_free_pmb:
2188        mempool_free(pmb, phba->mbox_mem_pool);
2189lpfc_handle_latt_err_exit:
2190        /* Enable Link attention interrupts */
2191        spin_lock_irq(&phba->hbalock);
2192        psli->sli_flag |= LPFC_PROCESS_LA;
2193        control = readl(phba->HCregaddr);
2194        control |= HC_LAINT_ENA;
2195        writel(control, phba->HCregaddr);
2196        readl(phba->HCregaddr); /* flush */
2197
2198        /* Clear Link Attention in HA REG */
2199        writel(HA_LATT, phba->HAregaddr);
2200        readl(phba->HAregaddr); /* flush */
2201        spin_unlock_irq(&phba->hbalock);
2202        lpfc_linkdown(phba);
2203        phba->link_state = LPFC_HBA_ERROR;
2204
2205        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2206                        "0300 LATT: Cannot issue READ_LA: Data:%d\n", rc);
2207
2208        return;
2209}
2210
2211/**
2212 * lpfc_parse_vpd - Parse VPD (Vital Product Data)
2213 * @phba: pointer to lpfc hba data structure.
2214 * @vpd: pointer to the vital product data.
2215 * @len: length of the vital product data in bytes.
2216 *
2217 * This routine parses the Vital Product Data (VPD). The VPD is treated as
2218 * an array of characters. In this routine, fields of the phba data
2219 * structure such as ModelName, ProgramType, and ModelDesc are populated.
2220 *
2221 * Return codes
2222 *   0 - pointer to the VPD passed in is NULL
2223 *   1 - success
2224 **/
2225int
2226lpfc_parse_vpd(struct lpfc_hba *phba, uint8_t *vpd, int len)
2227{
2228        uint8_t lenlo, lenhi;
2229        int Length;
2230        int i, j;
2231        int finished = 0;
2232        int index = 0;
2233
2234        if (!vpd)
2235                return 0;
2236
2237        /* Vital Product */
2238        lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
2239                        "0455 Vital Product Data: x%x x%x x%x x%x\n",
2240                        (uint32_t) vpd[0], (uint32_t) vpd[1], (uint32_t) vpd[2],
2241                        (uint32_t) vpd[3]);
2242        while (!finished && (index < (len - 4))) {
2243                switch (vpd[index]) {
2244                case 0x82:
2245                case 0x91:
2246                        index += 1;
2247                        lenlo = vpd[index];
2248                        index += 1;
2249                        lenhi = vpd[index];
2250                        index += 1;
2251                        i = ((((unsigned short)lenhi) << 8) + lenlo);
2252                        index += i;
2253                        break;
2254                case 0x90:
2255                        index += 1;
2256                        lenlo = vpd[index];
2257                        index += 1;
2258                        lenhi = vpd[index];
2259                        index += 1;
2260                        Length = ((((unsigned short)lenhi) << 8) + lenlo);
2261                        if (Length > len - index)
2262                                Length = len - index;
2263                        while (Length > 0) {
2264                        /* Look for Serial Number */
2265                        if ((vpd[index] == 'S') && (vpd[index+1] == 'N')) {
2266                                index += 2;
2267                                i = vpd[index];
2268                                index += 1;
2269                                j = 0;
2270                                Length -= (3+i);
2271                                while (i--) {
2272                                        phba->SerialNumber[j++] = vpd[index++];
2273                                        if (j == 31)
2274                                                break;
2275                                }
2276                                phba->SerialNumber[j] = 0;
2277                                continue;
2278                        }
2279                        else if ((vpd[index] == 'V') && (vpd[index+1] == '1')) {
2280                                phba->vpd_flag |= VPD_MODEL_DESC;
2281                                index += 2;
2282                                i = vpd[index];
2283                                index += 1;
2284                                j = 0;
2285                                Length -= (3+i);
2286                                while (i--) {
2287                                        phba->ModelDesc[j++] = vpd[index++];
2288                                        if (j == 255)
2289                                                break;
2290                                }
2291                                phba->ModelDesc[j] = 0;
2292                                continue;
2293                        }
2294                        else if ((vpd[index] == 'V') && (vpd[index+1] == '2')) {
2295                                phba->vpd_flag |= VPD_MODEL_NAME;
2296                                index += 2;
2297                                i = vpd[index];
2298                                index += 1;
2299                                j = 0;
2300                                Length -= (3+i);
2301                                while (i--) {
2302                                        phba->ModelName[j++] = vpd[index++];
2303                                        if (j == 79)
2304                                                break;
2305                                }
2306                                phba->ModelName[j] = 0;
2307                                continue;
2308                        }
2309                        else if ((vpd[index] == 'V') && (vpd[index+1] == '3')) {
2310                                phba->vpd_flag |= VPD_PROGRAM_TYPE;
2311                                index += 2;
2312                                i = vpd[index];
2313                                index += 1;
2314                                j = 0;
2315                                Length -= (3+i);
2316                                while (i--) {
2317                                        phba->ProgramType[j++] = vpd[index++];
2318                                        if (j == 255)
2319                                                break;
2320                                }
2321                                phba->ProgramType[j] = 0;
2322                                continue;
2323                        }
2324                        else if ((vpd[index] == 'V') && (vpd[index+1] == '4')) {
2325                                phba->vpd_flag |= VPD_PORT;
2326                                index += 2;
2327                                i = vpd[index];
2328                                index += 1;
2329                                j = 0;
2330                                Length -= (3+i);
2331                                while (i--) {
2332                                        if ((phba->sli_rev == LPFC_SLI_REV4) &&
2333                                            (phba->sli4_hba.pport_name_sta ==
2334                                             LPFC_SLI4_PPNAME_GET)) {
2335                                                j++;
2336                                                index++;
2337                                        } else
2338                                                phba->Port[j++] = vpd[index++];
2339                                        if (j == 19)
2340                                                break;
2341                                }
2342                                if ((phba->sli_rev != LPFC_SLI_REV4) ||
2343                                    (phba->sli4_hba.pport_name_sta ==
2344                                     LPFC_SLI4_PPNAME_NON))
2345                                        phba->Port[j] = 0;
2346                                continue;
2347                        }
2348                        else {
2349                                index += 2;
2350                                i = vpd[index];
2351                                index += 1;
2352                                index += i;
2353                                Length -= (3 + i);
2354                        }
2355                }
2356                finished = 0;
2357                break;
2358                case 0x78:
2359                        finished = 1;
2360                        break;
2361                default:
2362                        index++;
2363                        break;
2364                }
2365        }
2366
2367        return 1;
2368}
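/*
 * Sketch of the VPD layout walked above (standard PCI VPD resource tags):
 * a large-resource tag byte (0x82 identifier string, 0x90 read-only
 * fields, 0x91 read-write fields) is followed by a 16-bit little-endian
 * length and the data. Within the 0x90 area each field is a two-character
 * keyword ('SN', 'V1'..'V4'), a one-byte length, and the value; small
 * resource tag 0x78 terminates the list.
 */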
2369
2370/**
2371 * lpfc_get_hba_model_desc - Retrieve HBA device model name and description
2372 * @phba: pointer to lpfc hba data structure.
2373 * @mdp: pointer to the data structure to hold the derived model name.
2374 * @descp: pointer to the data structure to hold the derived description.
2375 *
2376 * This routine retrieves the HBA's description based on its registered PCI device
2377 * ID. The @descp passed into this function points to an array of 256 chars. It
2378 * shall be returned with the model name, maximum speed, and the host bus type.
2379 * The @mdp passed into this function points to an array of 80 chars. When the
2380 * function returns, the @mdp will be filled with the model name.
2381 **/
2382static void
2383lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
2384{
2385        lpfc_vpd_t *vp;
2386        uint16_t dev_id = phba->pcidev->device;
2387        int max_speed;
2388        int GE = 0;
2389        int oneConnect = 0; /* default is not a oneConnect */
2390        struct {
2391                char *name;
2392                char *bus;
2393                char *function;
2394        } m = {"<Unknown>", "", ""};
2395
2396        if (mdp && mdp[0] != '\0' &&
2397            descp && descp[0] != '\0')
2398                return;
2399
2400        if (phba->lmt & LMT_64Gb)
2401                max_speed = 64;
2402        else if (phba->lmt & LMT_32Gb)
2403                max_speed = 32;
2404        else if (phba->lmt & LMT_16Gb)
2405                max_speed = 16;
2406        else if (phba->lmt & LMT_10Gb)
2407                max_speed = 10;
2408        else if (phba->lmt & LMT_8Gb)
2409                max_speed = 8;
2410        else if (phba->lmt & LMT_4Gb)
2411                max_speed = 4;
2412        else if (phba->lmt & LMT_2Gb)
2413                max_speed = 2;
2414        else if (phba->lmt & LMT_1Gb)
2415                max_speed = 1;
2416        else
2417                max_speed = 0;
2418
2419        vp = &phba->vpd;
2420
2421        switch (dev_id) {
2422        case PCI_DEVICE_ID_FIREFLY:
2423                m = (typeof(m)){"LP6000", "PCI",
2424                                "Obsolete, Unsupported Fibre Channel Adapter"};
2425                break;
2426        case PCI_DEVICE_ID_SUPERFLY:
2427                if (vp->rev.biuRev >= 1 && vp->rev.biuRev <= 3)
2428                        m = (typeof(m)){"LP7000", "PCI", ""};
2429                else
2430                        m = (typeof(m)){"LP7000E", "PCI", ""};
2431                m.function = "Obsolete, Unsupported Fibre Channel Adapter";
2432                break;
2433        case PCI_DEVICE_ID_DRAGONFLY:
2434                m = (typeof(m)){"LP8000", "PCI",
2435                                "Obsolete, Unsupported Fibre Channel Adapter"};
2436                break;
2437        case PCI_DEVICE_ID_CENTAUR:
2438                if (FC_JEDEC_ID(vp->rev.biuRev) == CENTAUR_2G_JEDEC_ID)
2439                        m = (typeof(m)){"LP9002", "PCI", ""};
2440                else
2441                        m = (typeof(m)){"LP9000", "PCI", ""};
2442                m.function = "Obsolete, Unsupported Fibre Channel Adapter";
2443                break;
2444        case PCI_DEVICE_ID_RFLY:
2445                m = (typeof(m)){"LP952", "PCI",
2446                                "Obsolete, Unsupported Fibre Channel Adapter"};
2447                break;
2448        case PCI_DEVICE_ID_PEGASUS:
2449                m = (typeof(m)){"LP9802", "PCI-X",
2450                                "Obsolete, Unsupported Fibre Channel Adapter"};
2451                break;
2452        case PCI_DEVICE_ID_THOR:
2453                m = (typeof(m)){"LP10000", "PCI-X",
2454                                "Obsolete, Unsupported Fibre Channel Adapter"};
2455                break;
2456        case PCI_DEVICE_ID_VIPER:
2457                m = (typeof(m)){"LPX1000", "PCI-X",
2458                                "Obsolete, Unsupported Fibre Channel Adapter"};
2459                break;
2460        case PCI_DEVICE_ID_PFLY:
2461                m = (typeof(m)){"LP982", "PCI-X",
2462                                "Obsolete, Unsupported Fibre Channel Adapter"};
2463                break;
2464        case PCI_DEVICE_ID_TFLY:
2465                m = (typeof(m)){"LP1050", "PCI-X",
2466                                "Obsolete, Unsupported Fibre Channel Adapter"};
2467                break;
2468        case PCI_DEVICE_ID_HELIOS:
2469                m = (typeof(m)){"LP11000", "PCI-X2",
2470                                "Obsolete, Unsupported Fibre Channel Adapter"};
2471                break;
2472        case PCI_DEVICE_ID_HELIOS_SCSP:
2473                m = (typeof(m)){"LP11000-SP", "PCI-X2",
2474                                "Obsolete, Unsupported Fibre Channel Adapter"};
2475                break;
2476        case PCI_DEVICE_ID_HELIOS_DCSP:
2477                m = (typeof(m)){"LP11002-SP", "PCI-X2",
2478                                "Obsolete, Unsupported Fibre Channel Adapter"};
2479                break;
2480        case PCI_DEVICE_ID_NEPTUNE:
2481                m = (typeof(m)){"LPe1000", "PCIe",
2482                                "Obsolete, Unsupported Fibre Channel Adapter"};
2483                break;
2484        case PCI_DEVICE_ID_NEPTUNE_SCSP:
2485                m = (typeof(m)){"LPe1000-SP", "PCIe",
2486                                "Obsolete, Unsupported Fibre Channel Adapter"};
2487                break;
2488        case PCI_DEVICE_ID_NEPTUNE_DCSP:
2489                m = (typeof(m)){"LPe1002-SP", "PCIe",
2490                                "Obsolete, Unsupported Fibre Channel Adapter"};
2491                break;
2492        case PCI_DEVICE_ID_BMID:
2493                m = (typeof(m)){"LP1150", "PCI-X2", "Fibre Channel Adapter"};
2494                break;
2495        case PCI_DEVICE_ID_BSMB:
2496                m = (typeof(m)){"LP111", "PCI-X2",
2497                                "Obsolete, Unsupported Fibre Channel Adapter"};
2498                break;
2499        case PCI_DEVICE_ID_ZEPHYR:
2500                m = (typeof(m)){"LPe11000", "PCIe", "Fibre Channel Adapter"};
2501                break;
2502        case PCI_DEVICE_ID_ZEPHYR_SCSP:
2503                m = (typeof(m)){"LPe11000", "PCIe", "Fibre Channel Adapter"};
2504                break;
2505        case PCI_DEVICE_ID_ZEPHYR_DCSP:
2506                m = (typeof(m)){"LP2105", "PCIe", "FCoE Adapter"};
2507                GE = 1;
2508                break;
2509        case PCI_DEVICE_ID_ZMID:
2510                m = (typeof(m)){"LPe1150", "PCIe", "Fibre Channel Adapter"};
2511                break;
2512        case PCI_DEVICE_ID_ZSMB:
2513                m = (typeof(m)){"LPe111", "PCIe", "Fibre Channel Adapter"};
2514                break;
2515        case PCI_DEVICE_ID_LP101:
2516                m = (typeof(m)){"LP101", "PCI-X",
2517                                "Obsolete, Unsupported Fibre Channel Adapter"};
2518                break;
2519        case PCI_DEVICE_ID_LP10000S:
2520                m = (typeof(m)){"LP10000-S", "PCI",
2521                                "Obsolete, Unsupported Fibre Channel Adapter"};
2522                break;
2523        case PCI_DEVICE_ID_LP11000S:
2524                m = (typeof(m)){"LP11000-S", "PCI-X2",
2525                                "Obsolete, Unsupported Fibre Channel Adapter"};
2526                break;
2527        case PCI_DEVICE_ID_LPE11000S:
2528                m = (typeof(m)){"LPe11000-S", "PCIe",
2529                                "Obsolete, Unsupported Fibre Channel Adapter"};
2530                break;
2531        case PCI_DEVICE_ID_SAT:
2532                m = (typeof(m)){"LPe12000", "PCIe", "Fibre Channel Adapter"};
2533                break;
2534        case PCI_DEVICE_ID_SAT_MID:
2535                m = (typeof(m)){"LPe1250", "PCIe", "Fibre Channel Adapter"};
2536                break;
2537        case PCI_DEVICE_ID_SAT_SMB:
2538                m = (typeof(m)){"LPe121", "PCIe", "Fibre Channel Adapter"};
2539                break;
2540        case PCI_DEVICE_ID_SAT_DCSP:
2541                m = (typeof(m)){"LPe12002-SP", "PCIe", "Fibre Channel Adapter"};
2542                break;
2543        case PCI_DEVICE_ID_SAT_SCSP:
2544                m = (typeof(m)){"LPe12000-SP", "PCIe", "Fibre Channel Adapter"};
2545                break;
2546        case PCI_DEVICE_ID_SAT_S:
2547                m = (typeof(m)){"LPe12000-S", "PCIe", "Fibre Channel Adapter"};
2548                break;
2549        case PCI_DEVICE_ID_HORNET:
2550                m = (typeof(m)){"LP21000", "PCIe",
2551                                "Obsolete, Unsupported FCoE Adapter"};
2552                GE = 1;
2553                break;
2554        case PCI_DEVICE_ID_PROTEUS_VF:
2555                m = (typeof(m)){"LPev12000", "PCIe IOV",
2556                                "Obsolete, Unsupported Fibre Channel Adapter"};
2557                break;
2558        case PCI_DEVICE_ID_PROTEUS_PF:
2559                m = (typeof(m)){"LPev12000", "PCIe IOV",
2560                                "Obsolete, Unsupported Fibre Channel Adapter"};
2561                break;
2562        case PCI_DEVICE_ID_PROTEUS_S:
2563                m = (typeof(m)){"LPemv12002-S", "PCIe IOV",
2564                                "Obsolete, Unsupported Fibre Channel Adapter"};
2565                break;
2566        case PCI_DEVICE_ID_TIGERSHARK:
2567                oneConnect = 1;
2568                m = (typeof(m)){"OCe10100", "PCIe", "FCoE"};
2569                break;
2570        case PCI_DEVICE_ID_TOMCAT:
2571                oneConnect = 1;
2572                m = (typeof(m)){"OCe11100", "PCIe", "FCoE"};
2573                break;
2574        case PCI_DEVICE_ID_FALCON:
2575                m = (typeof(m)){"LPSe12002-ML1-E", "PCIe",
2576                                "EmulexSecure Fibre"};
2577                break;
2578        case PCI_DEVICE_ID_BALIUS:
2579                m = (typeof(m)){"LPVe12002", "PCIe Shared I/O",
2580                                "Obsolete, Unsupported Fibre Channel Adapter"};
2581                break;
2582        case PCI_DEVICE_ID_LANCER_FC:
2583                m = (typeof(m)){"LPe16000", "PCIe", "Fibre Channel Adapter"};
2584                break;
2585        case PCI_DEVICE_ID_LANCER_FC_VF:
2586                m = (typeof(m)){"LPe16000", "PCIe",
2587                                "Obsolete, Unsupported Fibre Channel Adapter"};
2588                break;
2589        case PCI_DEVICE_ID_LANCER_FCOE:
2590                oneConnect = 1;
2591                m = (typeof(m)){"OCe15100", "PCIe", "FCoE"};
2592                break;
2593        case PCI_DEVICE_ID_LANCER_FCOE_VF:
2594                oneConnect = 1;
2595                m = (typeof(m)){"OCe15100", "PCIe",
2596                                "Obsolete, Unsupported FCoE"};
2597                break;
2598        case PCI_DEVICE_ID_LANCER_G6_FC:
2599                m = (typeof(m)){"LPe32000", "PCIe", "Fibre Channel Adapter"};
2600                break;
2601        case PCI_DEVICE_ID_LANCER_G7_FC:
2602                m = (typeof(m)){"LPe36000", "PCIe", "Fibre Channel Adapter"};
2603                break;
2604        case PCI_DEVICE_ID_LANCER_G7P_FC:
2605                m = (typeof(m)){"LPe38000", "PCIe", "Fibre Channel Adapter"};
2606                break;
2607        case PCI_DEVICE_ID_SKYHAWK:
2608        case PCI_DEVICE_ID_SKYHAWK_VF:
2609                oneConnect = 1;
2610                m = (typeof(m)){"OCe14000", "PCIe", "FCoE"};
2611                break;
2612        default:
2613                m = (typeof(m)){"Unknown", "", ""};
2614                break;
2615        }
2616
2617        if (mdp && mdp[0] == '\0')
2618                snprintf(mdp, 79, "%s", m.name);
2619        /*
2620         * oneConnect HBAs require special processing: they are all
2621         * initiators, and the port number is appended to the description.
2622         */
2623        if (descp && descp[0] == '\0') {
2624                if (oneConnect)
2625                        snprintf(descp, 255,
2626                                "Emulex OneConnect %s, %s Initiator %s",
2627                                m.name, m.function,
2628                                phba->Port);
2629                else if (max_speed == 0)
2630                        snprintf(descp, 255,
2631                                "Emulex %s %s %s",
2632                                m.name, m.bus, m.function);
2633                else
2634                        snprintf(descp, 255,
2635                                "Emulex %s %d%s %s %s",
2636                                m.name, max_speed, (GE) ? "GE" : "Gb",
2637                                m.bus, m.function);
2638        }
2639}
2640
2641/**
2642 * lpfc_post_buffer - Post IOCB(s) with DMA buffer descriptor(s) to an IOCB ring
2643 * @phba: pointer to lpfc hba data structure.
2644 * @pring: pointer to an IOCB ring.
2645 * @cnt: the number of IOCBs to be posted to the IOCB ring.
2646 *
2647 * This routine posts a given number of IOCBs with the associated DMA buffer
2648 * descriptors specified by the cnt argument to the given IOCB ring.
2649 *
2650 * Return codes
2651 *   The number of IOCBs NOT able to be posted to the IOCB ring.
2652 **/
2653int
2654lpfc_post_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, int cnt)
2655{
2656        IOCB_t *icmd;
2657        struct lpfc_iocbq *iocb;
2658        struct lpfc_dmabuf *mp1, *mp2;
2659
2660        cnt += pring->missbufcnt;
2661
2662        /* While there are buffers to post */
2663        while (cnt > 0) {
2664                /* Allocate buffer for  command iocb */
2665                iocb = lpfc_sli_get_iocbq(phba);
2666                if (iocb == NULL) {
2667                        pring->missbufcnt = cnt;
2668                        return cnt;
2669                }
2670                icmd = &iocb->iocb;
2671
2672                /* 2 buffers can be posted per command */
2673                /* Allocate buffer to post */
2674                mp1 = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
2675                if (mp1)
2676                        mp1->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &mp1->phys);
2677                if (!mp1 || !mp1->virt) {
2678                        kfree(mp1);
2679                        lpfc_sli_release_iocbq(phba, iocb);
2680                        pring->missbufcnt = cnt;
2681                        return cnt;
2682                }
2683
2684                INIT_LIST_HEAD(&mp1->list);
2685                /* Allocate buffer to post */
2686                if (cnt > 1) {
2687                        mp2 = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
2688                        if (mp2)
2689                                mp2->virt = lpfc_mbuf_alloc(phba, MEM_PRI,
2690                                                            &mp2->phys);
2691                        if (!mp2 || !mp2->virt) {
2692                                kfree(mp2);
2693                                lpfc_mbuf_free(phba, mp1->virt, mp1->phys);
2694                                kfree(mp1);
2695                                lpfc_sli_release_iocbq(phba, iocb);
2696                                pring->missbufcnt = cnt;
2697                                return cnt;
2698                        }
2699
2700                        INIT_LIST_HEAD(&mp2->list);
2701                } else {
2702                        mp2 = NULL;
2703                }
2704
2705                icmd->un.cont64[0].addrHigh = putPaddrHigh(mp1->phys);
2706                icmd->un.cont64[0].addrLow = putPaddrLow(mp1->phys);
2707                icmd->un.cont64[0].tus.f.bdeSize = FCELSSIZE;
2708                icmd->ulpBdeCount = 1;
2709                cnt--;
2710                if (mp2) {
2711                        icmd->un.cont64[1].addrHigh = putPaddrHigh(mp2->phys);
2712                        icmd->un.cont64[1].addrLow = putPaddrLow(mp2->phys);
2713                        icmd->un.cont64[1].tus.f.bdeSize = FCELSSIZE;
2714                        cnt--;
2715                        icmd->ulpBdeCount = 2;
2716                }
2717
2718                icmd->ulpCommand = CMD_QUE_RING_BUF64_CN;
2719                icmd->ulpLe = 1;
2720
2721                if (lpfc_sli_issue_iocb(phba, pring->ringno, iocb, 0) ==
2722                    IOCB_ERROR) {
2723                        lpfc_mbuf_free(phba, mp1->virt, mp1->phys);
2724                        kfree(mp1);
2725                        cnt++;
2726                        if (mp2) {
2727                                lpfc_mbuf_free(phba, mp2->virt, mp2->phys);
2728                                kfree(mp2);
2729                                cnt++;
2730                        }
2731                        lpfc_sli_release_iocbq(phba, iocb);
2732                        pring->missbufcnt = cnt;
2733                        return cnt;
2734                }
2735                lpfc_sli_ringpostbuf_put(phba, pring, mp1);
2736                if (mp2)
2737                        lpfc_sli_ringpostbuf_put(phba, pring, mp2);
2738        }
2739        pring->missbufcnt = 0;
2740        return 0;
2741}
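
    /*
     * Usage sketch (illustrative only, not driver code): a shortfall is
     * retried automatically, because the unposted count is both returned
     * here and remembered in pring->missbufcnt, which the next call folds
     * back into cnt:
     *
     *	unposted = lpfc_post_buffer(phba, pring, 64);
     *	if (unposted)
     *		arrange for lpfc_post_buffer() to be called again later
     */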
2742
2743/**
2744 * lpfc_post_rcv_buf - Post the initial receive IOCB buffers to ELS ring
2745 * @phba: pointer to lpfc hba data structure.
2746 *
2747 * This routine posts the initial receive IOCB buffers to the ELS ring. The
2748 * number of initial IOCB buffers, specified by LPFC_BUF_RING0, is currently
2749 * set to 64 IOCBs. SLI3 only.
2750 *
2751 * Return codes
2752 *   0 - success (currently always success)
2753 **/
2754static int
2755lpfc_post_rcv_buf(struct lpfc_hba *phba)
2756{
2757        struct lpfc_sli *psli = &phba->sli;
2758
2759        /* Ring 0, ELS / CT buffers */
2760        lpfc_post_buffer(phba, &psli->sli3_ring[LPFC_ELS_RING], LPFC_BUF_RING0);
2761        /* Ring 2 - FCP no buffers needed */
2762
2763        return 0;
2764}
2765
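    /* S(N, V): rotate the 32-bit value V left by N bits (SHA-1 ROTL). */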
2766#define S(N,V) (((V)<<(N))|((V)>>(32-(N))))
2767
2768/**
2769 * lpfc_sha_init - Set up initial array of hash table entries
2770 * @HashResultPointer: pointer to an array as hash table.
2771 *
2772 * This routine sets up the initial values to the array of hash table entries
2773 * for the LC HBAs.
2774 **/
2775static void
2776lpfc_sha_init(uint32_t * HashResultPointer)
2777{
2778        HashResultPointer[0] = 0x67452301;
2779        HashResultPointer[1] = 0xEFCDAB89;
2780        HashResultPointer[2] = 0x98BADCFE;
2781        HashResultPointer[3] = 0x10325476;
2782        HashResultPointer[4] = 0xC3D2E1F0;
2783}
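
    /*
     * For reference: the five constants above are the standard SHA-1 initial
     * hash values H0..H4 from FIPS 180-1; the routines below compute a plain
     * SHA-1 digest over the 80-entry working array.
     */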
2784
2785/**
2786 * lpfc_sha_iterate - Iterate initial hash table with the working hash table
2787 * @HashResultPointer: pointer to an initial/result hash table.
2788 * @HashWorkingPointer: pointer to a working hash table.
2789 *
2790 * This routine iterates an initial hash table pointed to by
2791 * @HashResultPointer with the values from the working hash table pointed
2792 * to by @HashWorkingPointer. The results are put back into the initial
2793 * hash table and returned through @HashResultPointer as the result hash table.
2794 **/
2795static void
2796lpfc_sha_iterate(uint32_t * HashResultPointer, uint32_t * HashWorkingPointer)
2797{
2798        int t;
2799        uint32_t TEMP;
2800        uint32_t A, B, C, D, E;
2801        t = 16;
2802        do {
2803                HashWorkingPointer[t] =
2804                    S(1,
2805                      HashWorkingPointer[t - 3] ^ HashWorkingPointer[t -
2806                                                                     8] ^
2807                      HashWorkingPointer[t - 14] ^ HashWorkingPointer[t - 16]);
2808        } while (++t <= 79);
2809        t = 0;
2810        A = HashResultPointer[0];
2811        B = HashResultPointer[1];
2812        C = HashResultPointer[2];
2813        D = HashResultPointer[3];
2814        E = HashResultPointer[4];
2815
2816        do {
2817                if (t < 20) {
2818                        TEMP = ((B & C) | ((~B) & D)) + 0x5A827999;
2819                } else if (t < 40) {
2820                        TEMP = (B ^ C ^ D) + 0x6ED9EBA1;
2821                } else if (t < 60) {
2822                        TEMP = ((B & C) | (B & D) | (C & D)) + 0x8F1BBCDC;
2823                } else {
2824                        TEMP = (B ^ C ^ D) + 0xCA62C1D6;
2825                }
2826                TEMP += S(5, A) + E + HashWorkingPointer[t];
2827                E = D;
2828                D = C;
2829                C = S(30, B);
2830                B = A;
2831                A = TEMP;
2832        } while (++t <= 79);
2833
2834        HashResultPointer[0] += A;
2835        HashResultPointer[1] += B;
2836        HashResultPointer[2] += C;
2837        HashResultPointer[3] += D;
2838        HashResultPointer[4] += E;
2839
2840}
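
    /*
     * For reference, the per-round logic above follows the SHA-1
     * specification (FIPS 180-1):
     *
     *	 0 <= t <= 19:  f = Ch(B,C,D)  = (B & C) | (~B & D),      K = 0x5A827999
     *	20 <= t <= 39:  f = Parity     = B ^ C ^ D,               K = 0x6ED9EBA1
     *	40 <= t <= 59:  f = Maj(B,C,D) = (B&C) | (B&D) | (C&D),   K = 0x8F1BBCDC
     *	60 <= t <= 79:  f = Parity     = B ^ C ^ D,               K = 0xCA62C1D6
     */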
2841
2842/**
2843 * lpfc_challenge_key - Create challenge key based on WWPN of the HBA
2844 * @RandomChallenge: pointer to the entry of host challenge random number array.
2845 * @HashWorking: pointer to the entry of the working hash array.
2846 *
2847 * This routine calculates the working hash array referred by @HashWorking
2848 * from the challenge random numbers associated with the host, referred by
2849 * @RandomChallenge. The result is put into the entry of the working hash
2850 * array and returned by reference through @HashWorking.
2851 **/
2852static void
2853lpfc_challenge_key(uint32_t * RandomChallenge, uint32_t * HashWorking)
2854{
2855        *HashWorking = (*RandomChallenge ^ *HashWorking);
2856}
2857
2858/**
2859 * lpfc_hba_init - Perform special handling for LC HBA initialization
2860 * @phba: pointer to lpfc hba data structure.
2861 * @hbainit: pointer to an array of unsigned 32-bit integers.
2862 *
2863 * This routine performs the special handling for LC HBA initialization.
2864 **/
2865void
2866lpfc_hba_init(struct lpfc_hba *phba, uint32_t *hbainit)
2867{
2868        int t;
2869        uint32_t *HashWorking;
2870        uint32_t *pwwnn = (uint32_t *) phba->wwnn;
2871
2872        HashWorking = kcalloc(80, sizeof(uint32_t), GFP_KERNEL);
2873        if (!HashWorking)
2874                return;
2875
2876        HashWorking[0] = HashWorking[78] = *pwwnn++;
2877        HashWorking[1] = HashWorking[79] = *pwwnn;
2878
2879        for (t = 0; t < 7; t++)
2880                lpfc_challenge_key(phba->RandomData + t, HashWorking + t);
2881
2882        lpfc_sha_init(hbainit);
2883        lpfc_sha_iterate(hbainit, HashWorking);
2884        kfree(HashWorking);
2885}
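
    /*
     * Net effect: HashWorking[0..15] serves as the 512-bit SHA-1 message
     * block (the two WWNN words land in entries 0 and 1, and entries 0..6
     * are XORed with the adapter's RandomData); the resulting five-word
     * digest is returned through hbainit.
     */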
2886
2887/**
2888 * lpfc_cleanup - Performs vport cleanups before deleting a vport
2889 * @vport: pointer to a virtual N_Port data structure.
2890 *
2891 * This routine performs the necessary cleanups before deleting the @vport.
2892 * It invokes the discovery state machine to perform necessary state
2893 * transitions and to release the ndlps associated with the @vport. Note,
2894 * the physical port is treated as @vport 0.
2895 **/
2896void
2897lpfc_cleanup(struct lpfc_vport *vport)
2898{
2899        struct lpfc_hba   *phba = vport->phba;
2900        struct lpfc_nodelist *ndlp, *next_ndlp;
2901        int i = 0;
2902
2903        if (phba->link_state > LPFC_LINK_DOWN)
2904                lpfc_port_link_failure(vport);
2905
2906        /* Clean up VMID resources */
2907        if (lpfc_is_vmid_enabled(phba))
2908                lpfc_vmid_vport_cleanup(vport);
2909
2910        list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
2911                if (vport->port_type != LPFC_PHYSICAL_PORT &&
2912                    ndlp->nlp_DID == Fabric_DID) {
2913                        /* Just free up ndlp with Fabric_DID for vports */
2914                        lpfc_nlp_put(ndlp);
2915                        continue;
2916                }
2917
2918                if (ndlp->nlp_DID == Fabric_Cntl_DID &&
2919                    ndlp->nlp_state == NLP_STE_UNUSED_NODE) {
2920                        lpfc_nlp_put(ndlp);
2921                        continue;
2922                }
2923
2924                /* Fabric Ports not in UNMAPPED state are cleaned up in the
2925                 * DEVICE_RM event.
2926                 */
2927                if (ndlp->nlp_type & NLP_FABRIC &&
2928                    ndlp->nlp_state == NLP_STE_UNMAPPED_NODE)
2929                        lpfc_disc_state_machine(vport, ndlp, NULL,
2930                                        NLP_EVT_DEVICE_RECOVERY);
2931
2932                if (!(ndlp->fc4_xpt_flags & (NVME_XPT_REGD|SCSI_XPT_REGD)))
2933                        lpfc_disc_state_machine(vport, ndlp, NULL,
2934                                        NLP_EVT_DEVICE_RM);
2935        }
2936
2937        /* At this point, ALL ndlp's should be gone
2938         * because of the previous NLP_EVT_DEVICE_RM.
2939         * Let's wait up to 30 seconds (3000 * 10ms) for this to happen.
2940         */
2941        while (!list_empty(&vport->fc_nodes)) {
2942                if (i++ > 3000) {
2943                        lpfc_printf_vlog(vport, KERN_ERR,
2944                                         LOG_TRACE_EVENT,
2945                                "0233 Nodelist not empty\n");
2946                        list_for_each_entry_safe(ndlp, next_ndlp,
2947                                                &vport->fc_nodes, nlp_listp) {
2948                                lpfc_printf_vlog(ndlp->vport, KERN_ERR,
2949                                                 LOG_TRACE_EVENT,
2950                                                 "0282 did:x%x ndlp:x%px "
2951                                                 "refcnt:%d xflags x%x nflag x%x\n",
2952                                                 ndlp->nlp_DID, (void *)ndlp,
2953                                                 kref_read(&ndlp->kref),
2954                                                 ndlp->fc4_xpt_flags,
2955                                                 ndlp->nlp_flag);
2956                        }
2957                        break;
2958                }
2959
2960                /* Wait for any activity on ndlps to settle */
2961                msleep(10);
2962        }
2963        lpfc_cleanup_vports_rrqs(vport, NULL);
2964}
2965
2966/**
2967 * lpfc_stop_vport_timers - Stop all the timers associated with a vport
2968 * @vport: pointer to a virtual N_Port data structure.
2969 *
2970 * This routine stops all the timers associated with a @vport. This function
2971 * is invoked before disabling or deleting a @vport. Note that the physical
2972 * port is treated as @vport 0.
2973 **/
2974void
2975lpfc_stop_vport_timers(struct lpfc_vport *vport)
2976{
2977        del_timer_sync(&vport->els_tmofunc);
2978        del_timer_sync(&vport->delayed_disc_tmo);
2979        lpfc_can_disctmo(vport);
2980        return;
2981}
2982
2983/**
2984 * __lpfc_sli4_stop_fcf_redisc_wait_timer - Stop FCF rediscovery wait timer
2985 * @phba: pointer to lpfc hba data structure.
2986 *
2987 * This routine stops the SLI4 FCF rediscover wait timer if it's on. The
2988 * caller of this routine should already hold the host lock.
2989 **/
2990void
2991__lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba)
2992{
2993        /* Clear pending FCF rediscovery wait flag */
2994        phba->fcf.fcf_flag &= ~FCF_REDISC_PEND;
2995
2996        /* Now, try to stop the timer */
2997        del_timer(&phba->fcf.redisc_wait);
2998}
2999
3000/**
3001 * lpfc_sli4_stop_fcf_redisc_wait_timer - Stop FCF rediscovery wait timer
3002 * @phba: pointer to lpfc hba data structure.
3003 *
3004 * This routine stops the SLI4 FCF rediscover wait timer if it's on. It
3005 * checks whether the FCF rediscovery wait timer is pending with the host
3006 * lock held before proceeding with disabling the timer and clearing the
3007 * wait timer pending flag.
3008 **/
3009void
3010lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba)
3011{
3012        spin_lock_irq(&phba->hbalock);
3013        if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND)) {
3014                /* FCF rediscovery timer already fired or stopped */
3015                spin_unlock_irq(&phba->hbalock);
3016                return;
3017        }
3018        __lpfc_sli4_stop_fcf_redisc_wait_timer(phba);
3019        /* Clear failover in progress flags */
3020        phba->fcf.fcf_flag &= ~(FCF_DEAD_DISC | FCF_ACVL_DISC);
3021        spin_unlock_irq(&phba->hbalock);
3022}
3023
3024/**
3025 * lpfc_cmf_stop - Stop CMF processing
3026 * @phba: pointer to lpfc hba data structure.
3027 *
3028 * This is called when the link goes down or if CMF mode is turned OFF.
3029 * It is also called when going offline or unloaded just before the
3030 * congestion info buffer is unregistered.
3031 **/
3032void
3033lpfc_cmf_stop(struct lpfc_hba *phba)
3034{
3035        int cpu;
3036        struct lpfc_cgn_stat *cgs;
3037
3038        /* We only do something if CMF is enabled */
3039        if (!phba->sli4_hba.pc_sli4_params.cmf)
3040                return;
3041
3042        lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
3043                        "6221 Stop CMF / Cancel Timer\n");
3044
3045        /* Cancel the CMF timer */
3046        hrtimer_cancel(&phba->cmf_timer);
3047
3048        /* Zero CMF counters */
3049        atomic_set(&phba->cmf_busy, 0);
3050        for_each_present_cpu(cpu) {
3051                cgs = per_cpu_ptr(phba->cmf_stat, cpu);
3052                atomic64_set(&cgs->total_bytes, 0);
3053                atomic64_set(&cgs->rcv_bytes, 0);
3054                atomic_set(&cgs->rx_io_cnt, 0);
3055                atomic64_set(&cgs->rx_latency, 0);
3056        }
3057        atomic_set(&phba->cmf_bw_wait, 0);
3058
3059        /* Resume any blocked IO - Queue unblock on workqueue */
3060        queue_work(phba->wq, &phba->unblock_request_work);
3061}
3062
3063static inline uint64_t
3064lpfc_get_max_line_rate(struct lpfc_hba *phba)
3065{
3066        uint64_t rate = lpfc_sli_port_speed_get(phba);
3067
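            /*
             * rate is the reported link speed in megabits per second;
             * the scaling below yields an approximate payload byte rate,
             * charging 10 line bits per data byte.
             */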
3068        return ((((unsigned long)rate) * 1024 * 1024) / 10);
3069}
3070
3071void
3072lpfc_cmf_signal_init(struct lpfc_hba *phba)
3073{
3074        lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
3075                        "6223 Signal CMF init\n");
3076
3077        /* Use the new fc_linkspeed to recalculate */
3078        phba->cmf_interval_rate = LPFC_CMF_INTERVAL;
3079        phba->cmf_max_line_rate = lpfc_get_max_line_rate(phba);
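            /*
             * cmf_link_byte_count is the byte budget for one interval:
             * line rate (bytes/sec) * interval length (ms) / 1000.
             */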
3080        phba->cmf_link_byte_count = div_u64(phba->cmf_max_line_rate *
3081                                            phba->cmf_interval_rate, 1000);
3082        phba->cmf_max_bytes_per_interval = phba->cmf_link_byte_count;
3083
3084        /* This is a signal to firmware to sync up CMF BW with link speed */
3085        lpfc_issue_cmf_sync_wqe(phba, 0, 0);
3086}
3087
3088/**
3089 * lpfc_cmf_start - Start CMF processing
3090 * @phba: pointer to lpfc hba data structure.
3091 *
3092 * This is called when the link comes up or when the CMF mode is changed
3093 * from OFF to Monitor or Managed.
3094 **/
3095void
3096lpfc_cmf_start(struct lpfc_hba *phba)
3097{
3098        struct lpfc_cgn_stat *cgs;
3099        int cpu;
3100
3101        /* We only do something if CMF is enabled */
3102        if (!phba->sli4_hba.pc_sli4_params.cmf ||
3103            phba->cmf_active_mode == LPFC_CFG_OFF)
3104                return;
3105
3106        /* Reinitialize congestion buffer info */
3107        lpfc_init_congestion_buf(phba);
3108
3109        atomic_set(&phba->cgn_fabric_warn_cnt, 0);
3110        atomic_set(&phba->cgn_fabric_alarm_cnt, 0);
3111        atomic_set(&phba->cgn_sync_alarm_cnt, 0);
3112        atomic_set(&phba->cgn_sync_warn_cnt, 0);
3113
3114        atomic_set(&phba->cmf_busy, 0);
3115        for_each_present_cpu(cpu) {
3116                cgs = per_cpu_ptr(phba->cmf_stat, cpu);
3117                atomic64_set(&cgs->total_bytes, 0);
3118                atomic64_set(&cgs->rcv_bytes, 0);
3119                atomic_set(&cgs->rx_io_cnt, 0);
3120                atomic64_set(&cgs->rx_latency, 0);
3121        }
3122        phba->cmf_latency.tv_sec = 0;
3123        phba->cmf_latency.tv_nsec = 0;
3124
3125        lpfc_cmf_signal_init(phba);
3126
3127        lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
3128                        "6222 Start CMF / Timer\n");
3129
3130        phba->cmf_timer_cnt = 0;
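            /* LPFC_CMF_INTERVAL is in milliseconds; scale to ns for ktime */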
3131        hrtimer_start(&phba->cmf_timer,
3132                      ktime_set(0, LPFC_CMF_INTERVAL * 1000000),
3133                      HRTIMER_MODE_REL);
3134        /* Setup for latency check in IO cmpl routines */
3135        ktime_get_real_ts64(&phba->cmf_latency);
3136
3137        atomic_set(&phba->cmf_bw_wait, 0);
3138        atomic_set(&phba->cmf_stop_io, 0);
3139}
3140
3141/**
3142 * lpfc_stop_hba_timers - Stop all the timers associated with an HBA
3143 * @phba: pointer to lpfc hba data structure.
3144 *
3145 * This routine stops all the timers associated with a HBA. This function is
3146 * invoked before either putting a HBA offline or unloading the driver.
3147 **/
3148void
3149lpfc_stop_hba_timers(struct lpfc_hba *phba)
3150{
3151        if (phba->pport)
3152                lpfc_stop_vport_timers(phba->pport);
3153        cancel_delayed_work_sync(&phba->eq_delay_work);
3154        cancel_delayed_work_sync(&phba->idle_stat_delay_work);
3155        del_timer_sync(&phba->sli.mbox_tmo);
3156        del_timer_sync(&phba->fabric_block_timer);
3157        del_timer_sync(&phba->eratt_poll);
3158        del_timer_sync(&phba->hb_tmofunc);
3159        if (phba->sli_rev == LPFC_SLI_REV4) {
3160                del_timer_sync(&phba->rrq_tmr);
3161                phba->hba_flag &= ~HBA_RRQ_ACTIVE;
3162        }
3163        phba->hba_flag &= ~(HBA_HBEAT_INP | HBA_HBEAT_TMO);
3164
3165        switch (phba->pci_dev_grp) {
3166        case LPFC_PCI_DEV_LP:
3167                /* Stop any LightPulse device specific driver timers */
3168                del_timer_sync(&phba->fcp_poll_timer);
3169                break;
3170        case LPFC_PCI_DEV_OC:
3171                /* Stop any OneConnect device specific driver timers */
3172                lpfc_sli4_stop_fcf_redisc_wait_timer(phba);
3173                break;
3174        default:
3175                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3176                                "0297 Invalid device group (x%x)\n",
3177                                phba->pci_dev_grp);
3178                break;
3179        }
3180        return;
3181}
3182
3183/**
3184 * lpfc_block_mgmt_io - Mark a HBA's management interface as blocked
3185 * @phba: pointer to lpfc hba data structure.
3186 * @mbx_action: flag for mailbox no wait action.
3187 *
3188 * This routine marks a HBA's management interface as blocked. Once the HBA's
3189 * management interface is marked as blocked, all user space access to
3190 * the HBA, whether from the sysfs interface or the libdfc interface, is
3191 * blocked. The HBA is set to block the management interface when the
3192 * driver prepares the HBA interface for going online or offline.
3193 **/
3194static void
3195lpfc_block_mgmt_io(struct lpfc_hba *phba, int mbx_action)
3196{
3197        unsigned long iflag;
3198        uint8_t actcmd = MBX_HEARTBEAT;
3199        unsigned long timeout;
3200
3201        spin_lock_irqsave(&phba->hbalock, iflag);
3202        phba->sli.sli_flag |= LPFC_BLOCK_MGMT_IO;
3203        spin_unlock_irqrestore(&phba->hbalock, iflag);
3204        if (mbx_action == LPFC_MBX_NO_WAIT)
3205                return;
3206        timeout = msecs_to_jiffies(LPFC_MBOX_TMO * 1000) + jiffies;
3207        spin_lock_irqsave(&phba->hbalock, iflag);
3208        if (phba->sli.mbox_active) {
3209                actcmd = phba->sli.mbox_active->u.mb.mbxCommand;
3210                /* Determine how long we might wait for the active mailbox
3211                 * command to be gracefully completed by firmware.
3212                 */
3213                timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba,
3214                                phba->sli.mbox_active) * 1000) + jiffies;
3215        }
3216        spin_unlock_irqrestore(&phba->hbalock, iflag);
3217
3218        /* Wait for the outstanding mailbox command to complete */
3219        while (phba->sli.mbox_active) {
3220                /* Check active mailbox complete status every 2ms */
3221                msleep(2);
3222                if (time_after(jiffies, timeout)) {
3223                        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3224                                        "2813 Mgmt IO is Blocked %x "
3225                                        "- mbox cmd %x still active\n",
3226                                        phba->sli.sli_flag, actcmd);
3227                        break;
3228                }
3229        }
3230}
3231
3232/**
3233 * lpfc_sli4_node_prep - Assign RPIs for active nodes.
3234 * @phba: pointer to lpfc hba data structure.
3235 *
3236 * Allocate RPIs for all active remote nodes. This is needed whenever
3237 * an SLI4 adapter is reset and the driver is not unloading. Its purpose
3238 * is to fix up the temporary rpi assignments.
3239 **/
3240void
3241lpfc_sli4_node_prep(struct lpfc_hba *phba)
3242{
3243        struct lpfc_nodelist  *ndlp, *next_ndlp;
3244        struct lpfc_vport **vports;
3245        int i, rpi;
3246
3247        if (phba->sli_rev != LPFC_SLI_REV4)
3248                return;
3249
3250        vports = lpfc_create_vport_work_array(phba);
3251        if (vports == NULL)
3252                return;
3253
3254        for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
3255                if (vports[i]->load_flag & FC_UNLOADING)
3256                        continue;
3257
3258                list_for_each_entry_safe(ndlp, next_ndlp,
3259                                         &vports[i]->fc_nodes,
3260                                         nlp_listp) {
3261                        rpi = lpfc_sli4_alloc_rpi(phba);
3262                        if (rpi == LPFC_RPI_ALLOC_ERROR) {
3263                                /* TODO print log? */
3264                                continue;
3265                        }
3266                        ndlp->nlp_rpi = rpi;
3267                        lpfc_printf_vlog(ndlp->vport, KERN_INFO,
3268                                         LOG_NODE | LOG_DISCOVERY,
3269                                         "0009 Assign RPI x%x to ndlp x%px "
3270                                         "DID:x%06x flg:x%x\n",
3271                                         ndlp->nlp_rpi, ndlp, ndlp->nlp_DID,
3272                                         ndlp->nlp_flag);
3273                }
3274        }
3275        lpfc_destroy_vport_work_array(phba, vports);
3276}
3277
3278/**
3279 * lpfc_create_expedite_pool - create expedite pool
3280 * @phba: pointer to lpfc hba data structure.
3281 *
3282 * This routine moves a batch of XRIs from lpfc_io_buf_list_put of HWQ 0
3283 * to the expedite pool and marks them as expedite.
3284 **/
3285static void lpfc_create_expedite_pool(struct lpfc_hba *phba)
3286{
3287        struct lpfc_sli4_hdw_queue *qp;
3288        struct lpfc_io_buf *lpfc_ncmd;
3289        struct lpfc_io_buf *lpfc_ncmd_next;
3290        struct lpfc_epd_pool *epd_pool;
3291        unsigned long iflag;
3292
3293        epd_pool = &phba->epd_pool;
3294        qp = &phba->sli4_hba.hdwq[0];
3295
3296        spin_lock_init(&epd_pool->lock);
3297        spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag);
3298        spin_lock(&epd_pool->lock);
3299        INIT_LIST_HEAD(&epd_pool->list);
3300        list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
3301                                 &qp->lpfc_io_buf_list_put, list) {
3302                list_move_tail(&lpfc_ncmd->list, &epd_pool->list);
3303                lpfc_ncmd->expedite = true;
3304                qp->put_io_bufs--;
3305                epd_pool->count++;
3306                if (epd_pool->count >= XRI_BATCH)
3307                        break;
3308        }
3309        spin_unlock(&epd_pool->lock);
3310        spin_unlock_irqrestore(&qp->io_buf_list_put_lock, iflag);
3311}
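
    /*
     * Note the lock ordering used above: the HWQ io_buf_list_put_lock is
     * taken first (IRQ-safe), then the expedite pool lock. The destroy
     * path below acquires the two locks in the same order.
     */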
3312
3313/**
3314 * lpfc_destroy_expedite_pool - destroy expedite pool
3315 * @phba: pointer to lpfc hba data structure.
3316 *
3317 * This routine returns XRIs from expedite pool to lpfc_io_buf_list_put
3318 * of HWQ 0 and clears the expedite mark.
3319 **/
3320static void lpfc_destroy_expedite_pool(struct lpfc_hba *phba)
3321{
3322        struct lpfc_sli4_hdw_queue *qp;
3323        struct lpfc_io_buf *lpfc_ncmd;
3324        struct lpfc_io_buf *lpfc_ncmd_next;
3325        struct lpfc_epd_pool *epd_pool;
3326        unsigned long iflag;
3327
3328        epd_pool = &phba->epd_pool;
3329        qp = &phba->sli4_hba.hdwq[0];
3330
3331        spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag);
3332        spin_lock(&epd_pool->lock);
3333        list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
3334                                 &epd_pool->list, list) {
3335                list_move_tail(&lpfc_ncmd->list,
3336                               &qp->lpfc_io_buf_list_put);
3337                lpfc_ncmd->expedite = false;	/* clear the expedite mark */
3338                qp->put_io_bufs++;
3339                epd_pool->count--;
3340        }
3341        spin_unlock(&epd_pool->lock);
3342        spin_unlock_irqrestore(&qp->io_buf_list_put_lock, iflag);
3343}
3344
3345/**
3346 * lpfc_create_multixri_pools - create multi-XRI pools
3347 * @phba: pointer to lpfc hba data structure.
3348 *
3349 * This routine initializes the public and private XRI pools for each HWQ,
3350 * then moves XRIs from lpfc_io_buf_list_put to the public pools. The high
3351 * and low watermarks are also initialized.
3352 **/
3353void lpfc_create_multixri_pools(struct lpfc_hba *phba)
3354{
3355        u32 i, j;
3356        u32 hwq_count;
3357        u32 count_per_hwq;
3358        struct lpfc_io_buf *lpfc_ncmd;
3359        struct lpfc_io_buf *lpfc_ncmd_next;
3360        unsigned long iflag;
3361        struct lpfc_sli4_hdw_queue *qp;
3362        struct lpfc_multixri_pool *multixri_pool;
3363        struct lpfc_pbl_pool *pbl_pool;
3364        struct lpfc_pvt_pool *pvt_pool;
3365
3366        lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3367                        "1234 num_hdw_queue=%d num_present_cpu=%d common_xri_cnt=%d\n",
3368                        phba->cfg_hdw_queue, phba->sli4_hba.num_present_cpu,
3369                        phba->sli4_hba.io_xri_cnt);
3370
3371        if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
3372                lpfc_create_expedite_pool(phba);
3373
3374        hwq_count = phba->cfg_hdw_queue;
3375        count_per_hwq = phba->sli4_hba.io_xri_cnt / hwq_count;
3376
3377        for (i = 0; i < hwq_count; i++) {
3378                multixri_pool = kzalloc(sizeof(*multixri_pool), GFP_KERNEL);
3379
3380                if (!multixri_pool) {
3381                        lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3382                                        "1238 Failed to allocate memory for "
3383                                        "multixri_pool\n");
3384
3385                        if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
3386                                lpfc_destroy_expedite_pool(phba);
3387
3388                        j = 0;
3389                        while (j < i) {
3390                                qp = &phba->sli4_hba.hdwq[j];
3391                                kfree(qp->p_multixri_pool);
3392                                j++;
3393                        }
3394                        phba->cfg_xri_rebalancing = 0;
3395                        return;
3396                }
3397
3398                qp = &phba->sli4_hba.hdwq[i];
3399                qp->p_multixri_pool = multixri_pool;
3400
3401                multixri_pool->xri_limit = count_per_hwq;
3402                multixri_pool->rrb_next_hwqid = i;
3403
3404                /* Deal with public free xri pool */
3405                pbl_pool = &multixri_pool->pbl_pool;
3406                spin_lock_init(&pbl_pool->lock);
3407                spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag);
3408                spin_lock(&pbl_pool->lock);
3409                INIT_LIST_HEAD(&pbl_pool->list);
3410                list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
3411                                         &qp->lpfc_io_buf_list_put, list) {
3412                        list_move_tail(&lpfc_ncmd->list, &pbl_pool->list);
3413                        qp->put_io_bufs--;
3414                        pbl_pool->count++;
3415                }
3416                lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3417                                "1235 Moved %d buffers from PUT list over to pbl_pool[%d]\n",
3418                                pbl_pool->count, i);
3419                spin_unlock(&pbl_pool->lock);
3420                spin_unlock_irqrestore(&qp->io_buf_list_put_lock, iflag);
3421
3422                /* Deal with private free xri pool */
3423                pvt_pool = &multixri_pool->pvt_pool;
3424                pvt_pool->high_watermark = multixri_pool->xri_limit / 2;
3425                pvt_pool->low_watermark = XRI_BATCH;
3426                spin_lock_init(&pvt_pool->lock);
3427                spin_lock_irqsave(&pvt_pool->lock, iflag);
3428                INIT_LIST_HEAD(&pvt_pool->list);
3429                pvt_pool->count = 0;
3430                spin_unlock_irqrestore(&pvt_pool->lock, iflag);
3431        }
3432}
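
    /*
     * Sizing summary (from the code above): each HWQ private pool may hold
     * up to half of that HWQ's xri_limit (the high watermark) and is
     * considered low below XRI_BATCH entries; every XRI on the HWQ put
     * list starts out in the public pbl_pool.
     */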
3433
3434/**
3435 * lpfc_destroy_multixri_pools - destroy multi-XRI pools
3436 * @phba: pointer to lpfc hba data structure.
3437 *
3438 * This routine returns XRIs from the public/private pools to
3439 * lpfc_io_buf_list_put.
3439 **/
3440static void lpfc_destroy_multixri_pools(struct lpfc_hba *phba)
3441{
3442        u32 i;
3443        u32 hwq_count;
3444        struct lpfc_io_buf *lpfc_ncmd;
3445        struct lpfc_io_buf *lpfc_ncmd_next;
3446        unsigned long iflag;
3447        struct lpfc_sli4_hdw_queue *qp;
3448        struct lpfc_multixri_pool *multixri_pool;
3449        struct lpfc_pbl_pool *pbl_pool;
3450        struct lpfc_pvt_pool *pvt_pool;
3451
3452        if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
3453                lpfc_destroy_expedite_pool(phba);
3454
3455        if (!(phba->pport->load_flag & FC_UNLOADING))
3456                lpfc_sli_flush_io_rings(phba);
3457
3458        hwq_count = phba->cfg_hdw_queue;
3459
3460        for (i = 0; i < hwq_count; i++) {
3461                qp = &phba->sli4_hba.hdwq[i];
3462                multixri_pool = qp->p_multixri_pool;
3463                if (!multixri_pool)
3464                        continue;
3465
3466                qp->p_multixri_pool = NULL;
3467
3468                spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag);
3469
3470                /* Deal with public free xri pool */
3471                pbl_pool = &multixri_pool->pbl_pool;
3472                spin_lock(&pbl_pool->lock);
3473
3474                lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3475                                "1236 Moving %d buffers from pbl_pool[%d] TO PUT list\n",
3476                                pbl_pool->count, i);
3477
3478                list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
3479                                         &pbl_pool->list, list) {
3480                        list_move_tail(&lpfc_ncmd->list,
3481                                       &qp->lpfc_io_buf_list_put);
3482                        qp->put_io_bufs++;
3483                        pbl_pool->count--;
3484                }
3485
3486                INIT_LIST_HEAD(&pbl_pool->list);
3487                pbl_pool->count = 0;
3488
3489                spin_unlock(&pbl_pool->lock);
3490
3491                /* Deal with private free xri pool */
3492                pvt_pool = &multixri_pool->pvt_pool;
3493                spin_lock(&pvt_pool->lock);
3494
3495                lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3496                                "1237 Moving %d buffers from pvt_pool[%d] TO PUT list\n",
3497                                pvt_pool->count, i);
3498
3499                list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
3500                                         &pvt_pool->list, list) {
3501                        list_move_tail(&lpfc_ncmd->list,
3502                                       &qp->lpfc_io_buf_list_put);
3503                        qp->put_io_bufs++;
3504                        pvt_pool->count--;
3505                }
3506
3507                INIT_LIST_HEAD(&pvt_pool->list);
3508                pvt_pool->count = 0;
3509
3510                spin_unlock(&pvt_pool->lock);
3511                spin_unlock_irqrestore(&qp->io_buf_list_put_lock, iflag);
3512
3513                kfree(multixri_pool);
3514        }
3515}
3516
3517/**
3518 * lpfc_online - Initialize and bring a HBA online
3519 * @phba: pointer to lpfc hba data structure.
3520 *
3521 * This routine initializes the HBA and brings a HBA online. During this
3522 * process, the management interface is blocked to prevent user space access
3523 * to the HBA from interfering with the driver initialization.
3524 *
3525 * Return codes
3526 *   0 - successful
3527 *   1 - failed
3528 **/
3529int
3530lpfc_online(struct lpfc_hba *phba)
3531{
3532        struct lpfc_vport *vport;
3533        struct lpfc_vport **vports;
3534        int i, error = 0;
3535        bool vpis_cleared = false;
3536
3537        if (!phba)
3538                return 0;
3539        vport = phba->pport;
3540
3541        if (!(vport->fc_flag & FC_OFFLINE_MODE))
3542                return 0;
3543
3544        lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
3545                        "0458 Bring Adapter online\n");
3546
3547        lpfc_block_mgmt_io(phba, LPFC_MBX_WAIT);
3548
3549        if (phba->sli_rev == LPFC_SLI_REV4) {
3550                if (lpfc_sli4_hba_setup(phba)) { /* Initialize SLI4 HBA */
3551                        lpfc_unblock_mgmt_io(phba);
3552                        return 1;
3553                }
3554                spin_lock_irq(&phba->hbalock);
3555                if (!phba->sli4_hba.max_cfg_param.vpi_used)
3556                        vpis_cleared = true;
3557                spin_unlock_irq(&phba->hbalock);
3558
3559                /* Reestablish the local initiator port.
3560                 * The offline process destroyed the previous lport.
3561                 */
3562                if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME &&
3563                                !phba->nvmet_support) {
3564                        error = lpfc_nvme_create_localport(phba->pport);
3565                        if (error)
3566                                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3567                                        "6132 NVME restore reg failed "
3568                                        "on nvmei error x%x\n", error);
3569                }
3570        } else {
3571                lpfc_sli_queue_init(phba);
3572                if (lpfc_sli_hba_setup(phba)) { /* Initialize SLI2/SLI3 HBA */
3573                        lpfc_unblock_mgmt_io(phba);
3574                        return 1;
3575                }
3576        }
3577
3578        vports = lpfc_create_vport_work_array(phba);
3579        if (vports != NULL) {
3580                for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
3581                        struct Scsi_Host *shost;
3582                        shost = lpfc_shost_from_vport(vports[i]);
3583                        spin_lock_irq(shost->host_lock);
3584                        vports[i]->fc_flag &= ~FC_OFFLINE_MODE;
3585                        if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
3586                                vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
3587                        if (phba->sli_rev == LPFC_SLI_REV4) {
3588                                vports[i]->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
3589                                if ((vpis_cleared) &&
3590                                    (vports[i]->port_type !=
3591                                        LPFC_PHYSICAL_PORT))
3592                                        vports[i]->vpi = 0;
3593                        }
3594                        spin_unlock_irq(shost->host_lock);
3595                }
3596        }
3597        lpfc_destroy_vport_work_array(phba, vports);
3598
3599        if (phba->cfg_xri_rebalancing)
3600                lpfc_create_multixri_pools(phba);
3601
3602        lpfc_cpuhp_add(phba);
3603
3604        lpfc_unblock_mgmt_io(phba);
3605        return 0;
3606}
3607
3608/**
3609 * lpfc_unblock_mgmt_io - Mark a HBA's management interface to be not blocked
3610 * @phba: pointer to lpfc hba data structure.
3611 *
3612 * This routine marks a HBA's management interface as not blocked. Once the
3613 * HBA's management interface is marked as not blocked, all user space
3614 * access to the HBA, whether from the sysfs interface or the libdfc
3615 * interface, is allowed. The HBA is set to block the management interface
3616 * when the driver prepares the HBA interface for online or offline and then
3617 * set to unblock the management interface afterwards.
3618 **/
3619void
3620lpfc_unblock_mgmt_io(struct lpfc_hba * phba)
3621{
3622        unsigned long iflag;
3623
3624        spin_lock_irqsave(&phba->hbalock, iflag);
3625        phba->sli.sli_flag &= ~LPFC_BLOCK_MGMT_IO;
3626        spin_unlock_irqrestore(&phba->hbalock, iflag);
3627}
3628
3629/**
3630 * lpfc_offline_prep - Prepare a HBA to be brought offline
3631 * @phba: pointer to lpfc hba data structure.
3632 * @mbx_action: flag for mailbox shutdown action.
3633 *
3634 * This routine is invoked to prepare a HBA to be brought offline. It performs
3635 * an unregistration login for all nodes on all vports and flushes the mailbox
3636 * queue to make it ready to be brought offline.
3637 **/
3638void
3639lpfc_offline_prep(struct lpfc_hba *phba, int mbx_action)
3640{
3641        struct lpfc_vport *vport = phba->pport;
3642        struct lpfc_nodelist  *ndlp, *next_ndlp;
3643        struct lpfc_vport **vports;
3644        struct Scsi_Host *shost;
3645        int i;
3646
3647        if (vport->fc_flag & FC_OFFLINE_MODE)
3648                return;
3649
3650        lpfc_block_mgmt_io(phba, mbx_action);
3651
3652        lpfc_linkdown(phba);
3653
3654        /* Issue an unreg_login to all nodes on all vports */
3655        vports = lpfc_create_vport_work_array(phba);
3656        if (vports != NULL) {
3657                for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
3658                        if (vports[i]->load_flag & FC_UNLOADING)
3659                                continue;
3660                        shost = lpfc_shost_from_vport(vports[i]);
3661                        spin_lock_irq(shost->host_lock);
3662                        vports[i]->vpi_state &= ~LPFC_VPI_REGISTERED;
3663                        vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
3664                        vports[i]->fc_flag &= ~FC_VFI_REGISTERED;
3665                        spin_unlock_irq(shost->host_lock);
3666
3667                        shost = lpfc_shost_from_vport(vports[i]);
3668                        list_for_each_entry_safe(ndlp, next_ndlp,
3669                                                 &vports[i]->fc_nodes,
3670                                                 nlp_listp) {
3671
3672                                spin_lock_irq(&ndlp->lock);
3673                                ndlp->nlp_flag &= ~NLP_NPR_ADISC;
3674                                spin_unlock_irq(&ndlp->lock);
3675
3676                                lpfc_unreg_rpi(vports[i], ndlp);
3677                                /*
3678                                 * Whenever an SLI4 port goes offline, free the
3679                                 * RPI. Get a new RPI when the adapter port
3680                                 * comes back online.
3681                                 */
3682                                if (phba->sli_rev == LPFC_SLI_REV4) {
3683                                        lpfc_printf_vlog(vports[i], KERN_INFO,
3684                                                 LOG_NODE | LOG_DISCOVERY,
3685                                                 "0011 Free RPI x%x on "
3686                                                 "ndlp: x%px did x%x\n",
3687                                                 ndlp->nlp_rpi, ndlp,
3688                                                 ndlp->nlp_DID);
3689                                        lpfc_sli4_free_rpi(phba, ndlp->nlp_rpi);
3690                                        ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR;
3691                                }
3692
3693                                if (ndlp->nlp_type & NLP_FABRIC) {
3694                                        lpfc_disc_state_machine(vports[i], ndlp,
3695                                                NULL, NLP_EVT_DEVICE_RECOVERY);
3696
3697                                        /* Don't remove the node unless it
3698                                         * has been unregistered with the
3699                                         * transport.  If so, let dev_loss
3700                                         * take care of the node.
3701                                         */
3702                                        if (!(ndlp->fc4_xpt_flags &
3703                                              (NVME_XPT_REGD | SCSI_XPT_REGD)))
3704                                                lpfc_disc_state_machine
3705                                                        (vports[i], ndlp,
3706                                                         NULL,
3707                                                         NLP_EVT_DEVICE_RM);
3708                                }
3709                        }
3710                }
3711        }
3712        lpfc_destroy_vport_work_array(phba, vports);
3713
3714        lpfc_sli_mbox_sys_shutdown(phba, mbx_action);
3715
3716        if (phba->wq)
3717                flush_workqueue(phba->wq);
3718}
3719
3720/**
3721 * lpfc_offline - Bring a HBA offline
3722 * @phba: pointer to lpfc hba data structure.
3723 *
3724 * This routine actually brings a HBA offline. It stops all the timers
3725 * associated with the HBA, brings down the SLI layer, and eventually
3726 * marks the HBA as in offline state for the upper layer protocol.
3727 **/
3728void
3729lpfc_offline(struct lpfc_hba *phba)
3730{
3731        struct Scsi_Host  *shost;
3732        struct lpfc_vport **vports;
3733        int i;
3734
3735        if (phba->pport->fc_flag & FC_OFFLINE_MODE)
3736                return;
3737
3738        /* stop port and all timers associated with this hba */
3739        lpfc_stop_port(phba);
3740
3741        /* Tear down the local and target port registrations.  The
3742         * nvme transports need to cleanup.
3743         */
3744        lpfc_nvmet_destroy_targetport(phba);
3745        lpfc_nvme_destroy_localport(phba->pport);
3746
3747        vports = lpfc_create_vport_work_array(phba);
3748        if (vports != NULL)
3749                for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
3750                        lpfc_stop_vport_timers(vports[i]);
3751        lpfc_destroy_vport_work_array(phba, vports);
3752        lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
3753                        "0460 Bring Adapter offline\n");
3754        /* Bring down the SLI Layer and cleanup.  The HBA is
3755         * offline now. */
3756        lpfc_sli_hba_down(phba);
3757        spin_lock_irq(&phba->hbalock);
3758        phba->work_ha = 0;
3759        spin_unlock_irq(&phba->hbalock);
3760        vports = lpfc_create_vport_work_array(phba);
3761        if (vports != NULL)
3762                for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
3763                        shost = lpfc_shost_from_vport(vports[i]);
3764                        spin_lock_irq(shost->host_lock);
3765                        vports[i]->work_port_events = 0;
3766                        vports[i]->fc_flag |= FC_OFFLINE_MODE;
3767                        spin_unlock_irq(shost->host_lock);
3768                }
3769        lpfc_destroy_vport_work_array(phba, vports);
3770        /* If OFFLINE flag is clear (i.e. unloading), cpuhp removal is handled
3771         * in hba_unset
3772         */
3773        if (phba->pport->fc_flag & FC_OFFLINE_MODE)
3774                __lpfc_cpuhp_remove(phba);
3775
3776        if (phba->cfg_xri_rebalancing)
3777                lpfc_destroy_multixri_pools(phba);
3778}
3779
3780/**
3781 * lpfc_scsi_free - Free all the SCSI buffers and IOCBs from driver lists
3782 * @phba: pointer to lpfc hba data structure.
3783 *
3784 * This routine is to free all the SCSI buffers and IOCBs from the driver
3785 * list back to kernel. It is called from lpfc_pci_remove_one to free
3786 * the internal resources before the device is removed from the system.
3787 **/
3788static void
3789lpfc_scsi_free(struct lpfc_hba *phba)
3790{
3791        struct lpfc_io_buf *sb, *sb_next;
3792
3793        if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP))
3794                return;
3795
3796        spin_lock_irq(&phba->hbalock);
3797
3798        /* Release all the lpfc_scsi_bufs maintained by this host. */
3799
3800        spin_lock(&phba->scsi_buf_list_put_lock);
3801        list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list_put,
3802                                 list) {
3803                list_del(&sb->list);
3804                dma_pool_free(phba->lpfc_sg_dma_buf_pool, sb->data,
3805                              sb->dma_handle);
3806                kfree(sb);
3807                phba->total_scsi_bufs--;
3808        }
3809        spin_unlock(&phba->scsi_buf_list_put_lock);
3810
3811        spin_lock(&phba->scsi_buf_list_get_lock);
3812        list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list_get,
3813                                 list) {
3814                list_del(&sb->list);
3815                dma_pool_free(phba->lpfc_sg_dma_buf_pool, sb->data,
3816                              sb->dma_handle);
3817                kfree(sb);
3818                phba->total_scsi_bufs--;
3819        }
3820        spin_unlock(&phba->scsi_buf_list_get_lock);
3821        spin_unlock_irq(&phba->hbalock);
3822}
3823
3824/**
3825 * lpfc_io_free - Free all the IO buffers and IOCBs from driver lists
3826 * @phba: pointer to lpfc hba data structure.
3827 *
3828 * This routine is to free all the IO buffers and IOCBs from the driver
3829 * list back to kernel. It is called from lpfc_pci_remove_one to free
3830 * the internal resources before the device is removed from the system.
3831 **/
3832void
3833lpfc_io_free(struct lpfc_hba *phba)
3834{
3835        struct lpfc_io_buf *lpfc_ncmd, *lpfc_ncmd_next;
3836        struct lpfc_sli4_hdw_queue *qp;
3837        int idx;
3838
3839        for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
3840                qp = &phba->sli4_hba.hdwq[idx];
3841                /* Release all the lpfc_nvme_bufs maintained by this host. */
3842                spin_lock(&qp->io_buf_list_put_lock);
3843                list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
3844                                         &qp->lpfc_io_buf_list_put,
3845                                         list) {
3846                        list_del(&lpfc_ncmd->list);
3847                        qp->put_io_bufs--;
3848                        dma_pool_free(phba->lpfc_sg_dma_buf_pool,
3849                                      lpfc_ncmd->data, lpfc_ncmd->dma_handle);
3850                        if (phba->cfg_xpsgl && !phba->nvmet_support)
3851                                lpfc_put_sgl_per_hdwq(phba, lpfc_ncmd);
3852                        lpfc_put_cmd_rsp_buf_per_hdwq(phba, lpfc_ncmd);
3853                        kfree(lpfc_ncmd);
3854                        qp->total_io_bufs--;
3855                }
3856                spin_unlock(&qp->io_buf_list_put_lock);
3857
3858                spin_lock(&qp->io_buf_list_get_lock);
3859                list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
3860                                         &qp->lpfc_io_buf_list_get,
3861                                         list) {
3862                        list_del(&lpfc_ncmd->list);
3863                        qp->get_io_bufs--;
3864                        dma_pool_free(phba->lpfc_sg_dma_buf_pool,
3865                                      lpfc_ncmd->data, lpfc_ncmd->dma_handle);
3866                        if (phba->cfg_xpsgl && !phba->nvmet_support)
3867                                lpfc_put_sgl_per_hdwq(phba, lpfc_ncmd);
3868                        lpfc_put_cmd_rsp_buf_per_hdwq(phba, lpfc_ncmd);
3869                        kfree(lpfc_ncmd);
3870                        qp->total_io_bufs--;
3871                }
3872                spin_unlock(&qp->io_buf_list_get_lock);
3873        }
3874}
3875
3876/**
3877 * lpfc_sli4_els_sgl_update - update ELS xri-sgl sizing and mapping
3878 * @phba: pointer to lpfc hba data structure.
3879 *
3880 * This routine first calculates the sizes of the current els and allocated
3881 * scsi sgl lists, and then goes through all sgls to update the physical
3882 * XRIs assigned due to port function reset. During port initialization, the
3883 * current els and allocated scsi sgl lists are empty.
3884 *
3885 * Return codes
3886 *   0 - successful (for now, it always returns 0)
3887 **/
3888int
3889lpfc_sli4_els_sgl_update(struct lpfc_hba *phba)
3890{
3891        struct lpfc_sglq *sglq_entry = NULL, *sglq_entry_next = NULL;
3892        uint16_t i, lxri, xri_cnt, els_xri_cnt;
3893        LIST_HEAD(els_sgl_list);
3894        int rc;
3895
3896        /*
3897         * update on pci function's els xri-sgl list
3898         */
3899        els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
3900
3901        if (els_xri_cnt > phba->sli4_hba.els_xri_cnt) {
3902                /* els xri-sgl expanded */
3903                xri_cnt = els_xri_cnt - phba->sli4_hba.els_xri_cnt;
3904                lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3905                                "3157 ELS xri-sgl count increased from "
3906                                "%d to %d\n", phba->sli4_hba.els_xri_cnt,
3907                                els_xri_cnt);
3908                /* allocate the additional els sgls */
3909                for (i = 0; i < xri_cnt; i++) {
3910                        sglq_entry = kzalloc(sizeof(struct lpfc_sglq),
3911                                             GFP_KERNEL);
3912                        if (sglq_entry == NULL) {
3913                                lpfc_printf_log(phba, KERN_ERR,
3914                                                LOG_TRACE_EVENT,
3915                                                "2562 Failure to allocate an "
3916                                                "ELS sgl entry:%d\n", i);
3917                                rc = -ENOMEM;
3918                                goto out_free_mem;
3919                        }
3920                        sglq_entry->buff_type = GEN_BUFF_TYPE;
3921                        sglq_entry->virt = lpfc_mbuf_alloc(phba, 0,
3922                                                           &sglq_entry->phys);
3923                        if (sglq_entry->virt == NULL) {
3924                                kfree(sglq_entry);
3925                                lpfc_printf_log(phba, KERN_ERR,
3926                                                LOG_TRACE_EVENT,
3927                                                "2563 Failure to allocate an "
3928                                                "ELS mbuf:%d\n", i);
3929                                rc = -ENOMEM;
3930                                goto out_free_mem;
3931                        }
3932                        sglq_entry->sgl = sglq_entry->virt;
3933                        memset(sglq_entry->sgl, 0, LPFC_BPL_SIZE);
3934                        sglq_entry->state = SGL_FREED;
3935                        list_add_tail(&sglq_entry->list, &els_sgl_list);
3936                }
3937                spin_lock_irq(&phba->sli4_hba.sgl_list_lock);
3938                list_splice_init(&els_sgl_list,
3939                                 &phba->sli4_hba.lpfc_els_sgl_list);
3940                spin_unlock_irq(&phba->sli4_hba.sgl_list_lock);
3941        } else if (els_xri_cnt < phba->sli4_hba.els_xri_cnt) {
3942                /* els xri-sgl shrunk */
3943                xri_cnt = phba->sli4_hba.els_xri_cnt - els_xri_cnt;
3944                lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3945                                "3158 ELS xri-sgl count decreased from "
3946                                "%d to %d\n", phba->sli4_hba.els_xri_cnt,
3947                                els_xri_cnt);
3948                spin_lock_irq(&phba->sli4_hba.sgl_list_lock);
3949                list_splice_init(&phba->sli4_hba.lpfc_els_sgl_list,
3950                                 &els_sgl_list);
3951                /* release extra els sgls from list */
3952                for (i = 0; i < xri_cnt; i++) {
3953                        list_remove_head(&els_sgl_list,
3954                                         sglq_entry, struct lpfc_sglq, list);
3955                        if (sglq_entry) {
3956                                __lpfc_mbuf_free(phba, sglq_entry->virt,
3957                                                 sglq_entry->phys);
3958                                kfree(sglq_entry);
3959                        }
3960                }
3961                list_splice_init(&els_sgl_list,
3962                                 &phba->sli4_hba.lpfc_els_sgl_list);
3963                spin_unlock_irq(&phba->sli4_hba.sgl_list_lock);
3964        } else
3965                lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3966                                "3163 ELS xri-sgl count unchanged: %d\n",
3967                                els_xri_cnt);
3968        phba->sli4_hba.els_xri_cnt = els_xri_cnt;
3969
3970        /* update xris to els sgls on the list */
3971        sglq_entry = NULL;
3972        sglq_entry_next = NULL;
3973        list_for_each_entry_safe(sglq_entry, sglq_entry_next,
3974                                 &phba->sli4_hba.lpfc_els_sgl_list, list) {
3975                lxri = lpfc_sli4_next_xritag(phba);
3976                if (lxri == NO_XRI) {
3977                        lpfc_printf_log(phba, KERN_ERR,
3978                                        LOG_TRACE_EVENT,
3979                                        "2400 Failed to allocate xri for "
3980                                        "ELS sgl\n");
3981                        rc = -ENOMEM;
3982                        goto out_free_mem;
3983                }
3984                sglq_entry->sli4_lxritag = lxri;
3985                sglq_entry->sli4_xritag = phba->sli4_hba.xri_ids[lxri];
3986        }
3987        return 0;
3988
3989out_free_mem:
3990        lpfc_free_els_sgl_list(phba);
3991        return rc;
3992}
3993
3994/**
3995 * lpfc_sli4_nvmet_sgl_update - update xri-sgl sizing and mapping
3996 * @phba: pointer to lpfc hba data structure.
3997 *
3998 * This routine first calculates the sizes of the current and newly required
3999 * NVMET xri-sgl lists, resizes the NVMET sgl list accordingly, and then
4000 * walks all sgls to update the physical XRIs reassigned by a port function
4001 * reset. During port initialization, the current list size is 0.
4002 *
4003 * Return codes
4004 *   0 - successful, -ENOMEM - failed to allocate memory
4005 **/
4006int
4007lpfc_sli4_nvmet_sgl_update(struct lpfc_hba *phba)
4008{
4009        struct lpfc_sglq *sglq_entry = NULL, *sglq_entry_next = NULL;
4010        uint16_t i, lxri, xri_cnt, els_xri_cnt;
4011        uint16_t nvmet_xri_cnt;
4012        LIST_HEAD(nvmet_sgl_list);
4013        int rc;
4014
4015        /*
4016         * update on pci function's nvmet xri-sgl list
4017         */
4018        els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
4019
4020        /* For NVMET, ALL remaining XRIs are dedicated for IO processing */
4021        nvmet_xri_cnt = phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt;
4022        if (nvmet_xri_cnt > phba->sli4_hba.nvmet_xri_cnt) {
4023                /* nvmet xri-sgl expanded */
4024                xri_cnt = nvmet_xri_cnt - phba->sli4_hba.nvmet_xri_cnt;
4025                lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4026                                "6302 NVMET xri-sgl cnt grew from %d to %d\n",
4027                                phba->sli4_hba.nvmet_xri_cnt, nvmet_xri_cnt);
4028                /* allocate the additional nvmet sgls */
4029                for (i = 0; i < xri_cnt; i++) {
4030                        sglq_entry = kzalloc(sizeof(struct lpfc_sglq),
4031                                             GFP_KERNEL);
4032                        if (sglq_entry == NULL) {
4033                                lpfc_printf_log(phba, KERN_ERR,
4034                                                LOG_TRACE_EVENT,
4035                                                "6303 Failure to allocate an "
4036                                                "NVMET sgl entry:%d\n", i);
4037                                rc = -ENOMEM;
4038                                goto out_free_mem;
4039                        }
4040                        sglq_entry->buff_type = NVMET_BUFF_TYPE;
4041                        sglq_entry->virt = lpfc_nvmet_buf_alloc(phba, 0,
4042                                                           &sglq_entry->phys);
4043                        if (sglq_entry->virt == NULL) {
4044                                kfree(sglq_entry);
4045                                lpfc_printf_log(phba, KERN_ERR,
4046                                                LOG_TRACE_EVENT,
4047                                                "6304 Failure to allocate an "
4048                                                "NVMET buf:%d\n", i);
4049                                rc = -ENOMEM;
4050                                goto out_free_mem;
4051                        }
4052                        sglq_entry->sgl = sglq_entry->virt;
4053                        memset(sglq_entry->sgl, 0,
4054                               phba->cfg_sg_dma_buf_size);
4055                        sglq_entry->state = SGL_FREED;
4056                        list_add_tail(&sglq_entry->list, &nvmet_sgl_list);
4057                }
4058                spin_lock_irq(&phba->hbalock);
4059                spin_lock(&phba->sli4_hba.sgl_list_lock);
4060                list_splice_init(&nvmet_sgl_list,
4061                                 &phba->sli4_hba.lpfc_nvmet_sgl_list);
4062                spin_unlock(&phba->sli4_hba.sgl_list_lock);
4063                spin_unlock_irq(&phba->hbalock);
4064        } else if (nvmet_xri_cnt < phba->sli4_hba.nvmet_xri_cnt) {
4065                /* nvmet xri-sgl shrunk */
4066                xri_cnt = phba->sli4_hba.nvmet_xri_cnt - nvmet_xri_cnt;
4067                lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4068                                "6305 NVMET xri-sgl count decreased from "
4069                                "%d to %d\n", phba->sli4_hba.nvmet_xri_cnt,
4070                                nvmet_xri_cnt);
4071                spin_lock_irq(&phba->hbalock);
4072                spin_lock(&phba->sli4_hba.sgl_list_lock);
4073                list_splice_init(&phba->sli4_hba.lpfc_nvmet_sgl_list,
4074                                 &nvmet_sgl_list);
4075                /* release extra nvmet sgls from list */
4076                for (i = 0; i < xri_cnt; i++) {
4077                        list_remove_head(&nvmet_sgl_list,
4078                                         sglq_entry, struct lpfc_sglq, list);
4079                        if (sglq_entry) {
4080                                lpfc_nvmet_buf_free(phba, sglq_entry->virt,
4081                                                    sglq_entry->phys);
4082                                kfree(sglq_entry);
4083                        }
4084                }
4085                list_splice_init(&nvmet_sgl_list,
4086                                 &phba->sli4_hba.lpfc_nvmet_sgl_list);
4087                spin_unlock(&phba->sli4_hba.sgl_list_lock);
4088                spin_unlock_irq(&phba->hbalock);
4089        } else
4090                lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4091                                "6306 NVMET xri-sgl count unchanged: %d\n",
4092                                nvmet_xri_cnt);
4093        phba->sli4_hba.nvmet_xri_cnt = nvmet_xri_cnt;
4094
4095        /* update xris to nvmet sgls on the list */
4096        sglq_entry = NULL;
4097        sglq_entry_next = NULL;
4098        list_for_each_entry_safe(sglq_entry, sglq_entry_next,
4099                                 &phba->sli4_hba.lpfc_nvmet_sgl_list, list) {
4100                lxri = lpfc_sli4_next_xritag(phba);
4101                if (lxri == NO_XRI) {
4102                        lpfc_printf_log(phba, KERN_ERR,
4103                                        LOG_TRACE_EVENT,
4104                                        "6307 Failed to allocate xri for "
4105                                        "NVMET sgl\n");
4106                        rc = -ENOMEM;
4107                        goto out_free_mem;
4108                }
4109                sglq_entry->sli4_lxritag = lxri;
4110                sglq_entry->sli4_xritag = phba->sli4_hba.xri_ids[lxri];
4111        }
4112        return 0;
4113
4114out_free_mem:
4115        lpfc_free_nvmet_sgl_list(phba);
4116        return rc;
4117}
4118
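/**
 * lpfc_io_buf_flush - Gather all IO buffers onto one list, sorted by XRI
 * @phba: pointer to lpfc hba data structure.
 * @cbuf: pointer to the list to receive the IO buffers.
 *
 * This routine takes every IO buffer off the get and put lists of all
 * hardware queues and places them on @cbuf in ascending XRI order, since
 * POST_SGL posts a sequential range of XRIs to the firmware.
 *
 * Return codes
 *   the number of IO buffers moved onto @cbuf.
 **/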
4119int
4120lpfc_io_buf_flush(struct lpfc_hba *phba, struct list_head *cbuf)
4121{
4122        LIST_HEAD(blist);
4123        struct lpfc_sli4_hdw_queue *qp;
4124        struct lpfc_io_buf *lpfc_cmd;
4125        struct lpfc_io_buf *iobufp, *prev_iobufp;
4126        int idx, cnt, xri, inserted;
4127
4128        cnt = 0;
4129        for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
4130                qp = &phba->sli4_hba.hdwq[idx];
4131                spin_lock_irq(&qp->io_buf_list_get_lock);
4132                spin_lock(&qp->io_buf_list_put_lock);
4133
4134                /* Take everything off the get and put lists */
4135                list_splice_init(&qp->lpfc_io_buf_list_get, &blist);
4136                list_splice(&qp->lpfc_io_buf_list_put, &blist);
4137                INIT_LIST_HEAD(&qp->lpfc_io_buf_list_get);
4138                INIT_LIST_HEAD(&qp->lpfc_io_buf_list_put);
4139                cnt += qp->get_io_bufs + qp->put_io_bufs;
4140                qp->get_io_bufs = 0;
4141                qp->put_io_bufs = 0;
4142                qp->total_io_bufs = 0;
4143                spin_unlock(&qp->io_buf_list_put_lock);
4144                spin_unlock_irq(&qp->io_buf_list_get_lock);
4145        }
4146
4147        /*
4148         * Take IO buffers off blist and put on cbuf sorted by XRI.
4149         * This is because POST_SGL takes a sequential range of XRIs
4150         * to post to the firmware.
4151         */
4152        for (idx = 0; idx < cnt; idx++) {
4153                list_remove_head(&blist, lpfc_cmd, struct lpfc_io_buf, list);
4154                if (!lpfc_cmd)
4155                        return cnt;
4156                if (idx == 0) {
4157                        list_add_tail(&lpfc_cmd->list, cbuf);
4158                        continue;
4159                }
4160                xri = lpfc_cmd->cur_iocbq.sli4_xritag;
4161                inserted = 0;
4162                prev_iobufp = NULL;
4163                list_for_each_entry(iobufp, cbuf, list) {
4164                        if (xri < iobufp->cur_iocbq.sli4_xritag) {
4165                                if (prev_iobufp)
4166                                        list_add(&lpfc_cmd->list,
4167                                                 &prev_iobufp->list);
4168                                else
4169                                        list_add(&lpfc_cmd->list, cbuf);
4170                                inserted = 1;
4171                                break;
4172                        }
4173                        prev_iobufp = iobufp;
4174                }
4175                if (!inserted)
4176                        list_add_tail(&lpfc_cmd->list, cbuf);
4177        }
4178        return cnt;
4179}
4180
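/**
 * lpfc_io_buf_replenish - Redistribute IO buffers across hardware queues
 * @phba: pointer to lpfc hba data structure.
 * @cbuf: pointer to the list of IO buffers to distribute.
 *
 * This routine hands the IO buffers on @cbuf back to the hardware queues in
 * round-robin order, adding each buffer to its new queue's put list and
 * updating the per-queue counters.
 *
 * Return codes
 *   the number of IO buffers distributed.
 **/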
4181int
4182lpfc_io_buf_replenish(struct lpfc_hba *phba, struct list_head *cbuf)
4183{
4184        struct lpfc_sli4_hdw_queue *qp;
4185        struct lpfc_io_buf *lpfc_cmd;
4186        int idx, cnt;
4187
4188        qp = phba->sli4_hba.hdwq;
4189        cnt = 0;
4190        while (!list_empty(cbuf)) {
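                /* Hand one buffer to each hardware queue per pass */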
4191                for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
4192                        list_remove_head(cbuf, lpfc_cmd,
4193                                         struct lpfc_io_buf, list);
4194                        if (!lpfc_cmd)
4195                                return cnt;
4196                        cnt++;
4197                        qp = &phba->sli4_hba.hdwq[idx];
4198                        lpfc_cmd->hdwq_no = idx;
4199                        lpfc_cmd->hdwq = qp;
4200                        lpfc_cmd->cur_iocbq.wqe_cmpl = NULL;
4201                        lpfc_cmd->cur_iocbq.iocb_cmpl = NULL;
4202                        spin_lock(&qp->io_buf_list_put_lock);
4203                        list_add_tail(&lpfc_cmd->list,
4204                                      &qp->lpfc_io_buf_list_put);
4205                        qp->put_io_bufs++;
4206                        qp->total_io_bufs++;
4207                        spin_unlock(&qp->io_buf_list_put_lock);
4208                }
4209        }
4210        return cnt;
4211}
4212
4213/**
4214 * lpfc_sli4_io_sgl_update - update xri-sgl sizing and mapping
4215 * @phba: pointer to lpfc hba data structure.
4216 *
4217 * This routine first calculates the maximum number of XRIs available for IO
4218 * buffers, releases any buffers allocated beyond that limit, and then walks
4219 * the remaining buffers to update the physical XRIs reassigned by a port
4220 * function reset. During port initialization, the current count is 0.
4221 *
4222 * Return codes
4223 *   0 - successful, -ENOMEM - failed to allocate an xri
4224 **/
4225int
4226lpfc_sli4_io_sgl_update(struct lpfc_hba *phba)
4227{
4228        struct lpfc_io_buf *lpfc_ncmd = NULL, *lpfc_ncmd_next = NULL;
4229        uint16_t i, lxri, els_xri_cnt;
4230        uint16_t io_xri_cnt, io_xri_max;
4231        LIST_HEAD(io_sgl_list);
4232        int rc, cnt;
4233
4234        /*
4235         * update on pci function's allocated nvme xri-sgl list
4236         */
4237
4238        /* maximum number of xris available for nvme buffers */
4239        els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
4240        io_xri_max = phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt;
4241        phba->sli4_hba.io_xri_max = io_xri_max;
4242
4243        lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4244                        "6074 Current allocated XRI sgl count:%d, "
4245                        "maximum XRI count:%d\n",
4246                        phba->sli4_hba.io_xri_cnt,
4247                        phba->sli4_hba.io_xri_max);
4248
4249        cnt = lpfc_io_buf_flush(phba, &io_sgl_list);
4250
4251        if (phba->sli4_hba.io_xri_cnt > phba->sli4_hba.io_xri_max) {
4252                /* max nvme xri shrunk below the allocated nvme buffers */
4253                io_xri_cnt = phba->sli4_hba.io_xri_cnt -
4254                                        phba->sli4_hba.io_xri_max;
4255                /* release the extra allocated nvme buffers */
4256                for (i = 0; i < io_xri_cnt; i++) {
4257                        list_remove_head(&io_sgl_list, lpfc_ncmd,
4258                                         struct lpfc_io_buf, list);
4259                        if (lpfc_ncmd) {
4260                                dma_pool_free(phba->lpfc_sg_dma_buf_pool,
4261                                              lpfc_ncmd->data,
4262                                              lpfc_ncmd->dma_handle);
4263                                kfree(lpfc_ncmd);
4264                        }
4265                }
4266                phba->sli4_hba.io_xri_cnt -= io_xri_cnt;
4267        }
4268
4269        /* update xris associated to remaining allocated nvme buffers */
4270        lpfc_ncmd = NULL;
4271        lpfc_ncmd_next = NULL;
4272        phba->sli4_hba.io_xri_cnt = cnt;
4273        list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
4274                                 &io_sgl_list, list) {
4275                lxri = lpfc_sli4_next_xritag(phba);
4276                if (lxri == NO_XRI) {
4277                        lpfc_printf_log(phba, KERN_ERR,
4278                                        LOG_TRACE_EVENT,
4279                                        "6075 Failed to allocate xri for "
4280                                        "nvme buffer\n");
4281                        rc = -ENOMEM;
4282                        goto out_free_mem;
4283                }
4284                lpfc_ncmd->cur_iocbq.sli4_lxritag = lxri;
4285                lpfc_ncmd->cur_iocbq.sli4_xritag = phba->sli4_hba.xri_ids[lxri];
4286        }
4287        cnt = lpfc_io_buf_replenish(phba, &io_sgl_list);
4288        return 0;
4289
4290out_free_mem:
4291        lpfc_io_free(phba);
4292        return rc;
4293}
4294
4295/**
4296 * lpfc_new_io_buf - IO buffer allocator for HBA with SLI4 IF spec
4297 * @phba: Pointer to lpfc hba data structure.
4298 * @num_to_alloc: The requested number of buffers to allocate.
4299 *
4300 * This routine allocates nvme buffers for a device with the SLI-4 interface
4301 * spec; each buffer contains all the information needed to initiate an I/O.
4302 * After allocating up to @num_to_alloc IO buffers and putting them on a
4303 * list, it posts them to the port using an SGL block post.
4304 *
4305 * Return codes:
4306 *   int - number of IO buffers that were allocated and posted.
4307 *   0 - failure; fewer than @num_to_alloc indicates a partial failure.
4308 **/
4309int
4310lpfc_new_io_buf(struct lpfc_hba *phba, int num_to_alloc)
4311{
4312        struct lpfc_io_buf *lpfc_ncmd;
4313        struct lpfc_iocbq *pwqeq;
4314        uint16_t iotag, lxri = 0;
4315        int bcnt, num_posted;
4316        LIST_HEAD(prep_nblist);
4317        LIST_HEAD(post_nblist);
4318        LIST_HEAD(nvme_nblist);
4319
4320        phba->sli4_hba.io_xri_cnt = 0;
4321        for (bcnt = 0; bcnt < num_to_alloc; bcnt++) {
4322                lpfc_ncmd = kzalloc(sizeof(*lpfc_ncmd), GFP_KERNEL);
4323                if (!lpfc_ncmd)
4324                        break;
4325                /*
4326                 * Get memory from the pci pool to map the virt space to
4327                 * pci bus space for an I/O. The DMA buffer includes the
4328                 * number of SGE's necessary to support the sg_tablesize.
4329                 */
4330                lpfc_ncmd->data = dma_pool_zalloc(phba->lpfc_sg_dma_buf_pool,
4331                                                  GFP_KERNEL,
4332                                                  &lpfc_ncmd->dma_handle);
4333                if (!lpfc_ncmd->data) {
4334                        kfree(lpfc_ncmd);
4335                        break;
4336                }
4337
4338                if (phba->cfg_xpsgl && !phba->nvmet_support) {
4339                        INIT_LIST_HEAD(&lpfc_ncmd->dma_sgl_xtra_list);
4340                } else {
4341                        /*
4342                         * 4K Page alignment is CRITICAL to BlockGuard, double
4343                         * check to be sure.
4344                         */
4345                        if ((phba->sli3_options & LPFC_SLI3_BG_ENABLED) &&
4346                            (((unsigned long)(lpfc_ncmd->data) &
4347                            (unsigned long)(SLI4_PAGE_SIZE - 1)) != 0)) {
4348                                lpfc_printf_log(phba, KERN_ERR,
4349                                                LOG_TRACE_EVENT,
4350                                                "3369 Memory alignment err: "
4351                                                "addr=%lx\n",
4352                                                (unsigned long)lpfc_ncmd->data);
4353                                dma_pool_free(phba->lpfc_sg_dma_buf_pool,
4354                                              lpfc_ncmd->data,
4355                                              lpfc_ncmd->dma_handle);
4356                                kfree(lpfc_ncmd);
4357                                break;
4358                        }
4359                }
4360
4361                INIT_LIST_HEAD(&lpfc_ncmd->dma_cmd_rsp_list);
4362
4363                lxri = lpfc_sli4_next_xritag(phba);
4364                if (lxri == NO_XRI) {
4365                        dma_pool_free(phba->lpfc_sg_dma_buf_pool,
4366                                      lpfc_ncmd->data, lpfc_ncmd->dma_handle);
4367                        kfree(lpfc_ncmd);
4368                        break;
4369                }
4370                pwqeq = &lpfc_ncmd->cur_iocbq;
4371
4372                /* Allocate iotag for lpfc_ncmd->cur_iocbq. */
4373                iotag = lpfc_sli_next_iotag(phba, pwqeq);
4374                if (iotag == 0) {
4375                        dma_pool_free(phba->lpfc_sg_dma_buf_pool,
4376                                      lpfc_ncmd->data, lpfc_ncmd->dma_handle);
4377                        kfree(lpfc_ncmd);
4378                        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
4379                                        "6121 Failed to allocate IOTAG for"
4380                                        " XRI:0x%x\n", lxri);
4381                        lpfc_sli4_free_xri(phba, lxri);
4382                        break;
4383                }
4384                pwqeq->sli4_lxritag = lxri;
4385                pwqeq->sli4_xritag = phba->sli4_hba.xri_ids[lxri];
4386                pwqeq->context1 = lpfc_ncmd;
4387
4388                /* Initialize local short-hand pointers. */
4389                lpfc_ncmd->dma_sgl = lpfc_ncmd->data;
4390                lpfc_ncmd->dma_phys_sgl = lpfc_ncmd->dma_handle;
4391                lpfc_ncmd->cur_iocbq.context1 = lpfc_ncmd;
4392                spin_lock_init(&lpfc_ncmd->buf_lock);
4393
4394                /* add the nvme buffer to a post list */
4395                list_add_tail(&lpfc_ncmd->list, &post_nblist);
4396                phba->sli4_hba.io_xri_cnt++;
4397        }
4398        lpfc_printf_log(phba, KERN_INFO, LOG_NVME,
4399                        "6114 Allocate %d out of %d requested new NVME "
4400                        "buffers\n", bcnt, num_to_alloc);
4401
4402        /* post the list of nvme buffer sgls to port if available */
4403        if (!list_empty(&post_nblist))
4404                num_posted = lpfc_sli4_post_io_sgl_list(
4405                                phba, &post_nblist, bcnt);
4406        else
4407                num_posted = 0;
4408
4409        return num_posted;
4410}
4411
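/**
 * lpfc_get_wwpn - Read the WWPN of the HBA instance
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine issues a READ_NV mailbox command to retrieve the WWPN of
 * the HBA instance and returns it in host byte order.
 *
 * Return codes
 *   the WWPN of the HBA instance.
 *   (uint64_t)-1 - mailbox allocation or command failure.
 **/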
4412static uint64_t
4413lpfc_get_wwpn(struct lpfc_hba *phba)
4414{
4415        uint64_t wwn;
4416        int rc;
4417        LPFC_MBOXQ_t *mboxq;
4418        MAILBOX_t *mb;
4419
4420        mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
4421                                                GFP_KERNEL);
4422        if (!mboxq)
4423                return (uint64_t)-1;
4424
4425        /* First get WWN of HBA instance */
4426        lpfc_read_nv(phba, mboxq);
4427        rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
4428        if (rc != MBX_SUCCESS) {
4429                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
4430                                "6019 Mailbox failed, mbxCmd x%x "
4431                                "READ_NV, mbxStatus x%x\n",
4432                                bf_get(lpfc_mqe_command, &mboxq->u.mqe),
4433                                bf_get(lpfc_mqe_status, &mboxq->u.mqe));
4434                mempool_free(mboxq, phba->mbox_mem_pool);
4435                return (uint64_t)-1;
4436        }
4437        mb = &mboxq->u.mb;
4438        memcpy(&wwn, (char *)mb->un.varRDnvp.portname, sizeof(uint64_t));
4439        /* wwn is WWPN of HBA instance */
4440        mempool_free(mboxq, phba->mbox_mem_pool);
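        /* Convert to host order: be64 swap on SLI4, 32-bit word swap otherwise */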
4441        if (phba->sli_rev == LPFC_SLI_REV4)
4442                return be64_to_cpu(wwn);
4443        else
4444                return rol64(wwn, 32);
4445}
4446
4447/**
4448 * lpfc_vmid_res_alloc - Allocates resources for VMID
4449 * @phba: pointer to lpfc hba data structure.
4450 * @vport: pointer to vport data structure
4451 *
4452 * This routine allocates the resources needed for VMID support.
4453 *
4454 * Return codes
4455 *      0 on Success
4456 *      Non-0 on Failure
4457 */
4458static int
4459lpfc_vmid_res_alloc(struct lpfc_hba *phba, struct lpfc_vport *vport)
4460{
4461        /* VMID feature is supported only on SLI4 */
4462        if (phba->sli_rev == LPFC_SLI_REV3) {
4463                phba->cfg_vmid_app_header = 0;
4464                phba->cfg_vmid_priority_tagging = 0;
4465        }
4466
4467        if (lpfc_is_vmid_enabled(phba)) {
4468                vport->vmid =
4469                    kcalloc(phba->cfg_max_vmid, sizeof(struct lpfc_vmid),
4470                            GFP_KERNEL);
4471                if (!vport->vmid)
4472                        return -ENOMEM;
4473
4474                rwlock_init(&vport->vmid_lock);
4475
4476                /* Set the VMID parameters for the vport */
4477                vport->vmid_priority_tagging = phba->cfg_vmid_priority_tagging;
4478                vport->vmid_inactivity_timeout =
4479                    phba->cfg_vmid_inactivity_timeout;
4480                vport->max_vmid = phba->cfg_max_vmid;
4481                vport->cur_vmid_cnt = 0;
4482
4483                vport->vmid_priority_range = bitmap_zalloc
4484                        (LPFC_VMID_MAX_PRIORITY_RANGE, GFP_KERNEL);
4485
4486                if (!vport->vmid_priority_range) {
4487                        kfree(vport->vmid);
4488                        return -ENOMEM;
4489                }
4490
4491                hash_init(vport->hash_table);
4492        }
4493        return 0;
4494}
4495
4496/**
4497 * lpfc_create_port - Create an FC port
4498 * @phba: pointer to lpfc hba data structure.
4499 * @instance: a unique integer ID to this FC port.
4500 * @dev: pointer to the device data structure.
4501 *
4502 * This routine creates an FC port for the upper layer protocol. The FC port
4503 * can be created on top of either a physical port or a virtual port provided
4504 * by the HBA. This routine also allocates a SCSI host data structure (shost)
4505 * and associates it with the FC port before adding the shost to the SCSI
4506 * layer.
4507 *
4508 * Return codes
4509 *   vport - pointer to the newly created virtual N_Port data structure.
4510 *   NULL - port create failed.
4511 **/
4512struct lpfc_vport *
4513lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
4514{
4515        struct lpfc_vport *vport;
4516        struct Scsi_Host  *shost = NULL;
4517        struct scsi_host_template *template;
4518        int error = 0;
4519        int i;
4520        uint64_t wwn;
4521        bool use_no_reset_hba = false;
4522        int rc;
4523
4524        if (lpfc_no_hba_reset_cnt) {
4525                if (phba->sli_rev < LPFC_SLI_REV4 &&
4526                    dev == &phba->pcidev->dev) {
4527                        /* Reset the port first */
4528                        lpfc_sli_brdrestart(phba);
4529                        rc = lpfc_sli_chipset_init(phba);
4530                        if (rc)
4531                                return NULL;
4532                }
4533                wwn = lpfc_get_wwpn(phba);
4534        }
4535
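        /* Check if this port's WWPN is on the no-HBA-reset list */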
4536        for (i = 0; i < lpfc_no_hba_reset_cnt; i++) {
4537                if (wwn == lpfc_no_hba_reset[i]) {
4538                        lpfc_printf_log(phba, KERN_ERR,
4539                                        LOG_TRACE_EVENT,
4540                                        "6020 Setting use_no_reset port=%llx\n",
4541                                        wwn);
4542                        use_no_reset_hba = true;
4543                        break;
4544                }
4545        }
4546
4547        /* Seed template for SCSI host registration */
4548        if (dev == &phba->pcidev->dev) {
4549                template = &phba->port_template;
4550
4551                if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) {
4552                        /* Seed physical port template */
4553                        memcpy(template, &lpfc_template, sizeof(*template));
4554
4555                        if (use_no_reset_hba)
4556                                /* template is for a no reset SCSI Host */
4557                                template->eh_host_reset_handler = NULL;
4558
4559                        /* Template for all vports this physical port creates */
4560                        memcpy(&phba->vport_template, &lpfc_template,
4561                               sizeof(*template));
4562                        phba->vport_template.shost_attrs = lpfc_vport_attrs;
4563                        phba->vport_template.eh_bus_reset_handler = NULL;
4564                        phba->vport_template.eh_host_reset_handler = NULL;
4565                        phba->vport_template.vendor_id = 0;
4566
4567                        /* Initialize the host templates with updated value */
4568                        if (phba->sli_rev == LPFC_SLI_REV4) {
4569                                template->sg_tablesize = phba->cfg_scsi_seg_cnt;
4570                                phba->vport_template.sg_tablesize =
4571                                        phba->cfg_scsi_seg_cnt;
4572                        } else {
4573                                template->sg_tablesize = phba->cfg_sg_seg_cnt;
4574                                phba->vport_template.sg_tablesize =
4575                                        phba->cfg_sg_seg_cnt;
4576                        }
4577
4578                } else {
4579                        /* NVMET is for physical port only */
4580                        memcpy(template, &lpfc_template_nvme,
4581                               sizeof(*template));
4582                }
4583        } else {
4584                template = &phba->vport_template;
4585        }
4586
4587        shost = scsi_host_alloc(template, sizeof(struct lpfc_vport));
4588        if (!shost)
4589                goto out;
4590
4591        vport = (struct lpfc_vport *) shost->hostdata;
4592        vport->phba = phba;
4593        vport->load_flag |= FC_LOADING;
4594        vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
4595        vport->fc_rscn_flush = 0;
4596        lpfc_get_vport_cfgparam(vport);
4597
4598        /* Adjust value in vport */
4599        vport->cfg_enable_fc4_type = phba->cfg_enable_fc4_type;
4600
4601        shost->unique_id = instance;
4602        shost->max_id = LPFC_MAX_TARGET;
4603        shost->max_lun = vport->cfg_max_luns;
4604        shost->this_id = -1;
4605        shost->max_cmd_len = 16;
4606
4607        if (phba->sli_rev == LPFC_SLI_REV4) {
4608                if (!phba->cfg_fcp_mq_threshold ||
4609                    phba->cfg_fcp_mq_threshold > phba->cfg_hdw_queue)
4610                        phba->cfg_fcp_mq_threshold = phba->cfg_hdw_queue;
4611
4612                shost->nr_hw_queues = min_t(int, 2 * num_possible_nodes(),
4613                                            phba->cfg_fcp_mq_threshold);
4614
4615                shost->dma_boundary =
4616                        phba->sli4_hba.pc_sli4_params.sge_supp_len-1;
4617
4618                if (phba->cfg_xpsgl && !phba->nvmet_support)
4619                        shost->sg_tablesize = LPFC_MAX_SG_TABLESIZE;
4620                else
4621                        shost->sg_tablesize = phba->cfg_scsi_seg_cnt;
4622        } else
4623                /* SLI-3 has a limited number of hardware queues (3),
4624                 * thus there is only one for FCP processing.
4625                 */
4626                shost->nr_hw_queues = 1;
4627
4628        /*
4629         * Set initial can_queue value since 0 is no longer supported and
4630         * scsi_add_host will fail. This will be adjusted later based on the
4631         * max xri value determined in hba setup.
4632         */
4633        shost->can_queue = phba->cfg_hba_queue_depth - 10;
4634        if (dev != &phba->pcidev->dev) {
4635                shost->transportt = lpfc_vport_transport_template;
4636                vport->port_type = LPFC_NPIV_PORT;
4637        } else {
4638                shost->transportt = lpfc_transport_template;
4639                vport->port_type = LPFC_PHYSICAL_PORT;
4640        }
4641
4642        lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP,
4643                        "9081 CreatePort TMPLATE type %x TBLsize %d "
4644                        "SEGcnt %d/%d\n",
4645                        vport->port_type, shost->sg_tablesize,
4646                        phba->cfg_scsi_seg_cnt, phba->cfg_sg_seg_cnt);
4647
4648        /* Allocate the resources for VMID */
4649        rc = lpfc_vmid_res_alloc(phba, vport);
4650
4651        if (rc)
4652                goto out;
4653
4654        /* Initialize all internally managed lists. */
4655        INIT_LIST_HEAD(&vport->fc_nodes);
4656        INIT_LIST_HEAD(&vport->rcv_buffer_list);
4657        spin_lock_init(&vport->work_port_lock);
4658
4659        timer_setup(&vport->fc_disctmo, lpfc_disc_timeout, 0);
4660
4661        timer_setup(&vport->els_tmofunc, lpfc_els_timeout, 0);
4662
4663        timer_setup(&vport->delayed_disc_tmo, lpfc_delayed_disc_tmo, 0);
4664
4665        if (phba->sli3_options & LPFC_SLI3_BG_ENABLED)
4666                lpfc_setup_bg(phba, shost);
4667
4668        error = scsi_add_host_with_dma(shost, dev, &phba->pcidev->dev);
4669        if (error)
4670                goto out_put_shost;
4671
4672        spin_lock_irq(&phba->port_list_lock);
4673        list_add_tail(&vport->listentry, &phba->port_list);
4674        spin_unlock_irq(&phba->port_list_lock);
4675        return vport;
4676
4677out_put_shost:
4678        kfree(vport->vmid);
4679        bitmap_free(vport->vmid_priority_range);
4680        scsi_host_put(shost);
4681out:
4682        return NULL;
4683}
4684
4685/**
4686 * destroy_port -  destroy an FC port
4687 * @vport: pointer to an lpfc virtual N_Port data structure.
4688 *
4689 * This routine destroys an FC port from the upper layer protocol. All the
4690 * resources associated with the port are released.
4691 **/
4692void
4693destroy_port(struct lpfc_vport *vport)
4694{
4695        struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
4696        struct lpfc_hba  *phba = vport->phba;
4697
4698        lpfc_debugfs_terminate(vport);
4699        fc_remove_host(shost);
4700        scsi_remove_host(shost);
4701
4702        spin_lock_irq(&phba->port_list_lock);
4703        list_del_init(&vport->listentry);
4704        spin_unlock_irq(&phba->port_list_lock);
4705
4706        lpfc_cleanup(vport);
4707        return;
4708}
4709
4710/**
4711 * lpfc_get_instance - Get a unique integer ID
4712 *
4713 * This routine allocates a unique integer ID from lpfc_hba_index pool. It
4714 * uses the kernel idr facility to perform the task.
4715 *
4716 * Return codes:
4717 *   instance - a unique integer ID allocated as the new instance.
4718 *   -1 - lpfc get instance failed.
4719 **/
4720int
4721lpfc_get_instance(void)
4722{
4723        int ret;
4724
4725        ret = idr_alloc(&lpfc_hba_index, NULL, 0, 0, GFP_KERNEL);
4726        return ret < 0 ? -1 : ret;
4727}
4728
4729/**
4730 * lpfc_scan_finished - method for SCSI layer to detect whether scan is done
4731 * @shost: pointer to SCSI host data structure.
4732 * @time: elapsed time of the scan in jiffies.
4733 *
4734 * This routine is called by the SCSI layer with a SCSI host to determine
4735 * whether the host scan is finished.
4736 *
4737 * Note: there is no scan_start function as adapter initialization will have
4738 * asynchronously kicked off the link initialization.
4739 *
4740 * Return codes
4741 *   0 - SCSI host scan is not over yet.
4742 *   1 - SCSI host scan is over.
4743 **/
4744int lpfc_scan_finished(struct Scsi_Host *shost, unsigned long time)
4745{
4746        struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
4747        struct lpfc_hba   *phba = vport->phba;
4748        int stat = 0;
4749
4750        spin_lock_irq(shost->host_lock);
4751
4752        if (vport->load_flag & FC_UNLOADING) {
4753                stat = 1;
4754                goto finished;
4755        }
4756        if (time >= msecs_to_jiffies(30 * 1000)) {
4757                lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4758                                "0461 Scanning longer than 30 "
4759                                "seconds.  Continuing initialization\n");
4760                stat = 1;
4761                goto finished;
4762        }
4763        if (time >= msecs_to_jiffies(15 * 1000) &&
4764            phba->link_state <= LPFC_LINK_DOWN) {
4765                lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4766                                "0465 Link down longer than 15 "
4767                                "seconds.  Continuing initialization\n");
4768                stat = 1;
4769                goto finished;
4770        }
4771
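        /* Hold off scan completion until discovery has quiesced */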
4772        if (vport->port_state != LPFC_VPORT_READY)
4773                goto finished;
4774        if (vport->num_disc_nodes || vport->fc_prli_sent)
4775                goto finished;
4776        if (vport->fc_map_cnt == 0 && time < msecs_to_jiffies(2 * 1000))
4777                goto finished;
4778        if ((phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) != 0)
4779                goto finished;
4780
4781        stat = 1;
4782
4783finished:
4784        spin_unlock_irq(shost->host_lock);
4785        return stat;
4786}
4787
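/**
 * lpfc_host_supported_speeds_set - Set the FC host's supported link speeds
 * @shost: pointer to SCSI host data structure.
 *
 * This routine translates the HBA's link speed capability mask (lmt) into
 * the FC transport's supported-speeds bitmap. FCoE ports report no
 * supported speeds because the link speed cannot be controlled via FCoE.
 **/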
4788static void lpfc_host_supported_speeds_set(struct Scsi_Host *shost)
4789{
4790        struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
4791        struct lpfc_hba   *phba = vport->phba;
4792
4793        fc_host_supported_speeds(shost) = 0;
4794        /*
4795         * Avoid reporting supported link speed for FCoE as it can't be
4796         * controlled via FCoE.
4797         */
4798        if (phba->hba_flag & HBA_FCOE_MODE)
4799                return;
4800
4801        if (phba->lmt & LMT_256Gb)
4802                fc_host_supported_speeds(shost) |= FC_PORTSPEED_256GBIT;
4803        if (phba->lmt & LMT_128Gb)
4804                fc_host_supported_speeds(shost) |= FC_PORTSPEED_128GBIT;
4805        if (phba->lmt & LMT_64Gb)
4806                fc_host_supported_speeds(shost) |= FC_PORTSPEED_64GBIT;
4807        if (phba->lmt & LMT_32Gb)
4808                fc_host_supported_speeds(shost) |= FC_PORTSPEED_32GBIT;
4809        if (phba->lmt & LMT_16Gb)
4810                fc_host_supported_speeds(shost) |= FC_PORTSPEED_16GBIT;
4811        if (phba->lmt & LMT_10Gb)
4812                fc_host_supported_speeds(shost) |= FC_PORTSPEED_10GBIT;
4813        if (phba->lmt & LMT_8Gb)
4814                fc_host_supported_speeds(shost) |= FC_PORTSPEED_8GBIT;
4815        if (phba->lmt & LMT_4Gb)
4816                fc_host_supported_speeds(shost) |= FC_PORTSPEED_4GBIT;
4817        if (phba->lmt & LMT_2Gb)
4818                fc_host_supported_speeds(shost) |= FC_PORTSPEED_2GBIT;
4819        if (phba->lmt & LMT_1Gb)
4820                fc_host_supported_speeds(shost) |= FC_PORTSPEED_1GBIT;
4821}
4822
4823/**
4824 * lpfc_host_attrib_init - Initialize SCSI host attributes on a FC port
4825 * @shost: pointer to SCSI host data structure.
4826 *
4827 * This routine initializes a given SCSI host attributes on a FC port. The
4828 * SCSI host can be either on top of a physical port or a virtual port.
4829 **/
4830void lpfc_host_attrib_init(struct Scsi_Host *shost)
4831{
4832        struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
4833        struct lpfc_hba   *phba = vport->phba;
4834        /*
4835         * Set fixed host attributes.  Must be done after lpfc_sli_hba_setup().
4836         */
4837
4838        fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
4839        fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
4840        fc_host_supported_classes(shost) = FC_COS_CLASS3;
4841
4842        memset(fc_host_supported_fc4s(shost), 0,
4843               sizeof(fc_host_supported_fc4s(shost)));
4844        fc_host_supported_fc4s(shost)[2] = 1;
4845        fc_host_supported_fc4s(shost)[7] = 1;
4846
4847        lpfc_vport_symbolic_node_name(vport, fc_host_symbolic_name(shost),
4848                                 sizeof(fc_host_symbolic_name(shost)));
4849
4850        lpfc_host_supported_speeds_set(shost);
4851
4852        fc_host_maxframe_size(shost) =
4853                (((uint32_t) vport->fc_sparam.cmn.bbRcvSizeMsb & 0x0F) << 8) |
4854                (uint32_t) vport->fc_sparam.cmn.bbRcvSizeLsb;
4855
4856        fc_host_dev_loss_tmo(shost) = vport->cfg_devloss_tmo;
4857
4858        /* This value is also unchanging */
4859        memset(fc_host_active_fc4s(shost), 0,
4860               sizeof(fc_host_active_fc4s(shost)));
4861        fc_host_active_fc4s(shost)[2] = 1;
4862        fc_host_active_fc4s(shost)[7] = 1;
4863
4864        fc_host_max_npiv_vports(shost) = phba->max_vpi;
4865        spin_lock_irq(shost->host_lock);
4866        vport->load_flag &= ~FC_LOADING;
4867        spin_unlock_irq(shost->host_lock);
4868}
4869
4870/**
4871 * lpfc_stop_port_s3 - Stop SLI3 device port
4872 * @phba: pointer to lpfc hba data structure.
4873 *
4874 * This routine is invoked to stop an SLI3 device port; it stops the device
4875 * from generating interrupts and stops the device driver's timers for the
4876 * device.
4877 **/
4878static void
4879lpfc_stop_port_s3(struct lpfc_hba *phba)
4880{
4881        /* Clear all interrupt enable conditions */
4882        writel(0, phba->HCregaddr);
4883        readl(phba->HCregaddr); /* flush */
4884        /* Clear all pending interrupts */
4885        writel(0xffffffff, phba->HAregaddr);
4886        readl(phba->HAregaddr); /* flush */
4887
4888        /* Reset some HBA SLI setup states */
4889        lpfc_stop_hba_timers(phba);
4890        phba->pport->work_port_events = 0;
4891}
4892
4893/**
4894 * lpfc_stop_port_s4 - Stop SLI4 device port
4895 * @phba: pointer to lpfc hba data structure.
4896 *
4897 * This routine is invoked to stop an SLI4 device port; it stops the device
4898 * from generating interrupts and stops the device driver's timers for the
4899 * device.
4900 **/
4901static void
4902lpfc_stop_port_s4(struct lpfc_hba *phba)
4903{
4904        /* Reset some HBA SLI4 setup states */
4905        lpfc_stop_hba_timers(phba);
4906        if (phba->pport)
4907                phba->pport->work_port_events = 0;
4908        phba->sli4_hba.intr_enable = 0;
4909}
4910
4911/**
4912 * lpfc_stop_port - Wrapper function for stopping hba port
4913 * @phba: Pointer to HBA context object.
4914 *
4915 * This routine wraps the actual SLI3 or SLI4 hba stop port routine through
4916 * the API jump table function pointer in the lpfc_hba struct.
4917 **/
4918void
4919lpfc_stop_port(struct lpfc_hba *phba)
4920{
4921        phba->lpfc_stop_port(phba);
4922
4923        if (phba->wq)
4924                flush_workqueue(phba->wq);
4925}
4926
4927/**
4928 * lpfc_fcf_redisc_wait_start_timer - Start fcf rediscover wait timer
4929 * @phba: Pointer to hba for which this call is being executed.
4930 *
4931 * This routine starts the timer waiting for the FCF rediscovery to complete.
4932 **/
4933void
4934lpfc_fcf_redisc_wait_start_timer(struct lpfc_hba *phba)
4935{
4936        unsigned long fcf_redisc_wait_tmo =
4937                (jiffies + msecs_to_jiffies(LPFC_FCF_REDISCOVER_WAIT_TMO));
4938        /* Start fcf rediscovery wait period timer */
4939        mod_timer(&phba->fcf.redisc_wait, fcf_redisc_wait_tmo);
4940        spin_lock_irq(&phba->hbalock);
4941        /* Allow action to new fcf asynchronous event */
4942        phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_SCAN_DONE);
4943        /* Mark the FCF rediscovery pending state */
4944        phba->fcf.fcf_flag |= FCF_REDISC_PEND;
4945        spin_unlock_irq(&phba->hbalock);
4946}
4947
4948/**
4949 * lpfc_sli4_fcf_redisc_wait_tmo - FCF table rediscover wait timeout
4950 * @t: Timer context used to obtain the pointer to lpfc hba data structure.
4951 *
4952 * This routine is invoked when the wait for FCF table rediscovery has
4953 * timed out. If new FCF records have been discovered during the wait
4954 * period, a new FCF event is added to the FCoE async event list and the
4955 * worker thread is woken up for processing from the worker thread
4956 * context.
4957 **/
4958static void
4959lpfc_sli4_fcf_redisc_wait_tmo(struct timer_list *t)
4960{
4961        struct lpfc_hba *phba = from_timer(phba, t, fcf.redisc_wait);
4962
4963        /* Don't send FCF rediscovery event if timer cancelled */
4964        spin_lock_irq(&phba->hbalock);
4965        if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND)) {
4966                spin_unlock_irq(&phba->hbalock);
4967                return;
4968        }
4969        /* Clear FCF rediscovery timer pending flag */
4970        phba->fcf.fcf_flag &= ~FCF_REDISC_PEND;
4971        /* FCF rediscovery event to worker thread */
4972        phba->fcf.fcf_flag |= FCF_REDISC_EVT;
4973        spin_unlock_irq(&phba->hbalock);
4974        lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
4975                        "2776 FCF rediscover quiescent timer expired\n");
4976        /* wake up worker thread */
4977        lpfc_worker_wake_up(phba);
4978}
4979
4980/**
4981 * lpfc_vmid_poll - VMID timeout detection
4982 * @t: Timer context used to obtain the pointer to lpfc hba data structure.
4983 *
4984 * This routine is invoked when a VM has issued no I/O for the specified
4985 * amount of time. When this situation is detected, the VMID has to be
4986 * deregistered from the switch and all the local resources freed. The VMID
4987 * will be reassigned to the VM once I/O begins again.
4988 **/
4989static void
4990lpfc_vmid_poll(struct timer_list *t)
4991{
4992        struct lpfc_hba *phba = from_timer(phba, t, inactive_vmid_poll);
4993        u32 wake_up = 0;
4994
4995        /* check if there is a need to issue QFPA */
4996        if (phba->pport->vmid_priority_tagging) {
4997                wake_up = 1;
4998                phba->pport->work_port_events |= WORKER_CHECK_VMID_ISSUE_QFPA;
4999        }
5000
5001        /* Is the vmid inactivity timer enabled */
5002        if (phba->pport->vmid_inactivity_timeout ||
5003            phba->pport->load_flag & FC_DEREGISTER_ALL_APP_ID) {
5004                wake_up = 1;
5005                phba->pport->work_port_events |= WORKER_CHECK_INACTIVE_VMID;
5006        }
5007
5008        if (wake_up)
5009                lpfc_worker_wake_up(phba);
5010
5011        /* restart the timer for the next iteration */
5012        mod_timer(&phba->inactive_vmid_poll, jiffies + msecs_to_jiffies(1000 *
5013                                                        LPFC_VMID_TIMER));
5014}
5015
5016/**
5017 * lpfc_sli4_parse_latt_fault - Parse sli4 link-attention link fault code
5018 * @phba: pointer to lpfc hba data structure.
5019 * @acqe_link: pointer to the async link completion queue entry.
5020 *
5021 * This routine is to parse the SLI4 link-attention link fault code.
5022 **/
5023static void
5024lpfc_sli4_parse_latt_fault(struct lpfc_hba *phba,
5025                           struct lpfc_acqe_link *acqe_link)
5026{
5027        switch (bf_get(lpfc_acqe_link_fault, acqe_link)) {
5028        case LPFC_ASYNC_LINK_FAULT_NONE:
5029        case LPFC_ASYNC_LINK_FAULT_LOCAL:
5030        case LPFC_ASYNC_LINK_FAULT_REMOTE:
5031        case LPFC_ASYNC_LINK_FAULT_LR_LRR:
5032                break;
5033        default:
5034                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5035                                "0398 Unknown link fault code: x%x\n",
5036                                bf_get(lpfc_acqe_link_fault, acqe_link));
5037                break;
5038        }
5039}
5040
5041/**
5042 * lpfc_sli4_parse_latt_type - Parse sli4 link attention type
5043 * @phba: pointer to lpfc hba data structure.
5044 * @acqe_link: pointer to the async link completion queue entry.
5045 *
5046 * This routine is to parse the SLI4 link attention type and translate it
5047 * into the base driver's link attention type coding.
5048 *
5049 * Return: Link attention type in terms of base driver's coding.
5050 **/
5051static uint8_t
5052lpfc_sli4_parse_latt_type(struct lpfc_hba *phba,
5053                          struct lpfc_acqe_link *acqe_link)
5054{
5055        uint8_t att_type;
5056
5057        switch (bf_get(lpfc_acqe_link_status, acqe_link)) {
5058        case LPFC_ASYNC_LINK_STATUS_DOWN:
5059        case LPFC_ASYNC_LINK_STATUS_LOGICAL_DOWN:
5060                att_type = LPFC_ATT_LINK_DOWN;
5061                break;
5062        case LPFC_ASYNC_LINK_STATUS_UP:
5063                /* Ignore physical link up events - wait for logical link up */
5064                att_type = LPFC_ATT_RESERVED;
5065                break;
5066        case LPFC_ASYNC_LINK_STATUS_LOGICAL_UP:
5067                att_type = LPFC_ATT_LINK_UP;
5068                break;
5069        default:
5070                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5071                                "0399 Invalid link attention type: x%x\n",
5072                                bf_get(lpfc_acqe_link_status, acqe_link));
5073                att_type = LPFC_ATT_RESERVED;
5074                break;
5075        }
5076        return att_type;
5077}
5078
5079/**
5080 * lpfc_sli_port_speed_get - Get the current FC port link speed
5081 * @phba: pointer to lpfc hba data structure.
5082 *
5083 * This routine returns the FC port's current link speed in Mbps.
5084 *
5085 * Return: link speed in terms of Mbps.
5086 **/
5087uint32_t
5088lpfc_sli_port_speed_get(struct lpfc_hba *phba)
5089{
5090        uint32_t link_speed;
5091
5092        if (!lpfc_is_link_up(phba))
5093                return 0;
5094
5095        if (phba->sli_rev <= LPFC_SLI_REV3) {
5096                switch (phba->fc_linkspeed) {
5097                case LPFC_LINK_SPEED_1GHZ:
5098                        link_speed = 1000;
5099                        break;
5100                case LPFC_LINK_SPEED_2GHZ:
5101                        link_speed = 2000;
5102                        break;
5103                case LPFC_LINK_SPEED_4GHZ:
5104                        link_speed = 4000;
5105                        break;
5106                case LPFC_LINK_SPEED_8GHZ:
5107                        link_speed = 8000;
5108                        break;
5109                case LPFC_LINK_SPEED_10GHZ:
5110                        link_speed = 10000;
5111                        break;
5112                case LPFC_LINK_SPEED_16GHZ:
5113                        link_speed = 16000;
5114                        break;
5115                default:
5116                        link_speed = 0;
5117                }
5118        } else {
5119                if (phba->sli4_hba.link_state.logical_speed)
5120                        link_speed =
5121                              phba->sli4_hba.link_state.logical_speed;
5122                else
5123                        link_speed = phba->sli4_hba.link_state.speed;
5124        }
5125        return link_speed;
5126}
5127
5128/**
5129 * lpfc_sli4_port_speed_parse - Parse async evt link speed code to link speed
5130 * @phba: pointer to lpfc hba data structure.
5131 * @evt_code: asynchronous event code.
5132 * @speed_code: asynchronous event link speed code.
5133 *
5134 * This routine parses the given SLI4 async event link speed code into a
5135 * link speed value in Mbps.
5136 *
5137 * Return: link speed in terms of Mbps.
5138 **/
5139static uint32_t
5140lpfc_sli4_port_speed_parse(struct lpfc_hba *phba, uint32_t evt_code,
5141                           uint8_t speed_code)
5142{
5143        uint32_t port_speed;
5144
5145        switch (evt_code) {
5146        case LPFC_TRAILER_CODE_LINK:
5147                switch (speed_code) {
5148                case LPFC_ASYNC_LINK_SPEED_ZERO:
5149                        port_speed = 0;
5150                        break;
5151                case LPFC_ASYNC_LINK_SPEED_10MBPS:
5152                        port_speed = 10;
5153                        break;
5154                case LPFC_ASYNC_LINK_SPEED_100MBPS:
5155                        port_speed = 100;
5156                        break;
5157                case LPFC_ASYNC_LINK_SPEED_1GBPS:
5158                        port_speed = 1000;
5159                        break;
5160                case LPFC_ASYNC_LINK_SPEED_10GBPS:
5161                        port_speed = 10000;
5162                        break;
5163                case LPFC_ASYNC_LINK_SPEED_20GBPS:
5164                        port_speed = 20000;
5165                        break;
5166                case LPFC_ASYNC_LINK_SPEED_25GBPS:
5167                        port_speed = 25000;
5168                        break;
5169                case LPFC_ASYNC_LINK_SPEED_40GBPS:
5170                        port_speed = 40000;
5171                        break;
5172                case LPFC_ASYNC_LINK_SPEED_100GBPS:
5173                        port_speed = 100000;
5174                        break;
5175                default:
5176                        port_speed = 0;
5177                }
5178                break;
5179        case LPFC_TRAILER_CODE_FC:
5180                switch (speed_code) {
5181                case LPFC_FC_LA_SPEED_UNKNOWN:
5182                        port_speed = 0;
5183                        break;
5184                case LPFC_FC_LA_SPEED_1G:
5185                        port_speed = 1000;
5186                        break;
5187                case LPFC_FC_LA_SPEED_2G:
5188                        port_speed = 2000;
5189                        break;
5190                case LPFC_FC_LA_SPEED_4G:
5191                        port_speed = 4000;
5192                        break;
5193                case LPFC_FC_LA_SPEED_8G:
5194                        port_speed = 8000;
5195                        break;
5196                case LPFC_FC_LA_SPEED_10G:
5197                        port_speed = 10000;
5198                        break;
5199                case LPFC_FC_LA_SPEED_16G:
5200                        port_speed = 16000;
5201                        break;
5202                case LPFC_FC_LA_SPEED_32G:
5203                        port_speed = 32000;
5204                        break;
5205                case LPFC_FC_LA_SPEED_64G:
5206                        port_speed = 64000;
5207                        break;
5208                case LPFC_FC_LA_SPEED_128G:
5209                        port_speed = 128000;
5210                        break;
5211                case LPFC_FC_LA_SPEED_256G:
5212                        port_speed = 256000;
5213                        break;
5214                default:
5215                        port_speed = 0;
5216                }
5217                break;
5218        default:
5219                port_speed = 0;
5220        }
5221        return port_speed;
5222}
5223
5224/**
5225 * lpfc_sli4_async_link_evt - Process the asynchronous FCoE link event
5226 * @phba: pointer to lpfc hba data structure.
5227 * @acqe_link: pointer to the async link completion queue entry.
5228 *
5229 * This routine is to handle the SLI4 asynchronous FCoE link event.
5230 **/
5231static void
5232lpfc_sli4_async_link_evt(struct lpfc_hba *phba,
5233                         struct lpfc_acqe_link *acqe_link)
5234{
5235        struct lpfc_dmabuf *mp;
5236        LPFC_MBOXQ_t *pmb;
5237        MAILBOX_t *mb;
5238        struct lpfc_mbx_read_top *la;
5239        uint8_t att_type;
5240        int rc;
5241
5242        att_type = lpfc_sli4_parse_latt_type(phba, acqe_link);
5243        if (att_type != LPFC_ATT_LINK_DOWN && att_type != LPFC_ATT_LINK_UP)
5244                return;
5245        phba->fcoe_eventtag = acqe_link->event_tag;
5246        pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5247        if (!pmb) {
5248                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5249                                "0395 The mboxq allocation failed\n");
5250                return;
5251        }
5252        mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
5253        if (!mp) {
5254                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5255                                "0396 The lpfc_dmabuf allocation failed\n");
5256                goto out_free_pmb;
5257        }
5258        mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
5259        if (!mp->virt) {
5260                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5261                                "0397 The mbuf allocation failed\n");
5262                goto out_free_dmabuf;
5263        }
5264
5265        /* Cleanup any outstanding ELS commands */
5266        lpfc_els_flush_all_cmd(phba);
5267
5268        /* Block ELS IOCBs until we are done processing the link event */
5269        phba->sli4_hba.els_wq->pring->flag |= LPFC_STOP_IOCB_EVENT;
5270
5271        /* Update link event statistics */
5272        phba->sli.slistat.link_event++;
5273
5274        /* Create lpfc_handle_latt mailbox command from link ACQE */
5275        lpfc_read_topology(phba, pmb, mp);
5276        pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology;
5277        pmb->vport = phba->pport;
5278
5279        /* Keep the link status for extra SLI4 state machine reference */
5280        phba->sli4_hba.link_state.speed =
5281                        lpfc_sli4_port_speed_parse(phba, LPFC_TRAILER_CODE_LINK,
5282                                bf_get(lpfc_acqe_link_speed, acqe_link));
5283        phba->sli4_hba.link_state.duplex =
5284                                bf_get(lpfc_acqe_link_duplex, acqe_link);
5285        phba->sli4_hba.link_state.status =
5286                                bf_get(lpfc_acqe_link_status, acqe_link);
5287        phba->sli4_hba.link_state.type =
5288                                bf_get(lpfc_acqe_link_type, acqe_link);
5289        phba->sli4_hba.link_state.number =
5290                                bf_get(lpfc_acqe_link_number, acqe_link);
5291        phba->sli4_hba.link_state.fault =
5292                                bf_get(lpfc_acqe_link_fault, acqe_link);
5293        phba->sli4_hba.link_state.logical_speed =
5294                        bf_get(lpfc_acqe_logical_link_speed, acqe_link) * 10;
5295
5296        lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
5297                        "2900 Async FC/FCoE Link event - Speed:%dMbps "
5298                        "duplex:x%x LA Type:x%x Port Type:%d Port Number:%d "
5299                        "Logical speed:%dMbps Fault:%d\n",
5300                        phba->sli4_hba.link_state.speed,
5301                        phba->sli4_hba.link_state.duplex,
5302                        phba->sli4_hba.link_state.status,
5303                        phba->sli4_hba.link_state.type,
5304                        phba->sli4_hba.link_state.number,
5305                        phba->sli4_hba.link_state.logical_speed,
5306                        phba->sli4_hba.link_state.fault);
5307        /*
5308         * For FC Mode: issue the READ_TOPOLOGY mailbox command to fetch
5309         * topology info. Note: Optional for non FC-AL ports.
5310         */
5311        if (!(phba->hba_flag & HBA_FCOE_MODE)) {
5312                rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
5313                if (rc == MBX_NOT_FINISHED) {
                        /* out_free_dmabuf alone would leak the mbuf */
                        lpfc_mbuf_free(phba, mp->virt, mp->phys);
5314                        goto out_free_dmabuf;
                }
5315                return;
5316        }
5317        /*
5318         * For FCoE Mode: fill in all the topology information we need and call
5319         * the READ_TOPOLOGY completion routine to continue without actually
5320         * sending the READ_TOPOLOGY mailbox command to the port.
5321         */
5322        /* Initialize completion status */
5323        mb = &pmb->u.mb;
5324        mb->mbxStatus = MBX_SUCCESS;
5325
5326        /* Parse port fault information field */
5327        lpfc_sli4_parse_latt_fault(phba, acqe_link);
5328
5329        /* Parse and translate link attention fields */
5330        la = (struct lpfc_mbx_read_top *) &pmb->u.mb.un.varReadTop;
5331        la->eventTag = acqe_link->event_tag;
5332        bf_set(lpfc_mbx_read_top_att_type, la, att_type);
5333        bf_set(lpfc_mbx_read_top_link_spd, la,
5334               (bf_get(lpfc_acqe_link_speed, acqe_link)));
5335
5336        /* Fake the following irrelevant fields */
5337        bf_set(lpfc_mbx_read_top_topology, la, LPFC_TOPOLOGY_PT_PT);
5338        bf_set(lpfc_mbx_read_top_alpa_granted, la, 0);
5339        bf_set(lpfc_mbx_read_top_il, la, 0);
5340        bf_set(lpfc_mbx_read_top_pb, la, 0);
5341        bf_set(lpfc_mbx_read_top_fa, la, 0);
5342        bf_set(lpfc_mbx_read_top_mm, la, 0);
5343
5344        /* Invoke the lpfc_handle_latt mailbox command callback function */
5345        lpfc_mbx_cmpl_read_topology(phba, pmb);
5346
5347        return;
5348
5349out_free_dmabuf:
5350        kfree(mp);
5351out_free_pmb:
5352        mempool_free(pmb, phba->mbox_mem_pool);
5353}
5354
5355/**
5356 * lpfc_async_link_speed_to_read_top - Parse async evt link speed code to read
5357 * topology.
5358 * @phba: pointer to lpfc hba data structure.
5359 * @speed_code: asynchronous event link speed code.
5360 *
5361 * This routine is to parse the given SLI4 async event link speed code into
5362 * value of Read topology link speed.
5363 *
5364 * Return: link speed in terms of Read topology.
5365 **/
5366static uint8_t
5367lpfc_async_link_speed_to_read_top(struct lpfc_hba *phba, uint8_t speed_code)
5368{
5369        uint8_t port_speed;
5370
5371        switch (speed_code) {
5372        case LPFC_FC_LA_SPEED_1G:
5373                port_speed = LPFC_LINK_SPEED_1GHZ;
5374                break;
5375        case LPFC_FC_LA_SPEED_2G:
5376                port_speed = LPFC_LINK_SPEED_2GHZ;
5377                break;
5378        case LPFC_FC_LA_SPEED_4G:
5379                port_speed = LPFC_LINK_SPEED_4GHZ;
5380                break;
5381        case LPFC_FC_LA_SPEED_8G:
5382                port_speed = LPFC_LINK_SPEED_8GHZ;
5383                break;
5384        case LPFC_FC_LA_SPEED_16G:
5385                port_speed = LPFC_LINK_SPEED_16GHZ;
5386                break;
5387        case LPFC_FC_LA_SPEED_32G:
5388                port_speed = LPFC_LINK_SPEED_32GHZ;
5389                break;
5390        case LPFC_FC_LA_SPEED_64G:
5391                port_speed = LPFC_LINK_SPEED_64GHZ;
5392                break;
5393        case LPFC_FC_LA_SPEED_128G:
5394                port_speed = LPFC_LINK_SPEED_128GHZ;
5395                break;
5396        case LPFC_FC_LA_SPEED_256G:
5397                port_speed = LPFC_LINK_SPEED_256GHZ;
5398                break;
5399        default:
5400                port_speed = 0;
5401                break;
5402        }
5403
5404        return port_speed;
5405}
5406
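/*
 * Illustration: unlike lpfc_sli4_port_speed_parse() above, which yields
 * Mbps, this helper yields the READ_TOPOLOGY mailbox encoding, e.g.
 *
 *	lpfc_async_link_speed_to_read_top(phba, LPFC_FC_LA_SPEED_16G)
 *	=> LPFC_LINK_SPEED_16GHZ
 *
 * so phba->fc_linkspeed can be kept in the same units the SLI3-style
 * link attention path uses.
 */
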
5407void
5408lpfc_cgn_dump_rxmonitor(struct lpfc_hba *phba)
5409{
5410        struct rxtable_entry *entry;
5411        int cnt = 0, head, tail, last, start;
5412
5413        head = atomic_read(&phba->rxtable_idx_head);
5414        tail = atomic_read(&phba->rxtable_idx_tail);
5415        if (!phba->rxtable || head == tail) {
5416                lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT,
5417                                "4411 Rxtable is empty\n");
5418                return;
5419        }
5420        last = tail;
5421        start = head;
5422
5423        /* Display the last LPFC_MAX_RXMONITOR_DUMP entries from the rxtable */
5424        while (start != last) {
5425                if (start)
5426                        start--;
5427                else
5428                        start = LPFC_MAX_RXMONITOR_ENTRY - 1;
5429                entry = &phba->rxtable[start];
5430                lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
5431                                "4410 %02d: MBPI %lld Xmit %lld Cmpl %lld "
5432                                "Lat %lld ASz %lld Info %02d BWUtil %d "
5433                                "Int %d slot %d\n",
5434                                cnt, entry->max_bytes_per_interval,
5435                                entry->total_bytes, entry->rcv_bytes,
5436                                entry->avg_io_latency, entry->avg_io_size,
5437                                entry->cmf_info, entry->timer_utilization,
5438                                entry->timer_interval, start);
5439                cnt++;
5440                if (cnt >= LPFC_MAX_RXMONITOR_DUMP)
5441                        return;
5442        }
5443}
5444
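/*
 * Illustration of the dump order above: with LPFC_MAX_RXMONITOR_ENTRY
 * == 8 (value is illustrative), head == 2 and tail == 6, the walk
 * prints slots 1, 0, 7, 6 - the newest sample first, stepping backward
 * (with wrap) until the oldest retained entry is reached or
 * LPFC_MAX_RXMONITOR_DUMP entries have been shown.
 */
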
5445/**
5446 * lpfc_cgn_update_stat - Save data into congestion stats buffer
5447 * @phba: pointer to lpfc hba data structure.
5448 * @dtag: FPIN descriptor received
5449 *
5450 * Increment the FPIN received counter and record the time when it happens.
5451 */
5452void
5453lpfc_cgn_update_stat(struct lpfc_hba *phba, uint32_t dtag)
5454{
5455        struct lpfc_cgn_info *cp;
5456        struct tm broken;
5457        struct timespec64 cur_time;
5458        u32 cnt;
5459        u16 value;
5460
5461        /* Make sure we have a congestion info buffer */
5462        if (!phba->cgn_i)
5463                return;
5464        cp = (struct lpfc_cgn_info *)phba->cgn_i->virt;
5465        ktime_get_real_ts64(&cur_time);
5466        time64_to_tm(cur_time.tv_sec, 0, &broken);
5467
5468        /* Update congestion statistics */
5469        switch (dtag) {
5470        case ELS_DTAG_LNK_INTEGRITY:
5471                cnt = le32_to_cpu(cp->link_integ_notification);
5472                cnt++;
5473                cp->link_integ_notification = cpu_to_le32(cnt);
5474
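                /*
                 * time64_to_tm() yields 0-based months and years counted
                 * from 1900; the congestion buffer stores 1-based months
                 * and years counted from 2000 (e.g. tm_year 121 -> 21).
                 */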
5475                cp->cgn_stat_lnk_month = broken.tm_mon + 1;
5476                cp->cgn_stat_lnk_day = broken.tm_mday;
5477                cp->cgn_stat_lnk_year = broken.tm_year - 100;
5478                cp->cgn_stat_lnk_hour = broken.tm_hour;
5479                cp->cgn_stat_lnk_min = broken.tm_min;
5480                cp->cgn_stat_lnk_sec = broken.tm_sec;
5481                break;
5482        case ELS_DTAG_DELIVERY:
5483                cnt = le32_to_cpu(cp->delivery_notification);
5484                cnt++;
5485                cp->delivery_notification = cpu_to_le32(cnt);
5486
5487                cp->cgn_stat_del_month = broken.tm_mon + 1;
5488                cp->cgn_stat_del_day = broken.tm_mday;
5489                cp->cgn_stat_del_year = broken.tm_year - 100;
5490                cp->cgn_stat_del_hour = broken.tm_hour;
5491                cp->cgn_stat_del_min = broken.tm_min;
5492                cp->cgn_stat_del_sec = broken.tm_sec;
5493                break;
5494        case ELS_DTAG_PEER_CONGEST:
5495                cnt = le32_to_cpu(cp->cgn_peer_notification);
5496                cnt++;
5497                cp->cgn_peer_notification = cpu_to_le32(cnt);
5498
5499                cp->cgn_stat_peer_month = broken.tm_mon + 1;
5500                cp->cgn_stat_peer_day = broken.tm_mday;
5501                cp->cgn_stat_peer_year = broken.tm_year - 100;
5502                cp->cgn_stat_peer_hour = broken.tm_hour;
5503                cp->cgn_stat_peer_min = broken.tm_min;
5504                cp->cgn_stat_peer_sec = broken.tm_sec;
5505                break;
5506        case ELS_DTAG_CONGESTION:
5507                cnt = le32_to_cpu(cp->cgn_notification);
5508                cnt++;
5509                cp->cgn_notification = cpu_to_le32(cnt);
5510
5511                cp->cgn_stat_cgn_month = broken.tm_mon + 1;
5512                cp->cgn_stat_cgn_day = broken.tm_mday;
5513                cp->cgn_stat_cgn_year = broken.tm_year - 100;
5514                cp->cgn_stat_cgn_hour = broken.tm_hour;
5515                cp->cgn_stat_cgn_min = broken.tm_min;
5516                cp->cgn_stat_cgn_sec = broken.tm_sec;
5517        }
5518        if (phba->cgn_fpin_frequency &&
5519            phba->cgn_fpin_frequency != LPFC_FPIN_INIT_FREQ) {
5520                value = LPFC_CGN_TIMER_TO_MIN / phba->cgn_fpin_frequency;
5521                cp->cgn_stat_npm = value;
5522        }
        /* Keep the full 32-bit CRC; a u16 local would truncate it */
5523        cnt = lpfc_cgn_calc_crc32(cp, LPFC_CGN_INFO_SZ,
5524                                  LPFC_CGN_CRC32_SEED);
5525        cp->cgn_info_crc = cpu_to_le32(cnt);
5526}
5527
5528/**
5529 * lpfc_cgn_save_evt_cnt - Save data into registered congestion buffer
5530 * @phba: pointer to lpfc hba data structure.
5531 *
5532 * Save the congestion event data every minute.
5533 * On the hour collapse all the minute data into hour data. Every day
5534 * collapse all the hour data into daily data. Driver and fabric
5535 * congestion event counters are kept separately and are saved out
5536 * to the registered congestion buffer every minute.
5537 */
5538static void
5539lpfc_cgn_save_evt_cnt(struct lpfc_hba *phba)
5540{
5541        struct lpfc_cgn_info *cp;
5542        struct tm broken;
5543        struct timespec64 cur_time;
5544        uint32_t i, index;
5545        uint16_t value, mvalue;
5546        uint64_t bps;
5547        uint32_t mbps;
5548        uint32_t dvalue, wvalue, lvalue, avalue;
5549        uint64_t latsum;
5550        __le16 *ptr;
5551        __le32 *lptr;
5552        __le16 *mptr;
5553
5554        /* Make sure we have a congestion info buffer */
5555        if (!phba->cgn_i)
5556                return;
5557        cp = (struct lpfc_cgn_info *)phba->cgn_i->virt;
5558
5559        if (time_before(jiffies, phba->cgn_evt_timestamp))
5560                return;
5561        phba->cgn_evt_timestamp = jiffies +
5562                        msecs_to_jiffies(LPFC_CGN_TIMER_TO_MIN);
5563        phba->cgn_evt_minute++;
5564
5565        /* We should get to this point in the routine on 1 minute intervals */
5566
5567        ktime_get_real_ts64(&cur_time);
5568        time64_to_tm(cur_time.tv_sec, 0, &broken);
5569
5570        if (phba->cgn_fpin_frequency &&
5571            phba->cgn_fpin_frequency != LPFC_FPIN_INIT_FREQ) {
5572                value = LPFC_CGN_TIMER_TO_MIN / phba->cgn_fpin_frequency;
5573                cp->cgn_stat_npm = value;
5574        }
5575
5576        /* Read and clear the latency counters for this minute */
5577        lvalue = atomic_read(&phba->cgn_latency_evt_cnt);
5578        latsum = atomic64_read(&phba->cgn_latency_evt);
5579        atomic_set(&phba->cgn_latency_evt_cnt, 0);
5580        atomic64_set(&phba->cgn_latency_evt, 0);
5581
5582        /* We need to store MB/sec bandwidth in the congestion information.
5583         * block_cnt is count of 512 byte blocks for the entire minute,
5584         * bps will get bytes per sec before finally converting to MB/sec.
5585         */
5586        bps = div_u64(phba->rx_block_cnt, LPFC_SEC_MIN) * 512;
5587        phba->rx_block_cnt = 0;
5588        mvalue = bps / (1024 * 1024); /* convert to MB/sec */
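        /*
         * Worked example with illustrative numbers: 61,440,000 blocks in
         * the minute gives bps = (61440000 / 60) * 512 = 524,288,000
         * bytes/sec, so mvalue = 524288000 / (1024 * 1024) = 500 MB/sec.
         */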
5589
5590        /* Every minute */
5591        /* cgn parameters */
5592        cp->cgn_info_mode = phba->cgn_p.cgn_param_mode;
5593        cp->cgn_info_level0 = phba->cgn_p.cgn_param_level0;
5594        cp->cgn_info_level1 = phba->cgn_p.cgn_param_level1;
5595        cp->cgn_info_level2 = phba->cgn_p.cgn_param_level2;
5596
5597        /* Fill in default LUN qdepth */
5598        value = (uint16_t)(phba->pport->cfg_lun_queue_depth);
5599        cp->cgn_lunq = cpu_to_le16(value);
5600
5601        /* Record congestion buffer info - every minute
5602         * cgn_driver_evt_cnt (Driver events)
5603         * cgn_fabric_warn_cnt (Congestion Warnings)
5604         * cgn_latency_evt_cnt / cgn_latency_evt (IO Latency)
5605         * cgn_fabric_alarm_cnt (Congestion Alarms)
5606         */
5607        index = ++cp->cgn_index_minute;
5608        if (cp->cgn_index_minute == LPFC_MIN_HOUR) {
5609                cp->cgn_index_minute = 0;
5610                index = 0;
5611        }
5612
5613        /* Get the number of driver events in this sample and reset counter */
5614        dvalue = atomic_read(&phba->cgn_driver_evt_cnt);
5615        atomic_set(&phba->cgn_driver_evt_cnt, 0);
5616
5617        /* Get the number of warning events - FPIN and Signal for this minute */
5618        wvalue = 0;
5619        if ((phba->cgn_reg_fpin & LPFC_CGN_FPIN_WARN) ||
5620            phba->cgn_reg_signal == EDC_CG_SIG_WARN_ONLY ||
5621            phba->cgn_reg_signal == EDC_CG_SIG_WARN_ALARM)
5622                wvalue = atomic_read(&phba->cgn_fabric_warn_cnt);
5623        atomic_set(&phba->cgn_fabric_warn_cnt, 0);
5624
5625        /* Get the number of alarm events - FPIN and Signal for this minute */
5626        avalue = 0;
5627        if ((phba->cgn_reg_fpin & LPFC_CGN_FPIN_ALARM) ||
5628            phba->cgn_reg_signal == EDC_CG_SIG_WARN_ALARM)
5629                avalue = atomic_read(&phba->cgn_fabric_alarm_cnt);
5630        atomic_set(&phba->cgn_fabric_alarm_cnt, 0);
5631
5632        /* Collect the driver, warning, alarm and latency counts for this
5633         * minute into the driver congestion buffer.
5634         */
5635        ptr = &cp->cgn_drvr_min[index];
5636        value = (uint16_t)dvalue;
5637        *ptr = cpu_to_le16(value);
5638
5639        ptr = &cp->cgn_warn_min[index];
5640        value = (uint16_t)wvalue;
5641        *ptr = cpu_to_le16(value);
5642
5643        ptr = &cp->cgn_alarm_min[index];
5644        value = (uint16_t)avalue;
5645        *ptr = cpu_to_le16(value);
5646
5647        lptr = &cp->cgn_latency_min[index];
5648        if (lvalue) {
5649                lvalue = (uint32_t)div_u64(latsum, lvalue);
5650                *lptr = cpu_to_le32(lvalue);
5651        } else {
5652                *lptr = 0;
5653        }
5654
5655        /* Collect the bandwidth value into the driver's congestion buffer. */
5656        mptr = &cp->cgn_bw_min[index];
5657        *mptr = cpu_to_le16(mvalue);
5658
5659        lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
5660                        "2418 Congestion Info - minute (%d): %d %d %d %d %d\n",
5661                        index, dvalue, wvalue, *lptr, mvalue, avalue);
5662
5663        /* Every hour */
5664        if ((phba->cgn_evt_minute % LPFC_MIN_HOUR) == 0) {
5665                /* Record congestion buffer info - every hour
5666                 * Collapse all minutes into an hour
5667                 */
5668                index = ++cp->cgn_index_hour;
5669                if (cp->cgn_index_hour == LPFC_HOUR_DAY) {
5670                        cp->cgn_index_hour = 0;
5671                        index = 0;
5672                }
5673
5674                dvalue = 0;
5675                wvalue = 0;
5676                lvalue = 0;
5677                avalue = 0;
5678                mvalue = 0;
5679                mbps = 0;
5680                for (i = 0; i < LPFC_MIN_HOUR; i++) {
5681                        dvalue += le16_to_cpu(cp->cgn_drvr_min[i]);
5682                        wvalue += le16_to_cpu(cp->cgn_warn_min[i]);
5683                        lvalue += le32_to_cpu(cp->cgn_latency_min[i]);
5684                        mbps += le16_to_cpu(cp->cgn_bw_min[i]);
5685                        avalue += le16_to_cpu(cp->cgn_alarm_min[i]);
5686                }
5687                if (lvalue)             /* Avg of latency averages */
5688                        lvalue /= LPFC_MIN_HOUR;
5689                if (mbps)               /* Avg of Bandwidth averages */
5690                        mvalue = mbps / LPFC_MIN_HOUR;
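                /*
                 * Note the mix above: driver, warning and alarm events
                 * are summed across the 60 minute slots, while latency
                 * and bandwidth are averaged, since the per-minute
                 * values are already averages.
                 */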
5691
5692                lptr = &cp->cgn_drvr_hr[index];
5693                *lptr = cpu_to_le32(dvalue);
5694                lptr = &cp->cgn_warn_hr[index];
5695                *lptr = cpu_to_le32(wvalue);
5696                lptr = &cp->cgn_latency_hr[index];
5697                *lptr = cpu_to_le32(lvalue);
5698                mptr = &cp->cgn_bw_hr[index];
5699                *mptr = cpu_to_le16(mvalue);
5700                lptr = &cp->cgn_alarm_hr[index];
5701                *lptr = cpu_to_le32(avalue);
5702
5703                lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
5704                                "2419 Congestion Info - hour "
5705                                "(%d): %d %d %d %d %d\n",
5706                                index, dvalue, wvalue, lvalue, mvalue, avalue);
5707        }
5708
5709        /* Every day */
5710        if ((phba->cgn_evt_minute % LPFC_MIN_DAY) == 0) {
5711                /* Record congestion buffer info - every day
5712                 * Collapse all hours into a day. Rotate days
5713                 * after LPFC_MAX_CGN_DAYS.
5714                 */
5715                index = ++cp->cgn_index_day;
5716                if (cp->cgn_index_day == LPFC_MAX_CGN_DAYS) {
5717                        cp->cgn_index_day = 0;
5718                        index = 0;
5719                }
5720
5721                /* Anytime we overwrite daily index 0, after we wrap,
5722                 * we will be overwriting the oldest day, so we must
5723                 * update the congestion data start time for that day.
5724                 * That start time should have previously been saved after
5725                 * we wrote the last day's worth of data.
5726                 */
5727                if ((phba->hba_flag & HBA_CGN_DAY_WRAP) && index == 0) {
5728                        time64_to_tm(phba->cgn_daily_ts.tv_sec, 0, &broken);
5729
5730                        cp->cgn_info_month = broken.tm_mon + 1;
5731                        cp->cgn_info_day = broken.tm_mday;
5732                        cp->cgn_info_year = broken.tm_year - 100;
5733                        cp->cgn_info_hour = broken.tm_hour;
5734                        cp->cgn_info_minute = broken.tm_min;
5735                        cp->cgn_info_second = broken.tm_sec;
5736
5737                        lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
5738                                        "2646 CGNInfo idx0 Start Time: "
5739                                        "%d/%d/%d %d:%d:%d\n",
5740                                        cp->cgn_info_day, cp->cgn_info_month,
5741                                        cp->cgn_info_year, cp->cgn_info_hour,
5742                                        cp->cgn_info_minute, cp->cgn_info_second);
5744                }
5745
5746                dvalue = 0;
5747                wvalue = 0;
5748                lvalue = 0;
5749                mvalue = 0;
5750                mbps = 0;
5751                avalue = 0;
5752                for (i = 0; i < LPFC_HOUR_DAY; i++) {
5753                        dvalue += le32_to_cpu(cp->cgn_drvr_hr[i]);
5754                        wvalue += le32_to_cpu(cp->cgn_warn_hr[i]);
5755                        lvalue += le32_to_cpu(cp->cgn_latency_hr[i]);
5756                        mbps += le16_to_cpu(cp->cgn_bw_hr[i]);
5757                        avalue += le32_to_cpu(cp->cgn_alarm_hr[i]);
5758                }
5759                if (lvalue)             /* Avg of latency averages */
5760                        lvalue /= LPFC_HOUR_DAY;
5761                if (mbps)               /* Avg of Bandwidth averages */
5762                        mvalue = mbps / LPFC_HOUR_DAY;
5763
5764                lptr = &cp->cgn_drvr_day[index];
5765                *lptr = cpu_to_le32(dvalue);
5766                lptr = &cp->cgn_warn_day[index];
5767                *lptr = cpu_to_le32(wvalue);
5768                lptr = &cp->cgn_latency_day[index];
5769                *lptr = cpu_to_le32(lvalue);
5770                mptr = &cp->cgn_bw_day[index];
5771                *mptr = cpu_to_le16(mvalue);
5772                lptr = &cp->cgn_alarm_day[index];
5773                *lptr = cpu_to_le32(avalue);
5774
5775                lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
5776                                "2420 Congestion Info - daily (%d): "
5777                                "%d %d %d %d %d\n",
5778                                index, dvalue, wvalue, lvalue, mvalue, avalue);
5779
5780                /* We just wrote LPFC_MAX_CGN_DAYS of data,
5781                 * so we are wrapped on any data after this.
5782                 * Save this as the start time for the next day.
5783                 */
5784                if (index == (LPFC_MAX_CGN_DAYS - 1)) {
5785                        phba->hba_flag |= HBA_CGN_DAY_WRAP;
5786                        ktime_get_real_ts64(&phba->cgn_daily_ts);
5787                }
5788        }
5789
5790        /* Use the frequency found in the last rcv'ed FPIN */
5791        value = phba->cgn_fpin_frequency;
5792        if (phba->cgn_reg_fpin & LPFC_CGN_FPIN_WARN)
5793                cp->cgn_warn_freq = cpu_to_le16(value);
5794        if (phba->cgn_reg_fpin & LPFC_CGN_FPIN_ALARM)
5795                cp->cgn_alarm_freq = cpu_to_le16(value);
5796
5797        /* Frequency (in ms) at which Signal Warning/Signal Congestion
5798         * Notifications are received by the HBA
5799         */
5800        value = phba->cgn_sig_freq;
5801
5802        if (phba->cgn_reg_signal == EDC_CG_SIG_WARN_ONLY ||
5803            phba->cgn_reg_signal == EDC_CG_SIG_WARN_ALARM)
5804                cp->cgn_warn_freq = cpu_to_le16(value);
5805        if (phba->cgn_reg_signal == EDC_CG_SIG_WARN_ALARM)
5806                cp->cgn_alarm_freq = cpu_to_le16(value);
5807
5808        lvalue = lpfc_cgn_calc_crc32(cp, LPFC_CGN_INFO_SZ,
5809                                     LPFC_CGN_CRC32_SEED);
5810        cp->cgn_info_crc = cpu_to_le32(lvalue);
5811}
5812
5813/**
5814 * lpfc_calc_cmf_latency - latency from start of the CMF timer interval
5815 * @phba: The Hba for which this call is being executed.
5816 *
5817 * The routine calculates the latency from the beginning of the CMF timer
5818 * interval to the current point in time. It is called from IO completion
5819 * when we exceed our bandwidth limitation for the time interval.
5820 */
5821uint32_t
5822lpfc_calc_cmf_latency(struct lpfc_hba *phba)
5823{
5824        struct timespec64 cmpl_time;
5825        uint32_t msec = 0;
5826
5827        ktime_get_real_ts64(&cmpl_time);
5828
5829        /* This routine works on a ms granularity so sec and usec are
5830         * converted accordingly.
5831         */
5832        if (cmpl_time.tv_sec == phba->cmf_latency.tv_sec) {
5833                msec = (cmpl_time.tv_nsec - phba->cmf_latency.tv_nsec) /
5834                        NSEC_PER_MSEC;
5835        } else {
5836                if (cmpl_time.tv_nsec >= phba->cmf_latency.tv_nsec) {
5837                        msec = (cmpl_time.tv_sec -
5838                                phba->cmf_latency.tv_sec) * MSEC_PER_SEC;
5839                        msec += ((cmpl_time.tv_nsec -
5840                                  phba->cmf_latency.tv_nsec) / NSEC_PER_MSEC);
5841                } else {
5842                        msec = (cmpl_time.tv_sec - phba->cmf_latency.tv_sec -
5843                                1) * MSEC_PER_SEC;
5844                        msec += (((NSEC_PER_SEC - phba->cmf_latency.tv_nsec) +
5845                                 cmpl_time.tv_nsec) / NSEC_PER_MSEC);
5846                }
5847        }
5848        return msec;
5849}
5850
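/*
 * Worked example (illustrative): an interval starting at 10.900000000 s
 * with a completion at 12.050000000 s takes the wrapped-nsec branch:
 *
 *	msec  = (12 - 10 - 1) * 1000 = 1000
 *	msec += ((1000000000 - 900000000) + 50000000) / 1000000 = 150
 *
 * for a total of 1150 ms, matching the 1.15 s that actually elapsed.
 */
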
5851/**
5852 * lpfc_cmf_timer - This is the timer function for one congestion
5853 * rate interval.
5854 * @timer: Pointer to the high resolution timer that expired
5855 */
5856static enum hrtimer_restart
5857lpfc_cmf_timer(struct hrtimer *timer)
5858{
5859        struct lpfc_hba *phba = container_of(timer, struct lpfc_hba,
5860                                             cmf_timer);
5861        struct rxtable_entry *entry;
5862        uint32_t io_cnt;
5863        uint32_t head, tail;
5864        uint32_t busy, max_read;
5865        uint64_t total, rcv, lat, mbpi;
5866        int timer_interval = LPFC_CMF_INTERVAL;
5867        uint32_t ms;
5868        struct lpfc_cgn_stat *cgs;
5869        int cpu;
5870
5871        /* Only restart the timer if congestion mgmt is on */
5872        if (phba->cmf_active_mode == LPFC_CFG_OFF ||
5873            !phba->cmf_latency.tv_sec) {
5874                lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
5875                                "6224 CMF timer exit: %d %lld\n",
5876                                phba->cmf_active_mode,
5877                                (uint64_t)phba->cmf_latency.tv_sec);
5878                return HRTIMER_NORESTART;
5879        }
5880
5881        /* If pport is not ready yet, just exit and wait for
5882         * the next timer cycle to hit.
5883         */
5884        if (!phba->pport)
5885                goto skip;
5886
5887        /* Do not block SCSI IO while in the timer routine since
5888         * total_bytes will be cleared
5889         */
5890        atomic_set(&phba->cmf_stop_io, 1);
5891
5892        /* First we need to calculate the actual ms between
5893         * the last timer interrupt and this one. We ask for
5894         * LPFC_CMF_INTERVAL, however the actual time may
5895         * vary depending on system overhead.
5896         */
5897        ms = lpfc_calc_cmf_latency(phba);
5898
5899
5900        /* Immediately after we calculate the time since the last
5901         * timer interrupt, set the start time for the next
5902         * interrupt
5903         */
5904        ktime_get_real_ts64(&phba->cmf_latency);
5905
5906        phba->cmf_link_byte_count =
5907                div_u64(phba->cmf_max_line_rate * LPFC_CMF_INTERVAL, 1000);
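        /*
         * Given the divide-by-1000 of a millisecond interval,
         * cmf_max_line_rate is a bytes-per-second rate; the result is
         * the byte budget for one full LPFC_CMF_INTERVAL.
         */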
5908
5909        /* Collect all the stats from the prior timer interval */
5910        total = 0;
5911        io_cnt = 0;
5912        lat = 0;
5913        rcv = 0;
5914        for_each_present_cpu(cpu) {
5915                cgs = per_cpu_ptr(phba->cmf_stat, cpu);
5916                total += atomic64_xchg(&cgs->total_bytes, 0);
5917                io_cnt += atomic_xchg(&cgs->rx_io_cnt, 0);
5918                lat += atomic64_xchg(&cgs->rx_latency, 0);
5919                rcv += atomic64_xchg(&cgs->rcv_bytes, 0);
5920        }
5921
5922        /* Before we issue another CMF_SYNC_WQE, retrieve the BW
5923         * returned from the last CMF_SYNC_WQE issued, from
5924         * cmf_last_sync_bw. This will be the target BW for
5925         * this next timer interval.
5926         */
5927        if (phba->cmf_active_mode == LPFC_CFG_MANAGED &&
5928            phba->link_state != LPFC_LINK_DOWN &&
5929            phba->hba_flag & HBA_SETUP) {
5930                mbpi = phba->cmf_last_sync_bw;
5931                phba->cmf_last_sync_bw = 0;
5932                lpfc_issue_cmf_sync_wqe(phba, LPFC_CMF_INTERVAL, total);
5933        } else {
5934                /* For Monitor mode or link down we want mbpi to be
5935                 * the full line-rate byte count for the interval
5936                 */
5937                mbpi = phba->cmf_link_byte_count;
5938        }
5939        phba->cmf_timer_cnt++;
5940
5941        if (io_cnt) {
5942                /* Update congestion info buffer latency in us */
5943                atomic_add(io_cnt, &phba->cgn_latency_evt_cnt);
5944                atomic64_add(lat, &phba->cgn_latency_evt);
5945        }
5946        busy = atomic_xchg(&phba->cmf_busy, 0);
5947        max_read = atomic_xchg(&phba->rx_max_read_cnt, 0);
5948
5949        /* Calculate MBPI for the next timer interval */
5950        if (mbpi) {
5951                if (mbpi > phba->cmf_link_byte_count ||
5952                    phba->cmf_active_mode == LPFC_CFG_MONITOR)
5953                        mbpi = phba->cmf_link_byte_count;
5954
5955                /* Change max_bytes_per_interval to what the prior
5956                 * CMF_SYNC_WQE cmpl indicated.
5957                 */
5958                if (mbpi != phba->cmf_max_bytes_per_interval)
5959                        phba->cmf_max_bytes_per_interval = mbpi;
5960        }
5961
5962        /* Save rxmonitor information for debug */
5963        if (phba->rxtable) {
5964                head = atomic_xchg(&phba->rxtable_idx_head,
5965                                   LPFC_RXMONITOR_TABLE_IN_USE);
5966                entry = &phba->rxtable[head];
5967                entry->total_bytes = total;
5968                entry->rcv_bytes = rcv;
5969                entry->cmf_busy = busy;
5970                entry->cmf_info = phba->cmf_active_info;
5971                if (io_cnt) {
5972                        entry->avg_io_latency = div_u64(lat, io_cnt);
5973                        entry->avg_io_size = div_u64(rcv, io_cnt);
5974                } else {
5975                        entry->avg_io_latency = 0;
5976                        entry->avg_io_size = 0;
5977                }
5978                entry->max_read_cnt = max_read;
5979                entry->io_cnt = io_cnt;
5980                entry->max_bytes_per_interval = mbpi;
5981                if (phba->cmf_active_mode == LPFC_CFG_MANAGED)
5982                        entry->timer_utilization = phba->cmf_last_ts;
5983                else
5984                        entry->timer_utilization = ms;
5985                entry->timer_interval = ms;
5986                phba->cmf_last_ts = 0;
5987
5988                /* Increment rxtable index */
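                /* The rxtable is a lossy ring: the producer advances
                 * head and, when the ring fills, pushes tail forward so
                 * the oldest sample is overwritten.
                 */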
5989                head = (head + 1) % LPFC_MAX_RXMONITOR_ENTRY;
5990                tail = atomic_read(&phba->rxtable_idx_tail);
5991                if (head == tail) {
5992                        tail = (tail + 1) % LPFC_MAX_RXMONITOR_ENTRY;
5993                        atomic_set(&phba->rxtable_idx_tail, tail);
5994                }
5995                atomic_set(&phba->rxtable_idx_head, head);
5996        }
5997
5998        if (phba->cmf_active_mode == LPFC_CFG_MONITOR) {
5999                /* If Monitor mode, check if we are oversubscribed
6000                 * against the full line rate.
6001                 */
6002                if (mbpi && total > mbpi)
6003                        atomic_inc(&phba->cgn_driver_evt_cnt);
6004        }
6005        phba->rx_block_cnt += div_u64(rcv, 512);  /* save 512 byte block cnt */
6006
6007        /* Each minute save Fabric and Driver congestion information */
6008        lpfc_cgn_save_evt_cnt(phba);
6009
6010        /* Since we need to call lpfc_cgn_save_evt_cnt every minute, on the
6011         * minute, adjust our next timer interval, if needed, to ensure a
6012         * 1 minute granularity when we get the next timer interrupt.
6013         */
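        /*
         * For example (illustrative): if the minute boundary is 40 ms
         * away, timer_interval becomes 40 and cmf_link_byte_count is
         * rescaled to rate * 40 / 1000, so the shortened interval gets
         * a proportionally smaller byte budget.
         */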
6014        if (time_after(jiffies + msecs_to_jiffies(LPFC_CMF_INTERVAL),
6015                       phba->cgn_evt_timestamp)) {
6016                timer_interval = jiffies_to_msecs(phba->cgn_evt_timestamp -
6017                                                  jiffies);
6018                if (timer_interval <= 0)
6019                        timer_interval = LPFC_CMF_INTERVAL;
6020
6021                /* If we adjust timer_interval, max_bytes_per_interval
6022                 * needs to be adjusted as well.
6023                 */
6024                phba->cmf_link_byte_count = div_u64(phba->cmf_max_line_rate *
6025                                                    timer_interval, 1000);
6026                if (phba->cmf_active_mode == LPFC_CFG_MONITOR)
6027                        phba->cmf_max_bytes_per_interval =
6028                                phba->cmf_link_byte_count;
6029        }
6030
6031        /* Since total_bytes has already been zeroed, it's okay to unblock
6032         * after max_bytes_per_interval is setup.
6033         */
6034        if (atomic_xchg(&phba->cmf_bw_wait, 0))
6035                queue_work(phba->wq, &phba->unblock_request_work);
6036
6037        /* SCSI IO is now unblocked */
6038        atomic_set(&phba->cmf_stop_io, 0);
6039
6040skip:
6041        hrtimer_forward_now(timer,
6042                            ktime_set(0, timer_interval * NSEC_PER_MSEC));
6043        return HRTIMER_RESTART;
6044}
6045
6046#define trunk_link_status(__idx)\
6047        bf_get(lpfc_acqe_fc_la_trunk_config_port##__idx, acqe_fc) ?\
6048               ((phba->trunk_link.link##__idx.state == LPFC_LINK_UP) ?\
6049                "Link up" : "Link down") : "NA"
6050/* Did port __idx report an error? */
6051#define trunk_port_fault(__idx)\
6052        bf_get(lpfc_acqe_fc_la_trunk_config_port##__idx, acqe_fc) ?\
6053               (port_fault & (1 << __idx) ? "YES" : "NO") : "NA"
6054
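/*
 * Illustration: trunk_link_status(0) expands (via token pasting) to a
 * check of lpfc_acqe_fc_la_trunk_config_port0; a configured port reports
 * "Link up"/"Link down" from phba->trunk_link.link0.state, and an
 * unconfigured one reports "NA".
 */
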
6055static void
6056lpfc_update_trunk_link_status(struct lpfc_hba *phba,
6057                              struct lpfc_acqe_fc_la *acqe_fc)
6058{
6059        uint8_t port_fault = bf_get(lpfc_acqe_fc_la_trunk_linkmask, acqe_fc);
6060        uint8_t err = bf_get(lpfc_acqe_fc_la_trunk_fault, acqe_fc);
6061
6062        phba->sli4_hba.link_state.speed =
6063                lpfc_sli4_port_speed_parse(phba, LPFC_TRAILER_CODE_FC,
6064                                bf_get(lpfc_acqe_fc_la_speed, acqe_fc));
6065
6066        phba->sli4_hba.link_state.logical_speed =
6067                                bf_get(lpfc_acqe_fc_la_llink_spd, acqe_fc) * 10;
6068        /* We got FC link speed, convert to fc_linkspeed (READ_TOPOLOGY) */
6069        phba->fc_linkspeed =
6070                 lpfc_async_link_speed_to_read_top(
6071                                phba,
6072                                bf_get(lpfc_acqe_fc_la_speed, acqe_fc));
6073
6074        if (bf_get(lpfc_acqe_fc_la_trunk_config_port0, acqe_fc)) {
6075                phba->trunk_link.link0.state =
6076                        bf_get(lpfc_acqe_fc_la_trunk_link_status_port0, acqe_fc)
6077                        ? LPFC_LINK_UP : LPFC_LINK_DOWN;
6078                phba->trunk_link.link0.fault = port_fault & 0x1 ? err : 0;
6079        }
6080        if (bf_get(lpfc_acqe_fc_la_trunk_config_port1, acqe_fc)) {
6081                phba->trunk_link.link1.state =
6082                        bf_get(lpfc_acqe_fc_la_trunk_link_status_port1, acqe_fc)
6083                        ? LPFC_LINK_UP : LPFC_LINK_DOWN;
6084                phba->trunk_link.link1.fault = port_fault & 0x2 ? err : 0;
6085        }
6086        if (bf_get(lpfc_acqe_fc_la_trunk_config_port2, acqe_fc)) {
6087                phba->trunk_link.link2.state =
6088                        bf_get(lpfc_acqe_fc_la_trunk_link_status_port2, acqe_fc)
6089                        ? LPFC_LINK_UP : LPFC_LINK_DOWN;
6090                phba->trunk_link.link2.fault = port_fault & 0x4 ? err : 0;
6091        }
6092        if (bf_get(lpfc_acqe_fc_la_trunk_config_port3, acqe_fc)) {
6093                phba->trunk_link.link3.state =
6094                        bf_get(lpfc_acqe_fc_la_trunk_link_status_port3, acqe_fc)
6095                        ? LPFC_LINK_UP : LPFC_LINK_DOWN;
6096                phba->trunk_link.link3.fault = port_fault & 0x8 ? err : 0;
6097        }
6098
6099        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6100                        "2910 Async FC Trunking Event - Speed:%d\n"
6101                        "\tLogical speed:%d "
6102                        "port0: %s port1: %s port2: %s port3: %s\n",
6103                        phba->sli4_hba.link_state.speed,
6104                        phba->sli4_hba.link_state.logical_speed,
6105                        trunk_link_status(0), trunk_link_status(1),
6106                        trunk_link_status(2), trunk_link_status(3));
6107
6108        if (phba->cmf_active_mode != LPFC_CFG_OFF)
6109                lpfc_cmf_signal_init(phba);
6110
6111        /* SLI-4: only error codes up to 0xA are defined as of now.
6112         * Log an appropriate message in case the driver needs to be
6113         * updated to handle new codes.
6114         */
6115        if (port_fault)
6116                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6117                                "3202 trunk error:0x%x (%s) seen on port0:%s "
6118                                "port1:%s port2:%s port3:%s\n", err, err > 0xA ?
6119                                "UNDEFINED. update driver." : trunk_errmsg[err],
6120                                trunk_port_fault(0), trunk_port_fault(1),
6121                                trunk_port_fault(2), trunk_port_fault(3));
6123}
6124
6125
6126/**
6127 * lpfc_sli4_async_fc_evt - Process the asynchronous FC link event
6128 * @phba: pointer to lpfc hba data structure.
6129 * @acqe_fc: pointer to the async fc completion queue entry.
6130 *
6131 * This routine is to handle the SLI4 asynchronous FC event. It will simply log
6132 * that the event was received and then issue a read_topology mailbox command so
6133 * that the rest of the driver will treat it the same as SLI3.
6134 **/
6135static void
6136lpfc_sli4_async_fc_evt(struct lpfc_hba *phba, struct lpfc_acqe_fc_la *acqe_fc)
6137{
6138        struct lpfc_dmabuf *mp;
6139        LPFC_MBOXQ_t *pmb;
6140        MAILBOX_t *mb;
6141        struct lpfc_mbx_read_top *la;
6142        int rc;
6143
6144        if (bf_get(lpfc_trailer_type, acqe_fc) !=
6145            LPFC_FC_LA_EVENT_TYPE_FC_LINK) {
6146                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6147                                "2895 Non-FC link event detected (%d)\n",
6148                                bf_get(lpfc_trailer_type, acqe_fc));
6149                return;
6150        }
6151
6152        if (bf_get(lpfc_acqe_fc_la_att_type, acqe_fc) ==
6153            LPFC_FC_LA_TYPE_TRUNKING_EVENT) {
6154                lpfc_update_trunk_link_status(phba, acqe_fc);
6155                return;
6156        }
6157
6158        /* Keep the link status for extra SLI4 state machine reference */
6159        phba->sli4_hba.link_state.speed =
6160                        lpfc_sli4_port_speed_parse(phba, LPFC_TRAILER_CODE_FC,
6161                                bf_get(lpfc_acqe_fc_la_speed, acqe_fc));
6162        phba->sli4_hba.link_state.duplex = LPFC_ASYNC_LINK_DUPLEX_FULL;
6163        phba->sli4_hba.link_state.topology =
6164                                bf_get(lpfc_acqe_fc_la_topology, acqe_fc);
6165        phba->sli4_hba.link_state.status =
6166                                bf_get(lpfc_acqe_fc_la_att_type, acqe_fc);
6167        phba->sli4_hba.link_state.type =
6168                                bf_get(lpfc_acqe_fc_la_port_type, acqe_fc);
6169        phba->sli4_hba.link_state.number =
6170                                bf_get(lpfc_acqe_fc_la_port_number, acqe_fc);
6171        phba->sli4_hba.link_state.fault =
6172                                bf_get(lpfc_acqe_link_fault, acqe_fc);
6173
6174        if (bf_get(lpfc_acqe_fc_la_att_type, acqe_fc) ==
6175            LPFC_FC_LA_TYPE_LINK_DOWN)
6176                phba->sli4_hba.link_state.logical_speed = 0;
6177        else if (!phba->sli4_hba.conf_trunk)
6178                phba->sli4_hba.link_state.logical_speed =
6179                                bf_get(lpfc_acqe_fc_la_llink_spd, acqe_fc) * 10;
6180
6181        lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
6182                        "2896 Async FC event - Speed:%dMbps Topology:x%x "
6183                        "LA Type:x%x Port Type:%d Port Number:%d Logical speed:"
6184                        "%dMbps Fault:%d\n",
6185                        phba->sli4_hba.link_state.speed,
6186                        phba->sli4_hba.link_state.topology,
6187                        phba->sli4_hba.link_state.status,
6188                        phba->sli4_hba.link_state.type,
6189                        phba->sli4_hba.link_state.number,
6190                        phba->sli4_hba.link_state.logical_speed,
6191                        phba->sli4_hba.link_state.fault);
6192        pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6193        if (!pmb) {
6194                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6195                                "2897 The mboxq allocation failed\n");
6196                return;
6197        }
6198        mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
6199        if (!mp) {
6200                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6201                                "2898 The lpfc_dmabuf allocation failed\n");
6202                goto out_free_pmb;
6203        }
6204        mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
6205        if (!mp->virt) {
6206                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6207                                "2899 The mbuf allocation failed\n");
6208                goto out_free_dmabuf;
6209        }
6210
6211        /* Cleanup any outstanding ELS commands */
6212        lpfc_els_flush_all_cmd(phba);
6213
6214        /* Block ELS IOCBs until we have done process link event */
6215        phba->sli4_hba.els_wq->pring->flag |= LPFC_STOP_IOCB_EVENT;
6216
6217        /* Update link event statistics */
6218        phba->sli.slistat.link_event++;
6219
6220        /* Create lpfc_handle_latt mailbox command from link ACQE */
6221        lpfc_read_topology(phba, pmb, mp);
6222        pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology;
6223        pmb->vport = phba->pport;
6224
6225        if (phba->sli4_hba.link_state.status != LPFC_FC_LA_TYPE_LINK_UP) {
6226                phba->link_flag &= ~(LS_MDS_LINK_DOWN | LS_MDS_LOOPBACK);
6227
6228                switch (phba->sli4_hba.link_state.status) {
6229                case LPFC_FC_LA_TYPE_MDS_LINK_DOWN:
6230                        phba->link_flag |= LS_MDS_LINK_DOWN;
6231                        break;
6232                case LPFC_FC_LA_TYPE_MDS_LOOPBACK:
6233                        phba->link_flag |= LS_MDS_LOOPBACK;
6234                        break;
6235                default:
6236                        break;
6237                }
6238
6239                /* Initialize completion status */
6240                mb = &pmb->u.mb;
6241                mb->mbxStatus = MBX_SUCCESS;
6242
6243                /* Parse port fault information field */
6244                lpfc_sli4_parse_latt_fault(phba, (void *)acqe_fc);
6245
6246                /* Parse and translate link attention fields */
6247                la = (struct lpfc_mbx_read_top *)&pmb->u.mb.un.varReadTop;
6248                la->eventTag = acqe_fc->event_tag;
6249
6250                if (phba->sli4_hba.link_state.status ==
6251                    LPFC_FC_LA_TYPE_UNEXP_WWPN) {
6252                        bf_set(lpfc_mbx_read_top_att_type, la,
6253                               LPFC_FC_LA_TYPE_UNEXP_WWPN);
6254                } else {
6255                        bf_set(lpfc_mbx_read_top_att_type, la,
6256                               LPFC_FC_LA_TYPE_LINK_DOWN);
6257                }
6258                /* Invoke the mailbox command callback function */
6259                lpfc_mbx_cmpl_read_topology(phba, pmb);
6260
6261                return;
6262        }
6263
6264        rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
6265        if (rc == MBX_NOT_FINISHED) {
                /* out_free_dmabuf alone would leak the mbuf */
                lpfc_mbuf_free(phba, mp->virt, mp->phys);
6266                goto out_free_dmabuf;
        }
6267        return;
6268
6269out_free_dmabuf:
6270        kfree(mp);
6271out_free_pmb:
6272        mempool_free(pmb, phba->mbox_mem_pool);
6273}
6274
6275/**
6276 * lpfc_sli4_async_sli_evt - Process the asynchronous SLI link event
6277 * @phba: pointer to lpfc hba data structure.
6278 * @acqe_sli: pointer to the async SLI completion queue entry.
6279 *
6280 * This routine is to handle the SLI4 asynchronous SLI events.
6281 **/
6282static void
6283lpfc_sli4_async_sli_evt(struct lpfc_hba *phba, struct lpfc_acqe_sli *acqe_sli)
6284{
6285        char port_name;
6286        char message[128];
6287        uint8_t status;
6288        uint8_t evt_type;
6289        uint8_t operational = 0;
6290        struct temp_event temp_event_data;
6291        struct lpfc_acqe_misconfigured_event *misconfigured;
6292        struct lpfc_acqe_cgn_signal *cgn_signal;
6293        struct Scsi_Host  *shost;
6294        struct lpfc_vport **vports;
6295        int rc, i, cnt;
6296
6297        evt_type = bf_get(lpfc_trailer_type, acqe_sli);
6298
6299        lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
6300                        "2901 Async SLI event - Type:%d, Event Data: x%08x "
6301                        "x%08x x%08x x%08x\n", evt_type,
6302                        acqe_sli->event_data1, acqe_sli->event_data2,
6303                        acqe_sli->reserved, acqe_sli->trailer);
6304
6305        port_name = phba->Port[0];
6306        if (port_name == 0x00)
6307                port_name = '?'; /* port name is empty */
6308
6309        switch (evt_type) {
6310        case LPFC_SLI_EVENT_TYPE_OVER_TEMP:
6311                temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
6312                temp_event_data.event_code = LPFC_THRESHOLD_TEMP;
6313                temp_event_data.data = (uint32_t)acqe_sli->event_data1;
6314
6315                lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
6316                                "3190 Over Temperature:%d Celsius - Port Name %c\n",
6317                                acqe_sli->event_data1, port_name);
6318
6319                phba->sfp_warning |= LPFC_TRANSGRESSION_HIGH_TEMPERATURE;
6320                shost = lpfc_shost_from_vport(phba->pport);
6321                fc_host_post_vendor_event(shost, fc_get_event_number(),
6322                                          sizeof(temp_event_data),
6323                                          (char *)&temp_event_data,
6324                                          SCSI_NL_VID_TYPE_PCI
6325                                          | PCI_VENDOR_ID_EMULEX);
6326                break;
6327        case LPFC_SLI_EVENT_TYPE_NORM_TEMP:
6328                temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
6329                temp_event_data.event_code = LPFC_NORMAL_TEMP;
6330                temp_event_data.data = (uint32_t)acqe_sli->event_data1;
6331
6332                lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
6333                                "3191 Normal Temperature:%d Celsius - Port Name %c\n",
6334                                acqe_sli->event_data1, port_name);
6335
6336                shost = lpfc_shost_from_vport(phba->pport);
6337                fc_host_post_vendor_event(shost, fc_get_event_number(),
6338                                          sizeof(temp_event_data),
6339                                          (char *)&temp_event_data,
6340                                          SCSI_NL_VID_TYPE_PCI
6341                                          | PCI_VENDOR_ID_EMULEX);
6342                break;
6343        case LPFC_SLI_EVENT_TYPE_MISCONFIGURED:
6344                misconfigured = (struct lpfc_acqe_misconfigured_event *)
6345                                        &acqe_sli->event_data1;
6346
6347                /* fetch the status for this port */
6348                switch (phba->sli4_hba.lnk_info.lnk_no) {
6349                case LPFC_LINK_NUMBER_0:
6350                        status = bf_get(lpfc_sli_misconfigured_port0_state,
6351                                        &misconfigured->theEvent);
6352                        operational = bf_get(lpfc_sli_misconfigured_port0_op,
6353                                        &misconfigured->theEvent);
6354                        break;
6355                case LPFC_LINK_NUMBER_1:
6356                        status = bf_get(lpfc_sli_misconfigured_port1_state,
6357                                        &misconfigured->theEvent);
6358                        operational = bf_get(lpfc_sli_misconfigured_port1_op,
6359                                        &misconfigured->theEvent);
6360                        break;
6361                case LPFC_LINK_NUMBER_2:
6362                        status = bf_get(lpfc_sli_misconfigured_port2_state,
6363                                        &misconfigured->theEvent);
6364                        operational = bf_get(lpfc_sli_misconfigured_port2_op,
6365                                        &misconfigured->theEvent);
6366                        break;
6367                case LPFC_LINK_NUMBER_3:
6368                        status = bf_get(lpfc_sli_misconfigured_port3_state,
6369                                        &misconfigured->theEvent);
6370                        operational = bf_get(lpfc_sli_misconfigured_port3_op,
6371                                        &misconfigured->theEvent);
6372                        break;
6373                default:
6374                        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6375                                        "3296 "
6376                                        "LPFC_SLI_EVENT_TYPE_MISCONFIGURED "
6377                                        "event: Invalid link %d\n",
6378                                        phba->sli4_hba.lnk_info.lnk_no);
6379                        return;
6380                }
6381
6382                /* Skip if optic state unchanged */
6383                if (phba->sli4_hba.lnk_info.optic_state == status)
6384                        return;
6385
6386                switch (status) {
6387                case LPFC_SLI_EVENT_STATUS_VALID:
6388                        sprintf(message, "Physical Link is functional");
6389                        break;
6390                case LPFC_SLI_EVENT_STATUS_NOT_PRESENT:
6391                        sprintf(message, "Optics faulted/incorrectly "
6392                                "installed/not installed - Reseat optics, "
6393                                "if issue not resolved, replace.");
6394                        break;
6395                case LPFC_SLI_EVENT_STATUS_WRONG_TYPE:
6396                        sprintf(message,
6397                                "Optics of two types installed - Remove one "
6398                                "optic or install matching pair of optics.");
6399                        break;
6400                case LPFC_SLI_EVENT_STATUS_UNSUPPORTED:
6401                        sprintf(message, "Incompatible optics - Replace with "
6402                                "compatible optics for card to function.");
6403                        break;
6404                case LPFC_SLI_EVENT_STATUS_UNQUALIFIED:
6405                        sprintf(message, "Unqualified optics - Replace with "
6406                                "Avago optics for Warranty and Technical "
6407                                "Support - Link is%s operational",
6408                                (operational) ? " not" : "");
6409                        break;
6410                case LPFC_SLI_EVENT_STATUS_UNCERTIFIED:
6411                        sprintf(message, "Uncertified optics - Replace with "
6412                                "Avago-certified optics to enable link "
6413                                "operation - Link is%s operational",
6414                                (operational) ? " not" : "");
6415                        break;
6416                default:
6417                        /* firmware is reporting a status we don't know about */
6418                        sprintf(message, "Unknown event status x%02x", status);
6419                        break;
6420                }
6421
6422                /* Issue READ_CONFIG mbox command to refresh supported speeds */
6423                rc = lpfc_sli4_read_config(phba);
6424                if (rc) {
6425                        phba->lmt = 0;
6426                        lpfc_printf_log(phba, KERN_ERR,
6427                                        LOG_TRACE_EVENT,
6428                                        "3194 Unable to retrieve supported "
6429                                        "speeds, rc = 0x%x\n", rc);
6430                }
6431                vports = lpfc_create_vport_work_array(phba);
6432                if (vports != NULL) {
6433                        for (i = 0; i <= phba->max_vports && vports[i] != NULL;
6434                                        i++) {
6435                                shost = lpfc_shost_from_vport(vports[i]);
6436                                lpfc_host_supported_speeds_set(shost);
6437                        }
6438                }
6439                lpfc_destroy_vport_work_array(phba, vports);
6440
6441                phba->sli4_hba.lnk_info.optic_state = status;
6442                lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
6443                                "3176 Port Name %c %s\n", port_name, message);
6444                break;
6445        case LPFC_SLI_EVENT_TYPE_REMOTE_DPORT:
6446                lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
6447                                "3192 Remote DPort Test Initiated - "
6448                                "Event Data1:x%08x Event Data2: x%08x\n",
6449                                acqe_sli->event_data1, acqe_sli->event_data2);
6450                break;
6451        case LPFC_SLI_EVENT_TYPE_PORT_PARAMS_CHG:
6452                /* Call FW to obtain active parms */
6453                lpfc_sli4_cgn_parm_chg_evt(phba);
6454                break;
6455        case LPFC_SLI_EVENT_TYPE_MISCONF_FAWWN:
6456                /* Misconfigured WWN. Reports that the SLI Port is configured
6457                 * to use FA-WWN, but the attached device doesn't support it.
6458                 * No driver action is required.
6459                 * Event Data1 - N.A, Event Data2 - N.A
6460                 */
6461                lpfc_log_msg(phba, KERN_WARNING, LOG_SLI,
6462                             "2699 Misconfigured FA-WWN - Attached device does "
6463                             "not support FA-WWN\n");
6464                break;
6465        case LPFC_SLI_EVENT_TYPE_EEPROM_FAILURE:
6466                /* EEPROM failure. No driver action is required */
6467                lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
6468                             "2518 EEPROM failure - "
6469                             "Event Data1: x%08x Event Data2: x%08x\n",
6470                             acqe_sli->event_data1, acqe_sli->event_data2);
6471                break;
6472        case LPFC_SLI_EVENT_TYPE_CGN_SIGNAL:
6473                if (phba->cmf_active_mode == LPFC_CFG_OFF)
6474                        break;
6475                cgn_signal = (struct lpfc_acqe_cgn_signal *)
6476                                        &acqe_sli->event_data1;
6477                phba->cgn_acqe_cnt++;
6478
6479                cnt = bf_get(lpfc_warn_acqe, cgn_signal);
6480                atomic64_add(cnt, &phba->cgn_acqe_stat.warn);
6481                atomic64_add(cgn_signal->alarm_cnt, &phba->cgn_acqe_stat.alarm);
6482
6483                /* no threshold for CMF, even 1 signal will trigger an event */
6484
6485                /* Alarm overrides warning, so check that first */
6486                if (cgn_signal->alarm_cnt) {
6487                        if (phba->cgn_reg_signal == EDC_CG_SIG_WARN_ALARM) {
6488                                /* Keep track of alarm cnt for cgn_info */
6489                                atomic_add(cgn_signal->alarm_cnt,
6490                                           &phba->cgn_fabric_alarm_cnt);
6491                                /* Keep track of alarm cnt for CMF_SYNC_WQE */
6492                                atomic_add(cgn_signal->alarm_cnt,
6493                                           &phba->cgn_sync_alarm_cnt);
6494                        }
6495                } else if (cnt) {
6496                        /* signal action needs to be taken */
6497                        if (phba->cgn_reg_signal == EDC_CG_SIG_WARN_ONLY ||
6498                            phba->cgn_reg_signal == EDC_CG_SIG_WARN_ALARM) {
6499                                /* Keep track of warning cnt for cgn_info */
6500                                atomic_add(cnt, &phba->cgn_fabric_warn_cnt);
6501                                /* Keep track of warning cnt for CMF_SYNC_WQE */
6502                                atomic_add(cnt, &phba->cgn_sync_warn_cnt);
6503                        }
6504                }
6505                break;
6506        default:
6507                lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
6508                                "3193 Unrecognized SLI event, type: 0x%x",
6509                                evt_type);
6510                break;
6511        }
6512}
6513
6514/**
6515 * lpfc_sli4_perform_vport_cvl - Perform clear virtual link on a vport
6516 * @vport: pointer to vport data structure.
6517 *
6518 * This routine is to perform Clear Virtual Link (CVL) on a vport in
6519 * response to a CVL event.
6520 *
6521 * Return the pointer to the fabric ndlp for the vport if successful,
6522 * otherwise return NULL.
6523 **/
6524static struct lpfc_nodelist *
6525lpfc_sli4_perform_vport_cvl(struct lpfc_vport *vport)
6526{
6527        struct lpfc_nodelist *ndlp;
6528        struct Scsi_Host *shost;
6529        struct lpfc_hba *phba;
6530
6531        if (!vport)
6532                return NULL;
6533        phba = vport->phba;
6534        if (!phba)
6535                return NULL;
6536        ndlp = lpfc_findnode_did(vport, Fabric_DID);
6537        if (!ndlp) {
6538                /* Cannot find existing Fabric ndlp, so allocate a new one */
6539                ndlp = lpfc_nlp_init(vport, Fabric_DID);
6540                if (!ndlp)
6541                        return NULL;
6542                /* Set the node type */
6543                ndlp->nlp_type |= NLP_FABRIC;
6544                /* Put ndlp onto node list */
6545                lpfc_enqueue_node(vport, ndlp);
6546        }
6547        if ((phba->pport->port_state < LPFC_FLOGI) &&
6548                (phba->pport->port_state != LPFC_VPORT_FAILED))
6549                return NULL;
6550        /* If virtual link is not yet instantiated ignore CVL */
6551        if ((vport != phba->pport) && (vport->port_state < LPFC_FDISC)
6552                && (vport->port_state != LPFC_VPORT_FAILED))
6553                return NULL;
6554        shost = lpfc_shost_from_vport(vport);
6555        if (!shost)
6556                return NULL;
6557        lpfc_linkdown_port(vport);
6558        lpfc_cleanup_pending_mbox(vport);
6559        spin_lock_irq(shost->host_lock);
6560        vport->fc_flag |= FC_VPORT_CVL_RCVD;
6561        spin_unlock_irq(shost->host_lock);
6562
6563        return ndlp;
6564}
6565
6566/**
6567 * lpfc_sli4_perform_all_vport_cvl - Perform clear virtual link on all vports
6568 * @phba: pointer to lpfc hba data structure.
6569 *
6570 * This routine is to perform Clear Virtual Link (CVL) on all vports in
6571 * response to a FCF dead event.
6572 **/
6573static void
6574lpfc_sli4_perform_all_vport_cvl(struct lpfc_hba *phba)
6575{
6576        struct lpfc_vport **vports;
6577        int i;
6578
6579        vports = lpfc_create_vport_work_array(phba);
6580        if (vports)
6581                for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
6582                        lpfc_sli4_perform_vport_cvl(vports[i]);
6583        lpfc_destroy_vport_work_array(phba, vports);
6584}
6585
6586/**
6587 * lpfc_sli4_async_fip_evt - Process the asynchronous FCoE FIP event
6588 * @phba: pointer to lpfc hba data structure.
6589 * @acqe_fip: pointer to the async fcoe completion queue entry.
6590 *
6591 * This routine is to handle the SLI4 asynchronous fcoe event.
6592 **/
6593static void
6594lpfc_sli4_async_fip_evt(struct lpfc_hba *phba,
6595                        struct lpfc_acqe_fip *acqe_fip)
6596{
6597        uint8_t event_type = bf_get(lpfc_trailer_type, acqe_fip);
6598        int rc;
6599        struct lpfc_vport *vport;
6600        struct lpfc_nodelist *ndlp;
6601        int active_vlink_present;
6602        struct lpfc_vport **vports;
6603        int i;
6604
6605        phba->fc_eventTag = acqe_fip->event_tag;
6606        phba->fcoe_eventtag = acqe_fip->event_tag;
6607        switch (event_type) {
6608        case LPFC_FIP_EVENT_TYPE_NEW_FCF:
6609        case LPFC_FIP_EVENT_TYPE_FCF_PARAM_MOD:
6610                if (event_type == LPFC_FIP_EVENT_TYPE_NEW_FCF)
6611                        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6612                                        "2546 New FCF event, evt_tag:x%x, "
6613                                        "index:x%x\n",
6614                                        acqe_fip->event_tag,
6615                                        acqe_fip->index);
6616                else
6617                        lpfc_printf_log(phba, KERN_WARNING, LOG_FIP |
6618                                        LOG_DISCOVERY,
6619                                        "2788 FCF param modified event, "
6620                                        "evt_tag:x%x, index:x%x\n",
6621                                        acqe_fip->event_tag,
6622                                        acqe_fip->index);
6623                if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
6624                        /*
6625                         * During period of FCF discovery, read the FCF
6626                         * table record indexed by the event to update
6627                         * FCF roundrobin failover eligible FCF bmask.
6628                         */
6629                        lpfc_printf_log(phba, KERN_INFO, LOG_FIP |
6630                                        LOG_DISCOVERY,
6631                                        "2779 Read FCF (x%x) for updating "
6632                                        "roundrobin FCF failover bmask\n",
6633                                        acqe_fip->index);
6634                        rc = lpfc_sli4_read_fcf_rec(phba, acqe_fip->index);
6635                }
6636
6637                /* If the FCF discovery is in progress, do nothing. */
6638                spin_lock_irq(&phba->hbalock);
6639                if (phba->hba_flag & FCF_TS_INPROG) {
6640                        spin_unlock_irq(&phba->hbalock);
6641                        break;
6642                }
6643                /* If fast FCF failover rescan event is pending, do nothing */
6644                if (phba->fcf.fcf_flag & (FCF_REDISC_EVT | FCF_REDISC_PEND)) {
6645                        spin_unlock_irq(&phba->hbalock);
6646                        break;
6647                }
6648
6649                /* If the FCF has been in discovered state, do nothing. */
6650                if (phba->fcf.fcf_flag & FCF_SCAN_DONE) {
6651                        spin_unlock_irq(&phba->hbalock);
6652                        break;
6653                }
6654                spin_unlock_irq(&phba->hbalock);
6655
6656                /* Otherwise, scan the entire FCF table and re-discover SAN */
6657                lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
6658                                "2770 Start FCF table scan per async FCF "
6659                                "event, evt_tag:x%x, index:x%x\n",
6660                                acqe_fip->event_tag, acqe_fip->index);
6661                rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba,
6662                                                     LPFC_FCOE_FCF_GET_FIRST);
6663                if (rc)
6664                        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6665                                        "2547 Issue FCF scan read FCF mailbox "
6666                                        "command failed (x%x)\n", rc);
6667                break;
6668
6669        case LPFC_FIP_EVENT_TYPE_FCF_TABLE_FULL:
6670                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6671                                "2548 FCF Table full count 0x%x tag 0x%x\n",
6672                                bf_get(lpfc_acqe_fip_fcf_count, acqe_fip),
6673                                acqe_fip->event_tag);
6674                break;
6675
6676        case LPFC_FIP_EVENT_TYPE_FCF_DEAD:
6677                phba->fcoe_cvl_eventtag = acqe_fip->event_tag;
6678                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6679                                "2549 FCF (x%x) disconnected from network, "
6680                                "tag:x%x\n", acqe_fip->index,
6681                                acqe_fip->event_tag);
6682                /*
6683                 * If we are in the middle of FCF failover process, clear
6684                 * the corresponding FCF bit in the roundrobin bitmap.
6685                 */
6686                spin_lock_irq(&phba->hbalock);
6687                if ((phba->fcf.fcf_flag & FCF_DISCOVERY) &&
6688                    (phba->fcf.current_rec.fcf_indx != acqe_fip->index)) {
6689                        spin_unlock_irq(&phba->hbalock);
6690                        /* Update FLOGI FCF failover eligible FCF bmask */
6691                        lpfc_sli4_fcf_rr_index_clear(phba, acqe_fip->index);
6692                        break;
6693                }
6694                spin_unlock_irq(&phba->hbalock);
6695
6696                /* If the event is not for currently used fcf do nothing */
6697                if (phba->fcf.current_rec.fcf_indx != acqe_fip->index)
6698                        break;
6699
6700                /*
6701                 * Otherwise, request the port to rediscover the entire FCF
6702                 * table for a fast recovery from the case that the current
6703                 * FCF is no longer valid, as we are not already in the
6704                 * middle of the FCF failover process.
6705                 */
6706                spin_lock_irq(&phba->hbalock);
6707                /* Mark the fast failover process in progress */
6708                phba->fcf.fcf_flag |= FCF_DEAD_DISC;
6709                spin_unlock_irq(&phba->hbalock);
6710
6711                lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
6712                                "2771 Start FCF fast failover process due to "
6713                                "FCF DEAD event: evt_tag:x%x, fcf_index:x%x "
6714                                "\n", acqe_fip->event_tag, acqe_fip->index);
6715                rc = lpfc_sli4_redisc_fcf_table(phba);
6716                if (rc) {
6717                        lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
6718                                        LOG_TRACE_EVENT,
6719                                        "2772 Issue FCF rediscover mailbox "
6720                                        "command failed, fail through to FCF "
6721                                        "dead event\n");
6722                        spin_lock_irq(&phba->hbalock);
6723                        phba->fcf.fcf_flag &= ~FCF_DEAD_DISC;
6724                        spin_unlock_irq(&phba->hbalock);
6725                        /*
6726                         * Last resort will fail over by treating this
6727                         * as a link down to FCF registration.
6728                         */
6729                        lpfc_sli4_fcf_dead_failthrough(phba);
6730                } else {
6731                        /* Reset FCF roundrobin bmask for new discovery */
6732                        lpfc_sli4_clear_fcf_rr_bmask(phba);
6733                        /*
6734                         * Handling fast FCF failover to a DEAD FCF event is
6735                         * considered equivalent to receiving a CVL on all vports.
6736                         */
6737                        lpfc_sli4_perform_all_vport_cvl(phba);
6738                }
6739                break;
6740        case LPFC_FIP_EVENT_TYPE_CVL:
6741                phba->fcoe_cvl_eventtag = acqe_fip->event_tag;
6742                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6743                                "2718 Clear Virtual Link Received for VPI "
6744                                "0x%x tag 0x%x\n", acqe_fip->index,
6745                                acqe_fip->event_tag);
6746
6747                vport = lpfc_find_vport_by_vpid(phba,
6748                                                acqe_fip->index);
6749                ndlp = lpfc_sli4_perform_vport_cvl(vport);
6750                if (!ndlp)
6751                        break;
6752                active_vlink_present = 0;
6753
6754                vports = lpfc_create_vport_work_array(phba);
6755                if (vports) {
6756                        for (i = 0; i <= phba->max_vports && vports[i] != NULL;
6757                                        i++) {
6758                                if ((!(vports[i]->fc_flag &
6759                                        FC_VPORT_CVL_RCVD)) &&
6760                                        (vports[i]->port_state > LPFC_FDISC)) {
6761                                        active_vlink_present = 1;
6762                                        break;
6763                                }
6764                        }
6765                        lpfc_destroy_vport_work_array(phba, vports);
6766                }
6767
6768                /*
6769                 * Don't re-instantiate if vport is marked for deletion.
6770                 * If we are here first then vport_delete is going to wait
6771                 * for discovery to complete.
6772                 */
6773                if (!(vport->load_flag & FC_UNLOADING) &&
6774                                        active_vlink_present) {
6775                        /*
6776                         * If there are other active VLinks present,
6777                         * re-instantiate the Vlink using FDISC.
6778                         */
6779                        mod_timer(&ndlp->nlp_delayfunc,
6780                                  jiffies + msecs_to_jiffies(1000));
6781                        spin_lock_irq(&ndlp->lock);
6782                        ndlp->nlp_flag |= NLP_DELAY_TMO;
6783                        spin_unlock_irq(&ndlp->lock);
6784                        ndlp->nlp_last_elscmd = ELS_CMD_FDISC;
6785                        vport->port_state = LPFC_FDISC;
6786                } else {
6787                        /*
6788                         * Otherwise, request the port to rediscover
6789                         * the entire FCF table for a fast recovery
6790                         * from the possible case that the current FCF
6791                         * is no longer valid, if we are not already
6792                         * in the FCF failover process.
6793                         */
6794                        spin_lock_irq(&phba->hbalock);
6795                        if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
6796                                spin_unlock_irq(&phba->hbalock);
6797                                break;
6798                        }
6799                        /* Mark the fast failover process in progress */
6800                        phba->fcf.fcf_flag |= FCF_ACVL_DISC;
6801                        spin_unlock_irq(&phba->hbalock);
6802                        lpfc_printf_log(phba, KERN_INFO, LOG_FIP |
6803                                        LOG_DISCOVERY,
6804                                        "2773 Start FCF failover per CVL, "
6805                                        "evt_tag:x%x\n", acqe_fip->event_tag);
6806                        rc = lpfc_sli4_redisc_fcf_table(phba);
6807                        if (rc) {
6808                                lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
6809                                                LOG_TRACE_EVENT,
6810                                                "2774 Issue FCF rediscover "
6811                                                "mailbox command failed, "
6812                                                "fail through to CVL event\n");
6813                                spin_lock_irq(&phba->hbalock);
6814                                phba->fcf.fcf_flag &= ~FCF_ACVL_DISC;
6815                                spin_unlock_irq(&phba->hbalock);
6816                                /*
6817                                 * Last resort will be a retry on the
6818                                 * currently registered FCF entry.
6819                                 */
6820                                lpfc_retry_pport_discovery(phba);
6821                        } else
6822                                /*
6823                                 * Reset FCF roundrobin bmask for new
6824                                 * discovery.
6825                                 */
6826                                lpfc_sli4_clear_fcf_rr_bmask(phba);
6827                }
6828                break;
6829        default:
6830                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6831                                "0288 Unknown FCoE event type 0x%x event tag "
6832                                "0x%x\n", event_type, acqe_fip->event_tag);
6833                break;
6834        }
6835}
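
/*
 * Summary of the FIP event dispositions above (a reviewer's sketch
 * derived from the switch statement, not additional driver logic):
 *
 *   NEW_FCF / FCF_PARAM_MOD - update the roundrobin bmask if discovery
 *                             is active, then rescan the FCF table
 *                             unless a scan is in progress or done
 *   FCF_TABLE_FULL          - log only
 *   FCF_DEAD                - start fast FCF failover; on mailbox
 *                             failure, fall back to link-down handling
 *   CVL                     - re-FDISC if another active vlink exists,
 *                             otherwise start FCF rediscovery
 */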
6836
6837/**
6838 * lpfc_sli4_async_dcbx_evt - Process the asynchronous dcbx event
6839 * @phba: pointer to lpfc hba data structure.
6840 * @acqe_dcbx: pointer to the async dcbx completion queue entry.
6841 *
6842 * This routine is to handle the SLI4 asynchronous dcbx event.
6843 **/
6844static void
6845lpfc_sli4_async_dcbx_evt(struct lpfc_hba *phba,
6846                         struct lpfc_acqe_dcbx *acqe_dcbx)
6847{
6848        phba->fc_eventTag = acqe_dcbx->event_tag;
6849        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6850                        "0290 The SLI4 DCBX asynchronous event is not "
6851                        "handled yet\n");
6852}
6853
6854/**
6855 * lpfc_sli4_async_grp5_evt - Process the asynchronous group5 event
6856 * @phba: pointer to lpfc hba data structure.
6857 * @acqe_grp5: pointer to the async grp5 completion queue entry.
6858 *
6859 * This routine is to handle the SLI4 asynchronous grp5 event. A grp5 event
6860 * is an asynchronous notification of a logical link speed change.  The Port
6861 * reports the logical link speed in units of 10Mbps.
6862 **/
6863static void
6864lpfc_sli4_async_grp5_evt(struct lpfc_hba *phba,
6865                         struct lpfc_acqe_grp5 *acqe_grp5)
6866{
6867        uint16_t prev_ll_spd;
6868
6869        phba->fc_eventTag = acqe_grp5->event_tag;
6870        phba->fcoe_eventtag = acqe_grp5->event_tag;
6871        prev_ll_spd = phba->sli4_hba.link_state.logical_speed;
6872        phba->sli4_hba.link_state.logical_speed =
6873                (bf_get(lpfc_acqe_grp5_llink_spd, acqe_grp5)) * 10;
6874        lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
6875                        "2789 GRP5 Async Event: Updating logical link speed "
6876                        "from %dMbps to %dMbps\n", prev_ll_spd,
6877                        phba->sli4_hba.link_state.logical_speed);
6878}
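
/*
 * Worked example for the 10 Mbps scaling above: a raw
 * lpfc_acqe_grp5_llink_spd field value of 1000 is stored as
 * 1000 * 10 = 10000, i.e. a 10000 Mbps (10 Gbps) logical link.
 */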
6879
6880/**
6881 * lpfc_sli4_async_cmstat_evt - Process the asynchronous cmstat event
6882 * @phba: pointer to lpfc hba data structure.
6883 *
6884 * This routine is to handle the SLI4 asynchronous cmstat event. A cmstat event
6885 * is an asynchronous notification of a request to reset CM stats.
6886 **/
6887static void
6888lpfc_sli4_async_cmstat_evt(struct lpfc_hba *phba)
6889{
6890        if (!phba->cgn_i)
6891                return;
6892        lpfc_init_congestion_stat(phba);
6893}
6894
6895/**
6896 * lpfc_cgn_params_val - Validate FW congestion parameters.
6897 * @phba: pointer to lpfc hba data structure.
6898 * @p_cfg_param: pointer to FW provided congestion parameters.
6899 *
6900 * This routine validates the congestion parameters passed
6901 * by the FW to the driver via an ACQE event.
6902 **/
6903static void
6904lpfc_cgn_params_val(struct lpfc_hba *phba, struct lpfc_cgn_param *p_cfg_param)
6905{
6906        spin_lock_irq(&phba->hbalock);
6907
6908        if (!lpfc_rangecheck(p_cfg_param->cgn_param_mode, LPFC_CFG_OFF,
6909                             LPFC_CFG_MONITOR)) {
6910                lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT,
6911                                "6225 CMF mode param out of range: %d\n",
6912                                 p_cfg_param->cgn_param_mode);
6913                p_cfg_param->cgn_param_mode = LPFC_CFG_OFF;
6914        }
6915
6916        spin_unlock_irq(&phba->hbalock);
6917}
6918
6919/**
6920 * lpfc_cgn_params_parse - Process a FW cong parm change event
6921 * @phba: pointer to lpfc hba data structure.
6922 * @p_cgn_param: pointer to a data buffer with the FW cong params.
6923 * @len: the size of pdata in bytes.
6924 *
6925 * This routine validates the congestion management buffer signature
6926 * from the FW, validates the contents and makes corrections for
6927 * valid, in-range values.  If the signature magic is correct and
6928 * after parameter validation, the contents are copied to the driver's
6929 * @phba structure. If the magic is incorrect, an error message is
6930 * logged.
6931 **/
6932static void
6933lpfc_cgn_params_parse(struct lpfc_hba *phba,
6934                      struct lpfc_cgn_param *p_cgn_param, uint32_t len)
6935{
6936        struct lpfc_cgn_info *cp;
6937        uint32_t crc, oldmode;
6938
6939        /* Make sure the FW has encoded the correct magic number to
6940         * validate the congestion parameter in FW memory.
6941         */
6942        if (p_cgn_param->cgn_param_magic == LPFC_CFG_PARAM_MAGIC_NUM) {
6943                lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT | LOG_INIT,
6944                                "4668 FW cgn parm buffer data: "
6945                                "magic 0x%x version %d mode %d "
6946                                "level0 %d level1 %d "
6947                                "level2 %d byte13 %d "
6948                                "byte14 %d byte15 %d "
6949                                "byte11 %d byte12 %d activeMode %d\n",
6950                                p_cgn_param->cgn_param_magic,
6951                                p_cgn_param->cgn_param_version,
6952                                p_cgn_param->cgn_param_mode,
6953                                p_cgn_param->cgn_param_level0,
6954                                p_cgn_param->cgn_param_level1,
6955                                p_cgn_param->cgn_param_level2,
6956                                p_cgn_param->byte13,
6957                                p_cgn_param->byte14,
6958                                p_cgn_param->byte15,
6959                                p_cgn_param->byte11,
6960                                p_cgn_param->byte12,
6961                                phba->cmf_active_mode);
6962
6963                oldmode = phba->cmf_active_mode;
6964
6965                /* Any parameters out of range are corrected to defaults
6966                 * by this routine.  No need to fail.
6967                 */
6968                lpfc_cgn_params_val(phba, p_cgn_param);
6969
6970                /* Parameters are verified, move them into driver storage */
6971                spin_lock_irq(&phba->hbalock);
6972                memcpy(&phba->cgn_p, p_cgn_param,
6973                       sizeof(struct lpfc_cgn_param));
6974
6975                /* Update parameters in congestion info buffer now */
6976                if (phba->cgn_i) {
6977                        cp = (struct lpfc_cgn_info *)phba->cgn_i->virt;
6978                        cp->cgn_info_mode = phba->cgn_p.cgn_param_mode;
6979                        cp->cgn_info_level0 = phba->cgn_p.cgn_param_level0;
6980                        cp->cgn_info_level1 = phba->cgn_p.cgn_param_level1;
6981                        cp->cgn_info_level2 = phba->cgn_p.cgn_param_level2;
6982                        crc = lpfc_cgn_calc_crc32(cp, LPFC_CGN_INFO_SZ,
6983                                                  LPFC_CGN_CRC32_SEED);
6984                        cp->cgn_info_crc = cpu_to_le32(crc);
6985                }
6986                spin_unlock_irq(&phba->hbalock);
6987
6988                phba->cmf_active_mode = phba->cgn_p.cgn_param_mode;
6989
6990                switch (oldmode) {
6991                case LPFC_CFG_OFF:
6992                        if (phba->cgn_p.cgn_param_mode != LPFC_CFG_OFF) {
6993                                /* Turning CMF on */
6994                                lpfc_cmf_start(phba);
6995
6996                                if (phba->link_state >= LPFC_LINK_UP) {
6997                                        phba->cgn_reg_fpin =
6998                                                phba->cgn_init_reg_fpin;
6999                                        phba->cgn_reg_signal =
7000                                                phba->cgn_init_reg_signal;
7001                                        lpfc_issue_els_edc(phba->pport, 0);
7002                                }
7003                        }
7004                        break;
7005                case LPFC_CFG_MANAGED:
7006                        switch (phba->cgn_p.cgn_param_mode) {
7007                        case LPFC_CFG_OFF:
7008                                /* Turning CMF off */
7009                                lpfc_cmf_stop(phba);
7010                                if (phba->link_state >= LPFC_LINK_UP)
7011                                        lpfc_issue_els_edc(phba->pport, 0);
7012                                break;
7013                        case LPFC_CFG_MONITOR:
7014                                lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
7015                                                "4661 Switch from MANAGED to "
7016                                                "MONITOR mode\n");
7017                                phba->cmf_max_bytes_per_interval =
7018                                        phba->cmf_link_byte_count;
7019
7020                                /* Resume blocked IO - unblock on workqueue */
7021                                queue_work(phba->wq,
7022                                           &phba->unblock_request_work);
7023                                break;
7024                        }
7025                        break;
7026                case LPFC_CFG_MONITOR:
7027                        switch (phba->cgn_p.cgn_param_mode) {
7028                        case LPFC_CFG_OFF:
7029                                /* Turning CMF off */
7030                                lpfc_cmf_stop(phba);
7031                                if (phba->link_state >= LPFC_LINK_UP)
7032                                        lpfc_issue_els_edc(phba->pport, 0);
7033                                break;
7034                        case LPFC_CFG_MANAGED:
7035                                lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
7036                                                "4662 Switch from MONITOR to "
7037                                                "MANAGED mode\n");
7038                                lpfc_cmf_signal_init(phba);
7039                                break;
7040                        }
7041                        break;
7042                }
7043        } else {
7044                lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT | LOG_INIT,
7045                                "4669 FW cgn parm buf wrong magic 0x%x "
7046                                "version %d\n", p_cgn_param->cgn_param_magic,
7047                                p_cgn_param->cgn_param_version);
7048        }
7049}
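
/*
 * Mode-transition summary for the switch above (illustrative notes,
 * derived from this routine's behavior):
 *
 *   OFF     -> MANAGED/MONITOR : lpfc_cmf_start(); if the link is up,
 *                                restore FPIN/signal registrations and
 *                                issue an ELS EDC
 *   MANAGED -> OFF             : lpfc_cmf_stop(); EDC if link up
 *   MANAGED -> MONITOR         : lift the bandwidth cap to the full
 *                                link byte count and unblock IO via
 *                                the unblock_request_work item
 *   MONITOR -> OFF             : lpfc_cmf_stop(); EDC if link up
 *   MONITOR -> MANAGED         : lpfc_cmf_signal_init()
 */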
7050
7051/**
7052 * lpfc_sli4_cgn_params_read - Read and Validate FW congestion parameters.
7053 * @phba: pointer to lpfc hba data structure.
7054 *
7055 * This routine issues a read_object mailbox command to
7056 * get the congestion management parameters from the FW,
7057 * parses them, and updates the driver-maintained values.
7058 *
7059 * Returns
7060 *  0     if the object was empty
7061 *  -Eval (a negative errno) if an error was encountered
7062 *  Count if bytes were read from object
7063 **/
7064int
7065lpfc_sli4_cgn_params_read(struct lpfc_hba *phba)
7066{
7067        int ret = 0;
7068        struct lpfc_cgn_param *p_cgn_param = NULL;
7069        u32 *pdata = NULL;
7070        u32 len = 0;
7071
7072        /* Find out if the FW has a new set of congestion parameters. */
7073        len = sizeof(struct lpfc_cgn_param);
7074        pdata = kzalloc(len, GFP_KERNEL);
            if (!pdata)
                    return -ENOMEM;
7075        ret = lpfc_read_object(phba, (char *)LPFC_PORT_CFG_NAME,
7076                               pdata, len);
7077
7078        /* 0 means no data.  A negative means error.  A positive means
7079         * bytes were copied.
7080         */
7081        if (!ret) {
7082                lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT | LOG_INIT,
7083                                "4670 CGN RD OBJ returns no data\n");
7084                goto rd_obj_err;
7085        } else if (ret < 0) {
7086                /* Some error.  Just exit and return it to the caller.*/
7087                goto rd_obj_err;
7088        }
7089
7090        lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT | LOG_INIT,
7091                        "6234 READ CGN PARAMS Successful %d\n", len);
7092
7093        /* Parse data pointer over len and update the phba congestion
7094         * parameters with values passed back.  The receive rate values
7095         * may have been altered in FW, but take no action here.
7096         */
7097        p_cgn_param = (struct lpfc_cgn_param *)pdata;
7098        lpfc_cgn_params_parse(phba, p_cgn_param, len);
7099
7100 rd_obj_err:
7101        kfree(pdata);
7102        return ret;
7103}
7104
7105/**
7106 * lpfc_sli4_cgn_parm_chg_evt - Process a FW congestion param change event
7107 * @phba: pointer to lpfc hba data structure.
7108 *
7109 * The FW generated Async ACQE SLI event calls this routine when
7110 * the event type is an SLI Internal Port Event and the Event Code
7111 * indicates a change to the FW maintained congestion parameters.
7112 *
7113 * This routine executes a Read_Object mailbox call to obtain the
7114 * current congestion parameters maintained in FW and corrects
7115 * the driver's active congestion parameters.
7116 *
7117 * The acqe event is not passed because there is no further data
7118 * required.
7119 *
7120 * Returns a nonzero error code if event processing encountered an error,
7121 * zero otherwise.
7122 **/
7123static int
7124lpfc_sli4_cgn_parm_chg_evt(struct lpfc_hba *phba)
7125{
7126        int ret = 0;
7127
7128        if (!phba->sli4_hba.pc_sli4_params.cmf) {
7129                lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT | LOG_INIT,
7130                                "4664 Cgn Evt when E2E off. Drop event\n");
7131                return -EACCES;
7132        }
7133
7134        /* If the event is claiming an empty object, it's ok.  A write
7135         * could have cleared it.  Only error is a negative return
7136         * status.
7137         */
7138        ret = lpfc_sli4_cgn_params_read(phba);
7139        if (ret < 0) {
7140                lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT | LOG_INIT,
7141                                "4667 Error reading Cgn Params (%d)\n",
7142                                ret);
7143        } else if (!ret) {
7144                lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT | LOG_INIT,
7145                                "4673 CGN Event empty object.\n");
7146        }
7147        return ret;
7148}
7149
7150/**
7151 * lpfc_sli4_async_event_proc - Process all the pending asynchronous events
7152 * @phba: pointer to lpfc hba data structure.
7153 *
7154 * This routine is invoked by the worker thread to process all the pending
7155 * SLI4 asynchronous events.
7156 **/
7157void lpfc_sli4_async_event_proc(struct lpfc_hba *phba)
7158{
7159        struct lpfc_cq_event *cq_event;
7160        unsigned long iflags;
7161
7162        /* First, declare the async event has been handled */
7163        spin_lock_irqsave(&phba->hbalock, iflags);
7164        phba->hba_flag &= ~ASYNC_EVENT;
7165        spin_unlock_irqrestore(&phba->hbalock, iflags);
7166
7167        /* Now, handle all the async events */
7168        spin_lock_irqsave(&phba->sli4_hba.asynce_list_lock, iflags);
7169        while (!list_empty(&phba->sli4_hba.sp_asynce_work_queue)) {
7170                list_remove_head(&phba->sli4_hba.sp_asynce_work_queue,
7171                                 cq_event, struct lpfc_cq_event, list);
7172                spin_unlock_irqrestore(&phba->sli4_hba.asynce_list_lock,
7173                                       iflags);
7174
7175                /* Process the asynchronous event */
7176                switch (bf_get(lpfc_trailer_code, &cq_event->cqe.mcqe_cmpl)) {
7177                case LPFC_TRAILER_CODE_LINK:
7178                        lpfc_sli4_async_link_evt(phba,
7179                                                 &cq_event->cqe.acqe_link);
7180                        break;
7181                case LPFC_TRAILER_CODE_FCOE:
7182                        lpfc_sli4_async_fip_evt(phba, &cq_event->cqe.acqe_fip);
7183                        break;
7184                case LPFC_TRAILER_CODE_DCBX:
7185                        lpfc_sli4_async_dcbx_evt(phba,
7186                                                 &cq_event->cqe.acqe_dcbx);
7187                        break;
7188                case LPFC_TRAILER_CODE_GRP5:
7189                        lpfc_sli4_async_grp5_evt(phba,
7190                                                 &cq_event->cqe.acqe_grp5);
7191                        break;
7192                case LPFC_TRAILER_CODE_FC:
7193                        lpfc_sli4_async_fc_evt(phba, &cq_event->cqe.acqe_fc);
7194                        break;
7195                case LPFC_TRAILER_CODE_SLI:
7196                        lpfc_sli4_async_sli_evt(phba, &cq_event->cqe.acqe_sli);
7197                        break;
7198                case LPFC_TRAILER_CODE_CMSTAT:
7199                        lpfc_sli4_async_cmstat_evt(phba);
7200                        break;
7201                default:
7202                        lpfc_printf_log(phba, KERN_ERR,
7203                                        LOG_TRACE_EVENT,
7204                                        "1804 Invalid asynchronous event code: "
7205                                        "x%x\n", bf_get(lpfc_trailer_code,
7206                                        &cq_event->cqe.mcqe_cmpl));
7207                        break;
7208                }
7209
7210                /* Free the completion event processed to the free pool */
7211                lpfc_sli4_cq_event_release(phba, cq_event);
7212                spin_lock_irqsave(&phba->sli4_hba.asynce_list_lock, iflags);
7213        }
7214        spin_unlock_irqrestore(&phba->sli4_hba.asynce_list_lock, iflags);
7215}
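
/*
 * Producer-side sketch (for orientation only): the interrupt path
 * queues each struct lpfc_cq_event onto sp_asynce_work_queue under
 * asynce_list_lock, flags ASYNC_EVENT in hba_flag, and wakes the
 * worker thread, which then drains the list in the routine above and
 * dispatches on the MCQE trailer code.
 */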
7216
7217/**
7218 * lpfc_sli4_fcf_redisc_event_proc - Process fcf table rediscovery event
7219 * @phba: pointer to lpfc hba data structure.
7220 *
7221 * This routine is invoked by the worker thread to process FCF table
7222 * rediscovery pending completion event.
7223 **/
7224void lpfc_sli4_fcf_redisc_event_proc(struct lpfc_hba *phba)
7225{
7226        int rc;
7227
7228        spin_lock_irq(&phba->hbalock);
7229        /* Clear FCF rediscovery timeout event */
7230        phba->fcf.fcf_flag &= ~FCF_REDISC_EVT;
7231        /* Clear driver fast failover FCF record flag */
7232        phba->fcf.failover_rec.flag = 0;
7233        /* Set state for FCF fast failover */
7234        phba->fcf.fcf_flag |= FCF_REDISC_FOV;
7235        spin_unlock_irq(&phba->hbalock);
7236
7237        /* Scan FCF table from the first entry to re-discover SAN */
7238        lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
7239                        "2777 Start post-quiescent FCF table scan\n");
7240        rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST);
7241        if (rc)
7242                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7243                                "2747 Issue FCF scan read FCF mailbox "
7244                                "command failed 0x%x\n", rc);
7245}
7246
7247/**
7248 * lpfc_api_table_setup - Set up per hba pci-device group func api jump table
7249 * @phba: pointer to lpfc hba data structure.
7250 * @dev_grp: The HBA PCI-Device group number.
7251 *
7252 * This routine is invoked to set up the per HBA PCI-Device group function
7253 * API jump table entries.
7254 *
7255 * Return: 0 if success, otherwise -ENODEV
7256 **/
7257int
7258lpfc_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
7259{
7260        int rc;
7261
7262        /* Set up lpfc PCI-device group */
7263        phba->pci_dev_grp = dev_grp;
7264
7265        /* The LPFC_PCI_DEV_OC uses SLI4 */
7266        if (dev_grp == LPFC_PCI_DEV_OC)
7267                phba->sli_rev = LPFC_SLI_REV4;
7268
7269        /* Set up device INIT API function jump table */
7270        rc = lpfc_init_api_table_setup(phba, dev_grp);
7271        if (rc)
7272                return -ENODEV;
7273        /* Set up SCSI API function jump table */
7274        rc = lpfc_scsi_api_table_setup(phba, dev_grp);
7275        if (rc)
7276                return -ENODEV;
7277        /* Set up SLI API function jump table */
7278        rc = lpfc_sli_api_table_setup(phba, dev_grp);
7279        if (rc)
7280                return -ENODEV;
7281        /* Set up MBOX API function jump table */
7282        rc = lpfc_mbox_api_table_setup(phba, dev_grp);
7283        if (rc)
7284                return -ENODEV;
7285
7286        return 0;
7287}
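
/*
 * For orientation, each lpfc_*_api_table_setup() helper follows the
 * same pattern: hang SLI-3 ("_s3") or SLI-4 ("_s4") routines off
 * per-hba function pointers keyed by dev_grp. A minimal sketch (the
 * hook shown is one of several set by lpfc_init_api_table_setup()):
 *
 *	switch (dev_grp) {
 *	case LPFC_PCI_DEV_LP:
 *		phba->lpfc_hba_down_post = lpfc_hba_down_post_s3;
 *		break;
 *	case LPFC_PCI_DEV_OC:
 *		phba->lpfc_hba_down_post = lpfc_hba_down_post_s4;
 *		break;
 *	default:
 *		return -ENODEV;
 *	}
 */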
7288
7289/**
7290 * lpfc_log_intr_mode - Log the active interrupt mode
7291 * @phba: pointer to lpfc hba data structure.
7292 * @intr_mode: active interrupt mode adopted.
7293 *
7294 * This routine is invoked to log the currently used active interrupt mode
7295 * of the device.
7296 **/
7297static void lpfc_log_intr_mode(struct lpfc_hba *phba, uint32_t intr_mode)
7298{
7299        switch (intr_mode) {
7300        case 0:
7301                lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7302                                "0470 Enabled INTx interrupt mode.\n");
7303                break;
7304        case 1:
7305                lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7306                                "0481 Enabled MSI interrupt mode.\n");
7307                break;
7308        case 2:
7309                lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7310                                "0480 Enabled MSI-X interrupt mode.\n");
7311                break;
7312        default:
7313                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7314                                "0482 Illegal interrupt mode.\n");
7315                break;
7316        }
7317        return;
7318}
7319
7320/**
7321 * lpfc_enable_pci_dev - Enable a generic PCI device.
7322 * @phba: pointer to lpfc hba data structure.
7323 *
7324 * This routine is invoked to enable the PCI device that is common to all
7325 * PCI devices.
7326 *
7327 * Return codes
7328 *      0 - successful
7329 *      other values - error
7330 **/
7331static int
7332lpfc_enable_pci_dev(struct lpfc_hba *phba)
7333{
7334        struct pci_dev *pdev;
7335
7336        /* Obtain PCI device reference */
7337        if (!phba->pcidev)
7338                goto out_error;
7339        else
7340                pdev = phba->pcidev;
7341        /* Enable PCI device */
7342        if (pci_enable_device_mem(pdev))
7343                goto out_error;
7344        /* Request PCI resource for the device */
7345        if (pci_request_mem_regions(pdev, LPFC_DRIVER_NAME))
7346                goto out_disable_device;
7347        /* Set up device as PCI master and save state for EEH */
7348        pci_set_master(pdev);
7349        pci_try_set_mwi(pdev);
7350        pci_save_state(pdev);
7351
7352        /* PCIe EEH recovery on powerpc platforms needs fundamental reset */
7353        if (pci_is_pcie(pdev))
7354                pdev->needs_freset = 1;
7355
7356        return 0;
7357
7358out_disable_device:
7359        pci_disable_device(pdev);
7360out_error:
7361        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7362                        "1401 Failed to enable pci device\n");
7363        return -ENODEV;
7364}
7365
7366/**
7367 * lpfc_disable_pci_dev - Disable a generic PCI device.
7368 * @phba: pointer to lpfc hba data structure.
7369 *
7370 * This routine is invoked to disable the PCI device that is common to all
7371 * PCI devices.
7372 **/
7373static void
7374lpfc_disable_pci_dev(struct lpfc_hba *phba)
7375{
7376        struct pci_dev *pdev;
7377
7378        /* Obtain PCI device reference */
7379        if (!phba->pcidev)
7380                return;
7381        else
7382                pdev = phba->pcidev;
7383        /* Release PCI resource and disable PCI device */
7384        pci_release_mem_regions(pdev);
7385        pci_disable_device(pdev);
7386
7387        return;
7388}
7389
7390/**
7391 * lpfc_reset_hba - Reset a hba
7392 * @phba: pointer to lpfc hba data structure.
7393 *
7394 * This routine is invoked to reset a hba device. It brings the HBA
7395 * offline, performs a board restart, and then brings the board back
7396 * online. The lpfc_offline calls lpfc_sli_hba_down which will clean up
7397 * on outstanding mailbox commands.
7398 **/
7399void
7400lpfc_reset_hba(struct lpfc_hba *phba)
7401{
7402        /* If resets are disabled then set error state and return. */
7403        if (!phba->cfg_enable_hba_reset) {
7404                phba->link_state = LPFC_HBA_ERROR;
7405                return;
7406        }
7407
7408        /* If not LPFC_SLI_ACTIVE, force all IO to be flushed */
7409        if (phba->sli.sli_flag & LPFC_SLI_ACTIVE) {
7410                lpfc_offline_prep(phba, LPFC_MBX_WAIT);
7411        } else {
7412                lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
7413                lpfc_sli_flush_io_rings(phba);
7414        }
7415        lpfc_offline(phba);
7416        lpfc_sli_brdrestart(phba);
7417        lpfc_online(phba);
7418        lpfc_unblock_mgmt_io(phba);
7419}
7420
7421/**
7422 * lpfc_sli_sriov_nr_virtfn_get - Get the number of sr-iov virtual functions
7423 * @phba: pointer to lpfc hba data structure.
7424 *
7425 * This function reads the PCI SR-IOV extended capability of the
7426 * physical function to determine the total number of virtual
7427 * functions (TotalVFs) the device supports. It only queries the
7428 * capability and does not enable any virtual functions. If the
7429 * device has no SR-IOV capability, 0 is returned.
7430 **/
7431uint16_t
7432lpfc_sli_sriov_nr_virtfn_get(struct lpfc_hba *phba)
7433{
7434        struct pci_dev *pdev = phba->pcidev;
7435        uint16_t nr_virtfn;
7436        int pos;
7437
7438        pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
7439        if (pos == 0)
7440                return 0;
7441
7442        pci_read_config_word(pdev, pos + PCI_SRIOV_TOTAL_VF, &nr_virtfn);
7443        return nr_virtfn;
7444}
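
/*
 * Note: the PCI core offers an equivalent query; the open-coded config
 * read above predates its use here. An alternative sketch:
 *
 *	total_vfs = pci_sriov_get_totalvfs(phba->pcidev);
 */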
7445
7446/**
7447 * lpfc_sli_probe_sriov_nr_virtfn - Enable a number of sr-iov virtual functions
7448 * @phba: pointer to lpfc hba data structure.
7449 * @nr_vfn: number of virtual functions to be enabled.
7450 *
7451 * This function enables the PCI SR-IOV virtual functions to a physical
7452 * function. It invokes the PCI SR-IOV api with the @nr_vfn provided to
7453 * enable the number of virtual functions to the physical function. As
7454 * not all devices support SR-IOV, the return code from the pci_enable_sriov()
7455 * API call is not considered an error condition for most devices.
7456 **/
7457int
7458lpfc_sli_probe_sriov_nr_virtfn(struct lpfc_hba *phba, int nr_vfn)
7459{
7460        struct pci_dev *pdev = phba->pcidev;
7461        uint16_t max_nr_vfn;
7462        int rc;
7463
7464        max_nr_vfn = lpfc_sli_sriov_nr_virtfn_get(phba);
7465        if (nr_vfn > max_nr_vfn) {
7466                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7467                                "3057 Requested vfs (%d) greater than "
7468                                "supported vfs (%d)", nr_vfn, max_nr_vfn);
7469                return -EINVAL;
7470        }
7471
7472        rc = pci_enable_sriov(pdev, nr_vfn);
7473        if (rc) {
7474                lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
7475                                "2806 Failed to enable sriov on this device "
7476                                "with vfn number nr_vf:%d, rc:%d\n",
7477                                nr_vfn, rc);
7478        } else {
7479                lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
7480                                "2807 Successfully enabled sriov on this device "
7481                                "with vfn number nr_vf:%d\n", nr_vfn);
            }
7482        return rc;
7483}
7484
7485static void
7486lpfc_unblock_requests_work(struct work_struct *work)
7487{
7488        struct lpfc_hba *phba = container_of(work, struct lpfc_hba,
7489                                             unblock_request_work);
7490
7491        lpfc_unblock_requests(phba);
7492}
7493
7494/**
7495 * lpfc_setup_driver_resource_phase1 - Phase1 setup of driver internal resources.
7496 * @phba: pointer to lpfc hba data structure.
7497 *
7498 * This routine is invoked to set up the driver internal resources before the
7499 * device specific resource setup to support the HBA device it is attached to.
7500 *
7501 * Return codes
7502 *      0 - successful
7503 *      other values - error
7504 **/
7505static int
7506lpfc_setup_driver_resource_phase1(struct lpfc_hba *phba)
7507{
7508        struct lpfc_sli *psli = &phba->sli;
7509
7510        /*
7511         * Driver resources common to all SLI revisions
7512         */
7513        atomic_set(&phba->fast_event_count, 0);
7514        atomic_set(&phba->dbg_log_idx, 0);
7515        atomic_set(&phba->dbg_log_cnt, 0);
7516        atomic_set(&phba->dbg_log_dmping, 0);
7517        spin_lock_init(&phba->hbalock);
7518
7519        /* Initialize port_list spinlock */
7520        spin_lock_init(&phba->port_list_lock);
7521        INIT_LIST_HEAD(&phba->port_list);
7522
7523        INIT_LIST_HEAD(&phba->work_list);
7524        init_waitqueue_head(&phba->wait_4_mlo_m_q);
7525
7526        /* Initialize the wait queue head for the kernel thread */
7527        init_waitqueue_head(&phba->work_waitq);
7528
7529        lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7530                        "1403 Protocols supported %s %s %s\n",
7531                        ((phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) ?
7532                                "SCSI" : " "),
7533                        ((phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) ?
7534                                "NVME" : " "),
7535                        (phba->nvmet_support ? "NVMET" : " "));
7536
7537        /* Initialize the IO buffer list used by driver for SLI3 SCSI */
7538        spin_lock_init(&phba->scsi_buf_list_get_lock);
7539        INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_get);
7540        spin_lock_init(&phba->scsi_buf_list_put_lock);
7541        INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put);
7542
7543        /* Initialize the fabric iocb list */
7544        INIT_LIST_HEAD(&phba->fabric_iocb_list);
7545
7546        /* Initialize list to save ELS buffers */
7547        INIT_LIST_HEAD(&phba->elsbuf);
7548
7549        /* Initialize FCF connection rec list */
7550        INIT_LIST_HEAD(&phba->fcf_conn_rec_list);
7551
7552        /* Initialize OAS configuration list */
7553        spin_lock_init(&phba->devicelock);
7554        INIT_LIST_HEAD(&phba->luns);
7555
7556        /* MBOX heartbeat timer */
7557        timer_setup(&psli->mbox_tmo, lpfc_mbox_timeout, 0);
7558        /* Fabric block timer */
7559        timer_setup(&phba->fabric_block_timer, lpfc_fabric_block_timeout, 0);
7560        /* EA polling mode timer */
7561        timer_setup(&phba->eratt_poll, lpfc_poll_eratt, 0);
7562        /* Heartbeat timer */
7563        timer_setup(&phba->hb_tmofunc, lpfc_hb_timeout, 0);
7564
7565        INIT_DELAYED_WORK(&phba->eq_delay_work, lpfc_hb_eq_delay_work);
7566
7567        INIT_DELAYED_WORK(&phba->idle_stat_delay_work,
7568                          lpfc_idle_stat_delay_work);
7569        INIT_WORK(&phba->unblock_request_work, lpfc_unblock_requests_work);
7570        return 0;
7571}
7572
7573/**
7574 * lpfc_sli_driver_resource_setup - Setup driver internal resources for SLI3 dev
7575 * @phba: pointer to lpfc hba data structure.
7576 *
7577 * This routine is invoked to set up the driver internal resources specific to
7578 * support the SLI-3 HBA device it is attached to.
7579 *
7580 * Return codes
7581 * 0 - successful
7582 * other values - error
7583 **/
7584static int
7585lpfc_sli_driver_resource_setup(struct lpfc_hba *phba)
7586{
7587        int rc, entry_sz;
7588
7589        /*
7590         * Initialize timers used by driver
7591         */
7592
7593        /* FCP polling mode timer */
7594        timer_setup(&phba->fcp_poll_timer, lpfc_poll_timeout, 0);
7595
7596        /* Host attention work mask setup */
7597        phba->work_ha_mask = (HA_ERATT | HA_MBATT | HA_LATT);
7598        phba->work_ha_mask |= (HA_RXMASK << (LPFC_ELS_RING * 4));
7599
7600        /* Get all the module params for configuring this host */
7601        lpfc_get_cfgparam(phba);
7602        /* Set up phase-1 common device driver resources */
7603
7604        rc = lpfc_setup_driver_resource_phase1(phba);
7605        if (rc)
7606                return -ENODEV;
7607
7608        if (phba->pcidev->device == PCI_DEVICE_ID_HORNET) {
7609                phba->menlo_flag |= HBA_MENLO_SUPPORT;
7610                /* check for menlo minimum sg count */
7611                if (phba->cfg_sg_seg_cnt < LPFC_DEFAULT_MENLO_SG_SEG_CNT)
7612                        phba->cfg_sg_seg_cnt = LPFC_DEFAULT_MENLO_SG_SEG_CNT;
7613        }
7614
7615        if (!phba->sli.sli3_ring)
7616                phba->sli.sli3_ring = kcalloc(LPFC_SLI3_MAX_RING,
7617                                              sizeof(struct lpfc_sli_ring),
7618                                              GFP_KERNEL);
7619        if (!phba->sli.sli3_ring)
7620                return -ENOMEM;
7621
7622        /*
7623         * Since lpfc_sg_seg_cnt is module parameter, the sg_dma_buf_size
7624         * used to create the sg_dma_buf_pool must be dynamically calculated.
7625         */
7626
7627        if (phba->sli_rev == LPFC_SLI_REV4)
7628                entry_sz = sizeof(struct sli4_sge);
7629        else
7630                entry_sz = sizeof(struct ulp_bde64);
7631
7632        /* There are going to be 2 reserved BDEs: 1 FCP cmnd + 1 FCP rsp */
7633        if (phba->cfg_enable_bg) {
7634                /*
7635                 * The scsi_buf for a T10-DIF I/O will hold the FCP cmnd,
7636                 * the FCP rsp, and a BDE for each. Since we have no control
7637                 * over how many protection data segments the SCSI Layer
7638                 * will hand us (i.e. there could be one for every block
7639                 * in the IO), we just allocate enough BDEs to accommodate
7640                 * our max amount and we need to limit lpfc_sg_seg_cnt to
7641                 * minimize the risk of running out.
7642                 */
7643                phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
7644                        sizeof(struct fcp_rsp) +
7645                        (LPFC_MAX_SG_SEG_CNT * entry_sz);
7646
7647                if (phba->cfg_sg_seg_cnt > LPFC_MAX_SG_SEG_CNT_DIF)
7648                        phba->cfg_sg_seg_cnt = LPFC_MAX_SG_SEG_CNT_DIF;
7649
7650                /* Total BDEs in BPL for scsi_sg_list and scsi_sg_prot_list */
7651                phba->cfg_total_seg_cnt = LPFC_MAX_SG_SEG_CNT;
7652        } else {
7653                /*
7654                 * The scsi_buf for a regular I/O will hold the FCP cmnd,
7655                 * the FCP rsp, a BDE for each, and a BDE for up to
7656                 * cfg_sg_seg_cnt data segments.
7657                 */
7658                phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
7659                        sizeof(struct fcp_rsp) +
7660                        ((phba->cfg_sg_seg_cnt + 2) * entry_sz);
7661
7662                /* Total BDEs in BPL for scsi_sg_list */
7663                phba->cfg_total_seg_cnt = phba->cfg_sg_seg_cnt + 2;
7664        }
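            /*
             * Worked example, with illustrative sizes: on an SLI-3 part
             * entry_sz = sizeof(struct ulp_bde64) = 12 bytes, so with
             * cfg_sg_seg_cnt = 64 the non-BG path above yields
             * cfg_sg_dma_buf_size = sizeof(fcp_cmnd) + sizeof(fcp_rsp) +
             * (64 + 2) * 12 bytes, and cfg_total_seg_cnt = 66.
             */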
7665
7666        lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP,
7667                        "9088 INIT sg_tablesize:%d dmabuf_size:%d total_bde:%d\n",
7668                        phba->cfg_sg_seg_cnt, phba->cfg_sg_dma_buf_size,
7669                        phba->cfg_total_seg_cnt);
7670
7671        phba->max_vpi = LPFC_MAX_VPI;
7672        /* This will be set to correct value after config_port mbox */
7673        phba->max_vports = 0;
7674
7675        /*
7676         * Initialize the SLI Layer to run with lpfc HBAs.
7677         */
7678        lpfc_sli_setup(phba);
7679        lpfc_sli_queue_init(phba);
7680
7681        /* Allocate device driver memory */
7682        if (lpfc_mem_alloc(phba, BPL_ALIGN_SZ))
7683                return -ENOMEM;
7684
7685        phba->lpfc_sg_dma_buf_pool =
7686                dma_pool_create("lpfc_sg_dma_buf_pool",
7687                                &phba->pcidev->dev, phba->cfg_sg_dma_buf_size,
7688                                BPL_ALIGN_SZ, 0);
7689
7690        if (!phba->lpfc_sg_dma_buf_pool)
7691                goto fail_free_mem;
7692
7693        phba->lpfc_cmd_rsp_buf_pool =
7694                        dma_pool_create("lpfc_cmd_rsp_buf_pool",
7695                                        &phba->pcidev->dev,
7696                                        sizeof(struct fcp_cmnd) +
7697                                        sizeof(struct fcp_rsp),
7698                                        BPL_ALIGN_SZ, 0);
7699
7700        if (!phba->lpfc_cmd_rsp_buf_pool)
7701                goto fail_free_dma_buf_pool;
7702
7703        /*
7704         * Enable sr-iov virtual functions if supported and configured
7705         * through the module parameter.
7706         */
7707        if (phba->cfg_sriov_nr_virtfn > 0) {
7708                rc = lpfc_sli_probe_sriov_nr_virtfn(phba,
7709                                                 phba->cfg_sriov_nr_virtfn);
7710                if (rc) {
7711                        lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
7712                                        "2808 Requested number of SR-IOV "
7713                                        "virtual functions (%d) is not "
7714                                        "supported\n",
7715                                        phba->cfg_sriov_nr_virtfn);
7716                        phba->cfg_sriov_nr_virtfn = 0;
7717                }
7718        }
7719
7720        return 0;
7721
7722fail_free_dma_buf_pool:
7723        dma_pool_destroy(phba->lpfc_sg_dma_buf_pool);
7724        phba->lpfc_sg_dma_buf_pool = NULL;
7725fail_free_mem:
7726        lpfc_mem_free(phba);
7727        return -ENOMEM;
7728}
7729
7730/**
7731 * lpfc_sli_driver_resource_unset - Unset drvr internal resources for SLI3 dev
7732 * @phba: pointer to lpfc hba data structure.
7733 *
7734 * This routine is invoked to unset the driver internal resources set up
7735 * specifically for supporting the SLI-3 HBA device it is attached to.
7736 **/
7737static void
7738lpfc_sli_driver_resource_unset(struct lpfc_hba *phba)
7739{
7740        /* Free device driver memory allocated */
7741        lpfc_mem_free_all(phba);
7742
7743        return;
7744}
7745
7746/**
7747 * lpfc_sli4_driver_resource_setup - Setup drvr internal resources for SLI4 dev
7748 * @phba: pointer to lpfc hba data structure.
7749 *
7750 * This routine is invoked to set up the driver internal resources specific to
7751 * support the SLI-4 HBA device it is attached to.
7752 *
7753 * Return codes
7754 *      0 - successful
7755 *      other values - error
7756 **/
7757static int
7758lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
7759{
7760        LPFC_MBOXQ_t *mboxq;
7761        MAILBOX_t *mb;
7762        int rc, i, max_buf_size;
7763        int longs;
7764        int extra;
7765        uint64_t wwn;
7766        u32 if_type;
7767        u32 if_fam;
7768
7769        phba->sli4_hba.num_present_cpu = lpfc_present_cpu;
7770        phba->sli4_hba.num_possible_cpu = cpumask_last(cpu_possible_mask) + 1;
7771        phba->sli4_hba.curr_disp_cpu = 0;
7772
7773        /* Get all the module params for configuring this host */
7774        lpfc_get_cfgparam(phba);
7775
7776        /* Set up phase-1 common device driver resources */
7777        rc = lpfc_setup_driver_resource_phase1(phba);
7778        if (rc)
7779                return -ENODEV;
7780
7781        /* Before proceeding, wait for POST done and device ready */
7782        rc = lpfc_sli4_post_status_check(phba);
7783        if (rc)
7784                return -ENODEV;
7785
7786        /* Allocate all driver workqueues here */
7787
7788        /* The lpfc_wq workqueue for deferred irq use */
7789        phba->wq = alloc_workqueue("lpfc_wq", WQ_MEM_RECLAIM, 0);
            if (!phba->wq)
                    return -ENOMEM;
7790
7791        /*
7792         * Initialize timers used by driver
7793         */
7794
7795        timer_setup(&phba->rrq_tmr, lpfc_rrq_timeout, 0);
7796
7797        /* FCF rediscover timer */
7798        timer_setup(&phba->fcf.redisc_wait, lpfc_sli4_fcf_redisc_wait_tmo, 0);
7799
7800        /* CMF congestion timer */
7801        hrtimer_init(&phba->cmf_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
7802        phba->cmf_timer.function = lpfc_cmf_timer;
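            /*
             * hrtimer contract: the lpfc_cmf_timer callback runs in (hard)
             * interrupt context here (HRTIMER_MODE_REL, non-_SOFT) and keeps
             * the timer periodic by forwarding its expiry (e.g. via
             * hrtimer_forward_now()) and returning HRTIMER_RESTART, or stops
             * it by returning HRTIMER_NORESTART.
             */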
7803
7804        /*
7805         * Control structure for handling external multi-buffer mailbox
7806         * command pass-through.
7807         */
7808        memset((uint8_t *)&phba->mbox_ext_buf_ctx, 0,
7809                sizeof(struct lpfc_mbox_ext_buf_ctx));
7810        INIT_LIST_HEAD(&phba->mbox_ext_buf_ctx.ext_dmabuf_list);
7811
7812        phba->max_vpi = LPFC_MAX_VPI;
7813
7814        /* This will be set to the correct value after the read_config mbox */
7815        phba->max_vports = 0;
7816
7817        /* Program the default value of vlan_id and fc_map */
7818        phba->valid_vlan = 0;
7819        phba->fc_map[0] = LPFC_FCOE_FCF_MAP0;
7820        phba->fc_map[1] = LPFC_FCOE_FCF_MAP1;
7821        phba->fc_map[2] = LPFC_FCOE_FCF_MAP2;
7822
7823        /*
7824         * For SLI4, instead of using ring 0 (LPFC_FCP_RING) for FCP commands
7825         * we will associate a new ring, for each EQ/CQ/WQ tuple.
7826         * The WQ create will allocate the ring.
7827         */
7828
7829        /* Initialize buffer queue management fields */
7830        INIT_LIST_HEAD(&phba->hbqs[LPFC_ELS_HBQ].hbq_buffer_list);
7831        phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_sli4_rb_alloc;
7832        phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_sli4_rb_free;
7833
7834        /* for VMID idle timeout if VMID is enabled */
7835        if (lpfc_is_vmid_enabled(phba))
7836                timer_setup(&phba->inactive_vmid_poll, lpfc_vmid_poll, 0);
7837
7838        /*
7839         * Initialize the SLI Layer to run with lpfc SLI4 HBAs.
7840         */
7841        /* Initialize the Abort buffer list used by driver */
7842        spin_lock_init(&phba->sli4_hba.abts_io_buf_list_lock);
7843        INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_io_buf_list);
7844
7845        if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
7846                /* Initialize the Abort nvme buffer list used by driver */
7847                spin_lock_init(&phba->sli4_hba.abts_nvmet_buf_list_lock);
7848                INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
7849                INIT_LIST_HEAD(&phba->sli4_hba.lpfc_nvmet_io_wait_list);
7850                spin_lock_init(&phba->sli4_hba.t_active_list_lock);
7851                INIT_LIST_HEAD(&phba->sli4_hba.t_active_ctx_list);
7852        }
7853
7854        /* This abort list used by worker thread */
7855        spin_lock_init(&phba->sli4_hba.sgl_list_lock);
7856        spin_lock_init(&phba->sli4_hba.nvmet_io_wait_lock);
7857        spin_lock_init(&phba->sli4_hba.asynce_list_lock);
7858        spin_lock_init(&phba->sli4_hba.els_xri_abrt_list_lock);
7859
7860        /*
7861         * Initialize driver internal slow-path work queues
7862         */
7863
7864        /* Driver internal slow-path CQ Event pool */
7865        INIT_LIST_HEAD(&phba->sli4_hba.sp_cqe_event_pool);
7866        /* Response IOCB work queue list */
7867        INIT_LIST_HEAD(&phba->sli4_hba.sp_queue_event);
7868        /* Asynchronous event CQ Event work queue list */
7869        INIT_LIST_HEAD(&phba->sli4_hba.sp_asynce_work_queue);
7870        /* Slow-path XRI aborted CQ Event work queue list */
7871        INIT_LIST_HEAD(&phba->sli4_hba.sp_els_xri_aborted_work_queue);
7872        /* Receive queue CQ Event work queue list */
7873        INIT_LIST_HEAD(&phba->sli4_hba.sp_unsol_work_queue);
7874
7875        /* Initialize extent block lists. */
7876        INIT_LIST_HEAD(&phba->sli4_hba.lpfc_rpi_blk_list);
7877        INIT_LIST_HEAD(&phba->sli4_hba.lpfc_xri_blk_list);
7878        INIT_LIST_HEAD(&phba->sli4_hba.lpfc_vfi_blk_list);
7879        INIT_LIST_HEAD(&phba->lpfc_vpi_blk_list);
7880
7881        /* Initialize mboxq lists. If the early init routines fail
7882         * these lists need to be correctly initialized.
7883         */
7884        INIT_LIST_HEAD(&phba->sli.mboxq);
7885        INIT_LIST_HEAD(&phba->sli.mboxq_cmpl);
7886
7887        /* initialize optic_state to 0xFF */
7888        phba->sli4_hba.lnk_info.optic_state = 0xff;
7889
7890        /* Allocate device driver memory */
7891        rc = lpfc_mem_alloc(phba, SGL_ALIGN_SZ);
7892        if (rc)
7893                return -ENOMEM;
7894
7895        /* IF Type 2 ports get initialized now. */
7896        if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) >=
7897            LPFC_SLI_INTF_IF_TYPE_2) {
7898                rc = lpfc_pci_function_reset(phba);
7899                if (unlikely(rc)) {
7900                        rc = -ENODEV;
7901                        goto out_free_mem;
7902                }
7903                phba->temp_sensor_support = 1;
7904        }
7905
7906        /* Create the bootstrap mailbox command */
7907        rc = lpfc_create_bootstrap_mbox(phba);
7908        if (unlikely(rc))
7909                goto out_free_mem;
7910
7911        /* Set up the host's endian order with the device. */
7912        rc = lpfc_setup_endian_order(phba);
7913        if (unlikely(rc))
7914                goto out_free_bsmbx;
7915
7916        /* Set up the hba's configuration parameters. */
7917        rc = lpfc_sli4_read_config(phba);
7918        if (unlikely(rc))
7919                goto out_free_bsmbx;
7920        rc = lpfc_mem_alloc_active_rrq_pool_s4(phba);
7921        if (unlikely(rc))
7922                goto out_free_bsmbx;
7923
7924        /* IF Type 0 ports get initialized now. */
7925        if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
7926            LPFC_SLI_INTF_IF_TYPE_0) {
7927                rc = lpfc_pci_function_reset(phba);
7928                if (unlikely(rc))
7929                        goto out_free_bsmbx;
7930        }
7931
7932        mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
7933                                                       GFP_KERNEL);
7934        if (!mboxq) {
7935                rc = -ENOMEM;
7936                goto out_free_bsmbx;
7937        }
7938
7939        /* Check for NVMET being configured */
7940        phba->nvmet_support = 0;
7941        if (lpfc_enable_nvmet_cnt) {
7942
7943                /* First get WWN of HBA instance */
7944                lpfc_read_nv(phba, mboxq);
7945                rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7946                if (rc != MBX_SUCCESS) {
7947                        lpfc_printf_log(phba, KERN_ERR,
7948                                        LOG_TRACE_EVENT,
7949                                        "6016 Mailbox failed, mbxCmd x%x "
7950                                        "READ_NV, mbxStatus x%x\n",
7951                                        bf_get(lpfc_mqe_command, &mboxq->u.mqe),
7952                                        bf_get(lpfc_mqe_status, &mboxq->u.mqe));
7953                        mempool_free(mboxq, phba->mbox_mem_pool);
7954                        rc = -EIO;
7955                        goto out_free_bsmbx;
7956                }
7957                mb = &mboxq->u.mb;
7958                memcpy(&wwn, (char *)mb->un.varRDnvp.nodename,
7959                       sizeof(uint64_t));
7960                wwn = cpu_to_be64(wwn);
7961                phba->sli4_hba.wwnn.u.name = wwn;
7962                memcpy(&wwn, (char *)mb->un.varRDnvp.portname,
7963                       sizeof(uint64_t));
7964                /* wwn is WWPN of HBA instance */
7965                wwn = cpu_to_be64(wwn);
7966                phba->sli4_hba.wwpn.u.name = wwn;
7967
7968                /* Check to see if it matches any module parameter */
7969                for (i = 0; i < lpfc_enable_nvmet_cnt; i++) {
7970                        if (wwn == lpfc_enable_nvmet[i]) {
7971#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
7972                                if (lpfc_nvmet_mem_alloc(phba))
7973                                        break;
7974
7975                                phba->nvmet_support = 1; /* a match */
7976
7977                                lpfc_printf_log(phba, KERN_ERR,
7978                                                LOG_TRACE_EVENT,
7979                                                "6017 NVME Target %016llx\n",
7980                                                wwn);
7981#else
7982                                lpfc_printf_log(phba, KERN_ERR,
7983                                                LOG_TRACE_EVENT,
7984                                                "6021 Can't enable NVME Target."
7985                                                " NVME_TARGET_FC infrastructure"
7986                                                " is not in kernel\n");
7987#endif
7988                                /* Not supported for NVMET */
7989                                phba->cfg_xri_rebalancing = 0;
7990                                if (phba->irq_chann_mode == NHT_MODE) {
7991                                        phba->cfg_irq_chann =
7992                                                phba->sli4_hba.num_present_cpu;
7993                                        phba->cfg_hdw_queue =
7994                                                phba->sli4_hba.num_present_cpu;
7995                                        phba->irq_chann_mode = NORMAL_MODE;
7996                                }
7997                                break;
7998                        }
7999                }
8000        }
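            /*
             * Usage sketch (illustrative invocation, not quoted from the
             * driver documentation): loading with e.g.
             *   modprobe lpfc lpfc_enable_nvmet=0x10000090fa942779
             * supplies the WWPN list that the loop above compares against
             * this port's WWPN when deciding nvmet_support.
             */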
8001
8002        lpfc_nvme_mod_param_dep(phba);
8003
8004        /*
8005         * Get sli4 parameters that override parameters from Port capabilities.
8006         * If this call fails, it isn't critical unless the SLI4 parameters come
8007         * back in conflict.
8008         */
8009        rc = lpfc_get_sli4_parameters(phba, mboxq);
8010        if (rc) {
8011                if_type = bf_get(lpfc_sli_intf_if_type,
8012                                 &phba->sli4_hba.sli_intf);
8013                if_fam = bf_get(lpfc_sli_intf_sli_family,
8014                                &phba->sli4_hba.sli_intf);
8015                if (phba->sli4_hba.extents_in_use &&
8016                    phba->sli4_hba.rpi_hdrs_in_use) {
8017                        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8018                                        "2999 Unsupported SLI4 Parameters "
8019                                        "Extents and RPI headers enabled.\n");
8020                        if (if_type == LPFC_SLI_INTF_IF_TYPE_0 &&
8021                            if_fam ==  LPFC_SLI_INTF_FAMILY_BE2) {
8022                                mempool_free(mboxq, phba->mbox_mem_pool);
8023                                rc = -EIO;
8024                                goto out_free_bsmbx;
8025                        }
8026                }
8027                if (!(if_type == LPFC_SLI_INTF_IF_TYPE_0 &&
8028                      if_fam == LPFC_SLI_INTF_FAMILY_BE2)) {
8029                        mempool_free(mboxq, phba->mbox_mem_pool);
8030                        rc = -EIO;
8031                        goto out_free_bsmbx;
8032                }
8033        }
8034
8035        /*
8036         * 1 for cmd, 1 for rsp, NVME adds an extra one
8037         * for boundary conditions in its max_sgl_segment template.
8038         */
8039        extra = 2;
8040        if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
8041                extra++;
8042
8043        /*
8044         * No matter what family our adapter is in, we are
8045         * limited to 2 pages (512 SGEs) for our SGL.
8046         * There are going to be 2 reserved SGEs: 1 FCP cmnd + 1 FCP rsp.
8047         */
8048        max_buf_size = (2 * SLI4_PAGE_SIZE);
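            /*
             * Worked numbers: SLI4_PAGE_SIZE is 4096 and an SGE
             * (sizeof(struct sli4_sge)) is 16 bytes, so 2 * 4096 / 16 = 512
             * SGEs -- the limit cited in the comment above.
             */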
8049
8050        /*
8051         * Since lpfc_sg_seg_cnt is module param, the sg_dma_buf_size
8052         * used to create the sg_dma_buf_pool must be calculated.
8053         */
8054        if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) {
8055                /* Both cfg_enable_bg and cfg_external_dif code paths */
8056
8057                /*
8058                 * The scsi_buf for a T10-DIF I/O holds the FCP cmnd,
8059                 * the FCP rsp, and an SGE. Since we have no control
8060                 * over how many protection segments the SCSI Layer
8061                 * will hand us (i.e. there could be one for every block
8062                 * in the IO), just allocate enough SGEs to accommodate
8063                 * our max amount, and limit lpfc_sg_seg_cnt to minimize
8064                 * the risk of running out.
8065                 */
8066                phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
8067                                sizeof(struct fcp_rsp) + max_buf_size;
8068
8069                /* Total SGEs for scsi_sg_list and scsi_sg_prot_list */
8070                phba->cfg_total_seg_cnt = LPFC_MAX_SGL_SEG_CNT;
8071
8072                /*
8073                 * If supporting DIF, reduce the seg count for scsi to
8074                 * allow room for the DIF sges.
8075                 */
8076                if (phba->cfg_enable_bg &&
8077                    phba->cfg_sg_seg_cnt > LPFC_MAX_BG_SLI4_SEG_CNT_DIF)
8078                        phba->cfg_scsi_seg_cnt = LPFC_MAX_BG_SLI4_SEG_CNT_DIF;
8079                else
8080                        phba->cfg_scsi_seg_cnt = phba->cfg_sg_seg_cnt;
8081
8082        } else {
8083                /*
8084                 * The scsi_buf for a regular I/O holds the FCP cmnd,
8085                 * the FCP rsp, an SGE for each, and SGEs for up to
8086                 * cfg_sg_seg_cnt data segments.
8087                 */
8088                phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
8089                                sizeof(struct fcp_rsp) +
8090                                ((phba->cfg_sg_seg_cnt + extra) *
8091                                sizeof(struct sli4_sge));
8092
8093                /* Total SGEs for scsi_sg_list */
8094                phba->cfg_total_seg_cnt = phba->cfg_sg_seg_cnt + extra;
8095                phba->cfg_scsi_seg_cnt = phba->cfg_sg_seg_cnt;
8096
8097                /*
8098                 * NOTE: if (phba->cfg_sg_seg_cnt + extra) <= 256 we only
8099                 * need to post 1 page for the SGL.
8100                 */
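                    /*
                     * (256 SGEs * 16 bytes per SGE = 4096 bytes = one SLI4
                     * page, hence the single-page observation above.)
                     */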
8101        }
8102
8103        if (phba->cfg_xpsgl && !phba->nvmet_support)
8104                phba->cfg_sg_dma_buf_size = LPFC_DEFAULT_XPSGL_SIZE;
8105        else if (phba->cfg_sg_dma_buf_size  <= LPFC_MIN_SG_SLI4_BUF_SZ)
8106                phba->cfg_sg_dma_buf_size = LPFC_MIN_SG_SLI4_BUF_SZ;
8107        else
8108                phba->cfg_sg_dma_buf_size =
8109                                SLI4_PAGE_ALIGN(phba->cfg_sg_dma_buf_size);
8110
8111        phba->border_sge_num = phba->cfg_sg_dma_buf_size /
8112                               sizeof(struct sli4_sge);
8113
8114        /* Limit to LPFC_MAX_NVME_SEG_CNT for NVME. */
8115        if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
8116                if (phba->cfg_sg_seg_cnt > LPFC_MAX_NVME_SEG_CNT) {
8117                        lpfc_printf_log(phba, KERN_INFO, LOG_NVME | LOG_INIT,
8118                                        "6300 Reducing NVME sg segment "
8119                                        "cnt to %d\n",
8120                                        LPFC_MAX_NVME_SEG_CNT);
8121                        phba->cfg_nvme_seg_cnt = LPFC_MAX_NVME_SEG_CNT;
8122                } else {
8123                        phba->cfg_nvme_seg_cnt = phba->cfg_sg_seg_cnt;
                    }
8124        }
8125
8126        lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP,
8127                        "9087 sg_seg_cnt:%d dmabuf_size:%d "
8128                        "total:%d scsi:%d nvme:%d\n",
8129                        phba->cfg_sg_seg_cnt, phba->cfg_sg_dma_buf_size,
8130                        phba->cfg_total_seg_cnt,  phba->cfg_scsi_seg_cnt,
8131                        phba->cfg_nvme_seg_cnt);
8132
8133        if (phba->cfg_sg_dma_buf_size < SLI4_PAGE_SIZE)
8134                i = phba->cfg_sg_dma_buf_size;
8135        else
8136                i = SLI4_PAGE_SIZE;
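            /*
             * 'i' becomes the dma_pool alignment below: buffers are aligned
             * to their own size, capped at one SLI4 page. A plausible
             * reading (not stated in the source) is that this keeps an SGL
             * from straddling a page boundary when posted to the port.
             */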
8137
8138        phba->lpfc_sg_dma_buf_pool =
8139                        dma_pool_create("lpfc_sg_dma_buf_pool",
8140                                        &phba->pcidev->dev,
8141                                        phba->cfg_sg_dma_buf_size,
8142                                        i, 0);
8143        if (!phba->lpfc_sg_dma_buf_pool)
8144                goto out_free_bsmbx;
8145
8146        phba->lpfc_cmd_rsp_buf_pool =
8147                        dma_pool_create("lpfc_cmd_rsp_buf_pool",
8148                                        &phba->pcidev->dev,
8149                                        sizeof(struct fcp_cmnd) +
8150                                        sizeof(struct fcp_rsp),
8151                                        i, 0);
8152        if (!phba->lpfc_cmd_rsp_buf_pool)
8153                goto out_free_sg_dma_buf;
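            /*
             * Sketch of how these pools are consumed later (standard
             * dma_pool API, not a quote of the lpfc I/O path):
             *   dma_addr_t dma;
             *   void *buf = dma_pool_alloc(phba->lpfc_cmd_rsp_buf_pool,
             *                              GFP_KERNEL, &dma);
             *   ...
             *   dma_pool_free(phba->lpfc_cmd_rsp_buf_pool, buf, dma);
             */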
8154
8155        mempool_free(mboxq, phba->mbox_mem_pool);
8156
8157        /* Verify OAS is supported */
8158        lpfc_sli4_oas_verify(phba);
8159
8160        /* Verify RAS support on adapter */
8161        lpfc_sli4_ras_init(phba);
8162
8163        /* Verify all the SLI4 queues */
8164        rc = lpfc_sli4_queue_verify(phba);
8165        if (rc)
8166                goto out_free_cmd_rsp_buf;
8167
8168        /* Create driver internal CQE event pool */
8169        rc = lpfc_sli4_cq_event_pool_create(phba);
8170        if (rc)
8171                goto out_free_cmd_rsp_buf;
8172
8173        /* Initialize sgl lists per host */
8174        lpfc_init_sgl_list(phba);
8175
8176        /* Allocate and initialize active sgl array */
8177        rc = lpfc_init_active_sgl_array(phba);
8178        if (rc) {
8179                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8180                                "1430 Failed to initialize sgl list.\n");
8181                goto out_destroy_cq_event_pool;
8182        }
8183        rc = lpfc_sli4_init_rpi_hdrs(phba);
8184        if (rc) {
8185                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8186                                "1432 Failed to initialize rpi headers.\n");
8187                goto out_free_active_sgl;
8188        }
8189
8190        /* Allocate eligible FCF bmask memory for FCF roundrobin failover */
8191        longs = BITS_TO_LONGS(LPFC_SLI4_FCF_TBL_INDX_MAX);
8192        phba->fcf.fcf_rr_bmask = kcalloc(longs, sizeof(unsigned long),
8193                                         GFP_KERNEL);
8194        if (!phba->fcf.fcf_rr_bmask) {
8195                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8196                                "2759 Failed allocate memory for FCF round "
8197                                "robin failover bmask\n");
8198                rc = -ENOMEM;
8199                goto out_remove_rpi_hdrs;
8200        }
8201
8202        phba->sli4_hba.hba_eq_hdl = kcalloc(phba->cfg_irq_chann,
8203                                            sizeof(struct lpfc_hba_eq_hdl),
8204                                            GFP_KERNEL);
8205        if (!phba->sli4_hba.hba_eq_hdl) {
8206                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8207                                "2572 Failed allocate memory for "
8208                                "fast-path per-EQ handle array\n");
8209                rc = -ENOMEM;
8210                goto out_free_fcf_rr_bmask;
8211        }
8212
8213        phba->sli4_hba.cpu_map = kcalloc(phba->sli4_hba.num_possible_cpu,
8214                                        sizeof(struct lpfc_vector_map_info),
8215                                        GFP_KERNEL);
8216        if (!phba->sli4_hba.cpu_map) {
8217                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8218                                "3327 Failed allocate memory for msi-x "
8219                                "interrupt vector mapping\n");
8220                rc = -ENOMEM;
8221                goto out_free_hba_eq_hdl;
8222        }
8223
8224        phba->sli4_hba.eq_info = alloc_percpu(struct lpfc_eq_intr_info);
8225        if (!phba->sli4_hba.eq_info) {
8226                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8227                                "3321 Failed allocation for per_cpu stats\n");
8228                rc = -ENOMEM;
8229                goto out_free_hba_cpu_map;
8230        }
8231
8232        phba->sli4_hba.idle_stat = kcalloc(phba->sli4_hba.num_possible_cpu,
8233                                           sizeof(*phba->sli4_hba.idle_stat),
8234                                           GFP_KERNEL);
8235        if (!phba->sli4_hba.idle_stat) {
8236                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8237                                "3390 Failed allocation for idle_stat\n");
8238                rc = -ENOMEM;
8239                goto out_free_hba_eq_info;
8240        }
8241
8242#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
8243        phba->sli4_hba.c_stat = alloc_percpu(struct lpfc_hdwq_stat);
8244        if (!phba->sli4_hba.c_stat) {
8245                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8246                                "3332 Failed allocating per cpu hdwq stats\n");
8247                rc = -ENOMEM;
8248                goto out_free_hba_idle_stat;
8249        }
8250#endif
8251
8252        phba->cmf_stat = alloc_percpu(struct lpfc_cgn_stat);
8253        if (!phba->cmf_stat) {
8254                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8255                                "3331 Failed allocating per cpu cgn stats\n");
8256                rc = -ENOMEM;
8257                goto out_free_hba_hdwq_info;
8258        }
8259
8260        /*
8261         * Enable sr-iov virtual functions if supported and configured
8262         * through the module parameter.
8263         */
8264        if (phba->cfg_sriov_nr_virtfn > 0) {
8265                rc = lpfc_sli_probe_sriov_nr_virtfn(phba,
8266                                                 phba->cfg_sriov_nr_virtfn);
8267                if (rc) {
8268                        lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
8269                                        "3020 Requested number of SR-IOV "
8270                                        "virtual functions (%d) is not "
8271                                        "supported\n",
8272                                        phba->cfg_sriov_nr_virtfn);
8273                        phba->cfg_sriov_nr_virtfn = 0;
8274                }
8275        }
8276
8277        return 0;
8278
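            /*
             * Error unwind: each label below releases what was allocated
             * after the previous label's resource, in reverse order of
             * setup -- the usual kernel goto-cleanup idiom.
             */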
8279out_free_hba_hdwq_info:
8280#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
8281        free_percpu(phba->sli4_hba.c_stat);
8282out_free_hba_idle_stat:
8283#endif
8284        kfree(phba->sli4_hba.idle_stat);
8285out_free_hba_eq_info:
8286        free_percpu(phba->sli4_hba.eq_info);
8287out_free_hba_cpu_map:
8288        kfree(phba->sli4_hba.cpu_map);
8289out_free_hba_eq_hdl:
8290        kfree(phba->sli4_hba.hba_eq_hdl);
8291out_free_fcf_rr_bmask:
8292        kfree(phba->fcf.fcf_rr_bmask);
8293out_remove_rpi_hdrs:
8294        lpfc_sli4_remove_rpi_hdrs(phba);
8295out_free_active_sgl:
8296        lpfc_free_active_sgl(phba);
8297out_destroy_cq_event_pool:
8298        lpfc_sli4_cq_event_pool_destroy(phba);
8299out_free_cmd_rsp_buf:
8300        dma_pool_destroy(phba->lpfc_cmd_rsp_buf_pool);
8301        phba->lpfc_cmd_rsp_buf_pool = NULL;
8302out_free_sg_dma_buf:
8303        dma_pool_destroy(phba->lpfc_sg_dma_buf_pool);
8304        phba->lpfc_sg_dma_buf_pool = NULL;
8305out_free_bsmbx:
8306        lpfc_destroy_bootstrap_mbox(phba);
8307out_free_mem:
8308        lpfc_mem_free(phba);
8309        return rc;
8310}
8311
8312/**
8313 * lpfc_sli4_driver_resource_unset - Unset drvr internal resources for SLI4 dev
8314 * @phba: pointer to lpfc hba data structure.
8315 *
8316 * This routine is invoked to unset the driver internal resources set up
8317 * specifically for supporting the SLI-4 HBA device it is attached to.
8318 **/
8319static void
8320lpfc_sli4_driver_resource_unset(struct lpfc_hba *phba)
8321{
8322        struct lpfc_fcf_conn_entry *conn_entry, *next_conn_entry;
8323
8324        free_percpu(phba->sli4_hba.eq_info);
8325#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
8326        free_percpu(phba->sli4_hba.c_stat);
8327#endif
8328        free_percpu(phba->cmf_stat);
8329        kfree(phba->sli4_hba.idle_stat);
8330
8331        /* Free memory allocated for msi-x interrupt vector to CPU mapping */
8332        kfree(phba->sli4_hba.cpu_map);
8333        phba->sli4_hba.num_possible_cpu = 0;
8334        phba->sli4_hba.num_present_cpu = 0;
8335        phba->sli4_hba.curr_disp_cpu = 0;
8336        cpumask_clear(&phba->sli4_hba.irq_aff_mask);
8337
8338        /* Free memory allocated for fast-path work queue handles */
8339        kfree(phba->sli4_hba.hba_eq_hdl);
8340
8341        /* Free the allocated rpi headers. */
8342        lpfc_sli4_remove_rpi_hdrs(phba);
8343        lpfc_sli4_remove_rpis(phba);
8344
8345        /* Free eligible FCF index bmask */
8346        kfree(phba->fcf.fcf_rr_bmask);
8347
8348        /* Free the ELS sgl list */
8349        lpfc_free_active_sgl(phba);
8350        lpfc_free_els_sgl_list(phba);
8351        lpfc_free_nvmet_sgl_list(phba);
8352
8353        /* Free the completion queue EQ event pool */
8354        lpfc_sli4_cq_event_release_all(phba);
8355        lpfc_sli4_cq_event_pool_destroy(phba);
8356
8357        /* Release resource identifiers. */
8358        lpfc_sli4_dealloc_resource_identifiers(phba);
8359
8360        /* Free the bsmbx region. */
8361        lpfc_destroy_bootstrap_mbox(phba);
8362
8363        /* Free the SLI Layer memory with SLI4 HBAs */
8364        lpfc_mem_free_all(phba);
8365
8366        /* Free the current connect table */
8367        list_for_each_entry_safe(conn_entry, next_conn_entry,
8368                &phba->fcf_conn_rec_list, list) {
8369                list_del_init(&conn_entry->list);
8370                kfree(conn_entry);
8371        }
8372
8373        return;
8374}
8375
8376/**
8377 * lpfc_init_api_table_setup - Set up init api function jump table
8378 * @phba: The hba struct for which this call is being executed.
8379 * @dev_grp: The HBA PCI-Device group number.
8380 *
8381 * This routine sets up the device INIT interface API function jump table
8382 * in @phba struct.
8383 *
8384 * Returns: 0 - success, -ENODEV - failure.
8385 **/
8386int
8387lpfc_init_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
8388{
8389        phba->lpfc_hba_init_link = lpfc_hba_init_link;
8390        phba->lpfc_hba_down_link = lpfc_hba_down_link;
8391        phba->lpfc_selective_reset = lpfc_selective_reset;
8392        switch (dev_grp) {
8393        case LPFC_PCI_DEV_LP:
8394                phba->lpfc_hba_down_post = lpfc_hba_down_post_s3;
8395                phba->lpfc_handle_eratt = lpfc_handle_eratt_s3;
8396                phba->lpfc_stop_port = lpfc_stop_port_s3;
8397                break;
8398        case LPFC_PCI_DEV_OC:
8399                phba->lpfc_hba_down_post = lpfc_hba_down_post_s4;
8400                phba->lpfc_handle_eratt = lpfc_handle_eratt_s4;
8401                phba->lpfc_stop_port = lpfc_stop_port_s4;
8402                break;
8403        default:
8404                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8405                                "1431 Invalid HBA PCI-device group: 0x%x\n",
8406                                dev_grp);
8407                return -ENODEV;
8408        }
8409        return 0;
8410}
8411
8412/**
8413 * lpfc_setup_driver_resource_phase2 - Phase2 setup driver internal resources.
8414 * @phba: pointer to lpfc hba data structure.
8415 *
8416 * This routine is invoked to set up the driver internal resources after the
8417 * device-specific resource setup to support the HBA device it is attached to.
8418 *
8419 * Return codes
8420 *      0 - successful
8421 *      other values - error
8422 **/
8423static int
8424lpfc_setup_driver_resource_phase2(struct lpfc_hba *phba)
8425{
8426        int error;
8427
8428        /* Startup the kernel thread for this host adapter. */
8429        phba->worker_thread = kthread_run(lpfc_do_work, phba,
8430                                          "lpfc_worker_%d", phba->brd_no);
8431        if (IS_ERR(phba->worker_thread)) {
8432                error = PTR_ERR(phba->worker_thread);
8433                return error;
8434        }
8435
8436        return 0;
8437}
8438
8439/**
8440 * lpfc_unset_driver_resource_phase2 - Phase2 unset driver internal resources.
8441 * @phba: pointer to lpfc hba data structure.
8442 *
8443 * This routine is invoked to unset the driver internal resources set up after
8444 * the device-specific resource setup for supporting the HBA device it
8445 * is attached to.
8446 **/
8447static void
8448lpfc_unset_driver_resource_phase2(struct lpfc_hba *phba)
8449{
8450        if (phba->wq) {
8451                flush_workqueue(phba->wq);
8452                destroy_workqueue(phba->wq);
8453                phba->wq = NULL;
8454        }
8455
8456        /* Stop kernel worker thread */
8457        if (phba->worker_thread)
8458                kthread_stop(phba->worker_thread);
8459}
8460
8461/**
8462 * lpfc_free_iocb_list - Free iocb list.
8463 * @phba: pointer to lpfc hba data structure.
8464 *
8465 * This routine is invoked to free the driver's IOCB list and memory.
8466 **/
8467void
8468lpfc_free_iocb_list(struct lpfc_hba *phba)
8469{
8470        struct lpfc_iocbq *iocbq_entry = NULL, *iocbq_next = NULL;
8471
8472        spin_lock_irq(&phba->hbalock);
8473        list_for_each_entry_safe(iocbq_entry, iocbq_next,
8474                                 &phba->lpfc_iocb_list, list) {
8475                list_del(&iocbq_entry->list);
8476                kfree(iocbq_entry);
8477                phba->total_iocbq_bufs--;
8478        }
8479        spin_unlock_irq(&phba->hbalock);
8480
8481        return;
8482}
8483
8484/**
8485 * lpfc_init_iocb_list - Allocate and initialize iocb list.
8486 * @phba: pointer to lpfc hba data structure.
8487 * @iocb_count: number of requested iocbs
8488 *
8489 * This routine is invoked to allocate and initialize the driver's IOCB
8490 * list and set up the IOCB tag array accordingly.
8491 *
8492 * Return codes
8493 *      0 - successful
8494 *      other values - error
8495 **/
8496int
8497lpfc_init_iocb_list(struct lpfc_hba *phba, int iocb_count)
8498{
8499        struct lpfc_iocbq *iocbq_entry = NULL;
8500        uint16_t iotag;
8501        int i;
8502
8503        /* Initialize and populate the iocb list per host.  */
8504        INIT_LIST_HEAD(&phba->lpfc_iocb_list);
8505        for (i = 0; i < iocb_count; i++) {
8506                iocbq_entry = kzalloc(sizeof(struct lpfc_iocbq), GFP_KERNEL);
8507                if (iocbq_entry == NULL) {
8508                        printk(KERN_ERR "%s: only allocated %d iocbs of "
8509                                "expected %d count. Unloading driver.\n",
8510                                __func__, i, iocb_count);
8511                        goto out_free_iocbq;
8512                }
8513
8514                iotag = lpfc_sli_next_iotag(phba, iocbq_entry);
8515                if (iotag == 0) {
8516                        kfree(iocbq_entry);
8517                        printk(KERN_ERR "%s: failed to allocate IOTAG. "
8518                                "Unloading driver.\n", __func__);
8519                        goto out_free_iocbq;
8520                }
8521                iocbq_entry->sli4_lxritag = NO_XRI;
8522                iocbq_entry->sli4_xritag = NO_XRI;
8523
8524                spin_lock_irq(&phba->hbalock);
8525                list_add(&iocbq_entry->list, &phba->lpfc_iocb_list);
8526                phba->total_iocbq_bufs++;
8527                spin_unlock_irq(&phba->hbalock);
8528        }
8529
8530        return 0;
8531
8532out_free_iocbq:
8533        lpfc_free_iocb_list(phba);
8534
8535        return -ENOMEM;
8536}
8537
8538/**
8539 * lpfc_free_sgl_list - Free a given sgl list.
8540 * @phba: pointer to lpfc hba data structure.
8541 * @sglq_list: pointer to the head of sgl list.
8542 *
8543 * This routine is invoked to free a given sgl list and memory.
8544 **/
8545void
8546lpfc_free_sgl_list(struct lpfc_hba *phba, struct list_head *sglq_list)
8547{
8548        struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
8549
8550        list_for_each_entry_safe(sglq_entry, sglq_next, sglq_list, list) {
8551                list_del(&sglq_entry->list);
8552                lpfc_mbuf_free(phba, sglq_entry->virt, sglq_entry->phys);
8553                kfree(sglq_entry);
8554        }
8555}
8556
8557/**
8558 * lpfc_free_els_sgl_list - Free els sgl list.
8559 * @phba: pointer to lpfc hba data structure.
8560 *
8561 * This routine is invoked to free the driver's els sgl list and memory.
8562 **/
8563static void
8564lpfc_free_els_sgl_list(struct lpfc_hba *phba)
8565{
8566        LIST_HEAD(sglq_list);
8567
8568        /* Retrieve all els sgls from driver list */
8569        spin_lock_irq(&phba->sli4_hba.sgl_list_lock);
8570        list_splice_init(&phba->sli4_hba.lpfc_els_sgl_list, &sglq_list);
8571        spin_unlock_irq(&phba->sli4_hba.sgl_list_lock);
8572
8573        /* Now free the sgl list */
8574        lpfc_free_sgl_list(phba, &sglq_list);
8575}
8576
8577/**
8578 * lpfc_free_nvmet_sgl_list - Free nvmet sgl list.
8579 * @phba: pointer to lpfc hba data structure.
8580 *
8581 * This routine is invoked to free the driver's nvmet sgl list and memory.
8582 **/
8583static void
8584lpfc_free_nvmet_sgl_list(struct lpfc_hba *phba)
8585{
8586        struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
8587        LIST_HEAD(sglq_list);
8588
8589        /* Retrieve all nvmet sgls from driver list */
8590        spin_lock_irq(&phba->hbalock);
8591        spin_lock(&phba->sli4_hba.sgl_list_lock);
8592        list_splice_init(&phba->sli4_hba.lpfc_nvmet_sgl_list, &sglq_list);
8593        spin_unlock(&phba->sli4_hba.sgl_list_lock);
8594        spin_unlock_irq(&phba->hbalock);
8595
8596        /* Now free the sgl list */
8597        list_for_each_entry_safe(sglq_entry, sglq_next, &sglq_list, list) {
8598                list_del(&sglq_entry->list);
8599                lpfc_nvmet_buf_free(phba, sglq_entry->virt, sglq_entry->phys);
8600                kfree(sglq_entry);
8601        }
8602
8603        /* Update the nvmet_xri_cnt to reflect no current sgls.
8604         * The next initialization cycle sets the count and allocates
8605         * the sgls over again.
8606         */
8607        phba->sli4_hba.nvmet_xri_cnt = 0;
8608}
8609
8610/**
8611 * lpfc_init_active_sgl_array - Allocate the buf to track active ELS XRIs.
8612 * @phba: pointer to lpfc hba data structure.
8613 *
8614 * This routine is invoked to allocate the driver's active sgl memory.
8615 * This array will hold the sglq_entry's for active IOs.
8616 **/
8617static int
8618lpfc_init_active_sgl_array(struct lpfc_hba *phba)
8619{
8620        phba->sli4_hba.lpfc_sglq_active_list =
8621                kcalloc(phba->sli4_hba.max_cfg_param.max_xri,
8622                        sizeof(struct lpfc_sglq *), GFP_KERNEL);
8626        if (!phba->sli4_hba.lpfc_sglq_active_list)
8627                return -ENOMEM;
8628        return 0;
8629}
8630
8631/**
8632 * lpfc_free_active_sgl - Free the buf that tracks active ELS XRIs.
8633 * @phba: pointer to lpfc hba data structure.
8634 *
8635 * This routine is invoked to walk through the array of active sglq entries
8636 * and free all of the resources.
8637 * This is just a place holder for now.
8638 **/
8639static void
8640lpfc_free_active_sgl(struct lpfc_hba *phba)
8641{
8642        kfree(phba->sli4_hba.lpfc_sglq_active_list);
8643}
8644
8645/**
8646 * lpfc_init_sgl_list - Allocate and initialize sgl list.
8647 * @phba: pointer to lpfc hba data structure.
8648 *
8649 * This routine is invoked to allocate and initialize the driver's sgl
8650 * list and set up the sgl xritag array accordingly.
8651 *
8652 **/
8653static void
8654lpfc_init_sgl_list(struct lpfc_hba *phba)
8655{
8656        /* Initialize and populate the sglq list per host/VF. */
8657        INIT_LIST_HEAD(&phba->sli4_hba.lpfc_els_sgl_list);
8658        INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_els_sgl_list);
8659        INIT_LIST_HEAD(&phba->sli4_hba.lpfc_nvmet_sgl_list);
8660        INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
8661
8662        /* els xri-sgl bookkeeping */
8663        phba->sli4_hba.els_xri_cnt = 0;
8664
8665        /* nvme xri-buffer bookkeeping */
8666        phba->sli4_hba.io_xri_cnt = 0;
8667}
8668
8669/**
8670 * lpfc_sli4_init_rpi_hdrs - Post the rpi header memory region to the port
8671 * @phba: pointer to lpfc hba data structure.
8672 *
8673 * This routine is invoked to post rpi header templates to the
8674 * port for those SLI4 ports that do not support extents.  This routine
8675 * posts a PAGE_SIZE memory region to the port to hold up to
8676 * PAGE_SIZE / 64 rpi context headers.  This is an initialization routine
8677 * and should be called only when interrupts are disabled.
8678 *
8679 * Return codes
8680 *      0 - successful
8681 *      -ERROR - otherwise.
8682 **/
8683int
8684lpfc_sli4_init_rpi_hdrs(struct lpfc_hba *phba)
8685{
8686        int rc = 0;
8687        struct lpfc_rpi_hdr *rpi_hdr;
8688
8689        INIT_LIST_HEAD(&phba->sli4_hba.lpfc_rpi_hdr_list);
8690        if (!phba->sli4_hba.rpi_hdrs_in_use)
8691                return rc;
8692        if (phba->sli4_hba.extents_in_use)
8693                return -EIO;
8694
8695        rpi_hdr = lpfc_sli4_create_rpi_hdr(phba);
8696        if (!rpi_hdr) {
8697                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8698                                "0391 Error during rpi post operation\n");
8699                lpfc_sli4_remove_rpis(phba);
8700                rc = -ENODEV;
8701        }
8702
8703        return rc;
8704}
8705
8706/**
8707 * lpfc_sli4_create_rpi_hdr - Allocate an rpi header memory region
8708 * @phba: pointer to lpfc hba data structure.
8709 *
8710 * This routine is invoked to allocate a single 4KB memory region to
8711 * support rpis and stores them in the phba.  This single region
8712 * provides support for up to 64 rpis.  The region is used globally
8713 * by the device.
8714 *
8715 * Returns:
8716 *   A valid rpi hdr on success.
8717 *   A NULL pointer on any failure.
8718 **/
8719struct lpfc_rpi_hdr *
8720lpfc_sli4_create_rpi_hdr(struct lpfc_hba *phba)
8721{
8722        uint16_t rpi_limit, curr_rpi_range;
8723        struct lpfc_dmabuf *dmabuf;
8724        struct lpfc_rpi_hdr *rpi_hdr;
8725
8726        /*
8727         * If the SLI4 port supports extents, posting the rpi header isn't
8728         * required.  Set the expected maximum count and let the actual value
8729         * get set when extents are fully allocated.
8730         */
8731        if (!phba->sli4_hba.rpi_hdrs_in_use)
8732                return NULL;
8733        if (phba->sli4_hba.extents_in_use)
8734                return NULL;
8735
8736        /* The limit on the logical index is just the max_rpi count. */
8737        rpi_limit = phba->sli4_hba.max_cfg_param.max_rpi;
8738
8739        spin_lock_irq(&phba->hbalock);
8740        /*
8741         * Establish the starting RPI in this header block.  The starting
8742         * rpi is normalized to a zero base because the physical rpi is
8743         * port based.
8744         */
8745        curr_rpi_range = phba->sli4_hba.next_rpi;
8746        spin_unlock_irq(&phba->hbalock);
8747
8748        /* Reached full RPI range */
8749        if (curr_rpi_range == rpi_limit)
8750                return NULL;
8751
8752        /*
8753         * First allocate the protocol header region for the port.  The
8754         * port expects a 4KB DMA-mapped memory region that is 4K aligned.
8755         */
8756        dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
8757        if (!dmabuf)
8758                return NULL;
8759
8760        dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
8761                                          LPFC_HDR_TEMPLATE_SIZE,
8762                                          &dmabuf->phys, GFP_KERNEL);
8763        if (!dmabuf->virt) {
8764                rpi_hdr = NULL;
8765                goto err_free_dmabuf;
8766        }
8767
8768        if (!IS_ALIGNED(dmabuf->phys, LPFC_HDR_TEMPLATE_SIZE)) {
8769                rpi_hdr = NULL;
8770                goto err_free_coherent;
8771        }
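            /*
             * Defensive check: the DMA API documents that coherent
             * allocations are aligned to at least the smallest page order
             * covering the request, so this 4KB request should already meet
             * the port's 4K alignment requirement for the rpi header region.
             */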
8772
8773        /* Save the rpi header data for cleanup later. */
8774        rpi_hdr = kzalloc(sizeof(struct lpfc_rpi_hdr), GFP_KERNEL);
8775        if (!rpi_hdr)
8776                goto err_free_coherent;
8777
8778        rpi_hdr->dmabuf = dmabuf;
8779        rpi_hdr->len = LPFC_HDR_TEMPLATE_SIZE;
8780        rpi_hdr->page_count = 1;
8781        spin_lock_irq(&phba->hbalock);
8782
8783        /* The rpi_hdr stores the logical index only. */
8784        rpi_hdr->start_rpi = curr_rpi_range;
8785        rpi_hdr->next_rpi = phba->sli4_hba.next_rpi + LPFC_RPI_HDR_COUNT;
8786        list_add_tail(&rpi_hdr->list, &phba->sli4_hba.lpfc_rpi_hdr_list);
8787
8788        spin_unlock_irq(&phba->hbalock);
8789        return rpi_hdr;
8790
8791 err_free_coherent:
8792        dma_free_coherent(&phba->pcidev->dev, LPFC_HDR_TEMPLATE_SIZE,
8793                          dmabuf->virt, dmabuf->phys);
8794 err_free_dmabuf:
8795        kfree(dmabuf);
8796        return NULL;
8797}
8798
8799/**
8800 * lpfc_sli4_remove_rpi_hdrs - Remove all rpi header memory regions
8801 * @phba: pointer to lpfc hba data structure.
8802 *
8803 * This routine is invoked to remove all memory resources allocated
8804 * to support rpis for SLI4 ports not supporting extents. This routine
8805 * presumes the caller has released all rpis consumed by fabric or port
8806 * logins and is prepared to have the header pages removed.
8807 **/
8808void
8809lpfc_sli4_remove_rpi_hdrs(struct lpfc_hba *phba)
8810{
8811        struct lpfc_rpi_hdr *rpi_hdr, *next_rpi_hdr;
8812
8813        if (!phba->sli4_hba.rpi_hdrs_in_use)
8814                goto exit;
8815
8816        list_for_each_entry_safe(rpi_hdr, next_rpi_hdr,
8817                                 &phba->sli4_hba.lpfc_rpi_hdr_list, list) {
8818                list_del(&rpi_hdr->list);
8819                dma_free_coherent(&phba->pcidev->dev, rpi_hdr->len,
8820                                  rpi_hdr->dmabuf->virt, rpi_hdr->dmabuf->phys);
8821                kfree(rpi_hdr->dmabuf);
8822                kfree(rpi_hdr);
8823        }
8824 exit:
8825        /* There are no rpis available to the port now. */
8826        phba->sli4_hba.next_rpi = 0;
8827}
8828
8829/**
8830 * lpfc_hba_alloc - Allocate driver hba data structure for a device.
8831 * @pdev: pointer to pci device data structure.
8832 *
8833 * This routine is invoked to allocate the driver hba data structure for an
8834 * HBA device. If the allocation is successful, the phba reference to the
8835 * PCI device data structure is set.
8836 *
8837 * Return codes
8838 *      pointer to @phba - successful
8839 *      NULL - error
8840 **/
8841static struct lpfc_hba *
8842lpfc_hba_alloc(struct pci_dev *pdev)
8843{
8844        struct lpfc_hba *phba;
8845
8846        /* Allocate memory for HBA structure */
8847        phba = kzalloc(sizeof(struct lpfc_hba), GFP_KERNEL);
8848        if (!phba) {
8849                dev_err(&pdev->dev, "failed to allocate hba struct\n");
8850                return NULL;
8851        }
8852
8853        /* Set reference to PCI device in HBA structure */
8854        phba->pcidev = pdev;
8855
8856        /* Assign an unused board number */
8857        phba->brd_no = lpfc_get_instance();
8858        if (phba->brd_no < 0) {
8859                kfree(phba);
8860                return NULL;
8861        }
8862        phba->eratt_poll_interval = LPFC_ERATT_POLL_INTERVAL;
8863
8864        spin_lock_init(&phba->ct_ev_lock);
8865        INIT_LIST_HEAD(&phba->ct_ev_waiters);
8866
8867        return phba;
8868}
8869
8870/**
8871 * lpfc_hba_free - Free driver hba data structure with a device.
8872 * @phba: pointer to lpfc hba data structure.
8873 *
8874 * This routine is invoked to free the driver hba data structure with an
8875 * HBA device.
8876 **/
8877static void
8878lpfc_hba_free(struct lpfc_hba *phba)
8879{
8880        if (phba->sli_rev == LPFC_SLI_REV4)
8881                kfree(phba->sli4_hba.hdwq);
8882
8883        /* Release the driver assigned board number */
8884        idr_remove(&lpfc_hba_index, phba->brd_no);
8885
8886        /* Free memory allocated with sli3 rings */
8887        kfree(phba->sli.sli3_ring);
8888        phba->sli.sli3_ring = NULL;
8889
8890        kfree(phba);
8891        return;
8892}
8893
8894/**
8895 * lpfc_create_shost - Create hba physical port with associated scsi host.
8896 * @phba: pointer to lpfc hba data structure.
8897 *
8898 * This routine is invoked to create HBA physical port and associate a SCSI
8899 * host with it.
8900 *
8901 * Return codes
8902 *      0 - successful
8903 *      other values - error
8904 **/
8905static int
8906lpfc_create_shost(struct lpfc_hba *phba)
8907{
8908        struct lpfc_vport *vport;
8909        struct Scsi_Host  *shost;
8910
8911        /* Initialize HBA FC structure */
8912        phba->fc_edtov = FF_DEF_EDTOV;
8913        phba->fc_ratov = FF_DEF_RATOV;
8914        phba->fc_altov = FF_DEF_ALTOV;
8915        phba->fc_arbtov = FF_DEF_ARBTOV;
8916
8917        atomic_set(&phba->sdev_cnt, 0);
8918        vport = lpfc_create_port(phba, phba->brd_no, &phba->pcidev->dev);
8919        if (!vport)
8920                return -ENODEV;
8921
8922        shost = lpfc_shost_from_vport(vport);
8923        phba->pport = vport;
8924
8925        if (phba->nvmet_support) {
8926                /* Only 1 vport (pport) will support NVME target */
8927                phba->targetport = NULL;
8928                phba->cfg_enable_fc4_type = LPFC_ENABLE_NVME;
8929                lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_NVME_DISC,
8930                                "6076 NVME Target Found\n");
8931        }
8932
8933        lpfc_debugfs_initialize(vport);
8934        /* Put reference to SCSI host into the driver's device private data */
8935        pci_set_drvdata(phba->pcidev, shost);
8936
8937        /*
8938         * At this point we are fully registered with PSA. In addition,
8939         * any initial discovery should be completed.
8940         */
8941        vport->load_flag |= FC_ALLOW_FDMI;
8942        if (phba->cfg_enable_SmartSAN ||
8943            (phba->cfg_fdmi_on == LPFC_FDMI_SUPPORT)) {
8944
8945                /* Setup appropriate attribute masks */
8946                vport->fdmi_hba_mask = LPFC_FDMI2_HBA_ATTR;
8947                if (phba->cfg_enable_SmartSAN)
8948                        vport->fdmi_port_mask = LPFC_FDMI2_SMART_ATTR;
8949                else
8950                        vport->fdmi_port_mask = LPFC_FDMI2_PORT_ATTR;
8951        }
8952        return 0;
8953}
8954
8955/**
8956 * lpfc_destroy_shost - Destroy hba physical port with associated scsi host.
8957 * @phba: pointer to lpfc hba data structure.
8958 *
8959 * This routine is invoked to destroy HBA physical port and the associated
8960 * SCSI host.
8961 **/
8962static void
8963lpfc_destroy_shost(struct lpfc_hba *phba)
8964{
8965        struct lpfc_vport *vport = phba->pport;
8966
8967        /* Destroy physical port that associated with the SCSI host */
8968        destroy_port(vport);
8969
8970        return;
8971}
8972
8973/**
8974 * lpfc_setup_bg - Setup Block guard structures and debug areas.
8975 * @phba: pointer to lpfc hba data structure.
8976 * @shost: the shost to be used to detect Block guard settings.
8977 *
8978 * This routine sets up the local Block guard protocol settings for @shost.
8979 * This routine also allocates memory for debugging bg buffers.
8980 **/
8981static void
8982lpfc_setup_bg(struct lpfc_hba *phba, struct Scsi_Host *shost)
8983{
8984        uint32_t old_mask;
8985        uint32_t old_guard;
8986
8987        if (phba->cfg_prot_mask && phba->cfg_prot_guard) {
8988                lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
8989                                "1478 Registering BlockGuard with the "
8990                                "SCSI layer\n");
8991
8992                old_mask = phba->cfg_prot_mask;
8993                old_guard = phba->cfg_prot_guard;
8994
8995                /* Only allow supported values */
8996                phba->cfg_prot_mask &= (SHOST_DIF_TYPE1_PROTECTION |
8997                        SHOST_DIX_TYPE0_PROTECTION |
8998                        SHOST_DIX_TYPE1_PROTECTION);
8999                phba->cfg_prot_guard &= (SHOST_DIX_GUARD_IP |
9000                                         SHOST_DIX_GUARD_CRC);
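                    /*
                     * Background (general SCSI-layer semantics): DIF covers
                     * the HBA-to-target leg and DIX the OS-to-HBA leg; only
                     * DIF Type 1 and DIX Type 0/1, with IP or CRC guard
                     * tags, are kept here.
                     */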
9001
9002                /* DIF Type 1 protection for profiles AST1/C1 is end to end */
9003                if (phba->cfg_prot_mask == SHOST_DIX_TYPE1_PROTECTION)
9004                        phba->cfg_prot_mask |= SHOST_DIF_TYPE1_PROTECTION;
9005
9006                if (phba->cfg_prot_mask && phba->cfg_prot_guard) {
9007                        if ((old_mask != phba->cfg_prot_mask) ||
9008                                (old_guard != phba->cfg_prot_guard))
9009                                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9010                                        "1475 Registering BlockGuard with the "
9011                                        "SCSI layer: mask %d  guard %d\n",
9012                                        phba->cfg_prot_mask,
9013                                        phba->cfg_prot_guard);
9014
9015                        scsi_host_set_prot(shost, phba->cfg_prot_mask);
9016                        scsi_host_set_guard(shost, phba->cfg_prot_guard);
9017                } else {
9018                        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9019                                "1479 Not Registering BlockGuard with the SCSI "
9020                                "layer, Bad protection parameters: %d %d\n",
9021                                old_mask, old_guard);
                    }
9022        }
9023}
9024
9025/**
9026 * lpfc_post_init_setup - Perform necessary device post initialization setup.
9027 * @phba: pointer to lpfc hba data structure.
9028 *
9029 * This routine is invoked to perform all the necessary post initialization
9030 * setup for the device.
9031 **/
9032static void
9033lpfc_post_init_setup(struct lpfc_hba *phba)
9034{
9035        struct Scsi_Host  *shost;
9036        struct lpfc_adapter_event_header adapter_event;
9037
9038        /* Get the default values for Model Name and Description */
9039        lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);
9040
9041        /*
9042         * hba setup may have changed the hba_queue_depth so we need to
9043         * adjust the value of can_queue.
9044         */
9045        shost = pci_get_drvdata(phba->pcidev);
9046        shost->can_queue = phba->cfg_hba_queue_depth - 10;
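            /*
             * The '- 10' presumably keeps a little headroom below the HBA
             * queue depth for driver-internal commands (an inference; the
             * source does not say why 10).
             */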
9047
9048        lpfc_host_attrib_init(shost);
9049
9050        if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
9051                spin_lock_irq(shost->host_lock);
9052                lpfc_poll_start_timer(phba);
9053                spin_unlock_irq(shost->host_lock);
9054        }
9055
9056        lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
9057                        "0428 Perform SCSI scan\n");
9058        /* Send board arrival event to upper layer */
9059        adapter_event.event_type = FC_REG_ADAPTER_EVENT;
9060        adapter_event.subcategory = LPFC_EVENT_ARRIVAL;
9061        fc_host_post_vendor_event(shost, fc_get_event_number(),
9062                                  sizeof(adapter_event),
9063                                  (char *) &adapter_event,
9064                                  LPFC_NL_VENDOR_ID);
9065        return;
9066}
9067
9068/**
9069 * lpfc_sli_pci_mem_setup - Setup SLI3 HBA PCI memory space.
9070 * @phba: pointer to lpfc hba data structure.
9071 *
9072 * This routine is invoked to set up the PCI device memory space for device
9073 * with SLI-3 interface spec.
9074 *
9075 * Return codes
9076 *      0 - successful
9077 *      other values - error
9078 **/
9079static int
9080lpfc_sli_pci_mem_setup(struct lpfc_hba *phba)
9081{
9082        struct pci_dev *pdev = phba->pcidev;
9083        unsigned long bar0map_len, bar2map_len;
9084        int i, hbq_count;
9085        void *ptr;
9086        int error;
9087
9088        if (!pdev)
9089                return -ENODEV;
9090
9091        /* Set the device DMA mask size */
9092        error = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
9093        if (error)
9094                error = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
9095        if (error)
9096                return error;
9097        error = -ENODEV;
9098
9099        /* Get the bus address of Bar0 and Bar2 and the number of bytes
9100         * required by each mapping.
9101         */
9102        phba->pci_bar0_map = pci_resource_start(pdev, 0);
9103        bar0map_len = pci_resource_len(pdev, 0);
9104
9105        phba->pci_bar2_map = pci_resource_start(pdev, 2);
9106        bar2map_len = pci_resource_len(pdev, 2);
9107
9108        /* Map HBA SLIM to a kernel virtual address. */
9109        phba->slim_memmap_p = ioremap(phba->pci_bar0_map, bar0map_len);
9110        if (!phba->slim_memmap_p) {
9111                dev_printk(KERN_ERR, &pdev->dev,
9112                           "ioremap failed for SLIM memory.\n");
9113                goto out;
9114        }
9115
9116        /* Map HBA Control Registers to a kernel virtual address. */
9117        phba->ctrl_regs_memmap_p = ioremap(phba->pci_bar2_map, bar2map_len);
9118        if (!phba->ctrl_regs_memmap_p) {
9119                dev_printk(KERN_ERR, &pdev->dev,
9120                           "ioremap failed for HBA control registers.\n");
9121                goto out_iounmap_slim;
9122        }
9123
9124        /* Allocate memory for SLI-2 structures */
9125        phba->slim2p.virt = dma_alloc_coherent(&pdev->dev, SLI2_SLIM_SIZE,
9126                                               &phba->slim2p.phys, GFP_KERNEL);
9127        if (!phba->slim2p.virt)
9128                goto out_iounmap;
9129
9130        phba->mbox = phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, mbx);
9131        phba->mbox_ext = (phba->slim2p.virt +
9132                offsetof(struct lpfc_sli2_slim, mbx_ext_words));
9133        phba->pcb = (phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, pcb));
9134        phba->IOCBs = (phba->slim2p.virt +
9135                       offsetof(struct lpfc_sli2_slim, IOCBs));
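        /*
         * For reference: the four pointers above carve the single coherent
         * slim2p allocation into the SLI-2 SLIM layout described by
         * struct lpfc_sli2_slim (mailbox, mailbox extension words, port
         * control block, and the IOCB area), using offsetof() so the
         * layout is defined in exactly one place.
         */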
9136
9137        phba->hbqslimp.virt = dma_alloc_coherent(&pdev->dev,
9138                                                 lpfc_sli_hbq_size(),
9139                                                 &phba->hbqslimp.phys,
9140                                                 GFP_KERNEL);
9141        if (!phba->hbqslimp.virt)
9142                goto out_free_slim;
9143
9144        hbq_count = lpfc_sli_hbq_count();
9145        ptr = phba->hbqslimp.virt;
9146        for (i = 0; i < hbq_count; ++i) {
9147                phba->hbqs[i].hbq_virt = ptr;
9148                INIT_LIST_HEAD(&phba->hbqs[i].hbq_buffer_list);
9149                ptr += (lpfc_hbq_defs[i]->entry_count *
9150                        sizeof(struct lpfc_hbq_entry));
9151        }
9152        phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_els_hbq_alloc;
9153        phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_els_hbq_free;
9154
9155        memset(phba->hbqslimp.virt, 0, lpfc_sli_hbq_size());
9156
9157        phba->MBslimaddr = phba->slim_memmap_p;
9158        phba->HAregaddr = phba->ctrl_regs_memmap_p + HA_REG_OFFSET;
9159        phba->CAregaddr = phba->ctrl_regs_memmap_p + CA_REG_OFFSET;
9160        phba->HSregaddr = phba->ctrl_regs_memmap_p + HS_REG_OFFSET;
9161        phba->HCregaddr = phba->ctrl_regs_memmap_p + HC_REG_OFFSET;
9162
9163        return 0;
9164
9165out_free_slim:
9166        dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
9167                          phba->slim2p.virt, phba->slim2p.phys);
9168out_iounmap:
9169        iounmap(phba->ctrl_regs_memmap_p);
9170out_iounmap_slim:
9171        iounmap(phba->slim_memmap_p);
9172out:
9173        return error;
9174}
9175
9176/**
9177 * lpfc_sli_pci_mem_unset - Unset SLI3 HBA PCI memory space.
9178 * @phba: pointer to lpfc hba data structure.
9179 *
9180 * This routine is invoked to unset the PCI device memory space for device
9181 * with SLI-3 interface spec.
9182 **/
9183static void
9184lpfc_sli_pci_mem_unset(struct lpfc_hba *phba)
9185{
9186        struct pci_dev *pdev;
9187
9188        /* Obtain PCI device reference */
9189        if (!phba->pcidev)
9190                return;
9191        else
9192                pdev = phba->pcidev;
9193
9194        /* Free coherent DMA memory allocated */
9195        dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(),
9196                          phba->hbqslimp.virt, phba->hbqslimp.phys);
9197        dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
9198                          phba->slim2p.virt, phba->slim2p.phys);
9199
9200        /* I/O memory unmap */
9201        iounmap(phba->ctrl_regs_memmap_p);
9202        iounmap(phba->slim_memmap_p);
9203
9204        return;
9205}
9206
9207/**
9208 * lpfc_sli4_post_status_check - Wait for SLI4 POST done and check status
9209 * @phba: pointer to lpfc hba data structure.
9210 *
9211 * This routine is invoked to wait for SLI4 device Power On Self Test (POST)
9212 * done and check status.
9213 *
9214 * Return 0 if successful, otherwise -ENODEV.
9215 **/
9216int
9217lpfc_sli4_post_status_check(struct lpfc_hba *phba)
9218{
9219        struct lpfc_register portsmphr_reg, uerrlo_reg, uerrhi_reg;
9220        struct lpfc_register reg_data;
9221        int i, port_error = 0;
9222        uint32_t if_type;
9223
9224        memset(&portsmphr_reg, 0, sizeof(portsmphr_reg));
9225        memset(&reg_data, 0, sizeof(reg_data));
9226        if (!phba->sli4_hba.PSMPHRregaddr)
9227                return -ENODEV;
9228
9229        /* Wait up to 30 seconds (3000 x 10 ms polls) for SLI Port POST done and ready */
9230        for (i = 0; i < 3000; i++) {
9231                if (lpfc_readl(phba->sli4_hba.PSMPHRregaddr,
9232                        &portsmphr_reg.word0) ||
9233                        (bf_get(lpfc_port_smphr_perr, &portsmphr_reg))) {
9234                        /* Port has a fatal POST error, break out */
9235                        port_error = -ENODEV;
9236                        break;
9237                }
9238                if (LPFC_POST_STAGE_PORT_READY ==
9239                    bf_get(lpfc_port_smphr_port_status, &portsmphr_reg))
9240                        break;
9241                msleep(10);
9242        }
9243
9244        /*
9245         * If there was a port error during POST, then don't proceed with
9246         * other register reads as the data may not be valid.  Just exit.
9247         */
9248        if (port_error) {
9249                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9250                        "1408 Port Failed POST - portsmphr=0x%x, "
9251                        "perr=x%x, sfi=x%x, nip=x%x, ipc=x%x, scr1=x%x, "
9252                        "scr2=x%x, hscratch=x%x, pstatus=x%x\n",
9253                        portsmphr_reg.word0,
9254                        bf_get(lpfc_port_smphr_perr, &portsmphr_reg),
9255                        bf_get(lpfc_port_smphr_sfi, &portsmphr_reg),
9256                        bf_get(lpfc_port_smphr_nip, &portsmphr_reg),
9257                        bf_get(lpfc_port_smphr_ipc, &portsmphr_reg),
9258                        bf_get(lpfc_port_smphr_scr1, &portsmphr_reg),
9259                        bf_get(lpfc_port_smphr_scr2, &portsmphr_reg),
9260                        bf_get(lpfc_port_smphr_host_scratch, &portsmphr_reg),
9261                        bf_get(lpfc_port_smphr_port_status, &portsmphr_reg));
9262        } else {
9263                lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
9264                                "2534 Device Info: SLIFamily=0x%x, "
9265                                "SLIRev=0x%x, IFType=0x%x, SLIHint_1=0x%x, "
9266                                "SLIHint_2=0x%x, FT=0x%x\n",
9267                                bf_get(lpfc_sli_intf_sli_family,
9268                                       &phba->sli4_hba.sli_intf),
9269                                bf_get(lpfc_sli_intf_slirev,
9270                                       &phba->sli4_hba.sli_intf),
9271                                bf_get(lpfc_sli_intf_if_type,
9272                                       &phba->sli4_hba.sli_intf),
9273                                bf_get(lpfc_sli_intf_sli_hint1,
9274                                       &phba->sli4_hba.sli_intf),
9275                                bf_get(lpfc_sli_intf_sli_hint2,
9276                                       &phba->sli4_hba.sli_intf),
9277                                bf_get(lpfc_sli_intf_func_type,
9278                                       &phba->sli4_hba.sli_intf));
9279                /*
9280                 * Check for other Port errors during the initialization
9281                 * process.  Fail the load if the port did not come up
9282                 * correctly.
9283                 */
9284                if_type = bf_get(lpfc_sli_intf_if_type,
9285                                 &phba->sli4_hba.sli_intf);
9286                switch (if_type) {
9287                case LPFC_SLI_INTF_IF_TYPE_0:
9288                        phba->sli4_hba.ue_mask_lo =
9289                              readl(phba->sli4_hba.u.if_type0.UEMASKLOregaddr);
9290                        phba->sli4_hba.ue_mask_hi =
9291                              readl(phba->sli4_hba.u.if_type0.UEMASKHIregaddr);
9292                        uerrlo_reg.word0 =
9293                              readl(phba->sli4_hba.u.if_type0.UERRLOregaddr);
9294                        uerrhi_reg.word0 =
9295                                readl(phba->sli4_hba.u.if_type0.UERRHIregaddr);
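                        /*
                         * An unmasked unrecoverable-error bit fails POST:
                         * ~mask & err is nonzero only where an error bit is
                         * set while its mask bit is clear.  Illustrative
                         * values: ue_mask_lo = 0xFFFFFFFE and
                         * uerrlo = 0x00000001 give ~mask & err = 0x1, i.e.
                         * an unrecoverable error.
                         */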
9296                        if ((~phba->sli4_hba.ue_mask_lo & uerrlo_reg.word0) ||
9297                            (~phba->sli4_hba.ue_mask_hi & uerrhi_reg.word0)) {
9298                                lpfc_printf_log(phba, KERN_ERR,
9299                                                LOG_TRACE_EVENT,
9300                                                "1422 Unrecoverable Error "
9301                                                "Detected during POST "
9302                                                "uerr_lo_reg=0x%x, "
9303                                                "uerr_hi_reg=0x%x, "
9304                                                "ue_mask_lo_reg=0x%x, "
9305                                                "ue_mask_hi_reg=0x%x\n",
9306                                                uerrlo_reg.word0,
9307                                                uerrhi_reg.word0,
9308                                                phba->sli4_hba.ue_mask_lo,
9309                                                phba->sli4_hba.ue_mask_hi);
9310                                port_error = -ENODEV;
9311                        }
9312                        break;
9313                case LPFC_SLI_INTF_IF_TYPE_2:
9314                case LPFC_SLI_INTF_IF_TYPE_6:
9315                        /* Final checks.  The port status should be clean. */
9316                        if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
9317                                &reg_data.word0) ||
9318                                (bf_get(lpfc_sliport_status_err, &reg_data) &&
9319                                 !bf_get(lpfc_sliport_status_rn, &reg_data))) {
9320                                phba->work_status[0] =
9321                                        readl(phba->sli4_hba.u.if_type2.
9322                                              ERR1regaddr);
9323                                phba->work_status[1] =
9324                                        readl(phba->sli4_hba.u.if_type2.
9325                                              ERR2regaddr);
9326                                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9327                                        "2888 Unrecoverable port error "
9328                                        "following POST: port status reg "
9329                                        "0x%x, port_smphr reg 0x%x, "
9330                                        "error 1=0x%x, error 2=0x%x\n",
9331                                        reg_data.word0,
9332                                        portsmphr_reg.word0,
9333                                        phba->work_status[0],
9334                                        phba->work_status[1]);
9335                                port_error = -ENODEV;
9336                        }
9337                        break;
9338                case LPFC_SLI_INTF_IF_TYPE_1:
9339                default:
9340                        break;
9341                }
9342        }
9343        return port_error;
9344}
9345
9346/**
9347 * lpfc_sli4_bar0_register_memmap - Set up SLI4 BAR0 register memory map.
9348 * @phba: pointer to lpfc hba data structure.
9349 * @if_type:  The SLI4 interface type getting configured.
9350 *
9351 * This routine is invoked to set up SLI4 BAR0 PCI config space register
9352 * memory map.
9353 **/
9354static void
9355lpfc_sli4_bar0_register_memmap(struct lpfc_hba *phba, uint32_t if_type)
9356{
9357        switch (if_type) {
9358        case LPFC_SLI_INTF_IF_TYPE_0:
9359                phba->sli4_hba.u.if_type0.UERRLOregaddr =
9360                        phba->sli4_hba.conf_regs_memmap_p + LPFC_UERR_STATUS_LO;
9361                phba->sli4_hba.u.if_type0.UERRHIregaddr =
9362                        phba->sli4_hba.conf_regs_memmap_p + LPFC_UERR_STATUS_HI;
9363                phba->sli4_hba.u.if_type0.UEMASKLOregaddr =
9364                        phba->sli4_hba.conf_regs_memmap_p + LPFC_UE_MASK_LO;
9365                phba->sli4_hba.u.if_type0.UEMASKHIregaddr =
9366                        phba->sli4_hba.conf_regs_memmap_p + LPFC_UE_MASK_HI;
9367                phba->sli4_hba.SLIINTFregaddr =
9368                        phba->sli4_hba.conf_regs_memmap_p + LPFC_SLI_INTF;
9369                break;
9370        case LPFC_SLI_INTF_IF_TYPE_2:
9371                phba->sli4_hba.u.if_type2.EQDregaddr =
9372                        phba->sli4_hba.conf_regs_memmap_p +
9373                                                LPFC_CTL_PORT_EQ_DELAY_OFFSET;
9374                phba->sli4_hba.u.if_type2.ERR1regaddr =
9375                        phba->sli4_hba.conf_regs_memmap_p +
9376                                                LPFC_CTL_PORT_ER1_OFFSET;
9377                phba->sli4_hba.u.if_type2.ERR2regaddr =
9378                        phba->sli4_hba.conf_regs_memmap_p +
9379                                                LPFC_CTL_PORT_ER2_OFFSET;
9380                phba->sli4_hba.u.if_type2.CTRLregaddr =
9381                        phba->sli4_hba.conf_regs_memmap_p +
9382                                                LPFC_CTL_PORT_CTL_OFFSET;
9383                phba->sli4_hba.u.if_type2.STATUSregaddr =
9384                        phba->sli4_hba.conf_regs_memmap_p +
9385                                                LPFC_CTL_PORT_STA_OFFSET;
9386                phba->sli4_hba.SLIINTFregaddr =
9387                        phba->sli4_hba.conf_regs_memmap_p + LPFC_SLI_INTF;
9388                phba->sli4_hba.PSMPHRregaddr =
9389                        phba->sli4_hba.conf_regs_memmap_p +
9390                                                LPFC_CTL_PORT_SEM_OFFSET;
9391                phba->sli4_hba.RQDBregaddr =
9392                        phba->sli4_hba.conf_regs_memmap_p +
9393                                                LPFC_ULP0_RQ_DOORBELL;
9394                phba->sli4_hba.WQDBregaddr =
9395                        phba->sli4_hba.conf_regs_memmap_p +
9396                                                LPFC_ULP0_WQ_DOORBELL;
9397                phba->sli4_hba.CQDBregaddr =
9398                        phba->sli4_hba.conf_regs_memmap_p + LPFC_EQCQ_DOORBELL;
9399                phba->sli4_hba.EQDBregaddr = phba->sli4_hba.CQDBregaddr;
9400                phba->sli4_hba.MQDBregaddr =
9401                        phba->sli4_hba.conf_regs_memmap_p + LPFC_MQ_DOORBELL;
9402                phba->sli4_hba.BMBXregaddr =
9403                        phba->sli4_hba.conf_regs_memmap_p + LPFC_BMBX;
9404                break;
9405        case LPFC_SLI_INTF_IF_TYPE_6:
9406                phba->sli4_hba.u.if_type2.EQDregaddr =
9407                        phba->sli4_hba.conf_regs_memmap_p +
9408                                                LPFC_CTL_PORT_EQ_DELAY_OFFSET;
9409                phba->sli4_hba.u.if_type2.ERR1regaddr =
9410                        phba->sli4_hba.conf_regs_memmap_p +
9411                                                LPFC_CTL_PORT_ER1_OFFSET;
9412                phba->sli4_hba.u.if_type2.ERR2regaddr =
9413                        phba->sli4_hba.conf_regs_memmap_p +
9414                                                LPFC_CTL_PORT_ER2_OFFSET;
9415                phba->sli4_hba.u.if_type2.CTRLregaddr =
9416                        phba->sli4_hba.conf_regs_memmap_p +
9417                                                LPFC_CTL_PORT_CTL_OFFSET;
9418                phba->sli4_hba.u.if_type2.STATUSregaddr =
9419                        phba->sli4_hba.conf_regs_memmap_p +
9420                                                LPFC_CTL_PORT_STA_OFFSET;
9421                phba->sli4_hba.PSMPHRregaddr =
9422                        phba->sli4_hba.conf_regs_memmap_p +
9423                                                LPFC_CTL_PORT_SEM_OFFSET;
9424                phba->sli4_hba.BMBXregaddr =
9425                        phba->sli4_hba.conf_regs_memmap_p + LPFC_BMBX;
9426                break;
9427        case LPFC_SLI_INTF_IF_TYPE_1:
9428        default:
9429                dev_printk(KERN_ERR, &phba->pcidev->dev,
9430                           "FATAL - unsupported SLI4 interface type - %d\n",
9431                           if_type);
9432                break;
9433        }
9434}
9435
9436/**
9437 * lpfc_sli4_bar1_register_memmap - Set up SLI4 BAR1 register memory map.
9438 * @phba: pointer to lpfc hba data structure.
9439 * @if_type: sli if type to operate on.
9440 *
9441 * This routine is invoked to set up SLI4 BAR1 register memory map.
9442 **/
9443static void
9444lpfc_sli4_bar1_register_memmap(struct lpfc_hba *phba, uint32_t if_type)
9445{
9446        switch (if_type) {
9447        case LPFC_SLI_INTF_IF_TYPE_0:
9448                phba->sli4_hba.PSMPHRregaddr =
9449                        phba->sli4_hba.ctrl_regs_memmap_p +
9450                        LPFC_SLIPORT_IF0_SMPHR;
9451                phba->sli4_hba.ISRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
9452                        LPFC_HST_ISR0;
9453                phba->sli4_hba.IMRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
9454                        LPFC_HST_IMR0;
9455                phba->sli4_hba.ISCRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
9456                        LPFC_HST_ISCR0;
9457                break;
9458        case LPFC_SLI_INTF_IF_TYPE_6:
9459                phba->sli4_hba.RQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p +
9460                        LPFC_IF6_RQ_DOORBELL;
9461                phba->sli4_hba.WQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p +
9462                        LPFC_IF6_WQ_DOORBELL;
9463                phba->sli4_hba.CQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p +
9464                        LPFC_IF6_CQ_DOORBELL;
9465                phba->sli4_hba.EQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p +
9466                        LPFC_IF6_EQ_DOORBELL;
9467                phba->sli4_hba.MQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p +
9468                        LPFC_IF6_MQ_DOORBELL;
9469                break;
9470        case LPFC_SLI_INTF_IF_TYPE_2:
9471        case LPFC_SLI_INTF_IF_TYPE_1:
9472        default:
9473                dev_err(&phba->pcidev->dev,
9474                           "FATAL - unsupported SLI4 interface type - %d\n",
9475                           if_type);
9476                break;
9477        }
9478}
9479
9480/**
9481 * lpfc_sli4_bar2_register_memmap - Set up SLI4 BAR2 register memory map.
9482 * @phba: pointer to lpfc hba data structure.
9483 * @vf: virtual function number
9484 *
9485 * This routine is invoked to set up SLI4 BAR2 doorbell register memory map
9486 * based on the given virtual function number, @vf.
9487 *
9488 * Return 0 if successful, otherwise -ENODEV.
9489 **/
9490static int
9491lpfc_sli4_bar2_register_memmap(struct lpfc_hba *phba, uint32_t vf)
9492{
9493        if (vf > LPFC_VIR_FUNC_MAX)
9494                return -ENODEV;
9495
9496        phba->sli4_hba.RQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
9497                                vf * LPFC_VFR_PAGE_SIZE +
9498                                        LPFC_ULP0_RQ_DOORBELL);
9499        phba->sli4_hba.WQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
9500                                vf * LPFC_VFR_PAGE_SIZE +
9501                                        LPFC_ULP0_WQ_DOORBELL);
9502        phba->sli4_hba.CQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
9503                                vf * LPFC_VFR_PAGE_SIZE +
9504                                        LPFC_EQCQ_DOORBELL);
9505        phba->sli4_hba.EQDBregaddr = phba->sli4_hba.CQDBregaddr;
9506        phba->sli4_hba.MQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
9507                                vf * LPFC_VFR_PAGE_SIZE + LPFC_MQ_DOORBELL);
9508        phba->sli4_hba.BMBXregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
9509                                vf * LPFC_VFR_PAGE_SIZE + LPFC_BMBX);
9510        return 0;
9511}
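/*
 * Worked example for the doorbell math above (vf value chosen purely for
 * illustration): with vf == 2, the RQ doorbell resolves to
 *
 *     drbl_regs_memmap_p + 2 * LPFC_VFR_PAGE_SIZE + LPFC_ULP0_RQ_DOORBELL
 *
 * i.e. every virtual function sees the same doorbell offsets inside its
 * own LPFC_VFR_PAGE_SIZE-sized window of the BAR2 region.
 */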
9512
9513/**
9514 * lpfc_create_bootstrap_mbox - Create the bootstrap mailbox
9515 * @phba: pointer to lpfc hba data structure.
9516 *
9517 * This routine is invoked to create the bootstrap mailbox
9518 * region consistent with the SLI-4 interface spec.  This
9519 * routine allocates all memory necessary to communicate
9520 * mailbox commands to the port and satisfies all alignment
9521 * requirements.  No locks are expected to be held when calling
9522 * this routine.
9523 *
9524 * Return codes
9525 *      0 - successful
9526 *      -ENOMEM - could not allocate memory.
9527 **/
9528static int
9529lpfc_create_bootstrap_mbox(struct lpfc_hba *phba)
9530{
9531        uint32_t bmbx_size;
9532        struct lpfc_dmabuf *dmabuf;
9533        struct dma_address *dma_address;
9534        uint32_t pa_addr;
9535        uint64_t phys_addr;
9536
9537        dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
9538        if (!dmabuf)
9539                return -ENOMEM;
9540
9541        /*
9542         * The bootstrap mailbox region consists of 2 parts
9543         * plus a 16-byte alignment restriction.
9544         */
9545        bmbx_size = sizeof(struct lpfc_bmbx_create) + (LPFC_ALIGN_16_BYTE - 1);
9546        dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, bmbx_size,
9547                                          &dmabuf->phys, GFP_KERNEL);
9548        if (!dmabuf->virt) {
9549                kfree(dmabuf);
9550                return -ENOMEM;
9551        }
9552
9553        /*
9554         * Initialize the bootstrap mailbox pointers now so that the register
9555         * operations are simple later.  The mailbox DMA address is required
9556         * to be 16-byte aligned.  Also align the virtual memory as each
9557         * mailbox is copied into the bmbx mailbox region before issuing the
9558         * command to the port.
9559         */
9560        phba->sli4_hba.bmbx.dmabuf = dmabuf;
9561        phba->sli4_hba.bmbx.bmbx_size = bmbx_size;
9562
9563        phba->sli4_hba.bmbx.avirt = PTR_ALIGN(dmabuf->virt,
9564                                              LPFC_ALIGN_16_BYTE);
9565        phba->sli4_hba.bmbx.aphys = ALIGN(dmabuf->phys,
9566                                              LPFC_ALIGN_16_BYTE);
9567
9568        /*
9569         * Set the high and low physical addresses now.  The SLI4 alignment
9570         * requirement is 16 bytes and the mailbox is posted to the port
9571         * as two 30-bit addresses.  The other data is a bit marking whether
9572         * the 30-bit address is the high or low address.
9573         * Upcast the bmbx aphys to 64 bits so the shift instruction
9574         * compiles cleanly on 32-bit machines.
9575         */
9576        dma_address = &phba->sli4_hba.bmbx.dma_address;
9577        phys_addr = (uint64_t)phba->sli4_hba.bmbx.aphys;
9578        pa_addr = (uint32_t) ((phys_addr >> 34) & 0x3fffffff);
9579        dma_address->addr_hi = (uint32_t) ((pa_addr << 2) |
9580                                           LPFC_BMBX_BIT1_ADDR_HI);
9581
9582        pa_addr = (uint32_t) ((phba->sli4_hba.bmbx.aphys >> 4) & 0x3fffffff);
9583        dma_address->addr_lo = (uint32_t) ((pa_addr << 2) |
9584                                           LPFC_BMBX_BIT1_ADDR_LO);
9585        return 0;
9586}
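/*
 * Worked example of the address split above (address chosen purely for
 * illustration): with aphys = 0x1234567890, which is 16-byte aligned so
 * bits 3:0 are zero,
 *
 *     addr_lo carries bits 33:4  -> (0x1234567890 >> 4) & 0x3fffffff
 *                                   = 0x23456789
 *     addr_hi carries bits 63:34 -> (0x1234567890 >> 34) & 0x3fffffff
 *                                   = 0x4
 *
 * Each 30-bit chunk is then shifted left by 2 and tagged with its
 * LPFC_BMBX_BIT1_ADDR_HI/LO marker before being posted to the port.
 */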
9587
9588/**
9589 * lpfc_destroy_bootstrap_mbox - Destroy all bootstrap mailbox resources
9590 * @phba: pointer to lpfc hba data structure.
9591 *
9592 * This routine is invoked to tear down the bootstrap mailbox
9593 * region and release all host resources. The caller must ensure
9594 * that all mailbox commands have been recovered, that no additional
9595 * mailbox commands are sent, and that interrupts are disabled
9596 * before calling this routine.
9597 *
9598 **/
9599static void
9600lpfc_destroy_bootstrap_mbox(struct lpfc_hba *phba)
9601{
9602        dma_free_coherent(&phba->pcidev->dev,
9603                          phba->sli4_hba.bmbx.bmbx_size,
9604                          phba->sli4_hba.bmbx.dmabuf->virt,
9605                          phba->sli4_hba.bmbx.dmabuf->phys);
9606
9607        kfree(phba->sli4_hba.bmbx.dmabuf);
9608        memset(&phba->sli4_hba.bmbx, 0, sizeof(struct lpfc_bmbx));
9609}
9610
9611static const char * const lpfc_topo_to_str[] = {
9612        "Loop then P2P",
9613        "Loopback",
9614        "P2P Only",
9615        "Unsupported",
9616        "Loop Only",
9617        "Unsupported",
9618        "P2P then Loop",
9619};
9620
9621#define LINK_FLAGS_DEF  0x0
9622#define LINK_FLAGS_P2P  0x1
9623#define LINK_FLAGS_LOOP 0x2
9624/**
9625 * lpfc_map_topology - Map the topology read from READ_CONFIG
9626 * @phba: pointer to lpfc hba data structure.
9627 * @rd_config: pointer to read config data
9628 *
9629 * This routine is invoked to map the topology values as read
9630 * from the read config mailbox command. If the persistent
9631 * topology feature is supported, the firmware will provide the
9632 * saved topology information to be used in INIT_LINK.
9633 **/
9634static void
9635lpfc_map_topology(struct lpfc_hba *phba, struct lpfc_mbx_read_config *rd_config)
9636{
9637        u8 ptv, tf, pt;
9638
9639        ptv = bf_get(lpfc_mbx_rd_conf_ptv, rd_config);
9640        tf = bf_get(lpfc_mbx_rd_conf_tf, rd_config);
9641        pt = bf_get(lpfc_mbx_rd_conf_pt, rd_config);
9642
9643        lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
9644                        "2027 Read Config Data: ptv:0x%x, tf:0x%x, pt:0x%x",
9645                        ptv, tf, pt);
9646        if (!ptv) {
9647                lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
9648                                "2019 FW does not support persistent topology; "
9649                                "using driver parameter defined value [%s]",
9650                                lpfc_topo_to_str[phba->cfg_topology]);
9651                return;
9652        }
9653        /* FW supports persistent topology - override module parameter value */
9654        phba->hba_flag |= HBA_PERSISTENT_TOPO;
9655
9656        /* if ASIC_GEN_NUM >= 0xC */
9657        if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
9658                    LPFC_SLI_INTF_IF_TYPE_6) ||
9659            (bf_get(lpfc_sli_intf_sli_family, &phba->sli4_hba.sli_intf) ==
9660                    LPFC_SLI_INTF_FAMILY_G6)) {
9661                if (!tf) {
9662                        phba->cfg_topology = ((pt == LINK_FLAGS_LOOP)
9663                                        ? FLAGS_TOPOLOGY_MODE_LOOP
9664                                        : FLAGS_TOPOLOGY_MODE_PT_PT);
9665                } else {
9666                        phba->hba_flag &= ~HBA_PERSISTENT_TOPO;
9667                }
9668        } else { /* G5 */
9669                if (tf) {
9670                        /* If topology failover set - pt is '0' or '1' */
9671                        phba->cfg_topology = (pt ? FLAGS_TOPOLOGY_MODE_PT_LOOP :
9672                                              FLAGS_TOPOLOGY_MODE_LOOP_PT);
9673                } else {
9674                        phba->cfg_topology = ((pt == LINK_FLAGS_P2P)
9675                                        ? FLAGS_TOPOLOGY_MODE_PT_PT
9676                                        : FLAGS_TOPOLOGY_MODE_LOOP);
9677                }
9678        }
9679        if (phba->hba_flag & HBA_PERSISTENT_TOPO) {
9680                lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
9681                                "2020 Using persistent topology value [%s]",
9682                                lpfc_topo_to_str[phba->cfg_topology]);
9683        } else {
9684                lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
9685                                "2021 Invalid topology values from FW; "
9686                                "using driver parameter defined value [%s]",
9687                                lpfc_topo_to_str[phba->cfg_topology]);
9688        }
9689}
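/*
 * Summary of the mapping above (ptv = persistent topology valid,
 * tf = topology failover, pt = persistent topology):
 *
 *   ptv == 0               : keep the module-parameter topology
 *   G6/if_type 6, tf == 0  : pt selects LOOP vs PT_PT
 *   G6/if_type 6, tf == 1  : failover is unsupported, so the
 *                            HBA_PERSISTENT_TOPO flag is dropped
 *   G5,           tf == 1  : pt selects PT_LOOP vs LOOP_PT
 *   G5,           tf == 0  : pt selects PT_PT vs LOOP
 */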
9690
9691/**
9692 * lpfc_sli4_read_config - Get the config parameters.
9693 * @phba: pointer to lpfc hba data structure.
9694 *
9695 * This routine is invoked to read the configuration parameters from the HBA.
9696 * The configuration parameters are used to set the base and maximum values
9697 * for RPIs, XRIs, VPIs, VFIs, and FCFIs. These values also affect the resource
9698 * allocation for the port.
9699 *
9700 * Return codes
9701 *      0 - successful
9702 *      -ENOMEM - No available memory
9703 *      -EIO - The mailbox failed to complete successfully.
9704 **/
9705int
9706lpfc_sli4_read_config(struct lpfc_hba *phba)
9707{
9708        LPFC_MBOXQ_t *pmb;
9709        struct lpfc_mbx_read_config *rd_config;
9710        union  lpfc_sli4_cfg_shdr *shdr;
9711        uint32_t shdr_status, shdr_add_status;
9712        struct lpfc_mbx_get_func_cfg *get_func_cfg;
9713        struct lpfc_rsrc_desc_fcfcoe *desc;
9714        char *pdesc_0;
9715        uint16_t forced_link_speed;
9716        uint32_t if_type, qmin;
9717        int length, i, rc = 0, rc2;
9718
9719        pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
9720        if (!pmb) {
9721                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9722                                "2011 Unable to allocate memory for issuing "
9723                                "SLI_CONFIG_SPECIAL mailbox command\n");
9724                return -ENOMEM;
9725        }
9726
9727        lpfc_read_config(phba, pmb);
9728
9729        rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
9730        if (rc != MBX_SUCCESS) {
9731                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9732                                "2012 Mailbox failed, mbxCmd x%x "
9733                                "READ_CONFIG, mbxStatus x%x\n",
9734                                bf_get(lpfc_mqe_command, &pmb->u.mqe),
9735                                bf_get(lpfc_mqe_status, &pmb->u.mqe));
9736                rc = -EIO;
9737        } else {
9738                rd_config = &pmb->u.mqe.un.rd_config;
9739                if (bf_get(lpfc_mbx_rd_conf_lnk_ldv, rd_config)) {
9740                        phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_VAL;
9741                        phba->sli4_hba.lnk_info.lnk_tp =
9742                                bf_get(lpfc_mbx_rd_conf_lnk_type, rd_config);
9743                        phba->sli4_hba.lnk_info.lnk_no =
9744                                bf_get(lpfc_mbx_rd_conf_lnk_numb, rd_config);
9745                        lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
9746                                        "3081 lnk_type:%d, lnk_numb:%d\n",
9747                                        phba->sli4_hba.lnk_info.lnk_tp,
9748                                        phba->sli4_hba.lnk_info.lnk_no);
9749                } else
9750                        lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
9751                                        "3082 Mailbox (x%x) returned ldv:x0\n",
9752                                        bf_get(lpfc_mqe_command, &pmb->u.mqe));
9753                if (bf_get(lpfc_mbx_rd_conf_bbscn_def, rd_config)) {
9754                        phba->bbcredit_support = 1;
9755                        phba->sli4_hba.bbscn_params.word0 = rd_config->word8;
9756                }
9757
9758                phba->sli4_hba.conf_trunk =
9759                        bf_get(lpfc_mbx_rd_conf_trunk, rd_config);
9760                phba->sli4_hba.extents_in_use =
9761                        bf_get(lpfc_mbx_rd_conf_extnts_inuse, rd_config);
9762                phba->sli4_hba.max_cfg_param.max_xri =
9763                        bf_get(lpfc_mbx_rd_conf_xri_count, rd_config);
9764                /* Reduce resource usage in kdump environment */
9765                if (is_kdump_kernel() &&
9766                    phba->sli4_hba.max_cfg_param.max_xri > 512)
9767                        phba->sli4_hba.max_cfg_param.max_xri = 512;
9768                phba->sli4_hba.max_cfg_param.xri_base =
9769                        bf_get(lpfc_mbx_rd_conf_xri_base, rd_config);
9770                phba->sli4_hba.max_cfg_param.max_vpi =
9771                        bf_get(lpfc_mbx_rd_conf_vpi_count, rd_config);
9772                /* Limit the max we support */
9773                if (phba->sli4_hba.max_cfg_param.max_vpi > LPFC_MAX_VPORTS)
9774                        phba->sli4_hba.max_cfg_param.max_vpi = LPFC_MAX_VPORTS;
9775                phba->sli4_hba.max_cfg_param.vpi_base =
9776                        bf_get(lpfc_mbx_rd_conf_vpi_base, rd_config);
9777                phba->sli4_hba.max_cfg_param.max_rpi =
9778                        bf_get(lpfc_mbx_rd_conf_rpi_count, rd_config);
9779                phba->sli4_hba.max_cfg_param.rpi_base =
9780                        bf_get(lpfc_mbx_rd_conf_rpi_base, rd_config);
9781                phba->sli4_hba.max_cfg_param.max_vfi =
9782                        bf_get(lpfc_mbx_rd_conf_vfi_count, rd_config);
9783                phba->sli4_hba.max_cfg_param.vfi_base =
9784                        bf_get(lpfc_mbx_rd_conf_vfi_base, rd_config);
9785                phba->sli4_hba.max_cfg_param.max_fcfi =
9786                        bf_get(lpfc_mbx_rd_conf_fcfi_count, rd_config);
9787                phba->sli4_hba.max_cfg_param.max_eq =
9788                        bf_get(lpfc_mbx_rd_conf_eq_count, rd_config);
9789                phba->sli4_hba.max_cfg_param.max_rq =
9790                        bf_get(lpfc_mbx_rd_conf_rq_count, rd_config);
9791                phba->sli4_hba.max_cfg_param.max_wq =
9792                        bf_get(lpfc_mbx_rd_conf_wq_count, rd_config);
9793                phba->sli4_hba.max_cfg_param.max_cq =
9794                        bf_get(lpfc_mbx_rd_conf_cq_count, rd_config);
9795                phba->lmt = bf_get(lpfc_mbx_rd_conf_lmt, rd_config);
9796                phba->sli4_hba.next_xri = phba->sli4_hba.max_cfg_param.xri_base;
9797                phba->vpi_base = phba->sli4_hba.max_cfg_param.vpi_base;
9798                phba->vfi_base = phba->sli4_hba.max_cfg_param.vfi_base;
9799                phba->max_vpi = (phba->sli4_hba.max_cfg_param.max_vpi > 0) ?
9800                                (phba->sli4_hba.max_cfg_param.max_vpi - 1) : 0;
9801                phba->max_vports = phba->max_vpi;
9802
9803                /* Next decide on FPIN or Signal E2E CGN support
9804                 * For congestion alarms and warnings, the valid combinations are:
9805                 * 1. FPIN alarms / FPIN warnings
9806                 * 2. Signal alarms / Signal warnings
9807                 * 3. FPIN alarms / Signal warnings
9808                 * 4. Signal alarms / FPIN warnings
9809                 *
9810                 * Initialize the adapter congestion signal frequency to 100 ms.
9811                 */
9812                phba->cgn_reg_fpin = LPFC_CGN_FPIN_BOTH;
9813                phba->cgn_reg_signal = EDC_CG_SIG_NOTSUPPORTED;
9814                phba->cgn_sig_freq = lpfc_fabric_cgn_frequency;
9815
9816                if (lpfc_use_cgn_signal) {
9817                        if (bf_get(lpfc_mbx_rd_conf_wcs, rd_config)) {
9818                                phba->cgn_reg_signal = EDC_CG_SIG_WARN_ONLY;
9819                                phba->cgn_reg_fpin &= ~LPFC_CGN_FPIN_WARN;
9820                        }
9821                        if (bf_get(lpfc_mbx_rd_conf_acs, rd_config)) {
9822                                /* MUST support both alarm and warning
9823                                 * because EDC does not support alarm alone.
9824                                 */
9825                                if (phba->cgn_reg_signal !=
9826                                    EDC_CG_SIG_WARN_ONLY) {
9827                                        /* Must support both or none */
9828                                        phba->cgn_reg_fpin = LPFC_CGN_FPIN_BOTH;
9829                                        phba->cgn_reg_signal =
9830                                                EDC_CG_SIG_NOTSUPPORTED;
9831                                } else {
9832                                        phba->cgn_reg_signal =
9833                                                EDC_CG_SIG_WARN_ALARM;
9834                                        phba->cgn_reg_fpin =
9835                                                LPFC_CGN_FPIN_NONE;
9836                                }
9837                        }
9838                }
9839
9840                /* Set the congestion initial signal and fpin values. */
9841                phba->cgn_init_reg_fpin = phba->cgn_reg_fpin;
9842                phba->cgn_init_reg_signal = phba->cgn_reg_signal;
9843
9844                lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
9845                                "6446 READ_CONFIG reg_sig x%x reg_fpin:x%x\n",
9846                                phba->cgn_reg_signal, phba->cgn_reg_fpin);
9847
9848                lpfc_map_topology(phba, rd_config);
9849                lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
9850                                "2003 cfg params Extents? %d "
9851                                "XRI(B:%d M:%d), "
9852                                "VPI(B:%d M:%d) "
9853                                "VFI(B:%d M:%d) "
9854                                "RPI(B:%d M:%d) "
9855                                "FCFI:%d EQ:%d CQ:%d WQ:%d RQ:%d lmt:x%x\n",
9856                                phba->sli4_hba.extents_in_use,
9857                                phba->sli4_hba.max_cfg_param.xri_base,
9858                                phba->sli4_hba.max_cfg_param.max_xri,
9859                                phba->sli4_hba.max_cfg_param.vpi_base,
9860                                phba->sli4_hba.max_cfg_param.max_vpi,
9861                                phba->sli4_hba.max_cfg_param.vfi_base,
9862                                phba->sli4_hba.max_cfg_param.max_vfi,
9863                                phba->sli4_hba.max_cfg_param.rpi_base,
9864                                phba->sli4_hba.max_cfg_param.max_rpi,
9865                                phba->sli4_hba.max_cfg_param.max_fcfi,
9866                                phba->sli4_hba.max_cfg_param.max_eq,
9867                                phba->sli4_hba.max_cfg_param.max_cq,
9868                                phba->sli4_hba.max_cfg_param.max_wq,
9869                                phba->sli4_hba.max_cfg_param.max_rq,
9870                                phba->lmt);
9871
9872                /*
9873                 * Calculate queue resources based on how
9874                 * many WQ/CQ/EQs are available.
9875                 */
9876                qmin = phba->sli4_hba.max_cfg_param.max_wq;
9877                if (phba->sli4_hba.max_cfg_param.max_cq < qmin)
9878                        qmin = phba->sli4_hba.max_cfg_param.max_cq;
9879                if (phba->sli4_hba.max_cfg_param.max_eq < qmin)
9880                        qmin = phba->sli4_hba.max_cfg_param.max_eq;
9881                /*
9882                 * What's left after this can go toward NVME / FCP.
9883                 * The minus 4 accounts for the ELS, NVME LS, and MBOX queues,
9884                 * plus one extra. When configured for
9885                 * NVMET, FCP io channel WQs are not created.
9886                 */
9887                qmin -= 4;
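                /*
                 * Worked example (illustrative numbers only): with
                 * max_wq = 128, max_cq = 160 and max_eq = 64, qmin becomes
                 * min(128, 160, 64) - 4 = 60, so cfg_irq_chann and
                 * cfg_hdw_queue are each capped at 60 below.
                 */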
9888
9889                /* Check to see if there is enough for NVME */
9890                if ((phba->cfg_irq_chann > qmin) ||
9891                    (phba->cfg_hdw_queue > qmin)) {
9892                        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9893                                        "2005 Reducing Queues - "
9894                                        "FW resource limitation: "
9895                                        "WQ %d CQ %d EQ %d: min %d: "
9896                                        "IRQ %d HDWQ %d\n",
9897                                        phba->sli4_hba.max_cfg_param.max_wq,
9898                                        phba->sli4_hba.max_cfg_param.max_cq,
9899                                        phba->sli4_hba.max_cfg_param.max_eq,
9900                                        qmin, phba->cfg_irq_chann,
9901                                        phba->cfg_hdw_queue);
9902
9903                        if (phba->cfg_irq_chann > qmin)
9904                                phba->cfg_irq_chann = qmin;
9905                        if (phba->cfg_hdw_queue > qmin)
9906                                phba->cfg_hdw_queue = qmin;
9907                }
9908        }
9909
9910        if (rc)
9911                goto read_cfg_out;
9912
9913        /* Update link speed if forced link speed is supported */
9914        if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
9915        if (if_type >= LPFC_SLI_INTF_IF_TYPE_2) {
9916                forced_link_speed =
9917                        bf_get(lpfc_mbx_rd_conf_link_speed, rd_config);
9918                if (forced_link_speed) {
9919                        phba->hba_flag |= HBA_FORCED_LINK_SPEED;
9920
9921                        switch (forced_link_speed) {
9922                        case LINK_SPEED_1G:
9923                                phba->cfg_link_speed =
9924                                        LPFC_USER_LINK_SPEED_1G;
9925                                break;
9926                        case LINK_SPEED_2G:
9927                                phba->cfg_link_speed =
9928                                        LPFC_USER_LINK_SPEED_2G;
9929                                break;
9930                        case LINK_SPEED_4G:
9931                                phba->cfg_link_speed =
9932                                        LPFC_USER_LINK_SPEED_4G;
9933                                break;
9934                        case LINK_SPEED_8G:
9935                                phba->cfg_link_speed =
9936                                        LPFC_USER_LINK_SPEED_8G;
9937                                break;
9938                        case LINK_SPEED_10G:
9939                                phba->cfg_link_speed =
9940                                        LPFC_USER_LINK_SPEED_10G;
9941                                break;
9942                        case LINK_SPEED_16G:
9943                                phba->cfg_link_speed =
9944                                        LPFC_USER_LINK_SPEED_16G;
9945                                break;
9946                        case LINK_SPEED_32G:
9947                                phba->cfg_link_speed =
9948                                        LPFC_USER_LINK_SPEED_32G;
9949                                break;
9950                        case LINK_SPEED_64G:
9951                                phba->cfg_link_speed =
9952                                        LPFC_USER_LINK_SPEED_64G;
9953                                break;
9954                        case 0xffff:
9955                                phba->cfg_link_speed =
9956                                        LPFC_USER_LINK_SPEED_AUTO;
9957                                break;
9958                        default:
9959                                lpfc_printf_log(phba, KERN_ERR,
9960                                                LOG_TRACE_EVENT,
9961                                                "0047 Unrecognized link "
9962                                                "speed : %d\n",
9963                                                forced_link_speed);
9964                                phba->cfg_link_speed =
9965                                        LPFC_USER_LINK_SPEED_AUTO;
9966                        }
9967                }
9968        }
9969
9970        /* Clamp the HBA queue depth to the XRIs left after ELS IOCBs */
9971        length = phba->sli4_hba.max_cfg_param.max_xri -
9972                        lpfc_sli4_get_els_iocb_cnt(phba);
9973        if (phba->cfg_hba_queue_depth > length) {
9974                lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
9975                                "3361 HBA queue depth changed from %d to %d\n",
9976                                phba->cfg_hba_queue_depth, length);
9977                phba->cfg_hba_queue_depth = length;
9978        }
9979
9980        if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) <
9981            LPFC_SLI_INTF_IF_TYPE_2)
9982                goto read_cfg_out;
9983
9984        /* get the pf# and vf# for SLI4 if_type 2 port */
9985        length = (sizeof(struct lpfc_mbx_get_func_cfg) -
9986                  sizeof(struct lpfc_sli4_cfg_mhdr));
9987        lpfc_sli4_config(phba, pmb, LPFC_MBOX_SUBSYSTEM_COMMON,
9988                         LPFC_MBOX_OPCODE_GET_FUNCTION_CONFIG,
9989                         length, LPFC_SLI4_MBX_EMBED);
9990
9991        rc2 = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
9992        shdr = (union lpfc_sli4_cfg_shdr *)
9993                                &pmb->u.mqe.un.sli4_config.header.cfg_shdr;
9994        shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
9995        shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
9996        if (rc2 || shdr_status || shdr_add_status) {
9997                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9998                                "3026 Mailbox failed, mbxCmd x%x "
9999                                "GET_FUNCTION_CONFIG, mbxStatus x%x\n",
10000                                bf_get(lpfc_mqe_command, &pmb->u.mqe),
10001                                bf_get(lpfc_mqe_status, &pmb->u.mqe));
10002                goto read_cfg_out;
10003        }
10004
10005        /* Search for the fc_fcoe resource descriptor */
10006        get_func_cfg = &pmb->u.mqe.un.get_func_cfg;
10007
10008        pdesc_0 = (char *)&get_func_cfg->func_cfg.desc[0];
10009        desc = (struct lpfc_rsrc_desc_fcfcoe *)pdesc_0;
10010        length = bf_get(lpfc_rsrc_desc_fcfcoe_length, desc);
10011        if (length == LPFC_RSRC_DESC_TYPE_FCFCOE_V0_RSVD)
10012                length = LPFC_RSRC_DESC_TYPE_FCFCOE_V0_LENGTH;
10013        else if (length != LPFC_RSRC_DESC_TYPE_FCFCOE_V1_LENGTH)
10014                goto read_cfg_out;
10015
10016        for (i = 0; i < LPFC_RSRC_DESC_MAX_NUM; i++) {
10017                desc = (struct lpfc_rsrc_desc_fcfcoe *)(pdesc_0 + length * i);
10018                if (LPFC_RSRC_DESC_TYPE_FCFCOE ==
10019                    bf_get(lpfc_rsrc_desc_fcfcoe_type, desc)) {
10020                        phba->sli4_hba.iov.pf_number =
10021                                bf_get(lpfc_rsrc_desc_fcfcoe_pfnum, desc);
10022                        phba->sli4_hba.iov.vf_number =
10023                                bf_get(lpfc_rsrc_desc_fcfcoe_vfnum, desc);
10024                        break;
10025                }
10026        }
10027
10028        if (i < LPFC_RSRC_DESC_MAX_NUM)
10029                lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
10030                                "3027 GET_FUNCTION_CONFIG: pf_number:%d, "
10031                                "vf_number:%d\n", phba->sli4_hba.iov.pf_number,
10032                                phba->sli4_hba.iov.vf_number);
10033        else
10034                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10035                                "3028 GET_FUNCTION_CONFIG: failed to find "
10036                                "Resource Descriptor:x%x\n",
10037                                LPFC_RSRC_DESC_TYPE_FCFCOE);
10038
10039read_cfg_out:
10040        mempool_free(pmb, phba->mbox_mem_pool);
10041        return rc;
10042}
10043
10044/**
10045 * lpfc_setup_endian_order - Write endian order to an SLI4 if_type 0 port.
10046 * @phba: pointer to lpfc hba data structure.
10047 *
10048 * This routine is invoked to set up the port-side endian order when
10049 * the port if_type is 0.  This routine has no function for other
10050 * if_types.
10051 *
10052 * Return codes
10053 *      0 - successful
10054 *      -ENOMEM - No available memory
10055 *      -EIO - The mailbox failed to complete successfully.
10056 **/
10057static int
10058lpfc_setup_endian_order(struct lpfc_hba *phba)
10059{
10060        LPFC_MBOXQ_t *mboxq;
10061        uint32_t if_type, rc = 0;
10062        uint32_t endian_mb_data[2] = {HOST_ENDIAN_LOW_WORD0,
10063                                      HOST_ENDIAN_HIGH_WORD1};
10064
10065        if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
10066        switch (if_type) {
10067        case LPFC_SLI_INTF_IF_TYPE_0:
10068                mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
10069                                                       GFP_KERNEL);
10070                if (!mboxq) {
10071                        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10072                                        "0492 Unable to allocate memory for "
10073                                        "issuing SLI_CONFIG_SPECIAL mailbox "
10074                                        "command\n");
10075                        return -ENOMEM;
10076                }
10077
10078                /*
10079                 * The SLI4_CONFIG_SPECIAL mailbox command requires the first
10080                 * two words to contain special data values and no other data.
10081                 */
10082                memset(mboxq, 0, sizeof(LPFC_MBOXQ_t));
10083                memcpy(&mboxq->u.mqe, &endian_mb_data, sizeof(endian_mb_data));
10084                rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
10085                if (rc != MBX_SUCCESS) {
10086                        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10087                                        "0493 SLI_CONFIG_SPECIAL mailbox "
10088                                        "failed with status x%x\n",
10089                                        rc);
10090                        rc = -EIO;
10091                }
10092                mempool_free(mboxq, phba->mbox_mem_pool);
10093                break;
10094        case LPFC_SLI_INTF_IF_TYPE_6:
10095        case LPFC_SLI_INTF_IF_TYPE_2:
10096        case LPFC_SLI_INTF_IF_TYPE_1:
10097        default:
10098                break;
10099        }
10100        return rc;
10101}
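/*
 * Note on the endian words above: HOST_ENDIAN_LOW_WORD0/HIGH_WORD1 are
 * fixed, known byte patterns.  Because the host writes them in its native
 * byte order, an if_type 0 port can compare what arrives against the
 * expected pattern and infer the host's endianness for all subsequent
 * mailbox traffic (this reading follows the "special data values" comment
 * above; the SLI spec details are not reproduced here).  Later if_types
 * do not need this step, hence the empty cases.
 */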
10102
10103/**
10104 * lpfc_sli4_queue_verify - Verify and update EQ counts
10105 * @phba: pointer to lpfc hba data structure.
10106 *
10107 * This routine is invoked to check the user settable queue counts for EQs.
10108 * After this routine is called the counts will be set to valid values that
10109 * adhere to the constraints of the system's interrupt vectors and the port's
10110 * queue resources.
10111 *
10112 * Return codes
10113 *      0 - successful
10114 *      -ENOMEM - No available memory
10115 **/
10116static int
10117lpfc_sli4_queue_verify(struct lpfc_hba *phba)
10118{
10119        /*
10120         * Sanity check for configured queue parameters against the run-time
10121         * device parameters
10122         */
10123
10124        if (phba->nvmet_support) {
10125                if (phba->cfg_hdw_queue < phba->cfg_nvmet_mrq)
10126                        phba->cfg_nvmet_mrq = phba->cfg_hdw_queue;
10127                if (phba->cfg_nvmet_mrq > LPFC_NVMET_MRQ_MAX)
10128                        phba->cfg_nvmet_mrq = LPFC_NVMET_MRQ_MAX;
10129        }
10130
10131        lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10132                        "2574 IO channels: hdwQ %d IRQ %d MRQ: %d\n",
10133                        phba->cfg_hdw_queue, phba->cfg_irq_chann,
10134                        phba->cfg_nvmet_mrq);
10135
10136        /* Get EQ depth from module parameter, fake the default for now */
10137        phba->sli4_hba.eq_esize = LPFC_EQE_SIZE_4B;
10138        phba->sli4_hba.eq_ecount = LPFC_EQE_DEF_COUNT;
10139
10140        /* Get CQ depth from module parameter, fake the default for now */
10141        phba->sli4_hba.cq_esize = LPFC_CQE_SIZE;
10142        phba->sli4_hba.cq_ecount = LPFC_CQE_DEF_COUNT;
10143        return 0;
10144}
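/*
 * Example of the NVMET clamping above (illustrative values): with
 * cfg_hdw_queue = 4 and cfg_nvmet_mrq = 16, cfg_nvmet_mrq is first
 * reduced to 4 (never more MRQs than hardware queues) and would then be
 * capped at LPFC_NVMET_MRQ_MAX if it still exceeded that limit.
 */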
10145
10146static int
10147lpfc_alloc_io_wq_cq(struct lpfc_hba *phba, int idx)
10148{
10149        struct lpfc_queue *qdesc;
10150        u32 wqesize;
10151        int cpu;
10152
10153        cpu = lpfc_find_cpu_handle(phba, idx, LPFC_FIND_BY_HDWQ);
10154        /* Create Fast Path IO CQs */
10155        if (phba->enab_exp_wqcq_pages)
10156                /* Increase the CQ size when WQEs contain an embedded cdb */
10157                qdesc = lpfc_sli4_queue_alloc(phba, LPFC_EXPANDED_PAGE_SIZE,
10158                                              phba->sli4_hba.cq_esize,
10159                                              LPFC_CQE_EXP_COUNT, cpu);
10160
10161        else
10162                qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
10163                                              phba->sli4_hba.cq_esize,
10164                                              phba->sli4_hba.cq_ecount, cpu);
10165        if (!qdesc) {
10166                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10167                                "0499 Failed allocate fast-path IO CQ (%d)\n",
10168                                idx);
10169                return 1;
10170        }
10171        qdesc->qe_valid = 1;
10172        qdesc->hdwq = idx;
10173        qdesc->chann = cpu;
10174        phba->sli4_hba.hdwq[idx].io_cq = qdesc;
10175
10176        /* Create Fast Path IO WQs */
10177        if (phba->enab_exp_wqcq_pages) {
10178                /* Increase the WQ size when WQEs contain an embedded cdb */
10179                wqesize = (phba->fcp_embed_io) ?
10180                        LPFC_WQE128_SIZE : phba->sli4_hba.wq_esize;
10181                qdesc = lpfc_sli4_queue_alloc(phba, LPFC_EXPANDED_PAGE_SIZE,
10182                                              wqesize,
10183                                              LPFC_WQE_EXP_COUNT, cpu);
10184        } else
10185                qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
10186                                              phba->sli4_hba.wq_esize,
10187                                              phba->sli4_hba.wq_ecount, cpu);
10188
10189        if (!qdesc) {
10190                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10191                                "0503 Failed allocate fast-path IO WQ (%d)\n",
10192                                idx);
10193                return 1;
10194        }
10195        qdesc->hdwq = idx;
10196        qdesc->chann = cpu;
10197        phba->sli4_hba.hdwq[idx].io_wq = qdesc;
10198        list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list);
10199        return 0;
10200}
10201
10202/**
10203 * lpfc_sli4_queue_create - Create all the SLI4 queues
10204 * @phba: pointer to lpfc hba data structure.
10205 *
10206 * This routine is invoked to allocate all the SLI4 queues for the FCoE HBA
10207 * operation. For each SLI4 queue type, the parameters such as queue entry
10208 * count (queue depth) shall be taken from the module parameter. For now,
10209 * we just use some constant number as a placeholder.
10210 *
10211 * Return codes
10212 *      0 - successful
10213 *      -ENOMEM - No available memory
10214 *      -EIO - The mailbox failed to complete successfully.
10215 **/
10216int
10217lpfc_sli4_queue_create(struct lpfc_hba *phba)
10218{
10219        struct lpfc_queue *qdesc;
10220        int idx, cpu, eqcpu;
10221        struct lpfc_sli4_hdw_queue *qp;
10222        struct lpfc_vector_map_info *cpup;
10223        struct lpfc_vector_map_info *eqcpup;
10224        struct lpfc_eq_intr_info *eqi;
10225
10226        /*
10227         * Create HBA Record arrays.
10228         * Both NVME and FCP will share the same vectors / EQs.
10229         */
10230        phba->sli4_hba.mq_esize = LPFC_MQE_SIZE;
10231        phba->sli4_hba.mq_ecount = LPFC_MQE_DEF_COUNT;
10232        phba->sli4_hba.wq_esize = LPFC_WQE_SIZE;
10233        phba->sli4_hba.wq_ecount = LPFC_WQE_DEF_COUNT;
10234        phba->sli4_hba.rq_esize = LPFC_RQE_SIZE;
10235        phba->sli4_hba.rq_ecount = LPFC_RQE_DEF_COUNT;
10236        phba->sli4_hba.eq_esize = LPFC_EQE_SIZE_4B;
10237        phba->sli4_hba.eq_ecount = LPFC_EQE_DEF_COUNT;
10238        phba->sli4_hba.cq_esize = LPFC_CQE_SIZE;
10239        phba->sli4_hba.cq_ecount = LPFC_CQE_DEF_COUNT;
10240
10241        if (!phba->sli4_hba.hdwq) {
10242                phba->sli4_hba.hdwq = kcalloc(
10243                        phba->cfg_hdw_queue, sizeof(struct lpfc_sli4_hdw_queue),
10244                        GFP_KERNEL);
10245                if (!phba->sli4_hba.hdwq) {
10246                        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10247                                        "6427 Failed allocate memory for "
10248                                        "fast-path Hardware Queue array\n");
10249                        goto out_error;
10250                }
10251                /* Prepare hardware queues to take IO buffers */
10252                for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
10253                        qp = &phba->sli4_hba.hdwq[idx];
10254                        spin_lock_init(&qp->io_buf_list_get_lock);
10255                        spin_lock_init(&qp->io_buf_list_put_lock);
10256                        INIT_LIST_HEAD(&qp->lpfc_io_buf_list_get);
10257                        INIT_LIST_HEAD(&qp->lpfc_io_buf_list_put);
10258                        qp->get_io_bufs = 0;
10259                        qp->put_io_bufs = 0;
10260                        qp->total_io_bufs = 0;
10261                        spin_lock_init(&qp->abts_io_buf_list_lock);
10262                        INIT_LIST_HEAD(&qp->lpfc_abts_io_buf_list);
10263                        qp->abts_scsi_io_bufs = 0;
10264                        qp->abts_nvme_io_bufs = 0;
10265                        INIT_LIST_HEAD(&qp->sgl_list);
10266                        INIT_LIST_HEAD(&qp->cmd_rsp_buf_list);
10267                        spin_lock_init(&qp->hdwq_lock);
10268                }
10269        }
10270
10271        if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
10272                if (phba->nvmet_support) {
10273                        phba->sli4_hba.nvmet_cqset = kcalloc(
10274                                        phba->cfg_nvmet_mrq,
10275                                        sizeof(struct lpfc_queue *),
10276                                        GFP_KERNEL);
10277                        if (!phba->sli4_hba.nvmet_cqset) {
10278                                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10279                                        "3121 Failed allocate memory for "
10280                                        "fast-path CQ set array\n");
10281                                goto out_error;
10282                        }
10283                        phba->sli4_hba.nvmet_mrq_hdr = kcalloc(
10284                                        phba->cfg_nvmet_mrq,
10285                                        sizeof(struct lpfc_queue *),
10286                                        GFP_KERNEL);
10287                        if (!phba->sli4_hba.nvmet_mrq_hdr) {
10288                                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10289                                        "3122 Failed allocate memory for "
10290                                        "fast-path RQ set hdr array\n");
10291                                goto out_error;
10292                        }
10293                        phba->sli4_hba.nvmet_mrq_data = kcalloc(
10294                                        phba->cfg_nvmet_mrq,
10295                                        sizeof(struct lpfc_queue *),
10296                                        GFP_KERNEL);
10297                        if (!phba->sli4_hba.nvmet_mrq_data) {
10298                                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10299                                        "3124 Failed allocate memory for "
10300                                        "fast-path RQ set data array\n");
10301                                goto out_error;
10302                        }
10303                }
10304        }
10305
10306        INIT_LIST_HEAD(&phba->sli4_hba.lpfc_wq_list);
10307
10308        /* Create HBA Event Queues (EQs) */
10309        for_each_present_cpu(cpu) {
10310                /* We only want to create 1 EQ per vector, even though
10311                 * multiple CPUs might be using that vector, so only
10312                 * select the CPUs that are flagged LPFC_CPU_FIRST_IRQ.
10313                 */
10314                cpup = &phba->sli4_hba.cpu_map[cpu];
10315                if (!(cpup->flag & LPFC_CPU_FIRST_IRQ))
10316                        continue;
10317
10318                /* Get a ptr to the Hardware Queue associated with this CPU */
10319                qp = &phba->sli4_hba.hdwq[cpup->hdwq];
10320
10321                /* Allocate an EQ */
10322                qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
10323                                              phba->sli4_hba.eq_esize,
10324                                              phba->sli4_hba.eq_ecount, cpu);
10325                if (!qdesc) {
10326                        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10327                                        "0497 Failed allocate EQ (%d)\n",
10328                                        cpup->hdwq);
10329                        goto out_error;
10330                }
10331                qdesc->qe_valid = 1;
10332                qdesc->hdwq = cpup->hdwq;
10333                qdesc->chann = cpu; /* First CPU this EQ is affinitized to */
10334                qdesc->last_cpu = qdesc->chann;
10335
10336                /* Save the allocated EQ in the Hardware Queue */
10337                qp->hba_eq = qdesc;
10338
10339                eqi = per_cpu_ptr(phba->sli4_hba.eq_info, qdesc->last_cpu);
10340                list_add(&qdesc->cpu_list, &eqi->list);
10341        }
10342
10343        /* Now we need to populate the other Hardware Queues, which share
10344         * an IRQ vector, with the associated EQ ptr.
10345         */
10346        for_each_present_cpu(cpu) {
10347                cpup = &phba->sli4_hba.cpu_map[cpu];
10348
10349                /* Check for EQ already allocated in previous loop */
10350                if (cpup->flag & LPFC_CPU_FIRST_IRQ)
10351                        continue;
10352
10353                /* Check for multiple CPUs per hdwq */
10354                qp = &phba->sli4_hba.hdwq[cpup->hdwq];
10355                if (qp->hba_eq)
10356                        continue;
10357
10358                /* We need to share an EQ for this hdwq */
10359                eqcpu = lpfc_find_cpu_handle(phba, cpup->eq, LPFC_FIND_BY_EQ);
10360                eqcpup = &phba->sli4_hba.cpu_map[eqcpu];
10361                qp->hba_eq = phba->sli4_hba.hdwq[eqcpup->hdwq].hba_eq;
10362        }
10363
10364        /* Allocate IO Path SLI4 CQ/WQs */
10365        for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
10366                if (lpfc_alloc_io_wq_cq(phba, idx))
10367                        goto out_error;
10368        }
10369
10370        if (phba->nvmet_support) {
10371                for (idx = 0; idx < phba->cfg_nvmet_mrq; idx++) {
10372                        cpu = lpfc_find_cpu_handle(phba, idx,
10373                                                   LPFC_FIND_BY_HDWQ);
10374                        qdesc = lpfc_sli4_queue_alloc(phba,
10375                                                      LPFC_DEFAULT_PAGE_SIZE,
10376                                                      phba->sli4_hba.cq_esize,
10377                                                      phba->sli4_hba.cq_ecount,
10378                                                      cpu);
10379                        if (!qdesc) {
10380                                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10381                                                "3142 Failed allocate NVME "
10382                                                "CQ Set (%d)\n", idx);
10383                                goto out_error;
10384                        }
10385                        qdesc->qe_valid = 1;
10386                        qdesc->hdwq = idx;
10387                        qdesc->chann = cpu;
10388                        phba->sli4_hba.nvmet_cqset[idx] = qdesc;
10389                }
10390        }
10391
10392        /*
10393         * Create Slow Path Completion Queues (CQs)
10394         */
10395
10396        cpu = lpfc_find_cpu_handle(phba, 0, LPFC_FIND_BY_EQ);
10397        /* Create slow-path Mailbox Command Complete Queue */
10398        qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
10399                                      phba->sli4_hba.cq_esize,
10400                                      phba->sli4_hba.cq_ecount, cpu);
10401        if (!qdesc) {
10402                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10403                                "0500 Failed allocate slow-path mailbox CQ\n");
10404                goto out_error;
10405        }
10406        qdesc->qe_valid = 1;
10407        phba->sli4_hba.mbx_cq = qdesc;
10408
10409        /* Create slow-path ELS Complete Queue */
10410        qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
10411                                      phba->sli4_hba.cq_esize,
10412                                      phba->sli4_hba.cq_ecount, cpu);
10413        if (!qdesc) {
10414                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10415                                "0501 Failed allocate slow-path ELS CQ\n");
10416                goto out_error;
10417        }
10418        qdesc->qe_valid = 1;
10419        qdesc->chann = cpu;
10420        phba->sli4_hba.els_cq = qdesc;
10421
10423        /*
10424         * Create Slow Path Work Queues (WQs)
10425         */
10426
10427        /* Create Mailbox Command Queue */
10428
10429        qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
10430                                      phba->sli4_hba.mq_esize,
10431                                      phba->sli4_hba.mq_ecount, cpu);
10432        if (!qdesc) {
10433                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10434                                "0505 Failed allocate slow-path MQ\n");
10435                goto out_error;
10436        }
10437        qdesc->chann = cpu;
10438        phba->sli4_hba.mbx_wq = qdesc;
10439
10440        /*
10441         * Create ELS Work Queues
10442         */
10443
10444        /* Create slow-path ELS Work Queue */
10445        qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
10446                                      phba->sli4_hba.wq_esize,
10447                                      phba->sli4_hba.wq_ecount, cpu);
10448        if (!qdesc) {
10449                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10450                                "0504 Failed allocate slow-path ELS WQ\n");
10451                goto out_error;
10452        }
10453        qdesc->chann = cpu;
10454        phba->sli4_hba.els_wq = qdesc;
10455        list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list);
10456
10457        if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
10458                /* Create NVME LS Complete Queue */
10459                qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
10460                                              phba->sli4_hba.cq_esize,
10461                                              phba->sli4_hba.cq_ecount, cpu);
10462                if (!qdesc) {
10463                        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10464                                        "6079 Failed allocate NVME LS CQ\n");
10465                        goto out_error;
10466                }
10467                qdesc->chann = cpu;
10468                qdesc->qe_valid = 1;
10469                phba->sli4_hba.nvmels_cq = qdesc;
10470
10471                /* Create NVME LS Work Queue */
10472                qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
10473                                              phba->sli4_hba.wq_esize,
10474                                              phba->sli4_hba.wq_ecount, cpu);
10475                if (!qdesc) {
10476                        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10477                                        "6080 Failed allocate NVME LS WQ\n");
10478                        goto out_error;
10479                }
10480                qdesc->chann = cpu;
10481                phba->sli4_hba.nvmels_wq = qdesc;
10482                list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list);
10483        }
10484
10485        /*
10486         * Create Receive Queue (RQ)
10487         */
10488
10489        /* Create Receive Queue for header */
10490        qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
10491                                      phba->sli4_hba.rq_esize,
10492                                      phba->sli4_hba.rq_ecount, cpu);
10493        if (!qdesc) {
10494                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10495                                "0506 Failed allocate receive HRQ\n");
10496                goto out_error;
10497        }
10498        phba->sli4_hba.hdr_rq = qdesc;
10499
10500        /* Create Receive Queue for data */
10501        qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
10502                                      phba->sli4_hba.rq_esize,
10503                                      phba->sli4_hba.rq_ecount, cpu);
10504        if (!qdesc) {
10505                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10506                                "0507 Failed allocate receive DRQ\n");
10507                goto out_error;
10508        }
10509        phba->sli4_hba.dat_rq = qdesc;
10510
10511        if ((phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) &&
10512            phba->nvmet_support) {
10513                for (idx = 0; idx < phba->cfg_nvmet_mrq; idx++) {
10514                        cpu = lpfc_find_cpu_handle(phba, idx,
10515                                                   LPFC_FIND_BY_HDWQ);
10516                        /* Create NVMET Receive Queue for header */
10517                        qdesc = lpfc_sli4_queue_alloc(phba,
10518                                                      LPFC_DEFAULT_PAGE_SIZE,
10519                                                      phba->sli4_hba.rq_esize,
10520                                                      LPFC_NVMET_RQE_DEF_COUNT,
10521                                                      cpu);
10522                        if (!qdesc) {
10523                                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10524                                                "3146 Failed allocate "
10525                                                "receive HRQ\n");
10526                                goto out_error;
10527                        }
10528                        qdesc->hdwq = idx;
10529                        phba->sli4_hba.nvmet_mrq_hdr[idx] = qdesc;
10530
10531                        /* Only needed for header of RQ pair */
10532                        qdesc->rqbp = kzalloc_node(sizeof(*qdesc->rqbp),
10533                                                   GFP_KERNEL,
10534                                                   cpu_to_node(cpu));
10535                        if (qdesc->rqbp == NULL) {
10536                                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10537                                                "6131 Failed allocate "
10538                                                "Header RQBP\n");
10539                                goto out_error;
10540                        }
10541
10542                        /* Put list in known state in case driver load fails. */
10543                        INIT_LIST_HEAD(&qdesc->rqbp->rqb_buffer_list);
10544
10545                        /* Create NVMET Receive Queue for data */
10546                        qdesc = lpfc_sli4_queue_alloc(phba,
10547                                                      LPFC_DEFAULT_PAGE_SIZE,
10548                                                      phba->sli4_hba.rq_esize,
10549                                                      LPFC_NVMET_RQE_DEF_COUNT,
10550                                                      cpu);
10551                        if (!qdesc) {
10552                                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10553                                                "3156 Failed allocate "
10554                                                "receive DRQ\n");
10555                                goto out_error;
10556                        }
10557                        qdesc->hdwq = idx;
10558                        phba->sli4_hba.nvmet_mrq_data[idx] = qdesc;
10559                }
10560        }
10561
10562        /* Clear NVME stats */
10563        if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
10564                for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
10565                        memset(&phba->sli4_hba.hdwq[idx].nvme_cstat, 0,
10566                               sizeof(phba->sli4_hba.hdwq[idx].nvme_cstat));
10567                }
10568        }
10569
10570        /* Clear SCSI stats */
10571        if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) {
10572                for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
10573                        memset(&phba->sli4_hba.hdwq[idx].scsi_cstat, 0,
10574                               sizeof(phba->sli4_hba.hdwq[idx].scsi_cstat));
10575                }
10576        }
10577
10578        return 0;
10579
10580out_error:
10581        lpfc_sli4_queue_destroy(phba);
10582        return -ENOMEM;
10583}
10584
10585static inline void
10586__lpfc_sli4_release_queue(struct lpfc_queue **qp)
10587{
10588        if (*qp != NULL) {
10589                lpfc_sli4_queue_free(*qp);
10590                *qp = NULL;
10591        }
10592}
10593
10594static inline void
10595lpfc_sli4_release_queues(struct lpfc_queue ***qs, int max)
10596{
10597        int idx;
10598
10599        if (*qs == NULL)
10600                return;
10601
10602        for (idx = 0; idx < max; idx++)
10603                __lpfc_sli4_release_queue(&(*qs)[idx]);
10604
10605        kfree(*qs);
10606        *qs = NULL;
10607}
10608
10609static inline void
10610lpfc_sli4_release_hdwq(struct lpfc_hba *phba)
10611{
10612        struct lpfc_sli4_hdw_queue *hdwq;
10613        struct lpfc_queue *eq;
10614        uint32_t idx;
10615
10616        hdwq = phba->sli4_hba.hdwq;
10617
10618        /* Loop thru all Hardware Queues */
10619        for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
10620                /* Free the CQ/WQ corresponding to the Hardware Queue */
10621                lpfc_sli4_queue_free(hdwq[idx].io_cq);
10622                lpfc_sli4_queue_free(hdwq[idx].io_wq);
10623                hdwq[idx].hba_eq = NULL;
10624                hdwq[idx].io_cq = NULL;
10625                hdwq[idx].io_wq = NULL;
10626                if (phba->cfg_xpsgl && !phba->nvmet_support)
10627                        lpfc_free_sgl_per_hdwq(phba, &hdwq[idx]);
10628                lpfc_free_cmd_rsp_buf_per_hdwq(phba, &hdwq[idx]);
10629        }
10630        /* Loop thru all IRQ vectors */
10631        for (idx = 0; idx < phba->cfg_irq_chann; idx++) {
10632                /* Free the EQ corresponding to the IRQ vector */
10633                eq = phba->sli4_hba.hba_eq_hdl[idx].eq;
10634                lpfc_sli4_queue_free(eq);
10635                phba->sli4_hba.hba_eq_hdl[idx].eq = NULL;
10636        }
10637}
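
/*
 * Editorial note: hdwq[idx].hba_eq may alias the same EQ across several
 * hardware queues (see the EQ-sharing loop in lpfc_sli4_queue_create()),
 * which is why the first loop above only clears the hba_eq pointers;
 * each EQ is freed exactly once by the per-IRQ-vector hba_eq_hdl walk.
 */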
10638
10639/**
10640 * lpfc_sli4_queue_destroy - Destroy all the SLI4 queues
10641 * @phba: pointer to lpfc hba data structure.
10642 *
10643 * This routine is invoked to release all the SLI4 queues used by the FCoE
10644 * HBA operation.
10650 **/
10651void
10652lpfc_sli4_queue_destroy(struct lpfc_hba *phba)
10653{
10654        /*
10655         * Set FREE_INIT before beginning to free the queues.
10656         * Wait until all users of the queues have acknowledged
10657         * the release by clearing FREE_WAIT.
10658         */
10659        spin_lock_irq(&phba->hbalock);
10660        phba->sli.sli_flag |= LPFC_QUEUE_FREE_INIT;
10661        while (phba->sli.sli_flag & LPFC_QUEUE_FREE_WAIT) {
10662                spin_unlock_irq(&phba->hbalock);
10663                msleep(20);
10664                spin_lock_irq(&phba->hbalock);
10665        }
10666        spin_unlock_irq(&phba->hbalock);
10667
10668        lpfc_sli4_cleanup_poll_list(phba);
10669
10670        /* Release HBA eqs */
10671        if (phba->sli4_hba.hdwq)
10672                lpfc_sli4_release_hdwq(phba);
10673
10674        if (phba->nvmet_support) {
10675                lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_cqset,
10676                                         phba->cfg_nvmet_mrq);
10677
10678                lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_mrq_hdr,
10679                                         phba->cfg_nvmet_mrq);
10680                lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_mrq_data,
10681                                         phba->cfg_nvmet_mrq);
10682        }
10683
10684        /* Release mailbox command work queue */
10685        __lpfc_sli4_release_queue(&phba->sli4_hba.mbx_wq);
10686
10687        /* Release ELS work queue */
10688        __lpfc_sli4_release_queue(&phba->sli4_hba.els_wq);
10689
10690        /* Release NVME LS work queue */
10691        __lpfc_sli4_release_queue(&phba->sli4_hba.nvmels_wq);
10692
10693        /* Release unsolicited receive queue */
10694        __lpfc_sli4_release_queue(&phba->sli4_hba.hdr_rq);
10695        __lpfc_sli4_release_queue(&phba->sli4_hba.dat_rq);
10696
10697        /* Release ELS complete queue */
10698        __lpfc_sli4_release_queue(&phba->sli4_hba.els_cq);
10699
10700        /* Release NVME LS complete queue */
10701        __lpfc_sli4_release_queue(&phba->sli4_hba.nvmels_cq);
10702
10703        /* Release mailbox command complete queue */
10704        __lpfc_sli4_release_queue(&phba->sli4_hba.mbx_cq);
10705
10706        /* Everything on this list has been freed */
10707        INIT_LIST_HEAD(&phba->sli4_hba.lpfc_wq_list);
10708
10709        /* Done with freeing the queues */
10710        spin_lock_irq(&phba->hbalock);
10711        phba->sli.sli_flag &= ~LPFC_QUEUE_FREE_INIT;
10712        spin_unlock_irq(&phba->hbalock);
10713}
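
/*
 * Editorial sketch (not part of the driver): the queue-user side of the
 * FREE_INIT/FREE_WAIT handshake seen above. Only the two sli_flag bits
 * and the hbalock usage are taken from the code; the helper names are
 * hypothetical.
 */
static bool lpfc_sketch_queue_user_enter(struct lpfc_hba *phba)
{
	spin_lock_irq(&phba->hbalock);
	if (phba->sli.sli_flag & LPFC_QUEUE_FREE_INIT) {
		/* Teardown has started; do not touch the queues */
		spin_unlock_irq(&phba->hbalock);
		return false;
	}
	/* Make lpfc_sli4_queue_destroy() wait for us */
	phba->sli.sli_flag |= LPFC_QUEUE_FREE_WAIT;
	spin_unlock_irq(&phba->hbalock);
	return true;
}

static void lpfc_sketch_queue_user_exit(struct lpfc_hba *phba)
{
	spin_lock_irq(&phba->hbalock);
	/* Let lpfc_sli4_queue_destroy() proceed with the free */
	phba->sli.sli_flag &= ~LPFC_QUEUE_FREE_WAIT;
	spin_unlock_irq(&phba->hbalock);
}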
10714
10715int
10716lpfc_free_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *rq)
10717{
10718        struct lpfc_rqb *rqbp;
10719        struct lpfc_dmabuf *h_buf;
10720        struct rqb_dmabuf *rqb_buffer;
10721
10722        rqbp = rq->rqbp;
10723        while (!list_empty(&rqbp->rqb_buffer_list)) {
10724                list_remove_head(&rqbp->rqb_buffer_list, h_buf,
10725                                 struct lpfc_dmabuf, list);
10726
10727                rqb_buffer = container_of(h_buf, struct rqb_dmabuf, hbuf);
10728                (rqbp->rqb_free_buffer)(phba, rqb_buffer);
10729                rqbp->buffer_count--;
10730        }
10731        return 1;
10732}
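
/*
 * Editorial sketch (not part of the driver): draining the posted buffers
 * of an NVMET MRQ pair before destroying it. Only the header RQ of the
 * pair carries the rqbp buffer tracker (see the allocation in
 * lpfc_sli4_queue_create()), so that is the queue handed to
 * lpfc_free_rq_buffer(). The helper name and call site are hypothetical.
 */
static void lpfc_sketch_drain_nvmet_mrq(struct lpfc_hba *phba, int idx)
{
	struct lpfc_queue *hrq = phba->sli4_hba.nvmet_mrq_hdr[idx];

	if (hrq && hrq->rqbp)
		lpfc_free_rq_buffer(phba, hrq);
}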
10733
10734static int
10735lpfc_create_wq_cq(struct lpfc_hba *phba, struct lpfc_queue *eq,
10736        struct lpfc_queue *cq, struct lpfc_queue *wq, uint16_t *cq_map,
10737        int qidx, uint32_t qtype)
10738{
10739        struct lpfc_sli_ring *pring;
10740        int rc;
10741
10742        if (!eq || !cq || !wq) {
10743                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10744                        "6085 Fast-path %s (%d) not allocated\n",
10745                        ((eq) ? ((cq) ? "WQ" : "CQ") : "EQ"), qidx);
10746                return -ENOMEM;
10747        }
10748
10749        /* Create the CQ first */
10750        rc = lpfc_cq_create(phba, cq, eq,
10751                        (qtype == LPFC_MBOX) ? LPFC_MCQ : LPFC_WCQ, qtype);
10752        if (rc) {
10753                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10754                                "6086 Failed setup of CQ (%d), rc = 0x%x\n",
10755                                qidx, (uint32_t)rc);
10756                return rc;
10757        }
10758
10759        if (qtype != LPFC_MBOX) {
10760                /* Setup cq_map for fast lookup */
10761                if (cq_map)
10762                        *cq_map = cq->queue_id;
10763
10764                lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
10765                        "6087 CQ setup: cq[%d]-id=%d, parent eq[%d]-id=%d\n",
10766                        qidx, cq->queue_id, qidx, eq->queue_id);
10767
10768                /* create the wq */
10769                rc = lpfc_wq_create(phba, wq, cq, qtype);
10770                if (rc) {
10771                        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10772                                "4618 Failed setup fastpath WQ (%d), rc = 0x%x\n",
10773                                qidx, (uint32_t)rc);
10774                        /* no need to tear down cq - caller will do so */
10775                        return rc;
10776                }
10777
10778                /* Bind this CQ/WQ to the NVME ring */
10779                pring = wq->pring;
10780                pring->sli.sli4.wqp = (void *)wq;
10781                cq->pring = pring;
10782
10783                lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
10784                        "2593 WQ setup: wq[%d]-id=%d assoc=%d, cq[%d]-id=%d\n",
10785                        qidx, wq->queue_id, wq->assoc_qid, qidx, cq->queue_id);
10786        } else {
10787                rc = lpfc_mq_create(phba, wq, cq, LPFC_MBOX);
10788                if (rc) {
10789                        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10790                                        "0539 Failed setup of slow-path MQ: "
10791                                        "rc = 0x%x\n", rc);
10792                        /* no need to tear down cq - caller will do so */
10793                        return rc;
10794                }
10795
10796                lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
10797                        "2589 MBX MQ setup: wq-id=%d, parent cq-id=%d\n",
10798                        phba->sli4_hba.mbx_wq->queue_id,
10799                        phba->sli4_hba.mbx_cq->queue_id);
10800        }
10801
10802        return 0;
10803}
10804
10805/**
10806 * lpfc_setup_cq_lookup - Setup the CQ lookup table
10807 * @phba: pointer to lpfc hba data structure.
10808 *
10809 * This routine populates the cq_lookup table with all
10810 * available CQ queue_ids.
10811 **/
10812static void
10813lpfc_setup_cq_lookup(struct lpfc_hba *phba)
10814{
10815        struct lpfc_queue *eq, *childq;
10816        int qidx;
10817
10818        memset(phba->sli4_hba.cq_lookup, 0,
10819               (sizeof(struct lpfc_queue *) * (phba->sli4_hba.cq_max + 1)));
10820        /* Loop thru all IRQ vectors */
10821        for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) {
10822                /* Get the EQ corresponding to the IRQ vector */
10823                eq = phba->sli4_hba.hba_eq_hdl[qidx].eq;
10824                if (!eq)
10825                        continue;
10826                /* Loop through all CQs associated with that EQ */
10827                list_for_each_entry(childq, &eq->child_list, list) {
10828                        if (childq->queue_id > phba->sli4_hba.cq_max)
10829                                continue;
10830                        if (childq->subtype == LPFC_IO)
10831                                phba->sli4_hba.cq_lookup[childq->queue_id] =
10832                                        childq;
10833                }
10834        }
10835}
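
/*
 * Editorial sketch (not part of the driver): how the fast path can use
 * the table built above to resolve a CQ id straight to its lpfc_queue
 * without walking each EQ's child list. The bounds check mirrors the
 * cq_max guard used while populating the table; the helper name is
 * hypothetical.
 */
static struct lpfc_queue *
lpfc_sketch_cqid_to_cq(struct lpfc_hba *phba, uint16_t cqid)
{
	if (!phba->sli4_hba.cq_lookup || cqid > phba->sli4_hba.cq_max)
		return NULL;	/* caller falls back to a list search */
	return phba->sli4_hba.cq_lookup[cqid];
}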
10836
10837/**
10838 * lpfc_sli4_queue_setup - Set up all the SLI4 queues
10839 * @phba: pointer to lpfc hba data structure.
10840 *
10841 * This routine is invoked to set up all the SLI4 queues for the FCoE HBA
10842 * operation.
10843 *
10844 * Return codes
10845 *      0 - successful
10846 *      -ENOMEM - No available memory
10847 *      -ENXIO - The mailbox failed to complete successfully.
10848 **/
10849int
10850lpfc_sli4_queue_setup(struct lpfc_hba *phba)
10851{
10852        uint32_t shdr_status, shdr_add_status;
10853        union lpfc_sli4_cfg_shdr *shdr;
10854        struct lpfc_vector_map_info *cpup;
10855        struct lpfc_sli4_hdw_queue *qp;
10856        LPFC_MBOXQ_t *mboxq;
10857        int qidx, cpu;
10858        uint32_t length, usdelay;
10859        int rc = -ENOMEM;
10860
10861        /* Check for dual-ULP support */
10862        mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
10863        if (!mboxq) {
10864                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10865                                "3249 Unable to allocate memory for "
10866                                "QUERY_FW_CFG mailbox command\n");
10867                return -ENOMEM;
10868        }
10869        length = (sizeof(struct lpfc_mbx_query_fw_config) -
10870                  sizeof(struct lpfc_sli4_cfg_mhdr));
10871        lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
10872                         LPFC_MBOX_OPCODE_QUERY_FW_CFG,
10873                         length, LPFC_SLI4_MBX_EMBED);
10874
10875        rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
10876
10877        shdr = (union lpfc_sli4_cfg_shdr *)
10878                        &mboxq->u.mqe.un.sli4_config.header.cfg_shdr;
10879        shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
10880        shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
10881        if (shdr_status || shdr_add_status || rc) {
10882                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10883                                "3250 QUERY_FW_CFG mailbox failed with status "
10884                                "x%x add_status x%x, mbx status x%x\n",
10885                                shdr_status, shdr_add_status, rc);
10886                mempool_free(mboxq, phba->mbox_mem_pool);
10887                rc = -ENXIO;
10888                goto out_error;
10889        }
10890
10891        phba->sli4_hba.fw_func_mode =
10892                        mboxq->u.mqe.un.query_fw_cfg.rsp.function_mode;
10893        phba->sli4_hba.ulp0_mode = mboxq->u.mqe.un.query_fw_cfg.rsp.ulp0_mode;
10894        phba->sli4_hba.ulp1_mode = mboxq->u.mqe.un.query_fw_cfg.rsp.ulp1_mode;
10895        phba->sli4_hba.physical_port =
10896                        mboxq->u.mqe.un.query_fw_cfg.rsp.physical_port;
10897        lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
10898                        "3251 QUERY_FW_CFG: func_mode:x%x, ulp0_mode:x%x, "
10899                        "ulp1_mode:x%x\n", phba->sli4_hba.fw_func_mode,
10900                        phba->sli4_hba.ulp0_mode, phba->sli4_hba.ulp1_mode);
10901
10902        mempool_free(mboxq, phba->mbox_mem_pool);
10903
10904        /*
10905         * Set up HBA Event Queues (EQs)
10906         */
10907        qp = phba->sli4_hba.hdwq;
10908
10909        /* Set up HBA event queue */
10910        if (!qp) {
10911                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10912                                "3147 Fast-path EQs not allocated\n");
10913                rc = -ENOMEM;
10914                goto out_error;
10915        }
10916
10917        /* Loop thru all IRQ vectors */
10918        for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) {
10919                /* Create HBA Event Queues (EQs) in order */
10920                for_each_present_cpu(cpu) {
10921                        cpup = &phba->sli4_hba.cpu_map[cpu];
10922
10923                        /* Look for the CPU that's using that vector with
10924                         * LPFC_CPU_FIRST_IRQ set.
10925                         */
10926                        if (!(cpup->flag & LPFC_CPU_FIRST_IRQ))
10927                                continue;
10928                        if (qidx != cpup->eq)
10929                                continue;
10930
10931                        /* Create an EQ for that vector */
10932                        rc = lpfc_eq_create(phba, qp[cpup->hdwq].hba_eq,
10933                                            phba->cfg_fcp_imax);
10934                        if (rc) {
10935                                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10936                                                "0523 Failed setup of fast-path"
10937                                                " EQ (%d), rc = 0x%x\n",
10938                                                cpup->eq, (uint32_t)rc);
10939                                goto out_destroy;
10940                        }
10941
10942                        /* Save the EQ for that vector in the hba_eq_hdl */
10943                        phba->sli4_hba.hba_eq_hdl[cpup->eq].eq =
10944                                qp[cpup->hdwq].hba_eq;
10945
10946                        lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
10947                                        "2584 HBA EQ setup: queue[%d]-id=%d\n",
10948                                        cpup->eq,
10949                                        qp[cpup->hdwq].hba_eq->queue_id);
10950                }
10951        }
10952
10953        /* Loop thru all Hardware Queues */
10954        for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
10955                cpu = lpfc_find_cpu_handle(phba, qidx, LPFC_FIND_BY_HDWQ);
10956                cpup = &phba->sli4_hba.cpu_map[cpu];
10957
10958                /* Create the CQ/WQ corresponding to the Hardware Queue */
10959                rc = lpfc_create_wq_cq(phba,
10960                                       phba->sli4_hba.hdwq[cpup->hdwq].hba_eq,
10961                                       qp[qidx].io_cq,
10962                                       qp[qidx].io_wq,
10963                                       &phba->sli4_hba.hdwq[qidx].io_cq_map,
10964                                       qidx,
10965                                       LPFC_IO);
10966                if (rc) {
10967                        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10968                                        "0535 Failed to setup fastpath "
10969                                        "IO WQ/CQ (%d), rc = 0x%x\n",
10970                                        qidx, (uint32_t)rc);
10971                        goto out_destroy;
10972                }
10973        }
10974
10975        /*
10976         * Set up Slow Path Complete Queues (CQs)
10977         */
10978
10979        /* Set up slow-path MBOX CQ/MQ */
10980
10981        if (!phba->sli4_hba.mbx_cq || !phba->sli4_hba.mbx_wq) {
10982                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10983                                "0528 %s not allocated\n",
10984                                phba->sli4_hba.mbx_cq ?
10985                                "Mailbox WQ" : "Mailbox CQ");
10986                rc = -ENOMEM;
10987                goto out_destroy;
10988        }
10989
10990        rc = lpfc_create_wq_cq(phba, qp[0].hba_eq,
10991                               phba->sli4_hba.mbx_cq,
10992                               phba->sli4_hba.mbx_wq,
10993                               NULL, 0, LPFC_MBOX);
10994        if (rc) {
10995                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10996                        "0529 Failed setup of mailbox WQ/CQ: rc = 0x%x\n",
10997                        (uint32_t)rc);
10998                goto out_destroy;
10999        }
11000        if (phba->nvmet_support) {
11001                if (!phba->sli4_hba.nvmet_cqset) {
11002                        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11003                                        "3165 Fast-path NVME CQ Set "
11004                                        "array not allocated\n");
11005                        rc = -ENOMEM;
11006                        goto out_destroy;
11007                }
11008                if (phba->cfg_nvmet_mrq > 1) {
11009                        rc = lpfc_cq_create_set(phba,
11010                                        phba->sli4_hba.nvmet_cqset,
11011                                        qp,
11012                                        LPFC_WCQ, LPFC_NVMET);
11013                        if (rc) {
11014                                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11015                                                "3164 Failed setup of NVME CQ "
11016                                                "Set, rc = 0x%x\n",
11017                                                (uint32_t)rc);
11018                                goto out_destroy;
11019                        }
11020                } else {
11021                        /* Set up NVMET Receive Complete Queue */
11022                        rc = lpfc_cq_create(phba, phba->sli4_hba.nvmet_cqset[0],
11023                                            qp[0].hba_eq,
11024                                            LPFC_WCQ, LPFC_NVMET);
11025                        if (rc) {
11026                                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11027                                                "6089 Failed setup NVMET CQ: "
11028                                                "rc = 0x%x\n", (uint32_t)rc);
11029                                goto out_destroy;
11030                        }
11031                        phba->sli4_hba.nvmet_cqset[0]->chann = 0;
11032
11033                        lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
11034                                        "6090 NVMET CQ setup: cq-id=%d, "
11035                                        "parent eq-id=%d\n",
11036                                        phba->sli4_hba.nvmet_cqset[0]->queue_id,
11037                                        qp[0].hba_eq->queue_id);
11038                }
11039        }
11040
11041        /* Set up slow-path ELS WQ/CQ */
11042        if (!phba->sli4_hba.els_cq || !phba->sli4_hba.els_wq) {
11043                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11044                                "0530 ELS %s not allocated\n",
11045                                phba->sli4_hba.els_cq ? "WQ" : "CQ");
11046                rc = -ENOMEM;
11047                goto out_destroy;
11048        }
11049        rc = lpfc_create_wq_cq(phba, qp[0].hba_eq,
11050                               phba->sli4_hba.els_cq,
11051                               phba->sli4_hba.els_wq,
11052                               NULL, 0, LPFC_ELS);
11053        if (rc) {
11054                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11055                                "0525 Failed setup of ELS WQ/CQ: rc = 0x%x\n",
11056                                (uint32_t)rc);
11057                goto out_destroy;
11058        }
11059        lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
11060                        "2590 ELS WQ setup: wq-id=%d, parent cq-id=%d\n",
11061                        phba->sli4_hba.els_wq->queue_id,
11062                        phba->sli4_hba.els_cq->queue_id);
11063
11064        if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
11065                /* Set up NVME LS Complete Queue */
11066                if (!phba->sli4_hba.nvmels_cq || !phba->sli4_hba.nvmels_wq) {
11067                        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11068                                        "6091 LS %s not allocated\n",
11069                                        phba->sli4_hba.nvmels_cq ? "WQ" : "CQ");
11070                        rc = -ENOMEM;
11071                        goto out_destroy;
11072                }
11073                rc = lpfc_create_wq_cq(phba, qp[0].hba_eq,
11074                                       phba->sli4_hba.nvmels_cq,
11075                                       phba->sli4_hba.nvmels_wq,
11076                                       NULL, 0, LPFC_NVME_LS);
11077                if (rc) {
11078                        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11079                                        "0526 Failed setup of NVME LS WQ/CQ: "
11080                                        "rc = 0x%x\n", (uint32_t)rc);
11081                        goto out_destroy;
11082                }
11083
11084                lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
11085                                "6096 NVME LS WQ setup: wq-id=%d, "
11086                                "parent cq-id=%d\n",
11087                                phba->sli4_hba.nvmels_wq->queue_id,
11088                                phba->sli4_hba.nvmels_cq->queue_id);
11089        }
11090
11091        /*
11092         * Create NVMET Receive Queue (RQ)
11093         */
11094        if (phba->nvmet_support) {
11095                if ((!phba->sli4_hba.nvmet_cqset) ||
11096                    (!phba->sli4_hba.nvmet_mrq_hdr) ||
11097                    (!phba->sli4_hba.nvmet_mrq_data)) {
11098                        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11099                                        "6130 MRQ CQ Queues not "
11100                                        "allocated\n");
11101                        rc = -ENOMEM;
11102                        goto out_destroy;
11103                }
11104                if (phba->cfg_nvmet_mrq > 1) {
11105                        rc = lpfc_mrq_create(phba,
11106                                             phba->sli4_hba.nvmet_mrq_hdr,
11107                                             phba->sli4_hba.nvmet_mrq_data,
11108                                             phba->sli4_hba.nvmet_cqset,
11109                                             LPFC_NVMET);
11110                        if (rc) {
11111                                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11112                                                "6098 Failed setup of NVMET "
11113                                                "MRQ: rc = 0x%x\n",
11114                                                (uint32_t)rc);
11115                                goto out_destroy;
11116                        }
11117
11118                } else {
11119                        rc = lpfc_rq_create(phba,
11120                                            phba->sli4_hba.nvmet_mrq_hdr[0],
11121                                            phba->sli4_hba.nvmet_mrq_data[0],
11122                                            phba->sli4_hba.nvmet_cqset[0],
11123                                            LPFC_NVMET);
11124                        if (rc) {
11125                                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11126                                                "6057 Failed setup of NVMET "
11127                                                "Receive Queue: rc = 0x%x\n",
11128                                                (uint32_t)rc);
11129                                goto out_destroy;
11130                        }
11131
11132                        lpfc_printf_log(
11133                                phba, KERN_INFO, LOG_INIT,
11134                                "6099 NVMET RQ setup: hdr-rq-id=%d, "
11135                                "dat-rq-id=%d parent cq-id=%d\n",
11136                                phba->sli4_hba.nvmet_mrq_hdr[0]->queue_id,
11137                                phba->sli4_hba.nvmet_mrq_data[0]->queue_id,
11138                                phba->sli4_hba.nvmet_cqset[0]->queue_id);
11139
11140                }
11141        }
11142
11143        if (!phba->sli4_hba.hdr_rq || !phba->sli4_hba.dat_rq) {
11144                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11145                                "0540 Receive Queue not allocated\n");
11146                rc = -ENOMEM;
11147                goto out_destroy;
11148        }
11149
11150        rc = lpfc_rq_create(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq,
11151                            phba->sli4_hba.els_cq, LPFC_USOL);
11152        if (rc) {
11153                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11154                                "0541 Failed setup of Receive Queue: "
11155                                "rc = 0x%x\n", (uint32_t)rc);
11156                goto out_destroy;
11157        }
11158
11159        lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
11160                        "2592 USL RQ setup: hdr-rq-id=%d, dat-rq-id=%d "
11161                        "parent cq-id=%d\n",
11162                        phba->sli4_hba.hdr_rq->queue_id,
11163                        phba->sli4_hba.dat_rq->queue_id,
11164                        phba->sli4_hba.els_cq->queue_id);
11165
11166        if (phba->cfg_fcp_imax)
11167                usdelay = LPFC_SEC_TO_USEC / phba->cfg_fcp_imax;
11168        else
11169                usdelay = 0;
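
        /*
         * Editorial example: assuming LPFC_SEC_TO_USEC is one second in
         * microseconds (1000000), a cfg_fcp_imax of 50000 interrupts/sec
         * yields usdelay = 1000000 / 50000 = 20 us of EQ coalescing
         * delay per vector.
         */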
11170
11171        for (qidx = 0; qidx < phba->cfg_irq_chann;
11172             qidx += LPFC_MAX_EQ_DELAY_EQID_CNT)
11173                lpfc_modify_hba_eq_delay(phba, qidx, LPFC_MAX_EQ_DELAY_EQID_CNT,
11174                                         usdelay);
11175
11176        if (phba->sli4_hba.cq_max) {
11177                kfree(phba->sli4_hba.cq_lookup);
11178                phba->sli4_hba.cq_lookup = kcalloc((phba->sli4_hba.cq_max + 1),
11179                        sizeof(struct lpfc_queue *), GFP_KERNEL);
11180                if (!phba->sli4_hba.cq_lookup) {
11181                        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11182                                        "0549 Failed setup of CQ Lookup table: "
11183                                        "size 0x%x\n", phba->sli4_hba.cq_max);
11184                        rc = -ENOMEM;
11185                        goto out_destroy;
11186                }
11187                lpfc_setup_cq_lookup(phba);
11188        }
11189        return 0;
11190
11191out_destroy:
11192        lpfc_sli4_queue_unset(phba);
11193out_error:
11194        return rc;
11195}
11196
11197/**
11198 * lpfc_sli4_queue_unset - Unset all the SLI4 queues
11199 * @phba: pointer to lpfc hba data structure.
11200 *
11201 * This routine is invoked to unset all the SLI4 queues used by the FCoE
11202 * HBA operation.
11208 **/
11209void
11210lpfc_sli4_queue_unset(struct lpfc_hba *phba)
11211{
11212        struct lpfc_sli4_hdw_queue *qp;
11213        struct lpfc_queue *eq;
11214        int qidx;
11215
11216        /* Unset mailbox command work queue */
11217        if (phba->sli4_hba.mbx_wq)
11218                lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq);
11219
11220        /* Unset NVME LS work queue */
11221        if (phba->sli4_hba.nvmels_wq)
11222                lpfc_wq_destroy(phba, phba->sli4_hba.nvmels_wq);
11223
11224        /* Unset ELS work queue */
11225        if (phba->sli4_hba.els_wq)
11226                lpfc_wq_destroy(phba, phba->sli4_hba.els_wq);
11227
11228        /* Unset unsolicited receive queue */
11229        if (phba->sli4_hba.hdr_rq)
11230                lpfc_rq_destroy(phba, phba->sli4_hba.hdr_rq,
11231                                phba->sli4_hba.dat_rq);
11232
11233        /* Unset mailbox command complete queue */
11234        if (phba->sli4_hba.mbx_cq)
11235                lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq);
11236
11237        /* Unset ELS complete queue */
11238        if (phba->sli4_hba.els_cq)
11239                lpfc_cq_destroy(phba, phba->sli4_hba.els_cq);
11240
11241        /* Unset NVME LS complete queue */
11242        if (phba->sli4_hba.nvmels_cq)
11243                lpfc_cq_destroy(phba, phba->sli4_hba.nvmels_cq);
11244
11245        if (phba->nvmet_support) {
11246                /* Unset NVMET MRQ queue */
11247                if (phba->sli4_hba.nvmet_mrq_hdr) {
11248                        for (qidx = 0; qidx < phba->cfg_nvmet_mrq; qidx++)
11249                                lpfc_rq_destroy(
11250                                        phba,
11251                                        phba->sli4_hba.nvmet_mrq_hdr[qidx],
11252                                        phba->sli4_hba.nvmet_mrq_data[qidx]);
11253                }
11254
11255                /* Unset NVMET CQ Set complete queue */
11256                if (phba->sli4_hba.nvmet_cqset) {
11257                        for (qidx = 0; qidx < phba->cfg_nvmet_mrq; qidx++)
11258                                lpfc_cq_destroy(
11259                                        phba, phba->sli4_hba.nvmet_cqset[qidx]);
11260                }
11261        }
11262
11263        /* Unset fast-path SLI4 queues */
11264        if (phba->sli4_hba.hdwq) {
11265                /* Loop thru all Hardware Queues */
11266                for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
11267                        /* Destroy the CQ/WQ corresponding to Hardware Queue */
11268                        qp = &phba->sli4_hba.hdwq[qidx];
11269                        lpfc_wq_destroy(phba, qp->io_wq);
11270                        lpfc_cq_destroy(phba, qp->io_cq);
11271                }
11272                /* Loop thru all IRQ vectors */
11273                for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) {
11274                        /* Destroy the EQ corresponding to the IRQ vector */
11275                        eq = phba->sli4_hba.hba_eq_hdl[qidx].eq;
11276                        lpfc_eq_destroy(phba, eq);
11277                }
11278        }
11279
11280        kfree(phba->sli4_hba.cq_lookup);
11281        phba->sli4_hba.cq_lookup = NULL;
11282        phba->sli4_hba.cq_max = 0;
11283}
11284
11285/**
11286 * lpfc_sli4_cq_event_pool_create - Create completion-queue event free pool
11287 * @phba: pointer to lpfc hba data structure.
11288 *
11289 * This routine is invoked to allocate and set up a pool of completion queue
11290 * events. The body of a completion queue event is a completion queue entry
11291 * (CQE). For now, this pool is used for the interrupt service routine to queue
11292 * the following HBA completion queue events for the worker thread to process:
11293 *   - Mailbox asynchronous events
11294 *   - Receive queue completion unsolicited events
11295 * Later, this can be used for all the slow-path events.
11296 *
11297 * Return codes
11298 *      0 - successful
11299 *      -ENOMEM - No available memory
11300 **/
11301static int
11302lpfc_sli4_cq_event_pool_create(struct lpfc_hba *phba)
11303{
11304        struct lpfc_cq_event *cq_event;
11305        int i;
11306
11307        for (i = 0; i < (4 * phba->sli4_hba.cq_ecount); i++) {
11308                cq_event = kmalloc(sizeof(struct lpfc_cq_event), GFP_KERNEL);
11309                if (!cq_event)
11310                        goto out_pool_create_fail;
11311                list_add_tail(&cq_event->list,
11312                              &phba->sli4_hba.sp_cqe_event_pool);
11313        }
11314        return 0;
11315
11316out_pool_create_fail:
11317        lpfc_sli4_cq_event_pool_destroy(phba);
11318        return -ENOMEM;
11319}
11320
11321/**
11322 * lpfc_sli4_cq_event_pool_destroy - Free completion-queue event free pool
11323 * @phba: pointer to lpfc hba data structure.
11324 *
11325 * This routine is invoked to free the pool of completion queue events at
11326 * driver unload time. Note that it is the responsibility of the driver
11327 * cleanup routine to free all the outstanding completion-queue events
11328 * allocated from this pool back into the pool before invoking this routine
11329 * to destroy the pool.
11330 **/
11331static void
11332lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *phba)
11333{
11334        struct lpfc_cq_event *cq_event, *next_cq_event;
11335
11336        list_for_each_entry_safe(cq_event, next_cq_event,
11337                                 &phba->sli4_hba.sp_cqe_event_pool, list) {
11338                list_del(&cq_event->list);
11339                kfree(cq_event);
11340        }
11341}
11342
11343/**
11344 * __lpfc_sli4_cq_event_alloc - Allocate a completion-queue event from free pool
11345 * @phba: pointer to lpfc hba data structure.
11346 *
11347 * This routine is the lock free version of the API invoked to allocate a
11348 * completion-queue event from the free pool.
11349 *
11350 * Return: Pointer to the newly allocated completion-queue event if successful
11351 *         NULL otherwise.
11352 **/
11353struct lpfc_cq_event *
11354__lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba)
11355{
11356        struct lpfc_cq_event *cq_event = NULL;
11357
11358        list_remove_head(&phba->sli4_hba.sp_cqe_event_pool, cq_event,
11359                         struct lpfc_cq_event, list);
11360        return cq_event;
11361}
11362
11363/**
11364 * lpfc_sli4_cq_event_alloc - Allocate a completion-queue event from free pool
11365 * @phba: pointer to lpfc hba data structure.
11366 *
11367 * This routine is the lock version of the API invoked to allocate a
11368 * completion-queue event from the free pool.
11369 *
11370 * Return: Pointer to the newly allocated completion-queue event if successful
11371 *         NULL otherwise.
11372 **/
11373struct lpfc_cq_event *
11374lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba)
11375{
11376        struct lpfc_cq_event *cq_event;
11377        unsigned long iflags;
11378
11379        spin_lock_irqsave(&phba->hbalock, iflags);
11380        cq_event = __lpfc_sli4_cq_event_alloc(phba);
11381        spin_unlock_irqrestore(&phba->hbalock, iflags);
11382        return cq_event;
11383}
11384
11385/**
11386 * __lpfc_sli4_cq_event_release - Release a completion-queue event to free pool
11387 * @phba: pointer to lpfc hba data structure.
11388 * @cq_event: pointer to the completion queue event to be freed.
11389 *
11390 * This routine is the lock free version of the API invoked to release a
11391 * completion-queue event back into the free pool.
11392 **/
11393void
11394__lpfc_sli4_cq_event_release(struct lpfc_hba *phba,
11395                             struct lpfc_cq_event *cq_event)
11396{
11397        list_add_tail(&cq_event->list, &phba->sli4_hba.sp_cqe_event_pool);
11398}
11399
11400/**
11401 * lpfc_sli4_cq_event_release - Release a completion-queue event to free pool
11402 * @phba: pointer to lpfc hba data structure.
11403 * @cq_event: pointer to the completion queue event to be freed.
11404 *
11405 * This routine is the lock version of the API invoked to release a
11406 * completion-queue event back into the free pool.
11407 **/
11408void
11409lpfc_sli4_cq_event_release(struct lpfc_hba *phba,
11410                           struct lpfc_cq_event *cq_event)
11411{
11412        unsigned long iflags;
11413        spin_lock_irqsave(&phba->hbalock, iflags);
11414        __lpfc_sli4_cq_event_release(phba, cq_event);
11415        spin_unlock_irqrestore(&phba->hbalock, iflags);
11416}
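
/*
 * Editorial sketch (not part of the driver): a typical round trip
 * through the CQ event pool using the two APIs above. Allocation can
 * fail once the pre-allocated pool is exhausted, so callers must
 * tolerate NULL. The helper name is hypothetical.
 */
static void lpfc_sketch_cq_event_round_trip(struct lpfc_hba *phba)
{
	struct lpfc_cq_event *cq_event;

	cq_event = lpfc_sli4_cq_event_alloc(phba);
	if (!cq_event)
		return;		/* pool empty: drop or defer the event */

	/* ... fill in the event and hand it to the worker thread ... */

	lpfc_sli4_cq_event_release(phba, cq_event);
}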
11417
11418/**
11419 * lpfc_sli4_cq_event_release_all - Release all cq events to the free pool
11420 * @phba: pointer to lpfc hba data structure.
11421 *
11422 * This routine frees all the pending completion-queue events back into
11423 * the free pool for a device reset.
11424 **/
11425static void
11426lpfc_sli4_cq_event_release_all(struct lpfc_hba *phba)
11427{
11428        LIST_HEAD(cq_event_list);
11429        struct lpfc_cq_event *cq_event;
11430        unsigned long iflags;
11431
11432        /* Retrieve all the pending WCQEs from pending WCQE lists */
11433
11434        /* Pending ELS XRI abort events */
11435        spin_lock_irqsave(&phba->sli4_hba.els_xri_abrt_list_lock, iflags);
11436        list_splice_init(&phba->sli4_hba.sp_els_xri_aborted_work_queue,
11437                         &cq_event_list);
11438        spin_unlock_irqrestore(&phba->sli4_hba.els_xri_abrt_list_lock, iflags);
11439
11440        /* Pending async events */
11441        spin_lock_irqsave(&phba->sli4_hba.asynce_list_lock, iflags);
11442        list_splice_init(&phba->sli4_hba.sp_asynce_work_queue,
11443                         &cq_event_list);
11444        spin_unlock_irqrestore(&phba->sli4_hba.asynce_list_lock, iflags);
11445
11446        while (!list_empty(&cq_event_list)) {
11447                list_remove_head(&cq_event_list, cq_event,
11448                                 struct lpfc_cq_event, list);
11449                lpfc_sli4_cq_event_release(phba, cq_event);
11450        }
11451}
11452
11453/**
11454 * lpfc_pci_function_reset - Reset PCI function.
11455 * @phba: pointer to lpfc hba data structure.
11456 *
11457 * This routine is invoked to request a PCI function reset. It will destroy
11458 * all resources assigned to the PCI function which originates this request.
11459 *
11460 * Return codes
11461 *      0 - successful
11462 *      -ENOMEM - No available memory
11463 *      -ENXIO - The mailbox failed to complete successfully.
11464 **/
11465int
11466lpfc_pci_function_reset(struct lpfc_hba *phba)
11467{
11468        LPFC_MBOXQ_t *mboxq;
11469        uint32_t rc = 0, if_type;
11470        uint32_t shdr_status, shdr_add_status;
11471        uint32_t rdy_chk;
11472        uint32_t port_reset = 0;
11473        union lpfc_sli4_cfg_shdr *shdr;
11474        struct lpfc_register reg_data;
11475        uint16_t devid;
11476
11477        if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
11478        switch (if_type) {
11479        case LPFC_SLI_INTF_IF_TYPE_0:
11480                mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
11481                                                       GFP_KERNEL);
11482                if (!mboxq) {
11483                        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11484                                        "0494 Unable to allocate memory for "
11485                                        "issuing SLI_FUNCTION_RESET mailbox "
11486                                        "command\n");
11487                        return -ENOMEM;
11488                }
11489
11490                /* Setup PCI function reset mailbox-ioctl command */
11491                lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
11492                                 LPFC_MBOX_OPCODE_FUNCTION_RESET, 0,
11493                                 LPFC_SLI4_MBX_EMBED);
11494                rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
11495                shdr = (union lpfc_sli4_cfg_shdr *)
11496                        &mboxq->u.mqe.un.sli4_config.header.cfg_shdr;
11497                shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
11498                shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
11499                                         &shdr->response);
11500                mempool_free(mboxq, phba->mbox_mem_pool);
11501                if (shdr_status || shdr_add_status || rc) {
11502                        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11503                                        "0495 SLI_FUNCTION_RESET mailbox "
11504                                        "failed with status x%x add_status x%x,"
11505                                        " mbx status x%x\n",
11506                                        shdr_status, shdr_add_status, rc);
11507                        rc = -ENXIO;
11508                }
11509                break;
11510        case LPFC_SLI_INTF_IF_TYPE_2:
11511        case LPFC_SLI_INTF_IF_TYPE_6:
11512wait:
11513                /*
11514                 * Poll the Port Status Register and wait for RDY for
11515                 * up to 30 seconds (1500 polls at 20 ms intervals). If
11516                 * the port doesn't respond, treat it as an error.
11517                 */
11518                for (rdy_chk = 0; rdy_chk < 1500; rdy_chk++) {
11519                        if (lpfc_readl(phba->sli4_hba.u.if_type2.
11520                                STATUSregaddr, &reg_data.word0)) {
11521                                rc = -ENODEV;
11522                                goto out;
11523                        }
11524                        if (bf_get(lpfc_sliport_status_rdy, &reg_data))
11525                                break;
11526                        msleep(20);
11527                }
11528
11529                if (!bf_get(lpfc_sliport_status_rdy, &reg_data)) {
11530                        phba->work_status[0] = readl(
11531                                phba->sli4_hba.u.if_type2.ERR1regaddr);
11532                        phba->work_status[1] = readl(
11533                                phba->sli4_hba.u.if_type2.ERR2regaddr);
11534                        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11535                                        "2890 Port not ready, port status reg "
11536                                        "0x%x error 1=0x%x, error 2=0x%x\n",
11537                                        reg_data.word0,
11538                                        phba->work_status[0],
11539                                        phba->work_status[1]);
11540                        rc = -ENODEV;
11541                        goto out;
11542                }
11543
11544                if (!port_reset) {
11545                        /*
11546                         * Reset the port now
11547                         */
11548                        reg_data.word0 = 0;
11549                        bf_set(lpfc_sliport_ctrl_end, &reg_data,
11550                               LPFC_SLIPORT_LITTLE_ENDIAN);
11551                        bf_set(lpfc_sliport_ctrl_ip, &reg_data,
11552                               LPFC_SLIPORT_INIT_PORT);
11553                        writel(reg_data.word0, phba->sli4_hba.u.if_type2.
11554                               CTRLregaddr);
11555                        /* flush the posted write with a config read */
11556                        pci_read_config_word(phba->pcidev,
11557                                             PCI_DEVICE_ID, &devid);
11558
11559                        port_reset = 1;
11560                        msleep(20);
11561                        goto wait;
11562                } else if (bf_get(lpfc_sliport_status_rn, &reg_data)) {
11563                        rc = -ENODEV;
11564                        goto out;
11565                }
11566                break;
11567
11568        case LPFC_SLI_INTF_IF_TYPE_1:
11569        default:
11570                break;
11571        }
11572
11573out:
11574        /* Catch the not-ready port failure after a port reset. */
11575        if (rc) {
11576                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11577                                "3317 HBA not functional: IP Reset Failed "
11578                                "try: echo fw_reset > board_mode\n");
11579                rc = -ENODEV;
11580        }
11581
11582        return rc;
11583}
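
/*
 * In short, for if_type 2/6 the sequence above is: poll SLIPORT_STATUS for
 * RDY (up to 30 seconds), write LPFC_SLIPORT_INIT_PORT to the control
 * register, flush the posted write with a PCI config-space read, then poll
 * for RDY again.  If RN (reset needed) is still set after the reset, the
 * port is treated as not functional and -ENODEV is returned.
 */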
11584
11585/**
11586 * lpfc_sli4_pci_mem_setup - Setup SLI4 HBA PCI memory space.
11587 * @phba: pointer to lpfc hba data structure.
11588 *
11589 * This routine is invoked to set up the PCI device memory space for device
11590 * with SLI-4 interface spec.
11591 *
11592 * Return codes
11593 *      0 - successful
11594 *      other values - error
11595 **/
11596static int
11597lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba)
11598{
11599        struct pci_dev *pdev = phba->pcidev;
11600        unsigned long bar0map_len, bar1map_len, bar2map_len;
11601        int error;
11602        uint32_t if_type;
11603
11604        if (!pdev)
11605                return -ENODEV;
11606
11607        /* Set the device DMA mask size */
11608        error = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
11609        if (error)
11610                error = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
11611        if (error)
11612                return error;
11613
11614        /*
11615         * The BARs and register set definitions and offset locations are
11616         * dependent on the if_type.
11617         */
11618        if (pci_read_config_dword(pdev, LPFC_SLI_INTF,
11619                                  &phba->sli4_hba.sli_intf.word0)) {
11620                return -ENODEV;
11621        }
11622
11623        /* There is no SLI3 fallback for SLI4 devices. */
11624        if (bf_get(lpfc_sli_intf_valid, &phba->sli4_hba.sli_intf) !=
11625            LPFC_SLI_INTF_VALID) {
11626                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11627                                "2894 SLI_INTF reg contents invalid "
11628                                "sli_intf reg 0x%x\n",
11629                                phba->sli4_hba.sli_intf.word0);
11630                return -ENODEV;
11631        }
11632
11633        if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
11634        /*
11635         * Get the bus address of SLI4 device Bar regions and the
11636         * number of bytes required by each mapping. The mapping of the
11637         * particular PCI BAR regions is dependent on the type of
11638         * SLI4 device.
11639         */
11640        if (pci_resource_start(pdev, PCI_64BIT_BAR0)) {
11641                phba->pci_bar0_map = pci_resource_start(pdev, PCI_64BIT_BAR0);
11642                bar0map_len = pci_resource_len(pdev, PCI_64BIT_BAR0);
11643
11644                /*
11645                 * Map SLI4 PCI Config Space Register base to a kernel virtual
11646                 * addr
11647                 */
11648                phba->sli4_hba.conf_regs_memmap_p =
11649                        ioremap(phba->pci_bar0_map, bar0map_len);
11650                if (!phba->sli4_hba.conf_regs_memmap_p) {
11651                        dev_printk(KERN_ERR, &pdev->dev,
11652                                   "ioremap failed for SLI4 PCI config "
11653                                   "registers.\n");
11654                        return -ENODEV;
11655                }
11656                phba->pci_bar0_memmap_p = phba->sli4_hba.conf_regs_memmap_p;
11657                /* Set up BAR0 PCI config space register memory map */
11658                lpfc_sli4_bar0_register_memmap(phba, if_type);
11659        } else {
11660                phba->pci_bar0_map = pci_resource_start(pdev, 1);
11661                bar0map_len = pci_resource_len(pdev, 1);
11662                if (if_type >= LPFC_SLI_INTF_IF_TYPE_2) {
11663                        dev_printk(KERN_ERR, &pdev->dev,
11664                           "FATAL - No BAR0 mapping for SLI4, if_type >= 2\n");
11665                        return -ENODEV;
11666                }
11667                phba->sli4_hba.conf_regs_memmap_p =
11668                                ioremap(phba->pci_bar0_map, bar0map_len);
11669                if (!phba->sli4_hba.conf_regs_memmap_p) {
11670                        dev_printk(KERN_ERR, &pdev->dev,
11671                                "ioremap failed for SLI4 PCI config "
11672                                "registers.\n");
11673                        return -ENODEV;
11674                }
11675                lpfc_sli4_bar0_register_memmap(phba, if_type);
11676        }
11677
11678        if (if_type == LPFC_SLI_INTF_IF_TYPE_0) {
11679                if (pci_resource_start(pdev, PCI_64BIT_BAR2)) {
11680                        /*
11681                         * Map SLI4 if type 0 HBA Control Register base to a
11682                         * kernel virtual address and setup the registers.
11683                         */
11684                        phba->pci_bar1_map = pci_resource_start(pdev,
11685                                                                PCI_64BIT_BAR2);
11686                        bar1map_len = pci_resource_len(pdev, PCI_64BIT_BAR2);
11687                        phba->sli4_hba.ctrl_regs_memmap_p =
11688                                        ioremap(phba->pci_bar1_map,
11689                                                bar1map_len);
11690                        if (!phba->sli4_hba.ctrl_regs_memmap_p) {
11691                                dev_err(&pdev->dev,
11692                                           "ioremap failed for SLI4 HBA "
11693                                            "control registers.\n");
11694                                error = -ENOMEM;
11695                                goto out_iounmap_conf;
11696                        }
11697                        phba->pci_bar2_memmap_p =
11698                                         phba->sli4_hba.ctrl_regs_memmap_p;
11699                        lpfc_sli4_bar1_register_memmap(phba, if_type);
11700                } else {
11701                        error = -ENOMEM;
11702                        goto out_iounmap_conf;
11703                }
11704        }
11705
11706        if ((if_type == LPFC_SLI_INTF_IF_TYPE_6) &&
11707            (pci_resource_start(pdev, PCI_64BIT_BAR2))) {
11708                /*
11709                 * Map SLI4 if type 6 HBA Doorbell Register base to a kernel
11710                 * virtual address and setup the registers.
11711                 */
11712                phba->pci_bar1_map = pci_resource_start(pdev, PCI_64BIT_BAR2);
11713                bar1map_len = pci_resource_len(pdev, PCI_64BIT_BAR2);
11714                phba->sli4_hba.drbl_regs_memmap_p =
11715                                ioremap(phba->pci_bar1_map, bar1map_len);
11716                if (!phba->sli4_hba.drbl_regs_memmap_p) {
11717                        dev_err(&pdev->dev,
11718                           "ioremap failed for SLI4 HBA doorbell registers.\n");
11719                        error = -ENOMEM;
11720                        goto out_iounmap_conf;
11721                }
11722                phba->pci_bar2_memmap_p = phba->sli4_hba.drbl_regs_memmap_p;
11723                lpfc_sli4_bar1_register_memmap(phba, if_type);
11724        }
11725
11726        if (if_type == LPFC_SLI_INTF_IF_TYPE_0) {
11727                if (pci_resource_start(pdev, PCI_64BIT_BAR4)) {
11728                        /*
11729                         * Map SLI4 if type 0 HBA Doorbell Register base to
11730                         * a kernel virtual address and setup the registers.
11731                         */
11732                        phba->pci_bar2_map = pci_resource_start(pdev,
11733                                                                PCI_64BIT_BAR4);
11734                        bar2map_len = pci_resource_len(pdev, PCI_64BIT_BAR4);
11735                        phba->sli4_hba.drbl_regs_memmap_p =
11736                                        ioremap(phba->pci_bar2_map,
11737                                                bar2map_len);
11738                        if (!phba->sli4_hba.drbl_regs_memmap_p) {
11739                                dev_err(&pdev->dev,
11740                                           "ioremap failed for SLI4 HBA"
11741                                           " doorbell registers.\n");
11742                                error = -ENOMEM;
11743                                goto out_iounmap_ctrl;
11744                        }
11745                        phba->pci_bar4_memmap_p =
11746                                        phba->sli4_hba.drbl_regs_memmap_p;
11747                        error = lpfc_sli4_bar2_register_memmap(phba, LPFC_VF0);
11748                        if (error)
11749                                goto out_iounmap_all;
11750                } else {
11751                        error = -ENOMEM;
11752                        goto out_iounmap_all;
11753                }
11754        }
11755
11756        if (if_type == LPFC_SLI_INTF_IF_TYPE_6 &&
11757            pci_resource_start(pdev, PCI_64BIT_BAR4)) {
11758                /*
11759                 * Map SLI4 if type 6 HBA DPP Register base to a kernel
11760                 * virtual address and setup the registers.
11761                 */
11762                phba->pci_bar2_map = pci_resource_start(pdev, PCI_64BIT_BAR4);
11763                bar2map_len = pci_resource_len(pdev, PCI_64BIT_BAR4);
11764                phba->sli4_hba.dpp_regs_memmap_p =
11765                                ioremap(phba->pci_bar2_map, bar2map_len);
11766                if (!phba->sli4_hba.dpp_regs_memmap_p) {
11767                        dev_err(&pdev->dev,
11768                           "ioremap failed for SLI4 HBA dpp registers.\n");
11769                        error = -ENOMEM;
11770                        goto out_iounmap_ctrl;
11771                }
11772                phba->pci_bar4_memmap_p = phba->sli4_hba.dpp_regs_memmap_p;
11773        }
11774
11775        /* Set up the EQ/CQ register handling functions now */
11776        switch (if_type) {
11777        case LPFC_SLI_INTF_IF_TYPE_0:
11778        case LPFC_SLI_INTF_IF_TYPE_2:
11779                phba->sli4_hba.sli4_eq_clr_intr = lpfc_sli4_eq_clr_intr;
11780                phba->sli4_hba.sli4_write_eq_db = lpfc_sli4_write_eq_db;
11781                phba->sli4_hba.sli4_write_cq_db = lpfc_sli4_write_cq_db;
11782                break;
11783        case LPFC_SLI_INTF_IF_TYPE_6:
11784                phba->sli4_hba.sli4_eq_clr_intr = lpfc_sli4_if6_eq_clr_intr;
11785                phba->sli4_hba.sli4_write_eq_db = lpfc_sli4_if6_write_eq_db;
11786                phba->sli4_hba.sli4_write_cq_db = lpfc_sli4_if6_write_cq_db;
11787                break;
11788        default:
11789                break;
11790        }
11791
11792        return 0;
11793
11794out_iounmap_all:
11795        iounmap(phba->sli4_hba.drbl_regs_memmap_p);
11796out_iounmap_ctrl:
11797        iounmap(phba->sli4_hba.ctrl_regs_memmap_p);
11798out_iounmap_conf:
11799        iounmap(phba->sli4_hba.conf_regs_memmap_p);
11800
11801        return error;
11802}
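
/*
 * The routine above follows the usual PCI BAR mapping pattern: read the
 * BAR base and length, ioremap() the region, and on failure unwind any
 * earlier mappings in reverse order.  A minimal sketch of the pattern for
 * a hypothetical two-BAR device (not lpfc-specific):
 *
 *        void __iomem *conf, *drbl;
 *
 *        conf = ioremap(pci_resource_start(pdev, 0),
 *                       pci_resource_len(pdev, 0));
 *        if (!conf)
 *                return -ENOMEM;
 *
 *        drbl = ioremap(pci_resource_start(pdev, 2),
 *                       pci_resource_len(pdev, 2));
 *        if (!drbl) {
 *                iounmap(conf);
 *                return -ENOMEM;
 *        }
 *
 * The iounmap() in the failure path undoes the earlier mapping before the
 * error is returned, exactly as the out_iounmap_* labels do above.
 */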
11803
11804/**
11805 * lpfc_sli4_pci_mem_unset - Unset SLI4 HBA PCI memory space.
11806 * @phba: pointer to lpfc hba data structure.
11807 *
11808 * This routine is invoked to unset the PCI device memory space for device
11809 * with SLI-4 interface spec.
11810 **/
11811static void
11812lpfc_sli4_pci_mem_unset(struct lpfc_hba *phba)
11813{
11814        uint32_t if_type;
11815        if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
11816
11817        switch (if_type) {
11818        case LPFC_SLI_INTF_IF_TYPE_0:
11819                iounmap(phba->sli4_hba.drbl_regs_memmap_p);
11820                iounmap(phba->sli4_hba.ctrl_regs_memmap_p);
11821                iounmap(phba->sli4_hba.conf_regs_memmap_p);
11822                break;
11823        case LPFC_SLI_INTF_IF_TYPE_2:
11824                iounmap(phba->sli4_hba.conf_regs_memmap_p);
11825                break;
11826        case LPFC_SLI_INTF_IF_TYPE_6:
11827                iounmap(phba->sli4_hba.drbl_regs_memmap_p);
11828                iounmap(phba->sli4_hba.conf_regs_memmap_p);
11829                if (phba->sli4_hba.dpp_regs_memmap_p)
11830                        iounmap(phba->sli4_hba.dpp_regs_memmap_p);
11831                break;
11832        case LPFC_SLI_INTF_IF_TYPE_1:
11833        default:
11834                dev_printk(KERN_ERR, &phba->pcidev->dev,
11835                           "FATAL - unsupported SLI4 interface type - %d\n",
11836                           if_type);
11837                break;
11838        }
11839}
11840
11841/**
11842 * lpfc_sli_enable_msix - Enable MSI-X interrupt mode on SLI-3 device
11843 * @phba: pointer to lpfc hba data structure.
11844 *
11845 * This routine is invoked to enable the MSI-X interrupt vectors to device
11846 * with SLI-3 interface specs.
11847 *
11848 * Return codes
11849 *   0 - successful
11850 *   other values - error
11851 **/
11852static int
11853lpfc_sli_enable_msix(struct lpfc_hba *phba)
11854{
11855        int rc;
11856        LPFC_MBOXQ_t *pmb;
11857
11858        /* Set up MSI-X multi-message vectors */
11859        rc = pci_alloc_irq_vectors(phba->pcidev,
11860                        LPFC_MSIX_VECTORS, LPFC_MSIX_VECTORS, PCI_IRQ_MSIX);
11861        if (rc < 0) {
11862                lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
11863                                "0420 PCI enable MSI-X failed (%d)\n", rc);
11864                goto vec_fail_out;
11865        }
11866
11867        /*
11868         * Assign MSI-X vectors to interrupt handlers
11869         */
11870
11871        /* vector-0 is associated to slow-path handler */
11872        rc = request_irq(pci_irq_vector(phba->pcidev, 0),
11873                         &lpfc_sli_sp_intr_handler, 0,
11874                         LPFC_SP_DRIVER_HANDLER_NAME, phba);
11875        if (rc) {
11876                lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
11877                                "0421 MSI-X slow-path request_irq failed "
11878                                "(%d)\n", rc);
11879                goto msi_fail_out;
11880        }
11881
11882        /* vector-1 is associated to fast-path handler */
11883        rc = request_irq(pci_irq_vector(phba->pcidev, 1),
11884                         &lpfc_sli_fp_intr_handler, 0,
11885                         LPFC_FP_DRIVER_HANDLER_NAME, phba);
11886
11887        if (rc) {
11888                lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
11889                                "0429 MSI-X fast-path request_irq failed "
11890                                "(%d)\n", rc);
11891                goto irq_fail_out;
11892        }
11893
11894        /*
11895         * Configure HBA MSI-X attention conditions to messages
11896         */
11897        pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
11898
11899        if (!pmb) {
11900                rc = -ENOMEM;
11901                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11902                                "0474 Unable to allocate memory for issuing "
11903                                "MBOX_CONFIG_MSI command\n");
11904                goto mem_fail_out;
11905        }
11906        rc = lpfc_config_msi(phba, pmb);
11907        if (rc)
11908                goto mbx_fail_out;
11909        rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
11910        if (rc != MBX_SUCCESS) {
11911                lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
11912                                "0351 Config MSI mailbox command failed, "
11913                                "mbxCmd x%x, mbxStatus x%x\n",
11914                                pmb->u.mb.mbxCommand, pmb->u.mb.mbxStatus);
11915                goto mbx_fail_out;
11916        }
11917
11918        /* Free memory allocated for mailbox command */
11919        mempool_free(pmb, phba->mbox_mem_pool);
11920        return rc;
11921
11922mbx_fail_out:
11923        /* Free memory allocated for mailbox command */
11924        mempool_free(pmb, phba->mbox_mem_pool);
11925
11926mem_fail_out:
11927        /* free the irq already requested */
11928        free_irq(pci_irq_vector(phba->pcidev, 1), phba);
11929
11930irq_fail_out:
11931        /* free the irq already requested */
11932        free_irq(pci_irq_vector(phba->pcidev, 0), phba);
11933
11934msi_fail_out:
11935        /* Unconfigure MSI-X capability structure */
11936        pci_free_irq_vectors(phba->pcidev);
11937
11938vec_fail_out:
11939        return rc;
11940}
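
/*
 * Note that the error labels above unwind in the reverse order of setup:
 * the mailbox memory is freed first, then the fast-path IRQ, then the
 * slow-path IRQ, and finally the MSI-X vectors themselves.
 */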
11941
11942/**
11943 * lpfc_sli_enable_msi - Enable MSI interrupt mode on SLI-3 device.
11944 * @phba: pointer to lpfc hba data structure.
11945 *
11946 * This routine is invoked to enable the MSI interrupt mode to device with
11947 * SLI-3 interface spec. The kernel function pci_enable_msi() is called to
11948 * enable the MSI vector. The device driver is responsible for calling
11949 * request_irq() to register the MSI vector with an interrupt handler,
11950 * which is done in this function.
11951 *
11952 * Return codes
11953 *      0 - successful
11954 *      other values - error
11955 */
11956static int
11957lpfc_sli_enable_msi(struct lpfc_hba *phba)
11958{
11959        int rc;
11960
11961        rc = pci_enable_msi(phba->pcidev);
11962        if (!rc)
11963                lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
11964                                "0462 PCI enable MSI mode success.\n");
11965        else {
11966                lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
11967                                "0471 PCI enable MSI mode failed (%d)\n", rc);
11968                return rc;
11969        }
11970
11971        rc = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler,
11972                         0, LPFC_DRIVER_NAME, phba);
11973        if (rc) {
11974                pci_disable_msi(phba->pcidev);
11975                lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
11976                                "0478 MSI request_irq failed (%d)\n", rc);
11977        }
11978        return rc;
11979}
11980
11981/**
11982 * lpfc_sli_enable_intr - Enable device interrupt to SLI-3 device.
11983 * @phba: pointer to lpfc hba data structure.
11984 * @cfg_mode: Interrupt configuration mode (INTx, MSI or MSI-X).
11985 *
11986 * This routine is invoked to enable device interrupt and associate driver's
11987 * interrupt handler(s) to interrupt vector(s) to device with SLI-3 interface
11988 * spec. Depending on the interrupt mode configured for the driver, the
11989 * driver will try to fall back from the configured interrupt mode to an
11990 * interrupt mode supported by the platform, kernel, and device, in the
11991 * order of:
11992 * MSI-X -> MSI -> IRQ.
11993 *
11994 * Return codes
11995 *   0 - successful
11996 *   other values - error
11997 **/
11998static uint32_t
11999lpfc_sli_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
12000{
12001        uint32_t intr_mode = LPFC_INTR_ERROR;
12002        int retval;
12003
12004        /* Need to issue conf_port mbox cmd before conf_msi mbox cmd */
12005        retval = lpfc_sli_config_port(phba, LPFC_SLI_REV3);
12006        if (retval)
12007                return intr_mode;
12008        phba->hba_flag &= ~HBA_NEEDS_CFG_PORT;
12009
12010        if (cfg_mode == 2) {
12011                /* Now, try to enable MSI-X interrupt mode */
12012                retval = lpfc_sli_enable_msix(phba);
12013                if (!retval) {
12014                        /* Indicate initialization to MSI-X mode */
12015                        phba->intr_type = MSIX;
12016                        intr_mode = 2;
12017                }
12018        }
12019
12020        /* Fall back to MSI if MSI-X initialization failed */
12021        if (cfg_mode >= 1 && phba->intr_type == NONE) {
12022                retval = lpfc_sli_enable_msi(phba);
12023                if (!retval) {
12024                        /* Indicate initialization to MSI mode */
12025                        phba->intr_type = MSI;
12026                        intr_mode = 1;
12027                }
12028        }
12029
12030        /* Fall back to INTx if both MSI-X/MSI initialization failed */
12031        if (phba->intr_type == NONE) {
12032                retval = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler,
12033                                     IRQF_SHARED, LPFC_DRIVER_NAME, phba);
12034                if (!retval) {
12035                        /* Indicate initialization to INTx mode */
12036                        phba->intr_type = INTx;
12037                        intr_mode = 0;
12038                }
12039        }
12040        return intr_mode;
12041}
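
/*
 * The fallback above, by configured mode (cfg_mode normally comes from the
 * lpfc_use_msi module parameter):
 *
 *        cfg_mode 2:  try MSI-X, then MSI, then INTx
 *        cfg_mode 1:  try MSI, then INTx
 *        cfg_mode 0:  INTx only
 *
 * The returned intr_mode (2, 1 or 0) records the mode that actually came
 * up, or LPFC_INTR_ERROR if no interrupt mode could be enabled.
 */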
12042
12043/**
12044 * lpfc_sli_disable_intr - Disable device interrupt to SLI-3 device.
12045 * @phba: pointer to lpfc hba data structure.
12046 *
12047 * This routine is invoked to disable device interrupt and disassociate the
12048 * driver's interrupt handler(s) from interrupt vector(s) to device with
12049 * SLI-3 interface spec. Depending on the interrupt mode, the driver will
12050 * release the interrupt vector(s) for the message signaled interrupt.
12051 **/
12052static void
12053lpfc_sli_disable_intr(struct lpfc_hba *phba)
12054{
12055        int nr_irqs, i;
12056
12057        if (phba->intr_type == MSIX)
12058                nr_irqs = LPFC_MSIX_VECTORS;
12059        else
12060                nr_irqs = 1;
12061
12062        for (i = 0; i < nr_irqs; i++)
12063                free_irq(pci_irq_vector(phba->pcidev, i), phba);
12064        pci_free_irq_vectors(phba->pcidev);
12065
12066        /* Reset interrupt management states */
12067        phba->intr_type = NONE;
12068        phba->sli.slistat.sli_intr = 0;
12069}
12070
12071/**
12072 * lpfc_find_cpu_handle - Find the CPU that corresponds to the specified Queue
12073 * @phba: pointer to lpfc hba data structure.
12074 * @id: EQ vector index or Hardware Queue index
12075 * @match: LPFC_FIND_BY_EQ = match by EQ
12076 *         LPFC_FIND_BY_HDWQ = match by Hardware Queue
12077 * Return the CPU that matches the selection criteria
12078 */
12079static uint16_t
12080lpfc_find_cpu_handle(struct lpfc_hba *phba, uint16_t id, int match)
12081{
12082        struct lpfc_vector_map_info *cpup;
12083        int cpu;
12084
12085        /* Loop through all CPUs */
12086        for_each_present_cpu(cpu) {
12087                cpup = &phba->sli4_hba.cpu_map[cpu];
12088
12089                /* If we are matching by EQ, there may be multiple CPUs
12090                 * using the same vector, so select the one with
12091                 * LPFC_CPU_FIRST_IRQ set.
12092                 */
12093                if ((match == LPFC_FIND_BY_EQ) &&
12094                    (cpup->flag & LPFC_CPU_FIRST_IRQ) &&
12095                    (cpup->eq == id))
12096                        return cpu;
12097
12098                /* If matching by HDWQ, select the first CPU that matches */
12099                if ((match == LPFC_FIND_BY_HDWQ) && (cpup->hdwq == id))
12100                        return cpu;
12101        }
12102        return 0;
12103}
12104
12105#ifdef CONFIG_X86
12106/**
12107 * lpfc_find_hyper - Determine if the CPU map entry is hyper-threaded
12108 * @phba: pointer to lpfc hba data structure.
12109 * @cpu: CPU map index
12110 * @phys_id: CPU package physical id
12111 * @core_id: CPU core id
12112 */
12113static int
12114lpfc_find_hyper(struct lpfc_hba *phba, int cpu,
12115                uint16_t phys_id, uint16_t core_id)
12116{
12117        struct lpfc_vector_map_info *cpup;
12118        int idx;
12119
12120        for_each_present_cpu(idx) {
12121                cpup = &phba->sli4_hba.cpu_map[idx];
12122                /* Does the cpup match the one we are looking for */
12123                if ((cpup->phys_id == phys_id) &&
12124                    (cpup->core_id == core_id) &&
12125                    (cpu != idx))
12126                        return 1;
12127        }
12128        return 0;
12129}
12130#endif
12131
12132/**
12133 * lpfc_assign_eq_map_info - Assigns eq for vector_map structure
12134 * @phba: pointer to lpfc hba data structure.
12135 * @eqidx: index for eq and irq vector
12136 * @flag: flags to set for vector_map structure
12137 * @cpu: cpu used to index vector_map structure
12138 *
12139 * The routine assigns eq info into vector_map structure
12140 */
12141static inline void
12142lpfc_assign_eq_map_info(struct lpfc_hba *phba, uint16_t eqidx, uint16_t flag,
12143                        unsigned int cpu)
12144{
12145        struct lpfc_vector_map_info *cpup = &phba->sli4_hba.cpu_map[cpu];
12146        struct lpfc_hba_eq_hdl *eqhdl = lpfc_get_eq_hdl(eqidx);
12147
12148        cpup->eq = eqidx;
12149        cpup->flag |= flag;
12150
12151        lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
12152                        "3336 Set Affinity: CPU %d irq %d eq %d flag x%x\n",
12153                        cpu, eqhdl->irq, cpup->eq, cpup->flag);
12154}
12155
12156/**
12157 * lpfc_cpu_map_array_init - Initialize cpu_map structure
12158 * @phba: pointer to lpfc hba data structure.
12159 *
12160 * The routine initializes the cpu_map array structure
12161 */
12162static void
12163lpfc_cpu_map_array_init(struct lpfc_hba *phba)
12164{
12165        struct lpfc_vector_map_info *cpup;
12166        struct lpfc_eq_intr_info *eqi;
12167        int cpu;
12168
12169        for_each_possible_cpu(cpu) {
12170                cpup = &phba->sli4_hba.cpu_map[cpu];
12171                cpup->phys_id = LPFC_VECTOR_MAP_EMPTY;
12172                cpup->core_id = LPFC_VECTOR_MAP_EMPTY;
12173                cpup->hdwq = LPFC_VECTOR_MAP_EMPTY;
12174                cpup->eq = LPFC_VECTOR_MAP_EMPTY;
12175                cpup->flag = 0;
12176                eqi = per_cpu_ptr(phba->sli4_hba.eq_info, cpu);
12177                INIT_LIST_HEAD(&eqi->list);
12178                eqi->icnt = 0;
12179        }
12180}
12181
12182/**
12183 * lpfc_hba_eq_hdl_array_init - Initialize hba_eq_hdl structure
12184 * @phba: pointer to lpfc hba data structure.
12185 *
12186 * The routine initializes the hba_eq_hdl array structure
12187 */
12188static void
12189lpfc_hba_eq_hdl_array_init(struct lpfc_hba *phba)
12190{
12191        struct lpfc_hba_eq_hdl *eqhdl;
12192        int i;
12193
12194        for (i = 0; i < phba->cfg_irq_chann; i++) {
12195                eqhdl = lpfc_get_eq_hdl(i);
12196                eqhdl->irq = LPFC_VECTOR_MAP_EMPTY;
12197                eqhdl->phba = phba;
12198        }
12199}
12200
12201/**
12202 * lpfc_cpu_affinity_check - Check vector CPU affinity mappings
12203 * @phba: pointer to lpfc hba data structure.
12204 * @vectors: number of msix vectors allocated.
12205 *
12206 * The routine will figure out the CPU affinity assignment for every
12207 * MSI-X vector allocated for the HBA.
12208 * In addition, the CPU to IO channel mapping will be calculated
12209 * and the phba->sli4_hba.cpu_map array will reflect this.
12210 */
12211static void
12212lpfc_cpu_affinity_check(struct lpfc_hba *phba, int vectors)
12213{
12214        int i, cpu, idx, next_idx, new_cpu, start_cpu, first_cpu;
12215        int max_phys_id, min_phys_id;
12216        int max_core_id, min_core_id;
12217        struct lpfc_vector_map_info *cpup;
12218        struct lpfc_vector_map_info *new_cpup;
12219#ifdef CONFIG_X86
12220        struct cpuinfo_x86 *cpuinfo;
12221#endif
12222#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
12223        struct lpfc_hdwq_stat *c_stat;
12224#endif
12225
12226        max_phys_id = 0;
12227        min_phys_id = LPFC_VECTOR_MAP_EMPTY;
12228        max_core_id = 0;
12229        min_core_id = LPFC_VECTOR_MAP_EMPTY;
12230
12231        /* Update CPU map with physical id and core id of each CPU */
12232        for_each_present_cpu(cpu) {
12233                cpup = &phba->sli4_hba.cpu_map[cpu];
12234#ifdef CONFIG_X86
12235                cpuinfo = &cpu_data(cpu);
12236                cpup->phys_id = cpuinfo->phys_proc_id;
12237                cpup->core_id = cpuinfo->cpu_core_id;
12238                if (lpfc_find_hyper(phba, cpu, cpup->phys_id, cpup->core_id))
12239                        cpup->flag |= LPFC_CPU_MAP_HYPER;
12240#else
12241                /* No distinction between CPUs for other platforms */
12242                cpup->phys_id = 0;
12243                cpup->core_id = cpu;
12244#endif
12245
12246                lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
12247                                "3328 CPU %d physid %d coreid %d flag x%x\n",
12248                                cpu, cpup->phys_id, cpup->core_id, cpup->flag);
12249
12250                if (cpup->phys_id > max_phys_id)
12251                        max_phys_id = cpup->phys_id;
12252                if (cpup->phys_id < min_phys_id)
12253                        min_phys_id = cpup->phys_id;
12254
12255                if (cpup->core_id > max_core_id)
12256                        max_core_id = cpup->core_id;
12257                if (cpup->core_id < min_core_id)
12258                        min_core_id = cpup->core_id;
12259        }
12260
12261        /* After looking at each irq vector assigned to this pcidev, it's
12262         * possible to see that not ALL CPUs have been accounted for.
12263         * Next we will set any unassigned (unaffinitized) cpu map
12264         * entries to an IRQ on the same phys_id.
12265         */
12266        first_cpu = cpumask_first(cpu_present_mask);
12267        start_cpu = first_cpu;
12268
12269        for_each_present_cpu(cpu) {
12270                cpup = &phba->sli4_hba.cpu_map[cpu];
12271
12272                /* Is this CPU entry unassigned */
12273                if (cpup->eq == LPFC_VECTOR_MAP_EMPTY) {
12274                        /* Mark CPU as IRQ not assigned by the kernel */
12275                        cpup->flag |= LPFC_CPU_MAP_UNASSIGN;
12276
12277                        /* If so, find a new_cpup that's on the SAME
12278                         * phys_id as cpup. start_cpu will start where we
12279                         * left off so all unassigned entries don't get assigned
12280                         * the IRQ of the first entry.
12281                         */
12282                        new_cpu = start_cpu;
12283                        for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
12284                                new_cpup = &phba->sli4_hba.cpu_map[new_cpu];
12285                                if (!(new_cpup->flag & LPFC_CPU_MAP_UNASSIGN) &&
12286                                    (new_cpup->eq != LPFC_VECTOR_MAP_EMPTY) &&
12287                                    (new_cpup->phys_id == cpup->phys_id))
12288                                        goto found_same;
12289                                new_cpu = cpumask_next(
12290                                        new_cpu, cpu_present_mask);
12291                                if (new_cpu == nr_cpumask_bits)
12292                                        new_cpu = first_cpu;
12293                        }
12294                        /* At this point, we leave the CPU as unassigned */
12295                        continue;
12296found_same:
12297                        /* We found a matching phys_id, so copy the IRQ info */
12298                        cpup->eq = new_cpup->eq;
12299
12300                        /* Bump start_cpu to the next slot to minimize the
12301                         * chance of having multiple unassigned CPU entries
12302                         * selecting the same IRQ.
12303                         */
12304                        start_cpu = cpumask_next(new_cpu, cpu_present_mask);
12305                        if (start_cpu == nr_cpumask_bits)
12306                                start_cpu = first_cpu;
12307
12308                        lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
12309                                        "3337 Set Affinity: CPU %d "
12310                                        "eq %d from peer cpu %d same "
12311                                        "phys_id (%d)\n",
12312                                        cpu, cpup->eq, new_cpu,
12313                                        cpup->phys_id);
12314                }
12315        }
12316
12317        /* Set any unassigned cpu map entries to an IRQ on any phys_id */
12318        start_cpu = first_cpu;
12319
12320        for_each_present_cpu(cpu) {
12321                cpup = &phba->sli4_hba.cpu_map[cpu];
12322
12323                /* Is this entry unassigned */
12324                if (cpup->eq == LPFC_VECTOR_MAP_EMPTY) {
12325                        /* Mark it as IRQ not assigned by the kernel */
12326                        cpup->flag |= LPFC_CPU_MAP_UNASSIGN;
12327
12328                        /* If so, find a new_cpup that's on ANY phys_id
12329                         * as the cpup. start_cpu will start where we
12330                         * left off so all unassigned entries don't get
12331                         * assigned the IRQ of the first entry.
12332                         */
12333                        new_cpu = start_cpu;
12334                        for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
12335                                new_cpup = &phba->sli4_hba.cpu_map[new_cpu];
12336                                if (!(new_cpup->flag & LPFC_CPU_MAP_UNASSIGN) &&
12337                                    (new_cpup->eq != LPFC_VECTOR_MAP_EMPTY))
12338                                        goto found_any;
12339                                new_cpu = cpumask_next(
12340                                        new_cpu, cpu_present_mask);
12341                                if (new_cpu == nr_cpumask_bits)
12342                                        new_cpu = first_cpu;
12343                        }
12344                        /* We should never leave an entry unassigned */
12345                        lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12346                                        "3339 Set Affinity: CPU %d "
12347                                        "eq %d UNASSIGNED\n",
12348                                        cpu, cpup->eq);
12349                        continue;
12350found_any:
12351                        /* We found an available entry, copy the IRQ info */
12352                        cpup->eq = new_cpup->eq;
12353
12354                        /* Bump start_cpu to the next slot to minimize the
12355                         * chance of having multiple unassigned CPU entries
12356                         * selecting the same IRQ.
12357                         */
12358                        start_cpu = cpumask_next(new_cpu, cpu_present_mask);
12359                        if (start_cpu == nr_cpumask_bits)
12360                                start_cpu = first_cpu;
12361
12362                        lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
12363                                        "3338 Set Affinity: CPU %d "
12364                                        "eq %d from peer cpu %d (%d/%d)\n",
12365                                        cpu, cpup->eq, new_cpu,
12366                                        new_cpup->phys_id, new_cpup->core_id);
12367                }
12368        }
12369
12370        /* Assign hdwq indices that are unique across all cpus in the map
12371         * that are also FIRST_CPUs.
12372         */
12373        idx = 0;
12374        for_each_present_cpu(cpu) {
12375                cpup = &phba->sli4_hba.cpu_map[cpu];
12376
12377                /* Only FIRST IRQs get a hdwq index assignment. */
12378                if (!(cpup->flag & LPFC_CPU_FIRST_IRQ))
12379                        continue;
12380
12381                /* 1 to 1, the first LPFC_CPU_FIRST_IRQ cpus to a unique hdwq */
12382                cpup->hdwq = idx;
12383                idx++;
12384                lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
12385                                "3333 Set Affinity: CPU %d (phys %d core %d): "
12386                                "hdwq %d eq %d flg x%x\n",
12387                                cpu, cpup->phys_id, cpup->core_id,
12388                                cpup->hdwq, cpup->eq, cpup->flag);
12389        }
12390        /* Associate a hdwq with each cpu_map entry
12391         * This will be 1 to 1 - hdwq to cpu, unless there are fewer
12392         * hardware queues than CPUs. In that case we will just round-robin
12393         * the available hardware queues as they get assigned to CPUs.
12394         * The next_idx is the idx from the FIRST_CPU loop above to account
12395         * for irq_chann < hdwq.  The idx is used for round-robin assignments
12396         * and needs to start at 0.
12397         */
12398        next_idx = idx;
12399        start_cpu = 0;
12400        idx = 0;
12401        for_each_present_cpu(cpu) {
12402                cpup = &phba->sli4_hba.cpu_map[cpu];
12403
12404                /* FIRST cpus are already mapped. */
12405                if (cpup->flag & LPFC_CPU_FIRST_IRQ)
12406                        continue;
12407
12408                /* If the cfg_irq_chann < cfg_hdw_queue, set the hdwq
12409                 * of the unassigned cpus to the next idx so that all
12410                 * hdw queues are fully utilized.
12411                 */
12412                if (next_idx < phba->cfg_hdw_queue) {
12413                        cpup->hdwq = next_idx;
12414                        next_idx++;
12415                        continue;
12416                }
12417
12418                /* Not a First CPU and all hdw_queues are used.  Reuse a
12419                 * Hardware Queue for another CPU, so be smart about it
12420                 * and pick one that has its IRQ/EQ mapped to the same phys_id
12421                 * (CPU package) and core_id.
12422                 */
12423                new_cpu = start_cpu;
12424                for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
12425                        new_cpup = &phba->sli4_hba.cpu_map[new_cpu];
12426                        if (new_cpup->hdwq != LPFC_VECTOR_MAP_EMPTY &&
12427                            new_cpup->phys_id == cpup->phys_id &&
12428                            new_cpup->core_id == cpup->core_id) {
12429                                goto found_hdwq;
12430                        }
12431                        new_cpu = cpumask_next(new_cpu, cpu_present_mask);
12432                        if (new_cpu == nr_cpumask_bits)
12433                                new_cpu = first_cpu;
12434                }
12435
12436                /* If we can't match both phys_id and core_id,
12437                 * settle for just a phys_id match.
12438                 */
12439                new_cpu = start_cpu;
12440                for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
12441                        new_cpup = &phba->sli4_hba.cpu_map[new_cpu];
12442                        if (new_cpup->hdwq != LPFC_VECTOR_MAP_EMPTY &&
12443                            new_cpup->phys_id == cpup->phys_id)
12444                                goto found_hdwq;
12445
12446                        new_cpu = cpumask_next(new_cpu, cpu_present_mask);
12447                        if (new_cpu == nr_cpumask_bits)
12448                                new_cpu = first_cpu;
12449                }
12450
12451                /* Otherwise just round robin on cfg_hdw_queue */
12452                cpup->hdwq = idx % phba->cfg_hdw_queue;
12453                idx++;
12454                goto logit;
12455 found_hdwq:
12456                /* We found an available entry, copy the hdwq info */
12457                start_cpu = cpumask_next(new_cpu, cpu_present_mask);
12458                if (start_cpu == nr_cpumask_bits)
12459                        start_cpu = first_cpu;
12460                cpup->hdwq = new_cpup->hdwq;
12461 logit:
12462                lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
12463                                "3335 Set Affinity: CPU %d (phys %d core %d): "
12464                                "hdwq %d eq %d flg x%x\n",
12465                                cpu, cpup->phys_id, cpup->core_id,
12466                                cpup->hdwq, cpup->eq, cpup->flag);
12467        }
12468
12469        /*
12470         * Initialize the cpu_map slots for not-present cpus in case
12471         * a cpu is hot-added. Perform a simple hdwq round robin assignment.
12472         */
12473        idx = 0;
12474        for_each_possible_cpu(cpu) {
12475                cpup = &phba->sli4_hba.cpu_map[cpu];
12476#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
12477                c_stat = per_cpu_ptr(phba->sli4_hba.c_stat, cpu);
12478                c_stat->hdwq_no = cpup->hdwq;
12479#endif
12480                if (cpup->hdwq != LPFC_VECTOR_MAP_EMPTY)
12481                        continue;
12482
12483                cpup->hdwq = idx++ % phba->cfg_hdw_queue;
12484#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
12485                c_stat->hdwq_no = cpup->hdwq;
12486#endif
12487                lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
12488                                "3340 Set Affinity: not present "
12489                                "CPU %d hdwq %d\n",
12490                                cpu, cpup->hdwq);
12491        }
12492
12493        /* The cpu_map array will be used later during initialization
12494         * when EQ / CQ / WQs are allocated and configured.
12495         */
12496        return;
12497}
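
/*
 * Worked example (illustrative numbers only): on a single-socket system
 * with 4 present CPUs, 2 IRQ vectors and cfg_hdw_queue = 2, the passes
 * above could produce a cpu_map like:
 *
 *        cpu  phys_id  core_id  eq  hdwq  flag
 *         0      0        0      0    0   LPFC_CPU_FIRST_IRQ
 *         1      0        1      1    1   LPFC_CPU_FIRST_IRQ
 *         2      0        2      0    0
 *         3      0        3      1    1
 *
 * CPUs 0 and 1 are the first CPUs on their vectors, so they get the
 * unique hdwq indices; CPUs 2 and 3 share an eq with a peer on the same
 * phys_id and then reuse that peer's hdwq.
 */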
12498
12499/**
12500 * lpfc_cpuhp_get_eq - Collect the eqs that must be polled for an offlining cpu
12501 *
12502 * @phba:   pointer to lpfc hba data structure.
12503 * @cpu:    cpu going offline
12504 * @eqlist: eq list to append to
12505 */
12506static int
12507lpfc_cpuhp_get_eq(struct lpfc_hba *phba, unsigned int cpu,
12508                  struct list_head *eqlist)
12509{
12510        const struct cpumask *maskp;
12511        struct lpfc_queue *eq;
12512        struct cpumask *tmp;
12513        u16 idx;
12514
12515        tmp = kzalloc(cpumask_size(), GFP_KERNEL);
12516        if (!tmp)
12517                return -ENOMEM;
12518
12519        for (idx = 0; idx < phba->cfg_irq_chann; idx++) {
12520                maskp = pci_irq_get_affinity(phba->pcidev, idx);
12521                if (!maskp)
12522                        continue;
12523                /*
12524                 * if irq is not affinitized to the cpu going
12525                 * offline then we don't need to poll the eq
12526                 * attached to it.
12527                 */
12528                if (!cpumask_and(tmp, maskp, cpumask_of(cpu)))
12529                        continue;
12530                /* get the cpus that are online and are
12531                 * affinitized to this irq vector.  If the count
12532                 * is more than 1 then cpuhp is not going to
12533                 * shut down this vector.  Since this cpu has not
12534                 * gone offline yet, we need >1.
12535                 */
12536                cpumask_and(tmp, maskp, cpu_online_mask);
12537                if (cpumask_weight(tmp) > 1)
12538                        continue;
12539
12540                /* Now that we have an irq to shut down, get the eq
12541                 * mapped to this irq.  Note: multiple hdwq's in
12542                 * the software can share an eq, but eventually
12543                 * only one eq will be mapped to this vector.
12544                 */
12545                eq = phba->sli4_hba.hba_eq_hdl[idx].eq;
12546                list_add(&eq->_poll_list, eqlist);
12547        }
12548        kfree(tmp);
12549        return 0;
12550}
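
/*
 * In effect, an eq lands on eqlist only when the offlining cpu is the
 * last online cpu its IRQ vector is affinitized to; once cpuhp shuts
 * that vector down, the eq must be serviced by software polling.
 */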
12551
12552static void __lpfc_cpuhp_remove(struct lpfc_hba *phba)
12553{
12554        if (phba->sli_rev != LPFC_SLI_REV4)
12555                return;
12556
12557        cpuhp_state_remove_instance_nocalls(lpfc_cpuhp_state,
12558                                            &phba->cpuhp);
12559        /*
12560         * unregistering the instance doesn't stop the polling
12561         * timer. Wait for the poll timer to retire.
12562         */
12563        synchronize_rcu();
12564        del_timer_sync(&phba->cpuhp_poll_timer);
12565}
12566
12567static void lpfc_cpuhp_remove(struct lpfc_hba *phba)
12568{
12569        if (phba->pport->fc_flag & FC_OFFLINE_MODE)
12570                return;
12571
12572        __lpfc_cpuhp_remove(phba);
12573}
12574
12575static void lpfc_cpuhp_add(struct lpfc_hba *phba)
12576{
12577        if (phba->sli_rev != LPFC_SLI_REV4)
12578                return;
12579
12580        rcu_read_lock();
12581
12582        if (!list_empty(&phba->poll_list))
12583                mod_timer(&phba->cpuhp_poll_timer,
12584                          jiffies + msecs_to_jiffies(LPFC_POLL_HB));
12585
12586        rcu_read_unlock();
12587
12588        cpuhp_state_add_instance_nocalls(lpfc_cpuhp_state,
12589                                         &phba->cpuhp);
12590}
12591
12592static int __lpfc_cpuhp_checks(struct lpfc_hba *phba, int *retval)
12593{
12594        if (phba->pport->load_flag & FC_UNLOADING) {
12595                *retval = -EAGAIN;
12596                return true;
12597        }
12598
12599        if (phba->sli_rev != LPFC_SLI_REV4) {
12600                *retval = 0;
12601                return true;
12602        }
12603
12604        /* proceed with the hotplug */
12605        return false;
12606}
12607
12608/**
12609 * lpfc_irq_set_aff - set IRQ affinity
12610 * @eqhdl: EQ handle
12611 * @cpu: cpu to set affinity
12612 *
12613 **/
12614static inline void
12615lpfc_irq_set_aff(struct lpfc_hba_eq_hdl *eqhdl, unsigned int cpu)
12616{
12617        cpumask_clear(&eqhdl->aff_mask);
12618        cpumask_set_cpu(cpu, &eqhdl->aff_mask);
12619        irq_set_status_flags(eqhdl->irq, IRQ_NO_BALANCING);
12620        irq_set_affinity_hint(eqhdl->irq, &eqhdl->aff_mask);
12621}
12622
12623/**
12624 * lpfc_irq_clear_aff - clear IRQ affinity
12625 * @eqhdl: EQ handle
12626 *
12627 **/
12628static inline void
12629lpfc_irq_clear_aff(struct lpfc_hba_eq_hdl *eqhdl)
12630{
12631        cpumask_clear(&eqhdl->aff_mask);
12632        irq_clear_status_flags(eqhdl->irq, IRQ_NO_BALANCING);
12633}
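
/*
 * Usage sketch for the two helpers above (illustrative only): pin the
 * IRQ of EQ handle 0 to CPU 2, and later hand it back to the balancer.
 *
 *        struct lpfc_hba_eq_hdl *eqhdl = lpfc_get_eq_hdl(0);
 *
 *        lpfc_irq_set_aff(eqhdl, 2);
 *        ...
 *        lpfc_irq_clear_aff(eqhdl);
 *
 * lpfc_irq_set_aff() also sets IRQ_NO_BALANCING so irqbalance leaves the
 * vector alone; lpfc_irq_clear_aff() removes that flag again.
 */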
12634
12635/**
12636 * lpfc_irq_rebalance - rebalances IRQ affinity according to cpuhp event
12637 * @phba: pointer to HBA context object.
12638 * @cpu: cpu going offline/online
12639 * @offline: true, cpu is going offline. false, cpu is coming online.
12640 *
12641 * If cpu is going offline, we'll make a best effort to find the next
12642 * online cpu on the phba's original_mask and migrate all offlining IRQ
12643 * affinities.
12644 *
12645 * If cpu is coming online, reaffinitize the IRQ back to the onlining cpu.
12646 *
12647 * Note: Call only if NUMA or NHT mode is enabled, otherwise rely on
12648 *       PCI_IRQ_AFFINITY to auto-manage IRQ affinity.
12649 *
12650 **/
12651static void
12652lpfc_irq_rebalance(struct lpfc_hba *phba, unsigned int cpu, bool offline)
12653{
12654        struct lpfc_vector_map_info *cpup;
12655        struct cpumask *aff_mask;
12656        unsigned int cpu_select, cpu_next, idx;
12657        const struct cpumask *orig_mask;
12658
12659        if (phba->irq_chann_mode == NORMAL_MODE)
12660                return;
12661
12662        orig_mask = &phba->sli4_hba.irq_aff_mask;
12663
12664        if (!cpumask_test_cpu(cpu, orig_mask))
12665                return;
12666
12667        cpup = &phba->sli4_hba.cpu_map[cpu];
12668
12669        if (!(cpup->flag & LPFC_CPU_FIRST_IRQ))
12670                return;
12671
12672        if (offline) {
12673                /* Find next online CPU on original mask */
12674                cpu_next = cpumask_next_wrap(cpu, orig_mask, cpu, true);
12675                cpu_select = lpfc_next_online_cpu(orig_mask, cpu_next);
12676
12677                /* Found a valid CPU */
12678                if ((cpu_select < nr_cpu_ids) && (cpu_select != cpu)) {
12679                        /* Go through each eqhdl and ensure offlining
12680                         * cpu aff_mask is migrated
12681                         */
12682                        for (idx = 0; idx < phba->cfg_irq_chann; idx++) {
12683                                aff_mask = lpfc_get_aff_mask(idx);
12684
12685                                /* Migrate affinity */
12686                                if (cpumask_test_cpu(cpu, aff_mask))
12687                                        lpfc_irq_set_aff(lpfc_get_eq_hdl(idx),
12688                                                         cpu_select);
12689                        }
12690                } else {
12691                        /* Rely on irqbalance if no online CPUs left on NUMA */
12692                        for (idx = 0; idx < phba->cfg_irq_chann; idx++)
12693                                lpfc_irq_clear_aff(lpfc_get_eq_hdl(idx));
12694                }
12695        } else {
12696                /* Migrate affinity back to this CPU */
12697                lpfc_irq_set_aff(lpfc_get_eq_hdl(cpup->eq), cpu);
12698        }
12699}
12700
12701static int lpfc_cpu_offline(unsigned int cpu, struct hlist_node *node)
12702{
12703        struct lpfc_hba *phba = hlist_entry_safe(node, struct lpfc_hba, cpuhp);
12704        struct lpfc_queue *eq, *next;
12705        LIST_HEAD(eqlist);
12706        int retval;
12707
12708        if (!phba) {
12709                WARN_ONCE(!phba, "cpu: %u. phba:NULL", raw_smp_processor_id());
12710                return 0;
12711        }
12712
12713        if (__lpfc_cpuhp_checks(phba, &retval))
12714                return retval;
12715
12716        lpfc_irq_rebalance(phba, cpu, true);
12717
12718        retval = lpfc_cpuhp_get_eq(phba, cpu, &eqlist);
12719        if (retval)
12720                return retval;
12721
12722        /* start polling on these eq's */
12723        list_for_each_entry_safe(eq, next, &eqlist, _poll_list) {
12724                list_del_init(&eq->_poll_list);
12725                lpfc_sli4_start_polling(eq);
12726        }
12727
12728        return 0;
12729}
12730
12731static int lpfc_cpu_online(unsigned int cpu, struct hlist_node *node)
12732{
12733        struct lpfc_hba *phba = hlist_entry_safe(node, struct lpfc_hba, cpuhp);
12734        struct lpfc_queue *eq, *next;
12735        unsigned int n;
12736        int retval;
12737
12738        if (!phba) {
12739                WARN_ONCE(!phba, "cpu: %u. phba:NULL", raw_smp_processor_id());
12740                return 0;
12741        }
12742
12743        if (__lpfc_cpuhp_checks(phba, &retval))
12744                return retval;
12745
12746        lpfc_irq_rebalance(phba, cpu, false);
12747
12748        list_for_each_entry_safe(eq, next, &phba->poll_list, _poll_list) {
12749                n = lpfc_find_cpu_handle(phba, eq->hdwq, LPFC_FIND_BY_HDWQ);
12750                if (n == cpu)
12751                        lpfc_sli4_stop_polling(eq);
12752        }
12753
12754        return 0;
12755}
12756
12757/**
12758 * lpfc_sli4_enable_msix - Enable MSI-X interrupt mode to SLI-4 device
12759 * @phba: pointer to lpfc hba data structure.
12760 *
12761 * This routine is invoked to enable the MSI-X interrupt vectors to device
12762 * with SLI-4 interface spec.  It also allocates MSI-X vectors and maps them
12763 * to cpus on the system.
12764 *
12765 * When cfg_irq_numa is enabled, the adapter will only allocate vectors for
12766 * the number of cpus on the same numa node as this adapter.  The vectors are
12767 * allocated without requesting OS affinity mapping.  A vector will be
12768 * allocated and assigned to each online and offline cpu.  If the cpu is
12769 * online, then affinity will be set to that cpu.  If the cpu is offline, then
12770 * affinity will be set to the nearest peer cpu within the numa node that is
12771 * online.  If there are no online cpus within the numa node, affinity is not
12772 * assigned and the OS may do as it pleases. Note: cpu vector affinity mapping
12773 * is consistent with the way cpu online/offline is handled when cfg_irq_numa is
12774 * configured.
12775 *
12776 * If numa mode is not enabled and there is more than 1 vector allocated, then
12777 * the driver relies on the managed irq interface, where the OS assigns
12778 * vector-to-cpu affinity.  The driver will then use that affinity mapping
12779 * to set up its cpu mapping table.
12780 *
12781 * Return codes
12782 * 0 - successful
12783 * other values - error
12784 **/
12785static int
12786lpfc_sli4_enable_msix(struct lpfc_hba *phba)
12787{
12788        int vectors, rc, index;
12789        char *name;
12790        const struct cpumask *aff_mask = NULL;
12791        unsigned int cpu = 0, cpu_cnt = 0, cpu_select = nr_cpu_ids;
12792        struct lpfc_vector_map_info *cpup;
12793        struct lpfc_hba_eq_hdl *eqhdl;
12794        const struct cpumask *maskp;
12795        unsigned int flags = PCI_IRQ_MSIX;
12796
12797        /* Set up MSI-X multi-message vectors */
12798        vectors = phba->cfg_irq_chann;
12799
12800        if (phba->irq_chann_mode != NORMAL_MODE)
12801                aff_mask = &phba->sli4_hba.irq_aff_mask;
12802
12803        if (aff_mask) {
12804                cpu_cnt = cpumask_weight(aff_mask);
12805                vectors = min(phba->cfg_irq_chann, cpu_cnt);
12806
12807                /* cpu: iterates over aff_mask including offline or online
12808                 * cpu_select: iterates over online aff_mask to set affinity
12809                 */
12810                cpu = cpumask_first(aff_mask);
12811                cpu_select = lpfc_next_online_cpu(aff_mask, cpu);
12812        } else {
12813                flags |= PCI_IRQ_AFFINITY;
12814        }
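        /*
         * Two allocation strategies: with an aff_mask the driver requests
         * plain vectors and assigns CPU affinity itself below; otherwise
         * PCI_IRQ_AFFINITY requests managed vectors pre-spread across CPUs,
         * whose masks are read back later through pci_irq_get_affinity().
         */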
12815
12816        rc = pci_alloc_irq_vectors(phba->pcidev, 1, vectors, flags);
12817        if (rc < 0) {
12818                lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
12819                                "0484 PCI enable MSI-X failed (%d)\n", rc);
12820                goto vec_fail_out;
12821        }
12822        vectors = rc;
12823
12824        /* Assign MSI-X vectors to interrupt handlers */
12825        for (index = 0; index < vectors; index++) {
12826                eqhdl = lpfc_get_eq_hdl(index);
12827                name = eqhdl->handler_name;
12828                memset(name, 0, LPFC_SLI4_HANDLER_NAME_SZ);
12829                snprintf(name, LPFC_SLI4_HANDLER_NAME_SZ,
12830                         LPFC_DRIVER_HANDLER_NAME"%d", index);
12831
12832                eqhdl->idx = index;
12833                rc = request_irq(pci_irq_vector(phba->pcidev, index),
12834                         &lpfc_sli4_hba_intr_handler, 0,
12835                         name, eqhdl);
12836                if (rc) {
12837                        lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
12838                                        "0486 MSI-X fast-path (%d) "
12839                                        "request_irq failed (%d)\n", index, rc);
12840                        goto cfg_fail_out;
12841                }
12842
12843                eqhdl->irq = pci_irq_vector(phba->pcidev, index);
12844
12845                if (aff_mask) {
12846                        /* If found a neighboring online cpu, set affinity */
12847                        if (cpu_select < nr_cpu_ids)
12848                                lpfc_irq_set_aff(eqhdl, cpu_select);
12849
12850                        /* Assign EQ to cpu_map */
12851                        lpfc_assign_eq_map_info(phba, index,
12852                                                LPFC_CPU_FIRST_IRQ,
12853                                                cpu);
12854
12855                        /* Iterate to next offline or online cpu in aff_mask */
12856                        cpu = cpumask_next(cpu, aff_mask);
12857
12858                        /* Find next online cpu in aff_mask to set affinity */
12859                        cpu_select = lpfc_next_online_cpu(aff_mask, cpu);
12860                } else if (vectors == 1) {
12861                        cpu = cpumask_first(cpu_present_mask);
12862                        lpfc_assign_eq_map_info(phba, index, LPFC_CPU_FIRST_IRQ,
12863                                                cpu);
12864                } else {
12865                        maskp = pci_irq_get_affinity(phba->pcidev, index);
12866
12867                        /* Loop through all CPUs associated with vector index */
12868                        for_each_cpu_and(cpu, maskp, cpu_present_mask) {
12869                                cpup = &phba->sli4_hba.cpu_map[cpu];
12870
12871                                /* If this is the first CPU that's assigned to
12872                                 * this vector, set LPFC_CPU_FIRST_IRQ.
12873                                 *
12874                                 * On certain platforms it's possible that irq
12875                                 * vectors are affinitized to all the cpus.
12876                                 * This can result in each cpu_map.eq being set
12877                                 * to the last vector, overwriting all of the
12878                                 * previous cpu_map.eq entries.  Ensure that
12879                                 * each vector receives a place in cpu_map.
12880                                 * A later call to lpfc_cpu_affinity_check will
12881                                 * ensure we are nicely balanced out.
12882                                 */
12883                                if (cpup->eq != LPFC_VECTOR_MAP_EMPTY)
12884                                        continue;
12885                                lpfc_assign_eq_map_info(phba, index,
12886                                                        LPFC_CPU_FIRST_IRQ,
12887                                                        cpu);
12888                                break;
12889                        }
12890                }
12891        }
12892
12893        if (vectors != phba->cfg_irq_chann) {
12894                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
12895                                "3238 Reducing IO channels to match number of "
12896                                "MSI-X vectors, requested %d got %d\n",
12897                                phba->cfg_irq_chann, vectors);
12898                if (phba->cfg_irq_chann > vectors)
12899                        phba->cfg_irq_chann = vectors;
12900        }
12901
12902        return rc;
12903
12904cfg_fail_out:
12905        /* free the irq already requested */
12906        for (--index; index >= 0; index--) {
12907                eqhdl = lpfc_get_eq_hdl(index);
12908                lpfc_irq_clear_aff(eqhdl);
12909                irq_set_affinity_hint(eqhdl->irq, NULL);
12910                free_irq(eqhdl->irq, eqhdl);
12911        }
12912
12913        /* Unconfigure MSI-X capability structure */
12914        pci_free_irq_vectors(phba->pcidev);
12915
12916vec_fail_out:
12917        return rc;
12918}
12919
12920/**
12921 * lpfc_sli4_enable_msi - Enable MSI interrupt mode to SLI-4 device
12922 * @phba: pointer to lpfc hba data structure.
12923 *
12924 * This routine is invoked to enable the MSI interrupt mode to device with
12925 * SLI-4 interface spec. The kernel function pci_alloc_irq_vectors() is
12926 * called to enable the MSI vector. The device driver is responsible for
12927 * calling request_irq() to register the MSI vector with an interrupt
12928 * handler, which is done in this function.
12929 *
12930 * Return codes
12931 *      0 - successful
12932 *      other values - error
12933 **/
12934static int
12935lpfc_sli4_enable_msi(struct lpfc_hba *phba)
12936{
12937        int rc, index;
12938        unsigned int cpu;
12939        struct lpfc_hba_eq_hdl *eqhdl;
12940
12941        rc = pci_alloc_irq_vectors(phba->pcidev, 1, 1,
12942                                   PCI_IRQ_MSI | PCI_IRQ_AFFINITY);
12943        if (rc > 0)
12944                lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
12945                                "0487 PCI enable MSI mode success.\n");
12946        else {
12947                lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
12948                                "0488 PCI enable MSI mode failed (%d)\n", rc);
12949                return rc ? rc : -1;
12950        }
12951
12952        rc = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler,
12953                         0, LPFC_DRIVER_NAME, phba);
12954        if (rc) {
12955                pci_free_irq_vectors(phba->pcidev);
12956                lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
12957                                "0490 MSI request_irq failed (%d)\n", rc);
12958                return rc;
12959        }
12960
12961        eqhdl = lpfc_get_eq_hdl(0);
12962        eqhdl->irq = pci_irq_vector(phba->pcidev, 0);
12963
12964        cpu = cpumask_first(cpu_present_mask);
12965        lpfc_assign_eq_map_info(phba, 0, LPFC_CPU_FIRST_IRQ, cpu);
12966
12967        for (index = 0; index < phba->cfg_irq_chann; index++) {
12968                eqhdl = lpfc_get_eq_hdl(index);
12969                eqhdl->idx = index;
12970        }
12971
12972        return 0;
12973}
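/*
 * Note that in MSI mode only a single vector exists: EQ handle 0 above is
 * wired to the interrupt, while the remaining cfg_irq_chann handles are only
 * assigned their index.
 */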
12974
12975/**
12976 * lpfc_sli4_enable_intr - Enable device interrupt to SLI-4 device
12977 * @phba: pointer to lpfc hba data structure.
12978 * @cfg_mode: Interrupt configuration mode (INTx, MSI or MSI-X).
12979 *
12980 * This routine is invoked to enable device interrupt and associate driver's
12981 * interrupt handler(s) to interrupt vector(s) to device with SLI-4
12982 * interface spec. Depending on the interrupt mode configured for the driver,
12983 * the driver will try to fall back from the configured interrupt mode to an
12984 * interrupt mode which is supported by the platform, kernel, and device in
12985 * the order of:
12986 * MSI-X -> MSI -> IRQ.
12987 *
12988 * Return codes
12989 *      0 - successful
12990 *      other values - error
12991 **/
12992static uint32_t
12993lpfc_sli4_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
12994{
12995        uint32_t intr_mode = LPFC_INTR_ERROR;
12996        int retval, idx;
12997
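        /*
         * The intr_mode returned below uses the same encoding as cfg_mode:
         * 2 = MSI-X, 1 = MSI, 0 = INTx, and LPFC_INTR_ERROR if every mode
         * failed to initialize.
         */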
12998        if (cfg_mode == 2) {
12999                /* Preparation before the conf_msi mbox cmd is a no-op
13000                 * here, so try to enable MSI-X interrupt mode directly.
13001                 */
13002                retval = lpfc_sli4_enable_msix(phba);
13003                if (!retval) {
13004                        /* Indicate initialization to MSI-X mode */
13005                        phba->intr_type = MSIX;
13006                        intr_mode = 2;
13007                }
13008        }
13011
13012        /* Fall back to MSI if MSI-X initialization failed */
13013        if (cfg_mode >= 1 && phba->intr_type == NONE) {
13014                retval = lpfc_sli4_enable_msi(phba);
13015                if (!retval) {
13016                        /* Indicate initialization to MSI mode */
13017                        phba->intr_type = MSI;
13018                        intr_mode = 1;
13019                }
13020        }
13021
13022        /* Fall back to INTx if both MSI-X and MSI initialization failed */
13023        if (phba->intr_type == NONE) {
13024                retval = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler,
13025                                     IRQF_SHARED, LPFC_DRIVER_NAME, phba);
13026                if (!retval) {
13027                        struct lpfc_hba_eq_hdl *eqhdl;
13028                        unsigned int cpu;
13029
13030                        /* Indicate initialization to INTx mode */
13031                        phba->intr_type = INTx;
13032                        intr_mode = 0;
13033
13034                        eqhdl = lpfc_get_eq_hdl(0);
13035                        eqhdl->irq = pci_irq_vector(phba->pcidev, 0);
13036
13037                        cpu = cpumask_first(cpu_present_mask);
13038                        lpfc_assign_eq_map_info(phba, 0, LPFC_CPU_FIRST_IRQ,
13039                                                cpu);
13040                        for (idx = 0; idx < phba->cfg_irq_chann; idx++) {
13041                                eqhdl = lpfc_get_eq_hdl(idx);
13042                                eqhdl->idx = idx;
13043                        }
13044                }
13045        }
13046        return intr_mode;
13047}
13048
13049/**
13050 * lpfc_sli4_disable_intr - Disable device interrupt to SLI-4 device
13051 * @phba: pointer to lpfc hba data structure.
13052 *
13053 * This routine is invoked to disable device interrupt and disassociate
13054 * the driver's interrupt handler(s) from interrupt vector(s) to device
13055 * with SLI-4 interface spec. Depending on the interrupt mode, the driver
13056 * will release the interrupt vector(s) for the message signaled interrupt.
13057 **/
13058static void
13059lpfc_sli4_disable_intr(struct lpfc_hba *phba)
13060{
13061        /* Disable the currently initialized interrupt mode */
13062        if (phba->intr_type == MSIX) {
13063                int index;
13064                struct lpfc_hba_eq_hdl *eqhdl;
13065
13066                /* Free up MSI-X multi-message vectors */
13067                for (index = 0; index < phba->cfg_irq_chann; index++) {
13068                        eqhdl = lpfc_get_eq_hdl(index);
13069                        lpfc_irq_clear_aff(eqhdl);
13070                        irq_set_affinity_hint(eqhdl->irq, NULL);
13071                        free_irq(eqhdl->irq, eqhdl);
13072                }
13073        } else {
13074                free_irq(phba->pcidev->irq, phba);
13075        }
13076
13077        pci_free_irq_vectors(phba->pcidev);
13078
13079        /* Reset interrupt management states */
13080        phba->intr_type = NONE;
13081        phba->sli.slistat.sli_intr = 0;
13082}
13083
13084/**
13085 * lpfc_unset_hba - Unset SLI3 hba device initialization
13086 * @phba: pointer to lpfc hba data structure.
13087 *
13088 * This routine is invoked to unset the HBA device initialization steps to
13089 * a device with SLI-3 interface spec.
13090 **/
13091static void
13092lpfc_unset_hba(struct lpfc_hba *phba)
13093{
13094        struct lpfc_vport *vport = phba->pport;
13095        struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
13096
13097        spin_lock_irq(shost->host_lock);
13098        vport->load_flag |= FC_UNLOADING;
13099        spin_unlock_irq(shost->host_lock);
13100
13101        kfree(phba->vpi_bmask);
13102        kfree(phba->vpi_ids);
13103
13104        lpfc_stop_hba_timers(phba);
13105
13106        phba->pport->work_port_events = 0;
13107
13108        lpfc_sli_hba_down(phba);
13109
13110        lpfc_sli_brdrestart(phba);
13111
13112        lpfc_sli_disable_intr(phba);
13113
13114        return;
13115}
13116
13117/**
13118 * lpfc_sli4_xri_exchange_busy_wait - Wait for device XRI exchange busy
13119 * @phba: Pointer to HBA context object.
13120 *
13121 * This function is called in the SLI4 code path to wait for completion
13122 * of device's XRIs exchange busy. It will check the XRI exchange busy
13123 * on outstanding FCP and ELS I/Os every 10ms for up to 10 seconds; after
13124 * that, it will check the XRI exchange busy on outstanding FCP and ELS
13125 * I/Os every 30 seconds, logging an error message, and wait forever. Only
13126 * when all XRI exchange busy conditions have completed shall the driver
13127 * unload proceed with invoking the function reset ioctl mailbox command
13128 * to the CNA and the rest of the driver unload resource release.
13129 **/
13130static void
13131lpfc_sli4_xri_exchange_busy_wait(struct lpfc_hba *phba)
13132{
13133        struct lpfc_sli4_hdw_queue *qp;
13134        int idx, ccnt;
13135        int wait_time = 0;
13136        int io_xri_cmpl = 1;
13137        int nvmet_xri_cmpl = 1;
13138        int els_xri_cmpl = list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list);
13139
13140        /* Driver just aborted IOs during the hba_unset process.  Pause
13141         * here to give the HBA time to complete the IO and get entries
13142         * into the abts lists.
13143         */
13144        msleep(LPFC_XRI_EXCH_BUSY_WAIT_T1 * 5);
13145
13146        /* Wait for NVME pending IO to flush back to transport. */
13147        if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
13148                lpfc_nvme_wait_for_io_drain(phba);
13149
13150        ccnt = 0;
13151        for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
13152                qp = &phba->sli4_hba.hdwq[idx];
13153                io_xri_cmpl = list_empty(&qp->lpfc_abts_io_buf_list);
13154                if (!io_xri_cmpl) /* if list is NOT empty */
13155                        ccnt++;
13156        }
13157        if (ccnt)
13158                io_xri_cmpl = 0;
13159
13160        if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
13161                nvmet_xri_cmpl =
13162                        list_empty(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
13163        }
13164
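        /* Polling cadence for the loop below: sleep T1 per pass until the
         * accumulated wait exceeds TMO, then sleep T2 per pass and log an
         * error each time, without ever giving up. (At the time of writing
         * T1/TMO/T2 are 10ms/10s/30s; treat the exact values as an
         * assumption and see the LPFC_XRI_EXCH_BUSY_WAIT_* definitions.)
         */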
13165        while (!els_xri_cmpl || !io_xri_cmpl || !nvmet_xri_cmpl) {
13166                if (wait_time > LPFC_XRI_EXCH_BUSY_WAIT_TMO) {
13167                        if (!nvmet_xri_cmpl)
13168                                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13169                                                "6424 NVMET XRI exchange busy "
13170                                                "wait time: %d seconds.\n",
13171                                                wait_time/1000);
13172                        if (!io_xri_cmpl)
13173                                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13174                                                "6100 IO XRI exchange busy "
13175                                                "wait time: %d seconds.\n",
13176                                                wait_time/1000);
13177                        if (!els_xri_cmpl)
13178                                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13179                                                "2878 ELS XRI exchange busy "
13180                                                "wait time: %d seconds.\n",
13181                                                wait_time/1000);
13182                        msleep(LPFC_XRI_EXCH_BUSY_WAIT_T2);
13183                        wait_time += LPFC_XRI_EXCH_BUSY_WAIT_T2;
13184                } else {
13185                        msleep(LPFC_XRI_EXCH_BUSY_WAIT_T1);
13186                        wait_time += LPFC_XRI_EXCH_BUSY_WAIT_T1;
13187                }
13188
13189                ccnt = 0;
13190                for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
13191                        qp = &phba->sli4_hba.hdwq[idx];
13192                        io_xri_cmpl = list_empty(
13193                            &qp->lpfc_abts_io_buf_list);
13194                        if (!io_xri_cmpl) /* if list is NOT empty */
13195                                ccnt++;
13196                }
13197                if (ccnt)
13198                        io_xri_cmpl = 0;
13199
13200                if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
13201                        nvmet_xri_cmpl = list_empty(
13202                                &phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
13203                }
13204                els_xri_cmpl =
13205                        list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list);
13206
13207        }
13208}
13209
13210/**
13211 * lpfc_sli4_hba_unset - Unset the fcoe hba
13212 * @phba: Pointer to HBA context object.
13213 *
13214 * This function is called in the SLI4 code path to reset the HBA's FCoE
13215 * function. The caller is not required to hold any lock. This routine
13216 * issues a PCI function reset mailbox command to reset the FCoE function.
13217 * At the end of the function, it calls the lpfc_hba_down_post function to
13218 * free any pending commands.
13219 **/
13220static void
13221lpfc_sli4_hba_unset(struct lpfc_hba *phba)
13222{
13223        int wait_cnt = 0;
13224        LPFC_MBOXQ_t *mboxq;
13225        struct pci_dev *pdev = phba->pcidev;
13226
13227        lpfc_stop_hba_timers(phba);
13228        hrtimer_cancel(&phba->cmf_timer);
13229
13230        if (phba->pport)
13231                phba->sli4_hba.intr_enable = 0;
13232
13233        /*
13234         * Gracefully wait out the potential current outstanding asynchronous
13235         * mailbox command.
13236         */
13237
13238        /* First, block any pending async mailbox command from being posted */
13239        spin_lock_irq(&phba->hbalock);
13240        phba->sli.sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
13241        spin_unlock_irq(&phba->hbalock);
13242        /* Now, try to wait it out if we can */
13243        while (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) {
13244                msleep(10);
13245                if (++wait_cnt > LPFC_ACTIVE_MBOX_WAIT_CNT)
13246                        break;
13247        }
13248        /* Forcefully release the outstanding mailbox command if timed out */
13249        if (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) {
13250                spin_lock_irq(&phba->hbalock);
13251                mboxq = phba->sli.mbox_active;
13252                mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED;
13253                __lpfc_mbox_cmpl_put(phba, mboxq);
13254                phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
13255                phba->sli.mbox_active = NULL;
13256                spin_unlock_irq(&phba->hbalock);
13257        }
13258
13259        /* Abort all iocbs associated with the hba */
13260        lpfc_sli_hba_iocb_abort(phba);
13261
13262        /* Wait for completion of device XRI exchange busy */
13263        lpfc_sli4_xri_exchange_busy_wait(phba);
13264
13265        /* per-phba callback de-registration for hotplug event */
13266        if (phba->pport)
13267                lpfc_cpuhp_remove(phba);
13268
13269        /* Disable PCI subsystem interrupt */
13270        lpfc_sli4_disable_intr(phba);
13271
13272        /* Disable SR-IOV if enabled */
13273        if (phba->cfg_sriov_nr_virtfn)
13274                pci_disable_sriov(pdev);
13275
13276        /* The kthread_stop signal will trigger work_done one more time */
13277        kthread_stop(phba->worker_thread);
13278
13279        /* Disable FW logging to host memory */
13280        lpfc_ras_stop_fwlog(phba);
13281
13282        /* Unset the queues shared with the hardware then release all
13283         * allocated resources.
13284         */
13285        lpfc_sli4_queue_unset(phba);
13286        lpfc_sli4_queue_destroy(phba);
13287
13288        /* Reset SLI4 HBA FCoE function */
13289        lpfc_pci_function_reset(phba);
13290
13291        /* Free RAS DMA memory */
13292        if (phba->ras_fwlog.ras_enabled)
13293                lpfc_sli4_ras_dma_free(phba);
13294
13295        /* Stop the SLI4 device port */
13296        if (phba->pport)
13297                phba->pport->work_port_events = 0;
13298}
13299
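/*
 * Fold one byte into a running CRC, least-significant bit first: per bit,
 * the CRC's MSB is compared with the next data bit, and on mismatch the
 * shifted CRC is XORed with LPFC_CGN_CRC32_MAGIC_NUMBER and its low bit
 * forced to 1.
 */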
13300static uint32_t
13301lpfc_cgn_crc32(uint32_t crc, u8 byte)
13302{
13303        uint32_t msb = 0;
13304        uint32_t bit;
13305
13306        for (bit = 0; bit < 8; bit++) {
13307                msb = (crc >> 31) & 1;
13308                crc <<= 1;
13309
13310                if (msb ^ (byte & 1)) {
13311                        crc ^= LPFC_CGN_CRC32_MAGIC_NUMBER;
13312                        crc |= 1;
13313                }
13314                byte >>= 1;
13315        }
13316        return crc;
13317}
13318
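/*
 * Bit-reflect a 32-bit word: bit 0 swaps with bit 31, bit 1 with bit 30,
 * and so on. For example, lpfc_cgn_reverse_bits(0x00000001) == 0x80000000
 * and lpfc_cgn_reverse_bits(0x0000000F) == 0xF0000000.
 */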
13319static uint32_t
13320lpfc_cgn_reverse_bits(uint32_t wd)
13321{
13322        uint32_t result = 0;
13323        uint32_t i;
13324
13325        for (i = 0; i < 32; i++) {
13326                result <<= 1;
13327                result |= (1 & (wd >> i));
13328        }
13329        return result;
13330}
13331
13332/*
13333 * The routine corresponds with the algorithm the HBA firmware
13334 * uses to validate the data integrity.
13335 */
13336uint32_t
13337lpfc_cgn_calc_crc32(void *ptr, uint32_t byteLen, uint32_t crc)
13338{
13339        uint32_t  i;
13340        uint32_t result;
13341        uint8_t  *data = (uint8_t *)ptr;
13342
13343        for (i = 0; i < byteLen; ++i)
13344                crc = lpfc_cgn_crc32(crc, data[i]);
13345
13346        result = ~lpfc_cgn_reverse_bits(crc);
13347        return result;
13348}
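/*
 * Illustrative use, matching lpfc_init_congestion_buf() below: seed with
 * LPFC_CGN_CRC32_SEED, run the CRC over the whole info block, then store
 * the result little-endian:
 *
 *	crc = lpfc_cgn_calc_crc32(cp, LPFC_CGN_INFO_SZ, LPFC_CGN_CRC32_SEED);
 *	cp->cgn_info_crc = cpu_to_le32(crc);
 */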
13349
13350void
13351lpfc_init_congestion_buf(struct lpfc_hba *phba)
13352{
13353        struct lpfc_cgn_info *cp;
13354        struct timespec64 cmpl_time;
13355        struct tm broken;
13356        uint16_t size;
13357        uint32_t crc;
13358
13359        lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
13360                        "6235 INIT Congestion Buffer %p\n", phba->cgn_i);
13361
13362        if (!phba->cgn_i)
13363                return;
13364        cp = (struct lpfc_cgn_info *)phba->cgn_i->virt;
13365
13366        atomic_set(&phba->cgn_fabric_warn_cnt, 0);
13367        atomic_set(&phba->cgn_fabric_alarm_cnt, 0);
13368        atomic_set(&phba->cgn_sync_alarm_cnt, 0);
13369        atomic_set(&phba->cgn_sync_warn_cnt, 0);
13370
13371        atomic64_set(&phba->cgn_acqe_stat.alarm, 0);
13372        atomic64_set(&phba->cgn_acqe_stat.warn, 0);
13373        atomic_set(&phba->cgn_driver_evt_cnt, 0);
13374        atomic_set(&phba->cgn_latency_evt_cnt, 0);
13375        atomic64_set(&phba->cgn_latency_evt, 0);
13376        phba->cgn_evt_minute = 0;
13377        phba->hba_flag &= ~HBA_CGN_DAY_WRAP;
13378
13379        memset(cp, 0xff, LPFC_CGN_DATA_SIZE);
13380        cp->cgn_info_size = cpu_to_le16(LPFC_CGN_INFO_SZ);
13381        cp->cgn_info_version = LPFC_CGN_INFO_V3;
13382
13383        /* cgn parameters */
13384        cp->cgn_info_mode = phba->cgn_p.cgn_param_mode;
13385        cp->cgn_info_level0 = phba->cgn_p.cgn_param_level0;
13386        cp->cgn_info_level1 = phba->cgn_p.cgn_param_level1;
13387        cp->cgn_info_level2 = phba->cgn_p.cgn_param_level2;
13388
13389        ktime_get_real_ts64(&cmpl_time);
13390        time64_to_tm(cmpl_time.tv_sec, 0, &broken);
13391
13392        cp->cgn_info_month = broken.tm_mon + 1;
13393        cp->cgn_info_day = broken.tm_mday;
13394        cp->cgn_info_year = broken.tm_year - 100; /* relative to 2000 */
13395        cp->cgn_info_hour = broken.tm_hour;
13396        cp->cgn_info_minute = broken.tm_min;
13397        cp->cgn_info_second = broken.tm_sec;
13398
13399        lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT | LOG_INIT,
13400                        "2643 CGNInfo Init: Start Time "
13401                        "%d/%d/%d %d:%d:%d\n",
13402                        cp->cgn_info_day, cp->cgn_info_month,
13403                        cp->cgn_info_year, cp->cgn_info_hour,
13404                        cp->cgn_info_minute, cp->cgn_info_second);
13405
13406        /* Fill in default LUN qdepth */
13407        if (phba->pport) {
13408                size = (uint16_t)(phba->pport->cfg_lun_queue_depth);
13409                cp->cgn_lunq = cpu_to_le16(size);
13410        }
13411
13412        /* last used Index initialized to 0xff already */
13413
13414        cp->cgn_warn_freq = cpu_to_le16(LPFC_FPIN_INIT_FREQ);
13415        cp->cgn_alarm_freq = cpu_to_le16(LPFC_FPIN_INIT_FREQ);
13416        crc = lpfc_cgn_calc_crc32(cp, LPFC_CGN_INFO_SZ, LPFC_CGN_CRC32_SEED);
13417        cp->cgn_info_crc = cpu_to_le32(crc);
13418
13419        phba->cgn_evt_timestamp = jiffies +
13420                msecs_to_jiffies(LPFC_CGN_TIMER_TO_MIN);
13421}
13422
13423void
13424lpfc_init_congestion_stat(struct lpfc_hba *phba)
13425{
13426        struct lpfc_cgn_info *cp;
13427        struct timespec64 cmpl_time;
13428        struct tm broken;
13429        uint32_t crc;
13430
13431        lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
13432                        "6236 INIT Congestion Stat %p\n", phba->cgn_i);
13433
13434        if (!phba->cgn_i)
13435                return;
13436
13437        cp = (struct lpfc_cgn_info *)phba->cgn_i->virt;
13438        memset(&cp->cgn_stat_npm, 0, LPFC_CGN_STAT_SIZE);
13439
13440        ktime_get_real_ts64(&cmpl_time);
13441        time64_to_tm(cmpl_time.tv_sec, 0, &broken);
13442
13443        cp->cgn_stat_month = broken.tm_mon + 1;
13444        cp->cgn_stat_day = broken.tm_mday;
13445        cp->cgn_stat_year = broken.tm_year - 100; /* relative to 2000 */
13446        cp->cgn_stat_hour = broken.tm_hour;
13447        cp->cgn_stat_minute = broken.tm_min;
13448
13449        lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT | LOG_INIT,
13450                        "2647 CGNstat Init: Start Time "
13451                        "%d/%d/%d %d:%d\n",
13452                        cp->cgn_stat_day, cp->cgn_stat_month,
13453                        cp->cgn_stat_year, cp->cgn_stat_hour,
13454                        cp->cgn_stat_minute);
13455
13456        crc = lpfc_cgn_calc_crc32(cp, LPFC_CGN_INFO_SZ, LPFC_CGN_CRC32_SEED);
13457        cp->cgn_info_crc = cpu_to_le32(crc);
13458}
13459
13460/**
13461 * __lpfc_reg_congestion_buf - register congestion info buffer with HBA
13462 * @phba: Pointer to hba context object.
13463 * @reg: flag to determine register or unregister.
13464 */
13465static int
13466__lpfc_reg_congestion_buf(struct lpfc_hba *phba, int reg)
13467{
13468        struct lpfc_mbx_reg_congestion_buf *reg_congestion_buf;
13469        union  lpfc_sli4_cfg_shdr *shdr;
13470        uint32_t shdr_status, shdr_add_status;
13471        LPFC_MBOXQ_t *mboxq;
13472        int length, rc;
13473
13474        if (!phba->cgn_i)
13475                return -ENXIO;
13476
13477        mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
13478        if (!mboxq) {
13479                lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
13480                                "2641 REG_CONGESTION_BUF mbox allocation fail: "
13481                                "HBA state x%x reg %d\n",
13482                                phba->pport->port_state, reg);
13483                return -ENOMEM;
13484        }
13485
13486        length = (sizeof(struct lpfc_mbx_reg_congestion_buf) -
13487                sizeof(struct lpfc_sli4_cfg_mhdr));
13488        lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
13489                         LPFC_MBOX_OPCODE_REG_CONGESTION_BUF, length,
13490                         LPFC_SLI4_MBX_EMBED);
13491        reg_congestion_buf = &mboxq->u.mqe.un.reg_congestion_buf;
13492        bf_set(lpfc_mbx_reg_cgn_buf_type, reg_congestion_buf, 1);
13493        if (reg > 0)
13494                bf_set(lpfc_mbx_reg_cgn_buf_cnt, reg_congestion_buf, 1);
13495        else
13496                bf_set(lpfc_mbx_reg_cgn_buf_cnt, reg_congestion_buf, 0);
13497        reg_congestion_buf->length = sizeof(struct lpfc_cgn_info);
13498        reg_congestion_buf->addr_lo =
13499                putPaddrLow(phba->cgn_i->phys);
13500        reg_congestion_buf->addr_hi =
13501                putPaddrHigh(phba->cgn_i->phys);
13502
13503        rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
13504        shdr = (union lpfc_sli4_cfg_shdr *)
13505                &mboxq->u.mqe.un.sli4_config.header.cfg_shdr;
13506        shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
13507        shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
13508                                 &shdr->response);
13509        mempool_free(mboxq, phba->mbox_mem_pool);
13510        if (shdr_status || shdr_add_status || rc) {
13511                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13512                                "2642 REG_CONGESTION_BUF mailbox "
13513                                "failed with status x%x add_status x%x,"
13514                                " mbx status x%x reg %d\n",
13515                                shdr_status, shdr_add_status, rc, reg);
13516                return -ENXIO;
13517        }
13518        return 0;
13519}
13520
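/*
 * Wrappers for the mailbox helper above: a buffer count of 1 registers the
 * congestion buffer with the HBA, a count of 0 unregisters it. Unregistering
 * first stops the congestion management framework via lpfc_cmf_stop().
 */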
13521int
13522lpfc_unreg_congestion_buf(struct lpfc_hba *phba)
13523{
13524        lpfc_cmf_stop(phba);
13525        return __lpfc_reg_congestion_buf(phba, 0);
13526}
13527
13528int
13529lpfc_reg_congestion_buf(struct lpfc_hba *phba)
13530{
13531        return __lpfc_reg_congestion_buf(phba, 1);
13532}
13533
13534/**
13535 * lpfc_get_sli4_parameters - Get the SLI4 Config PARAMETERS.
13536 * @phba: Pointer to HBA context object.
13537 * @mboxq: Pointer to the mailboxq memory for the mailbox command response.
13538 *
13539 * This function is called in the SLI4 code path to read the port's
13540 * sli4 capabilities.
13541 *
13542 * This function may be called from any context that can block-wait
13543 * for the completion.  The expectation is that this routine is called
13544 * typically from probe_one or from the online routine.
13545 **/
13546int
13547lpfc_get_sli4_parameters(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
13548{
13549        int rc;
13550        struct lpfc_mqe *mqe = &mboxq->u.mqe;
13551        struct lpfc_pc_sli4_params *sli4_params;
13552        uint32_t mbox_tmo;
13553        int length;
13554        bool exp_wqcq_pages = true;
13555        struct lpfc_sli4_parameters *mbx_sli4_parameters;
13556
13557        /*
13558         * By default, the driver assumes the SLI4 port requires RPI
13559         * header postings.  The SLI4_PARAM response will correct this
13560         * assumption.
13561         */
13562        phba->sli4_hba.rpi_hdrs_in_use = 1;
13563
13564        /* Read the port's SLI4 Config Parameters */
13565        length = (sizeof(struct lpfc_mbx_get_sli4_parameters) -
13566                  sizeof(struct lpfc_sli4_cfg_mhdr));
13567        lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
13568                         LPFC_MBOX_OPCODE_GET_SLI4_PARAMETERS,
13569                         length, LPFC_SLI4_MBX_EMBED);
13570        if (!phba->sli4_hba.intr_enable)
13571                rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
13572        else {
13573                mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq);
13574                rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
13575        }
13576        if (unlikely(rc))
13577                return rc;
13578        sli4_params = &phba->sli4_hba.pc_sli4_params;
13579        mbx_sli4_parameters = &mqe->un.get_sli4_parameters.sli4_parameters;
13580        sli4_params->if_type = bf_get(cfg_if_type, mbx_sli4_parameters);
13581        sli4_params->sli_rev = bf_get(cfg_sli_rev, mbx_sli4_parameters);
13582        sli4_params->sli_family = bf_get(cfg_sli_family, mbx_sli4_parameters);
13583        sli4_params->featurelevel_1 = bf_get(cfg_sli_hint_1,
13584                                             mbx_sli4_parameters);
13585        sli4_params->featurelevel_2 = bf_get(cfg_sli_hint_2,
13586                                             mbx_sli4_parameters);
13587        if (bf_get(cfg_phwq, mbx_sli4_parameters))
13588                phba->sli3_options |= LPFC_SLI4_PHWQ_ENABLED;
13589        else
13590                phba->sli3_options &= ~LPFC_SLI4_PHWQ_ENABLED;
13591        sli4_params->sge_supp_len = mbx_sli4_parameters->sge_supp_len;
13592        sli4_params->loopbk_scope = bf_get(cfg_loopbk_scope,
13593                                           mbx_sli4_parameters);
13594        sli4_params->oas_supported = bf_get(cfg_oas, mbx_sli4_parameters);
13595        sli4_params->cqv = bf_get(cfg_cqv, mbx_sli4_parameters);
13596        sli4_params->mqv = bf_get(cfg_mqv, mbx_sli4_parameters);
13597        sli4_params->wqv = bf_get(cfg_wqv, mbx_sli4_parameters);
13598        sli4_params->rqv = bf_get(cfg_rqv, mbx_sli4_parameters);
13599        sli4_params->eqav = bf_get(cfg_eqav, mbx_sli4_parameters);
13600        sli4_params->cqav = bf_get(cfg_cqav, mbx_sli4_parameters);
13601        sli4_params->wqsize = bf_get(cfg_wqsize, mbx_sli4_parameters);
13602        sli4_params->bv1s = bf_get(cfg_bv1s, mbx_sli4_parameters);
13603        sli4_params->pls = bf_get(cfg_pvl, mbx_sli4_parameters);
13604        sli4_params->sgl_pages_max = bf_get(cfg_sgl_page_cnt,
13605                                            mbx_sli4_parameters);
13606        sli4_params->wqpcnt = bf_get(cfg_wqpcnt, mbx_sli4_parameters);
13607        sli4_params->sgl_pp_align = bf_get(cfg_sgl_pp_align,
13608                                           mbx_sli4_parameters);
13609        phba->sli4_hba.extents_in_use = bf_get(cfg_ext, mbx_sli4_parameters);
13610        phba->sli4_hba.rpi_hdrs_in_use = bf_get(cfg_hdrr, mbx_sli4_parameters);
13611
13612        /* Check for Extended Pre-Registered SGL support */
13613        phba->cfg_xpsgl = bf_get(cfg_xpsgl, mbx_sli4_parameters);
13614
13615        /* Check for firmware nvme support */
13616        rc = (bf_get(cfg_nvme, mbx_sli4_parameters) &&
13617                     bf_get(cfg_xib, mbx_sli4_parameters));
13618
13619        if (rc) {
13620                /* Save this to indicate the Firmware supports NVME */
13621                sli4_params->nvme = 1;
13622
13623                /* Firmware NVME support, check driver FC4 NVME support */
13624                if (phba->cfg_enable_fc4_type == LPFC_ENABLE_FCP) {
13625                        lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_NVME,
13626                                        "6133 Disabling NVME support: "
13627                                        "FC4 type not supported: x%x\n",
13628                                        phba->cfg_enable_fc4_type);
13629                        goto fcponly;
13630                }
13631        } else {
13632                /* No firmware NVME support, check driver FC4 NVME support */
13633                sli4_params->nvme = 0;
13634                if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
13635                        lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_NVME,
13636                                        "6101 Disabling NVME support: Not "
13637                                        "supported by firmware (%d %d) x%x\n",
13638                                        bf_get(cfg_nvme, mbx_sli4_parameters),
13639                                        bf_get(cfg_xib, mbx_sli4_parameters),
13640                                        phba->cfg_enable_fc4_type);
13641fcponly:
13642                        phba->nvmet_support = 0;
13643                        phba->cfg_nvmet_mrq = 0;
13644                        phba->cfg_nvme_seg_cnt = 0;
13645
13646                        /* If no FC4 type support, move to just SCSI support */
13647                        if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP))
13648                                return -ENODEV;
13649                        phba->cfg_enable_fc4_type = LPFC_ENABLE_FCP;
13650                }
13651        }
13652
13653        /* If the NVME FC4 type is enabled, scale the sg_seg_cnt to
13654         * accommodate 512K and 1M IOs in a single nvme buf.
13655         */
13656        if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
13657                phba->cfg_sg_seg_cnt = LPFC_MAX_NVME_SEG_CNT;
13658
13659        /* Enable embedded Payload BDE if support is indicated */
13660        if (bf_get(cfg_pbde, mbx_sli4_parameters))
13661                phba->cfg_enable_pbde = 1;
13662        else
13663                phba->cfg_enable_pbde = 0;
13664
13665        /*
13666         * To support the Suppress Response feature we must satisfy 3 conditions:
13667         * lpfc_suppress_rsp module parameter must be set (default).
13668         * In SLI4-Parameters Descriptor:
13669         * Extended Inline Buffers (XIB) must be supported.
13670         * Suppress Response IU Not Supported (SRIUNS) must NOT be supported
13671         * (double negative).
13672         */
13673        if (phba->cfg_suppress_rsp && bf_get(cfg_xib, mbx_sli4_parameters) &&
13674            !(bf_get(cfg_nosr, mbx_sli4_parameters)))
13675                phba->sli.sli_flag |= LPFC_SLI_SUPPRESS_RSP;
13676        else
13677                phba->cfg_suppress_rsp = 0;
13678
13679        if (bf_get(cfg_eqdr, mbx_sli4_parameters))
13680                phba->sli.sli_flag |= LPFC_SLI_USE_EQDR;
13681
13682        /* Make sure that sge_supp_len can be handled by the driver */
13683        if (sli4_params->sge_supp_len > LPFC_MAX_SGE_SIZE)
13684                sli4_params->sge_supp_len = LPFC_MAX_SGE_SIZE;
13685
13686        /*
13687         * Check whether the adapter supports an embedded copy of the
13688         * FCP CMD IU within the WQE for FCP_Ixxx commands. In order
13689         * to use this option, 128-byte WQEs must be used.
13690         */
13691        if (bf_get(cfg_ext_embed_cb, mbx_sli4_parameters))
13692                phba->fcp_embed_io = 1;
13693        else
13694                phba->fcp_embed_io = 0;
13695
13696        lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_NVME,
13697                        "6422 XIB %d PBDE %d: FCP %d NVME %d %d %d\n",
13698                        bf_get(cfg_xib, mbx_sli4_parameters),
13699                        phba->cfg_enable_pbde,
13700                        phba->fcp_embed_io, sli4_params->nvme,
13701                        phba->cfg_nvme_embed_cmd, phba->cfg_suppress_rsp);
13702
13703        if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
13704            LPFC_SLI_INTF_IF_TYPE_2) &&
13705            (bf_get(lpfc_sli_intf_sli_family, &phba->sli4_hba.sli_intf) ==
13706                 LPFC_SLI_INTF_FAMILY_LNCR_A0))
13707                exp_wqcq_pages = false;
13708
13709        if ((bf_get(cfg_cqpsize, mbx_sli4_parameters) & LPFC_CQ_16K_PAGE_SZ) &&
13710            (bf_get(cfg_wqpsize, mbx_sli4_parameters) & LPFC_WQ_16K_PAGE_SZ) &&
13711            exp_wqcq_pages &&
13712            (sli4_params->wqsize & LPFC_WQ_SZ128_SUPPORT))
13713                phba->enab_exp_wqcq_pages = 1;
13714        else
13715                phba->enab_exp_wqcq_pages = 0;
13716        /*
13717         * Check if the SLI port supports MDS Diagnostics
13718         */
13719        if (bf_get(cfg_mds_diags, mbx_sli4_parameters))
13720                phba->mds_diags_support = 1;
13721        else
13722                phba->mds_diags_support = 0;
13723
13724        /*
13725         * Check if the SLI port supports NSLER
13726         */
13727        if (bf_get(cfg_nsler, mbx_sli4_parameters))
13728                phba->nsler = 1;
13729        else
13730                phba->nsler = 0;
13731
13732        return 0;
13733}
13734
13735/**
13736 * lpfc_pci_probe_one_s3 - PCI probe func to reg SLI-3 device to PCI subsystem.
13737 * @pdev: pointer to PCI device
13738 * @pid: pointer to PCI device identifier
13739 *
13740 * This routine is to be called to attach a device with SLI-3 interface spec
13741 * to the PCI subsystem. When an Emulex HBA with SLI-3 interface spec is
13742 * presented on PCI bus, the kernel PCI subsystem looks at PCI device-specific
13743 * information of the device and driver to see if the driver states that it can
13744 * support this kind of device. If the match is successful, the driver core
13745 * invokes this routine. If this routine determines it can claim the HBA, it
13746 * does all the initialization that it needs to do to handle the HBA properly.
13747 *
13748 * Return code
13749 *      0 - driver can claim the device
13750 *      negative value - driver cannot claim the device
13751 **/
13752static int
13753lpfc_pci_probe_one_s3(struct pci_dev *pdev, const struct pci_device_id *pid)
13754{
13755        struct lpfc_hba   *phba;
13756        struct lpfc_vport *vport = NULL;
13757        struct Scsi_Host  *shost = NULL;
13758        int error;
13759        uint32_t cfg_mode, intr_mode;
13760
13761        /* Allocate memory for HBA structure */
13762        phba = lpfc_hba_alloc(pdev);
13763        if (!phba)
13764                return -ENOMEM;
13765
13766        /* Perform generic PCI device enabling operation */
13767        error = lpfc_enable_pci_dev(phba);
13768        if (error)
13769                goto out_free_phba;
13770
13771        /* Set up SLI API function jump table for PCI-device group-0 HBAs */
13772        error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_LP);
13773        if (error)
13774                goto out_disable_pci_dev;
13775
13776        /* Set up SLI-3 specific device PCI memory space */
13777        error = lpfc_sli_pci_mem_setup(phba);
13778        if (error) {
13779                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13780                                "1402 Failed to set up pci memory space.\n");
13781                goto out_disable_pci_dev;
13782        }
13783
13784        /* Set up SLI-3 specific device driver resources */
13785        error = lpfc_sli_driver_resource_setup(phba);
13786        if (error) {
13787                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13788                                "1404 Failed to set up driver resource.\n");
13789                goto out_unset_pci_mem_s3;
13790        }
13791
13792        /* Initialize and populate the iocb list per host */
13793
13794        error = lpfc_init_iocb_list(phba, LPFC_IOCB_LIST_CNT);
13795        if (error) {
13796                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13797                                "1405 Failed to initialize iocb list.\n");
13798                goto out_unset_driver_resource_s3;
13799        }
13800
13801        /* Set up common device driver resources */
13802        error = lpfc_setup_driver_resource_phase2(phba);
13803        if (error) {
13804                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13805                                "1406 Failed to set up driver resource.\n");
13806                goto out_free_iocb_list;
13807        }
13808
13809        /* Get the default values for Model Name and Description */
13810        lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);
13811
13812        /* Create SCSI host to the physical port */
13813        error = lpfc_create_shost(phba);
13814        if (error) {
13815                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13816                                "1407 Failed to create scsi host.\n");
13817                goto out_unset_driver_resource;
13818        }
13819
13820        /* Configure sysfs attributes */
13821        vport = phba->pport;
13822        error = lpfc_alloc_sysfs_attr(vport);
13823        if (error) {
13824                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13825                                "1476 Failed to allocate sysfs attr\n");
13826                goto out_destroy_shost;
13827        }
13828
13829        shost = lpfc_shost_from_vport(vport); /* save shost for error cleanup */
13830        /* Now, try to enable interrupts and bring up the device */
13831        cfg_mode = phba->cfg_use_msi;
13832        while (true) {
13833                /* Put device to a known state before enabling interrupt */
13834                lpfc_stop_port(phba);
13835                /* Configure and enable interrupt */
13836                intr_mode = lpfc_sli_enable_intr(phba, cfg_mode);
13837                if (intr_mode == LPFC_INTR_ERROR) {
13838                        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13839                                        "0431 Failed to enable interrupt.\n");
13840                        error = -ENODEV;
13841                        goto out_free_sysfs_attr;
13842                }
13843                /* SLI-3 HBA setup */
13844                if (lpfc_sli_hba_setup(phba)) {
13845                        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13846                                        "1477 Failed to set up hba\n");
13847                        error = -ENODEV;
13848                        goto out_remove_device;
13849                }
13850
13851                /* Wait 50ms for the interrupts of previous mailbox commands */
13852                msleep(50);
13853                /* Check active interrupts on message signaled interrupts */
13854                if (intr_mode == 0 ||
13855                    phba->sli.slistat.sli_intr > LPFC_MSIX_VECTORS) {
13856                        /* Log the current active interrupt mode */
13857                        phba->intr_mode = intr_mode;
13858                        lpfc_log_intr_mode(phba, intr_mode);
13859                        break;
13860                } else {
13861                        lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
13862                                        "0447 Configure interrupt mode (%d) "
13863                                        "failed active interrupt test.\n",
13864                                        intr_mode);
13865                        /* Disable the current interrupt mode */
13866                        lpfc_sli_disable_intr(phba);
13867                        /* Try next level of interrupt mode */
13868                        cfg_mode = --intr_mode;
13869                }
13870        }
13871
13872        /* Perform post initialization setup */
13873        lpfc_post_init_setup(phba);
13874
13875        /* Check if there are static vports to be created. */
13876        lpfc_create_static_vport(phba);
13877
13878        return 0;
13879
13880out_remove_device:
13881        lpfc_unset_hba(phba);
13882out_free_sysfs_attr:
13883        lpfc_free_sysfs_attr(vport);
13884out_destroy_shost:
13885        lpfc_destroy_shost(phba);
13886out_unset_driver_resource:
13887        lpfc_unset_driver_resource_phase2(phba);
13888out_free_iocb_list:
13889        lpfc_free_iocb_list(phba);
13890out_unset_driver_resource_s3:
13891        lpfc_sli_driver_resource_unset(phba);
13892out_unset_pci_mem_s3:
13893        lpfc_sli_pci_mem_unset(phba);
13894out_disable_pci_dev:
13895        lpfc_disable_pci_dev(phba);
13896        if (shost)
13897                scsi_host_put(shost);
13898out_free_phba:
13899        lpfc_hba_free(phba);
13900        return error;
13901}
13902
13903/**
13904 * lpfc_pci_remove_one_s3 - PCI func to unreg SLI-3 device from PCI subsystem.
13905 * @pdev: pointer to PCI device
13906 *
13907 * This routine is to be called to detach a device with SLI-3 interface
13908 * spec from PCI subsystem. When an Emulex HBA with SLI-3 interface spec is
13909 * removed from PCI bus, it performs all the necessary cleanup for the HBA
13910 * device to be removed from the PCI subsystem properly.
13911 **/
13912static void
13913lpfc_pci_remove_one_s3(struct pci_dev *pdev)
13914{
13915        struct Scsi_Host  *shost = pci_get_drvdata(pdev);
13916        struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
13917        struct lpfc_vport **vports;
13918        struct lpfc_hba   *phba = vport->phba;
13919        int i;
13920
13921        spin_lock_irq(&phba->hbalock);
13922        vport->load_flag |= FC_UNLOADING;
13923        spin_unlock_irq(&phba->hbalock);
13924
13925        lpfc_free_sysfs_attr(vport);
13926
13927        /* Release all the vports against this physical port */
13928        vports = lpfc_create_vport_work_array(phba);
13929        if (vports != NULL)
13930                for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
13931                        if (vports[i]->port_type == LPFC_PHYSICAL_PORT)
13932                                continue;
13933                        fc_vport_terminate(vports[i]->fc_vport);
13934                }
13935        lpfc_destroy_vport_work_array(phba, vports);
13936
13937        /* Remove FC host with the physical port */
13938        fc_remove_host(shost);
13939        scsi_remove_host(shost);
13940
13941        /* Clean up all nodes, mailboxes and IOs. */
13942        lpfc_cleanup(vport);
13943
13944        /*
13945         * Bring down the SLI Layer. This step disables all interrupts,
13946         * clears the rings, discards all mailbox commands, and resets
13947         * the HBA.
13948         */
13949
13950        /* HBA interrupt will be disabled after this call */
13951        lpfc_sli_hba_down(phba);
13952        /* The kthread_stop signal will trigger work_done one more time */
13953        kthread_stop(phba->worker_thread);
13954        /* Final cleanup of txcmplq and reset the HBA */
13955        lpfc_sli_brdrestart(phba);
13956
13957        kfree(phba->vpi_bmask);
13958        kfree(phba->vpi_ids);
13959
13960        lpfc_stop_hba_timers(phba);
13961        spin_lock_irq(&phba->port_list_lock);
13962        list_del_init(&vport->listentry);
13963        spin_unlock_irq(&phba->port_list_lock);
13964
13965        lpfc_debugfs_terminate(vport);
13966
13967        /* Disable SR-IOV if enabled */
13968        if (phba->cfg_sriov_nr_virtfn)
13969                pci_disable_sriov(pdev);
13970
13971        /* Disable interrupt */
13972        lpfc_sli_disable_intr(phba);
13973
13974        scsi_host_put(shost);
13975
13976        /*
13977         * Call scsi_free before mem_free since scsi bufs are released to their
13978         * corresponding pools here.
13979         */
13980        lpfc_scsi_free(phba);
13981        lpfc_free_iocb_list(phba);
13982
13983        lpfc_mem_free_all(phba);
13984
13985        dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(),
13986                          phba->hbqslimp.virt, phba->hbqslimp.phys);
13987
13988        /* Free resources associated with SLI2 interface */
13989        dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
13990                          phba->slim2p.virt, phba->slim2p.phys);
13991
13992        /* unmap adapter SLIM and Control Registers */
13993        iounmap(phba->ctrl_regs_memmap_p);
13994        iounmap(phba->slim_memmap_p);
13995
13996        lpfc_hba_free(phba);
13997
13998        pci_release_mem_regions(pdev);
13999        pci_disable_device(pdev);
14000}
14001
14002/**
14003 * lpfc_pci_suspend_one_s3 - PCI func to suspend SLI-3 device for power mgmnt
14004 * @dev_d: pointer to device
14005 *
14006 * This routine is to be called from the kernel's PCI subsystem to support
14007 * system Power Management (PM) to device with SLI-3 interface spec. When
14008 * PM invokes this method, it quiesces the device by stopping the driver's
14009 * worker thread for the device, turning off device's interrupt and DMA,
14010 * and bringing the device offline. Note that the driver implements only the
14011 * minimum PM requirements for a power-aware driver's suspend/resume support:
14012 * all possible PM messages (SUSPEND, HIBERNATE, FREEZE) to the suspend()
14013 * method call are treated as SUSPEND, and the driver fully reinitializes its
14014 * device during the resume() method call. The driver therefore sets the
14015 * device to the PCI_D3hot state in PCI config space instead of setting it
14016 * according to the @msg provided by the PM.
14017 *
14018 * Return code
14019 *      0 - driver suspended the device
14020 *      Error otherwise
14021 **/
14022static int __maybe_unused
14023lpfc_pci_suspend_one_s3(struct device *dev_d)
14024{
14025        struct Scsi_Host *shost = dev_get_drvdata(dev_d);
14026        struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
14027
14028        lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
14029                        "0473 PCI device Power Management suspend.\n");
14030
14031        /* Bring down the device */
14032        lpfc_offline_prep(phba, LPFC_MBX_WAIT);
14033        lpfc_offline(phba);
14034        kthread_stop(phba->worker_thread);
14035
14036        /* Disable interrupt from device */
14037        lpfc_sli_disable_intr(phba);
14038
14039        return 0;
14040}
14041
14042/**
14043 * lpfc_pci_resume_one_s3 - PCI func to resume SLI-3 device for power mgmnt
14044 * @dev_d: pointer to device
14045 *
14046 * This routine is to be called from the kernel's PCI subsystem to support
14047 * system Power Management (PM) to device with SLI-3 interface spec. When PM
14048 * invokes this method, it restores the device's PCI config space state and
14049 * fully reinitializes the device and brings it online. Note that the driver
14050 * implements only the minimum PM requirements for a power-aware driver's
14051 * suspend/resume support: all possible PM messages (SUSPEND, HIBERNATE,
14052 * FREEZE) to the suspend() method call are treated as SUSPEND, and the
14053 * driver fully reinitializes its device during the resume() method call.
14054 * The device is therefore set to PCI_D0 directly in PCI config space before
14055 * restoring the state.
14056 *
14057 * Return code
14058 *      0 - driver resumed the device
14059 *      Error otherwise
14060 **/
14061static int __maybe_unused
14062lpfc_pci_resume_one_s3(struct device *dev_d)
14063{
14064        struct Scsi_Host *shost = dev_get_drvdata(dev_d);
14065        struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
14066        uint32_t intr_mode;
14067        int error;
14068
14069        lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
14070                        "0452 PCI device Power Management resume.\n");
14071
14072        /* Startup the kernel thread for this host adapter. */
14073        phba->worker_thread = kthread_run(lpfc_do_work, phba,
14074                                        "lpfc_worker_%d", phba->brd_no);
14075        if (IS_ERR(phba->worker_thread)) {
14076                error = PTR_ERR(phba->worker_thread);
14077                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14078                                "0434 PM resume failed to start worker "
14079                                "thread: error=x%x.\n", error);
14080                return error;
14081        }
14082
14083        /* Configure and enable interrupt */
14084        intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode);
14085        if (intr_mode == LPFC_INTR_ERROR) {
14086                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14087                                "0430 PM resume Failed to enable interrupt\n");
14088                return -EIO;
14089        } else
14090                phba->intr_mode = intr_mode;
14091
14092        /* Restart HBA and bring it online */
14093        lpfc_sli_brdrestart(phba);
14094        lpfc_online(phba);
14095
14096        /* Log the current active interrupt mode */
14097        lpfc_log_intr_mode(phba, phba->intr_mode);
14098
14099        return 0;
14100}
14101
14102/**
14103 * lpfc_sli_prep_dev_for_recover - Prepare SLI3 device for pci slot recover
14104 * @phba: pointer to lpfc hba data structure.
14105 *
14106 * This routine is called to prepare the SLI3 device for PCI slot recover. It
14107 * aborts all the outstanding SCSI I/Os to the pci device.
14108 **/
14109static void
14110lpfc_sli_prep_dev_for_recover(struct lpfc_hba *phba)
14111{
14112        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14113                        "2723 PCI channel I/O abort preparing for recovery\n");
14114
14115        /*
14116         * There may be errored I/Os through the HBA; abort all I/Os on the
14117         * txcmplq and let the SCSI mid-layer retry them to recover.
14118         */
14119        lpfc_sli_abort_fcp_rings(phba);
14120}
14121
14122/**
14123 * lpfc_sli_prep_dev_for_reset - Prepare SLI3 device for pci slot reset
14124 * @phba: pointer to lpfc hba data structure.
14125 *
14126 * This routine is called to prepare the SLI3 device for PCI slot reset. It
14127 * disables the device interrupt and pci device, and aborts the internal FCP
14128 * pending I/Os.
14129 **/
14130static void
14131lpfc_sli_prep_dev_for_reset(struct lpfc_hba *phba)
14132{
14133        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14134                        "2710 PCI channel disable preparing for reset\n");
14135
14136        /* Block any management I/Os to the device */
14137        lpfc_block_mgmt_io(phba, LPFC_MBX_WAIT);
14138
14139        /* Block all SCSI devices' I/Os on the host */
14140        lpfc_scsi_dev_block(phba);
14141
14142        /* Flush all driver's outstanding SCSI I/Os as we are to reset */
14143        lpfc_sli_flush_io_rings(phba);
14144
14145        /* stop all timers */
14146        lpfc_stop_hba_timers(phba);
14147
14148        /* Disable interrupt and pci device */
14149        lpfc_sli_disable_intr(phba);
14150        pci_disable_device(phba->pcidev);
14151}
14152
14153/**
14154 * lpfc_sli_prep_dev_for_perm_failure - Prepare SLI3 dev for pci slot disable
14155 * @phba: pointer to lpfc hba data structure.
14156 *
14157 * This routine is called to prepare the SLI3 device for PCI slot permanently
14158 * disabling. It blocks the SCSI transport layer traffic and flushes the FCP
14159 * pending I/Os.
14160 **/
14161static void
14162lpfc_sli_prep_dev_for_perm_failure(struct lpfc_hba *phba)
14163{
14164        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14165                        "2711 PCI channel permanent disable for failure\n");
14166        /* Block all SCSI devices' I/Os on the host */
14167        lpfc_scsi_dev_block(phba);
14168
14169        /* stop all timers */
14170        lpfc_stop_hba_timers(phba);
14171
14172        /* Clean up all driver's outstanding SCSI I/Os */
14173        lpfc_sli_flush_io_rings(phba);
14174}
14175
14176/**
14177 * lpfc_io_error_detected_s3 - Method for handling SLI-3 device PCI I/O error
14178 * @pdev: pointer to PCI device.
14179 * @state: the current PCI connection state.
14180 *
14181 * This routine is called from the PCI subsystem for I/O error handling to a
14182 * device with SLI-3 interface spec. This function is called by the PCI
14183 * subsystem after a PCI bus error affecting this device has been detected.
14184 * When this function is invoked, it will need to stop all the I/Os and
14185 * interrupt(s) to the device. Once that is done, it will return
14186 * PCI_ERS_RESULT_NEED_RESET for the PCI subsystem to perform proper recovery
14187 * as desired.
14188 *
14189 * Return codes
14190 *      PCI_ERS_RESULT_CAN_RECOVER - can be recovered with reset_link
14191 *      PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
14192 *      PCI_ERS_RESULT_DISCONNECT - device could not be recovered
14193 **/
14194static pci_ers_result_t
14195lpfc_io_error_detected_s3(struct pci_dev *pdev, pci_channel_state_t state)
14196{
14197        struct Scsi_Host *shost = pci_get_drvdata(pdev);
14198        struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
14199
14200        switch (state) {
14201        case pci_channel_io_normal:
14202                /* Non-fatal error, prepare for recovery */
14203                lpfc_sli_prep_dev_for_recover(phba);
14204                return PCI_ERS_RESULT_CAN_RECOVER;
14205        case pci_channel_io_frozen:
14206                /* Fatal error, prepare for slot reset */
14207                lpfc_sli_prep_dev_for_reset(phba);
14208                return PCI_ERS_RESULT_NEED_RESET;
14209        case pci_channel_io_perm_failure:
14210                /* Permanent failure, prepare for device down */
14211                lpfc_sli_prep_dev_for_perm_failure(phba);
14212                return PCI_ERS_RESULT_DISCONNECT;
14213        default:
14214                /* Unknown state, prepare and request slot reset */
14215                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14216                                "0472 Unknown PCI error state: x%x\n", state);
14217                lpfc_sli_prep_dev_for_reset(phba);
14218                return PCI_ERS_RESULT_NEED_RESET;
14219        }
14220}
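
/*
 * Recovery-flow sketch: for a frozen channel, the PCI error recovery core
 * walks the callbacks registered in lpfc_err_handler near the bottom of
 * this file, so the SLI-3 sequence is roughly:
 *
 *   lpfc_io_error_detected_s3()  -> returns PCI_ERS_RESULT_NEED_RESET
 *   lpfc_io_slot_reset_s3()      -> re-enables PCI/IRQ, offlines the HBA
 *   lpfc_io_resume_s3()          -> lpfc_online() lets traffic flow again
 */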
14221
14222/**
14223 * lpfc_io_slot_reset_s3 - Method for restarting PCI SLI-3 device from scratch.
14224 * @pdev: pointer to PCI device.
14225 *
14226 * This routine is called from the PCI subsystem for error handling to a
14227 * device with SLI-3 interface spec. This is called after the PCI bus has been
14228 * reset to restart the PCI card from scratch, as if from a cold-boot.
14229 * During the PCI subsystem error recovery, after driver returns
14230 * PCI_ERS_RESULT_NEED_RESET, the PCI subsystem will perform proper error
14231 * recovery and then call this routine before calling the .resume method
14232 * to recover the device. This function will initialize the HBA device,
14233 * enable the interrupt, but it will just put the HBA to offline state
14234 * without passing any I/O traffic.
14235 *
14236 * Return codes
14237 *      PCI_ERS_RESULT_RECOVERED - the device has been recovered
14238 *      PCI_ERS_RESULT_DISCONNECT - device could not be recovered
14239 */
14240static pci_ers_result_t
14241lpfc_io_slot_reset_s3(struct pci_dev *pdev)
14242{
14243        struct Scsi_Host *shost = pci_get_drvdata(pdev);
14244        struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
14245        struct lpfc_sli *psli = &phba->sli;
14246        uint32_t intr_mode;
14247
14248        dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n");
14249        if (pci_enable_device_mem(pdev)) {
14250                printk(KERN_ERR "lpfc: Cannot re-enable "
14251                        "PCI device after reset.\n");
14252                return PCI_ERS_RESULT_DISCONNECT;
14253        }
14254
14255        pci_restore_state(pdev);
14256
14257        /*
14258         * Since pci_restore_state() now clears the device's saved_state
14259         * flag, the restored state must be saved again.
14260         */
14261        pci_save_state(pdev);
14262
14263        if (pdev->is_busmaster)
14264                pci_set_master(pdev);
14265
14266        spin_lock_irq(&phba->hbalock);
14267        psli->sli_flag &= ~LPFC_SLI_ACTIVE;
14268        spin_unlock_irq(&phba->hbalock);
14269
14270        /* Configure and enable interrupt */
14271        intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode);
14272        if (intr_mode == LPFC_INTR_ERROR) {
14273                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14274                                "0427 Cannot re-enable interrupt after "
14275                                "slot reset.\n");
14276                return PCI_ERS_RESULT_DISCONNECT;
14277        } else
14278                phba->intr_mode = intr_mode;
14279
14280        /* Take device offline, it will perform cleanup */
14281        lpfc_offline_prep(phba, LPFC_MBX_WAIT);
14282        lpfc_offline(phba);
14283        lpfc_sli_brdrestart(phba);
14284
14285        /* Log the current active interrupt mode */
14286        lpfc_log_intr_mode(phba, phba->intr_mode);
14287
14288        return PCI_ERS_RESULT_RECOVERED;
14289}
14290
14291/**
14292 * lpfc_io_resume_s3 - Method for resuming PCI I/O operation on SLI-3 device.
14293 * @pdev: pointer to PCI device
14294 *
14295 * This routine is called from the PCI subsystem for error handling to a device
14296 * with SLI-3 interface spec. It is called when kernel error recovery tells
14297 * the lpfc driver that it is ok to resume normal PCI operation after PCI bus
14298 * error recovery. After this call, traffic can start to flow from this device
14299 * again.
14300 */
14301static void
14302lpfc_io_resume_s3(struct pci_dev *pdev)
14303{
14304        struct Scsi_Host *shost = pci_get_drvdata(pdev);
14305        struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
14306
14307        /* Bring device online, it will be no-op for non-fatal error resume */
14308        lpfc_online(phba);
14309}
14310
14311/**
14312 * lpfc_sli4_get_els_iocb_cnt - Calculate the # of ELS IOCBs to reserve
14313 * @phba: pointer to lpfc hba data structure.
14314 *
14315 * returns the number of ELS/CT IOCBs to reserve
14316 **/
14317int
14318lpfc_sli4_get_els_iocb_cnt(struct lpfc_hba *phba)
14319{
14320        int max_xri = phba->sli4_hba.max_cfg_param.max_xri;
14321
14322        if (phba->sli_rev == LPFC_SLI_REV4) {
14323                if (max_xri <= 100)
14324                        return 10;
14325                else if (max_xri <= 256)
14326                        return 25;
14327                else if (max_xri <= 512)
14328                        return 50;
14329                else if (max_xri <= 1024)
14330                        return 100;
14331                else if (max_xri <= 1536)
14332                        return 150;
14333                else if (max_xri <= 2048)
14334                        return 200;
14335                else
14336                        return 250;
14337        } else
14338                return 0;
14339}
14340
14341/**
14342 * lpfc_sli4_get_iocb_cnt - Calculate the # of total IOCBs to reserve
14343 * @phba: pointer to lpfc hba data structure.
14344 *
14345 * returns the number of ELS/CT + NVMET IOCBs to reserve
14346 **/
14347int
14348lpfc_sli4_get_iocb_cnt(struct lpfc_hba *phba)
14349{
14350        int max_xri = lpfc_sli4_get_els_iocb_cnt(phba);
14351
14352        if (phba->nvmet_support)
14353                max_xri += LPFC_NVMET_BUF_POST;
14354        return max_xri;
14355}
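
/*
 * Worked example for the two helpers above: a SLI-4 port reporting
 * max_cfg_param.max_xri = 900 lands in the "<= 1024" bracket, so
 * lpfc_sli4_get_els_iocb_cnt() returns 100; if the port also runs NVMET
 * (phba->nvmet_support), lpfc_sli4_get_iocb_cnt() adds LPFC_NVMET_BUF_POST
 * on top of that.
 */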
14356
14358static int
14359lpfc_log_write_firmware_error(struct lpfc_hba *phba, uint32_t offset,
14360        uint32_t magic_number, uint32_t ftype, uint32_t fid, uint32_t fsize,
14361        const struct firmware *fw)
14362{
14363        int rc;
14364        u8 sli_family;
14365
14366        sli_family = bf_get(lpfc_sli_intf_sli_family, &phba->sli4_hba.sli_intf);
14367        /* Three cases:  (1) FW was not supported on the detected adapter.
14368         * (2) FW update has been locked out administratively.
14369         * (3) Some other error during FW update.
14370         * In each case, an unmaskable message is written to the console
14371         * for admin diagnosis.
14372         */
14373        if (offset == ADD_STATUS_FW_NOT_SUPPORTED ||
14374            (sli_family == LPFC_SLI_INTF_FAMILY_G6 &&
14375             magic_number != MAGIC_NUMBER_G6) ||
14376            (sli_family == LPFC_SLI_INTF_FAMILY_G7 &&
14377             magic_number != MAGIC_NUMBER_G7) ||
14378            (sli_family == LPFC_SLI_INTF_FAMILY_G7P &&
14379             magic_number != MAGIC_NUMBER_G7P)) {
14380                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14381                                "3030 This firmware version is not supported on"
14382                                " this HBA model. Device:%x Magic:%x Type:%x "
14383                                "ID:%x Size %d %zd\n",
14384                                phba->pcidev->device, magic_number, ftype, fid,
14385                                fsize, fw->size);
14386                rc = -EINVAL;
14387        } else if (offset == ADD_STATUS_FW_DOWNLOAD_HW_DISABLED) {
14388                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14389                                "3021 Firmware downloads have been prohibited "
14390                                "by a system configuration setting on "
14391                                "Device:%x Magic:%x Type:%x ID:%x Size %d "
14392                                "%zd\n",
14393                                phba->pcidev->device, magic_number, ftype, fid,
14394                                fsize, fw->size);
14395                rc = -EACCES;
14396        } else {
14397                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14398                                "3022 FW Download failed. Add Status x%x "
14399                                "Device:%x Magic:%x Type:%x ID:%x Size %d "
14400                                "%zd\n",
14401                                offset, phba->pcidev->device, magic_number,
14402                                ftype, fid, fsize, fw->size);
14403                rc = -EIO;
14404        }
14405        return rc;
14406}
14407
14408/**
14409 * lpfc_write_firmware - attempt to write a firmware image to the port
14410 * @fw: pointer to firmware image returned from request_firmware.
14411 * @context: pointer to the lpfc hba data structure (completion context).
14412 *
14413 **/
14414static void
14415lpfc_write_firmware(const struct firmware *fw, void *context)
14416{
14417        struct lpfc_hba *phba = (struct lpfc_hba *)context;
14418        char fwrev[FW_REV_STR_SIZE];
14419        struct lpfc_grp_hdr *image;
14420        struct list_head dma_buffer_list;
14421        int i, rc = 0;
14422        struct lpfc_dmabuf *dmabuf, *next;
14423        uint32_t offset = 0, temp_offset = 0;
14424        uint32_t magic_number, ftype, fid, fsize;
14425
14426        /* The firmware pointer can be NULL in no-wait mode; sanity check */
14427        if (!fw) {
14428                rc = -ENXIO;
14429                goto out;
14430        }
14431        image = (struct lpfc_grp_hdr *)fw->data;
14432
14433        magic_number = be32_to_cpu(image->magic_number);
14434        ftype = bf_get_be32(lpfc_grp_hdr_file_type, image);
14435        fid = bf_get_be32(lpfc_grp_hdr_id, image);
14436        fsize = be32_to_cpu(image->size);
14437
14438        INIT_LIST_HEAD(&dma_buffer_list);
14439        lpfc_decode_firmware_rev(phba, fwrev, 1);
14440        if (strncmp(fwrev, image->revision, strnlen(image->revision, 16))) {
14441                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14442                                "3023 Updating Firmware, Current Version:%s "
14443                                "New Version:%s\n",
14444                                fwrev, image->revision);
14445                for (i = 0; i < LPFC_MBX_WR_CONFIG_MAX_BDE; i++) {
14446                        dmabuf = kzalloc(sizeof(struct lpfc_dmabuf),
14447                                         GFP_KERNEL);
14448                        if (!dmabuf) {
14449                                rc = -ENOMEM;
14450                                goto release_out;
14451                        }
14452                        dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
14453                                                          SLI4_PAGE_SIZE,
14454                                                          &dmabuf->phys,
14455                                                          GFP_KERNEL);
14456                        if (!dmabuf->virt) {
14457                                kfree(dmabuf);
14458                                rc = -ENOMEM;
14459                                goto release_out;
14460                        }
14461                        list_add_tail(&dmabuf->list, &dma_buffer_list);
14462                }
14463                while (offset < fw->size) {
14464                        temp_offset = offset;
14465                        list_for_each_entry(dmabuf, &dma_buffer_list, list) {
14466                                if (temp_offset + SLI4_PAGE_SIZE > fw->size) {
14467                                        memcpy(dmabuf->virt,
14468                                               fw->data + temp_offset,
14469                                               fw->size - temp_offset);
14470                                        temp_offset = fw->size;
14471                                        break;
14472                                }
14473                                memcpy(dmabuf->virt, fw->data + temp_offset,
14474                                       SLI4_PAGE_SIZE);
14475                                temp_offset += SLI4_PAGE_SIZE;
14476                        }
14477                        rc = lpfc_wr_object(phba, &dma_buffer_list,
14478                                    (fw->size - offset), &offset);
14479                        if (rc) {
14480                                rc = lpfc_log_write_firmware_error(phba, offset,
14481                                                                   magic_number,
14482                                                                   ftype,
14483                                                                   fid,
14484                                                                   fsize,
14485                                                                   fw);
14486                                goto release_out;
14487                        }
14488                }
14489                rc = offset;
14490        } else
14491                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14492                                "3029 Skipped Firmware update, Current "
14493                                "Version:%s New Version:%s\n",
14494                                fwrev, image->revision);
14495
14496release_out:
14497        list_for_each_entry_safe(dmabuf, next, &dma_buffer_list, list) {
14498                list_del(&dmabuf->list);
14499                dma_free_coherent(&phba->pcidev->dev, SLI4_PAGE_SIZE,
14500                                  dmabuf->virt, dmabuf->phys);
14501                kfree(dmabuf);
14502        }
14503        release_firmware(fw);
14504out:
14505        if (rc < 0)
14506                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14507                                "3062 Firmware update error, status %d.\n", rc);
14508        else
14509                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14510                                "3024 Firmware update success: size %d.\n", rc);
14511}
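
/*
 * Download-loop sketch (illustrative sizes): each pass of the while loop
 * above refills the DMA buffer list from fw->data + offset, one
 * SLI4_PAGE_SIZE page per buffer (the last copy may be short), then calls
 * lpfc_wr_object() with the bytes still remaining; the adapter reports its
 * progress back through 'offset' and the loop repeats until the whole
 * image is consumed.  E.g., assuming a 4 KB page, a 10000-byte image is
 * offered as 4096 + 4096 + 1808 byte chunks on the first pass, provided
 * the list holds at least three buffers.
 */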
14512
14513/**
14514 * lpfc_sli4_request_firmware_update - Request linux generic firmware upgrade
14515 * @phba: pointer to lpfc hba data structure.
14516 * @fw_upgrade: which firmware to update.
14517 *
14518 * This routine is called to perform Linux generic firmware upgrade on device
14519 * that supports such feature.
14520 **/
14521int
14522lpfc_sli4_request_firmware_update(struct lpfc_hba *phba, uint8_t fw_upgrade)
14523{
14524        uint8_t file_name[ELX_MODEL_NAME_SIZE];
14525        int ret;
14526        const struct firmware *fw;
14527
14528        /* Only supported on SLI4 interface type 2 for now */
14529        if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) <
14530            LPFC_SLI_INTF_IF_TYPE_2)
14531                return -EPERM;
14532
14533        snprintf(file_name, ELX_MODEL_NAME_SIZE, "%s.grp", phba->ModelName);
14534
14535        if (fw_upgrade == INT_FW_UPGRADE) {
14536                ret = request_firmware_nowait(THIS_MODULE, FW_ACTION_UEVENT,
14537                                        file_name, &phba->pcidev->dev,
14538                                        GFP_KERNEL, (void *)phba,
14539                                        lpfc_write_firmware);
14540        } else if (fw_upgrade == RUN_FW_UPGRADE) {
14541                ret = request_firmware(&fw, file_name, &phba->pcidev->dev);
14542                if (!ret)
14543                        lpfc_write_firmware(fw, (void *)phba);
14544        } else {
14545                ret = -EINVAL;
14546        }
14547
14548        return ret;
14549}
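
/*
 * Naming sketch: the image is looked up by model name, so a port whose
 * phba->ModelName is, say, "LPe32000" (hypothetical value) requests
 * "LPe32000.grp" from the firmware loader, typically found at
 * /lib/firmware/LPe32000.grp.  INT_FW_UPGRADE fetches it asynchronously
 * with request_firmware_nowait(); RUN_FW_UPGRADE fetches it synchronously
 * and writes it in-line.
 */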
14550
14551/**
14552 * lpfc_pci_probe_one_s4 - PCI probe func to reg SLI-4 device to PCI subsys
14553 * @pdev: pointer to PCI device
14554 * @pid: pointer to PCI device identifier
14555 *
14556 * This routine is called from the kernel's PCI subsystem for a device with
14557 * SLI-4 interface spec. When an Emulex HBA with SLI-4 interface spec is
14558 * presented on the PCI bus, the kernel PCI subsystem looks at PCI device-
14559 * specific information of the device and driver to see if the driver states
14560 * that it can support this kind of device. If the match is successful, the
14561 * driver core invokes this routine. If this routine determines it can claim
14562 * the HBA, it does all the initialization that it needs to do to handle the
14563 * HBA properly.
14564 *
14565 * Return code
14566 *      0 - driver can claim the device
14567 *      negative value - driver can not claim the device
14568 **/
14569static int
14570lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
14571{
14572        struct lpfc_hba   *phba;
14573        struct lpfc_vport *vport = NULL;
14574        struct Scsi_Host  *shost = NULL;
14575        int error;
14576        uint32_t cfg_mode, intr_mode;
14577
14578        /* Allocate memory for HBA structure */
14579        phba = lpfc_hba_alloc(pdev);
14580        if (!phba)
14581                return -ENOMEM;
14582
14583        INIT_LIST_HEAD(&phba->poll_list);
14584
14585        /* Perform generic PCI device enabling operation */
14586        error = lpfc_enable_pci_dev(phba);
14587        if (error)
14588                goto out_free_phba;
14589
14590        /* Set up SLI API function jump table for PCI-device group-1 HBAs */
14591        error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_OC);
14592        if (error)
14593                goto out_disable_pci_dev;
14594
14595        /* Set up SLI-4 specific device PCI memory space */
14596        error = lpfc_sli4_pci_mem_setup(phba);
14597        if (error) {
14598                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14599                                "1410 Failed to set up pci memory space.\n");
14600                goto out_disable_pci_dev;
14601        }
14602
14603        /* Set up SLI-4 Specific device driver resources */
14604        error = lpfc_sli4_driver_resource_setup(phba);
14605        if (error) {
14606                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14607                                "1412 Failed to set up driver resource.\n");
14608                goto out_unset_pci_mem_s4;
14609        }
14610
14611        INIT_LIST_HEAD(&phba->active_rrq_list);
14612        INIT_LIST_HEAD(&phba->fcf.fcf_pri_list);
14613
14614        /* Set up common device driver resources */
14615        error = lpfc_setup_driver_resource_phase2(phba);
14616        if (error) {
14617                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14618                                "1414 Failed to set up driver resource.\n");
14619                goto out_unset_driver_resource_s4;
14620        }
14621
14622        /* Get the default values for Model Name and Description */
14623        lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);
14624
14625        /* Now, trying to enable interrupt and bring up the device */
14626        cfg_mode = phba->cfg_use_msi;
14627
14628        /* Put device to a known state before enabling interrupt */
14629        phba->pport = NULL;
14630        lpfc_stop_port(phba);
14631
14632        /* Init cpu_map array */
14633        lpfc_cpu_map_array_init(phba);
14634
14635        /* Init hba_eq_hdl array */
14636        lpfc_hba_eq_hdl_array_init(phba);
14637
14638        /* Configure and enable interrupt */
14639        intr_mode = lpfc_sli4_enable_intr(phba, cfg_mode);
14640        if (intr_mode == LPFC_INTR_ERROR) {
14641                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14642                                "0426 Failed to enable interrupt.\n");
14643                error = -ENODEV;
14644                goto out_unset_driver_resource;
14645        }
14646        /* Default to single EQ for non-MSI-X */
14647        if (phba->intr_type != MSIX) {
14648                phba->cfg_irq_chann = 1;
14649                if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
14650                        if (phba->nvmet_support)
14651                                phba->cfg_nvmet_mrq = 1;
14652                }
14653        }
14654        lpfc_cpu_affinity_check(phba, phba->cfg_irq_chann);
14655
14656        /* Create SCSI host to the physical port */
14657        error = lpfc_create_shost(phba);
14658        if (error) {
14659                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14660                                "1415 Failed to create scsi host.\n");
14661                goto out_disable_intr;
14662        }
14663        vport = phba->pport;
14664        shost = lpfc_shost_from_vport(vport); /* save shost for error cleanup */
14665
14666        /* Configure sysfs attributes */
14667        error = lpfc_alloc_sysfs_attr(vport);
14668        if (error) {
14669                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14670                                "1416 Failed to allocate sysfs attr\n");
14671                goto out_destroy_shost;
14672        }
14673
14674        /* Set up SLI-4 HBA */
14675        if (lpfc_sli4_hba_setup(phba)) {
14676                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14677                                "1421 Failed to set up hba\n");
14678                error = -ENODEV;
14679                goto out_free_sysfs_attr;
14680        }
14681
14682        /* Log the current active interrupt mode */
14683        phba->intr_mode = intr_mode;
14684        lpfc_log_intr_mode(phba, intr_mode);
14685
14686        /* Perform post initialization setup */
14687        lpfc_post_init_setup(phba);
14688
14689        /* FW NVME support earlier in the driver load corrected the FC4
14690         * type, so a separate check of nvme_support here is unnecessary.
14691         */
14692        if (phba->nvmet_support == 0) {
14693                if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
14694                        /* Create NVME binding with nvme_fc_transport. This
14695                         * ensures the vport is initialized.  If the localport
14696                         * create fails, it should not unload the driver to
14697                         * support field issues.
14698                         */
14699                        error = lpfc_nvme_create_localport(vport);
14700                        if (error) {
14701                                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14702                                                "6004 NVME registration "
14703                                                "failed, error x%x\n",
14704                                                error);
14705                        }
14706                }
14707        }
14708
14709        /* check for firmware upgrade or downgrade */
14710        if (phba->cfg_request_firmware_upgrade)
14711                lpfc_sli4_request_firmware_update(phba, INT_FW_UPGRADE);
14712
14713        /* Check if there are static vports to be created. */
14714        lpfc_create_static_vport(phba);
14715
14716        /* Enable RAS FW log support */
14717        lpfc_sli4_ras_setup(phba);
14718
14719        timer_setup(&phba->cpuhp_poll_timer, lpfc_sli4_poll_hbtimer, 0);
14720        cpuhp_state_add_instance_nocalls(lpfc_cpuhp_state, &phba->cpuhp);
14721
14722        return 0;
14723
14724out_free_sysfs_attr:
14725        lpfc_free_sysfs_attr(vport);
14726out_destroy_shost:
14727        lpfc_destroy_shost(phba);
14728out_disable_intr:
14729        lpfc_sli4_disable_intr(phba);
14730out_unset_driver_resource:
14731        lpfc_unset_driver_resource_phase2(phba);
14732out_unset_driver_resource_s4:
14733        lpfc_sli4_driver_resource_unset(phba);
14734out_unset_pci_mem_s4:
14735        lpfc_sli4_pci_mem_unset(phba);
14736out_disable_pci_dev:
14737        lpfc_disable_pci_dev(phba);
14738        if (shost)
14739                scsi_host_put(shost);
14740out_free_phba:
14741        lpfc_hba_free(phba);
14742        return error;
14743}
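
/*
 * Error-unwind note: the goto ladder above tears down strictly in reverse
 * order of setup (sysfs attrs, then shost, interrupts, phase2 resources,
 * SLI-4 resources, PCI memory map, PCI device, and finally the phba), so
 * each label undoes only the steps that had completed before the failure.
 */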
14744
14745/**
14746 * lpfc_pci_remove_one_s4 - PCI func to unreg SLI-4 device from PCI subsystem
14747 * @pdev: pointer to PCI device
14748 *
14749 * This routine is called from the kernel's PCI subsystem for a device with
14750 * SLI-4 interface spec. When an Emulex HBA with SLI-4 interface spec is
14751 * removed from the PCI bus, it performs all the necessary cleanup for the HBA
14752 * device to be removed from the PCI subsystem properly.
14753 **/
14754static void
14755lpfc_pci_remove_one_s4(struct pci_dev *pdev)
14756{
14757        struct Scsi_Host *shost = pci_get_drvdata(pdev);
14758        struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
14759        struct lpfc_vport **vports;
14760        struct lpfc_hba *phba = vport->phba;
14761        int i;
14762
14763        /* Mark the device unloading flag */
14764        spin_lock_irq(&phba->hbalock);
14765        vport->load_flag |= FC_UNLOADING;
14766        spin_unlock_irq(&phba->hbalock);
14767        if (phba->cgn_i)
14768                lpfc_unreg_congestion_buf(phba);
14769
14770        lpfc_free_sysfs_attr(vport);
14771
14772        /* Release all the vports against this physical port */
14773        vports = lpfc_create_vport_work_array(phba);
14774        if (vports != NULL)
14775                for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
14776                        if (vports[i]->port_type == LPFC_PHYSICAL_PORT)
14777                                continue;
14778                        fc_vport_terminate(vports[i]->fc_vport);
14779                }
14780        lpfc_destroy_vport_work_array(phba, vports);
14781
14782        /* Remove FC host with the physical port */
14783        fc_remove_host(shost);
14784        scsi_remove_host(shost);
14785
14786        /* Perform ndlp cleanup on the physical port.  The nvme and nvmet
14787         * localports are destroyed afterwards to clean up all transport memory.
14788         */
14789        lpfc_cleanup(vport);
14790        lpfc_nvmet_destroy_targetport(phba);
14791        lpfc_nvme_destroy_localport(vport);
14792
14793        /* De-allocate multi-XRI pools */
14794        if (phba->cfg_xri_rebalancing)
14795                lpfc_destroy_multixri_pools(phba);
14796
14797        /*
14798         * Bring down the SLI Layer. This step disables all interrupts,
14799         * clears the rings, discards all mailbox commands, and resets
14800         * the HBA FCoE function.
14801         */
14802        lpfc_debugfs_terminate(vport);
14803
14804        lpfc_stop_hba_timers(phba);
14805        spin_lock_irq(&phba->port_list_lock);
14806        list_del_init(&vport->listentry);
14807        spin_unlock_irq(&phba->port_list_lock);
14808
14809        /* Perform scsi free before driver resource_unset since scsi
14810         * buffers are released to their corresponding pools here.
14811         */
14812        lpfc_io_free(phba);
14813        lpfc_free_iocb_list(phba);
14814        lpfc_sli4_hba_unset(phba);
14815
14816        lpfc_unset_driver_resource_phase2(phba);
14817        lpfc_sli4_driver_resource_unset(phba);
14818
14819        /* Unmap adapter Control and Doorbell registers */
14820        lpfc_sli4_pci_mem_unset(phba);
14821
14822        /* Release PCI resources and disable device's PCI function */
14823        scsi_host_put(shost);
14824        lpfc_disable_pci_dev(phba);
14825
14826        /* Finally, free the driver's device data structure */
14827        lpfc_hba_free(phba);
14828
14829        return;
14830}
14831
14832/**
14833 * lpfc_pci_suspend_one_s4 - PCI func to suspend SLI-4 device for power mgmnt
14834 * @dev_d: pointer to device
14835 *
14836 * This routine is called from the kernel's PCI subsystem to support system
14837 * Power Management (PM) for a device with SLI-4 interface spec. When PM
14838 * invokes this method, it quiesces the device by stopping the driver's
14839 * worker thread for the device, turning off the device's interrupt and DMA,
14840 * and bringing the device offline. Note that the driver implements only the
14841 * minimum PM requirements of a power-aware driver: all possible PM messages
14842 * (SUSPEND, HIBERNATE, FREEZE) to the suspend() method are treated as
14843 * SUSPEND, and the driver fully reinitializes its device during the
14844 * resume() method call. Consequently, the driver sets the device to the
14845 * PCI_D3hot state in PCI config space instead of setting it according to
14846 * the @msg provided by the PM.
14847 *
14848 * Return code
14849 *      0 - driver suspended the device
14850 *      Error otherwise
14851 **/
14852static int __maybe_unused
14853lpfc_pci_suspend_one_s4(struct device *dev_d)
14854{
14855        struct Scsi_Host *shost = dev_get_drvdata(dev_d);
14856        struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
14857
14858        lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
14859                        "2843 PCI device Power Management suspend.\n");
14860
14861        /* Bring down the device */
14862        lpfc_offline_prep(phba, LPFC_MBX_WAIT);
14863        lpfc_offline(phba);
14864        kthread_stop(phba->worker_thread);
14865
14866        /* Disable interrupt from device */
14867        lpfc_sli4_disable_intr(phba);
14868        lpfc_sli4_queue_destroy(phba);
14869
14870        return 0;
14871}
14872
14873/**
14874 * lpfc_pci_resume_one_s4 - PCI func to resume SLI-4 device for power mgmnt
14875 * @dev_d: pointer to device
14876 *
14877 * This routine is called from the kernel's PCI subsystem to support system
14878 * Power Management (PM) for a device with SLI-4 interface spec. When PM
14879 * invokes this method, it restores the device's PCI config space state and
14880 * fully reinitializes the device and brings it online. Note that the driver
14881 * implements only the minimum PM requirements of a power-aware driver: all
14882 * possible PM messages (SUSPEND, HIBERNATE, FREEZE) to the suspend() method
14883 * are treated as SUSPEND, and the driver fully reinitializes its device
14884 * during the resume() method call. Consequently, the device is set to
14885 * PCI_D0 directly in PCI config space before its saved state is
14886 * restored.
14887 *
14888 * Return code
14889 *      0 - driver resumed the device
14890 *      Error otherwise
14891 **/
14892static int __maybe_unused
14893lpfc_pci_resume_one_s4(struct device *dev_d)
14894{
14895        struct Scsi_Host *shost = dev_get_drvdata(dev_d);
14896        struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
14897        uint32_t intr_mode;
14898        int error;
14899
14900        lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
14901                        "0292 PCI device Power Management resume.\n");
14902
14903        /* Startup the kernel thread for this host adapter. */
14904        phba->worker_thread = kthread_run(lpfc_do_work, phba,
14905                                        "lpfc_worker_%d", phba->brd_no);
14906        if (IS_ERR(phba->worker_thread)) {
14907                error = PTR_ERR(phba->worker_thread);
14908                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14909                                "0293 PM resume failed to start worker "
14910                                "thread: error=x%x.\n", error);
14911                return error;
14912        }
14913
14914        /* Configure and enable interrupt */
14915        intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
14916        if (intr_mode == LPFC_INTR_ERROR) {
14917                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14918                                "0294 PM resume Failed to enable interrupt\n");
14919                return -EIO;
14920        } else
14921                phba->intr_mode = intr_mode;
14922
14923        /* Restart HBA and bring it online */
14924        lpfc_sli_brdrestart(phba);
14925        lpfc_online(phba);
14926
14927        /* Log the current active interrupt mode */
14928        lpfc_log_intr_mode(phba, phba->intr_mode);
14929
14930        return 0;
14931}
14932
14933/**
14934 * lpfc_sli4_prep_dev_for_recover - Prepare SLI4 device for pci slot recover
14935 * @phba: pointer to lpfc hba data structure.
14936 *
14937 * This routine is called to prepare the SLI4 device for PCI slot recover. It
14938 * aborts all the outstanding SCSI I/Os to the pci device.
14939 **/
14940static void
14941lpfc_sli4_prep_dev_for_recover(struct lpfc_hba *phba)
14942{
14943        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14944                        "2828 PCI channel I/O abort preparing for recovery\n");
14945        /*
14946         * There may be errored I/Os through the HBA; abort all I/Os on the
14947         * txcmplq and let the SCSI mid-layer retry them to recover.
14948         */
14949        lpfc_sli_abort_fcp_rings(phba);
14950}
14951
14952/**
14953 * lpfc_sli4_prep_dev_for_reset - Prepare SLI4 device for pci slot reset
14954 * @phba: pointer to lpfc hba data structure.
14955 *
14956 * This routine is called to prepare the SLI4 device for PCI slot reset. It
14957 * disables the device interrupt and pci device, and aborts the internal FCP
14958 * pending I/Os.
14959 **/
14960static void
14961lpfc_sli4_prep_dev_for_reset(struct lpfc_hba *phba)
14962{
14963        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14964                        "2826 PCI channel disable preparing for reset\n");
14965
14966        /* Block any management I/Os to the device */
14967        lpfc_block_mgmt_io(phba, LPFC_MBX_NO_WAIT);
14968
14969        /* Block all SCSI devices' I/Os on the host */
14970        lpfc_scsi_dev_block(phba);
14971
14972        /* Flush all driver's outstanding I/Os as we are to reset */
14973        lpfc_sli_flush_io_rings(phba);
14974
14975        /* stop all timers */
14976        lpfc_stop_hba_timers(phba);
14977
14978        /* Disable interrupt and pci device */
14979        lpfc_sli4_disable_intr(phba);
14980        lpfc_sli4_queue_destroy(phba);
14981        pci_disable_device(phba->pcidev);
14982}
14983
14984/**
14985 * lpfc_sli4_prep_dev_for_perm_failure - Prepare SLI4 dev for pci slot disable
14986 * @phba: pointer to lpfc hba data structure.
14987 *
14988 * This routine is called to prepare the SLI4 device for PCI slot permanently
14989 * disabling. It blocks the SCSI transport layer traffic and flushes the FCP
14990 * pending I/Os.
14991 **/
14992static void
14993lpfc_sli4_prep_dev_for_perm_failure(struct lpfc_hba *phba)
14994{
14995        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14996                        "2827 PCI channel permanent disable for failure\n");
14997
14998        /* Block all SCSI devices' I/Os on the host */
14999        lpfc_scsi_dev_block(phba);
15000
15001        /* stop all timers */
15002        lpfc_stop_hba_timers(phba);
15003
15004        /* Clean up all driver's outstanding I/Os */
15005        lpfc_sli_flush_io_rings(phba);
15006}
15007
15008/**
15009 * lpfc_io_error_detected_s4 - Method for handling PCI I/O error to SLI-4 device
15010 * @pdev: pointer to PCI device.
15011 * @state: the current PCI connection state.
15012 *
15013 * This routine is called from the PCI subsystem for error handling to a device
15014 * with SLI-4 interface spec. This function is called by the PCI subsystem
15015 * after a PCI bus error affecting this device has been detected. When this
15016 * function is invoked, it will need to stop all the I/Os and interrupt(s)
15017 * to the device. Once that is done, it will return PCI_ERS_RESULT_NEED_RESET
15018 * for the PCI subsystem to perform proper recovery as desired.
15019 *
15020 * Return codes
15021 *      PCI_ERS_RESULT_CAN_RECOVER - can be recovered with reset_link
15022 *      PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
 *      PCI_ERS_RESULT_DISCONNECT - device could not be recovered
15023 **/
15024static pci_ers_result_t
15025lpfc_io_error_detected_s4(struct pci_dev *pdev, pci_channel_state_t state)
15026{
15027        struct Scsi_Host *shost = pci_get_drvdata(pdev);
15028        struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
15029
15030        switch (state) {
15031        case pci_channel_io_normal:
15032                /* Non-fatal error, prepare for recovery */
15033                lpfc_sli4_prep_dev_for_recover(phba);
15034                return PCI_ERS_RESULT_CAN_RECOVER;
15035        case pci_channel_io_frozen:
15036                /* Fatal error, prepare for slot reset */
15037                lpfc_sli4_prep_dev_for_reset(phba);
15038                return PCI_ERS_RESULT_NEED_RESET;
15039        case pci_channel_io_perm_failure:
15040                /* Permanent failure, prepare for device down */
15041                lpfc_sli4_prep_dev_for_perm_failure(phba);
15042                return PCI_ERS_RESULT_DISCONNECT;
15043        default:
15044                /* Unknown state, prepare and request slot reset */
15045                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15046                                "2825 Unknown PCI error state: x%x\n", state);
15047                lpfc_sli4_prep_dev_for_reset(phba);
15048                return PCI_ERS_RESULT_NEED_RESET;
15049        }
15050}
15051
15052/**
15053 * lpfc_io_slot_reset_s4 - Method for restart PCI SLI-4 device from scratch
15054 * @pdev: pointer to PCI device.
15055 *
15056 * This routine is called from the PCI subsystem for error handling to a device
15057 * with SLI-4 interface spec. It is called after the PCI bus has been reset to
15058 * restart the PCI card from scratch, as if from a cold-boot. During the
15059 * PCI subsystem error recovery, after the driver returns
15060 * PCI_ERS_RESULT_NEED_RESET, the PCI subsystem will perform proper error
15061 * recovery and then call this routine before calling the .resume method to
15062 * recover the device. This function will initialize the HBA device, enable
15063 * the interrupt, but it will just put the HBA to offline state without
15064 * passing any I/O traffic.
15065 *
15066 * Return codes
15067 *      PCI_ERS_RESULT_RECOVERED - the device has been recovered
15068 *      PCI_ERS_RESULT_DISCONNECT - device could not be recovered
15069 */
15070static pci_ers_result_t
15071lpfc_io_slot_reset_s4(struct pci_dev *pdev)
15072{
15073        struct Scsi_Host *shost = pci_get_drvdata(pdev);
15074        struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
15075        struct lpfc_sli *psli = &phba->sli;
15076        uint32_t intr_mode;
15077
15078        dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n");
15079        if (pci_enable_device_mem(pdev)) {
15080                printk(KERN_ERR "lpfc: Cannot re-enable "
15081                        "PCI device after reset.\n");
15082                return PCI_ERS_RESULT_DISCONNECT;
15083        }
15084
15085        pci_restore_state(pdev);
15086
15087        /*
15088         * Since pci_restore_state() now clears the device's saved_state
15089         * flag, the restored state must be saved again.
15090         */
15091        pci_save_state(pdev);
15092
15093        if (pdev->is_busmaster)
15094                pci_set_master(pdev);
15095
15096        spin_lock_irq(&phba->hbalock);
15097        psli->sli_flag &= ~LPFC_SLI_ACTIVE;
15098        spin_unlock_irq(&phba->hbalock);
15099
15100        /* Configure and enable interrupt */
15101        intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
15102        if (intr_mode == LPFC_INTR_ERROR) {
15103                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15104                                "2824 Cannot re-enable interrupt after "
15105                                "slot reset.\n");
15106                return PCI_ERS_RESULT_DISCONNECT;
15107        } else
15108                phba->intr_mode = intr_mode;
15109
15110        /* Log the current active interrupt mode */
15111        lpfc_log_intr_mode(phba, phba->intr_mode);
15112
15113        return PCI_ERS_RESULT_RECOVERED;
15114}
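
/*
 * Unlike the SLI-3 variant, no board restart is issued here: an SLI-4
 * function reset is driven by a mailbox command that needs DMA enabled,
 * so the offline/restart/online cycle is deferred to lpfc_io_resume_s4()
 * below.
 */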
15115
15116/**
15117 * lpfc_io_resume_s4 - Method for resuming PCI I/O operation to SLI-4 device
15118 * @pdev: pointer to PCI device
15119 *
15120 * This routine is called from the PCI subsystem for error handling to a device
15121 * with SLI-4 interface spec. It is called when kernel error recovery tells
15122 * the lpfc driver that it is ok to resume normal PCI operation after PCI bus
15123 * error recovery. After this call, traffic can start to flow from this device
15124 * again.
15125 **/
15126static void
15127lpfc_io_resume_s4(struct pci_dev *pdev)
15128{
15129        struct Scsi_Host *shost = pci_get_drvdata(pdev);
15130        struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
15131
15132        /*
15133         * In case of slot reset, as function reset is performed through
15134         * mailbox command which needs DMA to be enabled, this operation
15135         * has to be moved to the io resume phase. Taking device offline
15136         * will perform the necessary cleanup.
15137         */
15138        if (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE)) {
15139                /* Perform device reset */
15140                lpfc_offline_prep(phba, LPFC_MBX_WAIT);
15141                lpfc_offline(phba);
15142                lpfc_sli_brdrestart(phba);
15143                /* Bring the device back online */
15144                lpfc_online(phba);
15145        }
15146}
15147
15148/**
15149 * lpfc_pci_probe_one - lpfc PCI probe func to reg dev to PCI subsystem
15150 * @pdev: pointer to PCI device
15151 * @pid: pointer to PCI device identifier
15152 *
15153 * This routine is to be registered to the kernel's PCI subsystem. When an
15154 * Emulex HBA device is presented on the PCI bus, the kernel PCI subsystem
15155 * looks at PCI device-specific information of the device and driver to see
15156 * if the driver states that it can support this kind of device. If the match
15157 * is successful, the driver core invokes this routine. This routine dispatches
15158 * the action to the proper SLI-3 or SLI-4 device probing routine, which will
15159 * do all the initialization that it needs to do to handle the HBA device
15160 * properly.
15161 *
15162 * Return code
15163 *      0 - driver can claim the device
15164 *      negative value - driver can not claim the device
15165 **/
15166static int
15167lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
15168{
15169        int rc;
15170        struct lpfc_sli_intf intf;
15171
15172        if (pci_read_config_dword(pdev, LPFC_SLI_INTF, &intf.word0))
15173                return -ENODEV;
15174
15175        if ((bf_get(lpfc_sli_intf_valid, &intf) == LPFC_SLI_INTF_VALID) &&
15176            (bf_get(lpfc_sli_intf_slirev, &intf) == LPFC_SLI_INTF_REV_SLI4))
15177                rc = lpfc_pci_probe_one_s4(pdev, pid);
15178        else
15179                rc = lpfc_pci_probe_one_s3(pdev, pid);
15180
15181        return rc;
15182}
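
/*
 * Dispatch example: the LPFC_SLI_INTF dword read from PCI config space is
 * decoded with the bf_get() accessors; only a register marked valid
 * (LPFC_SLI_INTF_VALID) that reports SLI rev 4 (LPFC_SLI_INTF_REV_SLI4)
 * takes the _s4 probe path.  Any other readable value falls back to the
 * SLI-3 probe, while a failed config read aborts with -ENODEV.
 */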
15183
15184/**
15185 * lpfc_pci_remove_one - lpfc PCI func to unreg dev from PCI subsystem
15186 * @pdev: pointer to PCI device
15187 *
15188 * This routine is to be registered to the kernel's PCI subsystem. When an
15189 * Emulex HBA is removed from PCI bus, the driver core invokes this routine.
15190 * This routine dispatches the action to the proper SLI-3 or SLI-4 device
15191 * remove routine, which will perform all the necessary cleanup for the
15192 * device to be removed from the PCI subsystem properly.
15193 **/
15194static void
15195lpfc_pci_remove_one(struct pci_dev *pdev)
15196{
15197        struct Scsi_Host *shost = pci_get_drvdata(pdev);
15198        struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
15199
15200        switch (phba->pci_dev_grp) {
15201        case LPFC_PCI_DEV_LP:
15202                lpfc_pci_remove_one_s3(pdev);
15203                break;
15204        case LPFC_PCI_DEV_OC:
15205                lpfc_pci_remove_one_s4(pdev);
15206                break;
15207        default:
15208                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15209                                "1424 Invalid PCI device group: 0x%x\n",
15210                                phba->pci_dev_grp);
15211                break;
15212        }
15213        return;
15214}
15215
15216/**
15217 * lpfc_pci_suspend_one - lpfc PCI func to suspend dev for power management
15218 * @dev: pointer to device
15219 *
15220 * This routine is to be registered to the kernel's PCI subsystem to support
15221 * system Power Management (PM). When PM invokes this method, it dispatches
15222 * the action to the proper SLI-3 or SLI-4 device suspend routine, which will
15223 * suspend the device.
15224 *
15225 * Return code
15226 *      0 - driver suspended the device
15227 *      Error otherwise
15228 **/
15229static int __maybe_unused
15230lpfc_pci_suspend_one(struct device *dev)
15231{
15232        struct Scsi_Host *shost = dev_get_drvdata(dev);
15233        struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
15234        int rc = -ENODEV;
15235
15236        switch (phba->pci_dev_grp) {
15237        case LPFC_PCI_DEV_LP:
15238                rc = lpfc_pci_suspend_one_s3(dev);
15239                break;
15240        case LPFC_PCI_DEV_OC:
15241                rc = lpfc_pci_suspend_one_s4(dev);
15242                break;
15243        default:
15244                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15245                                "1425 Invalid PCI device group: 0x%x\n",
15246                                phba->pci_dev_grp);
15247                break;
15248        }
15249        return rc;
15250}
15251
15252/**
15253 * lpfc_pci_resume_one - lpfc PCI func to resume dev for power management
15254 * @dev: pointer to device
15255 *
15256 * This routine is to be registered to the kernel's PCI subsystem to support
15257 * system Power Management (PM). When PM invokes this method, it dispatches
15258 * the action to the proper SLI-3 or SLI-4 device resume routine, which will
15259 * resume the device.
15260 *
15261 * Return code
15262 *      0 - driver resumed the device
15263 *      Error otherwise
15264 **/
15265static int __maybe_unused
15266lpfc_pci_resume_one(struct device *dev)
15267{
15268        struct Scsi_Host *shost = dev_get_drvdata(dev);
15269        struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
15270        int rc = -ENODEV;
15271
15272        switch (phba->pci_dev_grp) {
15273        case LPFC_PCI_DEV_LP:
15274                rc = lpfc_pci_resume_one_s3(dev);
15275                break;
15276        case LPFC_PCI_DEV_OC:
15277                rc = lpfc_pci_resume_one_s4(dev);
15278                break;
15279        default:
15280                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15281                                "1426 Invalid PCI device group: 0x%x\n",
15282                                phba->pci_dev_grp);
15283                break;
15284        }
15285        return rc;
15286}
15287
15288/**
15289 * lpfc_io_error_detected - lpfc method for handling PCI I/O error
15290 * @pdev: pointer to PCI device.
15291 * @state: the current PCI connection state.
15292 *
15293 * This routine is registered to the PCI subsystem for error handling. This
15294 * function is called by the PCI subsystem after a PCI bus error affecting
15295 * this device has been detected. When this routine is invoked, it dispatches
15296 * the action to the proper SLI-3 or SLI-4 device error detected handling
15297 * routine, which will perform the proper error detected operation.
15298 *
15299 * Return codes
15300 *      PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
15301 *      PCI_ERS_RESULT_DISCONNECT - device could not be recovered
15302 **/
15303static pci_ers_result_t
15304lpfc_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
15305{
15306        struct Scsi_Host *shost = pci_get_drvdata(pdev);
15307        struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
15308        pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
15309
15310        switch (phba->pci_dev_grp) {
15311        case LPFC_PCI_DEV_LP:
15312                rc = lpfc_io_error_detected_s3(pdev, state);
15313                break;
15314        case LPFC_PCI_DEV_OC:
15315                rc = lpfc_io_error_detected_s4(pdev, state);
15316                break;
15317        default:
15318                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15319                                "1427 Invalid PCI device group: 0x%x\n",
15320                                phba->pci_dev_grp);
15321                break;
15322        }
15323        return rc;
15324}
15325
15326/**
15327 * lpfc_io_slot_reset - lpfc method for restart PCI dev from scratch
15328 * @pdev: pointer to PCI device.
15329 *
15330 * This routine is registered to the PCI subsystem for error handling. This
15331 * function is called after the PCI bus has been reset to restart the PCI card
15332 * from scratch, as if from a cold-boot. When this routine is invoked, it
15333 * dispatches the action to the proper SLI-3 or SLI-4 device reset handling
15334 * routine, which will perform the proper device reset.
15335 *
15336 * Return codes
15337 *      PCI_ERS_RESULT_RECOVERED - the device has been recovered
15338 *      PCI_ERS_RESULT_DISCONNECT - device could not be recovered
15339 **/
15340static pci_ers_result_t
15341lpfc_io_slot_reset(struct pci_dev *pdev)
15342{
15343        struct Scsi_Host *shost = pci_get_drvdata(pdev);
15344        struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
15345        pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
15346
15347        switch (phba->pci_dev_grp) {
15348        case LPFC_PCI_DEV_LP:
15349                rc = lpfc_io_slot_reset_s3(pdev);
15350                break;
15351        case LPFC_PCI_DEV_OC:
15352                rc = lpfc_io_slot_reset_s4(pdev);
15353                break;
15354        default:
15355                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15356                                "1428 Invalid PCI device group: 0x%x\n",
15357                                phba->pci_dev_grp);
15358                break;
15359        }
15360        return rc;
15361}
15362
15363/**
15364 * lpfc_io_resume - lpfc method for resuming PCI I/O operation
15365 * @pdev: pointer to PCI device
15366 *
15367 * This routine is registered to the PCI subsystem for error handling. It
15368 * is called when kernel error recovery tells the lpfc driver that it is
15369 * OK to resume normal PCI operation after PCI bus error recovery. When
15370 * this routine is invoked, it dispatches the action to the proper SLI-3
15371 * or SLI-4 device io_resume routine, which will resume the device operation.
15372 **/
15373static void
15374lpfc_io_resume(struct pci_dev *pdev)
15375{
15376        struct Scsi_Host *shost = pci_get_drvdata(pdev);
15377        struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
15378
15379        switch (phba->pci_dev_grp) {
15380        case LPFC_PCI_DEV_LP:
15381                lpfc_io_resume_s3(pdev);
15382                break;
15383        case LPFC_PCI_DEV_OC:
15384                lpfc_io_resume_s4(pdev);
15385                break;
15386        default:
15387                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15388                                "1429 Invalid PCI device group: 0x%x\n",
15389                                phba->pci_dev_grp);
15390                break;
15391        }
15392        return;
15393}
15394
15395/**
15396 * lpfc_sli4_oas_verify - Verify OAS is supported by this adapter
15397 * @phba: pointer to lpfc hba data structure.
15398 *
15399 * This routine checks to see if OAS is supported by this adapter. If
15400 * supported, the Flash Optimized Fabric configuration flag (cfg_fof) is
15401 * set.  Otherwise, the flag is cleared and the pool created for OAS
15402 * device data is destroyed.
15403 *
15404 **/
15405static void
15406lpfc_sli4_oas_verify(struct lpfc_hba *phba)
15407{
15409        if (!phba->cfg_EnableXLane)
15410                return;
15411
15412        if (phba->sli4_hba.pc_sli4_params.oas_supported) {
15413                phba->cfg_fof = 1;
15414        } else {
15415                phba->cfg_fof = 0;
15416                mempool_destroy(phba->device_data_mem_pool);
15417                phba->device_data_mem_pool = NULL;
15418        }
15421}
15422
15423/**
15424 * lpfc_sli4_ras_init - Verify RAS-FW log is supported by this adapter
15425 * @phba: pointer to lpfc hba data structure.
15426 *
15427 * This routine checks whether RAS firmware logging is supported by the
15428 * adapter and, if so, whether logging is enabled for this PCI function.
15429 **/
15430void
15431lpfc_sli4_ras_init(struct lpfc_hba *phba)
15432{
15433        /* if ASIC_GEN_NUM >= 0xC (IF type 6 or SLI family G6) */
15434        if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
15435                    LPFC_SLI_INTF_IF_TYPE_6) ||
15436            (bf_get(lpfc_sli_intf_sli_family, &phba->sli4_hba.sli_intf) ==
15437                    LPFC_SLI_INTF_FAMILY_G6)) {
15438                phba->ras_fwlog.ras_hwsupport = true;
15439                if (phba->cfg_ras_fwlog_func == PCI_FUNC(phba->pcidev->devfn) &&
15440                    phba->cfg_ras_fwlog_buffsize)
15441                        phba->ras_fwlog.ras_enabled = true;
15442                else
15443                        phba->ras_fwlog.ras_enabled = false;
15444        } else {
15445                phba->ras_fwlog.ras_hwsupport = false;
15446        }
15447}
15448
15449
15450MODULE_DEVICE_TABLE(pci, lpfc_id_table);
15451
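/*
 * PCI error recovery (AER) entry points. The PCI core invokes
 * error_detected() first; if recovery requires a reset, slot_reset() is
 * called after the bus has been reset, and resume() is called once the
 * driver reports that normal I/O may restart.
 */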
15452static const struct pci_error_handlers lpfc_err_handler = {
15453        .error_detected = lpfc_io_error_detected,
15454        .slot_reset = lpfc_io_slot_reset,
15455        .resume = lpfc_io_resume,
15456};
15457
15458static SIMPLE_DEV_PM_OPS(lpfc_pci_pm_ops_one,
15459                         lpfc_pci_suspend_one,
15460                         lpfc_pci_resume_one);
15461
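/*
 * PCI driver glue. Note that .shutdown reuses the remove handler, so the
 * HBA is quiesced on system shutdown and kexec as well as on unload.
 */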
15462static struct pci_driver lpfc_driver = {
15463        .name           = LPFC_DRIVER_NAME,
15464        .id_table       = lpfc_id_table,
15465        .probe          = lpfc_pci_probe_one,
15466        .remove         = lpfc_pci_remove_one,
15467        .shutdown       = lpfc_pci_remove_one,
15468        .driver.pm      = &lpfc_pci_pm_ops_one,
15469        .err_handler    = &lpfc_err_handler,
15470};
15471
15472static const struct file_operations lpfc_mgmt_fop = {
15473        .owner = THIS_MODULE,
15474};
15475
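/*
 * Registering this miscdevice creates /dev/lpfcmgmt. The file_operations
 * only claim module ownership, so the node effectively serves to signal
 * to management utilities that the driver is loaded.
 */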
15476static struct miscdevice lpfc_mgmt_dev = {
15477        .minor = MISC_DYNAMIC_MINOR,
15478        .name = "lpfcmgmt",
15479        .fops = &lpfc_mgmt_fop,
15480};
15481
15482/**
15483 * lpfc_init - lpfc module initialization routine
15484 *
15485 * This routine is to be invoked when the lpfc module is loaded into the
15486 * kernel. The special kernel macro module_init() is used to indicate the
15487 * role of this routine to the kernel as the lpfc module entry point.
15488 *
15489 * Return codes
15490 *   0 - successful
15491 *   -ENOMEM - FC attach transport failed
15492 *   all others - failed
15493 */
15494static int __init
15495lpfc_init(void)
15496{
15497        int error = 0;
15498
15499        pr_info(LPFC_MODULE_DESC "\n");
15500        pr_info(LPFC_COPYRIGHT "\n");
15501
15502        error = misc_register(&lpfc_mgmt_dev);
15503        if (error)
15504                pr_err("Could not register lpfcmgmt device, "
15505                       "misc_register returned with status %d\n", error);
15506
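        /*
         * Attach two FC transport templates: one for the physical port
         * (with vport create/delete hooks patched in) and one for NPIV
         * vports. fc_attach_transport() returns NULL on failure, hence
         * the -ENOMEM default below.
         */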
15507        error = -ENOMEM;
15508        lpfc_transport_functions.vport_create = lpfc_vport_create;
15509        lpfc_transport_functions.vport_delete = lpfc_vport_delete;
15510        lpfc_transport_template =
15511                                fc_attach_transport(&lpfc_transport_functions);
15512        if (lpfc_transport_template == NULL)
15513                goto unregister;
15514        lpfc_vport_transport_template =
15515                fc_attach_transport(&lpfc_vport_transport_functions);
15516        if (lpfc_vport_transport_template == NULL) {
15517                fc_release_transport(lpfc_transport_template);
15518                goto unregister;
15519        }
15520        lpfc_wqe_cmd_template();
15521        lpfc_nvmet_cmd_template();
15522
15523        /* Initialize in case vector mapping is needed */
15524        lpfc_present_cpu = num_present_cpus();
15525
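        /*
         * CPUHP_AP_ONLINE_DYN allocates a hotplug state dynamically; on
         * success the positive state number is returned and saved so that
         * lpfc_exit() can pass it to cpuhp_remove_multi_state().
         */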
15526        error = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
15527                                        "lpfc/sli4:online",
15528                                        lpfc_cpu_online, lpfc_cpu_offline);
15529        if (error < 0)
15530                goto cpuhp_failure;
15531        lpfc_cpuhp_state = error;
15532
15533        error = pci_register_driver(&lpfc_driver);
15534        if (error)
15535                goto unwind;
15536
15537        return error;
15538
15539unwind:
15540        cpuhp_remove_multi_state(lpfc_cpuhp_state);
15541cpuhp_failure:
15542        fc_release_transport(lpfc_transport_template);
15543        fc_release_transport(lpfc_vport_transport_template);
15544unregister:
15545        misc_deregister(&lpfc_mgmt_dev);
15546
15547        return error;
15548}
15549
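/**
 * lpfc_dmp_dbg - dump the driver's internal debug log ring to the console
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine drains the dbg_log circular buffer through dev_info(),
 * oldest entry first. It bails out if verbose logging is enabled on the
 * physical port or any vport (those messages were already printed) or if
 * another context is currently dumping the ring.
 **/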
15550void lpfc_dmp_dbg(struct lpfc_hba *phba)
15551{
15552        unsigned int start_idx;
15553        unsigned int dbg_cnt;
15554        unsigned int temp_idx;
15555        int i;
15556        int j = 0;
15557        unsigned long rem_nsec, iflags;
15558        bool log_verbose = false;
15559        struct lpfc_vport *port_iterator;
15560
15561        /* Don't dump messages if we explicitly set log_verbose for the
15562         * physical port or any vport.
15563         */
15564        if (phba->cfg_log_verbose)
15565                return;
15566
15567        spin_lock_irqsave(&phba->port_list_lock, iflags);
15568        list_for_each_entry(port_iterator, &phba->port_list, listentry) {
15569                if (port_iterator->load_flag & FC_UNLOADING)
15570                        continue;
15571                if (scsi_host_get(lpfc_shost_from_vport(port_iterator))) {
15572                        if (port_iterator->cfg_log_verbose)
15573                                log_verbose = true;
15574
15575                        scsi_host_put(lpfc_shost_from_vport(port_iterator));
15576
15577                        if (log_verbose) {
15578                                spin_unlock_irqrestore(&phba->port_list_lock,
15579                                                       iflags);
15580                                return;
15581                        }
15582                }
15583        }
15584        spin_unlock_irqrestore(&phba->port_list_lock, iflags);
15585
15586        if (atomic_cmpxchg(&phba->dbg_log_dmping, 0, 1) != 0)
15587                return;
15588
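        /*
         * Compute where the oldest unprinted entry lives and clamp the
         * count to the ring size; dbg_log_idx is a free-running counter,
         * so indices are reduced modulo DBG_LOG_SZ.
         */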
15589        start_idx = (unsigned int)atomic_read(&phba->dbg_log_idx) % DBG_LOG_SZ;
15590        dbg_cnt = (unsigned int)atomic_read(&phba->dbg_log_cnt);
15591        if (!dbg_cnt)
15592                goto out;
15593        temp_idx = start_idx;
15594        if (dbg_cnt >= DBG_LOG_SZ) {
15595                dbg_cnt = DBG_LOG_SZ;
15596                temp_idx -= 1;
15597        } else {
15598                if ((start_idx + dbg_cnt) > (DBG_LOG_SZ - 1)) {
15599                        temp_idx = (start_idx + dbg_cnt) % DBG_LOG_SZ;
15600                } else {
15601                        if (start_idx < dbg_cnt)
15602                                start_idx = DBG_LOG_SZ - (dbg_cnt - start_idx);
15603                        else
15604                                start_idx -= dbg_cnt;
15605                }
15606        }
15607        dev_info(&phba->pcidev->dev, "start %d end %d cnt %d\n",
15608                 start_idx, temp_idx, dbg_cnt);
15609
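        /* Walk from the oldest entry toward the newest; j tracks the
         * wrap back to slot 0 once the end of the buffer is passed.
         */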
15610        for (i = 0; i < dbg_cnt; i++) {
15611                if ((start_idx + i) < DBG_LOG_SZ)
15612                        temp_idx = start_idx + i;
15613                else
15614                        temp_idx = j++;
15615                rem_nsec = do_div(phba->dbg_log[temp_idx].t_ns, NSEC_PER_SEC);
15616                dev_info(&phba->pcidev->dev, "%d: [%5lu.%06lu] %s",
15617                         temp_idx,
15618                         (unsigned long)phba->dbg_log[temp_idx].t_ns,
15619                         rem_nsec / 1000,
15620                         phba->dbg_log[temp_idx].log);
15621        }
15622out:
15623        atomic_set(&phba->dbg_log_cnt, 0);
15624        atomic_set(&phba->dbg_log_dmping, 0);
15625}
15626
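/**
 * lpfc_dbg_print - record a message in the driver's internal debug ring
 * @phba: pointer to lpfc hba data structure.
 * @fmt: printf-style format string.
 *
 * This routine formats the message into the next dbg_log slot and
 * timestamps it with local_clock(). If the ring is being dumped when the
 * call is made, the message is printed directly via dev_info() instead
 * of being buffered.
 **/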
15627__printf(2, 3)
15628void lpfc_dbg_print(struct lpfc_hba *phba, const char *fmt, ...)
15629{
15630        unsigned int idx;
15631        va_list args;
15632        int dbg_dmping = atomic_read(&phba->dbg_log_dmping);
15633        struct va_format vaf;
15634
15636        va_start(args, fmt);
15637        if (unlikely(dbg_dmping)) {
15638                vaf.fmt = fmt;
15639                vaf.va = &args;
15640                dev_info(&phba->pcidev->dev, "%pV", &vaf);
15641                va_end(args);
15642                return;
15643        }
15644        idx = (unsigned int)atomic_fetch_add(1, &phba->dbg_log_idx) %
15645                DBG_LOG_SZ;
15646
15647        atomic_inc(&phba->dbg_log_cnt);
15648
15649        vscnprintf(phba->dbg_log[idx].log,
15650                   sizeof(phba->dbg_log[idx].log), fmt, args);
15651        va_end(args);
15652
15653        phba->dbg_log[idx].t_ns = local_clock();
15654}
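
/*
 * Hypothetical call site (for illustration only, not part of this file):
 * hot paths that cannot afford lpfc_printf_log() can leave breadcrumbs
 * that lpfc_dmp_dbg() prints later, e.g.:
 *
 *      lpfc_dbg_print(phba, "xri x%x cmpl status x%x\n", xri, status);
 *
 * where xri and status are whatever state that path wants recorded.
 */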
15655
15656/**
15657 * lpfc_exit - lpfc module removal routine
15658 *
15659 * This routine is invoked when the lpfc module is removed from the kernel.
15660 * The special kernel macro module_exit() is used to indicate the role of
15661 * this routine to the kernel as the lpfc module exit point.
15662 */
15663static void __exit
15664lpfc_exit(void)
15665{
15666        misc_deregister(&lpfc_mgmt_dev);
15667        pci_unregister_driver(&lpfc_driver);
15668        cpuhp_remove_multi_state(lpfc_cpuhp_state);
15669        fc_release_transport(lpfc_transport_template);
15670        fc_release_transport(lpfc_vport_transport_template);
15671        idr_destroy(&lpfc_hba_index);
15672}
15673
15674module_init(lpfc_init);
15675module_exit(lpfc_exit);
15676MODULE_LICENSE("GPL");
15677MODULE_DESCRIPTION(LPFC_MODULE_DESC);
15678MODULE_AUTHOR("Broadcom");
15679MODULE_VERSION("0:" LPFC_DRIVER_VERSION);
15680