linux/drivers/scsi/lpfc/lpfc_init.c
/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2004-2014 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.emulex.com                                                  *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/

#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/idr.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/ctype.h>
#include <linux/aer.h>
#include <linux/slab.h>
#include <linux/firmware.h>
#include <linux/miscdevice.h>
#include <linux/percpu.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>

#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc_scsi.h"
#include "lpfc.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_vport.h"
#include "lpfc_version.h"

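/*
 * Debug capture buffers, used by the BlockGuard (T10 DIF) debugging
 * helpers in lpfc_scsi.c to snapshot the data and protection segments
 * of a failing I/O; _dump_buf_lock serializes access to them.
 */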
char *_dump_buf_data;
unsigned long _dump_buf_data_order;
char *_dump_buf_dif;
unsigned long _dump_buf_dif_order;
spinlock_t _dump_buf_lock;

/* Used when mapping IRQ vectors in a driver centric manner */
uint16_t *lpfc_used_cpu;
uint32_t lpfc_present_cpu;

static void lpfc_get_hba_model_desc(struct lpfc_hba *, uint8_t *, uint8_t *);
static int lpfc_post_rcv_buf(struct lpfc_hba *);
static int lpfc_sli4_queue_verify(struct lpfc_hba *);
static int lpfc_create_bootstrap_mbox(struct lpfc_hba *);
static int lpfc_setup_endian_order(struct lpfc_hba *);
static void lpfc_destroy_bootstrap_mbox(struct lpfc_hba *);
static void lpfc_free_els_sgl_list(struct lpfc_hba *);
static void lpfc_init_sgl_list(struct lpfc_hba *);
static int lpfc_init_active_sgl_array(struct lpfc_hba *);
static void lpfc_free_active_sgl(struct lpfc_hba *);
static int lpfc_hba_down_post_s3(struct lpfc_hba *phba);
static int lpfc_hba_down_post_s4(struct lpfc_hba *phba);
static int lpfc_sli4_cq_event_pool_create(struct lpfc_hba *);
static void lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *);
static void lpfc_sli4_cq_event_release_all(struct lpfc_hba *);
static void lpfc_sli4_disable_intr(struct lpfc_hba *);
static uint32_t lpfc_sli4_enable_intr(struct lpfc_hba *, uint32_t);
static void lpfc_sli4_oas_verify(struct lpfc_hba *phba);

static struct scsi_transport_template *lpfc_transport_template = NULL;
static struct scsi_transport_template *lpfc_vport_transport_template = NULL;
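/* IDR used to hand each HBA instance a unique board number (brd_no). */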
static DEFINE_IDR(lpfc_hba_index);

/**
 * lpfc_config_port_prep - Perform lpfc initialization prior to config port
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine will do LPFC initialization prior to issuing the CONFIG_PORT
 * mailbox command. It retrieves the revision information from the HBA and
 * collects the Vital Product Data (VPD) about the HBA for preparing the
 * configuration of the HBA.
 *
 * Return codes:
 *   0 - success.
 *   -ERESTART - requests the SLI layer to reset the HBA and try again.
 *   Any other value - indicates an error.
 **/
int
lpfc_config_port_prep(struct lpfc_hba *phba)
{
        lpfc_vpd_t *vp = &phba->vpd;
        int i = 0, rc;
        LPFC_MBOXQ_t *pmb;
        MAILBOX_t *mb;
        char *lpfc_vpd_data = NULL;
        uint16_t offset = 0;
        static char licensed[56] =
                    "key unlock for use with gnu public licensed code only\0";
        static int init_key = 1;

        pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
        if (!pmb) {
                phba->link_state = LPFC_HBA_ERROR;
                return -ENOMEM;
        }

        mb = &pmb->u.mb;
        phba->link_state = LPFC_INIT_MBX_CMDS;

        if (lpfc_is_LC_HBA(phba->pcidev->device)) {
                if (init_key) {
                        uint32_t *ptext = (uint32_t *) licensed;

                        for (i = 0; i < 56; i += sizeof (uint32_t), ptext++)
                                *ptext = cpu_to_be32(*ptext);
                        init_key = 0;
                }

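                /*
                 * Hand the license key to the adapter: READ_NVPARM is
                 * issued with the byte-swapped key placed in the
                 * otherwise-reserved rsvd3 area of the request.
                 */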
                lpfc_read_nv(phba, pmb);
                memset((char *)mb->un.varRDnvp.rsvd3, 0,
                        sizeof (mb->un.varRDnvp.rsvd3));
                memcpy((char *)mb->un.varRDnvp.rsvd3, licensed,
                         sizeof (licensed));

                rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);

                if (rc != MBX_SUCCESS) {
                        lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
                                        "0324 Config Port initialization "
                                        "error, mbxCmd x%x READ_NVPARM, "
                                        "mbxStatus x%x\n",
                                        mb->mbxCommand, mb->mbxStatus);
                        mempool_free(pmb, phba->mbox_mem_pool);
                        return -ERESTART;
                }
                memcpy(phba->wwnn, (char *)mb->un.varRDnvp.nodename,
                       sizeof(phba->wwnn));
                memcpy(phba->wwpn, (char *)mb->un.varRDnvp.portname,
                       sizeof(phba->wwpn));
        }

        phba->sli3_options = 0x0;

        /* Setup and issue mailbox READ REV command */
        lpfc_read_rev(phba, pmb);
        rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
        if (rc != MBX_SUCCESS) {
                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                                "0439 Adapter failed to init, mbxCmd x%x "
                                "READ_REV, mbxStatus x%x\n",
                                mb->mbxCommand, mb->mbxStatus);
                mempool_free(pmb, phba->mbox_mem_pool);
                return -ERESTART;
        }

        /*
         * The value of rr must be 1 since the driver sets the cv field to 1.
         * This setting requires the FW to set all revision fields.
         */
        if (mb->un.varRdRev.rr == 0) {
                vp->rev.rBit = 0;
                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                                "0440 Adapter failed to init, READ_REV has "
                                "missing revision information.\n");
                mempool_free(pmb, phba->mbox_mem_pool);
                return -ERESTART;
        }

        if (phba->sli_rev == 3 && !mb->un.varRdRev.v3rsp) {
                mempool_free(pmb, phba->mbox_mem_pool);
                return -EINVAL;
        }

        /* Save information as VPD data */
        vp->rev.rBit = 1;
        memcpy(&vp->sli3Feat, &mb->un.varRdRev.sli3Feat, sizeof(uint32_t));
        vp->rev.sli1FwRev = mb->un.varRdRev.sli1FwRev;
        memcpy(vp->rev.sli1FwName, (char *) mb->un.varRdRev.sli1FwName, 16);
        vp->rev.sli2FwRev = mb->un.varRdRev.sli2FwRev;
        memcpy(vp->rev.sli2FwName, (char *) mb->un.varRdRev.sli2FwName, 16);
        vp->rev.biuRev = mb->un.varRdRev.biuRev;
        vp->rev.smRev = mb->un.varRdRev.smRev;
        vp->rev.smFwRev = mb->un.varRdRev.un.smFwRev;
        vp->rev.endecRev = mb->un.varRdRev.endecRev;
        vp->rev.fcphHigh = mb->un.varRdRev.fcphHigh;
        vp->rev.fcphLow = mb->un.varRdRev.fcphLow;
        vp->rev.feaLevelHigh = mb->un.varRdRev.feaLevelHigh;
        vp->rev.feaLevelLow = mb->un.varRdRev.feaLevelLow;
        vp->rev.postKernRev = mb->un.varRdRev.postKernRev;
        vp->rev.opFwRev = mb->un.varRdRev.opFwRev;

        /* If the sli feature level is less than 9, we must
         * tear down all RPIs and VPIs on link down if NPIV
         * is enabled.
         */
        if (vp->rev.feaLevelHigh < 9)
                phba->sli3_options |= LPFC_SLI3_VPORT_TEARDOWN;

        if (lpfc_is_LC_HBA(phba->pcidev->device))
                memcpy(phba->RandomData, (char *)&mb->un.varWords[24],
                                                sizeof (phba->RandomData));

        /* Get adapter VPD information */
        lpfc_vpd_data = kmalloc(DMP_VPD_SIZE, GFP_KERNEL);
        if (!lpfc_vpd_data)
                goto out_free_mbox;
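        /*
         * Pull the VPD region from adapter memory in DMP_RSP-sized
         * chunks. The loop ends when DUMP_MEMORY returns a zero word
         * count (no more data, or a mailbox error) or when the
         * DMP_VPD_SIZE staging buffer is full.
         */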
        do {
                lpfc_dump_mem(phba, pmb, offset, DMP_REGION_VPD);
                rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);

                if (rc != MBX_SUCCESS) {
                        lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
                                        "0441 VPD not present on adapter, "
                                        "mbxCmd x%x DUMP VPD, mbxStatus x%x\n",
                                        mb->mbxCommand, mb->mbxStatus);
                        mb->un.varDmp.word_cnt = 0;
                }
                /* Dump mem may return a zero word count when finished, or
                 * when a mailbox error occurred; either way we are done.
                 */
                if (mb->un.varDmp.word_cnt == 0)
                        break;
                if (mb->un.varDmp.word_cnt > DMP_VPD_SIZE - offset)
                        mb->un.varDmp.word_cnt = DMP_VPD_SIZE - offset;
                lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
                                      lpfc_vpd_data + offset,
                                      mb->un.varDmp.word_cnt);
                offset += mb->un.varDmp.word_cnt;
        } while (mb->un.varDmp.word_cnt && offset < DMP_VPD_SIZE);
        lpfc_parse_vpd(phba, lpfc_vpd_data, offset);

        kfree(lpfc_vpd_data);
out_free_mbox:
        mempool_free(pmb, phba->mbox_mem_pool);
        return 0;
}

/**
 * lpfc_config_async_cmpl - Completion handler for config async event mbox cmd
 * @phba: pointer to lpfc hba data structure.
 * @pmboxq: pointer to the driver internal queue element for mailbox command.
 *
 * This is the completion handler for the driver's mailbox command that
 * configures asynchronous event support on the device. If the mailbox
 * command returns successfully, the internal temperature sensor support
 * flag is set to 1; otherwise it is set to 0.
 **/
static void
lpfc_config_async_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
{
        if (pmboxq->u.mb.mbxStatus == MBX_SUCCESS)
                phba->temp_sensor_support = 1;
        else
                phba->temp_sensor_support = 0;
        mempool_free(pmboxq, phba->mbox_mem_pool);
        return;
}

/**
 * lpfc_dump_wakeup_param_cmpl - dump memory mailbox command completion handler
 * @phba: pointer to lpfc hba data structure.
 * @pmboxq: pointer to the driver internal queue element for mailbox command.
 *
 * This is the completion handler for the dump mailbox command used to get
 * the wake up parameters. When this command completes, the response contains
 * the Option ROM version of the HBA. This function translates the version
 * number into a human readable string and stores it in OptionROMVersion.
 **/
static void
lpfc_dump_wakeup_param_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
{
        struct prog_id *prg;
        uint32_t prog_id_word;
        char dist = ' ';
        /* character array used for decoding dist type. */
        char dist_char[] = "nabx";

        if (pmboxq->u.mb.mbxStatus != MBX_SUCCESS) {
                mempool_free(pmboxq, phba->mbox_mem_pool);
                return;
        }

        prg = (struct prog_id *) &prog_id_word;

        /* word 7 contains the option rom version */
        prog_id_word = pmboxq->u.mb.un.varWords[7];

        /* Decode the Option ROM version word to a readable string */
        if (prg->dist < 4)
                dist = dist_char[prg->dist];

        if ((prg->dist == 3) && (prg->num == 0))
                snprintf(phba->OptionROMVersion, 32, "%d.%d%d",
                        prg->ver, prg->rev, prg->lev);
        else
                snprintf(phba->OptionROMVersion, 32, "%d.%d%d%c%d",
                        prg->ver, prg->rev, prg->lev,
                        dist, prg->num);
        mempool_free(pmboxq, phba->mbox_mem_pool);
        return;
}

/**
 * lpfc_update_vport_wwn - Updates the fc_nodename, fc_portname,
 *      cfg_soft_wwnn, cfg_soft_wwpn
 * @vport: pointer to lpfc vport data structure.
 *
 * Return codes
 *   None.
 **/
void
lpfc_update_vport_wwn(struct lpfc_vport *vport)
{
        /* If the soft name exists then update it using the service params */
        if (vport->phba->cfg_soft_wwnn)
                u64_to_wwn(vport->phba->cfg_soft_wwnn,
                           vport->fc_sparam.nodeName.u.wwn);
        if (vport->phba->cfg_soft_wwpn)
                u64_to_wwn(vport->phba->cfg_soft_wwpn,
                           vport->fc_sparam.portName.u.wwn);

        /*
         * If the name is empty or there exists a soft name
         * then copy the service params name, otherwise use the fc name
         */
        if (vport->fc_nodename.u.wwn[0] == 0 || vport->phba->cfg_soft_wwnn)
                memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName,
                        sizeof(struct lpfc_name));
        else
                memcpy(&vport->fc_sparam.nodeName, &vport->fc_nodename,
                        sizeof(struct lpfc_name));

        if (vport->fc_portname.u.wwn[0] == 0 || vport->phba->cfg_soft_wwpn)
                memcpy(&vport->fc_portname, &vport->fc_sparam.portName,
                        sizeof(struct lpfc_name));
        else
                memcpy(&vport->fc_sparam.portName, &vport->fc_portname,
                        sizeof(struct lpfc_name));
}

/**
 * lpfc_config_port_post - Perform lpfc initialization after config port
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine will do LPFC initialization after the CONFIG_PORT mailbox
 * command call. It performs all internal resource and state setups on the
 * port: post IOCB buffers, enable appropriate host interrupt attentions,
 * ELS ring timers, etc.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
int
lpfc_config_port_post(struct lpfc_hba *phba)
{
        struct lpfc_vport *vport = phba->pport;
        struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
        LPFC_MBOXQ_t *pmb;
        MAILBOX_t *mb;
        struct lpfc_dmabuf *mp;
        struct lpfc_sli *psli = &phba->sli;
        uint32_t status, timeout;
        int i, j;
        int rc;

        spin_lock_irq(&phba->hbalock);
        /*
         * If the Config port completed correctly the HBA is no longer
         * overheated.
         */
        if (phba->over_temp_state == HBA_OVER_TEMP)
                phba->over_temp_state = HBA_NORMAL_TEMP;
        spin_unlock_irq(&phba->hbalock);

        pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
        if (!pmb) {
                phba->link_state = LPFC_HBA_ERROR;
                return -ENOMEM;
        }
        mb = &pmb->u.mb;

        /* Get login parameters for NID.  */
        rc = lpfc_read_sparam(phba, pmb, 0);
        if (rc) {
                mempool_free(pmb, phba->mbox_mem_pool);
                return -ENOMEM;
        }

        pmb->vport = vport;
        if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                                "0448 Adapter failed init, mbxCmd x%x "
                                "READ_SPARM mbxStatus x%x\n",
                                mb->mbxCommand, mb->mbxStatus);
                phba->link_state = LPFC_HBA_ERROR;
                mp = (struct lpfc_dmabuf *) pmb->context1;
                mempool_free(pmb, phba->mbox_mem_pool);
                lpfc_mbuf_free(phba, mp->virt, mp->phys);
                kfree(mp);
                return -EIO;
        }

        mp = (struct lpfc_dmabuf *) pmb->context1;

        memcpy(&vport->fc_sparam, mp->virt, sizeof (struct serv_parm));
        lpfc_mbuf_free(phba, mp->virt, mp->phys);
        kfree(mp);
        pmb->context1 = NULL;
        lpfc_update_vport_wwn(vport);

        /* Update the fc_host data structures with new wwn. */
        fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
        fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
        fc_host_max_npiv_vports(shost) = phba->max_vpi;

        /* If no serial number in VPD data, use low 6 bytes of WWNN */
        /* This should be consolidated into parse_vpd ? - mr */
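        /*
         * Each of the 6 IEEE bytes yields two characters: a nibble of
         * 0-9 maps to '0'-'9' (0x30 + nibble) and a nibble of 10-15
         * maps to 'a'-'f' (0x61 + nibble - 10), i.e. lowercase hex.
         * For example, an IEEE byte of 0xC5 produces "c5".
         */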
        if (phba->SerialNumber[0] == 0) {
                uint8_t *outptr;

                outptr = &vport->fc_nodename.u.s.IEEE[0];
                for (i = 0; i < 12; i++) {
                        status = *outptr++;
                        j = ((status & 0xf0) >> 4);
                        if (j <= 9)
                                phba->SerialNumber[i] =
                                    (char)((uint8_t) 0x30 + (uint8_t) j);
                        else
                                phba->SerialNumber[i] =
                                    (char)((uint8_t) 0x61 + (uint8_t) (j - 10));
                        i++;
                        j = (status & 0xf);
                        if (j <= 9)
                                phba->SerialNumber[i] =
                                    (char)((uint8_t) 0x30 + (uint8_t) j);
                        else
                                phba->SerialNumber[i] =
                                    (char)((uint8_t) 0x61 + (uint8_t) (j - 10));
                }
        }

        lpfc_read_config(phba, pmb);
        pmb->vport = vport;
        if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                                "0453 Adapter failed to init, mbxCmd x%x "
                                "READ_CONFIG, mbxStatus x%x\n",
                                mb->mbxCommand, mb->mbxStatus);
                phba->link_state = LPFC_HBA_ERROR;
                mempool_free(pmb, phba->mbox_mem_pool);
                return -EIO;
        }

        /* Check if the port is disabled */
        lpfc_sli_read_link_ste(phba);

        /* Reset the DFT_HBA_Q_DEPTH to the max xri  */
        i = (mb->un.varRdConfig.max_xri + 1);
        if (phba->cfg_hba_queue_depth > i) {
                lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
                                "3359 HBA queue depth changed from %d to %d\n",
                                phba->cfg_hba_queue_depth, i);
                phba->cfg_hba_queue_depth = i;
        }

        /* Reset the DFT_LUN_Q_DEPTH to (max xri >> 3)  */
        i = (mb->un.varRdConfig.max_xri >> 3);
        if (phba->pport->cfg_lun_queue_depth > i) {
                lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
                                "3360 LUN queue depth changed from %d to %d\n",
                                phba->pport->cfg_lun_queue_depth, i);
                phba->pport->cfg_lun_queue_depth = i;
        }

        phba->lmt = mb->un.varRdConfig.lmt;

        /* Get the default values for Model Name and Description */
        lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);

        phba->link_state = LPFC_LINK_DOWN;

        /* Only process IOCBs on ELS ring till hba_state is READY */
        if (psli->ring[psli->extra_ring].sli.sli3.cmdringaddr)
                psli->ring[psli->extra_ring].flag |= LPFC_STOP_IOCB_EVENT;
        if (psli->ring[psli->fcp_ring].sli.sli3.cmdringaddr)
                psli->ring[psli->fcp_ring].flag |= LPFC_STOP_IOCB_EVENT;
        if (psli->ring[psli->next_ring].sli.sli3.cmdringaddr)
                psli->ring[psli->next_ring].flag |= LPFC_STOP_IOCB_EVENT;

        /* Post receive buffers for desired rings */
        if (phba->sli_rev != 3)
                lpfc_post_rcv_buf(phba);

        /*
         * Configure HBA MSI-X attention conditions to messages if MSI-X mode
         */
        if (phba->intr_type == MSIX) {
                rc = lpfc_config_msi(phba, pmb);
                if (rc) {
                        mempool_free(pmb, phba->mbox_mem_pool);
                        return -EIO;
                }
                rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
                if (rc != MBX_SUCCESS) {
                        lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
                                        "0352 Config MSI mailbox command "
                                        "failed, mbxCmd x%x, mbxStatus x%x\n",
                                        pmb->u.mb.mbxCommand,
                                        pmb->u.mb.mbxStatus);
                        mempool_free(pmb, phba->mbox_mem_pool);
                        return -EIO;
                }
        }

        spin_lock_irq(&phba->hbalock);
        /* Initialize ERATT handling flag */
        phba->hba_flag &= ~HBA_ERATT_HANDLED;

        /* Enable appropriate host interrupts */
        if (lpfc_readl(phba->HCregaddr, &status)) {
                spin_unlock_irq(&phba->hbalock);
                return -EIO;
        }
        status |= HC_MBINT_ENA | HC_ERINT_ENA | HC_LAINT_ENA;
        if (psli->num_rings > 0)
                status |= HC_R0INT_ENA;
        if (psli->num_rings > 1)
                status |= HC_R1INT_ENA;
        if (psli->num_rings > 2)
                status |= HC_R2INT_ENA;
        if (psli->num_rings > 3)
                status |= HC_R3INT_ENA;

        if ((phba->cfg_poll & ENABLE_FCP_RING_POLLING) &&
            (phba->cfg_poll & DISABLE_FCP_RING_INT))
                status &= ~(HC_R0INT_ENA);

        writel(status, phba->HCregaddr);
        readl(phba->HCregaddr); /* flush */
        spin_unlock_irq(&phba->hbalock);

        /* Set up ring-0 (ELS) timer */
        timeout = phba->fc_ratov * 2;
        mod_timer(&vport->els_tmofunc,
                  jiffies + msecs_to_jiffies(1000 * timeout));
        /* Set up heart beat (HB) timer */
        mod_timer(&phba->hb_tmofunc,
                  jiffies + msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
        phba->hb_outstanding = 0;
        phba->last_completion_time = jiffies;
        /* Set up error attention (ERATT) polling timer */
        mod_timer(&phba->eratt_poll,
                  jiffies + msecs_to_jiffies(1000 * LPFC_ERATT_POLL_INTERVAL));

        if (phba->hba_flag & LINK_DISABLED) {
                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                                "2598 Adapter Link is disabled.\n");
                lpfc_down_link(phba, pmb);
                pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
                rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
                if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) {
                        lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                                        "2599 Adapter failed to issue DOWN_LINK"
                                        " mbox command rc 0x%x\n", rc);

                        mempool_free(pmb, phba->mbox_mem_pool);
                        return -EIO;
                }
        } else if (phba->cfg_suppress_link_up == LPFC_INITIALIZE_LINK) {
                mempool_free(pmb, phba->mbox_mem_pool);
                rc = phba->lpfc_hba_init_link(phba, MBX_NOWAIT);
                if (rc)
                        return rc;
        }
        /* MBOX buffer will be freed in mbox compl */
        pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
        if (!pmb) {
                phba->link_state = LPFC_HBA_ERROR;
                return -ENOMEM;
        }

        lpfc_config_async(phba, pmb, LPFC_ELS_RING);
        pmb->mbox_cmpl = lpfc_config_async_cmpl;
        pmb->vport = phba->pport;
        rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);

        if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                                "0456 Adapter failed to issue "
                                "ASYNCEVT_ENABLE mbox status x%x\n",
                                rc);
                mempool_free(pmb, phba->mbox_mem_pool);
        }

        /* Get Option ROM version */
        pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
        if (!pmb) {
                phba->link_state = LPFC_HBA_ERROR;
                return -ENOMEM;
        }

        lpfc_dump_wakeup_param(phba, pmb);
        pmb->mbox_cmpl = lpfc_dump_wakeup_param_cmpl;
        pmb->vport = phba->pport;
        rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);

        if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
                lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "0435 Adapter failed "
                                "to get Option ROM version status x%x\n", rc);
                mempool_free(pmb, phba->mbox_mem_pool);
        }

        return 0;
}

/**
 * lpfc_hba_init_link - Initialize the FC link
 * @phba: pointer to lpfc hba data structure.
 * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT
 *
 * This routine will issue the INIT_LINK mailbox command call.
 * It is available to other drivers through the lpfc_hba data
 * structure for use as a delayed link up mechanism with the
 * module parameter lpfc_suppress_link_up.
 *
 * Return code
 *              0 - success
 *              Any other value - error
 **/
static int
lpfc_hba_init_link(struct lpfc_hba *phba, uint32_t flag)
{
        return lpfc_hba_init_link_fc_topology(phba, phba->cfg_topology, flag);
}
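
/*
 * Illustrative call sequence (see lpfc_config_port_post() above): when
 * lpfc_suppress_link_up has deferred the link up, the link is brought up
 * later through the function pointer installed in the lpfc_hba structure:
 *
 *      rc = phba->lpfc_hba_init_link(phba, MBX_NOWAIT);
 */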

/**
 * lpfc_hba_init_link_fc_topology - Initialize FC link with desired topology
 * @phba: pointer to lpfc hba data structure.
 * @fc_topology: desired fc topology.
 * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT
 *
 * This routine will issue the INIT_LINK mailbox command call.
 * It is available to other drivers through the lpfc_hba data
 * structure for use as a delayed link up mechanism with the
 * module parameter lpfc_suppress_link_up.
 *
 * Return code
 *              0 - success
 *              Any other value - error
 **/
int
lpfc_hba_init_link_fc_topology(struct lpfc_hba *phba, uint32_t fc_topology,
                               uint32_t flag)
{
        struct lpfc_vport *vport = phba->pport;
        LPFC_MBOXQ_t *pmb;
        MAILBOX_t *mb;
        int rc;

        pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
        if (!pmb) {
                phba->link_state = LPFC_HBA_ERROR;
                return -ENOMEM;
        }
        mb = &pmb->u.mb;
        pmb->vport = vport;

        if ((phba->cfg_link_speed > LPFC_USER_LINK_SPEED_MAX) ||
            ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_1G) &&
             !(phba->lmt & LMT_1Gb)) ||
            ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_2G) &&
             !(phba->lmt & LMT_2Gb)) ||
            ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_4G) &&
             !(phba->lmt & LMT_4Gb)) ||
            ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_8G) &&
             !(phba->lmt & LMT_8Gb)) ||
            ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_10G) &&
             !(phba->lmt & LMT_10Gb)) ||
            ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_16G) &&
             !(phba->lmt & LMT_16Gb))) {
                /* Reset link speed to auto */
                lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
                        "1302 Invalid speed for this board:%d "
                        "Reset link speed to auto.\n",
                        phba->cfg_link_speed);
                phba->cfg_link_speed = LPFC_USER_LINK_SPEED_AUTO;
        }
        lpfc_init_link(phba, pmb, fc_topology, phba->cfg_link_speed);
        pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
        if (phba->sli_rev < LPFC_SLI_REV4)
                lpfc_set_loopback_flag(phba);
        rc = lpfc_sli_issue_mbox(phba, pmb, flag);
        if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                        "0498 Adapter failed to init, mbxCmd x%x "
                        "INIT_LINK, mbxStatus x%x\n",
                        mb->mbxCommand, mb->mbxStatus);
                if (phba->sli_rev <= LPFC_SLI_REV3) {
                        /* Clear all interrupt enable conditions */
                        writel(0, phba->HCregaddr);
                        readl(phba->HCregaddr); /* flush */
                        /* Clear all pending interrupts */
                        writel(0xffffffff, phba->HAregaddr);
                        readl(phba->HAregaddr); /* flush */
                }
                phba->link_state = LPFC_HBA_ERROR;
                if (rc != MBX_BUSY || flag == MBX_POLL)
                        mempool_free(pmb, phba->mbox_mem_pool);
                return -EIO;
        }
        phba->cfg_suppress_link_up = LPFC_INITIALIZE_LINK;
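        /*
         * Mailbox ownership: for MBX_NOWAIT the mailbox is released by
         * the lpfc_sli_def_mbox_cmpl completion handler; for MBX_POLL
         * the caller must return it to the pool here.
         */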
        if (flag == MBX_POLL)
                mempool_free(pmb, phba->mbox_mem_pool);

        return 0;
}

/**
 * lpfc_hba_down_link - this routine downs the FC link
 * @phba: pointer to lpfc hba data structure.
 * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT
 *
 * This routine will issue the DOWN_LINK mailbox command call.
 * It is available to other drivers through the lpfc_hba data
 * structure for use in stopping the link.
 *
 * Return code
 *              0 - success
 *              Any other value - error
 **/
static int
lpfc_hba_down_link(struct lpfc_hba *phba, uint32_t flag)
{
        LPFC_MBOXQ_t *pmb;
        int rc;

        pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
        if (!pmb) {
                phba->link_state = LPFC_HBA_ERROR;
                return -ENOMEM;
        }

        lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                        "0491 Adapter Link is disabled.\n");
        lpfc_down_link(phba, pmb);
        pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
        rc = lpfc_sli_issue_mbox(phba, pmb, flag);
        if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) {
                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                                "2522 Adapter failed to issue DOWN_LINK"
                                " mbox command rc 0x%x\n", rc);

                mempool_free(pmb, phba->mbox_mem_pool);
                return -EIO;
        }
        if (flag == MBX_POLL)
                mempool_free(pmb, phba->mbox_mem_pool);

        return 0;
}

/**
 * lpfc_hba_down_prep - Perform lpfc uninitialization prior to HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will do LPFC uninitialization before the HBA is reset when
 * bringing down the SLI Layer.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
int
lpfc_hba_down_prep(struct lpfc_hba *phba)
{
        struct lpfc_vport **vports;
        int i;

        if (phba->sli_rev <= LPFC_SLI_REV3) {
                /* Disable interrupts */
                writel(0, phba->HCregaddr);
                readl(phba->HCregaddr); /* flush */
        }

        if (phba->pport->load_flag & FC_UNLOADING)
                lpfc_cleanup_discovery_resources(phba->pport);
        else {
                vports = lpfc_create_vport_work_array(phba);
                if (vports != NULL)
                        for (i = 0; i <= phba->max_vports &&
                                vports[i] != NULL; i++)
                                lpfc_cleanup_discovery_resources(vports[i]);
                lpfc_destroy_vport_work_array(phba, vports);
        }
        return 0;
}

/**
 * lpfc_sli4_free_sp_events - Clean up sp_queue_events to free deferred rspiocbs
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will clean up completed slow path events after the HBA is
 * reset when bringing down the SLI Layer.
 *
 * Return codes
 *   void.
 **/
static void
lpfc_sli4_free_sp_events(struct lpfc_hba *phba)
{
        struct lpfc_iocbq *rspiocbq;
        struct hbq_dmabuf *dmabuf;
        struct lpfc_cq_event *cq_event;

        spin_lock_irq(&phba->hbalock);
        phba->hba_flag &= ~HBA_SP_QUEUE_EVT;
        spin_unlock_irq(&phba->hbalock);

        while (!list_empty(&phba->sli4_hba.sp_queue_event)) {
                /* Get the response iocb from the head of work queue */
                spin_lock_irq(&phba->hbalock);
                list_remove_head(&phba->sli4_hba.sp_queue_event,
                                 cq_event, struct lpfc_cq_event, list);
                spin_unlock_irq(&phba->hbalock);

                switch (bf_get(lpfc_wcqe_c_code, &cq_event->cqe.wcqe_cmpl)) {
                case CQE_CODE_COMPL_WQE:
                        rspiocbq = container_of(cq_event, struct lpfc_iocbq,
                                                 cq_event);
                        lpfc_sli_release_iocbq(phba, rspiocbq);
                        break;
                case CQE_CODE_RECEIVE:
                case CQE_CODE_RECEIVE_V1:
                        dmabuf = container_of(cq_event, struct hbq_dmabuf,
                                              cq_event);
                        lpfc_in_buf_free(phba, &dmabuf->dbuf);
                }
        }
}

/**
 * lpfc_hba_free_post_buf - Perform lpfc uninitialization after HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will clean up posted ELS buffers after the HBA is reset
 * when bringing down the SLI Layer.
 *
 * Return codes
 *   void.
 **/
static void
lpfc_hba_free_post_buf(struct lpfc_hba *phba)
{
        struct lpfc_sli *psli = &phba->sli;
        struct lpfc_sli_ring *pring;
        struct lpfc_dmabuf *mp, *next_mp;
        LIST_HEAD(buflist);
        int count;

        if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)
                lpfc_sli_hbqbuf_free_all(phba);
        else {
                /* Cleanup preposted buffers on the ELS ring */
                pring = &psli->ring[LPFC_ELS_RING];
                spin_lock_irq(&phba->hbalock);
                list_splice_init(&pring->postbufq, &buflist);
                spin_unlock_irq(&phba->hbalock);

                count = 0;
                list_for_each_entry_safe(mp, next_mp, &buflist, list) {
                        list_del(&mp->list);
                        count++;
                        lpfc_mbuf_free(phba, mp->virt, mp->phys);
                        kfree(mp);
                }

                spin_lock_irq(&phba->hbalock);
                pring->postbufq_cnt -= count;
                spin_unlock_irq(&phba->hbalock);
        }
}

/**
 * lpfc_hba_clean_txcmplq - Perform lpfc uninitialization after HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will clean up the txcmplq after the HBA is reset when
 * bringing down the SLI Layer.
 *
 * Return codes
 *   void
 **/
static void
lpfc_hba_clean_txcmplq(struct lpfc_hba *phba)
{
        struct lpfc_sli *psli = &phba->sli;
        struct lpfc_sli_ring *pring;
        LIST_HEAD(completions);
        int i;

        for (i = 0; i < psli->num_rings; i++) {
                pring = &psli->ring[i];
                if (phba->sli_rev >= LPFC_SLI_REV4)
                        spin_lock_irq(&pring->ring_lock);
                else
                        spin_lock_irq(&phba->hbalock);
                /* At this point in time the HBA is either reset or DOA. Either
                 * way, nothing should be on txcmplq as it will NEVER complete.
                 */
                list_splice_init(&pring->txcmplq, &completions);
                pring->txcmplq_cnt = 0;

                if (phba->sli_rev >= LPFC_SLI_REV4)
                        spin_unlock_irq(&pring->ring_lock);
                else
                        spin_unlock_irq(&phba->hbalock);

                /* Cancel all the IOCBs from the completions list */
                lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
                                      IOERR_SLI_ABORTED);
                lpfc_sli_abort_iocb_ring(phba, pring);
        }
}

/**
 * lpfc_hba_down_post_s3 - Perform lpfc uninitialization after HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will do uninitialization after the HBA is reset when
 * bringing down the SLI Layer.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
static int
lpfc_hba_down_post_s3(struct lpfc_hba *phba)
{
        lpfc_hba_free_post_buf(phba);
        lpfc_hba_clean_txcmplq(phba);
        return 0;
}

/**
 * lpfc_hba_down_post_s4 - Perform lpfc uninitialization after HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will do uninitialization after the HBA is reset when
 * bringing down the SLI Layer.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
static int
lpfc_hba_down_post_s4(struct lpfc_hba *phba)
{
        struct lpfc_scsi_buf *psb, *psb_next;
        LIST_HEAD(aborts);
        unsigned long iflag = 0;
        struct lpfc_sglq *sglq_entry = NULL;
        struct lpfc_sli *psli = &phba->sli;
        struct lpfc_sli_ring *pring;

        lpfc_hba_free_post_buf(phba);
        lpfc_hba_clean_txcmplq(phba);
        pring = &psli->ring[LPFC_ELS_RING];

        /* At this point in time the HBA is either reset or DOA. Either
         * way, nothing should be on lpfc_abts_els_sgl_list, it needs to be
         * on the lpfc_sgl_list so that it can either be freed if the
         * driver is unloading or reposted if the driver is restarting
         * the port.
         */
        spin_lock_irq(&phba->hbalock);  /* required for lpfc_sgl_list and */
                                        /* scsi_buf_list */
        /* abts_sgl_list_lock required because worker thread uses this
         * list.
         */
        spin_lock(&phba->sli4_hba.abts_sgl_list_lock);
        list_for_each_entry(sglq_entry,
                &phba->sli4_hba.lpfc_abts_els_sgl_list, list)
                sglq_entry->state = SGL_FREED;

        spin_lock(&pring->ring_lock);
        list_splice_init(&phba->sli4_hba.lpfc_abts_els_sgl_list,
                        &phba->sli4_hba.lpfc_sgl_list);
        spin_unlock(&pring->ring_lock);
        spin_unlock(&phba->sli4_hba.abts_sgl_list_lock);
        /* abts_scsi_buf_list_lock required because worker thread uses this
         * list.
         */
        spin_lock(&phba->sli4_hba.abts_scsi_buf_list_lock);
        list_splice_init(&phba->sli4_hba.lpfc_abts_scsi_buf_list,
                        &aborts);
        spin_unlock(&phba->sli4_hba.abts_scsi_buf_list_lock);
        spin_unlock_irq(&phba->hbalock);

        list_for_each_entry_safe(psb, psb_next, &aborts, list) {
                psb->pCmd = NULL;
                psb->status = IOSTAT_SUCCESS;
        }
        spin_lock_irqsave(&phba->scsi_buf_list_put_lock, iflag);
        list_splice(&aborts, &phba->lpfc_scsi_buf_list_put);
        spin_unlock_irqrestore(&phba->scsi_buf_list_put_lock, iflag);

        lpfc_sli4_free_sp_events(phba);
        return 0;
}

/**
 * lpfc_hba_down_post - Wrapper func for hba down post routine
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine wraps the actual SLI3 or SLI4 routine for performing
 * uninitialization after the HBA is reset when bringing down the SLI Layer.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
int
lpfc_hba_down_post(struct lpfc_hba *phba)
{
        return (*phba->lpfc_hba_down_post)(phba);
}

/**
 * lpfc_hb_timeout - The HBA-timer timeout handler
 * @ptr: unsigned long holds the pointer to lpfc hba data structure.
 *
 * This is the HBA-timer timeout handler registered to the lpfc driver. When
 * this timer fires, a HBA timeout event shall be posted to the lpfc driver
 * work-port-events bitmap and the worker thread is notified. This timeout
 * event will be used by the worker thread to invoke the actual timeout
 * handler routine, lpfc_hb_timeout_handler. Any periodical operations will
 * be performed in the timeout handler and the HBA timeout event bit shall
 * be cleared by the worker thread after it has taken the event bitmap out.
 **/
static void
lpfc_hb_timeout(unsigned long ptr)
{
        struct lpfc_hba *phba;
        uint32_t tmo_posted;
        unsigned long iflag;

        phba = (struct lpfc_hba *)ptr;

        /* Check for heart beat timeout conditions */
        spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
        tmo_posted = phba->pport->work_port_events & WORKER_HB_TMO;
        if (!tmo_posted)
                phba->pport->work_port_events |= WORKER_HB_TMO;
        spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);

        /* Tell the worker thread there is work to do */
        if (!tmo_posted)
                lpfc_worker_wake_up(phba);
        return;
}

/**
 * lpfc_rrq_timeout - The RRQ-timer timeout handler
 * @ptr: unsigned long holds the pointer to lpfc hba data structure.
 *
 * This is the RRQ-timer timeout handler registered to the lpfc driver. When
 * this timer fires, a RRQ timeout event shall be posted to the lpfc driver
 * work-port-events bitmap and the worker thread is notified. This timeout
 * event will be used by the worker thread to invoke the actual timeout
 * handler routine, lpfc_rrq_handler. Any periodical operations will
 * be performed in the timeout handler and the RRQ timeout event bit shall
 * be cleared by the worker thread after it has taken the event bitmap out.
 **/
static void
lpfc_rrq_timeout(unsigned long ptr)
{
        struct lpfc_hba *phba;
        unsigned long iflag;

        phba = (struct lpfc_hba *)ptr;
        spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
        if (!(phba->pport->load_flag & FC_UNLOADING))
                phba->hba_flag |= HBA_RRQ_ACTIVE;
        else
                phba->hba_flag &= ~HBA_RRQ_ACTIVE;
        spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);

        if (!(phba->pport->load_flag & FC_UNLOADING))
                lpfc_worker_wake_up(phba);
}

/**
 * lpfc_hb_mbox_cmpl - The lpfc heart-beat mailbox command callback function
 * @phba: pointer to lpfc hba data structure.
 * @pmboxq: pointer to the driver internal queue element for mailbox command.
 *
 * This is the callback function to the lpfc heart-beat mailbox command.
 * If configured, the lpfc driver issues the heart-beat mailbox command to
 * the HBA every LPFC_HB_MBOX_INTERVAL (currently 5) seconds. At the time the
 * heart-beat mailbox command is issued, the driver shall set up the
 * heart-beat timeout timer to LPFC_HB_MBOX_TIMEOUT (currently 30) seconds
 * and mark the heart-beat outstanding state. Once the mailbox command comes
 * back and no error conditions are detected, the heart-beat mailbox command
 * timer is reset to LPFC_HB_MBOX_INTERVAL seconds and the heart-beat
 * outstanding state is cleared for the next heart-beat. If the timer expires
 * with the heart-beat outstanding state set, the driver will put the HBA
 * offline.
 **/
static void
lpfc_hb_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
{
        unsigned long drvr_flag;

        spin_lock_irqsave(&phba->hbalock, drvr_flag);
        phba->hb_outstanding = 0;
        spin_unlock_irqrestore(&phba->hbalock, drvr_flag);

        /* Check and reset the heart-beat timer if necessary */
        mempool_free(pmboxq, phba->mbox_mem_pool);
        if (!(phba->pport->fc_flag & FC_OFFLINE_MODE) &&
                !(phba->link_state == LPFC_HBA_ERROR) &&
                !(phba->pport->load_flag & FC_UNLOADING))
                mod_timer(&phba->hb_tmofunc,
                          jiffies +
                          msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
        return;
}

/**
 * lpfc_hb_timeout_handler - The HBA-timer timeout handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This is the actual HBA-timer timeout handler to be invoked by the worker
 * thread whenever the HBA timer fires and an HBA-timeout event is posted.
 * This handler performs any periodic operations needed for the device. If
 * such a periodic event has already been attended to either in the interrupt
 * handler or by processing slow-ring or fast-ring events within the HBA-timer
 * timeout window (LPFC_HB_MBOX_INTERVAL), this handler simply resets the
 * timer for the next timeout period. If the lpfc heart-beat mailbox command
 * is configured and there is no heart-beat mailbox command outstanding, a
 * heart-beat mailbox is issued and the timer set properly. Otherwise, if
 * there has been a heart-beat mailbox command outstanding, the HBA is taken
 * offline.
 **/
void
lpfc_hb_timeout_handler(struct lpfc_hba *phba)
{
        struct lpfc_vport **vports;
        LPFC_MBOXQ_t *pmboxq;
        struct lpfc_dmabuf *buf_ptr;
        int retval, i;
        struct lpfc_sli *psli = &phba->sli;
        LIST_HEAD(completions);

        vports = lpfc_create_vport_work_array(phba);
        if (vports != NULL)
                for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
                        lpfc_rcv_seq_check_edtov(vports[i]);
        lpfc_destroy_vport_work_array(phba, vports);

        if ((phba->link_state == LPFC_HBA_ERROR) ||
                (phba->pport->load_flag & FC_UNLOADING) ||
                (phba->pport->fc_flag & FC_OFFLINE_MODE))
                return;

        spin_lock_irq(&phba->pport->work_port_lock);

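        /*
         * If an I/O completed within the last heart-beat interval there
         * is no need to probe the adapter; just rearm the appropriate
         * timer and return.
         */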
        if (time_after(phba->last_completion_time +
                        msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL),
                        jiffies)) {
                spin_unlock_irq(&phba->pport->work_port_lock);
                if (!phba->hb_outstanding)
                        mod_timer(&phba->hb_tmofunc,
                                jiffies +
                                msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
                else
                        mod_timer(&phba->hb_tmofunc,
                                jiffies +
                                msecs_to_jiffies(1000 * LPFC_HB_MBOX_TIMEOUT));
                return;
        }
        spin_unlock_irq(&phba->pport->work_port_lock);

        if (phba->elsbuf_cnt &&
                (phba->elsbuf_cnt == phba->elsbuf_prev_cnt)) {
                spin_lock_irq(&phba->hbalock);
                list_splice_init(&phba->elsbuf, &completions);
                phba->elsbuf_cnt = 0;
                phba->elsbuf_prev_cnt = 0;
                spin_unlock_irq(&phba->hbalock);

                while (!list_empty(&completions)) {
                        list_remove_head(&completions, buf_ptr,
                                struct lpfc_dmabuf, list);
                        lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
                        kfree(buf_ptr);
                }
        }
        phba->elsbuf_prev_cnt = phba->elsbuf_cnt;

        /* If there is no heart beat outstanding, issue a heartbeat command */
        if (phba->cfg_enable_hba_heartbeat) {
                if (!phba->hb_outstanding) {
                        if ((!(psli->sli_flag & LPFC_SLI_MBOX_ACTIVE)) &&
                                (list_empty(&psli->mboxq))) {
                                pmboxq = mempool_alloc(phba->mbox_mem_pool,
                                                        GFP_KERNEL);
                                if (!pmboxq) {
                                        mod_timer(&phba->hb_tmofunc,
                                                 jiffies +
                                                 msecs_to_jiffies(1000 *
                                                 LPFC_HB_MBOX_INTERVAL));
                                        return;
                                }

                                lpfc_heart_beat(phba, pmboxq);
                                pmboxq->mbox_cmpl = lpfc_hb_mbox_cmpl;
                                pmboxq->vport = phba->pport;
                                retval = lpfc_sli_issue_mbox(phba, pmboxq,
                                                MBX_NOWAIT);

                                if (retval != MBX_BUSY &&
                                        retval != MBX_SUCCESS) {
                                        mempool_free(pmboxq,
                                                        phba->mbox_mem_pool);
                                        mod_timer(&phba->hb_tmofunc,
                                                jiffies +
                                                msecs_to_jiffies(1000 *
                                                LPFC_HB_MBOX_INTERVAL));
                                        return;
                                }
                                phba->skipped_hb = 0;
                                phba->hb_outstanding = 1;
                        } else if (time_before_eq(phba->last_completion_time,
                                        phba->skipped_hb)) {
                                lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
                                        "2857 Last completion time not "
                                        "updated in %d ms\n",
                                        jiffies_to_msecs(jiffies
                                                 - phba->last_completion_time));
                        } else
                                phba->skipped_hb = jiffies;

                        mod_timer(&phba->hb_tmofunc,
                                 jiffies +
                                 msecs_to_jiffies(1000 * LPFC_HB_MBOX_TIMEOUT));
                        return;
                } else {
                        /*
                         * If the heart beat timeout is called with
                         * hb_outstanding set, we need to give the hb mailbox
                         * command a chance to complete or time out.
                         */
                        lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
                                        "0459 Adapter heartbeat still out"
                                        "standing: last compl time was %d ms.\n",
                                        jiffies_to_msecs(jiffies
                                                 - phba->last_completion_time));
                        mod_timer(&phba->hb_tmofunc,
                                jiffies +
                                msecs_to_jiffies(1000 * LPFC_HB_MBOX_TIMEOUT));
                }
        }
}

/**
 * lpfc_offline_eratt - Bring lpfc offline on hardware error attention
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to bring the HBA offline when HBA hardware error
 * other than Port Error 6 has been detected.
 **/
static void
lpfc_offline_eratt(struct lpfc_hba *phba)
{
        struct lpfc_sli   *psli = &phba->sli;

        spin_lock_irq(&phba->hbalock);
        psli->sli_flag &= ~LPFC_SLI_ACTIVE;
        spin_unlock_irq(&phba->hbalock);
        lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);

        lpfc_offline(phba);
        lpfc_reset_barrier(phba);
        spin_lock_irq(&phba->hbalock);
        lpfc_sli_brdreset(phba);
        spin_unlock_irq(&phba->hbalock);
        lpfc_hba_down_post(phba);
        lpfc_sli_brdready(phba, HS_MBRDY);
        lpfc_unblock_mgmt_io(phba);
        phba->link_state = LPFC_HBA_ERROR;
        return;
}
1322
1323/**
1324 * lpfc_sli4_offline_eratt - Bring lpfc offline on SLI4 hardware error attention
1325 * @phba: pointer to lpfc hba data structure.
1326 *
1327 * This routine is called to bring an SLI4 HBA offline when an HBA hardware
1328 * error other than Port Error 6 has been detected.
1329 **/
1330void
1331lpfc_sli4_offline_eratt(struct lpfc_hba *phba)
1332{
1333        lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
1334        lpfc_offline(phba);
1335        lpfc_sli4_brdreset(phba);
1336        lpfc_hba_down_post(phba);
1337        lpfc_sli4_post_status_check(phba);
1338        lpfc_unblock_mgmt_io(phba);
1339        phba->link_state = LPFC_HBA_ERROR;
1340}
1341
1342/**
1343 * lpfc_handle_deferred_eratt - The HBA hardware deferred error handler
1344 * @phba: pointer to lpfc hba data structure.
1345 *
1346 * This routine is invoked to handle the deferred HBA hardware error
1347 * conditions. This type of error is indicated by the HBA setting the ER1
1348 * bit and another ER bit in the host status register. The driver waits
1349 * until the ER1 bit clears before handling the error condition.
1350 **/
1351static void
1352lpfc_handle_deferred_eratt(struct lpfc_hba *phba)
1353{
1354        uint32_t old_host_status = phba->work_hs;
1355        struct lpfc_sli *psli = &phba->sli;
1356
1357        /* If the pci channel is offline, ignore possible errors,
1358         * since we cannot communicate with the pci card anyway.
1359         */
1360        if (pci_channel_offline(phba->pcidev)) {
1361                spin_lock_irq(&phba->hbalock);
1362                phba->hba_flag &= ~DEFER_ERATT;
1363                spin_unlock_irq(&phba->hbalock);
1364                return;
1365        }
1366
1367        lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1368                "0479 Deferred Adapter Hardware Error "
1369                "Data: x%x x%x x%x\n",
1370                phba->work_hs,
1371                phba->work_status[0], phba->work_status[1]);
1372
1373        spin_lock_irq(&phba->hbalock);
1374        psli->sli_flag &= ~LPFC_SLI_ACTIVE;
1375        spin_unlock_irq(&phba->hbalock);
1376
1377
1378        /*
1379         * Firmware stops when it triggers an error attention, which may cause
1380         * I/Os to be dropped. Error out the iocbs (I/Os) on the txcmplq and
1381         * let the SCSI layer retry them after the link is re-established.
1382         */
1383        lpfc_sli_abort_fcp_rings(phba);
1384
1385        /*
1386         * There was a firmware error. Take the hba offline and then
1387         * attempt to restart it.
1388         */
1389        lpfc_offline_prep(phba, LPFC_MBX_WAIT);
1390        lpfc_offline(phba);
1391
1392        /* Wait for the ER1 bit to clear. */
1393        while (phba->work_hs & HS_FFER1) {
1394                msleep(100);
1395                if (lpfc_readl(phba->HSregaddr, &phba->work_hs)) {
1396                        phba->work_hs = UNPLUG_ERR;
1397                        break;
1398                }
1399                /* If driver is unloading let the worker thread continue */
1400                if (phba->pport->load_flag & FC_UNLOADING) {
1401                        phba->work_hs = 0;
1402                        break;
1403                }
1404        }
1405
1406        /*
1407         * This is to protect against a race condition in which the
1408         * first write to the host attention register clears the
1409         * host status register.
1410         */
1411        if ((!phba->work_hs) && (!(phba->pport->load_flag & FC_UNLOADING)))
1412                phba->work_hs = old_host_status & ~HS_FFER1;
1413
1414        spin_lock_irq(&phba->hbalock);
1415        phba->hba_flag &= ~DEFER_ERATT;
1416        spin_unlock_irq(&phba->hbalock);
1417        phba->work_status[0] = readl(phba->MBslimaddr + 0xa8);
1418        phba->work_status[1] = readl(phba->MBslimaddr + 0xac);
1419}
1420
1421static void
1422lpfc_board_errevt_to_mgmt(struct lpfc_hba *phba)
1423{
1424        struct lpfc_board_event_header board_event;
1425        struct Scsi_Host *shost;
1426
1427        board_event.event_type = FC_REG_BOARD_EVENT;
1428        board_event.subcategory = LPFC_EVENT_PORTINTERR;
1429        shost = lpfc_shost_from_vport(phba->pport);
1430        fc_host_post_vendor_event(shost, fc_get_event_number(),
1431                                  sizeof(board_event),
1432                                  (char *) &board_event,
1433                                  LPFC_NL_VENDOR_ID);
1434}
1435
1436/**
1437 * lpfc_handle_eratt_s3 - The SLI3 HBA hardware error handler
1438 * @phba: pointer to lpfc hba data structure.
1439 *
1440 * This routine is invoked to handle the following HBA hardware error
1441 * conditions:
1442 * 1 - HBA error attention interrupt
1443 * 2 - DMA ring index out of range
1444 * 3 - Mailbox command came back as unknown
1445 **/
1446static void
1447lpfc_handle_eratt_s3(struct lpfc_hba *phba)
1448{
1449        struct lpfc_vport *vport = phba->pport;
1450        struct lpfc_sli   *psli = &phba->sli;
1451        uint32_t event_data;
1452        unsigned long temperature;
1453        struct temp_event temp_event_data;
1454        struct Scsi_Host  *shost;
1455
1456        /* If the pci channel is offline, ignore possible errors,
1457         * since we cannot communicate with the pci card anyway.
1458         */
1459        if (pci_channel_offline(phba->pcidev)) {
1460                spin_lock_irq(&phba->hbalock);
1461                phba->hba_flag &= ~DEFER_ERATT;
1462                spin_unlock_irq(&phba->hbalock);
1463                return;
1464        }
1465
1466        /* If resets are disabled then leave the HBA alone and return */
1467        if (!phba->cfg_enable_hba_reset)
1468                return;
1469
1470        /* Send an internal error event to mgmt application */
1471        lpfc_board_errevt_to_mgmt(phba);
1472
1473        if (phba->hba_flag & DEFER_ERATT)
1474                lpfc_handle_deferred_eratt(phba);
1475
1476        if ((phba->work_hs & HS_FFER6) || (phba->work_hs & HS_FFER8)) {
1477                if (phba->work_hs & HS_FFER6)
1478                        /* Re-establishing Link */
1479                        lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
1480                                        "1301 Re-establishing Link "
1481                                        "Data: x%x x%x x%x\n",
1482                                        phba->work_hs, phba->work_status[0],
1483                                        phba->work_status[1]);
1484                if (phba->work_hs & HS_FFER8)
1485                        /* Device Zeroization */
1486                        lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
1487                                        "2861 Host Authentication device "
1488                                        "zeroization Data:x%x x%x x%x\n",
1489                                        phba->work_hs, phba->work_status[0],
1490                                        phba->work_status[1]);
1491
1492                spin_lock_irq(&phba->hbalock);
1493                psli->sli_flag &= ~LPFC_SLI_ACTIVE;
1494                spin_unlock_irq(&phba->hbalock);
1495
1496                /*
1497                 * Firmware stops when it triggers an error attention with
1498                 * HS_FFER6, which may cause I/Os to be dropped. Error out
1499                 * the iocbs (I/Os) on the txcmplq and let the SCSI layer
1500                 * retry them after the link is re-established.
1501                 */
1502                lpfc_sli_abort_fcp_rings(phba);
1503
1504                /*
1505                 * There was a firmware error.  Take the hba offline and then
1506                 * attempt to restart it.
1507                 */
1508                lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
1509                lpfc_offline(phba);
1510                lpfc_sli_brdrestart(phba);
1511                if (lpfc_online(phba) == 0) {   /* Initialize the HBA */
1512                        lpfc_unblock_mgmt_io(phba);
1513                        return;
1514                }
1515                lpfc_unblock_mgmt_io(phba);
1516        } else if (phba->work_hs & HS_CRIT_TEMP) {
1517                temperature = readl(phba->MBslimaddr + TEMPERATURE_OFFSET);
1518                temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
1519                temp_event_data.event_code = LPFC_CRIT_TEMP;
1520                temp_event_data.data = (uint32_t)temperature;
1521
1522                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1523                                "0406 Adapter maximum temperature exceeded "
1524                                "(%ld), taking this port offline "
1525                                "Data: x%x x%x x%x\n",
1526                                temperature, phba->work_hs,
1527                                phba->work_status[0], phba->work_status[1]);
1528
1529                shost = lpfc_shost_from_vport(phba->pport);
1530                fc_host_post_vendor_event(shost, fc_get_event_number(),
1531                                          sizeof(temp_event_data),
1532                                          (char *) &temp_event_data,
1533                                          SCSI_NL_VID_TYPE_PCI
1534                                          | PCI_VENDOR_ID_EMULEX);
1535
1536                spin_lock_irq(&phba->hbalock);
1537                phba->over_temp_state = HBA_OVER_TEMP;
1538                spin_unlock_irq(&phba->hbalock);
1539                lpfc_offline_eratt(phba);
1540
1541        } else {
1542                /* The if clause above forces this code path when the status
1543                 * failure is a value other than FFER6, so the adapter is not
1544                 * taken offline twice. This is the adapter hardware error path.
1545                 */
1546                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1547                                "0457 Adapter Hardware Error "
1548                                "Data: x%x x%x x%x\n",
1549                                phba->work_hs,
1550                                phba->work_status[0], phba->work_status[1]);
1551
1552                event_data = FC_REG_DUMP_EVENT;
1553                shost = lpfc_shost_from_vport(vport);
1554                fc_host_post_vendor_event(shost, fc_get_event_number(),
1555                                sizeof(event_data), (char *) &event_data,
1556                                SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);
1557
1558                lpfc_offline_eratt(phba);
1559        }
1560        return;
1561}
1562
1563/**
1564 * lpfc_sli4_port_sta_fn_reset - The SLI4 function reset due to port status reg
1565 * @phba: pointer to lpfc hba data structure.
1566 * @mbx_action: flag for mailbox shutdown action.
1567 *
1568 * This routine is invoked to perform an SLI4 port PCI function reset in
1569 * response to a port status register polling attention. It waits for the
1570 * port status register (ERR, RDY, RN) bits before proceeding with the
1571 * function reset. During this process, interrupt vectors are freed and
1572 * later re-requested to handle a possible port resource change.
1573 **/
1574static int
1575lpfc_sli4_port_sta_fn_reset(struct lpfc_hba *phba, int mbx_action,
1576                            bool en_rn_msg)
1577{
1578        int rc;
1579        uint32_t intr_mode;
1580
1581        /*
1582         * On an error status condition, the driver needs to wait for the
1583         * port to become ready before performing the reset.
1584         */
1585        rc = lpfc_sli4_pdev_status_reg_wait(phba);
1586        if (!rc) {
1587                /* need reset: attempt for port recovery */
1588                if (en_rn_msg)
1589                        lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1590                                        "2887 Reset Needed: Attempting Port "
1591                                        "Recovery...\n");
1592                lpfc_offline_prep(phba, mbx_action);
1593                lpfc_offline(phba);
1594                /* release interrupt for possible resource change */
1595                lpfc_sli4_disable_intr(phba);
1596                lpfc_sli_brdrestart(phba);
1597                /* request and enable interrupt */
1598                intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
1599                if (intr_mode == LPFC_INTR_ERROR) {
1600                        lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1601                                        "3175 Failed to enable interrupt\n");
1602                        return -EIO;
1603                } else {
1604                        phba->intr_mode = intr_mode;
1605                }
1606                rc = lpfc_online(phba);
1607                if (rc == 0)
1608                        lpfc_unblock_mgmt_io(phba);
1609        }
1610        return rc;
1611}
1612
1613/**
1614 * lpfc_handle_eratt_s4 - The SLI4 HBA hardware error handler
1615 * @phba: pointer to lpfc hba data structure.
1616 *
1617 * This routine is invoked to handle the SLI4 HBA hardware error attention
1618 * conditions.
1619 **/
1620static void
1621lpfc_handle_eratt_s4(struct lpfc_hba *phba)
1622{
1623        struct lpfc_vport *vport = phba->pport;
1624        uint32_t event_data;
1625        struct Scsi_Host *shost;
1626        uint32_t if_type;
1627        struct lpfc_register portstat_reg = {0};
1628        uint32_t reg_err1, reg_err2;
1629        uint32_t uerrlo_reg, uemasklo_reg;
1630        uint32_t pci_rd_rc1, pci_rd_rc2;
1631        bool en_rn_msg = true;
1632        int rc;
1633
1634        /* If the pci channel is offline, ignore possible errors, since
1635         * we cannot communicate with the pci card anyway.
1636         */
1637        if (pci_channel_offline(phba->pcidev))
1638                return;
1639        /* If resets are disabled then leave the HBA alone and return */
1640        if (!phba->cfg_enable_hba_reset)
1641                return;
1642
1643        if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
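            /*
             * Where errors are reported depends on the SLI interface type:
             * if_type 0 ports use the UERRLO/UEMASKLO unrecoverable-error
             * registers, while if_type 2 ports expose a SLIPORT status
             * register plus ERR1/ERR2 detail registers.
             */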
1644        switch (if_type) {
1645        case LPFC_SLI_INTF_IF_TYPE_0:
1646                pci_rd_rc1 = lpfc_readl(
1647                                phba->sli4_hba.u.if_type0.UERRLOregaddr,
1648                                &uerrlo_reg);
1649                pci_rd_rc2 = lpfc_readl(
1650                                phba->sli4_hba.u.if_type0.UEMASKLOregaddr,
1651                                &uemasklo_reg);
1652                /* consider PCI bus read error as pci_channel_offline */
1653                if (pci_rd_rc1 == -EIO && pci_rd_rc2 == -EIO)
1654                        return;
1655                lpfc_sli4_offline_eratt(phba);
1656                break;
1657        case LPFC_SLI_INTF_IF_TYPE_2:
1658                pci_rd_rc1 = lpfc_readl(
1659                                phba->sli4_hba.u.if_type2.STATUSregaddr,
1660                                &portstat_reg.word0);
1661                /* consider PCI bus read error as pci_channel_offline */
1662                if (pci_rd_rc1 == -EIO) {
1663                        lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1664                                "3151 PCI bus read access failure: x%x\n",
1665                                readl(phba->sli4_hba.u.if_type2.STATUSregaddr));
1666                        return;
1667                }
1668                reg_err1 = readl(phba->sli4_hba.u.if_type2.ERR1regaddr);
1669                reg_err2 = readl(phba->sli4_hba.u.if_type2.ERR2regaddr);
1670                if (bf_get(lpfc_sliport_status_oti, &portstat_reg)) {
1671                        /* TODO: Register for Overtemp async events. */
1672                        lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1673                                "2889 Port Overtemperature event, "
1674                                "taking port offline\n");
1675                        spin_lock_irq(&phba->hbalock);
1676                        phba->over_temp_state = HBA_OVER_TEMP;
1677                        spin_unlock_irq(&phba->hbalock);
1678                        lpfc_sli4_offline_eratt(phba);
1679                        break;
1680                }
1681                if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
1682                    reg_err2 == SLIPORT_ERR2_REG_FW_RESTART) {
1683                        lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1684                                        "3143 Port Down: Firmware Update "
1685                                        "Detected\n");
1686                        en_rn_msg = false;
1687                } else if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
1688                         reg_err2 == SLIPORT_ERR2_REG_FORCED_DUMP)
1689                        lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1690                                        "3144 Port Down: Debug Dump\n");
1691                else if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
1692                         reg_err2 == SLIPORT_ERR2_REG_FUNC_PROVISON)
1693                        lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1694                                        "3145 Port Down: Provisioning\n");
1695
1696                /* Check port status register for function reset */
1697                rc = lpfc_sli4_port_sta_fn_reset(phba, LPFC_MBX_NO_WAIT,
1698                                en_rn_msg);
1699                if (rc == 0) {
1700                        /* don't report event on forced debug dump */
1701                        if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
1702                            reg_err2 == SLIPORT_ERR2_REG_FORCED_DUMP)
1703                                return;
1704                        else
1705                                break;
1706                }
1707                /* fall through when unable to recover */
1708                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1709                                "3152 Unrecoverable error, bring the port "
1710                                "offline\n");
1711                lpfc_sli4_offline_eratt(phba);
1712                break;
1713        case LPFC_SLI_INTF_IF_TYPE_1:
1714        default:
1715                break;
1716        }
1717        lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
1718                        "3123 Report dump event to upper layer\n");
1719        /* Send an internal error event to mgmt application */
1720        lpfc_board_errevt_to_mgmt(phba);
1721
1722        event_data = FC_REG_DUMP_EVENT;
1723        shost = lpfc_shost_from_vport(vport);
1724        fc_host_post_vendor_event(shost, fc_get_event_number(),
1725                                  sizeof(event_data), (char *) &event_data,
1726                                  SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);
1727}
1728
1729/**
1730 * lpfc_handle_eratt - Wrapper func for handling hba error attention
1731 * @phba: pointer to lpfc HBA data structure.
1732 *
1733 * This routine wraps the actual SLI3 or SLI4 HBA error attention handling
1734 * routine, invoked through the API jump table function pointer in the
1735 * lpfc_hba struct.
1739 **/
1740void
1741lpfc_handle_eratt(struct lpfc_hba *phba)
1742{
1743        (*phba->lpfc_handle_eratt)(phba);
1744}
1745
1746/**
1747 * lpfc_handle_latt - The HBA link event handler
1748 * @phba: pointer to lpfc hba data structure.
1749 *
1750 * This routine is invoked from the worker thread to handle an HBA host
1751 * attention link event.
1752 **/
1753void
1754lpfc_handle_latt(struct lpfc_hba *phba)
1755{
1756        struct lpfc_vport *vport = phba->pport;
1757        struct lpfc_sli   *psli = &phba->sli;
1758        LPFC_MBOXQ_t *pmb;
1759        volatile uint32_t control;
1760        struct lpfc_dmabuf *mp;
1761        int rc = 0;
1762
1763        pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1764        if (!pmb) {
1765                rc = 1;
1766                goto lpfc_handle_latt_err_exit;
1767        }
1768
1769        mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
1770        if (!mp) {
1771                rc = 2;
1772                goto lpfc_handle_latt_free_pmb;
1773        }
1774
1775        mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
1776        if (!mp->virt) {
1777                rc = 3;
1778                goto lpfc_handle_latt_free_mp;
1779        }
1780
1781        /* Cleanup any outstanding ELS commands */
1782        lpfc_els_flush_all_cmd(phba);
1783
1784        psli->slistat.link_event++;
1785        lpfc_read_topology(phba, pmb, mp);
1786        pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology;
1787        pmb->vport = vport;
1788        /* Block ELS IOCBs until we have processed this mbox command */
1789        phba->sli.ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT;
1790        rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
1791        if (rc == MBX_NOT_FINISHED) {
1792                rc = 4;
1793                goto lpfc_handle_latt_free_mbuf;
1794        }
1795
1796        /* Clear Link Attention in HA REG */
1797        spin_lock_irq(&phba->hbalock);
1798        writel(HA_LATT, phba->HAregaddr);
1799        readl(phba->HAregaddr); /* flush */
1800        spin_unlock_irq(&phba->hbalock);
1801
1802        return;
1803
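    /* Error unwind: release resources in the reverse order they were acquired. */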
1804lpfc_handle_latt_free_mbuf:
1805        phba->sli.ring[LPFC_ELS_RING].flag &= ~LPFC_STOP_IOCB_EVENT;
1806        lpfc_mbuf_free(phba, mp->virt, mp->phys);
1807lpfc_handle_latt_free_mp:
1808        kfree(mp);
1809lpfc_handle_latt_free_pmb:
1810        mempool_free(pmb, phba->mbox_mem_pool);
1811lpfc_handle_latt_err_exit:
1812        /* Enable Link attention interrupts */
1813        spin_lock_irq(&phba->hbalock);
1814        psli->sli_flag |= LPFC_PROCESS_LA;
1815        control = readl(phba->HCregaddr);
1816        control |= HC_LAINT_ENA;
1817        writel(control, phba->HCregaddr);
1818        readl(phba->HCregaddr); /* flush */
1819
1820        /* Clear Link Attention in HA REG */
1821        writel(HA_LATT, phba->HAregaddr);
1822        readl(phba->HAregaddr); /* flush */
1823        spin_unlock_irq(&phba->hbalock);
1824        lpfc_linkdown(phba);
1825        phba->link_state = LPFC_HBA_ERROR;
1826
1827        lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
1828                     "0300 LATT: Cannot issue READ_LA: Data:%d\n", rc);
1829
1830        return;
1831}
1832
1833/**
1834 * lpfc_parse_vpd - Parse VPD (Vital Product Data)
1835 * @phba: pointer to lpfc hba data structure.
1836 * @vpd: pointer to the vital product data.
1837 * @len: length of the vital product data in bytes.
1838 *
1839 * This routine parses the Vital Product Data (VPD). The VPD is treated as
1840 * an array of characters. This routine populates the ModelName, ProgramType,
1841 * ModelDesc, and related fields of the phba data structure.
1842 *
1843 * Return codes
1844 *   0 - pointer to the VPD passed in is NULL
1845 *   1 - success
1846 **/
1847int
1848lpfc_parse_vpd(struct lpfc_hba *phba, uint8_t *vpd, int len)
1849{
1850        uint8_t lenlo, lenhi;
1851        int Length;
1852        int i, j;
1853        int finished = 0;
1854        int index = 0;
1855
1856        if (!vpd)
1857                return 0;
1858
1859        /* Vital Product */
1860        lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
1861                        "0455 Vital Product Data: x%x x%x x%x x%x\n",
1862                        (uint32_t) vpd[0], (uint32_t) vpd[1], (uint32_t) vpd[2],
1863                        (uint32_t) vpd[3]);
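        /*
         * The buffer follows the PCI VPD resource format: a large-resource
         * tag (0x82 identifier string, 0x90 read-only VPD-R, 0x91 read/write
         * VPD-W) is followed by a 16-bit little-endian length, and the VPD-R
         * data holds two-character keywords (SN, V1..V4 here), each followed
         * by a one-byte length. The small-resource end tag 0x78 terminates
         * the list.
         */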
1864        while (!finished && (index < (len - 4))) {
1865                switch (vpd[index]) {
1866                case 0x82:
1867                case 0x91:
1868                        index += 1;
1869                        lenlo = vpd[index];
1870                        index += 1;
1871                        lenhi = vpd[index];
1872                        index += 1;
1873                        i = ((((unsigned short)lenhi) << 8) + lenlo);
1874                        index += i;
1875                        break;
1876                case 0x90:
1877                        index += 1;
1878                        lenlo = vpd[index];
1879                        index += 1;
1880                        lenhi = vpd[index];
1881                        index += 1;
1882                        Length = ((((unsigned short)lenhi) << 8) + lenlo);
1883                        if (Length > len - index)
1884                                Length = len - index;
1885                        while (Length > 0) {
1886                                /* Look for Serial Number */
1887                                if ((vpd[index] == 'S') && (vpd[index+1] == 'N')) {
1888                                        index += 2;
1889                                        i = vpd[index];
1890                                        index += 1;
1891                                        j = 0;
1892                                        Length -= (3 + i);
1893                                        while (i--) {
1894                                                phba->SerialNumber[j++] = vpd[index++];
1895                                                if (j == 31)
1896                                                        break;
1897                                        }
1898                                        phba->SerialNumber[j] = 0;
1899                                        continue;
1900                                }
1901                                else if ((vpd[index] == 'V') && (vpd[index+1] == '1')) {
1902                                        phba->vpd_flag |= VPD_MODEL_DESC;
1903                                        index += 2;
1904                                        i = vpd[index];
1905                                        index += 1;
1906                                        j = 0;
1907                                        Length -= (3 + i);
1908                                        while (i--) {
1909                                                phba->ModelDesc[j++] = vpd[index++];
1910                                                if (j == 255)
1911                                                        break;
1912                                        }
1913                                        phba->ModelDesc[j] = 0;
1914                                        continue;
1915                                }
1916                                else if ((vpd[index] == 'V') && (vpd[index+1] == '2')) {
1917                                        phba->vpd_flag |= VPD_MODEL_NAME;
1918                                        index += 2;
1919                                        i = vpd[index];
1920                                        index += 1;
1921                                        j = 0;
1922                                        Length -= (3 + i);
1923                                        while (i--) {
1924                                                phba->ModelName[j++] = vpd[index++];
1925                                                if (j == 79)
1926                                                        break;
1927                                        }
1928                                        phba->ModelName[j] = 0;
1929                                        continue;
1930                                }
1931                                else if ((vpd[index] == 'V') && (vpd[index+1] == '3')) {
1932                                        phba->vpd_flag |= VPD_PROGRAM_TYPE;
1933                                        index += 2;
1934                                        i = vpd[index];
1935                                        index += 1;
1936                                        j = 0;
1937                                        Length -= (3 + i);
1938                                        while (i--) {
1939                                                phba->ProgramType[j++] = vpd[index++];
1940                                                if (j == 255)
1941                                                        break;
1942                                        }
1943                                        phba->ProgramType[j] = 0;
1944                                        continue;
1945                                }
1946                                else if ((vpd[index] == 'V') && (vpd[index+1] == '4')) {
1947                                        phba->vpd_flag |= VPD_PORT;
1948                                        index += 2;
1949                                        i = vpd[index];
1950                                        index += 1;
1951                                        j = 0;
1952                                        Length -= (3 + i);
1953                                        while (i--) {
1954                                                if ((phba->sli_rev == LPFC_SLI_REV4) &&
1955                                                    (phba->sli4_hba.pport_name_sta ==
1956                                                     LPFC_SLI4_PPNAME_GET)) {
1957                                                        j++;
1958                                                        index++;
1959                                                } else
1960                                                        phba->Port[j++] = vpd[index++];
1961                                                if (j == 19)
1962                                                        break;
1963                                        }
1964                                        if ((phba->sli_rev != LPFC_SLI_REV4) ||
1965                                            (phba->sli4_hba.pport_name_sta ==
1966                                             LPFC_SLI4_PPNAME_NON))
1967                                                phba->Port[j] = 0;
1968                                        continue;
1969                                }
1970                                else {
1971                                        index += 2;
1972                                        i = vpd[index];
1973                                        index += 1;
1974                                        index += i;
1975                                        Length -= (3 + i);
1976                                }
1977                        }
1978                        finished = 0;
1979                        break;
1980                case 0x78:
1981                        finished = 1;
1982                        break;
1983                default:
1984                        index++;
1985                        break;
1986                }
1987        }
1988
1989        return 1;
1990}
1991
1992/**
1993 * lpfc_get_hba_model_desc - Retrieve HBA device model name and description
1994 * @phba: pointer to lpfc hba data structure.
1995 * @mdp: pointer to the data structure to hold the derived model name.
1996 * @descp: pointer to the data structure to hold the derived description.
1997 *
1998 * This routine retrieves the HBA's description based on its registered PCI
1999 * device ID. The @descp passed into this function points to an array of 256
2000 * chars; it is returned with the model name, maximum speed, and host bus type.
2001 * The @mdp passed into this function points to an array of 80 chars; when the
2002 * function returns, @mdp is filled with the model name.
2003 **/
2004static void
2005lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
2006{
2007        lpfc_vpd_t *vp;
2008        uint16_t dev_id = phba->pcidev->device;
2009        int max_speed;
2010        int GE = 0;
2011        int oneConnect = 0; /* default is not a oneConnect */
2012        struct {
2013                char *name;
2014                char *bus;
2015                char *function;
2016        } m = {"<Unknown>", "", ""};
2017
2018        if (mdp && mdp[0] != '\0' &&
2019            descp && descp[0] != '\0')
2020                return;
2021
2022        if (phba->lmt & LMT_16Gb)
2023                max_speed = 16;
2024        else if (phba->lmt & LMT_10Gb)
2025                max_speed = 10;
2026        else if (phba->lmt & LMT_8Gb)
2027                max_speed = 8;
2028        else if (phba->lmt & LMT_4Gb)
2029                max_speed = 4;
2030        else if (phba->lmt & LMT_2Gb)
2031                max_speed = 2;
2032        else if (phba->lmt & LMT_1Gb)
2033                max_speed = 1;
2034        else
2035                max_speed = 0;
2036
2037        vp = &phba->vpd;
2038
2039        switch (dev_id) {
2040        case PCI_DEVICE_ID_FIREFLY:
2041                m = (typeof(m)){"LP6000", "PCI",
2042                                "Obsolete, Unsupported Fibre Channel Adapter"};
2043                break;
2044        case PCI_DEVICE_ID_SUPERFLY:
2045                if (vp->rev.biuRev >= 1 && vp->rev.biuRev <= 3)
2046                        m = (typeof(m)){"LP7000", "PCI", ""};
2047                else
2048                        m = (typeof(m)){"LP7000E", "PCI", ""};
2049                m.function = "Obsolete, Unsupported Fibre Channel Adapter";
2050                break;
2051        case PCI_DEVICE_ID_DRAGONFLY:
2052                m = (typeof(m)){"LP8000", "PCI",
2053                                "Obsolete, Unsupported Fibre Channel Adapter"};
2054                break;
2055        case PCI_DEVICE_ID_CENTAUR:
2056                if (FC_JEDEC_ID(vp->rev.biuRev) == CENTAUR_2G_JEDEC_ID)
2057                        m = (typeof(m)){"LP9002", "PCI", ""};
2058                else
2059                        m = (typeof(m)){"LP9000", "PCI", ""};
2060                m.function = "Obsolete, Unsupported Fibre Channel Adapter";
2061                break;
2062        case PCI_DEVICE_ID_RFLY:
2063                m = (typeof(m)){"LP952", "PCI",
2064                                "Obsolete, Unsupported Fibre Channel Adapter"};
2065                break;
2066        case PCI_DEVICE_ID_PEGASUS:
2067                m = (typeof(m)){"LP9802", "PCI-X",
2068                                "Obsolete, Unsupported Fibre Channel Adapter"};
2069                break;
2070        case PCI_DEVICE_ID_THOR:
2071                m = (typeof(m)){"LP10000", "PCI-X",
2072                                "Obsolete, Unsupported Fibre Channel Adapter"};
2073                break;
2074        case PCI_DEVICE_ID_VIPER:
2075                m = (typeof(m)){"LPX1000",  "PCI-X",
2076                                "Obsolete, Unsupported Fibre Channel Adapter"};
2077                break;
2078        case PCI_DEVICE_ID_PFLY:
2079                m = (typeof(m)){"LP982", "PCI-X",
2080                                "Obsolete, Unsupported Fibre Channel Adapter"};
2081                break;
2082        case PCI_DEVICE_ID_TFLY:
2083                m = (typeof(m)){"LP1050", "PCI-X",
2084                                "Obsolete, Unsupported Fibre Channel Adapter"};
2085                break;
2086        case PCI_DEVICE_ID_HELIOS:
2087                m = (typeof(m)){"LP11000", "PCI-X2",
2088                                "Obsolete, Unsupported Fibre Channel Adapter"};
2089                break;
2090        case PCI_DEVICE_ID_HELIOS_SCSP:
2091                m = (typeof(m)){"LP11000-SP", "PCI-X2",
2092                                "Obsolete, Unsupported Fibre Channel Adapter"};
2093                break;
2094        case PCI_DEVICE_ID_HELIOS_DCSP:
2095                m = (typeof(m)){"LP11002-SP",  "PCI-X2",
2096                                "Obsolete, Unsupported Fibre Channel Adapter"};
2097                break;
2098        case PCI_DEVICE_ID_NEPTUNE:
2099                m = (typeof(m)){"LPe1000", "PCIe",
2100                                "Obsolete, Unsupported Fibre Channel Adapter"};
2101                break;
2102        case PCI_DEVICE_ID_NEPTUNE_SCSP:
2103                m = (typeof(m)){"LPe1000-SP", "PCIe",
2104                                "Obsolete, Unsupported Fibre Channel Adapter"};
2105                break;
2106        case PCI_DEVICE_ID_NEPTUNE_DCSP:
2107                m = (typeof(m)){"LPe1002-SP", "PCIe",
2108                                "Obsolete, Unsupported Fibre Channel Adapter"};
2109                break;
2110        case PCI_DEVICE_ID_BMID:
2111                m = (typeof(m)){"LP1150", "PCI-X2", "Fibre Channel Adapter"};
2112                break;
2113        case PCI_DEVICE_ID_BSMB:
2114                m = (typeof(m)){"LP111", "PCI-X2",
2115                                "Obsolete, Unsupported Fibre Channel Adapter"};
2116                break;
2117        case PCI_DEVICE_ID_ZEPHYR:
2118                m = (typeof(m)){"LPe11000", "PCIe", "Fibre Channel Adapter"};
2119                break;
2120        case PCI_DEVICE_ID_ZEPHYR_SCSP:
2121                m = (typeof(m)){"LPe11000", "PCIe", "Fibre Channel Adapter"};
2122                break;
2123        case PCI_DEVICE_ID_ZEPHYR_DCSP:
2124                m = (typeof(m)){"LP2105", "PCIe", "FCoE Adapter"};
2125                GE = 1;
2126                break;
2127        case PCI_DEVICE_ID_ZMID:
2128                m = (typeof(m)){"LPe1150", "PCIe", "Fibre Channel Adapter"};
2129                break;
2130        case PCI_DEVICE_ID_ZSMB:
2131                m = (typeof(m)){"LPe111", "PCIe", "Fibre Channel Adapter"};
2132                break;
2133        case PCI_DEVICE_ID_LP101:
2134                m = (typeof(m)){"LP101", "PCI-X",
2135                                "Obsolete, Unsupported Fibre Channel Adapter"};
2136                break;
2137        case PCI_DEVICE_ID_LP10000S:
2138                m = (typeof(m)){"LP10000-S", "PCI",
2139                                "Obsolete, Unsupported Fibre Channel Adapter"};
2140                break;
2141        case PCI_DEVICE_ID_LP11000S:
2142                m = (typeof(m)){"LP11000-S", "PCI-X2",
2143                                "Obsolete, Unsupported Fibre Channel Adapter"};
2144                break;
2145        case PCI_DEVICE_ID_LPE11000S:
2146                m = (typeof(m)){"LPe11000-S", "PCIe",
2147                                "Obsolete, Unsupported Fibre Channel Adapter"};
2148                break;
2149        case PCI_DEVICE_ID_SAT:
2150                m = (typeof(m)){"LPe12000", "PCIe", "Fibre Channel Adapter"};
2151                break;
2152        case PCI_DEVICE_ID_SAT_MID:
2153                m = (typeof(m)){"LPe1250", "PCIe", "Fibre Channel Adapter"};
2154                break;
2155        case PCI_DEVICE_ID_SAT_SMB:
2156                m = (typeof(m)){"LPe121", "PCIe", "Fibre Channel Adapter"};
2157                break;
2158        case PCI_DEVICE_ID_SAT_DCSP:
2159                m = (typeof(m)){"LPe12002-SP", "PCIe", "Fibre Channel Adapter"};
2160                break;
2161        case PCI_DEVICE_ID_SAT_SCSP:
2162                m = (typeof(m)){"LPe12000-SP", "PCIe", "Fibre Channel Adapter"};
2163                break;
2164        case PCI_DEVICE_ID_SAT_S:
2165                m = (typeof(m)){"LPe12000-S", "PCIe", "Fibre Channel Adapter"};
2166                break;
2167        case PCI_DEVICE_ID_HORNET:
2168                m = (typeof(m)){"LP21000", "PCIe",
2169                                "Obsolete, Unsupported FCoE Adapter"};
2170                GE = 1;
2171                break;
2172        case PCI_DEVICE_ID_PROTEUS_VF:
2173                m = (typeof(m)){"LPev12000", "PCIe IOV",
2174                                "Obsolete, Unsupported Fibre Channel Adapter"};
2175                break;
2176        case PCI_DEVICE_ID_PROTEUS_PF:
2177                m = (typeof(m)){"LPev12000", "PCIe IOV",
2178                                "Obsolete, Unsupported Fibre Channel Adapter"};
2179                break;
2180        case PCI_DEVICE_ID_PROTEUS_S:
2181                m = (typeof(m)){"LPemv12002-S", "PCIe IOV",
2182                                "Obsolete, Unsupported Fibre Channel Adapter"};
2183                break;
2184        case PCI_DEVICE_ID_TIGERSHARK:
2185                oneConnect = 1;
2186                m = (typeof(m)){"OCe10100", "PCIe", "FCoE"};
2187                break;
2188        case PCI_DEVICE_ID_TOMCAT:
2189                oneConnect = 1;
2190                m = (typeof(m)){"OCe11100", "PCIe", "FCoE"};
2191                break;
2192        case PCI_DEVICE_ID_FALCON:
2193                m = (typeof(m)){"LPSe12002-ML1-E", "PCIe",
2194                                "EmulexSecure Fibre"};
2195                break;
2196        case PCI_DEVICE_ID_BALIUS:
2197                m = (typeof(m)){"LPVe12002", "PCIe Shared I/O",
2198                                "Obsolete, Unsupported Fibre Channel Adapter"};
2199                break;
2200        case PCI_DEVICE_ID_LANCER_FC:
2201                m = (typeof(m)){"LPe16000", "PCIe", "Fibre Channel Adapter"};
2202                break;
2203        case PCI_DEVICE_ID_LANCER_FC_VF:
2204                m = (typeof(m)){"LPe16000", "PCIe",
2205                                "Obsolete, Unsupported Fibre Channel Adapter"};
2206                break;
2207        case PCI_DEVICE_ID_LANCER_FCOE:
2208                oneConnect = 1;
2209                m = (typeof(m)){"OCe15100", "PCIe", "FCoE"};
2210                break;
2211        case PCI_DEVICE_ID_LANCER_FCOE_VF:
2212                oneConnect = 1;
2213                m = (typeof(m)){"OCe15100", "PCIe",
2214                                "Obsolete, Unsupported FCoE"};
2215                break;
2216        case PCI_DEVICE_ID_SKYHAWK:
2217        case PCI_DEVICE_ID_SKYHAWK_VF:
2218                oneConnect = 1;
2219                m = (typeof(m)){"OCe14000", "PCIe", "FCoE"};
2220                break;
2221        default:
2222                m = (typeof(m)){"Unknown", "", ""};
2223                break;
2224        }
2225
2226        if (mdp && mdp[0] == '\0')
2227                snprintf(mdp, 79, "%s", m.name);
2228        /*
2229         * OneConnect HBAs require special processing; they are all
2230         * initiators and the port number is appended to the description.
2231         */
2232        if (descp && descp[0] == '\0') {
2233                if (oneConnect)
2234                        snprintf(descp, 255,
2235                                "Emulex OneConnect %s, %s Initiator %s",
2236                                m.name, m.function,
2237                                phba->Port);
2238                else if (max_speed == 0)
2239                        snprintf(descp, 255,
2240                                "Emulex %s %s %s ",
2241                                m.name, m.bus, m.function);
2242                else
2243                        snprintf(descp, 255,
2244                                "Emulex %s %d%s %s %s",
2245                                m.name, max_speed, (GE) ? "GE" : "Gb",
2246                                m.bus, m.function);
2247        }
2248}
2249
2250/**
2251 * lpfc_post_buffer - Post IOCB(s) with DMA buffer descriptor(s) to an IOCB ring
2252 * @phba: pointer to lpfc hba data structure.
2253 * @pring: pointer to an IOCB ring.
2254 * @cnt: the number of IOCBs to be posted to the IOCB ring.
2255 *
2256 * This routine posts a given number of IOCBs with the associated DMA buffer
2257 * descriptors specified by the cnt argument to the given IOCB ring.
2258 *
2259 * Return codes
2260 *   The number of IOCBs NOT able to be posted to the IOCB ring.
2261 **/
2262int
2263lpfc_post_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, int cnt)
2264{
2265        IOCB_t *icmd;
2266        struct lpfc_iocbq *iocb;
2267        struct lpfc_dmabuf *mp1, *mp2;
2268
2269        cnt += pring->missbufcnt;
2270
2271        /* While there are buffers to post */
2272        while (cnt > 0) {
2273                /* Allocate buffer for command iocb */
2274                iocb = lpfc_sli_get_iocbq(phba);
2275                if (iocb == NULL) {
2276                        pring->missbufcnt = cnt;
2277                        return cnt;
2278                }
2279                icmd = &iocb->iocb;
2280
2281                /* 2 buffers can be posted per command */
2282                /* Allocate buffer to post */
2283                mp1 = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
2284                if (mp1)
2285                        mp1->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &mp1->phys);
2286                if (!mp1 || !mp1->virt) {
2287                        kfree(mp1);
2288                        lpfc_sli_release_iocbq(phba, iocb);
2289                        pring->missbufcnt = cnt;
2290                        return cnt;
2291                }
2292
2293                INIT_LIST_HEAD(&mp1->list);
2294                /* Allocate buffer to post */
2295                if (cnt > 1) {
2296                        mp2 = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
2297                        if (mp2)
2298                                mp2->virt = lpfc_mbuf_alloc(phba, MEM_PRI,
2299                                                            &mp2->phys);
2300                        if (!mp2 || !mp2->virt) {
2301                                kfree(mp2);
2302                                lpfc_mbuf_free(phba, mp1->virt, mp1->phys);
2303                                kfree(mp1);
2304                                lpfc_sli_release_iocbq(phba, iocb);
2305                                pring->missbufcnt = cnt;
2306                                return cnt;
2307                        }
2308
2309                        INIT_LIST_HEAD(&mp2->list);
2310                } else {
2311                        mp2 = NULL;
2312                }
2313
2314                icmd->un.cont64[0].addrHigh = putPaddrHigh(mp1->phys);
2315                icmd->un.cont64[0].addrLow = putPaddrLow(mp1->phys);
2316                icmd->un.cont64[0].tus.f.bdeSize = FCELSSIZE;
2317                icmd->ulpBdeCount = 1;
2318                cnt--;
2319                if (mp2) {
2320                        icmd->un.cont64[1].addrHigh = putPaddrHigh(mp2->phys);
2321                        icmd->un.cont64[1].addrLow = putPaddrLow(mp2->phys);
2322                        icmd->un.cont64[1].tus.f.bdeSize = FCELSSIZE;
2323                        cnt--;
2324                        icmd->ulpBdeCount = 2;
2325                }
2326
2327                icmd->ulpCommand = CMD_QUE_RING_BUF64_CN;
2328                icmd->ulpLe = 1;
2329
2330                if (lpfc_sli_issue_iocb(phba, pring->ringno, iocb, 0) ==
2331                    IOCB_ERROR) {
2332                        lpfc_mbuf_free(phba, mp1->virt, mp1->phys);
2333                        kfree(mp1);
2334                        cnt++;
2335                        if (mp2) {
2336                                lpfc_mbuf_free(phba, mp2->virt, mp2->phys);
2337                                kfree(mp2);
2338                                cnt++;
2339                        }
2340                        lpfc_sli_release_iocbq(phba, iocb);
2341                        pring->missbufcnt = cnt;
2342                        return cnt;
2343                }
2344                lpfc_sli_ringpostbuf_put(phba, pring, mp1);
2345                if (mp2)
2346                        lpfc_sli_ringpostbuf_put(phba, pring, mp2);
2347        }
2348        pring->missbufcnt = 0;
2349        return 0;
2350}
2351
2352/**
2353 * lpfc_post_rcv_buf - Post the initial receive IOCB buffers to ELS ring
2354 * @phba: pointer to lpfc hba data structure.
2355 *
2356 * This routine posts initial receive IOCB buffers to the ELS ring. The
2357 * current number of initial IOCB buffers specified by LPFC_BUF_RING0 is
2358 * set to 64 IOCBs.
2359 *
2360 * Return codes
2361 *   0 - success (currently always success)
2362 **/
2363static int
2364lpfc_post_rcv_buf(struct lpfc_hba *phba)
2365{
2366        struct lpfc_sli *psli = &phba->sli;
2367
2368        /* Ring 0, ELS / CT buffers */
2369        lpfc_post_buffer(phba, &psli->ring[LPFC_ELS_RING], LPFC_BUF_RING0);
2370        /* Ring 2 - FCP no buffers needed */
2371
2372        return 0;
2373}
2374
2375#define S(N,V) (((V)<<(N))|((V)>>(32-(N))))
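    /*
     * S(N, V) rotates the 32-bit value V left by N bits, e.g.
     * S(5, 0x80000000) == 0x00000010; this is the ROTL primitive used
     * by the SHA-1 style hashing below.
     */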
2376
2377/**
2378 * lpfc_sha_init - Set up initial array of hash table entries
2379 * @HashResultPointer: pointer to an array as hash table.
2380 *
2381 * This routine sets up the initial values to the array of hash table entries
2382 * for the LC HBAs.
2383 **/
2384static void
2385lpfc_sha_init(uint32_t * HashResultPointer)
2386{
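            /* The five standard SHA-1 initial hash values H0..H4 (FIPS 180-1) */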
2387        HashResultPointer[0] = 0x67452301;
2388        HashResultPointer[1] = 0xEFCDAB89;
2389        HashResultPointer[2] = 0x98BADCFE;
2390        HashResultPointer[3] = 0x10325476;
2391        HashResultPointer[4] = 0xC3D2E1F0;
2392}
2393
2394/**
2395 * lpfc_sha_iterate - Iterate initial hash table with the working hash table
2396 * @HashResultPointer: pointer to an initial/result hash table.
2397 * @HashWorkingPointer: pointer to a working hash table.
2398 *
2399 * This routine iterates an initial hash table pointed to by @HashResultPointer
2400 * with the values from the working hash table pointed to by @HashWorkingPointer.
2401 * The results are put back into the initial hash table and returned through
2402 * @HashResultPointer as the result hash table.
2403 **/
2404static void
2405lpfc_sha_iterate(uint32_t * HashResultPointer, uint32_t * HashWorkingPointer)
2406{
2407        int t;
2408        uint32_t TEMP;
2409        uint32_t A, B, C, D, E;
2410        t = 16;
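            /*
             * Expand the 16 input words into the 80-entry SHA-1 message
             * schedule: W[t] = ROTL1(W[t-3] ^ W[t-8] ^ W[t-14] ^ W[t-16]).
             */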
2411        do {
2412                HashWorkingPointer[t] =
2413                    S(1,
2414                      HashWorkingPointer[t - 3] ^ HashWorkingPointer[t -
2415                                                                     8] ^
2416                      HashWorkingPointer[t - 14] ^ HashWorkingPointer[t - 16]);
2417        } while (++t <= 79);
2418        t = 0;
2419        A = HashResultPointer[0];
2420        B = HashResultPointer[1];
2421        C = HashResultPointer[2];
2422        D = HashResultPointer[3];
2423        E = HashResultPointer[4];
2424
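            /*
             * The 80 SHA-1 rounds, using the standard round functions
             * (Ch, Parity, Maj, Parity) and round constants 0x5A827999,
             * 0x6ED9EBA1, 0x8F1BBCDC and 0xCA62C1D6 for rounds 0-19,
             * 20-39, 40-59 and 60-79 respectively.
             */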
2425        do {
2426                if (t < 20) {
2427                        TEMP = ((B & C) | ((~B) & D)) + 0x5A827999;
2428                } else if (t < 40) {
2429                        TEMP = (B ^ C ^ D) + 0x6ED9EBA1;
2430                } else if (t < 60) {
2431                        TEMP = ((B & C) | (B & D) | (C & D)) + 0x8F1BBCDC;
2432                } else {
2433                        TEMP = (B ^ C ^ D) + 0xCA62C1D6;
2434                }
2435                TEMP += S(5, A) + E + HashWorkingPointer[t];
2436                E = D;
2437                D = C;
2438                C = S(30, B);
2439                B = A;
2440                A = TEMP;
2441        } while (++t <= 79);
2442
2443        HashResultPointer[0] += A;
2444        HashResultPointer[1] += B;
2445        HashResultPointer[2] += C;
2446        HashResultPointer[3] += D;
2447        HashResultPointer[4] += E;
2448
2449}
2450
2451/**
2452 * lpfc_challenge_key - Create challenge key based on WWPN of the HBA
2453 * @RandomChallenge: pointer to the entry of host challenge random number array.
2454 * @HashWorking: pointer to the entry of the working hash array.
2455 *
2456 * This routine calculates the working hash array referred to by @HashWorking
2457 * from the challenge random numbers associated with the host, referred to by
2458 * @RandomChallenge. The result is put into the entry of the working hash
2459 * array and returned by reference through @HashWorking.
2460 **/
2461static void
2462lpfc_challenge_key(uint32_t * RandomChallenge, uint32_t * HashWorking)
2463{
2464        *HashWorking = (*RandomChallenge ^ *HashWorking);
2465}
2466
2467/**
2468 * lpfc_hba_init - Perform special handling for LC HBA initialization
2469 * @phba: pointer to lpfc hba data structure.
2470 * @hbainit: pointer to an array of unsigned 32-bit integers.
2471 *
2472 * This routine performs the special handling for LC HBA initialization.
2473 **/
2474void
2475lpfc_hba_init(struct lpfc_hba *phba, uint32_t *hbainit)
2476{
2477        int t;
2478        uint32_t *HashWorking;
2479        uint32_t *pwwnn = (uint32_t *) phba->wwnn;
2480
2481        HashWorking = kcalloc(80, sizeof(uint32_t), GFP_KERNEL);
2482        if (!HashWorking)
2483                return;
2484
2485        HashWorking[0] = HashWorking[78] = *pwwnn++;
2486        HashWorking[1] = HashWorking[79] = *pwwnn;
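            /*
             * The two WWNN words seed both ends of the 80-entry working array;
             * lpfc_challenge_key() below mixes in the random challenge before
             * the array is consumed as a SHA-1 message schedule.
             */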
2487
2488        for (t = 0; t < 7; t++)
2489                lpfc_challenge_key(phba->RandomData + t, HashWorking + t);
2490
2491        lpfc_sha_init(hbainit);
2492        lpfc_sha_iterate(hbainit, HashWorking);
2493        kfree(HashWorking);
2494}
2495
2496/**
2497 * lpfc_cleanup - Performs vport cleanups before deleting a vport
2498 * @vport: pointer to a virtual N_Port data structure.
2499 *
2500 * This routine performs the necessary cleanups before deleting the @vport.
2501 * It invokes the discovery state machine to perform necessary state
2502 * transitions and to release the ndlps associated with the @vport. Note,
2503 * the physical port is treated as @vport 0.
2504 **/
2505void
2506lpfc_cleanup(struct lpfc_vport *vport)
2507{
2508        struct lpfc_hba   *phba = vport->phba;
2509        struct lpfc_nodelist *ndlp, *next_ndlp;
2510        int i = 0;
2511
2512        if (phba->link_state > LPFC_LINK_DOWN)
2513                lpfc_port_link_failure(vport);
2514
2515        list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
2516                if (!NLP_CHK_NODE_ACT(ndlp)) {
2517                        ndlp = lpfc_enable_node(vport, ndlp,
2518                                                NLP_STE_UNUSED_NODE);
2519                        if (!ndlp)
2520                                continue;
2521                        spin_lock_irq(&phba->ndlp_lock);
2522                        NLP_SET_FREE_REQ(ndlp);
2523                        spin_unlock_irq(&phba->ndlp_lock);
2524                        /* Trigger the release of the ndlp memory */
2525                        lpfc_nlp_put(ndlp);
2526                        continue;
2527                }
2528                spin_lock_irq(&phba->ndlp_lock);
2529                if (NLP_CHK_FREE_REQ(ndlp)) {
2530                        /* The ndlp should not be in memory free mode already */
2531                        spin_unlock_irq(&phba->ndlp_lock);
2532                        continue;
2533                } else
2534                        /* Indicate request for freeing ndlp memory */
2535                        NLP_SET_FREE_REQ(ndlp);
2536                spin_unlock_irq(&phba->ndlp_lock);
2537
2538                if (vport->port_type != LPFC_PHYSICAL_PORT &&
2539                    ndlp->nlp_DID == Fabric_DID) {
2540                        /* Just free up ndlp with Fabric_DID for vports */
2541                        lpfc_nlp_put(ndlp);
2542                        continue;
2543                }
2544
2545                /* Take care of nodes in the unused state before the state
2546                 * machine takes action.
2547                 */
2548                if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) {
2549                        lpfc_nlp_put(ndlp);
2550                        continue;
2551                }
2552
2553                if (ndlp->nlp_type & NLP_FABRIC)
2554                        lpfc_disc_state_machine(vport, ndlp, NULL,
2555                                        NLP_EVT_DEVICE_RECOVERY);
2556
2557                lpfc_disc_state_machine(vport, ndlp, NULL,
2558                                             NLP_EVT_DEVICE_RM);
2559        }
2560
2561        /* At this point, ALL ndlp's should be gone
2562         * because of the previous NLP_EVT_DEVICE_RM.
2563         * Let's wait for this to happen, if needed.
2564         */
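        /* Poll in 10 ms steps; log any remaining nodes and give up after
         * roughly 30 seconds (3000 * 10 ms).
         */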
2565        while (!list_empty(&vport->fc_nodes)) {
2566                if (i++ > 3000) {
2567                        lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
2568                                "0233 Nodelist not empty\n");
2569                        list_for_each_entry_safe(ndlp, next_ndlp,
2570                                                &vport->fc_nodes, nlp_listp) {
2571                                lpfc_printf_vlog(ndlp->vport, KERN_ERR,
2572                                                LOG_NODE,
2573                                                "0282 did:x%x ndlp:x%p "
2574                                                "usgmap:x%x refcnt:%d\n",
2575                                                ndlp->nlp_DID, (void *)ndlp,
2576                                                ndlp->nlp_usg_map,
2577                                                atomic_read(
2578                                                        &ndlp->kref.refcount));
2579                        }
2580                        break;
2581                }
2582
2583                /* Wait for any activity on ndlps to settle */
2584                msleep(10);
2585        }
2586        lpfc_cleanup_vports_rrqs(vport, NULL);
2587}
2588
2589/**
2590 * lpfc_stop_vport_timers - Stop all the timers associated with a vport
2591 * @vport: pointer to a virtual N_Port data structure.
2592 *
2593 * This routine stops all the timers associated with a @vport. This function
2594 * is invoked before disabling or deleting a @vport. Note that the physical
2595 * port is treated as @vport 0.
2596 **/
2597void
2598lpfc_stop_vport_timers(struct lpfc_vport *vport)
2599{
2600        del_timer_sync(&vport->els_tmofunc);
2601        del_timer_sync(&vport->fc_fdmitmo);
2602        del_timer_sync(&vport->delayed_disc_tmo);
2603        lpfc_can_disctmo(vport);
2604        return;
2605}
2606
2607/**
2608 * __lpfc_sli4_stop_fcf_redisc_wait_timer - Stop FCF rediscovery wait timer
2609 * @phba: pointer to lpfc hba data structure.
2610 *
2611 * This routine stops the SLI4 FCF rediscover wait timer if it's on. The
2612 * caller of this routine should already hold the host lock.
2613 **/
2614void
2615__lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba)
2616{
2617        /* Clear pending FCF rediscovery wait flag */
2618        phba->fcf.fcf_flag &= ~FCF_REDISC_PEND;
2619
2620        /* Now, try to stop the timer */
2621        del_timer(&phba->fcf.redisc_wait);
2622}
2623
2624/**
2625 * lpfc_sli4_stop_fcf_redisc_wait_timer - Stop FCF rediscovery wait timer
2626 * @phba: pointer to lpfc hba data structure.
2627 *
2628 * This routine stops the SLI4 FCF rediscover wait timer if it's on. It
2629 * checks whether the FCF rediscovery wait timer is pending with the host
2630 * lock held before proceeding with disabling the timer and clearing the
2631 * wait timer pending flag.
2632 **/
2633void
2634lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba)
2635{
2636        spin_lock_irq(&phba->hbalock);
2637        if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND)) {
2638                /* FCF rediscovery timer already fired or stopped */
2639                spin_unlock_irq(&phba->hbalock);
2640                return;
2641        }
2642        __lpfc_sli4_stop_fcf_redisc_wait_timer(phba);
2643        /* Clear failover in progress flags */
2644        phba->fcf.fcf_flag &= ~(FCF_DEAD_DISC | FCF_ACVL_DISC);
2645        spin_unlock_irq(&phba->hbalock);
2646}
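
    /*
     * Usage sketch: the double-underscore variant above follows the usual
     * kernel convention that the caller already holds hbalock, while the
     * plain variant takes the lock itself.  A hypothetical caller already
     * inside a hbalock critical section would look roughly like this
     * (illustrative only, not a routine in this file):
     *
     *         spin_lock_irq(&phba->hbalock);
     *         if (phba->fcf.fcf_flag & FCF_REDISC_PEND)
     *                 __lpfc_sli4_stop_fcf_redisc_wait_timer(phba);
     *         spin_unlock_irq(&phba->hbalock);
     */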
2647
2648/**
2649 * lpfc_stop_hba_timers - Stop all the timers associated with an HBA
2650 * @phba: pointer to lpfc hba data structure.
2651 *
2652 * This routine stops all the timers associated with an HBA. This function is
2653 * invoked before either putting an HBA offline or unloading the driver.
2654 **/
2655void
2656lpfc_stop_hba_timers(struct lpfc_hba *phba)
2657{
2658        lpfc_stop_vport_timers(phba->pport);
2659        del_timer_sync(&phba->sli.mbox_tmo);
2660        del_timer_sync(&phba->fabric_block_timer);
2661        del_timer_sync(&phba->eratt_poll);
2662        del_timer_sync(&phba->hb_tmofunc);
2663        if (phba->sli_rev == LPFC_SLI_REV4) {
2664                del_timer_sync(&phba->rrq_tmr);
2665                phba->hba_flag &= ~HBA_RRQ_ACTIVE;
2666        }
2667        phba->hb_outstanding = 0;
2668
2669        switch (phba->pci_dev_grp) {
2670        case LPFC_PCI_DEV_LP:
2671                /* Stop any LightPulse device specific driver timers */
2672                del_timer_sync(&phba->fcp_poll_timer);
2673                break;
2674        case LPFC_PCI_DEV_OC:
2675                /* Stop any OneConnect device specific driver timers */
2676                lpfc_sli4_stop_fcf_redisc_wait_timer(phba);
2677                break;
2678        default:
2679                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2680                                "0297 Invalid device group (x%x)\n",
2681                                phba->pci_dev_grp);
2682                break;
2683        }
2684        return;
2685}
2686
2687/**
2688 * lpfc_block_mgmt_io - Mark an HBA's management interface as blocked
2689 * @phba: pointer to lpfc hba data structure.
     * @mbx_action: flag for mailbox shutdown action.
2690 *
2691 * This routine marks an HBA's management interface as blocked. Once the
2692 * HBA's management interface is marked as blocked, all user space access
2693 * to the HBA, whether from the sysfs interface or the libdfc interface,
2694 * will be blocked. The HBA is set to block the management interface when
2695 * the driver prepares the HBA interface for online or offline.
2696 **/
2697static void
2698lpfc_block_mgmt_io(struct lpfc_hba *phba, int mbx_action)
2699{
2700        unsigned long iflag;
2701        uint8_t actcmd = MBX_HEARTBEAT;
2702        unsigned long timeout;
2703
2704        spin_lock_irqsave(&phba->hbalock, iflag);
2705        phba->sli.sli_flag |= LPFC_BLOCK_MGMT_IO;
2706        spin_unlock_irqrestore(&phba->hbalock, iflag);
2707        if (mbx_action == LPFC_MBX_NO_WAIT)
2708                return;
2709        timeout = msecs_to_jiffies(LPFC_MBOX_TMO * 1000) + jiffies;
2710        spin_lock_irqsave(&phba->hbalock, iflag);
2711        if (phba->sli.mbox_active) {
2712                actcmd = phba->sli.mbox_active->u.mb.mbxCommand;
2713                /* Determine how long we might wait for the active mailbox
2714                 * command to be gracefully completed by firmware.
2715                 */
2716                timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba,
2717                                phba->sli.mbox_active) * 1000) + jiffies;
2718        }
2719        spin_unlock_irqrestore(&phba->hbalock, iflag);
2720
2721        /* Wait for the outstanding mailbox command to complete */
2722        while (phba->sli.mbox_active) {
2723                /* Check active mailbox complete status every 2ms */
2724                msleep(2);
2725                if (time_after(jiffies, timeout)) {
2726                        lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2727                                "2813 Mgmt IO is Blocked %x "
2728                                "- mbox cmd %x still active\n",
2729                                phba->sli.sli_flag, actcmd);
2730                        break;
2731                }
2732        }
2733}
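
    /*
     * Illustrative pairing (a sketch, not a routine in this file): callers
     * are expected to bracket online/offline preparation with the block
     * and unblock calls, as lpfc_online() below does:
     *
     *         lpfc_block_mgmt_io(phba, LPFC_MBX_WAIT);
     *         ... bring the HBA interface online or offline ...
     *         lpfc_unblock_mgmt_io(phba);
     *
     * Passing LPFC_MBX_NO_WAIT instead skips the wait for any active
     * mailbox command.
     */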
2734
2735/**
2736 * lpfc_sli4_node_prep - Assign RPIs for active nodes.
2737 * @phba: pointer to lpfc hba data structure.
2738 *
2739 * Allocate RPIs for all active remote nodes. This is needed whenever
2740 * an SLI4 adapter is reset and the driver is not unloading. Its purpose
2741 * is to fix up the temporary RPI assignments.
2742 **/
2743void
2744lpfc_sli4_node_prep(struct lpfc_hba *phba)
2745{
2746        struct lpfc_nodelist  *ndlp, *next_ndlp;
2747        struct lpfc_vport **vports;
2748        int i;
2749
2750        if (phba->sli_rev != LPFC_SLI_REV4)
2751                return;
2752
2753        vports = lpfc_create_vport_work_array(phba);
2754        if (vports != NULL) {
2755                for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
2756                        if (vports[i]->load_flag & FC_UNLOADING)
2757                                continue;
2758
2759                        list_for_each_entry_safe(ndlp, next_ndlp,
2760                                                 &vports[i]->fc_nodes,
2761                                                 nlp_listp) {
2762                                if (NLP_CHK_NODE_ACT(ndlp))
2763                                        ndlp->nlp_rpi =
2764                                                lpfc_sli4_alloc_rpi(phba);
2765                        }
2766                }
2767        }
2768        lpfc_destroy_vport_work_array(phba, vports);
2769}
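
    /*
     * The create/destroy work-array pattern used above recurs throughout
     * this file whenever every vport must be visited safely:
     *
     *         vports = lpfc_create_vport_work_array(phba);
     *         if (vports != NULL)
     *                 for (i = 0; i <= phba->max_vports && vports[i]; i++)
     *                         ... act on vports[i] ...
     *         lpfc_destroy_vport_work_array(phba, vports);
     *
     * The work array holds its own references to the vports, so entries
     * cannot disappear while the walk is in progress.
     */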
2770
2771/**
2772 * lpfc_online - Initialize and bring an HBA online
2773 * @phba: pointer to lpfc hba data structure.
2774 *
2775 * This routine initializes the HBA and brings it online. During this
2776 * process, the management interface is blocked to prevent user space
2777 * access to the HBA from interfering with the driver initialization.
2778 *
2779 * Return codes
2780 *   0 - successful
2781 *   1 - failed
2782 **/
2783int
2784lpfc_online(struct lpfc_hba *phba)
2785{
2786        struct lpfc_vport *vport;
2787        struct lpfc_vport **vports;
2788        int i;
2789        bool vpis_cleared = false;
2790
2791        if (!phba)
2792                return 0;
2793        vport = phba->pport;
2794
2795        if (!(vport->fc_flag & FC_OFFLINE_MODE))
2796                return 0;
2797
2798        lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
2799                        "0458 Bring Adapter online\n");
2800
2801        lpfc_block_mgmt_io(phba, LPFC_MBX_WAIT);
2802
2803        if (!lpfc_sli_queue_setup(phba)) {
2804                lpfc_unblock_mgmt_io(phba);
2805                return 1;
2806        }
2807
2808        if (phba->sli_rev == LPFC_SLI_REV4) {
2809                if (lpfc_sli4_hba_setup(phba)) { /* Initialize SLI4 HBA */
2810                        lpfc_unblock_mgmt_io(phba);
2811                        return 1;
2812                }
2813                spin_lock_irq(&phba->hbalock);
2814                if (!phba->sli4_hba.max_cfg_param.vpi_used)
2815                        vpis_cleared = true;
2816                spin_unlock_irq(&phba->hbalock);
2817        } else {
2818                if (lpfc_sli_hba_setup(phba)) { /* Initialize SLI2/SLI3 HBA */
2819                        lpfc_unblock_mgmt_io(phba);
2820                        return 1;
2821                }
2822        }
2823
2824        vports = lpfc_create_vport_work_array(phba);
2825        if (vports != NULL)
2826                for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
2827                        struct Scsi_Host *shost;
2828                        shost = lpfc_shost_from_vport(vports[i]);
2829                        spin_lock_irq(shost->host_lock);
2830                        vports[i]->fc_flag &= ~FC_OFFLINE_MODE;
2831                        if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
2832                                vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
2833                        if (phba->sli_rev == LPFC_SLI_REV4) {
2834                                vports[i]->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
2835                                if ((vpis_cleared) &&
2836                                    (vports[i]->port_type !=
2837                                        LPFC_PHYSICAL_PORT))
2838                                        vports[i]->vpi = 0;
2839                        }
2840                        spin_unlock_irq(shost->host_lock);
2841                }
2842        lpfc_destroy_vport_work_array(phba, vports);
2843
2844        lpfc_unblock_mgmt_io(phba);
2845        return 0;
2846}
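
    /*
     * A minimal sketch of how the online/offline entry points pair up in
     * a reset path (assumed caller, not shown in this file):
     *
     *         lpfc_offline_prep(phba, LPFC_MBX_WAIT);
     *         lpfc_offline(phba);
     *         ... reset the HBA ...
     *         lpfc_online(phba);
     *
     * lpfc_online() returns 0 on success and 1 on failure.
     */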
2847
2848/**
2849 * lpfc_unblock_mgmt_io - Mark an HBA's management interface as not blocked
2850 * @phba: pointer to lpfc hba data structure.
2851 *
2852 * This routine marks an HBA's management interface as not blocked. Once the
2853 * HBA's management interface is marked as not blocked, all user space
2854 * access to the HBA, whether from the sysfs interface or the libdfc
2855 * interface, will be allowed. The HBA is set to block the management
2856 * interface when the driver prepares the HBA interface for online or
2857 * offline, and is set to unblock the management interface afterwards.
2858 **/
2859void
2860lpfc_unblock_mgmt_io(struct lpfc_hba *phba)
2861{
2862        unsigned long iflag;
2863
2864        spin_lock_irqsave(&phba->hbalock, iflag);
2865        phba->sli.sli_flag &= ~LPFC_BLOCK_MGMT_IO;
2866        spin_unlock_irqrestore(&phba->hbalock, iflag);
2867}
2868
2869/**
2870 * lpfc_offline_prep - Prepare an HBA to be brought offline
2871 * @phba: pointer to lpfc hba data structure.
     * @mbx_action: flag for mailbox shutdown action.
2872 *
2873 * This routine is invoked to prepare an HBA to be brought offline. It issues
2874 * an unreg_login to all the nodes on all vports and flushes the mailbox
2875 * queue to make the HBA ready to be brought offline.
2876 **/
2877void
2878lpfc_offline_prep(struct lpfc_hba *phba, int mbx_action)
2879{
2880        struct lpfc_vport *vport = phba->pport;
2881        struct lpfc_nodelist  *ndlp, *next_ndlp;
2882        struct lpfc_vport **vports;
2883        struct Scsi_Host *shost;
2884        int i;
2885
2886        if (vport->fc_flag & FC_OFFLINE_MODE)
2887                return;
2888
2889        lpfc_block_mgmt_io(phba, mbx_action);
2890
2891        lpfc_linkdown(phba);
2892
2893        /* Issue an unreg_login to all nodes on all vports */
2894        vports = lpfc_create_vport_work_array(phba);
2895        if (vports != NULL) {
2896                for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
2897                        if (vports[i]->load_flag & FC_UNLOADING)
2898                                continue;
2899                        shost = lpfc_shost_from_vport(vports[i]);
2900                        spin_lock_irq(shost->host_lock);
2901                        vports[i]->vpi_state &= ~LPFC_VPI_REGISTERED;
2902                        vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
2903                        vports[i]->fc_flag &= ~FC_VFI_REGISTERED;
2904                        spin_unlock_irq(shost->host_lock);
2905
2906                        shost = lpfc_shost_from_vport(vports[i]);
2907                        list_for_each_entry_safe(ndlp, next_ndlp,
2908                                                 &vports[i]->fc_nodes,
2909                                                 nlp_listp) {
2910                                if (!NLP_CHK_NODE_ACT(ndlp))
2911                                        continue;
2912                                if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
2913                                        continue;
2914                                if (ndlp->nlp_type & NLP_FABRIC) {
2915                                        lpfc_disc_state_machine(vports[i], ndlp,
2916                                                NULL, NLP_EVT_DEVICE_RECOVERY);
2917                                        lpfc_disc_state_machine(vports[i], ndlp,
2918                                                NULL, NLP_EVT_DEVICE_RM);
2919                                }
2920                                spin_lock_irq(shost->host_lock);
2921                                ndlp->nlp_flag &= ~NLP_NPR_ADISC;
2922                                spin_unlock_irq(shost->host_lock);
2923                                /*
2924                                 * Whenever an SLI4 port goes offline, free the
2925                                 * RPI. Get a new RPI when the adapter port
2926                                 * comes back online.
2927                                 */
2928                                if (phba->sli_rev == LPFC_SLI_REV4)
2929                                        lpfc_sli4_free_rpi(phba, ndlp->nlp_rpi);
2930                                lpfc_unreg_rpi(vports[i], ndlp);
2931                        }
2932                }
2933        }
2934        lpfc_destroy_vport_work_array(phba, vports);
2935
2936        lpfc_sli_mbox_sys_shutdown(phba, mbx_action);
2937}
2938
2939/**
2940 * lpfc_offline - Bring an HBA offline
2941 * @phba: pointer to lpfc hba data structure.
2942 *
2943 * This routine actually brings an HBA offline. It stops all the timers
2944 * associated with the HBA, brings down the SLI layer, and eventually
2945 * marks the HBA as in offline state for the upper layer protocol.
2946 **/
2947void
2948lpfc_offline(struct lpfc_hba *phba)
2949{
2950        struct Scsi_Host  *shost;
2951        struct lpfc_vport **vports;
2952        int i;
2953
2954        if (phba->pport->fc_flag & FC_OFFLINE_MODE)
2955                return;
2956
2957        /* stop port and all timers associated with this hba */
2958        lpfc_stop_port(phba);
2959        vports = lpfc_create_vport_work_array(phba);
2960        if (vports != NULL)
2961                for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
2962                        lpfc_stop_vport_timers(vports[i]);
2963        lpfc_destroy_vport_work_array(phba, vports);
2964        lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
2965                        "0460 Bring Adapter offline\n");
2966        /* Bring down the SLI Layer and cleanup.  The HBA is offline
2967         * now. */
2968        lpfc_sli_hba_down(phba);
2969        spin_lock_irq(&phba->hbalock);
2970        phba->work_ha = 0;
2971        spin_unlock_irq(&phba->hbalock);
2972        vports = lpfc_create_vport_work_array(phba);
2973        if (vports != NULL)
2974                for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
2975                        shost = lpfc_shost_from_vport(vports[i]);
2976                        spin_lock_irq(shost->host_lock);
2977                        vports[i]->work_port_events = 0;
2978                        vports[i]->fc_flag |= FC_OFFLINE_MODE;
2979                        spin_unlock_irq(shost->host_lock);
2980                }
2981        lpfc_destroy_vport_work_array(phba, vports);
2982}
2983
2984/**
2985 * lpfc_scsi_free - Free all the SCSI buffers and IOCBs from driver lists
2986 * @phba: pointer to lpfc hba data structure.
2987 *
2988 * This routine is to free all the SCSI buffers and IOCBs from the driver
2989 * lists back to the kernel. It is called from lpfc_pci_remove_one to free
2990 * the internal resources before the device is removed from the system.
2991 **/
2992static void
2993lpfc_scsi_free(struct lpfc_hba *phba)
2994{
2995        struct lpfc_scsi_buf *sb, *sb_next;
2996        struct lpfc_iocbq *io, *io_next;
2997
2998        spin_lock_irq(&phba->hbalock);
2999
3000        /* Release all the lpfc_scsi_bufs maintained by this host. */
3001
3002        spin_lock(&phba->scsi_buf_list_put_lock);
3003        list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list_put,
3004                                 list) {
3005                list_del(&sb->list);
3006                pci_pool_free(phba->lpfc_scsi_dma_buf_pool, sb->data,
3007                              sb->dma_handle);
3008                kfree(sb);
3009                phba->total_scsi_bufs--;
3010        }
3011        spin_unlock(&phba->scsi_buf_list_put_lock);
3012
3013        spin_lock(&phba->scsi_buf_list_get_lock);
3014        list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list_get,
3015                                 list) {
3016                list_del(&sb->list);
3017                pci_pool_free(phba->lpfc_scsi_dma_buf_pool, sb->data,
3018                              sb->dma_handle);
3019                kfree(sb);
3020                phba->total_scsi_bufs--;
3021        }
3022        spin_unlock(&phba->scsi_buf_list_get_lock);
3023
3024        /* Release all the lpfc_iocbq entries maintained by this host. */
3025        list_for_each_entry_safe(io, io_next, &phba->lpfc_iocb_list, list) {
3026                list_del(&io->list);
3027                kfree(io);
3028                phba->total_iocbq_bufs--;
3029        }
3030
3031        spin_unlock_irq(&phba->hbalock);
3032}
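
    /*
     * Note on the two SCSI buffer lists drained above: allocations are
     * normally served from the "get" list and completed buffers are
     * returned to the "put" list; when the get list runs dry the two are
     * swapped under their locks.  Teardown therefore has to empty both.
     * Where the two list locks nest (see lpfc_sli4_xri_sgl_update below),
     * the get-list lock is taken before the put-list lock.
     */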
3033
3034/**
3035 * lpfc_sli4_xri_sgl_update - update xri-sgl sizing and mapping
3036 * @phba: pointer to lpfc hba data structure.
3037 *
3038 * This routine first calculates the sizes of the current els and allocated
3039 * scsi sgl lists, and then goes through all sgls to update the physical
3040 * XRIs assigned due to port function reset. During port initialization, the
3041 * current els and allocated scsi sgl list counts are 0.
3042 *
3043 * Return codes
3044 *   0 - successful (for now, it always returns 0)
3045 **/
3046int
3047lpfc_sli4_xri_sgl_update(struct lpfc_hba *phba)
3048{
3049        struct lpfc_sglq *sglq_entry = NULL, *sglq_entry_next = NULL;
3050        struct lpfc_scsi_buf *psb = NULL, *psb_next = NULL;
3051        uint16_t i, lxri, xri_cnt, els_xri_cnt, scsi_xri_cnt;
3052        LIST_HEAD(els_sgl_list);
3053        LIST_HEAD(scsi_sgl_list);
3054        int rc;
3055        struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
3056
3057        /*
3058         * update on pci function's els xri-sgl list
3059         */
3060        els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
3061        if (els_xri_cnt > phba->sli4_hba.els_xri_cnt) {
3062                /* els xri-sgl expanded */
3063                xri_cnt = els_xri_cnt - phba->sli4_hba.els_xri_cnt;
3064                lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3065                                "3157 ELS xri-sgl count increased from "
3066                                "%d to %d\n", phba->sli4_hba.els_xri_cnt,
3067                                els_xri_cnt);
3068                /* allocate the additional els sgls */
3069                for (i = 0; i < xri_cnt; i++) {
3070                        sglq_entry = kzalloc(sizeof(struct lpfc_sglq),
3071                                             GFP_KERNEL);
3072                        if (sglq_entry == NULL) {
3073                                lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3074                                                "2562 Failure to allocate an "
3075                                                "ELS sgl entry:%d\n", i);
3076                                rc = -ENOMEM;
3077                                goto out_free_mem;
3078                        }
3079                        sglq_entry->buff_type = GEN_BUFF_TYPE;
3080                        sglq_entry->virt = lpfc_mbuf_alloc(phba, 0,
3081                                                           &sglq_entry->phys);
3082                        if (sglq_entry->virt == NULL) {
3083                                kfree(sglq_entry);
3084                                lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3085                                                "2563 Failure to allocate an "
3086                                                "ELS mbuf:%d\n", i);
3087                                rc = -ENOMEM;
3088                                goto out_free_mem;
3089                        }
3090                        sglq_entry->sgl = sglq_entry->virt;
3091                        memset(sglq_entry->sgl, 0, LPFC_BPL_SIZE);
3092                        sglq_entry->state = SGL_FREED;
3093                        list_add_tail(&sglq_entry->list, &els_sgl_list);
3094                }
3095                spin_lock_irq(&phba->hbalock);
3096                spin_lock(&pring->ring_lock);
3097                list_splice_init(&els_sgl_list, &phba->sli4_hba.lpfc_sgl_list);
3098                spin_unlock(&pring->ring_lock);
3099                spin_unlock_irq(&phba->hbalock);
3100        } else if (els_xri_cnt < phba->sli4_hba.els_xri_cnt) {
3101                /* els xri-sgl shrunk */
3102                xri_cnt = phba->sli4_hba.els_xri_cnt - els_xri_cnt;
3103                lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3104                                "3158 ELS xri-sgl count decreased from "
3105                                "%d to %d\n", phba->sli4_hba.els_xri_cnt,
3106                                els_xri_cnt);
3107                spin_lock_irq(&phba->hbalock);
3108                spin_lock(&pring->ring_lock);
3109                list_splice_init(&phba->sli4_hba.lpfc_sgl_list, &els_sgl_list);
3110                spin_unlock(&pring->ring_lock);
3111                spin_unlock_irq(&phba->hbalock);
3112                /* release extra els sgls from list */
3113                for (i = 0; i < xri_cnt; i++) {
3114                        list_remove_head(&els_sgl_list,
3115                                         sglq_entry, struct lpfc_sglq, list);
3116                        if (sglq_entry) {
3117                                lpfc_mbuf_free(phba, sglq_entry->virt,
3118                                               sglq_entry->phys);
3119                                kfree(sglq_entry);
3120                        }
3121                }
3122                spin_lock_irq(&phba->hbalock);
3123                spin_lock(&pring->ring_lock);
3124                list_splice_init(&els_sgl_list, &phba->sli4_hba.lpfc_sgl_list);
3125                spin_unlock(&pring->ring_lock);
3126                spin_unlock_irq(&phba->hbalock);
3127        } else
3128                lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3129                                "3163 ELS xri-sgl count unchanged: %d\n",
3130                                els_xri_cnt);
3131        phba->sli4_hba.els_xri_cnt = els_xri_cnt;
3132
3133        /* update xris to els sgls on the list */
3134        sglq_entry = NULL;
3135        sglq_entry_next = NULL;
3136        list_for_each_entry_safe(sglq_entry, sglq_entry_next,
3137                                 &phba->sli4_hba.lpfc_sgl_list, list) {
3138                lxri = lpfc_sli4_next_xritag(phba);
3139                if (lxri == NO_XRI) {
3140                        lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3141                                        "2400 Failed to allocate xri for "
3142                                        "ELS sgl\n");
3143                        rc = -ENOMEM;
3144                        goto out_free_mem;
3145                }
3146                sglq_entry->sli4_lxritag = lxri;
3147                sglq_entry->sli4_xritag = phba->sli4_hba.xri_ids[lxri];
3148        }
3149
3150        /*
3151         * update on pci function's allocated scsi xri-sgl list
3152         */
3153        phba->total_scsi_bufs = 0;
3154
3155        /* maximum number of xris available for scsi buffers */
3156        phba->sli4_hba.scsi_xri_max = phba->sli4_hba.max_cfg_param.max_xri -
3157                                      els_xri_cnt;
3158
3159        lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3160                        "2401 Current allocated SCSI xri-sgl count:%d, "
3161                        "maximum SCSI xri count:%d\n",
3162                        phba->sli4_hba.scsi_xri_cnt,
3163                        phba->sli4_hba.scsi_xri_max);
3164
3165        spin_lock_irq(&phba->scsi_buf_list_get_lock);
3166        spin_lock(&phba->scsi_buf_list_put_lock);
3167        list_splice_init(&phba->lpfc_scsi_buf_list_get, &scsi_sgl_list);
3168        list_splice(&phba->lpfc_scsi_buf_list_put, &scsi_sgl_list);
3169        spin_unlock(&phba->scsi_buf_list_put_lock);
3170        spin_unlock_irq(&phba->scsi_buf_list_get_lock);
3171
3172        if (phba->sli4_hba.scsi_xri_cnt > phba->sli4_hba.scsi_xri_max) {
3173                /* max scsi xri shrunk below the allocated scsi buffers */
3174                scsi_xri_cnt = phba->sli4_hba.scsi_xri_cnt -
3175                                        phba->sli4_hba.scsi_xri_max;
3176                /* release the extra allocated scsi buffers */
3177                for (i = 0; i < scsi_xri_cnt; i++) {
3178                        list_remove_head(&scsi_sgl_list, psb,
3179                                         struct lpfc_scsi_buf, list);
3180                        if (psb) {
3181                                pci_pool_free(phba->lpfc_scsi_dma_buf_pool,
3182                                              psb->data, psb->dma_handle);
3183                                kfree(psb);
3184                        }
3185                }
3186                spin_lock_irq(&phba->scsi_buf_list_get_lock);
3187                phba->sli4_hba.scsi_xri_cnt -= scsi_xri_cnt;
3188                spin_unlock_irq(&phba->scsi_buf_list_get_lock);
3189        }
3190
3191        /* update xris associated to remaining allocated scsi buffers */
3192        psb = NULL;
3193        psb_next = NULL;
3194        list_for_each_entry_safe(psb, psb_next, &scsi_sgl_list, list) {
3195                lxri = lpfc_sli4_next_xritag(phba);
3196                if (lxri == NO_XRI) {
3197                        lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3198                                        "2560 Failed to allocate xri for "
3199                                        "scsi buffer\n");
3200                        rc = -ENOMEM;
3201                        goto out_free_mem;
3202                }
3203                psb->cur_iocbq.sli4_lxritag = lxri;
3204                psb->cur_iocbq.sli4_xritag = phba->sli4_hba.xri_ids[lxri];
3205        }
3206        spin_lock_irq(&phba->scsi_buf_list_get_lock);
3207        spin_lock(&phba->scsi_buf_list_put_lock);
3208        list_splice_init(&scsi_sgl_list, &phba->lpfc_scsi_buf_list_get);
3209        INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put);
3210        spin_unlock(&phba->scsi_buf_list_put_lock);
3211        spin_unlock_irq(&phba->scsi_buf_list_get_lock);
3212
3213        return 0;
3214
3215out_free_mem:
3216        lpfc_free_els_sgl_list(phba);
3217        lpfc_scsi_free(phba);
3218        return rc;
3219}
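
    /*
     * Worked example of the sizing above (numbers are illustrative only):
     * if the port reports max_cfg_param.max_xri = 1024 and
     * lpfc_sli4_get_els_iocb_cnt() now returns 256 where the previous
     * els_xri_cnt was 128, then xri_cnt = 256 - 128 = 128 new ELS sgls
     * are allocated, and the SCSI side is capped at
     * scsi_xri_max = 1024 - 256 = 768; any SCSI buffers beyond that cap
     * are released back to the DMA pool.
     */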
3220
3221/**
3222 * lpfc_create_port - Create an FC port
3223 * @phba: pointer to lpfc hba data structure.
3224 * @instance: a unique integer ID to this FC port.
3225 * @dev: pointer to the device data structure.
3226 *
3227 * This routine creates an FC port for the upper layer protocol. The FC port
3228 * can be created on top of either a physical port or a virtual port provided
3229 * by the HBA. This routine also allocates a SCSI host data structure (shost)
3230 * and associates it with the newly created FC port before adding the shost
3231 * to the SCSI layer.
3232 *
3233 * Return codes
3234 *   @vport - pointer to the virtual N_Port data structure.
3235 *   NULL - port create failed.
3236 **/
3237struct lpfc_vport *
3238lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
3239{
3240        struct lpfc_vport *vport;
3241        struct Scsi_Host  *shost;
3242        int error = 0;
3243
3244        if (dev != &phba->pcidev->dev)
3245                shost = scsi_host_alloc(&lpfc_vport_template,
3246                                        sizeof(struct lpfc_vport));
3247        else
3248                shost = scsi_host_alloc(&lpfc_template,
3249                                        sizeof(struct lpfc_vport));
3250        if (!shost)
3251                goto out;
3252
3253        vport = (struct lpfc_vport *) shost->hostdata;
3254        vport->phba = phba;
3255        vport->load_flag |= FC_LOADING;
3256        vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
3257        vport->fc_rscn_flush = 0;
3258
3259        lpfc_get_vport_cfgparam(vport);
3260        shost->unique_id = instance;
3261        shost->max_id = LPFC_MAX_TARGET;
3262        shost->max_lun = vport->cfg_max_luns;
3263        shost->this_id = -1;
3264        shost->max_cmd_len = 16;
3265        if (phba->sli_rev == LPFC_SLI_REV4) {
3266                shost->dma_boundary =
3267                        phba->sli4_hba.pc_sli4_params.sge_supp_len-1;
3268                shost->sg_tablesize = phba->cfg_sg_seg_cnt;
3269        }
3270
3271        /*
3272         * Set initial can_queue value since 0 is no longer supported and
3273         * scsi_add_host will fail. This will be adjusted later based on the
3274         * max xri value determined in hba setup.
3275         */
3276        shost->can_queue = phba->cfg_hba_queue_depth - 10;
3277        if (dev != &phba->pcidev->dev) {
3278                shost->transportt = lpfc_vport_transport_template;
3279                vport->port_type = LPFC_NPIV_PORT;
3280        } else {
3281                shost->transportt = lpfc_transport_template;
3282                vport->port_type = LPFC_PHYSICAL_PORT;
3283        }
3284
3285        /* Initialize all internally managed lists. */
3286        INIT_LIST_HEAD(&vport->fc_nodes);
3287        INIT_LIST_HEAD(&vport->rcv_buffer_list);
3288        spin_lock_init(&vport->work_port_lock);
3289
3290        init_timer(&vport->fc_disctmo);
3291        vport->fc_disctmo.function = lpfc_disc_timeout;
3292        vport->fc_disctmo.data = (unsigned long)vport;
3293
3294        init_timer(&vport->fc_fdmitmo);
3295        vport->fc_fdmitmo.function = lpfc_fdmi_tmo;
3296        vport->fc_fdmitmo.data = (unsigned long)vport;
3297
3298        init_timer(&vport->els_tmofunc);
3299        vport->els_tmofunc.function = lpfc_els_timeout;
3300        vport->els_tmofunc.data = (unsigned long)vport;
3301
3302        init_timer(&vport->delayed_disc_tmo);
3303        vport->delayed_disc_tmo.function = lpfc_delayed_disc_tmo;
3304        vport->delayed_disc_tmo.data = (unsigned long)vport;
3305
3306        error = scsi_add_host_with_dma(shost, dev, &phba->pcidev->dev);
3307        if (error)
3308                goto out_put_shost;
3309
3310        spin_lock_irq(&phba->hbalock);
3311        list_add_tail(&vport->listentry, &phba->port_list);
3312        spin_unlock_irq(&phba->hbalock);
3313        return vport;
3314
3315out_put_shost:
3316        scsi_host_put(shost);
3317out:
3318        return NULL;
3319}
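
    /*
     * Illustrative call (a sketch, not from this file): the physical port
     * is created by passing the PCI device itself as @dev:
     *
     *         vport = lpfc_create_port(phba, lpfc_get_instance(),
     *                                  &phba->pcidev->dev);
     *
     * Passing any other device selects lpfc_vport_template and marks the
     * result as an LPFC_NPIV_PORT (see the dev comparison above).
     */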
3320
3321/**
3322 * destroy_port -  destroy an FC port
3323 * @vport: pointer to an lpfc virtual N_Port data structure.
3324 *
3325 * This routine destroys an FC port from the upper layer protocol. All the
3326 * resources associated with the port are released.
3327 **/
3328void
3329destroy_port(struct lpfc_vport *vport)
3330{
3331        struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
3332        struct lpfc_hba  *phba = vport->phba;
3333
3334        lpfc_debugfs_terminate(vport);
3335        fc_remove_host(shost);
3336        scsi_remove_host(shost);
3337
3338        spin_lock_irq(&phba->hbalock);
3339        list_del_init(&vport->listentry);
3340        spin_unlock_irq(&phba->hbalock);
3341
3342        lpfc_cleanup(vport);
3343        return;
3344}
3345
3346/**
3347 * lpfc_get_instance - Get a unique integer ID
3348 *
3349 * This routine allocates a unique integer ID from lpfc_hba_index pool. It
3350 * uses the kernel idr facility to perform the task.
3351 *
3352 * Return codes:
3353 *   instance - a unique integer ID allocated as the new instance.
3354 *   -1 - lpfc get instance failed.
3355 **/
3356int
3357lpfc_get_instance(void)
3358{
3359        int ret;
3360
3361        ret = idr_alloc(&lpfc_hba_index, NULL, 0, 0, GFP_KERNEL);
3362        return ret < 0 ? -1 : ret;
3363}
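
    /*
     * Sketch of the expected pairing (the release path is an assumption,
     * not shown here): an ID obtained above is returned to the pool with
     * idr_remove() once the host goes away:
     *
     *         instance = lpfc_get_instance();
     *         if (instance == -1)
     *                 return -ENOMEM;
     *         ...
     *         idr_remove(&lpfc_hba_index, instance);
     */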
3364
3365/**
3366 * lpfc_scan_finished - method for SCSI layer to detect whether scan is done
3367 * @shost: pointer to SCSI host data structure.
3368 * @time: elapsed time of the scan in jiffies.
3369 *
3370 * This routine is called by the SCSI layer with a SCSI host to determine
3371 * whether the host scan is finished.
3372 *
3373 * Note: there is no scan_start function as adapter initialization will have
3374 * asynchronously kicked off the link initialization.
3375 *
3376 * Return codes
3377 *   0 - SCSI host scan is not over yet.
3378 *   1 - SCSI host scan is over.
3379 **/
3380int lpfc_scan_finished(struct Scsi_Host *shost, unsigned long time)
3381{
3382        struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
3383        struct lpfc_hba   *phba = vport->phba;
3384        int stat = 0;
3385
3386        spin_lock_irq(shost->host_lock);
3387
3388        if (vport->load_flag & FC_UNLOADING) {
3389                stat = 1;
3390                goto finished;
3391        }
3392        if (time >= msecs_to_jiffies(30 * 1000)) {
3393                lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3394                                "0461 Scanning longer than 30 "
3395                                "seconds.  Continuing initialization\n");
3396                stat = 1;
3397                goto finished;
3398        }
3399        if (time >= msecs_to_jiffies(15 * 1000) &&
3400            phba->link_state <= LPFC_LINK_DOWN) {
3401                lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3402                                "0465 Link down longer than 15 "
3403                                "seconds.  Continuing initialization\n");
3404                stat = 1;
3405                goto finished;
3406        }
3407
3408        if (vport->port_state != LPFC_VPORT_READY)
3409                goto finished;
3410        if (vport->num_disc_nodes || vport->fc_prli_sent)
3411                goto finished;
3412        if (vport->fc_map_cnt == 0 && time < msecs_to_jiffies(2 * 1000))
3413                goto finished;
3414        if ((phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) != 0)
3415                goto finished;
3416
3417        stat = 1;
3418
3419finished:
3420        spin_unlock_irq(shost->host_lock);
3421        return stat;
3422}
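
    /*
     * Timing summary of the checks above: the scan is force-completed
     * after 30 seconds regardless of state, and after 15 seconds if the
     * link never came up.  Otherwise it completes once the vport is
     * READY with no discovery nodes and no PRLIs outstanding, no mailbox
     * command active, and, when no targets have been mapped, only after
     * at least 2 seconds have elapsed.
     */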
3423
3424/**
3425 * lpfc_host_attrib_init - Initialize SCSI host attributes on a FC port
3426 * @shost: pointer to SCSI host data structure.
3427 *
3428 * This routine initializes a given SCSI host attributes on a FC port. The
3429 * SCSI host can be either on top of a physical port or a virtual port.
3430 **/
3431void lpfc_host_attrib_init(struct Scsi_Host *shost)
3432{
3433        struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
3434        struct lpfc_hba   *phba = vport->phba;
3435        /*
3436         * Set fixed host attributes.  Must be done after lpfc_sli_hba_setup().
3437         */
3438
3439        fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
3440        fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
3441        fc_host_supported_classes(shost) = FC_COS_CLASS3;
3442
3443        memset(fc_host_supported_fc4s(shost), 0,
3444               sizeof(fc_host_supported_fc4s(shost)));
3445        fc_host_supported_fc4s(shost)[2] = 1;
3446        fc_host_supported_fc4s(shost)[7] = 1;
3447
3448        lpfc_vport_symbolic_node_name(vport, fc_host_symbolic_name(shost),
3449                                 sizeof fc_host_symbolic_name(shost));
3450
3451        fc_host_supported_speeds(shost) = 0;
3452        if (phba->lmt & LMT_16Gb)
3453                fc_host_supported_speeds(shost) |= FC_PORTSPEED_16GBIT;
3454        if (phba->lmt & LMT_10Gb)
3455                fc_host_supported_speeds(shost) |= FC_PORTSPEED_10GBIT;
3456        if (phba->lmt & LMT_8Gb)
3457                fc_host_supported_speeds(shost) |= FC_PORTSPEED_8GBIT;
3458        if (phba->lmt & LMT_4Gb)
3459                fc_host_supported_speeds(shost) |= FC_PORTSPEED_4GBIT;
3460        if (phba->lmt & LMT_2Gb)
3461                fc_host_supported_speeds(shost) |= FC_PORTSPEED_2GBIT;
3462        if (phba->lmt & LMT_1Gb)
3463                fc_host_supported_speeds(shost) |= FC_PORTSPEED_1GBIT;
3464
3465        fc_host_maxframe_size(shost) =
3466                (((uint32_t) vport->fc_sparam.cmn.bbRcvSizeMsb & 0x0F) << 8) |
3467                (uint32_t) vport->fc_sparam.cmn.bbRcvSizeLsb;
3468
3469        fc_host_dev_loss_tmo(shost) = vport->cfg_devloss_tmo;
3470
3471        /* This value is also unchanging */
3472        memset(fc_host_active_fc4s(shost), 0,
3473               sizeof(fc_host_active_fc4s(shost)));
3474        fc_host_active_fc4s(shost)[2] = 1;
3475        fc_host_active_fc4s(shost)[7] = 1;
3476
3477        fc_host_max_npiv_vports(shost) = phba->max_vpi;
3478        spin_lock_irq(shost->host_lock);
3479        vport->load_flag &= ~FC_LOADING;
3480        spin_unlock_irq(shost->host_lock);
3481}
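
    /*
     * Worked example of the maxframe computation above: with service
     * parameter bytes bbRcvSizeMsb = 0x08 and bbRcvSizeLsb = 0x00
     * (values assumed for illustration), the result is
     * ((0x08 & 0x0F) << 8) | 0x00 = 2048 bytes.
     */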
3482
3483/**
3484 * lpfc_stop_port_s3 - Stop SLI3 device port
3485 * @phba: pointer to lpfc hba data structure.
3486 *
3487 * This routine is invoked to stop an SLI3 device port, it stops the device
3488 * from generating interrupts and stops the device driver's timers for the
3489 * device.
3490 **/
3491static void
3492lpfc_stop_port_s3(struct lpfc_hba *phba)
3493{
3494        /* Clear all interrupt enable conditions */
3495        writel(0, phba->HCregaddr);
3496        readl(phba->HCregaddr); /* flush */
3497        /* Clear all pending interrupts */
3498        writel(0xffffffff, phba->HAregaddr);
3499        readl(phba->HAregaddr); /* flush */
3500
3501        /* Reset some HBA SLI setup states */
3502        lpfc_stop_hba_timers(phba);
3503        phba->pport->work_port_events = 0;
3504}
3505
3506/**
3507 * lpfc_stop_port_s4 - Stop SLI4 device port
3508 * @phba: pointer to lpfc hba data structure.
3509 *
3510 * This routine is invoked to stop an SLI4 device port, it stops the device
3511 * from generating interrupts and stops the device driver's timers for the
3512 * device.
3513 **/
3514static void
3515lpfc_stop_port_s4(struct lpfc_hba *phba)
3516{
3517        /* Reset some HBA SLI4 setup states */
3518        lpfc_stop_hba_timers(phba);
3519        phba->pport->work_port_events = 0;
3520        phba->sli4_hba.intr_enable = 0;
3521}
3522
3523/**
3524 * lpfc_stop_port - Wrapper function for stopping hba port
3525 * @phba: Pointer to HBA context object.
3526 *
3527 * This routine wraps the actual SLI3 or SLI4 hba stop port routine from
3528 * the API jump table function pointer from the lpfc_hba struct.
3529 **/
3530void
3531lpfc_stop_port(struct lpfc_hba *phba)
3532{
3533        phba->lpfc_stop_port(phba);
3534}
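
    /*
     * A minimal sketch of how the jump table is assumed to be wired up
     * during API setup (the actual assignment lives elsewhere, keyed off
     * the PCI device group):
     *
     *         switch (dev_grp) {
     *         case LPFC_PCI_DEV_LP:
     *                 phba->lpfc_stop_port = lpfc_stop_port_s3;
     *                 break;
     *         case LPFC_PCI_DEV_OC:
     *                 phba->lpfc_stop_port = lpfc_stop_port_s4;
     *                 break;
     *         }
     */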
3535
3536/**
3537 * lpfc_fcf_redisc_wait_start_timer - Start fcf rediscover wait timer
3538 * @phba: Pointer to hba for which this call is being executed.
3539 *
3540 * This routine starts the timer waiting for the FCF rediscovery to complete.
3541 **/
3542void
3543lpfc_fcf_redisc_wait_start_timer(struct lpfc_hba *phba)
3544{
3545        unsigned long fcf_redisc_wait_tmo =
3546                (jiffies + msecs_to_jiffies(LPFC_FCF_REDISCOVER_WAIT_TMO));
3547        /* Start fcf rediscovery wait period timer */
3548        mod_timer(&phba->fcf.redisc_wait, fcf_redisc_wait_tmo);
3549        spin_lock_irq(&phba->hbalock);
3550        /* Allow action to new fcf asynchronous event */
3551        phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_SCAN_DONE);
3552        /* Mark the FCF rediscovery pending state */
3553        phba->fcf.fcf_flag |= FCF_REDISC_PEND;
3554        spin_unlock_irq(&phba->hbalock);
3555}
3556
3557/**
3558 * lpfc_sli4_fcf_redisc_wait_tmo - FCF table rediscover wait timeout
3559 * @ptr: pointer to the lpfc hba data structure, cast to unsigned long.
3560 *
3561 * This routine is invoked when the wait for FCF table rediscovery times
3562 * out. If new FCF record(s) have been discovered during the wait
3563 * period, a new FCF event is added to the FCoE async event list and
3564 * the worker thread is woken up for processing from the worker
3565 * thread context.
3566 **/
3567static void
3568lpfc_sli4_fcf_redisc_wait_tmo(unsigned long ptr)
3569{
3570        struct lpfc_hba *phba = (struct lpfc_hba *)ptr;
3571
3572        /* Don't send FCF rediscovery event if timer cancelled */
3573        spin_lock_irq(&phba->hbalock);
3574        if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND)) {
3575                spin_unlock_irq(&phba->hbalock);
3576                return;
3577        }
3578        /* Clear FCF rediscovery timer pending flag */
3579        phba->fcf.fcf_flag &= ~FCF_REDISC_PEND;
3580        /* FCF rediscovery event to worker thread */
3581        phba->fcf.fcf_flag |= FCF_REDISC_EVT;
3582        spin_unlock_irq(&phba->hbalock);
3583        lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
3584                        "2776 FCF rediscover quiescent timer expired\n");
3585        /* wake up worker thread */
3586        lpfc_worker_wake_up(phba);
3587}
3588
3589/**
3590 * lpfc_sli4_parse_latt_fault - Parse sli4 link-attention link fault code
3591 * @phba: pointer to lpfc hba data structure.
3592 * @acqe_link: pointer to the async link completion queue entry.
3593 *
3594 * This routine is to parse the SLI4 link-attention link fault code and
3595 * translate it into the base driver's read link attention mailbox command
3596 * status.
3597 *
3598 * Return: Link-attention status in terms of base driver's coding.
3599 **/
3600static uint16_t
3601lpfc_sli4_parse_latt_fault(struct lpfc_hba *phba,
3602                           struct lpfc_acqe_link *acqe_link)
3603{
3604        uint16_t latt_fault;
3605
3606        switch (bf_get(lpfc_acqe_link_fault, acqe_link)) {
3607        case LPFC_ASYNC_LINK_FAULT_NONE:
3608        case LPFC_ASYNC_LINK_FAULT_LOCAL:
3609        case LPFC_ASYNC_LINK_FAULT_REMOTE:
3610                latt_fault = 0;
3611                break;
3612        default:
3613                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3614                                "0398 Invalid link fault code: x%x\n",
3615                                bf_get(lpfc_acqe_link_fault, acqe_link));
3616                latt_fault = MBXERR_ERROR;
3617                break;
3618        }
3619        return latt_fault;
3620}
3621
3622/**
3623 * lpfc_sli4_parse_latt_type - Parse sli4 link attention type
3624 * @phba: pointer to lpfc hba data structure.
3625 * @acqe_link: pointer to the async link completion queue entry.
3626 *
3627 * This routine is to parse the SLI4 link attention type and translate it
3628 * into the base driver's link attention type coding.
3629 *
3630 * Return: Link attention type in terms of base driver's coding.
3631 **/
3632static uint8_t
3633lpfc_sli4_parse_latt_type(struct lpfc_hba *phba,
3634                          struct lpfc_acqe_link *acqe_link)
3635{
3636        uint8_t att_type;
3637
3638        switch (bf_get(lpfc_acqe_link_status, acqe_link)) {
3639        case LPFC_ASYNC_LINK_STATUS_DOWN:
3640        case LPFC_ASYNC_LINK_STATUS_LOGICAL_DOWN:
3641                att_type = LPFC_ATT_LINK_DOWN;
3642                break;
3643        case LPFC_ASYNC_LINK_STATUS_UP:
3644                /* Ignore physical link up events - wait for logical link up */
3645                att_type = LPFC_ATT_RESERVED;
3646                break;
3647        case LPFC_ASYNC_LINK_STATUS_LOGICAL_UP:
3648                att_type = LPFC_ATT_LINK_UP;
3649                break;
3650        default:
3651                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3652                                "0399 Invalid link attention type: x%x\n",
3653                                bf_get(lpfc_acqe_link_status, acqe_link));
3654                att_type = LPFC_ATT_RESERVED;
3655                break;
3656        }
3657        return att_type;
3658}
3659
3660/**
3661 * lpfc_sli4_parse_latt_link_speed - Parse sli4 link-attention link speed
3662 * @phba: pointer to lpfc hba data structure.
3663 * @acqe_link: pointer to the async link completion queue entry.
3664 *
3665 * This routine is to parse the SLI4 link-attention link speed and translate
3666 * it into the base driver's link-attention link speed coding.
3667 *
3668 * Return: Link-attention link speed in terms of base driver's coding.
3669 **/
3670static uint8_t
3671lpfc_sli4_parse_latt_link_speed(struct lpfc_hba *phba,
3672                                struct lpfc_acqe_link *acqe_link)
3673{
3674        uint8_t link_speed;
3675
3676        switch (bf_get(lpfc_acqe_link_speed, acqe_link)) {
3677        case LPFC_ASYNC_LINK_SPEED_ZERO:
3678        case LPFC_ASYNC_LINK_SPEED_10MBPS:
3679        case LPFC_ASYNC_LINK_SPEED_100MBPS:
3680                link_speed = LPFC_LINK_SPEED_UNKNOWN;
3681                break;
3682        case LPFC_ASYNC_LINK_SPEED_1GBPS:
3683                link_speed = LPFC_LINK_SPEED_1GHZ;
3684                break;
3685        case LPFC_ASYNC_LINK_SPEED_10GBPS:
3686                link_speed = LPFC_LINK_SPEED_10GHZ;
3687                break;
3688        default:
3689                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3690                                "0483 Invalid link-attention link speed: x%x\n",
3691                                bf_get(lpfc_acqe_link_speed, acqe_link));
3692                link_speed = LPFC_LINK_SPEED_UNKNOWN;
3693                break;
3694        }
3695        return link_speed;
3696}
3697
3698/**
3699 * lpfc_sli_port_speed_get - Get an SLI3 FC port's link speed in Mbps
3700 * @phba: pointer to lpfc hba data structure.
3701 *
3702 * This routine is to get an SLI3 FC port's link speed in Mbps.
3703 *
3704 * Return: link speed in terms of Mbps.
3705 **/
3706uint32_t
3707lpfc_sli_port_speed_get(struct lpfc_hba *phba)
3708{
3709        uint32_t link_speed;
3710
3711        if (!lpfc_is_link_up(phba))
3712                return 0;
3713
3714        switch (phba->fc_linkspeed) {
3715        case LPFC_LINK_SPEED_1GHZ:
3716                link_speed = 1000;
3717                break;
3718        case LPFC_LINK_SPEED_2GHZ:
3719                link_speed = 2000;
3720                break;
3721        case LPFC_LINK_SPEED_4GHZ:
3722                link_speed = 4000;
3723                break;
3724        case LPFC_LINK_SPEED_8GHZ:
3725                link_speed = 8000;
3726                break;
3727        case LPFC_LINK_SPEED_10GHZ:
3728                link_speed = 10000;
3729                break;
3730        case LPFC_LINK_SPEED_16GHZ:
3731                link_speed = 16000;
3732                break;
3733        default:
3734                link_speed = 0;
3735        }
3736        return link_speed;
3737}
3738
3739/**
3740 * lpfc_sli4_port_speed_parse - Parse async evt link speed code to link speed
3741 * @phba: pointer to lpfc hba data structure.
3742 * @evt_code: asynchronous event code.
3743 * @speed_code: asynchronous event link speed code.
3744 *
3745 * This routine is to parse the given SLI4 async event link speed code into
3746 * a link speed value in Mbps.
3747 *
3748 * Return: link speed in terms of Mbps.
3749 **/
3750static uint32_t
3751lpfc_sli4_port_speed_parse(struct lpfc_hba *phba, uint32_t evt_code,
3752                           uint8_t speed_code)
3753{
3754        uint32_t port_speed;
3755
3756        switch (evt_code) {
3757        case LPFC_TRAILER_CODE_LINK:
3758                switch (speed_code) {
3759                case LPFC_EVT_CODE_LINK_NO_LINK:
3760                        port_speed = 0;
3761                        break;
3762                case LPFC_EVT_CODE_LINK_10_MBIT:
3763                        port_speed = 10;
3764                        break;
3765                case LPFC_EVT_CODE_LINK_100_MBIT:
3766                        port_speed = 100;
3767                        break;
3768                case LPFC_EVT_CODE_LINK_1_GBIT:
3769                        port_speed = 1000;
3770                        break;
3771                case LPFC_EVT_CODE_LINK_10_GBIT:
3772                        port_speed = 10000;
3773                        break;
3774                default:
3775                        port_speed = 0;
3776                }
3777                break;
3778        case LPFC_TRAILER_CODE_FC:
3779                switch (speed_code) {
3780                case LPFC_EVT_CODE_FC_NO_LINK:
3781                        port_speed = 0;
3782                        break;
3783                case LPFC_EVT_CODE_FC_1_GBAUD:
3784                        port_speed = 1000;
3785                        break;
3786                case LPFC_EVT_CODE_FC_2_GBAUD:
3787                        port_speed = 2000;
3788                        break;
3789                case LPFC_EVT_CODE_FC_4_GBAUD:
3790                        port_speed = 4000;
3791                        break;
3792                case LPFC_EVT_CODE_FC_8_GBAUD:
3793                        port_speed = 8000;
3794                        break;
3795                case LPFC_EVT_CODE_FC_10_GBAUD:
3796                        port_speed = 10000;
3797                        break;
3798                case LPFC_EVT_CODE_FC_16_GBAUD:
3799                        port_speed = 16000;
3800                        break;
3801                default:
3802                        port_speed = 0;
3803                }
3804                break;
3805        default:
3806                port_speed = 0;
3807        }
3808        return port_speed;
3809}
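
    /*
     * Example translations from the tables above:
     *
     *         lpfc_sli4_port_speed_parse(phba, LPFC_TRAILER_CODE_FC,
     *                                    LPFC_EVT_CODE_FC_8_GBAUD)   -> 8000
     *         lpfc_sli4_port_speed_parse(phba, LPFC_TRAILER_CODE_LINK,
     *                                    LPFC_EVT_CODE_LINK_1_GBIT)  -> 1000
     *
     * Unknown codes fall through to 0 (no link / unknown speed).
     */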
3810
3811/**
3812 * lpfc_sli4_async_link_evt - Process the asynchronous FCoE link event
3813 * @phba: pointer to lpfc hba data structure.
3814 * @acqe_link: pointer to the async link completion queue entry.
3815 *
3816 * This routine is to handle the SLI4 asynchronous FCoE link event.
3817 **/
3818static void
3819lpfc_sli4_async_link_evt(struct lpfc_hba *phba,
3820                         struct lpfc_acqe_link *acqe_link)
3821{
3822        struct lpfc_dmabuf *mp;
3823        LPFC_MBOXQ_t *pmb;
3824        MAILBOX_t *mb;
3825        struct lpfc_mbx_read_top *la;
3826        uint8_t att_type;
3827        int rc;
3828
3829        att_type = lpfc_sli4_parse_latt_type(phba, acqe_link);
3830        if (att_type != LPFC_ATT_LINK_DOWN && att_type != LPFC_ATT_LINK_UP)
3831                return;
3832        phba->fcoe_eventtag = acqe_link->event_tag;
3833        pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
3834        if (!pmb) {
3835                lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3836                                "0395 The mboxq allocation failed\n");
3837                return;
3838        }
3839        mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
3840        if (!mp) {
3841                lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3842                                "0396 The lpfc_dmabuf allocation failed\n");
3843                goto out_free_pmb;
3844        }
3845        mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
3846        if (!mp->virt) {
3847                lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3848                                "0397 The mbuf allocation failed\n");
3849                goto out_free_dmabuf;
3850        }
3851
3852        /* Cleanup any outstanding ELS commands */
3853        lpfc_els_flush_all_cmd(phba);
3854
3855        /* Block ELS IOCBs until we have processed the link event */
3856        phba->sli.ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT;
3857
3858        /* Update link event statistics */
3859        phba->sli.slistat.link_event++;
3860
3861        /* Create lpfc_handle_latt mailbox command from link ACQE */
3862        lpfc_read_topology(phba, pmb, mp);
3863        pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology;
3864        pmb->vport = phba->pport;
3865
3866        /* Keep the link status for extra SLI4 state machine reference */
3867        phba->sli4_hba.link_state.speed =
3868                        lpfc_sli4_port_speed_parse(phba, LPFC_TRAILER_CODE_LINK,
3869                                bf_get(lpfc_acqe_link_speed, acqe_link));
3870        phba->sli4_hba.link_state.duplex =
3871                                bf_get(lpfc_acqe_link_duplex, acqe_link);
3872        phba->sli4_hba.link_state.status =
3873                                bf_get(lpfc_acqe_link_status, acqe_link);
3874        phba->sli4_hba.link_state.type =
3875                                bf_get(lpfc_acqe_link_type, acqe_link);
3876        phba->sli4_hba.link_state.number =
3877                                bf_get(lpfc_acqe_link_number, acqe_link);
3878        phba->sli4_hba.link_state.fault =
3879                                bf_get(lpfc_acqe_link_fault, acqe_link);
3880        phba->sli4_hba.link_state.logical_speed =
3881                        bf_get(lpfc_acqe_logical_link_speed, acqe_link) * 10;
3882
3883        lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3884                        "2900 Async FC/FCoE Link event - Speed:%dGBit "
3885                        "duplex:x%x LA Type:x%x Port Type:%d Port Number:%d "
3886                        "Logical speed:%dMbps Fault:%d\n",
3887                        phba->sli4_hba.link_state.speed,
3888                        phba->sli4_hba.link_state.duplex,
3889                        phba->sli4_hba.link_state.status,
3890                        phba->sli4_hba.link_state.type,
3891                        phba->sli4_hba.link_state.number,
3892                        phba->sli4_hba.link_state.logical_speed,
3893                        phba->sli4_hba.link_state.fault);
3894        /*
3895         * For FC Mode: issue the READ_TOPOLOGY mailbox command to fetch
3896         * topology info. Note: Optional for non FC-AL ports.
3897         */
3898        if (!(phba->hba_flag & HBA_FCOE_MODE)) {
3899                rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
3900                if (rc == MBX_NOT_FINISHED)
3901                        goto out_free_dmabuf;
3902                return;
3903        }
3904        /*
3905         * For FCoE Mode: fill in all the topology information we need and call
3906         * the READ_TOPOLOGY completion routine to continue without actually
3907         * sending the READ_TOPOLOGY mailbox command to the port.
3908         */
3909        /* Parse and translate status field */
3910        mb = &pmb->u.mb;
3911        mb->mbxStatus = lpfc_sli4_parse_latt_fault(phba, acqe_link);
3912
3913        /* Parse and translate link attention fields */
3914        la = (struct lpfc_mbx_read_top *) &pmb->u.mb.un.varReadTop;
3915        la->eventTag = acqe_link->event_tag;
3916        bf_set(lpfc_mbx_read_top_att_type, la, att_type);
3917        bf_set(lpfc_mbx_read_top_link_spd, la,
3918               lpfc_sli4_parse_latt_link_speed(phba, acqe_link));
3919
3920        /* Fake the following irrelevant fields */
3921        bf_set(lpfc_mbx_read_top_topology, la, LPFC_TOPOLOGY_PT_PT);
3922        bf_set(lpfc_mbx_read_top_alpa_granted, la, 0);
3923        bf_set(lpfc_mbx_read_top_il, la, 0);
3924        bf_set(lpfc_mbx_read_top_pb, la, 0);
3925        bf_set(lpfc_mbx_read_top_fa, la, 0);
3926        bf_set(lpfc_mbx_read_top_mm, la, 0);
3927
3928        /* Invoke the lpfc_handle_latt mailbox command callback function */
3929        lpfc_mbx_cmpl_read_topology(phba, pmb);
3930
3931        return;
3932
3933out_free_dmabuf:
            /* Also free the mbuf payload if it was allocated */
            if (mp->virt)
                    lpfc_mbuf_free(phba, mp->virt, mp->phys);
3934        kfree(mp);
3935out_free_pmb:
3936        mempool_free(pmb, phba->mbox_mem_pool);
3937}
3938
3939/**
3940 * lpfc_sli4_async_fc_evt - Process the asynchronous FC link event
3941 * @phba: pointer to lpfc hba data structure.
3942 * @acqe_fc: pointer to the async fc completion queue entry.
3943 *
3944 * This routine is to handle the SLI4 asynchronous FC event. It will simply log
3945 * that the event was received and then issue a read_topology mailbox command so
3946 * that the rest of the driver will treat it the same as SLI3.
3947 **/
3948static void
3949lpfc_sli4_async_fc_evt(struct lpfc_hba *phba, struct lpfc_acqe_fc_la *acqe_fc)
3950{
3951        struct lpfc_dmabuf *mp;
3952        LPFC_MBOXQ_t *pmb;
3953        int rc;
3954
3955        if (bf_get(lpfc_trailer_type, acqe_fc) !=
3956            LPFC_FC_LA_EVENT_TYPE_FC_LINK) {
3957                lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3958                                "2895 Non FC link Event detected.(%d)\n",
3959                                bf_get(lpfc_trailer_type, acqe_fc));
3960                return;
3961        }
3962        /* Keep the link status for extra SLI4 state machine reference */
3963        phba->sli4_hba.link_state.speed =
3964                        lpfc_sli4_port_speed_parse(phba, LPFC_TRAILER_CODE_FC,
3965                                bf_get(lpfc_acqe_fc_la_speed, acqe_fc));
3966        phba->sli4_hba.link_state.duplex = LPFC_ASYNC_LINK_DUPLEX_FULL;
3967        phba->sli4_hba.link_state.topology =
3968                                bf_get(lpfc_acqe_fc_la_topology, acqe_fc);
3969        phba->sli4_hba.link_state.status =
3970                                bf_get(lpfc_acqe_fc_la_att_type, acqe_fc);
3971        phba->sli4_hba.link_state.type =
3972                                bf_get(lpfc_acqe_fc_la_port_type, acqe_fc);
3973        phba->sli4_hba.link_state.number =
3974                                bf_get(lpfc_acqe_fc_la_port_number, acqe_fc);
3975        phba->sli4_hba.link_state.fault =
3976                                bf_get(lpfc_acqe_link_fault, acqe_fc);
3977        phba->sli4_hba.link_state.logical_speed =
3978                                bf_get(lpfc_acqe_fc_la_llink_spd, acqe_fc) * 10;
3979        lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3980                        "2896 Async FC event - Speed:%dGBaud Topology:x%x "
3981                        "LA Type:x%x Port Type:%d Port Number:%d Logical speed:"
3982                        "%dMbps Fault:%d\n",
3983                        phba->sli4_hba.link_state.speed,
3984                        phba->sli4_hba.link_state.topology,
3985                        phba->sli4_hba.link_state.status,
3986                        phba->sli4_hba.link_state.type,
3987                        phba->sli4_hba.link_state.number,
3988                        phba->sli4_hba.link_state.logical_speed,
3989                        phba->sli4_hba.link_state.fault);
3990        pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
3991        if (!pmb) {
3992                lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3993                                "2897 The mboxq allocation failed\n");
3994                return;
3995        }
3996        mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
3997        if (!mp) {
3998                lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3999                                "2898 The lpfc_dmabuf allocation failed\n");
4000                goto out_free_pmb;
4001        }
4002        mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
4003        if (!mp->virt) {
4004                lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
4005                                "2899 The mbuf allocation failed\n");
4006                goto out_free_dmabuf;
4007        }
4008
4009        /* Cleanup any outstanding ELS commands */
4010        lpfc_els_flush_all_cmd(phba);
4011
4012        /* Block ELS IOCBs until we have processed the link event */
4013        phba->sli.ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT;
4014
4015        /* Update link event statistics */
4016        phba->sli.slistat.link_event++;
4017
4018        /* Create lpfc_handle_latt mailbox command from link ACQE */
4019        lpfc_read_topology(phba, pmb, mp);
4020        pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology;
4021        pmb->vport = phba->pport;
4022
4023        rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
4024        if (rc == MBX_NOT_FINISHED)
4025                goto out_free_dmabuf;
4026        return;
4027
4028out_free_dmabuf:
4029        kfree(mp);
4030out_free_pmb:
4031        mempool_free(pmb, phba->mbox_mem_pool);
4032}
4033
4034/**
4035 * lpfc_sli4_async_sli_evt - Process the asynchronous SLI event
4036 * @phba: pointer to lpfc hba data structure.
4037 * @acqe_sli: pointer to the async SLI completion queue entry.
4038 *
4039 * This routine is to handle the SLI4 asynchronous SLI events.
4040 **/
4041static void
4042lpfc_sli4_async_sli_evt(struct lpfc_hba *phba, struct lpfc_acqe_sli *acqe_sli)
4043{
4044        char port_name;
4045        char message[128];
4046        uint8_t status;
4047        struct lpfc_acqe_misconfigured_event *misconfigured;
4048
4049        /* Special case: the misconfigured event carries data for all ports */
4050        if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
4051                 LPFC_SLI_INTF_IF_TYPE_2) ||
4052                (bf_get(lpfc_trailer_type, acqe_sli) !=
4053                        LPFC_SLI_EVENT_TYPE_MISCONFIGURED)) {
4054                lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4055                                "2901 Async SLI event - Event Data1:x%08x Event Data2:"
4056                                "x%08x SLI Event Type:%d\n",
4057                                acqe_sli->event_data1, acqe_sli->event_data2,
4058                                bf_get(lpfc_trailer_type, acqe_sli));
4059                return;
4060        }
4061
4062        port_name = phba->Port[0];
4063        if (port_name == 0x00)
4064                port_name = '?'; /* port name is empty */
4065
4066        misconfigured = (struct lpfc_acqe_misconfigured_event *)
4067                                        &acqe_sli->event_data1;
4068
4069        /* fetch the status for this port */
4070        switch (phba->sli4_hba.lnk_info.lnk_no) {
4071        case LPFC_LINK_NUMBER_0:
4072                status = bf_get(lpfc_sli_misconfigured_port0,
4073                                        &misconfigured->theEvent);
4074                break;
4075        case LPFC_LINK_NUMBER_1:
4076                status = bf_get(lpfc_sli_misconfigured_port1,
4077                                        &misconfigured->theEvent);
4078                break;
4079        case LPFC_LINK_NUMBER_2:
4080                status = bf_get(lpfc_sli_misconfigured_port2,
4081                                        &misconfigured->theEvent);
4082                break;
4083        case LPFC_LINK_NUMBER_3:
4084                status = bf_get(lpfc_sli_misconfigured_port3,
4085                                        &misconfigured->theEvent);
4086                break;
4087        default:
4088                status = ~LPFC_SLI_EVENT_STATUS_VALID;
4089                break;
4090        }
4091
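            /* Translate the reported optics status into an operator-readable message */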
4092        switch (status) {
4093        case LPFC_SLI_EVENT_STATUS_VALID:
4094                return; /* no message if the sfp is okay */
4095        case LPFC_SLI_EVENT_STATUS_NOT_PRESENT:
4096                sprintf(message, "Optics faulted/incorrectly installed/not " \
4097                                "installed - Reseat optics, if issue not "
4098                                "resolved, replace.");
4099                break;
4100        case LPFC_SLI_EVENT_STATUS_WRONG_TYPE:
4101                sprintf(message,
4102                        "Optics of two types installed - Remove one optic or " \
4103                        "install matching pair of optics.");
4104                break;
4105        case LPFC_SLI_EVENT_STATUS_UNSUPPORTED:
4106                sprintf(message, "Incompatible optics - Replace with " \
4107                                "compatible optics for card to function.");
4108                break;
4109        default:
4110                /* firmware is reporting a status we don't know about */
4111                sprintf(message, "Unknown event status x%02x", status);
4112                break;
4113        }
4114
4115        lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
4116                        "3176 Misconfigured Physical Port - "
4117                        "Port Name %c %s\n", port_name, message);
4118}
4119
4120/**
4121 * lpfc_sli4_perform_vport_cvl - Perform clear virtual link on a vport
4122 * @vport: pointer to vport data structure.
4123 *
4124 * This routine is to perform Clear Virtual Link (CVL) on a vport in
4125 * response to a CVL event.
4126 *
4127 * Return the pointer to the ndlp with the vport if successful, otherwise
4128 * return NULL.
4129 **/
4130static struct lpfc_nodelist *
4131lpfc_sli4_perform_vport_cvl(struct lpfc_vport *vport)
4132{
4133        struct lpfc_nodelist *ndlp;
4134        struct Scsi_Host *shost;
4135        struct lpfc_hba *phba;
4136
4137        if (!vport)
4138                return NULL;
4139        phba = vport->phba;
4140        if (!phba)
4141                return NULL;
4142        ndlp = lpfc_findnode_did(vport, Fabric_DID);
4143        if (!ndlp) {
4144                /* Cannot find existing Fabric ndlp, so allocate a new one */
4145                ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
4146                if (!ndlp)
4147                        return NULL;
4148                lpfc_nlp_init(vport, ndlp, Fabric_DID);
4149                /* Set the node type */
4150                ndlp->nlp_type |= NLP_FABRIC;
4151                /* Put ndlp onto node list */
4152                lpfc_enqueue_node(vport, ndlp);
4153        } else if (!NLP_CHK_NODE_ACT(ndlp)) {
4154                /* re-setup ndlp without removing from node list */
4155                ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
4156                if (!ndlp)
4157                        return NULL;
4158        }
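            /* If the physical port has not reached FLOGI yet, ignore the CVL */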
4159        if ((phba->pport->port_state < LPFC_FLOGI) &&
4160                (phba->pport->port_state != LPFC_VPORT_FAILED))
4161                return NULL;
4162        /* If virtual link is not yet instantiated ignore CVL */
4163        if ((vport != phba->pport) && (vport->port_state < LPFC_FDISC)
4164                && (vport->port_state != LPFC_VPORT_FAILED))
4165                return NULL;
4166        shost = lpfc_shost_from_vport(vport);
4167        if (!shost)
4168                return NULL;
4169        lpfc_linkdown_port(vport);
4170        lpfc_cleanup_pending_mbox(vport);
4171        spin_lock_irq(shost->host_lock);
4172        vport->fc_flag |= FC_VPORT_CVL_RCVD;
4173        spin_unlock_irq(shost->host_lock);
4174
4175        return ndlp;
4176}
4177
4178/**
4179 * lpfc_sli4_perform_all_vport_cvl - Perform clear virtual link on all vports
4180 * @phba: pointer to lpfc hba data structure.
4181 *
4182 * This routine is to perform Clear Virtual Link (CVL) on all vports in
4183 * response to an FCF dead event.
4184 **/
4185static void
4186lpfc_sli4_perform_all_vport_cvl(struct lpfc_hba *phba)
4187{
4188        struct lpfc_vport **vports;
4189        int i;
4190
4191        vports = lpfc_create_vport_work_array(phba);
4192        if (vports)
4193                for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
4194                        lpfc_sli4_perform_vport_cvl(vports[i]);
4195        lpfc_destroy_vport_work_array(phba, vports);
4196}
4197
4198/**
4199 * lpfc_sli4_async_fip_evt - Process the asynchronous FCoE FIP event
4200 * @phba: pointer to lpfc hba data structure.
4201 * @acqe_fip: pointer to the async FCoE FIP completion queue entry.
4202 *
4203 * This routine is to handle the SLI4 asynchronous FCoE FIP event.
4204 **/
4205static void
4206lpfc_sli4_async_fip_evt(struct lpfc_hba *phba,
4207                        struct lpfc_acqe_fip *acqe_fip)
4208{
4209        uint8_t event_type = bf_get(lpfc_trailer_type, acqe_fip);
4210        int rc;
4211        struct lpfc_vport *vport;
4212        struct lpfc_nodelist *ndlp;
4213        struct Scsi_Host  *shost;
4214        int active_vlink_present;
4215        struct lpfc_vport **vports;
4216        int i;
4217
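            /* Cache the event tag for both generic FC and FCoE event bookkeeping */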
4218        phba->fc_eventTag = acqe_fip->event_tag;
4219        phba->fcoe_eventtag = acqe_fip->event_tag;
4220        switch (event_type) {
4221        case LPFC_FIP_EVENT_TYPE_NEW_FCF:
4222        case LPFC_FIP_EVENT_TYPE_FCF_PARAM_MOD:
4223                if (event_type == LPFC_FIP_EVENT_TYPE_NEW_FCF)
4224                        lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
4225                                        LOG_DISCOVERY,
4226                                        "2546 New FCF event, evt_tag:x%x, "
4227                                        "index:x%x\n",
4228                                        acqe_fip->event_tag,
4229                                        acqe_fip->index);
4230                else
4231                        lpfc_printf_log(phba, KERN_WARNING, LOG_FIP |
4232                                        LOG_DISCOVERY,
4233                                        "2788 FCF param modified event, "
4234                                        "evt_tag:x%x, index:x%x\n",
4235                                        acqe_fip->event_tag,
4236                                        acqe_fip->index);
4237                if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
4238                        /*
4239                         * During period of FCF discovery, read the FCF
4240                         * table record indexed by the event to update
4241                         * FCF roundrobin failover eligible FCF bmask.
4242                         */
4243                        lpfc_printf_log(phba, KERN_INFO, LOG_FIP |
4244                                        LOG_DISCOVERY,
4245                                        "2779 Read FCF (x%x) for updating "
4246                                        "roundrobin FCF failover bmask\n",
4247                                        acqe_fip->index);
4248                        rc = lpfc_sli4_read_fcf_rec(phba, acqe_fip->index);
4249                }
4250
4251                /* If the FCF discovery is in progress, do nothing. */
4252                spin_lock_irq(&phba->hbalock);
4253                if (phba->hba_flag & FCF_TS_INPROG) {
4254                        spin_unlock_irq(&phba->hbalock);
4255                        break;
4256                }
4257                /* If fast FCF failover rescan event is pending, do nothing */
4258                if (phba->fcf.fcf_flag & FCF_REDISC_EVT) {
4259                        spin_unlock_irq(&phba->hbalock);
4260                        break;
4261                }
4262
4263                /* If the FCF has been in discovered state, do nothing. */
4264                if (phba->fcf.fcf_flag & FCF_SCAN_DONE) {
4265                        spin_unlock_irq(&phba->hbalock);
4266                        break;
4267                }
4268                spin_unlock_irq(&phba->hbalock);
4269
4270                /* Otherwise, scan the entire FCF table and re-discover SAN */
4271                lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
4272                                "2770 Start FCF table scan per async FCF "
4273                                "event, evt_tag:x%x, index:x%x\n",
4274                                acqe_fip->event_tag, acqe_fip->index);
4275                rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba,
4276                                                     LPFC_FCOE_FCF_GET_FIRST);
4277                if (rc)
4278                        lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
4279                                        "2547 Issue FCF scan read FCF mailbox "
4280                                        "command failed (x%x)\n", rc);
4281                break;
4282
4283        case LPFC_FIP_EVENT_TYPE_FCF_TABLE_FULL:
4284                lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
4285                        "2548 FCF Table full count 0x%x tag 0x%x\n",
4286                        bf_get(lpfc_acqe_fip_fcf_count, acqe_fip),
4287                        acqe_fip->event_tag);
4288                break;
4289
4290        case LPFC_FIP_EVENT_TYPE_FCF_DEAD:
4291                phba->fcoe_cvl_eventtag = acqe_fip->event_tag;
4292                lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
4293                        "2549 FCF (x%x) disconnected from network, "
4294                        "tag:x%x\n", acqe_fip->index, acqe_fip->event_tag);
4295                /*
4296                 * If we are in the middle of FCF failover process, clear
4297                 * the corresponding FCF bit in the roundrobin bitmap.
4298                 */
4299                spin_lock_irq(&phba->hbalock);
4300                if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
4301                        spin_unlock_irq(&phba->hbalock);
4302                        /* Update FLOGI FCF failover eligible FCF bmask */
4303                        lpfc_sli4_fcf_rr_index_clear(phba, acqe_fip->index);
4304                        break;
4305                }
4306                spin_unlock_irq(&phba->hbalock);
4307
4308                /* If the event is not for the currently used FCF, do nothing */
4309                if (phba->fcf.current_rec.fcf_indx != acqe_fip->index)
4310                        break;
4311
4312                /*
4313                 * Otherwise, request the port to rediscover the entire FCF
4314                 * table for a fast recovery in case the current FCF is no
4315                 * longer valid, since we are not already in the middle of
4316                 * the FCF failover process.
4317                 */
4318                spin_lock_irq(&phba->hbalock);
4319                /* Mark the fast failover process in progress */
4320                phba->fcf.fcf_flag |= FCF_DEAD_DISC;
4321                spin_unlock_irq(&phba->hbalock);
4322
4323                lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
4324                                "2771 Start FCF fast failover process due to "
4325                                "FCF DEAD event: evt_tag:x%x, fcf_index:x%x "
4326                                "\n", acqe_fip->event_tag, acqe_fip->index);
4327                rc = lpfc_sli4_redisc_fcf_table(phba);
4328                if (rc) {
4329                        lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
4330                                        LOG_DISCOVERY,
4331                                        "2772 Issue FCF rediscover mabilbox "
4332                                        "command failed, fail through to FCF "
4333                                        "dead event\n");
4334                        spin_lock_irq(&phba->hbalock);
4335                        phba->fcf.fcf_flag &= ~FCF_DEAD_DISC;
4336                        spin_unlock_irq(&phba->hbalock);
4337                        /*
4338                         * Last resort will fail over by treating this
4339                         * as a link down to FCF registration.
4340                         */
4341                        lpfc_sli4_fcf_dead_failthrough(phba);
4342                } else {
4343                        /* Reset FCF roundrobin bmask for new discovery */
4344                        lpfc_sli4_clear_fcf_rr_bmask(phba);
4345                        /*
4346                         * Handling fast FCF failover to a DEAD FCF event is
4347                         * considered equivalent to receiving CVL on all vports.
4348                         */
4349                        lpfc_sli4_perform_all_vport_cvl(phba);
4350                }
4351                break;
4352        case LPFC_FIP_EVENT_TYPE_CVL:
4353                phba->fcoe_cvl_eventtag = acqe_fip->event_tag;
4354                lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
4355                        "2718 Clear Virtual Link Received for VPI 0x%x"
4356                        " tag 0x%x\n", acqe_fip->index, acqe_fip->event_tag);
4357
4358                vport = lpfc_find_vport_by_vpid(phba,
4359                                                acqe_fip->index);
4360                ndlp = lpfc_sli4_perform_vport_cvl(vport);
4361                if (!ndlp)
4362                        break;
4363                active_vlink_present = 0;
4364
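                    /* Check all other vports for an active virtual link */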
4365                vports = lpfc_create_vport_work_array(phba);
4366                if (vports) {
4367                        for (i = 0; i <= phba->max_vports && vports[i] != NULL;
4368                                        i++) {
4369                                if ((!(vports[i]->fc_flag &
4370                                        FC_VPORT_CVL_RCVD)) &&
4371                                        (vports[i]->port_state > LPFC_FDISC)) {
4372                                        active_vlink_present = 1;
4373                                        break;
4374                                }
4375                        }
4376                        lpfc_destroy_vport_work_array(phba, vports);
4377                }
4378
4379                if (active_vlink_present) {
4380                        /*
4381                         * If there are other active VLinks present,
4382                         * re-instantiate the Vlink using FDISC.
4383                         */
4384                        mod_timer(&ndlp->nlp_delayfunc,
4385                                  jiffies + msecs_to_jiffies(1000));
4386                        shost = lpfc_shost_from_vport(vport);
4387                        spin_lock_irq(shost->host_lock);
4388                        ndlp->nlp_flag |= NLP_DELAY_TMO;
4389                        spin_unlock_irq(shost->host_lock);
4390                        ndlp->nlp_last_elscmd = ELS_CMD_FDISC;
4391                        vport->port_state = LPFC_FDISC;
4392                } else {
4393                        /*
4394                         * Otherwise, request the port to rediscover
4395                         * the entire FCF table for a fast recovery
4396                         * in case the current FCF is no longer
4397                         * valid, provided we are not already in
4398                         * the FCF failover process.
4399                         */
4400                        spin_lock_irq(&phba->hbalock);
4401                        if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
4402                                spin_unlock_irq(&phba->hbalock);
4403                                break;
4404                        }
4405                        /* Mark the fast failover process in progress */
4406                        phba->fcf.fcf_flag |= FCF_ACVL_DISC;
4407                        spin_unlock_irq(&phba->hbalock);
4408                        lpfc_printf_log(phba, KERN_INFO, LOG_FIP |
4409                                        LOG_DISCOVERY,
4410                                        "2773 Start FCF failover per CVL, "
4411                                        "evt_tag:x%x\n", acqe_fip->event_tag);
4412                        rc = lpfc_sli4_redisc_fcf_table(phba);
4413                        if (rc) {
4414                                lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
4415                                                LOG_DISCOVERY,
4416                                                "2774 Issue FCF rediscover "
4417                                                "mabilbox command failed, "
4418                                                "through to CVL event\n");
4419                                spin_lock_irq(&phba->hbalock);
4420                                phba->fcf.fcf_flag &= ~FCF_ACVL_DISC;
4421                                spin_unlock_irq(&phba->hbalock);
4422                                /*
4423                                 * Last resort will be re-try on the
4424                                 * currently registered FCF entry.
4425                                 */
4426                                lpfc_retry_pport_discovery(phba);
4427                        } else
4428                                /*
4429                                 * Reset FCF roundrobin bmask for new
4430                                 * discovery.
4431                                 */
4432                                lpfc_sli4_clear_fcf_rr_bmask(phba);
4433                }
4434                break;
4435        default:
4436                lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
4437                        "0288 Unknown FCoE event type 0x%x event tag "
4438                        "0x%x\n", event_type, acqe_fip->event_tag);
4439                break;
4440        }
4441}
4442
4443/**
4444 * lpfc_sli4_async_dcbx_evt - Process the asynchronous dcbx event
4445 * @phba: pointer to lpfc hba data structure.
4446 * @acqe_dcbx: pointer to the async dcbx completion queue entry.
4447 *
4448 * This routine is to handle the SLI4 asynchronous dcbx event.
4449 **/
4450static void
4451lpfc_sli4_async_dcbx_evt(struct lpfc_hba *phba,
4452                         struct lpfc_acqe_dcbx *acqe_dcbx)
4453{
4454        phba->fc_eventTag = acqe_dcbx->event_tag;
4455        lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
4456                        "0290 The SLI4 DCBX asynchronous event is not "
4457                        "handled yet\n");
4458}
4459
4460/**
4461 * lpfc_sli4_async_grp5_evt - Process the asynchronous group5 event
4462 * @phba: pointer to lpfc hba data structure.
4463 * @acqe_grp5: pointer to the async grp5 completion queue entry.
4464 *
4465 * This routine is to handle the SLI4 asynchronous grp5 event. A grp5 event
4466 * is an asynchronous notification of a logical link speed change.  The Port
4467 * reports the logical link speed in units of 10Mbps.
4468 **/
4469static void
4470lpfc_sli4_async_grp5_evt(struct lpfc_hba *phba,
4471                         struct lpfc_acqe_grp5 *acqe_grp5)
4472{
4473        uint16_t prev_ll_spd;
4474
4475        phba->fc_eventTag = acqe_grp5->event_tag;
4476        phba->fcoe_eventtag = acqe_grp5->event_tag;
4477        prev_ll_spd = phba->sli4_hba.link_state.logical_speed;
4478        phba->sli4_hba.link_state.logical_speed =
4479                (bf_get(lpfc_acqe_grp5_llink_spd, acqe_grp5)) * 10;
4480        lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4481                        "2789 GRP5 Async Event: Updating logical link speed "
4482                        "from %dMbps to %dMbps\n", prev_ll_spd,
4483                        phba->sli4_hba.link_state.logical_speed);
4484}
4485
4486/**
4487 * lpfc_sli4_async_event_proc - Process all the pending asynchronous events
4488 * @phba: pointer to lpfc hba data structure.
4489 *
4490 * This routine is invoked by the worker thread to process all the pending
4491 * SLI4 asynchronous events.
4492 **/
4493void lpfc_sli4_async_event_proc(struct lpfc_hba *phba)
4494{
4495        struct lpfc_cq_event *cq_event;
4496
4497        /* First, declare the async event has been handled */
4498        spin_lock_irq(&phba->hbalock);
4499        phba->hba_flag &= ~ASYNC_EVENT;
4500        spin_unlock_irq(&phba->hbalock);
4501        /* Now, handle all the async events */
4502        while (!list_empty(&phba->sli4_hba.sp_asynce_work_queue)) {
4503                /* Get the first event from the head of the event queue */
4504                spin_lock_irq(&phba->hbalock);
4505                list_remove_head(&phba->sli4_hba.sp_asynce_work_queue,
4506                                 cq_event, struct lpfc_cq_event, list);
4507                spin_unlock_irq(&phba->hbalock);
4508                /* Process the asynchronous event */
4509                switch (bf_get(lpfc_trailer_code, &cq_event->cqe.mcqe_cmpl)) {
4510                case LPFC_TRAILER_CODE_LINK:
4511                        lpfc_sli4_async_link_evt(phba,
4512                                                 &cq_event->cqe.acqe_link);
4513                        break;
4514                case LPFC_TRAILER_CODE_FCOE:
4515                        lpfc_sli4_async_fip_evt(phba, &cq_event->cqe.acqe_fip);
4516                        break;
4517                case LPFC_TRAILER_CODE_DCBX:
4518                        lpfc_sli4_async_dcbx_evt(phba,
4519                                                 &cq_event->cqe.acqe_dcbx);
4520                        break;
4521                case LPFC_TRAILER_CODE_GRP5:
4522                        lpfc_sli4_async_grp5_evt(phba,
4523                                                 &cq_event->cqe.acqe_grp5);
4524                        break;
4525                case LPFC_TRAILER_CODE_FC:
4526                        lpfc_sli4_async_fc_evt(phba, &cq_event->cqe.acqe_fc);
4527                        break;
4528                case LPFC_TRAILER_CODE_SLI:
4529                        lpfc_sli4_async_sli_evt(phba, &cq_event->cqe.acqe_sli);
4530                        break;
4531                default:
4532                        lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
4533                                        "1804 Invalid asynchrous event code: "
4534                                        "x%x\n", bf_get(lpfc_trailer_code,
4535                                        &cq_event->cqe.mcqe_cmpl));
4536                        break;
4537                }
4538                /* Free the completion event processed to the free pool */
4539                lpfc_sli4_cq_event_release(phba, cq_event);
4540        }
4541}
4542
4543/**
4544 * lpfc_sli4_fcf_redisc_event_proc - Process fcf table rediscovery event
4545 * @phba: pointer to lpfc hba data structure.
4546 *
4547 * This routine is invoked by the worker thread to process FCF table
4548 * rediscovery pending completion event.
4549 **/
4550void lpfc_sli4_fcf_redisc_event_proc(struct lpfc_hba *phba)
4551{
4552        int rc;
4553
4554        spin_lock_irq(&phba->hbalock);
4555        /* Clear FCF rediscovery timeout event */
4556        phba->fcf.fcf_flag &= ~FCF_REDISC_EVT;
4557        /* Clear driver fast failover FCF record flag */
4558        phba->fcf.failover_rec.flag = 0;
4559        /* Set state for FCF fast failover */
4560        phba->fcf.fcf_flag |= FCF_REDISC_FOV;
4561        spin_unlock_irq(&phba->hbalock);
4562
4563        /* Scan FCF table from the first entry to re-discover SAN */
4564        lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
4565                        "2777 Start post-quiescent FCF table scan\n");
4566        rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST);
4567        if (rc)
4568                lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
4569                                "2747 Issue FCF scan read FCF mailbox "
4570                                "command failed 0x%x\n", rc);
4571}
4572
4573/**
4574 * lpfc_api_table_setup - Set up per hba pci-device group func api jump table
4575 * @phba: pointer to lpfc hba data structure.
4576 * @dev_grp: The HBA PCI-Device group number.
4577 *
4578 * This routine is invoked to set up the per HBA PCI-Device group function
4579 * API jump table entries.
4580 *
4581 * Return: 0 if success, otherwise -ENODEV
4582 **/
4583int
4584lpfc_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
4585{
4586        int rc;
4587
4588        /* Set up lpfc PCI-device group */
4589        phba->pci_dev_grp = dev_grp;
4590
4591        /* The LPFC_PCI_DEV_OC uses SLI4 */
4592        if (dev_grp == LPFC_PCI_DEV_OC)
4593                phba->sli_rev = LPFC_SLI_REV4;
4594
4595        /* Set up device INIT API function jump table */
4596        rc = lpfc_init_api_table_setup(phba, dev_grp);
4597        if (rc)
4598                return -ENODEV;
4599        /* Set up SCSI API function jump table */
4600        rc = lpfc_scsi_api_table_setup(phba, dev_grp);
4601        if (rc)
4602                return -ENODEV;
4603        /* Set up SLI API function jump table */
4604        rc = lpfc_sli_api_table_setup(phba, dev_grp);
4605        if (rc)
4606                return -ENODEV;
4607        /* Set up MBOX API function jump table */
4608        rc = lpfc_mbox_api_table_setup(phba, dev_grp);
4609        if (rc)
4610                return -ENODEV;
4611
4612        return 0;
4613}
4614
4615/**
4616 * lpfc_log_intr_mode - Log the active interrupt mode
4617 * @phba: pointer to lpfc hba data structure.
4618 * @intr_mode: active interrupt mode adopted.
4619 *
4620 * This routine is invoked to log the interrupt mode currently in use
4621 * by the device.
4622 **/
4623static void lpfc_log_intr_mode(struct lpfc_hba *phba, uint32_t intr_mode)
4624{
4625        switch (intr_mode) {
4626        case 0:
4627                lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4628                                "0470 Enable INTx interrupt mode.\n");
4629                break;
4630        case 1:
4631                lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4632                                "0481 Enabled MSI interrupt mode.\n");
4633                break;
4634        case 2:
4635                lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4636                                "0480 Enabled MSI-X interrupt mode.\n");
4637                break;
4638        default:
4639                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4640                                "0482 Illegal interrupt mode.\n");
4641                break;
4642        }
4643        return;
4644}
4645
4646/**
4647 * lpfc_enable_pci_dev - Enable a generic PCI device.
4648 * @phba: pointer to lpfc hba data structure.
4649 *
4650 * This routine is invoked to enable the PCI device that is common to all
4651 * PCI devices.
4652 *
4653 * Return codes
4654 *      0 - successful
4655 *      other values - error
4656 **/
4657static int
4658lpfc_enable_pci_dev(struct lpfc_hba *phba)
4659{
4660        struct pci_dev *pdev;
4661        int bars = 0;
4662
4663        /* Obtain PCI device reference */
4664        if (!phba->pcidev)
4665                goto out_error;
4666        else
4667                pdev = phba->pcidev;
4668        /* Select PCI BARs */
4669        bars = pci_select_bars(pdev, IORESOURCE_MEM);
4670        /* Enable PCI device */
4671        if (pci_enable_device_mem(pdev))
4672                goto out_error;
4673        /* Request PCI resource for the device */
4674        if (pci_request_selected_regions(pdev, bars, LPFC_DRIVER_NAME))
4675                goto out_disable_device;
4676        /* Set up device as PCI master and save state for EEH */
4677        pci_set_master(pdev);
4678        pci_try_set_mwi(pdev);
4679        pci_save_state(pdev);
4680
4681        /* PCIe EEH recovery on powerpc platforms needs fundamental reset */
4682        if (pci_is_pcie(pdev))
4683                pdev->needs_freset = 1;
4684
4685        return 0;
4686
4687out_disable_device:
4688        pci_disable_device(pdev);
4689out_error:
4690        lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4691                        "1401 Failed to enable pci device, bars:x%x\n", bars);
4692        return -ENODEV;
4693}
4694
4695/**
4696 * lpfc_disable_pci_dev - Disable a generic PCI device.
4697 * @phba: pointer to lpfc hba data structure.
4698 *
4699 * This routine is invoked to disable the PCI device that is common to all
4700 * PCI devices.
4701 **/
4702static void
4703lpfc_disable_pci_dev(struct lpfc_hba *phba)
4704{
4705        struct pci_dev *pdev;
4706        int bars;
4707
4708        /* Obtain PCI device reference */
4709        if (!phba->pcidev)
4710                return;
4711        else
4712                pdev = phba->pcidev;
4713        /* Select PCI BARs */
4714        bars = pci_select_bars(pdev, IORESOURCE_MEM);
4715        /* Release PCI resource and disable PCI device */
4716        pci_release_selected_regions(pdev, bars);
4717        pci_disable_device(pdev);
4718
4719        return;
4720}
4721
4722/**
4723 * lpfc_reset_hba - Reset a hba
4724 * @phba: pointer to lpfc hba data structure.
4725 *
4726 * This routine is invoked to reset a hba device. It brings the HBA
4727 * offline, performs a board restart, and then brings the board back
4728 * online. The lpfc_offline calls lpfc_sli_hba_down which will clean up
4729 * any outstanding mailbox commands.
4730 **/
4731void
4732lpfc_reset_hba(struct lpfc_hba *phba)
4733{
4734        /* If resets are disabled then set error state and return. */
4735        if (!phba->cfg_enable_hba_reset) {
4736                phba->link_state = LPFC_HBA_ERROR;
4737                return;
4738        }
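            /* Wait on mailbox commands during offline prep only if SLI is still active */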
4739        if (phba->sli.sli_flag & LPFC_SLI_ACTIVE)
4740                lpfc_offline_prep(phba, LPFC_MBX_WAIT);
4741        else
4742                lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
4743        lpfc_offline(phba);
4744        lpfc_sli_brdrestart(phba);
4745        lpfc_online(phba);
4746        lpfc_unblock_mgmt_io(phba);
4747}
4748
4749/**
4750 * lpfc_sli_sriov_nr_virtfn_get - Get the number of sr-iov virtual functions
4751 * @phba: pointer to lpfc hba data structure.
4752 *
4753 * This function reads the PCI configuration space of the physical
4754 * function to locate the SR-IOV extended capability and, when present,
4755 * returns the total number of virtual functions (TotalVFs) that the
4756 * device supports. It returns 0 when the device does not support
4757 * SR-IOV or the capability cannot be found.
4758 **/
4759uint16_t
4760lpfc_sli_sriov_nr_virtfn_get(struct lpfc_hba *phba)
4761{
4762        struct pci_dev *pdev = phba->pcidev;
4763        uint16_t nr_virtfn;
4764        int pos;
4765
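            /* Locate the PCIe SR-IOV extended capability; absence means no VF support */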
4766        pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
4767        if (pos == 0)
4768                return 0;
4769
4770        pci_read_config_word(pdev, pos + PCI_SRIOV_TOTAL_VF, &nr_virtfn);
4771        return nr_virtfn;
4772}
4773
4774/**
4775 * lpfc_sli_probe_sriov_nr_virtfn - Enable a number of sr-iov virtual functions
4776 * @phba: pointer to lpfc hba data structure.
4777 * @nr_vfn: number of virtual functions to be enabled.
4778 *
4779 * This function enables the PCI SR-IOV virtual functions to a physical
4780 * function. It invokes the PCI SR-IOV API with the @nr_vfn provided to
4781 * enable that number of virtual functions on the physical function. As
4782 * not all devices support SR-IOV, the return code from the pci_enable_sriov()
4783 * API call is not considered an error condition for most devices.
4784 **/
4785int
4786lpfc_sli_probe_sriov_nr_virtfn(struct lpfc_hba *phba, int nr_vfn)
4787{
4788        struct pci_dev *pdev = phba->pcidev;
4789        uint16_t max_nr_vfn;
4790        int rc;
4791
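            /* Reject requests that exceed the device's advertised TotalVFs */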
4792        max_nr_vfn = lpfc_sli_sriov_nr_virtfn_get(phba);
4793        if (nr_vfn > max_nr_vfn) {
4794                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4795                                "3057 Requested vfs (%d) greater than "
4796                                "supported vfs (%d)", nr_vfn, max_nr_vfn);
4797                return -EINVAL;
4798        }
4799
4800        rc = pci_enable_sriov(pdev, nr_vfn);
4801        if (rc) {
4802                lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
4803                                "2806 Failed to enable sriov on this device "
4804                                "with vfn number nr_vf:%d, rc:%d\n",
4805                                nr_vfn, rc);
4806        } else
4807                lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
4808                                "2807 Successful enable sriov on this device "
4809                                "with vfn number nr_vf:%d\n", nr_vfn);
4810        return rc;
4811}
4812
4813/**
4814 * lpfc_sli_driver_resource_setup - Setup driver internal resources for SLI3 dev.
4815 * @phba: pointer to lpfc hba data structure.
4816 *
4817 * This routine is invoked to set up the driver internal resources specific to
4818 * support the SLI-3 HBA device it is attached to.
4819 *
4820 * Return codes
4821 *      0 - successful
4822 *      other values - error
4823 **/
4824static int
4825lpfc_sli_driver_resource_setup(struct lpfc_hba *phba)
4826{
4827        struct lpfc_sli *psli;
4828        int rc;
4829
4830        /*
4831         * Initialize timers used by driver
4832         */
4833
4834        /* Heartbeat timer */
4835        init_timer(&phba->hb_tmofunc);
4836        phba->hb_tmofunc.function = lpfc_hb_timeout;
4837        phba->hb_tmofunc.data = (unsigned long)phba;
4838
4839        psli = &phba->sli;
4840        /* MBOX heartbeat timer */
4841        init_timer(&psli->mbox_tmo);
4842        psli->mbox_tmo.function = lpfc_mbox_timeout;
4843        psli->mbox_tmo.data = (unsigned long) phba;
4844        /* FCP polling mode timer */
4845        init_timer(&phba->fcp_poll_timer);
4846        phba->fcp_poll_timer.function = lpfc_poll_timeout;
4847        phba->fcp_poll_timer.data = (unsigned long) phba;
4848        /* Fabric block timer */
4849        init_timer(&phba->fabric_block_timer);
4850        phba->fabric_block_timer.function = lpfc_fabric_block_timeout;
4851        phba->fabric_block_timer.data = (unsigned long) phba;
4852        /* EA polling mode timer */
4853        init_timer(&phba->eratt_poll);
4854        phba->eratt_poll.function = lpfc_poll_eratt;
4855        phba->eratt_poll.data = (unsigned long) phba;
4856
4857        /* Host attention work mask setup */
4858        phba->work_ha_mask = (HA_ERATT | HA_MBATT | HA_LATT);
4859        phba->work_ha_mask |= (HA_RXMASK << (LPFC_ELS_RING * 4));
4860
4861        /* Get all the module params for configuring this host */
4862        lpfc_get_cfgparam(phba);
4863        if (phba->pcidev->device == PCI_DEVICE_ID_HORNET) {
4864                phba->menlo_flag |= HBA_MENLO_SUPPORT;
4865                /* check for menlo minimum sg count */
4866                if (phba->cfg_sg_seg_cnt < LPFC_DEFAULT_MENLO_SG_SEG_CNT)
4867                        phba->cfg_sg_seg_cnt = LPFC_DEFAULT_MENLO_SG_SEG_CNT;
4868        }
4869
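            /* Allocate the SLI-3 ring array if it has not been allocated yet */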
4870        if (!phba->sli.ring)
4871                phba->sli.ring = (struct lpfc_sli_ring *)
4872                        kzalloc(LPFC_SLI3_MAX_RING *
4873                        sizeof(struct lpfc_sli_ring), GFP_KERNEL);
4874        if (!phba->sli.ring)
4875                return -ENOMEM;
4876
4877        /*
4878         * Since lpfc_sg_seg_cnt is module parameter, the sg_dma_buf_size
4879         * used to create the sg_dma_buf_pool must be dynamically calculated.
4880         */
4881
4882        /* Initialize the host templates with the configured values. */
4883        lpfc_vport_template.sg_tablesize = phba->cfg_sg_seg_cnt;
4884        lpfc_template.sg_tablesize = phba->cfg_sg_seg_cnt;
4885
4886        /* There are going to be 2 reserved BDEs: 1 FCP cmnd + 1 FCP rsp */
4887        if (phba->cfg_enable_bg) {
4888                /*
4889                 * The scsi_buf for a T10-DIF I/O will hold the FCP cmnd,
4890                 * the FCP rsp, and a BDE for each. Since we have no control
4891                 * over how many protection data segments the SCSI Layer
4892                 * will hand us (ie: there could be one for every block
4893                 * in the IO), we just allocate enough BDEs to accommodate
4894                 * our max amount and we need to limit lpfc_sg_seg_cnt to
4895                 * minimize the risk of running out.
4896                 */
4897                phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
4898                        sizeof(struct fcp_rsp) +
4899                        (LPFC_MAX_SG_SEG_CNT * sizeof(struct ulp_bde64));
4900
4901                if (phba->cfg_sg_seg_cnt > LPFC_MAX_SG_SEG_CNT_DIF)
4902                        phba->cfg_sg_seg_cnt = LPFC_MAX_SG_SEG_CNT_DIF;
4903
4904                /* Total BDEs in BPL for scsi_sg_list and scsi_sg_prot_list */
4905                phba->cfg_total_seg_cnt = LPFC_MAX_SG_SEG_CNT;
4906        } else {
4907                /*
4908                 * The scsi_buf for a regular I/O will hold the FCP cmnd,
4909                 * the FCP rsp, a BDE for each, and a BDE for up to
4910                 * cfg_sg_seg_cnt data segments.
4911                 */
4912                phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
4913                        sizeof(struct fcp_rsp) +
4914                        ((phba->cfg_sg_seg_cnt + 2) * sizeof(struct ulp_bde64));
4915
4916                /* Total BDEs in BPL for scsi_sg_list */
4917                phba->cfg_total_seg_cnt = phba->cfg_sg_seg_cnt + 2;
4918        }
4919
4920        lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP,
4921                        "9088 sg_tablesize:%d dmabuf_size:%d total_bde:%d\n",
4922                        phba->cfg_sg_seg_cnt, phba->cfg_sg_dma_buf_size,
4923                        phba->cfg_total_seg_cnt);
4924
4925        phba->max_vpi = LPFC_MAX_VPI;
4926        /* This will be set to correct value after config_port mbox */
4927        phba->max_vports = 0;
4928
4929        /*
4930         * Initialize the SLI Layer to run with lpfc HBAs.
4931         */
4932        lpfc_sli_setup(phba);
4933        lpfc_sli_queue_setup(phba);
4934
4935        /* Allocate device driver memory */
4936        if (lpfc_mem_alloc(phba, BPL_ALIGN_SZ))
4937                return -ENOMEM;
4938
4939        /*
4940         * Enable sr-iov virtual functions if supported and configured
4941         * through the module parameter.
4942         */
4943        if (phba->cfg_sriov_nr_virtfn > 0) {
4944                rc = lpfc_sli_probe_sriov_nr_virtfn(phba,
4945                                                 phba->cfg_sriov_nr_virtfn);
4946                if (rc) {
4947                        lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
4948                                        "2808 Requested number of SR-IOV "
4949                                        "virtual functions (%d) is not "
4950                                        "supported\n",
4951                                        phba->cfg_sriov_nr_virtfn);
4952                        phba->cfg_sriov_nr_virtfn = 0;
4953                }
4954        }
4955
4956        return 0;
4957}
4958
4959/**
4960 * lpfc_sli_driver_resource_unset - Unset drvr internal resources for SLI3 dev
4961 * @phba: pointer to lpfc hba data structure.
4962 *
4963 * This routine is invoked to unset the driver internal resources set up
4964 * specifically for supporting the SLI-3 HBA device it is attached to.
4965 **/
4966static void
4967lpfc_sli_driver_resource_unset(struct lpfc_hba *phba)
4968{
4969        /* Free device driver memory allocated */
4970        lpfc_mem_free_all(phba);
4971
4972        return;
4973}
4974
4975/**
4976 * lpfc_sli4_driver_resource_setup - Setup drvr internal resources for SLI4 dev
4977 * @phba: pointer to lpfc hba data structure.
4978 *
4979 * This routine is invoked to set up the driver internal resources specific to
4980 * support the SLI-4 HBA device it is attached to.
4981 *
4982 * Return codes
4983 *      0 - successful
4984 *      other values - error
4985 **/
4986static int
4987lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
4988{
4989        struct lpfc_vector_map_info *cpup;
4990        struct lpfc_sli *psli;
4991        LPFC_MBOXQ_t *mboxq;
4992        int rc, i, hbq_count, max_buf_size;
4993        uint8_t pn_page[LPFC_MAX_SUPPORTED_PAGES] = {0};
4994        struct lpfc_mqe *mqe;
4995        int longs;
4996        int fof_vectors = 0;
4997
4998        /* Get all the module params for configuring this host */
4999        lpfc_get_cfgparam(phba);
5000
5001        /* Before proceeding, wait for POST completion and device readiness */
5002        rc = lpfc_sli4_post_status_check(phba);
5003        if (rc)
5004                return -ENODEV;
5005
5006        /*
5007         * Initialize timers used by driver
5008         */
5009
5010        /* Heartbeat timer */
5011        init_timer(&phba->hb_tmofunc);
5012        phba->hb_tmofunc.function = lpfc_hb_timeout;
5013        phba->hb_tmofunc.data = (unsigned long)phba;
5014        init_timer(&phba->rrq_tmr);
5015        phba->rrq_tmr.function = lpfc_rrq_timeout;
5016        phba->rrq_tmr.data = (unsigned long)phba;
5017
5018        psli = &phba->sli;
5019        /* MBOX heartbeat timer */
5020        init_timer(&psli->mbox_tmo);
5021        psli->mbox_tmo.function = lpfc_mbox_timeout;
5022        psli->mbox_tmo.data = (unsigned long) phba;
5023        /* Fabric block timer */
5024        init_timer(&phba->fabric_block_timer);
5025        phba->fabric_block_timer.function = lpfc_fabric_block_timeout;
5026        phba->fabric_block_timer.data = (unsigned long) phba;
5027        /* EA polling mode timer */
5028        init_timer(&phba->eratt_poll);
5029        phba->eratt_poll.function = lpfc_poll_eratt;
5030        phba->eratt_poll.data = (unsigned long) phba;
5031        /* FCF rediscover timer */
5032        init_timer(&phba->fcf.redisc_wait);
5033        phba->fcf.redisc_wait.function = lpfc_sli4_fcf_redisc_wait_tmo;
5034        phba->fcf.redisc_wait.data = (unsigned long)phba;
5035
5036        /*
5037         * Control structure for handling external multi-buffer mailbox
5038         * command pass-through.
5039         */
5040        memset((uint8_t *)&phba->mbox_ext_buf_ctx, 0,
5041                sizeof(struct lpfc_mbox_ext_buf_ctx));
5042        INIT_LIST_HEAD(&phba->mbox_ext_buf_ctx.ext_dmabuf_list);
5043
5044        phba->max_vpi = LPFC_MAX_VPI;
5045
5046        /* This will be set to correct value after the read_config mbox */
5047        phba->max_vports = 0;
5048
5049        /* Program the default value of vlan_id and fc_map */
5050        phba->valid_vlan = 0;
5051        phba->fc_map[0] = LPFC_FCOE_FCF_MAP0;
5052        phba->fc_map[1] = LPFC_FCOE_FCF_MAP1;
5053        phba->fc_map[2] = LPFC_FCOE_FCF_MAP2;
5054
5055        /*
5056         * For SLI4, instead of using ring 0 (LPFC_FCP_RING) for FCP commands
5057         * we will associate a new ring, for each FCP fastpath EQ/CQ/WQ tuple.
5058         */
5059        if (!phba->sli.ring)
5060                phba->sli.ring = kzalloc(
5061                        (LPFC_SLI3_MAX_RING + phba->cfg_fcp_io_channel) *
5062                        sizeof(struct lpfc_sli_ring), GFP_KERNEL);
5063        if (!phba->sli.ring)
5064                return -ENOMEM;
5065
5066        /*
5067         * It doesn't matter what family our adapter is in, we are
5068         * limited to 2 Pages, 512 SGEs, for our SGL.
5069         * There are going to be 2 reserved SGEs: 1 FCP cmnd + 1 FCP rsp
5070         */
5071        max_buf_size = (2 * SLI4_PAGE_SIZE);
5072        if (phba->cfg_sg_seg_cnt > LPFC_MAX_SGL_SEG_CNT - 2)
5073                phba->cfg_sg_seg_cnt = LPFC_MAX_SGL_SEG_CNT - 2;
5074
5075        /*
5076         * Since lpfc_sg_seg_cnt is module parameter, the sg_dma_buf_size
5077         * used to create the sg_dma_buf_pool must be dynamically calculated.
5078         */
5079
5080        if (phba->cfg_enable_bg) {
5081                /*
5082                 * The scsi_buf for a T10-DIF I/O will hold the FCP cmnd,
5083                 * the FCP rsp, and a SGE for each. Since we have no control
5084                 * over how many protection data segments the SCSI Layer
5085                 * will hand us (ie: there could be one for every block
5086                 * in the IO), we just allocate enough SGEs to accommodate
5087                 * our max amount and we need to limit lpfc_sg_seg_cnt to
5088                 * minimize the risk of running out.
5089                 */
5090                phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
5091                        sizeof(struct fcp_rsp) + max_buf_size;
5092
5093                /* Total SGEs for scsi_sg_list and scsi_sg_prot_list */
5094                phba->cfg_total_seg_cnt = LPFC_MAX_SGL_SEG_CNT;
5095
5096                if (phba->cfg_sg_seg_cnt > LPFC_MAX_SG_SLI4_SEG_CNT_DIF)
5097                        phba->cfg_sg_seg_cnt = LPFC_MAX_SG_SLI4_SEG_CNT_DIF;
5098        } else {
5099                /*
5100                 * The scsi_buf for a regular I/O will hold the FCP cmnd,
5101                 * the FCP rsp, a SGE for each, and a SGE for up to
5102                 * cfg_sg_seg_cnt data segments.
5103                 */
5104                phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
5105                        sizeof(struct fcp_rsp) +
5106                        ((phba->cfg_sg_seg_cnt + 2) * sizeof(struct sli4_sge));
5107
5108                /* Total SGEs for scsi_sg_list */
5109                phba->cfg_total_seg_cnt = phba->cfg_sg_seg_cnt + 2;
5110                /*
5111                 * NOTE: if (phba->cfg_sg_seg_cnt + 2) <= 256 we only need
5112                 * to post 1 page for the SGL.
5113                 */
5114        }
5115
5116        /* Initialize the host templates with the updated values. */
5117        lpfc_vport_template.sg_tablesize = phba->cfg_sg_seg_cnt;
5118        lpfc_template.sg_tablesize = phba->cfg_sg_seg_cnt;
5119
5120        if (phba->cfg_sg_dma_buf_size  <= LPFC_MIN_SG_SLI4_BUF_SZ)
5121                phba->cfg_sg_dma_buf_size = LPFC_MIN_SG_SLI4_BUF_SZ;
5122        else
5123                phba->cfg_sg_dma_buf_size =
5124                        SLI4_PAGE_ALIGN(phba->cfg_sg_dma_buf_size);
5125
5126        lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP,
5127                        "9087 sg_tablesize:%d dmabuf_size:%d total_sge:%d\n",
5128                        phba->cfg_sg_seg_cnt, phba->cfg_sg_dma_buf_size,
5129                        phba->cfg_total_seg_cnt);
5130
5131        /* Initialize buffer queue management fields */
5132        hbq_count = lpfc_sli_hbq_count();
5133        for (i = 0; i < hbq_count; ++i)
5134                INIT_LIST_HEAD(&phba->hbqs[i].hbq_buffer_list);
5135        INIT_LIST_HEAD(&phba->rb_pend_list);
5136        phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_sli4_rb_alloc;
5137        phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_sli4_rb_free;
5138
5139        /*
5140         * Initialize the SLI Layer to run with lpfc SLI4 HBAs.
5141         */
5142        /* Initialize the Abort scsi buffer list used by driver */
5143        spin_lock_init(&phba->sli4_hba.abts_scsi_buf_list_lock);
5144        INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_scsi_buf_list);
5145        /* This abort list used by worker thread */
5146        spin_lock_init(&phba->sli4_hba.abts_sgl_list_lock);
5147
5148        /*
5149         * Initialize driver internal slow-path work queues
5150         */
5151
5152        /* Driver internal slow-path CQ Event pool */
5153        INIT_LIST_HEAD(&phba->sli4_hba.sp_cqe_event_pool);
5154        /* Response IOCB work queue list */
5155        INIT_LIST_HEAD(&phba->sli4_hba.sp_queue_event);
5156        /* Asynchronous event CQ Event work queue list */
5157        INIT_LIST_HEAD(&phba->sli4_hba.sp_asynce_work_queue);
5158        /* Fast-path XRI aborted CQ Event work queue list */
5159        INIT_LIST_HEAD(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue);
5160        /* Slow-path XRI aborted CQ Event work queue list */
5161        INIT_LIST_HEAD(&phba->sli4_hba.sp_els_xri_aborted_work_queue);
5162        /* Receive queue CQ Event work queue list */
5163        INIT_LIST_HEAD(&phba->sli4_hba.sp_unsol_work_queue);
5164
5165        /* Initialize extent block lists. */
5166        INIT_LIST_HEAD(&phba->sli4_hba.lpfc_rpi_blk_list);
5167        INIT_LIST_HEAD(&phba->sli4_hba.lpfc_xri_blk_list);
5168        INIT_LIST_HEAD(&phba->sli4_hba.lpfc_vfi_blk_list);
5169        INIT_LIST_HEAD(&phba->lpfc_vpi_blk_list);
5170
5171        /* Initialize the driver internal SLI layer lists. */
5172        lpfc_sli_setup(phba);
5173        lpfc_sli_queue_setup(phba);
5174
5175        /* Allocate device driver memory */
5176        rc = lpfc_mem_alloc(phba, SGL_ALIGN_SZ);
5177        if (rc)
5178                return -ENOMEM;
5179
5180        /* IF Type 2 ports get initialized now. */
5181        if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
5182            LPFC_SLI_INTF_IF_TYPE_2) {
5183                rc = lpfc_pci_function_reset(phba);
5184                if (unlikely(rc))
5185                        return -ENODEV;
5186        }
5187
5188        /* Create the bootstrap mailbox command */
5189        rc = lpfc_create_bootstrap_mbox(phba);
5190        if (unlikely(rc))
5191                goto out_free_mem;
5192
5193        /* Set up the host's endian order with the device. */
5194        rc = lpfc_setup_endian_order(phba);
5195        if (unlikely(rc))
5196                goto out_free_bsmbx;
5197
5198        /* Set up the hba's configuration parameters. */
5199        rc = lpfc_sli4_read_config(phba);
5200        if (unlikely(rc))
5201                goto out_free_bsmbx;
5202        rc = lpfc_mem_alloc_active_rrq_pool_s4(phba);
5203        if (unlikely(rc))
5204                goto out_free_bsmbx;
5205
5206        /* IF Type 0 ports get initialized now. */
5207        if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
5208            LPFC_SLI_INTF_IF_TYPE_0) {
5209                rc = lpfc_pci_function_reset(phba);
5210                if (unlikely(rc))
5211                        goto out_free_bsmbx;
5212        }
5213
5214        mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
5215                                                       GFP_KERNEL);
5216        if (!mboxq) {
5217                rc = -ENOMEM;
5218                goto out_free_bsmbx;
5219        }
5220
5221        /* Get the Supported Pages if PORT_CAPABILITIES is supported by port. */
5222        lpfc_supported_pages(mboxq);
5223        rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
5224        if (!rc) {
5225                mqe = &mboxq->u.mqe;
5226                memcpy(&pn_page[0], ((uint8_t *)&mqe->un.supp_pages.word3),
5227                       LPFC_MAX_SUPPORTED_PAGES);
5228                for (i = 0; i < LPFC_MAX_SUPPORTED_PAGES; i++) {
5229                        switch (pn_page[i]) {
5230                        case LPFC_SLI4_PARAMETERS:
5231                                phba->sli4_hba.pc_sli4_params.supported = 1;
5232                                break;
5233                        default:
5234                                break;
5235                        }
5236                }
5237                /* Read the port's SLI4 Parameters capabilities if supported. */
5238                if (phba->sli4_hba.pc_sli4_params.supported)
5239                        rc = lpfc_pc_sli4_params_get(phba, mboxq);
5240                if (rc) {
5241                        mempool_free(mboxq, phba->mbox_mem_pool);
5242                        rc = -EIO;
5243                        goto out_free_bsmbx;
5244                }
5245        }
5246        /*
5247         * Get sli4 parameters that override parameters from Port capabilities.
5248         * If this call fails, it isn't critical unless the SLI4 parameters come
5249         * back in conflict.
5250         */
5251        rc = lpfc_get_sli4_parameters(phba, mboxq);
5252        if (rc) {
5253                if (phba->sli4_hba.extents_in_use &&
5254                    phba->sli4_hba.rpi_hdrs_in_use) {
5255                        lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5256                                "2999 Unsupported SLI4 Parameters "
5257                                "Extents and RPI headers enabled.\n");
5258                        goto out_free_bsmbx;
5259                }
5260        }
5261        mempool_free(mboxq, phba->mbox_mem_pool);
5262
5263        /* Verify OAS is supported */
5264        lpfc_sli4_oas_verify(phba);
5265        if (phba->cfg_fof)
5266                fof_vectors = 1;
5267
5268        /* Verify all the SLI4 queues */
5269        rc = lpfc_sli4_queue_verify(phba);
5270        if (rc)
5271                goto out_free_bsmbx;
5272
5273        /* Create driver internal CQE event pool */
5274        rc = lpfc_sli4_cq_event_pool_create(phba);
5275        if (rc)
5276                goto out_free_bsmbx;
5277
5278        /* Initialize sgl lists per host */
5279        lpfc_init_sgl_list(phba);
5280
5281        /* Allocate and initialize active sgl array */
5282        rc = lpfc_init_active_sgl_array(phba);
5283        if (rc) {
5284                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5285                                "1430 Failed to initialize sgl list.\n");
5286                goto out_destroy_cq_event_pool;
5287        }
5288        rc = lpfc_sli4_init_rpi_hdrs(phba);
5289        if (rc) {
5290                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5291                                "1432 Failed to initialize rpi headers.\n");
5292                goto out_free_active_sgl;
5293        }
5294
5295        /* Allocate eligible FCF bmask memory for FCF roundrobin failover */
5296        longs = (LPFC_SLI4_FCF_TBL_INDX_MAX + BITS_PER_LONG - 1)/BITS_PER_LONG;
5297        phba->fcf.fcf_rr_bmask = kzalloc(longs * sizeof(unsigned long),
5298                                         GFP_KERNEL);
5299        if (!phba->fcf.fcf_rr_bmask) {
5300                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5301                                "2759 Failed allocate memory for FCF round "
5302                                "robin failover bmask\n");
5303                rc = -ENOMEM;
5304                goto out_remove_rpi_hdrs;
5305        }
5306
5307        phba->sli4_hba.fcp_eq_hdl =
5308                        kzalloc((sizeof(struct lpfc_fcp_eq_hdl) *
5309                            (fof_vectors + phba->cfg_fcp_io_channel)),
5310                            GFP_KERNEL);
5311        if (!phba->sli4_hba.fcp_eq_hdl) {
5312                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5313                                "2572 Failed allocate memory for "
5314                                "fast-path per-EQ handle array\n");
5315                rc = -ENOMEM;
5316                goto out_free_fcf_rr_bmask;
5317        }
5318
5319        phba->sli4_hba.msix_entries = kzalloc((sizeof(struct msix_entry) *
5320                                  (fof_vectors +
5321                                   phba->cfg_fcp_io_channel)), GFP_KERNEL);
5322        if (!phba->sli4_hba.msix_entries) {
5323                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5324                                "2573 Failed allocate memory for msi-x "
5325                                "interrupt vector entries\n");
5326                rc = -ENOMEM;
5327                goto out_free_fcp_eq_hdl;
5328        }
5329
5330        phba->sli4_hba.cpu_map = kzalloc((sizeof(struct lpfc_vector_map_info) *
5331                                         phba->sli4_hba.num_present_cpu),
5332                                         GFP_KERNEL);
5333        if (!phba->sli4_hba.cpu_map) {
5334                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5335                                "3327 Failed allocate memory for msi-x "
5336                                "interrupt vector mapping\n");
5337                rc = -ENOMEM;
5338                goto out_free_msix;
5339        }
5340        if (lpfc_used_cpu == NULL) {
5341                lpfc_used_cpu = kzalloc((sizeof(uint16_t) * lpfc_present_cpu),
5342                                         GFP_KERNEL);
5343                if (!lpfc_used_cpu) {
5344                        lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5345                                        "3335 Failed allocate memory for msi-x "
5346                                        "interrupt vector mapping\n");
5347                        kfree(phba->sli4_hba.cpu_map);
5348                        rc = -ENOMEM;
5349                        goto out_free_msix;
5350                }
5351                for (i = 0; i < lpfc_present_cpu; i++)
5352                        lpfc_used_cpu[i] = LPFC_VECTOR_MAP_EMPTY;
5353        }
5354
5355        /* Initialize io channels for round robin */
5356        cpup = phba->sli4_hba.cpu_map;
5357        rc = 0;
5358        for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
5359                cpup->channel_id = rc;
5360                rc++;
5361                if (rc >= phba->cfg_fcp_io_channel)
5362                        rc = 0;
5363        }
5364
5365        /*
5366         * Enable sr-iov virtual functions if supported and configured
5367         * through the module parameter.
5368         */
5369        if (phba->cfg_sriov_nr_virtfn > 0) {
5370                rc = lpfc_sli_probe_sriov_nr_virtfn(phba,
5371                                                 phba->cfg_sriov_nr_virtfn);
5372                if (rc) {
5373                        lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
5374                                        "3020 Requested number of SR-IOV "
5375                                        "virtual functions (%d) is not "
5376                                        "supported\n",
5377                                        phba->cfg_sriov_nr_virtfn);
5378                        phba->cfg_sriov_nr_virtfn = 0;
5379                }
5380        }
5381
5382        return 0;
5383
5384out_free_msix:
5385        kfree(phba->sli4_hba.msix_entries);
5386out_free_fcp_eq_hdl:
5387        kfree(phba->sli4_hba.fcp_eq_hdl);
5388out_free_fcf_rr_bmask:
5389        kfree(phba->fcf.fcf_rr_bmask);
5390out_remove_rpi_hdrs:
5391        lpfc_sli4_remove_rpi_hdrs(phba);
5392out_free_active_sgl:
5393        lpfc_free_active_sgl(phba);
5394out_destroy_cq_event_pool:
5395        lpfc_sli4_cq_event_pool_destroy(phba);
5396out_free_bsmbx:
5397        lpfc_destroy_bootstrap_mbox(phba);
5398out_free_mem:
5399        lpfc_mem_free(phba);
5400        return rc;
5401}
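/*
 * Editor's note -- illustrative sketch, not driver code.  The setup routine
 * above uses the kernel's stacked-goto unwind idiom: each resource gets a
 * cleanup label, failures jump to the label matching the last successful
 * step, and teardown therefore runs in exact reverse order of construction.
 * A minimal standalone form with hypothetical names:
 *
 *	int setup(struct ctx *c)
 *	{
 *		c->a = kzalloc(sizeof(*c->a), GFP_KERNEL);
 *		if (!c->a)
 *			return -ENOMEM;
 *		c->b = kzalloc(sizeof(*c->b), GFP_KERNEL);
 *		if (!c->b)
 *			goto out_free_a;
 *		return 0;
 *	out_free_a:
 *		kfree(c->a);
 *		return -ENOMEM;
 *	}
 */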
5402
5403/**
5404 * lpfc_sli4_driver_resource_unset - Unset drvr internal resources for SLI4 dev
5405 * @phba: pointer to lpfc hba data structure.
5406 *
5407 * This routine is invoked to unset the driver internal resources set up
5408 * specifically for supporting the SLI-4 HBA device it is attached to.
5409 **/
5410static void
5411lpfc_sli4_driver_resource_unset(struct lpfc_hba *phba)
5412{
5413        struct lpfc_fcf_conn_entry *conn_entry, *next_conn_entry;
5414
5415        /* Free memory allocated for msi-x interrupt vector to CPU mapping */
5416        kfree(phba->sli4_hba.cpu_map);
5417        phba->sli4_hba.num_present_cpu = 0;
5418        phba->sli4_hba.num_online_cpu = 0;
5419        phba->sli4_hba.curr_disp_cpu = 0;
5420
5421        /* Free memory allocated for msi-x interrupt vector entries */
5422        kfree(phba->sli4_hba.msix_entries);
5423
5424        /* Free memory allocated for fast-path work queue handles */
5425        kfree(phba->sli4_hba.fcp_eq_hdl);
5426
5427        /* Free the allocated rpi headers. */
5428        lpfc_sli4_remove_rpi_hdrs(phba);
5429        lpfc_sli4_remove_rpis(phba);
5430
5431        /* Free eligible FCF index bmask */
5432        kfree(phba->fcf.fcf_rr_bmask);
5433
5434        /* Free the ELS sgl list */
5435        lpfc_free_active_sgl(phba);
5436        lpfc_free_els_sgl_list(phba);
5437
5438        /* Free the completion queue EQ event pool */
5439        lpfc_sli4_cq_event_release_all(phba);
5440        lpfc_sli4_cq_event_pool_destroy(phba);
5441
5442        /* Release resource identifiers. */
5443        lpfc_sli4_dealloc_resource_identifiers(phba);
5444
5445        /* Free the bsmbx region. */
5446        lpfc_destroy_bootstrap_mbox(phba);
5447
5448        /* Free the SLI Layer memory with SLI4 HBAs */
5449        lpfc_mem_free_all(phba);
5450
5451        /* Free the current connect table */
5452        list_for_each_entry_safe(conn_entry, next_conn_entry,
5453                &phba->fcf_conn_rec_list, list) {
5454                list_del_init(&conn_entry->list);
5455                kfree(conn_entry);
5456        }
5457
5458        return;
5459}
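/*
 * Editor's note -- illustrative sketch, hypothetical types.  The connect
 * table teardown above depends on list_for_each_entry_safe(), which caches
 * the next node before the loop body runs so the current entry can be
 * unlinked and freed without corrupting the walk:
 *
 *	struct item *cur, *tmp;
 *
 *	list_for_each_entry_safe(cur, tmp, &head, list) {
 *		list_del_init(&cur->list);
 *		kfree(cur);
 *	}
 */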
5460
5461/**
5462 * lpfc_init_api_table_setup - Set up init api function jump table
5463 * @phba: The hba struct for which this call is being executed.
5464 * @dev_grp: The HBA PCI-Device group number.
5465 *
5466 * This routine sets up the device INIT interface API function jump table
5467 * in @phba struct.
5468 *
5469 * Returns: 0 - success, -ENODEV - failure.
5470 **/
5471int
5472lpfc_init_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
5473{
5474        phba->lpfc_hba_init_link = lpfc_hba_init_link;
5475        phba->lpfc_hba_down_link = lpfc_hba_down_link;
5476        phba->lpfc_selective_reset = lpfc_selective_reset;
5477        switch (dev_grp) {
5478        case LPFC_PCI_DEV_LP:
5479                phba->lpfc_hba_down_post = lpfc_hba_down_post_s3;
5480                phba->lpfc_handle_eratt = lpfc_handle_eratt_s3;
5481                phba->lpfc_stop_port = lpfc_stop_port_s3;
5482                break;
5483        case LPFC_PCI_DEV_OC:
5484                phba->lpfc_hba_down_post = lpfc_hba_down_post_s4;
5485                phba->lpfc_handle_eratt = lpfc_handle_eratt_s4;
5486                phba->lpfc_stop_port = lpfc_stop_port_s4;
5487                break;
5488        default:
5489                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5490                                "1431 Invalid HBA PCI-device group: 0x%x\n",
5491                                dev_grp);
5492                return -ENODEV;
5494        }
5495        return 0;
5496}
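/*
 * Editor's note -- illustrative sketch.  The jump table filled in above lets
 * the rest of the driver stay SLI-revision agnostic: callers go through the
 * function pointer and the right s3/s4 variant runs, e.g. a hypothetical
 * call site:
 *
 *	rc = phba->lpfc_hba_down_post(phba);
 *
 * dispatches to lpfc_hba_down_post_s3() on LPFC_PCI_DEV_LP hardware and to
 * lpfc_hba_down_post_s4() on LPFC_PCI_DEV_OC hardware.
 */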
5497
5498/**
5499 * lpfc_setup_driver_resource_phase1 - Phase1 setup driver internal resources.
5500 * @phba: pointer to lpfc hba data structure.
5501 *
5502 * This routine is invoked to set up the driver internal resources before the
5503 * device specific resource setup to support the HBA device it is attached to.
5504 *
5505 * Return codes
5506 *      0 - successful
5507 *      other values - error
5508 **/
5509static int
5510lpfc_setup_driver_resource_phase1(struct lpfc_hba *phba)
5511{
5512        /*
5513         * Driver resources common to all SLI revisions
5514         */
5515        atomic_set(&phba->fast_event_count, 0);
5516        spin_lock_init(&phba->hbalock);
5517
5518        /* Initialize ndlp management spinlock */
5519        spin_lock_init(&phba->ndlp_lock);
5520
5521        INIT_LIST_HEAD(&phba->port_list);
5522        INIT_LIST_HEAD(&phba->work_list);
5523        init_waitqueue_head(&phba->wait_4_mlo_m_q);
5524
5525        /* Initialize the wait queue head for the kernel thread */
5526        init_waitqueue_head(&phba->work_waitq);
5527
5528        /* Initialize the scsi buffer list used by driver for scsi IO */
5529        spin_lock_init(&phba->scsi_buf_list_get_lock);
5530        INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_get);
5531        spin_lock_init(&phba->scsi_buf_list_put_lock);
5532        INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put);
5533
5534        /* Initialize the fabric iocb list */
5535        INIT_LIST_HEAD(&phba->fabric_iocb_list);
5536
5537        /* Initialize list to save ELS buffers */
5538        INIT_LIST_HEAD(&phba->elsbuf);
5539
5540        /* Initialize FCF connection rec list */
5541        INIT_LIST_HEAD(&phba->fcf_conn_rec_list);
5542
5543        /* Initialize OAS configuration list */
5544        spin_lock_init(&phba->devicelock);
5545        INIT_LIST_HEAD(&phba->luns);
5546
5547        return 0;
5548}
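/*
 * Editor's note -- illustrative sketch, assumed names.  The paired get/put
 * locks initialized above support a two-list buffer scheme: consumers pop
 * from the "get" list and frees land on the "put" list, each under its own
 * lock to cut contention; only when the get list runs dry is the put list
 * spliced over in one operation:
 *
 *	spin_lock(&get_lock);
 *	buf = list_first_entry_or_null(&get_list, struct buf, list);
 *	if (!buf) {
 *		spin_lock(&put_lock);
 *		list_splice_init(&put_list, &get_list);
 *		spin_unlock(&put_lock);
 *		buf = list_first_entry_or_null(&get_list, struct buf, list);
 *	}
 *	if (buf)
 *		list_del(&buf->list);
 *	spin_unlock(&get_lock);
 */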
5549
5550/**
5551 * lpfc_setup_driver_resource_phase2 - Phase2 setup driver internal resources.
5552 * @phba: pointer to lpfc hba data structure.
5553 *
5554 * This routine is invoked to set up the driver internal resources after the
5555 * device specific resource setup to support the HBA device it is attached to.
5556 *
5557 * Return codes
5558 *      0 - successful
5559 *      other values - error
5560 **/
5561static int
5562lpfc_setup_driver_resource_phase2(struct lpfc_hba *phba)
5563{
5564        int error;
5565
5566        /* Startup the kernel thread for this host adapter. */
5567        phba->worker_thread = kthread_run(lpfc_do_work, phba,
5568                                          "lpfc_worker_%d", phba->brd_no);
5569        if (IS_ERR(phba->worker_thread)) {
5570                error = PTR_ERR(phba->worker_thread);
5571                return error;
5572        }
5573
5574        return 0;
5575}
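/*
 * Editor's note -- illustrative sketch.  kthread_run() never returns NULL;
 * on failure it returns an ERR_PTR()-encoded errno, which is why the code
 * above tests IS_ERR() and extracts the value with PTR_ERR().  The bare
 * pattern:
 *
 *	struct task_struct *t;
 *
 *	t = kthread_run(thread_fn, data, "worker_%d", id);
 *	if (IS_ERR(t))
 *		return PTR_ERR(t);	(a negative errno such as -ENOMEM)
 */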
5576
5577/**
5578 * lpfc_unset_driver_resource_phase2 - Phase2 unset driver internal resources.
5579 * @phba: pointer to lpfc hba data structure.
5580 *
5581 * This routine is invoked to unset the driver internal resources set up after
5582 * the device specific resource setup for supporting the HBA device it is
5583 * attached to.
5584 **/
5585static void
5586lpfc_unset_driver_resource_phase2(struct lpfc_hba *phba)
5587{
5588        /* Stop kernel worker thread */
5589        kthread_stop(phba->worker_thread);
5590}
5591
5592/**
5593 * lpfc_free_iocb_list - Free iocb list.
5594 * @phba: pointer to lpfc hba data structure.
5595 *
5596 * This routine is invoked to free the driver's IOCB list and memory.
5597 **/
5598static void
5599lpfc_free_iocb_list(struct lpfc_hba *phba)
5600{
5601        struct lpfc_iocbq *iocbq_entry = NULL, *iocbq_next = NULL;
5602
5603        spin_lock_irq(&phba->hbalock);
5604        list_for_each_entry_safe(iocbq_entry, iocbq_next,
5605                                 &phba->lpfc_iocb_list, list) {
5606                list_del(&iocbq_entry->list);
5607                kfree(iocbq_entry);
5608                phba->total_iocbq_bufs--;
5609        }
5610        spin_unlock_irq(&phba->hbalock);
5611
5612        return;
5613}
5614
5615/**
5616 * lpfc_init_iocb_list - Allocate and initialize iocb list.
5617 * @phba: pointer to lpfc hba data structure.
5618 *
5619 * This routine is invoked to allocate and initialize the driver's IOCB
5620 * list and set up the IOCB tag array accordingly.
5621 *
5622 * Return codes
5623 *      0 - successful
5624 *      other values - error
5625 **/
5626static int
5627lpfc_init_iocb_list(struct lpfc_hba *phba, int iocb_count)
5628{
5629        struct lpfc_iocbq *iocbq_entry = NULL;
5630        uint16_t iotag;
5631        int i;
5632
5633        /* Initialize and populate the iocb list per host.  */
5634        INIT_LIST_HEAD(&phba->lpfc_iocb_list);
5635        for (i = 0; i < iocb_count; i++) {
5636                iocbq_entry = kzalloc(sizeof(struct lpfc_iocbq), GFP_KERNEL);
5637                if (iocbq_entry == NULL) {
5638                        printk(KERN_ERR "%s: only allocated %d iocbs of "
5639                                "expected %d count. Unloading driver.\n",
5640                                __func__, i, iocb_count);
5641                        goto out_free_iocbq;
5642                }
5643
5644                iotag = lpfc_sli_next_iotag(phba, iocbq_entry);
5645                if (iotag == 0) {
5646                        kfree(iocbq_entry);
5647                        printk(KERN_ERR "%s: failed to allocate IOTAG. "
5648                                "Unloading driver.\n", __func__);
5649                        goto out_free_iocbq;
5650                }
5651                iocbq_entry->sli4_lxritag = NO_XRI;
5652                iocbq_entry->sli4_xritag = NO_XRI;
5653
5654                spin_lock_irq(&phba->hbalock);
5655                list_add(&iocbq_entry->list, &phba->lpfc_iocb_list);
5656                phba->total_iocbq_bufs++;
5657                spin_unlock_irq(&phba->hbalock);
5658        }
5659
5660        return 0;
5661
5662out_free_iocbq:
5663        lpfc_free_iocb_list(phba);
5664
5665        return -ENOMEM;
5666}
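/*
 * Editor's note -- illustrative aside, hypothetical field names.  Every
 * iocbq allocated above is given a non-zero iotag by lpfc_sli_next_iotag();
 * the tag indexes a driver lookup array so a completion can be matched back
 * to its originating request in O(1), roughly:
 *
 *	iocbq = sli->lookup[iotag];
 *
 * A zero return means the lookup array could not be grown, so the entry is
 * freed and initialization unwinds through lpfc_free_iocb_list().
 */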
5667
5668/**
5669 * lpfc_free_sgl_list - Free a given sgl list.
5670 * @phba: pointer to lpfc hba data structure.
5671 * @sglq_list: pointer to the head of sgl list.
5672 *
5673 * This routine is invoked to free a given sgl list and memory.
5674 **/
5675void
5676lpfc_free_sgl_list(struct lpfc_hba *phba, struct list_head *sglq_list)
5677{
5678        struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
5679
5680        list_for_each_entry_safe(sglq_entry, sglq_next, sglq_list, list) {
5681                list_del(&sglq_entry->list);
5682                lpfc_mbuf_free(phba, sglq_entry->virt, sglq_entry->phys);
5683                kfree(sglq_entry);
5684        }
5685}
5686
5687/**
5688 * lpfc_free_els_sgl_list - Free els sgl list.
5689 * @phba: pointer to lpfc hba data structure.
5690 *
5691 * This routine is invoked to free the driver's els sgl list and memory.
5692 **/
5693static void
5694lpfc_free_els_sgl_list(struct lpfc_hba *phba)
5695{
5696        LIST_HEAD(sglq_list);
5697        struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
5698
5699        /* Retrieve all els sgls from driver list */
5700        spin_lock_irq(&phba->hbalock);
5701        spin_lock(&pring->ring_lock);
5702        list_splice_init(&phba->sli4_hba.lpfc_sgl_list, &sglq_list);
5703        spin_unlock(&pring->ring_lock);
5704        spin_unlock_irq(&phba->hbalock);
5705
5706        /* Now free the sgl list */
5707        lpfc_free_sgl_list(phba, &sglq_list);
5708}
5709
5710/**
5711 * lpfc_init_active_sgl_array - Allocate the buf to track active ELS XRIs.
5712 * @phba: pointer to lpfc hba data structure.
5713 *
5714 * This routine is invoked to allocate the driver's active sgl memory.
5715 * This array will hold the sglq_entry pointers for active IOs.
5716 **/
5717static int
5718lpfc_init_active_sgl_array(struct lpfc_hba *phba)
5719{
5720        int size;
5721        size = sizeof(struct lpfc_sglq *);
5722        size *= phba->sli4_hba.max_cfg_param.max_xri;
5723
5724        phba->sli4_hba.lpfc_sglq_active_list =
5725                kzalloc(size, GFP_KERNEL);
5726        if (!phba->sli4_hba.lpfc_sglq_active_list)
5727                return -ENOMEM;
5728        return 0;
5729}
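/*
 * Editor's note -- a sizing aside, not driver code.  The open-coded
 * size-times-count multiplication above can in principle overflow; the same
 * allocation is usually written with kcalloc(), which checks the
 * multiplication internally:
 *
 *	phba->sli4_hba.lpfc_sglq_active_list =
 *		kcalloc(phba->sli4_hba.max_cfg_param.max_xri,
 *			sizeof(struct lpfc_sglq *), GFP_KERNEL);
 */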
5730
5731/**
5732 * lpfc_free_active_sgl - Free the buf that tracks active ELS XRIs.
5733 * @phba: pointer to lpfc hba data structure.
5734 *
5735 * This routine is invoked to walk through the array of active sglq entries
5736 * and free all of the resources.
5737 * This is just a place holder for now.
5738 **/
5739static void
5740lpfc_free_active_sgl(struct lpfc_hba *phba)
5741{
5742        kfree(phba->sli4_hba.lpfc_sglq_active_list);
5743}
5744
5745/**
5746 * lpfc_init_sgl_list - Allocate and initialize sgl list.
5747 * @phba: pointer to lpfc hba data structure.
5748 *
5749 * This routine is invoked to allocate and initialize the driver's sgl
5750 * list and set up the sgl xritag array accordingly.
5751 *
5752 **/
5753static void
5754lpfc_init_sgl_list(struct lpfc_hba *phba)
5755{
5756        /* Initialize and populate the sglq list per host/VF. */
5757        INIT_LIST_HEAD(&phba->sli4_hba.lpfc_sgl_list);
5758        INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_els_sgl_list);
5759
5760        /* els xri-sgl book keeping */
5761        phba->sli4_hba.els_xri_cnt = 0;
5762
5763        /* scsi xri-buffer book keeping */
5764        phba->sli4_hba.scsi_xri_cnt = 0;
5765}
5766
5767/**
5768 * lpfc_sli4_init_rpi_hdrs - Post the rpi header memory region to the port
5769 * @phba: pointer to lpfc hba data structure.
5770 *
5771 * This routine is invoked to post rpi header templates to the
5772 * port for those SLI4 ports that do not support extents.  This routine
5773 * posts a PAGE_SIZE memory region to the port to hold up to
5774 * PAGE_SIZE / 64 rpi context headers.  This is an initialization routine
5775 * and should be called only when interrupts are disabled.
5776 *
5777 * Return codes
5778 *      0 - successful
5779 *      -ERROR - otherwise.
5780 **/
5781int
5782lpfc_sli4_init_rpi_hdrs(struct lpfc_hba *phba)
5783{
5784        int rc = 0;
5785        struct lpfc_rpi_hdr *rpi_hdr;
5786
5787        INIT_LIST_HEAD(&phba->sli4_hba.lpfc_rpi_hdr_list);
5788        if (!phba->sli4_hba.rpi_hdrs_in_use)
5789                return rc;
5790        if (phba->sli4_hba.extents_in_use)
5791                return -EIO;
5792
5793        rpi_hdr = lpfc_sli4_create_rpi_hdr(phba);
5794        if (!rpi_hdr) {
5795                lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
5796                                "0391 Error during rpi post operation\n");
5797                lpfc_sli4_remove_rpis(phba);
5798                rc = -ENODEV;
5799        }
5800
5801        return rc;
5802}
5803
5804/**
5805 * lpfc_sli4_create_rpi_hdr - Allocate an rpi header memory region
5806 * @phba: pointer to lpfc hba data structure.
5807 *
5808 * This routine is invoked to allocate a single 4KB memory region to
5809 * support rpis and stores them in the phba.  This single region
5810 * provides support for up to 64 rpis.  The region is used globally
5811 * by the device.
5812 *
5813 * Returns:
5814 *   A valid rpi hdr on success.
5815 *   A NULL pointer on any failure.
5816 **/
5817struct lpfc_rpi_hdr *
5818lpfc_sli4_create_rpi_hdr(struct lpfc_hba *phba)
5819{
5820        uint16_t rpi_limit, curr_rpi_range;
5821        struct lpfc_dmabuf *dmabuf;
5822        struct lpfc_rpi_hdr *rpi_hdr;
5823        uint32_t rpi_count;
5824
5825        /*
5826         * If the SLI4 port supports extents, posting the rpi header isn't
5827         * required.  Set the expected maximum count and let the actual value
5828         * get set when extents are fully allocated.
5829         */
5830        if (!phba->sli4_hba.rpi_hdrs_in_use)
5831                return NULL;
5832        if (phba->sli4_hba.extents_in_use)
5833                return NULL;
5834
5835        /* The limit on the logical index is just the max_rpi count. */
5836        rpi_limit = phba->sli4_hba.max_cfg_param.rpi_base +
5837                    phba->sli4_hba.max_cfg_param.max_rpi - 1;
5838
5839        spin_lock_irq(&phba->hbalock);
5840        /*
5841         * Establish the starting RPI in this header block.  The starting
5842         * rpi is normalized to a zero base because the physical rpi is
5843         * port based.
5844         */
5845        curr_rpi_range = phba->sli4_hba.next_rpi;
5846        spin_unlock_irq(&phba->hbalock);
5847
5848        /*
5849         * The port has a limited number of rpis. The increment here
5850         * is LPFC_RPI_HDR_COUNT - 1 to account for the starting value
5851         * and to allow the full max_rpi range per port.
5852         */
5853        if ((curr_rpi_range + (LPFC_RPI_HDR_COUNT - 1)) > rpi_limit)
5854                rpi_count = rpi_limit - curr_rpi_range;
5855        else
5856                rpi_count = LPFC_RPI_HDR_COUNT;
5857
5858        if (!rpi_count)
5859                return NULL;
5860        /*
5861         * First allocate the protocol header region for the port.  The
5862         * port expects a 4KB DMA-mapped memory region that is 4K aligned.
5863         */
5864        dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
5865        if (!dmabuf)
5866                return NULL;
5867
5868        dmabuf->virt = dma_zalloc_coherent(&phba->pcidev->dev,
5869                                           LPFC_HDR_TEMPLATE_SIZE,
5870                                           &dmabuf->phys, GFP_KERNEL);
5871        if (!dmabuf->virt) {
5872                rpi_hdr = NULL;
5873                goto err_free_dmabuf;
5874        }
5875
5876        if (!IS_ALIGNED(dmabuf->phys, LPFC_HDR_TEMPLATE_SIZE)) {
5877                rpi_hdr = NULL;
5878                goto err_free_coherent;
5879        }
5880
5881        /* Save the rpi header data for cleanup later. */
5882        rpi_hdr = kzalloc(sizeof(struct lpfc_rpi_hdr), GFP_KERNEL);
5883        if (!rpi_hdr)
5884                goto err_free_coherent;
5885
5886        rpi_hdr->dmabuf = dmabuf;
5887        rpi_hdr->len = LPFC_HDR_TEMPLATE_SIZE;
5888        rpi_hdr->page_count = 1;
5889        spin_lock_irq(&phba->hbalock);
5890
5891        /* The rpi_hdr stores the logical index only. */
5892        rpi_hdr->start_rpi = curr_rpi_range;
5893        list_add_tail(&rpi_hdr->list, &phba->sli4_hba.lpfc_rpi_hdr_list);
5894
5895        /*
5896         * The next_rpi stores the next logical modulo-64 rpi value used
5897         * to post physical rpis in subsequent rpi postings.
5898         */
5899        phba->sli4_hba.next_rpi += rpi_count;
5900        spin_unlock_irq(&phba->hbalock);
5901        return rpi_hdr;
5902
5903 err_free_coherent:
5904        dma_free_coherent(&phba->pcidev->dev, LPFC_HDR_TEMPLATE_SIZE,
5905                          dmabuf->virt, dmabuf->phys);
5906 err_free_dmabuf:
5907        kfree(dmabuf);
5908        return NULL;
5909}
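/*
 * Editor's note -- illustrative sketch, assumed names.  The routine above
 * does not trust dma_zalloc_coherent() to hand back a suitably aligned bus
 * address; it verifies with IS_ALIGNED() and fails cleanly otherwise.  The
 * defensive shape in miniature:
 *
 *	buf->virt = dma_zalloc_coherent(dev, size, &buf->phys, GFP_KERNEL);
 *	if (!buf->virt)
 *		goto err;
 *	if (!IS_ALIGNED(buf->phys, size))
 *		goto err_free;	(unusable by the port: free and bail out)
 */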
5910
5911/**
5912 * lpfc_sli4_remove_rpi_hdrs - Remove all rpi header memory regions
5913 * @phba: pointer to lpfc hba data structure.
5914 *
5915 * This routine is invoked to remove all memory resources allocated
5916 * to support rpis for SLI4 ports not supporting extents. This routine
5917 * presumes the caller has released all rpis consumed by fabric or port
5918 * logins and is prepared to have the header pages removed.
5919 **/
5920void
5921lpfc_sli4_remove_rpi_hdrs(struct lpfc_hba *phba)
5922{
5923        struct lpfc_rpi_hdr *rpi_hdr, *next_rpi_hdr;
5924
5925        if (!phba->sli4_hba.rpi_hdrs_in_use)
5926                goto exit;
5927
5928        list_for_each_entry_safe(rpi_hdr, next_rpi_hdr,
5929                                 &phba->sli4_hba.lpfc_rpi_hdr_list, list) {
5930                list_del(&rpi_hdr->list);
5931                dma_free_coherent(&phba->pcidev->dev, rpi_hdr->len,
5932                                  rpi_hdr->dmabuf->virt, rpi_hdr->dmabuf->phys);
5933                kfree(rpi_hdr->dmabuf);
5934                kfree(rpi_hdr);
5935        }
5936 exit:
5937        /* There are no rpis available to the port now. */
5938        phba->sli4_hba.next_rpi = 0;
5939}
5940
5941/**
5942 * lpfc_hba_alloc - Allocate driver hba data structure for a device.
5943 * @pdev: pointer to pci device data structure.
5944 *
5945 * This routine is invoked to allocate the driver hba data structure for an
5946 * HBA device. If the allocation is successful, the phba reference to the
5947 * PCI device data structure is set.
5948 *
5949 * Return codes
5950 *      pointer to @phba - successful
5951 *      NULL - error
5952 **/
5953static struct lpfc_hba *
5954lpfc_hba_alloc(struct pci_dev *pdev)
5955{
5956        struct lpfc_hba *phba;
5957
5958        /* Allocate memory for HBA structure */
5959        phba = kzalloc(sizeof(struct lpfc_hba), GFP_KERNEL);
5960        if (!phba) {
5961                dev_err(&pdev->dev, "failed to allocate hba struct\n");
5962                return NULL;
5963        }
5964
5965        /* Set reference to PCI device in HBA structure */
5966        phba->pcidev = pdev;
5967
5968        /* Assign an unused board number */
5969        phba->brd_no = lpfc_get_instance();
5970        if (phba->brd_no < 0) {
5971                kfree(phba);
5972                return NULL;
5973        }
5974
5975        spin_lock_init(&phba->ct_ev_lock);
5976        INIT_LIST_HEAD(&phba->ct_ev_waiters);
5977
5978        return phba;
5979}
5980
5981/**
5982 * lpfc_hba_free - Free driver hba data structure with a device.
5983 * @phba: pointer to lpfc hba data structure.
5984 *
5985 * This routine is invoked to free the driver hba data structure with an
5986 * HBA device.
5987 **/
5988static void
5989lpfc_hba_free(struct lpfc_hba *phba)
5990{
5991        /* Release the driver assigned board number */
5992        idr_remove(&lpfc_hba_index, phba->brd_no);
5993
5994        /* Free memory allocated with sli rings */
5995        kfree(phba->sli.ring);
5996        phba->sli.ring = NULL;
5997
5998        kfree(phba);
5999        return;
6000}
6001
6002/**
6003 * lpfc_create_shost - Create hba physical port with associated scsi host.
6004 * @phba: pointer to lpfc hba data structure.
6005 *
6006 * This routine is invoked to create HBA physical port and associate a SCSI
6007 * host with it.
6008 *
6009 * Return codes
6010 *      0 - successful
6011 *      other values - error
6012 **/
6013static int
6014lpfc_create_shost(struct lpfc_hba *phba)
6015{
6016        struct lpfc_vport *vport;
6017        struct Scsi_Host  *shost;
6018
6019        /* Initialize HBA FC structure */
6020        phba->fc_edtov = FF_DEF_EDTOV;
6021        phba->fc_ratov = FF_DEF_RATOV;
6022        phba->fc_altov = FF_DEF_ALTOV;
6023        phba->fc_arbtov = FF_DEF_ARBTOV;
6024
6025        atomic_set(&phba->sdev_cnt, 0);
6026        vport = lpfc_create_port(phba, phba->brd_no, &phba->pcidev->dev);
6027        if (!vport)
6028                return -ENODEV;
6029
6030        shost = lpfc_shost_from_vport(vport);
6031        phba->pport = vport;
6032        lpfc_debugfs_initialize(vport);
6033        /* Put reference to SCSI host to driver's device private data */
6034        pci_set_drvdata(phba->pcidev, shost);
6035
6036        return 0;
6037}
6038
6039/**
6040 * lpfc_destroy_shost - Destroy hba physical port with associated scsi host.
6041 * @phba: pointer to lpfc hba data structure.
6042 *
6043 * This routine is invoked to destroy HBA physical port and the associated
6044 * SCSI host.
6045 **/
6046static void
6047lpfc_destroy_shost(struct lpfc_hba *phba)
6048{
6049        struct lpfc_vport *vport = phba->pport;
6050
6051        /* Destroy the physical port associated with the SCSI host */
6052        destroy_port(vport);
6053
6054        return;
6055}
6056
6057/**
6058 * lpfc_setup_bg - Setup Block guard structures and debug areas.
6059 * @phba: pointer to lpfc hba data structure.
6060 * @shost: the shost to be used to detect Block guard settings.
6061 *
6062 * This routine sets up the local Block guard protocol settings for @shost.
6063 * This routine also allocates memory for debugging bg buffers.
6064 **/
6065static void
6066lpfc_setup_bg(struct lpfc_hba *phba, struct Scsi_Host *shost)
6067{
6068        uint32_t old_mask;
6069        uint32_t old_guard;
6070
6071        int pagecnt = 10;
6072        if (lpfc_prot_mask && lpfc_prot_guard) {
6073                lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6074                                "1478 Registering BlockGuard with the "
6075                                "SCSI layer\n");
6076
6077                old_mask = lpfc_prot_mask;
6078                old_guard = lpfc_prot_guard;
6079
6080                /* Only allow supported values */
6081                lpfc_prot_mask &= (SHOST_DIF_TYPE1_PROTECTION |
6082                        SHOST_DIX_TYPE0_PROTECTION |
6083                        SHOST_DIX_TYPE1_PROTECTION);
6084                lpfc_prot_guard &= (SHOST_DIX_GUARD_IP | SHOST_DIX_GUARD_CRC);
6085
6086                /* DIF Type 1 protection for profiles AST1/C1 is end to end */
6087                if (lpfc_prot_mask == SHOST_DIX_TYPE1_PROTECTION)
6088                        lpfc_prot_mask |= SHOST_DIF_TYPE1_PROTECTION;
6089
6090                if (lpfc_prot_mask && lpfc_prot_guard) {
6091                        if ((old_mask != lpfc_prot_mask) ||
6092                                (old_guard != lpfc_prot_guard))
6093                                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6094                                        "1475 Registering BlockGuard with the "
6095                                        "SCSI layer: mask %d  guard %d\n",
6096                                        lpfc_prot_mask, lpfc_prot_guard);
6097
6098                        scsi_host_set_prot(shost, lpfc_prot_mask);
6099                        scsi_host_set_guard(shost, lpfc_prot_guard);
6100                } else
6101                        lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6102                                "1479 Not Registering BlockGuard with the SCSI "
6103                                "layer, Bad protection parameters: %d %d\n",
6104                                old_mask, old_guard);
6105        }
6106
6107        if (!_dump_buf_data) {
6108                while (pagecnt) {
6109                        spin_lock_init(&_dump_buf_lock);
6110                        _dump_buf_data =
6111                                (char *) __get_free_pages(GFP_KERNEL, pagecnt);
6112                        if (_dump_buf_data) {
6113                                lpfc_printf_log(phba, KERN_ERR, LOG_BG,
6114                                        "9043 BLKGRD: allocated %d pages for "
6115                                       "_dump_buf_data at 0x%p\n",
6116                                       (1 << pagecnt), _dump_buf_data);
6117                                _dump_buf_data_order = pagecnt;
6118                                memset(_dump_buf_data, 0,
6119                                       ((1 << PAGE_SHIFT) << pagecnt));
6120                                break;
6121                        } else
6122                                --pagecnt;
6123                }
6124                if (!_dump_buf_data_order)
6125                        lpfc_printf_log(phba, KERN_ERR, LOG_BG,
6126                                "9044 BLKGRD: ERROR unable to allocate "
6127                               "memory for hexdump\n");
6128        } else
6129                lpfc_printf_log(phba, KERN_ERR, LOG_BG,
6130                        "9045 BLKGRD: already allocated _dump_buf_data=0x%p"
6131                       "\n", _dump_buf_data);
6132        if (!_dump_buf_dif) {
6133                while (pagecnt) {
6134                        _dump_buf_dif =
6135                                (char *) __get_free_pages(GFP_KERNEL, pagecnt);
6136                        if (_dump_buf_dif) {
6137                                lpfc_printf_log(phba, KERN_ERR, LOG_BG,
6138                                        "9046 BLKGRD: allocated %d pages for "
6139                                       "_dump_buf_dif at 0x%p\n",
6140                                       (1 << pagecnt), _dump_buf_dif);
6141                                _dump_buf_dif_order = pagecnt;
6142                                memset(_dump_buf_dif, 0,
6143                                       ((1 << PAGE_SHIFT) << pagecnt));
6144                                break;
6145                        } else
6146                                --pagecnt;
6147                }
6148                if (!_dump_buf_dif_order)
6149                        lpfc_printf_log(phba, KERN_ERR, LOG_BG,
6150                        "9047 BLKGRD: ERROR unable to allocate "
6151                               "memory for hexdump\n");
6152        } else
6153                lpfc_printf_log(phba, KERN_ERR, LOG_BG,
6154                        "9048 BLKGRD: already allocated _dump_buf_dif=0x%p\n",
6155                       _dump_buf_dif);
6156}
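/*
 * Editor's note -- illustrative sketch.  The dump-buffer loops above use the
 * classic shrink-until-it-fits fallback for __get_free_pages(); pagecnt is
 * an allocation *order*, so each retry halves the request (2^order pages).
 * In isolation:
 *
 *	int order = 10;
 *	char *buf = NULL;
 *
 *	while (order && !(buf = (char *)__get_free_pages(GFP_KERNEL, order)))
 *		order--;
 *	(on success the region spans (1 << order) pages)
 */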
6157
6158/**
6159 * lpfc_post_init_setup - Perform necessary device post initialization setup.
6160 * @phba: pointer to lpfc hba data structure.
6161 *
6162 * This routine is invoked to perform all the necessary post initialization
6163 * setup for the device.
6164 **/
6165static void
6166lpfc_post_init_setup(struct lpfc_hba *phba)
6167{
6168        struct Scsi_Host  *shost;
6169        struct lpfc_adapter_event_header adapter_event;
6170
6171        /* Get the default values for Model Name and Description */
6172        lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);
6173
6174        /*
6175         * hba setup may have changed the hba_queue_depth so we need to
6176         * adjust the value of can_queue.
6177         */
6178        shost = pci_get_drvdata(phba->pcidev);
6179        shost->can_queue = phba->cfg_hba_queue_depth - 10;
6180        if (phba->sli3_options & LPFC_SLI3_BG_ENABLED)
6181                lpfc_setup_bg(phba, shost);
6182
6183        lpfc_host_attrib_init(shost);
6184
6185        if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
6186                spin_lock_irq(shost->host_lock);
6187                lpfc_poll_start_timer(phba);
6188                spin_unlock_irq(shost->host_lock);
6189        }
6190
6191        lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6192                        "0428 Perform SCSI scan\n");
6193        /* Send board arrival event to upper layer */
6194        adapter_event.event_type = FC_REG_ADAPTER_EVENT;
6195        adapter_event.subcategory = LPFC_EVENT_ARRIVAL;
6196        fc_host_post_vendor_event(shost, fc_get_event_number(),
6197                                  sizeof(adapter_event),
6198                                  (char *) &adapter_event,
6199                                  LPFC_NL_VENDOR_ID);
6200        return;
6201}
6202
6203/**
6204 * lpfc_sli_pci_mem_setup - Setup SLI3 HBA PCI memory space.
6205 * @phba: pointer to lpfc hba data structure.
6206 *
6207 * This routine is invoked to set up the PCI device memory space for device
6208 * with SLI-3 interface spec.
6209 *
6210 * Return codes
6211 *      0 - successful
6212 *      other values - error
6213 **/
6214static int
6215lpfc_sli_pci_mem_setup(struct lpfc_hba *phba)
6216{
6217        struct pci_dev *pdev;
6218        unsigned long bar0map_len, bar2map_len;
6219        int i, hbq_count;
6220        void *ptr;
6221        int error = -ENODEV;
6222
6223        /* Obtain PCI device reference */
6224        if (!phba->pcidev)
6225                return error;
6226        else
6227                pdev = phba->pcidev;
6228
6229        /* Set the device DMA mask size */
6230        if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0
6231         || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) {
6232                if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0
6233                 || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
6234                        return error;
6235                }
6236        }
6237
6238        /* Get the bus address of Bar0 and Bar2 and the number of bytes
6239         * required by each mapping.
6240         */
6241        phba->pci_bar0_map = pci_resource_start(pdev, 0);
6242        bar0map_len = pci_resource_len(pdev, 0);
6243
6244        phba->pci_bar2_map = pci_resource_start(pdev, 2);
6245        bar2map_len = pci_resource_len(pdev, 2);
6246
6247        /* Map HBA SLIM to a kernel virtual address. */
6248        phba->slim_memmap_p = ioremap(phba->pci_bar0_map, bar0map_len);
6249        if (!phba->slim_memmap_p) {
6250                dev_printk(KERN_ERR, &pdev->dev,
6251                           "ioremap failed for SLIM memory.\n");
6252                goto out;
6253        }
6254
6255        /* Map HBA Control Registers to a kernel virtual address. */
6256        phba->ctrl_regs_memmap_p = ioremap(phba->pci_bar2_map, bar2map_len);
6257        if (!phba->ctrl_regs_memmap_p) {
6258                dev_printk(KERN_ERR, &pdev->dev,
6259                           "ioremap failed for HBA control registers.\n");
6260                goto out_iounmap_slim;
6261        }
6262
6263        /* Allocate memory for SLI-2 structures */
6264        phba->slim2p.virt = dma_zalloc_coherent(&pdev->dev, SLI2_SLIM_SIZE,
6265                                                &phba->slim2p.phys, GFP_KERNEL);
6266        if (!phba->slim2p.virt)
6267                goto out_iounmap;
6268
6269        phba->mbox = phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, mbx);
6270        phba->mbox_ext = (phba->slim2p.virt +
6271                offsetof(struct lpfc_sli2_slim, mbx_ext_words));
6272        phba->pcb = (phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, pcb));
6273        phba->IOCBs = (phba->slim2p.virt +
6274                       offsetof(struct lpfc_sli2_slim, IOCBs));
6275
6276        phba->hbqslimp.virt = dma_alloc_coherent(&pdev->dev,
6277                                                 lpfc_sli_hbq_size(),
6278                                                 &phba->hbqslimp.phys,
6279                                                 GFP_KERNEL);
6280        if (!phba->hbqslimp.virt)
6281                goto out_free_slim;
6282
6283        hbq_count = lpfc_sli_hbq_count();
6284        ptr = phba->hbqslimp.virt;
6285        for (i = 0; i < hbq_count; ++i) {
6286                phba->hbqs[i].hbq_virt = ptr;
6287                INIT_LIST_HEAD(&phba->hbqs[i].hbq_buffer_list);
6288                ptr += (lpfc_hbq_defs[i]->entry_count *
6289                        sizeof(struct lpfc_hbq_entry));
6290        }
6291        phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_els_hbq_alloc;
6292        phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_els_hbq_free;
6293
6294        memset(phba->hbqslimp.virt, 0, lpfc_sli_hbq_size());
6295
6296        INIT_LIST_HEAD(&phba->rb_pend_list);
6297
6298        phba->MBslimaddr = phba->slim_memmap_p;
6299        phba->HAregaddr = phba->ctrl_regs_memmap_p + HA_REG_OFFSET;
6300        phba->CAregaddr = phba->ctrl_regs_memmap_p + CA_REG_OFFSET;
6301        phba->HSregaddr = phba->ctrl_regs_memmap_p + HS_REG_OFFSET;
6302        phba->HCregaddr = phba->ctrl_regs_memmap_p + HC_REG_OFFSET;
6303
6304        return 0;
6305
6306out_free_slim:
6307        dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
6308                          phba->slim2p.virt, phba->slim2p.phys);
6309out_iounmap:
6310        iounmap(phba->ctrl_regs_memmap_p);
6311out_iounmap_slim:
6312        iounmap(phba->slim_memmap_p);
6313out:
6314        return error;
6315}
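/*
 * Editor's note -- equivalent form, not driver code.  The four-call DMA mask
 * negotiation above (64-bit first, 32-bit fallback, streaming and coherent
 * masks both set) can be expressed with dma_set_mask_and_coherent():
 *
 *	if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) &&
 *	    dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)))
 *		return -ENODEV;	(no usable DMA addressing mode)
 */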
6316
6317/**
6318 * lpfc_sli_pci_mem_unset - Unset SLI3 HBA PCI memory space.
6319 * @phba: pointer to lpfc hba data structure.
6320 *
6321 * This routine is invoked to unset the PCI device memory space for device
6322 * with SLI-3 interface spec.
6323 **/
6324static void
6325lpfc_sli_pci_mem_unset(struct lpfc_hba *phba)
6326{
6327        struct pci_dev *pdev;
6328
6329        /* Obtain PCI device reference */
6330        if (!phba->pcidev)
6331                return;
6332        else
6333                pdev = phba->pcidev;
6334
6335        /* Free coherent DMA memory allocated */
6336        dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(),
6337                          phba->hbqslimp.virt, phba->hbqslimp.phys);
6338        dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
6339                          phba->slim2p.virt, phba->slim2p.phys);
6340
6341        /* I/O memory unmap */
6342        iounmap(phba->ctrl_regs_memmap_p);
6343        iounmap(phba->slim_memmap_p);
6344
6345        return;
6346}
6347
6348/**
6349 * lpfc_sli4_post_status_check - Wait for SLI4 POST done and check status
6350 * @phba: pointer to lpfc hba data structure.
6351 *
6352 * This routine is invoked to wait for SLI4 device Power On Self Test (POST)
6353 * done and check status.
6354 *
6355 * Return 0 if successful, otherwise -ENODEV.
6356 **/
6357int
6358lpfc_sli4_post_status_check(struct lpfc_hba *phba)
6359{
6360        struct lpfc_register portsmphr_reg, uerrlo_reg, uerrhi_reg;
6361        struct lpfc_register reg_data;
6362        int i, port_error = 0;
6363        uint32_t if_type;
6364
6365        memset(&portsmphr_reg, 0, sizeof(portsmphr_reg));
6366        memset(&reg_data, 0, sizeof(reg_data));
6367        if (!phba->sli4_hba.PSMPHRregaddr)
6368                return -ENODEV;
6369
6370        /* Wait up to 30 seconds for the SLI Port POST done and ready */
6371        for (i = 0; i < 3000; i++) {
6372                if (lpfc_readl(phba->sli4_hba.PSMPHRregaddr,
6373                        &portsmphr_reg.word0) ||
6374                        (bf_get(lpfc_port_smphr_perr, &portsmphr_reg))) {
6375                        /* Port has a fatal POST error, break out */
6376                        port_error = -ENODEV;
6377                        break;
6378                }
6379                if (LPFC_POST_STAGE_PORT_READY ==
6380                    bf_get(lpfc_port_smphr_port_status, &portsmphr_reg))
6381                        break;
6382                msleep(10);
6383        }
6384
6385        /*
6386         * If there was a port error during POST, then don't proceed with
6387         * other register reads as the data may not be valid.  Just exit.
6388         */
6389        if (port_error) {
6390                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6391                        "1408 Port Failed POST - portsmphr=0x%x, "
6392                        "perr=x%x, sfi=x%x, nip=x%x, ipc=x%x, scr1=x%x, "
6393                        "scr2=x%x, hscratch=x%x, pstatus=x%x\n",
6394                        portsmphr_reg.word0,
6395                        bf_get(lpfc_port_smphr_perr, &portsmphr_reg),
6396                        bf_get(lpfc_port_smphr_sfi, &portsmphr_reg),
6397                        bf_get(lpfc_port_smphr_nip, &portsmphr_reg),
6398                        bf_get(lpfc_port_smphr_ipc, &portsmphr_reg),
6399                        bf_get(lpfc_port_smphr_scr1, &portsmphr_reg),
6400                        bf_get(lpfc_port_smphr_scr2, &portsmphr_reg),
6401                        bf_get(lpfc_port_smphr_host_scratch, &portsmphr_reg),
6402                        bf_get(lpfc_port_smphr_port_status, &portsmphr_reg));
6403        } else {
6404                lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6405                                "2534 Device Info: SLIFamily=0x%x, "
6406                                "SLIRev=0x%x, IFType=0x%x, SLIHint_1=0x%x, "
6407                                "SLIHint_2=0x%x, FT=0x%x\n",
6408                                bf_get(lpfc_sli_intf_sli_family,
6409                                       &phba->sli4_hba.sli_intf),
6410                                bf_get(lpfc_sli_intf_slirev,
6411                                       &phba->sli4_hba.sli_intf),
6412                                bf_get(lpfc_sli_intf_if_type,
6413                                       &phba->sli4_hba.sli_intf),
6414                                bf_get(lpfc_sli_intf_sli_hint1,
6415                                       &phba->sli4_hba.sli_intf),
6416                                bf_get(lpfc_sli_intf_sli_hint2,
6417                                       &phba->sli4_hba.sli_intf),
6418                                bf_get(lpfc_sli_intf_func_type,
6419                                       &phba->sli4_hba.sli_intf));
6420                /*
6421                 * Check for other Port errors during the initialization
6422                 * process.  Fail the load if the port did not come up
6423                 * correctly.
6424                 */
6425                if_type = bf_get(lpfc_sli_intf_if_type,
6426                                 &phba->sli4_hba.sli_intf);
6427                switch (if_type) {
6428                case LPFC_SLI_INTF_IF_TYPE_0:
6429                        phba->sli4_hba.ue_mask_lo =
6430                              readl(phba->sli4_hba.u.if_type0.UEMASKLOregaddr);
6431                        phba->sli4_hba.ue_mask_hi =
6432                              readl(phba->sli4_hba.u.if_type0.UEMASKHIregaddr);
6433                        uerrlo_reg.word0 =
6434                              readl(phba->sli4_hba.u.if_type0.UERRLOregaddr);
6435                        uerrhi_reg.word0 =
6436                                readl(phba->sli4_hba.u.if_type0.UERRHIregaddr);
6437                        if ((~phba->sli4_hba.ue_mask_lo & uerrlo_reg.word0) ||
6438                            (~phba->sli4_hba.ue_mask_hi & uerrhi_reg.word0)) {
6439                                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6440                                                "1422 Unrecoverable Error "
6441                                                "Detected during POST "
6442                                                "uerr_lo_reg=0x%x, "
6443                                                "uerr_hi_reg=0x%x, "
6444                                                "ue_mask_lo_reg=0x%x, "
6445                                                "ue_mask_hi_reg=0x%x\n",
6446                                                uerrlo_reg.word0,
6447                                                uerrhi_reg.word0,
6448                                                phba->sli4_hba.ue_mask_lo,
6449                                                phba->sli4_hba.ue_mask_hi);
6450                                port_error = -ENODEV;
6451                        }
6452                        break;
6453                case LPFC_SLI_INTF_IF_TYPE_2:
6454                        /* Final checks.  The port status should be clean. */
6455                        if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
6456                                &reg_data.word0) ||
6457                                (bf_get(lpfc_sliport_status_err, &reg_data) &&
6458                                 !bf_get(lpfc_sliport_status_rn, &reg_data))) {
6459                                phba->work_status[0] =
6460                                        readl(phba->sli4_hba.u.if_type2.
6461                                              ERR1regaddr);
6462                                phba->work_status[1] =
6463                                        readl(phba->sli4_hba.u.if_type2.
6464                                              ERR2regaddr);
6465                                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6466                                        "2888 Unrecoverable port error "
6467                                        "following POST: port status reg "
6468                                        "0x%x, port_smphr reg 0x%x, "
6469                                        "error 1=0x%x, error 2=0x%x\n",
6470                                        reg_data.word0,
6471                                        portsmphr_reg.word0,
6472                                        phba->work_status[0],
6473                                        phba->work_status[1]);
6474                                port_error = -ENODEV;
6475                        }
6476                        break;
6477                case LPFC_SLI_INTF_IF_TYPE_1:
6478                default:
6479                        break;
6480                }
6481        }
6482        return port_error;
6483}
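/*
 * Editor's note -- illustrative skeleton, hypothetical helpers.  The POST
 * wait above is a bounded poll: 3000 iterations of msleep(10) gives the
 * advertised 30-second budget, with an early exit on either a fatal error
 * bit or the port-ready stage:
 *
 *	for (i = 0; i < 3000; i++) {
 *		if (read_status(&sts) || fatal(sts))
 *			return -ENODEV;
 *		if (ready(sts))
 *			break;
 *		msleep(10);
 *	}
 */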
6484
6485/**
6486 * lpfc_sli4_bar0_register_memmap - Set up SLI4 BAR0 register memory map.
6487 * @phba: pointer to lpfc hba data structure.
6488 * @if_type:  The SLI4 interface type getting configured.
6489 *
6490 * This routine is invoked to set up SLI4 BAR0 PCI config space register
6491 * memory map.
6492 **/
6493static void
6494lpfc_sli4_bar0_register_memmap(struct lpfc_hba *phba, uint32_t if_type)
6495{
6496        switch (if_type) {
6497        case LPFC_SLI_INTF_IF_TYPE_0:
6498                phba->sli4_hba.u.if_type0.UERRLOregaddr =
6499                        phba->sli4_hba.conf_regs_memmap_p + LPFC_UERR_STATUS_LO;
6500                phba->sli4_hba.u.if_type0.UERRHIregaddr =
6501                        phba->sli4_hba.conf_regs_memmap_p + LPFC_UERR_STATUS_HI;
6502                phba->sli4_hba.u.if_type0.UEMASKLOregaddr =
6503                        phba->sli4_hba.conf_regs_memmap_p + LPFC_UE_MASK_LO;
6504                phba->sli4_hba.u.if_type0.UEMASKHIregaddr =
6505                        phba->sli4_hba.conf_regs_memmap_p + LPFC_UE_MASK_HI;
6506                phba->sli4_hba.SLIINTFregaddr =
6507                        phba->sli4_hba.conf_regs_memmap_p + LPFC_SLI_INTF;
6508                break;
6509        case LPFC_SLI_INTF_IF_TYPE_2:
6510                phba->sli4_hba.u.if_type2.ERR1regaddr =
6511                        phba->sli4_hba.conf_regs_memmap_p +
6512                                                LPFC_CTL_PORT_ER1_OFFSET;
6513                phba->sli4_hba.u.if_type2.ERR2regaddr =
6514                        phba->sli4_hba.conf_regs_memmap_p +
6515                                                LPFC_CTL_PORT_ER2_OFFSET;
6516                phba->sli4_hba.u.if_type2.CTRLregaddr =
6517                        phba->sli4_hba.conf_regs_memmap_p +
6518                                                LPFC_CTL_PORT_CTL_OFFSET;
6519                phba->sli4_hba.u.if_type2.STATUSregaddr =
6520                        phba->sli4_hba.conf_regs_memmap_p +
6521                                                LPFC_CTL_PORT_STA_OFFSET;
6522                phba->sli4_hba.SLIINTFregaddr =
6523                        phba->sli4_hba.conf_regs_memmap_p + LPFC_SLI_INTF;
6524                phba->sli4_hba.PSMPHRregaddr =
6525                        phba->sli4_hba.conf_regs_memmap_p +
6526                                                LPFC_CTL_PORT_SEM_OFFSET;
6527                phba->sli4_hba.RQDBregaddr =
6528                        phba->sli4_hba.conf_regs_memmap_p +
6529                                                LPFC_ULP0_RQ_DOORBELL;
6530                phba->sli4_hba.WQDBregaddr =
6531                        phba->sli4_hba.conf_regs_memmap_p +
6532                                                LPFC_ULP0_WQ_DOORBELL;
6533                phba->sli4_hba.EQCQDBregaddr =
6534                        phba->sli4_hba.conf_regs_memmap_p + LPFC_EQCQ_DOORBELL;
6535                phba->sli4_hba.MQDBregaddr =
6536                        phba->sli4_hba.conf_regs_memmap_p + LPFC_MQ_DOORBELL;
6537                phba->sli4_hba.BMBXregaddr =
6538                        phba->sli4_hba.conf_regs_memmap_p + LPFC_BMBX;
6539                break;
6540        case LPFC_SLI_INTF_IF_TYPE_1:
6541        default:
6542                dev_printk(KERN_ERR, &phba->pcidev->dev,
6543                           "FATAL - unsupported SLI4 interface type - %d\n",
6544                           if_type);
6545                break;
6546        }
6547}
6548
6549/**
6550 * lpfc_sli4_bar1_register_memmap - Set up SLI4 BAR1 register memory map.
6551 * @phba: pointer to lpfc hba data structure.
6552 *
6553 * This routine is invoked to set up SLI4 BAR1 control status register (CSR)
6554 * memory map.
6555 **/
6556static void
6557lpfc_sli4_bar1_register_memmap(struct lpfc_hba *phba)
6558{
6559        phba->sli4_hba.PSMPHRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
6560                LPFC_SLIPORT_IF0_SMPHR;
6561        phba->sli4_hba.ISRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
6562                LPFC_HST_ISR0;
6563        phba->sli4_hba.IMRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
6564                LPFC_HST_IMR0;
6565        phba->sli4_hba.ISCRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
6566                LPFC_HST_ISCR0;
6567}
6568
6569/**
6570 * lpfc_sli4_bar2_register_memmap - Set up SLI4 BAR2 register memory map.
6571 * @phba: pointer to lpfc hba data structure.
6572 * @vf: virtual function number
6573 *
6574 * This routine is invoked to set up SLI4 BAR2 doorbell register memory map
6575 * based on the given virtual function number, @vf.
6576 *
6577 * Return 0 if successful, otherwise -ENODEV.
6578 **/
6579static int
6580lpfc_sli4_bar2_register_memmap(struct lpfc_hba *phba, uint32_t vf)
6581{
6582        if (vf > LPFC_VIR_FUNC_MAX)
6583                return -ENODEV;
6584
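            /*
             * Doorbell registers live in BAR2 and each virtual function has
             * its own doorbell page, so every offset below is biased by
             * vf * LPFC_VFR_PAGE_SIZE.
             */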
6585        phba->sli4_hba.RQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
6586                                vf * LPFC_VFR_PAGE_SIZE +
6587                                        LPFC_ULP0_RQ_DOORBELL);
6588        phba->sli4_hba.WQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
6589                                vf * LPFC_VFR_PAGE_SIZE +
6590                                        LPFC_ULP0_WQ_DOORBELL);
6591        phba->sli4_hba.EQCQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
6592                                vf * LPFC_VFR_PAGE_SIZE + LPFC_EQCQ_DOORBELL);
6593        phba->sli4_hba.MQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
6594                                vf * LPFC_VFR_PAGE_SIZE + LPFC_MQ_DOORBELL);
6595        phba->sli4_hba.BMBXregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
6596                                vf * LPFC_VFR_PAGE_SIZE + LPFC_BMBX);
6597        return 0;
6598}
6599
6600/**
6601 * lpfc_create_bootstrap_mbox - Create the bootstrap mailbox
6602 * @phba: pointer to lpfc hba data structure.
6603 *
6604 * This routine is invoked to create the bootstrap mailbox
6605 * region consistent with the SLI-4 interface spec.  This
6606 * routine allocates all memory necessary to communicate
6607 * mailbox commands to the port and sets up all alignment
6608 * needs.  No locks are expected to be held when calling
6609 * this routine.
6610 *
6611 * Return codes
6612 *      0 - successful
6613 *      -ENOMEM - could not allocate memory.
6614 **/
6615static int
6616lpfc_create_bootstrap_mbox(struct lpfc_hba *phba)
6617{
6618        uint32_t bmbx_size;
6619        struct lpfc_dmabuf *dmabuf;
6620        struct dma_address *dma_address;
6621        uint32_t pa_addr;
6622        uint64_t phys_addr;
6623
6624        dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
6625        if (!dmabuf)
6626                return -ENOMEM;
6627
6628        /*
6629         * The bootstrap mailbox region is composed of two parts; over-allocate
6630         * by (LPFC_ALIGN_16_BYTE - 1) bytes so a 16-byte-aligned block fits.
6631         */
6632        bmbx_size = sizeof(struct lpfc_bmbx_create) + (LPFC_ALIGN_16_BYTE - 1);
6633        dmabuf->virt = dma_zalloc_coherent(&phba->pcidev->dev, bmbx_size,
6634                                           &dmabuf->phys, GFP_KERNEL);
6635        if (!dmabuf->virt) {
6636                kfree(dmabuf);
6637                return -ENOMEM;
6638        }
6639
6640        /*
6641         * Initialize the bootstrap mailbox pointers now so that the register
6642         * operations are simple later.  The mailbox dma address is required
6643         * to be 16-byte aligned.  Also align the virtual memory as each
6644         * mailbox is copied into the bmbx mailbox region before issuing the
6645         * command to the port.
6646         */
6647        phba->sli4_hba.bmbx.dmabuf = dmabuf;
6648        phba->sli4_hba.bmbx.bmbx_size = bmbx_size;
6649
6650        phba->sli4_hba.bmbx.avirt = PTR_ALIGN(dmabuf->virt,
6651                                              LPFC_ALIGN_16_BYTE);
6652        phba->sli4_hba.bmbx.aphys = ALIGN(dmabuf->phys,
6653                                              LPFC_ALIGN_16_BYTE);
6654
6655        /*
6656         * Set the high and low physical addresses now.  The SLI4 alignment
6657         * requirement is 16 bytes and the mailbox is posted to the port
6658         * as two 30-bit addresses.  The other data is a bit marking whether
6659         * the 30-bit address is the high or low address.
6660         * Upcast bmbx.aphys to 64 bits so the shift compiles cleanly
6661         * on 32-bit machines.
6662         */
6663        dma_address = &phba->sli4_hba.bmbx.dma_address;
6664        phys_addr = (uint64_t)phba->sli4_hba.bmbx.aphys;
6665        pa_addr = (uint32_t) ((phys_addr >> 34) & 0x3fffffff);
6666        dma_address->addr_hi = (uint32_t) ((pa_addr << 2) |
6667                                           LPFC_BMBX_BIT1_ADDR_HI);
6668
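            /*
             * The 16-byte-aligned physical address is split into two 30-bit
             * fields: addr_hi (above) holds bits 63:34 and addr_lo (below)
             * holds bits 33:4; LPFC_BMBX_BIT1_ADDR_{HI,LO} marks which half
             * a posted word carries.
             */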
6669        pa_addr = (uint32_t) ((phba->sli4_hba.bmbx.aphys >> 4) & 0x3fffffff);
6670        dma_address->addr_lo = (uint32_t) ((pa_addr << 2) |
6671                                           LPFC_BMBX_BIT1_ADDR_LO);
6672        return 0;
6673}
6674
6675/**
6676 * lpfc_destroy_bootstrap_mbox - Destroy all bootstrap mailbox resources
6677 * @phba: pointer to lpfc hba data structure.
6678 *
6679 * This routine is invoked to tear down the bootstrap mailbox
6680 * region and release all host resources. This routine requires
6681 * the caller to ensure that all mailbox commands have been recovered,
6682 * that no additional mailbox commands are sent, and that interrupts
6683 * are disabled before calling this routine.
6684 *
6685 **/
6686static void
6687lpfc_destroy_bootstrap_mbox(struct lpfc_hba *phba)
6688{
6689        dma_free_coherent(&phba->pcidev->dev,
6690                          phba->sli4_hba.bmbx.bmbx_size,
6691                          phba->sli4_hba.bmbx.dmabuf->virt,
6692                          phba->sli4_hba.bmbx.dmabuf->phys);
6693
6694        kfree(phba->sli4_hba.bmbx.dmabuf);
6695        memset(&phba->sli4_hba.bmbx, 0, sizeof(struct lpfc_bmbx));
6696}
6697
6698/**
6699 * lpfc_sli4_read_config - Get the config parameters.
6700 * @phba: pointer to lpfc hba data structure.
6701 *
6702 * This routine is invoked to read the configuration parameters from the HBA.
6703 * The configuration parameters are used to set the base and maximum values
6704 * for RPIs, XRIs, VPIs, VFIs, and FCFIs. These values also affect the resource
6705 * allocation for the port.
6706 *
6707 * Return codes
6708 *      0 - successful
6709 *      -ENOMEM - No available memory
6710 *      -EIO - The mailbox failed to complete successfully.
6711 **/
6712int
6713lpfc_sli4_read_config(struct lpfc_hba *phba)
6714{
6715        LPFC_MBOXQ_t *pmb;
6716        struct lpfc_mbx_read_config *rd_config;
6717        union  lpfc_sli4_cfg_shdr *shdr;
6718        uint32_t shdr_status, shdr_add_status;
6719        struct lpfc_mbx_get_func_cfg *get_func_cfg;
6720        struct lpfc_rsrc_desc_fcfcoe *desc;
6721        char *pdesc_0;
6722        int length, i, rc = 0, rc2;
6723
6724        pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6725        if (!pmb) {
6726                lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
6727                                "2011 Unable to allocate memory for issuing "
6728                                "SLI_CONFIG_SPECIAL mailbox command\n");
6729                return -ENOMEM;
6730        }
6731
6732        lpfc_read_config(phba, pmb);
6733
6734        rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
6735        if (rc != MBX_SUCCESS) {
6736                lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
6737                        "2012 Mailbox failed, mbxCmd x%x "
6738                        "READ_CONFIG, mbxStatus x%x\n",
6739                        bf_get(lpfc_mqe_command, &pmb->u.mqe),
6740                        bf_get(lpfc_mqe_status, &pmb->u.mqe));
6741                rc = -EIO;
6742        } else {
6743                rd_config = &pmb->u.mqe.un.rd_config;
6744                if (bf_get(lpfc_mbx_rd_conf_lnk_ldv, rd_config)) {
6745                        phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_VAL;
6746                        phba->sli4_hba.lnk_info.lnk_tp =
6747                                bf_get(lpfc_mbx_rd_conf_lnk_type, rd_config);
6748                        phba->sli4_hba.lnk_info.lnk_no =
6749                                bf_get(lpfc_mbx_rd_conf_lnk_numb, rd_config);
6750                        lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
6751                                        "3081 lnk_type:%d, lnk_numb:%d\n",
6752                                        phba->sli4_hba.lnk_info.lnk_tp,
6753                                        phba->sli4_hba.lnk_info.lnk_no);
6754                } else
6755                        lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
6756                                        "3082 Mailbox (x%x) returned ldv:x0\n",
6757                                        bf_get(lpfc_mqe_command, &pmb->u.mqe));
6758                phba->sli4_hba.extents_in_use =
6759                        bf_get(lpfc_mbx_rd_conf_extnts_inuse, rd_config);
6760                phba->sli4_hba.max_cfg_param.max_xri =
6761                        bf_get(lpfc_mbx_rd_conf_xri_count, rd_config);
6762                phba->sli4_hba.max_cfg_param.xri_base =
6763                        bf_get(lpfc_mbx_rd_conf_xri_base, rd_config);
6764                phba->sli4_hba.max_cfg_param.max_vpi =
6765                        bf_get(lpfc_mbx_rd_conf_vpi_count, rd_config);
6766                phba->sli4_hba.max_cfg_param.vpi_base =
6767                        bf_get(lpfc_mbx_rd_conf_vpi_base, rd_config);
6768                phba->sli4_hba.max_cfg_param.max_rpi =
6769                        bf_get(lpfc_mbx_rd_conf_rpi_count, rd_config);
6770                phba->sli4_hba.max_cfg_param.rpi_base =
6771                        bf_get(lpfc_mbx_rd_conf_rpi_base, rd_config);
6772                phba->sli4_hba.max_cfg_param.max_vfi =
6773                        bf_get(lpfc_mbx_rd_conf_vfi_count, rd_config);
6774                phba->sli4_hba.max_cfg_param.vfi_base =
6775                        bf_get(lpfc_mbx_rd_conf_vfi_base, rd_config);
6776                phba->sli4_hba.max_cfg_param.max_fcfi =
6777                        bf_get(lpfc_mbx_rd_conf_fcfi_count, rd_config);
6778                phba->sli4_hba.max_cfg_param.max_eq =
6779                        bf_get(lpfc_mbx_rd_conf_eq_count, rd_config);
6780                phba->sli4_hba.max_cfg_param.max_rq =
6781                        bf_get(lpfc_mbx_rd_conf_rq_count, rd_config);
6782                phba->sli4_hba.max_cfg_param.max_wq =
6783                        bf_get(lpfc_mbx_rd_conf_wq_count, rd_config);
6784                phba->sli4_hba.max_cfg_param.max_cq =
6785                        bf_get(lpfc_mbx_rd_conf_cq_count, rd_config);
6786                phba->lmt = bf_get(lpfc_mbx_rd_conf_lmt, rd_config);
6787                phba->sli4_hba.next_xri = phba->sli4_hba.max_cfg_param.xri_base;
6788                phba->vpi_base = phba->sli4_hba.max_cfg_param.vpi_base;
6789                phba->vfi_base = phba->sli4_hba.max_cfg_param.vfi_base;
6790                phba->max_vpi = (phba->sli4_hba.max_cfg_param.max_vpi > 0) ?
6791                                (phba->sli4_hba.max_cfg_param.max_vpi - 1) : 0;
6792                phba->max_vports = phba->max_vpi;
6793                lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
6794                                "2003 cfg params Extents? %d "
6795                                "XRI(B:%d M:%d), "
6796                                "VPI(B:%d M:%d) "
6797                                "VFI(B:%d M:%d) "
6798                                "RPI(B:%d M:%d) "
6799                                "FCFI(Count:%d)\n",
6800                                phba->sli4_hba.extents_in_use,
6801                                phba->sli4_hba.max_cfg_param.xri_base,
6802                                phba->sli4_hba.max_cfg_param.max_xri,
6803                                phba->sli4_hba.max_cfg_param.vpi_base,
6804                                phba->sli4_hba.max_cfg_param.max_vpi,
6805                                phba->sli4_hba.max_cfg_param.vfi_base,
6806                                phba->sli4_hba.max_cfg_param.max_vfi,
6807                                phba->sli4_hba.max_cfg_param.rpi_base,
6808                                phba->sli4_hba.max_cfg_param.max_rpi,
6809                                phba->sli4_hba.max_cfg_param.max_fcfi);
6810        }
6811
6812        if (rc)
6813                goto read_cfg_out;
6814
6815        /* Cap the HBA queue depth at max_xri less the XRIs reserved for ELS */
6816        length = phba->sli4_hba.max_cfg_param.max_xri -
6817                        lpfc_sli4_get_els_iocb_cnt(phba);
6818        if (phba->cfg_hba_queue_depth > length) {
6819                lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6820                                "3361 HBA queue depth changed from %d to %d\n",
6821                                phba->cfg_hba_queue_depth, length);
6822                phba->cfg_hba_queue_depth = length;
6823        }
6824
6825        if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
6826            LPFC_SLI_INTF_IF_TYPE_2)
6827                goto read_cfg_out;
6828
6829        /* get the pf# and vf# for SLI4 if_type 2 port */
6830        length = (sizeof(struct lpfc_mbx_get_func_cfg) -
6831                  sizeof(struct lpfc_sli4_cfg_mhdr));
6832        lpfc_sli4_config(phba, pmb, LPFC_MBOX_SUBSYSTEM_COMMON,
6833                         LPFC_MBOX_OPCODE_GET_FUNCTION_CONFIG,
6834                         length, LPFC_SLI4_MBX_EMBED);
6835
6836        rc2 = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
6837        shdr = (union lpfc_sli4_cfg_shdr *)
6838                                &pmb->u.mqe.un.sli4_config.header.cfg_shdr;
6839        shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
6840        shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
6841        if (rc2 || shdr_status || shdr_add_status) {
6842                lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
6843                                "3026 Mailbox failed, mbxCmd x%x "
6844                                "GET_FUNCTION_CONFIG, mbxStatus x%x\n",
6845                                bf_get(lpfc_mqe_command, &pmb->u.mqe),
6846                                bf_get(lpfc_mqe_status, &pmb->u.mqe));
6847                goto read_cfg_out;
6848        }
6849
6850        /* Search for the fc_fcoe resource descriptor */
6851        get_func_cfg = &pmb->u.mqe.un.get_func_cfg;
6852
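            /*
             * The descriptor length is self-described except on V0 ports,
             * which report a reserved value and use the fixed V0 descriptor
             * size instead.
             */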
6853        pdesc_0 = (char *)&get_func_cfg->func_cfg.desc[0];
6854        desc = (struct lpfc_rsrc_desc_fcfcoe *)pdesc_0;
6855        length = bf_get(lpfc_rsrc_desc_fcfcoe_length, desc);
6856        if (length == LPFC_RSRC_DESC_TYPE_FCFCOE_V0_RSVD)
6857                length = LPFC_RSRC_DESC_TYPE_FCFCOE_V0_LENGTH;
6858        else if (length != LPFC_RSRC_DESC_TYPE_FCFCOE_V1_LENGTH)
6859                goto read_cfg_out;
6860
6861        for (i = 0; i < LPFC_RSRC_DESC_MAX_NUM; i++) {
6862                desc = (struct lpfc_rsrc_desc_fcfcoe *)(pdesc_0 + length * i);
6863                if (LPFC_RSRC_DESC_TYPE_FCFCOE ==
6864                    bf_get(lpfc_rsrc_desc_fcfcoe_type, desc)) {
6865                        phba->sli4_hba.iov.pf_number =
6866                                bf_get(lpfc_rsrc_desc_fcfcoe_pfnum, desc);
6867                        phba->sli4_hba.iov.vf_number =
6868                                bf_get(lpfc_rsrc_desc_fcfcoe_vfnum, desc);
6869                        break;
6870                }
6871        }
6872
6873        if (i < LPFC_RSRC_DESC_MAX_NUM)
6874                lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
6875                                "3027 GET_FUNCTION_CONFIG: pf_number:%d, "
6876                                "vf_number:%d\n", phba->sli4_hba.iov.pf_number,
6877                                phba->sli4_hba.iov.vf_number);
6878        else
6879                lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
6880                                "3028 GET_FUNCTION_CONFIG: failed to find "
6881                                "Resource Descriptor:x%x\n",
6882                                LPFC_RSRC_DESC_TYPE_FCFCOE);
6883
6884read_cfg_out:
6885        mempool_free(pmb, phba->mbox_mem_pool);
6886        return rc;
6887}
6888
6889/**
6890 * lpfc_setup_endian_order - Write endian order to an SLI4 if_type 0 port.
6891 * @phba: pointer to lpfc hba data structure.
6892 *
6893 * This routine is invoked to set up the port-side endian order when
6894 * the port if_type is 0.  This routine has no function for other
6895 * if_types.
6896 *
6897 * Return codes
6898 *      0 - successful
6899 *      -ENOMEM - No available memory
6900 *      -EIO - The mailbox failed to complete successfully.
6901 **/
6902static int
6903lpfc_setup_endian_order(struct lpfc_hba *phba)
6904{
6905        LPFC_MBOXQ_t *mboxq;
6906        uint32_t if_type, rc = 0;
6907        uint32_t endian_mb_data[2] = {HOST_ENDIAN_LOW_WORD0,
6908                                      HOST_ENDIAN_HIGH_WORD1};
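            /*
             * These two well-known words let the port detect the host byte
             * order from the form in which they are received.
             */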
6909
6910        if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
6911        switch (if_type) {
6912        case LPFC_SLI_INTF_IF_TYPE_0:
6913                mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
6914                                                       GFP_KERNEL);
6915                if (!mboxq) {
6916                        lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6917                                        "0492 Unable to allocate memory for "
6918                                        "issuing SLI_CONFIG_SPECIAL mailbox "
6919                                        "command\n");
6920                        return -ENOMEM;
6921                }
6922
6923                /*
6924                 * The SLI4_CONFIG_SPECIAL mailbox command requires the first
6925                 * two words to contain special data values and no other data.
6926                 */
6927                memset(mboxq, 0, sizeof(LPFC_MBOXQ_t));
6928                memcpy(&mboxq->u.mqe, &endian_mb_data, sizeof(endian_mb_data));
6929                rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
6930                if (rc != MBX_SUCCESS) {
6931                        lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6932                                        "0493 SLI_CONFIG_SPECIAL mailbox "
6933                                        "failed with status x%x\n",
6934                                        rc);
6935                        rc = -EIO;
6936                }
6937                mempool_free(mboxq, phba->mbox_mem_pool);
6938                break;
6939        case LPFC_SLI_INTF_IF_TYPE_2:
6940        case LPFC_SLI_INTF_IF_TYPE_1:
6941        default:
6942                break;
6943        }
6944        return rc;
6945}
6946
6947/**
6948 * lpfc_sli4_queue_verify - Verify and update EQ and CQ counts
6949 * @phba: pointer to lpfc hba data structure.
6950 *
6951 * This routine is invoked to check the user-settable queue counts for EQs and
6952 * CQs. After this routine is called, the counts will be set to valid values that
6953 * adhere to the constraints of the system's interrupt vectors and the port's
6954 * queue resources.
6955 *
6956 * Return codes
6957 *      0 - successful
6958 *      -ENOMEM - No available memory
6959 **/
6960static int
6961lpfc_sli4_queue_verify(struct lpfc_hba *phba)
6962{
6963        int cfg_fcp_io_channel;
6964        uint32_t cpu;
6965        uint32_t i = 0;
6966        int fof_vectors = phba->cfg_fof ? 1 : 0;
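            /*
             * Flash Optimized Fabric, when enabled, consumes one interrupt
             * vector (and thus one EQ) beyond the FCP IO channels.
             */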
6967
6968        /*
6969         * Sanity check for configured queue parameters against the run-time
6970         * device parameters
6971         */
6972
6973        /* Sanity check on HBA EQ parameters */
6974        cfg_fcp_io_channel = phba->cfg_fcp_io_channel;
6975
6976        /* It doesn't make sense to have more IO channels than online CPUs */
6977        for_each_present_cpu(cpu) {
6978                if (cpu_online(cpu))
6979                        i++;
6980        }
6981        phba->sli4_hba.num_online_cpu = i;
6982        phba->sli4_hba.num_present_cpu = lpfc_present_cpu;
6983        phba->sli4_hba.curr_disp_cpu = 0;
6984
6985        if (i < cfg_fcp_io_channel) {
6986                lpfc_printf_log(phba,
6987                                KERN_ERR, LOG_INIT,
6988                                "3188 Reducing IO channels to match number of "
6989                                "online CPUs: from %d to %d\n",
6990                                cfg_fcp_io_channel, i);
6991                cfg_fcp_io_channel = i;
6992        }
6993
6994        if (cfg_fcp_io_channel + fof_vectors >
6995            phba->sli4_hba.max_cfg_param.max_eq) {
6996                if (phba->sli4_hba.max_cfg_param.max_eq <
6997                    LPFC_FCP_IO_CHAN_MIN) {
6998                        lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6999                                        "2574 Not enough EQs (%d) from the "
7000                                        "pci function for supporting FCP "
7001                                        "EQs (%d)\n",
7002                                        phba->sli4_hba.max_cfg_param.max_eq,
7003                                        phba->cfg_fcp_io_channel);
7004                        goto out_error;
7005                }
7006                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7007                                "2575 Reducing IO channels to match number of "
7008                                "available EQs: from %d to %d\n",
7009                                cfg_fcp_io_channel,
7010                                phba->sli4_hba.max_cfg_param.max_eq);
7011                cfg_fcp_io_channel = phba->sli4_hba.max_cfg_param.max_eq -
7012                        fof_vectors;
7013        }
7014
7015        /* The actual number of FCP event queues adopted */
7016        phba->cfg_fcp_io_channel = cfg_fcp_io_channel;
7017
7018        /* Get EQ depth from module parameter, fake the default for now */
7019        phba->sli4_hba.eq_esize = LPFC_EQE_SIZE_4B;
7020        phba->sli4_hba.eq_ecount = LPFC_EQE_DEF_COUNT;
7021
7022        /* Get CQ depth from module parameter, fake the default for now */
7023        phba->sli4_hba.cq_esize = LPFC_CQE_SIZE;
7024        phba->sli4_hba.cq_ecount = LPFC_CQE_DEF_COUNT;
7025
7026        return 0;
7027out_error:
7028        return -ENOMEM;
7029}
7030
7031/**
7032 * lpfc_sli4_queue_create - Create all the SLI4 queues
7033 * @phba: pointer to lpfc hba data structure.
7034 *
7035 * This routine is invoked to allocate all the SLI4 queues for the FCoE HBA
7036 * operation. For each SLI4 queue type, the parameters such as queue entry
7037 * count (queue depth) shall be taken from the module parameter. For now,
7038 * we just use a constant number as a placeholder.
7039 *
7040 * Return codes
7041 *      0 - successful
7042 *      -ERANGE - No FCP IO channels configured
7043 *      -ENOMEM - No available memory
7044 **/
7045int
7046lpfc_sli4_queue_create(struct lpfc_hba *phba)
7047{
7048        struct lpfc_queue *qdesc;
7049        int idx;
7050
7051        /*
7052         * Create HBA Record arrays.
7053         */
7054        if (!phba->cfg_fcp_io_channel)
7055                return -ERANGE;
7056
7057        phba->sli4_hba.mq_esize = LPFC_MQE_SIZE;
7058        phba->sli4_hba.mq_ecount = LPFC_MQE_DEF_COUNT;
7059        phba->sli4_hba.wq_esize = LPFC_WQE_SIZE;
7060        phba->sli4_hba.wq_ecount = LPFC_WQE_DEF_COUNT;
7061        phba->sli4_hba.rq_esize = LPFC_RQE_SIZE;
7062        phba->sli4_hba.rq_ecount = LPFC_RQE_DEF_COUNT;
7063
7064        phba->sli4_hba.hba_eq =  kzalloc((sizeof(struct lpfc_queue *) *
7065                                phba->cfg_fcp_io_channel), GFP_KERNEL);
7066        if (!phba->sli4_hba.hba_eq) {
7067                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7068                        "2576 Failed allocate memory for "
7069                        "fast-path EQ record array\n");
7070                goto out_error;
7071        }
7072
7073        phba->sli4_hba.fcp_cq = kzalloc((sizeof(struct lpfc_queue *) *
7074                                phba->cfg_fcp_io_channel), GFP_KERNEL);
7075        if (!phba->sli4_hba.fcp_cq) {
7076                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7077                                "2577 Failed allocate memory for fast-path "
7078                                "CQ record array\n");
7079                goto out_error;
7080        }
7081
7082        phba->sli4_hba.fcp_wq = kzalloc((sizeof(struct lpfc_queue *) *
7083                                phba->cfg_fcp_io_channel), GFP_KERNEL);
7084        if (!phba->sli4_hba.fcp_wq) {
7085                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7086                                "2578 Failed allocate memory for fast-path "
7087                                "WQ record array\n");
7088                goto out_error;
7089        }
7090
7091        /*
7092         * Since the first EQ can have multiple CQs associated with it,
7093         * this array is used to quickly see if we have a FCP fast-path
7094         * CQ match.
7095         */
7096        phba->sli4_hba.fcp_cq_map = kzalloc((sizeof(uint16_t) *
7097                                         phba->cfg_fcp_io_channel), GFP_KERNEL);
7098        if (!phba->sli4_hba.fcp_cq_map) {
7099                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7100                                "2545 Failed allocate memory for fast-path "
7101                                "CQ map\n");
7102                goto out_error;
7103        }
7104
7105        /*
7106         * Create HBA Event Queues (EQs).  The cfg_fcp_io_channel specifies
7107         * how many EQs to create.
7108         */
7109        for (idx = 0; idx < phba->cfg_fcp_io_channel; idx++) {
7110
7111                /* Create EQs */
7112                qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.eq_esize,
7113                                              phba->sli4_hba.eq_ecount);
7114                if (!qdesc) {
7115                        lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7116                                        "0497 Failed allocate EQ (%d)\n", idx);
7117                        goto out_error;
7118                }
7119                phba->sli4_hba.hba_eq[idx] = qdesc;
7120
7121                /* Create Fast Path FCP CQs */
7122                qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
7123                                              phba->sli4_hba.cq_ecount);
7124                if (!qdesc) {
7125                        lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7126                                        "0499 Failed allocate fast-path FCP "
7127                                        "CQ (%d)\n", idx);
7128                        goto out_error;
7129                }
7130                phba->sli4_hba.fcp_cq[idx] = qdesc;
7131
7132                /* Create Fast Path FCP WQs */
7133                qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.wq_esize,
7134                                              phba->sli4_hba.wq_ecount);
7135                if (!qdesc) {
7136                        lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7137                                        "0503 Failed allocate fast-path FCP "
7138                                        "WQ (%d)\n", idx);
7139                        goto out_error;
7140                }
7141                phba->sli4_hba.fcp_wq[idx] = qdesc;
7142        }
7143
7144
7145        /*
7146         * Create Slow Path Completion Queues (CQs)
7147         */
7148
7149        /* Create slow-path Mailbox Command Complete Queue */
7150        qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
7151                                      phba->sli4_hba.cq_ecount);
7152        if (!qdesc) {
7153                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7154                                "0500 Failed allocate slow-path mailbox CQ\n");
7155                goto out_error;
7156        }
7157        phba->sli4_hba.mbx_cq = qdesc;
7158
7159        /* Create slow-path ELS Complete Queue */
7160        qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
7161                                      phba->sli4_hba.cq_ecount);
7162        if (!qdesc) {
7163                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7164                                "0501 Failed allocate slow-path ELS CQ\n");
7165                goto out_error;
7166        }
7167        phba->sli4_hba.els_cq = qdesc;
7168
7169
7170        /*
7171         * Create Slow Path Work Queues (WQs)
7172         */
7173
7174        /* Create Mailbox Command Queue */
7175
7176        qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.mq_esize,
7177                                      phba->sli4_hba.mq_ecount);
7178        if (!qdesc) {
7179                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7180                                "0505 Failed allocate slow-path MQ\n");
7181                goto out_error;
7182        }
7183        phba->sli4_hba.mbx_wq = qdesc;
7184
7185        /*
7186         * Create ELS Work Queues
7187         */
7188
7189        /* Create slow-path ELS Work Queue */
7190        qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.wq_esize,
7191                                      phba->sli4_hba.wq_ecount);
7192        if (!qdesc) {
7193                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7194                                "0504 Failed allocate slow-path ELS WQ\n");
7195                goto out_error;
7196        }
7197        phba->sli4_hba.els_wq = qdesc;
7198
7199        /*
7200         * Create Receive Queue (RQ)
7201         */
7202
7203        /* Create Receive Queue for header */
7204        qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.rq_esize,
7205                                      phba->sli4_hba.rq_ecount);
7206        if (!qdesc) {
7207                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7208                                "0506 Failed allocate receive HRQ\n");
7209                goto out_error;
7210        }
7211        phba->sli4_hba.hdr_rq = qdesc;
7212
7213        /* Create Receive Queue for data */
7214        qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.rq_esize,
7215                                      phba->sli4_hba.rq_ecount);
7216        if (!qdesc) {
7217                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7218                                "0507 Failed allocate receive DRQ\n");
7219                goto out_error;
7220        }
7221        phba->sli4_hba.dat_rq = qdesc;
7222
7223        /* Create the Queues needed for Flash Optimized Fabric operations */
7224        if (phba->cfg_fof)
7225                lpfc_fof_queue_create(phba);
7226        return 0;
7227
7228out_error:
7229        lpfc_sli4_queue_destroy(phba);
7230        return -ENOMEM;
7231}
7232
7233/**
7234 * lpfc_sli4_queue_destroy - Destroy all the SLI4 queues
7235 * @phba: pointer to lpfc hba data structure.
7236 *
7237 * This routine is invoked to release all the SLI4 queues with the FCoE HBA
7238 * operation.
7244 **/
7245void
7246lpfc_sli4_queue_destroy(struct lpfc_hba *phba)
7247{
7248        int idx;
7249
7250        if (phba->cfg_fof)
7251                lpfc_fof_queue_destroy(phba);
7252
7253        if (phba->sli4_hba.hba_eq != NULL) {
7254                /* Release HBA event queue */
7255                for (idx = 0; idx < phba->cfg_fcp_io_channel; idx++) {
7256                        if (phba->sli4_hba.hba_eq[idx] != NULL) {
7257                                lpfc_sli4_queue_free(
7258                                        phba->sli4_hba.hba_eq[idx]);
7259                                phba->sli4_hba.hba_eq[idx] = NULL;
7260                        }
7261                }
7262                kfree(phba->sli4_hba.hba_eq);
7263                phba->sli4_hba.hba_eq = NULL;
7264        }
7265
7266        if (phba->sli4_hba.fcp_cq != NULL) {
7267                /* Release FCP completion queue */
7268                for (idx = 0; idx < phba->cfg_fcp_io_channel; idx++) {
7269                        if (phba->sli4_hba.fcp_cq[idx] != NULL) {
7270                                lpfc_sli4_queue_free(
7271                                        phba->sli4_hba.fcp_cq[idx]);
7272                                phba->sli4_hba.fcp_cq[idx] = NULL;
7273                        }
7274                }
7275                kfree(phba->sli4_hba.fcp_cq);
7276                phba->sli4_hba.fcp_cq = NULL;
7277        }
7278
7279        if (phba->sli4_hba.fcp_wq != NULL) {
7280                /* Release FCP work queue */
7281                for (idx = 0; idx < phba->cfg_fcp_io_channel; idx++) {
7282                        if (phba->sli4_hba.fcp_wq[idx] != NULL) {
7283                                lpfc_sli4_queue_free(
7284                                        phba->sli4_hba.fcp_wq[idx]);
7285                                phba->sli4_hba.fcp_wq[idx] = NULL;
7286                        }
7287                }
7288                kfree(phba->sli4_hba.fcp_wq);
7289                phba->sli4_hba.fcp_wq = NULL;
7290        }
7291
7292        /* Release FCP CQ mapping array */
7293        if (phba->sli4_hba.fcp_cq_map != NULL) {
7294                kfree(phba->sli4_hba.fcp_cq_map);
7295                phba->sli4_hba.fcp_cq_map = NULL;
7296        }
7297
7298        /* Release mailbox command work queue */
7299        if (phba->sli4_hba.mbx_wq != NULL) {
7300                lpfc_sli4_queue_free(phba->sli4_hba.mbx_wq);
7301                phba->sli4_hba.mbx_wq = NULL;
7302        }
7303
7304        /* Release ELS work queue */
7305        if (phba->sli4_hba.els_wq != NULL) {
7306                lpfc_sli4_queue_free(phba->sli4_hba.els_wq);
7307                phba->sli4_hba.els_wq = NULL;
7308        }
7309
7310        /* Release unsolicited receive queue */
7311        if (phba->sli4_hba.hdr_rq != NULL) {
7312                lpfc_sli4_queue_free(phba->sli4_hba.hdr_rq);
7313                phba->sli4_hba.hdr_rq = NULL;
7314        }
7315        if (phba->sli4_hba.dat_rq != NULL) {
7316                lpfc_sli4_queue_free(phba->sli4_hba.dat_rq);
7317                phba->sli4_hba.dat_rq = NULL;
7318        }
7319
7320        /* Release ELS complete queue */
7321        if (phba->sli4_hba.els_cq != NULL) {
7322                lpfc_sli4_queue_free(phba->sli4_hba.els_cq);
7323                phba->sli4_hba.els_cq = NULL;
7324        }
7325
7326        /* Release mailbox command complete queue */
7327        if (phba->sli4_hba.mbx_cq != NULL) {
7328                lpfc_sli4_queue_free(phba->sli4_hba.mbx_cq);
7329                phba->sli4_hba.mbx_cq = NULL;
7330        }
7331
7332        return;
7333}
7334
7335/**
7336 * lpfc_sli4_queue_setup - Set up all the SLI4 queues
7337 * @phba: pointer to lpfc hba data structure.
7338 *
7339 * This routine is invoked to set up all the SLI4 queues for the FCoE HBA
7340 * operation.
7341 *
7342 * Return codes
7343 *      0 - successful
7344 *      -ENOMEM - No available memory
7345 *      -ENXIO - The mailbox failed to complete successfully.
7346 **/
7347int
7348lpfc_sli4_queue_setup(struct lpfc_hba *phba)
7349{
7350        struct lpfc_sli *psli = &phba->sli;
7351        struct lpfc_sli_ring *pring;
7352        int rc = -ENOMEM;
7353        int fcp_eqidx, fcp_cqidx, fcp_wqidx;
7355        uint32_t shdr_status, shdr_add_status;
7356        union lpfc_sli4_cfg_shdr *shdr;
7357        LPFC_MBOXQ_t *mboxq;
7358        uint32_t length;
7359
7360        /* Check for dual-ULP support */
7361        mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
7362        if (!mboxq) {
7363                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7364                                "3249 Unable to allocate memory for "
7365                                "QUERY_FW_CFG mailbox command\n");
7366                return -ENOMEM;
7367        }
7368        length = (sizeof(struct lpfc_mbx_query_fw_config) -
7369                  sizeof(struct lpfc_sli4_cfg_mhdr));
7370        lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
7371                         LPFC_MBOX_OPCODE_QUERY_FW_CFG,
7372                         length, LPFC_SLI4_MBX_EMBED);
7373
7374        rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7375
7376        shdr = (union lpfc_sli4_cfg_shdr *)
7377                        &mboxq->u.mqe.un.sli4_config.header.cfg_shdr;
7378        shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
7379        shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
7380        if (shdr_status || shdr_add_status || rc) {
7381                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7382                                "3250 QUERY_FW_CFG mailbox failed with status "
7383                                "x%x add_status x%x, mbx status x%x\n",
7384                                shdr_status, shdr_add_status, rc);
7385                if (rc != MBX_TIMEOUT)
7386                        mempool_free(mboxq, phba->mbox_mem_pool);
7387                rc = -ENXIO;
7388                goto out_error;
7389        }
7390
7391        phba->sli4_hba.fw_func_mode =
7392                        mboxq->u.mqe.un.query_fw_cfg.rsp.function_mode;
7393        phba->sli4_hba.ulp0_mode = mboxq->u.mqe.un.query_fw_cfg.rsp.ulp0_mode;
7394        phba->sli4_hba.ulp1_mode = mboxq->u.mqe.un.query_fw_cfg.rsp.ulp1_mode;
7395        lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7396                        "3251 QUERY_FW_CFG: func_mode:x%x, ulp0_mode:x%x, "
7397                        "ulp1_mode:x%x\n", phba->sli4_hba.fw_func_mode,
7398                        phba->sli4_hba.ulp0_mode, phba->sli4_hba.ulp1_mode);
7399
7400        if (rc != MBX_TIMEOUT)
7401                mempool_free(mboxq, phba->mbox_mem_pool);
7402
7403        /*
7404         * Set up HBA Event Queues (EQs)
7405         */
7406
7407        /* Set up HBA event queue */
7408        if (phba->cfg_fcp_io_channel && !phba->sli4_hba.hba_eq) {
7409                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7410                                "3147 Fast-path EQs not allocated\n");
7411                rc = -ENOMEM;
7412                goto out_error;
7413        }
7414        for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_io_channel; fcp_eqidx++) {
7415                if (!phba->sli4_hba.hba_eq[fcp_eqidx]) {
7416                        lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7417                                        "0522 Fast-path EQ (%d) not "
7418                                        "allocated\n", fcp_eqidx);
7419                        rc = -ENOMEM;
7420                        goto out_destroy_hba_eq;
7421                }
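                    /*
                     * Give each EQ an equal share of the total interrupt
                     * coalescing budget, cfg_fcp_imax.
                     */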
7422                rc = lpfc_eq_create(phba, phba->sli4_hba.hba_eq[fcp_eqidx],
7423                         (phba->cfg_fcp_imax / phba->cfg_fcp_io_channel));
7424                if (rc) {
7425                        lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7426                                        "0523 Failed setup of fast-path EQ "
7427                                        "(%d), rc = 0x%x\n", fcp_eqidx,
7428                                        (uint32_t)rc);
7429                        goto out_destroy_hba_eq;
7430                }
7431                lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7432                                "2584 HBA EQ setup: "
7433                                "queue[%d]-id=%d\n", fcp_eqidx,
7434                                phba->sli4_hba.hba_eq[fcp_eqidx]->queue_id);
7435        }
7436
7437        /* Set up fast-path FCP Response Complete Queue */
7438        if (!phba->sli4_hba.fcp_cq) {
7439                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7440                                "3148 Fast-path FCP CQ array not "
7441                                "allocated\n");
7442                rc = -ENOMEM;
7443                goto out_destroy_hba_eq;
7444        }
7445
7446        for (fcp_cqidx = 0; fcp_cqidx < phba->cfg_fcp_io_channel; fcp_cqidx++) {
7447                if (!phba->sli4_hba.fcp_cq[fcp_cqidx]) {
7448                        lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7449                                        "0526 Fast-path FCP CQ (%d) not "
7450                                        "allocated\n", fcp_cqidx);
7451                        rc = -ENOMEM;
7452                        goto out_destroy_fcp_cq;
7453                }
7454                rc = lpfc_cq_create(phba, phba->sli4_hba.fcp_cq[fcp_cqidx],
7455                        phba->sli4_hba.hba_eq[fcp_cqidx], LPFC_WCQ, LPFC_FCP);
7456                if (rc) {
7457                        lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7458                                        "0527 Failed setup of fast-path FCP "
7459                                        "CQ (%d), rc = 0x%x\n", fcp_cqidx,
7460                                        (uint32_t)rc);
7461                        goto out_destroy_fcp_cq;
7462                }
7463
7464                /* Setup fcp_cq_map for fast lookup */
7465                phba->sli4_hba.fcp_cq_map[fcp_cqidx] =
7466                                phba->sli4_hba.fcp_cq[fcp_cqidx]->queue_id;
7467
7468                lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7469                                "2588 FCP CQ setup: cq[%d]-id=%d, "
7470                                "parent seq[%d]-id=%d\n",
7471                                fcp_cqidx,
7472                                phba->sli4_hba.fcp_cq[fcp_cqidx]->queue_id,
7473                                fcp_cqidx,
7474                                phba->sli4_hba.hba_eq[fcp_cqidx]->queue_id);
7475        }
7476
7477        /* Set up fast-path FCP Work Queue */
7478        if (!phba->sli4_hba.fcp_wq) {
7479                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7480                                "3149 Fast-path FCP WQ array not "
7481                                "allocated\n");
7482                rc = -ENOMEM;
7483                goto out_destroy_fcp_cq;
7484        }
7485
7486        for (fcp_wqidx = 0; fcp_wqidx < phba->cfg_fcp_io_channel; fcp_wqidx++) {
7487                if (!phba->sli4_hba.fcp_wq[fcp_wqidx]) {
7488                        lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7489                                        "0534 Fast-path FCP WQ (%d) not "
7490                                        "allocated\n", fcp_wqidx);
7491                        rc = -ENOMEM;
7492                        goto out_destroy_fcp_wq;
7493                }
7494                rc = lpfc_wq_create(phba, phba->sli4_hba.fcp_wq[fcp_wqidx],
7495                                    phba->sli4_hba.fcp_cq[fcp_wqidx],
7496                                    LPFC_FCP);
7497                if (rc) {
7498                        lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7499                                        "0535 Failed setup of fast-path FCP "
7500                                        "WQ (%d), rc = 0x%x\n", fcp_wqidx,
7501                                        (uint32_t)rc);
7502                        goto out_destroy_fcp_wq;
7503                }
7504
7505                /* Bind this WQ to the next FCP ring */
7506                pring = &psli->ring[MAX_SLI3_CONFIGURED_RINGS + fcp_wqidx];
7507                pring->sli.sli4.wqp = (void *)phba->sli4_hba.fcp_wq[fcp_wqidx];
7508                phba->sli4_hba.fcp_cq[fcp_wqidx]->pring = pring;
7509
7510                lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7511                                "2591 FCP WQ setup: wq[%d]-id=%d, "
7512                                "parent cq[%d]-id=%d\n",
7513                                fcp_wqidx,
7514                                phba->sli4_hba.fcp_wq[fcp_wqidx]->queue_id,
7515                                fcp_wqidx,
7516                                phba->sli4_hba.fcp_cq[fcp_wqidx]->queue_id);
7517        }
7518        /*
7519         * Set up Complete Queues (CQs)
7520         */
7521
7522        /* Set up slow-path MBOX Complete Queue as the first CQ */
7523        if (!phba->sli4_hba.mbx_cq) {
7524                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7525                                "0528 Mailbox CQ not allocated\n");
7526                rc = -ENOMEM;
7527                goto out_destroy_fcp_wq;
7528        }
7529        rc = lpfc_cq_create(phba, phba->sli4_hba.mbx_cq,
7530                        phba->sli4_hba.hba_eq[0], LPFC_MCQ, LPFC_MBOX);
7531        if (rc) {
7532                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7533                                "0529 Failed setup of slow-path mailbox CQ: "
7534                                "rc = 0x%x\n", (uint32_t)rc);
7535                goto out_destroy_fcp_wq;
7536        }
7537        lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7538                        "2585 MBX CQ setup: cq-id=%d, parent eq-id=%d\n",
7539                        phba->sli4_hba.mbx_cq->queue_id,
7540                        phba->sli4_hba.hba_eq[0]->queue_id);
7541
7542        /* Set up slow-path ELS Complete Queue */
7543        if (!phba->sli4_hba.els_cq) {
7544                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7545                                "0530 ELS CQ not allocated\n");
7546                rc = -ENOMEM;
7547                goto out_destroy_mbx_cq;
7548        }
7549        rc = lpfc_cq_create(phba, phba->sli4_hba.els_cq,
7550                        phba->sli4_hba.hba_eq[0], LPFC_WCQ, LPFC_ELS);
7551        if (rc) {
7552                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7553                                "0531 Failed setup of slow-path ELS CQ: "
7554                                "rc = 0x%x\n", (uint32_t)rc);
7555                goto out_destroy_mbx_cq;
7556        }
7557        lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7558                        "2586 ELS CQ setup: cq-id=%d, parent eq-id=%d\n",
7559                        phba->sli4_hba.els_cq->queue_id,
7560                        phba->sli4_hba.hba_eq[0]->queue_id);
7561
7562        /*
7563         * Set up all the Work Queues (WQs)
7564         */
7565
7566        /* Set up Mailbox Command Queue */
7567        if (!phba->sli4_hba.mbx_wq) {
7568                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7569                                "0538 Slow-path MQ not allocated\n");
7570                rc = -ENOMEM;
7571                goto out_destroy_els_cq;
7572        }
7573        rc = lpfc_mq_create(phba, phba->sli4_hba.mbx_wq,
7574                            phba->sli4_hba.mbx_cq, LPFC_MBOX);
7575        if (rc) {
7576                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7577                                "0539 Failed setup of slow-path MQ: "
7578                                "rc = 0x%x\n", rc);
7579                goto out_destroy_els_cq;
7580        }
7581        lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7582                        "2589 MBX MQ setup: wq-id=%d, parent cq-id=%d\n",
7583                        phba->sli4_hba.mbx_wq->queue_id,
7584                        phba->sli4_hba.mbx_cq->queue_id);
7585
7586        /* Set up slow-path ELS Work Queue */
7587        if (!phba->sli4_hba.els_wq) {
7588                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7589                                "0536 Slow-path ELS WQ not allocated\n");
7590                rc = -ENOMEM;
7591                goto out_destroy_mbx_wq;
7592        }
7593        rc = lpfc_wq_create(phba, phba->sli4_hba.els_wq,
7594                            phba->sli4_hba.els_cq, LPFC_ELS);
7595        if (rc) {
7596                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7597                                "0537 Failed setup of slow-path ELS WQ: "
7598                                "rc = 0x%x\n", (uint32_t)rc);
7599                goto out_destroy_mbx_wq;
7600        }
7601
7602        /* Bind this WQ to the ELS ring */
7603        pring = &psli->ring[LPFC_ELS_RING];
7604        pring->sli.sli4.wqp = (void *)phba->sli4_hba.els_wq;
7605        phba->sli4_hba.els_cq->pring = pring;
7606
7607        lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7608                        "2590 ELS WQ setup: wq-id=%d, parent cq-id=%d\n",
7609                        phba->sli4_hba.els_wq->queue_id,
7610                        phba->sli4_hba.els_cq->queue_id);
7611
7612        /*
7613         * Create Receive Queue (RQ)
7614         */
7615        if (!phba->sli4_hba.hdr_rq || !phba->sli4_hba.dat_rq) {
7616                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7617                                "0540 Receive Queue not allocated\n");
7618                rc = -ENOMEM;
7619                goto out_destroy_els_wq;
7620        }
7621
7622        lpfc_rq_adjust_repost(phba, phba->sli4_hba.hdr_rq, LPFC_ELS_HBQ);
7623        lpfc_rq_adjust_repost(phba, phba->sli4_hba.dat_rq, LPFC_ELS_HBQ);
7624
7625        rc = lpfc_rq_create(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq,
7626                            phba->sli4_hba.els_cq, LPFC_USOL);
7627        if (rc) {
7628                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7629                                "0541 Failed setup of Receive Queue: "
7630                                "rc = 0x%x\n", (uint32_t)rc);
7631                goto out_destroy_els_wq;
7632        }
7633
7634        lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7635                        "2592 USL RQ setup: hdr-rq-id=%d, dat-rq-id=%d "
7636                        "parent cq-id=%d\n",
7637                        phba->sli4_hba.hdr_rq->queue_id,
7638                        phba->sli4_hba.dat_rq->queue_id,
7639                        phba->sli4_hba.els_cq->queue_id);
7640
7641        if (phba->cfg_fof) {
7642                rc = lpfc_fof_queue_setup(phba);
7643                if (rc) {
7644                        lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7645                                        "0549 Failed setup of FOF Queues: "
7646                                        "rc = 0x%x\n", rc);
7647                        goto out_destroy_els_rq;
7648                }
7649        }
7650        return 0;
7651
7652out_destroy_els_rq:
7653        lpfc_rq_destroy(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq);
7654out_destroy_els_wq:
7655        lpfc_wq_destroy(phba, phba->sli4_hba.els_wq);
7656out_destroy_mbx_wq:
7657        lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq);
7658out_destroy_els_cq:
7659        lpfc_cq_destroy(phba, phba->sli4_hba.els_cq);
7660out_destroy_mbx_cq:
7661        lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq);
7662out_destroy_fcp_wq:
7663        for (--fcp_wqidx; fcp_wqidx >= 0; fcp_wqidx--)
7664                lpfc_wq_destroy(phba, phba->sli4_hba.fcp_wq[fcp_wqidx]);
7665out_destroy_fcp_cq:
7666        for (--fcp_cqidx; fcp_cqidx >= 0; fcp_cqidx--)
7667                lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_cqidx]);
7668out_destroy_hba_eq:
7669        for (--fcp_eqidx; fcp_eqidx >= 0; fcp_eqidx--)
7670                lpfc_eq_destroy(phba, phba->sli4_hba.hba_eq[fcp_eqidx]);
7671out_error:
7672        return rc;
7673}
7674
7675/**
7676 * lpfc_sli4_queue_unset - Unset all the SLI4 queues
7677 * @phba: pointer to lpfc hba data structure.
7678 *
7679 * This routine is invoked to unset all the SLI4 queues with the FCoE HBA
7680 * operation.
7686 **/
7687void
7688lpfc_sli4_queue_unset(struct lpfc_hba *phba)
7689{
7690        int fcp_qidx;
7691
7692        /* Unset the queues created for Flash Optimized Fabric operations */
7693        if (phba->cfg_fof)
7694                lpfc_fof_queue_destroy(phba);
7695        /* Unset mailbox command work queue */
7696        lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq);
7697        /* Unset ELS work queue */
7698        lpfc_wq_destroy(phba, phba->sli4_hba.els_wq);
7699        /* Unset unsolicited receive queue */
7700        lpfc_rq_destroy(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq);
7701        /* Unset FCP work queue */
7702        if (phba->sli4_hba.fcp_wq) {
7703                for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_io_channel;
7704                     fcp_qidx++)
7705                        lpfc_wq_destroy(phba, phba->sli4_hba.fcp_wq[fcp_qidx]);
7706        }
7707        /* Unset mailbox command complete queue */
7708        lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq);
7709        /* Unset ELS complete queue */
7710        lpfc_cq_destroy(phba, phba->sli4_hba.els_cq);
7711        /* Unset FCP response complete queue */
7712        if (phba->sli4_hba.fcp_cq) {
7713                for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_io_channel;
7714                     fcp_qidx++)
7715                        lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_qidx]);
7716        }
7717        /* Unset fast-path event queue */
7718        if (phba->sli4_hba.hba_eq) {
7719                for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_io_channel;
7720                     fcp_qidx++)
7721                        lpfc_eq_destroy(phba, phba->sli4_hba.hba_eq[fcp_qidx]);
7722        }
7723}
7724
7725/**
7726 * lpfc_sli4_cq_event_pool_create - Create completion-queue event free pool
7727 * @phba: pointer to lpfc hba data structure.
7728 *
7729 * This routine is invoked to allocate and set up a pool of completion queue
7730 * events. The body of the completion queue event is a completion queue entry
7731 * (CQE). For now, this pool is used for the interrupt service routine to queue
7732 * the following HBA completion queue events for the worker thread to process:
7733 *   - Mailbox asynchronous events
7734 *   - Receive queue completion unsolicited events
7735 * Later, this can be used for all the slow-path events.
7736 *
7737 * Return codes
7738 *      0 - successful
7739 *      -ENOMEM - No available memory
7740 **/
7741static int
7742lpfc_sli4_cq_event_pool_create(struct lpfc_hba *phba)
7743{
7744        struct lpfc_cq_event *cq_event;
7745        int i;
7746
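            /*
             * Pre-allocate enough events to cover four full completion
             * queues (4 * cq_ecount entries).
             */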
7747        for (i = 0; i < (4 * phba->sli4_hba.cq_ecount); i++) {
7748                cq_event = kmalloc(sizeof(struct lpfc_cq_event), GFP_KERNEL);
7749                if (!cq_event)
7750                        goto out_pool_create_fail;
7751                list_add_tail(&cq_event->list,
7752                              &phba->sli4_hba.sp_cqe_event_pool);
7753        }
7754        return 0;
7755
7756out_pool_create_fail:
7757        lpfc_sli4_cq_event_pool_destroy(phba);
7758        return -ENOMEM;
7759}
7760
7761/**
7762 * lpfc_sli4_cq_event_pool_destroy - Free completion-queue event free pool
7763 * @phba: pointer to lpfc hba data structure.
7764 *
7765 * This routine is invoked to free the pool of completion queue events at
7766 * driver unload time. Note that it is the responsibility of the driver
7767 * cleanup routine to free all the outstanding completion-queue events
7768 * allocated from this pool back into the pool before invoking this routine
7769 * to destroy the pool.
7770 **/
7771static void
7772lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *phba)
7773{
7774        struct lpfc_cq_event *cq_event, *next_cq_event;
7775
7776        list_for_each_entry_safe(cq_event, next_cq_event,
7777                                 &phba->sli4_hba.sp_cqe_event_pool, list) {
7778                list_del(&cq_event->list);
7779                kfree(cq_event);
7780        }
7781}
7782
7783/**
7784 * __lpfc_sli4_cq_event_alloc - Allocate a completion-queue event from free pool
7785 * @phba: pointer to lpfc hba data structure.
7786 *
7787 * This routine is the lock-free version of the API invoked to allocate a
7788 * completion-queue event from the free pool.
7789 *
7790 * Return: Pointer to the newly allocated completion-queue event if successful
7791 *         NULL otherwise.
7792 **/
7793struct lpfc_cq_event *
7794__lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba)
7795{
7796        struct lpfc_cq_event *cq_event = NULL;
7797
7798        list_remove_head(&phba->sli4_hba.sp_cqe_event_pool, cq_event,
7799                         struct lpfc_cq_event, list);
7800        return cq_event;
7801}
7802
7803/**
7804 * lpfc_sli4_cq_event_alloc - Allocate a completion-queue event from free pool
7805 * @phba: pointer to lpfc hba data structure.
7806 *
7807 * This routine is the locked version of the API invoked to allocate a
7808 * completion-queue event from the free pool.
7809 *
7810 * Return: Pointer to the newly allocated completion-queue event if successful
7811 *         NULL otherwise.
7812 **/
7813struct lpfc_cq_event *
7814lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba)
7815{
7816        struct lpfc_cq_event *cq_event;
7817        unsigned long iflags;
7818
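            /* The sp_cqe_event_pool free list is protected by hbalock. */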
7819        spin_lock_irqsave(&phba->hbalock, iflags);
7820        cq_event = __lpfc_sli4_cq_event_alloc(phba);
7821        spin_unlock_irqrestore(&phba->hbalock, iflags);
7822        return cq_event;
7823}
7824
7825/**
7826 * __lpfc_sli4_cq_event_release - Release a completion-queue event to free pool
7827 * @phba: pointer to lpfc hba data structure.
7828 * @cq_event: pointer to the completion queue event to be freed.
7829 *
7830 * This routine is the lock-free version of the API invoked to release a
7831 * completion-queue event back into the free pool.
7832 **/
7833void
7834__lpfc_sli4_cq_event_release(struct lpfc_hba *phba,
7835                             struct lpfc_cq_event *cq_event)
7836{
7837        list_add_tail(&cq_event->list, &phba->sli4_hba.sp_cqe_event_pool);
7838}
7839
7840/**
7841 * lpfc_sli4_cq_event_release - Release a completion-queue event to free pool
7842 * @phba: pointer to lpfc hba data structure.
7843 * @cq_event: pointer to the completion queue event to be freed.
7844 *
7845 * This routine is the locked version of the API invoked to release a
7846 * completion-queue event back into the free pool.
7847 **/
7848void
7849lpfc_sli4_cq_event_release(struct lpfc_hba *phba,
7850                           struct lpfc_cq_event *cq_event)
7851{
7852        unsigned long iflags;
7853        spin_lock_irqsave(&phba->hbalock, iflags);
7854        __lpfc_sli4_cq_event_release(phba, cq_event);
7855        spin_unlock_irqrestore(&phba->hbalock, iflags);
7856}
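/*
 * Editor's sketch: the four alloc/release routines above follow the
 * kernel's "__foo does the work, foo wraps it in the lock" convention,
 * so callers that already hold hbalock can use the double-underscore
 * variants directly. A user-space analogue with a pthread mutex
 * standing in for the irq-saving spinlock; all names are hypothetical.
 */
#include <pthread.h>

struct pnode { struct pnode *next; };

static struct pnode *pool_head;
static pthread_mutex_t pool_lock = PTHREAD_MUTEX_INITIALIZER;

/* Lock-free core: the caller must already hold pool_lock. */
static struct pnode *__pool_alloc(void)
{
	struct pnode *n = pool_head;

	if (n)
		pool_head = n->next;
	return n;
}

static void __pool_release(struct pnode *n)
{
	n->next = pool_head;
	pool_head = n;
}

/* Locked wrappers: what most call sites should use. */
static struct pnode *pool_alloc(void)
{
	pthread_mutex_lock(&pool_lock);
	struct pnode *n = __pool_alloc();
	pthread_mutex_unlock(&pool_lock);
	return n;
}

static void pool_release(struct pnode *n)
{
	pthread_mutex_lock(&pool_lock);
	__pool_release(n);
	pthread_mutex_unlock(&pool_lock);
}

int main(void)
{
	struct pnode seed;

	pool_release(&seed);            /* stock the pool with one node */
	return pool_alloc() ? 0 : 1;    /* and take it back out */
}
/* --- end of editor's sketch --- */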
7857
7858/**
7859 * lpfc_sli4_cq_event_release_all - Release all cq events to the free pool
7860 * @phba: pointer to lpfc hba data structure.
7861 *
7862 * This routine releases all the pending completion-queue events back
7863 * into the free pool in preparation for device reset.
7864 **/
7865static void
7866lpfc_sli4_cq_event_release_all(struct lpfc_hba *phba)
7867{
7868        LIST_HEAD(cqelist);
7869        struct lpfc_cq_event *cqe;
7870        unsigned long iflags;
7871
7872        /* Retrieve all the pending WCQEs from pending WCQE lists */
7873        spin_lock_irqsave(&phba->hbalock, iflags);
7874        /* Pending FCP XRI abort events */
7875        list_splice_init(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue,
7876                         &cqelist);
7877        /* Pending ELS XRI abort events */
7878        list_splice_init(&phba->sli4_hba.sp_els_xri_aborted_work_queue,
7879                         &cqelist);
7880        /* Pending async events */
7881        list_splice_init(&phba->sli4_hba.sp_asynce_work_queue,
7882                         &cqelist);
7883        spin_unlock_irqrestore(&phba->hbalock, iflags);
7884
7885        while (!list_empty(&cqelist)) {
7886                list_remove_head(&cqelist, cqe, struct lpfc_cq_event, list);
7887                lpfc_sli4_cq_event_release(phba, cqe);
7888        }
7889}
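/*
 * Editor's sketch: lpfc_sli4_cq_event_release_all() above uses the
 * splice-then-drain idiom: detach every pending entry from the shared
 * queues while the lock is held, then walk the private list with the
 * lock dropped so per-entry work does not extend the critical section.
 * The same shape in user-space C; the mutex, queues, and splice helper
 * are illustrative stand-ins (the kernel's list_splice_init() detaches
 * in O(1), whereas this simple helper walks to the tail).
 */
#include <pthread.h>
#include <stdio.h>

struct qnode { struct qnode *next; };

static struct qnode *q1, *q2, *q3;      /* pending work queues */
static pthread_mutex_t qlock = PTHREAD_MUTEX_INITIALIZER;

static struct qnode *splice(struct qnode **src, struct qnode *tail)
{
	/* Detach the whole source list and chain it in front of tail. */
	struct qnode *head = *src;
	struct qnode *end;

	*src = NULL;
	if (!head)
		return tail;
	for (end = head; end->next; end = end->next)
		;
	end->next = tail;
	return head;
}

static void release_all(void)
{
	struct qnode *local = NULL;

	pthread_mutex_lock(&qlock);
	local = splice(&q1, local);
	local = splice(&q2, local);
	local = splice(&q3, local);
	pthread_mutex_unlock(&qlock);

	/* Lock no longer held: drain at leisure. */
	while (local) {
		struct qnode *n = local;
		local = n->next;
		printf("released %p\n", (void *)n);
	}
}

int main(void)
{
	static struct qnode a, b, c;

	q1 = &a;
	b.next = &c;
	q2 = &b;
	release_all();
	return 0;
}
/* --- end of editor's sketch --- */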
7890
7891/**
7892 * lpfc_pci_function_reset - Reset pci function.
7893 * @phba: pointer to lpfc hba data structure.
7894 *
7895 * This routine is invoked to request a PCI function reset. It destroys
7896 * all resources assigned to the PCI function which originates this request.
7897 *
7898 * Return codes
7899 *      0 - successful
7900 *      -ENOMEM - No available memory
7901 *      -EIO - The mailbox failed to complete successfully.
7902 **/
7903int
7904lpfc_pci_function_reset(struct lpfc_hba *phba)
7905{
7906        LPFC_MBOXQ_t *mboxq;
7907        uint32_t rc = 0, if_type;
7908        uint32_t shdr_status, shdr_add_status;
7909        uint32_t rdy_chk;
7910        uint32_t port_reset = 0;
7911        union lpfc_sli4_cfg_shdr *shdr;
7912        struct lpfc_register reg_data;
7913        uint16_t devid;
7914
7915        if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
7916        switch (if_type) {
7917        case LPFC_SLI_INTF_IF_TYPE_0:
7918                mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
7919                                                       GFP_KERNEL);
7920                if (!mboxq) {
7921                        lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7922                                        "0494 Unable to allocate memory for "
7923                                        "issuing SLI_FUNCTION_RESET mailbox "
7924                                        "command\n");
7925                        return -ENOMEM;
7926                }
7927
7928                /* Setup PCI function reset mailbox-ioctl command */
7929                lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
7930                                 LPFC_MBOX_OPCODE_FUNCTION_RESET, 0,
7931                                 LPFC_SLI4_MBX_EMBED);
7932                rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7933                shdr = (union lpfc_sli4_cfg_shdr *)
7934                        &mboxq->u.mqe.un.sli4_config.header.cfg_shdr;
7935                shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
7936                shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
7937                                         &shdr->response);
7938                if (rc != MBX_TIMEOUT)
7939                        mempool_free(mboxq, phba->mbox_mem_pool);
7940                if (shdr_status || shdr_add_status || rc) {
7941                        lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7942                                        "0495 SLI_FUNCTION_RESET mailbox "
7943                                        "failed with status x%x add_status x%x,"
7944                                        " mbx status x%x\n",
7945                                        shdr_status, shdr_add_status, rc);
7946                        rc = -ENXIO;
7947                }
7948                break;
7949        case LPFC_SLI_INTF_IF_TYPE_2:
7950wait:
7951                /*
7952                 * Poll the Port Status Register and wait for RDY for
7953                 * up to 30 seconds. If the port doesn't respond, treat
7954                 * it as an error.
7955                 */
7956                for (rdy_chk = 0; rdy_chk < 3000; rdy_chk++) {
7957                        if (lpfc_readl(phba->sli4_hba.u.if_type2.
7958                                STATUSregaddr, &reg_data.word0)) {
7959                                rc = -ENODEV;
7960                                goto out;
7961                        }
7962                        if (bf_get(lpfc_sliport_status_rdy, &reg_data))
7963                                break;
7964                        msleep(20);
7965                }
7966
7967                if (!bf_get(lpfc_sliport_status_rdy, &reg_data)) {
7968                        phba->work_status[0] = readl(
7969                                phba->sli4_hba.u.if_type2.ERR1regaddr);
7970                        phba->work_status[1] = readl(
7971                                phba->sli4_hba.u.if_type2.ERR2regaddr);
7972                        lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7973                                        "2890 Port not ready, port status reg "
7974                                        "0x%x error 1=0x%x, error 2=0x%x\n",
7975                                        reg_data.word0,
7976                                        phba->work_status[0],
7977                                        phba->work_status[1]);
7978                        rc = -ENODEV;
7979                        goto out;
7980                }
7981
7982                if (!port_reset) {
7983                        /*
7984                         * Reset the port now
7985                         */
7986                        reg_data.word0 = 0;
7987                        bf_set(lpfc_sliport_ctrl_end, &reg_data,
7988                               LPFC_SLIPORT_LITTLE_ENDIAN);
7989                        bf_set(lpfc_sliport_ctrl_ip, &reg_data,
7990                               LPFC_SLIPORT_INIT_PORT);
7991                        writel(reg_data.word0, phba->sli4_hba.u.if_type2.
7992                               CTRLregaddr);
7993                        /* flush */
7994                        pci_read_config_word(phba->pcidev,
7995                                             PCI_DEVICE_ID, &devid);
7996
7997                        port_reset = 1;
7998                        msleep(20);
7999                        goto wait;
8000                } else if (bf_get(lpfc_sliport_status_rn, &reg_data)) {
8001                        rc = -ENODEV;
8002                        goto out;
8003                }
8004                break;
8005
8006        case LPFC_SLI_INTF_IF_TYPE_1:
8007        default:
8008                break;
8009        }
8010
8011out:
8012        /* Catch the not-ready port failure after a port reset. */
8013        if (rc) {
8014                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8015                                "3317 HBA not functional: IP Reset Failed "
8016                                "try: echo fw_reset > board_mode\n");
8017                rc = -ENODEV;
8018        }
8019
8020        return rc;
8021}
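/*
 * Editor's sketch: the IF_TYPE_2 branch above is built around a bounded
 * poll: read a status register up to 3000 times, roughly 20ms apart,
 * and bail out with an error if RDY never appears. The same loop shape
 * in user-space C; read_port_ready() is a hypothetical stand-in for
 * lpfc_readl() plus the lpfc_sliport_status_rdy bit test.
 */
#include <stdbool.h>
#include <stdio.h>
#include <time.h>

static bool read_port_ready(void)
{
	static int calls;

	return ++calls > 5;             /* pretend RDY appears on poll 6 */
}

static int wait_port_ready(void)
{
	struct timespec ts = { 0, 20 * 1000000L }; /* ~20ms, like msleep(20) */

	for (int i = 0; i < 3000; i++) {
		if (read_port_ready())
			return 0;       /* port is ready */
		nanosleep(&ts, NULL);
	}
	return -1;                      /* no RDY: treat as -ENODEV */
}

int main(void)
{
	printf("port %s\n", wait_port_ready() ? "not ready" : "ready");
	return 0;
}
/* --- end of editor's sketch --- */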
8022
8023/**
8024 * lpfc_sli4_pci_mem_setup - Setup SLI4 HBA PCI memory space.
8025 * @phba: pointer to lpfc hba data structure.
8026 *
8027 * This routine is invoked to set up the PCI device memory space for device
8028 * with SLI-4 interface spec.
8029 *
8030 * Return codes
8031 *      0 - successful
8032 *      other values - error
8033 **/
8034static int
8035lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba)
8036{
8037        struct pci_dev *pdev;
8038        unsigned long bar0map_len, bar1map_len, bar2map_len;
8039        int error = -ENODEV;
8040        uint32_t if_type;
8041
8042        /* Obtain PCI device reference */
8043        if (!phba->pcidev)
8044                return error;
8045        else
8046                pdev = phba->pcidev;
8047
8048        /* Set the device DMA mask size */
8049        if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0
8050         || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) {
8051                if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0
8052                 || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
8053                        return error;
8054                }
8055        }
8056
8057        /*
8058         * The BARs and register set definitions and offset locations are
8059         * dependent on the if_type.
8060         */
8061        if (pci_read_config_dword(pdev, LPFC_SLI_INTF,
8062                                  &phba->sli4_hba.sli_intf.word0)) {
8063                return error;
8064        }
8065
8066        /* There is no SLI3 fallback for SLI4 devices. */
8067        if (bf_get(lpfc_sli_intf_valid, &phba->sli4_hba.sli_intf) !=
8068            LPFC_SLI_INTF_VALID) {
8069                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8070                                "2894 SLI_INTF reg contents invalid "
8071                                "sli_intf reg 0x%x\n",
8072                                phba->sli4_hba.sli_intf.word0);
8073                return error;
8074        }
8075
8076        if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
8077        /*
8078         * Get the bus address of SLI4 device Bar regions and the
8079         * number of bytes required by each mapping. The mapping of the
8080         * particular PCI BARs regions is dependent on the type of
8081         * SLI4 device.
8082         */
8083        if (pci_resource_start(pdev, PCI_64BIT_BAR0)) {
8084                phba->pci_bar0_map = pci_resource_start(pdev, PCI_64BIT_BAR0);
8085                bar0map_len = pci_resource_len(pdev, PCI_64BIT_BAR0);
8086
8087                /*
8088                 * Map SLI4 PCI Config Space Register base to a kernel virtual
8089                 * addr
8090                 */
8091                phba->sli4_hba.conf_regs_memmap_p =
8092                        ioremap(phba->pci_bar0_map, bar0map_len);
8093                if (!phba->sli4_hba.conf_regs_memmap_p) {
8094                        dev_printk(KERN_ERR, &pdev->dev,
8095                                   "ioremap failed for SLI4 PCI config "
8096                                   "registers.\n");
8097                        goto out;
8098                }
8099                phba->pci_bar0_memmap_p = phba->sli4_hba.conf_regs_memmap_p;
8100                /* Set up BAR0 PCI config space register memory map */
8101                lpfc_sli4_bar0_register_memmap(phba, if_type);
8102        } else {
8103                phba->pci_bar0_map = pci_resource_start(pdev, 1);
8104                bar0map_len = pci_resource_len(pdev, 1);
8105                if (if_type == LPFC_SLI_INTF_IF_TYPE_2) {
8106                        dev_printk(KERN_ERR, &pdev->dev,
8107                           "FATAL - No BAR0 mapping for SLI4, if_type 2\n");
8108                        goto out;
8109                }
8110                phba->sli4_hba.conf_regs_memmap_p =
8111                                ioremap(phba->pci_bar0_map, bar0map_len);
8112                if (!phba->sli4_hba.conf_regs_memmap_p) {
8113                        dev_printk(KERN_ERR, &pdev->dev,
8114                                "ioremap failed for SLI4 PCI config "
8115                                "registers.\n");
8116                        goto out;
8117                }
8118                lpfc_sli4_bar0_register_memmap(phba, if_type);
8119        }
8120
8121        if ((if_type == LPFC_SLI_INTF_IF_TYPE_0) &&
8122            (pci_resource_start(pdev, PCI_64BIT_BAR2))) {
8123                /*
8124                 * Map SLI4 if type 0 HBA Control Register base to a kernel
8125                 * virtual address and setup the registers.
8126                 */
8127                phba->pci_bar1_map = pci_resource_start(pdev, PCI_64BIT_BAR2);
8128                bar1map_len = pci_resource_len(pdev, PCI_64BIT_BAR2);
8129                phba->sli4_hba.ctrl_regs_memmap_p =
8130                                ioremap(phba->pci_bar1_map, bar1map_len);
8131                if (!phba->sli4_hba.ctrl_regs_memmap_p) {
8132                        dev_printk(KERN_ERR, &pdev->dev,
8133                           "ioremap failed for SLI4 HBA control registers.\n");
8134                        goto out_iounmap_conf;
8135                }
8136                phba->pci_bar2_memmap_p = phba->sli4_hba.ctrl_regs_memmap_p;
8137                lpfc_sli4_bar1_register_memmap(phba);
8138        }
8139
8140        if ((if_type == LPFC_SLI_INTF_IF_TYPE_0) &&
8141            (pci_resource_start(pdev, PCI_64BIT_BAR4))) {
8142                /*
8143                 * Map SLI4 if type 0 HBA Doorbell Register base to a kernel
8144                 * virtual address and setup the registers.
8145                 */
8146                phba->pci_bar2_map = pci_resource_start(pdev, PCI_64BIT_BAR4);
8147                bar2map_len = pci_resource_len(pdev, PCI_64BIT_BAR4);
8148                phba->sli4_hba.drbl_regs_memmap_p =
8149                                ioremap(phba->pci_bar2_map, bar2map_len);
8150                if (!phba->sli4_hba.drbl_regs_memmap_p) {
8151                        dev_printk(KERN_ERR, &pdev->dev,
8152                           "ioremap failed for SLI4 HBA doorbell registers.\n");
8153                        goto out_iounmap_ctrl;
8154                }
8155                phba->pci_bar4_memmap_p = phba->sli4_hba.drbl_regs_memmap_p;
8156                error = lpfc_sli4_bar2_register_memmap(phba, LPFC_VF0);
8157                if (error)
8158                        goto out_iounmap_all;
8159        }
8160
8161        return 0;
8162
8163out_iounmap_all:
8164        iounmap(phba->sli4_hba.drbl_regs_memmap_p);
8165out_iounmap_ctrl:
8166        iounmap(phba->sli4_hba.ctrl_regs_memmap_p);
8167out_iounmap_conf:
8168        iounmap(phba->sli4_hba.conf_regs_memmap_p);
8169out:
8170        return error;
8171}
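/*
 * Editor's sketch: lpfc_sli4_pci_mem_setup() above uses the kernel's
 * staged-unwind idiom: each successfully acquired resource (here, one
 * ioremap() per BAR) adds one more cleanup label, and a later failure
 * jumps to exactly the labels that undo what was already done. A
 * compact user-space analogue with malloc()/free() standing in for
 * ioremap()/iounmap(); unlike the driver, the demo also releases the
 * resources on the success path so it stays leak-free.
 */
#include <stdlib.h>

static int setup_three_stages(void)
{
	void *conf, *ctrl, *drbl;
	int error = -1;

	conf = malloc(64);              /* stage 1: config registers */
	if (!conf)
		goto out;
	ctrl = malloc(64);              /* stage 2: control registers */
	if (!ctrl)
		goto out_free_conf;
	drbl = malloc(64);              /* stage 3: doorbell registers */
	if (!drbl)
		goto out_free_ctrl;

	/* ... all three mappings usable here ... */
	free(drbl);
	free(ctrl);
	free(conf);
	return 0;

out_free_ctrl:
	free(ctrl);
out_free_conf:
	free(conf);
out:
	return error;
}

int main(void)
{
	return setup_three_stages() ? 1 : 0;
}
/* --- end of editor's sketch --- */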
8172
8173/**
8174 * lpfc_sli4_pci_mem_unset - Unset SLI4 HBA PCI memory space.
8175 * @phba: pointer to lpfc hba data structure.
8176 *
8177 * This routine is invoked to unset the PCI device memory space for device
8178 * with SLI-4 interface spec.
8179 **/
8180static void
8181lpfc_sli4_pci_mem_unset(struct lpfc_hba *phba)
8182{
8183        uint32_t if_type;
8184        if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
8185
8186        switch (if_type) {
8187        case LPFC_SLI_INTF_IF_TYPE_0:
8188                iounmap(phba->sli4_hba.drbl_regs_memmap_p);
8189                iounmap(phba->sli4_hba.ctrl_regs_memmap_p);
8190                iounmap(phba->sli4_hba.conf_regs_memmap_p);
8191                break;
8192        case LPFC_SLI_INTF_IF_TYPE_2:
8193                iounmap(phba->sli4_hba.conf_regs_memmap_p);
8194                break;
8195        case LPFC_SLI_INTF_IF_TYPE_1:
8196        default:
8197                dev_printk(KERN_ERR, &phba->pcidev->dev,
8198                           "FATAL - unsupported SLI4 interface type - %d\n",
8199                           if_type);
8200                break;
8201        }
8202}
8203
8204/**
8205 * lpfc_sli_enable_msix - Enable MSI-X interrupt mode on SLI-3 device
8206 * @phba: pointer to lpfc hba data structure.
8207 *
8208 * This routine is invoked to enable the MSI-X interrupt vectors to device
8209 * with SLI-3 interface specs. The kernel function pci_enable_msix_exact()
8210 * is called to enable the MSI-X vectors. Note that pci_enable_msix_exact(),
8211 * once invoked, enables either all or nothing, depending on the current
8212 * availability of PCI vector resources. The device driver is responsible
8213 * for calling the individual request_irq() to register each MSI-X vector
8214 * with an interrupt handler, which is done in this function. Note that
8215 * later, when the device is unloading, the driver should always call
8216 * free_irq() on all MSI-X vectors it has done request_irq() on before
8217 * calling pci_disable_msix(). Failure to do so results in a BUG_ON() and
8218 * leaves the device with MSI-X enabled, leaking its vectors.
8219 *
8220 * Return codes
8221 *   0 - successful
8222 *   other values - error
8223 **/
8224static int
8225lpfc_sli_enable_msix(struct lpfc_hba *phba)
8226{
8227        int rc, i;
8228        LPFC_MBOXQ_t *pmb;
8229
8230        /* Set up MSI-X multi-message vectors */
8231        for (i = 0; i < LPFC_MSIX_VECTORS; i++)
8232                phba->msix_entries[i].entry = i;
8233
8234        /* Configure MSI-X capability structure */
8235        rc = pci_enable_msix_exact(phba->pcidev, phba->msix_entries,
8236                                   LPFC_MSIX_VECTORS);
8237        if (rc) {
8238                lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
8239                                "0420 PCI enable MSI-X failed (%d)\n", rc);
8240                goto vec_fail_out;
8241        }
8242        for (i = 0; i < LPFC_MSIX_VECTORS; i++)
8243                lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
8244                                "0477 MSI-X entry[%d]: vector=x%x "
8245                                "message=%d\n", i,
8246                                phba->msix_entries[i].vector,
8247                                phba->msix_entries[i].entry);
8248        /*
8249         * Assign MSI-X vectors to interrupt handlers
8250         */
8251
8252        /* vector-0 is associated to slow-path handler */
8253        rc = request_irq(phba->msix_entries[0].vector,
8254                         &lpfc_sli_sp_intr_handler, IRQF_SHARED,
8255                         LPFC_SP_DRIVER_HANDLER_NAME, phba);
8256        if (rc) {
8257                lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
8258                                "0421 MSI-X slow-path request_irq failed "
8259                                "(%d)\n", rc);
8260                goto msi_fail_out;
8261        }
8262
8263        /* vector-1 is associated to fast-path handler */
8264        rc = request_irq(phba->msix_entries[1].vector,
8265                         &lpfc_sli_fp_intr_handler, IRQF_SHARED,
8266                         LPFC_FP_DRIVER_HANDLER_NAME, phba);
8267
8268        if (rc) {
8269                lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
8270                                "0429 MSI-X fast-path request_irq failed "
8271                                "(%d)\n", rc);
8272                goto irq_fail_out;
8273        }
8274
8275        /*
8276         * Configure HBA MSI-X attention conditions to messages
8277         */
8278        pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
8279
8280        if (!pmb) {
8281                rc = -ENOMEM;
8282                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8283                                "0474 Unable to allocate memory for issuing "
8284                                "MBOX_CONFIG_MSI command\n");
8285                goto mem_fail_out;
8286        }
8287        rc = lpfc_config_msi(phba, pmb);
8288        if (rc)
8289                goto mbx_fail_out;
8290        rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
8291        if (rc != MBX_SUCCESS) {
8292                lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
8293                                "0351 Config MSI mailbox command failed, "
8294                                "mbxCmd x%x, mbxStatus x%x\n",
8295                                pmb->u.mb.mbxCommand, pmb->u.mb.mbxStatus);
8296                goto mbx_fail_out;
8297        }
8298
8299        /* Free memory allocated for mailbox command */
8300        mempool_free(pmb, phba->mbox_mem_pool);
8301        return rc;
8302
8303mbx_fail_out:
8304        /* Free memory allocated for mailbox command */
8305        mempool_free(pmb, phba->mbox_mem_pool);
8306
8307mem_fail_out:
8308        /* free the irq already requested */
8309        free_irq(phba->msix_entries[1].vector, phba);
8310
8311irq_fail_out:
8312        /* free the irq already requested */
8313        free_irq(phba->msix_entries[0].vector, phba);
8314
8315msi_fail_out:
8316        /* Unconfigure MSI-X capability structure */
8317        pci_disable_msix(phba->pcidev);
8318
8319vec_fail_out:
8320        return rc;
8321}
8322
8323/**
8324 * lpfc_sli_disable_msix - Disable MSI-X interrupt mode on SLI-3 device.
8325 * @phba: pointer to lpfc hba data structure.
8326 *
8327 * This routine is invoked to release the MSI-X vectors and then disable the
8328 * MSI-X interrupt mode to device with SLI-3 interface spec.
8329 **/
8330static void
8331lpfc_sli_disable_msix(struct lpfc_hba *phba)
8332{
8333        int i;
8334
8335        /* Free up MSI-X multi-message vectors */
8336        for (i = 0; i < LPFC_MSIX_VECTORS; i++)
8337                free_irq(phba->msix_entries[i].vector, phba);
8338        /* Disable MSI-X */
8339        pci_disable_msix(phba->pcidev);
8340
8341        return;
8342}
8343
8344/**
8345 * lpfc_sli_enable_msi - Enable MSI interrupt mode on SLI-3 device.
8346 * @phba: pointer to lpfc hba data structure.
8347 *
8348 * This routine is invoked to enable the MSI interrupt mode to device with
8349 * SLI-3 interface spec. The kernel function pci_enable_msi() is called to
8350 * enable the MSI vector. The device driver is responsible for calling
8351 * request_irq() to register the MSI vector with an interrupt handler,
8352 * which is done in this function.
8353 *
8354 * Return codes
8355 *      0 - successful
8356 *      other values - error
8357 */
8358static int
8359lpfc_sli_enable_msi(struct lpfc_hba *phba)
8360{
8361        int rc;
8362
8363        rc = pci_enable_msi(phba->pcidev);
8364        if (!rc)
8365                lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
8366                                "0462 PCI enable MSI mode success.\n");
8367        else {
8368                lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
8369                                "0471 PCI enable MSI mode failed (%d)\n", rc);
8370                return rc;
8371        }
8372
8373        rc = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler,
8374                         IRQF_SHARED, LPFC_DRIVER_NAME, phba);
8375        if (rc) {
8376                pci_disable_msi(phba->pcidev);
8377                lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
8378                                "0478 MSI request_irq failed (%d)\n", rc);
8379        }
8380        return rc;
8381}
8382
8383/**
8384 * lpfc_sli_disable_msi - Disable MSI interrupt mode to SLI-3 device.
8385 * @phba: pointer to lpfc hba data structure.
8386 *
8387 * This routine is invoked to disable the MSI interrupt mode to device with
8388 * SLI-3 interface spec. The driver calls free_irq() on the MSI vector it
8389 * has done request_irq() on before calling pci_disable_msi(). Failure to
8390 * do so results in a BUG_ON() and leaves the device with MSI enabled,
8391 * leaking its vector.
8392 */
8393static void
8394lpfc_sli_disable_msi(struct lpfc_hba *phba)
8395{
8396        free_irq(phba->pcidev->irq, phba);
8397        pci_disable_msi(phba->pcidev);
8398        return;
8399}
8400
8401/**
8402 * lpfc_sli_enable_intr - Enable device interrupt to SLI-3 device.
8403 * @phba: pointer to lpfc hba data structure.
8404 *
8405 * This routine is invoked to enable device interrupt and associate driver's
8406 * interrupt handler(s) to interrupt vector(s) to device with SLI-3 interface
8407 * spec. Depending on the interrupt mode configured for the driver, the
8408 * driver will try to fall back from the configured interrupt mode to an
8409 * interrupt mode supported by the platform, kernel, and device, in the order
8410 * of:
8411 * MSI-X -> MSI -> IRQ.
8412 *
8413 * Return codes
8414 *   0 - successful
8415 *   other values - error
8416 **/
8417static uint32_t
8418lpfc_sli_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
8419{
8420        uint32_t intr_mode = LPFC_INTR_ERROR;
8421        int retval;
8422
8423        if (cfg_mode == 2) {
8424                /* Need to issue conf_port mbox cmd before conf_msi mbox cmd */
8425                retval = lpfc_sli_config_port(phba, LPFC_SLI_REV3);
8426                if (!retval) {
8427                        /* Now, try to enable MSI-X interrupt mode */
8428                        retval = lpfc_sli_enable_msix(phba);
8429                        if (!retval) {
8430                                /* Indicate initialization to MSI-X mode */
8431                                phba->intr_type = MSIX;
8432                                intr_mode = 2;
8433                        }
8434                }
8435        }
8436
8437        /* Fallback to MSI if MSI-X initialization failed */
8438        if (cfg_mode >= 1 && phba->intr_type == NONE) {
8439                retval = lpfc_sli_enable_msi(phba);
8440                if (!retval) {
8441                        /* Indicate initialization to MSI mode */
8442                        phba->intr_type = MSI;
8443                        intr_mode = 1;
8444                }
8445        }
8446
8447        /* Fallback to INTx if both MSI-X/MSI initialization failed */
8448        if (phba->intr_type == NONE) {
8449                retval = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler,
8450                                     IRQF_SHARED, LPFC_DRIVER_NAME, phba);
8451                if (!retval) {
8452                        /* Indicate initialization to INTx mode */
8453                        phba->intr_type = INTx;
8454                        intr_mode = 0;
8455                }
8456        }
8457        return intr_mode;
8458}
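/*
 * Editor's sketch: lpfc_sli_enable_intr() above is a degradation
 * ladder: try MSI-X only when cfg_mode is 2, fall back to MSI when
 * cfg_mode is at least 1, and settle for INTx otherwise. The ladder
 * reduced to its skeleton; the three try_* helpers are hypothetical
 * stand-ins that, like the driver's helpers, return 0 on success.
 */
#include <stdio.h>

enum intr_type { NONE, INTX, MSI, MSIX };

static int try_msix(void) { return -1; }    /* pretend MSI-X unavailable */
static int try_msi(void)  { return 0; }
static int try_intx(void) { return 0; }

static enum intr_type enable_intr(int cfg_mode)
{
	enum intr_type type = NONE;

	if (cfg_mode == 2 && !try_msix())
		type = MSIX;
	if (cfg_mode >= 1 && type == NONE && !try_msi())
		type = MSI;
	if (type == NONE && !try_intx())
		type = INTX;
	return type;
}

int main(void)
{
	/* cfg_mode 2: MSI-X fails above, so this lands on MSI. */
	printf("intr type = %d\n", enable_intr(2));
	return 0;
}
/* --- end of editor's sketch --- */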
8459
8460/**
8461 * lpfc_sli_disable_intr - Disable device interrupt to SLI-3 device.
8462 * @phba: pointer to lpfc hba data structure.
8463 *
8464 * This routine is invoked to disable device interrupt and disassociate the
8465 * driver's interrupt handler(s) from interrupt vector(s) to device with
8466 * SLI-3 interface spec. Depending on the interrupt mode, the driver will
8467 * release the interrupt vector(s) for the message signaled interrupt.
8468 **/
8469static void
8470lpfc_sli_disable_intr(struct lpfc_hba *phba)
8471{
8472        /* Disable the currently initialized interrupt mode */
8473        if (phba->intr_type == MSIX)
8474                lpfc_sli_disable_msix(phba);
8475        else if (phba->intr_type == MSI)
8476                lpfc_sli_disable_msi(phba);
8477        else if (phba->intr_type == INTx)
8478                free_irq(phba->pcidev->irq, phba);
8479
8480        /* Reset interrupt management states */
8481        phba->intr_type = NONE;
8482        phba->sli.slistat.sli_intr = 0;
8483
8484        return;
8485}
8486
8487/**
8488 * lpfc_find_next_cpu - Find next available CPU that matches the phys_id
8489 * @phba: pointer to lpfc hba data structure.
8490 *
8491 * Find next available CPU to use for IRQ to CPU affinity.
8492 */
8493static int
8494lpfc_find_next_cpu(struct lpfc_hba *phba, uint32_t phys_id)
8495{
8496        struct lpfc_vector_map_info *cpup;
8497        int cpu;
8498
8499        cpup = phba->sli4_hba.cpu_map;
8500        for (cpu = 0; cpu < phba->sli4_hba.num_present_cpu; cpu++) {
8501                /* CPU must be online */
8502                if (cpu_online(cpu)) {
8503                        if ((cpup->irq == LPFC_VECTOR_MAP_EMPTY) &&
8504                            (lpfc_used_cpu[cpu] == LPFC_VECTOR_MAP_EMPTY) &&
8505                            (cpup->phys_id == phys_id)) {
8506                                return cpu;
8507                        }
8508                }
8509                cpup++;
8510        }
8511
8512        /*
8513         * If we get here, we have used ALL CPUs for the specific
8514         * phys_id. Now we need to clear out lpfc_used_cpu and start
8515         * reusing CPUs.
8516         */
8517
8518        for (cpu = 0; cpu < phba->sli4_hba.num_present_cpu; cpu++) {
8519                if (lpfc_used_cpu[cpu] == phys_id)
8520                        lpfc_used_cpu[cpu] = LPFC_VECTOR_MAP_EMPTY;
8521        }
8522
8523        cpup = phba->sli4_hba.cpu_map;
8524        for (cpu = 0; cpu < phba->sli4_hba.num_present_cpu; cpu++) {
8525                /* CPU must be online */
8526                if (cpu_online(cpu)) {
8527                        if ((cpup->irq == LPFC_VECTOR_MAP_EMPTY) &&
8528                            (cpup->phys_id == phys_id)) {
8529                                return cpu;
8530                        }
8531                }
8532                cpup++;
8533        }
8534        return LPFC_VECTOR_MAP_EMPTY;
8535}
8536
8537/**
8538 * lpfc_sli4_set_affinity - Set affinity for HBA IRQ vectors
8539 * @phba:       pointer to lpfc hba data structure.
8540 * @vectors:    number of HBA vectors
8541 *
8542 * Affinitize MSI-X IRQ vectors to CPUs. Try to spread vector affinity
8543 * equally across multiple physical CPUs (NUMA nodes).
8544 * In addition, this routine will assign an IO channel for each CPU
8545 * to use when issuing I/Os.
8546 */
8547static int
8548lpfc_sli4_set_affinity(struct lpfc_hba *phba, int vectors)
8549{
8550        int i, idx, saved_chann, used_chann, cpu, phys_id;
8551        int max_phys_id, min_phys_id;
8552        int num_io_channel, first_cpu, chan;
8553        struct lpfc_vector_map_info *cpup;
8554#ifdef CONFIG_X86
8555        struct cpuinfo_x86 *cpuinfo;
8556#endif
8557        struct cpumask *mask;
8558        uint8_t chann[LPFC_FCP_IO_CHAN_MAX+1];
8559
8560        /* If there is no mapping, just return */
8561        if (!phba->cfg_fcp_cpu_map)
8562                return 1;
8563
8564        /* Init cpu_map array */
8565        memset(phba->sli4_hba.cpu_map, 0xff,
8566               (sizeof(struct lpfc_vector_map_info) *
8567                phba->sli4_hba.num_present_cpu));
8568
8569        max_phys_id = 0;
8570        min_phys_id = 0xff;
8571        phys_id = 0;
8572        num_io_channel = 0;
8573        first_cpu = LPFC_VECTOR_MAP_EMPTY;
8574
8575        /* Update CPU map with physical id and core id of each CPU */
8576        cpup = phba->sli4_hba.cpu_map;
8577        for (cpu = 0; cpu < phba->sli4_hba.num_present_cpu; cpu++) {
8578#ifdef CONFIG_X86
8579                cpuinfo = &cpu_data(cpu);
8580                cpup->phys_id = cpuinfo->phys_proc_id;
8581                cpup->core_id = cpuinfo->cpu_core_id;
8582#else
8583                /* No distinction between CPUs for other platforms */
8584                cpup->phys_id = 0;
8585                cpup->core_id = 0;
8586#endif
8587
8588                lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
8589                                "3328 CPU physid %d coreid %d\n",
8590                                cpup->phys_id, cpup->core_id);
8591
8592                if (cpup->phys_id > max_phys_id)
8593                        max_phys_id = cpup->phys_id;
8594                if (cpup->phys_id < min_phys_id)
8595                        min_phys_id = cpup->phys_id;
8596                cpup++;
8597        }
8598
8599        phys_id = min_phys_id;
8600        /* Now associate the HBA vectors with specific CPUs */
8601        for (idx = 0; idx < vectors; idx++) {
8602                cpup = phba->sli4_hba.cpu_map;
8603                cpu = lpfc_find_next_cpu(phba, phys_id);
8604                if (cpu == LPFC_VECTOR_MAP_EMPTY) {
8605
8606                        /* Try for all phys_id's */
8607                        for (i = 1; i < max_phys_id; i++) {
8608                                phys_id++;
8609                                if (phys_id > max_phys_id)
8610                                        phys_id = min_phys_id;
8611                                cpu = lpfc_find_next_cpu(phba, phys_id);
8612                                if (cpu == LPFC_VECTOR_MAP_EMPTY)
8613                                        continue;
8614                                goto found;
8615                        }
8616
8617                        /* Use round robin for scheduling */
8618                        phba->cfg_fcp_io_sched = LPFC_FCP_SCHED_ROUND_ROBIN;
8619                        chan = 0;
8620                        cpup = phba->sli4_hba.cpu_map;
8621                        for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
8622                                cpup->channel_id = chan;
8623                                cpup++;
8624                                chan++;
8625                                if (chan >= phba->cfg_fcp_io_channel)
8626                                        chan = 0;
8627                        }
8628
8629                        lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8630                                        "3329 Cannot set affinity:"
8631                                        "Error mapping vector %d (%d)\n",
8632                                        idx, vectors);
8633                        return 0;
8634                }
8635found:
8636                cpup += cpu;
8637                if (phba->cfg_fcp_cpu_map == LPFC_DRIVER_CPU_MAP)
8638                        lpfc_used_cpu[cpu] = phys_id;
8639
8640                /* Associate vector with selected CPU */
8641                cpup->irq = phba->sli4_hba.msix_entries[idx].vector;
8642
8643                /* Associate IO channel with selected CPU */
8644                cpup->channel_id = idx;
8645                num_io_channel++;
8646
8647                if (first_cpu == LPFC_VECTOR_MAP_EMPTY)
8648                        first_cpu = cpu;
8649
8650                /* Now affinitize to the selected CPU */
8651                mask = &cpup->maskbits;
8652                cpumask_clear(mask);
8653                cpumask_set_cpu(cpu, mask);
8654                i = irq_set_affinity_hint(phba->sli4_hba.msix_entries[idx].
8655                                          vector, mask);
8656
8657                lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
8658                                "3330 Set Affinity: CPU %d channel %d "
8659                                "irq %d (%x)\n",
8660                                cpu, cpup->channel_id,
8661                                phba->sli4_hba.msix_entries[idx].vector, i);
8662
8663                /* Spread vector mapping across multiple physical CPU nodes */
8664                phys_id++;
8665                if (phys_id > max_phys_id)
8666                        phys_id = min_phys_id;
8667        }
8668
8669        /*
8670         * Finally fill in the IO channel for any remaining CPUs.
8671         * At this point, all IO channels have been assigned to a specific
8672         * MSIx vector, mapped to a specific CPU.
8673         * Base the remaining IO channel assignments on the IO channels
8674         * already assigned to other CPUs on the same phys_id.
8675         */
8676        for (i = min_phys_id; i <= max_phys_id; i++) {
8677                /*
8678                 * If there are no io channels already mapped to
8679                 * this phys_id, just round-robin through the io_channels.
8680                 * Set up chann[] for round-robin assignment.
8681                 */
8682                for (idx = 0; idx < phba->cfg_fcp_io_channel; idx++)
8683                        chann[idx] = idx;
8684
8685                saved_chann = 0;
8686                used_chann = 0;
8687
8688                /*
8689                 * First build a list of IO channels already assigned
8690                 * to this phys_id before reassigning the same IO
8691                 * channels to the remaining CPUs.
8692                 */
8693                cpup = phba->sli4_hba.cpu_map;
8694                cpu = first_cpu;
8695                cpup += cpu;
8696                for (idx = 0; idx < phba->sli4_hba.num_present_cpu;
8697                     idx++) {
8698                        if (cpup->phys_id == i) {
8699                                /*
8700                                 * Save any IO channels that are
8701                                 * already mapped to this phys_id.
8702                                 */
8703                                if (cpup->irq != LPFC_VECTOR_MAP_EMPTY) {
8704                                        chann[saved_chann] =
8705                                                cpup->channel_id;
8706                                        saved_chann++;
8707                                        goto out;
8708                                }
8709
8710                                /* See if we are using round-robin */
8711                                if (saved_chann == 0)
8712                                        saved_chann =
8713                                                phba->cfg_fcp_io_channel;
8714
8715                                /* Associate next IO channel with CPU */
8716                                cpup->channel_id = chann[used_chann];
8717                                num_io_channel++;
8718                                used_chann++;
8719                                if (used_chann == saved_chann)
8720                                        used_chann = 0;
8721
8722                                lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
8723                                                "3331 Set IO_CHANN "
8724                                                "CPU %d channel %d\n",
8725                                                idx, cpup->channel_id);
8726                        }
8727out:
8728                        cpu++;
8729                        if (cpu >= phba->sli4_hba.num_present_cpu) {
8730                                cpup = phba->sli4_hba.cpu_map;
8731                                cpu = 0;
8732                        } else {
8733                                cpup++;
8734                        }
8735                }
8736        }
8737
8738        if (phba->sli4_hba.num_online_cpu != phba->sli4_hba.num_present_cpu) {
8739                cpup = phba->sli4_hba.cpu_map;
8740                for (idx = 0; idx < phba->sli4_hba.num_present_cpu; idx++) {
8741                        if (cpup->channel_id == LPFC_VECTOR_MAP_EMPTY) {
8742                                cpup->channel_id = 0;
8743                                num_io_channel++;
8744
8745                                lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
8746                                                "3332 Assign IO_CHANN "
8747                                                "CPU %d channel %d\n",
8748                                                idx, cpup->channel_id);
8749                        }
8750                        cpup++;
8751                }
8752        }
8753
8754        /* Sanity check */
8755        if (num_io_channel != phba->sli4_hba.num_present_cpu)
8756                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8757                                "3333 Set affinity mismatch:"
8758                                "%d chann != %d cpus: %d vectors\n",
8759                                num_io_channel, phba->sli4_hba.num_present_cpu,
8760                                vectors);
8761
8762        /* Enable using cpu affinity for scheduling */
8763        phba->cfg_fcp_io_sched = LPFC_FCP_SCHED_BY_CPU;
8764        return 1;
8765}
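/*
 * Editor's sketch: when lpfc_sli4_set_affinity() above cannot map a
 * vector, it falls back to dealing IO channels out round-robin across
 * every present CPU, which is just a modulo assignment. The fallback
 * reduced to a few lines; the CPU and channel counts are arbitrary.
 */
#include <stdio.h>

int main(void)
{
	int num_cpus = 7, num_channels = 4;

	for (int cpu = 0; cpu < num_cpus; cpu++)
		printf("cpu %d -> channel %d\n", cpu, cpu % num_channels);
	return 0;
}
/* --- end of editor's sketch --- */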
8766
8767
8768/**
8769 * lpfc_sli4_enable_msix - Enable MSI-X interrupt mode to SLI-4 device
8770 * @phba: pointer to lpfc hba data structure.
8771 *
8772 * This routine is invoked to enable the MSI-X interrupt vectors to device
8773 * with SLI-4 interface spec. The kernel function pci_enable_msix_range()
8774 * is called to enable the MSI-X vectors. The device driver is responsible
8775 * for calling the individual request_irq() to register each MSI-X vector
8776 * with an interrupt handler, which is done in this function. Note that
8777 * later, when the device is unloading, the driver should always call
8778 * free_irq() on all MSI-X vectors it has done request_irq() on before
8779 * calling pci_disable_msix(). Failure to do so results in a BUG_ON() and
8780 * leaves the device with MSI-X enabled, leaking its vectors.
8781 *
8782 * Return codes
8783 * 0 - successful
8784 * other values - error
8785 **/
8786static int
8787lpfc_sli4_enable_msix(struct lpfc_hba *phba)
8788{
8789        int vectors, rc, index;
8790
8791        /* Set up MSI-X multi-message vectors */
8792        for (index = 0; index < phba->cfg_fcp_io_channel; index++)
8793                phba->sli4_hba.msix_entries[index].entry = index;
8794
8795        /* Configure MSI-X capability structure */
8796        vectors = phba->cfg_fcp_io_channel;
8797        if (phba->cfg_fof) {
8798                phba->sli4_hba.msix_entries[index].entry = index;
8799                vectors++;
8800        }
8801        rc = pci_enable_msix_range(phba->pcidev, phba->sli4_hba.msix_entries,
8802                                   2, vectors);
8803        if (rc < 0) {
8804                lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
8805                                "0484 PCI enable MSI-X failed (%d)\n", rc);
8806                goto vec_fail_out;
8807        }
8808        vectors = rc;
8809
8810        /* Log MSI-X vector assignment */
8811        for (index = 0; index < vectors; index++)
8812                lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
8813                                "0489 MSI-X entry[%d]: vector=x%x "
8814                                "message=%d\n", index,
8815                                phba->sli4_hba.msix_entries[index].vector,
8816                                phba->sli4_hba.msix_entries[index].entry);
8817
8818        /* Assign MSI-X vectors to interrupt handlers */
8819        for (index = 0; index < vectors; index++) {
8820                memset(&phba->sli4_hba.handler_name[index], 0, 16);
8821                snprintf((char *)&phba->sli4_hba.handler_name[index],
8822                         LPFC_SLI4_HANDLER_NAME_SZ,
8823                         LPFC_DRIVER_HANDLER_NAME"%d", index);
8824
8825                phba->sli4_hba.fcp_eq_hdl[index].idx = index;
8826                phba->sli4_hba.fcp_eq_hdl[index].phba = phba;
8827                atomic_set(&phba->sli4_hba.fcp_eq_hdl[index].fcp_eq_in_use, 1);
8828                if (phba->cfg_fof && (index == (vectors - 1)))
8829                        rc = request_irq(
8830                                phba->sli4_hba.msix_entries[index].vector,
8831                                 &lpfc_sli4_fof_intr_handler, IRQF_SHARED,
8832                                 (char *)&phba->sli4_hba.handler_name[index],
8833                                 &phba->sli4_hba.fcp_eq_hdl[index]);
8834                else
8835                        rc = request_irq(
8836                                phba->sli4_hba.msix_entries[index].vector,
8837                                 &lpfc_sli4_hba_intr_handler, IRQF_SHARED,
8838                                 (char *)&phba->sli4_hba.handler_name[index],
8839                                 &phba->sli4_hba.fcp_eq_hdl[index]);
8840                if (rc) {
8841                        lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
8842                                        "0486 MSI-X fast-path (%d) "
8843                                        "request_irq failed (%d)\n", index, rc);
8844                        goto cfg_fail_out;
8845                }
8846        }
8847
8848        if (phba->cfg_fof)
8849                vectors--;
8850
8851        if (vectors != phba->cfg_fcp_io_channel) {
8852                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8853                                "3238 Reducing IO channels to match number of "
8854                                "MSI-X vectors, requested %d got %d\n",
8855                                phba->cfg_fcp_io_channel, vectors);
8856                phba->cfg_fcp_io_channel = vectors;
8857        }
8858
8859        lpfc_sli4_set_affinity(phba, vectors);
8860        return rc;
8861
8862cfg_fail_out:
8863        /* free the irq already requested */
8864        for (--index; index >= 0; index--) {
8865                irq_set_affinity_hint(phba->sli4_hba.msix_entries[index].
8866                                          vector, NULL);
8867                free_irq(phba->sli4_hba.msix_entries[index].vector,
8868                         &phba->sli4_hba.fcp_eq_hdl[index]);
8869        }
8870
8871        /* Unconfigure MSI-X capability structure */
8872        pci_disable_msix(phba->pcidev);
8873
8874vec_fail_out:
8875        return rc;
8876}
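/*
 * Editor's sketch: pci_enable_msix_range(..., 2, vectors) above may
 * grant any vector count between 2 and the request, and the routine
 * then shrinks cfg_fcp_io_channel to whatever was granted. That clamp
 * in isolation; request_vectors() is a hypothetical stand-in for the
 * PCI call and simply pretends half the request was granted.
 */
#include <stdio.h>

static int request_vectors(int min, int max)
{
	int granted = max / 2;

	return granted >= min ? granted : min;
}

int main(void)
{
	int io_channels = 8;
	int granted = request_vectors(2, io_channels);

	if (granted < io_channels) {
		printf("reducing IO channels from %d to %d\n",
		       io_channels, granted);
		io_channels = granted;  /* one IO channel per vector */
	}
	return 0;
}
/* --- end of editor's sketch --- */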
8877
8878/**
8879 * lpfc_sli4_disable_msix - Disable MSI-X interrupt mode to SLI-4 device
8880 * @phba: pointer to lpfc hba data structure.
8881 *
8882 * This routine is invoked to release the MSI-X vectors and then disable the
8883 * MSI-X interrupt mode to device with SLI-4 interface spec.
8884 **/
8885static void
8886lpfc_sli4_disable_msix(struct lpfc_hba *phba)
8887{
8888        int index;
8889
8890        /* Free up MSI-X multi-message vectors */
8891        for (index = 0; index < phba->cfg_fcp_io_channel; index++) {
8892                irq_set_affinity_hint(phba->sli4_hba.msix_entries[index].
8893                                          vector, NULL);
8894                free_irq(phba->sli4_hba.msix_entries[index].vector,
8895                         &phba->sli4_hba.fcp_eq_hdl[index]);
8896        }
8897        if (phba->cfg_fof) {
8898                free_irq(phba->sli4_hba.msix_entries[index].vector,
8899                         &phba->sli4_hba.fcp_eq_hdl[index]);
8900        }
8901        /* Disable MSI-X */
8902        pci_disable_msix(phba->pcidev);
8903
8904        return;
8905}
8906
8907/**
8908 * lpfc_sli4_enable_msi - Enable MSI interrupt mode to SLI-4 device
8909 * @phba: pointer to lpfc hba data structure.
8910 *
8911 * This routine is invoked to enable the MSI interrupt mode to device with
8912 * SLI-4 interface spec. The kernel function pci_enable_msi() is called
8913 * to enable the MSI vector. The device driver is responsible for calling
8914 * request_irq() to register the MSI vector with an interrupt handler,
8915 * which is done in this function.
8916 *
8917 * Return codes
8918 *      0 - successful
8919 *      other values - error
8920 **/
8921static int
8922lpfc_sli4_enable_msi(struct lpfc_hba *phba)
8923{
8924        int rc, index;
8925
8926        rc = pci_enable_msi(phba->pcidev);
8927        if (!rc)
8928                lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
8929                                "0487 PCI enable MSI mode success.\n");
8930        else {
8931                lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
8932                                "0488 PCI enable MSI mode failed (%d)\n", rc);
8933                return rc;
8934        }
8935
8936        rc = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler,
8937                         IRQF_SHARED, LPFC_DRIVER_NAME, phba);
8938        if (rc) {
8939                pci_disable_msi(phba->pcidev);
8940                lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
8941                                "0490 MSI request_irq failed (%d)\n", rc);
8942                return rc;
8943        }
8944
8945        for (index = 0; index < phba->cfg_fcp_io_channel; index++) {
8946                phba->sli4_hba.fcp_eq_hdl[index].idx = index;
8947                phba->sli4_hba.fcp_eq_hdl[index].phba = phba;
8948        }
8949
8950        if (phba->cfg_fof) {
8951                phba->sli4_hba.fcp_eq_hdl[index].idx = index;
8952                phba->sli4_hba.fcp_eq_hdl[index].phba = phba;
8953        }
8954        return 0;
8955}
8956
8957/**
8958 * lpfc_sli4_disable_msi - Disable MSI interrupt mode to SLI-4 device
8959 * @phba: pointer to lpfc hba data structure.
8960 *
8961 * This routine is invoked to disable the MSI interrupt mode to device with
8962 * SLI-4 interface spec. The driver calls free_irq() on the MSI vector it
8963 * has done request_irq() on before calling pci_disable_msi(). Failure to
8964 * do so results in a BUG_ON() and leaves the device with MSI enabled,
8965 * leaking its vector.
8966 **/
8967static void
8968lpfc_sli4_disable_msi(struct lpfc_hba *phba)
8969{
8970        free_irq(phba->pcidev->irq, phba);
8971        pci_disable_msi(phba->pcidev);
8972        return;
8973}
8974
8975/**
8976 * lpfc_sli4_enable_intr - Enable device interrupt to SLI-4 device
8977 * @phba: pointer to lpfc hba data structure.
8978 *
8979 * This routine is invoked to enable device interrupt and associate driver's
8980 * interrupt handler(s) to interrupt vector(s) to device with SLI-4
8981 * interface spec. Depending on the interrupt mode configured for the
8982 * driver, the driver will try to fall back from the configured interrupt
8983 * mode to an interrupt mode supported by the platform, kernel, and device in
8984 * the order of:
8985 * MSI-X -> MSI -> IRQ.
8986 *
8987 * Return codes
8988 *      0 - successful
8989 *      other values - error
8990 **/
8991static uint32_t
8992lpfc_sli4_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
8993{
8994        uint32_t intr_mode = LPFC_INTR_ERROR;
8995        int retval, index;
8996
8997        if (cfg_mode == 2) {
8998                /* Preparation before conf_msi mbox cmd */
8999                retval = 0;
9000                if (!retval) {
9001                        /* Now, try to enable MSI-X interrupt mode */
9002                        retval = lpfc_sli4_enable_msix(phba);
9003                        if (!retval) {
9004                                /* Indicate initialization to MSI-X mode */
9005                                phba->intr_type = MSIX;
9006                                intr_mode = 2;
9007                        }
9008                }
9009        }
9010
9011        /* Fallback to MSI if MSI-X initialization failed */
9012        if (cfg_mode >= 1 && phba->intr_type == NONE) {
9013                retval = lpfc_sli4_enable_msi(phba);
9014                if (!retval) {
9015                        /* Indicate initialization to MSI mode */
9016                        phba->intr_type = MSI;
9017                        intr_mode = 1;
9018                }
9019        }
9020
9021        /* Fallback to INTx if both MSI-X/MSI initialization failed */
9022        if (phba->intr_type == NONE) {
9023                retval = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler,
9024                                     IRQF_SHARED, LPFC_DRIVER_NAME, phba);
9025                if (!retval) {
9026                        /* Indicate initialization to INTx mode */
9027                        phba->intr_type = INTx;
9028                        intr_mode = 0;
9029                        for (index = 0; index < phba->cfg_fcp_io_channel;
9030                             index++) {
9031                                phba->sli4_hba.fcp_eq_hdl[index].idx = index;
9032                                phba->sli4_hba.fcp_eq_hdl[index].phba = phba;
9033                                atomic_set(&phba->sli4_hba.fcp_eq_hdl[index].
9034                                        fcp_eq_in_use, 1);
9035                        }
9036                        if (phba->cfg_fof) {
9037                                phba->sli4_hba.fcp_eq_hdl[index].idx = index;
9038                                phba->sli4_hba.fcp_eq_hdl[index].phba = phba;
9039                                atomic_set(&phba->sli4_hba.fcp_eq_hdl[index].
9040                                        fcp_eq_in_use, 1);
9041                        }
9042                }
9043        }
9044        return intr_mode;
9045}
9046
9047/**
9048 * lpfc_sli4_disable_intr - Disable device interrupt to SLI-4 device
9049 * @phba: pointer to lpfc hba data structure.
9050 *
9051 * This routine is invoked to disable device interrupt and disassociate
9052 * the driver's interrupt handler(s) from interrupt vector(s) to device
9053 * with SLI-4 interface spec. Depending on the interrupt mode, the driver
9054 * will release the interrupt vector(s) for the message signaled interrupt.
9055 **/
9056static void
9057lpfc_sli4_disable_intr(struct lpfc_hba *phba)
9058{
9059        /* Disable the currently initialized interrupt mode */
9060        if (phba->intr_type == MSIX)
9061                lpfc_sli4_disable_msix(phba);
9062        else if (phba->intr_type == MSI)
9063                lpfc_sli4_disable_msi(phba);
9064        else if (phba->intr_type == INTx)
9065                free_irq(phba->pcidev->irq, phba);
9066
9067        /* Reset interrupt management states */
9068        phba->intr_type = NONE;
9069        phba->sli.slistat.sli_intr = 0;
9070
9071        return;
9072}
9073
9074/**
9075 * lpfc_unset_hba - Unset SLI3 hba device initialization
9076 * @phba: pointer to lpfc hba data structure.
9077 *
9078 * This routine is invoked to unset the HBA device initialization steps to
9079 * a device with SLI-3 interface spec.
9080 **/
9081static void
9082lpfc_unset_hba(struct lpfc_hba *phba)
9083{
9084        struct lpfc_vport *vport = phba->pport;
9085        struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
9086
9087        spin_lock_irq(shost->host_lock);
9088        vport->load_flag |= FC_UNLOADING;
9089        spin_unlock_irq(shost->host_lock);
9090
9091        kfree(phba->vpi_bmask);
9092        kfree(phba->vpi_ids);
9093
9094        lpfc_stop_hba_timers(phba);
9095
9096        phba->pport->work_port_events = 0;
9097
9098        lpfc_sli_hba_down(phba);
9099
9100        lpfc_sli_brdrestart(phba);
9101
9102        lpfc_sli_disable_intr(phba);
9103
9104        return;
9105}
9106
9107/**
9108 * lpfc_sli4_xri_exchange_busy_wait - Wait for device XRI exchange busy
9109 * @phba: Pointer to HBA context object.
9110 *
9111 * This function is called in the SLI4 code path to wait for completion
9112 * of the device's XRI exchange-busy conditions. It checks the XRI
9113 * exchange-busy state of outstanding FCP and ELS I/Os every 10ms for up
9114 * to 10 seconds; after that, it rechecks every 30 seconds, logs an error
9115 * message, and waits indefinitely. Only when all XRI exchange-busy
9116 * conditions have completed shall the driver unload proceed with
9117 * invoking the function reset ioctl mailbox command to the CNA and with
9118 * the rest of the driver unload resource release.
9119 **/
9120static void
9121lpfc_sli4_xri_exchange_busy_wait(struct lpfc_hba *phba)
9122{
9123        int wait_time = 0;
9124        int fcp_xri_cmpl = list_empty(&phba->sli4_hba.lpfc_abts_scsi_buf_list);
9125        int els_xri_cmpl = list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list);
9126
9127        while (!fcp_xri_cmpl || !els_xri_cmpl) {
9128                if (wait_time > LPFC_XRI_EXCH_BUSY_WAIT_TMO) {
9129                        if (!fcp_xri_cmpl)
9130                                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9131                                                "2877 FCP XRI exchange busy "
9132                                                "wait time: %d seconds.\n",
9133                                                wait_time/1000);
9134                        if (!els_xri_cmpl)
9135                                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9136                                                "2878 ELS XRI exchange busy "
9137                                                "wait time: %d seconds.\n",
9138                                                wait_time/1000);
9139                        msleep(LPFC_XRI_EXCH_BUSY_WAIT_T2);
9140                        wait_time += LPFC_XRI_EXCH_BUSY_WAIT_T2;
9141                } else {
9142                        msleep(LPFC_XRI_EXCH_BUSY_WAIT_T1);
9143                        wait_time += LPFC_XRI_EXCH_BUSY_WAIT_T1;
9144                }
9145                fcp_xri_cmpl =
9146                        list_empty(&phba->sli4_hba.lpfc_abts_scsi_buf_list);
9147                els_xri_cmpl =
9148                        list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list);
9149        }
9150}
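/*
 * Illustrative note (not compiled): the pacing constants driving the loop
 * above. The values shown are assumptions consistent with the kernel-doc
 * (10ms fast polls bounded at 10 seconds, then 30-second polls with error
 * logging); the authoritative definitions live in the SLI4 headers.
 */
#if 0
#define LPFC_XRI_EXCH_BUSY_WAIT_T1      10      /* fast poll interval, ms */
#define LPFC_XRI_EXCH_BUSY_WAIT_TMO     10000   /* fast-phase bound, ms */
#define LPFC_XRI_EXCH_BUSY_WAIT_T2      30000   /* slow poll interval, ms */
#endif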
9151
9152/**
9153 * lpfc_sli4_hba_unset - Unset the fcoe hba
9154 * @phba: Pointer to HBA context object.
9155 *
9156 * This function is called in the SLI4 code path to reset the HBA's FCoE
9157 * function. The caller is not required to hold any lock. This routine
9158 * issues the PCI function reset mailbox command to reset the FCoE function.
9159 * At the end, it calls the lpfc_hba_down_post function to free any
9160 * pending commands.
9161 **/
9162static void
9163lpfc_sli4_hba_unset(struct lpfc_hba *phba)
9164{
9165        int wait_cnt = 0;
9166        LPFC_MBOXQ_t *mboxq;
9167        struct pci_dev *pdev = phba->pcidev;
9168
9169        lpfc_stop_hba_timers(phba);
9170        phba->sli4_hba.intr_enable = 0;
9171
9172        /*
9173         * Gracefully wait out any currently outstanding asynchronous
9174         * mailbox command.
9175         */
9176
9177        /* First, block any pending async mailbox command from being posted */
9178        spin_lock_irq(&phba->hbalock);
9179        phba->sli.sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
9180        spin_unlock_irq(&phba->hbalock);
9181        /* Now, try to wait it out if we can */
9182        while (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) {
9183                msleep(10);
9184                if (++wait_cnt > LPFC_ACTIVE_MBOX_WAIT_CNT)
9185                        break;
9186        }
9187        /* Forcefully release the outstanding mailbox command if timed out */
9188        if (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) {
9189                spin_lock_irq(&phba->hbalock);
9190                mboxq = phba->sli.mbox_active;
9191                mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED;
9192                __lpfc_mbox_cmpl_put(phba, mboxq);
9193                phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
9194                phba->sli.mbox_active = NULL;
9195                spin_unlock_irq(&phba->hbalock);
9196        }
9197
9198        /* Abort all iocbs associated with the hba */
9199        lpfc_sli_hba_iocb_abort(phba);
9200
9201        /* Wait for completion of device XRI exchange busy */
9202        lpfc_sli4_xri_exchange_busy_wait(phba);
9203
9204        /* Disable PCI subsystem interrupt */
9205        lpfc_sli4_disable_intr(phba);
9206
9207        /* Disable SR-IOV if enabled */
9208        if (phba->cfg_sriov_nr_virtfn)
9209                pci_disable_sriov(pdev);
9210
9211        /* Stopping the kthread will trigger work_done one more time */
9212        kthread_stop(phba->worker_thread);
9213
9214        /* Reset SLI4 HBA FCoE function */
9215        lpfc_pci_function_reset(phba);
9216        lpfc_sli4_queue_destroy(phba);
9217
9218        /* Stop the SLI4 device port */
9219        phba->pport->work_port_events = 0;
9220}
9221
9222/**
9223 * lpfc_pc_sli4_params_get - Get the SLI4_PARAMS port capabilities.
9224 * @phba: Pointer to HBA context object.
9225 * @mboxq: Pointer to the mailboxq memory for the mailbox command response.
9226 *
9227 * This function is called in the SLI4 code path to read the port's
9228 * sli4 capabilities.
9229 *
9230 * This function may be called from any context that can block-wait
9231 * for the completion.  The expectation is that this routine is typically
9232 * called from probe_one or from the online routine.
9233 **/
9234int
9235lpfc_pc_sli4_params_get(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
9236{
9237        int rc;
9238        struct lpfc_mqe *mqe;
9239        struct lpfc_pc_sli4_params *sli4_params;
9240        uint32_t mbox_tmo;
9241
9242        rc = 0;
9243        mqe = &mboxq->u.mqe;
9244
9245        /* Read the port's SLI4 Parameters port capabilities */
9246        lpfc_pc_sli4_params(mboxq);
9247        if (!phba->sli4_hba.intr_enable)
9248                rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
9249        else {
9250                mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq);
9251                rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
9252        }
9253
9254        if (unlikely(rc))
9255                return 1;
9256
9257        sli4_params = &phba->sli4_hba.pc_sli4_params;
9258        sli4_params->if_type = bf_get(if_type, &mqe->un.sli4_params);
9259        sli4_params->sli_rev = bf_get(sli_rev, &mqe->un.sli4_params);
9260        sli4_params->sli_family = bf_get(sli_family, &mqe->un.sli4_params);
9261        sli4_params->featurelevel_1 = bf_get(featurelevel_1,
9262                                             &mqe->un.sli4_params);
9263        sli4_params->featurelevel_2 = bf_get(featurelevel_2,
9264                                             &mqe->un.sli4_params);
9265        sli4_params->proto_types = mqe->un.sli4_params.word3;
9266        sli4_params->sge_supp_len = mqe->un.sli4_params.sge_supp_len;
9267        sli4_params->if_page_sz = bf_get(if_page_sz, &mqe->un.sli4_params);
9268        sli4_params->rq_db_window = bf_get(rq_db_window, &mqe->un.sli4_params);
9269        sli4_params->loopbk_scope = bf_get(loopbk_scope, &mqe->un.sli4_params);
9270        sli4_params->eq_pages_max = bf_get(eq_pages, &mqe->un.sli4_params);
9271        sli4_params->eqe_size = bf_get(eqe_size, &mqe->un.sli4_params);
9272        sli4_params->cq_pages_max = bf_get(cq_pages, &mqe->un.sli4_params);
9273        sli4_params->cqe_size = bf_get(cqe_size, &mqe->un.sli4_params);
9274        sli4_params->mq_pages_max = bf_get(mq_pages, &mqe->un.sli4_params);
9275        sli4_params->mqe_size = bf_get(mqe_size, &mqe->un.sli4_params);
9276        sli4_params->mq_elem_cnt = bf_get(mq_elem_cnt, &mqe->un.sli4_params);
9277        sli4_params->wq_pages_max = bf_get(wq_pages, &mqe->un.sli4_params);
9278        sli4_params->wqe_size = bf_get(wqe_size, &mqe->un.sli4_params);
9279        sli4_params->rq_pages_max = bf_get(rq_pages, &mqe->un.sli4_params);
9280        sli4_params->rqe_size = bf_get(rqe_size, &mqe->un.sli4_params);
9281        sli4_params->hdr_pages_max = bf_get(hdr_pages, &mqe->un.sli4_params);
9282        sli4_params->hdr_size = bf_get(hdr_size, &mqe->un.sli4_params);
9283        sli4_params->hdr_pp_align = bf_get(hdr_pp_align, &mqe->un.sli4_params);
9284        sli4_params->sgl_pages_max = bf_get(sgl_pages, &mqe->un.sli4_params);
9285        sli4_params->sgl_pp_align = bf_get(sgl_pp_align, &mqe->un.sli4_params);
9286
9287        /* Make sure that sge_supp_len can be handled by the driver */
9288        if (sli4_params->sge_supp_len > LPFC_MAX_SGE_SIZE)
9289                sli4_params->sge_supp_len = LPFC_MAX_SGE_SIZE;
9290
9291        return rc;
9292}
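/*
 * Minimal caller sketch (not compiled, not part of the driver): the
 * expected probe-time usage, in which the caller owns the mailbox,
 * drawing it from the driver's mailbox mempool and returning it when
 * done. The function name example_read_pc_sli4_params is hypothetical.
 */
#if 0
static int example_read_pc_sli4_params(struct lpfc_hba *phba)
{
        LPFC_MBOXQ_t *mboxq;
        int rc;

        mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
        if (!mboxq)
                return -ENOMEM;
        rc = lpfc_pc_sli4_params_get(phba, mboxq);      /* nonzero on failure */
        mempool_free(mboxq, phba->mbox_mem_pool);
        return rc ? -EIO : 0;
}
#endif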
9293
9294/**
9295 * lpfc_get_sli4_parameters - Get the SLI4 Config PARAMETERS.
9296 * @phba: Pointer to HBA context object.
9297 * @mboxq: Pointer to the mailboxq memory for the mailbox command response.
9298 *
9299 * This function is called in the SLI4 code path to read the port's
9300 * sli4 capabilities.
9301 *
9302 * This function may be called from any context that can block-wait
9303 * for the completion.  The expectation is that this routine is typically
9304 * called from probe_one or from the online routine.
9305 **/
9306int
9307lpfc_get_sli4_parameters(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
9308{
9309        int rc;
9310        struct lpfc_mqe *mqe = &mboxq->u.mqe;
9311        struct lpfc_pc_sli4_params *sli4_params;
9312        uint32_t mbox_tmo;
9313        int length;
9314        struct lpfc_sli4_parameters *mbx_sli4_parameters;
9315
9316        /*
9317         * By default, the driver assumes the SLI4 port requires RPI
9318         * header postings.  The SLI4_PARAM response will correct this
9319         * assumption.
9320         */
9321        phba->sli4_hba.rpi_hdrs_in_use = 1;
9322
9323        /* Read the port's SLI4 Config Parameters */
9324        length = (sizeof(struct lpfc_mbx_get_sli4_parameters) -
9325                  sizeof(struct lpfc_sli4_cfg_mhdr));
9326        lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
9327                         LPFC_MBOX_OPCODE_GET_SLI4_PARAMETERS,
9328                         length, LPFC_SLI4_MBX_EMBED);
9329        if (!phba->sli4_hba.intr_enable)
9330                rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
9331        else {
9332                mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq);
9333                rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
9334        }
9335        if (unlikely(rc))
9336                return rc;
9337        sli4_params = &phba->sli4_hba.pc_sli4_params;
9338        mbx_sli4_parameters = &mqe->un.get_sli4_parameters.sli4_parameters;
9339        sli4_params->if_type = bf_get(cfg_if_type, mbx_sli4_parameters);
9340        sli4_params->sli_rev = bf_get(cfg_sli_rev, mbx_sli4_parameters);
9341        sli4_params->sli_family = bf_get(cfg_sli_family, mbx_sli4_parameters);
9342        sli4_params->featurelevel_1 = bf_get(cfg_sli_hint_1,
9343                                             mbx_sli4_parameters);
9344        sli4_params->featurelevel_2 = bf_get(cfg_sli_hint_2,
9345                                             mbx_sli4_parameters);
9346        if (bf_get(cfg_phwq, mbx_sli4_parameters))
9347                phba->sli3_options |= LPFC_SLI4_PHWQ_ENABLED;
9348        else
9349                phba->sli3_options &= ~LPFC_SLI4_PHWQ_ENABLED;
9350        sli4_params->sge_supp_len = mbx_sli4_parameters->sge_supp_len;
9351        sli4_params->loopbk_scope = bf_get(loopbk_scope, mbx_sli4_parameters);
9352        sli4_params->oas_supported = bf_get(cfg_oas, mbx_sli4_parameters);
9353        sli4_params->cqv = bf_get(cfg_cqv, mbx_sli4_parameters);
9354        sli4_params->mqv = bf_get(cfg_mqv, mbx_sli4_parameters);
9355        sli4_params->wqv = bf_get(cfg_wqv, mbx_sli4_parameters);
9356        sli4_params->rqv = bf_get(cfg_rqv, mbx_sli4_parameters);
9357        sli4_params->wqsize = bf_get(cfg_wqsize, mbx_sli4_parameters);
9358        sli4_params->sgl_pages_max = bf_get(cfg_sgl_page_cnt,
9359                                            mbx_sli4_parameters);
9360        sli4_params->sgl_pp_align = bf_get(cfg_sgl_pp_align,
9361                                           mbx_sli4_parameters);
9362        phba->sli4_hba.extents_in_use = bf_get(cfg_ext, mbx_sli4_parameters);
9363        phba->sli4_hba.rpi_hdrs_in_use = bf_get(cfg_hdrr, mbx_sli4_parameters);
9364
9365        /* Make sure that sge_supp_len can be handled by the driver */
9366        if (sli4_params->sge_supp_len > LPFC_MAX_SGE_SIZE)
9367                sli4_params->sge_supp_len = LPFC_MAX_SGE_SIZE;
9368
9369        return 0;
9370}
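/*
 * Simplified sketch (not compiled) of the bf_get() accessor pattern used
 * heavily above: every field name carries companion _SHIFT/_MASK/_WORD
 * macros (see lpfc_hw4.h). The field below is hypothetical.
 */
#if 0
#define example_field_SHIFT     4
#define example_field_MASK      0x0000000f
#define example_field_WORD      word3
/*
 * bf_get(example_field, ptr) then expands to roughly
 *      (((ptr)->word3 >> 4) & 0x0000000f)
 * i.e. it extracts the 4-bit field starting at bit 4 of word3.
 */
#endif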
9371
9372/**
9373 * lpfc_pci_probe_one_s3 - PCI probe func to reg SLI-3 device to PCI subsystem.
9374 * @pdev: pointer to PCI device
9375 * @pid: pointer to PCI device identifier
9376 *
9377 * This routine is to be called to attach a device with SLI-3 interface spec
9378 * to the PCI subsystem. When an Emulex HBA with SLI-3 interface spec is
9379 * presented on the PCI bus, the kernel PCI subsystem looks at the PCI
9380 * device-specific information of the device and checks whether this driver
9381 * can support this kind of device. If the match is successful, the driver core
9382 * invokes this routine. If this routine determines it can claim the HBA, it
9383 * does all the initialization that it needs to do to handle the HBA properly.
9384 *
9385 * Return code
9386 *      0 - driver can claim the device
9387 *      negative value - driver cannot claim the device
9388 **/
9389static int
9390lpfc_pci_probe_one_s3(struct pci_dev *pdev, const struct pci_device_id *pid)
9391{
9392        struct lpfc_hba   *phba;
9393        struct lpfc_vport *vport = NULL;
9394        struct Scsi_Host  *shost = NULL;
9395        int error;
9396        uint32_t cfg_mode, intr_mode;
9397
9398        /* Allocate memory for HBA structure */
9399        phba = lpfc_hba_alloc(pdev);
9400        if (!phba)
9401                return -ENOMEM;
9402
9403        /* Perform generic PCI device enabling operation */
9404        error = lpfc_enable_pci_dev(phba);
9405        if (error)
9406                goto out_free_phba;
9407
9408        /* Set up SLI API function jump table for PCI-device group-0 HBAs */
9409        error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_LP);
9410        if (error)
9411                goto out_disable_pci_dev;
9412
9413        /* Set up SLI-3 specific device PCI memory space */
9414        error = lpfc_sli_pci_mem_setup(phba);
9415        if (error) {
9416                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9417                                "1402 Failed to set up pci memory space.\n");
9418                goto out_disable_pci_dev;
9419        }
9420
9421        /* Set up phase-1 common device driver resources */
9422        error = lpfc_setup_driver_resource_phase1(phba);
9423        if (error) {
9424                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9425                                "1403 Failed to set up driver resource.\n");
9426                goto out_unset_pci_mem_s3;
9427        }
9428
9429        /* Set up SLI-3 specific device driver resources */
9430        error = lpfc_sli_driver_resource_setup(phba);
9431        if (error) {
9432                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9433                                "1404 Failed to set up driver resource.\n");
9434                goto out_unset_pci_mem_s3;
9435        }
9436
9437        /* Initialize and populate the iocb list per host */
9438        error = lpfc_init_iocb_list(phba, LPFC_IOCB_LIST_CNT);
9439        if (error) {
9440                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9441                                "1405 Failed to initialize iocb list.\n");
9442                goto out_unset_driver_resource_s3;
9443        }
9444
9445        /* Set up common device driver resources */
9446        error = lpfc_setup_driver_resource_phase2(phba);
9447        if (error) {
9448                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9449                                "1406 Failed to set up driver resource.\n");
9450                goto out_free_iocb_list;
9451        }
9452
9453        /* Get the default values for Model Name and Description */
9454        lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);
9455
9456        /* Create SCSI host to the physical port */
9457        error = lpfc_create_shost(phba);
9458        if (error) {
9459                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9460                                "1407 Failed to create scsi host.\n");
9461                goto out_unset_driver_resource;
9462        }
9463
9464        /* Configure sysfs attributes */
9465        vport = phba->pport;
9466        error = lpfc_alloc_sysfs_attr(vport);
9467        if (error) {
9468                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9469                                "1476 Failed to allocate sysfs attr\n");
9470                goto out_destroy_shost;
9471        }
9472
9473        shost = lpfc_shost_from_vport(vport); /* save shost for error cleanup */
9474        /* Now, try to enable interrupt and bring up the device */
9475        cfg_mode = phba->cfg_use_msi;
9476        while (true) {
9477                /* Put device to a known state before enabling interrupt */
9478                lpfc_stop_port(phba);
9479                /* Configure and enable interrupt */
9480                intr_mode = lpfc_sli_enable_intr(phba, cfg_mode);
9481                if (intr_mode == LPFC_INTR_ERROR) {
9482                        lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9483                                        "0431 Failed to enable interrupt.\n");
9484                        error = -ENODEV;
9485                        goto out_free_sysfs_attr;
9486                }
9487                /* SLI-3 HBA setup */
9488                if (lpfc_sli_hba_setup(phba)) {
9489                        lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9490                                        "1477 Failed to set up hba\n");
9491                        error = -ENODEV;
9492                        goto out_remove_device;
9493                }
9494
9495                /* Wait 50ms for the interrupts of previous mailbox commands */
9496                msleep(50);
9497                /* Check active interrupts on message signaled interrupts */
9498                if (intr_mode == 0 ||
9499                    phba->sli.slistat.sli_intr > LPFC_MSIX_VECTORS) {
9500                        /* Log the current active interrupt mode */
9501                        phba->intr_mode = intr_mode;
9502                        lpfc_log_intr_mode(phba, intr_mode);
9503                        break;
9504                } else {
9505                        lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
9506                                        "0447 Configure interrupt mode (%d) "
9507                                        "failed active interrupt test.\n",
9508                                        intr_mode);
9509                        /* Disable the current interrupt mode */
9510                        lpfc_sli_disable_intr(phba);
9511                        /* Try next level of interrupt mode */
9512                        cfg_mode = --intr_mode;
9513                }
9514        }
9515
9516        /* Perform post initialization setup */
9517        lpfc_post_init_setup(phba);
9518
9519        /* Check if there are static vports to be created. */
9520        lpfc_create_static_vport(phba);
9521
9522        return 0;
9523
9524out_remove_device:
9525        lpfc_unset_hba(phba);
9526out_free_sysfs_attr:
9527        lpfc_free_sysfs_attr(vport);
9528out_destroy_shost:
9529        lpfc_destroy_shost(phba);
9530out_unset_driver_resource:
9531        lpfc_unset_driver_resource_phase2(phba);
9532out_free_iocb_list:
9533        lpfc_free_iocb_list(phba);
9534out_unset_driver_resource_s3:
9535        lpfc_sli_driver_resource_unset(phba);
9536out_unset_pci_mem_s3:
9537        lpfc_sli_pci_mem_unset(phba);
9538out_disable_pci_dev:
9539        lpfc_disable_pci_dev(phba);
9540        if (shost)
9541                scsi_host_put(shost);
9542out_free_phba:
9543        lpfc_hba_free(phba);
9544        return error;
9545}
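/*
 * Condensed sketch (not compiled) of the interrupt fallback in the probe
 * loop above: cfg_use_msi requests 2 = MSI-X, 1 = MSI, 0 = INTx, and
 * "cfg_mode = --intr_mode" steps down that ladder after a failed
 * active-interrupt test. INTx (mode 0) is accepted without the test.
 * The helper interrupts_seen() is hypothetical.
 */
#if 0
static uint32_t example_enable_intr_with_fallback(struct lpfc_hba *phba)
{
        uint32_t cfg_mode, intr_mode;

        for (cfg_mode = phba->cfg_use_msi; ; cfg_mode = --intr_mode) {
                intr_mode = lpfc_sli_enable_intr(phba, cfg_mode);
                if (intr_mode == LPFC_INTR_ERROR)
                        break;          /* no mode could be enabled */
                if (intr_mode == 0 || interrupts_seen(phba))
                        break;          /* INTx, or the test passed */
                lpfc_sli_disable_intr(phba);    /* step down and retry */
        }
        return intr_mode;
}
#endif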
9546
9547/**
9548 * lpfc_pci_remove_one_s3 - PCI func to unreg SLI-3 device from PCI subsystem.
9549 * @pdev: pointer to PCI device
9550 *
9551 * This routine is to be called to detach a device with SLI-3 interface
9552 * spec from the PCI subsystem. When an Emulex HBA with SLI-3 interface spec is
9553 * removed from PCI bus, it performs all the necessary cleanup for the HBA
9554 * device to be removed from the PCI subsystem properly.
9555 **/
9556static void
9557lpfc_pci_remove_one_s3(struct pci_dev *pdev)
9558{
9559        struct Scsi_Host  *shost = pci_get_drvdata(pdev);
9560        struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
9561        struct lpfc_vport **vports;
9562        struct lpfc_hba   *phba = vport->phba;
9563        int i;
9564        int bars = pci_select_bars(pdev, IORESOURCE_MEM);
9565
9566        spin_lock_irq(&phba->hbalock);
9567        vport->load_flag |= FC_UNLOADING;
9568        spin_unlock_irq(&phba->hbalock);
9569
9570        lpfc_free_sysfs_attr(vport);
9571
9572        /* Release all the vports against this physical port */
9573        vports = lpfc_create_vport_work_array(phba);
9574        if (vports != NULL)
9575                for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
9576                        if (vports[i]->port_type == LPFC_PHYSICAL_PORT)
9577                                continue;
9578                        fc_vport_terminate(vports[i]->fc_vport);
9579                }
9580        lpfc_destroy_vport_work_array(phba, vports);
9581
9582        /* Remove FC host and then SCSI host with the physical port */
9583        fc_remove_host(shost);
9584        scsi_remove_host(shost);
9585        lpfc_cleanup(vport);
9586
9587        /*
9588         * Bring down the SLI Layer. This step disables all interrupts,
9589         * clears the rings, discards all mailbox commands, and resets
9590         * the HBA.
9591         */
9592
9593        /* HBA interrupt will be disabled after this call */
9594        lpfc_sli_hba_down(phba);
9595        /* Stopping the kthread will trigger work_done one more time */
9596        kthread_stop(phba->worker_thread);
9597        /* Final cleanup of txcmplq and reset the HBA */
9598        lpfc_sli_brdrestart(phba);
9599
9600        kfree(phba->vpi_bmask);
9601        kfree(phba->vpi_ids);
9602
9603        lpfc_stop_hba_timers(phba);
9604        spin_lock_irq(&phba->hbalock);
9605        list_del_init(&vport->listentry);
9606        spin_unlock_irq(&phba->hbalock);
9607
9608        lpfc_debugfs_terminate(vport);
9609
9610        /* Disable SR-IOV if enabled */
9611        if (phba->cfg_sriov_nr_virtfn)
9612                pci_disable_sriov(pdev);
9613
9614        /* Disable interrupt */
9615        lpfc_sli_disable_intr(phba);
9616
9617        scsi_host_put(shost);
9618
9619        /*
9620         * Call scsi_free before mem_free since scsi bufs are released to their
9621         * corresponding pools here.
9622         */
9623        lpfc_scsi_free(phba);
9624        lpfc_mem_free_all(phba);
9625
9626        dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(),
9627                          phba->hbqslimp.virt, phba->hbqslimp.phys);
9628
9629        /* Free resources associated with SLI2 interface */
9630        dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
9631                          phba->slim2p.virt, phba->slim2p.phys);
9632
9633        /* unmap adapter SLIM and Control Registers */
9634        iounmap(phba->ctrl_regs_memmap_p);
9635        iounmap(phba->slim_memmap_p);
9636
9637        lpfc_hba_free(phba);
9638
9639        pci_release_selected_regions(pdev, bars);
9640        pci_disable_device(pdev);
9641}
9642
9643/**
9644 * lpfc_pci_suspend_one_s3 - PCI func to suspend SLI-3 device for power mgmnt
9645 * @pdev: pointer to PCI device
9646 * @msg: power management message
9647 *
9648 * This routine is to be called from the kernel's PCI subsystem to support
9649 * system Power Management (PM) to device with SLI-3 interface spec. When
9650 * PM invokes this method, it quiesces the device by stopping the driver's
9651 * worker thread for the device, turning off the device's interrupts and DMA,
9652 * and bringing the device offline. Note that because the driver implements
9653 * only the minimum PM requirements of a power-aware driver -- all possible
9654 * PM messages (SUSPEND, HIBERNATE, FREEZE) passed to the suspend() method
9655 * are treated as SUSPEND, and the driver fully reinitializes its device
9656 * during the resume() method call -- the driver sets the device to the
9657 * PCI_D3hot state in PCI config space instead of setting it according to
9658 * the @msg provided by the PM.
9659 *
9660 * Return code
9661 *      0 - driver suspended the device
9662 *      Error otherwise
9663 **/
9664static int
9665lpfc_pci_suspend_one_s3(struct pci_dev *pdev, pm_message_t msg)
9666{
9667        struct Scsi_Host *shost = pci_get_drvdata(pdev);
9668        struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
9669
9670        lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
9671                        "0473 PCI device Power Management suspend.\n");
9672
9673        /* Bring down the device */
9674        lpfc_offline_prep(phba, LPFC_MBX_WAIT);
9675        lpfc_offline(phba);
9676        kthread_stop(phba->worker_thread);
9677
9678        /* Disable interrupt from device */
9679        lpfc_sli_disable_intr(phba);
9680
9681        /* Save device state to PCI config space */
9682        pci_save_state(pdev);
9683        pci_set_power_state(pdev, PCI_D3hot);
9684
9685        return 0;
9686}
9687
9688/**
9689 * lpfc_pci_resume_one_s3 - PCI func to resume SLI-3 device for power mgmnt
9690 * @pdev: pointer to PCI device
9691 *
9692 * This routine is to be called from the kernel's PCI subsystem to support
9693 * system Power Management (PM) to device with SLI-3 interface spec. When PM
9694 * invokes this method, it restores the device's PCI config space state and
9695 * fully reinitializes the device and brings it online. Note that because
9696 * the driver implements only the minimum PM requirements of a power-aware
9697 * driver -- all possible PM messages (SUSPEND, HIBERNATE, FREEZE) passed
9698 * to the suspend() method are treated as SUSPEND, and the driver fully
9699 * reinitializes its device during the resume() method call -- the device
9700 * is set to PCI_D0 directly in PCI config space before restoring the
9701 * state.
9702 *
9703 * Return code
9704 *      0 - driver resumed the device
9705 *      Error otherwise
9706 **/
9707static int
9708lpfc_pci_resume_one_s3(struct pci_dev *pdev)
9709{
9710        struct Scsi_Host *shost = pci_get_drvdata(pdev);
9711        struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
9712        uint32_t intr_mode;
9713        int error;
9714
9715        lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
9716                        "0452 PCI device Power Management resume.\n");
9717
9718        /* Restore device state from PCI config space */
9719        pci_set_power_state(pdev, PCI_D0);
9720        pci_restore_state(pdev);
9721
9722        /*
9723         * As the new kernel behavior of pci_restore_state() API call clears
9724         * device saved_state flag, need to save the restored state again.
9725         */
9726        pci_save_state(pdev);
9727
9728        if (pdev->is_busmaster)
9729                pci_set_master(pdev);
9730
9731        /* Startup the kernel thread for this host adapter. */
9732        phba->worker_thread = kthread_run(lpfc_do_work, phba,
9733                                        "lpfc_worker_%d", phba->brd_no);
9734        if (IS_ERR(phba->worker_thread)) {
9735                error = PTR_ERR(phba->worker_thread);
9736                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9737                                "0434 PM resume failed to start worker "
9738                                "thread: error=x%x.\n", error);
9739                return error;
9740        }
9741
9742        /* Configure and enable interrupt */
9743        intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode);
9744        if (intr_mode == LPFC_INTR_ERROR) {
9745                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9746                                "0430 PM resume Failed to enable interrupt\n");
9747                return -EIO;
9748        } else
9749                phba->intr_mode = intr_mode;
9750
9751        /* Restart HBA and bring it online */
9752        lpfc_sli_brdrestart(phba);
9753        lpfc_online(phba);
9754
9755        /* Log the current active interrupt mode */
9756        lpfc_log_intr_mode(phba, phba->intr_mode);
9757
9758        return 0;
9759}
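/*
 * Minimal sketch (not compiled; assumed wiring) of how the _s3/_s4 PM
 * variants are dispatched: a generic entry point registered with the PCI
 * core switches on the device group recorded at probe time. The function
 * name example_pci_suspend_one is hypothetical.
 */
#if 0
static int example_pci_suspend_one(struct pci_dev *pdev, pm_message_t msg)
{
        struct Scsi_Host *shost = pci_get_drvdata(pdev);
        struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;

        switch (phba->pci_dev_grp) {
        case LPFC_PCI_DEV_LP:                   /* SLI-3 HBA */
                return lpfc_pci_suspend_one_s3(pdev, msg);
        case LPFC_PCI_DEV_OC:                   /* SLI-4 HBA */
                return lpfc_pci_suspend_one_s4(pdev, msg);
        default:
                return -ENODEV;
        }
}
#endif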
9760
9761/**
9762 * lpfc_sli_prep_dev_for_recover - Prepare SLI3 device for pci slot recover
9763 * @phba: pointer to lpfc hba data structure.
9764 *
9765 * This routine is called to prepare the SLI3 device for PCI slot recover. It
9766 * aborts all the outstanding SCSI I/Os to the pci device.
9767 **/
9768static void
9769lpfc_sli_prep_dev_for_recover(struct lpfc_hba *phba)
9770{
9771        lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9772                        "2723 PCI channel I/O abort preparing for recovery\n");
9773
9774        /*
9775         * There may be errored I/Os through HBA, abort all I/Os on txcmplq
9776         * and let the SCSI mid-layer to retry them to recover.
9777         */
9778        lpfc_sli_abort_fcp_rings(phba);
9779}
9780
9781/**
9782 * lpfc_sli_prep_dev_for_reset - Prepare SLI3 device for pci slot reset
9783 * @phba: pointer to lpfc hba data structure.
9784 *
9785 * This routine is called to prepare the SLI3 device for PCI slot reset. It
9786 * disables the device interrupt and pci device, and aborts the internal FCP
9787 * pending I/Os.
9788 **/
9789static void
9790lpfc_sli_prep_dev_for_reset(struct lpfc_hba *phba)
9791{
9792        lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9793                        "2710 PCI channel disable preparing for reset\n");
9794
9795        /* Block any management I/Os to the device */
9796        lpfc_block_mgmt_io(phba, LPFC_MBX_WAIT);
9797
9798        /* Block all SCSI devices' I/Os on the host */
9799        lpfc_scsi_dev_block(phba);
9800
9801        /* Flush all driver's outstanding SCSI I/Os as we are to reset */
9802        lpfc_sli_flush_fcp_rings(phba);
9803
9804        /* stop all timers */
9805        lpfc_stop_hba_timers(phba);
9806
9807        /* Disable interrupt and pci device */
9808        lpfc_sli_disable_intr(phba);
9809        pci_disable_device(phba->pcidev);
9810}
9811
9812/**
9813 * lpfc_sli_prep_dev_for_perm_failure - Prepare SLI3 dev for pci slot disable
9814 * @phba: pointer to lpfc hba data structure.
9815 *
9816 * This routine is called to prepare the SLI3 device for PCI slot permanently
9817 * disabling. It blocks the SCSI transport layer traffic and flushes the FCP
9818 * pending I/Os.
9819 **/
9820static void
9821lpfc_sli_prep_dev_for_perm_failure(struct lpfc_hba *phba)
9822{
9823        lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9824                        "2711 PCI channel permanent disable for failure\n");
9825        /* Block all SCSI devices' I/Os on the host */
9826        lpfc_scsi_dev_block(phba);
9827
9828        /* stop all timers */
9829        lpfc_stop_hba_timers(phba);
9830
9831        /* Clean up all driver's outstanding SCSI I/Os */
9832        lpfc_sli_flush_fcp_rings(phba);
9833}
9834
9835/**
9836 * lpfc_io_error_detected_s3 - Method for handling SLI-3 device PCI I/O error
9837 * @pdev: pointer to PCI device.
9838 * @state: the current PCI connection state.
9839 *
9840 * This routine is called from the PCI subsystem for I/O error handling to
9841 * device with SLI-3 interface spec. This function is called by the PCI
9842 * subsystem after a PCI bus error affecting this device has been detected.
9843 * When this function is invoked, it will need to stop all the I/Os and
9844 * interrupt(s) to the device. Once that is done, it will return
9845 * PCI_ERS_RESULT_NEED_RESET for the PCI subsystem to perform proper recovery
9846 * as desired.
9847 *
9848 * Return codes
9849 *      PCI_ERS_RESULT_CAN_RECOVER - can be recovered with reset_link
9850 *      PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
9851 *      PCI_ERS_RESULT_DISCONNECT - device could not be recovered
9852 **/
9853static pci_ers_result_t
9854lpfc_io_error_detected_s3(struct pci_dev *pdev, pci_channel_state_t state)
9855{
9856        struct Scsi_Host *shost = pci_get_drvdata(pdev);
9857        struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
9858
9859        switch (state) {
9860        case pci_channel_io_normal:
9861                /* Non-fatal error, prepare for recovery */
9862                lpfc_sli_prep_dev_for_recover(phba);
9863                return PCI_ERS_RESULT_CAN_RECOVER;
9864        case pci_channel_io_frozen:
9865                /* Fatal error, prepare for slot reset */
9866                lpfc_sli_prep_dev_for_reset(phba);
9867                return PCI_ERS_RESULT_NEED_RESET;
9868        case pci_channel_io_perm_failure:
9869                /* Permanent failure, prepare for device down */
9870                lpfc_sli_prep_dev_for_perm_failure(phba);
9871                return PCI_ERS_RESULT_DISCONNECT;
9872        default:
9873                /* Unknown state, prepare and request slot reset */
9874                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9875                                "0472 Unknown PCI error state: x%x\n", state);
9876                lpfc_sli_prep_dev_for_reset(phba);
9877                return PCI_ERS_RESULT_NEED_RESET;
9878        }
9879}
9880
9881/**
9882 * lpfc_io_slot_reset_s3 - Method for restarting PCI SLI-3 device from scratch.
9883 * @pdev: pointer to PCI device.
9884 *
9885 * This routine is called from the PCI subsystem for error handling to
9886 * device with SLI-3 interface spec. This is called after PCI bus has been
9887 * reset to restart the PCI card from scratch, as if from a cold-boot.
9888 * During the PCI subsystem error recovery, after driver returns
9889 * PCI_ERS_RESULT_NEED_RESET, the PCI subsystem will perform proper error
9890 * recovery and then call this routine before calling the .resume method
9891 * to recover the device. This function will initialize the HBA device,
9892 * and enable the interrupt, but it will only bring the HBA to an offline
9893 * state without passing any I/O traffic.
9894 *
9895 * Return codes
9896 *      PCI_ERS_RESULT_RECOVERED - the device has been recovered
9897 *      PCI_ERS_RESULT_DISCONNECT - device could not be recovered
9898 */
9899static pci_ers_result_t
9900lpfc_io_slot_reset_s3(struct pci_dev *pdev)
9901{
9902        struct Scsi_Host *shost = pci_get_drvdata(pdev);
9903        struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
9904        struct lpfc_sli *psli = &phba->sli;
9905        uint32_t intr_mode;
9906
9907        dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n");
9908        if (pci_enable_device_mem(pdev)) {
9909                printk(KERN_ERR "lpfc: Cannot re-enable "
9910                        "PCI device after reset.\n");
9911                return PCI_ERS_RESULT_DISCONNECT;
9912        }
9913
9914        pci_restore_state(pdev);
9915
9916        /*
9917         * As the new kernel behavior of pci_restore_state() API call clears
9918         * device saved_state flag, need to save the restored state again.
9919         */
9920        pci_save_state(pdev);
9921
9922        if (pdev->is_busmaster)
9923                pci_set_master(pdev);
9924
9925        spin_lock_irq(&phba->hbalock);
9926        psli->sli_flag &= ~LPFC_SLI_ACTIVE;
9927        spin_unlock_irq(&phba->hbalock);
9928
9929        /* Configure and enable interrupt */
9930        intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode);
9931        if (intr_mode == LPFC_INTR_ERROR) {
9932                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9933                                "0427 Cannot re-enable interrupt after "
9934                                "slot reset.\n");
9935                return PCI_ERS_RESULT_DISCONNECT;
9936        } else
9937                phba->intr_mode = intr_mode;
9938
9939        /* Take device offline, it will perform cleanup */
9940        lpfc_offline_prep(phba, LPFC_MBX_WAIT);
9941        lpfc_offline(phba);
9942        lpfc_sli_brdrestart(phba);
9943
9944        /* Log the current active interrupt mode */
9945        lpfc_log_intr_mode(phba, phba->intr_mode);
9946
9947        return PCI_ERS_RESULT_RECOVERED;
9948}
9949
9950/**
9951 * lpfc_io_resume_s3 - Method for resuming PCI I/O operation on SLI-3 device.
9952 * @pdev: pointer to PCI device
9953 *
9954 * This routine is called from the PCI subsystem for error handling to device
9955 * with SLI-3 interface spec. It is called when the kernel's error recovery
9956 * tells the lpfc driver that it is OK to resume normal PCI operation after
9957 * PCI bus error recovery. After this call, traffic can start to flow from
9958 * this device again.
9959 */
9960static void
9961lpfc_io_resume_s3(struct pci_dev *pdev)
9962{
9963        struct Scsi_Host *shost = pci_get_drvdata(pdev);
9964        struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
9965
9966        /* Bring device online; it will be a no-op for non-fatal error resume */
9967        lpfc_online(phba);
9968
9969        /* Clean up Advanced Error Reporting (AER) if needed */
9970        if (phba->hba_flag & HBA_AER_ENABLED)
9971                pci_cleanup_aer_uncorrect_error_status(pdev);
9972}
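/*
 * Minimal sketch (not compiled; assumed wiring per the standard PCI
 * error-recovery model) of how the three SLI-3 handlers above plug into
 * the PCI core: error_detected -> slot_reset -> resume, registered via a
 * struct pci_error_handlers hung off the pci_driver.
 */
#if 0
static const struct pci_error_handlers example_err_handler = {
        .error_detected = lpfc_io_error_detected_s3,
        .slot_reset     = lpfc_io_slot_reset_s3,
        .resume         = lpfc_io_resume_s3,
};
#endif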
9973
9974/**
9975 * lpfc_sli4_get_els_iocb_cnt - Calculate the # of ELS IOCBs to reserve
9976 * @phba: pointer to lpfc hba data structure.
9977 *
9978 * Returns the number of ELS/CT IOCBs to reserve, based on max_xri.
9979 **/
9980int
9981lpfc_sli4_get_els_iocb_cnt(struct lpfc_hba *phba)
9982{
9983        int max_xri = phba->sli4_hba.max_cfg_param.max_xri;
9984
9985        if (phba->sli_rev == LPFC_SLI_REV4) {
9986                if (max_xri <= 100)
9987                        return 10;
9988                else if (max_xri <= 256)
9989                        return 25;
9990                else if (max_xri <= 512)
9991                        return 50;
9992                else if (max_xri <= 1024)
9993                        return 100;
9994                else if (max_xri <= 1536)
9995                        return 150;
9996                else if (max_xri <= 2048)
9997                        return 200;
9998                else
9999                        return 250;
10000        } else
10001                return 0;
10002}
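/*
 * Worked example for the ladder above: an SLI-4 port reporting
 * max_xri = 1024 reserves 100 ELS/CT IOCBs, while max_xri = 1025 falls
 * into the next band and reserves 150. Non-SLI-4 ports always get 0.
 */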
10003
10004/**
10005 * lpfc_write_firmware - attempt to write a firmware image to the port
10006 * @fw: pointer to firmware image returned from request_firmware.
10007 * @context: pointer to the lpfc hba data structure (passed as void *).
10008 *
10009 **/
10010static void
10011lpfc_write_firmware(const struct firmware *fw, void *context)
10012{
10013        struct lpfc_hba *phba = (struct lpfc_hba *)context;
10014        char fwrev[FW_REV_STR_SIZE];
10015        struct lpfc_grp_hdr *image;
10016        struct list_head dma_buffer_list;
10017        int i, rc = 0;
10018        struct lpfc_dmabuf *dmabuf, *next;
10019        uint32_t offset = 0, temp_offset = 0;
10020
10021        /* fw can be NULL in no-wait mode; sanity check */
10022        if (!fw) {
10023                rc = -ENXIO;
10024                goto out;
10025        }
10026        image = (struct lpfc_grp_hdr *)fw->data;
10027
10028        INIT_LIST_HEAD(&dma_buffer_list);
10029        if ((be32_to_cpu(image->magic_number) != LPFC_GROUP_OJECT_MAGIC_NUM) ||
10030            (bf_get_be32(lpfc_grp_hdr_file_type, image) !=
10031             LPFC_FILE_TYPE_GROUP) ||
10032            (bf_get_be32(lpfc_grp_hdr_id, image) != LPFC_FILE_ID_GROUP) ||
10033            (be32_to_cpu(image->size) != fw->size)) {
10034                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10035                                "3022 Invalid FW image found. "
10036                                "Magic:%x Type:%x ID:%x\n",
10037                                be32_to_cpu(image->magic_number),
10038                                bf_get_be32(lpfc_grp_hdr_file_type, image),
10039                                bf_get_be32(lpfc_grp_hdr_id, image));
10040                rc = -EINVAL;
10041                goto release_out;
10042        }
10043        lpfc_decode_firmware_rev(phba, fwrev, 1);
10044        if (strncmp(fwrev, image->revision, strnlen(image->revision, 16))) {
10045                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10046                                "3023 Updating Firmware, Current Version:%s "
10047                                "New Version:%s\n",
10048                                fwrev, image->revision);
10049                for (i = 0; i < LPFC_MBX_WR_CONFIG_MAX_BDE; i++) {
10050                        dmabuf = kzalloc(sizeof(struct lpfc_dmabuf),
10051                                         GFP_KERNEL);
10052                        if (!dmabuf) {
10053                                rc = -ENOMEM;
10054                                goto release_out;
10055                        }
10056                        dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
10057                                                          SLI4_PAGE_SIZE,
10058                                                          &dmabuf->phys,
10059                                                          GFP_KERNEL);
10060                        if (!dmabuf->virt) {
10061                                kfree(dmabuf);
10062                                rc = -ENOMEM;
10063                                goto release_out;
10064                        }
10065                        list_add_tail(&dmabuf->list, &dma_buffer_list);
10066                }
10067                while (offset < fw->size) {
10068                        temp_offset = offset;
10069                        list_for_each_entry(dmabuf, &dma_buffer_list, list) {
10070                                if (temp_offset + SLI4_PAGE_SIZE > fw->size) {
10071                                        memcpy(dmabuf->virt,
10072                                               fw->data + temp_offset,
10073                                               fw->size - temp_offset);
10074                                        temp_offset = fw->size;
10075                                        break;
10076                                }
10077                                memcpy(dmabuf->virt, fw->data + temp_offset,
10078                                       SLI4_PAGE_SIZE);
10079                                temp_offset += SLI4_PAGE_SIZE;
10080                        }
10081                        rc = lpfc_wr_object(phba, &dma_buffer_list,
10082                                    (fw->size - offset), &offset);
10083                        if (rc)
10084                                goto release_out;
10085                }
10086                rc = offset;
10087        }
10088
10089release_out:
10090        list_for_each_entry_safe(dmabuf, next, &dma_buffer_list, list) {
10091                list_del(&dmabuf->list);
10092                dma_free_coherent(&phba->pcidev->dev, SLI4_PAGE_SIZE,
10093                                  dmabuf->virt, dmabuf->phys);
10094                kfree(dmabuf);
10095        }
10096        release_firmware(fw);
10097out:
10098        lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10099                        "3024 Firmware update done: %d.\n", rc);
10100        return;
10101}
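/*
 * Sketch (not compiled) of the chunking arithmetic above, assuming an
 * SLI4_PAGE_SIZE of 4096: the image is copied into the DMA buffer list
 * up to LPFC_MBX_WR_CONFIG_MAX_BDE pages per pass, and each
 * lpfc_wr_object() call advances "offset" by the amount the port
 * actually consumed.
 */
#if 0
size_t pages_total = DIV_ROUND_UP(fw->size, SLI4_PAGE_SIZE);
size_t max_bytes_per_pass = LPFC_MBX_WR_CONFIG_MAX_BDE * SLI4_PAGE_SIZE;
#endif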
10102
10103/**
10104 * lpfc_sli4_request_firmware_update - Request linux generic firmware upgrade
10105 * @phba: pointer to lpfc hba data structure.
10106 * @fw_upgrade: INT_FW_UPGRADE (asynchronous) or RUN_FW_UPGRADE (synchronous)
10107 *
10108 * Perform a Linux generic firmware upgrade on devices that support it.
10109 **/
10110int
10111lpfc_sli4_request_firmware_update(struct lpfc_hba *phba, uint8_t fw_upgrade)
10112{
10113        uint8_t file_name[ELX_MODEL_NAME_SIZE];
10114        int ret;
10115        const struct firmware *fw;
10116
10117        /* Only supported on SLI4 interface type 2 for now */
10118        if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
10119            LPFC_SLI_INTF_IF_TYPE_2)
10120                return -EPERM;
10121
10122        snprintf(file_name, ELX_MODEL_NAME_SIZE, "%s.grp", phba->ModelName);
10123
10124        if (fw_upgrade == INT_FW_UPGRADE) {
10125                ret = request_firmware_nowait(THIS_MODULE, FW_ACTION_HOTPLUG,
10126                                        file_name, &phba->pcidev->dev,
10127                                        GFP_KERNEL, (void *)phba,
10128                                        lpfc_write_firmware);
10129        } else if (fw_upgrade == RUN_FW_UPGRADE) {
10130                ret = request_firmware(&fw, file_name, &phba->pcidev->dev);
10131                if (!ret)
10132                        lpfc_write_firmware(fw, (void *)phba);
10133        } else {
10134                ret = -EINVAL;
10135        }
10136
10137        return ret;
10138}
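/*
 * Usage sketch (not compiled) for the two modes above: INT_FW_UPGRADE is
 * fire-and-forget via request_firmware_nowait(), with lpfc_write_firmware()
 * run later as the async callback; RUN_FW_UPGRADE blocks in
 * request_firmware() and writes the image inline.
 */
#if 0
/* asynchronous, e.g. at probe time when an upgrade was requested */
ret = lpfc_sli4_request_firmware_update(phba, INT_FW_UPGRADE);

/* synchronous, e.g. for an explicit user-triggered update */
ret = lpfc_sli4_request_firmware_update(phba, RUN_FW_UPGRADE);
#endif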
10139
10140/**
10141 * lpfc_pci_probe_one_s4 - PCI probe func to reg SLI-4 device to PCI subsys
10142 * @pdev: pointer to PCI device
10143 * @pid: pointer to PCI device identifier
10144 *
10145 * This routine is called from the kernel's PCI subsystem to attach a device
10146 * with SLI-4 interface spec. When an Emulex HBA with SLI-4 interface spec is
10147 * presented on the PCI bus, the kernel PCI subsystem looks at the PCI
10148 * device-specific information of the device and checks whether this driver
10149 * can support this kind of device. If the match is successful, the driver
10150 * core invokes this routine. If this routine determines it can claim the HBA,
10151 * it does all the initialization that it needs to do to handle the HBA
10152 * properly.
10153 *
10154 * Return code
10155 *      0 - driver can claim the device
10156 *      negative value - driver cannot claim the device
10157 **/
10158static int
10159lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
10160{
10161        struct lpfc_hba   *phba;
10162        struct lpfc_vport *vport = NULL;
10163        struct Scsi_Host  *shost = NULL;
10164        int error, ret;
10165        uint32_t cfg_mode, intr_mode;
10166        int adjusted_fcp_io_channel;
10167
10168        /* Allocate memory for HBA structure */
10169        phba = lpfc_hba_alloc(pdev);
10170        if (!phba)
10171                return -ENOMEM;
10172
10173        /* Perform generic PCI device enabling operation */
10174        error = lpfc_enable_pci_dev(phba);
10175        if (error)
10176                goto out_free_phba;
10177
10178        /* Set up SLI API function jump table for PCI-device group-1 HBAs */
10179        error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_OC);
10180        if (error)
10181                goto out_disable_pci_dev;
10182
10183        /* Set up SLI-4 specific device PCI memory space */
10184        error = lpfc_sli4_pci_mem_setup(phba);
10185        if (error) {
10186                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10187                                "1410 Failed to set up pci memory space.\n");
10188                goto out_disable_pci_dev;
10189        }
10190
10191        /* Set up phase-1 common device driver resources */
10192        error = lpfc_setup_driver_resource_phase1(phba);
10193        if (error) {
10194                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10195                                "1411 Failed to set up driver resource.\n");
10196                goto out_unset_pci_mem_s4;
10197        }
10198
10199        /* Set up SLI-4 Specific device driver resources */
10200        error = lpfc_sli4_driver_resource_setup(phba);
10201        if (error) {
10202                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10203                                "1412 Failed to set up driver resource.\n");
10204                goto out_unset_pci_mem_s4;
10205        }
10206
10207        /* Initialize and populate the iocb list per host */
10208
10209        lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
10210                        "2821 initialize iocb list %d.\n",
10211                        phba->cfg_iocb_cnt*1024);
10212        error = lpfc_init_iocb_list(phba, phba->cfg_iocb_cnt*1024);
10213
10214        if (error) {
10215                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10216                                "1413 Failed to initialize iocb list.\n");
10217                goto out_unset_driver_resource_s4;
10218        }
10219
10220        INIT_LIST_HEAD(&phba->active_rrq_list);
10221        INIT_LIST_HEAD(&phba->fcf.fcf_pri_list);
10222
10223        /* Set up common device driver resources */
10224        error = lpfc_setup_driver_resource_phase2(phba);
10225        if (error) {
10226                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10227                                "1414 Failed to set up driver resource.\n");
10228                goto out_free_iocb_list;
10229        }
10230
10231        /* Get the default values for Model Name and Description */
10232        lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);
10233
10234        /* Create SCSI host to the physical port */
10235        error = lpfc_create_shost(phba);
10236        if (error) {
10237                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10238                                "1415 Failed to create scsi host.\n");
10239                goto out_unset_driver_resource;
10240        }
10241
10242        /* Configure sysfs attributes */
10243        vport = phba->pport;
10244        error = lpfc_alloc_sysfs_attr(vport);
10245        if (error) {
10246                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10247                                "1416 Failed to allocate sysfs attr\n");
10248                goto out_destroy_shost;
10249        }
10250
10251        shost = lpfc_shost_from_vport(vport); /* save shost for error cleanup */
10252        /* Now, try to enable interrupt and bring up the device */
10253        cfg_mode = phba->cfg_use_msi;
10254
10255        /* Put device to a known state before enabling interrupt */
10256        lpfc_stop_port(phba);
10257        /* Configure and enable interrupt */
10258        intr_mode = lpfc_sli4_enable_intr(phba, cfg_mode);
10259        if (intr_mode == LPFC_INTR_ERROR) {
10260                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10261                                "0426 Failed to enable interrupt.\n");
10262                error = -ENODEV;
10263                goto out_free_sysfs_attr;
10264        }
10265        /* Default to single EQ for non-MSI-X */
10266        if (phba->intr_type != MSIX)
10267                adjusted_fcp_io_channel = 1;
10268        else
10269                adjusted_fcp_io_channel = phba->cfg_fcp_io_channel;
10270        phba->cfg_fcp_io_channel = adjusted_fcp_io_channel;
10271        /* Set up SLI-4 HBA */
10272        if (lpfc_sli4_hba_setup(phba)) {
10273                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10274                                "1421 Failed to set up hba\n");
10275                error = -ENODEV;
10276                goto out_disable_intr;
10277        }
10278
10279        /* Log the current active interrupt mode */
10280        phba->intr_mode = intr_mode;
10281        lpfc_log_intr_mode(phba, intr_mode);
10282
10283        /* Perform post initialization setup */
10284        lpfc_post_init_setup(phba);
10285
10286        /* check for firmware upgrade or downgrade */
10287        if (phba->cfg_request_firmware_upgrade)
10288                ret = lpfc_sli4_request_firmware_update(phba, INT_FW_UPGRADE);
10289
10290        /* Check if there are static vports to be created. */
10291        lpfc_create_static_vport(phba);
10292        return 0;
10293
10294out_disable_intr:
10295        lpfc_sli4_disable_intr(phba);
10296out_free_sysfs_attr:
10297        lpfc_free_sysfs_attr(vport);
10298out_destroy_shost:
10299        lpfc_destroy_shost(phba);
10300out_unset_driver_resource:
10301        lpfc_unset_driver_resource_phase2(phba);
10302out_free_iocb_list:
10303        lpfc_free_iocb_list(phba);
10304out_unset_driver_resource_s4:
10305        lpfc_sli4_driver_resource_unset(phba);
10306out_unset_pci_mem_s4:
10307        lpfc_sli4_pci_mem_unset(phba);
10308out_disable_pci_dev:
10309        lpfc_disable_pci_dev(phba);
10310        if (shost)
10311                scsi_host_put(shost);
10312out_free_phba:
10313        lpfc_hba_free(phba);
10314        return error;
10315}
10316
10317/**
10318 * lpfc_pci_remove_one_s4 - PCI func to unreg SLI-4 device from PCI subsystem
10319 * @pdev: pointer to PCI device
10320 *
10321 * This routine is called from the kernel's PCI subsystem to detach a device
10322 * with SLI-4 interface spec. When an Emulex HBA with SLI-4 interface spec is
10323 * removed from PCI bus, it performs all the necessary cleanup for the HBA
10324 * device to be removed from the PCI subsystem properly.
10325 **/
10326static void
10327lpfc_pci_remove_one_s4(struct pci_dev *pdev)
10328{
10329        struct Scsi_Host *shost = pci_get_drvdata(pdev);
10330        struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
10331        struct lpfc_vport **vports;
10332        struct lpfc_hba *phba = vport->phba;
10333        int i;
10334
10335        /* Mark the device unloading flag */
10336        spin_lock_irq(&phba->hbalock);
10337        vport->load_flag |= FC_UNLOADING;
10338        spin_unlock_irq(&phba->hbalock);
10339
10340        /* Free the HBA sysfs attributes */
10341        lpfc_free_sysfs_attr(vport);
10342
10343        /* Release all the vports against this physical port */
10344        vports = lpfc_create_vport_work_array(phba);
10345        if (vports != NULL)
10346                for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
10347                        if (vports[i]->port_type == LPFC_PHYSICAL_PORT)
10348                                continue;
10349                        fc_vport_terminate(vports[i]->fc_vport);
10350                }
10351        lpfc_destroy_vport_work_array(phba, vports);
10352
10353        /* Remove FC host and then SCSI host with the physical port */
10354        fc_remove_host(shost);
10355        scsi_remove_host(shost);
10356
10357        /* Perform cleanup on the physical port */
10358        lpfc_cleanup(vport);
10359
10360        /*
10361         * Bring down the SLI Layer. This step disables all interrupts,
10362         * clears the rings, discards all mailbox commands, and resets
10363         * the HBA FCoE function.
10364         */
10365        lpfc_debugfs_terminate(vport);
10366        lpfc_sli4_hba_unset(phba);
10367
10368        spin_lock_irq(&phba->hbalock);
10369        list_del_init(&vport->listentry);
10370        spin_unlock_irq(&phba->hbalock);
10371
10372        /* Perform scsi free before driver resource_unset since scsi
10373         * buffers are released to their corresponding pools here.
10374         */
10375        lpfc_scsi_free(phba);
10376
10377        lpfc_sli4_driver_resource_unset(phba);
10378
10379        /* Unmap adapter Control and Doorbell registers */
10380        lpfc_sli4_pci_mem_unset(phba);
10381
10382        /* Release PCI resources and disable device's PCI function */
10383        scsi_host_put(shost);
10384        lpfc_disable_pci_dev(phba);
10385
10386        /* Finally, free the driver's device data structure */
10387        lpfc_hba_free(phba);
10388
10389        return;
10390}
10391
10392/**
10393 * lpfc_pci_suspend_one_s4 - PCI func to suspend SLI-4 device for power mgmnt
10394 * @pdev: pointer to PCI device
10395 * @msg: power management message
10396 *
10397 * This routine is called from the kernel's PCI subsystem to support system
10398 * Power Management (PM) to device with SLI-4 interface spec. When PM invokes
10399 * this method, it quiesces the device by stopping the driver's worker
10400 * thread for the device, turning off the device's interrupts and DMA, and
10401 * bringing the device offline. Note that because the driver implements only
10402 * the minimum PM requirements of a power-aware driver -- all possible PM
10403 * messages (SUSPEND, HIBERNATE, FREEZE) passed to the suspend() method are
10404 * treated as SUSPEND, and the driver fully reinitializes its device during
10405 * the resume() method call -- the driver sets the device to the PCI_D3hot
10406 * state in PCI config space instead of setting it according to the @msg
10407 * provided by the PM.
10408 *
10409 * Return code
10410 *      0 - driver suspended the device
10411 *      Error otherwise
10412 **/
10413static int
10414lpfc_pci_suspend_one_s4(struct pci_dev *pdev, pm_message_t msg)
10415{
10416        struct Scsi_Host *shost = pci_get_drvdata(pdev);
10417        struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
10418
10419        lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
10420                        "2843 PCI device Power Management suspend.\n");
10421
10422        /* Bring down the device */
10423        lpfc_offline_prep(phba, LPFC_MBX_WAIT);
10424        lpfc_offline(phba);
10425        kthread_stop(phba->worker_thread);
10426
10427        /* Disable interrupt from device */
10428        lpfc_sli4_disable_intr(phba);
10429        lpfc_sli4_queue_destroy(phba);
10430
10431        /* Save device state to PCI config space */
10432        pci_save_state(pdev);
10433        pci_set_power_state(pdev, PCI_D3hot);
10434
10435        return 0;
10436}
10437
10438/**
10439 * lpfc_pci_resume_one_s4 - PCI func to resume SLI-4 device for power mgmnt
10440 * @pdev: pointer to PCI device
10441 *
10442 * This routine is called from the kernel's PCI subsystem to support system
10443 * Power Management (PM) to device with SLI-4 interface spec. When PM invokes
10444 * this method, it restores the device's PCI config space state and fully
10445 * reinitializes the device and brings it online. Note that because the
10446 * driver implements only the minimum PM requirements of a power-aware
10447 * driver -- all possible PM messages (SUSPEND, HIBERNATE, FREEZE) passed
10448 * to the suspend() method are treated as SUSPEND, and the driver fully
10449 * reinitializes its device during the resume() method call -- the device
10450 * is set to PCI_D0 directly in PCI config space before restoring the
10451 * state.
10452 *
10453 * Return code
10454 *      0 - driver resumed the device
10455 *      Error otherwise
10456 **/
10457static int
10458lpfc_pci_resume_one_s4(struct pci_dev *pdev)
10459{
10460        struct Scsi_Host *shost = pci_get_drvdata(pdev);
10461        struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
10462        uint32_t intr_mode;
10463        int error;
10464
10465        lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
10466                        "0292 PCI device Power Management resume.\n");
10467
10468        /* Restore device state from PCI config space */
10469        pci_set_power_state(pdev, PCI_D0);
10470        pci_restore_state(pdev);
10471
10472        /*
10473         * As the new kernel behavior of the pci_restore_state() API call
10474         * clears the device's saved_state flag, save the restored state again.
10475         */
10476        pci_save_state(pdev);
10477
10478        if (pdev->is_busmaster)
10479                pci_set_master(pdev);
10480
10481         /* Startup the kernel thread for this host adapter. */
10482        phba->worker_thread = kthread_run(lpfc_do_work, phba,
10483                                        "lpfc_worker_%d", phba->brd_no);
10484        if (IS_ERR(phba->worker_thread)) {
10485                error = PTR_ERR(phba->worker_thread);
10486                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10487                                "0293 PM resume failed to start worker "
10488                                "thread: error=x%x.\n", error);
10489                return error;
10490        }
10491
10492        /* Configure and enable interrupt */
10493        intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
10494        if (intr_mode == LPFC_INTR_ERROR) {
10495                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10496                                "0294 PM resume Failed to enable interrupt\n");
10497                return -EIO;
10498        }
10499        phba->intr_mode = intr_mode;
10500
10501        /* Restart HBA and bring it online */
10502        lpfc_sli_brdrestart(phba);
10503        lpfc_online(phba);
10504
10505        /* Log the current active interrupt mode */
10506        lpfc_log_intr_mode(phba, phba->intr_mode);
10507
10508        return 0;
10509}
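/*
 * The matching legacy .resume pattern, sketched with the same hypothetical
 * foo_* names: return to D0, restore config space, then save the state
 * again because newer kernels clear the saved_state flag inside
 * pci_restore_state().
 *
 *	static int foo_resume(struct pci_dev *pdev)
 *	{
 *		pci_set_power_state(pdev, PCI_D0);
 *		pci_restore_state(pdev);
 *		pci_save_state(pdev);
 *		return foo_reinit(pdev);
 *	}
 */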
10510
10511/**
10512 * lpfc_sli4_prep_dev_for_recover - Prepare SLI4 device for pci slot recover
10513 * @phba: pointer to lpfc hba data structure.
10514 *
10515 * This routine is called to prepare the SLI4 device for PCI slot recover. It
10516 * aborts all the outstanding SCSI I/Os to the pci device.
10517 **/
10518static void
10519lpfc_sli4_prep_dev_for_recover(struct lpfc_hba *phba)
10520{
10521        lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10522                        "2828 PCI channel I/O abort preparing for recovery\n");
10523        /*
10524         * There may be errored I/Os through the HBA; abort all I/Os on the
10525         * txcmplq and let the SCSI mid-layer retry them to recover.
10526         */
10527        lpfc_sli_abort_fcp_rings(phba);
10528}
10529
10530/**
10531 * lpfc_sli4_prep_dev_for_reset - Prepare SLI4 device for pci slot reset
10532 * @phba: pointer to lpfc hba data structure.
10533 *
10534 * This routine is called to prepare the SLI4 device for PCI slot reset. It
10535 * disables the device interrupt and pci device, and aborts the internal FCP
10536 * pending I/Os.
10537 **/
10538static void
10539lpfc_sli4_prep_dev_for_reset(struct lpfc_hba *phba)
10540{
10541        lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10542                        "2826 PCI channel disable preparing for reset\n");
10543
10544        /* Block any management I/Os to the device */
10545        lpfc_block_mgmt_io(phba, LPFC_MBX_NO_WAIT);
10546
10547        /* Block all SCSI devices' I/Os on the host */
10548        lpfc_scsi_dev_block(phba);
10549
10550        /* Flush all driver's outstanding SCSI I/Os as we are to reset */
10551        lpfc_sli_flush_fcp_rings(phba);
10552
10553        /* stop all timers */
10554        lpfc_stop_hba_timers(phba);
10555
10556        /* Disable interrupt and pci device */
10557        lpfc_sli4_disable_intr(phba);
10558        lpfc_sli4_queue_destroy(phba);
10559        pci_disable_device(phba->pcidev);
10560}
10561
10562/**
10563 * lpfc_sli4_prep_dev_for_perm_failure - Prepare SLI4 dev for pci slot disable
10564 * @phba: pointer to lpfc hba data structure.
10565 *
10566 * This routine is called to prepare the SLI4 device for PCI slot permanently
10567 * disabling. It blocks the SCSI transport layer traffic and flushes the FCP
10568 * pending I/Os.
10569 **/
10570static void
10571lpfc_sli4_prep_dev_for_perm_failure(struct lpfc_hba *phba)
10572{
10573        lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10574                        "2827 PCI channel permanent disable for failure\n");
10575
10576        /* Block all SCSI devices' I/Os on the host */
10577        lpfc_scsi_dev_block(phba);
10578
10579        /* stop all timers */
10580        lpfc_stop_hba_timers(phba);
10581
10582        /* Clean up all driver's outstanding SCSI I/Os */
10583        lpfc_sli_flush_fcp_rings(phba);
10584}
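/*
 * Summary of the three SLI-4 EEH preparation levels implemented above:
 *
 *	recover:	abort outstanding FCP I/O on the txcmplq and let the
 *			SCSI mid-layer retry; the device stays enabled
 *	reset:		also block management and SCSI I/O, flush the FCP
 *			rings, stop the HBA timers, disable interrupts,
 *			destroy the queues, and disable the PCI device
 *	perm failure:	block SCSI I/O, stop the HBA timers, and flush the
 *			FCP rings for good
 */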
10585
10586/**
10587 * lpfc_io_error_detected_s4 - Method for handling PCI I/O error to SLI-4 device
10588 * @pdev: pointer to PCI device.
10589 * @state: the current PCI connection state.
10590 *
10591 * This routine is called from the PCI subsystem for error handling on a
10592 * device with the SLI-4 interface spec. It is called by the PCI subsystem
10593 * after a PCI bus error affecting this device has been detected. When this
10594 * function is invoked, it stops all I/O and interrupts to the device and,
10595 * depending on @state, returns PCI_ERS_RESULT_CAN_RECOVER, _NEED_RESET, or
10596 * _DISCONNECT so that the PCI subsystem can perform proper recovery.
10597 *
10598 * Return codes
10599 *      PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
10600 *      PCI_ERS_RESULT_DISCONNECT - device could not be recovered
10601 **/
10602static pci_ers_result_t
10603lpfc_io_error_detected_s4(struct pci_dev *pdev, pci_channel_state_t state)
10604{
10605        struct Scsi_Host *shost = pci_get_drvdata(pdev);
10606        struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
10607
10608        switch (state) {
10609        case pci_channel_io_normal:
10610                /* Non-fatal error, prepare for recovery */
10611                lpfc_sli4_prep_dev_for_recover(phba);
10612                return PCI_ERS_RESULT_CAN_RECOVER;
10613        case pci_channel_io_frozen:
10614                /* Fatal error, prepare for slot reset */
10615                lpfc_sli4_prep_dev_for_reset(phba);
10616                return PCI_ERS_RESULT_NEED_RESET;
10617        case pci_channel_io_perm_failure:
10618                /* Permanent failure, prepare for device down */
10619                lpfc_sli4_prep_dev_for_perm_failure(phba);
10620                return PCI_ERS_RESULT_DISCONNECT;
10621        default:
10622                /* Unknown state, prepare and request slot reset */
10623                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10624                                "2825 Unknown PCI error state: x%x\n", state);
10625                lpfc_sli4_prep_dev_for_reset(phba);
10626                return PCI_ERS_RESULT_NEED_RESET;
10627        }
10628}
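/*
 * For reference, the pci_channel_state_t to pci_ers_result_t mapping
 * implemented by the switch above:
 *
 *	pci_channel_io_normal		-> PCI_ERS_RESULT_CAN_RECOVER
 *	pci_channel_io_frozen		-> PCI_ERS_RESULT_NEED_RESET
 *	pci_channel_io_perm_failure	-> PCI_ERS_RESULT_DISCONNECT
 *	any other (unknown) state	-> PCI_ERS_RESULT_NEED_RESET
 */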
10629
10630/**
10631 * lpfc_io_slot_reset_s4 - Method for restart PCI SLI-4 device from scratch
10632 * @pdev: pointer to PCI device.
10633 *
10634 * This routine is called from the PCI subsystem for error handling on a
10635 * device with the SLI-4 interface spec. It is called after the PCI bus has
10636 * been reset to restart the PCI card from scratch, as if from a cold-boot.
10637 * During the PCI subsystem error recovery, after the driver returns
10638 * PCI_ERS_RESULT_NEED_RESET, the PCI subsystem will perform proper error
10639 * recovery and then call this routine before calling the .resume method to
10640 * recover the device. This function initializes the HBA device and enables
10641 * the interrupt, but it just puts the HBA in the offline state without
10642 * passing any I/O traffic.
10643 *
10644 * Return codes
10645 *      PCI_ERS_RESULT_RECOVERED - the device has been recovered
10646 *      PCI_ERS_RESULT_DISCONNECT - device could not be recovered
10647 **/
10648static pci_ers_result_t
10649lpfc_io_slot_reset_s4(struct pci_dev *pdev)
10650{
10651        struct Scsi_Host *shost = pci_get_drvdata(pdev);
10652        struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
10653        struct lpfc_sli *psli = &phba->sli;
10654        uint32_t intr_mode;
10655
10656        dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n");
10657        if (pci_enable_device_mem(pdev)) {
10658                printk(KERN_ERR "lpfc: Cannot re-enable "
10659                        "PCI device after reset.\n");
10660                return PCI_ERS_RESULT_DISCONNECT;
10661        }
10662
10663        pci_restore_state(pdev);
10664
10665        /*
10666         * As the new kernel behavior of the pci_restore_state() API call
10667         * clears the device's saved_state flag, save the restored state again.
10668         */
10669        pci_save_state(pdev);
10670
10671        if (pdev->is_busmaster)
10672                pci_set_master(pdev);
10673
10674        spin_lock_irq(&phba->hbalock);
10675        psli->sli_flag &= ~LPFC_SLI_ACTIVE;
10676        spin_unlock_irq(&phba->hbalock);
10677
10678        /* Configure and enable interrupt */
10679        intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
10680        if (intr_mode == LPFC_INTR_ERROR) {
10681                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10682                                "2824 Cannot re-enable interrupt after "
10683                                "slot reset.\n");
10684                return PCI_ERS_RESULT_DISCONNECT;
10685        }
10686        phba->intr_mode = intr_mode;
10687
10688        /* Log the current active interrupt mode */
10689        lpfc_log_intr_mode(phba, phba->intr_mode);
10690
10691        return PCI_ERS_RESULT_RECOVERED;
10692}
10693
10694/**
10695 * lpfc_io_resume_s4 - Method for resuming PCI I/O operation to SLI-4 device
10696 * @pdev: pointer to PCI device
10697 *
10698 * This routine is called from the PCI subsystem for error handling on a
10699 * device with the SLI-4 interface spec. It is called when kernel error
10700 * recovery tells the lpfc driver that it is OK to resume normal PCI
10701 * operation after PCI bus error recovery. After this call, traffic can
10702 * start to flow from this device again.
10703 **/
10704static void
10705lpfc_io_resume_s4(struct pci_dev *pdev)
10706{
10707        struct Scsi_Host *shost = pci_get_drvdata(pdev);
10708        struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
10709
10710        /*
10711         * In case of slot reset, the function reset is performed through a
10712         * mailbox command, which needs DMA enabled, so this operation has
10713         * to be done in the io resume phase. Taking the device offline
10714         * will perform the necessary cleanup.
10715         */
10716        if (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE)) {
10717                /* Perform device reset */
10718                lpfc_offline_prep(phba, LPFC_MBX_WAIT);
10719                lpfc_offline(phba);
10720                lpfc_sli_brdrestart(phba);
10721                /* Bring the device back online */
10722                lpfc_online(phba);
10723        }
10724
10725        /* Clean up Advanced Error Reporting (AER) if needed */
10726        if (phba->hba_flag & HBA_AER_ENABLED)
10727                pci_cleanup_aer_uncorrect_error_status(pdev);
10728}
10729
10730/**
10731 * lpfc_pci_probe_one - lpfc PCI probe func to reg dev to PCI subsystem
10732 * @pdev: pointer to PCI device
10733 * @pid: pointer to PCI device identifier
10734 *
10735 * This routine is to be registered to the kernel's PCI subsystem. When an
10736 * Emulex HBA device is presented on PCI bus, the kernel PCI subsystem looks
10737 * at the PCI device-specific information of the device to see if the
10738 * driver states that it can support this kind of device. If the match is
10739 * successful, the driver core invokes this routine. This routine dispatches
10740 * the action to the proper SLI-3 or SLI-4 device probing routine, which will
10741 * do all the initialization that it needs to do to handle the HBA device
10742 * properly.
10743 *
10744 * Return code
10745 *      0 - driver can claim the device
10746 *      negative value - driver cannot claim the device
10747 **/
10748static int
10749lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
10750{
10751        int rc;
10752        struct lpfc_sli_intf intf;
10753
10754        if (pci_read_config_dword(pdev, LPFC_SLI_INTF, &intf.word0))
10755                return -ENODEV;
10756
10757        if ((bf_get(lpfc_sli_intf_valid, &intf) == LPFC_SLI_INTF_VALID) &&
10758            (bf_get(lpfc_sli_intf_slirev, &intf) == LPFC_SLI_INTF_REV_SLI4))
10759                rc = lpfc_pci_probe_one_s4(pdev, pid);
10760        else
10761                rc = lpfc_pci_probe_one_s3(pdev, pid);
10762
10763        return rc;
10764}
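/*
 * The SLI-3 vs SLI-4 dispatch above keys off the SLI_INTF register read
 * from PCI config space. bf_get() is the lpfc bit-field accessor from
 * lpfc_hw4.h, roughly of the form:
 *
 *	#define bf_get(name, ptr) \
 *		(((ptr)->name##_WORD >> name##_SHIFT) & name##_MASK)
 *
 * so the probe extracts the "valid" and "slirev" fields from word0 and
 * takes the SLI-4 path only when both match their SLI-4 values.
 */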
10765
10766/**
10767 * lpfc_pci_remove_one - lpfc PCI func to unreg dev from PCI subsystem
10768 * @pdev: pointer to PCI device
10769 *
10770 * This routine is to be registered to the kernel's PCI subsystem. When an
10771 * Emulex HBA is removed from PCI bus, the driver core invokes this routine.
10772 * This routine dispatches the action to the proper SLI-3 or SLI-4 device
10773 * remove routine, which will perform all the necessary cleanup for the
10774 * device to be removed from the PCI subsystem properly.
10775 **/
10776static void
10777lpfc_pci_remove_one(struct pci_dev *pdev)
10778{
10779        struct Scsi_Host *shost = pci_get_drvdata(pdev);
10780        struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
10781
10782        switch (phba->pci_dev_grp) {
10783        case LPFC_PCI_DEV_LP:
10784                lpfc_pci_remove_one_s3(pdev);
10785                break;
10786        case LPFC_PCI_DEV_OC:
10787                lpfc_pci_remove_one_s4(pdev);
10788                break;
10789        default:
10790                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10791                                "1424 Invalid PCI device group: 0x%x\n",
10792                                phba->pci_dev_grp);
10793                break;
10794        }
10795        return;
10796}
10797
10798/**
10799 * lpfc_pci_suspend_one - lpfc PCI func to suspend dev for power management
10800 * @pdev: pointer to PCI device
10801 * @msg: power management message
10802 *
10803 * This routine is to be registered to the kernel's PCI subsystem to support
10804 * system Power Management (PM). When PM invokes this method, it dispatches
10805 * the action to the proper SLI-3 or SLI-4 device suspend routine, which will
10806 * suspend the device.
10807 *
10808 * Return code
10809 *      0 - driver suspended the device
10810 *      Error otherwise
10811 **/
10812static int
10813lpfc_pci_suspend_one(struct pci_dev *pdev, pm_message_t msg)
10814{
10815        struct Scsi_Host *shost = pci_get_drvdata(pdev);
10816        struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
10817        int rc = -ENODEV;
10818
10819        switch (phba->pci_dev_grp) {
10820        case LPFC_PCI_DEV_LP:
10821                rc = lpfc_pci_suspend_one_s3(pdev, msg);
10822                break;
10823        case LPFC_PCI_DEV_OC:
10824                rc = lpfc_pci_suspend_one_s4(pdev, msg);
10825                break;
10826        default:
10827                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10828                                "1425 Invalid PCI device group: 0x%x\n",
10829                                phba->pci_dev_grp);
10830                break;
10831        }
10832        return rc;
10833}
10834
10835/**
10836 * lpfc_pci_resume_one - lpfc PCI func to resume dev for power management
10837 * @pdev: pointer to PCI device
10838 *
10839 * This routine is to be registered to the kernel's PCI subsystem to support
10840 * system Power Management (PM). When PM invokes this method, it dispatches
10841 * the action to the proper SLI-3 or SLI-4 device resume routine, which will
10842 * resume the device.
10843 *
10844 * Return code
10845 *      0 - driver resumed the device
10846 *      Error otherwise
10847 **/
10848static int
10849lpfc_pci_resume_one(struct pci_dev *pdev)
10850{
10851        struct Scsi_Host *shost = pci_get_drvdata(pdev);
10852        struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
10853        int rc = -ENODEV;
10854
10855        switch (phba->pci_dev_grp) {
10856        case LPFC_PCI_DEV_LP:
10857                rc = lpfc_pci_resume_one_s3(pdev);
10858                break;
10859        case LPFC_PCI_DEV_OC:
10860                rc = lpfc_pci_resume_one_s4(pdev);
10861                break;
10862        default:
10863                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10864                                "1426 Invalid PCI device group: 0x%x\n",
10865                                phba->pci_dev_grp);
10866                break;
10867        }
10868        return rc;
10869}
10870
10871/**
10872 * lpfc_io_error_detected - lpfc method for handling PCI I/O error
10873 * @pdev: pointer to PCI device.
10874 * @state: the current PCI connection state.
10875 *
10876 * This routine is registered to the PCI subsystem for error handling. This
10877 * function is called by the PCI subsystem after a PCI bus error affecting
10878 * this device has been detected. When this routine is invoked, it dispatches
10879 * the action to the proper SLI-3 or SLI-4 device error detected handling
10880 * routine, which will perform the proper error detected operation.
10881 *
10882 * Return codes
10883 *      PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
10884 *      PCI_ERS_RESULT_DISCONNECT - device could not be recovered
10885 **/
10886static pci_ers_result_t
10887lpfc_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
10888{
10889        struct Scsi_Host *shost = pci_get_drvdata(pdev);
10890        struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
10891        pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
10892
10893        switch (phba->pci_dev_grp) {
10894        case LPFC_PCI_DEV_LP:
10895                rc = lpfc_io_error_detected_s3(pdev, state);
10896                break;
10897        case LPFC_PCI_DEV_OC:
10898                rc = lpfc_io_error_detected_s4(pdev, state);
10899                break;
10900        default:
10901                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10902                                "1427 Invalid PCI device group: 0x%x\n",
10903                                phba->pci_dev_grp);
10904                break;
10905        }
10906        return rc;
10907}
10908
10909/**
10910 * lpfc_io_slot_reset - lpfc method for restart PCI dev from scratch
10911 * @pdev: pointer to PCI device.
10912 *
10913 * This routine is registered to the PCI subsystem for error handling. This
10914 * function is called after PCI bus has been reset to restart the PCI card
10915 * from scratch, as if from a cold-boot. When this routine is invoked, it
10916 * dispatches the action to the proper SLI-3 or SLI-4 device reset handling
10917 * routine, which will perform the proper device reset.
10918 *
10919 * Return codes
10920 *      PCI_ERS_RESULT_RECOVERED - the device has been recovered
10921 *      PCI_ERS_RESULT_DISCONNECT - device could not be recovered
10922 **/
10923static pci_ers_result_t
10924lpfc_io_slot_reset(struct pci_dev *pdev)
10925{
10926        struct Scsi_Host *shost = pci_get_drvdata(pdev);
10927        struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
10928        pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
10929
10930        switch (phba->pci_dev_grp) {
10931        case LPFC_PCI_DEV_LP:
10932                rc = lpfc_io_slot_reset_s3(pdev);
10933                break;
10934        case LPFC_PCI_DEV_OC:
10935                rc = lpfc_io_slot_reset_s4(pdev);
10936                break;
10937        default:
10938                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10939                                "1428 Invalid PCI device group: 0x%x\n",
10940                                phba->pci_dev_grp);
10941                break;
10942        }
10943        return rc;
10944}
10945
10946/**
10947 * lpfc_io_resume - lpfc method for resuming PCI I/O operation
10948 * @pdev: pointer to PCI device
10949 *
10950 * This routine is registered to the PCI subsystem for error handling. It
10951 * is called when kernel error recovery tells the lpfc driver that it is
10952 * OK to resume normal PCI operation after PCI bus error recovery. When
10953 * this routine is invoked, it dispatches the action to the proper SLI-3
10954 * or SLI-4 device io_resume routine, which will resume the device operation.
10955 **/
10956static void
10957lpfc_io_resume(struct pci_dev *pdev)
10958{
10959        struct Scsi_Host *shost = pci_get_drvdata(pdev);
10960        struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
10961
10962        switch (phba->pci_dev_grp) {
10963        case LPFC_PCI_DEV_LP:
10964                lpfc_io_resume_s3(pdev);
10965                break;
10966        case LPFC_PCI_DEV_OC:
10967                lpfc_io_resume_s4(pdev);
10968                break;
10969        default:
10970                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10971                                "1429 Invalid PCI device group: 0x%x\n",
10972                                phba->pci_dev_grp);
10973                break;
10974        }
10975        return;
10976}
10977
10978/**
10979 * lpfc_sli4_oas_verify - Verify OAS is supported by this adapter
10980 * @phba: pointer to lpfc hba data structure.
10981 *
10982 * This routine checks to see if OAS is supported by this adapter. If
10983 * supported, the Flash Optimized Fabric (fof) configuration flag is set.
10984 * Otherwise, the enable OAS flag is cleared and the memory pool created
10985 * for OAS device data is destroyed.
10986 *
10987 **/
10988void
10989lpfc_sli4_oas_verify(struct lpfc_hba *phba)
10990{
10991
10992        if (!phba->cfg_EnableXLane)
10993                return;
10994
10995        if (phba->sli4_hba.pc_sli4_params.oas_supported) {
10996                phba->cfg_fof = 1;
10997        } else {
10998                phba->cfg_fof = 0;
10999                if (phba->device_data_mem_pool)
11000                        mempool_destroy(phba->device_data_mem_pool);
11001                phba->device_data_mem_pool = NULL;
11002        }
11003
11004        return;
11005}
11006
11007/**
11008 * lpfc_fof_queue_setup - Set up all the fof queues
11009 * @phba: pointer to lpfc hba data structure.
11010 *
11011 * This routine is invoked to set up all the fof queues for the FC HBA
11012 * operation.
11013 *
11014 * Return codes
11015 *      0 - successful
11016 *      -ENOMEM - No available memory
11017 **/
11018int
11019lpfc_fof_queue_setup(struct lpfc_hba *phba)
11020{
11021        struct lpfc_sli *psli = &phba->sli;
11022        int rc;
11023
11024        rc = lpfc_eq_create(phba, phba->sli4_hba.fof_eq, LPFC_MAX_IMAX);
11025        if (rc)
11026                return -ENOMEM;
11027
11028        if (phba->cfg_fof) {
11029
11030                rc = lpfc_cq_create(phba, phba->sli4_hba.oas_cq,
11031                                    phba->sli4_hba.fof_eq, LPFC_WCQ, LPFC_FCP);
11032                if (rc)
11033                        goto out_oas_cq;
11034
11035                rc = lpfc_wq_create(phba, phba->sli4_hba.oas_wq,
11036                                    phba->sli4_hba.oas_cq, LPFC_FCP);
11037                if (rc)
11038                        goto out_oas_wq;
11039
11040                phba->sli4_hba.oas_cq->pring = &psli->ring[LPFC_FCP_OAS_RING];
11041                phba->sli4_hba.oas_ring = &psli->ring[LPFC_FCP_OAS_RING];
11042        }
11043
11044        return 0;
11045
11046out_oas_wq:
11047        lpfc_cq_destroy(phba, phba->sli4_hba.oas_cq);
11048out_oas_cq:
11049        lpfc_eq_destroy(phba, phba->sli4_hba.fof_eq);
11050        return rc;
11051
11052}
11053
11054/**
11055 * lpfc_fof_queue_create - Create all the fof queues
11056 * @phba: pointer to lpfc hba data structure.
11057 *
11058 * This routine is invoked to allocate all the fof queues for the FC HBA
11059 * operation. For each SLI4 queue type, parameters such as the queue entry
11060 * count (queue depth) should be taken from the module parameters. For now,
11061 * we just use a constant number as a placeholder.
11062 *
11063 * Return codes
11064 *      0 - successful
11065 *      -ENOMEM - No available memory
11066 *      -EIO - The mailbox failed to complete successfully.
11067 **/
11068int
11069lpfc_fof_queue_create(struct lpfc_hba *phba)
11070{
11071        struct lpfc_queue *qdesc;
11072
11073        /* Create FOF EQ */
11074        qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.eq_esize,
11075                                      phba->sli4_hba.eq_ecount);
11076        if (!qdesc)
11077                goto out_error;
11078
11079        phba->sli4_hba.fof_eq = qdesc;
11080
11081        if (phba->cfg_fof) {
11082
11083                /* Create OAS CQ */
11084                qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
11085                                                      phba->sli4_hba.cq_ecount);
11086                if (!qdesc)
11087                        goto out_error;
11088
11089                phba->sli4_hba.oas_cq = qdesc;
11090
11091                /* Create OAS WQ */
11092                qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.wq_esize,
11093                                              phba->sli4_hba.wq_ecount);
11094                if (!qdesc)
11095                        goto out_error;
11096
11097                phba->sli4_hba.oas_wq = qdesc;
11098
11099        }
11100        return 0;
11101
11102out_error:
11103        lpfc_fof_queue_destroy(phba);
11104        return -ENOMEM;
11105}
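/*
 * The fof queues follow the driver's usual two-phase bring-up:
 * lpfc_fof_queue_create() above only allocates the host-side queue
 * descriptors, while lpfc_fof_queue_setup() later issues the EQ/CQ/WQ
 * create mailbox commands to the firmware. A simplified sketch of the
 * intended call order (the error labels here are hypothetical):
 *
 *	if (lpfc_fof_queue_create(phba))
 *		goto out_error;
 *	...
 *	if (lpfc_fof_queue_setup(phba))
 *		goto out_destroy;
 */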
11106
11107/**
11108 * lpfc_fof_queue_destroy - Destroy all the fof queues
11109 * @phba: pointer to lpfc hba data structure.
11110 *
11111 * This routine is invoked to release all the SLI4 fof queues used for the
11112 * FC HBA operation.
11113 *
11114 * Return codes
11115 *      0 - successful
11116 **/
11117int
11118lpfc_fof_queue_destroy(struct lpfc_hba *phba)
11119{
11120        /* Release FOF Event queue */
11121        if (phba->sli4_hba.fof_eq != NULL) {
11122                lpfc_sli4_queue_free(phba->sli4_hba.fof_eq);
11123                phba->sli4_hba.fof_eq = NULL;
11124        }
11125
11126        /* Release OAS Completion queue */
11127        if (phba->sli4_hba.oas_cq != NULL) {
11128                lpfc_sli4_queue_free(phba->sli4_hba.oas_cq);
11129                phba->sli4_hba.oas_cq = NULL;
11130        }
11131
11132        /* Release OAS Work queue */
11133        if (phba->sli4_hba.oas_wq != NULL) {
11134                lpfc_sli4_queue_free(phba->sli4_hba.oas_wq);
11135                phba->sli4_hba.oas_wq = NULL;
11136        }
11137        return 0;
11138}
11139
11140static const struct pci_device_id lpfc_id_table[] = {
11141        {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_VIPER,
11142                PCI_ANY_ID, PCI_ANY_ID, },
11143        {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_FIREFLY,
11144                PCI_ANY_ID, PCI_ANY_ID, },
11145        {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_THOR,
11146                PCI_ANY_ID, PCI_ANY_ID, },
11147        {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PEGASUS,
11148                PCI_ANY_ID, PCI_ANY_ID, },
11149        {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_CENTAUR,
11150                PCI_ANY_ID, PCI_ANY_ID, },
11151        {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_DRAGONFLY,
11152                PCI_ANY_ID, PCI_ANY_ID, },
11153        {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SUPERFLY,
11154                PCI_ANY_ID, PCI_ANY_ID, },
11155        {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_RFLY,
11156                PCI_ANY_ID, PCI_ANY_ID, },
11157        {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PFLY,
11158                PCI_ANY_ID, PCI_ANY_ID, },
11159        {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_NEPTUNE,
11160                PCI_ANY_ID, PCI_ANY_ID, },
11161        {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_NEPTUNE_SCSP,
11162                PCI_ANY_ID, PCI_ANY_ID, },
11163        {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_NEPTUNE_DCSP,
11164                PCI_ANY_ID, PCI_ANY_ID, },
11165        {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HELIOS,
11166                PCI_ANY_ID, PCI_ANY_ID, },
11167        {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HELIOS_SCSP,
11168                PCI_ANY_ID, PCI_ANY_ID, },
11169        {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HELIOS_DCSP,
11170                PCI_ANY_ID, PCI_ANY_ID, },
11171        {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_BMID,
11172                PCI_ANY_ID, PCI_ANY_ID, },
11173        {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_BSMB,
11174                PCI_ANY_ID, PCI_ANY_ID, },
11175        {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR,
11176                PCI_ANY_ID, PCI_ANY_ID, },
11177        {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HORNET,
11178                PCI_ANY_ID, PCI_ANY_ID, },
11179        {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR_SCSP,
11180                PCI_ANY_ID, PCI_ANY_ID, },
11181        {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR_DCSP,
11182                PCI_ANY_ID, PCI_ANY_ID, },
11183        {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZMID,
11184                PCI_ANY_ID, PCI_ANY_ID, },
11185        {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZSMB,
11186                PCI_ANY_ID, PCI_ANY_ID, },
11187        {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_TFLY,
11188                PCI_ANY_ID, PCI_ANY_ID, },
11189        {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LP101,
11190                PCI_ANY_ID, PCI_ANY_ID, },
11191        {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LP10000S,
11192                PCI_ANY_ID, PCI_ANY_ID, },
11193        {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LP11000S,
11194                PCI_ANY_ID, PCI_ANY_ID, },
11195        {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LPE11000S,
11196                PCI_ANY_ID, PCI_ANY_ID, },
11197        {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT,
11198                PCI_ANY_ID, PCI_ANY_ID, },
11199        {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_MID,
11200                PCI_ANY_ID, PCI_ANY_ID, },
11201        {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_SMB,
11202                PCI_ANY_ID, PCI_ANY_ID, },
11203        {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_DCSP,
11204                PCI_ANY_ID, PCI_ANY_ID, },
11205        {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_SCSP,
11206                PCI_ANY_ID, PCI_ANY_ID, },
11207        {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_S,
11208                PCI_ANY_ID, PCI_ANY_ID, },
11209        {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PROTEUS_VF,
11210                PCI_ANY_ID, PCI_ANY_ID, },
11211        {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PROTEUS_PF,
11212                PCI_ANY_ID, PCI_ANY_ID, },
11213        {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PROTEUS_S,
11214                PCI_ANY_ID, PCI_ANY_ID, },
11215        {PCI_VENDOR_ID_SERVERENGINE, PCI_DEVICE_ID_TIGERSHARK,
11216                PCI_ANY_ID, PCI_ANY_ID, },
11217        {PCI_VENDOR_ID_SERVERENGINE, PCI_DEVICE_ID_TOMCAT,
11218                PCI_ANY_ID, PCI_ANY_ID, },
11219        {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_FALCON,
11220                PCI_ANY_ID, PCI_ANY_ID, },
11221        {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_BALIUS,
11222                PCI_ANY_ID, PCI_ANY_ID, },
11223        {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LANCER_FC,
11224                PCI_ANY_ID, PCI_ANY_ID, },
11225        {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LANCER_FCOE,
11226                PCI_ANY_ID, PCI_ANY_ID, },
11227        {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LANCER_FC_VF,
11228                PCI_ANY_ID, PCI_ANY_ID, },
11229        {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LANCER_FCOE_VF,
11230                PCI_ANY_ID, PCI_ANY_ID, },
11231        {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SKYHAWK,
11232                PCI_ANY_ID, PCI_ANY_ID, },
11233        {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SKYHAWK_VF,
11234                PCI_ANY_ID, PCI_ANY_ID, },
11235        { 0 }
11236};
11237
11238MODULE_DEVICE_TABLE(pci, lpfc_id_table);
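/*
 * Every entry above spells out vendor/device/subvendor/subdevice. Since
 * subvendor and subdevice are always PCI_ANY_ID here, an equivalent
 * shorthand would be the PCI_DEVICE() helper from <linux/pci.h>, e.g.:
 *
 *	{ PCI_DEVICE(PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_VIPER) },
 *
 * MODULE_DEVICE_TABLE(pci, ...) exports the table as module aliases so
 * that udev/modprobe can autoload lpfc when a matching device appears.
 */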
11239
11240static const struct pci_error_handlers lpfc_err_handler = {
11241        .error_detected = lpfc_io_error_detected,
11242        .slot_reset = lpfc_io_slot_reset,
11243        .resume = lpfc_io_resume,
11244};
11245
11246static struct pci_driver lpfc_driver = {
11247        .name           = LPFC_DRIVER_NAME,
11248        .id_table       = lpfc_id_table,
11249        .probe          = lpfc_pci_probe_one,
11250        .remove         = lpfc_pci_remove_one,
11251        .suspend        = lpfc_pci_suspend_one,
11252        .resume         = lpfc_pci_resume_one,
11253        .err_handler    = &lpfc_err_handler,
11254};
11255
11256static const struct file_operations lpfc_mgmt_fop = {
11257        .owner = THIS_MODULE,
11258};
11259
11260static struct miscdevice lpfc_mgmt_dev = {
11261        .minor = MISC_DYNAMIC_MINOR,
11262        .name = "lpfcmgmt",
11263        .fops = &lpfc_mgmt_fop,
11264};
11265
11266/**
11267 * lpfc_init - lpfc module initialization routine
11268 *
11269 * This routine is to be invoked when the lpfc module is loaded into the
11270 * kernel. The special kernel macro module_init() is used to indicate the
11271 * role of this routine to the kernel as the lpfc module entry point.
11272 *
11273 * Return codes
11274 *   0 - successful
11275 *   -ENOMEM - FC attach transport failed
11276 *   all others - failed
11277 */
11278static int __init
11279lpfc_init(void)
11280{
11281        int cpu;
11282        int error = 0;
11283
11284        printk(LPFC_MODULE_DESC "\n");
11285        printk(LPFC_COPYRIGHT "\n");
11286
11287        error = misc_register(&lpfc_mgmt_dev);
11288        if (error)
11289                printk(KERN_ERR "Could not register lpfcmgmt device, "
11290                        "misc_register returned with status %d\n", error);
11291
11292        if (lpfc_enable_npiv) {
11293                lpfc_transport_functions.vport_create = lpfc_vport_create;
11294                lpfc_transport_functions.vport_delete = lpfc_vport_delete;
11295        }
11296        lpfc_transport_template =
11297                                fc_attach_transport(&lpfc_transport_functions);
11298        if (lpfc_transport_template == NULL)
11299                return -ENOMEM;
11300        if (lpfc_enable_npiv) {
11301                lpfc_vport_transport_template =
11302                        fc_attach_transport(&lpfc_vport_transport_functions);
11303                if (lpfc_vport_transport_template == NULL) {
11304                        fc_release_transport(lpfc_transport_template);
11305                        return -ENOMEM;
11306                }
11307        }
11308
11309        /* Initialize in case vector mapping is needed */
11310        lpfc_used_cpu = NULL;
11311        lpfc_present_cpu = 0;
11312        for_each_present_cpu(cpu)
11313                lpfc_present_cpu++;
11314
11315        error = pci_register_driver(&lpfc_driver);
11316        if (error) {
11317                fc_release_transport(lpfc_transport_template);
11318                if (lpfc_enable_npiv)
11319                        fc_release_transport(lpfc_vport_transport_template);
11320        }
11321
11322        return error;
11323}
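/*
 * Note the unwind ordering in lpfc_init(): the FC transports are released
 * only if pci_register_driver() fails, and lpfc_exit() tears everything
 * down in reverse registration order. A simplified sketch of the same
 * unwind in the more common goto style (the labels are hypothetical):
 *
 *	error = pci_register_driver(&lpfc_driver);
 *	if (error)
 *		goto out_release_transports;
 *	return 0;
 * out_release_transports:
 *	fc_release_transport(lpfc_transport_template);
 *	if (lpfc_enable_npiv)
 *		fc_release_transport(lpfc_vport_transport_template);
 *	return error;
 */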
11324
11325/**
11326 * lpfc_exit - lpfc module removal routine
11327 *
11328 * This routine is invoked when the lpfc module is removed from the kernel.
11329 * The special kernel macro module_exit() is used to indicate the role of
11330 * this routine to the kernel as the lpfc module exit point.
11331 */
11332static void __exit
11333lpfc_exit(void)
11334{
11335        misc_deregister(&lpfc_mgmt_dev);
11336        pci_unregister_driver(&lpfc_driver);
11337        fc_release_transport(lpfc_transport_template);
11338        if (lpfc_enable_npiv)
11339                fc_release_transport(lpfc_vport_transport_template);
11340        if (_dump_buf_data) {
11341                printk(KERN_ERR "9062 BLKGRD: freeing %lu pages for "
11342                                "_dump_buf_data at 0x%p\n",
11343                                (1L << _dump_buf_data_order), _dump_buf_data);
11344                free_pages((unsigned long)_dump_buf_data, _dump_buf_data_order);
11345        }
11346
11347        if (_dump_buf_dif) {
11348                printk(KERN_ERR "9049 BLKGRD: freeing %lu pages for "
11349                                "_dump_buf_dif at 0x%p\n",
11350                                (1L << _dump_buf_dif_order), _dump_buf_dif);
11351                free_pages((unsigned long)_dump_buf_dif, _dump_buf_dif_order);
11352        }
11353        kfree(lpfc_used_cpu);
11354}
11355
11356module_init(lpfc_init);
11357module_exit(lpfc_exit);
11358MODULE_LICENSE("GPL");
11359MODULE_DESCRIPTION(LPFC_MODULE_DESC);
11360MODULE_AUTHOR("Emulex Corporation - tech.support@emulex.com");
11361MODULE_VERSION("0:" LPFC_DRIVER_VERSION);
11362