linux/drivers/scsi/lpfc/lpfc_nportdisc.c
   1/*******************************************************************
   2 * This file is part of the Emulex Linux Device Driver for         *
   3 * Fibre Channel Host Bus Adapters.                                *
   4 * Copyright (C) 2017-2021 Broadcom. All Rights Reserved. The term *
   5 * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.     *
   6 * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
   7 * EMULEX and SLI are trademarks of Emulex.                        *
   8 * www.broadcom.com                                                *
   9 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
  10 *                                                                 *
  11 * This program is free software; you can redistribute it and/or   *
  12 * modify it under the terms of version 2 of the GNU General       *
  13 * Public License as published by the Free Software Foundation.    *
  14 * This program is distributed in the hope that it will be useful. *
  15 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
  16 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
  17 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
  18 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
  19 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
  20 * more details, a copy of which can be found in the file COPYING  *
  21 * included with this package.                                     *
  22 *******************************************************************/
  23
  24#include <linux/blkdev.h>
  25#include <linux/pci.h>
  26#include <linux/slab.h>
  27#include <linux/interrupt.h>
  28
  29#include <scsi/scsi.h>
  30#include <scsi/scsi_device.h>
  31#include <scsi/scsi_host.h>
  32#include <scsi/scsi_transport_fc.h>
  33#include <scsi/fc/fc_fs.h>
  34
  35#include "lpfc_hw4.h"
  36#include "lpfc_hw.h"
  37#include "lpfc_sli.h"
  38#include "lpfc_sli4.h"
  39#include "lpfc_nl.h"
  40#include "lpfc_disc.h"
  41#include "lpfc.h"
  42#include "lpfc_scsi.h"
  43#include "lpfc_nvme.h"
  44#include "lpfc_logmsg.h"
  45#include "lpfc_crtn.h"
  46#include "lpfc_vport.h"
  47#include "lpfc_debugfs.h"
  48
  49
  50/* Called to verify a rcv'ed ADISC was intended for us. */
  51static int
  52lpfc_check_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
  53                 struct lpfc_name *nn, struct lpfc_name *pn)
  54{
  55        /* First, we MUST have an RPI registered */
  56        if (!(ndlp->nlp_flag & NLP_RPI_REGISTERED))
  57                return 0;
  58
  59        /* Check that the ADISC rsp WWNN / WWPN matches our internal node
  60         * table entry for that node.
  61         */
  62        if (memcmp(nn, &ndlp->nlp_nodename, sizeof (struct lpfc_name)))
  63                return 0;
  64
  65        if (memcmp(pn, &ndlp->nlp_portname, sizeof (struct lpfc_name)))
  66                return 0;
  67
  68        /* we match, return success */
  69        return 1;
  70}
  71
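/* Validate service parameters from a PLOGI or FLOGI exchange.  When
 * flogi == 0 the remote receive data field size of each valid class is
 * clamped to our own limits from vport->fc_sparam, and the common BB
 * receive size is clamped while preserving the remote port's BB-SC-N
 * bits.  The remote WWNN / WWPN are then copied into the ndlp.
 * Returns 1 if the parameters are acceptable, 0 if the device should
 * be ignored (the requested class is not supported or a receive data
 * field size is zero).
 */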
  72int
  73lpfc_check_sparm(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
  74                 struct serv_parm *sp, uint32_t class, int flogi)
  75{
  76        volatile struct serv_parm *hsp = &vport->fc_sparam;
  77        uint16_t hsp_value, ssp_value = 0;
  78
  79        /*
  80         * The receive data field size and buffer-to-buffer receive data field
  81         * size entries are 16 bits but are represented as two 8-bit fields in
  82         * the driver data structure to account for rsvd bits and other control
  83         * bits.  Reconstruct and compare the fields as 16-bit values before
  84         * correcting the byte values.
  85         */
  86        if (sp->cls1.classValid) {
  87                if (!flogi) {
  88                        hsp_value = ((hsp->cls1.rcvDataSizeMsb << 8) |
  89                                     hsp->cls1.rcvDataSizeLsb);
  90                        ssp_value = ((sp->cls1.rcvDataSizeMsb << 8) |
  91                                     sp->cls1.rcvDataSizeLsb);
  92                        if (!ssp_value)
  93                                goto bad_service_param;
  94                        if (ssp_value > hsp_value) {
  95                                sp->cls1.rcvDataSizeLsb =
  96                                        hsp->cls1.rcvDataSizeLsb;
  97                                sp->cls1.rcvDataSizeMsb =
  98                                        hsp->cls1.rcvDataSizeMsb;
  99                        }
 100                }
 101        } else if (class == CLASS1)
 102                goto bad_service_param;
 103        if (sp->cls2.classValid) {
 104                if (!flogi) {
 105                        hsp_value = ((hsp->cls2.rcvDataSizeMsb << 8) |
 106                                     hsp->cls2.rcvDataSizeLsb);
 107                        ssp_value = ((sp->cls2.rcvDataSizeMsb << 8) |
 108                                     sp->cls2.rcvDataSizeLsb);
 109                        if (!ssp_value)
 110                                goto bad_service_param;
 111                        if (ssp_value > hsp_value) {
 112                                sp->cls2.rcvDataSizeLsb =
 113                                        hsp->cls2.rcvDataSizeLsb;
 114                                sp->cls2.rcvDataSizeMsb =
 115                                        hsp->cls2.rcvDataSizeMsb;
 116                        }
 117                }
 118        } else if (class == CLASS2)
 119                goto bad_service_param;
 120        if (sp->cls3.classValid) {
 121                if (!flogi) {
 122                        hsp_value = ((hsp->cls3.rcvDataSizeMsb << 8) |
 123                                     hsp->cls3.rcvDataSizeLsb);
 124                        ssp_value = ((sp->cls3.rcvDataSizeMsb << 8) |
 125                                     sp->cls3.rcvDataSizeLsb);
 126                        if (!ssp_value)
 127                                goto bad_service_param;
 128                        if (ssp_value > hsp_value) {
 129                                sp->cls3.rcvDataSizeLsb =
 130                                        hsp->cls3.rcvDataSizeLsb;
 131                                sp->cls3.rcvDataSizeMsb =
 132                                        hsp->cls3.rcvDataSizeMsb;
 133                        }
 134                }
 135        } else if (class == CLASS3)
 136                goto bad_service_param;
 137
 138        /*
 139         * Preserve the upper four bits of the MSB from the PLOGI response.
 140         * These bits contain the Buffer-to-Buffer State Change Number
 141         * from the target and need to be passed to the FW.
 142         */
 143        hsp_value = (hsp->cmn.bbRcvSizeMsb << 8) | hsp->cmn.bbRcvSizeLsb;
 144        ssp_value = (sp->cmn.bbRcvSizeMsb << 8) | sp->cmn.bbRcvSizeLsb;
 145        if (ssp_value > hsp_value) {
 146                sp->cmn.bbRcvSizeLsb = hsp->cmn.bbRcvSizeLsb;
 147                sp->cmn.bbRcvSizeMsb = (sp->cmn.bbRcvSizeMsb & 0xF0) |
 148                                       (hsp->cmn.bbRcvSizeMsb & 0x0F);
 149        }
 150
 151        memcpy(&ndlp->nlp_nodename, &sp->nodeName, sizeof (struct lpfc_name));
 152        memcpy(&ndlp->nlp_portname, &sp->portName, sizeof (struct lpfc_name));
 153        return 1;
 154bad_service_param:
 155        lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
 156                         "0207 Device %x "
 157                         "(%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x) sent "
 158                         "invalid service parameters.  Ignoring device.\n",
 159                         ndlp->nlp_DID,
 160                         sp->nodeName.u.wwn[0], sp->nodeName.u.wwn[1],
 161                         sp->nodeName.u.wwn[2], sp->nodeName.u.wwn[3],
 162                         sp->nodeName.u.wwn[4], sp->nodeName.u.wwn[5],
 163                         sp->nodeName.u.wwn[6], sp->nodeName.u.wwn[7]);
 164        return 0;
 165}
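/* Illustration of the size check above (hypothetical values): if a
 * remote PLOGI reports cls3.rcvDataSizeMsb = 0x08 and
 * cls3.rcvDataSizeLsb = 0x00, the reconstructed receive data field
 * size is (0x08 << 8) | 0x00 = 2048 bytes.  If our own fc_sparam only
 * advertises 0x0400 (1024 bytes), the remote bytes are rewritten so
 * the smaller local value is used.  For the common parameters only the
 * low nibble of bbRcvSizeMsb is replaced, so the BB-SC-N bits in the
 * upper nibble from the remote port are preserved.
 */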
 166
 167static void *
 168lpfc_check_elscmpl_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
 169                        struct lpfc_iocbq *rspiocb)
 170{
 171        struct lpfc_dmabuf *pcmd, *prsp;
 172        uint32_t *lp;
 173        void     *ptr = NULL;
 174        IOCB_t   *irsp;
 175
 176        irsp = &rspiocb->iocb;
 177        pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
 178
 179        /* For lpfc_els_abort, context2 could be zero'ed to delay
 180         * freeing associated memory till after ABTS completes.
 181         */
 182        if (pcmd) {
 183                prsp =  list_get_first(&pcmd->list, struct lpfc_dmabuf,
 184                                       list);
 185                if (prsp) {
 186                        lp = (uint32_t *) prsp->virt;
 187                        ptr = (void *)((uint8_t *)lp + sizeof(uint32_t));
 188                }
 189        } else {
 190                /* Force ulpStatus error since we are returning NULL ptr */
 191                if (!(irsp->ulpStatus)) {
 192                        irsp->ulpStatus = IOSTAT_LOCAL_REJECT;
 193                        irsp->un.ulpWord[4] = IOERR_SLI_ABORTED;
 194                }
 195                ptr = NULL;
 196        }
 197        return ptr;
 198}
 199
 200
 201
 202/*
 203 * Free resources / clean up outstanding I/Os
 204 * associated with an LPFC_NODELIST entry. This
 205 * routine effectively results in a "software abort".
 206 */
 207void
 208lpfc_els_abort(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
 209{
 210        LIST_HEAD(abort_list);
 211        struct lpfc_sli_ring *pring;
 212        struct lpfc_iocbq *iocb, *next_iocb;
 213
 214        pring = lpfc_phba_elsring(phba);
 215
 216        /* In the error recovery path, we might have a NULL pring here */
 217        if (unlikely(!pring))
 218                return;
 219
 220        /* Abort outstanding I/O on NPort <nlp_DID> */
 221        lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_DISCOVERY,
 222                         "2819 Abort outstanding I/O on NPort x%x "
 223                         "Data: x%x x%x x%x\n",
 224                         ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
 225                         ndlp->nlp_rpi);
 226        /* Clean up all fabric IOs first. */
 227        lpfc_fabric_abort_nport(ndlp);
 228
 229        /*
 230         * Lock the ELS ring txcmplq for SLI3/SLI4 and build a local list
 231         * of all ELS IOs that need an ABTS.  The IOs need to stay on the
 232         * txcmplq so that the abort operation completes them successfully.
 233         */
 234        spin_lock_irq(&phba->hbalock);
 235        if (phba->sli_rev == LPFC_SLI_REV4)
 236                spin_lock(&pring->ring_lock);
 237        list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) {
 238                /* Add to abort_list on NDLP match. */
 239                if (lpfc_check_sli_ndlp(phba, pring, iocb, ndlp))
 240                        list_add_tail(&iocb->dlist, &abort_list);
 241        }
 242        if (phba->sli_rev == LPFC_SLI_REV4)
 243                spin_unlock(&pring->ring_lock);
 244        spin_unlock_irq(&phba->hbalock);
 245
 246        /* Abort the targeted IOs and remove them from the abort list. */
 247        list_for_each_entry_safe(iocb, next_iocb, &abort_list, dlist) {
 248                spin_lock_irq(&phba->hbalock);
 249                list_del_init(&iocb->dlist);
 250                lpfc_sli_issue_abort_iotag(phba, pring, iocb, NULL);
 251                spin_unlock_irq(&phba->hbalock);
 252        }
 253        /* Make sure HBA is alive */
 254        lpfc_issue_hb_tmo(phba);
 255
 256        INIT_LIST_HEAD(&abort_list);
 257
 258        /* Now process the txq */
 259        spin_lock_irq(&phba->hbalock);
 260        if (phba->sli_rev == LPFC_SLI_REV4)
 261                spin_lock(&pring->ring_lock);
 262
 263        list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) {
 264                /* Check to see if iocb matches the nport we are looking for */
 265                if (lpfc_check_sli_ndlp(phba, pring, iocb, ndlp)) {
 266                        list_del_init(&iocb->list);
 267                        list_add_tail(&iocb->list, &abort_list);
 268                }
 269        }
 270
 271        if (phba->sli_rev == LPFC_SLI_REV4)
 272                spin_unlock(&pring->ring_lock);
 273        spin_unlock_irq(&phba->hbalock);
 274
 275        /* Cancel all the IOCBs from the completions list */
 276        lpfc_sli_cancel_iocbs(phba, &abort_list,
 277                              IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED);
 278
 279        lpfc_cancel_retry_delay_tmo(phba->pport, ndlp);
 280}
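/* Note on the two passes above: IOs already handed to the HBA
 * (txcmplq) are left on their queue and an ABTS is issued for each,
 * so their normal completions still run; IOs that never left the
 * driver (txq) are simply unlinked and completed locally with
 * IOSTAT_LOCAL_REJECT / IOERR_SLI_ABORTED.
 */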
 281
 282/* lpfc_defer_plogi_acc - Issue PLOGI ACC after reg_login completes
 283 * @phba: pointer to lpfc hba data structure.
 284 * @login_mbox: pointer to REG_RPI mailbox object
 285 *
 286 * The ACC for a rcv'ed PLOGI is deferred until AFTER the REG_RPI completes
 287 */
 288static void
 289lpfc_defer_plogi_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *login_mbox)
 290{
 291        struct lpfc_iocbq *save_iocb;
 292        struct lpfc_nodelist *ndlp;
 293        MAILBOX_t *mb = &login_mbox->u.mb;
 294
 295        int rc;
 296
 297        ndlp = login_mbox->ctx_ndlp;
 298        save_iocb = login_mbox->context3;
 299
 300        if (mb->mbxStatus == MBX_SUCCESS) {
 301                /* Now that REG_RPI completed successfully,
 302                 * we can now proceed with sending the PLOGI ACC.
 303                 */
 304                rc = lpfc_els_rsp_acc(login_mbox->vport, ELS_CMD_PLOGI,
 305                                      save_iocb, ndlp, NULL);
 306                if (rc) {
 307                        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
 308                                        "4576 PLOGI ACC fails pt2pt discovery: "
 309                                        "DID %x Data: %x\n", ndlp->nlp_DID, rc);
 310                }
 311        }
 312
 313        /* Now process the REG_RPI cmpl */
 314        lpfc_mbx_cmpl_reg_login(phba, login_mbox);
 315        ndlp->nlp_flag &= ~NLP_ACC_REGLOGIN;
 316        kfree(save_iocb);
 317}
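/* Note: the ctx_ndlp and context3 fields consumed above are populated
 * by lpfc_rcv_plogi() just before the REG_LOGIN mailbox is issued;
 * save_iocb is a kzalloc'ed copy of the received PLOGI IOCB, which is
 * why it is released with a plain kfree() here.
 */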
 318
 319static int
 320lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
 321               struct lpfc_iocbq *cmdiocb)
 322{
 323        struct lpfc_hba    *phba = vport->phba;
 324        struct lpfc_dmabuf *pcmd;
 325        uint64_t nlp_portwwn = 0;
 326        uint32_t *lp;
 327        IOCB_t *icmd;
 328        struct serv_parm *sp;
 329        uint32_t ed_tov;
 330        LPFC_MBOXQ_t *link_mbox;
 331        LPFC_MBOXQ_t *login_mbox;
 332        struct lpfc_iocbq *save_iocb;
 333        struct ls_rjt stat;
 334        uint32_t vid, flag;
 335        int rc;
 336
 337        memset(&stat, 0, sizeof (struct ls_rjt));
 338        pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
 339        lp = (uint32_t *) pcmd->virt;
 340        sp = (struct serv_parm *) ((uint8_t *) lp + sizeof (uint32_t));
 341        if (wwn_to_u64(sp->portName.u.wwn) == 0) {
 342                lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
 343                                 "0140 PLOGI Reject: invalid pname\n");
 344                stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
 345                stat.un.b.lsRjtRsnCodeExp = LSEXP_INVALID_PNAME;
 346                lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp,
 347                        NULL);
 348                return 0;
 349        }
 350        if (wwn_to_u64(sp->nodeName.u.wwn) == 0) {
 351                lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
 352                                 "0141 PLOGI Reject: invalid nname\n");
 353                stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
 354                stat.un.b.lsRjtRsnCodeExp = LSEXP_INVALID_NNAME;
 355                lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp,
 356                        NULL);
 357                return 0;
 358        }
 359
 360        nlp_portwwn = wwn_to_u64(ndlp->nlp_portname.u.wwn);
 361        if ((lpfc_check_sparm(vport, ndlp, sp, CLASS3, 0) == 0)) {
 362                /* Reject this request because of invalid parameters */
 363                stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
 364                stat.un.b.lsRjtRsnCodeExp = LSEXP_SPARM_OPTIONS;
 365                lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp,
 366                        NULL);
 367                return 0;
 368        }
 369        icmd = &cmdiocb->iocb;
 370
 371        /* PLOGI chkparm OK */
 372        lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
 373                         "0114 PLOGI chkparm OK Data: x%x x%x x%x "
 374                         "x%x x%x x%x\n",
 375                         ndlp->nlp_DID, ndlp->nlp_state, ndlp->nlp_flag,
 376                         ndlp->nlp_rpi, vport->port_state,
 377                         vport->fc_flag);
 378
 379        if (vport->cfg_fcp_class == 2 && sp->cls2.classValid)
 380                ndlp->nlp_fcp_info |= CLASS2;
 381        else
 382                ndlp->nlp_fcp_info |= CLASS3;
 383
 384        ndlp->nlp_class_sup = 0;
 385        if (sp->cls1.classValid)
 386                ndlp->nlp_class_sup |= FC_COS_CLASS1;
 387        if (sp->cls2.classValid)
 388                ndlp->nlp_class_sup |= FC_COS_CLASS2;
 389        if (sp->cls3.classValid)
 390                ndlp->nlp_class_sup |= FC_COS_CLASS3;
 391        if (sp->cls4.classValid)
 392                ndlp->nlp_class_sup |= FC_COS_CLASS4;
 393        ndlp->nlp_maxframe =
 394                ((sp->cmn.bbRcvSizeMsb & 0x0F) << 8) | sp->cmn.bbRcvSizeLsb;
 395        /* if already logged in, do implicit logout */
 396        switch (ndlp->nlp_state) {
 397        case  NLP_STE_NPR_NODE:
 398                if (!(ndlp->nlp_flag & NLP_NPR_ADISC))
 399                        break;
 400                fallthrough;
 401        case  NLP_STE_REG_LOGIN_ISSUE:
 402        case  NLP_STE_PRLI_ISSUE:
 403        case  NLP_STE_UNMAPPED_NODE:
 404        case  NLP_STE_MAPPED_NODE:
 405                /* For initiators, lpfc_plogi_confirm_nport skips fabric did.
 406                 * For target mode, execute implicit logo.
 407                 * Fabric nodes go into NPR.
 408                 */
 409                if (!(ndlp->nlp_type & NLP_FABRIC) &&
 410                    !(phba->nvmet_support)) {
 411                        /* Clear ndlp info, since a follow-up PRLI may have
 412                         * updated ndlp information
 413                         */
 414                        ndlp->nlp_type &= ~(NLP_FCP_TARGET | NLP_FCP_INITIATOR);
 415                        ndlp->nlp_type &= ~(NLP_NVME_TARGET | NLP_NVME_INITIATOR);
 416                        ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
 417                        ndlp->nlp_nvme_info &= ~NLP_NVME_NSLER;
 418                        ndlp->nlp_flag &= ~NLP_FIRSTBURST;
 419
 420                        lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, cmdiocb,
 421                                         ndlp, NULL);
 422                        return 1;
 423                }
 424                if (nlp_portwwn != 0 &&
 425                    nlp_portwwn != wwn_to_u64(sp->portName.u.wwn))
 426                        lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
 427                                         "0143 PLOGI recv'd from DID: x%x "
 428                                         "WWPN changed: old %llx new %llx\n",
 429                                         ndlp->nlp_DID,
 430                                         (unsigned long long)nlp_portwwn,
 431                                         (unsigned long long)
 432                                         wwn_to_u64(sp->portName.u.wwn));
 433
 434                /* Notify transport of connectivity loss to trigger cleanup. */
 435                if (phba->nvmet_support &&
 436                    ndlp->nlp_state == NLP_STE_UNMAPPED_NODE)
 437                        lpfc_nvmet_invalidate_host(phba, ndlp);
 438
 439                ndlp->nlp_prev_state = ndlp->nlp_state;
 440                /* rport needs to be unregistered first */
 441                lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
 442                break;
 443        }
 444
 445        ndlp->nlp_type &= ~(NLP_FCP_TARGET | NLP_FCP_INITIATOR);
 446        ndlp->nlp_type &= ~(NLP_NVME_TARGET | NLP_NVME_INITIATOR);
 447        ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
 448        ndlp->nlp_nvme_info &= ~NLP_NVME_NSLER;
 449        ndlp->nlp_flag &= ~NLP_FIRSTBURST;
 450
 451        login_mbox = NULL;
 452        link_mbox = NULL;
 453        save_iocb = NULL;
 454
 455        /* Check for Nport to NPort pt2pt protocol */
 456        if ((vport->fc_flag & FC_PT2PT) &&
 457            !(vport->fc_flag & FC_PT2PT_PLOGI)) {
 458                /* rcv'ed PLOGI decides what our NPortId will be */
 459                vport->fc_myDID = icmd->un.rcvels.parmRo;
 460
 461                /* If there is an outstanding FLOGI, abort it now.
 462                 * The remote NPort is not going to ACC our FLOGI
 463                 * if it's already issuing a PLOGI for pt2pt mode.
 464                 * This indicates our FLOGI was dropped; however, we
 465                 * must have ACCed the remote NPort's FLOGI to us
 466                 * to make it here.
 467                 */
 468                if (phba->hba_flag & HBA_FLOGI_OUTSTANDING)
 469                        lpfc_els_abort_flogi(phba);
 470
 471                ed_tov = be32_to_cpu(sp->cmn.e_d_tov);
 472                if (sp->cmn.edtovResolution) {
 473                        /* E_D_TOV ticks are in nanoseconds */
 474                        ed_tov = (phba->fc_edtov + 999999) / 1000000;
 475                }
 476
 477                /*
 478                 * For pt-to-pt, use the larger EDTOV
 479                 * RATOV = 2 * EDTOV
 480                 */
 481                if (ed_tov > phba->fc_edtov)
 482                        phba->fc_edtov = ed_tov;
 483                phba->fc_ratov = (2 * phba->fc_edtov) / 1000;
 484
 485                memcpy(&phba->fc_fabparam, sp, sizeof(struct serv_parm));
 486
 487                /* Issue CONFIG_LINK for SLI3 or REG_VFI for SLI4,
 488                 * to account for updated TOV's / parameters
 489                 */
 490                if (phba->sli_rev == LPFC_SLI_REV4)
 491                        lpfc_issue_reg_vfi(vport);
 492                else {
 493                        link_mbox = mempool_alloc(phba->mbox_mem_pool,
 494                                                  GFP_KERNEL);
 495                        if (!link_mbox)
 496                                goto out;
 497                        lpfc_config_link(phba, link_mbox);
 498                        link_mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
 499                        link_mbox->vport = vport;
 500                        link_mbox->ctx_ndlp = ndlp;
 501
 502                        rc = lpfc_sli_issue_mbox(phba, link_mbox, MBX_NOWAIT);
 503                        if (rc == MBX_NOT_FINISHED) {
 504                                mempool_free(link_mbox, phba->mbox_mem_pool);
 505                                goto out;
 506                        }
 507                }
 508
 509                lpfc_can_disctmo(vport);
 510        }
 511
 512        ndlp->nlp_flag &= ~NLP_SUPPRESS_RSP;
 513        if ((phba->sli.sli_flag & LPFC_SLI_SUPPRESS_RSP) &&
 514            sp->cmn.valid_vendor_ver_level) {
 515                vid = be32_to_cpu(sp->un.vv.vid);
 516                flag = be32_to_cpu(sp->un.vv.flags);
 517                if ((vid == LPFC_VV_EMLX_ID) && (flag & LPFC_VV_SUPPRESS_RSP))
 518                        ndlp->nlp_flag |= NLP_SUPPRESS_RSP;
 519        }
 520
 521        login_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
 522        if (!login_mbox)
 523                goto out;
 524
 525        save_iocb = kzalloc(sizeof(*save_iocb), GFP_KERNEL);
 526        if (!save_iocb)
 527                goto out;
 528
 529        /* Save info from cmd IOCB to be used in rsp after all mboxes complete */
 530        memcpy((uint8_t *)save_iocb, (uint8_t *)cmdiocb,
 531               sizeof(struct lpfc_iocbq));
 532
 533        /* Registering an existing RPI behaves differently for SLI3 vs SLI4 */
 534        if (phba->sli_rev == LPFC_SLI_REV4)
 535                lpfc_unreg_rpi(vport, ndlp);
 536
 537        /* Issue REG_LOGIN first, before ACCing the PLOGI, thus we will
 538         * always be deferring the ACC.
 539         */
 540        rc = lpfc_reg_rpi(phba, vport->vpi, icmd->un.rcvels.remoteID,
 541                            (uint8_t *)sp, login_mbox, ndlp->nlp_rpi);
 542        if (rc)
 543                goto out;
 544
 545        login_mbox->mbox_cmpl = lpfc_mbx_cmpl_reg_login;
 546        login_mbox->vport = vport;
 547
 548        /*
 549         * If there is an outstanding PLOGI issued, abort it before
 550         * sending ACC rsp for received PLOGI. If pending plogi
 551         * is not canceled here, the plogi will be rejected by
 552         * remote port and will be retried. On a configuration with
 553         * single discovery thread, this will cause a huge delay in
 554         * discovery. Also this will cause multiple state machines
 555         * running in parallel for this node.
 556         * This only applies to a fabric environment.
 557         */
 558        if ((ndlp->nlp_state == NLP_STE_PLOGI_ISSUE) &&
 559            (vport->fc_flag & FC_FABRIC)) {
 560                /* software abort outstanding PLOGI */
 561                lpfc_els_abort(phba, ndlp);
 562        }
 563
 564        if ((vport->port_type == LPFC_NPIV_PORT &&
 565             vport->cfg_restrict_login)) {
 566
 567                /* no deferred ACC */
 568                kfree(save_iocb);
 569
 570                /* In order to preserve RPIs, we want to clean up
 571                 * the default RPI the firmware created to rcv
 572                 * this ELS request. The only way to do this is
 573                 * to register, then unregister the RPI.
 574                 */
 575                spin_lock_irq(&ndlp->lock);
 576                ndlp->nlp_flag |= (NLP_RM_DFLT_RPI | NLP_ACC_REGLOGIN |
 577                                   NLP_RCV_PLOGI);
 578                spin_unlock_irq(&ndlp->lock);
 579                stat.un.b.lsRjtRsnCode = LSRJT_INVALID_CMD;
 580                stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
 581                rc = lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb,
 582                        ndlp, login_mbox);
 583                if (rc)
 584                        mempool_free(login_mbox, phba->mbox_mem_pool);
 585                return 1;
 586        }
 587
 588        /* So the order here should be:
 589         * SLI3 pt2pt
 590         *   Issue CONFIG_LINK mbox
 591         *   CONFIG_LINK cmpl
 592         * SLI4 pt2pt
 593         *   Issue REG_VFI mbox
 594         *   REG_VFI cmpl
 595         * SLI4
 596         *   Issue UNREG RPI mbx
 597         *   UNREG RPI cmpl
 598         * Issue REG_RPI mbox
 599         * REG RPI cmpl
 600         * Issue PLOGI ACC
 601         * PLOGI ACC cmpl
 602         */
 603        login_mbox->mbox_cmpl = lpfc_defer_plogi_acc;
 604        login_mbox->ctx_ndlp = lpfc_nlp_get(ndlp);
 605        login_mbox->context3 = save_iocb; /* For PLOGI ACC */
 606
 607        spin_lock_irq(&ndlp->lock);
 608        ndlp->nlp_flag |= (NLP_ACC_REGLOGIN | NLP_RCV_PLOGI);
 609        spin_unlock_irq(&ndlp->lock);
 610
 611        /* Start the ball rolling by issuing REG_LOGIN here */
 612        rc = lpfc_sli_issue_mbox(phba, login_mbox, MBX_NOWAIT);
 613        if (rc == MBX_NOT_FINISHED)
 614                goto out;
 615        lpfc_nlp_set_state(vport, ndlp, NLP_STE_REG_LOGIN_ISSUE);
 616
 617        return 1;
 618out:
 619        kfree(save_iocb);
 620        if (login_mbox)
 621                mempool_free(login_mbox, phba->mbox_mem_pool);
 622
 623        stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
 624        stat.un.b.lsRjtRsnCodeExp = LSEXP_OUT_OF_RESOURCE;
 625        lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
 626        return 0;
 627}
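/* Note on the out: error path above: if the CONFIG_LINK or REG_LOGIN
 * mailbox work or the saved-IOCB allocation fails, whatever was set up
 * so far is released and the PLOGI is rejected with LSRJT_UNABLE_TPC /
 * LSEXP_OUT_OF_RESOURCE rather than being left unanswered.
 */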
 628
 629/**
 630 * lpfc_mbx_cmpl_resume_rpi - Resume RPI completion routine
 631 * @phba: pointer to lpfc hba data structure.
 632 * @mboxq: pointer to mailbox object
 633 *
 634 * This routine is invoked to issue a completion to a rcv'ed
 635 * ADISC or PDISC after the paused RPI has been resumed.
 636 **/
 637static void
 638lpfc_mbx_cmpl_resume_rpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
 639{
 640        struct lpfc_vport *vport;
 641        struct lpfc_iocbq *elsiocb;
 642        struct lpfc_nodelist *ndlp;
 643        uint32_t cmd;
 644
 645        elsiocb = (struct lpfc_iocbq *)mboxq->ctx_buf;
 646        ndlp = (struct lpfc_nodelist *)mboxq->ctx_ndlp;
 647        vport = mboxq->vport;
 648        cmd = elsiocb->drvrTimeout;
 649
 650        if (cmd == ELS_CMD_ADISC) {
 651                lpfc_els_rsp_adisc_acc(vport, elsiocb, ndlp);
 652        } else {
 653                lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, elsiocb,
 654                        ndlp, NULL);
 655        }
 656        kfree(elsiocb);
 657        mempool_free(mboxq, phba->mbox_mem_pool);
 658}
 659
 660static int
 661lpfc_rcv_padisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
 662                struct lpfc_iocbq *cmdiocb)
 663{
 664        struct lpfc_iocbq  *elsiocb;
 665        struct lpfc_dmabuf *pcmd;
 666        struct serv_parm   *sp;
 667        struct lpfc_name   *pnn, *ppn;
 668        struct ls_rjt stat;
 669        ADISC *ap;
 670        IOCB_t *icmd;
 671        uint32_t *lp;
 672        uint32_t cmd;
 673
 674        pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
 675        lp = (uint32_t *) pcmd->virt;
 676
 677        cmd = *lp++;
 678        if (cmd == ELS_CMD_ADISC) {
 679                ap = (ADISC *) lp;
 680                pnn = (struct lpfc_name *) & ap->nodeName;
 681                ppn = (struct lpfc_name *) & ap->portName;
 682        } else {
 683                sp = (struct serv_parm *) lp;
 684                pnn = (struct lpfc_name *) & sp->nodeName;
 685                ppn = (struct lpfc_name *) & sp->portName;
 686        }
 687
 688        icmd = &cmdiocb->iocb;
 689        if (icmd->ulpStatus == 0 && lpfc_check_adisc(vport, ndlp, pnn, ppn)) {
 690
 691                /*
 692                 * As soon as we send ACC, the remote NPort can
 693                 * start sending us data. Thus, for SLI4 we must
 694                 * resume the RPI before the ACC goes out.
 695                 */
 696                if (vport->phba->sli_rev == LPFC_SLI_REV4) {
 697                        elsiocb = kmalloc(sizeof(struct lpfc_iocbq),
 698                                GFP_KERNEL);
 699                        if (elsiocb) {
 700
 701                                /* Save info from cmd IOCB used in rsp */
 702                                memcpy((uint8_t *)elsiocb, (uint8_t *)cmdiocb,
 703                                        sizeof(struct lpfc_iocbq));
 704
 705                                /* Save the ELS cmd */
 706                                elsiocb->drvrTimeout = cmd;
 707
 708                                lpfc_sli4_resume_rpi(ndlp,
 709                                        lpfc_mbx_cmpl_resume_rpi, elsiocb);
 710                                goto out;
 711                        }
 712                }
 713
 714                if (cmd == ELS_CMD_ADISC) {
 715                        lpfc_els_rsp_adisc_acc(vport, cmdiocb, ndlp);
 716                } else {
 717                        lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, cmdiocb,
 718                                ndlp, NULL);
 719                }
 720out:
 721                /* If we are authenticated, move to the proper state.
 722                 * It is possible an ADISC arrived and the remote nport
 723                 * is already in MAPPED or UNMAPPED state.  Catch this
 724                 * condition and don't set the nlp_state again because
 725                 * it causes an unnecessary transport unregister/register.
 726                 */
 727                if (ndlp->nlp_type & (NLP_FCP_TARGET | NLP_NVME_TARGET)) {
 728                        if (ndlp->nlp_state != NLP_STE_MAPPED_NODE)
 729                                lpfc_nlp_set_state(vport, ndlp,
 730                                                   NLP_STE_MAPPED_NODE);
 731                }
 732
 733                return 1;
 734        }
 735        /* Reject this request because of invalid parameters */
 736        stat.un.b.lsRjtRsvd0 = 0;
 737        stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
 738        stat.un.b.lsRjtRsnCodeExp = LSEXP_SPARM_OPTIONS;
 739        stat.un.b.vendorUnique = 0;
 740        lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
 741
 742        /* 1 sec timeout */
 743        mod_timer(&ndlp->nlp_delayfunc, jiffies + msecs_to_jiffies(1000));
 744
 745        spin_lock_irq(&ndlp->lock);
 746        ndlp->nlp_flag |= NLP_DELAY_TMO;
 747        spin_unlock_irq(&ndlp->lock);
 748        ndlp->nlp_last_elscmd = ELS_CMD_PLOGI;
 749        ndlp->nlp_prev_state = ndlp->nlp_state;
 750        lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
 751        return 0;
 752}
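/* Note: for SLI4 the ACC above is deferred to lpfc_mbx_cmpl_resume_rpi()
 * so the RPI is resumed before the remote port can start sending data.
 * The kmalloc'ed copy of the command IOCB carries the received ELS
 * opcode in its drvrTimeout field, which is how the completion decides
 * between an ADISC ACC and a PLOGI ACC.
 */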
 753
 754static int
 755lpfc_rcv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
 756              struct lpfc_iocbq *cmdiocb, uint32_t els_cmd)
 757{
 758        struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
 759        struct lpfc_hba    *phba = vport->phba;
 760        struct lpfc_vport **vports;
 761        int i, active_vlink_present = 0;
 762
 763        /* Put ndlp in NPR state with 1 sec timeout for plogi, ACC logo */
 764        /* Only call LOGO ACC for the first LOGO; this avoids sending unnecessary
 765         * PLOGIs during LOGO storms from a device.
 766         */
 767        spin_lock_irq(&ndlp->lock);
 768        ndlp->nlp_flag |= NLP_LOGO_ACC;
 769        spin_unlock_irq(&ndlp->lock);
 770        if (els_cmd == ELS_CMD_PRLO)
 771                lpfc_els_rsp_acc(vport, ELS_CMD_PRLO, cmdiocb, ndlp, NULL);
 772        else
 773                lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
 774
 775        /* Notify transport of connectivity loss to trigger cleanup. */
 776        if (phba->nvmet_support &&
 777            ndlp->nlp_state == NLP_STE_UNMAPPED_NODE)
 778                lpfc_nvmet_invalidate_host(phba, ndlp);
 779
 780        if (ndlp->nlp_DID == Fabric_DID) {
 781                if (vport->port_state <= LPFC_FDISC)
 782                        goto out;
 783                lpfc_linkdown_port(vport);
 784                spin_lock_irq(shost->host_lock);
 785                vport->fc_flag |= FC_VPORT_LOGO_RCVD;
 786                spin_unlock_irq(shost->host_lock);
 787                vports = lpfc_create_vport_work_array(phba);
 788                if (vports) {
 789                        for (i = 0; i <= phba->max_vports && vports[i] != NULL;
 790                                        i++) {
 791                                if ((!(vports[i]->fc_flag &
 792                                        FC_VPORT_LOGO_RCVD)) &&
 793                                        (vports[i]->port_state > LPFC_FDISC)) {
 794                                        active_vlink_present = 1;
 795                                        break;
 796                                }
 797                        }
 798                        lpfc_destroy_vport_work_array(phba, vports);
 799                }
 800
 801                /*
 802                 * Don't re-instantiate if vport is marked for deletion.
 803                 * If we are here first then vport_delete is going to wait
 804                 * for discovery to complete.
 805                 */
 806                if (!(vport->load_flag & FC_UNLOADING) &&
 807                                        active_vlink_present) {
 808                        /*
 809                         * If there are other active VLinks present,
 810                         * re-instantiate the Vlink using FDISC.
 811                         */
 812                        mod_timer(&ndlp->nlp_delayfunc,
 813                                  jiffies + msecs_to_jiffies(1000));
 814                        spin_lock_irq(&ndlp->lock);
 815                        ndlp->nlp_flag |= NLP_DELAY_TMO;
 816                        spin_unlock_irq(&ndlp->lock);
 817                        ndlp->nlp_last_elscmd = ELS_CMD_FDISC;
 818                        vport->port_state = LPFC_FDISC;
 819                } else {
 820                        spin_lock_irq(shost->host_lock);
 821                        phba->pport->fc_flag &= ~FC_LOGO_RCVD_DID_CHNG;
 822                        spin_unlock_irq(shost->host_lock);
 823                        lpfc_retry_pport_discovery(phba);
 824                }
 825        } else if ((!(ndlp->nlp_type & NLP_FABRIC) &&
 826                ((ndlp->nlp_type & NLP_FCP_TARGET) ||
 827                (ndlp->nlp_type & NLP_NVME_TARGET) ||
 828                (vport->fc_flag & FC_PT2PT))) ||
 829                (ndlp->nlp_state == NLP_STE_ADISC_ISSUE)) {
 830                /* Only try to re-login if this is NOT a Fabric Node
 831                 * AND the remote NPORT is a FCP/NVME Target or we
 832                 * are in pt2pt mode. NLP_STE_ADISC_ISSUE is a special
 833                 * case for LOGO as a response to ADISC behavior.
 834                 */
 835                mod_timer(&ndlp->nlp_delayfunc,
 836                          jiffies + msecs_to_jiffies(1000 * 1));
 837                spin_lock_irq(&ndlp->lock);
 838                ndlp->nlp_flag |= NLP_DELAY_TMO;
 839                spin_unlock_irq(&ndlp->lock);
 840
 841                ndlp->nlp_last_elscmd = ELS_CMD_PLOGI;
 842        }
 843out:
 844        ndlp->nlp_prev_state = ndlp->nlp_state;
 845        lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
 846
 847        spin_lock_irq(&ndlp->lock);
 848        ndlp->nlp_flag &= ~NLP_NPR_ADISC;
 849        spin_unlock_irq(&ndlp->lock);
 850        /* The driver has to wait until the ACC completes before it continues
 851         * processing the LOGO.  The action will resume in
 852         * lpfc_cmpl_els_logo_acc routine. Since part of processing includes an
 853         * unreg_login, the driver waits so the ACC does not get aborted.
 854         */
 855        return 0;
 856}
 857
 858static uint32_t
 859lpfc_rcv_prli_support_check(struct lpfc_vport *vport,
 860                            struct lpfc_nodelist *ndlp,
 861                            struct lpfc_iocbq *cmdiocb)
 862{
 863        struct ls_rjt stat;
 864        uint32_t *payload;
 865        uint32_t cmd;
 866
 867        payload = ((struct lpfc_dmabuf *)cmdiocb->context2)->virt;
 868        cmd = *payload;
 869        if (vport->phba->nvmet_support) {
 870                /* Must be an NVME PRLI */
 871                if (cmd == ELS_CMD_PRLI)
 872                        goto out;
 873        } else {
 874                /* Initiator mode. */
 875                if (!vport->nvmei_support && (cmd == ELS_CMD_NVMEPRLI))
 876                        goto out;
 877        }
 878        return 1;
 879out:
 880        lpfc_printf_vlog(vport, KERN_WARNING, LOG_NVME_DISC,
 881                         "6115 Rcv PRLI (%x) check failed: ndlp rpi %d "
 882                         "state x%x flags x%x\n",
 883                         cmd, ndlp->nlp_rpi, ndlp->nlp_state,
 884                         ndlp->nlp_flag);
 885        memset(&stat, 0, sizeof(struct ls_rjt));
 886        stat.un.b.lsRjtRsnCode = LSRJT_CMD_UNSUPPORTED;
 887        stat.un.b.lsRjtRsnCodeExp = LSEXP_REQ_UNSUPPORTED;
 888        lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb,
 889                            ndlp, NULL);
 890        return 0;
 891}
 892
 893static void
 894lpfc_rcv_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
 895              struct lpfc_iocbq *cmdiocb)
 896{
 897        struct lpfc_hba  *phba = vport->phba;
 898        struct lpfc_dmabuf *pcmd;
 899        uint32_t *lp;
 900        PRLI *npr;
 901        struct fc_rport *rport = ndlp->rport;
 902        u32 roles;
 903
 904        pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
 905        lp = (uint32_t *) pcmd->virt;
 906        npr = (PRLI *) ((uint8_t *) lp + sizeof (uint32_t));
 907
 908        if ((npr->prliType == PRLI_FCP_TYPE) ||
 909            (npr->prliType == PRLI_NVME_TYPE)) {
 910                if (npr->initiatorFunc) {
 911                        if (npr->prliType == PRLI_FCP_TYPE)
 912                                ndlp->nlp_type |= NLP_FCP_INITIATOR;
 913                        if (npr->prliType == PRLI_NVME_TYPE)
 914                                ndlp->nlp_type |= NLP_NVME_INITIATOR;
 915                }
 916                if (npr->targetFunc) {
 917                        if (npr->prliType == PRLI_FCP_TYPE)
 918                                ndlp->nlp_type |= NLP_FCP_TARGET;
 919                        if (npr->prliType == PRLI_NVME_TYPE)
 920                                ndlp->nlp_type |= NLP_NVME_TARGET;
 921                        if (npr->writeXferRdyDis)
 922                                ndlp->nlp_flag |= NLP_FIRSTBURST;
 923                }
 924                if (npr->Retry && ndlp->nlp_type &
 925                                        (NLP_FCP_INITIATOR | NLP_FCP_TARGET))
 926                        ndlp->nlp_fcp_info |= NLP_FCP_2_DEVICE;
 927
 928                if (npr->Retry && phba->nsler &&
 929                    ndlp->nlp_type & (NLP_NVME_INITIATOR | NLP_NVME_TARGET))
 930                        ndlp->nlp_nvme_info |= NLP_NVME_NSLER;
 931
 932
 933                /* If this driver is in nvme target mode, set the ndlp's fc4
 934                 * type to NVME provided the PRLI response claims NVME FC4
 935                 * type.  Target mode does not issue gft_id so doesn't get
 936                 * the fc4 type set until now.
 937                 */
 938                if (phba->nvmet_support && (npr->prliType == PRLI_NVME_TYPE)) {
 939                        ndlp->nlp_fc4_type |= NLP_FC4_NVME;
 940                        lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
 941                }
 942
 943                /* Fabric Controllers send FCP PRLI as an initiator but should
 944                 * not get recognized as FCP type and registered with transport.
 945                 */
 946                if (npr->prliType == PRLI_FCP_TYPE &&
 947                    !(ndlp->nlp_type & NLP_FABRIC))
 948                        ndlp->nlp_fc4_type |= NLP_FC4_FCP;
 949        }
 950        if (rport) {
 951                /* We need to update the rport role values */
 952                roles = FC_RPORT_ROLE_UNKNOWN;
 953                if (ndlp->nlp_type & NLP_FCP_INITIATOR)
 954                        roles |= FC_RPORT_ROLE_FCP_INITIATOR;
 955                if (ndlp->nlp_type & NLP_FCP_TARGET)
 956                        roles |= FC_RPORT_ROLE_FCP_TARGET;
 957
 958                lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT,
 959                        "rport rolechg:   role:x%x did:x%x flg:x%x",
 960                        roles, ndlp->nlp_DID, ndlp->nlp_flag);
 961
 962                if (vport->cfg_enable_fc4_type != LPFC_ENABLE_NVME)
 963                        fc_remote_port_rolechg(rport, roles);
 964        }
 965}
 966
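/* Decide whether ADISC can be used to re-authenticate this node.
 * Returns 1 (and sets NLP_NPR_ADISC) only when the RPI is still
 * registered, the link is not pt2pt, and cfg_use_adisc allows ADISC
 * for RSCN recovery or for an FCP-2 target.  Otherwise the flag is
 * cleared, the RPI is unregistered if it was registered, and 0 is
 * returned so the node is rediscovered with a PLOGI instead.
 */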
 967static uint32_t
 968lpfc_disc_set_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
 969{
 970        if (!(ndlp->nlp_flag & NLP_RPI_REGISTERED)) {
 971                spin_lock_irq(&ndlp->lock);
 972                ndlp->nlp_flag &= ~NLP_NPR_ADISC;
 973                spin_unlock_irq(&ndlp->lock);
 974                return 0;
 975        }
 976
 977        if (!(vport->fc_flag & FC_PT2PT)) {
 978                /* Check config parameter use-adisc or FCP-2 */
 979                if (vport->cfg_use_adisc && ((vport->fc_flag & FC_RSCN_MODE) ||
 980                    ((ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) &&
 981                     (ndlp->nlp_type & NLP_FCP_TARGET)))) {
 982                        spin_lock_irq(&ndlp->lock);
 983                        ndlp->nlp_flag |= NLP_NPR_ADISC;
 984                        spin_unlock_irq(&ndlp->lock);
 985                        return 1;
 986                }
 987        }
 988
 989        spin_lock_irq(&ndlp->lock);
 990        ndlp->nlp_flag &= ~NLP_NPR_ADISC;
 991        spin_unlock_irq(&ndlp->lock);
 992        lpfc_unreg_rpi(vport, ndlp);
 993        return 0;
 994}
 995
 996/**
 997 * lpfc_release_rpi - Release a RPI by issuing unreg_login mailbox cmd.
 998 * @phba : Pointer to lpfc_hba structure.
 999 * @vport: Pointer to lpfc_vport structure.
1000 * @ndlp: Pointer to lpfc_nodelist structure.
1001 * @rpi  : rpi to be released.
1002 *
1003 * This function will send a unreg_login mailbox command to the firmware
1004 * to release a rpi.
1005 **/
1006static void
1007lpfc_release_rpi(struct lpfc_hba *phba, struct lpfc_vport *vport,
1008                 struct lpfc_nodelist *ndlp, uint16_t rpi)
1009{
1010        LPFC_MBOXQ_t *pmb;
1011        int rc;
1012
1013        /* If there is already an UNREG in progress for this ndlp,
1014         * no need to queue up another one.
1015         */
1016        if (ndlp->nlp_flag & NLP_UNREG_INP) {
1017                lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
1018                                 "1435 release_rpi SKIP UNREG x%x on "
1019                                 "NPort x%x deferred x%x  flg x%x "
1020                                 "Data: x%px\n",
1021                                 ndlp->nlp_rpi, ndlp->nlp_DID,
1022                                 ndlp->nlp_defer_did,
1023                                 ndlp->nlp_flag, ndlp);
1024                return;
1025        }
1026
1027        pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
1028                        GFP_KERNEL);
1029        if (!pmb)
1030                lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
1031                                 "2796 mailbox memory allocation failed \n");
1032        else {
1033                lpfc_unreg_login(phba, vport->vpi, rpi, pmb);
1034                pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
1035                pmb->vport = vport;
1036                pmb->ctx_ndlp = lpfc_nlp_get(ndlp);
1037                if (!pmb->ctx_ndlp) {
1038                        mempool_free(pmb, phba->mbox_mem_pool);
1039                        return;
1040                }
1041
1042                if (((ndlp->nlp_DID & Fabric_DID_MASK) != Fabric_DID_MASK) &&
1043                    (!(vport->fc_flag & FC_OFFLINE_MODE)))
1044                        ndlp->nlp_flag |= NLP_UNREG_INP;
1045
1046                lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
1047                                 "1437 release_rpi UNREG x%x "
1048                                 "on NPort x%x flg x%x\n",
1049                                 ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag);
1050
1051                rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
1052                if (rc == MBX_NOT_FINISHED)
1053                        mempool_free(pmb, phba->mbox_mem_pool);
1054        }
1055}
1056
1057static uint32_t
1058lpfc_disc_illegal(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1059                  void *arg, uint32_t evt)
1060{
1061        struct lpfc_hba *phba;
1062        LPFC_MBOXQ_t *pmb = (LPFC_MBOXQ_t *) arg;
1063        uint16_t rpi;
1064
1065        phba = vport->phba;
1066        /* Release the RPI if reglogin completing */
1067        if (!(phba->pport->load_flag & FC_UNLOADING) &&
1068                (evt == NLP_EVT_CMPL_REG_LOGIN) &&
1069                (!pmb->u.mb.mbxStatus)) {
1070                rpi = pmb->u.mb.un.varWords[0];
1071                lpfc_release_rpi(phba, vport, ndlp, rpi);
1072        }
1073        lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
1074                         "0271 Illegal State Transition: node x%x "
1075                         "event x%x, state x%x Data: x%x x%x\n",
1076                         ndlp->nlp_DID, evt, ndlp->nlp_state, ndlp->nlp_rpi,
1077                         ndlp->nlp_flag);
1078        return ndlp->nlp_state;
1079}
1080
1081static uint32_t
1082lpfc_cmpl_plogi_illegal(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1083                  void *arg, uint32_t evt)
1084{
1085        /* This transition is only legal if we previously
1086         * rcv'ed a PLOGI. Since we don't want 2 discovery threads
1087         * working on the same NPortID, do nothing for this thread
1088         * to stop it.
1089         */
1090        if (!(ndlp->nlp_flag & NLP_RCV_PLOGI)) {
1091                lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
1092                                 "0272 Illegal State Transition: node x%x "
1093                                 "event x%x, state x%x Data: x%x x%x\n",
1094                                  ndlp->nlp_DID, evt, ndlp->nlp_state,
1095                                  ndlp->nlp_rpi, ndlp->nlp_flag);
1096        }
1097        return ndlp->nlp_state;
1098}
1099
1100/* Start of Discovery State Machine routines */
1101
1102static uint32_t
1103lpfc_rcv_plogi_unused_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1104                           void *arg, uint32_t evt)
1105{
1106        struct lpfc_iocbq *cmdiocb;
1107
1108        cmdiocb = (struct lpfc_iocbq *) arg;
1109
1110        if (lpfc_rcv_plogi(vport, ndlp, cmdiocb)) {
1111                return ndlp->nlp_state;
1112        }
1113        return NLP_STE_FREED_NODE;
1114}
1115
1116static uint32_t
1117lpfc_rcv_els_unused_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1118                         void *arg, uint32_t evt)
1119{
1120        lpfc_issue_els_logo(vport, ndlp, 0);
1121        return ndlp->nlp_state;
1122}
1123
1124static uint32_t
1125lpfc_rcv_logo_unused_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1126                          void *arg, uint32_t evt)
1127{
1128        struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
1129
1130        spin_lock_irq(&ndlp->lock);
1131        ndlp->nlp_flag |= NLP_LOGO_ACC;
1132        spin_unlock_irq(&ndlp->lock);
1133        lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
1134
1135        return ndlp->nlp_state;
1136}
1137
1138static uint32_t
1139lpfc_cmpl_logo_unused_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1140                           void *arg, uint32_t evt)
1141{
1142        return NLP_STE_FREED_NODE;
1143}
1144
1145static uint32_t
1146lpfc_device_rm_unused_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1147                           void *arg, uint32_t evt)
1148{
1149        return NLP_STE_FREED_NODE;
1150}
1151
1152static uint32_t
1153lpfc_device_recov_unused_node(struct lpfc_vport *vport,
1154                        struct lpfc_nodelist *ndlp,
1155                           void *arg, uint32_t evt)
1156{
1157        return ndlp->nlp_state;
1158}
1159
1160static uint32_t
1161lpfc_rcv_plogi_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1162                           void *arg, uint32_t evt)
1163{
1164        struct Scsi_Host   *shost = lpfc_shost_from_vport(vport);
1165        struct lpfc_hba   *phba = vport->phba;
1166        struct lpfc_iocbq *cmdiocb = arg;
1167        struct lpfc_dmabuf *pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
1168        uint32_t *lp = (uint32_t *) pcmd->virt;
1169        struct serv_parm *sp = (struct serv_parm *) (lp + 1);
1170        struct ls_rjt stat;
1171        int port_cmp;
1172
1173        memset(&stat, 0, sizeof (struct ls_rjt));
1174
1175        /* For a PLOGI, we only accept if our portname is less
1176         * than the remote portname.
1177         */
1178        phba->fc_stat.elsLogiCol++;
1179        port_cmp = memcmp(&vport->fc_portname, &sp->portName,
1180                          sizeof(struct lpfc_name));
1181
1182        if (port_cmp >= 0) {
1183                /* Reject this request because the remote node will accept
1184                   ours */
1185                stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
1186                stat.un.b.lsRjtRsnCodeExp = LSEXP_CMD_IN_PROGRESS;
1187                lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp,
1188                        NULL);
1189        } else {
1190                if (lpfc_rcv_plogi(vport, ndlp, cmdiocb) &&
1191                    (ndlp->nlp_flag & NLP_NPR_2B_DISC) &&
1192                    (vport->num_disc_nodes)) {
1193                        spin_lock_irq(&ndlp->lock);
1194                        ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
1195                        spin_unlock_irq(&ndlp->lock);
1196                        /* Check if there are more PLOGIs to be sent */
1197                        lpfc_more_plogi(vport);
1198                        if (vport->num_disc_nodes == 0) {
1199                                spin_lock_irq(shost->host_lock);
1200                                vport->fc_flag &= ~FC_NDISC_ACTIVE;
1201                                spin_unlock_irq(shost->host_lock);
1202                                lpfc_can_disctmo(vport);
1203                                lpfc_end_rscn(vport);
1204                        }
1205                }
1206        } /* If our portname was less */
1207
1208        return ndlp->nlp_state;
1209}
1210
1211static uint32_t
1212lpfc_rcv_prli_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1213                          void *arg, uint32_t evt)
1214{
1215        struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
1216        struct ls_rjt     stat;
1217
1218        memset(&stat, 0, sizeof (struct ls_rjt));
1219        stat.un.b.lsRjtRsnCode = LSRJT_LOGICAL_BSY;
1220        stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
1221        lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
1222        return ndlp->nlp_state;
1223}
1224
1225static uint32_t
1226lpfc_rcv_logo_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1227                          void *arg, uint32_t evt)
1228{
1229        struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
1230
1231        /* Retrieve RPI from LOGO IOCB. RPI is used for CMD_ABORT_XRI_CN */
1232        if (vport->phba->sli_rev == LPFC_SLI_REV3)
1233                ndlp->nlp_rpi = cmdiocb->iocb.ulpIoTag;
1234        /* software abort outstanding PLOGI */
1235        lpfc_els_abort(vport->phba, ndlp);
1236
1237        lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_LOGO);
1238        return ndlp->nlp_state;
1239}
1240
1241static uint32_t
1242lpfc_rcv_els_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1243                         void *arg, uint32_t evt)
1244{
1245        struct lpfc_hba   *phba = vport->phba;
1246        struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
1247
1248        /* software abort outstanding PLOGI */
1249        lpfc_els_abort(phba, ndlp);
1250
1251        if (evt == NLP_EVT_RCV_LOGO) {
1252                lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
1253        } else {
1254                lpfc_issue_els_logo(vport, ndlp, 0);
1255        }
1256
1257        /* Put ndlp in npr state; set plogi timer for 1 sec */
1258        mod_timer(&ndlp->nlp_delayfunc, jiffies + msecs_to_jiffies(1000 * 1));
1259        spin_lock_irq(&ndlp->lock);
1260        ndlp->nlp_flag |= NLP_DELAY_TMO;
1261        spin_unlock_irq(&ndlp->lock);
1262        ndlp->nlp_last_elscmd = ELS_CMD_PLOGI;
1263        ndlp->nlp_prev_state = NLP_STE_PLOGI_ISSUE;
1264        lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
1265
1266        return ndlp->nlp_state;
1267}
1268
1269static uint32_t
1270lpfc_cmpl_plogi_plogi_issue(struct lpfc_vport *vport,
1271                            struct lpfc_nodelist *ndlp,
1272                            void *arg,
1273                            uint32_t evt)
1274{
1275        struct lpfc_hba    *phba = vport->phba;
1276        struct lpfc_iocbq  *cmdiocb, *rspiocb;
1277        struct lpfc_dmabuf *pcmd, *prsp, *mp;
1278        uint32_t *lp;
1279        uint32_t vid, flag;
1280        IOCB_t *irsp;
1281        struct serv_parm *sp;
1282        uint32_t ed_tov;
1283        LPFC_MBOXQ_t *mbox;
1284        int rc;
1285
1286        cmdiocb = (struct lpfc_iocbq *) arg;
1287        rspiocb = cmdiocb->context_un.rsp_iocb;
1288
1289        if (ndlp->nlp_flag & NLP_ACC_REGLOGIN) {
1290                /* Recovery from PLOGI collision logic */
1291                return ndlp->nlp_state;
1292        }
1293
1294        irsp = &rspiocb->iocb;
1295
1296        if (irsp->ulpStatus)
1297                goto out;
1298
1299        pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
1300
1301        prsp = list_get_first(&pcmd->list, struct lpfc_dmabuf, list);
1302        if (!prsp)
1303                goto out;
1304
1305        lp = (uint32_t *) prsp->virt;
1306        sp = (struct serv_parm *) ((uint8_t *) lp + sizeof (uint32_t));
1307
1308        /* Some switches have FDMI servers returning 0 for WWN */
1309        if ((ndlp->nlp_DID != FDMI_DID) &&
1310                (wwn_to_u64(sp->portName.u.wwn) == 0 ||
1311                wwn_to_u64(sp->nodeName.u.wwn) == 0)) {
1312                lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
1313                                 "0142 PLOGI RSP: Invalid WWN.\n");
1314                goto out;
1315        }
1316        if (!lpfc_check_sparm(vport, ndlp, sp, CLASS3, 0))
1317                goto out;
1318        /* PLOGI chkparm OK */
1319        lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
1320                         "0121 PLOGI chkparm OK Data: x%x x%x x%x x%x\n",
1321                         ndlp->nlp_DID, ndlp->nlp_state,
1322                         ndlp->nlp_flag, ndlp->nlp_rpi);
1323        if (vport->cfg_fcp_class == 2 && (sp->cls2.classValid))
1324                ndlp->nlp_fcp_info |= CLASS2;
1325        else
1326                ndlp->nlp_fcp_info |= CLASS3;
1327
1328        ndlp->nlp_class_sup = 0;
1329        if (sp->cls1.classValid)
1330                ndlp->nlp_class_sup |= FC_COS_CLASS1;
1331        if (sp->cls2.classValid)
1332                ndlp->nlp_class_sup |= FC_COS_CLASS2;
1333        if (sp->cls3.classValid)
1334                ndlp->nlp_class_sup |= FC_COS_CLASS3;
1335        if (sp->cls4.classValid)
1336                ndlp->nlp_class_sup |= FC_COS_CLASS4;
1337        ndlp->nlp_maxframe =
1338                ((sp->cmn.bbRcvSizeMsb & 0x0F) << 8) | sp->cmn.bbRcvSizeLsb;
1339
1340        if ((vport->fc_flag & FC_PT2PT) &&
1341            (vport->fc_flag & FC_PT2PT_PLOGI)) {
1342                ed_tov = be32_to_cpu(sp->cmn.e_d_tov);
1343                if (sp->cmn.edtovResolution) {
1344                        /* E_D_TOV ticks are in nanoseconds */
1345                        ed_tov = (phba->fc_edtov + 999999) / 1000000;
1346                }
1347
1348                ndlp->nlp_flag &= ~NLP_SUPPRESS_RSP;
1349                if ((phba->sli.sli_flag & LPFC_SLI_SUPPRESS_RSP) &&
1350                    sp->cmn.valid_vendor_ver_level) {
1351                        vid = be32_to_cpu(sp->un.vv.vid);
1352                        flag = be32_to_cpu(sp->un.vv.flags);
1353                        if ((vid == LPFC_VV_EMLX_ID) &&
1354                            (flag & LPFC_VV_SUPPRESS_RSP))
1355                                ndlp->nlp_flag |= NLP_SUPPRESS_RSP;
1356                }
1357
1358                /*
1359                 * Use the larger EDTOV
1360                 * RATOV = 2 * EDTOV for pt-to-pt
1361                 */
1362                if (ed_tov > phba->fc_edtov)
1363                        phba->fc_edtov = ed_tov;
1364                phba->fc_ratov = (2 * phba->fc_edtov) / 1000;
1365
1366                memcpy(&phba->fc_fabparam, sp, sizeof(struct serv_parm));
1367
1368                /* Issue config_link / reg_vfi to account for updated TOVs */
1369                if (phba->sli_rev == LPFC_SLI_REV4) {
1370                        lpfc_issue_reg_vfi(vport);
1371                } else {
1372                        mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1373                        if (!mbox) {
1374                                lpfc_printf_vlog(vport, KERN_ERR,
1375                                                 LOG_TRACE_EVENT,
1376                                                 "0133 PLOGI: no memory "
1377                                                 "for config_link "
1378                                                 "Data: x%x x%x x%x x%x\n",
1379                                                 ndlp->nlp_DID, ndlp->nlp_state,
1380                                                 ndlp->nlp_flag, ndlp->nlp_rpi);
1381                                goto out;
1382                        }
1383
1384                        lpfc_config_link(phba, mbox);
1385
1386                        mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
1387                        mbox->vport = vport;
1388                        rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
1389                        if (rc == MBX_NOT_FINISHED) {
1390                                mempool_free(mbox, phba->mbox_mem_pool);
1391                                goto out;
1392                        }
1393                }
1394        }
1395
1396        lpfc_unreg_rpi(vport, ndlp);
1397
1398        mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1399        if (!mbox) {
1400                lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
1401                                 "0018 PLOGI: no memory for reg_login "
1402                                 "Data: x%x x%x x%x x%x\n",
1403                                 ndlp->nlp_DID, ndlp->nlp_state,
1404                                 ndlp->nlp_flag, ndlp->nlp_rpi);
1405                goto out;
1406        }
1407
1408        if (lpfc_reg_rpi(phba, vport->vpi, irsp->un.elsreq64.remoteID,
1409                         (uint8_t *) sp, mbox, ndlp->nlp_rpi) == 0) {
1410                switch (ndlp->nlp_DID) {
1411                case NameServer_DID:
1412                        mbox->mbox_cmpl = lpfc_mbx_cmpl_ns_reg_login;
1413                        break;
1414                case FDMI_DID:
1415                        mbox->mbox_cmpl = lpfc_mbx_cmpl_fdmi_reg_login;
1416                        break;
1417                default:
1418                        ndlp->nlp_flag |= NLP_REG_LOGIN_SEND;
1419                        mbox->mbox_cmpl = lpfc_mbx_cmpl_reg_login;
1420                }
1421
1422                mbox->ctx_ndlp = lpfc_nlp_get(ndlp);
1423                if (!mbox->ctx_ndlp)
1424                        goto out;
1425
1426                mbox->vport = vport;
1427                if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT)
1428                    != MBX_NOT_FINISHED) {
1429                        lpfc_nlp_set_state(vport, ndlp,
1430                                           NLP_STE_REG_LOGIN_ISSUE);
1431                        return ndlp->nlp_state;
1432                }
1433                if (ndlp->nlp_flag & NLP_REG_LOGIN_SEND)
1434                        ndlp->nlp_flag &= ~NLP_REG_LOGIN_SEND;
1435                /* Decrement the node reference count held by the failed
1436                 * mbox command.
1437                 */
1438                lpfc_nlp_put(ndlp);
1439                mp = (struct lpfc_dmabuf *)mbox->ctx_buf;
1440                lpfc_mbuf_free(phba, mp->virt, mp->phys);
1441                kfree(mp);
1442                mempool_free(mbox, phba->mbox_mem_pool);
1443
1444                lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
1445                                 "0134 PLOGI: cannot issue reg_login "
1446                                 "Data: x%x x%x x%x x%x\n",
1447                                 ndlp->nlp_DID, ndlp->nlp_state,
1448                                 ndlp->nlp_flag, ndlp->nlp_rpi);
1449        } else {
1450                mempool_free(mbox, phba->mbox_mem_pool);
1451
1452                lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
1453                                 "0135 PLOGI: cannot format reg_login "
1454                                 "Data: x%x x%x x%x x%x\n",
1455                                 ndlp->nlp_DID, ndlp->nlp_state,
1456                                 ndlp->nlp_flag, ndlp->nlp_rpi);
1457        }
1458
1459
1460out:
1461        if (ndlp->nlp_DID == NameServer_DID) {
1462                lpfc_vport_set_state(vport, FC_VPORT_FAILED);
1463                lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
1464                                 "0261 Cannot Register NameServer login\n");
1465        }
1466
1467        /*
1468         * In case the node reference counter does not go to zero, ensure that
1469         * the stale state for the node is not processed.
1470         */
1471
1472        ndlp->nlp_prev_state = ndlp->nlp_state;
1473        lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
1474        return NLP_STE_FREED_NODE;
1475}
1476
1477static uint32_t
1478lpfc_cmpl_logo_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1479                           void *arg, uint32_t evt)
1480{
1481        return ndlp->nlp_state;
1482}
1483
1484static uint32_t
1485lpfc_cmpl_reglogin_plogi_issue(struct lpfc_vport *vport,
1486        struct lpfc_nodelist *ndlp, void *arg, uint32_t evt)
1487{
1488        struct lpfc_hba *phba;
1489        LPFC_MBOXQ_t *pmb = (LPFC_MBOXQ_t *) arg;
1490        MAILBOX_t *mb = &pmb->u.mb;
1491        uint16_t rpi;
1492
1493        phba = vport->phba;
1494        /* Release the RPI */
1495        if (!(phba->pport->load_flag & FC_UNLOADING) &&
1496                !mb->mbxStatus) {
1497                rpi = pmb->u.mb.un.varWords[0];
1498                lpfc_release_rpi(phba, vport, ndlp, rpi);
1499        }
1500        return ndlp->nlp_state;
1501}
1502
1503static uint32_t
1504lpfc_device_rm_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1505                           void *arg, uint32_t evt)
1506{
1507        if (ndlp->nlp_flag & NLP_NPR_2B_DISC) {
1508                spin_lock_irq(&ndlp->lock);
1509                ndlp->nlp_flag |= NLP_NODEV_REMOVE;
1510                spin_unlock_irq(&ndlp->lock);
1511                return ndlp->nlp_state;
1512        } else {
1513                /* software abort outstanding PLOGI */
1514                lpfc_els_abort(vport->phba, ndlp);
1515
1516                lpfc_drop_node(vport, ndlp);
1517                return NLP_STE_FREED_NODE;
1518        }
1519}
1520
1521static uint32_t
1522lpfc_device_recov_plogi_issue(struct lpfc_vport *vport,
1523                              struct lpfc_nodelist *ndlp,
1524                              void *arg,
1525                              uint32_t evt)
1526{
1527        struct lpfc_hba  *phba = vport->phba;
1528
1529        /* Don't do anything that will mess up processing of the
1530         * previous RSCN.
1531         */
1532        if (vport->fc_flag & FC_RSCN_DEFERRED)
1533                return ndlp->nlp_state;
1534
1535        /* software abort outstanding PLOGI */
1536        lpfc_els_abort(phba, ndlp);
1537
1538        ndlp->nlp_prev_state = NLP_STE_PLOGI_ISSUE;
1539        lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
1540        spin_lock_irq(&ndlp->lock);
1541        ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
1542        spin_unlock_irq(&ndlp->lock);
1543
1544        return ndlp->nlp_state;
1545}
1546
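/* PLOGI received while our ADISC is outstanding. Abort the ADISC first; if
 * the PLOGI is acceptable, continue ADISC discovery of the remaining nodes,
 * otherwise issue our own PLOGI and move to PLOGI_ISSUE.
 */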
1547static uint32_t
1548lpfc_rcv_plogi_adisc_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1549                           void *arg, uint32_t evt)
1550{
1551        struct lpfc_hba   *phba = vport->phba;
1552        struct lpfc_iocbq *cmdiocb;
1553
1554        /* software abort outstanding ADISC */
1555        lpfc_els_abort(phba, ndlp);
1556
1557        cmdiocb = (struct lpfc_iocbq *) arg;
1558
1559        if (lpfc_rcv_plogi(vport, ndlp, cmdiocb)) {
1560                if (ndlp->nlp_flag & NLP_NPR_2B_DISC) {
1561                        spin_lock_irq(&ndlp->lock);
1562                        ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
1563                        spin_unlock_irq(&ndlp->lock);
1564                        if (vport->num_disc_nodes)
1565                                lpfc_more_adisc(vport);
1566                }
1567                return ndlp->nlp_state;
1568        }
1569        ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE;
1570        lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
1571        lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
1572
1573        return ndlp->nlp_state;
1574}
1575
1576static uint32_t
1577lpfc_rcv_prli_adisc_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1578                          void *arg, uint32_t evt)
1579{
1580        struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
1581
1582        if (lpfc_rcv_prli_support_check(vport, ndlp, cmdiocb))
1583                lpfc_els_rsp_prli_acc(vport, cmdiocb, ndlp);
1584        return ndlp->nlp_state;
1585}
1586
1587static uint32_t
1588lpfc_rcv_logo_adisc_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1589                          void *arg, uint32_t evt)
1590{
1591        struct lpfc_hba *phba = vport->phba;
1592        struct lpfc_iocbq *cmdiocb;
1593
1594        cmdiocb = (struct lpfc_iocbq *) arg;
1595
1596        /* software abort outstanding ADISC */
1597        lpfc_els_abort(phba, ndlp);
1598
1599        lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_LOGO);
1600        return ndlp->nlp_state;
1601}
1602
1603static uint32_t
1604lpfc_rcv_padisc_adisc_issue(struct lpfc_vport *vport,
1605                            struct lpfc_nodelist *ndlp,
1606                            void *arg, uint32_t evt)
1607{
1608        struct lpfc_iocbq *cmdiocb;
1609
1610        cmdiocb = (struct lpfc_iocbq *) arg;
1611
1612        lpfc_rcv_padisc(vport, ndlp, cmdiocb);
1613        return ndlp->nlp_state;
1614}
1615
1616static uint32_t
1617lpfc_rcv_prlo_adisc_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1618                          void *arg, uint32_t evt)
1619{
1620        struct lpfc_iocbq *cmdiocb;
1621
1622        cmdiocb = (struct lpfc_iocbq *) arg;
1623
1624        /* Treat like rcv logo */
1625        lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_PRLO);
1626        return ndlp->nlp_state;
1627}
1628
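/* ADISC completion. On error or a WWNN/WWPN mismatch, clear the cached
 * names, unregister the RPI and park the node in NPR with a delayed PLOGI
 * retry. On success, resume the RPI on SLI4 and move the node to MAPPED
 * (FCP/NVME target) or UNMAPPED.
 */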
1629static uint32_t
1630lpfc_cmpl_adisc_adisc_issue(struct lpfc_vport *vport,
1631                            struct lpfc_nodelist *ndlp,
1632                            void *arg, uint32_t evt)
1633{
1634        struct lpfc_hba   *phba = vport->phba;
1635        struct lpfc_iocbq *cmdiocb, *rspiocb;
1636        IOCB_t *irsp;
1637        ADISC *ap;
1638        int rc;
1639
1640        cmdiocb = (struct lpfc_iocbq *) arg;
1641        rspiocb = cmdiocb->context_un.rsp_iocb;
1642
1643        ap = (ADISC *)lpfc_check_elscmpl_iocb(phba, cmdiocb, rspiocb);
1644        irsp = &rspiocb->iocb;
1645
1646        if ((irsp->ulpStatus) ||
1647            (!lpfc_check_adisc(vport, ndlp, &ap->nodeName, &ap->portName))) {
1648                /* 1 sec timeout */
1649                mod_timer(&ndlp->nlp_delayfunc,
1650                          jiffies + msecs_to_jiffies(1000));
1651                spin_lock_irq(&ndlp->lock);
1652                ndlp->nlp_flag |= NLP_DELAY_TMO;
1653                spin_unlock_irq(&ndlp->lock);
1654                ndlp->nlp_last_elscmd = ELS_CMD_PLOGI;
1655
1656                memset(&ndlp->nlp_nodename, 0, sizeof(struct lpfc_name));
1657                memset(&ndlp->nlp_portname, 0, sizeof(struct lpfc_name));
1658
1659                ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE;
1660                lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
1661                lpfc_unreg_rpi(vport, ndlp);
1662                return ndlp->nlp_state;
1663        }
1664
1665        if (phba->sli_rev == LPFC_SLI_REV4) {
1666                rc = lpfc_sli4_resume_rpi(ndlp, NULL, NULL);
1667                if (rc) {
1668                        /* Stay in state and retry. */
1669                        ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE;
1670                        return ndlp->nlp_state;
1671                }
1672        }
1673
1674        if (ndlp->nlp_type & NLP_FCP_TARGET)
1675                ndlp->nlp_fc4_type |= NLP_FC4_FCP;
1676
1677        if (ndlp->nlp_type & NLP_NVME_TARGET)
1678                ndlp->nlp_fc4_type |= NLP_FC4_NVME;
1679
1680        if (ndlp->nlp_type & (NLP_FCP_TARGET | NLP_NVME_TARGET)) {
1681                ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE;
1682                lpfc_nlp_set_state(vport, ndlp, NLP_STE_MAPPED_NODE);
1683        } else {
1684                ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE;
1685                lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
1686        }
1687
1688        return ndlp->nlp_state;
1689}
1690
1691static uint32_t
1692lpfc_device_rm_adisc_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1693                           void *arg, uint32_t evt)
1694{
1695        if (ndlp->nlp_flag & NLP_NPR_2B_DISC) {
1696                spin_lock_irq(&ndlp->lock);
1697                ndlp->nlp_flag |= NLP_NODEV_REMOVE;
1698                spin_unlock_irq(&ndlp->lock);
1699                return ndlp->nlp_state;
1700        } else {
1701                /* software abort outstanding ADISC */
1702                lpfc_els_abort(vport->phba, ndlp);
1703
1704                lpfc_drop_node(vport, ndlp);
1705                return NLP_STE_FREED_NODE;
1706        }
1707}
1708
1709static uint32_t
1710lpfc_device_recov_adisc_issue(struct lpfc_vport *vport,
1711                              struct lpfc_nodelist *ndlp,
1712                              void *arg,
1713                              uint32_t evt)
1714{
1715        struct lpfc_hba  *phba = vport->phba;
1716
1717        /* Don't do anything that will mess up processing of the
1718         * previous RSCN.
1719         */
1720        if (vport->fc_flag & FC_RSCN_DEFERRED)
1721                return ndlp->nlp_state;
1722
1723        /* software abort outstanding ADISC */
1724        lpfc_els_abort(phba, ndlp);
1725
1726        ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE;
1727        lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
1728        spin_lock_irq(&ndlp->lock);
1729        ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
1730        spin_unlock_irq(&ndlp->lock);
1731        lpfc_disc_set_adisc(vport, ndlp);
1732        return ndlp->nlp_state;
1733}
1734
1735static uint32_t
1736lpfc_rcv_plogi_reglogin_issue(struct lpfc_vport *vport,
1737                              struct lpfc_nodelist *ndlp,
1738                              void *arg,
1739                              uint32_t evt)
1740{
1741        struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
1742
1743        lpfc_rcv_plogi(vport, ndlp, cmdiocb);
1744        return ndlp->nlp_state;
1745}
1746
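/* PRLI received while REG_LOGIN is outstanding. In NVME target mode the
 * PRLI is only accepted once the RPI registration has completed and is
 * otherwise rejected with LS_RJT (logical busy); initiator mode simply
 * ACCs the PRLI.
 */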
1747static uint32_t
1748lpfc_rcv_prli_reglogin_issue(struct lpfc_vport *vport,
1749                             struct lpfc_nodelist *ndlp,
1750                             void *arg,
1751                             uint32_t evt)
1752{
1753        struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
1754        struct ls_rjt     stat;
1755
1756        if (!lpfc_rcv_prli_support_check(vport, ndlp, cmdiocb)) {
1757                return ndlp->nlp_state;
1758        }
1759        if (vport->phba->nvmet_support) {
1760                /* NVME Target mode.  Handle and respond to the PRLI and
1761                 * transition to UNMAPPED provided the RPI has completed
1762                 * registration.
1763                 */
1764                if (ndlp->nlp_flag & NLP_RPI_REGISTERED) {
1765                        lpfc_rcv_prli(vport, ndlp, cmdiocb);
1766                        lpfc_els_rsp_prli_acc(vport, cmdiocb, ndlp);
1767                } else {
1768                        /* RPI registration has not completed. Reject the PRLI
1769                         * to prevent an illegal state transition when the
1770                         * rpi registration does complete.
1771                         */
1772                        memset(&stat, 0, sizeof(struct ls_rjt));
1773                        stat.un.b.lsRjtRsnCode = LSRJT_LOGICAL_BSY;
1774                        stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
1775                        lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb,
1776                                            ndlp, NULL);
1777                        return ndlp->nlp_state;
1778                }
1779        } else {
1780                /* Initiator mode. */
1781                lpfc_els_rsp_prli_acc(vport, cmdiocb, ndlp);
1782        }
1783        return ndlp->nlp_state;
1784}
1785
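/* LOGO received while REG_LOGIN is outstanding. Clean up any active or
 * queued REG_LOGIN64 mailbox commands that reference this node (dropping
 * their node references and buffers), abort any outstanding GID_FT to the
 * NameServer, then process the LOGO.
 */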
1786static uint32_t
1787lpfc_rcv_logo_reglogin_issue(struct lpfc_vport *vport,
1788                             struct lpfc_nodelist *ndlp,
1789                             void *arg,
1790                             uint32_t evt)
1791{
1792        struct lpfc_hba   *phba = vport->phba;
1793        struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
1794        LPFC_MBOXQ_t      *mb;
1795        LPFC_MBOXQ_t      *nextmb;
1796        struct lpfc_dmabuf *mp;
1797        struct lpfc_nodelist *ns_ndlp;
1798
1799        cmdiocb = (struct lpfc_iocbq *) arg;
1800
1801        /* cleanup any ndlp on mbox q waiting for reglogin cmpl */
1802        if ((mb = phba->sli.mbox_active)) {
1803                if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) &&
1804                   (ndlp == (struct lpfc_nodelist *)mb->ctx_ndlp)) {
1805                        ndlp->nlp_flag &= ~NLP_REG_LOGIN_SEND;
1806                        lpfc_nlp_put(ndlp);
1807                        mb->ctx_ndlp = NULL;
1808                        mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
1809                }
1810        }
1811
1812        spin_lock_irq(&phba->hbalock);
1813        list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) {
1814                if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) &&
1815                   (ndlp == (struct lpfc_nodelist *)mb->ctx_ndlp)) {
1816                        mp = (struct lpfc_dmabuf *)(mb->ctx_buf);
1817                        if (mp) {
1818                                __lpfc_mbuf_free(phba, mp->virt, mp->phys);
1819                                kfree(mp);
1820                        }
1821                        ndlp->nlp_flag &= ~NLP_REG_LOGIN_SEND;
1822                        lpfc_nlp_put(ndlp);
1823                        list_del(&mb->list);
1824                        phba->sli.mboxq_cnt--;
1825                        mempool_free(mb, phba->mbox_mem_pool);
1826                }
1827        }
1828        spin_unlock_irq(&phba->hbalock);
1829
1830        /* software abort if any GID_FT is outstanding */
1831        if (vport->cfg_enable_fc4_type != LPFC_ENABLE_FCP) {
1832                ns_ndlp = lpfc_findnode_did(vport, NameServer_DID);
1833                if (ns_ndlp)
1834                        lpfc_els_abort(phba, ns_ndlp);
1835        }
1836
1837        lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_LOGO);
1838        return ndlp->nlp_state;
1839}
1840
1841static uint32_t
1842lpfc_rcv_padisc_reglogin_issue(struct lpfc_vport *vport,
1843                               struct lpfc_nodelist *ndlp,
1844                               void *arg,
1845                               uint32_t evt)
1846{
1847        struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
1848
1849        lpfc_rcv_padisc(vport, ndlp, cmdiocb);
1850        return ndlp->nlp_state;
1851}
1852
1853static uint32_t
1854lpfc_rcv_prlo_reglogin_issue(struct lpfc_vport *vport,
1855                             struct lpfc_nodelist *ndlp,
1856                             void *arg,
1857                             uint32_t evt)
1858{
1859        struct lpfc_iocbq *cmdiocb;
1860
1861        cmdiocb = (struct lpfc_iocbq *) arg;
1862        lpfc_els_rsp_acc(vport, ELS_CMD_PRLO, cmdiocb, ndlp, NULL);
1863        return ndlp->nlp_state;
1864}
1865
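/* REG_LOGIN mailbox completion. On mailbox failure, either give up (RPI
 * pool exhausted) or schedule a delayed PLOGI retry and send a LOGO. On
 * success, record the RPI and decide which PRLI type(s) to issue - FCP
 * and/or NVME - possibly querying the NameServer with GFT_ID first; fabric
 * nodes move straight to UNMAPPED.
 */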
1866static uint32_t
1867lpfc_cmpl_reglogin_reglogin_issue(struct lpfc_vport *vport,
1868                                  struct lpfc_nodelist *ndlp,
1869                                  void *arg,
1870                                  uint32_t evt)
1871{
1872        struct lpfc_hba *phba = vport->phba;
1873        LPFC_MBOXQ_t *pmb = (LPFC_MBOXQ_t *) arg;
1874        MAILBOX_t *mb = &pmb->u.mb;
1875        uint32_t did  = mb->un.varWords[1];
1876
1877        if (mb->mbxStatus) {
1878                /* RegLogin failed */
1879                lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
1880                                 "0246 RegLogin failed Data: x%x x%x x%x x%x "
1881                                 "x%x\n",
1882                                 did, mb->mbxStatus, vport->port_state,
1883                                 mb->un.varRegLogin.vpi,
1884                                 mb->un.varRegLogin.rpi);
1885                /*
1886                 * If RegLogin failed due to lack of HBA resources do not
1887                 * retry discovery.
1888                 */
1889                if (mb->mbxStatus == MBXERR_RPI_FULL) {
1890                        ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE;
1891                        lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
1892                        return ndlp->nlp_state;
1893                }
1894
1895                /* Put ndlp in NPR state; set PLOGI timer for 1 sec */
1896                mod_timer(&ndlp->nlp_delayfunc,
1897                          jiffies + msecs_to_jiffies(1000 * 1));
1898                spin_lock_irq(&ndlp->lock);
1899                ndlp->nlp_flag |= NLP_DELAY_TMO;
1900                spin_unlock_irq(&ndlp->lock);
1901                ndlp->nlp_last_elscmd = ELS_CMD_PLOGI;
1902
1903                lpfc_issue_els_logo(vport, ndlp, 0);
1904                return ndlp->nlp_state;
1905        }
1906
1907        /* SLI4 ports have preallocated logical rpis. */
1908        if (phba->sli_rev < LPFC_SLI_REV4)
1909                ndlp->nlp_rpi = mb->un.varWords[0];
1910
1911        ndlp->nlp_flag |= NLP_RPI_REGISTERED;
1912
1913        /* Only if we are not a fabric nport do we issue PRLI */
1914        lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
1915                         "3066 RegLogin Complete on x%x x%x x%x\n",
1916                         did, ndlp->nlp_type, ndlp->nlp_fc4_type);
1917        if (!(ndlp->nlp_type & NLP_FABRIC) &&
1918            (phba->nvmet_support == 0)) {
1919                /* The driver supports FCP and NVME concurrently.  If the
1920                 * ndlp's nlp_fc4_type is still zero, the driver doesn't
1921                 * know what PRLI to send yet.  Figure that out now and
1922                 * call PRLI depending on the outcome.
1923                 */
1924                if (vport->fc_flag & FC_PT2PT) {
1925                        /* If we are pt2pt, there is no Fabric to determine
1926                         * the FC4 type of the remote nport. So if NVME
1927                         * is configured try it.
1928                         */
1929                        ndlp->nlp_fc4_type |= NLP_FC4_FCP;
1930                        if ((vport->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) ||
1931                            (vport->cfg_enable_fc4_type == LPFC_ENABLE_NVME)) {
1932                                ndlp->nlp_fc4_type |= NLP_FC4_NVME;
1933                                /* We need to update the localport also */
1934                                lpfc_nvme_update_localport(vport);
1935                        }
1936
1937                } else if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
1938                        ndlp->nlp_fc4_type |= NLP_FC4_FCP;
1939
1940                } else if (ndlp->nlp_fc4_type == 0) {
1941                        /* If we are only configured for FCP, the driver
1942                         * should just issue PRLI for FCP. Otherwise issue
1943                         * GFT_ID to determine if remote port supports NVME.
1944                         */
1945                        if (vport->cfg_enable_fc4_type != LPFC_ENABLE_FCP) {
1946                                lpfc_ns_cmd(vport, SLI_CTNS_GFT_ID, 0,
1947                                            ndlp->nlp_DID);
1948                                return ndlp->nlp_state;
1949                        }
1950                        ndlp->nlp_fc4_type = NLP_FC4_FCP;
1951                }
1952
1953                ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE;
1954                lpfc_nlp_set_state(vport, ndlp, NLP_STE_PRLI_ISSUE);
1955                if (lpfc_issue_els_prli(vport, ndlp, 0)) {
1956                        lpfc_issue_els_logo(vport, ndlp, 0);
1957                        ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE;
1958                        lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
1959                }
1960        } else {
1961                if ((vport->fc_flag & FC_PT2PT) && phba->nvmet_support)
1962                        phba->targetport->port_id = vport->fc_myDID;
1963
1964                /* Only Fabric ports should transition. NVME target
1965                 * must complete PRLI.
1966                 */
1967                if (ndlp->nlp_type & NLP_FABRIC) {
1968                        ndlp->nlp_fc4_type &= ~NLP_FC4_FCP;
1969                        ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE;
1970                        lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
1971                }
1972        }
1973        return ndlp->nlp_state;
1974}
1975
1976static uint32_t
1977lpfc_device_rm_reglogin_issue(struct lpfc_vport *vport,
1978                              struct lpfc_nodelist *ndlp,
1979                              void *arg,
1980                              uint32_t evt)
1981{
1982        if (ndlp->nlp_flag & NLP_NPR_2B_DISC) {
1983                spin_lock_irq(&ndlp->lock);
1984                ndlp->nlp_flag |= NLP_NODEV_REMOVE;
1985                spin_unlock_irq(&ndlp->lock);
1986                return ndlp->nlp_state;
1987        } else {
1988                lpfc_drop_node(vport, ndlp);
1989                return NLP_STE_FREED_NODE;
1990        }
1991}
1992
1993static uint32_t
1994lpfc_device_recov_reglogin_issue(struct lpfc_vport *vport,
1995                                 struct lpfc_nodelist *ndlp,
1996                                 void *arg,
1997                                 uint32_t evt)
1998{
1999        /* Don't do anything that will mess up processing of the
2000         * previous RSCN.
2001         */
2002        if (vport->fc_flag & FC_RSCN_DEFERRED)
2003                return ndlp->nlp_state;
2004
2005        ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE;
2006        lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
2007        spin_lock_irq(&ndlp->lock);
2008
2009        /* If we are a target we won't immediately transition into PRLI,
2010         * so if REG_LOGIN already completed we don't need to ignore it.
2011         */
2012        if (!(ndlp->nlp_flag & NLP_RPI_REGISTERED) ||
2013            !vport->phba->nvmet_support)
2014                ndlp->nlp_flag |= NLP_IGNR_REG_CMPL;
2015
2016        ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
2017        spin_unlock_irq(&ndlp->lock);
2018        lpfc_disc_set_adisc(vport, ndlp);
2019        return ndlp->nlp_state;
2020}
2021
2022static uint32_t
2023lpfc_rcv_plogi_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2024                          void *arg, uint32_t evt)
2025{
2026        struct lpfc_iocbq *cmdiocb;
2027
2028        cmdiocb = (struct lpfc_iocbq *) arg;
2029
2030        lpfc_rcv_plogi(vport, ndlp, cmdiocb);
2031        return ndlp->nlp_state;
2032}
2033
2034static uint32_t
2035lpfc_rcv_prli_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2036                         void *arg, uint32_t evt)
2037{
2038        struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
2039
2040        if (!lpfc_rcv_prli_support_check(vport, ndlp, cmdiocb))
2041                return ndlp->nlp_state;
2042        lpfc_rcv_prli(vport, ndlp, cmdiocb);
2043        lpfc_els_rsp_prli_acc(vport, cmdiocb, ndlp);
2044        return ndlp->nlp_state;
2045}
2046
2047static uint32_t
2048lpfc_rcv_logo_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2049                         void *arg, uint32_t evt)
2050{
2051        struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
2052
2053        /* Software abort outstanding PRLI before sending acc */
2054        lpfc_els_abort(vport->phba, ndlp);
2055
2056        lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_LOGO);
2057        return ndlp->nlp_state;
2058}
2059
2060static uint32_t
2061lpfc_rcv_padisc_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2062                           void *arg, uint32_t evt)
2063{
2064        struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
2065
2066        lpfc_rcv_padisc(vport, ndlp, cmdiocb);
2067        return ndlp->nlp_state;
2068}
2069
2070/* This routine is invoked when we receive a PRLO request from an nport
2071 * we are logged into.  We should send back a PRLO rsp setting the
2072 * appropriate bits.
2073 * NEXT STATE = PRLI_ISSUE
2074 */
2075static uint32_t
2076lpfc_rcv_prlo_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2077                         void *arg, uint32_t evt)
2078{
2079        struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
2080
2081        lpfc_els_rsp_acc(vport, ELS_CMD_PRLO, cmdiocb, ndlp, NULL);
2082        return ndlp->nlp_state;
2083}
2084
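/* PRLI completion. The iocb flag indicates whether this was an FCP or an
 * NVME PRLI; record the initiator/target roles, retry and first-burst
 * capabilities reported by the remote port. The node moves to MAPPED or
 * UNMAPPED only after every outstanding PRLI has completed
 * (fc4_prli_sent == 0); an NPIV port with restricted login sends a LOGO
 * to non-target nodes instead.
 */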
2085static uint32_t
2086lpfc_cmpl_prli_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2087                          void *arg, uint32_t evt)
2088{
2089        struct lpfc_iocbq *cmdiocb, *rspiocb;
2090        struct lpfc_hba   *phba = vport->phba;
2091        IOCB_t *irsp;
2092        PRLI *npr;
2093        struct lpfc_nvme_prli *nvpr;
2094        void *temp_ptr;
2095
2096        cmdiocb = (struct lpfc_iocbq *) arg;
2097        rspiocb = cmdiocb->context_un.rsp_iocb;
2098
2099        /* A solicited PRLI is either FCP or NVME.  The PRLI cmd/rsp
2100         * format is different, so NULL the two PRLI type pointers so that
2101         * the driver picks up the correct context.
2102         */
2103        npr = NULL;
2104        nvpr = NULL;
2105        temp_ptr = lpfc_check_elscmpl_iocb(phba, cmdiocb, rspiocb);
2106        if (cmdiocb->iocb_flag & LPFC_PRLI_FCP_REQ)
2107                npr = (PRLI *) temp_ptr;
2108        else if (cmdiocb->iocb_flag & LPFC_PRLI_NVME_REQ)
2109                nvpr = (struct lpfc_nvme_prli *) temp_ptr;
2110
2111        irsp = &rspiocb->iocb;
2112        if (irsp->ulpStatus) {
2113                if ((vport->port_type == LPFC_NPIV_PORT) &&
2114                    vport->cfg_restrict_login) {
2115                        goto out;
2116                }
2117
2118                /* Adjust the nlp_fc4_type accordingly if the PRLI failed */
2119                if (npr)
2120                        ndlp->nlp_fc4_type &= ~NLP_FC4_FCP;
2121                if (nvpr)
2122                        ndlp->nlp_fc4_type &= ~NLP_FC4_NVME;
2123
2124                /* We can't set the DSM state till BOTH PRLIs complete */
2125                goto out_err;
2126        }
2127
2128        if (npr && (npr->acceptRspCode == PRLI_REQ_EXECUTED) &&
2129            (npr->prliType == PRLI_FCP_TYPE)) {
2130                lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
2131                                 "6028 FCP NPR PRLI Cmpl Init %d Target %d\n",
2132                                 npr->initiatorFunc,
2133                                 npr->targetFunc);
2134                if (npr->initiatorFunc)
2135                        ndlp->nlp_type |= NLP_FCP_INITIATOR;
2136                if (npr->targetFunc) {
2137                        ndlp->nlp_type |= NLP_FCP_TARGET;
2138                        if (npr->writeXferRdyDis)
2139                                ndlp->nlp_flag |= NLP_FIRSTBURST;
2140                }
2141                if (npr->Retry)
2142                        ndlp->nlp_fcp_info |= NLP_FCP_2_DEVICE;
2143
2144        } else if (nvpr &&
2145                   (bf_get_be32(prli_acc_rsp_code, nvpr) ==
2146                    PRLI_REQ_EXECUTED) &&
2147                   (bf_get_be32(prli_type_code, nvpr) ==
2148                    PRLI_NVME_TYPE)) {
2149
2150                /* Complete setting up the remote ndlp personality. */
2151                if (bf_get_be32(prli_init, nvpr))
2152                        ndlp->nlp_type |= NLP_NVME_INITIATOR;
2153
2154                if (phba->nsler && bf_get_be32(prli_nsler, nvpr) &&
2155                    bf_get_be32(prli_conf, nvpr))
2156
2157                        ndlp->nlp_nvme_info |= NLP_NVME_NSLER;
2158                else
2159                        ndlp->nlp_nvme_info &= ~NLP_NVME_NSLER;
2160
2161                /* Target driver cannot solicit NVME FB. */
2162                if (bf_get_be32(prli_tgt, nvpr)) {
2163                        /* Complete the nvme target roles.  The transport
2164                         * needs to know if the rport is capable of
2165                         * discovery in addition to its role.
2166                         */
2167                        ndlp->nlp_type |= NLP_NVME_TARGET;
2168                        if (bf_get_be32(prli_disc, nvpr))
2169                                ndlp->nlp_type |= NLP_NVME_DISCOVERY;
2170
2171                        /*
2172                         * If prli_fba is set, the Target supports FirstBurst.
2173                         * If prli_fb_sz is 0, the FirstBurst size is unlimited,
2174                         * otherwise it defines the actual size supported by
2175                         * the NVME Target.
2176                         */
2177                        if ((bf_get_be32(prli_fba, nvpr) == 1) &&
2178                            (phba->cfg_nvme_enable_fb) &&
2179                            (!phba->nvmet_support)) {
2180                                /* Both sides support FB. The target's first
2181                                 * burst size is a 512 byte encoded value.
2182                                 */
2183                                ndlp->nlp_flag |= NLP_FIRSTBURST;
2184                                ndlp->nvme_fb_size = bf_get_be32(prli_fb_sz,
2185                                                                 nvpr);
2186
2187                                /* Expressed in units of 512 bytes */
2188                                if (ndlp->nvme_fb_size)
2189                                        ndlp->nvme_fb_size <<=
2190                                                LPFC_NVME_FB_SHIFT;
2191                                else
2192                                        ndlp->nvme_fb_size = LPFC_NVME_MAX_FB;
2193                        }
2194                }
2195
2196                lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
2197                                 "6029 NVME PRLI Cmpl w1 x%08x "
2198                                 "w4 x%08x w5 x%08x flag x%x, "
2199                                 "fcp_info x%x nlp_type x%x\n",
2200                                 be32_to_cpu(nvpr->word1),
2201                                 be32_to_cpu(nvpr->word4),
2202                                 be32_to_cpu(nvpr->word5),
2203                                 ndlp->nlp_flag, ndlp->nlp_fcp_info,
2204                                 ndlp->nlp_type);
2205        }
2206        if (!(ndlp->nlp_type & NLP_FCP_TARGET) &&
2207            (vport->port_type == LPFC_NPIV_PORT) &&
2208             vport->cfg_restrict_login) {
2209out:
2210                spin_lock_irq(&ndlp->lock);
2211                ndlp->nlp_flag |= NLP_TARGET_REMOVE;
2212                spin_unlock_irq(&ndlp->lock);
2213                lpfc_issue_els_logo(vport, ndlp, 0);
2214
2215                ndlp->nlp_prev_state = NLP_STE_PRLI_ISSUE;
2216                lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
2217                return ndlp->nlp_state;
2218        }
2219
2220out_err:
2221        /* The ndlp state cannot move to MAPPED or UNMAPPED before all PRLIs
2222         * are complete.
2223         */
2224        if (ndlp->fc4_prli_sent == 0) {
2225                ndlp->nlp_prev_state = NLP_STE_PRLI_ISSUE;
2226                if (ndlp->nlp_type & (NLP_FCP_TARGET | NLP_NVME_TARGET))
2227                        lpfc_nlp_set_state(vport, ndlp, NLP_STE_MAPPED_NODE);
2228                else if (ndlp->nlp_type &
2229                         (NLP_FCP_INITIATOR | NLP_NVME_INITIATOR))
2230                        lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
2231        } else
2232                lpfc_printf_vlog(vport,
2233                                 KERN_INFO, LOG_ELS,
2234                                 "3067 PRLI's still outstanding "
2235                                 "on x%06x - count %d, Pend Node Mode "
2236                                 "transition...\n",
2237                                 ndlp->nlp_DID, ndlp->fc4_prli_sent);
2238
2239        return ndlp->nlp_state;
2240}
2241
2242/*! lpfc_device_rm_prli_issue
2243 *
2244 * \pre
2245 * \post
2246 * \param   phba
2247 * \param   ndlp
2248 * \param   arg
2249 * \param   evt
2250 * \return  uint32_t
2251 *
2252 * \b Description:
2253 *    This routine is invoked when we receive a request to remove an nport
2254 *    for which a PRLI is outstanding. If the node is still marked for
2255 *    discovery, only flag it NLP_NODEV_REMOVE; otherwise software abort the
2256 *    outstanding PRLI, drop the node and return NLP_STE_FREED_NODE.
2257 *
2258 */
2259
2260static uint32_t
2261lpfc_device_rm_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2262                          void *arg, uint32_t evt)
2263{
2264        if (ndlp->nlp_flag & NLP_NPR_2B_DISC) {
2265                spin_lock_irq(&ndlp->lock);
2266                ndlp->nlp_flag |= NLP_NODEV_REMOVE;
2267                spin_unlock_irq(&ndlp->lock);
2268                return ndlp->nlp_state;
2269        } else {
2270                /* software abort outstanding PRLI */
2271                lpfc_els_abort(vport->phba, ndlp);
2272
2273                lpfc_drop_node(vport, ndlp);
2274                return NLP_STE_FREED_NODE;
2275        }
2276}
2277
2278
2279/*! lpfc_device_recov_prli_issue
2280 *
2281 * \pre
2282 * \post
2283 * \param   phba
2284 * \param   ndlp
2285 * \param   arg
2286 * \param   evt
2287 * \return  uint32_t
2288 *
2289 * \b Description:
2291 *    The routine is invoked when the state of a device is unknown, for
2292 *    example during a link down. Unless an RSCN is deferred, software abort
2293 *    the outstanding PRLI, move the node back to NPR state and clear its
2294 *    discovery flags.
2294 */
2295static uint32_t
2296lpfc_device_recov_prli_issue(struct lpfc_vport *vport,
2297                             struct lpfc_nodelist *ndlp,
2298                             void *arg,
2299                             uint32_t evt)
2300{
2301        struct lpfc_hba  *phba = vport->phba;
2302
2303        /* Don't do anything that will mess up processing of the
2304         * previous RSCN.
2305         */
2306        if (vport->fc_flag & FC_RSCN_DEFERRED)
2307                return ndlp->nlp_state;
2308
2309        /* software abort outstanding PRLI */
2310        lpfc_els_abort(phba, ndlp);
2311
2312        ndlp->nlp_prev_state = NLP_STE_PRLI_ISSUE;
2313        lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
2314        spin_lock_irq(&ndlp->lock);
2315        ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
2316        spin_unlock_irq(&ndlp->lock);
2317        lpfc_disc_set_adisc(vport, ndlp);
2318        return ndlp->nlp_state;
2319}
2320
2321static uint32_t
2322lpfc_rcv_plogi_logo_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2323                          void *arg, uint32_t evt)
2324{
2325        struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *)arg;
2326        struct ls_rjt     stat;
2327
2328        memset(&stat, 0, sizeof(struct ls_rjt));
2329        stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
2330        stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
2331        lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
2332        return ndlp->nlp_state;
2333}
2334
2335static uint32_t
2336lpfc_rcv_prli_logo_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2337                         void *arg, uint32_t evt)
2338{
2339        struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *)arg;
2340        struct ls_rjt     stat;
2341
2342        memset(&stat, 0, sizeof(struct ls_rjt));
2343        stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
2344        stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
2345        lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
2346        return ndlp->nlp_state;
2347}
2348
2349static uint32_t
2350lpfc_rcv_logo_logo_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2351                         void *arg, uint32_t evt)
2352{
2353        struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *)arg;
2354
2355        spin_lock_irq(&ndlp->lock);
2356        ndlp->nlp_flag |= NLP_LOGO_ACC;
2357        spin_unlock_irq(&ndlp->lock);
2358        lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
2359        return ndlp->nlp_state;
2360}
2361
2362static uint32_t
2363lpfc_rcv_padisc_logo_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2364                           void *arg, uint32_t evt)
2365{
2366        struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *)arg;
2367        struct ls_rjt     stat;
2368
2369        memset(&stat, 0, sizeof(struct ls_rjt));
2370        stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
2371        stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
2372        lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
2373        return ndlp->nlp_state;
2374}
2375
2376static uint32_t
2377lpfc_rcv_prlo_logo_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2378                         void *arg, uint32_t evt)
2379{
2380        struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *)arg;
2381        struct ls_rjt     stat;
2382
2383        memset(&stat, 0, sizeof(struct ls_rjt));
2384        stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
2385        stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
2386        lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
2387        return ndlp->nlp_state;
2388}
2389
2390static uint32_t
2391lpfc_cmpl_logo_logo_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2392                          void *arg, uint32_t evt)
2393{
2394        ndlp->nlp_prev_state = NLP_STE_LOGO_ISSUE;
2395        lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
2396        spin_lock_irq(&ndlp->lock);
2397        ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
2398        spin_unlock_irq(&ndlp->lock);
2399        lpfc_disc_set_adisc(vport, ndlp);
2400        return ndlp->nlp_state;
2401}
2402
2403static uint32_t
2404lpfc_device_rm_logo_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2405                          void *arg, uint32_t evt)
2406{
2407        /*
2408         * DevLoss has timed out and is calling for Device Remove.
2409         * In this case, abort the LOGO and clean up the ndlp.
2410         */
2411
2412        lpfc_unreg_rpi(vport, ndlp);
2413        /* software abort outstanding LOGO */
2414        lpfc_els_abort(vport->phba, ndlp);
2415        lpfc_drop_node(vport, ndlp);
2416        return NLP_STE_FREED_NODE;
2417}
2418
2419static uint32_t
2420lpfc_device_recov_logo_issue(struct lpfc_vport *vport,
2421                             struct lpfc_nodelist *ndlp,
2422                             void *arg, uint32_t evt)
2423{
2424        /*
2425         * Device Recovery events have no meaning for a node with a LOGO
2426         * outstanding.  The LOGO has to complete first; the node is
2427         * handled from that point.
2428         */
2429        return ndlp->nlp_state;
2430}
2431
2432static uint32_t
2433lpfc_rcv_plogi_unmap_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2434                          void *arg, uint32_t evt)
2435{
2436        struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
2437
2438        lpfc_rcv_plogi(vport, ndlp, cmdiocb);
2439        return ndlp->nlp_state;
2440}
2441
2442static uint32_t
2443lpfc_rcv_prli_unmap_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2444                         void *arg, uint32_t evt)
2445{
2446        struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
2447
2448        if (!lpfc_rcv_prli_support_check(vport, ndlp, cmdiocb))
2449                return ndlp->nlp_state;
2450
2451        lpfc_rcv_prli(vport, ndlp, cmdiocb);
2452        lpfc_els_rsp_prli_acc(vport, cmdiocb, ndlp);
2453        return ndlp->nlp_state;
2454}
2455
2456static uint32_t
2457lpfc_rcv_logo_unmap_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2458                         void *arg, uint32_t evt)
2459{
2460        struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
2461
2462        lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_LOGO);
2463        return ndlp->nlp_state;
2464}
2465
2466static uint32_t
2467lpfc_rcv_padisc_unmap_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2468                           void *arg, uint32_t evt)
2469{
2470        struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
2471
2472        lpfc_rcv_padisc(vport, ndlp, cmdiocb);
2473        return ndlp->nlp_state;
2474}
2475
2476static uint32_t
2477lpfc_rcv_prlo_unmap_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2478                         void *arg, uint32_t evt)
2479{
2480        struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
2481
2482        lpfc_els_rsp_acc(vport, ELS_CMD_PRLO, cmdiocb, ndlp, NULL);
2483        return ndlp->nlp_state;
2484}
2485
2486static uint32_t
2487lpfc_device_rm_unmap_node(struct lpfc_vport *vport,
2488                          struct lpfc_nodelist *ndlp,
2489                          void *arg,
2490                          uint32_t evt)
2491{
2492        lpfc_drop_node(vport, ndlp);
2493        return NLP_STE_FREED_NODE;
2494}
2495
2496static uint32_t
2497lpfc_device_recov_unmap_node(struct lpfc_vport *vport,
2498                             struct lpfc_nodelist *ndlp,
2499                             void *arg,
2500                             uint32_t evt)
2501{
2502        ndlp->nlp_prev_state = NLP_STE_UNMAPPED_NODE;
2503        lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
2504        spin_lock_irq(&ndlp->lock);
2505        ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
2506        ndlp->nlp_fc4_type &= ~(NLP_FC4_FCP | NLP_FC4_NVME);
2507        spin_unlock_irq(&ndlp->lock);
2508        lpfc_disc_set_adisc(vport, ndlp);
2509
2510        return ndlp->nlp_state;
2511}
2512
2513static uint32_t
2514lpfc_rcv_plogi_mapped_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2515                           void *arg, uint32_t evt)
2516{
2517        struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
2518
2519        lpfc_rcv_plogi(vport, ndlp, cmdiocb);
2520        return ndlp->nlp_state;
2521}
2522
2523static uint32_t
2524lpfc_rcv_prli_mapped_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2525                          void *arg, uint32_t evt)
2526{
2527        struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
2528
2529        if (!lpfc_rcv_prli_support_check(vport, ndlp, cmdiocb))
2530                return ndlp->nlp_state;
2531        lpfc_els_rsp_prli_acc(vport, cmdiocb, ndlp);
2532        return ndlp->nlp_state;
2533}
2534
2535static uint32_t
2536lpfc_rcv_logo_mapped_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2537                          void *arg, uint32_t evt)
2538{
2539        struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
2540
2541        lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_LOGO);
2542        return ndlp->nlp_state;
2543}
2544
2545static uint32_t
2546lpfc_rcv_padisc_mapped_node(struct lpfc_vport *vport,
2547                            struct lpfc_nodelist *ndlp,
2548                            void *arg, uint32_t evt)
2549{
2550        struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
2551
2552        lpfc_rcv_padisc(vport, ndlp, cmdiocb);
2553        return ndlp->nlp_state;
2554}
2555
2556static uint32_t
2557lpfc_rcv_prlo_mapped_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2558                          void *arg, uint32_t evt)
2559{
2560        struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
2561
2562        /* flush the target */
2563        lpfc_sli_abort_iocb(vport, ndlp->nlp_sid, 0, LPFC_CTX_TGT);
2564
2565        /* Treat like rcv logo */
2566        lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_PRLO);
2567        return ndlp->nlp_state;
2568}
2569
2570static uint32_t
2571lpfc_device_recov_mapped_node(struct lpfc_vport *vport,
2572                              struct lpfc_nodelist *ndlp,
2573                              void *arg,
2574                              uint32_t evt)
2575{
2576        ndlp->nlp_prev_state = NLP_STE_MAPPED_NODE;
2577        lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
2578        spin_lock_irq(&ndlp->lock);
2579        ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
2580        ndlp->nlp_fc4_type &= ~(NLP_FC4_FCP | NLP_FC4_NVME);
2581        spin_unlock_irq(&ndlp->lock);
2582        lpfc_disc_set_adisc(vport, ndlp);
2583        return ndlp->nlp_state;
2584}
2585
2586static uint32_t
2587lpfc_rcv_plogi_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2588                        void *arg, uint32_t evt)
2589{
2590        struct lpfc_iocbq *cmdiocb  = (struct lpfc_iocbq *) arg;
2591
2592        /* Ignore PLOGI if we have an outstanding LOGO */
2593        if (ndlp->nlp_flag & (NLP_LOGO_SND | NLP_LOGO_ACC))
2594                return ndlp->nlp_state;
2595        if (lpfc_rcv_plogi(vport, ndlp, cmdiocb)) {
2596                lpfc_cancel_retry_delay_tmo(vport, ndlp);
2597                spin_lock_irq(&ndlp->lock);
2598                ndlp->nlp_flag &= ~(NLP_NPR_ADISC | NLP_NPR_2B_DISC);
2599                spin_unlock_irq(&ndlp->lock);
2600        } else if (!(ndlp->nlp_flag & NLP_NPR_2B_DISC)) {
2601                /* send PLOGI immediately, move to PLOGI issue state */
2602                if (!(ndlp->nlp_flag & NLP_DELAY_TMO)) {
2603                        ndlp->nlp_prev_state = NLP_STE_NPR_NODE;
2604                        lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
2605                        lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
2606                }
2607        }
2608        return ndlp->nlp_state;
2609}
2610
2611static uint32_t
2612lpfc_rcv_prli_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2613                       void *arg, uint32_t evt)
2614{
2615        struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
2616        struct ls_rjt     stat;
2617
2618        memset(&stat, 0, sizeof (struct ls_rjt));
2619        stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
2620        stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
2621        lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
2622
2623        if (!(ndlp->nlp_flag & NLP_DELAY_TMO)) {
2624                if (ndlp->nlp_flag & NLP_NPR_ADISC) {
2625                        spin_lock_irq(&ndlp->lock);
2626                        ndlp->nlp_flag &= ~NLP_NPR_ADISC;
2627                        ndlp->nlp_prev_state = NLP_STE_NPR_NODE;
2628                        spin_unlock_irq(&ndlp->lock);
2629                        lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE);
2630                        lpfc_issue_els_adisc(vport, ndlp, 0);
2631                } else {
2632                        ndlp->nlp_prev_state = NLP_STE_NPR_NODE;
2633                        lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
2634                        lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
2635                }
2636        }
2637        return ndlp->nlp_state;
2638}
2639
2640static uint32_t
2641lpfc_rcv_logo_npr_node(struct lpfc_vport *vport,  struct lpfc_nodelist *ndlp,
2642                       void *arg, uint32_t evt)
2643{
2644        struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
2645
2646        lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_LOGO);
2647        return ndlp->nlp_state;
2648}
2649
2650static uint32_t
2651lpfc_rcv_padisc_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2652                         void *arg, uint32_t evt)
2653{
2654        struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
2655
2656        lpfc_rcv_padisc(vport, ndlp, cmdiocb);
2657        /*
2658         * Do not start discovery if discovery is about to start
2659         * or already in progress for this node. Starting discovery
2660         * here will affect the counting of discovery threads.
2661         */
2662        if (!(ndlp->nlp_flag & NLP_DELAY_TMO) &&
2663            !(ndlp->nlp_flag & NLP_NPR_2B_DISC)) {
2664                if (ndlp->nlp_flag & NLP_NPR_ADISC) {
2665                        ndlp->nlp_flag &= ~NLP_NPR_ADISC;
2666                        ndlp->nlp_prev_state = NLP_STE_NPR_NODE;
2667                        lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE);
2668                        lpfc_issue_els_adisc(vport, ndlp, 0);
2669                } else {
2670                        ndlp->nlp_prev_state = NLP_STE_NPR_NODE;
2671                        lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
2672                        lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
2673                }
2674        }
2675        return ndlp->nlp_state;
2676}
2677
2678static uint32_t
2679lpfc_rcv_prlo_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2680                       void *arg, uint32_t evt)
2681{
2682        struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
2683
2684        spin_lock_irq(&ndlp->lock);
2685        ndlp->nlp_flag |= NLP_LOGO_ACC;
2686        spin_unlock_irq(&ndlp->lock);
2687
2688        lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
2689
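        /* If no delayed retry is already pending, arm a 1 second delay and
         * note that a PLOGI should be issued when it expires; the ADISC
         * hint is cleared in either case.
         */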
2690        if ((ndlp->nlp_flag & NLP_DELAY_TMO) == 0) {
2691                mod_timer(&ndlp->nlp_delayfunc,
2692                          jiffies + msecs_to_jiffies(1000 * 1));
2693                spin_lock_irq(&ndlp->lock);
2694                ndlp->nlp_flag |= NLP_DELAY_TMO;
2695                ndlp->nlp_flag &= ~NLP_NPR_ADISC;
2696                spin_unlock_irq(&ndlp->lock);
2697                ndlp->nlp_last_elscmd = ELS_CMD_PLOGI;
2698        } else {
2699                spin_lock_irq(&ndlp->lock);
2700                ndlp->nlp_flag &= ~NLP_NPR_ADISC;
2701                spin_unlock_irq(&ndlp->lock);
2702        }
2703        return ndlp->nlp_state;
2704}
2705
2706static uint32_t
2707lpfc_cmpl_plogi_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2708                         void *arg, uint32_t evt)
2709{
2710        struct lpfc_iocbq *cmdiocb, *rspiocb;
2711        IOCB_t *irsp;
2712
2713        cmdiocb = (struct lpfc_iocbq *) arg;
2714        rspiocb = cmdiocb->context_un.rsp_iocb;
2715
2716        irsp = &rspiocb->iocb;
2717        if (irsp->ulpStatus) {
2718                return NLP_STE_FREED_NODE;
2719        }
2720        return ndlp->nlp_state;
2721}
2722
2723static uint32_t
2724lpfc_cmpl_prli_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2725                        void *arg, uint32_t evt)
2726{
2727        struct lpfc_iocbq *cmdiocb, *rspiocb;
2728        IOCB_t *irsp;
2729
2730        cmdiocb = (struct lpfc_iocbq *) arg;
2731        rspiocb = cmdiocb->context_un.rsp_iocb;
2732
2733        irsp = &rspiocb->iocb;
2734        if (irsp->ulpStatus && (ndlp->nlp_flag & NLP_NODEV_REMOVE)) {
2735                lpfc_drop_node(vport, ndlp);
2736                return NLP_STE_FREED_NODE;
2737        }
2738        return ndlp->nlp_state;
2739}
2740
2741static uint32_t
2742lpfc_cmpl_logo_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2743                        void *arg, uint32_t evt)
2744{
2745        struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2746
2747        /* For the fabric port just clear the fc flags. */
2748        if (ndlp->nlp_DID == Fabric_DID) {
2749                spin_lock_irq(shost->host_lock);
2750                vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
2751                spin_unlock_irq(shost->host_lock);
2752        }
2753        lpfc_unreg_rpi(vport, ndlp);
2754        return ndlp->nlp_state;
2755}
2756
2757static uint32_t
2758lpfc_cmpl_adisc_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2759                         void *arg, uint32_t evt)
2760{
2761        struct lpfc_iocbq *cmdiocb, *rspiocb;
2762        IOCB_t *irsp;
2763
2764        cmdiocb = (struct lpfc_iocbq *) arg;
2765        rspiocb = cmdiocb->context_un.rsp_iocb;
2766
2767        irsp = &rspiocb->iocb;
2768        if (irsp->ulpStatus && (ndlp->nlp_flag & NLP_NODEV_REMOVE)) {
2769                lpfc_drop_node(vport, ndlp);
2770                return NLP_STE_FREED_NODE;
2771        }
2772        return ndlp->nlp_state;
2773}
2774
2775static uint32_t
2776lpfc_cmpl_reglogin_npr_node(struct lpfc_vport *vport,
2777                            struct lpfc_nodelist *ndlp,
2778                            void *arg, uint32_t evt)
2779{
2780        LPFC_MBOXQ_t *pmb = (LPFC_MBOXQ_t *) arg;
2781        MAILBOX_t    *mb = &pmb->u.mb;
2782
2783        if (!mb->mbxStatus) {
2784                /* SLI4 ports have preallocated logical rpis. */
2785                if (vport->phba->sli_rev < LPFC_SLI_REV4)
2786                        ndlp->nlp_rpi = mb->un.varWords[0];
2787                ndlp->nlp_flag |= NLP_RPI_REGISTERED;
2788                if (ndlp->nlp_flag & NLP_LOGO_ACC) {
2789                        lpfc_unreg_rpi(vport, ndlp);
2790                }
2791        } else {
2792                if (ndlp->nlp_flag & NLP_NODEV_REMOVE) {
2793                        lpfc_drop_node(vport, ndlp);
2794                        return NLP_STE_FREED_NODE;
2795                }
2796        }
2797        return ndlp->nlp_state;
2798}
2799
2800static uint32_t
2801lpfc_device_rm_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2802                        void *arg, uint32_t evt)
2803{
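        /* If the node is still slated for discovery, just record the
         * pending removal and keep it; otherwise free it now.
         */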
2804        if (ndlp->nlp_flag & NLP_NPR_2B_DISC) {
2805                spin_lock_irq(&ndlp->lock);
2806                ndlp->nlp_flag |= NLP_NODEV_REMOVE;
2807                spin_unlock_irq(&ndlp->lock);
2808                return ndlp->nlp_state;
2809        }
2810        lpfc_drop_node(vport, ndlp);
2811        return NLP_STE_FREED_NODE;
2812}
2813
2814static uint32_t
2815lpfc_device_recov_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2816                           void *arg, uint32_t evt)
2817{
2818        /* Don't do anything that will mess up processing of the
2819         * previous RSCN.
2820         */
2821        if (vport->fc_flag & FC_RSCN_DEFERRED)
2822                return ndlp->nlp_state;
2823
2824        lpfc_cancel_retry_delay_tmo(vport, ndlp);
2825        spin_lock_irq(&ndlp->lock);
2826        ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
2827        ndlp->nlp_fc4_type &= ~(NLP_FC4_FCP | NLP_FC4_NVME);
2828        spin_unlock_irq(&ndlp->lock);
2829        return ndlp->nlp_state;
2830}
2831
2832
2833/* This next section defines the NPort Discovery State Machine */
2834
2835/* There are 4 different doubly linked lists that nodelist entries can reside
2836 * on. The plogi list and adisc list are used when Link Up discovery or RSCN
2837 * processing is needed. Each list holds the nodes that we will send PLOGI
2838 * or ADISC on. These lists keep track of which nodes will be affected
2839 * by an RSCN or a Link Up (typically, all nodes are affected on Link Up).
2840 * The unmapped_list will contain all nodes that we have successfully logged
2841 * into at the Fibre Channel level. The mapped_list will contain all nodes
2842 * that are mapped FCP targets.
2843 */
2844/*
2845 * The bind list is a list of undiscovered (potentially non-existent) nodes
2846 * that we have saved binding information on. This information is used when
2847 * nodes transition from the unmapped to the mapped list.
2848 */
2849/* For UNUSED_NODE state, the node has just been allocated.
2850 * For PLOGI_ISSUE and REG_LOGIN_ISSUE, the node is on
2851 * the PLOGI list. For REG_LOGIN_COMPL, the node is taken off the PLOGI list
2852 * and put on the unmapped list. For ADISC processing, the node is taken off
2853 * the ADISC list and placed on either the mapped or unmapped list (depending
2854 * on its previous state). Once on the unmapped list, a PRLI is issued and the
2855 * state changed to PRLI_ISSUE. When the PRLI completion occurs, the state is
2856 * changed to UNMAPPED_NODE. If the completion indicates a mapped
2857 * node, the node is taken off the unmapped list. The binding list is checked
2858 * for a valid binding, or a binding is automatically assigned. If binding
2859 * assignment is unsuccessful, the node is left on the unmapped list. If
2860 * binding assignment is successful, the associated binding list entry (if
2861 * any) is removed, and the node is placed on the mapped list.
2862 */
2863/*
2864 * For a Link Down, all nodes on the ADISC, PLOGI, unmapped or mapped
2865 * lists will receive a DEVICE_RECOVERY event. If the linkdown or devloss
2866 * timers expire, all affected nodes will receive a DEVICE_RM event.
2867 */
2868/*
2869 * For a Link Up or RSCN, all nodes will move from the mapped / unmapped lists
2870 * to either the ADISC or PLOGI list.  After a Nameserver query or ALPA loopmap
2871 * check, additional nodes may be added or removed (via DEVICE_RM) to / from
2872 * the PLOGI or ADISC lists. Once the PLOGI and ADISC lists are populated,
2873 * we will first process the ADISC list.  32 entries are processed initially and
2874 * ADISC is initiated for each one.  Completions / Events for each node are
2875 * funneled through the state machine.  As each node finishes ADISC processing,
2876 * it starts ADISC for any nodes waiting for ADISC processing. If no nodes are
2877 * waiting, and the ADISC list count is identically 0, then we are done. For
2878 * Link Up discovery, since all nodes on the PLOGI list are UNREG_LOGIN'ed, we
2879 * can issue a CLEAR_LA and reenable Link Events. Next we will process the PLOGI
2880 * list.  32 entries are processed initially and PLOGI is initiated for each one.
2881 * Completions / Events for each node are funneled through the state machine.
2882 * As each node finishes PLOGI processing, it starts PLOGI for any nodes waiting
2883 * for PLOGI processing. If no nodes are waiting, and the PLOGI list count is
2884 * identically 0, then we are done. We have now completed discovery / RSCN
2885 * handling. Upon completion, ALL nodes should be on either the mapped or
2886 * unmapped lists.
2887 */
2888
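/*
 * The jump table below is indexed row-major by (current state, event):
 *
 *     func = lpfc_disc_action[(cur_state * NLP_EVT_MAX_EVENT) + evt];
 *
 * Each state contributes one block of NLP_EVT_MAX_EVENT entries, listed in
 * event order, and the blocks appear in state order. As a worked example
 * (assuming the usual encodings from lpfc_disc.h: NLP_STE_NPR_NODE = 0x8,
 * NLP_EVT_RCV_PRLO = 0x5, NLP_EVT_MAX_EVENT = 0xd), a PRLO received on an
 * NPR node selects entry (8 * 13) + 5 = 109, i.e. lpfc_rcv_prlo_npr_node.
 */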
2889static uint32_t (*lpfc_disc_action[NLP_STE_MAX_STATE * NLP_EVT_MAX_EVENT])
2890     (struct lpfc_vport *, struct lpfc_nodelist *, void *, uint32_t) = {
2891        /* Action routine                  Event       Current State  */
2892        lpfc_rcv_plogi_unused_node,     /* RCV_PLOGI   UNUSED_NODE    */
2893        lpfc_rcv_els_unused_node,       /* RCV_PRLI        */
2894        lpfc_rcv_logo_unused_node,      /* RCV_LOGO        */
2895        lpfc_rcv_els_unused_node,       /* RCV_ADISC       */
2896        lpfc_rcv_els_unused_node,       /* RCV_PDISC       */
2897        lpfc_rcv_els_unused_node,       /* RCV_PRLO        */
2898        lpfc_disc_illegal,              /* CMPL_PLOGI      */
2899        lpfc_disc_illegal,              /* CMPL_PRLI       */
2900        lpfc_cmpl_logo_unused_node,     /* CMPL_LOGO       */
2901        lpfc_disc_illegal,              /* CMPL_ADISC      */
2902        lpfc_disc_illegal,              /* CMPL_REG_LOGIN  */
2903        lpfc_device_rm_unused_node,     /* DEVICE_RM       */
2904        lpfc_device_recov_unused_node,  /* DEVICE_RECOVERY */
2905
2906        lpfc_rcv_plogi_plogi_issue,     /* RCV_PLOGI   PLOGI_ISSUE    */
2907        lpfc_rcv_prli_plogi_issue,      /* RCV_PRLI        */
2908        lpfc_rcv_logo_plogi_issue,      /* RCV_LOGO        */
2909        lpfc_rcv_els_plogi_issue,       /* RCV_ADISC       */
2910        lpfc_rcv_els_plogi_issue,       /* RCV_PDISC       */
2911        lpfc_rcv_els_plogi_issue,       /* RCV_PRLO        */
2912        lpfc_cmpl_plogi_plogi_issue,    /* CMPL_PLOGI      */
2913        lpfc_disc_illegal,              /* CMPL_PRLI       */
2914        lpfc_cmpl_logo_plogi_issue,     /* CMPL_LOGO       */
2915        lpfc_disc_illegal,              /* CMPL_ADISC      */
2916        lpfc_cmpl_reglogin_plogi_issue, /* CMPL_REG_LOGIN  */
2917        lpfc_device_rm_plogi_issue,     /* DEVICE_RM       */
2918        lpfc_device_recov_plogi_issue,  /* DEVICE_RECOVERY */
2919
2920        lpfc_rcv_plogi_adisc_issue,     /* RCV_PLOGI   ADISC_ISSUE    */
2921        lpfc_rcv_prli_adisc_issue,      /* RCV_PRLI        */
2922        lpfc_rcv_logo_adisc_issue,      /* RCV_LOGO        */
2923        lpfc_rcv_padisc_adisc_issue,    /* RCV_ADISC       */
2924        lpfc_rcv_padisc_adisc_issue,    /* RCV_PDISC       */
2925        lpfc_rcv_prlo_adisc_issue,      /* RCV_PRLO        */
2926        lpfc_disc_illegal,              /* CMPL_PLOGI      */
2927        lpfc_disc_illegal,              /* CMPL_PRLI       */
2928        lpfc_disc_illegal,              /* CMPL_LOGO       */
2929        lpfc_cmpl_adisc_adisc_issue,    /* CMPL_ADISC      */
2930        lpfc_disc_illegal,              /* CMPL_REG_LOGIN  */
2931        lpfc_device_rm_adisc_issue,     /* DEVICE_RM       */
2932        lpfc_device_recov_adisc_issue,  /* DEVICE_RECOVERY */
2933
2934        lpfc_rcv_plogi_reglogin_issue,  /* RCV_PLOGI  REG_LOGIN_ISSUE */
2935        lpfc_rcv_prli_reglogin_issue,   /* RCV_PRLI        */
2936        lpfc_rcv_logo_reglogin_issue,   /* RCV_LOGO        */
2937        lpfc_rcv_padisc_reglogin_issue, /* RCV_ADISC       */
2938        lpfc_rcv_padisc_reglogin_issue, /* RCV_PDISC       */
2939        lpfc_rcv_prlo_reglogin_issue,   /* RCV_PRLO        */
2940        lpfc_cmpl_plogi_illegal,        /* CMPL_PLOGI      */
2941        lpfc_disc_illegal,              /* CMPL_PRLI       */
2942        lpfc_disc_illegal,              /* CMPL_LOGO       */
2943        lpfc_disc_illegal,              /* CMPL_ADISC      */
2944        lpfc_cmpl_reglogin_reglogin_issue, /* CMPL_REG_LOGIN  */
2945        lpfc_device_rm_reglogin_issue,  /* DEVICE_RM       */
2946        lpfc_device_recov_reglogin_issue, /* DEVICE_RECOVERY */
2947
2948        lpfc_rcv_plogi_prli_issue,      /* RCV_PLOGI   PRLI_ISSUE     */
2949        lpfc_rcv_prli_prli_issue,       /* RCV_PRLI        */
2950        lpfc_rcv_logo_prli_issue,       /* RCV_LOGO        */
2951        lpfc_rcv_padisc_prli_issue,     /* RCV_ADISC       */
2952        lpfc_rcv_padisc_prli_issue,     /* RCV_PDISC       */
2953        lpfc_rcv_prlo_prli_issue,       /* RCV_PRLO        */
2954        lpfc_cmpl_plogi_illegal,        /* CMPL_PLOGI      */
2955        lpfc_cmpl_prli_prli_issue,      /* CMPL_PRLI       */
2956        lpfc_disc_illegal,              /* CMPL_LOGO       */
2957        lpfc_disc_illegal,              /* CMPL_ADISC      */
2958        lpfc_disc_illegal,              /* CMPL_REG_LOGIN  */
2959        lpfc_device_rm_prli_issue,      /* DEVICE_RM       */
2960        lpfc_device_recov_prli_issue,   /* DEVICE_RECOVERY */
2961
2962        lpfc_rcv_plogi_logo_issue,      /* RCV_PLOGI   LOGO_ISSUE     */
2963        lpfc_rcv_prli_logo_issue,       /* RCV_PRLI        */
2964        lpfc_rcv_logo_logo_issue,       /* RCV_LOGO        */
2965        lpfc_rcv_padisc_logo_issue,     /* RCV_ADISC       */
2966        lpfc_rcv_padisc_logo_issue,     /* RCV_PDISC       */
2967        lpfc_rcv_prlo_logo_issue,       /* RCV_PRLO        */
2968        lpfc_cmpl_plogi_illegal,        /* CMPL_PLOGI      */
2969        lpfc_disc_illegal,              /* CMPL_PRLI       */
2970        lpfc_cmpl_logo_logo_issue,      /* CMPL_LOGO       */
2971        lpfc_disc_illegal,              /* CMPL_ADISC      */
2972        lpfc_disc_illegal,              /* CMPL_REG_LOGIN  */
2973        lpfc_device_rm_logo_issue,      /* DEVICE_RM       */
2974        lpfc_device_recov_logo_issue,   /* DEVICE_RECOVERY */
2975
2976        lpfc_rcv_plogi_unmap_node,      /* RCV_PLOGI   UNMAPPED_NODE  */
2977        lpfc_rcv_prli_unmap_node,       /* RCV_PRLI        */
2978        lpfc_rcv_logo_unmap_node,       /* RCV_LOGO        */
2979        lpfc_rcv_padisc_unmap_node,     /* RCV_ADISC       */
2980        lpfc_rcv_padisc_unmap_node,     /* RCV_PDISC       */
2981        lpfc_rcv_prlo_unmap_node,       /* RCV_PRLO        */
2982        lpfc_disc_illegal,              /* CMPL_PLOGI      */
2983        lpfc_disc_illegal,              /* CMPL_PRLI       */
2984        lpfc_disc_illegal,              /* CMPL_LOGO       */
2985        lpfc_disc_illegal,              /* CMPL_ADISC      */
2986        lpfc_disc_illegal,              /* CMPL_REG_LOGIN  */
2987        lpfc_device_rm_unmap_node,      /* DEVICE_RM       */
2988        lpfc_device_recov_unmap_node,   /* DEVICE_RECOVERY */
2989
2990        lpfc_rcv_plogi_mapped_node,     /* RCV_PLOGI   MAPPED_NODE    */
2991        lpfc_rcv_prli_mapped_node,      /* RCV_PRLI        */
2992        lpfc_rcv_logo_mapped_node,      /* RCV_LOGO        */
2993        lpfc_rcv_padisc_mapped_node,    /* RCV_ADISC       */
2994        lpfc_rcv_padisc_mapped_node,    /* RCV_PDISC       */
2995        lpfc_rcv_prlo_mapped_node,      /* RCV_PRLO        */
2996        lpfc_disc_illegal,              /* CMPL_PLOGI      */
2997        lpfc_disc_illegal,              /* CMPL_PRLI       */
2998        lpfc_disc_illegal,              /* CMPL_LOGO       */
2999        lpfc_disc_illegal,              /* CMPL_ADISC      */
3000        lpfc_disc_illegal,              /* CMPL_REG_LOGIN  */
3001        lpfc_disc_illegal,              /* DEVICE_RM       */
3002        lpfc_device_recov_mapped_node,  /* DEVICE_RECOVERY */
3003
3004        lpfc_rcv_plogi_npr_node,        /* RCV_PLOGI   NPR_NODE    */
3005        lpfc_rcv_prli_npr_node,         /* RCV_PRLI        */
3006        lpfc_rcv_logo_npr_node,         /* RCV_LOGO        */
3007        lpfc_rcv_padisc_npr_node,       /* RCV_ADISC       */
3008        lpfc_rcv_padisc_npr_node,       /* RCV_PDISC       */
3009        lpfc_rcv_prlo_npr_node,         /* RCV_PRLO        */
3010        lpfc_cmpl_plogi_npr_node,       /* CMPL_PLOGI      */
3011        lpfc_cmpl_prli_npr_node,        /* CMPL_PRLI       */
3012        lpfc_cmpl_logo_npr_node,        /* CMPL_LOGO       */
3013        lpfc_cmpl_adisc_npr_node,       /* CMPL_ADISC      */
3014        lpfc_cmpl_reglogin_npr_node,    /* CMPL_REG_LOGIN  */
3015        lpfc_device_rm_npr_node,        /* DEVICE_RM       */
3016        lpfc_device_recov_npr_node,     /* DEVICE_RECOVERY */
3017};
3018
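/* lpfc_disc_state_machine - dispatch a discovery event for a node.
 *
 * Takes a reference on the ndlp, looks up the action routine for the
 * node's current state and the event in lpfc_disc_action, invokes it,
 * logs the DSM entry/exit, and then drops the reference. The arg pointer
 * carries the event-specific context (an iocb or mailbox, depending on
 * the event); for example, the ELS unsolicited receive path dispatches a
 * received PLOGI here with NLP_EVT_RCV_PLOGI and the receive iocb as arg.
 * Returns the resulting node state, or NLP_STE_FREED_NODE if the action
 * routine freed the node.
 */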
3019int
3020lpfc_disc_state_machine(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
3021                        void *arg, uint32_t evt)
3022{
3023        uint32_t cur_state, rc;
3024        uint32_t(*func) (struct lpfc_vport *, struct lpfc_nodelist *, void *,
3025                         uint32_t);
3026        uint32_t got_ndlp = 0;
3027        uint32_t data1;
3028
3029        if (lpfc_nlp_get(ndlp))
3030                got_ndlp = 1;
3031
3032        cur_state = ndlp->nlp_state;
3033
3034        data1 = (((uint32_t)ndlp->nlp_fc4_type << 16) |
3035                ((uint32_t)ndlp->nlp_type));
3036        /* DSM in event <evt> on NPort <nlp_DID> in state <cur_state> */
3037        lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
3038                         "0211 DSM in event x%x on NPort x%x in "
3039                         "state %d rpi x%x Data: x%x x%x\n",
3040                         evt, ndlp->nlp_DID, cur_state, ndlp->nlp_rpi,
3041                         ndlp->nlp_flag, data1);
3042
3043        lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_DSM,
3044                 "DSM in:          evt:%d ste:%d did:x%x",
3045                evt, cur_state, ndlp->nlp_DID);
3046
3047        func = lpfc_disc_action[(cur_state * NLP_EVT_MAX_EVENT) + evt];
3048        rc = (func) (vport, ndlp, arg, evt);
3049
3050        /* DSM out state <rc> on NPort <nlp_DID> */
3051        if (got_ndlp) {
3052                data1 = (((uint32_t)ndlp->nlp_fc4_type << 16) |
3053                        ((uint32_t)ndlp->nlp_type));
3054                lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
3055                         "0212 DSM out state %d on NPort x%x "
3056                         "rpi x%x Data: x%x x%x\n",
3057                         rc, ndlp->nlp_DID, ndlp->nlp_rpi, ndlp->nlp_flag,
3058                         data1);
3059
3060                lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_DSM,
3061                        "DSM out:         ste:%d did:x%x flg:x%x",
3062                        rc, ndlp->nlp_DID, ndlp->nlp_flag);
3063                /* Decrement the ndlp reference count held for this function */
3064                lpfc_nlp_put(ndlp);
3065        } else {
3066                lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
3067                        "0213 DSM out state %d on NPort free\n", rc);
3068
3069                lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_DSM,
3070                        "DSM out:         ste:%d did:x%x flg:x%x",
3071                        rc, 0, 0);
3072        }
3073
3074        return rc;
3075}
3076