linux/drivers/scsi/lpfc/lpfc_hbadisc.c
<<
>>
Prefs
   1/*******************************************************************
   2 * This file is part of the Emulex Linux Device Driver for         *
   3 * Fibre Channel Host Bus Adapters.                                *
   4 * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term *
   5 * “Broadcom” refers to Broadcom Limited and/or its subsidiaries.  *
   6 * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
   7 * EMULEX and SLI are trademarks of Emulex.                        *
   8 * www.broadcom.com                                                *
   9 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
  10 *                                                                 *
  11 * This program is free software; you can redistribute it and/or   *
  12 * modify it under the terms of version 2 of the GNU General       *
  13 * Public License as published by the Free Software Foundation.    *
  14 * This program is distributed in the hope that it will be useful. *
  15 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
  16 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
  17 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
  18 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
  19 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
  20 * more details, a copy of which can be found in the file COPYING  *
  21 * included with this package.                                     *
  22 *******************************************************************/
  23
  24#include <linux/blkdev.h>
  25#include <linux/delay.h>
  26#include <linux/slab.h>
  27#include <linux/pci.h>
  28#include <linux/kthread.h>
  29#include <linux/interrupt.h>
  30#include <linux/lockdep.h>
  31
  32#include <scsi/scsi.h>
  33#include <scsi/scsi_device.h>
  34#include <scsi/scsi_host.h>
  35#include <scsi/scsi_transport_fc.h>
  36#include <scsi/fc/fc_fs.h>
  37
  38#include <linux/nvme-fc-driver.h>
  39
  40#include "lpfc_hw4.h"
  41#include "lpfc_hw.h"
  42#include "lpfc_nl.h"
  43#include "lpfc_disc.h"
  44#include "lpfc_sli.h"
  45#include "lpfc_sli4.h"
  46#include "lpfc.h"
  47#include "lpfc_scsi.h"
  48#include "lpfc_nvme.h"
  49#include "lpfc_logmsg.h"
  50#include "lpfc_crtn.h"
  51#include "lpfc_vport.h"
  52#include "lpfc_debugfs.h"
  53
  54/* AlpaArray for assignment of scsid for scan-down and bind_method */
  55static uint8_t lpfcAlpaArray[] = {
  56        0xEF, 0xE8, 0xE4, 0xE2, 0xE1, 0xE0, 0xDC, 0xDA, 0xD9, 0xD6,
  57        0xD5, 0xD4, 0xD3, 0xD2, 0xD1, 0xCE, 0xCD, 0xCC, 0xCB, 0xCA,
  58        0xC9, 0xC7, 0xC6, 0xC5, 0xC3, 0xBC, 0xBA, 0xB9, 0xB6, 0xB5,
  59        0xB4, 0xB3, 0xB2, 0xB1, 0xAE, 0xAD, 0xAC, 0xAB, 0xAA, 0xA9,
  60        0xA7, 0xA6, 0xA5, 0xA3, 0x9F, 0x9E, 0x9D, 0x9B, 0x98, 0x97,
  61        0x90, 0x8F, 0x88, 0x84, 0x82, 0x81, 0x80, 0x7C, 0x7A, 0x79,
  62        0x76, 0x75, 0x74, 0x73, 0x72, 0x71, 0x6E, 0x6D, 0x6C, 0x6B,
  63        0x6A, 0x69, 0x67, 0x66, 0x65, 0x63, 0x5C, 0x5A, 0x59, 0x56,
  64        0x55, 0x54, 0x53, 0x52, 0x51, 0x4E, 0x4D, 0x4C, 0x4B, 0x4A,
  65        0x49, 0x47, 0x46, 0x45, 0x43, 0x3C, 0x3A, 0x39, 0x36, 0x35,
  66        0x34, 0x33, 0x32, 0x31, 0x2E, 0x2D, 0x2C, 0x2B, 0x2A, 0x29,
  67        0x27, 0x26, 0x25, 0x23, 0x1F, 0x1E, 0x1D, 0x1B, 0x18, 0x17,
  68        0x10, 0x0F, 0x08, 0x04, 0x02, 0x01
  69};
  70
  71static void lpfc_disc_timeout_handler(struct lpfc_vport *);
  72static void lpfc_disc_flush_list(struct lpfc_vport *vport);
  73static void lpfc_unregister_fcfi_cmpl(struct lpfc_hba *, LPFC_MBOXQ_t *);
  74static int lpfc_fcf_inuse(struct lpfc_hba *);
  75
  76void
  77lpfc_terminate_rport_io(struct fc_rport *rport)
  78{
  79        struct lpfc_rport_data *rdata;
  80        struct lpfc_nodelist * ndlp;
  81        struct lpfc_hba *phba;
  82
  83        rdata = rport->dd_data;
  84        ndlp = rdata->pnode;
  85
  86        if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
  87                if (rport->roles & FC_RPORT_ROLE_FCP_TARGET)
  88                        printk(KERN_ERR "Cannot find remote node"
  89                        " to terminate I/O Data x%x\n",
  90                        rport->port_id);
  91                return;
  92        }
  93
  94        phba  = ndlp->phba;
  95
  96        lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_RPORT,
  97                "rport terminate: sid:x%x did:x%x flg:x%x",
  98                ndlp->nlp_sid, ndlp->nlp_DID, ndlp->nlp_flag);
  99
 100        if (ndlp->nlp_sid != NLP_NO_SID) {
 101                lpfc_sli_abort_iocb(ndlp->vport,
 102                        &phba->sli.sli3_ring[LPFC_FCP_RING],
 103                        ndlp->nlp_sid, 0, LPFC_CTX_TGT);
 104        }
 105}
 106
 107/*
 108 * This function will be called when dev_loss_tmo fire.
 109 */
 110void
 111lpfc_dev_loss_tmo_callbk(struct fc_rport *rport)
 112{
 113        struct lpfc_rport_data *rdata;
 114        struct lpfc_nodelist * ndlp;
 115        struct lpfc_vport *vport;
 116        struct Scsi_Host *shost;
 117        struct lpfc_hba   *phba;
 118        struct lpfc_work_evt *evtp;
 119        int  put_node;
 120        int  put_rport;
 121
 122        rdata = rport->dd_data;
 123        ndlp = rdata->pnode;
 124        if (!ndlp || !NLP_CHK_NODE_ACT(ndlp))
 125                return;
 126
 127        vport = ndlp->vport;
 128        phba  = vport->phba;
 129
 130        lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT,
 131                "rport devlosscb: sid:x%x did:x%x flg:x%x",
 132                ndlp->nlp_sid, ndlp->nlp_DID, ndlp->nlp_flag);
 133
 134        lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
 135                         "3181 dev_loss_callbk x%06x, rport %p flg x%x\n",
 136                         ndlp->nlp_DID, ndlp->rport, ndlp->nlp_flag);
 137
 138        /* Don't defer this if we are in the process of deleting the vport
 139         * or unloading the driver. The unload will cleanup the node
 140         * appropriately we just need to cleanup the ndlp rport info here.
 141         */
 142        if (vport->load_flag & FC_UNLOADING) {
 143                put_node = rdata->pnode != NULL;
 144                put_rport = ndlp->rport != NULL;
 145                rdata->pnode = NULL;
 146                ndlp->rport = NULL;
 147                if (put_node)
 148                        lpfc_nlp_put(ndlp);
 149                if (put_rport)
 150                        put_device(&rport->dev);
 151                return;
 152        }
 153
 154        if (ndlp->nlp_state == NLP_STE_MAPPED_NODE)
 155                return;
 156
 157        if (rport->port_name != wwn_to_u64(ndlp->nlp_portname.u.wwn))
 158                lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE,
 159                                "6789 rport name %llx != node port name %llx",
 160                                rport->port_name,
 161                                wwn_to_u64(ndlp->nlp_portname.u.wwn));
 162
 163        evtp = &ndlp->dev_loss_evt;
 164
 165        if (!list_empty(&evtp->evt_listp)) {
 166                lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE,
 167                                "6790 rport name %llx dev_loss_evt pending",
 168                                rport->port_name);
 169                return;
 170        }
 171
 172        shost = lpfc_shost_from_vport(vport);
 173        spin_lock_irq(shost->host_lock);
 174        ndlp->nlp_flag |= NLP_IN_DEV_LOSS;
 175        spin_unlock_irq(shost->host_lock);
 176
 177        /* We need to hold the node by incrementing the reference
 178         * count until this queued work is done
 179         */
 180        evtp->evt_arg1  = lpfc_nlp_get(ndlp);
 181
 182        spin_lock_irq(&phba->hbalock);
 183        if (evtp->evt_arg1) {
 184                evtp->evt = LPFC_EVT_DEV_LOSS;
 185                list_add_tail(&evtp->evt_listp, &phba->work_list);
 186                lpfc_worker_wake_up(phba);
 187        }
 188        spin_unlock_irq(&phba->hbalock);
 189
 190        return;
 191}
 192
 193/**
 194 * lpfc_dev_loss_tmo_handler - Remote node devloss timeout handler
 195 * @ndlp: Pointer to remote node object.
 196 *
 197 * This function is called from the worker thread when devloss timeout timer
 198 * expires. For SLI4 host, this routine shall return 1 when at lease one
 199 * remote node, including this @ndlp, is still in use of FCF; otherwise, this
 200 * routine shall return 0 when there is no remote node is still in use of FCF
 201 * when devloss timeout happened to this @ndlp.
 202 **/
 203static int
 204lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
 205{
 206        struct lpfc_rport_data *rdata;
 207        struct fc_rport   *rport;
 208        struct lpfc_vport *vport;
 209        struct lpfc_hba   *phba;
 210        struct Scsi_Host  *shost;
 211        uint8_t *name;
 212        int  put_node;
 213        int warn_on = 0;
 214        int fcf_inuse = 0;
 215
 216        rport = ndlp->rport;
 217        vport = ndlp->vport;
 218        shost = lpfc_shost_from_vport(vport);
 219
 220        spin_lock_irq(shost->host_lock);
 221        ndlp->nlp_flag &= ~NLP_IN_DEV_LOSS;
 222        spin_unlock_irq(shost->host_lock);
 223
 224        if (!rport)
 225                return fcf_inuse;
 226
 227        name = (uint8_t *) &ndlp->nlp_portname;
 228        phba  = vport->phba;
 229
 230        if (phba->sli_rev == LPFC_SLI_REV4)
 231                fcf_inuse = lpfc_fcf_inuse(phba);
 232
 233        lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT,
 234                "rport devlosstmo:did:x%x type:x%x id:x%x",
 235                ndlp->nlp_DID, ndlp->nlp_type, rport->scsi_target_id);
 236
 237        lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
 238                         "3182 dev_loss_tmo_handler x%06x, rport %p flg x%x\n",
 239                         ndlp->nlp_DID, ndlp->rport, ndlp->nlp_flag);
 240
 241        /*
 242         * lpfc_nlp_remove if reached with dangling rport drops the
 243         * reference. To make sure that does not happen clear rport
 244         * pointer in ndlp before lpfc_nlp_put.
 245         */
 246        rdata = rport->dd_data;
 247
 248        /* Don't defer this if we are in the process of deleting the vport
 249         * or unloading the driver. The unload will cleanup the node
 250         * appropriately we just need to cleanup the ndlp rport info here.
 251         */
 252        if (vport->load_flag & FC_UNLOADING) {
 253                if (ndlp->nlp_sid != NLP_NO_SID) {
 254                        /* flush the target */
 255                        lpfc_sli_abort_iocb(vport,
 256                                            &phba->sli.sli3_ring[LPFC_FCP_RING],
 257                                            ndlp->nlp_sid, 0, LPFC_CTX_TGT);
 258                }
 259                put_node = rdata->pnode != NULL;
 260                rdata->pnode = NULL;
 261                ndlp->rport = NULL;
 262                if (put_node)
 263                        lpfc_nlp_put(ndlp);
 264                put_device(&rport->dev);
 265
 266                return fcf_inuse;
 267        }
 268
 269        if (ndlp->nlp_state == NLP_STE_MAPPED_NODE) {
 270                lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
 271                                 "0284 Devloss timeout Ignored on "
 272                                 "WWPN %x:%x:%x:%x:%x:%x:%x:%x "
 273                                 "NPort x%x\n",
 274                                 *name, *(name+1), *(name+2), *(name+3),
 275                                 *(name+4), *(name+5), *(name+6), *(name+7),
 276                                 ndlp->nlp_DID);
 277                return fcf_inuse;
 278        }
 279
 280        put_node = rdata->pnode != NULL;
 281        rdata->pnode = NULL;
 282        ndlp->rport = NULL;
 283        if (put_node)
 284                lpfc_nlp_put(ndlp);
 285        put_device(&rport->dev);
 286
 287        if (ndlp->nlp_type & NLP_FABRIC)
 288                return fcf_inuse;
 289
 290        if (ndlp->nlp_sid != NLP_NO_SID) {
 291                warn_on = 1;
 292                lpfc_sli_abort_iocb(vport, &phba->sli.sli3_ring[LPFC_FCP_RING],
 293                                    ndlp->nlp_sid, 0, LPFC_CTX_TGT);
 294        }
 295
 296        if (warn_on) {
 297                lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
 298                                 "0203 Devloss timeout on "
 299                                 "WWPN %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x "
 300                                 "NPort x%06x Data: x%x x%x x%x\n",
 301                                 *name, *(name+1), *(name+2), *(name+3),
 302                                 *(name+4), *(name+5), *(name+6), *(name+7),
 303                                 ndlp->nlp_DID, ndlp->nlp_flag,
 304                                 ndlp->nlp_state, ndlp->nlp_rpi);
 305        } else {
 306                lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
 307                                 "0204 Devloss timeout on "
 308                                 "WWPN %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x "
 309                                 "NPort x%06x Data: x%x x%x x%x\n",
 310                                 *name, *(name+1), *(name+2), *(name+3),
 311                                 *(name+4), *(name+5), *(name+6), *(name+7),
 312                                 ndlp->nlp_DID, ndlp->nlp_flag,
 313                                 ndlp->nlp_state, ndlp->nlp_rpi);
 314        }
 315
 316        if (!(ndlp->nlp_flag & NLP_DELAY_TMO) &&
 317            !(ndlp->nlp_flag & NLP_NPR_2B_DISC) &&
 318            (ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
 319            (ndlp->nlp_state != NLP_STE_REG_LOGIN_ISSUE) &&
 320            (ndlp->nlp_state != NLP_STE_PRLI_ISSUE))
 321                lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM);
 322
 323        return fcf_inuse;
 324}
 325
 326/**
 327 * lpfc_sli4_post_dev_loss_tmo_handler - SLI4 post devloss timeout handler
 328 * @phba: Pointer to hba context object.
 329 * @fcf_inuse: SLI4 FCF in-use state reported from devloss timeout handler.
 330 * @nlp_did: remote node identifer with devloss timeout.
 331 *
 332 * This function is called from the worker thread after invoking devloss
 333 * timeout handler and releasing the reference count for the ndlp with
 334 * which the devloss timeout was handled for SLI4 host. For the devloss
 335 * timeout of the last remote node which had been in use of FCF, when this
 336 * routine is invoked, it shall be guaranteed that none of the remote are
 337 * in-use of FCF. When devloss timeout to the last remote using the FCF,
 338 * if the FIP engine is neither in FCF table scan process nor roundrobin
 339 * failover process, the in-use FCF shall be unregistered. If the FIP
 340 * engine is in FCF discovery process, the devloss timeout state shall
 341 * be set for either the FCF table scan process or roundrobin failover
 342 * process to unregister the in-use FCF.
 343 **/
 344static void
 345lpfc_sli4_post_dev_loss_tmo_handler(struct lpfc_hba *phba, int fcf_inuse,
 346                                    uint32_t nlp_did)
 347{
 348        /* If devloss timeout happened to a remote node when FCF had no
 349         * longer been in-use, do nothing.
 350         */
 351        if (!fcf_inuse)
 352                return;
 353
 354        if ((phba->hba_flag & HBA_FIP_SUPPORT) && !lpfc_fcf_inuse(phba)) {
 355                spin_lock_irq(&phba->hbalock);
 356                if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
 357                        if (phba->hba_flag & HBA_DEVLOSS_TMO) {
 358                                spin_unlock_irq(&phba->hbalock);
 359                                return;
 360                        }
 361                        phba->hba_flag |= HBA_DEVLOSS_TMO;
 362                        lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
 363                                        "2847 Last remote node (x%x) using "
 364                                        "FCF devloss tmo\n", nlp_did);
 365                }
 366                if (phba->fcf.fcf_flag & FCF_REDISC_PROG) {
 367                        spin_unlock_irq(&phba->hbalock);
 368                        lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
 369                                        "2868 Devloss tmo to FCF rediscovery "
 370                                        "in progress\n");
 371                        return;
 372                }
 373                if (!(phba->hba_flag & (FCF_TS_INPROG | FCF_RR_INPROG))) {
 374                        spin_unlock_irq(&phba->hbalock);
 375                        lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
 376                                        "2869 Devloss tmo to idle FIP engine, "
 377                                        "unreg in-use FCF and rescan.\n");
 378                        /* Unregister in-use FCF and rescan */
 379                        lpfc_unregister_fcf_rescan(phba);
 380                        return;
 381                }
 382                spin_unlock_irq(&phba->hbalock);
 383                if (phba->hba_flag & FCF_TS_INPROG)
 384                        lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
 385                                        "2870 FCF table scan in progress\n");
 386                if (phba->hba_flag & FCF_RR_INPROG)
 387                        lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
 388                                        "2871 FLOGI roundrobin FCF failover "
 389                                        "in progress\n");
 390        }
 391        lpfc_unregister_unused_fcf(phba);
 392}
 393
 394/**
 395 * lpfc_alloc_fast_evt - Allocates data structure for posting event
 396 * @phba: Pointer to hba context object.
 397 *
 398 * This function is called from the functions which need to post
 399 * events from interrupt context. This function allocates data
 400 * structure required for posting event. It also keeps track of
 401 * number of events pending and prevent event storm when there are
 402 * too many events.
 403 **/
 404struct lpfc_fast_path_event *
 405lpfc_alloc_fast_evt(struct lpfc_hba *phba) {
 406        struct lpfc_fast_path_event *ret;
 407
 408        /* If there are lot of fast event do not exhaust memory due to this */
 409        if (atomic_read(&phba->fast_event_count) > LPFC_MAX_EVT_COUNT)
 410                return NULL;
 411
 412        ret = kzalloc(sizeof(struct lpfc_fast_path_event),
 413                        GFP_ATOMIC);
 414        if (ret) {
 415                atomic_inc(&phba->fast_event_count);
 416                INIT_LIST_HEAD(&ret->work_evt.evt_listp);
 417                ret->work_evt.evt = LPFC_EVT_FASTPATH_MGMT_EVT;
 418        }
 419        return ret;
 420}
 421
 422/**
 423 * lpfc_free_fast_evt - Frees event data structure
 424 * @phba: Pointer to hba context object.
 425 * @evt:  Event object which need to be freed.
 426 *
 427 * This function frees the data structure required for posting
 428 * events.
 429 **/
 430void
 431lpfc_free_fast_evt(struct lpfc_hba *phba,
 432                struct lpfc_fast_path_event *evt) {
 433
 434        atomic_dec(&phba->fast_event_count);
 435        kfree(evt);
 436}
 437
 438/**
 439 * lpfc_send_fastpath_evt - Posts events generated from fast path
 440 * @phba: Pointer to hba context object.
 441 * @evtp: Event data structure.
 442 *
 443 * This function is called from worker thread, when the interrupt
 444 * context need to post an event. This function posts the event
 445 * to fc transport netlink interface.
 446 **/
 447static void
 448lpfc_send_fastpath_evt(struct lpfc_hba *phba,
 449                struct lpfc_work_evt *evtp)
 450{
 451        unsigned long evt_category, evt_sub_category;
 452        struct lpfc_fast_path_event *fast_evt_data;
 453        char *evt_data;
 454        uint32_t evt_data_size;
 455        struct Scsi_Host *shost;
 456
 457        fast_evt_data = container_of(evtp, struct lpfc_fast_path_event,
 458                work_evt);
 459
 460        evt_category = (unsigned long) fast_evt_data->un.fabric_evt.event_type;
 461        evt_sub_category = (unsigned long) fast_evt_data->un.
 462                        fabric_evt.subcategory;
 463        shost = lpfc_shost_from_vport(fast_evt_data->vport);
 464        if (evt_category == FC_REG_FABRIC_EVENT) {
 465                if (evt_sub_category == LPFC_EVENT_FCPRDCHKERR) {
 466                        evt_data = (char *) &fast_evt_data->un.read_check_error;
 467                        evt_data_size = sizeof(fast_evt_data->un.
 468                                read_check_error);
 469                } else if ((evt_sub_category == LPFC_EVENT_FABRIC_BUSY) ||
 470                        (evt_sub_category == LPFC_EVENT_PORT_BUSY)) {
 471                        evt_data = (char *) &fast_evt_data->un.fabric_evt;
 472                        evt_data_size = sizeof(fast_evt_data->un.fabric_evt);
 473                } else {
 474                        lpfc_free_fast_evt(phba, fast_evt_data);
 475                        return;
 476                }
 477        } else if (evt_category == FC_REG_SCSI_EVENT) {
 478                switch (evt_sub_category) {
 479                case LPFC_EVENT_QFULL:
 480                case LPFC_EVENT_DEVBSY:
 481                        evt_data = (char *) &fast_evt_data->un.scsi_evt;
 482                        evt_data_size = sizeof(fast_evt_data->un.scsi_evt);
 483                        break;
 484                case LPFC_EVENT_CHECK_COND:
 485                        evt_data = (char *) &fast_evt_data->un.check_cond_evt;
 486                        evt_data_size =  sizeof(fast_evt_data->un.
 487                                check_cond_evt);
 488                        break;
 489                case LPFC_EVENT_VARQUEDEPTH:
 490                        evt_data = (char *) &fast_evt_data->un.queue_depth_evt;
 491                        evt_data_size = sizeof(fast_evt_data->un.
 492                                queue_depth_evt);
 493                        break;
 494                default:
 495                        lpfc_free_fast_evt(phba, fast_evt_data);
 496                        return;
 497                }
 498        } else {
 499                lpfc_free_fast_evt(phba, fast_evt_data);
 500                return;
 501        }
 502
 503        if (phba->cfg_enable_fc4_type != LPFC_ENABLE_NVME)
 504                fc_host_post_vendor_event(shost,
 505                        fc_get_event_number(),
 506                        evt_data_size,
 507                        evt_data,
 508                        LPFC_NL_VENDOR_ID);
 509
 510        lpfc_free_fast_evt(phba, fast_evt_data);
 511        return;
 512}
 513
 514static void
 515lpfc_work_list_done(struct lpfc_hba *phba)
 516{
 517        struct lpfc_work_evt  *evtp = NULL;
 518        struct lpfc_nodelist  *ndlp;
 519        int free_evt;
 520        int fcf_inuse;
 521        uint32_t nlp_did;
 522
 523        spin_lock_irq(&phba->hbalock);
 524        while (!list_empty(&phba->work_list)) {
 525                list_remove_head((&phba->work_list), evtp, typeof(*evtp),
 526                                 evt_listp);
 527                spin_unlock_irq(&phba->hbalock);
 528                free_evt = 1;
 529                switch (evtp->evt) {
 530                case LPFC_EVT_ELS_RETRY:
 531                        ndlp = (struct lpfc_nodelist *) (evtp->evt_arg1);
 532                        lpfc_els_retry_delay_handler(ndlp);
 533                        free_evt = 0; /* evt is part of ndlp */
 534                        /* decrement the node reference count held
 535                         * for this queued work
 536                         */
 537                        lpfc_nlp_put(ndlp);
 538                        break;
 539                case LPFC_EVT_DEV_LOSS:
 540                        ndlp = (struct lpfc_nodelist *)(evtp->evt_arg1);
 541                        fcf_inuse = lpfc_dev_loss_tmo_handler(ndlp);
 542                        free_evt = 0;
 543                        /* decrement the node reference count held for
 544                         * this queued work
 545                         */
 546                        nlp_did = ndlp->nlp_DID;
 547                        lpfc_nlp_put(ndlp);
 548                        if (phba->sli_rev == LPFC_SLI_REV4)
 549                                lpfc_sli4_post_dev_loss_tmo_handler(phba,
 550                                                                    fcf_inuse,
 551                                                                    nlp_did);
 552                        break;
 553                case LPFC_EVT_ONLINE:
 554                        if (phba->link_state < LPFC_LINK_DOWN)
 555                                *(int *) (evtp->evt_arg1) = lpfc_online(phba);
 556                        else
 557                                *(int *) (evtp->evt_arg1) = 0;
 558                        complete((struct completion *)(evtp->evt_arg2));
 559                        break;
 560                case LPFC_EVT_OFFLINE_PREP:
 561                        if (phba->link_state >= LPFC_LINK_DOWN)
 562                                lpfc_offline_prep(phba, LPFC_MBX_WAIT);
 563                        *(int *)(evtp->evt_arg1) = 0;
 564                        complete((struct completion *)(evtp->evt_arg2));
 565                        break;
 566                case LPFC_EVT_OFFLINE:
 567                        lpfc_offline(phba);
 568                        lpfc_sli_brdrestart(phba);
 569                        *(int *)(evtp->evt_arg1) =
 570                                lpfc_sli_brdready(phba, HS_FFRDY | HS_MBRDY);
 571                        lpfc_unblock_mgmt_io(phba);
 572                        complete((struct completion *)(evtp->evt_arg2));
 573                        break;
 574                case LPFC_EVT_WARM_START:
 575                        lpfc_offline(phba);
 576                        lpfc_reset_barrier(phba);
 577                        lpfc_sli_brdreset(phba);
 578                        lpfc_hba_down_post(phba);
 579                        *(int *)(evtp->evt_arg1) =
 580                                lpfc_sli_brdready(phba, HS_MBRDY);
 581                        lpfc_unblock_mgmt_io(phba);
 582                        complete((struct completion *)(evtp->evt_arg2));
 583                        break;
 584                case LPFC_EVT_KILL:
 585                        lpfc_offline(phba);
 586                        *(int *)(evtp->evt_arg1)
 587                                = (phba->pport->stopped)
 588                                        ? 0 : lpfc_sli_brdkill(phba);
 589                        lpfc_unblock_mgmt_io(phba);
 590                        complete((struct completion *)(evtp->evt_arg2));
 591                        break;
 592                case LPFC_EVT_FASTPATH_MGMT_EVT:
 593                        lpfc_send_fastpath_evt(phba, evtp);
 594                        free_evt = 0;
 595                        break;
 596                case LPFC_EVT_RESET_HBA:
 597                        if (!(phba->pport->load_flag & FC_UNLOADING))
 598                                lpfc_reset_hba(phba);
 599                        break;
 600                }
 601                if (free_evt)
 602                        kfree(evtp);
 603                spin_lock_irq(&phba->hbalock);
 604        }
 605        spin_unlock_irq(&phba->hbalock);
 606
 607}
 608
 609static void
 610lpfc_work_done(struct lpfc_hba *phba)
 611{
 612        struct lpfc_sli_ring *pring;
 613        uint32_t ha_copy, status, control, work_port_events;
 614        struct lpfc_vport **vports;
 615        struct lpfc_vport *vport;
 616        int i;
 617
 618        spin_lock_irq(&phba->hbalock);
 619        ha_copy = phba->work_ha;
 620        phba->work_ha = 0;
 621        spin_unlock_irq(&phba->hbalock);
 622
 623        /* First, try to post the next mailbox command to SLI4 device */
 624        if (phba->pci_dev_grp == LPFC_PCI_DEV_OC)
 625                lpfc_sli4_post_async_mbox(phba);
 626
 627        if (ha_copy & HA_ERATT)
 628                /* Handle the error attention event */
 629                lpfc_handle_eratt(phba);
 630
 631        if (ha_copy & HA_MBATT)
 632                lpfc_sli_handle_mb_event(phba);
 633
 634        if (ha_copy & HA_LATT)
 635                lpfc_handle_latt(phba);
 636
 637        /* Process SLI4 events */
 638        if (phba->pci_dev_grp == LPFC_PCI_DEV_OC) {
 639                if (phba->hba_flag & HBA_RRQ_ACTIVE)
 640                        lpfc_handle_rrq_active(phba);
 641                if (phba->hba_flag & FCP_XRI_ABORT_EVENT)
 642                        lpfc_sli4_fcp_xri_abort_event_proc(phba);
 643                if (phba->hba_flag & ELS_XRI_ABORT_EVENT)
 644                        lpfc_sli4_els_xri_abort_event_proc(phba);
 645                if (phba->hba_flag & ASYNC_EVENT)
 646                        lpfc_sli4_async_event_proc(phba);
 647                if (phba->hba_flag & HBA_POST_RECEIVE_BUFFER) {
 648                        spin_lock_irq(&phba->hbalock);
 649                        phba->hba_flag &= ~HBA_POST_RECEIVE_BUFFER;
 650                        spin_unlock_irq(&phba->hbalock);
 651                        lpfc_sli_hbqbuf_add_hbqs(phba, LPFC_ELS_HBQ);
 652                }
 653                if (phba->fcf.fcf_flag & FCF_REDISC_EVT)
 654                        lpfc_sli4_fcf_redisc_event_proc(phba);
 655        }
 656
 657        vports = lpfc_create_vport_work_array(phba);
 658        if (vports != NULL)
 659                for (i = 0; i <= phba->max_vports; i++) {
 660                        /*
 661                         * We could have no vports in array if unloading, so if
 662                         * this happens then just use the pport
 663                         */
 664                        if (vports[i] == NULL && i == 0)
 665                                vport = phba->pport;
 666                        else
 667                                vport = vports[i];
 668                        if (vport == NULL)
 669                                break;
 670                        spin_lock_irq(&vport->work_port_lock);
 671                        work_port_events = vport->work_port_events;
 672                        vport->work_port_events &= ~work_port_events;
 673                        spin_unlock_irq(&vport->work_port_lock);
 674                        if (work_port_events & WORKER_DISC_TMO)
 675                                lpfc_disc_timeout_handler(vport);
 676                        if (work_port_events & WORKER_ELS_TMO)
 677                                lpfc_els_timeout_handler(vport);
 678                        if (work_port_events & WORKER_HB_TMO)
 679                                lpfc_hb_timeout_handler(phba);
 680                        if (work_port_events & WORKER_MBOX_TMO)
 681                                lpfc_mbox_timeout_handler(phba);
 682                        if (work_port_events & WORKER_FABRIC_BLOCK_TMO)
 683                                lpfc_unblock_fabric_iocbs(phba);
 684                        if (work_port_events & WORKER_RAMP_DOWN_QUEUE)
 685                                lpfc_ramp_down_queue_handler(phba);
 686                        if (work_port_events & WORKER_DELAYED_DISC_TMO)
 687                                lpfc_delayed_disc_timeout_handler(vport);
 688                }
 689        lpfc_destroy_vport_work_array(phba, vports);
 690
 691        pring = lpfc_phba_elsring(phba);
 692        status = (ha_copy & (HA_RXMASK  << (4*LPFC_ELS_RING)));
 693        status >>= (4*LPFC_ELS_RING);
 694        if (pring && (status & HA_RXMASK ||
 695                      pring->flag & LPFC_DEFERRED_RING_EVENT ||
 696                      phba->hba_flag & HBA_SP_QUEUE_EVT)) {
 697                if (pring->flag & LPFC_STOP_IOCB_EVENT) {
 698                        pring->flag |= LPFC_DEFERRED_RING_EVENT;
 699                        /* Preserve legacy behavior. */
 700                        if (!(phba->hba_flag & HBA_SP_QUEUE_EVT))
 701                                set_bit(LPFC_DATA_READY, &phba->data_flags);
 702                } else {
 703                        if (phba->link_state >= LPFC_LINK_UP ||
 704                            phba->link_flag & LS_MDS_LOOPBACK) {
 705                                pring->flag &= ~LPFC_DEFERRED_RING_EVENT;
 706                                lpfc_sli_handle_slow_ring_event(phba, pring,
 707                                                                (status &
 708                                                                HA_RXMASK));
 709                        }
 710                }
 711                if ((phba->sli_rev == LPFC_SLI_REV4) &&
 712                                 (!list_empty(&pring->txq)))
 713                        lpfc_drain_txq(phba);
 714                /*
 715                 * Turn on Ring interrupts
 716                 */
 717                if (phba->sli_rev <= LPFC_SLI_REV3) {
 718                        spin_lock_irq(&phba->hbalock);
 719                        control = readl(phba->HCregaddr);
 720                        if (!(control & (HC_R0INT_ENA << LPFC_ELS_RING))) {
 721                                lpfc_debugfs_slow_ring_trc(phba,
 722                                        "WRK Enable ring: cntl:x%x hacopy:x%x",
 723                                        control, ha_copy, 0);
 724
 725                                control |= (HC_R0INT_ENA << LPFC_ELS_RING);
 726                                writel(control, phba->HCregaddr);
 727                                readl(phba->HCregaddr); /* flush */
 728                        } else {
 729                                lpfc_debugfs_slow_ring_trc(phba,
 730                                        "WRK Ring ok:     cntl:x%x hacopy:x%x",
 731                                        control, ha_copy, 0);
 732                        }
 733                        spin_unlock_irq(&phba->hbalock);
 734                }
 735        }
 736        lpfc_work_list_done(phba);
 737}
 738
 739int
 740lpfc_do_work(void *p)
 741{
 742        struct lpfc_hba *phba = p;
 743        int rc;
 744
 745        set_user_nice(current, MIN_NICE);
 746        current->flags |= PF_NOFREEZE;
 747        phba->data_flags = 0;
 748
 749        while (!kthread_should_stop()) {
 750                /* wait and check worker queue activities */
 751                rc = wait_event_interruptible(phba->work_waitq,
 752                                        (test_and_clear_bit(LPFC_DATA_READY,
 753                                                            &phba->data_flags)
 754                                         || kthread_should_stop()));
 755                /* Signal wakeup shall terminate the worker thread */
 756                if (rc) {
 757                        lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
 758                                        "0433 Wakeup on signal: rc=x%x\n", rc);
 759                        break;
 760                }
 761
 762                /* Attend pending lpfc data processing */
 763                lpfc_work_done(phba);
 764        }
 765        phba->worker_thread = NULL;
 766        lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
 767                        "0432 Worker thread stopped.\n");
 768        return 0;
 769}
 770
 771/*
 772 * This is only called to handle FC worker events. Since this a rare
 773 * occurrence, we allocate a struct lpfc_work_evt structure here instead of
 774 * embedding it in the IOCB.
 775 */
 776int
 777lpfc_workq_post_event(struct lpfc_hba *phba, void *arg1, void *arg2,
 778                      uint32_t evt)
 779{
 780        struct lpfc_work_evt  *evtp;
 781        unsigned long flags;
 782
 783        /*
 784         * All Mailbox completions and LPFC_ELS_RING rcv ring IOCB events will
 785         * be queued to worker thread for processing
 786         */
 787        evtp = kmalloc(sizeof(struct lpfc_work_evt), GFP_ATOMIC);
 788        if (!evtp)
 789                return 0;
 790
 791        evtp->evt_arg1  = arg1;
 792        evtp->evt_arg2  = arg2;
 793        evtp->evt       = evt;
 794
 795        spin_lock_irqsave(&phba->hbalock, flags);
 796        list_add_tail(&evtp->evt_listp, &phba->work_list);
 797        spin_unlock_irqrestore(&phba->hbalock, flags);
 798
 799        lpfc_worker_wake_up(phba);
 800
 801        return 1;
 802}
 803
 804void
 805lpfc_cleanup_rpis(struct lpfc_vport *vport, int remove)
 806{
 807        struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
 808        struct lpfc_hba  *phba = vport->phba;
 809        struct lpfc_nodelist *ndlp, *next_ndlp;
 810
 811        list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
 812                if (!NLP_CHK_NODE_ACT(ndlp))
 813                        continue;
 814                if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
 815                        continue;
 816                if ((phba->sli3_options & LPFC_SLI3_VPORT_TEARDOWN) ||
 817                        ((vport->port_type == LPFC_NPIV_PORT) &&
 818                        (ndlp->nlp_DID == NameServer_DID)))
 819                        lpfc_unreg_rpi(vport, ndlp);
 820
 821                /* Leave Fabric nodes alone on link down */
 822                if ((phba->sli_rev < LPFC_SLI_REV4) &&
 823                    (!remove && ndlp->nlp_type & NLP_FABRIC))
 824                        continue;
 825                lpfc_disc_state_machine(vport, ndlp, NULL,
 826                                        remove
 827                                        ? NLP_EVT_DEVICE_RM
 828                                        : NLP_EVT_DEVICE_RECOVERY);
 829        }
 830        if (phba->sli3_options & LPFC_SLI3_VPORT_TEARDOWN) {
 831                if (phba->sli_rev == LPFC_SLI_REV4)
 832                        lpfc_sli4_unreg_all_rpis(vport);
 833                lpfc_mbx_unreg_vpi(vport);
 834                spin_lock_irq(shost->host_lock);
 835                vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
 836                spin_unlock_irq(shost->host_lock);
 837        }
 838}
 839
 840void
 841lpfc_port_link_failure(struct lpfc_vport *vport)
 842{
 843        lpfc_vport_set_state(vport, FC_VPORT_LINKDOWN);
 844
 845        /* Cleanup any outstanding received buffers */
 846        lpfc_cleanup_rcv_buffers(vport);
 847
 848        /* Cleanup any outstanding RSCN activity */
 849        lpfc_els_flush_rscn(vport);
 850
 851        /* Cleanup any outstanding ELS commands */
 852        lpfc_els_flush_cmd(vport);
 853
 854        lpfc_cleanup_rpis(vport, 0);
 855
 856        /* Turn off discovery timer if its running */
 857        lpfc_can_disctmo(vport);
 858}
 859
 860void
 861lpfc_linkdown_port(struct lpfc_vport *vport)
 862{
 863        struct lpfc_hba  *phba = vport->phba;
 864        struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
 865
 866        if (phba->cfg_enable_fc4_type != LPFC_ENABLE_NVME)
 867                fc_host_post_event(shost, fc_get_event_number(),
 868                                   FCH_EVT_LINKDOWN, 0);
 869
 870        lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
 871                "Link Down:       state:x%x rtry:x%x flg:x%x",
 872                vport->port_state, vport->fc_ns_retry, vport->fc_flag);
 873
 874        lpfc_port_link_failure(vport);
 875
 876        /* Stop delayed Nport discovery */
 877        spin_lock_irq(shost->host_lock);
 878        vport->fc_flag &= ~FC_DISC_DELAYED;
 879        spin_unlock_irq(shost->host_lock);
 880        del_timer_sync(&vport->delayed_disc_tmo);
 881}
 882
 883int
 884lpfc_linkdown(struct lpfc_hba *phba)
 885{
 886        struct lpfc_vport *vport = phba->pport;
 887        struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
 888        struct lpfc_vport **vports;
 889        LPFC_MBOXQ_t          *mb;
 890        int i;
 891
 892        if (phba->link_state == LPFC_LINK_DOWN)
 893                return 0;
 894
 895        /* Block all SCSI stack I/Os */
 896        lpfc_scsi_dev_block(phba);
 897
 898        spin_lock_irq(&phba->hbalock);
 899        phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_SCAN_DONE);
 900        spin_unlock_irq(&phba->hbalock);
 901        if (phba->link_state > LPFC_LINK_DOWN) {
 902                phba->link_state = LPFC_LINK_DOWN;
 903                spin_lock_irq(shost->host_lock);
 904                phba->pport->fc_flag &= ~FC_LBIT;
 905                spin_unlock_irq(shost->host_lock);
 906        }
 907        vports = lpfc_create_vport_work_array(phba);
 908        if (vports != NULL) {
 909                for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
 910                        /* Issue a LINK DOWN event to all nodes */
 911                        lpfc_linkdown_port(vports[i]);
 912
 913                        vports[i]->fc_myDID = 0;
 914
 915                        if ((phba->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) ||
 916                            (phba->cfg_enable_fc4_type == LPFC_ENABLE_NVME)) {
 917                                if (phba->nvmet_support)
 918                                        lpfc_nvmet_update_targetport(phba);
 919                                else
 920                                        lpfc_nvme_update_localport(vports[i]);
 921                        }
 922                }
 923        }
 924        lpfc_destroy_vport_work_array(phba, vports);
 925        /* Clean up any firmware default rpi's */
 926        mb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
 927        if (mb) {
 928                lpfc_unreg_did(phba, 0xffff, LPFC_UNREG_ALL_DFLT_RPIS, mb);
 929                mb->vport = vport;
 930                mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
 931                if (lpfc_sli_issue_mbox(phba, mb, MBX_NOWAIT)
 932                    == MBX_NOT_FINISHED) {
 933                        mempool_free(mb, phba->mbox_mem_pool);
 934                }
 935        }
 936
 937        /* Setup myDID for link up if we are in pt2pt mode */
 938        if (phba->pport->fc_flag & FC_PT2PT) {
 939                mb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
 940                if (mb) {
 941                        lpfc_config_link(phba, mb);
 942                        mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
 943                        mb->vport = vport;
 944                        if (lpfc_sli_issue_mbox(phba, mb, MBX_NOWAIT)
 945                            == MBX_NOT_FINISHED) {
 946                                mempool_free(mb, phba->mbox_mem_pool);
 947                        }
 948                }
 949                spin_lock_irq(shost->host_lock);
 950                phba->pport->fc_flag &= ~(FC_PT2PT | FC_PT2PT_PLOGI);
 951                spin_unlock_irq(shost->host_lock);
 952        }
 953        return 0;
 954}
 955
 956static void
 957lpfc_linkup_cleanup_nodes(struct lpfc_vport *vport)
 958{
 959        struct lpfc_nodelist *ndlp;
 960
 961        list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
 962                ndlp->nlp_fc4_type &= ~(NLP_FC4_FCP | NLP_FC4_NVME);
 963                if (!NLP_CHK_NODE_ACT(ndlp))
 964                        continue;
 965                if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
 966                        continue;
 967                if (ndlp->nlp_type & NLP_FABRIC) {
 968                        /* On Linkup its safe to clean up the ndlp
 969                         * from Fabric connections.
 970                         */
 971                        if (ndlp->nlp_DID != Fabric_DID)
 972                                lpfc_unreg_rpi(vport, ndlp);
 973                        lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
 974                } else if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) {
 975                        /* Fail outstanding IO now since device is
 976                         * marked for PLOGI.
 977                         */
 978                        lpfc_unreg_rpi(vport, ndlp);
 979                }
 980        }
 981}
 982
 983static void
 984lpfc_linkup_port(struct lpfc_vport *vport)
 985{
 986        struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
 987        struct lpfc_hba  *phba = vport->phba;
 988
 989        if ((vport->load_flag & FC_UNLOADING) != 0)
 990                return;
 991
 992        lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
 993                "Link Up:         top:x%x speed:x%x flg:x%x",
 994                phba->fc_topology, phba->fc_linkspeed, phba->link_flag);
 995
 996        /* If NPIV is not enabled, only bring the physical port up */
 997        if (!(phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
 998                (vport != phba->pport))
 999                return;
1000
1001        if (phba->cfg_enable_fc4_type != LPFC_ENABLE_NVME)
1002                fc_host_post_event(shost, fc_get_event_number(),
1003                                   FCH_EVT_LINKUP, 0);
1004
1005        spin_lock_irq(shost->host_lock);
1006        vport->fc_flag &= ~(FC_PT2PT | FC_PT2PT_PLOGI | FC_ABORT_DISCOVERY |
1007                            FC_RSCN_MODE | FC_NLP_MORE | FC_RSCN_DISCOVERY);
1008        vport->fc_flag |= FC_NDISC_ACTIVE;
1009        vport->fc_ns_retry = 0;
1010        spin_unlock_irq(shost->host_lock);
1011
1012        if (vport->fc_flag & FC_LBIT)
1013                lpfc_linkup_cleanup_nodes(vport);
1014
1015}
1016
1017static int
1018lpfc_linkup(struct lpfc_hba *phba)
1019{
1020        struct lpfc_vport **vports;
1021        int i;
1022
1023        phba->link_state = LPFC_LINK_UP;
1024
1025        /* Unblock fabric iocbs if they are blocked */
1026        clear_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags);
1027        del_timer_sync(&phba->fabric_block_timer);
1028
1029        vports = lpfc_create_vport_work_array(phba);
1030        if (vports != NULL)
1031                for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
1032                        lpfc_linkup_port(vports[i]);
1033        lpfc_destroy_vport_work_array(phba, vports);
1034
1035        return 0;
1036}
1037
1038/*
1039 * This routine handles processing a CLEAR_LA mailbox
1040 * command upon completion. It is setup in the LPFC_MBOXQ
1041 * as the completion routine when the command is
1042 * handed off to the SLI layer. SLI3 only.
1043 */
1044static void
1045lpfc_mbx_cmpl_clear_la(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
1046{
1047        struct lpfc_vport *vport = pmb->vport;
1048        struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
1049        struct lpfc_sli   *psli = &phba->sli;
1050        MAILBOX_t *mb = &pmb->u.mb;
1051        uint32_t control;
1052
1053        /* Since we don't do discovery right now, turn these off here */
1054        psli->sli3_ring[LPFC_EXTRA_RING].flag &= ~LPFC_STOP_IOCB_EVENT;
1055        psli->sli3_ring[LPFC_FCP_RING].flag &= ~LPFC_STOP_IOCB_EVENT;
1056
1057        /* Check for error */
1058        if ((mb->mbxStatus) && (mb->mbxStatus != 0x1601)) {
1059                /* CLEAR_LA mbox error <mbxStatus> state <hba_state> */
1060                lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
1061                                 "0320 CLEAR_LA mbxStatus error x%x hba "
1062                                 "state x%x\n",
1063                                 mb->mbxStatus, vport->port_state);
1064                phba->link_state = LPFC_HBA_ERROR;
1065                goto out;
1066        }
1067
1068        if (vport->port_type == LPFC_PHYSICAL_PORT)
1069                phba->link_state = LPFC_HBA_READY;
1070
1071        spin_lock_irq(&phba->hbalock);
1072        psli->sli_flag |= LPFC_PROCESS_LA;
1073        control = readl(phba->HCregaddr);
1074        control |= HC_LAINT_ENA;
1075        writel(control, phba->HCregaddr);
1076        readl(phba->HCregaddr); /* flush */
1077        spin_unlock_irq(&phba->hbalock);
1078        mempool_free(pmb, phba->mbox_mem_pool);
1079        return;
1080
1081out:
1082        /* Device Discovery completes */
1083        lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
1084                         "0225 Device Discovery completes\n");
1085        mempool_free(pmb, phba->mbox_mem_pool);
1086
1087        spin_lock_irq(shost->host_lock);
1088        vport->fc_flag &= ~FC_ABORT_DISCOVERY;
1089        spin_unlock_irq(shost->host_lock);
1090
1091        lpfc_can_disctmo(vport);
1092
1093        /* turn on Link Attention interrupts */
1094
1095        spin_lock_irq(&phba->hbalock);
1096        psli->sli_flag |= LPFC_PROCESS_LA;
1097        control = readl(phba->HCregaddr);
1098        control |= HC_LAINT_ENA;
1099        writel(control, phba->HCregaddr);
1100        readl(phba->HCregaddr); /* flush */
1101        spin_unlock_irq(&phba->hbalock);
1102
1103        return;
1104}
1105
1106
1107void
1108lpfc_mbx_cmpl_local_config_link(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
1109{
1110        struct lpfc_vport *vport = pmb->vport;
1111        uint8_t bbscn = 0;
1112
1113        if (pmb->u.mb.mbxStatus)
1114                goto out;
1115
1116        mempool_free(pmb, phba->mbox_mem_pool);
1117
1118        /* don't perform discovery for SLI4 loopback diagnostic test */
1119        if ((phba->sli_rev == LPFC_SLI_REV4) &&
1120            !(phba->hba_flag & HBA_FCOE_MODE) &&
1121            (phba->link_flag & LS_LOOPBACK_MODE))
1122                return;
1123
1124        if (phba->fc_topology == LPFC_TOPOLOGY_LOOP &&
1125            vport->fc_flag & FC_PUBLIC_LOOP &&
1126            !(vport->fc_flag & FC_LBIT)) {
1127                        /* Need to wait for FAN - use discovery timer
1128                         * for timeout.  port_state is identically
1129                         * LPFC_LOCAL_CFG_LINK while waiting for FAN
1130                         */
1131                        lpfc_set_disctmo(vport);
1132                        return;
1133        }
1134
1135        /* Start discovery by sending a FLOGI. port_state is identically
1136         * LPFC_FLOGI while waiting for FLOGI cmpl
1137         */
1138        if (vport->port_state != LPFC_FLOGI) {
1139                if (phba->bbcredit_support && phba->cfg_enable_bbcr) {
1140                        bbscn = bf_get(lpfc_bbscn_def,
1141                                       &phba->sli4_hba.bbscn_params);
1142                        vport->fc_sparam.cmn.bbRcvSizeMsb &= 0xf;
1143                        vport->fc_sparam.cmn.bbRcvSizeMsb |= (bbscn << 4);
1144                }
1145                lpfc_initial_flogi(vport);
1146        } else if (vport->fc_flag & FC_PT2PT) {
1147                lpfc_disc_start(vport);
1148        }
1149        return;
1150
1151out:
1152        lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
1153                         "0306 CONFIG_LINK mbxStatus error x%x "
1154                         "HBA state x%x\n",
1155                         pmb->u.mb.mbxStatus, vport->port_state);
1156        mempool_free(pmb, phba->mbox_mem_pool);
1157
1158        lpfc_linkdown(phba);
1159
1160        lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
1161                         "0200 CONFIG_LINK bad hba state x%x\n",
1162                         vport->port_state);
1163
1164        lpfc_issue_clear_la(phba, vport);
1165        return;
1166}
1167
1168/**
1169 * lpfc_sli4_clear_fcf_rr_bmask
1170 * @phba pointer to the struct lpfc_hba for this port.
1171 * This fucnction resets the round robin bit mask and clears the
1172 * fcf priority list. The list deletions are done while holding the
1173 * hbalock. The ON_LIST flag and the FLOGI_FAILED flags are cleared
1174 * from the lpfc_fcf_pri record.
1175 **/
1176void
1177lpfc_sli4_clear_fcf_rr_bmask(struct lpfc_hba *phba)
1178{
1179        struct lpfc_fcf_pri *fcf_pri;
1180        struct lpfc_fcf_pri *next_fcf_pri;
1181        memset(phba->fcf.fcf_rr_bmask, 0, sizeof(*phba->fcf.fcf_rr_bmask));
1182        spin_lock_irq(&phba->hbalock);
1183        list_for_each_entry_safe(fcf_pri, next_fcf_pri,
1184                                &phba->fcf.fcf_pri_list, list) {
1185                list_del_init(&fcf_pri->list);
1186                fcf_pri->fcf_rec.flag = 0;
1187        }
1188        spin_unlock_irq(&phba->hbalock);
1189}
1190static void
1191lpfc_mbx_cmpl_reg_fcfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
1192{
1193        struct lpfc_vport *vport = mboxq->vport;
1194
1195        if (mboxq->u.mb.mbxStatus) {
1196                lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
1197                         "2017 REG_FCFI mbxStatus error x%x "
1198                         "HBA state x%x\n",
1199                         mboxq->u.mb.mbxStatus, vport->port_state);
1200                goto fail_out;
1201        }
1202
1203        /* Start FCoE discovery by sending a FLOGI. */
1204        phba->fcf.fcfi = bf_get(lpfc_reg_fcfi_fcfi, &mboxq->u.mqe.un.reg_fcfi);
1205        /* Set the FCFI registered flag */
1206        spin_lock_irq(&phba->hbalock);
1207        phba->fcf.fcf_flag |= FCF_REGISTERED;
1208        spin_unlock_irq(&phba->hbalock);
1209
1210        /* If there is a pending FCoE event, restart FCF table scan. */
1211        if ((!(phba->hba_flag & FCF_RR_INPROG)) &&
1212                lpfc_check_pending_fcoe_event(phba, LPFC_UNREG_FCF))
1213                goto fail_out;
1214
1215        /* Mark successful completion of FCF table scan */
1216        spin_lock_irq(&phba->hbalock);
1217        phba->fcf.fcf_flag |= (FCF_SCAN_DONE | FCF_IN_USE);
1218        phba->hba_flag &= ~FCF_TS_INPROG;
1219        if (vport->port_state != LPFC_FLOGI) {
1220                phba->hba_flag |= FCF_RR_INPROG;
1221                spin_unlock_irq(&phba->hbalock);
1222                lpfc_issue_init_vfi(vport);
1223                goto out;
1224        }
1225        spin_unlock_irq(&phba->hbalock);
1226        goto out;
1227
1228fail_out:
1229        spin_lock_irq(&phba->hbalock);
1230        phba->hba_flag &= ~FCF_RR_INPROG;
1231        spin_unlock_irq(&phba->hbalock);
1232out:
1233        mempool_free(mboxq, phba->mbox_mem_pool);
1234}
1235
1236/**
1237 * lpfc_fab_name_match - Check if the fcf fabric name match.
1238 * @fab_name: pointer to fabric name.
1239 * @new_fcf_record: pointer to fcf record.
1240 *
1241 * This routine compare the fcf record's fabric name with provided
1242 * fabric name. If the fabric name are identical this function
1243 * returns 1 else return 0.
1244 **/
1245static uint32_t
1246lpfc_fab_name_match(uint8_t *fab_name, struct fcf_record *new_fcf_record)
1247{
1248        if (fab_name[0] != bf_get(lpfc_fcf_record_fab_name_0, new_fcf_record))
1249                return 0;
1250        if (fab_name[1] != bf_get(lpfc_fcf_record_fab_name_1, new_fcf_record))
1251                return 0;
1252        if (fab_name[2] != bf_get(lpfc_fcf_record_fab_name_2, new_fcf_record))
1253                return 0;
1254        if (fab_name[3] != bf_get(lpfc_fcf_record_fab_name_3, new_fcf_record))
1255                return 0;
1256        if (fab_name[4] != bf_get(lpfc_fcf_record_fab_name_4, new_fcf_record))
1257                return 0;
1258        if (fab_name[5] != bf_get(lpfc_fcf_record_fab_name_5, new_fcf_record))
1259                return 0;
1260        if (fab_name[6] != bf_get(lpfc_fcf_record_fab_name_6, new_fcf_record))
1261                return 0;
1262        if (fab_name[7] != bf_get(lpfc_fcf_record_fab_name_7, new_fcf_record))
1263                return 0;
1264        return 1;
1265}
1266
1267/**
1268 * lpfc_sw_name_match - Check if the fcf switch name match.
1269 * @fab_name: pointer to fabric name.
1270 * @new_fcf_record: pointer to fcf record.
1271 *
1272 * This routine compare the fcf record's switch name with provided
1273 * switch name. If the switch name are identical this function
1274 * returns 1 else return 0.
1275 **/
1276static uint32_t
1277lpfc_sw_name_match(uint8_t *sw_name, struct fcf_record *new_fcf_record)
1278{
1279        if (sw_name[0] != bf_get(lpfc_fcf_record_switch_name_0, new_fcf_record))
1280                return 0;
1281        if (sw_name[1] != bf_get(lpfc_fcf_record_switch_name_1, new_fcf_record))
1282                return 0;
1283        if (sw_name[2] != bf_get(lpfc_fcf_record_switch_name_2, new_fcf_record))
1284                return 0;
1285        if (sw_name[3] != bf_get(lpfc_fcf_record_switch_name_3, new_fcf_record))
1286                return 0;
1287        if (sw_name[4] != bf_get(lpfc_fcf_record_switch_name_4, new_fcf_record))
1288                return 0;
1289        if (sw_name[5] != bf_get(lpfc_fcf_record_switch_name_5, new_fcf_record))
1290                return 0;
1291        if (sw_name[6] != bf_get(lpfc_fcf_record_switch_name_6, new_fcf_record))
1292                return 0;
1293        if (sw_name[7] != bf_get(lpfc_fcf_record_switch_name_7, new_fcf_record))
1294                return 0;
1295        return 1;
1296}
1297
1298/**
1299 * lpfc_mac_addr_match - Check if the fcf mac address match.
1300 * @mac_addr: pointer to mac address.
1301 * @new_fcf_record: pointer to fcf record.
1302 *
1303 * This routine compare the fcf record's mac address with HBA's
1304 * FCF mac address. If the mac addresses are identical this function
1305 * returns 1 else return 0.
1306 **/
1307static uint32_t
1308lpfc_mac_addr_match(uint8_t *mac_addr, struct fcf_record *new_fcf_record)
1309{
1310        if (mac_addr[0] != bf_get(lpfc_fcf_record_mac_0, new_fcf_record))
1311                return 0;
1312        if (mac_addr[1] != bf_get(lpfc_fcf_record_mac_1, new_fcf_record))
1313                return 0;
1314        if (mac_addr[2] != bf_get(lpfc_fcf_record_mac_2, new_fcf_record))
1315                return 0;
1316        if (mac_addr[3] != bf_get(lpfc_fcf_record_mac_3, new_fcf_record))
1317                return 0;
1318        if (mac_addr[4] != bf_get(lpfc_fcf_record_mac_4, new_fcf_record))
1319                return 0;
1320        if (mac_addr[5] != bf_get(lpfc_fcf_record_mac_5, new_fcf_record))
1321                return 0;
1322        return 1;
1323}
1324
1325static bool
1326lpfc_vlan_id_match(uint16_t curr_vlan_id, uint16_t new_vlan_id)
1327{
1328        return (curr_vlan_id == new_vlan_id);
1329}
1330
1331/**
1332 * lpfc_update_fcf_record - Update driver fcf record
1333 * __lpfc_update_fcf_record_pri - update the lpfc_fcf_pri record.
1334 * @phba: pointer to lpfc hba data structure.
1335 * @fcf_index: Index for the lpfc_fcf_record.
1336 * @new_fcf_record: pointer to hba fcf record.
1337 *
1338 * This routine updates the driver FCF priority record from the new HBA FCF
1339 * record. This routine is called with the host lock held.
1340 **/
1341static void
1342__lpfc_update_fcf_record_pri(struct lpfc_hba *phba, uint16_t fcf_index,
1343                                 struct fcf_record *new_fcf_record
1344                                 )
1345{
1346        struct lpfc_fcf_pri *fcf_pri;
1347
1348        lockdep_assert_held(&phba->hbalock);
1349
1350        fcf_pri = &phba->fcf.fcf_pri[fcf_index];
1351        fcf_pri->fcf_rec.fcf_index = fcf_index;
1352        /* FCF record priority */
1353        fcf_pri->fcf_rec.priority = new_fcf_record->fip_priority;
1354
1355}
1356
1357/**
1358 * lpfc_copy_fcf_record - Copy fcf information to lpfc_hba.
1359 * @fcf: pointer to driver fcf record.
1360 * @new_fcf_record: pointer to fcf record.
1361 *
1362 * This routine copies the FCF information from the FCF
1363 * record to lpfc_hba data structure.
1364 **/
1365static void
1366lpfc_copy_fcf_record(struct lpfc_fcf_rec *fcf_rec,
1367                     struct fcf_record *new_fcf_record)
1368{
1369        /* Fabric name */
1370        fcf_rec->fabric_name[0] =
1371                bf_get(lpfc_fcf_record_fab_name_0, new_fcf_record);
1372        fcf_rec->fabric_name[1] =
1373                bf_get(lpfc_fcf_record_fab_name_1, new_fcf_record);
1374        fcf_rec->fabric_name[2] =
1375                bf_get(lpfc_fcf_record_fab_name_2, new_fcf_record);
1376        fcf_rec->fabric_name[3] =
1377                bf_get(lpfc_fcf_record_fab_name_3, new_fcf_record);
1378        fcf_rec->fabric_name[4] =
1379                bf_get(lpfc_fcf_record_fab_name_4, new_fcf_record);
1380        fcf_rec->fabric_name[5] =
1381                bf_get(lpfc_fcf_record_fab_name_5, new_fcf_record);
1382        fcf_rec->fabric_name[6] =
1383                bf_get(lpfc_fcf_record_fab_name_6, new_fcf_record);
1384        fcf_rec->fabric_name[7] =
1385                bf_get(lpfc_fcf_record_fab_name_7, new_fcf_record);
1386        /* Mac address */
1387        fcf_rec->mac_addr[0] = bf_get(lpfc_fcf_record_mac_0, new_fcf_record);
1388        fcf_rec->mac_addr[1] = bf_get(lpfc_fcf_record_mac_1, new_fcf_record);
1389        fcf_rec->mac_addr[2] = bf_get(lpfc_fcf_record_mac_2, new_fcf_record);
1390        fcf_rec->mac_addr[3] = bf_get(lpfc_fcf_record_mac_3, new_fcf_record);
1391        fcf_rec->mac_addr[4] = bf_get(lpfc_fcf_record_mac_4, new_fcf_record);
1392        fcf_rec->mac_addr[5] = bf_get(lpfc_fcf_record_mac_5, new_fcf_record);
1393        /* FCF record index */
1394        fcf_rec->fcf_indx = bf_get(lpfc_fcf_record_fcf_index, new_fcf_record);
1395        /* FCF record priority */
1396        fcf_rec->priority = new_fcf_record->fip_priority;
1397        /* Switch name */
1398        fcf_rec->switch_name[0] =
1399                bf_get(lpfc_fcf_record_switch_name_0, new_fcf_record);
1400        fcf_rec->switch_name[1] =
1401                bf_get(lpfc_fcf_record_switch_name_1, new_fcf_record);
1402        fcf_rec->switch_name[2] =
1403                bf_get(lpfc_fcf_record_switch_name_2, new_fcf_record);
1404        fcf_rec->switch_name[3] =
1405                bf_get(lpfc_fcf_record_switch_name_3, new_fcf_record);
1406        fcf_rec->switch_name[4] =
1407                bf_get(lpfc_fcf_record_switch_name_4, new_fcf_record);
1408        fcf_rec->switch_name[5] =
1409                bf_get(lpfc_fcf_record_switch_name_5, new_fcf_record);
1410        fcf_rec->switch_name[6] =
1411                bf_get(lpfc_fcf_record_switch_name_6, new_fcf_record);
1412        fcf_rec->switch_name[7] =
1413                bf_get(lpfc_fcf_record_switch_name_7, new_fcf_record);
1414}
1415
1416/**
1417 * lpfc_update_fcf_record - Update driver fcf record
1418 * @phba: pointer to lpfc hba data structure.
1419 * @fcf_rec: pointer to driver fcf record.
1420 * @new_fcf_record: pointer to hba fcf record.
1421 * @addr_mode: address mode to be set to the driver fcf record.
1422 * @vlan_id: vlan tag to be set to the driver fcf record.
1423 * @flag: flag bits to be set to the driver fcf record.
1424 *
1425 * This routine updates the driver FCF record from the new HBA FCF record
1426 * together with the address mode, vlan_id, and other informations. This
1427 * routine is called with the host lock held.
1428 **/
1429static void
1430__lpfc_update_fcf_record(struct lpfc_hba *phba, struct lpfc_fcf_rec *fcf_rec,
1431                       struct fcf_record *new_fcf_record, uint32_t addr_mode,
1432                       uint16_t vlan_id, uint32_t flag)
1433{
1434        lockdep_assert_held(&phba->hbalock);
1435
1436        /* Copy the fields from the HBA's FCF record */
1437        lpfc_copy_fcf_record(fcf_rec, new_fcf_record);
1438        /* Update other fields of driver FCF record */
1439        fcf_rec->addr_mode = addr_mode;
1440        fcf_rec->vlan_id = vlan_id;
1441        fcf_rec->flag |= (flag | RECORD_VALID);
1442        __lpfc_update_fcf_record_pri(phba,
1443                bf_get(lpfc_fcf_record_fcf_index, new_fcf_record),
1444                                 new_fcf_record);
1445}
1446
1447/**
1448 * lpfc_register_fcf - Register the FCF with hba.
1449 * @phba: pointer to lpfc hba data structure.
1450 *
1451 * This routine issues a register fcfi mailbox command to register
1452 * the fcf with HBA.
1453 **/
1454static void
1455lpfc_register_fcf(struct lpfc_hba *phba)
1456{
1457        LPFC_MBOXQ_t *fcf_mbxq;
1458        int rc;
1459
1460        spin_lock_irq(&phba->hbalock);
1461        /* If the FCF is not available do nothing. */
1462        if (!(phba->fcf.fcf_flag & FCF_AVAILABLE)) {
1463                phba->hba_flag &= ~(FCF_TS_INPROG | FCF_RR_INPROG);
1464                spin_unlock_irq(&phba->hbalock);
1465                return;
1466        }
1467
1468        /* The FCF is already registered, start discovery */
1469        if (phba->fcf.fcf_flag & FCF_REGISTERED) {
1470                phba->fcf.fcf_flag |= (FCF_SCAN_DONE | FCF_IN_USE);
1471                phba->hba_flag &= ~FCF_TS_INPROG;
1472                if (phba->pport->port_state != LPFC_FLOGI &&
1473                    phba->pport->fc_flag & FC_FABRIC) {
1474                        phba->hba_flag |= FCF_RR_INPROG;
1475                        spin_unlock_irq(&phba->hbalock);
1476                        lpfc_initial_flogi(phba->pport);
1477                        return;
1478                }
1479                spin_unlock_irq(&phba->hbalock);
1480                return;
1481        }
1482        spin_unlock_irq(&phba->hbalock);
1483
1484        fcf_mbxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1485        if (!fcf_mbxq) {
1486                spin_lock_irq(&phba->hbalock);
1487                phba->hba_flag &= ~(FCF_TS_INPROG | FCF_RR_INPROG);
1488                spin_unlock_irq(&phba->hbalock);
1489                return;
1490        }
1491
1492        lpfc_reg_fcfi(phba, fcf_mbxq);
1493        fcf_mbxq->vport = phba->pport;
1494        fcf_mbxq->mbox_cmpl = lpfc_mbx_cmpl_reg_fcfi;
1495        rc = lpfc_sli_issue_mbox(phba, fcf_mbxq, MBX_NOWAIT);
1496        if (rc == MBX_NOT_FINISHED) {
1497                spin_lock_irq(&phba->hbalock);
1498                phba->hba_flag &= ~(FCF_TS_INPROG | FCF_RR_INPROG);
1499                spin_unlock_irq(&phba->hbalock);
1500                mempool_free(fcf_mbxq, phba->mbox_mem_pool);
1501        }
1502
1503        return;
1504}
1505
1506/**
1507 * lpfc_match_fcf_conn_list - Check if the FCF record can be used for discovery.
1508 * @phba: pointer to lpfc hba data structure.
1509 * @new_fcf_record: pointer to fcf record.
1510 * @boot_flag: Indicates if this record used by boot bios.
1511 * @addr_mode: The address mode to be used by this FCF
1512 * @vlan_id: The vlan id to be used as vlan tagging by this FCF.
1513 *
1514 * This routine compare the fcf record with connect list obtained from the
1515 * config region to decide if this FCF can be used for SAN discovery. It returns
1516 * 1 if this record can be used for SAN discovery else return zero. If this FCF
1517 * record can be used for SAN discovery, the boot_flag will indicate if this FCF
1518 * is used by boot bios and addr_mode will indicate the addressing mode to be
1519 * used for this FCF when the function returns.
1520 * If the FCF record need to be used with a particular vlan id, the vlan is
1521 * set in the vlan_id on return of the function. If not VLAN tagging need to
1522 * be used with the FCF vlan_id will be set to LPFC_FCOE_NULL_VID;
1523 **/
1524static int
1525lpfc_match_fcf_conn_list(struct lpfc_hba *phba,
1526                        struct fcf_record *new_fcf_record,
1527                        uint32_t *boot_flag, uint32_t *addr_mode,
1528                        uint16_t *vlan_id)
1529{
1530        struct lpfc_fcf_conn_entry *conn_entry;
1531        int i, j, fcf_vlan_id = 0;
1532
1533        /* Find the lowest VLAN id in the FCF record */
1534        for (i = 0; i < 512; i++) {
1535                if (new_fcf_record->vlan_bitmap[i]) {
1536                        fcf_vlan_id = i * 8;
1537                        j = 0;
1538                        while (!((new_fcf_record->vlan_bitmap[i] >> j) & 1)) {
1539                                j++;
1540                                fcf_vlan_id++;
1541                        }
1542                        break;
1543                }
1544        }
1545
1546        /* FCF not valid/available or solicitation in progress */
1547        if (!bf_get(lpfc_fcf_record_fcf_avail, new_fcf_record) ||
1548            !bf_get(lpfc_fcf_record_fcf_valid, new_fcf_record) ||
1549            bf_get(lpfc_fcf_record_fcf_sol, new_fcf_record))
1550                return 0;
1551
1552        if (!(phba->hba_flag & HBA_FIP_SUPPORT)) {
1553                *boot_flag = 0;
1554                *addr_mode = bf_get(lpfc_fcf_record_mac_addr_prov,
1555                                new_fcf_record);
1556                if (phba->valid_vlan)
1557                        *vlan_id = phba->vlan_id;
1558                else
1559                        *vlan_id = LPFC_FCOE_NULL_VID;
1560                return 1;
1561        }
1562
1563        /*
1564         * If there are no FCF connection table entry, driver connect to all
1565         * FCFs.
1566         */
1567        if (list_empty(&phba->fcf_conn_rec_list)) {
1568                *boot_flag = 0;
1569                *addr_mode = bf_get(lpfc_fcf_record_mac_addr_prov,
1570                        new_fcf_record);
1571
1572                /*
1573                 * When there are no FCF connect entries, use driver's default
1574                 * addressing mode - FPMA.
1575                 */
1576                if (*addr_mode & LPFC_FCF_FPMA)
1577                        *addr_mode = LPFC_FCF_FPMA;
1578
1579                /* If FCF record report a vlan id use that vlan id */
1580                if (fcf_vlan_id)
1581                        *vlan_id = fcf_vlan_id;
1582                else
1583                        *vlan_id = LPFC_FCOE_NULL_VID;
1584                return 1;
1585        }
1586
1587        list_for_each_entry(conn_entry,
1588                            &phba->fcf_conn_rec_list, list) {
1589                if (!(conn_entry->conn_rec.flags & FCFCNCT_VALID))
1590                        continue;
1591
1592                if ((conn_entry->conn_rec.flags & FCFCNCT_FBNM_VALID) &&
1593                        !lpfc_fab_name_match(conn_entry->conn_rec.fabric_name,
1594                                             new_fcf_record))
1595                        continue;
1596                if ((conn_entry->conn_rec.flags & FCFCNCT_SWNM_VALID) &&
1597                        !lpfc_sw_name_match(conn_entry->conn_rec.switch_name,
1598                                            new_fcf_record))
1599                        continue;
1600                if (conn_entry->conn_rec.flags & FCFCNCT_VLAN_VALID) {
1601                        /*
1602                         * If the vlan bit map does not have the bit set for the
1603                         * vlan id to be used, then it is not a match.
1604                         */
1605                        if (!(new_fcf_record->vlan_bitmap
1606                                [conn_entry->conn_rec.vlan_tag / 8] &
1607                                (1 << (conn_entry->conn_rec.vlan_tag % 8))))
1608                                continue;
1609                }
1610
1611                /*
1612                 * If connection record does not support any addressing mode,
1613                 * skip the FCF record.
1614                 */
1615                if (!(bf_get(lpfc_fcf_record_mac_addr_prov, new_fcf_record)
1616                        & (LPFC_FCF_FPMA | LPFC_FCF_SPMA)))
1617                        continue;
1618
1619                /*
1620                 * Check if the connection record specifies a required
1621                 * addressing mode.
1622                 */
1623                if ((conn_entry->conn_rec.flags & FCFCNCT_AM_VALID) &&
1624                        !(conn_entry->conn_rec.flags & FCFCNCT_AM_PREFERRED)) {
1625
1626                        /*
1627                         * If SPMA required but FCF not support this continue.
1628                         */
1629                        if ((conn_entry->conn_rec.flags & FCFCNCT_AM_SPMA) &&
1630                                !(bf_get(lpfc_fcf_record_mac_addr_prov,
1631                                        new_fcf_record) & LPFC_FCF_SPMA))
1632                                continue;
1633
1634                        /*
1635                         * If FPMA required but FCF not support this continue.
1636                         */
1637                        if (!(conn_entry->conn_rec.flags & FCFCNCT_AM_SPMA) &&
1638                                !(bf_get(lpfc_fcf_record_mac_addr_prov,
1639                                new_fcf_record) & LPFC_FCF_FPMA))
1640                                continue;
1641                }
1642
1643                /*
1644                 * This fcf record matches filtering criteria.
1645                 */
1646                if (conn_entry->conn_rec.flags & FCFCNCT_BOOT)
1647                        *boot_flag = 1;
1648                else
1649                        *boot_flag = 0;
1650
1651                /*
1652                 * If user did not specify any addressing mode, or if the
1653                 * preferred addressing mode specified by user is not supported
1654                 * by FCF, allow fabric to pick the addressing mode.
1655                 */
1656                *addr_mode = bf_get(lpfc_fcf_record_mac_addr_prov,
1657                                new_fcf_record);
1658                /*
1659                 * If the user specified a required address mode, assign that
1660                 * address mode
1661                 */
1662                if ((conn_entry->conn_rec.flags & FCFCNCT_AM_VALID) &&
1663                        (!(conn_entry->conn_rec.flags & FCFCNCT_AM_PREFERRED)))
1664                        *addr_mode = (conn_entry->conn_rec.flags &
1665                                FCFCNCT_AM_SPMA) ?
1666                                LPFC_FCF_SPMA : LPFC_FCF_FPMA;
1667                /*
1668                 * If the user specified a preferred address mode, use the
1669                 * addr mode only if FCF support the addr_mode.
1670                 */
1671                else if ((conn_entry->conn_rec.flags & FCFCNCT_AM_VALID) &&
1672                        (conn_entry->conn_rec.flags & FCFCNCT_AM_PREFERRED) &&
1673                        (conn_entry->conn_rec.flags & FCFCNCT_AM_SPMA) &&
1674                        (*addr_mode & LPFC_FCF_SPMA))
1675                                *addr_mode = LPFC_FCF_SPMA;
1676                else if ((conn_entry->conn_rec.flags & FCFCNCT_AM_VALID) &&
1677                        (conn_entry->conn_rec.flags & FCFCNCT_AM_PREFERRED) &&
1678                        !(conn_entry->conn_rec.flags & FCFCNCT_AM_SPMA) &&
1679                        (*addr_mode & LPFC_FCF_FPMA))
1680                                *addr_mode = LPFC_FCF_FPMA;
1681
1682                /* If matching connect list has a vlan id, use it */
1683                if (conn_entry->conn_rec.flags & FCFCNCT_VLAN_VALID)
1684                        *vlan_id = conn_entry->conn_rec.vlan_tag;
1685                /*
1686                 * If no vlan id is specified in connect list, use the vlan id
1687                 * in the FCF record
1688                 */
1689                else if (fcf_vlan_id)
1690                        *vlan_id = fcf_vlan_id;
1691                else
1692                        *vlan_id = LPFC_FCOE_NULL_VID;
1693
1694                return 1;
1695        }
1696
1697        return 0;
1698}
1699
1700/**
1701 * lpfc_check_pending_fcoe_event - Check if there is pending fcoe event.
1702 * @phba: pointer to lpfc hba data structure.
1703 * @unreg_fcf: Unregister FCF if FCF table need to be re-scaned.
1704 *
1705 * This function check if there is any fcoe event pending while driver
1706 * scan FCF entries. If there is any pending event, it will restart the
1707 * FCF saning and return 1 else return 0.
1708 */
1709int
1710lpfc_check_pending_fcoe_event(struct lpfc_hba *phba, uint8_t unreg_fcf)
1711{
1712        /*
1713         * If the Link is up and no FCoE events while in the
1714         * FCF discovery, no need to restart FCF discovery.
1715         */
1716        if ((phba->link_state  >= LPFC_LINK_UP) &&
1717            (phba->fcoe_eventtag == phba->fcoe_eventtag_at_fcf_scan))
1718                return 0;
1719
1720        lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
1721                        "2768 Pending link or FCF event during current "
1722                        "handling of the previous event: link_state:x%x, "
1723                        "evt_tag_at_scan:x%x, evt_tag_current:x%x\n",
1724                        phba->link_state, phba->fcoe_eventtag_at_fcf_scan,
1725                        phba->fcoe_eventtag);
1726
1727        spin_lock_irq(&phba->hbalock);
1728        phba->fcf.fcf_flag &= ~FCF_AVAILABLE;
1729        spin_unlock_irq(&phba->hbalock);
1730
1731        if (phba->link_state >= LPFC_LINK_UP) {
1732                lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
1733                                "2780 Restart FCF table scan due to "
1734                                "pending FCF event:evt_tag_at_scan:x%x, "
1735                                "evt_tag_current:x%x\n",
1736                                phba->fcoe_eventtag_at_fcf_scan,
1737                                phba->fcoe_eventtag);
1738                lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST);
1739        } else {
1740                /*
1741                 * Do not continue FCF discovery and clear FCF_TS_INPROG
1742                 * flag
1743                 */
1744                lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
1745                                "2833 Stop FCF discovery process due to link "
1746                                "state change (x%x)\n", phba->link_state);
1747                spin_lock_irq(&phba->hbalock);
1748                phba->hba_flag &= ~(FCF_TS_INPROG | FCF_RR_INPROG);
1749                phba->fcf.fcf_flag &= ~(FCF_REDISC_FOV | FCF_DISCOVERY);
1750                spin_unlock_irq(&phba->hbalock);
1751        }
1752
1753        /* Unregister the currently registered FCF if required */
1754        if (unreg_fcf) {
1755                spin_lock_irq(&phba->hbalock);
1756                phba->fcf.fcf_flag &= ~FCF_REGISTERED;
1757                spin_unlock_irq(&phba->hbalock);
1758                lpfc_sli4_unregister_fcf(phba);
1759        }
1760        return 1;
1761}
1762
1763/**
1764 * lpfc_sli4_new_fcf_random_select - Randomly select an eligible new fcf record
1765 * @phba: pointer to lpfc hba data structure.
1766 * @fcf_cnt: number of eligible fcf record seen so far.
1767 *
1768 * This function makes an running random selection decision on FCF record to
1769 * use through a sequence of @fcf_cnt eligible FCF records with equal
1770 * probability. To perform integer manunipulation of random numbers with
1771 * size unit32_t, the lower 16 bits of the 32-bit random number returned
1772 * from prandom_u32() are taken as the random random number generated.
1773 *
1774 * Returns true when outcome is for the newly read FCF record should be
1775 * chosen; otherwise, return false when outcome is for keeping the previously
1776 * chosen FCF record.
1777 **/
1778static bool
1779lpfc_sli4_new_fcf_random_select(struct lpfc_hba *phba, uint32_t fcf_cnt)
1780{
1781        uint32_t rand_num;
1782
1783        /* Get 16-bit uniform random number */
1784        rand_num = 0xFFFF & prandom_u32();
1785
1786        /* Decision with probability 1/fcf_cnt */
1787        if ((fcf_cnt * rand_num) < 0xFFFF)
1788                return true;
1789        else
1790                return false;
1791}
1792
1793/**
1794 * lpfc_sli4_fcf_rec_mbox_parse - Parse read_fcf mbox command.
1795 * @phba: pointer to lpfc hba data structure.
1796 * @mboxq: pointer to mailbox object.
1797 * @next_fcf_index: pointer to holder of next fcf index.
1798 *
1799 * This routine parses the non-embedded fcf mailbox command by performing the
1800 * necessarily error checking, non-embedded read FCF record mailbox command
1801 * SGE parsing, and endianness swapping.
1802 *
1803 * Returns the pointer to the new FCF record in the non-embedded mailbox
1804 * command DMA memory if successfully, other NULL.
1805 */
1806static struct fcf_record *
1807lpfc_sli4_fcf_rec_mbox_parse(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
1808                             uint16_t *next_fcf_index)
1809{
1810        void *virt_addr;
1811        struct lpfc_mbx_sge sge;
1812        struct lpfc_mbx_read_fcf_tbl *read_fcf;
1813        uint32_t shdr_status, shdr_add_status, if_type;
1814        union lpfc_sli4_cfg_shdr *shdr;
1815        struct fcf_record *new_fcf_record;
1816
1817        /* Get the first SGE entry from the non-embedded DMA memory. This
1818         * routine only uses a single SGE.
1819         */
1820        lpfc_sli4_mbx_sge_get(mboxq, 0, &sge);
1821        if (unlikely(!mboxq->sge_array)) {
1822                lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
1823                                "2524 Failed to get the non-embedded SGE "
1824                                "virtual address\n");
1825                return NULL;
1826        }
1827        virt_addr = mboxq->sge_array->addr[0];
1828
1829        shdr = (union lpfc_sli4_cfg_shdr *)virt_addr;
1830        lpfc_sli_pcimem_bcopy(shdr, shdr,
1831                              sizeof(union lpfc_sli4_cfg_shdr));
1832        shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
1833        if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
1834        shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
1835        if (shdr_status || shdr_add_status) {
1836                if (shdr_status == STATUS_FCF_TABLE_EMPTY ||
1837                                        if_type == LPFC_SLI_INTF_IF_TYPE_2)
1838                        lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
1839                                        "2726 READ_FCF_RECORD Indicates empty "
1840                                        "FCF table.\n");
1841                else
1842                        lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
1843                                        "2521 READ_FCF_RECORD mailbox failed "
1844                                        "with status x%x add_status x%x, "
1845                                        "mbx\n", shdr_status, shdr_add_status);
1846                return NULL;
1847        }
1848
1849        /* Interpreting the returned information of the FCF record */
1850        read_fcf = (struct lpfc_mbx_read_fcf_tbl *)virt_addr;
1851        lpfc_sli_pcimem_bcopy(read_fcf, read_fcf,
1852                              sizeof(struct lpfc_mbx_read_fcf_tbl));
1853        *next_fcf_index = bf_get(lpfc_mbx_read_fcf_tbl_nxt_vindx, read_fcf);
1854        new_fcf_record = (struct fcf_record *)(virt_addr +
1855                          sizeof(struct lpfc_mbx_read_fcf_tbl));
1856        lpfc_sli_pcimem_bcopy(new_fcf_record, new_fcf_record,
1857                                offsetof(struct fcf_record, vlan_bitmap));
1858        new_fcf_record->word137 = le32_to_cpu(new_fcf_record->word137);
1859        new_fcf_record->word138 = le32_to_cpu(new_fcf_record->word138);
1860
1861        return new_fcf_record;
1862}
1863
1864/**
1865 * lpfc_sli4_log_fcf_record_info - Log the information of a fcf record
1866 * @phba: pointer to lpfc hba data structure.
1867 * @fcf_record: pointer to the fcf record.
1868 * @vlan_id: the lowest vlan identifier associated to this fcf record.
1869 * @next_fcf_index: the index to the next fcf record in hba's fcf table.
1870 *
1871 * This routine logs the detailed FCF record if the LOG_FIP loggin is
1872 * enabled.
1873 **/
1874static void
1875lpfc_sli4_log_fcf_record_info(struct lpfc_hba *phba,
1876                              struct fcf_record *fcf_record,
1877                              uint16_t vlan_id,
1878                              uint16_t next_fcf_index)
1879{
1880        lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
1881                        "2764 READ_FCF_RECORD:\n"
1882                        "\tFCF_Index     : x%x\n"
1883                        "\tFCF_Avail     : x%x\n"
1884                        "\tFCF_Valid     : x%x\n"
1885                        "\tFCF_SOL       : x%x\n"
1886                        "\tFIP_Priority  : x%x\n"
1887                        "\tMAC_Provider  : x%x\n"
1888                        "\tLowest VLANID : x%x\n"
1889                        "\tFCF_MAC Addr  : x%x:%x:%x:%x:%x:%x\n"
1890                        "\tFabric_Name   : x%x:%x:%x:%x:%x:%x:%x:%x\n"
1891                        "\tSwitch_Name   : x%x:%x:%x:%x:%x:%x:%x:%x\n"
1892                        "\tNext_FCF_Index: x%x\n",
1893                        bf_get(lpfc_fcf_record_fcf_index, fcf_record),
1894                        bf_get(lpfc_fcf_record_fcf_avail, fcf_record),
1895                        bf_get(lpfc_fcf_record_fcf_valid, fcf_record),
1896                        bf_get(lpfc_fcf_record_fcf_sol, fcf_record),
1897                        fcf_record->fip_priority,
1898                        bf_get(lpfc_fcf_record_mac_addr_prov, fcf_record),
1899                        vlan_id,
1900                        bf_get(lpfc_fcf_record_mac_0, fcf_record),
1901                        bf_get(lpfc_fcf_record_mac_1, fcf_record),
1902                        bf_get(lpfc_fcf_record_mac_2, fcf_record),
1903                        bf_get(lpfc_fcf_record_mac_3, fcf_record),
1904                        bf_get(lpfc_fcf_record_mac_4, fcf_record),
1905                        bf_get(lpfc_fcf_record_mac_5, fcf_record),
1906                        bf_get(lpfc_fcf_record_fab_name_0, fcf_record),
1907                        bf_get(lpfc_fcf_record_fab_name_1, fcf_record),
1908                        bf_get(lpfc_fcf_record_fab_name_2, fcf_record),
1909                        bf_get(lpfc_fcf_record_fab_name_3, fcf_record),
1910                        bf_get(lpfc_fcf_record_fab_name_4, fcf_record),
1911                        bf_get(lpfc_fcf_record_fab_name_5, fcf_record),
1912                        bf_get(lpfc_fcf_record_fab_name_6, fcf_record),
1913                        bf_get(lpfc_fcf_record_fab_name_7, fcf_record),
1914                        bf_get(lpfc_fcf_record_switch_name_0, fcf_record),
1915                        bf_get(lpfc_fcf_record_switch_name_1, fcf_record),
1916                        bf_get(lpfc_fcf_record_switch_name_2, fcf_record),
1917                        bf_get(lpfc_fcf_record_switch_name_3, fcf_record),
1918                        bf_get(lpfc_fcf_record_switch_name_4, fcf_record),
1919                        bf_get(lpfc_fcf_record_switch_name_5, fcf_record),
1920                        bf_get(lpfc_fcf_record_switch_name_6, fcf_record),
1921                        bf_get(lpfc_fcf_record_switch_name_7, fcf_record),
1922                        next_fcf_index);
1923}
1924
1925/**
1926 lpfc_sli4_fcf_record_match - testing new FCF record for matching existing FCF
1927 * @phba: pointer to lpfc hba data structure.
1928 * @fcf_rec: pointer to an existing FCF record.
1929 * @new_fcf_record: pointer to a new FCF record.
1930 * @new_vlan_id: vlan id from the new FCF record.
1931 *
1932 * This function performs matching test of a new FCF record against an existing
1933 * FCF record. If the new_vlan_id passed in is LPFC_FCOE_IGNORE_VID, vlan id
1934 * will not be used as part of the FCF record matching criteria.
1935 *
1936 * Returns true if all the fields matching, otherwise returns false.
1937 */
1938static bool
1939lpfc_sli4_fcf_record_match(struct lpfc_hba *phba,
1940                           struct lpfc_fcf_rec *fcf_rec,
1941                           struct fcf_record *new_fcf_record,
1942                           uint16_t new_vlan_id)
1943{
1944        if (new_vlan_id != LPFC_FCOE_IGNORE_VID)
1945                if (!lpfc_vlan_id_match(fcf_rec->vlan_id, new_vlan_id))
1946                        return false;
1947        if (!lpfc_mac_addr_match(fcf_rec->mac_addr, new_fcf_record))
1948                return false;
1949        if (!lpfc_sw_name_match(fcf_rec->switch_name, new_fcf_record))
1950                return false;
1951        if (!lpfc_fab_name_match(fcf_rec->fabric_name, new_fcf_record))
1952                return false;
1953        if (fcf_rec->priority != new_fcf_record->fip_priority)
1954                return false;
1955        return true;
1956}
1957
1958/**
1959 * lpfc_sli4_fcf_rr_next_proc - processing next roundrobin fcf
1960 * @vport: Pointer to vport object.
1961 * @fcf_index: index to next fcf.
1962 *
1963 * This function processing the roundrobin fcf failover to next fcf index.
1964 * When this function is invoked, there will be a current fcf registered
1965 * for flogi.
1966 * Return: 0 for continue retrying flogi on currently registered fcf;
1967 *         1 for stop flogi on currently registered fcf;
1968 */
1969int lpfc_sli4_fcf_rr_next_proc(struct lpfc_vport *vport, uint16_t fcf_index)
1970{
1971        struct lpfc_hba *phba = vport->phba;
1972        int rc;
1973
1974        if (fcf_index == LPFC_FCOE_FCF_NEXT_NONE) {
1975                spin_lock_irq(&phba->hbalock);
1976                if (phba->hba_flag & HBA_DEVLOSS_TMO) {
1977                        spin_unlock_irq(&phba->hbalock);
1978                        lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
1979                                        "2872 Devloss tmo with no eligible "
1980                                        "FCF, unregister in-use FCF (x%x) "
1981                                        "and rescan FCF table\n",
1982                                        phba->fcf.current_rec.fcf_indx);
1983                        lpfc_unregister_fcf_rescan(phba);
1984                        goto stop_flogi_current_fcf;
1985                }
1986                /* Mark the end to FLOGI roundrobin failover */
1987                phba->hba_flag &= ~FCF_RR_INPROG;
1988                /* Allow action to new fcf asynchronous event */
1989                phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_SCAN_DONE);
1990                spin_unlock_irq(&phba->hbalock);
1991                lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
1992                                "2865 No FCF available, stop roundrobin FCF "
1993                                "failover and change port state:x%x/x%x\n",
1994                                phba->pport->port_state, LPFC_VPORT_UNKNOWN);
1995                phba->pport->port_state = LPFC_VPORT_UNKNOWN;
1996                goto stop_flogi_current_fcf;
1997        } else {
1998                lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_ELS,
1999                                "2794 Try FLOGI roundrobin FCF failover to "
2000                                "(x%x)\n", fcf_index);
2001                rc = lpfc_sli4_fcf_rr_read_fcf_rec(phba, fcf_index);
2002                if (rc)
2003                        lpfc_printf_log(phba, KERN_WARNING, LOG_FIP | LOG_ELS,
2004                                        "2761 FLOGI roundrobin FCF failover "
2005                                        "failed (rc:x%x) to read FCF (x%x)\n",
2006                                        rc, phba->fcf.current_rec.fcf_indx);
2007                else
2008                        goto stop_flogi_current_fcf;
2009        }
2010        return 0;
2011
2012stop_flogi_current_fcf:
2013        lpfc_can_disctmo(vport);
2014        return 1;
2015}
2016
2017/**
2018 * lpfc_sli4_fcf_pri_list_del
2019 * @phba: pointer to lpfc hba data structure.
2020 * @fcf_index the index of the fcf record to delete
2021 * This routine checks the on list flag of the fcf_index to be deleted.
2022 * If it is one the list then it is removed from the list, and the flag
2023 * is cleared. This routine grab the hbalock before removing the fcf
2024 * record from the list.
2025 **/
2026static void lpfc_sli4_fcf_pri_list_del(struct lpfc_hba *phba,
2027                        uint16_t fcf_index)
2028{
2029        struct lpfc_fcf_pri *new_fcf_pri;
2030
2031        new_fcf_pri = &phba->fcf.fcf_pri[fcf_index];
2032        lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
2033                "3058 deleting idx x%x pri x%x flg x%x\n",
2034                fcf_index, new_fcf_pri->fcf_rec.priority,
2035                 new_fcf_pri->fcf_rec.flag);
2036        spin_lock_irq(&phba->hbalock);
2037        if (new_fcf_pri->fcf_rec.flag & LPFC_FCF_ON_PRI_LIST) {
2038                if (phba->fcf.current_rec.priority ==
2039                                new_fcf_pri->fcf_rec.priority)
2040                        phba->fcf.eligible_fcf_cnt--;
2041                list_del_init(&new_fcf_pri->list);
2042                new_fcf_pri->fcf_rec.flag &= ~LPFC_FCF_ON_PRI_LIST;
2043        }
2044        spin_unlock_irq(&phba->hbalock);
2045}
2046
2047/**
2048 * lpfc_sli4_set_fcf_flogi_fail
2049 * @phba: pointer to lpfc hba data structure.
2050 * @fcf_index the index of the fcf record to update
2051 * This routine acquires the hbalock and then set the LPFC_FCF_FLOGI_FAILED
2052 * flag so the the round robin slection for the particular priority level
2053 * will try a different fcf record that does not have this bit set.
2054 * If the fcf record is re-read for any reason this flag is cleared brfore
2055 * adding it to the priority list.
2056 **/
2057void
2058lpfc_sli4_set_fcf_flogi_fail(struct lpfc_hba *phba, uint16_t fcf_index)
2059{
2060        struct lpfc_fcf_pri *new_fcf_pri;
2061        new_fcf_pri = &phba->fcf.fcf_pri[fcf_index];
2062        spin_lock_irq(&phba->hbalock);
2063        new_fcf_pri->fcf_rec.flag |= LPFC_FCF_FLOGI_FAILED;
2064        spin_unlock_irq(&phba->hbalock);
2065}
2066
2067/**
2068 * lpfc_sli4_fcf_pri_list_add
2069 * @phba: pointer to lpfc hba data structure.
2070 * @fcf_index the index of the fcf record to add
2071 * This routine checks the priority of the fcf_index to be added.
2072 * If it is a lower priority than the current head of the fcf_pri list
2073 * then it is added to the list in the right order.
2074 * If it is the same priority as the current head of the list then it
2075 * is added to the head of the list and its bit in the rr_bmask is set.
2076 * If the fcf_index to be added is of a higher priority than the current
2077 * head of the list then the rr_bmask is cleared, its bit is set in the
2078 * rr_bmask and it is added to the head of the list.
2079 * returns:
2080 * 0=success 1=failure
2081 **/
2082static int lpfc_sli4_fcf_pri_list_add(struct lpfc_hba *phba,
2083        uint16_t fcf_index,
2084        struct fcf_record *new_fcf_record)
2085{
2086        uint16_t current_fcf_pri;
2087        uint16_t last_index;
2088        struct lpfc_fcf_pri *fcf_pri;
2089        struct lpfc_fcf_pri *next_fcf_pri;
2090        struct lpfc_fcf_pri *new_fcf_pri;
2091        int ret;
2092
2093        new_fcf_pri = &phba->fcf.fcf_pri[fcf_index];
2094        lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
2095                "3059 adding idx x%x pri x%x flg x%x\n",
2096                fcf_index, new_fcf_record->fip_priority,
2097                 new_fcf_pri->fcf_rec.flag);
2098        spin_lock_irq(&phba->hbalock);
2099        if (new_fcf_pri->fcf_rec.flag & LPFC_FCF_ON_PRI_LIST)
2100                list_del_init(&new_fcf_pri->list);
2101        new_fcf_pri->fcf_rec.fcf_index = fcf_index;
2102        new_fcf_pri->fcf_rec.priority = new_fcf_record->fip_priority;
2103        if (list_empty(&phba->fcf.fcf_pri_list)) {
2104                list_add(&new_fcf_pri->list, &phba->fcf.fcf_pri_list);
2105                ret = lpfc_sli4_fcf_rr_index_set(phba,
2106                                new_fcf_pri->fcf_rec.fcf_index);
2107                goto out;
2108        }
2109
2110        last_index = find_first_bit(phba->fcf.fcf_rr_bmask,
2111                                LPFC_SLI4_FCF_TBL_INDX_MAX);
2112        if (last_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
2113                ret = 0; /* Empty rr list */
2114                goto out;
2115        }
2116        current_fcf_pri = phba->fcf.fcf_pri[last_index].fcf_rec.priority;
2117        if (new_fcf_pri->fcf_rec.priority <=  current_fcf_pri) {
2118                list_add(&new_fcf_pri->list, &phba->fcf.fcf_pri_list);
2119                if (new_fcf_pri->fcf_rec.priority <  current_fcf_pri) {
2120                        memset(phba->fcf.fcf_rr_bmask, 0,
2121                                sizeof(*phba->fcf.fcf_rr_bmask));
2122                        /* fcfs_at_this_priority_level = 1; */
2123                        phba->fcf.eligible_fcf_cnt = 1;
2124                } else
2125                        /* fcfs_at_this_priority_level++; */
2126                        phba->fcf.eligible_fcf_cnt++;
2127                ret = lpfc_sli4_fcf_rr_index_set(phba,
2128                                new_fcf_pri->fcf_rec.fcf_index);
2129                goto out;
2130        }
2131
2132        list_for_each_entry_safe(fcf_pri, next_fcf_pri,
2133                                &phba->fcf.fcf_pri_list, list) {
2134                if (new_fcf_pri->fcf_rec.priority <=
2135                                fcf_pri->fcf_rec.priority) {
2136                        if (fcf_pri->list.prev == &phba->fcf.fcf_pri_list)
2137                                list_add(&new_fcf_pri->list,
2138                                                &phba->fcf.fcf_pri_list);
2139                        else
2140                                list_add(&new_fcf_pri->list,
2141                                         &((struct lpfc_fcf_pri *)
2142                                        fcf_pri->list.prev)->list);
2143                        ret = 0;
2144                        goto out;
2145                } else if (fcf_pri->list.next == &phba->fcf.fcf_pri_list
2146                        || new_fcf_pri->fcf_rec.priority <
2147                                next_fcf_pri->fcf_rec.priority) {
2148                        list_add(&new_fcf_pri->list, &fcf_pri->list);
2149                        ret = 0;
2150                        goto out;
2151                }
2152                if (new_fcf_pri->fcf_rec.priority > fcf_pri->fcf_rec.priority)
2153                        continue;
2154
2155        }
2156        ret = 1;
2157out:
2158        /* we use = instead of |= to clear the FLOGI_FAILED flag. */
2159        new_fcf_pri->fcf_rec.flag = LPFC_FCF_ON_PRI_LIST;
2160        spin_unlock_irq(&phba->hbalock);
2161        return ret;
2162}
2163
2164/**
2165 * lpfc_mbx_cmpl_fcf_scan_read_fcf_rec - fcf scan read_fcf mbox cmpl handler.
2166 * @phba: pointer to lpfc hba data structure.
2167 * @mboxq: pointer to mailbox object.
2168 *
2169 * This function iterates through all the fcf records available in
2170 * HBA and chooses the optimal FCF record for discovery. After finding
2171 * the FCF for discovery it registers the FCF record and kicks start
2172 * discovery.
2173 * If FCF_IN_USE flag is set in currently used FCF, the routine tries to
2174 * use an FCF record which matches fabric name and mac address of the
2175 * currently used FCF record.
2176 * If the driver supports only one FCF, it will try to use the FCF record
2177 * used by BOOT_BIOS.
2178 */
2179void
2180lpfc_mbx_cmpl_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
2181{
2182        struct fcf_record *new_fcf_record;
2183        uint32_t boot_flag, addr_mode;
2184        uint16_t fcf_index, next_fcf_index;
2185        struct lpfc_fcf_rec *fcf_rec = NULL;
2186        uint16_t vlan_id = LPFC_FCOE_NULL_VID;
2187        bool select_new_fcf;
2188        int rc;
2189
2190        /* If there is pending FCoE event restart FCF table scan */
2191        if (lpfc_check_pending_fcoe_event(phba, LPFC_SKIP_UNREG_FCF)) {
2192                lpfc_sli4_mbox_cmd_free(phba, mboxq);
2193                return;
2194        }
2195
2196        /* Parse the FCF record from the non-embedded mailbox command */
2197        new_fcf_record = lpfc_sli4_fcf_rec_mbox_parse(phba, mboxq,
2198                                                      &next_fcf_index);
2199        if (!new_fcf_record) {
2200                lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
2201                                "2765 Mailbox command READ_FCF_RECORD "
2202                                "failed to retrieve a FCF record.\n");
2203                /* Let next new FCF event trigger fast failover */
2204                spin_lock_irq(&phba->hbalock);
2205                phba->hba_flag &= ~FCF_TS_INPROG;
2206                spin_unlock_irq(&phba->hbalock);
2207                lpfc_sli4_mbox_cmd_free(phba, mboxq);
2208                return;
2209        }
2210
2211        /* Check the FCF record against the connection list */
2212        rc = lpfc_match_fcf_conn_list(phba, new_fcf_record, &boot_flag,
2213                                      &addr_mode, &vlan_id);
2214
2215        /* Log the FCF record information if turned on */
2216        lpfc_sli4_log_fcf_record_info(phba, new_fcf_record, vlan_id,
2217                                      next_fcf_index);
2218
2219        /*
2220         * If the fcf record does not match with connect list entries
2221         * read the next entry; otherwise, this is an eligible FCF
2222         * record for roundrobin FCF failover.
2223         */
2224        if (!rc) {
2225                lpfc_sli4_fcf_pri_list_del(phba,
2226                                        bf_get(lpfc_fcf_record_fcf_index,
2227                                               new_fcf_record));
2228                lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
2229                                "2781 FCF (x%x) failed connection "
2230                                "list check: (x%x/x%x/%x)\n",
2231                                bf_get(lpfc_fcf_record_fcf_index,
2232                                       new_fcf_record),
2233                                bf_get(lpfc_fcf_record_fcf_avail,
2234                                       new_fcf_record),
2235                                bf_get(lpfc_fcf_record_fcf_valid,
2236                                       new_fcf_record),
2237                                bf_get(lpfc_fcf_record_fcf_sol,
2238                                       new_fcf_record));
2239                if ((phba->fcf.fcf_flag & FCF_IN_USE) &&
2240                    lpfc_sli4_fcf_record_match(phba, &phba->fcf.current_rec,
2241                    new_fcf_record, LPFC_FCOE_IGNORE_VID)) {
2242                        if (bf_get(lpfc_fcf_record_fcf_index, new_fcf_record) !=
2243                            phba->fcf.current_rec.fcf_indx) {
2244                                lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
2245                                        "2862 FCF (x%x) matches property "
2246                                        "of in-use FCF (x%x)\n",
2247                                        bf_get(lpfc_fcf_record_fcf_index,
2248                                               new_fcf_record),
2249                                        phba->fcf.current_rec.fcf_indx);
2250                                goto read_next_fcf;
2251                        }
2252                        /*
2253                         * In case the current in-use FCF record becomes
2254                         * invalid/unavailable during FCF discovery that
2255                         * was not triggered by fast FCF failover process,
2256                         * treat it as fast FCF failover.
2257                         */
2258                        if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND) &&
2259                            !(phba->fcf.fcf_flag & FCF_REDISC_FOV)) {
2260                                lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
2261                                                "2835 Invalid in-use FCF "
2262                                                "(x%x), enter FCF failover "
2263                                                "table scan.\n",
2264                                                phba->fcf.current_rec.fcf_indx);
2265                                spin_lock_irq(&phba->hbalock);
2266                                phba->fcf.fcf_flag |= FCF_REDISC_FOV;
2267                                spin_unlock_irq(&phba->hbalock);
2268                                lpfc_sli4_mbox_cmd_free(phba, mboxq);
2269                                lpfc_sli4_fcf_scan_read_fcf_rec(phba,
2270                                                LPFC_FCOE_FCF_GET_FIRST);
2271                                return;
2272                        }
2273                }
2274                goto read_next_fcf;
2275        } else {
2276                fcf_index = bf_get(lpfc_fcf_record_fcf_index, new_fcf_record);
2277                rc = lpfc_sli4_fcf_pri_list_add(phba, fcf_index,
2278                                                        new_fcf_record);
2279                if (rc)
2280                        goto read_next_fcf;
2281        }
2282
2283        /*
2284         * If this is not the first FCF discovery of the HBA, use last
2285         * FCF record for the discovery. The condition that a rescan
2286         * matches the in-use FCF record: fabric name, switch name, mac
2287         * address, and vlan_id.
2288         */
2289        spin_lock_irq(&phba->hbalock);
2290        if (phba->fcf.fcf_flag & FCF_IN_USE) {
2291                if (phba->cfg_fcf_failover_policy == LPFC_FCF_FOV &&
2292                        lpfc_sli4_fcf_record_match(phba, &phba->fcf.current_rec,
2293                    new_fcf_record, vlan_id)) {
2294                        if (bf_get(lpfc_fcf_record_fcf_index, new_fcf_record) ==
2295                            phba->fcf.current_rec.fcf_indx) {
2296                                phba->fcf.fcf_flag |= FCF_AVAILABLE;
2297                                if (phba->fcf.fcf_flag & FCF_REDISC_PEND)
2298                                        /* Stop FCF redisc wait timer */
2299                                        __lpfc_sli4_stop_fcf_redisc_wait_timer(
2300                                                                        phba);
2301                                else if (phba->fcf.fcf_flag & FCF_REDISC_FOV)
2302                                        /* Fast failover, mark completed */
2303                                        phba->fcf.fcf_flag &= ~FCF_REDISC_FOV;
2304                                spin_unlock_irq(&phba->hbalock);
2305                                lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
2306                                                "2836 New FCF matches in-use "
2307                                                "FCF (x%x), port_state:x%x, "
2308                                                "fc_flag:x%x\n",
2309                                                phba->fcf.current_rec.fcf_indx,
2310                                                phba->pport->port_state,
2311                                                phba->pport->fc_flag);
2312                                goto out;
2313                        } else
2314                                lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
2315                                        "2863 New FCF (x%x) matches "
2316                                        "property of in-use FCF (x%x)\n",
2317                                        bf_get(lpfc_fcf_record_fcf_index,
2318                                               new_fcf_record),
2319                                        phba->fcf.current_rec.fcf_indx);
2320                }
2321                /*
2322                 * Read next FCF record from HBA searching for the matching
2323                 * with in-use record only if not during the fast failover
2324                 * period. In case of fast failover period, it shall try to
2325                 * determine whether the FCF record just read should be the
2326                 * next candidate.
2327                 */
2328                if (!(phba->fcf.fcf_flag & FCF_REDISC_FOV)) {
2329                        spin_unlock_irq(&phba->hbalock);
2330                        goto read_next_fcf;
2331                }
2332        }
2333        /*
2334         * Update on failover FCF record only if it's in FCF fast-failover
2335         * period; otherwise, update on current FCF record.
2336         */
2337        if (phba->fcf.fcf_flag & FCF_REDISC_FOV)
2338                fcf_rec = &phba->fcf.failover_rec;
2339        else
2340                fcf_rec = &phba->fcf.current_rec;
2341
2342        if (phba->fcf.fcf_flag & FCF_AVAILABLE) {
2343                /*
2344                 * If the driver FCF record does not have boot flag
2345                 * set and new hba fcf record has boot flag set, use
2346                 * the new hba fcf record.
2347                 */
2348                if (boot_flag && !(fcf_rec->flag & BOOT_ENABLE)) {
2349                        /* Choose this FCF record */
2350                        lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
2351                                        "2837 Update current FCF record "
2352                                        "(x%x) with new FCF record (x%x)\n",
2353                                        fcf_rec->fcf_indx,
2354                                        bf_get(lpfc_fcf_record_fcf_index,
2355                                        new_fcf_record));
2356                        __lpfc_update_fcf_record(phba, fcf_rec, new_fcf_record,
2357                                        addr_mode, vlan_id, BOOT_ENABLE);
2358                        spin_unlock_irq(&phba->hbalock);
2359                        goto read_next_fcf;
2360                }
2361                /*
2362                 * If the driver FCF record has boot flag set and the
2363                 * new hba FCF record does not have boot flag, read
2364                 * the next FCF record.
2365                 */
2366                if (!boot_flag && (fcf_rec->flag & BOOT_ENABLE)) {
2367                        spin_unlock_irq(&phba->hbalock);
2368                        goto read_next_fcf;
2369                }
2370                /*
2371                 * If the new hba FCF record has lower priority value
2372                 * than the driver FCF record, use the new record.
2373                 */
2374                if (new_fcf_record->fip_priority < fcf_rec->priority) {
2375                        /* Choose the new FCF record with lower priority */
2376                        lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
2377                                        "2838 Update current FCF record "
2378                                        "(x%x) with new FCF record (x%x)\n",
2379                                        fcf_rec->fcf_indx,
2380                                        bf_get(lpfc_fcf_record_fcf_index,
2381                                               new_fcf_record));
2382                        __lpfc_update_fcf_record(phba, fcf_rec, new_fcf_record,
2383                                        addr_mode, vlan_id, 0);
2384                        /* Reset running random FCF selection count */
2385                        phba->fcf.eligible_fcf_cnt = 1;
2386                } else if (new_fcf_record->fip_priority == fcf_rec->priority) {
2387                        /* Update running random FCF selection count */
2388                        phba->fcf.eligible_fcf_cnt++;
2389                        select_new_fcf = lpfc_sli4_new_fcf_random_select(phba,
2390                                                phba->fcf.eligible_fcf_cnt);
2391                        if (select_new_fcf) {
2392                                lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
2393                                        "2839 Update current FCF record "
2394                                        "(x%x) with new FCF record (x%x)\n",
2395                                        fcf_rec->fcf_indx,
2396                                        bf_get(lpfc_fcf_record_fcf_index,
2397                                               new_fcf_record));
2398                                /* Choose the new FCF by random selection */
2399                                __lpfc_update_fcf_record(phba, fcf_rec,
2400                                                         new_fcf_record,
2401                                                         addr_mode, vlan_id, 0);
2402                        }
2403                }
2404                spin_unlock_irq(&phba->hbalock);
2405                goto read_next_fcf;
2406        }
2407        /*
2408         * This is the first suitable FCF record, choose this record for
2409         * initial best-fit FCF.
2410         */
2411        if (fcf_rec) {
2412                lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
2413                                "2840 Update initial FCF candidate "
2414                                "with FCF (x%x)\n",
2415                                bf_get(lpfc_fcf_record_fcf_index,
2416                                       new_fcf_record));
2417                __lpfc_update_fcf_record(phba, fcf_rec, new_fcf_record,
2418                                         addr_mode, vlan_id, (boot_flag ?
2419                                         BOOT_ENABLE : 0));
2420                phba->fcf.fcf_flag |= FCF_AVAILABLE;
2421                /* Setup initial running random FCF selection count */
2422                phba->fcf.eligible_fcf_cnt = 1;
2423        }
2424        spin_unlock_irq(&phba->hbalock);
2425        goto read_next_fcf;
2426
2427read_next_fcf:
2428        lpfc_sli4_mbox_cmd_free(phba, mboxq);
2429        if (next_fcf_index == LPFC_FCOE_FCF_NEXT_NONE || next_fcf_index == 0) {
2430                if (phba->fcf.fcf_flag & FCF_REDISC_FOV) {
2431                        /*
2432                         * Case of FCF fast failover scan
2433                         */
2434
2435                        /*
2436                         * It has not found any suitable FCF record, cancel
2437                         * FCF scan inprogress, and do nothing
2438                         */
2439                        if (!(phba->fcf.failover_rec.flag & RECORD_VALID)) {
2440                                lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
2441                                               "2782 No suitable FCF found: "
2442                                               "(x%x/x%x)\n",
2443                                               phba->fcoe_eventtag_at_fcf_scan,
2444                                               bf_get(lpfc_fcf_record_fcf_index,
2445                                                      new_fcf_record));
2446                                spin_lock_irq(&phba->hbalock);
2447                                if (phba->hba_flag & HBA_DEVLOSS_TMO) {
2448                                        phba->hba_flag &= ~FCF_TS_INPROG;
2449                                        spin_unlock_irq(&phba->hbalock);
2450                                        /* Unregister in-use FCF and rescan */
2451                                        lpfc_printf_log(phba, KERN_INFO,
2452                                                        LOG_FIP,
2453                                                        "2864 On devloss tmo "
2454                                                        "unreg in-use FCF and "
2455                                                        "rescan FCF table\n");
2456                                        lpfc_unregister_fcf_rescan(phba);
2457                                        return;
2458                                }
2459                                /*
2460                                 * Let next new FCF event trigger fast failover
2461                                 */
2462                                phba->hba_flag &= ~FCF_TS_INPROG;
2463                                spin_unlock_irq(&phba->hbalock);
2464                                return;
2465                        }
2466                        /*
2467                         * It has found a suitable FCF record that is not
2468                         * the same as in-use FCF record, unregister the
2469                         * in-use FCF record, replace the in-use FCF record
2470                         * with the new FCF record, mark FCF fast failover
2471                         * completed, and then start register the new FCF
2472                         * record.
2473                         */
2474
2475                        /* Unregister the current in-use FCF record */
2476                        lpfc_unregister_fcf(phba);
2477
2478                        /* Replace in-use record with the new record */
2479                        lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
2480                                        "2842 Replace in-use FCF (x%x) "
2481                                        "with failover FCF (x%x)\n",
2482                                        phba->fcf.current_rec.fcf_indx,
2483                                        phba->fcf.failover_rec.fcf_indx);
2484                        memcpy(&phba->fcf.current_rec,
2485                               &phba->fcf.failover_rec,
2486                               sizeof(struct lpfc_fcf_rec));
2487                        /*
2488                         * Mark the fast FCF failover rediscovery completed
2489                         * and the start of the first round of the roundrobin
2490                         * FCF failover.
2491                         */
2492                        spin_lock_irq(&phba->hbalock);
2493                        phba->fcf.fcf_flag &= ~FCF_REDISC_FOV;
2494                        spin_unlock_irq(&phba->hbalock);
2495                        /* Register to the new FCF record */
2496                        lpfc_register_fcf(phba);
2497                } else {
2498                        /*
2499                         * In case of transaction period to fast FCF failover,
2500                         * do nothing when search to the end of the FCF table.
2501                         */
2502                        if ((phba->fcf.fcf_flag & FCF_REDISC_EVT) ||
2503                            (phba->fcf.fcf_flag & FCF_REDISC_PEND))
2504                                return;
2505
2506                        if (phba->cfg_fcf_failover_policy == LPFC_FCF_FOV &&
2507                                phba->fcf.fcf_flag & FCF_IN_USE) {
2508                                /*
2509                                 * In case the current in-use FCF record no
2510                                 * longer existed during FCF discovery that
2511                                 * was not triggered by fast FCF failover
2512                                 * process, treat it as fast FCF failover.
2513                                 */
2514                                lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
2515                                                "2841 In-use FCF record (x%x) "
2516                                                "not reported, entering fast "
2517                                                "FCF failover mode scanning.\n",
2518                                                phba->fcf.current_rec.fcf_indx);
2519                                spin_lock_irq(&phba->hbalock);
2520                                phba->fcf.fcf_flag |= FCF_REDISC_FOV;
2521                                spin_unlock_irq(&phba->hbalock);
2522                                lpfc_sli4_fcf_scan_read_fcf_rec(phba,
2523                                                LPFC_FCOE_FCF_GET_FIRST);
2524                                return;
2525                        }
2526                        /* Register to the new FCF record */
2527                        lpfc_register_fcf(phba);
2528                }
2529        } else
2530                lpfc_sli4_fcf_scan_read_fcf_rec(phba, next_fcf_index);
2531        return;
2532
2533out:
2534        lpfc_sli4_mbox_cmd_free(phba, mboxq);
2535        lpfc_register_fcf(phba);
2536
2537        return;
2538}
2539
2540/**
2541 * lpfc_mbx_cmpl_fcf_rr_read_fcf_rec - fcf roundrobin read_fcf mbox cmpl hdler
2542 * @phba: pointer to lpfc hba data structure.
2543 * @mboxq: pointer to mailbox object.
2544 *
2545 * This is the callback function for FLOGI failure roundrobin FCF failover
2546 * read FCF record mailbox command from the eligible FCF record bmask for
2547 * performing the failover. If the FCF read back is not valid/available, it
2548 * fails through to retrying FLOGI to the currently registered FCF again.
2549 * Otherwise, if the FCF read back is valid and available, it will set the
2550 * newly read FCF record to the failover FCF record, unregister currently
2551 * registered FCF record, copy the failover FCF record to the current
2552 * FCF record, and then register the current FCF record before proceeding
2553 * to trying FLOGI on the new failover FCF.
2554 */
2555void
2556lpfc_mbx_cmpl_fcf_rr_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
2557{
2558        struct fcf_record *new_fcf_record;
2559        uint32_t boot_flag, addr_mode;
2560        uint16_t next_fcf_index, fcf_index;
2561        uint16_t current_fcf_index;
2562        uint16_t vlan_id;
2563        int rc;
2564
2565        /* If link state is not up, stop the roundrobin failover process */
2566        if (phba->link_state < LPFC_LINK_UP) {
2567                spin_lock_irq(&phba->hbalock);
2568                phba->fcf.fcf_flag &= ~FCF_DISCOVERY;
2569                phba->hba_flag &= ~FCF_RR_INPROG;
2570                spin_unlock_irq(&phba->hbalock);
2571                goto out;
2572        }
2573
2574        /* Parse the FCF record from the non-embedded mailbox command */
2575        new_fcf_record = lpfc_sli4_fcf_rec_mbox_parse(phba, mboxq,
2576                                                      &next_fcf_index);
2577        if (!new_fcf_record) {
2578                lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
2579                                "2766 Mailbox command READ_FCF_RECORD "
2580                                "failed to retrieve a FCF record. "
2581                                "hba_flg x%x fcf_flg x%x\n", phba->hba_flag,
2582                                phba->fcf.fcf_flag);
2583                lpfc_unregister_fcf_rescan(phba);
2584                goto out;
2585        }
2586
2587        /* Get the needed parameters from FCF record */
2588        rc = lpfc_match_fcf_conn_list(phba, new_fcf_record, &boot_flag,
2589                                      &addr_mode, &vlan_id);
2590
2591        /* Log the FCF record information if turned on */
2592        lpfc_sli4_log_fcf_record_info(phba, new_fcf_record, vlan_id,
2593                                      next_fcf_index);
2594
2595        fcf_index = bf_get(lpfc_fcf_record_fcf_index, new_fcf_record);
2596        if (!rc) {
2597                lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
2598                                "2848 Remove ineligible FCF (x%x) from "
2599                                "from roundrobin bmask\n", fcf_index);
2600                /* Clear roundrobin bmask bit for ineligible FCF */
2601                lpfc_sli4_fcf_rr_index_clear(phba, fcf_index);
2602                /* Perform next round of roundrobin FCF failover */
2603                fcf_index = lpfc_sli4_fcf_rr_next_index_get(phba);
2604                rc = lpfc_sli4_fcf_rr_next_proc(phba->pport, fcf_index);
2605                if (rc)
2606                        goto out;
2607                goto error_out;
2608        }
2609
2610        if (fcf_index == phba->fcf.current_rec.fcf_indx) {
2611                lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
2612                                "2760 Perform FLOGI roundrobin FCF failover: "
2613                                "FCF (x%x) back to FCF (x%x)\n",
2614                                phba->fcf.current_rec.fcf_indx, fcf_index);
2615                /* Wait 500 ms before retrying FLOGI to current FCF */
2616                msleep(500);
2617                lpfc_issue_init_vfi(phba->pport);
2618                goto out;
2619        }
2620
2621        /* Upload new FCF record to the failover FCF record */
2622        lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
2623                        "2834 Update current FCF (x%x) with new FCF (x%x)\n",
2624                        phba->fcf.failover_rec.fcf_indx, fcf_index);
2625        spin_lock_irq(&phba->hbalock);
2626        __lpfc_update_fcf_record(phba, &phba->fcf.failover_rec,
2627                                 new_fcf_record, addr_mode, vlan_id,
2628                                 (boot_flag ? BOOT_ENABLE : 0));
2629        spin_unlock_irq(&phba->hbalock);
2630
2631        current_fcf_index = phba->fcf.current_rec.fcf_indx;
2632
2633        /* Unregister the current in-use FCF record */
2634        lpfc_unregister_fcf(phba);
2635
2636        /* Replace in-use record with the new record */
2637        memcpy(&phba->fcf.current_rec, &phba->fcf.failover_rec,
2638               sizeof(struct lpfc_fcf_rec));
2639
2640        lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
2641                        "2783 Perform FLOGI roundrobin FCF failover: FCF "
2642                        "(x%x) to FCF (x%x)\n", current_fcf_index, fcf_index);
2643
2644error_out:
2645        lpfc_register_fcf(phba);
2646out:
2647        lpfc_sli4_mbox_cmd_free(phba, mboxq);
2648}
2649
2650/**
2651 * lpfc_mbx_cmpl_read_fcf_rec - read fcf completion handler.
2652 * @phba: pointer to lpfc hba data structure.
2653 * @mboxq: pointer to mailbox object.
2654 *
2655 * This is the callback function of read FCF record mailbox command for
2656 * updating the eligible FCF bmask for FLOGI failure roundrobin FCF
2657 * failover when a new FCF event happened. If the FCF read back is
2658 * valid/available and it passes the connection list check, it updates
2659 * the bmask for the eligible FCF record for roundrobin failover.
2660 */
2661void
2662lpfc_mbx_cmpl_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
2663{
2664        struct fcf_record *new_fcf_record;
2665        uint32_t boot_flag, addr_mode;
2666        uint16_t fcf_index, next_fcf_index;
2667        uint16_t vlan_id;
2668        int rc;
2669
2670        /* If link state is not up, no need to proceed */
2671        if (phba->link_state < LPFC_LINK_UP)
2672                goto out;
2673
2674        /* If FCF discovery period is over, no need to proceed */
2675        if (!(phba->fcf.fcf_flag & FCF_DISCOVERY))
2676                goto out;
2677
2678        /* Parse the FCF record from the non-embedded mailbox command */
2679        new_fcf_record = lpfc_sli4_fcf_rec_mbox_parse(phba, mboxq,
2680                                                      &next_fcf_index);
2681        if (!new_fcf_record) {
2682                lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
2683                                "2767 Mailbox command READ_FCF_RECORD "
2684                                "failed to retrieve a FCF record.\n");
2685                goto out;
2686        }
2687
2688        /* Check the connection list for eligibility */
2689        rc = lpfc_match_fcf_conn_list(phba, new_fcf_record, &boot_flag,
2690                                      &addr_mode, &vlan_id);
2691
2692        /* Log the FCF record information if turned on */
2693        lpfc_sli4_log_fcf_record_info(phba, new_fcf_record, vlan_id,
2694                                      next_fcf_index);
2695
2696        if (!rc)
2697                goto out;
2698
2699        /* Update the eligible FCF record index bmask */
2700        fcf_index = bf_get(lpfc_fcf_record_fcf_index, new_fcf_record);
2701
2702        rc = lpfc_sli4_fcf_pri_list_add(phba, fcf_index, new_fcf_record);
2703
2704out:
2705        lpfc_sli4_mbox_cmd_free(phba, mboxq);
2706}
2707
2708/**
2709 * lpfc_init_vfi_cmpl - Completion handler for init_vfi mbox command.
2710 * @phba: pointer to lpfc hba data structure.
2711 * @mboxq: pointer to mailbox data structure.
2712 *
2713 * This function handles completion of init vfi mailbox command.
2714 */
2715static void
2716lpfc_init_vfi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
2717{
2718        struct lpfc_vport *vport = mboxq->vport;
2719
2720        /*
2721         * VFI not supported on interface type 0, just do the flogi
2722         * Also continue if the VFI is in use - just use the same one.
2723         */
2724        if (mboxq->u.mb.mbxStatus &&
2725            (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
2726                        LPFC_SLI_INTF_IF_TYPE_0) &&
2727            mboxq->u.mb.mbxStatus != MBX_VFI_IN_USE) {
2728                lpfc_printf_vlog(vport, KERN_ERR,
2729                                LOG_MBOX,
2730                                "2891 Init VFI mailbox failed 0x%x\n",
2731                                mboxq->u.mb.mbxStatus);
2732                mempool_free(mboxq, phba->mbox_mem_pool);
2733                lpfc_vport_set_state(vport, FC_VPORT_FAILED);
2734                return;
2735        }
2736
2737        lpfc_initial_flogi(vport);
2738        mempool_free(mboxq, phba->mbox_mem_pool);
2739        return;
2740}
2741
2742/**
2743 * lpfc_issue_init_vfi - Issue init_vfi mailbox command.
2744 * @vport: pointer to lpfc_vport data structure.
2745 *
2746 * This function issue a init_vfi mailbox command to initialize the VFI and
2747 * VPI for the physical port.
2748 */
2749void
2750lpfc_issue_init_vfi(struct lpfc_vport *vport)
2751{
2752        LPFC_MBOXQ_t *mboxq;
2753        int rc;
2754        struct lpfc_hba *phba = vport->phba;
2755
2756        mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
2757        if (!mboxq) {
2758                lpfc_printf_vlog(vport, KERN_ERR,
2759                        LOG_MBOX, "2892 Failed to allocate "
2760                        "init_vfi mailbox\n");
2761                return;
2762        }
2763        lpfc_init_vfi(mboxq, vport);
2764        mboxq->mbox_cmpl = lpfc_init_vfi_cmpl;
2765        rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
2766        if (rc == MBX_NOT_FINISHED) {
2767                lpfc_printf_vlog(vport, KERN_ERR,
2768                        LOG_MBOX, "2893 Failed to issue init_vfi mailbox\n");
2769                mempool_free(mboxq, vport->phba->mbox_mem_pool);
2770        }
2771}
2772
2773/**
2774 * lpfc_init_vpi_cmpl - Completion handler for init_vpi mbox command.
2775 * @phba: pointer to lpfc hba data structure.
2776 * @mboxq: pointer to mailbox data structure.
2777 *
2778 * This function handles completion of init vpi mailbox command.
2779 */
2780void
2781lpfc_init_vpi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
2782{
2783        struct lpfc_vport *vport = mboxq->vport;
2784        struct lpfc_nodelist *ndlp;
2785        struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2786
2787        if (mboxq->u.mb.mbxStatus) {
2788                lpfc_printf_vlog(vport, KERN_ERR,
2789                                LOG_MBOX,
2790                                "2609 Init VPI mailbox failed 0x%x\n",
2791                                mboxq->u.mb.mbxStatus);
2792                mempool_free(mboxq, phba->mbox_mem_pool);
2793                lpfc_vport_set_state(vport, FC_VPORT_FAILED);
2794                return;
2795        }
2796        spin_lock_irq(shost->host_lock);
2797        vport->fc_flag &= ~FC_VPORT_NEEDS_INIT_VPI;
2798        spin_unlock_irq(shost->host_lock);
2799
2800        /* If this port is physical port or FDISC is done, do reg_vpi */
2801        if ((phba->pport == vport) || (vport->port_state == LPFC_FDISC)) {
2802                        ndlp = lpfc_findnode_did(vport, Fabric_DID);
2803                        if (!ndlp)
2804                                lpfc_printf_vlog(vport, KERN_ERR,
2805                                        LOG_DISCOVERY,
2806                                        "2731 Cannot find fabric "
2807                                        "controller node\n");
2808                        else
2809                                lpfc_register_new_vport(phba, vport, ndlp);
2810                        mempool_free(mboxq, phba->mbox_mem_pool);
2811                        return;
2812        }
2813
2814        if (phba->link_flag & LS_NPIV_FAB_SUPPORTED)
2815                lpfc_initial_fdisc(vport);
2816        else {
2817                lpfc_vport_set_state(vport, FC_VPORT_NO_FABRIC_SUPP);
2818                lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
2819                                 "2606 No NPIV Fabric support\n");
2820        }
2821        mempool_free(mboxq, phba->mbox_mem_pool);
2822        return;
2823}
2824
2825/**
2826 * lpfc_issue_init_vpi - Issue init_vpi mailbox command.
2827 * @vport: pointer to lpfc_vport data structure.
2828 *
2829 * This function issue a init_vpi mailbox command to initialize
2830 * VPI for the vport.
2831 */
2832void
2833lpfc_issue_init_vpi(struct lpfc_vport *vport)
2834{
2835        LPFC_MBOXQ_t *mboxq;
2836        int rc, vpi;
2837
2838        if ((vport->port_type != LPFC_PHYSICAL_PORT) && (!vport->vpi)) {
2839                vpi = lpfc_alloc_vpi(vport->phba);
2840                if (!vpi) {
2841                        lpfc_printf_vlog(vport, KERN_ERR,
2842                                         LOG_MBOX,
2843                                         "3303 Failed to obtain vport vpi\n");
2844                        lpfc_vport_set_state(vport, FC_VPORT_FAILED);
2845                        return;
2846                }
2847                vport->vpi = vpi;
2848        }
2849
2850        mboxq = mempool_alloc(vport->phba->mbox_mem_pool, GFP_KERNEL);
2851        if (!mboxq) {
2852                lpfc_printf_vlog(vport, KERN_ERR,
2853                        LOG_MBOX, "2607 Failed to allocate "
2854                        "init_vpi mailbox\n");
2855                return;
2856        }
2857        lpfc_init_vpi(vport->phba, mboxq, vport->vpi);
2858        mboxq->vport = vport;
2859        mboxq->mbox_cmpl = lpfc_init_vpi_cmpl;
2860        rc = lpfc_sli_issue_mbox(vport->phba, mboxq, MBX_NOWAIT);
2861        if (rc == MBX_NOT_FINISHED) {
2862                lpfc_printf_vlog(vport, KERN_ERR,
2863                        LOG_MBOX, "2608 Failed to issue init_vpi mailbox\n");
2864                mempool_free(mboxq, vport->phba->mbox_mem_pool);
2865        }
2866}
2867
2868/**
2869 * lpfc_start_fdiscs - send fdiscs for each vports on this port.
2870 * @phba: pointer to lpfc hba data structure.
2871 *
2872 * This function loops through the list of vports on the @phba and issues an
2873 * FDISC if possible.
2874 */
2875void
2876lpfc_start_fdiscs(struct lpfc_hba *phba)
2877{
2878        struct lpfc_vport **vports;
2879        int i;
2880
2881        vports = lpfc_create_vport_work_array(phba);
2882        if (vports != NULL) {
2883                for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
2884                        if (vports[i]->port_type == LPFC_PHYSICAL_PORT)
2885                                continue;
2886                        /* There are no vpi for this vport */
2887                        if (vports[i]->vpi > phba->max_vpi) {
2888                                lpfc_vport_set_state(vports[i],
2889                                                     FC_VPORT_FAILED);
2890                                continue;
2891                        }
2892                        if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
2893                                lpfc_vport_set_state(vports[i],
2894                                                     FC_VPORT_LINKDOWN);
2895                                continue;
2896                        }
2897                        if (vports[i]->fc_flag & FC_VPORT_NEEDS_INIT_VPI) {
2898                                lpfc_issue_init_vpi(vports[i]);
2899                                continue;
2900                        }
2901                        if (phba->link_flag & LS_NPIV_FAB_SUPPORTED)
2902                                lpfc_initial_fdisc(vports[i]);
2903                        else {
2904                                lpfc_vport_set_state(vports[i],
2905                                                     FC_VPORT_NO_FABRIC_SUPP);
2906                                lpfc_printf_vlog(vports[i], KERN_ERR,
2907                                                 LOG_ELS,
2908                                                 "0259 No NPIV "
2909                                                 "Fabric support\n");
2910                        }
2911                }
2912        }
2913        lpfc_destroy_vport_work_array(phba, vports);
2914}
2915
2916void
2917lpfc_mbx_cmpl_reg_vfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
2918{
2919        struct lpfc_dmabuf *dmabuf = mboxq->context1;
2920        struct lpfc_vport *vport = mboxq->vport;
2921        struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2922
2923        /*
2924         * VFI not supported for interface type 0, so ignore any mailbox
2925         * error (except VFI in use) and continue with the discovery.
2926         */
2927        if (mboxq->u.mb.mbxStatus &&
2928            (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
2929                        LPFC_SLI_INTF_IF_TYPE_0) &&
2930            mboxq->u.mb.mbxStatus != MBX_VFI_IN_USE) {
2931                lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
2932                         "2018 REG_VFI mbxStatus error x%x "
2933                         "HBA state x%x\n",
2934                         mboxq->u.mb.mbxStatus, vport->port_state);
2935                if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
2936                        /* FLOGI failed, use loop map to make discovery list */
2937                        lpfc_disc_list_loopmap(vport);
2938                        /* Start discovery */
2939                        lpfc_disc_start(vport);
2940                        goto out_free_mem;
2941                }
2942                lpfc_vport_set_state(vport, FC_VPORT_FAILED);
2943                goto out_free_mem;
2944        }
2945
2946        /* If the VFI is already registered, there is nothing else to do
2947         * Unless this was a VFI update and we are in PT2PT mode, then
2948         * we should drop through to set the port state to ready.
2949         */
2950        if (vport->fc_flag & FC_VFI_REGISTERED)
2951                if (!(phba->sli_rev == LPFC_SLI_REV4 &&
2952                      vport->fc_flag & FC_PT2PT))
2953                        goto out_free_mem;
2954
2955        /* The VPI is implicitly registered when the VFI is registered */
2956        spin_lock_irq(shost->host_lock);
2957        vport->vpi_state |= LPFC_VPI_REGISTERED;
2958        vport->fc_flag |= FC_VFI_REGISTERED;
2959        vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI;
2960        vport->fc_flag &= ~FC_VPORT_NEEDS_INIT_VPI;
2961        spin_unlock_irq(shost->host_lock);
2962
2963        /* In case SLI4 FC loopback test, we are ready */
2964        if ((phba->sli_rev == LPFC_SLI_REV4) &&
2965            (phba->link_flag & LS_LOOPBACK_MODE)) {
2966                phba->link_state = LPFC_HBA_READY;
2967                goto out_free_mem;
2968        }
2969
2970        lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
2971                         "3313 cmpl reg vfi  port_state:%x fc_flag:%x myDid:%x "
2972                         "alpacnt:%d LinkState:%x topology:%x\n",
2973                         vport->port_state, vport->fc_flag, vport->fc_myDID,
2974                         vport->phba->alpa_map[0],
2975                         phba->link_state, phba->fc_topology);
2976
2977        if (vport->port_state == LPFC_FABRIC_CFG_LINK) {
2978                /*
2979                 * For private loop or for NPort pt2pt,
2980                 * just start discovery and we are done.
2981                 */
2982                if ((vport->fc_flag & FC_PT2PT) ||
2983                    ((phba->fc_topology == LPFC_TOPOLOGY_LOOP) &&
2984                    !(vport->fc_flag & FC_PUBLIC_LOOP))) {
2985
2986                        /* Use loop map to make discovery list */
2987                        lpfc_disc_list_loopmap(vport);
2988                        /* Start discovery */
2989                        if (vport->fc_flag & FC_PT2PT)
2990                                vport->port_state = LPFC_VPORT_READY;
2991                        else
2992                                lpfc_disc_start(vport);
2993                } else {
2994                        lpfc_start_fdiscs(phba);
2995                        lpfc_do_scr_ns_plogi(phba, vport);
2996                }
2997        }
2998
2999out_free_mem:
3000        mempool_free(mboxq, phba->mbox_mem_pool);
3001        if (dmabuf) {
3002                lpfc_mbuf_free(phba, dmabuf->virt, dmabuf->phys);
3003                kfree(dmabuf);
3004        }
3005        return;
3006}
3007
3008static void
3009lpfc_mbx_cmpl_read_sparam(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
3010{
3011        MAILBOX_t *mb = &pmb->u.mb;
3012        struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) pmb->context1;
3013        struct lpfc_vport  *vport = pmb->vport;
3014        struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
3015        struct serv_parm *sp = &vport->fc_sparam;
3016        uint32_t ed_tov;
3017
3018        /* Check for error */
3019        if (mb->mbxStatus) {
3020                /* READ_SPARAM mbox error <mbxStatus> state <hba_state> */
3021                lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
3022                                 "0319 READ_SPARAM mbxStatus error x%x "
3023                                 "hba state x%x>\n",
3024                                 mb->mbxStatus, vport->port_state);
3025                lpfc_linkdown(phba);
3026                goto out;
3027        }
3028
3029        memcpy((uint8_t *) &vport->fc_sparam, (uint8_t *) mp->virt,
3030               sizeof (struct serv_parm));
3031
3032        ed_tov = be32_to_cpu(sp->cmn.e_d_tov);
3033        if (sp->cmn.edtovResolution)    /* E_D_TOV ticks are in nanoseconds */
3034                ed_tov = (ed_tov + 999999) / 1000000;
3035
3036        phba->fc_edtov = ed_tov;
3037        phba->fc_ratov = (2 * ed_tov) / 1000;
3038        if (phba->fc_ratov < FF_DEF_RATOV) {
3039                /* RA_TOV should be atleast 10sec for initial flogi */
3040                phba->fc_ratov = FF_DEF_RATOV;
3041        }
3042
3043        lpfc_update_vport_wwn(vport);
3044        fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
3045        if (vport->port_type == LPFC_PHYSICAL_PORT) {
3046                memcpy(&phba->wwnn, &vport->fc_nodename, sizeof(phba->wwnn));
3047                memcpy(&phba->wwpn, &vport->fc_portname, sizeof(phba->wwnn));
3048        }
3049
3050        lpfc_mbuf_free(phba, mp->virt, mp->phys);
3051        kfree(mp);
3052        mempool_free(pmb, phba->mbox_mem_pool);
3053        return;
3054
3055out:
3056        pmb->context1 = NULL;
3057        lpfc_mbuf_free(phba, mp->virt, mp->phys);
3058        kfree(mp);
3059        lpfc_issue_clear_la(phba, vport);
3060        mempool_free(pmb, phba->mbox_mem_pool);
3061        return;
3062}
3063
3064static void
3065lpfc_mbx_process_link_up(struct lpfc_hba *phba, struct lpfc_mbx_read_top *la)
3066{
3067        struct lpfc_vport *vport = phba->pport;
3068        LPFC_MBOXQ_t *sparam_mbox, *cfglink_mbox = NULL;
3069        struct Scsi_Host *shost;
3070        int i;
3071        struct lpfc_dmabuf *mp;
3072        int rc;
3073        struct fcf_record *fcf_record;
3074        uint32_t fc_flags = 0;
3075
3076        spin_lock_irq(&phba->hbalock);
3077        phba->fc_linkspeed = bf_get(lpfc_mbx_read_top_link_spd, la);
3078
3079        if (!(phba->hba_flag & HBA_FCOE_MODE)) {
3080                switch (bf_get(lpfc_mbx_read_top_link_spd, la)) {
3081                case LPFC_LINK_SPEED_1GHZ:
3082                case LPFC_LINK_SPEED_2GHZ:
3083                case LPFC_LINK_SPEED_4GHZ:
3084                case LPFC_LINK_SPEED_8GHZ:
3085                case LPFC_LINK_SPEED_10GHZ:
3086                case LPFC_LINK_SPEED_16GHZ:
3087                case LPFC_LINK_SPEED_32GHZ:
3088                case LPFC_LINK_SPEED_64GHZ:
3089                        break;
3090                default:
3091                        phba->fc_linkspeed = LPFC_LINK_SPEED_UNKNOWN;
3092                        break;
3093                }
3094        }
3095
3096        if (phba->fc_topology &&
3097            phba->fc_topology != bf_get(lpfc_mbx_read_top_topology, la)) {
3098                lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
3099                                "3314 Toplogy changed was 0x%x is 0x%x\n",
3100                                phba->fc_topology,
3101                                bf_get(lpfc_mbx_read_top_topology, la));
3102                phba->fc_topology_changed = 1;
3103        }
3104
3105        phba->fc_topology = bf_get(lpfc_mbx_read_top_topology, la);
3106        phba->link_flag &= ~LS_NPIV_FAB_SUPPORTED;
3107
3108        shost = lpfc_shost_from_vport(vport);
3109        if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
3110                phba->sli3_options &= ~LPFC_SLI3_NPIV_ENABLED;
3111
3112                /* if npiv is enabled and this adapter supports npiv log
3113                 * a message that npiv is not supported in this topology
3114                 */
3115                if (phba->cfg_enable_npiv && phba->max_vpi)
3116                        lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
3117                                "1309 Link Up Event npiv not supported in loop "
3118                                "topology\n");
3119                                /* Get Loop Map information */
3120                if (bf_get(lpfc_mbx_read_top_il, la))
3121                        fc_flags |= FC_LBIT;
3122
3123                vport->fc_myDID = bf_get(lpfc_mbx_read_top_alpa_granted, la);
3124                i = la->lilpBde64.tus.f.bdeSize;
3125
3126                if (i == 0) {
3127                        phba->alpa_map[0] = 0;
3128                } else {
3129                        if (vport->cfg_log_verbose & LOG_LINK_EVENT) {
3130                                int numalpa, j, k;
3131                                union {
3132                                        uint8_t pamap[16];
3133                                        struct {
3134                                                uint32_t wd1;
3135                                                uint32_t wd2;
3136                                                uint32_t wd3;
3137                                                uint32_t wd4;
3138                                        } pa;
3139                                } un;
3140                                numalpa = phba->alpa_map[0];
3141                                j = 0;
3142                                while (j < numalpa) {
3143                                        memset(un.pamap, 0, 16);
3144                                        for (k = 1; j < numalpa; k++) {
3145                                                un.pamap[k - 1] =
3146                                                        phba->alpa_map[j + 1];
3147                                                j++;
3148                                                if (k == 16)
3149                                                        break;
3150                                        }
3151                                        /* Link Up Event ALPA map */
3152                                        lpfc_printf_log(phba,
3153                                                        KERN_WARNING,
3154                                                        LOG_LINK_EVENT,
3155                                                        "1304 Link Up Event "
3156                                                        "ALPA map Data: x%x "
3157                                                        "x%x x%x x%x\n",
3158                                                        un.pa.wd1, un.pa.wd2,
3159                                                        un.pa.wd3, un.pa.wd4);
3160                                }
3161                        }
3162                }
3163        } else {
3164                if (!(phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)) {
3165                        if (phba->max_vpi && phba->cfg_enable_npiv &&
3166                           (phba->sli_rev >= LPFC_SLI_REV3))
3167                                phba->sli3_options |= LPFC_SLI3_NPIV_ENABLED;
3168                }
3169                vport->fc_myDID = phba->fc_pref_DID;
3170                fc_flags |= FC_LBIT;
3171        }
3172        spin_unlock_irq(&phba->hbalock);
3173
3174        if (fc_flags) {
3175                spin_lock_irq(shost->host_lock);
3176                vport->fc_flag |= fc_flags;
3177                spin_unlock_irq(shost->host_lock);
3178        }
3179
3180        lpfc_linkup(phba);
3181        sparam_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
3182        if (!sparam_mbox)
3183                goto out;
3184
3185        rc = lpfc_read_sparam(phba, sparam_mbox, 0);
3186        if (rc) {
3187                mempool_free(sparam_mbox, phba->mbox_mem_pool);
3188                goto out;
3189        }
3190        sparam_mbox->vport = vport;
3191        sparam_mbox->mbox_cmpl = lpfc_mbx_cmpl_read_sparam;
3192        rc = lpfc_sli_issue_mbox(phba, sparam_mbox, MBX_NOWAIT);
3193        if (rc == MBX_NOT_FINISHED) {
3194                mp = (struct lpfc_dmabuf *) sparam_mbox->context1;
3195                lpfc_mbuf_free(phba, mp->virt, mp->phys);
3196                kfree(mp);
3197                mempool_free(sparam_mbox, phba->mbox_mem_pool);
3198                goto out;
3199        }
3200
3201        if (!(phba->hba_flag & HBA_FCOE_MODE)) {
3202                cfglink_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
3203                if (!cfglink_mbox)
3204                        goto out;
3205                vport->port_state = LPFC_LOCAL_CFG_LINK;
3206                lpfc_config_link(phba, cfglink_mbox);
3207                cfglink_mbox->vport = vport;
3208                cfglink_mbox->mbox_cmpl = lpfc_mbx_cmpl_local_config_link;
3209                rc = lpfc_sli_issue_mbox(phba, cfglink_mbox, MBX_NOWAIT);
3210                if (rc == MBX_NOT_FINISHED) {
3211                        mempool_free(cfglink_mbox, phba->mbox_mem_pool);
3212                        goto out;
3213                }
3214        } else {
3215                vport->port_state = LPFC_VPORT_UNKNOWN;
3216                /*
3217                 * Add the driver's default FCF record at FCF index 0 now. This
3218                 * is phase 1 implementation that support FCF index 0 and driver
3219                 * defaults.
3220                 */
3221                if (!(phba->hba_flag & HBA_FIP_SUPPORT)) {
3222                        fcf_record = kzalloc(sizeof(struct fcf_record),
3223                                        GFP_KERNEL);
3224                        if (unlikely(!fcf_record)) {
3225                                lpfc_printf_log(phba, KERN_ERR,
3226                                        LOG_MBOX | LOG_SLI,
3227                                        "2554 Could not allocate memory for "
3228                                        "fcf record\n");
3229                                rc = -ENODEV;
3230                                goto out;
3231                        }
3232
3233                        lpfc_sli4_build_dflt_fcf_record(phba, fcf_record,
3234                                                LPFC_FCOE_FCF_DEF_INDEX);
3235                        rc = lpfc_sli4_add_fcf_record(phba, fcf_record);
3236                        if (unlikely(rc)) {
3237                                lpfc_printf_log(phba, KERN_ERR,
3238                                        LOG_MBOX | LOG_SLI,
3239                                        "2013 Could not manually add FCF "
3240                                        "record 0, status %d\n", rc);
3241                                rc = -ENODEV;
3242                                kfree(fcf_record);
3243                                goto out;
3244                        }
3245                        kfree(fcf_record);
3246                }
3247                /*
3248                 * The driver is expected to do FIP/FCF. Call the port
3249                 * and get the FCF Table.
3250                 */
3251                spin_lock_irq(&phba->hbalock);
3252                if (phba->hba_flag & FCF_TS_INPROG) {
3253                        spin_unlock_irq(&phba->hbalock);
3254                        return;
3255                }
3256                /* This is the initial FCF discovery scan */
3257                phba->fcf.fcf_flag |= FCF_INIT_DISC;
3258                spin_unlock_irq(&phba->hbalock);
3259                lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
3260                                "2778 Start FCF table scan at linkup\n");
3261                rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba,
3262                                                     LPFC_FCOE_FCF_GET_FIRST);
3263                if (rc) {
3264                        spin_lock_irq(&phba->hbalock);
3265                        phba->fcf.fcf_flag &= ~FCF_INIT_DISC;
3266                        spin_unlock_irq(&phba->hbalock);
3267                        goto out;
3268                }
3269                /* Reset FCF roundrobin bmask for new discovery */
3270                lpfc_sli4_clear_fcf_rr_bmask(phba);
3271        }
3272
3273        return;
3274out:
3275        lpfc_vport_set_state(vport, FC_VPORT_FAILED);
3276        lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
3277                         "0263 Discovery Mailbox error: state: 0x%x : %p %p\n",
3278                         vport->port_state, sparam_mbox, cfglink_mbox);
3279        lpfc_issue_clear_la(phba, vport);
3280        return;
3281}
3282
3283static void
3284lpfc_enable_la(struct lpfc_hba *phba)
3285{
3286        uint32_t control;
3287        struct lpfc_sli *psli = &phba->sli;
3288        spin_lock_irq(&phba->hbalock);
3289        psli->sli_flag |= LPFC_PROCESS_LA;
3290        if (phba->sli_rev <= LPFC_SLI_REV3) {
3291                control = readl(phba->HCregaddr);
3292                control |= HC_LAINT_ENA;
3293                writel(control, phba->HCregaddr);
3294                readl(phba->HCregaddr); /* flush */
3295        }
3296        spin_unlock_irq(&phba->hbalock);
3297}
3298
3299static void
3300lpfc_mbx_issue_link_down(struct lpfc_hba *phba)
3301{
3302        lpfc_linkdown(phba);
3303        lpfc_enable_la(phba);
3304        lpfc_unregister_unused_fcf(phba);
3305        /* turn on Link Attention interrupts - no CLEAR_LA needed */
3306}
3307
3308
3309/*
3310 * This routine handles processing a READ_TOPOLOGY mailbox
3311 * command upon completion. It is setup in the LPFC_MBOXQ
3312 * as the completion routine when the command is
3313 * handed off to the SLI layer. SLI4 only.
3314 */
3315void
3316lpfc_mbx_cmpl_read_topology(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
3317{
3318        struct lpfc_vport *vport = pmb->vport;
3319        struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
3320        struct lpfc_mbx_read_top *la;
3321        struct lpfc_sli_ring *pring;
3322        MAILBOX_t *mb = &pmb->u.mb;
3323        struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
3324        uint8_t attn_type;
3325
3326        /* Unblock ELS traffic */
3327        pring = lpfc_phba_elsring(phba);
3328        if (pring)
3329                pring->flag &= ~LPFC_STOP_IOCB_EVENT;
3330
3331        /* Check for error */
3332        if (mb->mbxStatus) {
3333                lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
3334                                "1307 READ_LA mbox error x%x state x%x\n",
3335                                mb->mbxStatus, vport->port_state);
3336                lpfc_mbx_issue_link_down(phba);
3337                phba->link_state = LPFC_HBA_ERROR;
3338                goto lpfc_mbx_cmpl_read_topology_free_mbuf;
3339        }
3340
3341        la = (struct lpfc_mbx_read_top *) &pmb->u.mb.un.varReadTop;
3342        attn_type = bf_get(lpfc_mbx_read_top_att_type, la);
3343
3344        memcpy(&phba->alpa_map[0], mp->virt, 128);
3345
3346        spin_lock_irq(shost->host_lock);
3347        if (bf_get(lpfc_mbx_read_top_pb, la))
3348                vport->fc_flag |= FC_BYPASSED_MODE;
3349        else
3350                vport->fc_flag &= ~FC_BYPASSED_MODE;
3351        spin_unlock_irq(shost->host_lock);
3352
3353        if (phba->fc_eventTag <= la->eventTag) {
3354                phba->fc_stat.LinkMultiEvent++;
3355                if (attn_type == LPFC_ATT_LINK_UP)
3356                        if (phba->fc_eventTag != 0)
3357                                lpfc_linkdown(phba);
3358        }
3359
3360        phba->fc_eventTag = la->eventTag;
3361        if (phba->sli_rev < LPFC_SLI_REV4) {
3362                spin_lock_irq(&phba->hbalock);
3363                if (bf_get(lpfc_mbx_read_top_mm, la))
3364                        phba->sli.sli_flag |= LPFC_MENLO_MAINT;
3365                else
3366                        phba->sli.sli_flag &= ~LPFC_MENLO_MAINT;
3367                spin_unlock_irq(&phba->hbalock);
3368        }
3369
3370        phba->link_events++;
3371        if ((attn_type == LPFC_ATT_LINK_UP) &&
3372            !(phba->sli.sli_flag & LPFC_MENLO_MAINT)) {
3373                phba->fc_stat.LinkUp++;
3374                if (phba->link_flag & LS_LOOPBACK_MODE) {
3375                        lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
3376                                        "1306 Link Up Event in loop back mode "
3377                                        "x%x received Data: x%x x%x x%x x%x\n",
3378                                        la->eventTag, phba->fc_eventTag,
3379                                        bf_get(lpfc_mbx_read_top_alpa_granted,
3380                                               la),
3381                                        bf_get(lpfc_mbx_read_top_link_spd, la),
3382                                        phba->alpa_map[0]);
3383                } else {
3384                        lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
3385                                        "1303 Link Up Event x%x received "
3386                                        "Data: x%x x%x x%x x%x x%x x%x %d\n",
3387                                        la->eventTag, phba->fc_eventTag,
3388                                        bf_get(lpfc_mbx_read_top_alpa_granted,
3389                                               la),
3390                                        bf_get(lpfc_mbx_read_top_link_spd, la),
3391                                        phba->alpa_map[0],
3392                                        bf_get(lpfc_mbx_read_top_mm, la),
3393                                        bf_get(lpfc_mbx_read_top_fa, la),
3394                                        phba->wait_4_mlo_maint_flg);
3395                }
3396                lpfc_mbx_process_link_up(phba, la);
3397        } else if (attn_type == LPFC_ATT_LINK_DOWN ||
3398                   attn_type == LPFC_ATT_UNEXP_WWPN) {
3399                phba->fc_stat.LinkDown++;
3400                if (phba->link_flag & LS_LOOPBACK_MODE)
3401                        lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
3402                                "1308 Link Down Event in loop back mode "
3403                                "x%x received "
3404                                "Data: x%x x%x x%x\n",
3405                                la->eventTag, phba->fc_eventTag,
3406                                phba->pport->port_state, vport->fc_flag);
3407                else if (attn_type == LPFC_ATT_UNEXP_WWPN)
3408                        lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
3409                                "1313 Link Down UNEXP WWPN Event x%x received "
3410                                "Data: x%x x%x x%x x%x x%x\n",
3411                                la->eventTag, phba->fc_eventTag,
3412                                phba->pport->port_state, vport->fc_flag,
3413                                bf_get(lpfc_mbx_read_top_mm, la),
3414                                bf_get(lpfc_mbx_read_top_fa, la));
3415                else
3416                        lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
3417                                "1305 Link Down Event x%x received "
3418                                "Data: x%x x%x x%x x%x x%x\n",
3419                                la->eventTag, phba->fc_eventTag,
3420                                phba->pport->port_state, vport->fc_flag,
3421                                bf_get(lpfc_mbx_read_top_mm, la),
3422                                bf_get(lpfc_mbx_read_top_fa, la));
3423                lpfc_mbx_issue_link_down(phba);
3424        }
3425        if (phba->sli.sli_flag & LPFC_MENLO_MAINT &&
3426            attn_type == LPFC_ATT_LINK_UP) {
3427                if (phba->link_state != LPFC_LINK_DOWN) {
3428                        phba->fc_stat.LinkDown++;
3429                        lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
3430                                "1312 Link Down Event x%x received "
3431                                "Data: x%x x%x x%x\n",
3432                                la->eventTag, phba->fc_eventTag,
3433                                phba->pport->port_state, vport->fc_flag);
3434                        lpfc_mbx_issue_link_down(phba);
3435                } else
3436                        lpfc_enable_la(phba);
3437
3438                lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
3439                                "1310 Menlo Maint Mode Link up Event x%x rcvd "
3440                                "Data: x%x x%x x%x\n",
3441                                la->eventTag, phba->fc_eventTag,
3442                                phba->pport->port_state, vport->fc_flag);
3443                /*
3444                 * The cmnd that triggered this will be waiting for this
3445                 * signal.
3446                 */
3447                /* WAKEUP for MENLO_SET_MODE or MENLO_RESET command. */
3448                if (phba->wait_4_mlo_maint_flg) {
3449                        phba->wait_4_mlo_maint_flg = 0;
3450                        wake_up_interruptible(&phba->wait_4_mlo_m_q);
3451                }
3452        }
3453
3454        if ((phba->sli_rev < LPFC_SLI_REV4) &&
3455            bf_get(lpfc_mbx_read_top_fa, la)) {
3456                if (phba->sli.sli_flag & LPFC_MENLO_MAINT)
3457                        lpfc_issue_clear_la(phba, vport);
3458                lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
3459                                "1311 fa %d\n",
3460                                bf_get(lpfc_mbx_read_top_fa, la));
3461        }
3462
3463lpfc_mbx_cmpl_read_topology_free_mbuf:
3464        lpfc_mbuf_free(phba, mp->virt, mp->phys);
3465        kfree(mp);
3466        mempool_free(pmb, phba->mbox_mem_pool);
3467        return;
3468}
3469
3470/*
3471 * This routine handles processing a REG_LOGIN mailbox
3472 * command upon completion. It is setup in the LPFC_MBOXQ
3473 * as the completion routine when the command is
3474 * handed off to the SLI layer.
3475 */
3476void
3477lpfc_mbx_cmpl_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
3478{
3479        struct lpfc_vport  *vport = pmb->vport;
3480        struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
3481        struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2;
3482        struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
3483
3484        pmb->context1 = NULL;
3485        pmb->context2 = NULL;
3486
3487        lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
3488                         "0002 rpi:%x DID:%x flg:%x %d map:%x %p\n",
3489                         ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag,
3490                         kref_read(&ndlp->kref),
3491                         ndlp->nlp_usg_map, ndlp);
3492        if (ndlp->nlp_flag & NLP_REG_LOGIN_SEND)
3493                ndlp->nlp_flag &= ~NLP_REG_LOGIN_SEND;
3494
3495        if (ndlp->nlp_flag & NLP_IGNR_REG_CMPL ||
3496            ndlp->nlp_state != NLP_STE_REG_LOGIN_ISSUE) {
3497                /* We rcvd a rscn after issuing this
3498                 * mbox reg login, we may have cycled
3499                 * back through the state and be
3500                 * back at reg login state so this
3501                 * mbox needs to be ignored becase
3502                 * there is another reg login in
3503                 * process.
3504                 */
3505                spin_lock_irq(shost->host_lock);
3506                ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
3507                spin_unlock_irq(shost->host_lock);
3508
3509                /*
3510                 * We cannot leave the RPI registered because
3511                 * if we go thru discovery again for this ndlp
3512                 * a subsequent REG_RPI will fail.
3513                 */
3514                ndlp->nlp_flag |= NLP_RPI_REGISTERED;
3515                lpfc_unreg_rpi(vport, ndlp);
3516        }
3517
3518        /* Call state machine */
3519        lpfc_disc_state_machine(vport, ndlp, pmb, NLP_EVT_CMPL_REG_LOGIN);
3520
3521        lpfc_mbuf_free(phba, mp->virt, mp->phys);
3522        kfree(mp);
3523        mempool_free(pmb, phba->mbox_mem_pool);
3524        /* decrement the node reference count held for this callback
3525         * function.
3526         */
3527        lpfc_nlp_put(ndlp);
3528
3529        return;
3530}
3531
3532static void
3533lpfc_mbx_cmpl_unreg_vpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
3534{
3535        MAILBOX_t *mb = &pmb->u.mb;
3536        struct lpfc_vport *vport = pmb->vport;
3537        struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
3538
3539        switch (mb->mbxStatus) {
3540        case 0x0011:
3541        case 0x0020:
3542                lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
3543                                 "0911 cmpl_unreg_vpi, mb status = 0x%x\n",
3544                                 mb->mbxStatus);
3545                break;
3546        /* If VPI is busy, reset the HBA */
3547        case 0x9700:
3548                lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE,
3549                        "2798 Unreg_vpi failed vpi 0x%x, mb status = 0x%x\n",
3550                        vport->vpi, mb->mbxStatus);
3551                if (!(phba->pport->load_flag & FC_UNLOADING))
3552                        lpfc_workq_post_event(phba, NULL, NULL,
3553                                LPFC_EVT_RESET_HBA);
3554        }
3555        spin_lock_irq(shost->host_lock);
3556        vport->vpi_state &= ~LPFC_VPI_REGISTERED;
3557        vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
3558        spin_unlock_irq(shost->host_lock);
3559        vport->unreg_vpi_cmpl = VPORT_OK;
3560        mempool_free(pmb, phba->mbox_mem_pool);
3561        lpfc_cleanup_vports_rrqs(vport, NULL);
3562        /*
3563         * This shost reference might have been taken at the beginning of
3564         * lpfc_vport_delete()
3565         */
3566        if ((vport->load_flag & FC_UNLOADING) && (vport != phba->pport))
3567                scsi_host_put(shost);
3568}
3569
3570int
3571lpfc_mbx_unreg_vpi(struct lpfc_vport *vport)
3572{
3573        struct lpfc_hba  *phba = vport->phba;
3574        LPFC_MBOXQ_t *mbox;
3575        int rc;
3576
3577        mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
3578        if (!mbox)
3579                return 1;
3580
3581        lpfc_unreg_vpi(phba, vport->vpi, mbox);
3582        mbox->vport = vport;
3583        mbox->mbox_cmpl = lpfc_mbx_cmpl_unreg_vpi;
3584        rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
3585        if (rc == MBX_NOT_FINISHED) {
3586                lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX | LOG_VPORT,
3587                                 "1800 Could not issue unreg_vpi\n");
3588                mempool_free(mbox, phba->mbox_mem_pool);
3589                vport->unreg_vpi_cmpl = VPORT_ERROR;
3590                return rc;
3591        }
3592        return 0;
3593}
3594
3595static void
3596lpfc_mbx_cmpl_reg_vpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
3597{
3598        struct lpfc_vport *vport = pmb->vport;
3599        struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
3600        MAILBOX_t *mb = &pmb->u.mb;
3601
3602        switch (mb->mbxStatus) {
3603        case 0x0011:
3604        case 0x9601:
3605        case 0x9602:
3606                lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
3607                                 "0912 cmpl_reg_vpi, mb status = 0x%x\n",
3608                                 mb->mbxStatus);
3609                lpfc_vport_set_state(vport, FC_VPORT_FAILED);
3610                spin_lock_irq(shost->host_lock);
3611                vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
3612                spin_unlock_irq(shost->host_lock);
3613                vport->fc_myDID = 0;
3614
3615                if ((phba->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) ||
3616                    (phba->cfg_enable_fc4_type == LPFC_ENABLE_NVME)) {
3617                        if (phba->nvmet_support)
3618                                lpfc_nvmet_update_targetport(phba);
3619                        else
3620                                lpfc_nvme_update_localport(vport);
3621                }
3622                goto out;
3623        }
3624
3625        spin_lock_irq(shost->host_lock);
3626        vport->vpi_state |= LPFC_VPI_REGISTERED;
3627        vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI;
3628        spin_unlock_irq(shost->host_lock);
3629        vport->num_disc_nodes = 0;
3630        /* go thru NPR list and issue ELS PLOGIs */
3631        if (vport->fc_npr_cnt)
3632                lpfc_els_disc_plogi(vport);
3633
3634        if (!vport->num_disc_nodes) {
3635                spin_lock_irq(shost->host_lock);
3636                vport->fc_flag &= ~FC_NDISC_ACTIVE;
3637                spin_unlock_irq(shost->host_lock);
3638                lpfc_can_disctmo(vport);
3639        }
3640        vport->port_state = LPFC_VPORT_READY;
3641
3642out:
3643        mempool_free(pmb, phba->mbox_mem_pool);
3644        return;
3645}
3646
3647/**
3648 * lpfc_create_static_vport - Read HBA config region to create static vports.
3649 * @phba: pointer to lpfc hba data structure.
3650 *
3651 * This routine issue a DUMP mailbox command for config region 22 to get
3652 * the list of static vports to be created. The function create vports
3653 * based on the information returned from the HBA.
3654 **/
3655void
3656lpfc_create_static_vport(struct lpfc_hba *phba)
3657{
3658        LPFC_MBOXQ_t *pmb = NULL;
3659        MAILBOX_t *mb;
3660        struct static_vport_info *vport_info;
3661        int mbx_wait_rc = 0, i;
3662        struct fc_vport_identifiers vport_id;
3663        struct fc_vport *new_fc_vport;
3664        struct Scsi_Host *shost;
3665        struct lpfc_vport *vport;
3666        uint16_t offset = 0;
3667        uint8_t *vport_buff;
3668        struct lpfc_dmabuf *mp;
3669        uint32_t byte_count = 0;
3670
3671        pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
3672        if (!pmb) {
3673                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3674                                "0542 lpfc_create_static_vport failed to"
3675                                " allocate mailbox memory\n");
3676                return;
3677        }
3678        memset(pmb, 0, sizeof(LPFC_MBOXQ_t));
3679        mb = &pmb->u.mb;
3680
3681        vport_info = kzalloc(sizeof(struct static_vport_info), GFP_KERNEL);
3682        if (!vport_info) {
3683                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3684                                "0543 lpfc_create_static_vport failed to"
3685                                " allocate vport_info\n");
3686                mempool_free(pmb, phba->mbox_mem_pool);
3687                return;
3688        }
3689
3690        vport_buff = (uint8_t *) vport_info;
3691        do {
3692                /* free dma buffer from previous round */
3693                if (pmb->context1) {
3694                        mp = (struct lpfc_dmabuf *)pmb->context1;
3695                        lpfc_mbuf_free(phba, mp->virt, mp->phys);
3696                        kfree(mp);
3697                }
3698                if (lpfc_dump_static_vport(phba, pmb, offset))
3699                        goto out;
3700
3701                pmb->vport = phba->pport;
3702                mbx_wait_rc = lpfc_sli_issue_mbox_wait(phba, pmb,
3703                                                        LPFC_MBOX_TMO);
3704
3705                if ((mbx_wait_rc != MBX_SUCCESS) || mb->mbxStatus) {
3706                        lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
3707                                "0544 lpfc_create_static_vport failed to"
3708                                " issue dump mailbox command ret 0x%x "
3709                                "status 0x%x\n",
3710                                mbx_wait_rc, mb->mbxStatus);
3711                        goto out;
3712                }
3713
3714                if (phba->sli_rev == LPFC_SLI_REV4) {
3715                        byte_count = pmb->u.mqe.un.mb_words[5];
3716                        mp = (struct lpfc_dmabuf *)pmb->context1;
3717                        if (byte_count > sizeof(struct static_vport_info) -
3718                                        offset)
3719                                byte_count = sizeof(struct static_vport_info)
3720                                        - offset;
3721                        memcpy(vport_buff + offset, mp->virt, byte_count);
3722                        offset += byte_count;
3723                } else {
3724                        if (mb->un.varDmp.word_cnt >
3725                                sizeof(struct static_vport_info) - offset)
3726                                mb->un.varDmp.word_cnt =
3727                                        sizeof(struct static_vport_info)
3728                                                - offset;
3729                        byte_count = mb->un.varDmp.word_cnt;
3730                        lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
3731                                vport_buff + offset,
3732                                byte_count);
3733
3734                        offset += byte_count;
3735                }
3736
3737        } while (byte_count &&
3738                offset < sizeof(struct static_vport_info));
3739
3740
3741        if ((le32_to_cpu(vport_info->signature) != VPORT_INFO_SIG) ||
3742                ((le32_to_cpu(vport_info->rev) & VPORT_INFO_REV_MASK)
3743                        != VPORT_INFO_REV)) {
3744                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3745                        "0545 lpfc_create_static_vport bad"
3746                        " information header 0x%x 0x%x\n",
3747                        le32_to_cpu(vport_info->signature),
3748                        le32_to_cpu(vport_info->rev) & VPORT_INFO_REV_MASK);
3749
3750                goto out;
3751        }
3752
3753        shost = lpfc_shost_from_vport(phba->pport);
3754
3755        for (i = 0; i < MAX_STATIC_VPORT_COUNT; i++) {
3756                memset(&vport_id, 0, sizeof(vport_id));
3757                vport_id.port_name = wwn_to_u64(vport_info->vport_list[i].wwpn);
3758                vport_id.node_name = wwn_to_u64(vport_info->vport_list[i].wwnn);
3759                if (!vport_id.port_name || !vport_id.node_name)
3760                        continue;
3761
3762                vport_id.roles = FC_PORT_ROLE_FCP_INITIATOR;
3763                vport_id.vport_type = FC_PORTTYPE_NPIV;
3764                vport_id.disable = false;
3765                new_fc_vport = fc_vport_create(shost, 0, &vport_id);
3766
3767                if (!new_fc_vport) {
3768                        lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
3769                                "0546 lpfc_create_static_vport failed to"
3770                                " create vport\n");
3771                        continue;
3772                }
3773
3774                vport = *(struct lpfc_vport **)new_fc_vport->dd_data;
3775                vport->vport_flag |= STATIC_VPORT;
3776        }
3777
3778out:
3779        kfree(vport_info);
3780        if (mbx_wait_rc != MBX_TIMEOUT) {
3781                if (pmb->context1) {
3782                        mp = (struct lpfc_dmabuf *)pmb->context1;
3783                        lpfc_mbuf_free(phba, mp->virt, mp->phys);
3784                        kfree(mp);
3785                }
3786                mempool_free(pmb, phba->mbox_mem_pool);
3787        }
3788
3789        return;
3790}
3791
3792/*
3793 * This routine handles processing a Fabric REG_LOGIN mailbox
3794 * command upon completion. It is setup in the LPFC_MBOXQ
3795 * as the completion routine when the command is
3796 * handed off to the SLI layer.
3797 */
3798void
3799lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
3800{
3801        struct lpfc_vport *vport = pmb->vport;
3802        MAILBOX_t *mb = &pmb->u.mb;
3803        struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
3804        struct lpfc_nodelist *ndlp;
3805        struct Scsi_Host *shost;
3806
3807        ndlp = (struct lpfc_nodelist *) pmb->context2;
3808        pmb->context1 = NULL;
3809        pmb->context2 = NULL;
3810
3811        if (mb->mbxStatus) {
3812                lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
3813                                 "0258 Register Fabric login error: 0x%x\n",
3814                                 mb->mbxStatus);
3815                lpfc_mbuf_free(phba, mp->virt, mp->phys);
3816                kfree(mp);
3817                mempool_free(pmb, phba->mbox_mem_pool);
3818
3819                if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
3820                        /* FLOGI failed, use loop map to make discovery list */
3821                        lpfc_disc_list_loopmap(vport);
3822
3823                        /* Start discovery */
3824                        lpfc_disc_start(vport);
3825                        /* Decrement the reference count to ndlp after the
3826                         * reference to the ndlp are done.
3827                         */
3828                        lpfc_nlp_put(ndlp);
3829                        return;
3830                }
3831
3832                lpfc_vport_set_state(vport, FC_VPORT_FAILED);
3833                /* Decrement the reference count to ndlp after the reference
3834                 * to the ndlp are done.
3835                 */
3836                lpfc_nlp_put(ndlp);
3837                return;
3838        }
3839
3840        if (phba->sli_rev < LPFC_SLI_REV4)
3841                ndlp->nlp_rpi = mb->un.varWords[0];
3842        ndlp->nlp_flag |= NLP_RPI_REGISTERED;
3843        ndlp->nlp_type |= NLP_FABRIC;
3844        lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
3845
3846        if (vport->port_state == LPFC_FABRIC_CFG_LINK) {
3847                /* when physical port receive logo donot start
3848                 * vport discovery */
3849                if (!(vport->fc_flag & FC_LOGO_RCVD_DID_CHNG))
3850                        lpfc_start_fdiscs(phba);
3851                else {
3852                        shost = lpfc_shost_from_vport(vport);
3853                        spin_lock_irq(shost->host_lock);
3854                        vport->fc_flag &= ~FC_LOGO_RCVD_DID_CHNG ;
3855                        spin_unlock_irq(shost->host_lock);
3856                }
3857                lpfc_do_scr_ns_plogi(phba, vport);
3858        }
3859
3860        lpfc_mbuf_free(phba, mp->virt, mp->phys);
3861        kfree(mp);
3862        mempool_free(pmb, phba->mbox_mem_pool);
3863
3864        /* Drop the reference count from the mbox at the end after
3865         * all the current reference to the ndlp have been done.
3866         */
3867        lpfc_nlp_put(ndlp);
3868        return;
3869}
3870
3871 /*
3872  * This routine will issue a GID_FT for each FC4 Type supported
3873  * by the driver. ALL GID_FTs must complete before discovery is started.
3874  */
3875int
3876lpfc_issue_gidft(struct lpfc_vport *vport)
3877{
3878        struct lpfc_hba *phba = vport->phba;
3879        struct lpfc_nodelist *ndlp;
3880
3881        list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp)
3882                ndlp->nlp_fc4_type &= ~(NLP_FC4_FCP | NLP_FC4_NVME);
3883
3884        /* Good status, issue CT Request to NameServer */
3885        if ((phba->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) ||
3886            (phba->cfg_enable_fc4_type == LPFC_ENABLE_FCP)) {
3887                if (lpfc_ns_cmd(vport, SLI_CTNS_GID_FT, 0, SLI_CTPT_FCP)) {
3888                        /* Cannot issue NameServer FCP Query, so finish up
3889                         * discovery
3890                         */
3891                        lpfc_printf_vlog(vport, KERN_ERR, LOG_SLI,
3892                                         "0604 %s FC TYPE %x %s\n",
3893                                         "Failed to issue GID_FT to ",
3894                                         FC_TYPE_FCP,
3895                                         "Finishing discovery.");
3896                        return 0;
3897                }
3898                vport->gidft_inp++;
3899        }
3900
3901        if ((phba->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) ||
3902            (phba->cfg_enable_fc4_type == LPFC_ENABLE_NVME)) {
3903                if (lpfc_ns_cmd(vport, SLI_CTNS_GID_FT, 0, SLI_CTPT_NVME)) {
3904                        /* Cannot issue NameServer NVME Query, so finish up
3905                         * discovery
3906                         */
3907                        lpfc_printf_vlog(vport, KERN_ERR, LOG_SLI,
3908                                         "0605 %s FC_TYPE %x %s %d\n",
3909                                         "Failed to issue GID_FT to ",
3910                                         FC_TYPE_NVME,
3911                                         "Finishing discovery: gidftinp ",
3912                                         vport->gidft_inp);
3913                        if (vport->gidft_inp == 0)
3914                                return 0;
3915                } else
3916                        vport->gidft_inp++;
3917        }
3918        return vport->gidft_inp;
3919}
3920
3921/*
3922 * This routine handles processing a NameServer REG_LOGIN mailbox
3923 * command upon completion. It is setup in the LPFC_MBOXQ
3924 * as the completion routine when the command is
3925 * handed off to the SLI layer.
3926 */
3927void
3928lpfc_mbx_cmpl_ns_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
3929{
3930        MAILBOX_t *mb = &pmb->u.mb;
3931        struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
3932        struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2;
3933        struct lpfc_vport *vport = pmb->vport;
3934
3935        pmb->context1 = NULL;
3936        pmb->context2 = NULL;
3937        vport->gidft_inp = 0;
3938
3939        if (mb->mbxStatus) {
3940                lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
3941                                 "0260 Register NameServer error: 0x%x\n",
3942                                 mb->mbxStatus);
3943
3944out:
3945                /* decrement the node reference count held for this
3946                 * callback function.
3947                 */
3948                lpfc_nlp_put(ndlp);
3949                lpfc_mbuf_free(phba, mp->virt, mp->phys);
3950                kfree(mp);
3951                mempool_free(pmb, phba->mbox_mem_pool);
3952
3953                /* If no other thread is using the ndlp, free it */
3954                lpfc_nlp_not_used(ndlp);
3955
3956                if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
3957                        /*
3958                         * RegLogin failed, use loop map to make discovery
3959                         * list
3960                         */
3961                        lpfc_disc_list_loopmap(vport);
3962
3963                        /* Start discovery */
3964                        lpfc_disc_start(vport);
3965                        return;
3966                }
3967                lpfc_vport_set_state(vport, FC_VPORT_FAILED);
3968                return;
3969        }
3970
3971        if (phba->sli_rev < LPFC_SLI_REV4)
3972                ndlp->nlp_rpi = mb->un.varWords[0];
3973        ndlp->nlp_flag |= NLP_RPI_REGISTERED;
3974        ndlp->nlp_type |= NLP_FABRIC;
3975        lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
3976        lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
3977                         "0003 rpi:%x DID:%x flg:%x %d map%x %p\n",
3978                         ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag,
3979                         kref_read(&ndlp->kref),
3980                         ndlp->nlp_usg_map, ndlp);
3981
3982        if (vport->port_state < LPFC_VPORT_READY) {
3983                /* Link up discovery requires Fabric registration. */
3984                lpfc_ns_cmd(vport, SLI_CTNS_RNN_ID, 0, 0);
3985                lpfc_ns_cmd(vport, SLI_CTNS_RSNN_NN, 0, 0);
3986                lpfc_ns_cmd(vport, SLI_CTNS_RSPN_ID, 0, 0);
3987                lpfc_ns_cmd(vport, SLI_CTNS_RFT_ID, 0, 0);
3988
3989                if ((phba->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) ||
3990                    (phba->cfg_enable_fc4_type == LPFC_ENABLE_FCP))
3991                        lpfc_ns_cmd(vport, SLI_CTNS_RFF_ID, 0, FC_TYPE_FCP);
3992
3993                if ((phba->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) ||
3994                    (phba->cfg_enable_fc4_type == LPFC_ENABLE_NVME))
3995                        lpfc_ns_cmd(vport, SLI_CTNS_RFF_ID, 0,
3996                                    FC_TYPE_NVME);
3997
3998                /* Issue SCR just before NameServer GID_FT Query */
3999                lpfc_issue_els_scr(vport, SCR_DID, 0);
4000        }
4001
4002        vport->fc_ns_retry = 0;
4003        if (lpfc_issue_gidft(vport) == 0)
4004                goto out;
4005
4006        /*
4007         * At this point in time we may need to wait for multiple
4008         * SLI_CTNS_GID_FT CT commands to complete before we start discovery.
4009         *
4010         * decrement the node reference count held for this
4011         * callback function.
4012         */
4013        lpfc_nlp_put(ndlp);
4014        lpfc_mbuf_free(phba, mp->virt, mp->phys);
4015        kfree(mp);
4016        mempool_free(pmb, phba->mbox_mem_pool);
4017
4018        return;
4019}
4020
4021static void
4022lpfc_register_remote_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
4023{
4024        struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
4025        struct fc_rport  *rport;
4026        struct lpfc_rport_data *rdata;
4027        struct fc_rport_identifiers rport_ids;
4028        struct lpfc_hba  *phba = vport->phba;
4029
4030        if (phba->cfg_enable_fc4_type == LPFC_ENABLE_NVME)
4031                return;
4032
4033        /* Remote port has reappeared. Re-register w/ FC transport */
4034        rport_ids.node_name = wwn_to_u64(ndlp->nlp_nodename.u.wwn);
4035        rport_ids.port_name = wwn_to_u64(ndlp->nlp_portname.u.wwn);
4036        rport_ids.port_id = ndlp->nlp_DID;
4037        rport_ids.roles = FC_RPORT_ROLE_UNKNOWN;
4038
4039        /*
4040         * We leave our node pointer in rport->dd_data when we unregister a
4041         * FCP target port.  But fc_remote_port_add zeros the space to which
4042         * rport->dd_data points.  So, if we're reusing a previously
4043         * registered port, drop the reference that we took the last time we
4044         * registered the port.
4045         */
4046        rport = ndlp->rport;
4047        if (rport) {
4048                rdata = rport->dd_data;
4049                /* break the link before dropping the ref */
4050                ndlp->rport = NULL;
4051                if (rdata) {
4052                        if (rdata->pnode == ndlp)
4053                                lpfc_nlp_put(ndlp);
4054                        rdata->pnode = NULL;
4055                }
4056                /* drop reference for earlier registeration */
4057                put_device(&rport->dev);
4058        }
4059
4060        lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT,
4061                "rport add:       did:x%x flg:x%x type x%x",
4062                ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type);
4063
4064        /* Don't add the remote port if unloading. */
4065        if (vport->load_flag & FC_UNLOADING)
4066                return;
4067
4068        ndlp->rport = rport = fc_remote_port_add(shost, 0, &rport_ids);
4069        if (!rport || !get_device(&rport->dev)) {
4070                dev_printk(KERN_WARNING, &phba->pcidev->dev,
4071                           "Warning: fc_remote_port_add failed\n");
4072                return;
4073        }
4074
4075        /* initialize static port data */
4076        rport->maxframe_size = ndlp->nlp_maxframe;
4077        rport->supported_classes = ndlp->nlp_class_sup;
4078        rdata = rport->dd_data;
4079        rdata->pnode = lpfc_nlp_get(ndlp);
4080
4081        if (ndlp->nlp_type & NLP_FCP_TARGET)
4082                rport_ids.roles |= FC_RPORT_ROLE_FCP_TARGET;
4083        if (ndlp->nlp_type & NLP_FCP_INITIATOR)
4084                rport_ids.roles |= FC_RPORT_ROLE_FCP_INITIATOR;
4085
4086        if (rport_ids.roles !=  FC_RPORT_ROLE_UNKNOWN)
4087                fc_remote_port_rolechg(rport, rport_ids.roles);
4088
4089        lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
4090                         "3183 rport register x%06x, rport %p role x%x\n",
4091                         ndlp->nlp_DID, rport, rport_ids.roles);
4092
4093        if ((rport->scsi_target_id != -1) &&
4094            (rport->scsi_target_id < LPFC_MAX_TARGET)) {
4095                ndlp->nlp_sid = rport->scsi_target_id;
4096        }
4097        return;
4098}
4099
4100static void
4101lpfc_unregister_remote_port(struct lpfc_nodelist *ndlp)
4102{
4103        struct fc_rport *rport = ndlp->rport;
4104        struct lpfc_vport *vport = ndlp->vport;
4105        struct lpfc_hba  *phba = vport->phba;
4106
4107        if (phba->cfg_enable_fc4_type == LPFC_ENABLE_NVME)
4108                return;
4109
4110        lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT,
4111                "rport delete:    did:x%x flg:x%x type x%x",
4112                ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type);
4113
4114        lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
4115                         "3184 rport unregister x%06x, rport %p\n",
4116                         ndlp->nlp_DID, rport);
4117
4118        fc_remote_port_delete(rport);
4119
4120        return;
4121}
4122
4123static void
4124lpfc_nlp_counters(struct lpfc_vport *vport, int state, int count)
4125{
4126        struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
4127
4128        spin_lock_irq(shost->host_lock);
4129        switch (state) {
4130        case NLP_STE_UNUSED_NODE:
4131                vport->fc_unused_cnt += count;
4132                break;
4133        case NLP_STE_PLOGI_ISSUE:
4134                vport->fc_plogi_cnt += count;
4135                break;
4136        case NLP_STE_ADISC_ISSUE:
4137                vport->fc_adisc_cnt += count;
4138                break;
4139        case NLP_STE_REG_LOGIN_ISSUE:
4140                vport->fc_reglogin_cnt += count;
4141                break;
4142        case NLP_STE_PRLI_ISSUE:
4143                vport->fc_prli_cnt += count;
4144                break;
4145        case NLP_STE_UNMAPPED_NODE:
4146                vport->fc_unmap_cnt += count;
4147                break;
4148        case NLP_STE_MAPPED_NODE:
4149                vport->fc_map_cnt += count;
4150                break;
4151        case NLP_STE_NPR_NODE:
4152                if (vport->fc_npr_cnt == 0 && count == -1)
4153                        vport->fc_npr_cnt = 0;
4154                else
4155                        vport->fc_npr_cnt += count;
4156                break;
4157        }
4158        spin_unlock_irq(shost->host_lock);
4159}
4160
4161static void
4162lpfc_nlp_state_cleanup(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
4163                       int old_state, int new_state)
4164{
4165        struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
4166
4167        if (new_state == NLP_STE_UNMAPPED_NODE) {
4168                ndlp->nlp_flag &= ~NLP_NODEV_REMOVE;
4169                ndlp->nlp_type |= NLP_FC_NODE;
4170        }
4171        if (new_state == NLP_STE_MAPPED_NODE)
4172                ndlp->nlp_flag &= ~NLP_NODEV_REMOVE;
4173        if (new_state == NLP_STE_NPR_NODE)
4174                ndlp->nlp_flag &= ~NLP_RCV_PLOGI;
4175
4176        /* FCP and NVME Transport interface */
4177        if ((old_state == NLP_STE_MAPPED_NODE ||
4178             old_state == NLP_STE_UNMAPPED_NODE)) {
4179                if (ndlp->rport) {
4180                        vport->phba->nport_event_cnt++;
4181                        lpfc_unregister_remote_port(ndlp);
4182                }
4183
4184                if (ndlp->nlp_fc4_type & NLP_FC4_NVME) {
4185                        vport->phba->nport_event_cnt++;
4186                        if (vport->phba->nvmet_support == 0) {
4187                                /* Start devloss if target. */
4188                                if (ndlp->nlp_type & NLP_NVME_TARGET)
4189                                        lpfc_nvme_unregister_port(vport, ndlp);
4190                        } else {
4191                                /* NVMET has no upcall. */
4192                                lpfc_nlp_put(ndlp);
4193                        }
4194                }
4195        }
4196
4197        /* FCP and NVME Transport interfaces */
4198
4199        if (new_state ==  NLP_STE_MAPPED_NODE ||
4200            new_state == NLP_STE_UNMAPPED_NODE) {
4201                if (ndlp->nlp_fc4_type & NLP_FC4_FCP ||
4202                    ndlp->nlp_DID == Fabric_DID ||
4203                    ndlp->nlp_DID == NameServer_DID ||
4204                    ndlp->nlp_DID == FDMI_DID) {
4205                        vport->phba->nport_event_cnt++;
4206                        /*
4207                         * Tell the fc transport about the port, if we haven't
4208                         * already. If we have, and it's a scsi entity, be
4209                         */
4210                        lpfc_register_remote_port(vport, ndlp);
4211                }
4212                /* Notify the NVME transport of this new rport. */
4213                if (vport->phba->sli_rev >= LPFC_SLI_REV4 &&
4214                    ndlp->nlp_fc4_type & NLP_FC4_NVME) {
4215                        if (vport->phba->nvmet_support == 0) {
4216                                /* Register this rport with the transport.
4217                                 * Only NVME Target Rports are registered with
4218                                 * the transport.
4219                                 */
4220                                if (ndlp->nlp_type & NLP_NVME_TARGET) {
4221                                        vport->phba->nport_event_cnt++;
4222                                        lpfc_nvme_register_port(vport, ndlp);
4223                                }
4224                        } else {
4225                                /* Just take an NDLP ref count since the
4226                                 * target does not register rports.
4227                                 */
4228                                lpfc_nlp_get(ndlp);
4229                        }
4230                }
4231        }
4232
4233        if ((new_state ==  NLP_STE_MAPPED_NODE) &&
4234                (vport->stat_data_enabled)) {
4235                /*
4236                 * A new target is discovered, if there is no buffer for
4237                 * statistical data collection allocate buffer.
4238                 */
4239                ndlp->lat_data = kcalloc(LPFC_MAX_BUCKET_COUNT,
4240                                         sizeof(struct lpfc_scsicmd_bkt),
4241                                         GFP_KERNEL);
4242
4243                if (!ndlp->lat_data)
4244                        lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE,
4245                                "0286 lpfc_nlp_state_cleanup failed to "
4246                                "allocate statistical data buffer DID "
4247                                "0x%x\n", ndlp->nlp_DID);
4248        }
4249        /*
4250         * If the node just added to Mapped list was an FCP target,
4251         * but the remote port registration failed or assigned a target
4252         * id outside the presentable range - move the node to the
4253         * Unmapped List.
4254         */
4255        if ((new_state == NLP_STE_MAPPED_NODE) &&
4256            (ndlp->nlp_type & NLP_FCP_TARGET) &&
4257            (!ndlp->rport ||
4258             ndlp->rport->scsi_target_id == -1 ||
4259             ndlp->rport->scsi_target_id >= LPFC_MAX_TARGET)) {
4260                spin_lock_irq(shost->host_lock);
4261                ndlp->nlp_flag |= NLP_TGT_NO_SCSIID;
4262                spin_unlock_irq(shost->host_lock);
4263                lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
4264        }
4265}
4266
4267static char *
4268lpfc_nlp_state_name(char *buffer, size_t size, int state)
4269{
4270        static char *states[] = {
4271                [NLP_STE_UNUSED_NODE] = "UNUSED",
4272                [NLP_STE_PLOGI_ISSUE] = "PLOGI",
4273                [NLP_STE_ADISC_ISSUE] = "ADISC",
4274                [NLP_STE_REG_LOGIN_ISSUE] = "REGLOGIN",
4275                [NLP_STE_PRLI_ISSUE] = "PRLI",
4276                [NLP_STE_LOGO_ISSUE] = "LOGO",
4277                [NLP_STE_UNMAPPED_NODE] = "UNMAPPED",
4278                [NLP_STE_MAPPED_NODE] = "MAPPED",
4279                [NLP_STE_NPR_NODE] = "NPR",
4280        };
4281
4282        if (state < NLP_STE_MAX_STATE && states[state])
4283                strlcpy(buffer, states[state], size);
4284        else
4285                snprintf(buffer, size, "unknown (%d)", state);
4286        return buffer;
4287}
4288
4289void
4290lpfc_nlp_set_state(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
4291                   int state)
4292{
4293        struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
4294        int  old_state = ndlp->nlp_state;
4295        char name1[16], name2[16];
4296
4297        lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
4298                         "0904 NPort state transition x%06x, %s -> %s\n",
4299                         ndlp->nlp_DID,
4300                         lpfc_nlp_state_name(name1, sizeof(name1), old_state),
4301                         lpfc_nlp_state_name(name2, sizeof(name2), state));
4302
4303        lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_NODE,
4304                "node statechg    did:x%x old:%d ste:%d",
4305                ndlp->nlp_DID, old_state, state);
4306
4307        if (old_state == NLP_STE_NPR_NODE &&
4308            state != NLP_STE_NPR_NODE)
4309                lpfc_cancel_retry_delay_tmo(vport, ndlp);
4310        if (old_state == NLP_STE_UNMAPPED_NODE) {
4311                ndlp->nlp_flag &= ~NLP_TGT_NO_SCSIID;
4312                ndlp->nlp_type &= ~NLP_FC_NODE;
4313        }
4314
4315        if (list_empty(&ndlp->nlp_listp)) {
4316                spin_lock_irq(shost->host_lock);
4317                list_add_tail(&ndlp->nlp_listp, &vport->fc_nodes);
4318                spin_unlock_irq(shost->host_lock);
4319        } else if (old_state)
4320                lpfc_nlp_counters(vport, old_state, -1);
4321
4322        ndlp->nlp_state = state;
4323        lpfc_nlp_counters(vport, state, 1);
4324        lpfc_nlp_state_cleanup(vport, ndlp, old_state, state);
4325}
4326
4327void
4328lpfc_enqueue_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
4329{
4330        struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
4331
4332        if (list_empty(&ndlp->nlp_listp)) {
4333                spin_lock_irq(shost->host_lock);
4334                list_add_tail(&ndlp->nlp_listp, &vport->fc_nodes);
4335                spin_unlock_irq(shost->host_lock);
4336        }
4337}
4338
4339void
4340lpfc_dequeue_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
4341{
4342        struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
4343
4344        lpfc_cancel_retry_delay_tmo(vport, ndlp);
4345        if (ndlp->nlp_state && !list_empty(&ndlp->nlp_listp))
4346                lpfc_nlp_counters(vport, ndlp->nlp_state, -1);
4347        spin_lock_irq(shost->host_lock);
4348        list_del_init(&ndlp->nlp_listp);
4349        spin_unlock_irq(shost->host_lock);
4350        lpfc_nlp_state_cleanup(vport, ndlp, ndlp->nlp_state,
4351                                NLP_STE_UNUSED_NODE);
4352}
4353
4354static void
4355lpfc_disable_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
4356{
4357        lpfc_cancel_retry_delay_tmo(vport, ndlp);
4358        if (ndlp->nlp_state && !list_empty(&ndlp->nlp_listp))
4359                lpfc_nlp_counters(vport, ndlp->nlp_state, -1);
4360        lpfc_nlp_state_cleanup(vport, ndlp, ndlp->nlp_state,
4361                                NLP_STE_UNUSED_NODE);
4362}
4363/**
4364 * lpfc_initialize_node - Initialize all fields of node object
4365 * @vport: Pointer to Virtual Port object.
4366 * @ndlp: Pointer to FC node object.
4367 * @did: FC_ID of the node.
4368 *
4369 * This function is always called when node object need to be initialized.
4370 * It initializes all the fields of the node object. Although the reference
4371 * to phba from @ndlp can be obtained indirectly through it's reference to
4372 * @vport, a direct reference to phba is taken here by @ndlp. This is due
4373 * to the life-span of the @ndlp might go beyond the existence of @vport as
4374 * the final release of ndlp is determined by its reference count. And, the
4375 * operation on @ndlp needs the reference to phba.
4376 **/
4377static inline void
4378lpfc_initialize_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
4379        uint32_t did)
4380{
4381        INIT_LIST_HEAD(&ndlp->els_retry_evt.evt_listp);
4382        INIT_LIST_HEAD(&ndlp->dev_loss_evt.evt_listp);
4383        timer_setup(&ndlp->nlp_delayfunc, lpfc_els_retry_delay, 0);
4384        ndlp->nlp_DID = did;
4385        ndlp->vport = vport;
4386        ndlp->phba = vport->phba;
4387        ndlp->nlp_sid = NLP_NO_SID;
4388        ndlp->nlp_fc4_type = NLP_FC4_NONE;
4389        kref_init(&ndlp->kref);
4390        NLP_INT_NODE_ACT(ndlp);
4391        atomic_set(&ndlp->cmd_pending, 0);
4392        ndlp->cmd_qdepth = vport->cfg_tgt_queue_depth;
4393}
4394
4395struct lpfc_nodelist *
4396lpfc_enable_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
4397                 int state)
4398{
4399        struct lpfc_hba *phba = vport->phba;
4400        uint32_t did;
4401        unsigned long flags;
4402        unsigned long *active_rrqs_xri_bitmap = NULL;
4403        int rpi = LPFC_RPI_ALLOC_ERROR;
4404
4405        if (!ndlp)
4406                return NULL;
4407
4408        if (phba->sli_rev == LPFC_SLI_REV4) {
4409                rpi = lpfc_sli4_alloc_rpi(vport->phba);
4410                if (rpi == LPFC_RPI_ALLOC_ERROR)
4411                        return NULL;
4412        }
4413
4414        spin_lock_irqsave(&phba->ndlp_lock, flags);
4415        /* The ndlp should not be in memory free mode */
4416        if (NLP_CHK_FREE_REQ(ndlp)) {
4417                spin_unlock_irqrestore(&phba->ndlp_lock, flags);
4418                lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE,
4419                                "0277 lpfc_enable_node: ndlp:x%p "
4420                                "usgmap:x%x refcnt:%d\n",
4421                                (void *)ndlp, ndlp->nlp_usg_map,
4422                                kref_read(&ndlp->kref));
4423                goto free_rpi;
4424        }
4425        /* The ndlp should not already be in active mode */
4426        if (NLP_CHK_NODE_ACT(ndlp)) {
4427                spin_unlock_irqrestore(&phba->ndlp_lock, flags);
4428                lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE,
4429                                "0278 lpfc_enable_node: ndlp:x%p "
4430                                "usgmap:x%x refcnt:%d\n",
4431                                (void *)ndlp, ndlp->nlp_usg_map,
4432                                kref_read(&ndlp->kref));
4433                goto free_rpi;
4434        }
4435
4436        /* Keep the original DID */
4437        did = ndlp->nlp_DID;
4438        if (phba->sli_rev == LPFC_SLI_REV4)
4439                active_rrqs_xri_bitmap = ndlp->active_rrqs_xri_bitmap;
4440
4441        /* re-initialize ndlp except of ndlp linked list pointer */
4442        memset((((char *)ndlp) + sizeof (struct list_head)), 0,
4443                sizeof (struct lpfc_nodelist) - sizeof (struct list_head));
4444        lpfc_initialize_node(vport, ndlp, did);
4445
4446        if (phba->sli_rev == LPFC_SLI_REV4)
4447                ndlp->active_rrqs_xri_bitmap = active_rrqs_xri_bitmap;
4448
4449        spin_unlock_irqrestore(&phba->ndlp_lock, flags);
4450        if (vport->phba->sli_rev == LPFC_SLI_REV4) {
4451                ndlp->nlp_rpi = rpi;
4452                lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
4453                                 "0008 rpi:%x DID:%x flg:%x refcnt:%d "
4454                                 "map:%x %p\n", ndlp->nlp_rpi, ndlp->nlp_DID,
4455                                 ndlp->nlp_flag,
4456                                 kref_read(&ndlp->kref),
4457                                 ndlp->nlp_usg_map, ndlp);
4458        }
4459
4460
4461        if (state != NLP_STE_UNUSED_NODE)
4462                lpfc_nlp_set_state(vport, ndlp, state);
4463
4464        lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_NODE,
4465                "node enable:       did:x%x",
4466                ndlp->nlp_DID, 0, 0);
4467        return ndlp;
4468
4469free_rpi:
4470        if (phba->sli_rev == LPFC_SLI_REV4)
4471                lpfc_sli4_free_rpi(vport->phba, rpi);
4472        return NULL;
4473}
4474
4475void
4476lpfc_drop_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
4477{
4478        /*
4479         * Use of lpfc_drop_node and UNUSED list: lpfc_drop_node should
4480         * be used if we wish to issue the "last" lpfc_nlp_put() to remove
4481         * the ndlp from the vport. The ndlp marked as UNUSED on the list
4482         * until ALL other outstanding threads have completed. We check
4483         * that the ndlp not already in the UNUSED state before we proceed.
4484         */
4485        if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
4486                return;
4487        lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNUSED_NODE);
4488        if (vport->phba->sli_rev == LPFC_SLI_REV4) {
4489                lpfc_cleanup_vports_rrqs(vport, ndlp);
4490                lpfc_unreg_rpi(vport, ndlp);
4491        }
4492
4493        lpfc_nlp_put(ndlp);
4494        return;
4495}
4496
4497/*
4498 * Start / ReStart rescue timer for Discovery / RSCN handling
4499 */
4500void
4501lpfc_set_disctmo(struct lpfc_vport *vport)
4502{
4503        struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
4504        struct lpfc_hba  *phba = vport->phba;
4505        uint32_t tmo;
4506
4507        if (vport->port_state == LPFC_LOCAL_CFG_LINK) {
4508                /* For FAN, timeout should be greater than edtov */
4509                tmo = (((phba->fc_edtov + 999) / 1000) + 1);
4510        } else {
4511                /* Normal discovery timeout should be > than ELS/CT timeout
4512                 * FC spec states we need 3 * ratov for CT requests
4513                 */
4514                tmo = ((phba->fc_ratov * 3) + 3);
4515        }
4516
4517
4518        if (!timer_pending(&vport->fc_disctmo)) {
4519                lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
4520                        "set disc timer:  tmo:x%x state:x%x flg:x%x",
4521                        tmo, vport->port_state, vport->fc_flag);
4522        }
4523
4524        mod_timer(&vport->fc_disctmo, jiffies + msecs_to_jiffies(1000 * tmo));
4525        spin_lock_irq(shost->host_lock);
4526        vport->fc_flag |= FC_DISC_TMO;
4527        spin_unlock_irq(shost->host_lock);
4528
4529        /* Start Discovery Timer state <hba_state> */
4530        lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
4531                         "0247 Start Discovery Timer state x%x "
4532                         "Data: x%x x%lx x%x x%x\n",
4533                         vport->port_state, tmo,
4534                         (unsigned long)&vport->fc_disctmo, vport->fc_plogi_cnt,
4535                         vport->fc_adisc_cnt);
4536
4537        return;
4538}
4539
4540/*
4541 * Cancel rescue timer for Discovery / RSCN handling
4542 */
4543int
4544lpfc_can_disctmo(struct lpfc_vport *vport)
4545{
4546        struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
4547        unsigned long iflags;
4548
4549        lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
4550                "can disc timer:  state:x%x rtry:x%x flg:x%x",
4551                vport->port_state, vport->fc_ns_retry, vport->fc_flag);
4552
4553        /* Turn off discovery timer if its running */
4554        if (vport->fc_flag & FC_DISC_TMO) {
4555                spin_lock_irqsave(shost->host_lock, iflags);
4556                vport->fc_flag &= ~FC_DISC_TMO;
4557                spin_unlock_irqrestore(shost->host_lock, iflags);
4558                del_timer_sync(&vport->fc_disctmo);
4559                spin_lock_irqsave(&vport->work_port_lock, iflags);
4560                vport->work_port_events &= ~WORKER_DISC_TMO;
4561                spin_unlock_irqrestore(&vport->work_port_lock, iflags);
4562        }
4563
4564        /* Cancel Discovery Timer state <hba_state> */
4565        lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
4566                         "0248 Cancel Discovery Timer state x%x "
4567                         "Data: x%x x%x x%x\n",
4568                         vport->port_state, vport->fc_flag,
4569                         vport->fc_plogi_cnt, vport->fc_adisc_cnt);
4570        return 0;
4571}
4572
4573/*
4574 * Check specified ring for outstanding IOCB on the SLI queue
4575 * Return true if iocb matches the specified nport
4576 */
4577int
4578lpfc_check_sli_ndlp(struct lpfc_hba *phba,
4579                    struct lpfc_sli_ring *pring,
4580                    struct lpfc_iocbq *iocb,
4581                    struct lpfc_nodelist *ndlp)
4582{
4583        IOCB_t *icmd = &iocb->iocb;
4584        struct lpfc_vport    *vport = ndlp->vport;
4585
4586        if (iocb->vport != vport)
4587                return 0;
4588
4589        if (pring->ringno == LPFC_ELS_RING) {
4590                switch (icmd->ulpCommand) {
4591                case CMD_GEN_REQUEST64_CR:
4592                        if (iocb->context_un.ndlp == ndlp)
4593                                return 1;
4594                case CMD_ELS_REQUEST64_CR:
4595                        if (icmd->un.elsreq64.remoteID == ndlp->nlp_DID)
4596                                return 1;
4597                case CMD_XMIT_ELS_RSP64_CX:
4598                        if (iocb->context1 == (uint8_t *) ndlp)
4599                                return 1;
4600                }
4601        } else if (pring->ringno == LPFC_FCP_RING) {
4602                /* Skip match check if waiting to relogin to FCP target */
4603                if ((ndlp->nlp_type & NLP_FCP_TARGET) &&
4604                    (ndlp->nlp_flag & NLP_DELAY_TMO)) {
4605                        return 0;
4606                }
4607                if (icmd->ulpContext == (volatile ushort)ndlp->nlp_rpi) {
4608                        return 1;
4609                }
4610        }
4611        return 0;
4612}
4613
4614static void
4615__lpfc_dequeue_nport_iocbs(struct lpfc_hba *phba,
4616                struct lpfc_nodelist *ndlp, struct lpfc_sli_ring *pring,
4617                struct list_head *dequeue_list)
4618{
4619        struct lpfc_iocbq *iocb, *next_iocb;
4620
4621        list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) {
4622                /* Check to see if iocb matches the nport */
4623                if (lpfc_check_sli_ndlp(phba, pring, iocb, ndlp))
4624                        /* match, dequeue */
4625                        list_move_tail(&iocb->list, dequeue_list);
4626        }
4627}
4628
4629static void
4630lpfc_sli3_dequeue_nport_iocbs(struct lpfc_hba *phba,
4631                struct lpfc_nodelist *ndlp, struct list_head *dequeue_list)
4632{
4633        struct lpfc_sli *psli = &phba->sli;
4634        uint32_t i;
4635
4636        spin_lock_irq(&phba->hbalock);
4637        for (i = 0; i < psli->num_rings; i++)
4638                __lpfc_dequeue_nport_iocbs(phba, ndlp, &psli->sli3_ring[i],
4639                                                dequeue_list);
4640        spin_unlock_irq(&phba->hbalock);
4641}
4642
4643static void
4644lpfc_sli4_dequeue_nport_iocbs(struct lpfc_hba *phba,
4645                struct lpfc_nodelist *ndlp, struct list_head *dequeue_list)
4646{
4647        struct lpfc_sli_ring *pring;
4648        struct lpfc_queue *qp = NULL;
4649
4650        spin_lock_irq(&phba->hbalock);
4651        list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
4652                pring = qp->pring;
4653                if (!pring)
4654                        continue;
4655                spin_lock(&pring->ring_lock);
4656                __lpfc_dequeue_nport_iocbs(phba, ndlp, pring, dequeue_list);
4657                spin_unlock(&pring->ring_lock);
4658        }
4659        spin_unlock_irq(&phba->hbalock);
4660}
4661
4662/*
4663 * Free resources / clean up outstanding I/Os
4664 * associated with nlp_rpi in the LPFC_NODELIST entry.
4665 */
4666static int
4667lpfc_no_rpi(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
4668{
4669        LIST_HEAD(completions);
4670
4671        lpfc_fabric_abort_nport(ndlp);
4672
4673        /*
4674         * Everything that matches on txcmplq will be returned
4675         * by firmware with a no rpi error.
4676         */
4677        if (ndlp->nlp_flag & NLP_RPI_REGISTERED) {
4678                if (phba->sli_rev != LPFC_SLI_REV4)
4679                        lpfc_sli3_dequeue_nport_iocbs(phba, ndlp, &completions);
4680                else
4681                        lpfc_sli4_dequeue_nport_iocbs(phba, ndlp, &completions);
4682        }
4683
4684        /* Cancel all the IOCBs from the completions list */
4685        lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
4686                              IOERR_SLI_ABORTED);
4687
4688        return 0;
4689}
4690
4691/**
4692 * lpfc_nlp_logo_unreg - Unreg mailbox completion handler before LOGO
4693 * @phba: Pointer to HBA context object.
4694 * @pmb: Pointer to mailbox object.
4695 *
4696 * This function will issue an ELS LOGO command after completing
4697 * the UNREG_RPI.
4698 **/
4699static void
4700lpfc_nlp_logo_unreg(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
4701{
4702        struct lpfc_vport  *vport = pmb->vport;
4703        struct lpfc_nodelist *ndlp;
4704
4705        ndlp = (struct lpfc_nodelist *)(pmb->context1);
4706        if (!ndlp)
4707                return;
4708        lpfc_issue_els_logo(vport, ndlp, 0);
4709        mempool_free(pmb, phba->mbox_mem_pool);
4710}
4711
4712/*
4713 * Free rpi associated with LPFC_NODELIST entry.
4714 * This routine is called from lpfc_freenode(), when we are removing
4715 * a LPFC_NODELIST entry. It is also called if the driver initiates a
4716 * LOGO that completes successfully, and we are waiting to PLOGI back
4717 * to the remote NPort. In addition, it is called after we receive
4718 * and unsolicated ELS cmd, send back a rsp, the rsp completes and
4719 * we are waiting to PLOGI back to the remote NPort.
4720 */
4721int
4722lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
4723{
4724        struct lpfc_hba *phba = vport->phba;
4725        LPFC_MBOXQ_t    *mbox;
4726        int rc, acc_plogi = 1;
4727        uint16_t rpi;
4728
4729        if (ndlp->nlp_flag & NLP_RPI_REGISTERED ||
4730            ndlp->nlp_flag & NLP_REG_LOGIN_SEND) {
4731                if (ndlp->nlp_flag & NLP_REG_LOGIN_SEND)
4732                        lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
4733                                         "3366 RPI x%x needs to be "
4734                                         "unregistered nlp_flag x%x "
4735                                         "did x%x\n",
4736                                         ndlp->nlp_rpi, ndlp->nlp_flag,
4737                                         ndlp->nlp_DID);
4738                mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4739                if (mbox) {
4740                        /* SLI4 ports require the physical rpi value. */
4741                        rpi = ndlp->nlp_rpi;
4742                        if (phba->sli_rev == LPFC_SLI_REV4)
4743                                rpi = phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
4744
4745                        lpfc_unreg_login(phba, vport->vpi, rpi, mbox);
4746                        mbox->vport = vport;
4747                        if (ndlp->nlp_flag & NLP_ISSUE_LOGO) {
4748                                mbox->context1 = ndlp;
4749                                mbox->mbox_cmpl = lpfc_nlp_logo_unreg;
4750                        } else {
4751                                if (phba->sli_rev == LPFC_SLI_REV4 &&
4752                                    (!(vport->load_flag & FC_UNLOADING)) &&
4753                                    (bf_get(lpfc_sli_intf_if_type,
4754                                     &phba->sli4_hba.sli_intf) ==
4755                                      LPFC_SLI_INTF_IF_TYPE_2) &&
4756                                    (kref_read(&ndlp->kref) > 0)) {
4757                                        mbox->context1 = lpfc_nlp_get(ndlp);
4758                                        mbox->mbox_cmpl =
4759                                                lpfc_sli4_unreg_rpi_cmpl_clr;
4760                                        /*
4761                                         * accept PLOGIs after unreg_rpi_cmpl
4762                                         */
4763                                        acc_plogi = 0;
4764                                } else
4765                                        mbox->mbox_cmpl =
4766                                                lpfc_sli_def_mbox_cmpl;
4767                        }
4768
4769                        rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
4770                        if (rc == MBX_NOT_FINISHED) {
4771                                mempool_free(mbox, phba->mbox_mem_pool);
4772                                acc_plogi = 1;
4773                        }
4774                }
4775                lpfc_no_rpi(phba, ndlp);
4776
4777                if (phba->sli_rev != LPFC_SLI_REV4)
4778                        ndlp->nlp_rpi = 0;
4779                ndlp->nlp_flag &= ~NLP_RPI_REGISTERED;
4780                ndlp->nlp_flag &= ~NLP_NPR_ADISC;
4781                if (acc_plogi)
4782                        ndlp->nlp_flag &= ~NLP_LOGO_ACC;
4783                return 1;
4784        }
4785        ndlp->nlp_flag &= ~NLP_LOGO_ACC;
4786        return 0;
4787}
4788
4789/**
4790 * lpfc_unreg_hba_rpis - Unregister rpis registered to the hba.
4791 * @phba: pointer to lpfc hba data structure.
4792 *
4793 * This routine is invoked to unregister all the currently registered RPIs
4794 * to the HBA.
4795 **/
4796void
4797lpfc_unreg_hba_rpis(struct lpfc_hba *phba)
4798{
4799        struct lpfc_vport **vports;
4800        struct lpfc_nodelist *ndlp;
4801        struct Scsi_Host *shost;
4802        int i;
4803
4804        vports = lpfc_create_vport_work_array(phba);
4805        if (!vports) {
4806                lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
4807                        "2884 Vport array allocation failed \n");
4808                return;
4809        }
4810        for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
4811                shost = lpfc_shost_from_vport(vports[i]);
4812                spin_lock_irq(shost->host_lock);
4813                list_for_each_entry(ndlp, &vports[i]->fc_nodes, nlp_listp) {
4814                        if (ndlp->nlp_flag & NLP_RPI_REGISTERED) {
4815                                /* The mempool_alloc might sleep */
4816                                spin_unlock_irq(shost->host_lock);
4817                                lpfc_unreg_rpi(vports[i], ndlp);
4818                                spin_lock_irq(shost->host_lock);
4819                        }
4820                }
4821                spin_unlock_irq(shost->host_lock);
4822        }
4823        lpfc_destroy_vport_work_array(phba, vports);
4824}
4825
4826void
4827lpfc_unreg_all_rpis(struct lpfc_vport *vport)
4828{
4829        struct lpfc_hba  *phba  = vport->phba;
4830        LPFC_MBOXQ_t     *mbox;
4831        int rc;
4832
4833        if (phba->sli_rev == LPFC_SLI_REV4) {
4834                lpfc_sli4_unreg_all_rpis(vport);
4835                return;
4836        }
4837
4838        mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4839        if (mbox) {
4840                lpfc_unreg_login(phba, vport->vpi, LPFC_UNREG_ALL_RPIS_VPORT,
4841                                 mbox);
4842                mbox->vport = vport;
4843                mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
4844                mbox->context1 = NULL;
4845                rc = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO);
4846                if (rc != MBX_TIMEOUT)
4847                        mempool_free(mbox, phba->mbox_mem_pool);
4848
4849                if ((rc == MBX_TIMEOUT) || (rc == MBX_NOT_FINISHED))
4850                        lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX | LOG_VPORT,
4851                                "1836 Could not issue "
4852                                "unreg_login(all_rpis) status %d\n", rc);
4853        }
4854}
4855
4856void
4857lpfc_unreg_default_rpis(struct lpfc_vport *vport)
4858{
4859        struct lpfc_hba  *phba  = vport->phba;
4860        LPFC_MBOXQ_t     *mbox;
4861        int rc;
4862
4863        mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4864        if (mbox) {
4865                lpfc_unreg_did(phba, vport->vpi, LPFC_UNREG_ALL_DFLT_RPIS,
4866                               mbox);
4867                mbox->vport = vport;
4868                mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
4869                mbox->context1 = NULL;
4870                rc = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO);
4871                if (rc != MBX_TIMEOUT)
4872                        mempool_free(mbox, phba->mbox_mem_pool);
4873
4874                if ((rc == MBX_TIMEOUT) || (rc == MBX_NOT_FINISHED))
4875                        lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX | LOG_VPORT,
4876                                         "1815 Could not issue "
4877                                         "unreg_did (default rpis) status %d\n",
4878                                         rc);
4879        }
4880}
4881
4882/*
4883 * Free resources associated with LPFC_NODELIST entry
4884 * so it can be freed.
4885 */
4886static int
4887lpfc_cleanup_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
4888{
4889        struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
4890        struct lpfc_hba  *phba = vport->phba;
4891        LPFC_MBOXQ_t *mb, *nextmb;
4892        struct lpfc_dmabuf *mp;
4893
4894        /* Cleanup node for NPort <nlp_DID> */
4895        lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
4896                         "0900 Cleanup node for NPort x%x "
4897                         "Data: x%x x%x x%x\n",
4898                         ndlp->nlp_DID, ndlp->nlp_flag,
4899                         ndlp->nlp_state, ndlp->nlp_rpi);
4900        if (NLP_CHK_FREE_REQ(ndlp)) {
4901                lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE,
4902                                "0280 lpfc_cleanup_node: ndlp:x%p "
4903                                "usgmap:x%x refcnt:%d\n",
4904                                (void *)ndlp, ndlp->nlp_usg_map,
4905                                kref_read(&ndlp->kref));
4906                lpfc_dequeue_node(vport, ndlp);
4907        } else {
4908                lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE,
4909                                "0281 lpfc_cleanup_node: ndlp:x%p "
4910                                "usgmap:x%x refcnt:%d\n",
4911                                (void *)ndlp, ndlp->nlp_usg_map,
4912                                kref_read(&ndlp->kref));
4913                lpfc_disable_node(vport, ndlp);
4914        }
4915
4916
4917        /* Don't need to clean up REG_LOGIN64 cmds for Default RPI cleanup */
4918
4919        /* cleanup any ndlp on mbox q waiting for reglogin cmpl */
4920        if ((mb = phba->sli.mbox_active)) {
4921                if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) &&
4922                   !(mb->mbox_flag & LPFC_MBX_IMED_UNREG) &&
4923                   (ndlp == (struct lpfc_nodelist *) mb->context2)) {
4924                        mb->context2 = NULL;
4925                        mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
4926                }
4927        }
4928
4929        spin_lock_irq(&phba->hbalock);
4930        /* Cleanup REG_LOGIN completions which are not yet processed */
4931        list_for_each_entry(mb, &phba->sli.mboxq_cmpl, list) {
4932                if ((mb->u.mb.mbxCommand != MBX_REG_LOGIN64) ||
4933                        (mb->mbox_flag & LPFC_MBX_IMED_UNREG) ||
4934                        (ndlp != (struct lpfc_nodelist *) mb->context2))
4935                        continue;
4936
4937                mb->context2 = NULL;
4938                mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
4939        }
4940
4941        list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) {
4942                if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) &&
4943                   !(mb->mbox_flag & LPFC_MBX_IMED_UNREG) &&
4944                    (ndlp == (struct lpfc_nodelist *) mb->context2)) {
4945                        mp = (struct lpfc_dmabuf *) (mb->context1);
4946                        if (mp) {
4947                                __lpfc_mbuf_free(phba, mp->virt, mp->phys);
4948                                kfree(mp);
4949                        }
4950                        list_del(&mb->list);
4951                        mempool_free(mb, phba->mbox_mem_pool);
4952                        /* We shall not invoke the lpfc_nlp_put to decrement
4953                         * the ndlp reference count as we are in the process
4954                         * of lpfc_nlp_release.
4955                         */
4956                }
4957        }
4958        spin_unlock_irq(&phba->hbalock);
4959
4960        lpfc_els_abort(phba, ndlp);
4961
4962        spin_lock_irq(shost->host_lock);
4963        ndlp->nlp_flag &= ~NLP_DELAY_TMO;
4964        spin_unlock_irq(shost->host_lock);
4965
4966        ndlp->nlp_last_elscmd = 0;
4967        del_timer_sync(&ndlp->nlp_delayfunc);
4968
4969        list_del_init(&ndlp->els_retry_evt.evt_listp);
4970        list_del_init(&ndlp->dev_loss_evt.evt_listp);
4971        lpfc_cleanup_vports_rrqs(vport, ndlp);
4972        lpfc_unreg_rpi(vport, ndlp);
4973
4974        return 0;
4975}
4976
4977/*
4978 * Check to see if we can free the nlp back to the freelist.
4979 * If we are in the middle of using the nlp in the discovery state
4980 * machine, defer the free till we reach the end of the state machine.
4981 */
4982static void
4983lpfc_nlp_remove(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
4984{
4985        struct lpfc_hba  *phba = vport->phba;
4986        struct lpfc_rport_data *rdata;
4987        struct fc_rport *rport;
4988        LPFC_MBOXQ_t *mbox;
4989        int rc;
4990
4991        lpfc_cancel_retry_delay_tmo(vport, ndlp);
4992        if ((ndlp->nlp_flag & NLP_DEFER_RM) &&
4993            !(ndlp->nlp_flag & NLP_REG_LOGIN_SEND) &&
4994            !(ndlp->nlp_flag & NLP_RPI_REGISTERED) &&
4995            phba->sli_rev != LPFC_SLI_REV4) {
4996                /* For this case we need to cleanup the default rpi
4997                 * allocated by the firmware.
4998                 */
4999                lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
5000                                 "0005 rpi:%x DID:%x flg:%x %d map:%x %p\n",
5001                                 ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag,
5002                                 kref_read(&ndlp->kref),
5003                                 ndlp->nlp_usg_map, ndlp);
5004                if ((mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL))
5005                        != NULL) {
5006                        rc = lpfc_reg_rpi(phba, vport->vpi, ndlp->nlp_DID,
5007                            (uint8_t *) &vport->fc_sparam, mbox, ndlp->nlp_rpi);
5008                        if (rc) {
5009                                mempool_free(mbox, phba->mbox_mem_pool);
5010                        }
5011                        else {
5012                                mbox->mbox_flag |= LPFC_MBX_IMED_UNREG;
5013                                mbox->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi;
5014                                mbox->vport = vport;
5015                                mbox->context2 = ndlp;
5016                                rc =lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
5017                                if (rc == MBX_NOT_FINISHED) {
5018                                        mempool_free(mbox, phba->mbox_mem_pool);
5019                                }
5020                        }
5021                }
5022        }
5023        lpfc_cleanup_node(vport, ndlp);
5024
5025        /*
5026         * ndlp->rport must be set to NULL before it reaches here
5027         * i.e. break rport/node link before doing lpfc_nlp_put for
5028         * registered rport and then drop the reference of rport.
5029         */
5030        if (ndlp->rport) {
5031                /*
5032                 * extra lpfc_nlp_put dropped the reference of ndlp
5033                 * for registered rport so need to cleanup rport
5034                 */
5035                lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE,
5036                                "0940 removed node x%p DID x%x "
5037                                " rport not null %p\n",
5038                                ndlp, ndlp->nlp_DID, ndlp->rport);
5039                rport = ndlp->rport;
5040                rdata = rport->dd_data;
5041                rdata->pnode = NULL;
5042                ndlp->rport = NULL;
5043                put_device(&rport->dev);
5044        }
5045}
5046
5047static int
5048lpfc_matchdid(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
5049              uint32_t did)
5050{
5051        D_ID mydid, ndlpdid, matchdid;
5052
5053        if (did == Bcast_DID)
5054                return 0;
5055
5056        /* First check for Direct match */
5057        if (ndlp->nlp_DID == did)
5058                return 1;
5059
5060        /* Next check for area/domain identically equals 0 match */
5061        mydid.un.word = vport->fc_myDID;
5062        if ((mydid.un.b.domain == 0) && (mydid.un.b.area == 0)) {
5063                return 0;
5064        }
5065
5066        matchdid.un.word = did;
5067        ndlpdid.un.word = ndlp->nlp_DID;
5068        if (matchdid.un.b.id == ndlpdid.un.b.id) {
5069                if ((mydid.un.b.domain == matchdid.un.b.domain) &&
5070                    (mydid.un.b.area == matchdid.un.b.area)) {
5071                        /* This code is supposed to match the ID
5072                         * for a private loop device that is
5073                         * connect to fl_port. But we need to
5074                         * check that the port did not just go
5075                         * from pt2pt to fabric or we could end
5076                         * up matching ndlp->nlp_DID 000001 to
5077                         * fabric DID 0x20101
5078                         */
5079                        if ((ndlpdid.un.b.domain == 0) &&
5080                            (ndlpdid.un.b.area == 0)) {
5081                                if (ndlpdid.un.b.id &&
5082                                    vport->phba->fc_topology ==
5083                                    LPFC_TOPOLOGY_LOOP)
5084                                        return 1;
5085                        }
5086                        return 0;
5087                }
5088
5089                matchdid.un.word = ndlp->nlp_DID;
5090                if ((mydid.un.b.domain == ndlpdid.un.b.domain) &&
5091                    (mydid.un.b.area == ndlpdid.un.b.area)) {
5092                        if ((matchdid.un.b.domain == 0) &&
5093                            (matchdid.un.b.area == 0)) {
5094                                if (matchdid.un.b.id)
5095                                        return 1;
5096                        }
5097                }
5098        }
5099        return 0;
5100}
5101
5102/* Search for a nodelist entry */
5103static struct lpfc_nodelist *
5104__lpfc_findnode_did(struct lpfc_vport *vport, uint32_t did)
5105{
5106        struct lpfc_nodelist *ndlp;
5107        uint32_t data1;
5108
5109        list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
5110                if (lpfc_matchdid(vport, ndlp, did)) {
5111                        data1 = (((uint32_t) ndlp->nlp_state << 24) |
5112                                 ((uint32_t) ndlp->nlp_xri << 16) |
5113                                 ((uint32_t) ndlp->nlp_type << 8) |
5114                                 ((uint32_t) ndlp->nlp_rpi & 0xff));
5115                        lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
5116                                         "0929 FIND node DID "
5117                                         "Data: x%p x%x x%x x%x %p\n",
5118                                         ndlp, ndlp->nlp_DID,
5119                                         ndlp->nlp_flag, data1,
5120                                         ndlp->active_rrqs_xri_bitmap);
5121                        return ndlp;
5122                }
5123        }
5124
5125        /* FIND node did <did> NOT FOUND */
5126        lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
5127                         "0932 FIND node did x%x NOT FOUND.\n", did);
5128        return NULL;
5129}
5130
5131struct lpfc_nodelist *
5132lpfc_findnode_did(struct lpfc_vport *vport, uint32_t did)
5133{
5134        struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
5135        struct lpfc_nodelist *ndlp;
5136        unsigned long iflags;
5137
5138        spin_lock_irqsave(shost->host_lock, iflags);
5139        ndlp = __lpfc_findnode_did(vport, did);
5140        spin_unlock_irqrestore(shost->host_lock, iflags);
5141        return ndlp;
5142}
5143
5144struct lpfc_nodelist *
5145lpfc_setup_disc_node(struct lpfc_vport *vport, uint32_t did)
5146{
5147        struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
5148        struct lpfc_nodelist *ndlp;
5149
5150        ndlp = lpfc_findnode_did(vport, did);
5151        if (!ndlp) {
5152                if (vport->phba->nvmet_support)
5153                        return NULL;
5154                if ((vport->fc_flag & FC_RSCN_MODE) != 0 &&
5155                    lpfc_rscn_payload_check(vport, did) == 0)
5156                        return NULL;
5157                ndlp = lpfc_nlp_init(vport, did);
5158                if (!ndlp)
5159                        return NULL;
5160                lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
5161                spin_lock_irq(shost->host_lock);
5162                ndlp->nlp_flag |= NLP_NPR_2B_DISC;
5163                spin_unlock_irq(shost->host_lock);
5164                return ndlp;
5165        } else if (!NLP_CHK_NODE_ACT(ndlp)) {
5166                if (vport->phba->nvmet_support)
5167                        return NULL;
5168                ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_NPR_NODE);
5169                if (!ndlp)
5170                        return NULL;
5171                spin_lock_irq(shost->host_lock);
5172                ndlp->nlp_flag |= NLP_NPR_2B_DISC;
5173                spin_unlock_irq(shost->host_lock);
5174                return ndlp;
5175        }
5176
5177        /* The NVME Target does not want to actively manage an rport.
5178         * The goal is to allow the target to reset its state and clear
5179         * pending IO in preparation for the initiator to recover.
5180         */
5181        if ((vport->fc_flag & FC_RSCN_MODE) &&
5182            !(vport->fc_flag & FC_NDISC_ACTIVE)) {
5183                if (lpfc_rscn_payload_check(vport, did)) {
5184
5185                        /* Since this node is marked for discovery,
5186                         * delay timeout is not needed.
5187                         */
5188                        lpfc_cancel_retry_delay_tmo(vport, ndlp);
5189
5190                        /* NVME Target mode waits until rport is known to be
5191                         * impacted by the RSCN before it transitions.  No
5192                         * active management - just go to NPR provided the
5193                         * node had a valid login.
5194                         */
5195                        if (vport->phba->nvmet_support)
5196                                return ndlp;
5197
5198                        /* If we've already received a PLOGI from this NPort
5199                         * we don't need to try to discover it again.
5200                         */
5201                        if (ndlp->nlp_flag & NLP_RCV_PLOGI)
5202                                return NULL;
5203
5204                        spin_lock_irq(shost->host_lock);
5205                        ndlp->nlp_flag |= NLP_NPR_2B_DISC;
5206                        spin_unlock_irq(shost->host_lock);
5207                } else
5208                        ndlp = NULL;
5209        } else {
5210                /* If the initiator received a PLOGI from this NPort or if the
5211                 * initiator is already in the process of discovery on it,
5212                 * there's no need to try to discover it again.
5213                 */
5214                if (ndlp->nlp_state == NLP_STE_ADISC_ISSUE ||
5215                    ndlp->nlp_state == NLP_STE_PLOGI_ISSUE ||
5216                    (!vport->phba->nvmet_support &&
5217                     ndlp->nlp_flag & NLP_RCV_PLOGI))
5218                        return NULL;
5219
5220                if (vport->phba->nvmet_support)
5221                        return ndlp;
5222
5223                /* Moving to NPR state clears unsolicited flags and
5224                 * allows for rediscovery
5225                 */
5226                lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
5227
5228                spin_lock_irq(shost->host_lock);
5229                ndlp->nlp_flag |= NLP_NPR_2B_DISC;
5230                spin_unlock_irq(shost->host_lock);
5231        }
5232        return ndlp;
5233}
5234
5235/* Build a list of nodes to discover based on the loopmap */
5236void
5237lpfc_disc_list_loopmap(struct lpfc_vport *vport)
5238{
5239        struct lpfc_hba  *phba = vport->phba;
5240        int j;
5241        uint32_t alpa, index;
5242
5243        if (!lpfc_is_link_up(phba))
5244                return;
5245
5246        if (phba->fc_topology != LPFC_TOPOLOGY_LOOP)
5247                return;
5248
5249        /* Check for loop map present or not */
5250        if (phba->alpa_map[0]) {
5251                for (j = 1; j <= phba->alpa_map[0]; j++) {
5252                        alpa = phba->alpa_map[j];
5253                        if (((vport->fc_myDID & 0xff) == alpa) || (alpa == 0))
5254                                continue;
5255                        lpfc_setup_disc_node(vport, alpa);
5256                }
5257        } else {
5258                /* No alpamap, so try all alpa's */
5259                for (j = 0; j < FC_MAXLOOP; j++) {
5260                        /* If cfg_scan_down is set, start from highest
5261                         * ALPA (0xef) to lowest (0x1).
5262                         */
5263                        if (vport->cfg_scan_down)
5264                                index = j;
5265                        else
5266                                index = FC_MAXLOOP - j - 1;
5267                        alpa = lpfcAlpaArray[index];
5268                        if ((vport->fc_myDID & 0xff) == alpa)
5269                                continue;
5270                        lpfc_setup_disc_node(vport, alpa);
5271                }
5272        }
5273        return;
5274}
5275
5276/* SLI3 only */
5277void
5278lpfc_issue_clear_la(struct lpfc_hba *phba, struct lpfc_vport *vport)
5279{
5280        LPFC_MBOXQ_t *mbox;
5281        struct lpfc_sli *psli = &phba->sli;
5282        struct lpfc_sli_ring *extra_ring = &psli->sli3_ring[LPFC_EXTRA_RING];
5283        struct lpfc_sli_ring *fcp_ring   = &psli->sli3_ring[LPFC_FCP_RING];
5284        int  rc;
5285
5286        /*
5287         * if it's not a physical port or if we already send
5288         * clear_la then don't send it.
5289         */
5290        if ((phba->link_state >= LPFC_CLEAR_LA) ||
5291            (vport->port_type != LPFC_PHYSICAL_PORT) ||
5292                (phba->sli_rev == LPFC_SLI_REV4))
5293                return;
5294
5295                        /* Link up discovery */
5296        if ((mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL)) != NULL) {
5297                phba->link_state = LPFC_CLEAR_LA;
5298                lpfc_clear_la(phba, mbox);
5299                mbox->mbox_cmpl = lpfc_mbx_cmpl_clear_la;
5300                mbox->vport = vport;
5301                rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
5302                if (rc == MBX_NOT_FINISHED) {
5303                        mempool_free(mbox, phba->mbox_mem_pool);
5304                        lpfc_disc_flush_list(vport);
5305                        extra_ring->flag &= ~LPFC_STOP_IOCB_EVENT;
5306                        fcp_ring->flag &= ~LPFC_STOP_IOCB_EVENT;
5307                        phba->link_state = LPFC_HBA_ERROR;
5308                }
5309        }
5310}
5311
5312/* Reg_vpi to tell firmware to resume normal operations */
5313void
5314lpfc_issue_reg_vpi(struct lpfc_hba *phba, struct lpfc_vport *vport)
5315{
5316        LPFC_MBOXQ_t *regvpimbox;
5317
5318        regvpimbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5319        if (regvpimbox) {
5320                lpfc_reg_vpi(vport, regvpimbox);
5321                regvpimbox->mbox_cmpl = lpfc_mbx_cmpl_reg_vpi;
5322                regvpimbox->vport = vport;
5323                if (lpfc_sli_issue_mbox(phba, regvpimbox, MBX_NOWAIT)
5324                                        == MBX_NOT_FINISHED) {
5325                        mempool_free(regvpimbox, phba->mbox_mem_pool);
5326                }
5327        }
5328}
5329
5330/* Start Link up / RSCN discovery on NPR nodes */
5331void
5332lpfc_disc_start(struct lpfc_vport *vport)
5333{
5334        struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
5335        struct lpfc_hba  *phba = vport->phba;
5336        uint32_t num_sent;
5337        uint32_t clear_la_pending;
5338
5339        if (!lpfc_is_link_up(phba)) {
5340                lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
5341                                 "3315 Link is not up %x\n",
5342                                 phba->link_state);
5343                return;
5344        }
5345
5346        if (phba->link_state == LPFC_CLEAR_LA)
5347                clear_la_pending = 1;
5348        else
5349                clear_la_pending = 0;
5350
5351        if (vport->port_state < LPFC_VPORT_READY)
5352                vport->port_state = LPFC_DISC_AUTH;
5353
5354        lpfc_set_disctmo(vport);
5355
5356        vport->fc_prevDID = vport->fc_myDID;
5357        vport->num_disc_nodes = 0;
5358
5359        /* Start Discovery state <hba_state> */
5360        lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
5361                         "0202 Start Discovery hba state x%x "
5362                         "Data: x%x x%x x%x\n",
5363                         vport->port_state, vport->fc_flag, vport->fc_plogi_cnt,
5364                         vport->fc_adisc_cnt);
5365
5366        /* First do ADISCs - if any */
5367        num_sent = lpfc_els_disc_adisc(vport);
5368
5369        if (num_sent)
5370                return;
5371
5372        /* Register the VPI for SLI3, NPIV only. */
5373        if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
5374            !(vport->fc_flag & FC_PT2PT) &&
5375            !(vport->fc_flag & FC_RSCN_MODE) &&
5376            (phba->sli_rev < LPFC_SLI_REV4)) {
5377                lpfc_issue_clear_la(phba, vport);
5378                lpfc_issue_reg_vpi(phba, vport);
5379                return;
5380        }
5381
5382        /*
5383         * For SLI2, we need to set port_state to READY and continue
5384         * discovery.
5385         */
5386        if (vport->port_state < LPFC_VPORT_READY && !clear_la_pending) {
5387                /* If we get here, there is nothing to ADISC */
5388                lpfc_issue_clear_la(phba, vport);
5389
5390                if (!(vport->fc_flag & FC_ABORT_DISCOVERY)) {
5391                        vport->num_disc_nodes = 0;
5392                        /* go thru NPR nodes and issue ELS PLOGIs */
5393                        if (vport->fc_npr_cnt)
5394                                lpfc_els_disc_plogi(vport);
5395
5396                        if (!vport->num_disc_nodes) {
5397                                spin_lock_irq(shost->host_lock);
5398                                vport->fc_flag &= ~FC_NDISC_ACTIVE;
5399                                spin_unlock_irq(shost->host_lock);
5400                                lpfc_can_disctmo(vport);
5401                        }
5402                }
5403                vport->port_state = LPFC_VPORT_READY;
5404        } else {
5405                /* Next do PLOGIs - if any */
5406                num_sent = lpfc_els_disc_plogi(vport);
5407
5408                if (num_sent)
5409                        return;
5410
5411                if (vport->fc_flag & FC_RSCN_MODE) {
5412                        /* Check to see if more RSCNs came in while we
5413                         * were processing this one.
5414                         */
5415                        if ((vport->fc_rscn_id_cnt == 0) &&
5416                            (!(vport->fc_flag & FC_RSCN_DISCOVERY))) {
5417                                spin_lock_irq(shost->host_lock);
5418                                vport->fc_flag &= ~FC_RSCN_MODE;
5419                                spin_unlock_irq(shost->host_lock);
5420                                lpfc_can_disctmo(vport);
5421                        } else
5422                                lpfc_els_handle_rscn(vport);
5423                }
5424        }
5425        return;
5426}
5427
5428/*
5429 *  Ignore completion for all IOCBs on tx and txcmpl queue for ELS
5430 *  ring the match the sppecified nodelist.
5431 */
5432static void
5433lpfc_free_tx(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
5434{
5435        LIST_HEAD(completions);
5436        struct lpfc_sli *psli;
5437        IOCB_t     *icmd;
5438        struct lpfc_iocbq    *iocb, *next_iocb;
5439        struct lpfc_sli_ring *pring;
5440
5441        psli = &phba->sli;
5442        pring = lpfc_phba_elsring(phba);
5443        if (unlikely(!pring))
5444                return;
5445
5446        /* Error matching iocb on txq or txcmplq
5447         * First check the txq.
5448         */
5449        spin_lock_irq(&phba->hbalock);
5450        list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) {
5451                if (iocb->context1 != ndlp) {
5452                        continue;
5453                }
5454                icmd = &iocb->iocb;
5455                if ((icmd->ulpCommand == CMD_ELS_REQUEST64_CR) ||
5456                    (icmd->ulpCommand == CMD_XMIT_ELS_RSP64_CX)) {
5457
5458                        list_move_tail(&iocb->list, &completions);
5459                }
5460        }
5461
5462        /* Next check the txcmplq */
5463        list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) {
5464                if (iocb->context1 != ndlp) {
5465                        continue;
5466                }
5467                icmd = &iocb->iocb;
5468                if (icmd->ulpCommand == CMD_ELS_REQUEST64_CR ||
5469                    icmd->ulpCommand == CMD_XMIT_ELS_RSP64_CX) {
5470                        lpfc_sli_issue_abort_iotag(phba, pring, iocb);
5471                }
5472        }
5473        spin_unlock_irq(&phba->hbalock);
5474
5475        /* Cancel all the IOCBs from the completions list */
5476        lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
5477                              IOERR_SLI_ABORTED);
5478}
5479
5480static void
5481lpfc_disc_flush_list(struct lpfc_vport *vport)
5482{
5483        struct lpfc_nodelist *ndlp, *next_ndlp;
5484        struct lpfc_hba *phba = vport->phba;
5485
5486        if (vport->fc_plogi_cnt || vport->fc_adisc_cnt) {
5487                list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes,
5488                                         nlp_listp) {
5489                        if (!NLP_CHK_NODE_ACT(ndlp))
5490                                continue;
5491                        if (ndlp->nlp_state == NLP_STE_PLOGI_ISSUE ||
5492                            ndlp->nlp_state == NLP_STE_ADISC_ISSUE) {
5493                                lpfc_free_tx(phba, ndlp);
5494                        }
5495                }
5496        }
5497}
5498
5499void
5500lpfc_cleanup_discovery_resources(struct lpfc_vport *vport)
5501{
5502        lpfc_els_flush_rscn(vport);
5503        lpfc_els_flush_cmd(vport);
5504        lpfc_disc_flush_list(vport);
5505}
5506
5507/*****************************************************************************/
5508/*
5509 * NAME:     lpfc_disc_timeout
5510 *
5511 * FUNCTION: Fibre Channel driver discovery timeout routine.
5512 *
5513 * EXECUTION ENVIRONMENT: interrupt only
5514 *
5515 * CALLED FROM:
5516 *      Timer function
5517 *
5518 * RETURNS:
5519 *      none
5520 */
5521/*****************************************************************************/
5522void
5523lpfc_disc_timeout(struct timer_list *t)
5524{
5525        struct lpfc_vport *vport = from_timer(vport, t, fc_disctmo);
5526        struct lpfc_hba   *phba = vport->phba;
5527        uint32_t tmo_posted;
5528        unsigned long flags = 0;
5529
5530        if (unlikely(!phba))
5531                return;
5532
5533        spin_lock_irqsave(&vport->work_port_lock, flags);
5534        tmo_posted = vport->work_port_events & WORKER_DISC_TMO;
5535        if (!tmo_posted)
5536                vport->work_port_events |= WORKER_DISC_TMO;
5537        spin_unlock_irqrestore(&vport->work_port_lock, flags);
5538
5539        if (!tmo_posted)
5540                lpfc_worker_wake_up(phba);
5541        return;
5542}
5543
5544static void
5545lpfc_disc_timeout_handler(struct lpfc_vport *vport)
5546{
5547        struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
5548        struct lpfc_hba  *phba = vport->phba;
5549        struct lpfc_sli  *psli = &phba->sli;
5550        struct lpfc_nodelist *ndlp, *next_ndlp;
5551        LPFC_MBOXQ_t *initlinkmbox;
5552        int rc, clrlaerr = 0;
5553
5554        if (!(vport->fc_flag & FC_DISC_TMO))
5555                return;
5556
5557        spin_lock_irq(shost->host_lock);
5558        vport->fc_flag &= ~FC_DISC_TMO;
5559        spin_unlock_irq(shost->host_lock);
5560
5561        lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
5562                "disc timeout:    state:x%x rtry:x%x flg:x%x",
5563                vport->port_state, vport->fc_ns_retry, vport->fc_flag);
5564
5565        switch (vport->port_state) {
5566
5567        case LPFC_LOCAL_CFG_LINK:
5568                /*
5569                 * port_state is identically  LPFC_LOCAL_CFG_LINK while
5570                 * waiting for FAN timeout
5571                 */
5572                lpfc_printf_vlog(vport, KERN_WARNING, LOG_DISCOVERY,
5573                                 "0221 FAN timeout\n");
5574
5575                /* Start discovery by sending FLOGI, clean up old rpis */
5576                list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes,
5577                                         nlp_listp) {
5578                        if (!NLP_CHK_NODE_ACT(ndlp))
5579                                continue;
5580                        if (ndlp->nlp_state != NLP_STE_NPR_NODE)
5581                                continue;
5582                        if (ndlp->nlp_type & NLP_FABRIC) {
5583                                /* Clean up the ndlp on Fabric connections */
5584                                lpfc_drop_node(vport, ndlp);
5585
5586                        } else if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) {
5587                                /* Fail outstanding IO now since device
5588                                 * is marked for PLOGI.
5589                                 */
5590                                lpfc_unreg_rpi(vport, ndlp);
5591                        }
5592                }
5593                if (vport->port_state != LPFC_FLOGI) {
5594                        if (phba->sli_rev <= LPFC_SLI_REV3)
5595                                lpfc_initial_flogi(vport);
5596                        else
5597                                lpfc_issue_init_vfi(vport);
5598                        return;
5599                }
5600                break;
5601
5602        case LPFC_FDISC:
5603        case LPFC_FLOGI:
5604        /* port_state is identically LPFC_FLOGI while waiting for FLOGI cmpl */
5605                /* Initial FLOGI timeout */
5606                lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
5607                                 "0222 Initial %s timeout\n",
5608                                 vport->vpi ? "FDISC" : "FLOGI");
5609
5610                /* Assume no Fabric and go on with discovery.
5611                 * Check for outstanding ELS FLOGI to abort.
5612                 */
5613
5614                /* FLOGI failed, so just use loop map to make discovery list */
5615                lpfc_disc_list_loopmap(vport);
5616
5617                /* Start discovery */
5618                lpfc_disc_start(vport);
5619                break;
5620
5621        case LPFC_FABRIC_CFG_LINK:
5622        /* hba_state is identically LPFC_FABRIC_CFG_LINK while waiting for
5623           NameServer login */
5624                lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
5625                                 "0223 Timeout while waiting for "
5626                                 "NameServer login\n");
5627                /* Next look for NameServer ndlp */
5628                ndlp = lpfc_findnode_did(vport, NameServer_DID);
5629                if (ndlp && NLP_CHK_NODE_ACT(ndlp))
5630                        lpfc_els_abort(phba, ndlp);
5631
5632                /* ReStart discovery */
5633                goto restart_disc;
5634
5635        case LPFC_NS_QRY:
5636        /* Check for wait for NameServer Rsp timeout */
5637                lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
5638                                 "0224 NameServer Query timeout "
5639                                 "Data: x%x x%x\n",
5640                                 vport->fc_ns_retry, LPFC_MAX_NS_RETRY);
5641
5642                if (vport->fc_ns_retry < LPFC_MAX_NS_RETRY) {
5643                        /* Try it one more time */
5644                        vport->fc_ns_retry++;
5645                        vport->gidft_inp = 0;
5646                        rc = lpfc_issue_gidft(vport);
5647                        if (rc == 0)
5648                                break;
5649                }
5650                vport->fc_ns_retry = 0;
5651
5652restart_disc:
5653                /*
5654                 * Discovery is over.
5655                 * set port_state to PORT_READY if SLI2.
5656                 * cmpl_reg_vpi will set port_state to READY for SLI3.
5657                 */
5658                if (phba->sli_rev < LPFC_SLI_REV4) {
5659                        if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
5660                                lpfc_issue_reg_vpi(phba, vport);
5661                        else  {
5662                                lpfc_issue_clear_la(phba, vport);
5663                                vport->port_state = LPFC_VPORT_READY;
5664                        }
5665                }
5666
5667                /* Setup and issue mailbox INITIALIZE LINK command */
5668                initlinkmbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5669                if (!initlinkmbox) {
5670                        lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
5671                                         "0206 Device Discovery "
5672                                         "completion error\n");
5673                        phba->link_state = LPFC_HBA_ERROR;
5674                        break;
5675                }
5676
5677                lpfc_linkdown(phba);
5678                lpfc_init_link(phba, initlinkmbox, phba->cfg_topology,
5679                               phba->cfg_link_speed);
5680                initlinkmbox->u.mb.un.varInitLnk.lipsr_AL_PA = 0;
5681                initlinkmbox->vport = vport;
5682                initlinkmbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
5683                rc = lpfc_sli_issue_mbox(phba, initlinkmbox, MBX_NOWAIT);
5684                lpfc_set_loopback_flag(phba);
5685                if (rc == MBX_NOT_FINISHED)
5686                        mempool_free(initlinkmbox, phba->mbox_mem_pool);
5687
5688                break;
5689
5690        case LPFC_DISC_AUTH:
5691        /* Node Authentication timeout */
5692                lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
5693                                 "0227 Node Authentication timeout\n");
5694                lpfc_disc_flush_list(vport);
5695
5696                /*
5697                 * set port_state to PORT_READY if SLI2.
5698                 * cmpl_reg_vpi will set port_state to READY for SLI3.
5699                 */
5700                if (phba->sli_rev < LPFC_SLI_REV4) {
5701                        if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
5702                                lpfc_issue_reg_vpi(phba, vport);
5703                        else  { /* NPIV Not enabled */
5704                                lpfc_issue_clear_la(phba, vport);
5705                                vport->port_state = LPFC_VPORT_READY;
5706                        }
5707                }
5708                break;
5709
5710        case LPFC_VPORT_READY:
5711                if (vport->fc_flag & FC_RSCN_MODE) {
5712                        lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
5713                                         "0231 RSCN timeout Data: x%x "
5714                                         "x%x\n",
5715                                         vport->fc_ns_retry, LPFC_MAX_NS_RETRY);
5716
5717                        /* Cleanup any outstanding ELS commands */
5718                        lpfc_els_flush_cmd(vport);
5719
5720                        lpfc_els_flush_rscn(vport);
5721                        lpfc_disc_flush_list(vport);
5722                }
5723                break;
5724
5725        default:
5726                lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
5727                                 "0273 Unexpected discovery timeout, "
5728                                 "vport State x%x\n", vport->port_state);
5729                break;
5730        }
5731
5732        switch (phba->link_state) {
5733        case LPFC_CLEAR_LA:
5734                                /* CLEAR LA timeout */
5735                lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
5736                                 "0228 CLEAR LA timeout\n");
5737                clrlaerr = 1;
5738                break;
5739
5740        case LPFC_LINK_UP:
5741                lpfc_issue_clear_la(phba, vport);
5742                /* Drop thru */
5743        case LPFC_LINK_UNKNOWN:
5744        case LPFC_WARM_START:
5745        case LPFC_INIT_START:
5746        case LPFC_INIT_MBX_CMDS:
5747        case LPFC_LINK_DOWN:
5748        case LPFC_HBA_ERROR:
5749                lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
5750                                 "0230 Unexpected timeout, hba link "
5751                                 "state x%x\n", phba->link_state);
5752                clrlaerr = 1;
5753                break;
5754
5755        case LPFC_HBA_READY:
5756                break;
5757        }
5758
5759        if (clrlaerr) {
5760                lpfc_disc_flush_list(vport);
5761                if (phba->sli_rev != LPFC_SLI_REV4) {
5762                        psli->sli3_ring[(LPFC_EXTRA_RING)].flag &=
5763                                ~LPFC_STOP_IOCB_EVENT;
5764                        psli->sli3_ring[LPFC_FCP_RING].flag &=
5765                                ~LPFC_STOP_IOCB_EVENT;
5766                }
5767                vport->port_state = LPFC_VPORT_READY;
5768        }
5769        return;
5770}
5771
5772/*
5773 * This routine handles processing a NameServer REG_LOGIN mailbox
5774 * command upon completion. It is setup in the LPFC_MBOXQ
5775 * as the completion routine when the command is
5776 * handed off to the SLI layer.
5777 */
5778void
5779lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
5780{
5781        MAILBOX_t *mb = &pmb->u.mb;
5782        struct lpfc_dmabuf   *mp = (struct lpfc_dmabuf *) (pmb->context1);
5783        struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2;
5784        struct lpfc_vport    *vport = pmb->vport;
5785
5786        pmb->context1 = NULL;
5787        pmb->context2 = NULL;
5788
5789        if (phba->sli_rev < LPFC_SLI_REV4)
5790                ndlp->nlp_rpi = mb->un.varWords[0];
5791        ndlp->nlp_flag |= NLP_RPI_REGISTERED;
5792        ndlp->nlp_type |= NLP_FABRIC;
5793        lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
5794        lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
5795                         "0004 rpi:%x DID:%x flg:%x %d map:%x %p\n",
5796                         ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag,
5797                         kref_read(&ndlp->kref),
5798                         ndlp->nlp_usg_map, ndlp);
5799        /*
5800         * Start issuing Fabric-Device Management Interface (FDMI) command to
5801         * 0xfffffa (FDMI well known port).
5802         * DHBA -> DPRT -> RHBA -> RPA  (physical port)
5803         * DPRT -> RPRT (vports)
5804         */
5805        if (vport->port_type == LPFC_PHYSICAL_PORT)
5806                lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_DHBA, 0);
5807        else
5808                lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_DPRT, 0);
5809
5810
5811        /* decrement the node reference count held for this callback
5812         * function.
5813         */
5814        lpfc_nlp_put(ndlp);
5815        lpfc_mbuf_free(phba, mp->virt, mp->phys);
5816        kfree(mp);
5817        mempool_free(pmb, phba->mbox_mem_pool);
5818
5819        return;
5820}
5821
5822static int
5823lpfc_filter_by_rpi(struct lpfc_nodelist *ndlp, void *param)
5824{
5825        uint16_t *rpi = param;
5826
5827        /* check for active node */
5828        if (!NLP_CHK_NODE_ACT(ndlp))
5829                return 0;
5830
5831        return ndlp->nlp_rpi == *rpi;
5832}
5833
5834static int
5835lpfc_filter_by_wwpn(struct lpfc_nodelist *ndlp, void *param)
5836{
5837        return memcmp(&ndlp->nlp_portname, param,
5838                      sizeof(ndlp->nlp_portname)) == 0;
5839}
5840
5841static struct lpfc_nodelist *
5842__lpfc_find_node(struct lpfc_vport *vport, node_filter filter, void *param)
5843{
5844        struct lpfc_nodelist *ndlp;
5845
5846        list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
5847                if (filter(ndlp, param)) {
5848                        lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
5849                                         "3185 FIND node filter %p DID "
5850                                         "ndlp %p did x%x flg x%x st x%x "
5851                                         "xri x%x type x%x rpi x%x\n",
5852                                         filter, ndlp, ndlp->nlp_DID,
5853                                         ndlp->nlp_flag, ndlp->nlp_state,
5854                                         ndlp->nlp_xri, ndlp->nlp_type,
5855                                         ndlp->nlp_rpi);
5856                        return ndlp;
5857                }
5858        }
5859        lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
5860                         "3186 FIND node filter %p NOT FOUND.\n", filter);
5861        return NULL;
5862}
5863
5864/*
5865 * This routine looks up the ndlp lists for the given RPI. If rpi found it
5866 * returns the node list element pointer else return NULL.
5867 */
5868struct lpfc_nodelist *
5869__lpfc_findnode_rpi(struct lpfc_vport *vport, uint16_t rpi)
5870{
5871        return __lpfc_find_node(vport, lpfc_filter_by_rpi, &rpi);
5872}
5873
5874/*
5875 * This routine looks up the ndlp lists for the given WWPN. If WWPN found it
5876 * returns the node element list pointer else return NULL.
5877 */
5878struct lpfc_nodelist *
5879lpfc_findnode_wwpn(struct lpfc_vport *vport, struct lpfc_name *wwpn)
5880{
5881        struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
5882        struct lpfc_nodelist *ndlp;
5883
5884        spin_lock_irq(shost->host_lock);
5885        ndlp = __lpfc_find_node(vport, lpfc_filter_by_wwpn, wwpn);
5886        spin_unlock_irq(shost->host_lock);
5887        return ndlp;
5888}
5889
5890/*
5891 * This routine looks up the ndlp lists for the given RPI. If the rpi
5892 * is found, the routine returns the node element list pointer else
5893 * return NULL.
5894 */
5895struct lpfc_nodelist *
5896lpfc_findnode_rpi(struct lpfc_vport *vport, uint16_t rpi)
5897{
5898        struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
5899        struct lpfc_nodelist *ndlp;
5900
5901        spin_lock_irq(shost->host_lock);
5902        ndlp = __lpfc_findnode_rpi(vport, rpi);
5903        spin_unlock_irq(shost->host_lock);
5904        return ndlp;
5905}
5906
5907/**
5908 * lpfc_find_vport_by_vpid - Find a vport on a HBA through vport identifier
5909 * @phba: pointer to lpfc hba data structure.
5910 * @vpi: the physical host virtual N_Port identifier.
5911 *
5912 * This routine finds a vport on a HBA (referred by @phba) through a
5913 * @vpi. The function walks the HBA's vport list and returns the address
5914 * of the vport with the matching @vpi.
5915 *
5916 * Return code
5917 *    NULL - No vport with the matching @vpi found
5918 *    Otherwise - Address to the vport with the matching @vpi.
5919 **/
5920struct lpfc_vport *
5921lpfc_find_vport_by_vpid(struct lpfc_hba *phba, uint16_t vpi)
5922{
5923        struct lpfc_vport *vport;
5924        unsigned long flags;
5925        int i = 0;
5926
5927        /* The physical ports are always vpi 0 - translate is unnecessary. */
5928        if (vpi > 0) {
5929                /*
5930                 * Translate the physical vpi to the logical vpi.  The
5931                 * vport stores the logical vpi.
5932                 */
5933                for (i = 0; i < phba->max_vpi; i++) {
5934                        if (vpi == phba->vpi_ids[i])
5935                                break;
5936                }
5937
5938                if (i >= phba->max_vpi) {
5939                        lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
5940                                         "2936 Could not find Vport mapped "
5941                                         "to vpi %d\n", vpi);
5942                        return NULL;
5943                }
5944        }
5945
5946        spin_lock_irqsave(&phba->hbalock, flags);
5947        list_for_each_entry(vport, &phba->port_list, listentry) {
5948                if (vport->vpi == i) {
5949                        spin_unlock_irqrestore(&phba->hbalock, flags);
5950                        return vport;
5951                }
5952        }
5953        spin_unlock_irqrestore(&phba->hbalock, flags);
5954        return NULL;
5955}
5956
5957struct lpfc_nodelist *
5958lpfc_nlp_init(struct lpfc_vport *vport, uint32_t did)
5959{
5960        struct lpfc_nodelist *ndlp;
5961        int rpi = LPFC_RPI_ALLOC_ERROR;
5962
5963        if (vport->phba->sli_rev == LPFC_SLI_REV4) {
5964                rpi = lpfc_sli4_alloc_rpi(vport->phba);
5965                if (rpi == LPFC_RPI_ALLOC_ERROR)
5966                        return NULL;
5967        }
5968
5969        ndlp = mempool_alloc(vport->phba->nlp_mem_pool, GFP_KERNEL);
5970        if (!ndlp) {
5971                if (vport->phba->sli_rev == LPFC_SLI_REV4)
5972                        lpfc_sli4_free_rpi(vport->phba, rpi);
5973                return NULL;
5974        }
5975
5976        memset(ndlp, 0, sizeof (struct lpfc_nodelist));
5977
5978        lpfc_initialize_node(vport, ndlp, did);
5979        INIT_LIST_HEAD(&ndlp->nlp_listp);
5980        if (vport->phba->sli_rev == LPFC_SLI_REV4) {
5981                ndlp->nlp_rpi = rpi;
5982                lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
5983                                 "0007 rpi:%x DID:%x flg:%x refcnt:%d "
5984                                 "map:%x %p\n", ndlp->nlp_rpi, ndlp->nlp_DID,
5985                                 ndlp->nlp_flag,
5986                                 kref_read(&ndlp->kref),
5987                                 ndlp->nlp_usg_map, ndlp);
5988
5989                ndlp->active_rrqs_xri_bitmap =
5990                                mempool_alloc(vport->phba->active_rrq_pool,
5991                                              GFP_KERNEL);
5992                if (ndlp->active_rrqs_xri_bitmap)
5993                        memset(ndlp->active_rrqs_xri_bitmap, 0,
5994                               ndlp->phba->cfg_rrq_xri_bitmap_sz);
5995        }
5996
5997
5998
5999        lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_NODE,
6000                "node init:       did:x%x",
6001                ndlp->nlp_DID, 0, 0);
6002
6003        return ndlp;
6004}
6005
6006/* This routine releases all resources associated with a specifc NPort's ndlp
6007 * and mempool_free's the nodelist.
6008 */
6009static void
6010lpfc_nlp_release(struct kref *kref)
6011{
6012        struct lpfc_hba *phba;
6013        unsigned long flags;
6014        struct lpfc_nodelist *ndlp = container_of(kref, struct lpfc_nodelist,
6015                                                  kref);
6016
6017        lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE,
6018                "node release:    did:x%x flg:x%x type:x%x",
6019                ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type);
6020
6021        lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
6022                        "0279 lpfc_nlp_release: ndlp:x%p did %x "
6023                        "usgmap:x%x refcnt:%d rpi:%x\n",
6024                        (void *)ndlp, ndlp->nlp_DID, ndlp->nlp_usg_map,
6025                        kref_read(&ndlp->kref), ndlp->nlp_rpi);
6026
6027        /* remove ndlp from action. */
6028        lpfc_nlp_remove(ndlp->vport, ndlp);
6029
6030        /* clear the ndlp active flag for all release cases */
6031        phba = ndlp->phba;
6032        spin_lock_irqsave(&phba->ndlp_lock, flags);
6033        NLP_CLR_NODE_ACT(ndlp);
6034        spin_unlock_irqrestore(&phba->ndlp_lock, flags);
6035        if (phba->sli_rev == LPFC_SLI_REV4)
6036                lpfc_sli4_free_rpi(phba, ndlp->nlp_rpi);
6037
6038        /* free ndlp memory for final ndlp release */
6039        if (NLP_CHK_FREE_REQ(ndlp)) {
6040                kfree(ndlp->lat_data);
6041                if (phba->sli_rev == LPFC_SLI_REV4)
6042                        mempool_free(ndlp->active_rrqs_xri_bitmap,
6043                                     ndlp->phba->active_rrq_pool);
6044                mempool_free(ndlp, ndlp->phba->nlp_mem_pool);
6045        }
6046}
6047
6048/* This routine bumps the reference count for a ndlp structure to ensure
6049 * that one discovery thread won't free a ndlp while another discovery thread
6050 * is using it.
6051 */
6052struct lpfc_nodelist *
6053lpfc_nlp_get(struct lpfc_nodelist *ndlp)
6054{
6055        struct lpfc_hba *phba;
6056        unsigned long flags;
6057
6058        if (ndlp) {
6059                lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE,
6060                        "node get:        did:x%x flg:x%x refcnt:x%x",
6061                        ndlp->nlp_DID, ndlp->nlp_flag,
6062                        kref_read(&ndlp->kref));
6063                /* The check of ndlp usage to prevent incrementing the
6064                 * ndlp reference count that is in the process of being
6065                 * released.
6066                 */
6067                phba = ndlp->phba;
6068                spin_lock_irqsave(&phba->ndlp_lock, flags);
6069                if (!NLP_CHK_NODE_ACT(ndlp) || NLP_CHK_FREE_ACK(ndlp)) {
6070                        spin_unlock_irqrestore(&phba->ndlp_lock, flags);
6071                        lpfc_printf_vlog(ndlp->vport, KERN_WARNING, LOG_NODE,
6072                                "0276 lpfc_nlp_get: ndlp:x%p "
6073                                "usgmap:x%x refcnt:%d\n",
6074                                (void *)ndlp, ndlp->nlp_usg_map,
6075                                kref_read(&ndlp->kref));
6076                        return NULL;
6077                } else
6078                        kref_get(&ndlp->kref);
6079                spin_unlock_irqrestore(&phba->ndlp_lock, flags);
6080        }
6081        return ndlp;
6082}
6083
6084/* This routine decrements the reference count for a ndlp structure. If the
6085 * count goes to 0, this indicates the the associated nodelist should be
6086 * freed. Returning 1 indicates the ndlp resource has been released; on the
6087 * other hand, returning 0 indicates the ndlp resource has not been released
6088 * yet.
6089 */
6090int
6091lpfc_nlp_put(struct lpfc_nodelist *ndlp)
6092{
6093        struct lpfc_hba *phba;
6094        unsigned long flags;
6095
6096        if (!ndlp)
6097                return 1;
6098
6099        lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE,
6100        "node put:        did:x%x flg:x%x refcnt:x%x",
6101                ndlp->nlp_DID, ndlp->nlp_flag,
6102                kref_read(&ndlp->kref));
6103        phba = ndlp->phba;
6104        spin_lock_irqsave(&phba->ndlp_lock, flags);
6105        /* Check the ndlp memory free acknowledge flag to avoid the
6106         * possible race condition that kref_put got invoked again
6107         * after previous one has done ndlp memory free.
6108         */
6109        if (NLP_CHK_FREE_ACK(ndlp)) {
6110                spin_unlock_irqrestore(&phba->ndlp_lock, flags);
6111                lpfc_printf_vlog(ndlp->vport, KERN_WARNING, LOG_NODE,
6112                                "0274 lpfc_nlp_put: ndlp:x%p "
6113                                "usgmap:x%x refcnt:%d\n",
6114                                (void *)ndlp, ndlp->nlp_usg_map,
6115                                kref_read(&ndlp->kref));
6116                return 1;
6117        }
6118        /* Check the ndlp inactivate log flag to avoid the possible
6119         * race condition that kref_put got invoked again after ndlp
6120         * is already in inactivating state.
6121         */
6122        if (NLP_CHK_IACT_REQ(ndlp)) {
6123                spin_unlock_irqrestore(&phba->ndlp_lock, flags);
6124                lpfc_printf_vlog(ndlp->vport, KERN_WARNING, LOG_NODE,
6125                                "0275 lpfc_nlp_put: ndlp:x%p "
6126                                "usgmap:x%x refcnt:%d\n",
6127                                (void *)ndlp, ndlp->nlp_usg_map,
6128                                kref_read(&ndlp->kref));
6129                return 1;
6130        }
6131        /* For last put, mark the ndlp usage flags to make sure no
6132         * other kref_get and kref_put on the same ndlp shall get
6133         * in between the process when the final kref_put has been
6134         * invoked on this ndlp.
6135         */
6136        if (kref_read(&ndlp->kref) == 1) {
6137                /* Indicate ndlp is put to inactive state. */
6138                NLP_SET_IACT_REQ(ndlp);
6139                /* Acknowledge ndlp memory free has been seen. */
6140                if (NLP_CHK_FREE_REQ(ndlp))
6141                        NLP_SET_FREE_ACK(ndlp);
6142        }
6143        spin_unlock_irqrestore(&phba->ndlp_lock, flags);
6144        /* Note, the kref_put returns 1 when decrementing a reference
6145         * count that was 1, it invokes the release callback function,
6146         * but it still left the reference count as 1 (not actually
6147         * performs the last decrementation). Otherwise, it actually
6148         * decrements the reference count and returns 0.
6149         */
6150        return kref_put(&ndlp->kref, lpfc_nlp_release);
6151}
6152
6153/* This routine free's the specified nodelist if it is not in use
6154 * by any other discovery thread. This routine returns 1 if the
6155 * ndlp has been freed. A return value of 0 indicates the ndlp is
6156 * not yet been released.
6157 */
6158int
6159lpfc_nlp_not_used(struct lpfc_nodelist *ndlp)
6160{
6161        lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE,
6162                "node not used:   did:x%x flg:x%x refcnt:x%x",
6163                ndlp->nlp_DID, ndlp->nlp_flag,
6164                kref_read(&ndlp->kref));
6165        if (kref_read(&ndlp->kref) == 1)
6166                if (lpfc_nlp_put(ndlp))
6167                        return 1;
6168        return 0;
6169}
6170
6171/**
6172 * lpfc_fcf_inuse - Check if FCF can be unregistered.
6173 * @phba: Pointer to hba context object.
6174 *
6175 * This function iterate through all FC nodes associated
6176 * will all vports to check if there is any node with
6177 * fc_rports associated with it. If there is an fc_rport
6178 * associated with the node, then the node is either in
6179 * discovered state or its devloss_timer is pending.
6180 */
6181static int
6182lpfc_fcf_inuse(struct lpfc_hba *phba)
6183{
6184        struct lpfc_vport **vports;
6185        int i, ret = 0;
6186        struct lpfc_nodelist *ndlp;
6187        struct Scsi_Host  *shost;
6188
6189        vports = lpfc_create_vport_work_array(phba);
6190
6191        /* If driver cannot allocate memory, indicate fcf is in use */
6192        if (!vports)
6193                return 1;
6194
6195        for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
6196                shost = lpfc_shost_from_vport(vports[i]);
6197                spin_lock_irq(shost->host_lock);
6198                /*
6199                 * IF the CVL_RCVD bit is not set then we have sent the
6200                 * flogi.
6201                 * If dev_loss fires while we are waiting we do not want to
6202                 * unreg the fcf.
6203                 */
6204                if (!(vports[i]->fc_flag & FC_VPORT_CVL_RCVD)) {
6205                        spin_unlock_irq(shost->host_lock);
6206                        ret =  1;
6207                        goto out;
6208                }
6209                list_for_each_entry(ndlp, &vports[i]->fc_nodes, nlp_listp) {
6210                        if (NLP_CHK_NODE_ACT(ndlp) && ndlp->rport &&
6211                          (ndlp->rport->roles & FC_RPORT_ROLE_FCP_TARGET)) {
6212                                ret = 1;
6213                                spin_unlock_irq(shost->host_lock);
6214                                goto out;
6215                        } else if (ndlp->nlp_flag & NLP_RPI_REGISTERED) {
6216                                ret = 1;
6217                                lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
6218                                                "2624 RPI %x DID %x flag %x "
6219                                                "still logged in\n",
6220                                                ndlp->nlp_rpi, ndlp->nlp_DID,
6221                                                ndlp->nlp_flag);
6222                        }
6223                }
6224                spin_unlock_irq(shost->host_lock);
6225        }
6226out:
6227        lpfc_destroy_vport_work_array(phba, vports);
6228        return ret;
6229}
6230
6231/**
6232 * lpfc_unregister_vfi_cmpl - Completion handler for unreg vfi.
6233 * @phba: Pointer to hba context object.
6234 * @mboxq: Pointer to mailbox object.
6235 *
6236 * This function frees memory associated with the mailbox command.
6237 */
6238void
6239lpfc_unregister_vfi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
6240{
6241        struct lpfc_vport *vport = mboxq->vport;
6242        struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
6243
6244        if (mboxq->u.mb.mbxStatus) {
6245                lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
6246                        "2555 UNREG_VFI mbxStatus error x%x "
6247                        "HBA state x%x\n",
6248                        mboxq->u.mb.mbxStatus, vport->port_state);
6249        }
6250        spin_lock_irq(shost->host_lock);
6251        phba->pport->fc_flag &= ~FC_VFI_REGISTERED;
6252        spin_unlock_irq(shost->host_lock);
6253        mempool_free(mboxq, phba->mbox_mem_pool);
6254        return;
6255}
6256
6257/**
6258 * lpfc_unregister_fcfi_cmpl - Completion handler for unreg fcfi.
6259 * @phba: Pointer to hba context object.
6260 * @mboxq: Pointer to mailbox object.
6261 *
6262 * This function frees memory associated with the mailbox command.
6263 */
6264static void
6265lpfc_unregister_fcfi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
6266{
6267        struct lpfc_vport *vport = mboxq->vport;
6268
6269        if (mboxq->u.mb.mbxStatus) {
6270                lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
6271                        "2550 UNREG_FCFI mbxStatus error x%x "
6272                        "HBA state x%x\n",
6273                        mboxq->u.mb.mbxStatus, vport->port_state);
6274        }
6275        mempool_free(mboxq, phba->mbox_mem_pool);
6276        return;
6277}
6278
6279/**
6280 * lpfc_unregister_fcf_prep - Unregister fcf record preparation
6281 * @phba: Pointer to hba context object.
6282 *
6283 * This function prepare the HBA for unregistering the currently registered
6284 * FCF from the HBA. It performs unregistering, in order, RPIs, VPIs, and
6285 * VFIs.
6286 */
6287int
6288lpfc_unregister_fcf_prep(struct lpfc_hba *phba)
6289{
6290        struct lpfc_vport **vports;
6291        struct lpfc_nodelist *ndlp;
6292        struct Scsi_Host *shost;
6293        int i = 0, rc;
6294
6295        /* Unregister RPIs */
6296        if (lpfc_fcf_inuse(phba))
6297                lpfc_unreg_hba_rpis(phba);
6298
6299        /* At this point, all discovery is aborted */
6300        phba->pport->port_state = LPFC_VPORT_UNKNOWN;
6301
6302        /* Unregister VPIs */
6303        vports = lpfc_create_vport_work_array(phba);
6304        if (vports && (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED))
6305                for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
6306                        /* Stop FLOGI/FDISC retries */
6307                        ndlp = lpfc_findnode_did(vports[i], Fabric_DID);
6308                        if (ndlp)
6309                                lpfc_cancel_retry_delay_tmo(vports[i], ndlp);
6310                        lpfc_cleanup_pending_mbox(vports[i]);
6311                        if (phba->sli_rev == LPFC_SLI_REV4)
6312                                lpfc_sli4_unreg_all_rpis(vports[i]);
6313                        lpfc_mbx_unreg_vpi(vports[i]);
6314                        shost = lpfc_shost_from_vport(vports[i]);
6315                        spin_lock_irq(shost->host_lock);
6316                        vports[i]->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
6317                        vports[i]->vpi_state &= ~LPFC_VPI_REGISTERED;
6318                        spin_unlock_irq(shost->host_lock);
6319                }
6320        lpfc_destroy_vport_work_array(phba, vports);
6321        if (i == 0 && (!(phba->sli3_options & LPFC_SLI3_NPIV_ENABLED))) {
6322                ndlp = lpfc_findnode_did(phba->pport, Fabric_DID);
6323                if (ndlp)
6324                        lpfc_cancel_retry_delay_tmo(phba->pport, ndlp);
6325                lpfc_cleanup_pending_mbox(phba->pport);
6326                if (phba->sli_rev == LPFC_SLI_REV4)
6327                        lpfc_sli4_unreg_all_rpis(phba->pport);
6328                lpfc_mbx_unreg_vpi(phba->pport);
6329                shost = lpfc_shost_from_vport(phba->pport);
6330                spin_lock_irq(shost->host_lock);
6331                phba->pport->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
6332                phba->pport->vpi_state &= ~LPFC_VPI_REGISTERED;
6333                spin_unlock_irq(shost->host_lock);
6334        }
6335
6336        /* Cleanup any outstanding ELS commands */
6337        lpfc_els_flush_all_cmd(phba);
6338
6339        /* Unregister the physical port VFI */
6340        rc = lpfc_issue_unreg_vfi(phba->pport);
6341        return rc;
6342}
6343
6344/**
6345 * lpfc_sli4_unregister_fcf - Unregister currently registered FCF record
6346 * @phba: Pointer to hba context object.
6347 *
6348 * This function issues synchronous unregister FCF mailbox command to HBA to
6349 * unregister the currently registered FCF record. The driver does not reset
6350 * the driver FCF usage state flags.
6351 *
6352 * Return 0 if successfully issued, none-zero otherwise.
6353 */
6354int
6355lpfc_sli4_unregister_fcf(struct lpfc_hba *phba)
6356{
6357        LPFC_MBOXQ_t *mbox;
6358        int rc;
6359
6360        mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6361        if (!mbox) {
6362                lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
6363                                "2551 UNREG_FCFI mbox allocation failed"
6364                                "HBA state x%x\n", phba->pport->port_state);
6365                return -ENOMEM;
6366        }
6367        lpfc_unreg_fcfi(mbox, phba->fcf.fcfi);
6368        mbox->vport = phba->pport;
6369        mbox->mbox_cmpl = lpfc_unregister_fcfi_cmpl;
6370        rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
6371
6372        if (rc == MBX_NOT_FINISHED) {
6373                lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
6374                                "2552 Unregister FCFI command failed rc x%x "
6375                                "HBA state x%x\n",
6376                                rc, phba->pport->port_state);
6377                return -EINVAL;
6378        }
6379        return 0;
6380}
6381
6382/**
6383 * lpfc_unregister_fcf_rescan - Unregister currently registered fcf and rescan
6384 * @phba: Pointer to hba context object.
6385 *
6386 * This function unregisters the currently reigstered FCF. This function
6387 * also tries to find another FCF for discovery by rescan the HBA FCF table.
6388 */
6389void
6390lpfc_unregister_fcf_rescan(struct lpfc_hba *phba)
6391{
6392        int rc;
6393
6394        /* Preparation for unregistering fcf */
6395        rc = lpfc_unregister_fcf_prep(phba);
6396        if (rc) {
6397                lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
6398                                "2748 Failed to prepare for unregistering "
6399                                "HBA's FCF record: rc=%d\n", rc);
6400                return;
6401        }
6402
6403        /* Now, unregister FCF record and reset HBA FCF state */
6404        rc = lpfc_sli4_unregister_fcf(phba);
6405        if (rc)
6406                return;
6407        /* Reset HBA FCF states after successful unregister FCF */
6408        phba->fcf.fcf_flag = 0;
6409        phba->fcf.current_rec.flag = 0;
6410
6411        /*
6412         * If driver is not unloading, check if there is any other
6413         * FCF record that can be used for discovery.
6414         */
6415        if ((phba->pport->load_flag & FC_UNLOADING) ||
6416            (phba->link_state < LPFC_LINK_UP))
6417                return;
6418
6419        /* This is considered as the initial FCF discovery scan */
6420        spin_lock_irq(&phba->hbalock);
6421        phba->fcf.fcf_flag |= FCF_INIT_DISC;
6422        spin_unlock_irq(&phba->hbalock);
6423
6424        /* Reset FCF roundrobin bmask for new discovery */
6425        lpfc_sli4_clear_fcf_rr_bmask(phba);
6426
6427        rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST);
6428
6429        if (rc) {
6430                spin_lock_irq(&phba->hbalock);
6431                phba->fcf.fcf_flag &= ~FCF_INIT_DISC;
6432                spin_unlock_irq(&phba->hbalock);
6433                lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
6434                                "2553 lpfc_unregister_unused_fcf failed "
6435                                "to read FCF record HBA state x%x\n",
6436                                phba->pport->port_state);
6437        }
6438}
6439
6440/**
6441 * lpfc_unregister_fcf - Unregister the currently registered fcf record
6442 * @phba: Pointer to hba context object.
6443 *
6444 * This function just unregisters the currently reigstered FCF. It does not
6445 * try to find another FCF for discovery.
6446 */
6447void
6448lpfc_unregister_fcf(struct lpfc_hba *phba)
6449{
6450        int rc;
6451
6452        /* Preparation for unregistering fcf */
6453        rc = lpfc_unregister_fcf_prep(phba);
6454        if (rc) {
6455                lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
6456                                "2749 Failed to prepare for unregistering "
6457                                "HBA's FCF record: rc=%d\n", rc);
6458                return;
6459        }
6460
6461        /* Now, unregister FCF record and reset HBA FCF state */
6462        rc = lpfc_sli4_unregister_fcf(phba);
6463        if (rc)
6464                return;
6465        /* Set proper HBA FCF states after successful unregister FCF */
6466        spin_lock_irq(&phba->hbalock);
6467        phba->fcf.fcf_flag &= ~FCF_REGISTERED;
6468        spin_unlock_irq(&phba->hbalock);
6469}
6470
6471/**
6472 * lpfc_unregister_unused_fcf - Unregister FCF if all devices are disconnected.
6473 * @phba: Pointer to hba context object.
6474 *
6475 * This function check if there are any connected remote port for the FCF and
6476 * if all the devices are disconnected, this function unregister FCFI.
6477 * This function also tries to use another FCF for discovery.
6478 */
6479void
6480lpfc_unregister_unused_fcf(struct lpfc_hba *phba)
6481{
6482        /*
6483         * If HBA is not running in FIP mode, if HBA does not support
6484         * FCoE, if FCF discovery is ongoing, or if FCF has not been
6485         * registered, do nothing.
6486         */
6487        spin_lock_irq(&phba->hbalock);
6488        if (!(phba->hba_flag & HBA_FCOE_MODE) ||
6489            !(phba->fcf.fcf_flag & FCF_REGISTERED) ||
6490            !(phba->hba_flag & HBA_FIP_SUPPORT) ||
6491            (phba->fcf.fcf_flag & FCF_DISCOVERY) ||
6492            (phba->pport->port_state == LPFC_FLOGI)) {
6493                spin_unlock_irq(&phba->hbalock);
6494                return;
6495        }
6496        spin_unlock_irq(&phba->hbalock);
6497
6498        if (lpfc_fcf_inuse(phba))
6499                return;
6500
6501        lpfc_unregister_fcf_rescan(phba);
6502}
6503
6504/**
6505 * lpfc_read_fcf_conn_tbl - Create driver FCF connection table.
6506 * @phba: Pointer to hba context object.
6507 * @buff: Buffer containing the FCF connection table as in the config
6508 *         region.
6509 * This function create driver data structure for the FCF connection
6510 * record table read from config region 23.
6511 */
6512static void
6513lpfc_read_fcf_conn_tbl(struct lpfc_hba *phba,
6514        uint8_t *buff)
6515{
6516        struct lpfc_fcf_conn_entry *conn_entry, *next_conn_entry;
6517        struct lpfc_fcf_conn_hdr *conn_hdr;
6518        struct lpfc_fcf_conn_rec *conn_rec;
6519        uint32_t record_count;
6520        int i;
6521
6522        /* Free the current connect table */
6523        list_for_each_entry_safe(conn_entry, next_conn_entry,
6524                &phba->fcf_conn_rec_list, list) {
6525                list_del_init(&conn_entry->list);
6526                kfree(conn_entry);
6527        }
6528
6529        conn_hdr = (struct lpfc_fcf_conn_hdr *) buff;
6530        record_count = conn_hdr->length * sizeof(uint32_t)/
6531                sizeof(struct lpfc_fcf_conn_rec);
6532
6533        conn_rec = (struct lpfc_fcf_conn_rec *)
6534                (buff + sizeof(struct lpfc_fcf_conn_hdr));
6535
6536        for (i = 0; i < record_count; i++) {
6537                if (!(conn_rec[i].flags & FCFCNCT_VALID))
6538                        continue;
6539                conn_entry = kzalloc(sizeof(struct lpfc_fcf_conn_entry),
6540                        GFP_KERNEL);
6541                if (!conn_entry) {
6542                        lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6543                                "2566 Failed to allocate connection"
6544                                " table entry\n");
6545                        return;
6546                }
6547
6548                memcpy(&conn_entry->conn_rec, &conn_rec[i],
6549                        sizeof(struct lpfc_fcf_conn_rec));
6550                list_add_tail(&conn_entry->list,
6551                        &phba->fcf_conn_rec_list);
6552        }
6553
6554        if (!list_empty(&phba->fcf_conn_rec_list)) {
6555                i = 0;
6556                list_for_each_entry(conn_entry, &phba->fcf_conn_rec_list,
6557                                    list) {
6558                        conn_rec = &conn_entry->conn_rec;
6559                        lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6560                                        "3345 FCF connection list rec[%02d]: "
6561                                        "flags:x%04x, vtag:x%04x, "
6562                                        "fabric_name:x%02x:%02x:%02x:%02x:"
6563                                        "%02x:%02x:%02x:%02x, "
6564                                        "switch_name:x%02x:%02x:%02x:%02x:"
6565                                        "%02x:%02x:%02x:%02x\n", i++,
6566                                        conn_rec->flags, conn_rec->vlan_tag,
6567                                        conn_rec->fabric_name[0],
6568                                        conn_rec->fabric_name[1],
6569                                        conn_rec->fabric_name[2],
6570                                        conn_rec->fabric_name[3],
6571                                        conn_rec->fabric_name[4],
6572                                        conn_rec->fabric_name[5],
6573                                        conn_rec->fabric_name[6],
6574                                        conn_rec->fabric_name[7],
6575                                        conn_rec->switch_name[0],
6576                                        conn_rec->switch_name[1],
6577                                        conn_rec->switch_name[2],
6578                                        conn_rec->switch_name[3],
6579                                        conn_rec->switch_name[4],
6580                                        conn_rec->switch_name[5],
6581                                        conn_rec->switch_name[6],
6582                                        conn_rec->switch_name[7]);
6583                }
6584        }
6585}
6586
6587/**
6588 * lpfc_read_fcoe_param - Read FCoe parameters from conf region..
6589 * @phba: Pointer to hba context object.
6590 * @buff: Buffer containing the FCoE parameter data structure.
6591 *
6592 *  This function update driver data structure with config
6593 *  parameters read from config region 23.
6594 */
6595static void
6596lpfc_read_fcoe_param(struct lpfc_hba *phba,
6597                        uint8_t *buff)
6598{
6599        struct lpfc_fip_param_hdr *fcoe_param_hdr;
6600        struct lpfc_fcoe_params *fcoe_param;
6601
6602        fcoe_param_hdr = (struct lpfc_fip_param_hdr *)
6603                buff;
6604        fcoe_param = (struct lpfc_fcoe_params *)
6605                (buff + sizeof(struct lpfc_fip_param_hdr));
6606
6607        if ((fcoe_param_hdr->parm_version != FIPP_VERSION) ||
6608                (fcoe_param_hdr->length != FCOE_PARAM_LENGTH))
6609                return;
6610
6611        if (fcoe_param_hdr->parm_flags & FIPP_VLAN_VALID) {
6612                phba->valid_vlan = 1;
6613                phba->vlan_id = le16_to_cpu(fcoe_param->vlan_tag) &
6614                        0xFFF;
6615        }
6616
6617        phba->fc_map[0] = fcoe_param->fc_map[0];
6618        phba->fc_map[1] = fcoe_param->fc_map[1];
6619        phba->fc_map[2] = fcoe_param->fc_map[2];
6620        return;
6621}
6622
6623/**
6624 * lpfc_get_rec_conf23 - Get a record type in config region data.
6625 * @buff: Buffer containing config region 23 data.
6626 * @size: Size of the data buffer.
6627 * @rec_type: Record type to be searched.
6628 *
6629 * This function searches config region data to find the beginning
6630 * of the record specified by record_type. If record found, this
6631 * function return pointer to the record else return NULL.
6632 */
6633static uint8_t *
6634lpfc_get_rec_conf23(uint8_t *buff, uint32_t size, uint8_t rec_type)
6635{
6636        uint32_t offset = 0, rec_length;
6637
6638        if ((buff[0] == LPFC_REGION23_LAST_REC) ||
6639                (size < sizeof(uint32_t)))
6640                return NULL;
6641
6642        rec_length = buff[offset + 1];
6643
6644        /*
6645         * One TLV record has one word header and number of data words
6646         * specified in the rec_length field of the record header.
6647         */
6648        while ((offset + rec_length * sizeof(uint32_t) + sizeof(uint32_t))
6649                <= size) {
6650                if (buff[offset] == rec_type)
6651                        return &buff[offset];
6652
6653                if (buff[offset] == LPFC_REGION23_LAST_REC)
6654                        return NULL;
6655
6656                offset += rec_length * sizeof(uint32_t) + sizeof(uint32_t);
6657                rec_length = buff[offset + 1];
6658        }
6659        return NULL;
6660}
6661
6662/**
6663 * lpfc_parse_fcoe_conf - Parse FCoE config data read from config region 23.
6664 * @phba: Pointer to lpfc_hba data structure.
6665 * @buff: Buffer containing config region 23 data.
6666 * @size: Size of the data buffer.
6667 *
6668 * This function parses the FCoE config parameters in config region 23 and
6669 * populate driver data structure with the parameters.
6670 */
6671void
6672lpfc_parse_fcoe_conf(struct lpfc_hba *phba,
6673                uint8_t *buff,
6674                uint32_t size)
6675{
6676        uint32_t offset = 0;
6677        uint8_t *rec_ptr;
6678
6679        /*
6680         * If data size is less than 2 words signature and version cannot be
6681         * verified.
6682         */
6683        if (size < 2*sizeof(uint32_t))
6684                return;
6685
6686        /* Check the region signature first */
6687        if (memcmp(buff, LPFC_REGION23_SIGNATURE, 4)) {
6688                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6689                        "2567 Config region 23 has bad signature\n");
6690                return;
6691        }
6692
6693        offset += 4;
6694
6695        /* Check the data structure version */
6696        if (buff[offset] != LPFC_REGION23_VERSION) {
6697                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6698                        "2568 Config region 23 has bad version\n");
6699                return;
6700        }
6701        offset += 4;
6702
6703        /* Read FCoE param record */
6704        rec_ptr = lpfc_get_rec_conf23(&buff[offset],
6705                        size - offset, FCOE_PARAM_TYPE);
6706        if (rec_ptr)
6707                lpfc_read_fcoe_param(phba, rec_ptr);
6708
6709        /* Read FCF connection table */
6710        rec_ptr = lpfc_get_rec_conf23(&buff[offset],
6711                size - offset, FCOE_CONN_TBL_TYPE);
6712        if (rec_ptr)
6713                lpfc_read_fcf_conn_tbl(phba, rec_ptr);
6714
6715}
6716