linux/drivers/scsi/lpfc/lpfc_hbadisc.c
/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term *
 * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.     *
 * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.broadcom.com                                                *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/

#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/kthread.h>
#include <linux/interrupt.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>
#include <scsi/fc/fc_fs.h>

#include <linux/nvme-fc-driver.h>

#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc.h"
#include "lpfc_scsi.h"
#include "lpfc_nvme.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_vport.h"
#include "lpfc_debugfs.h"

/* AlpaArray for assignment of scsid for scan-down and bind_method */
static uint8_t lpfcAlpaArray[] = {
        0xEF, 0xE8, 0xE4, 0xE2, 0xE1, 0xE0, 0xDC, 0xDA, 0xD9, 0xD6,
        0xD5, 0xD4, 0xD3, 0xD2, 0xD1, 0xCE, 0xCD, 0xCC, 0xCB, 0xCA,
        0xC9, 0xC7, 0xC6, 0xC5, 0xC3, 0xBC, 0xBA, 0xB9, 0xB6, 0xB5,
        0xB4, 0xB3, 0xB2, 0xB1, 0xAE, 0xAD, 0xAC, 0xAB, 0xAA, 0xA9,
        0xA7, 0xA6, 0xA5, 0xA3, 0x9F, 0x9E, 0x9D, 0x9B, 0x98, 0x97,
        0x90, 0x8F, 0x88, 0x84, 0x82, 0x81, 0x80, 0x7C, 0x7A, 0x79,
        0x76, 0x75, 0x74, 0x73, 0x72, 0x71, 0x6E, 0x6D, 0x6C, 0x6B,
        0x6A, 0x69, 0x67, 0x66, 0x65, 0x63, 0x5C, 0x5A, 0x59, 0x56,
        0x55, 0x54, 0x53, 0x52, 0x51, 0x4E, 0x4D, 0x4C, 0x4B, 0x4A,
        0x49, 0x47, 0x46, 0x45, 0x43, 0x3C, 0x3A, 0x39, 0x36, 0x35,
        0x34, 0x33, 0x32, 0x31, 0x2E, 0x2D, 0x2C, 0x2B, 0x2A, 0x29,
        0x27, 0x26, 0x25, 0x23, 0x1F, 0x1E, 0x1D, 0x1B, 0x18, 0x17,
        0x10, 0x0F, 0x08, 0x04, 0x02, 0x01
};

static void lpfc_disc_timeout_handler(struct lpfc_vport *);
static void lpfc_disc_flush_list(struct lpfc_vport *vport);
static void lpfc_unregister_fcfi_cmpl(struct lpfc_hba *, LPFC_MBOXQ_t *);
static int lpfc_fcf_inuse(struct lpfc_hba *);

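/**
 * lpfc_terminate_rport_io - Terminate outstanding I/O to a remote port
 * @rport: Pointer to the FC transport remote port object.
 *
 * Called to flush outstanding SCSI I/O before a remote port is torn
 * down. If the rport still maps to an active node with a valid SCSI ID,
 * all IOCBs queued to the FCP ring for that target are aborted.
 */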
void
lpfc_terminate_rport_io(struct fc_rport *rport)
{
        struct lpfc_rport_data *rdata;
        struct lpfc_nodelist *ndlp;
        struct lpfc_hba *phba;

        rdata = rport->dd_data;
        ndlp = rdata->pnode;

        if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
                if (rport->roles & FC_RPORT_ROLE_FCP_TARGET)
                        printk(KERN_ERR "Cannot find remote node"
                        " to terminate I/O Data x%x\n",
                        rport->port_id);
                return;
        }

        phba  = ndlp->phba;

        lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_RPORT,
                "rport terminate: sid:x%x did:x%x flg:x%x",
                ndlp->nlp_sid, ndlp->nlp_DID, ndlp->nlp_flag);

        if (ndlp->nlp_sid != NLP_NO_SID) {
                lpfc_sli_abort_iocb(ndlp->vport,
                        &phba->sli.sli3_ring[LPFC_FCP_RING],
                        ndlp->nlp_sid, 0, LPFC_CTX_TGT);
        }
}

/*
 * This function will be called by the FC transport when dev_loss_tmo
 * fires. Unless the vport is unloading, the node cleanup is deferred to
 * the worker thread by queueing an LPFC_EVT_DEV_LOSS event.
 */
void
lpfc_dev_loss_tmo_callbk(struct fc_rport *rport)
{
        struct lpfc_rport_data *rdata;
        struct lpfc_nodelist *ndlp;
        struct lpfc_vport *vport;
        struct Scsi_Host *shost;
        struct lpfc_hba   *phba;
        struct lpfc_work_evt *evtp;
        int  put_node;
        int  put_rport;

        rdata = rport->dd_data;
        ndlp = rdata->pnode;
        if (!ndlp || !NLP_CHK_NODE_ACT(ndlp))
                return;

        vport = ndlp->vport;
        phba  = vport->phba;

        lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT,
                "rport devlosscb: sid:x%x did:x%x flg:x%x",
                ndlp->nlp_sid, ndlp->nlp_DID, ndlp->nlp_flag);

        lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
                         "3181 dev_loss_callbk x%06x, rport %p flg x%x\n",
                         ndlp->nlp_DID, ndlp->rport, ndlp->nlp_flag);

        /* Don't defer this if we are in the process of deleting the vport
         * or unloading the driver. The unload will clean up the node
         * appropriately; we just need to clean up the ndlp rport info here.
         */
        if (vport->load_flag & FC_UNLOADING) {
                put_node = rdata->pnode != NULL;
                put_rport = ndlp->rport != NULL;
                rdata->pnode = NULL;
                ndlp->rport = NULL;
                if (put_node)
                        lpfc_nlp_put(ndlp);
                if (put_rport)
                        put_device(&rport->dev);
                return;
        }

        if (ndlp->nlp_state == NLP_STE_MAPPED_NODE)
                return;

        if (rport->port_name != wwn_to_u64(ndlp->nlp_portname.u.wwn))
                lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE,
                                "6789 rport name %llx != node port name %llx",
                                rport->port_name,
                                wwn_to_u64(ndlp->nlp_portname.u.wwn));

        evtp = &ndlp->dev_loss_evt;

        if (!list_empty(&evtp->evt_listp)) {
                lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE,
                                "6790 rport name %llx dev_loss_evt pending",
                                rport->port_name);
                return;
        }

        shost = lpfc_shost_from_vport(vport);
        spin_lock_irq(shost->host_lock);
        ndlp->nlp_flag |= NLP_IN_DEV_LOSS;
        spin_unlock_irq(shost->host_lock);

        /* We need to hold the node by incrementing the reference
         * count until this queued work is done
         */
        evtp->evt_arg1  = lpfc_nlp_get(ndlp);

        spin_lock_irq(&phba->hbalock);
        if (evtp->evt_arg1) {
                evtp->evt = LPFC_EVT_DEV_LOSS;
                list_add_tail(&evtp->evt_listp, &phba->work_list);
                lpfc_worker_wake_up(phba);
        }
        spin_unlock_irq(&phba->hbalock);

        return;
}

/**
 * lpfc_dev_loss_tmo_handler - Remote node devloss timeout handler
 * @ndlp: Pointer to remote node object.
 *
 * This function is called from the worker thread when the devloss timeout
 * timer expires. For an SLI4 host, this routine returns 1 when at least
 * one remote node, including this @ndlp, is still using the FCF; it
 * returns 0 when no remote node was using the FCF at the time the devloss
 * timeout fired for this @ndlp.
 **/
static int
lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
{
        struct lpfc_rport_data *rdata;
        struct fc_rport   *rport;
        struct lpfc_vport *vport;
        struct lpfc_hba   *phba;
        struct Scsi_Host  *shost;
        uint8_t *name;
        int  put_node;
        int warn_on = 0;
        int fcf_inuse = 0;

        rport = ndlp->rport;
        vport = ndlp->vport;
        shost = lpfc_shost_from_vport(vport);

        spin_lock_irq(shost->host_lock);
        ndlp->nlp_flag &= ~NLP_IN_DEV_LOSS;
        spin_unlock_irq(shost->host_lock);

        if (!rport)
                return fcf_inuse;

        name = (uint8_t *) &ndlp->nlp_portname;
        phba  = vport->phba;

        if (phba->sli_rev == LPFC_SLI_REV4)
                fcf_inuse = lpfc_fcf_inuse(phba);

        lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT,
                "rport devlosstmo:did:x%x type:x%x id:x%x",
                ndlp->nlp_DID, ndlp->nlp_type, rport->scsi_target_id);

        lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
                         "3182 dev_loss_tmo_handler x%06x, rport %p flg x%x\n",
                         ndlp->nlp_DID, ndlp->rport, ndlp->nlp_flag);

        /*
         * If lpfc_nlp_remove is reached with a dangling rport, it drops
         * the reference. To make sure that does not happen, clear the
         * rport pointer in the ndlp before lpfc_nlp_put.
         */
        rdata = rport->dd_data;

        /* Don't defer this if we are in the process of deleting the vport
         * or unloading the driver. The unload will clean up the node
         * appropriately; we just need to clean up the ndlp rport info here.
         */
        if (vport->load_flag & FC_UNLOADING) {
                if (ndlp->nlp_sid != NLP_NO_SID) {
                        /* flush the target */
                        lpfc_sli_abort_iocb(vport,
                                            &phba->sli.sli3_ring[LPFC_FCP_RING],
                                            ndlp->nlp_sid, 0, LPFC_CTX_TGT);
                }
                put_node = rdata->pnode != NULL;
                rdata->pnode = NULL;
                ndlp->rport = NULL;
                if (put_node)
                        lpfc_nlp_put(ndlp);
                put_device(&rport->dev);

                return fcf_inuse;
        }

        if (ndlp->nlp_state == NLP_STE_MAPPED_NODE) {
                lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
                                 "0284 Devloss timeout Ignored on "
                                 "WWPN %x:%x:%x:%x:%x:%x:%x:%x "
                                 "NPort x%x\n",
                                 *name, *(name+1), *(name+2), *(name+3),
                                 *(name+4), *(name+5), *(name+6), *(name+7),
                                 ndlp->nlp_DID);
                return fcf_inuse;
        }

        put_node = rdata->pnode != NULL;
        rdata->pnode = NULL;
        ndlp->rport = NULL;
        if (put_node)
                lpfc_nlp_put(ndlp);
        put_device(&rport->dev);

        if (ndlp->nlp_type & NLP_FABRIC)
                return fcf_inuse;

        if (ndlp->nlp_sid != NLP_NO_SID) {
                warn_on = 1;
                lpfc_sli_abort_iocb(vport, &phba->sli.sli3_ring[LPFC_FCP_RING],
                                    ndlp->nlp_sid, 0, LPFC_CTX_TGT);
        }

        if (warn_on) {
                lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
                                 "0203 Devloss timeout on "
                                 "WWPN %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x "
                                 "NPort x%06x Data: x%x x%x x%x\n",
                                 *name, *(name+1), *(name+2), *(name+3),
                                 *(name+4), *(name+5), *(name+6), *(name+7),
                                 ndlp->nlp_DID, ndlp->nlp_flag,
                                 ndlp->nlp_state, ndlp->nlp_rpi);
        } else {
                lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
                                 "0204 Devloss timeout on "
                                 "WWPN %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x "
                                 "NPort x%06x Data: x%x x%x x%x\n",
                                 *name, *(name+1), *(name+2), *(name+3),
                                 *(name+4), *(name+5), *(name+6), *(name+7),
                                 ndlp->nlp_DID, ndlp->nlp_flag,
                                 ndlp->nlp_state, ndlp->nlp_rpi);
        }

        if (!(ndlp->nlp_flag & NLP_DELAY_TMO) &&
            !(ndlp->nlp_flag & NLP_NPR_2B_DISC) &&
            (ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
            (ndlp->nlp_state != NLP_STE_REG_LOGIN_ISSUE) &&
            (ndlp->nlp_state != NLP_STE_PRLI_ISSUE))
                lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM);

        return fcf_inuse;
}

/**
 * lpfc_sli4_post_dev_loss_tmo_handler - SLI4 post devloss timeout handler
 * @phba: Pointer to hba context object.
 * @fcf_inuse: SLI4 FCF in-use state reported from devloss timeout handler.
 * @nlp_did: remote node identifier with devloss timeout.
 *
 * This function is called from the worker thread after invoking the devloss
 * timeout handler and releasing the reference count on the ndlp for which
 * the devloss timeout was handled on an SLI4 host. When this routine is
 * invoked for the devloss timeout of the last remote node that had been
 * using the FCF, it is guaranteed that no remote node is still using the
 * FCF. In that case, if the FIP engine is neither in the FCF table scan
 * process nor the roundrobin failover process, the in-use FCF is
 * unregistered. If the FIP engine is in FCF discovery, the devloss timeout
 * state is set so that either the FCF table scan process or the roundrobin
 * failover process will unregister the in-use FCF.
 **/
static void
lpfc_sli4_post_dev_loss_tmo_handler(struct lpfc_hba *phba, int fcf_inuse,
                                    uint32_t nlp_did)
{
        /* If the devloss timeout happened to a remote node while the FCF
         * was no longer in use, do nothing.
         */
        if (!fcf_inuse)
                return;

        if ((phba->hba_flag & HBA_FIP_SUPPORT) && !lpfc_fcf_inuse(phba)) {
                spin_lock_irq(&phba->hbalock);
                if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
                        if (phba->hba_flag & HBA_DEVLOSS_TMO) {
                                spin_unlock_irq(&phba->hbalock);
                                return;
                        }
                        phba->hba_flag |= HBA_DEVLOSS_TMO;
                        lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
                                        "2847 Last remote node (x%x) using "
                                        "FCF devloss tmo\n", nlp_did);
                }
                if (phba->fcf.fcf_flag & FCF_REDISC_PROG) {
                        spin_unlock_irq(&phba->hbalock);
                        lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
                                        "2868 Devloss tmo to FCF rediscovery "
                                        "in progress\n");
                        return;
                }
                if (!(phba->hba_flag & (FCF_TS_INPROG | FCF_RR_INPROG))) {
                        spin_unlock_irq(&phba->hbalock);
                        lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
                                        "2869 Devloss tmo to idle FIP engine, "
                                        "unreg in-use FCF and rescan.\n");
                        /* Unregister in-use FCF and rescan */
                        lpfc_unregister_fcf_rescan(phba);
                        return;
                }
                spin_unlock_irq(&phba->hbalock);
                if (phba->hba_flag & FCF_TS_INPROG)
                        lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
                                        "2870 FCF table scan in progress\n");
                if (phba->hba_flag & FCF_RR_INPROG)
                        lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
                                        "2871 FLOGI roundrobin FCF failover "
                                        "in progress\n");
        }
        lpfc_unregister_unused_fcf(phba);
}

/**
 * lpfc_alloc_fast_evt - Allocates data structure for posting event
 * @phba: Pointer to hba context object.
 *
 * This function is called from functions which need to post events
 * from interrupt context. It allocates the data structure required
 * for posting an event. It also keeps track of the number of pending
 * events and prevents an event storm when there are too many events.
 **/
struct lpfc_fast_path_event *
lpfc_alloc_fast_evt(struct lpfc_hba *phba) {
        struct lpfc_fast_path_event *ret;

        /* If there are a lot of fast events, do not exhaust memory */
        if (atomic_read(&phba->fast_event_count) > LPFC_MAX_EVT_COUNT)
                return NULL;

        ret = kzalloc(sizeof(struct lpfc_fast_path_event),
                        GFP_ATOMIC);
        if (ret) {
                atomic_inc(&phba->fast_event_count);
                INIT_LIST_HEAD(&ret->work_evt.evt_listp);
                ret->work_evt.evt = LPFC_EVT_FASTPATH_MGMT_EVT;
        }
        return ret;
}

/**
 * lpfc_free_fast_evt - Frees event data structure
 * @phba: Pointer to hba context object.
 * @evt:  Event object which needs to be freed.
 *
 * This function frees the data structure required for posting
 * events.
 **/
void
lpfc_free_fast_evt(struct lpfc_hba *phba,
                struct lpfc_fast_path_event *evt) {

        atomic_dec(&phba->fast_event_count);
        kfree(evt);
}

/**
 * lpfc_send_fastpath_evt - Posts events generated from fast path
 * @phba: Pointer to hba context object.
 * @evtp: Event data structure.
 *
 * This function is called from the worker thread when the interrupt
 * context needs to post an event. It posts the event to the fc
 * transport netlink interface.
 **/
static void
lpfc_send_fastpath_evt(struct lpfc_hba *phba,
                struct lpfc_work_evt *evtp)
{
        unsigned long evt_category, evt_sub_category;
        struct lpfc_fast_path_event *fast_evt_data;
        char *evt_data;
        uint32_t evt_data_size;
        struct Scsi_Host *shost;

        fast_evt_data = container_of(evtp, struct lpfc_fast_path_event,
                work_evt);

        evt_category = (unsigned long) fast_evt_data->un.fabric_evt.event_type;
        evt_sub_category = (unsigned long) fast_evt_data->un.
                        fabric_evt.subcategory;
        shost = lpfc_shost_from_vport(fast_evt_data->vport);
        if (evt_category == FC_REG_FABRIC_EVENT) {
                if (evt_sub_category == LPFC_EVENT_FCPRDCHKERR) {
                        evt_data = (char *) &fast_evt_data->un.read_check_error;
                        evt_data_size = sizeof(fast_evt_data->un.
                                read_check_error);
                } else if ((evt_sub_category == LPFC_EVENT_FABRIC_BUSY) ||
                        (evt_sub_category == LPFC_EVENT_PORT_BUSY)) {
                        evt_data = (char *) &fast_evt_data->un.fabric_evt;
                        evt_data_size = sizeof(fast_evt_data->un.fabric_evt);
                } else {
                        lpfc_free_fast_evt(phba, fast_evt_data);
                        return;
                }
        } else if (evt_category == FC_REG_SCSI_EVENT) {
                switch (evt_sub_category) {
                case LPFC_EVENT_QFULL:
                case LPFC_EVENT_DEVBSY:
                        evt_data = (char *) &fast_evt_data->un.scsi_evt;
                        evt_data_size = sizeof(fast_evt_data->un.scsi_evt);
                        break;
                case LPFC_EVENT_CHECK_COND:
                        evt_data = (char *) &fast_evt_data->un.check_cond_evt;
                        evt_data_size =  sizeof(fast_evt_data->un.
                                check_cond_evt);
                        break;
                case LPFC_EVENT_VARQUEDEPTH:
                        evt_data = (char *) &fast_evt_data->un.queue_depth_evt;
                        evt_data_size = sizeof(fast_evt_data->un.
                                queue_depth_evt);
                        break;
                default:
                        lpfc_free_fast_evt(phba, fast_evt_data);
                        return;
                }
        } else {
                lpfc_free_fast_evt(phba, fast_evt_data);
                return;
        }

        if (phba->cfg_enable_fc4_type != LPFC_ENABLE_NVME)
                fc_host_post_vendor_event(shost,
                        fc_get_event_number(),
                        evt_data_size,
                        evt_data,
                        LPFC_NL_VENDOR_ID);

        lpfc_free_fast_evt(phba, fast_evt_data);
        return;
}

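/**
 * lpfc_work_list_done - Process all events on the worker work list
 * @phba: Pointer to hba context object.
 *
 * This function drains phba->work_list under the hbalock, dispatching
 * each queued event: ELS retry delays, devloss timeouts, online/offline
 * transitions, warm starts, kills, fast-path events, and HBA resets.
 * Events that are not embedded in another object are freed once handled.
 */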
static void
lpfc_work_list_done(struct lpfc_hba *phba)
{
        struct lpfc_work_evt  *evtp = NULL;
        struct lpfc_nodelist  *ndlp;
        int free_evt;
        int fcf_inuse;
        uint32_t nlp_did;

        spin_lock_irq(&phba->hbalock);
        while (!list_empty(&phba->work_list)) {
                list_remove_head((&phba->work_list), evtp, typeof(*evtp),
                                 evt_listp);
                spin_unlock_irq(&phba->hbalock);
                free_evt = 1;
                switch (evtp->evt) {
                case LPFC_EVT_ELS_RETRY:
                        ndlp = (struct lpfc_nodelist *) (evtp->evt_arg1);
                        lpfc_els_retry_delay_handler(ndlp);
                        free_evt = 0; /* evt is part of ndlp */
                        /* decrement the node reference count held
                         * for this queued work
                         */
                        lpfc_nlp_put(ndlp);
                        break;
                case LPFC_EVT_DEV_LOSS:
                        ndlp = (struct lpfc_nodelist *)(evtp->evt_arg1);
                        fcf_inuse = lpfc_dev_loss_tmo_handler(ndlp);
                        free_evt = 0;
                        /* decrement the node reference count held for
                         * this queued work
                         */
                        nlp_did = ndlp->nlp_DID;
                        lpfc_nlp_put(ndlp);
                        if (phba->sli_rev == LPFC_SLI_REV4)
                                lpfc_sli4_post_dev_loss_tmo_handler(phba,
                                                                    fcf_inuse,
                                                                    nlp_did);
                        break;
                case LPFC_EVT_ONLINE:
                        if (phba->link_state < LPFC_LINK_DOWN)
                                *(int *) (evtp->evt_arg1) = lpfc_online(phba);
                        else
                                *(int *) (evtp->evt_arg1) = 0;
                        complete((struct completion *)(evtp->evt_arg2));
                        break;
                case LPFC_EVT_OFFLINE_PREP:
                        if (phba->link_state >= LPFC_LINK_DOWN)
                                lpfc_offline_prep(phba, LPFC_MBX_WAIT);
                        *(int *)(evtp->evt_arg1) = 0;
                        complete((struct completion *)(evtp->evt_arg2));
                        break;
                case LPFC_EVT_OFFLINE:
                        lpfc_offline(phba);
                        lpfc_sli_brdrestart(phba);
                        *(int *)(evtp->evt_arg1) =
                                lpfc_sli_brdready(phba, HS_FFRDY | HS_MBRDY);
                        lpfc_unblock_mgmt_io(phba);
                        complete((struct completion *)(evtp->evt_arg2));
                        break;
                case LPFC_EVT_WARM_START:
                        lpfc_offline(phba);
                        lpfc_reset_barrier(phba);
                        lpfc_sli_brdreset(phba);
                        lpfc_hba_down_post(phba);
                        *(int *)(evtp->evt_arg1) =
                                lpfc_sli_brdready(phba, HS_MBRDY);
                        lpfc_unblock_mgmt_io(phba);
                        complete((struct completion *)(evtp->evt_arg2));
                        break;
                case LPFC_EVT_KILL:
                        lpfc_offline(phba);
                        *(int *)(evtp->evt_arg1)
                                = (phba->pport->stopped)
                                        ? 0 : lpfc_sli_brdkill(phba);
                        lpfc_unblock_mgmt_io(phba);
                        complete((struct completion *)(evtp->evt_arg2));
                        break;
                case LPFC_EVT_FASTPATH_MGMT_EVT:
                        lpfc_send_fastpath_evt(phba, evtp);
                        free_evt = 0;
                        break;
                case LPFC_EVT_RESET_HBA:
                        if (!(phba->pport->load_flag & FC_UNLOADING))
                                lpfc_reset_hba(phba);
                        break;
                }
                if (free_evt)
                        kfree(evtp);
                spin_lock_irq(&phba->hbalock);
        }
        spin_unlock_irq(&phba->hbalock);

}

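/**
 * lpfc_work_done - Main worker thread event dispatcher
 * @phba: Pointer to hba context object.
 *
 * This function snapshots and clears the pending host attention bits,
 * then handles error, mailbox, and link attention events, SLI4 async
 * events, per-vport timer events, and slow (ELS) ring events before
 * finally processing the worker event list.
 */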
static void
lpfc_work_done(struct lpfc_hba *phba)
{
        struct lpfc_sli_ring *pring;
        uint32_t ha_copy, status, control, work_port_events;
        struct lpfc_vport **vports;
        struct lpfc_vport *vport;
        int i;

        spin_lock_irq(&phba->hbalock);
        ha_copy = phba->work_ha;
        phba->work_ha = 0;
        spin_unlock_irq(&phba->hbalock);

        /* First, try to post the next mailbox command to SLI4 device */
        if (phba->pci_dev_grp == LPFC_PCI_DEV_OC)
                lpfc_sli4_post_async_mbox(phba);

        if (ha_copy & HA_ERATT)
                /* Handle the error attention event */
                lpfc_handle_eratt(phba);

        if (ha_copy & HA_MBATT)
                lpfc_sli_handle_mb_event(phba);

        if (ha_copy & HA_LATT)
                lpfc_handle_latt(phba);

        /* Process SLI4 events */
        if (phba->pci_dev_grp == LPFC_PCI_DEV_OC) {
                if (phba->hba_flag & HBA_RRQ_ACTIVE)
                        lpfc_handle_rrq_active(phba);
                if (phba->hba_flag & FCP_XRI_ABORT_EVENT)
                        lpfc_sli4_fcp_xri_abort_event_proc(phba);
                if (phba->hba_flag & ELS_XRI_ABORT_EVENT)
                        lpfc_sli4_els_xri_abort_event_proc(phba);
                if (phba->hba_flag & ASYNC_EVENT)
                        lpfc_sli4_async_event_proc(phba);
                if (phba->hba_flag & HBA_POST_RECEIVE_BUFFER) {
                        spin_lock_irq(&phba->hbalock);
                        phba->hba_flag &= ~HBA_POST_RECEIVE_BUFFER;
                        spin_unlock_irq(&phba->hbalock);
                        lpfc_sli_hbqbuf_add_hbqs(phba, LPFC_ELS_HBQ);
                }
                if (phba->fcf.fcf_flag & FCF_REDISC_EVT)
                        lpfc_sli4_fcf_redisc_event_proc(phba);
        }

        vports = lpfc_create_vport_work_array(phba);
        if (vports != NULL)
                for (i = 0; i <= phba->max_vports; i++) {
                        /*
                         * We could have no vports in array if unloading, so if
                         * this happens then just use the pport
                         */
                        if (vports[i] == NULL && i == 0)
                                vport = phba->pport;
                        else
                                vport = vports[i];
                        if (vport == NULL)
                                break;
                        spin_lock_irq(&vport->work_port_lock);
                        work_port_events = vport->work_port_events;
                        vport->work_port_events &= ~work_port_events;
                        spin_unlock_irq(&vport->work_port_lock);
                        if (work_port_events & WORKER_DISC_TMO)
                                lpfc_disc_timeout_handler(vport);
                        if (work_port_events & WORKER_ELS_TMO)
                                lpfc_els_timeout_handler(vport);
                        if (work_port_events & WORKER_HB_TMO)
                                lpfc_hb_timeout_handler(phba);
                        if (work_port_events & WORKER_MBOX_TMO)
                                lpfc_mbox_timeout_handler(phba);
                        if (work_port_events & WORKER_FABRIC_BLOCK_TMO)
                                lpfc_unblock_fabric_iocbs(phba);
                        if (work_port_events & WORKER_RAMP_DOWN_QUEUE)
                                lpfc_ramp_down_queue_handler(phba);
                        if (work_port_events & WORKER_DELAYED_DISC_TMO)
                                lpfc_delayed_disc_timeout_handler(vport);
                }
        lpfc_destroy_vport_work_array(phba, vports);

        pring = lpfc_phba_elsring(phba);
        status = (ha_copy & (HA_RXMASK  << (4*LPFC_ELS_RING)));
        status >>= (4*LPFC_ELS_RING);
        if (pring && (status & HA_RXMASK ||
                      pring->flag & LPFC_DEFERRED_RING_EVENT ||
                      phba->hba_flag & HBA_SP_QUEUE_EVT)) {
                if (pring->flag & LPFC_STOP_IOCB_EVENT) {
                        pring->flag |= LPFC_DEFERRED_RING_EVENT;
                        /* Preserve legacy behavior. */
                        if (!(phba->hba_flag & HBA_SP_QUEUE_EVT))
                                set_bit(LPFC_DATA_READY, &phba->data_flags);
                } else {
                        if (phba->link_state >= LPFC_LINK_UP ||
                            phba->link_flag & LS_MDS_LOOPBACK) {
                                pring->flag &= ~LPFC_DEFERRED_RING_EVENT;
                                lpfc_sli_handle_slow_ring_event(phba, pring,
                                                                (status &
                                                                HA_RXMASK));
                        }
                }
                if (phba->sli_rev == LPFC_SLI_REV4)
                        lpfc_drain_txq(phba);
                /*
                 * Turn on Ring interrupts
                 */
                if (phba->sli_rev <= LPFC_SLI_REV3) {
                        spin_lock_irq(&phba->hbalock);
                        control = readl(phba->HCregaddr);
                        if (!(control & (HC_R0INT_ENA << LPFC_ELS_RING))) {
                                lpfc_debugfs_slow_ring_trc(phba,
                                        "WRK Enable ring: cntl:x%x hacopy:x%x",
                                        control, ha_copy, 0);

                                control |= (HC_R0INT_ENA << LPFC_ELS_RING);
                                writel(control, phba->HCregaddr);
                                readl(phba->HCregaddr); /* flush */
                        } else {
                                lpfc_debugfs_slow_ring_trc(phba,
                                        "WRK Ring ok:     cntl:x%x hacopy:x%x",
                                        control, ha_copy, 0);
                        }
                        spin_unlock_irq(&phba->hbalock);
                }
        }
        lpfc_work_list_done(phba);
}

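/**
 * lpfc_do_work - Worker thread entry point
 * @p: Pointer to hba context object.
 *
 * This function loops until the kthread is asked to stop, sleeping on
 * phba->work_waitq until the LPFC_DATA_READY flag is set, then attending
 * pending work via lpfc_work_done(). A signal wakeup terminates the
 * thread.
 */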
int
lpfc_do_work(void *p)
{
        struct lpfc_hba *phba = p;
        int rc;

        set_user_nice(current, -20);
        current->flags |= PF_NOFREEZE;
        phba->data_flags = 0;

        while (!kthread_should_stop()) {
                /* wait and check worker queue activities */
                rc = wait_event_interruptible(phba->work_waitq,
                                        (test_and_clear_bit(LPFC_DATA_READY,
                                                            &phba->data_flags)
                                         || kthread_should_stop()));
                /* Signal wakeup shall terminate the worker thread */
                if (rc) {
                        lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
                                        "0433 Wakeup on signal: rc=x%x\n", rc);
                        break;
                }

                /* Attend pending lpfc data processing */
                lpfc_work_done(phba);
        }
        phba->worker_thread = NULL;
        lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
                        "0432 Worker thread stopped.\n");
        return 0;
}

/*
 * This is only called to handle FC worker events. Since this is a rare
 * occurrence, we allocate a struct lpfc_work_evt structure here instead of
 * embedding it in the IOCB.
 */
int
lpfc_workq_post_event(struct lpfc_hba *phba, void *arg1, void *arg2,
                      uint32_t evt)
{
        struct lpfc_work_evt  *evtp;
        unsigned long flags;

        /*
         * All Mailbox completions and LPFC_ELS_RING rcv ring IOCB events will
         * be queued to worker thread for processing
         */
        evtp = kmalloc(sizeof(struct lpfc_work_evt), GFP_ATOMIC);
        if (!evtp)
                return 0;

        evtp->evt_arg1  = arg1;
        evtp->evt_arg2  = arg2;
        evtp->evt       = evt;

        spin_lock_irqsave(&phba->hbalock, flags);
        list_add_tail(&evtp->evt_listp, &phba->work_list);
        spin_unlock_irqrestore(&phba->hbalock, flags);

        lpfc_worker_wake_up(phba);

        return 1;
}

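/**
 * lpfc_cleanup_rpis - Recover or remove all nodes on a vport
 * @vport: Pointer to virtual port object.
 * @remove: If non-zero, nodes are removed; otherwise they are recovered.
 *
 * This function walks the vport node list, unregisters RPIs where the
 * SLI3 vport-teardown option or an NPIV NameServer node requires it, and
 * runs each node through the discovery state machine with either a
 * DEVICE_RM or DEVICE_RECOVERY event. Fabric nodes are left alone on
 * link down for pre-SLI4 hosts. With vport teardown enabled, the VPI is
 * also unregistered and flagged for re-registration.
 */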
void
lpfc_cleanup_rpis(struct lpfc_vport *vport, int remove)
{
        struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
        struct lpfc_hba  *phba = vport->phba;
        struct lpfc_nodelist *ndlp, *next_ndlp;

        list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
                if (!NLP_CHK_NODE_ACT(ndlp))
                        continue;
                if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
                        continue;
                if ((phba->sli3_options & LPFC_SLI3_VPORT_TEARDOWN) ||
                        ((vport->port_type == LPFC_NPIV_PORT) &&
                        (ndlp->nlp_DID == NameServer_DID)))
                        lpfc_unreg_rpi(vport, ndlp);

                /* Leave Fabric nodes alone on link down */
                if ((phba->sli_rev < LPFC_SLI_REV4) &&
                    (!remove && ndlp->nlp_type & NLP_FABRIC))
                        continue;
                lpfc_disc_state_machine(vport, ndlp, NULL,
                                        remove
                                        ? NLP_EVT_DEVICE_RM
                                        : NLP_EVT_DEVICE_RECOVERY);
        }
        if (phba->sli3_options & LPFC_SLI3_VPORT_TEARDOWN) {
                if (phba->sli_rev == LPFC_SLI_REV4)
                        lpfc_sli4_unreg_all_rpis(vport);
                lpfc_mbx_unreg_vpi(vport);
                spin_lock_irq(shost->host_lock);
                vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
                spin_unlock_irq(shost->host_lock);
        }
}

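/**
 * lpfc_port_link_failure - Handle link failure on a vport
 * @vport: Pointer to virtual port object.
 *
 * This function sets the transport port state to LINKDOWN and cleans up
 * outstanding received buffers, RSCN activity, and ELS commands, then
 * recovers the vport's RPIs and cancels the discovery timer.
 */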
void
lpfc_port_link_failure(struct lpfc_vport *vport)
{
        lpfc_vport_set_state(vport, FC_VPORT_LINKDOWN);

        /* Cleanup any outstanding received buffers */
        lpfc_cleanup_rcv_buffers(vport);

        /* Cleanup any outstanding RSCN activity */
        lpfc_els_flush_rscn(vport);

        /* Cleanup any outstanding ELS commands */
        lpfc_els_flush_cmd(vport);

        lpfc_cleanup_rpis(vport, 0);

        /* Turn off discovery timer if it's running */
        lpfc_can_disctmo(vport);
}

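/**
 * lpfc_linkdown_port - Mark a virtual port link down
 * @vport: Pointer to virtual port object.
 *
 * This function posts an FCH_EVT_LINKDOWN event to the FC transport
 * (unless the port is NVMe-only), performs the per-port link failure
 * cleanup, and stops any delayed Nport discovery.
 */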
void
lpfc_linkdown_port(struct lpfc_vport *vport)
{
        struct lpfc_hba  *phba = vport->phba;
        struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);

        if (phba->cfg_enable_fc4_type != LPFC_ENABLE_NVME)
                fc_host_post_event(shost, fc_get_event_number(),
                                   FCH_EVT_LINKDOWN, 0);

        lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
                "Link Down:       state:x%x rtry:x%x flg:x%x",
                vport->port_state, vport->fc_ns_retry, vport->fc_flag);

        lpfc_port_link_failure(vport);

        /* Stop delayed Nport discovery */
        spin_lock_irq(shost->host_lock);
        vport->fc_flag &= ~FC_DISC_DELAYED;
        spin_unlock_irq(shost->host_lock);
        del_timer_sync(&vport->delayed_disc_tmo);
}

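/**
 * lpfc_linkdown - Handle an HBA-wide link down event
 * @phba: Pointer to hba context object.
 *
 * This function blocks SCSI stack I/O, marks the link state down, issues
 * a link-down event to every vport (updating NVMe local/target ports as
 * needed), cleans up the firmware's default RPIs with an UNREG_DID
 * mailbox command, and, in pt2pt mode, issues CONFIG_LINK to set up the
 * myDID for the next link up before clearing the pt2pt flags.
 */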
int
lpfc_linkdown(struct lpfc_hba *phba)
{
        struct lpfc_vport *vport = phba->pport;
        struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
        struct lpfc_vport **vports;
        LPFC_MBOXQ_t          *mb;
        int i;

        if (phba->link_state == LPFC_LINK_DOWN)
                return 0;

        /* Block all SCSI stack I/Os */
        lpfc_scsi_dev_block(phba);

        spin_lock_irq(&phba->hbalock);
        phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_SCAN_DONE);
        spin_unlock_irq(&phba->hbalock);
        if (phba->link_state > LPFC_LINK_DOWN) {
                phba->link_state = LPFC_LINK_DOWN;
                spin_lock_irq(shost->host_lock);
                phba->pport->fc_flag &= ~FC_LBIT;
                spin_unlock_irq(shost->host_lock);
        }
        vports = lpfc_create_vport_work_array(phba);
        if (vports != NULL) {
                for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
                        /* Issue a LINK DOWN event to all nodes */
                        lpfc_linkdown_port(vports[i]);

                        vports[i]->fc_myDID = 0;

                        if ((phba->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) ||
                            (phba->cfg_enable_fc4_type == LPFC_ENABLE_NVME)) {
                                if (phba->nvmet_support)
                                        lpfc_nvmet_update_targetport(phba);
                                else
                                        lpfc_nvme_update_localport(vports[i]);
                        }
                }
        }
        lpfc_destroy_vport_work_array(phba, vports);
        /* Clean up any firmware default rpi's */
        mb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
        if (mb) {
                lpfc_unreg_did(phba, 0xffff, LPFC_UNREG_ALL_DFLT_RPIS, mb);
                mb->vport = vport;
                mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
                if (lpfc_sli_issue_mbox(phba, mb, MBX_NOWAIT)
                    == MBX_NOT_FINISHED) {
                        mempool_free(mb, phba->mbox_mem_pool);
                }
        }

        /* Setup myDID for link up if we are in pt2pt mode */
        if (phba->pport->fc_flag & FC_PT2PT) {
                mb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
                if (mb) {
                        lpfc_config_link(phba, mb);
                        mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
                        mb->vport = vport;
                        if (lpfc_sli_issue_mbox(phba, mb, MBX_NOWAIT)
                            == MBX_NOT_FINISHED) {
                                mempool_free(mb, phba->mbox_mem_pool);
                        }
                }
                spin_lock_irq(shost->host_lock);
                phba->pport->fc_flag &= ~(FC_PT2PT | FC_PT2PT_PLOGI);
                spin_unlock_irq(shost->host_lock);
        }
        return 0;
}

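/**
 * lpfc_linkup_cleanup_nodes - Clean up vport nodes on link up
 * @vport: Pointer to virtual port object.
 *
 * This function clears the FC4 types on every node and unregisters RPIs
 * so that fabric nodes return to NPR state and nodes not marked for
 * ADISC fail their outstanding I/O before a new PLOGI.
 */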
static void
lpfc_linkup_cleanup_nodes(struct lpfc_vport *vport)
{
        struct lpfc_nodelist *ndlp;

        list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
                ndlp->nlp_fc4_type &= ~(NLP_FC4_FCP | NLP_FC4_NVME);
                if (!NLP_CHK_NODE_ACT(ndlp))
                        continue;
                if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
                        continue;
                if (ndlp->nlp_type & NLP_FABRIC) {
                        /* On Linkup it's safe to clean up the ndlp
                         * from Fabric connections.
                         */
                        if (ndlp->nlp_DID != Fabric_DID)
                                lpfc_unreg_rpi(vport, ndlp);
                        lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
                } else if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) {
                        /* Fail outstanding IO now since device is
                         * marked for PLOGI.
                         */
                        lpfc_unreg_rpi(vport, ndlp);
                }
        }
}

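/**
 * lpfc_linkup_port - Mark a virtual port link up
 * @vport: Pointer to virtual port object.
 *
 * This function posts an FCH_EVT_LINKUP event to the FC transport
 * (unless the port is NVMe-only), resets the discovery flags, and, if
 * the loop bit (FC_LBIT) was seen, cleans up the vport's nodes. When
 * NPIV is not enabled, only the physical port is brought up.
 */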
static void
lpfc_linkup_port(struct lpfc_vport *vport)
{
        struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
        struct lpfc_hba  *phba = vport->phba;

        if ((vport->load_flag & FC_UNLOADING) != 0)
                return;

        lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
                "Link Up:         top:x%x speed:x%x flg:x%x",
                phba->fc_topology, phba->fc_linkspeed, phba->link_flag);

        /* If NPIV is not enabled, only bring the physical port up */
        if (!(phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
                (vport != phba->pport))
                return;

        if (phba->cfg_enable_fc4_type != LPFC_ENABLE_NVME)
                fc_host_post_event(shost, fc_get_event_number(),
                                   FCH_EVT_LINKUP, 0);

        spin_lock_irq(shost->host_lock);
        vport->fc_flag &= ~(FC_PT2PT | FC_PT2PT_PLOGI | FC_ABORT_DISCOVERY |
                            FC_RSCN_MODE | FC_NLP_MORE | FC_RSCN_DISCOVERY);
        vport->fc_flag |= FC_NDISC_ACTIVE;
        vport->fc_ns_retry = 0;
        spin_unlock_irq(shost->host_lock);

        if (vport->fc_flag & FC_LBIT)
                lpfc_linkup_cleanup_nodes(vport);

}

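/**
 * lpfc_linkup - Handle an HBA-wide link up event
 * @phba: Pointer to hba context object.
 *
 * This function marks the HBA link state up, unblocks any blocked
 * fabric iocbs, and runs the link-up processing on each vport.
 */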
static int
lpfc_linkup(struct lpfc_hba *phba)
{
        struct lpfc_vport **vports;
        int i;

        phba->link_state = LPFC_LINK_UP;

        /* Unblock fabric iocbs if they are blocked */
        clear_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags);
        del_timer_sync(&phba->fabric_block_timer);

        vports = lpfc_create_vport_work_array(phba);
        if (vports != NULL)
                for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
                        lpfc_linkup_port(vports[i]);
        lpfc_destroy_vport_work_array(phba, vports);

        return 0;
}

/*
 * This routine handles processing a CLEAR_LA mailbox
 * command upon completion. It is set up in the LPFC_MBOXQ
 * as the completion routine when the command is
 * handed off to the SLI layer. SLI3 only.
 */
static void
lpfc_mbx_cmpl_clear_la(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
        struct lpfc_vport *vport = pmb->vport;
        struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
        struct lpfc_sli   *psli = &phba->sli;
        MAILBOX_t *mb = &pmb->u.mb;
        uint32_t control;

        /* Since we don't do discovery right now, turn these off here */
        psli->sli3_ring[LPFC_EXTRA_RING].flag &= ~LPFC_STOP_IOCB_EVENT;
        psli->sli3_ring[LPFC_FCP_RING].flag &= ~LPFC_STOP_IOCB_EVENT;

        /* Check for error */
        if ((mb->mbxStatus) && (mb->mbxStatus != 0x1601)) {
                /* CLEAR_LA mbox error <mbxStatus> state <hba_state> */
                lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
                                 "0320 CLEAR_LA mbxStatus error x%x hba "
                                 "state x%x\n",
                                 mb->mbxStatus, vport->port_state);
                phba->link_state = LPFC_HBA_ERROR;
                goto out;
        }

        if (vport->port_type == LPFC_PHYSICAL_PORT)
                phba->link_state = LPFC_HBA_READY;

        spin_lock_irq(&phba->hbalock);
        psli->sli_flag |= LPFC_PROCESS_LA;
        control = readl(phba->HCregaddr);
        control |= HC_LAINT_ENA;
        writel(control, phba->HCregaddr);
        readl(phba->HCregaddr); /* flush */
        spin_unlock_irq(&phba->hbalock);
        mempool_free(pmb, phba->mbox_mem_pool);
        return;

out:
        /* Device Discovery completes */
        lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
                         "0225 Device Discovery completes\n");
        mempool_free(pmb, phba->mbox_mem_pool);

        spin_lock_irq(shost->host_lock);
        vport->fc_flag &= ~FC_ABORT_DISCOVERY;
        spin_unlock_irq(shost->host_lock);

        lpfc_can_disctmo(vport);

        /* turn on Link Attention interrupts */

        spin_lock_irq(&phba->hbalock);
        psli->sli_flag |= LPFC_PROCESS_LA;
        control = readl(phba->HCregaddr);
        control |= HC_LAINT_ENA;
        writel(control, phba->HCregaddr);
        readl(phba->HCregaddr); /* flush */
        spin_unlock_irq(&phba->hbalock);

        return;
}


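/**
 * lpfc_mbx_cmpl_local_config_link - CONFIG_LINK mailbox completion handler
 * @phba: Pointer to hba context object.
 * @pmb: Pointer to the completed mailbox object.
 *
 * On success this function skips discovery for the SLI4 FC loopback
 * diagnostic test; on a public loop without the FC_LBIT it arms the
 * discovery timer to wait for FAN; otherwise it starts discovery by
 * issuing the initial FLOGI, applying the BB_SCN parameters when BB
 * credit recovery is enabled, or lpfc_disc_start() for pt2pt. On mailbox
 * error it takes the link down and issues CLEAR_LA.
 */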
void
lpfc_mbx_cmpl_local_config_link(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
        struct lpfc_vport *vport = pmb->vport;
        uint8_t bbscn = 0;

        if (pmb->u.mb.mbxStatus)
                goto out;

        mempool_free(pmb, phba->mbox_mem_pool);

        /* don't perform discovery for SLI4 loopback diagnostic test */
        if ((phba->sli_rev == LPFC_SLI_REV4) &&
            !(phba->hba_flag & HBA_FCOE_MODE) &&
            (phba->link_flag & LS_LOOPBACK_MODE))
                return;

        if (phba->fc_topology == LPFC_TOPOLOGY_LOOP &&
            vport->fc_flag & FC_PUBLIC_LOOP &&
            !(vport->fc_flag & FC_LBIT)) {
                        /* Need to wait for FAN - use discovery timer
                         * for timeout.  port_state is identically
                         * LPFC_LOCAL_CFG_LINK while waiting for FAN
                         */
                        lpfc_set_disctmo(vport);
                        return;
        }

        /* Start discovery by sending a FLOGI. port_state is identically
         * LPFC_FLOGI while waiting for FLOGI cmpl
         */
        if (vport->port_state != LPFC_FLOGI) {
                if (phba->bbcredit_support && phba->cfg_enable_bbcr) {
                        bbscn = bf_get(lpfc_bbscn_def,
                                       &phba->sli4_hba.bbscn_params);
                        vport->fc_sparam.cmn.bbRcvSizeMsb &= 0xf;
                        vport->fc_sparam.cmn.bbRcvSizeMsb |= (bbscn << 4);
                }
                lpfc_initial_flogi(vport);
        } else if (vport->fc_flag & FC_PT2PT) {
                lpfc_disc_start(vport);
        }
        return;

out:
        lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
                         "0306 CONFIG_LINK mbxStatus error x%x "
                         "HBA state x%x\n",
                         pmb->u.mb.mbxStatus, vport->port_state);
        mempool_free(pmb, phba->mbox_mem_pool);

        lpfc_linkdown(phba);

        lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
                         "0200 CONFIG_LINK bad hba state x%x\n",
                         vport->port_state);

        lpfc_issue_clear_la(phba, vport);
        return;
}

/**
 * lpfc_sli4_clear_fcf_rr_bmask - Reset the FCF roundrobin bitmask
 * @phba: pointer to the struct lpfc_hba for this port.
 *
 * This function resets the roundrobin bit mask and clears the
 * fcf priority list. The list deletions are done while holding the
 * hbalock. The ON_LIST flag and the FLOGI_FAILED flags are cleared
 * from the lpfc_fcf_pri record.
 **/
void
lpfc_sli4_clear_fcf_rr_bmask(struct lpfc_hba *phba)
{
        struct lpfc_fcf_pri *fcf_pri;
        struct lpfc_fcf_pri *next_fcf_pri;
        memset(phba->fcf.fcf_rr_bmask, 0, sizeof(*phba->fcf.fcf_rr_bmask));
        spin_lock_irq(&phba->hbalock);
        list_for_each_entry_safe(fcf_pri, next_fcf_pri,
                                &phba->fcf.fcf_pri_list, list) {
                list_del_init(&fcf_pri->list);
                fcf_pri->fcf_rec.flag = 0;
        }
        spin_unlock_irq(&phba->hbalock);
}
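
/**
 * lpfc_mbx_cmpl_reg_fcfi - REG_FCFI mailbox completion handler
 * @phba: Pointer to hba context object.
 * @mboxq: Pointer to the completed mailbox object.
 *
 * On success this function records the registered FCFI and sets the
 * FCF_REGISTERED flag. Unless a pending FCoE event forces a new FCF
 * table scan, it then marks the scan complete and, if the port is not
 * already in FLOGI, starts roundrobin FLOGI by issuing INIT_VFI. On
 * failure the roundrobin-in-progress flag is cleared.
 */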
static void
lpfc_mbx_cmpl_reg_fcfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
        struct lpfc_vport *vport = mboxq->vport;

        if (mboxq->u.mb.mbxStatus) {
                lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
                         "2017 REG_FCFI mbxStatus error x%x "
                         "HBA state x%x\n",
                         mboxq->u.mb.mbxStatus, vport->port_state);
                goto fail_out;
        }

        /* Start FCoE discovery by sending a FLOGI. */
        phba->fcf.fcfi = bf_get(lpfc_reg_fcfi_fcfi, &mboxq->u.mqe.un.reg_fcfi);
        /* Set the FCFI registered flag */
        spin_lock_irq(&phba->hbalock);
        phba->fcf.fcf_flag |= FCF_REGISTERED;
        spin_unlock_irq(&phba->hbalock);

        /* If there is a pending FCoE event, restart FCF table scan. */
        if ((!(phba->hba_flag & FCF_RR_INPROG)) &&
                lpfc_check_pending_fcoe_event(phba, LPFC_UNREG_FCF))
                goto fail_out;

        /* Mark successful completion of FCF table scan */
        spin_lock_irq(&phba->hbalock);
        phba->fcf.fcf_flag |= (FCF_SCAN_DONE | FCF_IN_USE);
        phba->hba_flag &= ~FCF_TS_INPROG;
        if (vport->port_state != LPFC_FLOGI) {
                phba->hba_flag |= FCF_RR_INPROG;
                spin_unlock_irq(&phba->hbalock);
                lpfc_issue_init_vfi(vport);
                goto out;
        }
        spin_unlock_irq(&phba->hbalock);
        goto out;

fail_out:
        spin_lock_irq(&phba->hbalock);
        phba->hba_flag &= ~FCF_RR_INPROG;
        spin_unlock_irq(&phba->hbalock);
out:
        mempool_free(mboxq, phba->mbox_mem_pool);
}

/**
 * lpfc_fab_name_match - Check if the fcf fabric name matches.
 * @fab_name: pointer to fabric name.
 * @new_fcf_record: pointer to fcf record.
 *
 * This routine compares the fcf record's fabric name with the provided
 * fabric name. If the fabric names are identical this function
 * returns 1, else it returns 0.
 **/
static uint32_t
lpfc_fab_name_match(uint8_t *fab_name, struct fcf_record *new_fcf_record)
{
        if (fab_name[0] != bf_get(lpfc_fcf_record_fab_name_0, new_fcf_record))
                return 0;
        if (fab_name[1] != bf_get(lpfc_fcf_record_fab_name_1, new_fcf_record))
                return 0;
        if (fab_name[2] != bf_get(lpfc_fcf_record_fab_name_2, new_fcf_record))
                return 0;
        if (fab_name[3] != bf_get(lpfc_fcf_record_fab_name_3, new_fcf_record))
                return 0;
        if (fab_name[4] != bf_get(lpfc_fcf_record_fab_name_4, new_fcf_record))
                return 0;
        if (fab_name[5] != bf_get(lpfc_fcf_record_fab_name_5, new_fcf_record))
                return 0;
        if (fab_name[6] != bf_get(lpfc_fcf_record_fab_name_6, new_fcf_record))
                return 0;
        if (fab_name[7] != bf_get(lpfc_fcf_record_fab_name_7, new_fcf_record))
                return 0;
        return 1;
}

/**
 * lpfc_sw_name_match - Check if the fcf switch name matches.
 * @sw_name: pointer to switch name.
 * @new_fcf_record: pointer to fcf record.
 *
 * This routine compares the fcf record's switch name with the provided
 * switch name. If the switch names are identical this function
 * returns 1, else it returns 0.
 **/
static uint32_t
lpfc_sw_name_match(uint8_t *sw_name, struct fcf_record *new_fcf_record)
{
        if (sw_name[0] != bf_get(lpfc_fcf_record_switch_name_0, new_fcf_record))
                return 0;
        if (sw_name[1] != bf_get(lpfc_fcf_record_switch_name_1, new_fcf_record))
                return 0;
        if (sw_name[2] != bf_get(lpfc_fcf_record_switch_name_2, new_fcf_record))
                return 0;
        if (sw_name[3] != bf_get(lpfc_fcf_record_switch_name_3, new_fcf_record))
                return 0;
        if (sw_name[4] != bf_get(lpfc_fcf_record_switch_name_4, new_fcf_record))
                return 0;
        if (sw_name[5] != bf_get(lpfc_fcf_record_switch_name_5, new_fcf_record))
                return 0;
        if (sw_name[6] != bf_get(lpfc_fcf_record_switch_name_6, new_fcf_record))
                return 0;
        if (sw_name[7] != bf_get(lpfc_fcf_record_switch_name_7, new_fcf_record))
                return 0;
        return 1;
}

1296/**
1297 * lpfc_mac_addr_match - Check if the fcf mac address matches.
1298 * @mac_addr: pointer to mac address.
1299 * @new_fcf_record: pointer to fcf record.
1300 *
1301 * This routine compares the FCF record's mac address with the HBA's
1302 * FCF mac address. If the mac addresses are identical this function
1303 * returns 1, else it returns 0.
1304 **/
1305static uint32_t
1306lpfc_mac_addr_match(uint8_t *mac_addr, struct fcf_record *new_fcf_record)
1307{
1308        if (mac_addr[0] != bf_get(lpfc_fcf_record_mac_0, new_fcf_record))
1309                return 0;
1310        if (mac_addr[1] != bf_get(lpfc_fcf_record_mac_1, new_fcf_record))
1311                return 0;
1312        if (mac_addr[2] != bf_get(lpfc_fcf_record_mac_2, new_fcf_record))
1313                return 0;
1314        if (mac_addr[3] != bf_get(lpfc_fcf_record_mac_3, new_fcf_record))
1315                return 0;
1316        if (mac_addr[4] != bf_get(lpfc_fcf_record_mac_4, new_fcf_record))
1317                return 0;
1318        if (mac_addr[5] != bf_get(lpfc_fcf_record_mac_5, new_fcf_record))
1319                return 0;
1320        return 1;
1321}
1322
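/*
 * The three match helpers above share one pattern: each byte of a field is
 * pulled out of the hardware FCF record with bf_get() and compared against
 * the driver's cached copy, failing fast on the first mismatch. Below is a
 * minimal standalone sketch of that comparison, assuming both sides are
 * already plain byte arrays; the sketch_* name is illustrative, not a
 * driver API.
 */
#include <stddef.h>
#include <stdint.h>

static int sketch_bytes_match(const uint8_t *a, const uint8_t *b, size_t len)
{
        size_t i;

        /* Fail fast on the first differing byte, as the helpers above do */
        for (i = 0; i < len; i++)
                if (a[i] != b[i])
                        return 0;
        return 1;       /* identical: the field matches */
}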
1323static bool
1324lpfc_vlan_id_match(uint16_t curr_vlan_id, uint16_t new_vlan_id)
1325{
1326        return (curr_vlan_id == new_vlan_id);
1327}
1328
1329/**
1330 * __lpfc_update_fcf_record_pri - Update the lpfc_fcf_pri record.
1332 * @phba: pointer to lpfc hba data structure.
1333 * @fcf_index: Index for the lpfc_fcf_record.
1334 * @new_fcf_record: pointer to hba fcf record.
1335 *
1336 * This routine updates the driver FCF priority record from the new HBA FCF
1337 * record. This routine is called with the host lock held.
1338 **/
1339static void
1340__lpfc_update_fcf_record_pri(struct lpfc_hba *phba, uint16_t fcf_index,
1341                                 struct fcf_record *new_fcf_record)
1343{
1344        struct lpfc_fcf_pri *fcf_pri;
1345
1346        fcf_pri = &phba->fcf.fcf_pri[fcf_index];
1347        fcf_pri->fcf_rec.fcf_index = fcf_index;
1348        /* FCF record priority */
1349        fcf_pri->fcf_rec.priority = new_fcf_record->fip_priority;
1350
1351}
1352
1353/**
1354 * lpfc_copy_fcf_record - Copy fcf information to lpfc_hba.
1355 * @fcf_rec: pointer to driver fcf record.
1356 * @new_fcf_record: pointer to fcf record.
1357 *
1358 * This routine copies the FCF information from the FCF
1359 * record to lpfc_hba data structure.
1360 **/
1361static void
1362lpfc_copy_fcf_record(struct lpfc_fcf_rec *fcf_rec,
1363                     struct fcf_record *new_fcf_record)
1364{
1365        /* Fabric name */
1366        fcf_rec->fabric_name[0] =
1367                bf_get(lpfc_fcf_record_fab_name_0, new_fcf_record);
1368        fcf_rec->fabric_name[1] =
1369                bf_get(lpfc_fcf_record_fab_name_1, new_fcf_record);
1370        fcf_rec->fabric_name[2] =
1371                bf_get(lpfc_fcf_record_fab_name_2, new_fcf_record);
1372        fcf_rec->fabric_name[3] =
1373                bf_get(lpfc_fcf_record_fab_name_3, new_fcf_record);
1374        fcf_rec->fabric_name[4] =
1375                bf_get(lpfc_fcf_record_fab_name_4, new_fcf_record);
1376        fcf_rec->fabric_name[5] =
1377                bf_get(lpfc_fcf_record_fab_name_5, new_fcf_record);
1378        fcf_rec->fabric_name[6] =
1379                bf_get(lpfc_fcf_record_fab_name_6, new_fcf_record);
1380        fcf_rec->fabric_name[7] =
1381                bf_get(lpfc_fcf_record_fab_name_7, new_fcf_record);
1382        /* Mac address */
1383        fcf_rec->mac_addr[0] = bf_get(lpfc_fcf_record_mac_0, new_fcf_record);
1384        fcf_rec->mac_addr[1] = bf_get(lpfc_fcf_record_mac_1, new_fcf_record);
1385        fcf_rec->mac_addr[2] = bf_get(lpfc_fcf_record_mac_2, new_fcf_record);
1386        fcf_rec->mac_addr[3] = bf_get(lpfc_fcf_record_mac_3, new_fcf_record);
1387        fcf_rec->mac_addr[4] = bf_get(lpfc_fcf_record_mac_4, new_fcf_record);
1388        fcf_rec->mac_addr[5] = bf_get(lpfc_fcf_record_mac_5, new_fcf_record);
1389        /* FCF record index */
1390        fcf_rec->fcf_indx = bf_get(lpfc_fcf_record_fcf_index, new_fcf_record);
1391        /* FCF record priority */
1392        fcf_rec->priority = new_fcf_record->fip_priority;
1393        /* Switch name */
1394        fcf_rec->switch_name[0] =
1395                bf_get(lpfc_fcf_record_switch_name_0, new_fcf_record);
1396        fcf_rec->switch_name[1] =
1397                bf_get(lpfc_fcf_record_switch_name_1, new_fcf_record);
1398        fcf_rec->switch_name[2] =
1399                bf_get(lpfc_fcf_record_switch_name_2, new_fcf_record);
1400        fcf_rec->switch_name[3] =
1401                bf_get(lpfc_fcf_record_switch_name_3, new_fcf_record);
1402        fcf_rec->switch_name[4] =
1403                bf_get(lpfc_fcf_record_switch_name_4, new_fcf_record);
1404        fcf_rec->switch_name[5] =
1405                bf_get(lpfc_fcf_record_switch_name_5, new_fcf_record);
1406        fcf_rec->switch_name[6] =
1407                bf_get(lpfc_fcf_record_switch_name_6, new_fcf_record);
1408        fcf_rec->switch_name[7] =
1409                bf_get(lpfc_fcf_record_switch_name_7, new_fcf_record);
1410}
1411
1412/**
1413 * __lpfc_update_fcf_record - Update driver fcf record
1414 * @phba: pointer to lpfc hba data structure.
1415 * @fcf_rec: pointer to driver fcf record.
1416 * @new_fcf_record: pointer to hba fcf record.
1417 * @addr_mode: address mode to be set to the driver fcf record.
1418 * @vlan_id: vlan tag to be set to the driver fcf record.
1419 * @flag: flag bits to be set to the driver fcf record.
1420 *
1421 * This routine updates the driver FCF record from the new HBA FCF record
1422 * together with the address mode, vlan_id, and other information. This
1423 * routine is called with the host lock held.
1424 **/
1425static void
1426__lpfc_update_fcf_record(struct lpfc_hba *phba, struct lpfc_fcf_rec *fcf_rec,
1427                       struct fcf_record *new_fcf_record, uint32_t addr_mode,
1428                       uint16_t vlan_id, uint32_t flag)
1429{
1430        /* Copy the fields from the HBA's FCF record */
1431        lpfc_copy_fcf_record(fcf_rec, new_fcf_record);
1432        /* Update other fields of driver FCF record */
1433        fcf_rec->addr_mode = addr_mode;
1434        fcf_rec->vlan_id = vlan_id;
1435        fcf_rec->flag |= (flag | RECORD_VALID);
1436        __lpfc_update_fcf_record_pri(phba,
1437                bf_get(lpfc_fcf_record_fcf_index, new_fcf_record),
1438                                 new_fcf_record);
1439}
1440
1441/**
1442 * lpfc_register_fcf - Register the FCF with the HBA.
1443 * @phba: pointer to lpfc hba data structure.
1444 *
1445 * This routine issues a register fcfi mailbox command to register
1446 * the FCF with the HBA.
1447 **/
1448static void
1449lpfc_register_fcf(struct lpfc_hba *phba)
1450{
1451        LPFC_MBOXQ_t *fcf_mbxq;
1452        int rc;
1453
1454        spin_lock_irq(&phba->hbalock);
1455        /* If the FCF is not available do nothing. */
1456        if (!(phba->fcf.fcf_flag & FCF_AVAILABLE)) {
1457                phba->hba_flag &= ~(FCF_TS_INPROG | FCF_RR_INPROG);
1458                spin_unlock_irq(&phba->hbalock);
1459                return;
1460        }
1461
1462        /* The FCF is already registered, start discovery */
1463        if (phba->fcf.fcf_flag & FCF_REGISTERED) {
1464                phba->fcf.fcf_flag |= (FCF_SCAN_DONE | FCF_IN_USE);
1465                phba->hba_flag &= ~FCF_TS_INPROG;
1466                if (phba->pport->port_state != LPFC_FLOGI &&
1467                    phba->pport->fc_flag & FC_FABRIC) {
1468                        phba->hba_flag |= FCF_RR_INPROG;
1469                        spin_unlock_irq(&phba->hbalock);
1470                        lpfc_initial_flogi(phba->pport);
1471                        return;
1472                }
1473                spin_unlock_irq(&phba->hbalock);
1474                return;
1475        }
1476        spin_unlock_irq(&phba->hbalock);
1477
1478        fcf_mbxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1479        if (!fcf_mbxq) {
1480                spin_lock_irq(&phba->hbalock);
1481                phba->hba_flag &= ~(FCF_TS_INPROG | FCF_RR_INPROG);
1482                spin_unlock_irq(&phba->hbalock);
1483                return;
1484        }
1485
1486        lpfc_reg_fcfi(phba, fcf_mbxq);
1487        fcf_mbxq->vport = phba->pport;
1488        fcf_mbxq->mbox_cmpl = lpfc_mbx_cmpl_reg_fcfi;
1489        rc = lpfc_sli_issue_mbox(phba, fcf_mbxq, MBX_NOWAIT);
1490        if (rc == MBX_NOT_FINISHED) {
1491                spin_lock_irq(&phba->hbalock);
1492                phba->hba_flag &= ~(FCF_TS_INPROG | FCF_RR_INPROG);
1493                spin_unlock_irq(&phba->hbalock);
1494                mempool_free(fcf_mbxq, phba->mbox_mem_pool);
1495        }
1496
1497        return;
1498}
1499
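/*
 * Note the shape of the error handling above: FCF_TS_INPROG/FCF_RR_INPROG
 * are cleared under hbalock on every failure path (FCF unavailable, mailbox
 * allocation failure, mailbox issue failure) so that a later scan can start
 * cleanly. A user-space sketch of that rollback pattern, with a pthread
 * mutex standing in for the driver spinlock; all names are illustrative:
 */
#include <pthread.h>
#include <stdbool.h>

#define SKETCH_SCAN_INPROG      0x1

static pthread_mutex_t sketch_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned int sketch_flags;

static bool sketch_try_start(bool (*issue)(void))
{
        pthread_mutex_lock(&sketch_lock);
        sketch_flags |= SKETCH_SCAN_INPROG;
        pthread_mutex_unlock(&sketch_lock);

        if (!issue()) {
                /* Every failure path clears the flag under the lock */
                pthread_mutex_lock(&sketch_lock);
                sketch_flags &= ~SKETCH_SCAN_INPROG;
                pthread_mutex_unlock(&sketch_lock);
                return false;
        }
        return true;    /* the completion handler clears the flag later */
}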
1500/**
1501 * lpfc_match_fcf_conn_list - Check if the FCF record can be used for discovery.
1502 * @phba: pointer to lpfc hba data structure.
1503 * @new_fcf_record: pointer to fcf record.
1504 * @boot_flag: Indicates if this record is used by the boot BIOS.
1505 * @addr_mode: The address mode to be used by this FCF
1506 * @vlan_id: The vlan id to be used as vlan tagging by this FCF.
1507 *
1508 * This routine compares the fcf record with the connect list obtained from the
1509 * config region to decide if this FCF can be used for SAN discovery. It returns
1510 * 1 if this record can be used for SAN discovery, else it returns zero. If this
1511 * FCF record can be used for SAN discovery, the boot_flag will indicate if this
1512 * FCF is used by the boot BIOS and addr_mode will indicate the addressing mode
1513 * to be used for this FCF when the function returns.
1514 * If the FCF record needs to be used with a particular vlan id, the vlan is
1515 * set in the vlan_id on return of the function. If no VLAN tagging needs to
1516 * be used with the FCF, vlan_id will be set to LPFC_FCOE_NULL_VID.
1517 **/
1518static int
1519lpfc_match_fcf_conn_list(struct lpfc_hba *phba,
1520                        struct fcf_record *new_fcf_record,
1521                        uint32_t *boot_flag, uint32_t *addr_mode,
1522                        uint16_t *vlan_id)
1523{
1524        struct lpfc_fcf_conn_entry *conn_entry;
1525        int i, j, fcf_vlan_id = 0;
1526
1527        /* Find the lowest VLAN id in the FCF record */
1528        for (i = 0; i < 512; i++) {
1529                if (new_fcf_record->vlan_bitmap[i]) {
1530                        fcf_vlan_id = i * 8;
1531                        j = 0;
1532                        while (!((new_fcf_record->vlan_bitmap[i] >> j) & 1)) {
1533                                j++;
1534                                fcf_vlan_id++;
1535                        }
1536                        break;
1537                }
1538        }
1539
1540        /* FCF not valid/available or solicitation in progress */
1541        if (!bf_get(lpfc_fcf_record_fcf_avail, new_fcf_record) ||
1542            !bf_get(lpfc_fcf_record_fcf_valid, new_fcf_record) ||
1543            bf_get(lpfc_fcf_record_fcf_sol, new_fcf_record))
1544                return 0;
1545
1546        if (!(phba->hba_flag & HBA_FIP_SUPPORT)) {
1547                *boot_flag = 0;
1548                *addr_mode = bf_get(lpfc_fcf_record_mac_addr_prov,
1549                                new_fcf_record);
1550                if (phba->valid_vlan)
1551                        *vlan_id = phba->vlan_id;
1552                else
1553                        *vlan_id = LPFC_FCOE_NULL_VID;
1554                return 1;
1555        }
1556
1557        /*
1558         * If there are no FCF connection table entries, the driver connects
1559         * to all FCFs.
1560         */
1561        if (list_empty(&phba->fcf_conn_rec_list)) {
1562                *boot_flag = 0;
1563                *addr_mode = bf_get(lpfc_fcf_record_mac_addr_prov,
1564                        new_fcf_record);
1565
1566                /*
1567                 * When there are no FCF connect entries, use the driver's default
1568                 * addressing mode - FPMA.
1569                 */
1570                if (*addr_mode & LPFC_FCF_FPMA)
1571                        *addr_mode = LPFC_FCF_FPMA;
1572
1573                /* If the FCF record reports a vlan id, use that vlan id */
1574                if (fcf_vlan_id)
1575                        *vlan_id = fcf_vlan_id;
1576                else
1577                        *vlan_id = LPFC_FCOE_NULL_VID;
1578                return 1;
1579        }
1580
1581        list_for_each_entry(conn_entry,
1582                            &phba->fcf_conn_rec_list, list) {
1583                if (!(conn_entry->conn_rec.flags & FCFCNCT_VALID))
1584                        continue;
1585
1586                if ((conn_entry->conn_rec.flags & FCFCNCT_FBNM_VALID) &&
1587                        !lpfc_fab_name_match(conn_entry->conn_rec.fabric_name,
1588                                             new_fcf_record))
1589                        continue;
1590                if ((conn_entry->conn_rec.flags & FCFCNCT_SWNM_VALID) &&
1591                        !lpfc_sw_name_match(conn_entry->conn_rec.switch_name,
1592                                            new_fcf_record))
1593                        continue;
1594                if (conn_entry->conn_rec.flags & FCFCNCT_VLAN_VALID) {
1595                        /*
1596                         * If the vlan bit map does not have the bit set for the
1597                         * vlan id to be used, then it is not a match.
1598                         */
1599                        if (!(new_fcf_record->vlan_bitmap
1600                                [conn_entry->conn_rec.vlan_tag / 8] &
1601                                (1 << (conn_entry->conn_rec.vlan_tag % 8))))
1602                                continue;
1603                }
1604
1605                /*
1606                 * If the FCF record does not support any addressing mode,
1607                 * skip it.
1608                 */
1609                if (!(bf_get(lpfc_fcf_record_mac_addr_prov, new_fcf_record)
1610                        & (LPFC_FCF_FPMA | LPFC_FCF_SPMA)))
1611                        continue;
1612
1613                /*
1614                 * Check if the connection record specifies a required
1615                 * addressing mode.
1616                 */
1617                if ((conn_entry->conn_rec.flags & FCFCNCT_AM_VALID) &&
1618                        !(conn_entry->conn_rec.flags & FCFCNCT_AM_PREFERRED)) {
1619
1620                        /*
1621                         * If SPMA is required but the FCF does not support it, continue.
1622                         */
1623                        if ((conn_entry->conn_rec.flags & FCFCNCT_AM_SPMA) &&
1624                                !(bf_get(lpfc_fcf_record_mac_addr_prov,
1625                                        new_fcf_record) & LPFC_FCF_SPMA))
1626                                continue;
1627
1628                        /*
1629                         * If FPMA is required but the FCF does not support it, continue.
1630                         */
1631                        if (!(conn_entry->conn_rec.flags & FCFCNCT_AM_SPMA) &&
1632                                !(bf_get(lpfc_fcf_record_mac_addr_prov,
1633                                new_fcf_record) & LPFC_FCF_FPMA))
1634                                continue;
1635                }
1636
1637                /*
1638                 * This fcf record matches the filtering criteria.
1639                 */
1640                if (conn_entry->conn_rec.flags & FCFCNCT_BOOT)
1641                        *boot_flag = 1;
1642                else
1643                        *boot_flag = 0;
1644
1645                /*
1646                 * If the user did not specify any addressing mode, or if the
1647                 * preferred addressing mode specified by the user is not supported
1648                 * by the FCF, allow the fabric to pick the addressing mode.
1649                 */
1650                *addr_mode = bf_get(lpfc_fcf_record_mac_addr_prov,
1651                                new_fcf_record);
1652                /*
1653                 * If the user specified a required address mode, assign that
1654                 * address mode
1655                 */
1656                if ((conn_entry->conn_rec.flags & FCFCNCT_AM_VALID) &&
1657                        (!(conn_entry->conn_rec.flags & FCFCNCT_AM_PREFERRED)))
1658                        *addr_mode = (conn_entry->conn_rec.flags &
1659                                FCFCNCT_AM_SPMA) ?
1660                                LPFC_FCF_SPMA : LPFC_FCF_FPMA;
1661                /*
1662                 * If the user specified a preferred address mode, use the
1663                 * addr mode only if the FCF supports the addr_mode.
1664                 */
1665                else if ((conn_entry->conn_rec.flags & FCFCNCT_AM_VALID) &&
1666                        (conn_entry->conn_rec.flags & FCFCNCT_AM_PREFERRED) &&
1667                        (conn_entry->conn_rec.flags & FCFCNCT_AM_SPMA) &&
1668                        (*addr_mode & LPFC_FCF_SPMA))
1669                                *addr_mode = LPFC_FCF_SPMA;
1670                else if ((conn_entry->conn_rec.flags & FCFCNCT_AM_VALID) &&
1671                        (conn_entry->conn_rec.flags & FCFCNCT_AM_PREFERRED) &&
1672                        !(conn_entry->conn_rec.flags & FCFCNCT_AM_SPMA) &&
1673                        (*addr_mode & LPFC_FCF_FPMA))
1674                                *addr_mode = LPFC_FCF_FPMA;
1675
1676                /* If matching connect list has a vlan id, use it */
1677                if (conn_entry->conn_rec.flags & FCFCNCT_VLAN_VALID)
1678                        *vlan_id = conn_entry->conn_rec.vlan_tag;
1679                /*
1680                 * If no vlan id is specified in connect list, use the vlan id
1681                 * in the FCF record
1682                 */
1683                else if (fcf_vlan_id)
1684                        *vlan_id = fcf_vlan_id;
1685                else
1686                        *vlan_id = LPFC_FCOE_NULL_VID;
1687
1688                return 1;
1689        }
1690
1691        return 0;
1692}
1693
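/*
 * The loop at the top of lpfc_match_fcf_conn_list() finds the lowest VLAN
 * id in the record's 512-byte vlan_bitmap: byte i covers VLAN ids i*8 to
 * i*8+7, and within the first non-zero byte the scan walks up from bit 0.
 * A standalone sketch of the same scan; it returns -1 when no bit is set,
 * where the code above simply leaves fcf_vlan_id at 0:
 */
#include <stdint.h>

static int sketch_lowest_vlan_id(const uint8_t bitmap[512])
{
        int i, j;

        for (i = 0; i < 512; i++) {
                if (!bitmap[i])
                        continue;
                /* Bit j of byte i represents VLAN id i * 8 + j */
                for (j = 0; j < 8; j++)
                        if ((bitmap[i] >> j) & 1)
                                return i * 8 + j;
        }
        return -1;      /* no VLAN id present in the record */
}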
1694/**
1695 * lpfc_check_pending_fcoe_event - Check if there is a pending fcoe event.
1696 * @phba: pointer to lpfc hba data structure.
1697 * @unreg_fcf: Unregister FCF if the FCF table needs to be re-scanned.
1698 *
1699 * This function checks if there is any fcoe event pending while the driver
1700 * scans FCF entries. If there is any pending event, it will restart the
1701 * FCF scanning and return 1, else it returns 0.
1702 */
1703int
1704lpfc_check_pending_fcoe_event(struct lpfc_hba *phba, uint8_t unreg_fcf)
1705{
1706        /*
1707         * If the Link is up and no FCoE events while in the
1708         * FCF discovery, no need to restart FCF discovery.
1709         */
1710        if ((phba->link_state  >= LPFC_LINK_UP) &&
1711            (phba->fcoe_eventtag == phba->fcoe_eventtag_at_fcf_scan))
1712                return 0;
1713
1714        lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
1715                        "2768 Pending link or FCF event during current "
1716                        "handling of the previous event: link_state:x%x, "
1717                        "evt_tag_at_scan:x%x, evt_tag_current:x%x\n",
1718                        phba->link_state, phba->fcoe_eventtag_at_fcf_scan,
1719                        phba->fcoe_eventtag);
1720
1721        spin_lock_irq(&phba->hbalock);
1722        phba->fcf.fcf_flag &= ~FCF_AVAILABLE;
1723        spin_unlock_irq(&phba->hbalock);
1724
1725        if (phba->link_state >= LPFC_LINK_UP) {
1726                lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
1727                                "2780 Restart FCF table scan due to "
1728                                "pending FCF event:evt_tag_at_scan:x%x, "
1729                                "evt_tag_current:x%x\n",
1730                                phba->fcoe_eventtag_at_fcf_scan,
1731                                phba->fcoe_eventtag);
1732                lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST);
1733        } else {
1734                /*
1735                 * Do not continue FCF discovery and clear FCF_TS_INPROG
1736                 * flag
1737                 */
1738                lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
1739                                "2833 Stop FCF discovery process due to link "
1740                                "state change (x%x)\n", phba->link_state);
1741                spin_lock_irq(&phba->hbalock);
1742                phba->hba_flag &= ~(FCF_TS_INPROG | FCF_RR_INPROG);
1743                phba->fcf.fcf_flag &= ~(FCF_REDISC_FOV | FCF_DISCOVERY);
1744                spin_unlock_irq(&phba->hbalock);
1745        }
1746
1747        /* Unregister the currently registered FCF if required */
1748        if (unreg_fcf) {
1749                spin_lock_irq(&phba->hbalock);
1750                phba->fcf.fcf_flag &= ~FCF_REGISTERED;
1751                spin_unlock_irq(&phba->hbalock);
1752                lpfc_sli4_unregister_fcf(phba);
1753        }
1754        return 1;
1755}
1756
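/*
 * The pending-event check above is a snapshot compare: the event tag
 * captured when the scan started (fcoe_eventtag_at_fcf_scan) is held
 * against the current tag, and any difference means FCoE events arrived
 * mid-scan, so the results may be stale. A minimal sketch of the idiom,
 * with illustrative names rather than driver APIs:
 */
#include <stdbool.h>
#include <stdint.h>

struct sketch_scan {
        uint32_t tag_at_scan;   /* snapshot taken when the scan began */
};

static void sketch_scan_begin(struct sketch_scan *s, uint32_t current_tag)
{
        s->tag_at_scan = current_tag;
}

static bool sketch_scan_is_stale(const struct sketch_scan *s,
                                 uint32_t current_tag)
{
        /* Any tag movement while scanning means the scan must restart */
        return s->tag_at_scan != current_tag;
}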
1757/**
1758 * lpfc_sli4_new_fcf_random_select - Randomly select an eligible new fcf record
1759 * @phba: pointer to lpfc hba data structure.
1760 * @fcf_cnt: number of eligible fcf record seen so far.
1761 *
1762 * This function makes a running random selection decision on an FCF record to
1763 * use through a sequence of @fcf_cnt eligible FCF records with equal
1764 * probability. To perform integer manipulation of random numbers with
1765 * size uint32_t, the lower 16 bits of the 32-bit random number returned
1766 * from prandom_u32() are taken as the random number generated.
1767 *
1768 * Returns true when the outcome is that the newly read FCF record should be
1769 * chosen; otherwise, returns false to keep the previously
1770 * chosen FCF record.
1771 **/
1772static bool
1773lpfc_sli4_new_fcf_random_select(struct lpfc_hba *phba, uint32_t fcf_cnt)
1774{
1775        uint32_t rand_num;
1776
1777        /* Get 16-bit uniform random number */
1778        rand_num = 0xFFFF & prandom_u32();
1779
1780        /* Decision with probability 1/fcf_cnt */
1781        if ((fcf_cnt * rand_num) < 0xFFFF)
1782                return true;
1783        else
1784                return false;
1785}
1786
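/*
 * The selection above is reservoir sampling with a reservoir of one: when
 * the n-th eligible record is seen it replaces the current choice with
 * probability roughly 1/n, which leaves every record equally likely to be
 * the final pick. With r uniform over the 0x10000 values 0..0xFFFF, the
 * test (n * r) < 0xFFFF succeeds for about 0xFFFF/n of them, i.e. with
 * probability close to 1/n. A standalone sketch; rand() merely stands in
 * for prandom_u32() and is illustrative only:
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdlib.h>

static bool sketch_select_new(uint32_t n)
{
        uint32_t r = (uint32_t)rand() & 0xFFFF; /* 16-bit uniform sample */

        /* True with probability ~1/n: take record n over the current pick */
        return (n * r) < 0xFFFF;
}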
1787/**
1788 * lpfc_sli4_fcf_rec_mbox_parse - Parse read_fcf mbox command.
1789 * @phba: pointer to lpfc hba data structure.
1790 * @mboxq: pointer to mailbox object.
1791 * @next_fcf_index: pointer to holder of next fcf index.
1792 *
1793 * This routine parses the non-embedded fcf mailbox command by performing the
1794 * necessary error checking, non-embedded read FCF record mailbox command
1795 * SGE parsing, and endianness swapping.
1796 *
1797 * Returns the pointer to the new FCF record in the non-embedded mailbox
1798 * command DMA memory if successful, otherwise NULL.
1799 */
1800static struct fcf_record *
1801lpfc_sli4_fcf_rec_mbox_parse(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
1802                             uint16_t *next_fcf_index)
1803{
1804        void *virt_addr;
1805        struct lpfc_mbx_sge sge;
1806        struct lpfc_mbx_read_fcf_tbl *read_fcf;
1807        uint32_t shdr_status, shdr_add_status, if_type;
1808        union lpfc_sli4_cfg_shdr *shdr;
1809        struct fcf_record *new_fcf_record;
1810
1811        /* Get the first SGE entry from the non-embedded DMA memory. This
1812         * routine only uses a single SGE.
1813         */
1814        lpfc_sli4_mbx_sge_get(mboxq, 0, &sge);
1815        if (unlikely(!mboxq->sge_array)) {
1816                lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
1817                                "2524 Failed to get the non-embedded SGE "
1818                                "virtual address\n");
1819                return NULL;
1820        }
1821        virt_addr = mboxq->sge_array->addr[0];
1822
1823        shdr = (union lpfc_sli4_cfg_shdr *)virt_addr;
1824        lpfc_sli_pcimem_bcopy(shdr, shdr,
1825                              sizeof(union lpfc_sli4_cfg_shdr));
1826        shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
1827        if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
1828        shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
1829        if (shdr_status || shdr_add_status) {
1830                if (shdr_status == STATUS_FCF_TABLE_EMPTY ||
1831                                        if_type == LPFC_SLI_INTF_IF_TYPE_2)
1832                        lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
1833                                        "2726 READ_FCF_RECORD Indicates empty "
1834                                        "FCF table.\n");
1835                else
1836                        lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
1837                                        "2521 READ_FCF_RECORD mailbox failed "
1838                                        "with status x%x add_status x%x, "
1839                                        "mbx\n", shdr_status, shdr_add_status);
1840                return NULL;
1841        }
1842
1843        /* Interpreting the returned information of the FCF record */
1844        read_fcf = (struct lpfc_mbx_read_fcf_tbl *)virt_addr;
1845        lpfc_sli_pcimem_bcopy(read_fcf, read_fcf,
1846                              sizeof(struct lpfc_mbx_read_fcf_tbl));
1847        *next_fcf_index = bf_get(lpfc_mbx_read_fcf_tbl_nxt_vindx, read_fcf);
1848        new_fcf_record = (struct fcf_record *)(virt_addr +
1849                          sizeof(struct lpfc_mbx_read_fcf_tbl));
1850        lpfc_sli_pcimem_bcopy(new_fcf_record, new_fcf_record,
1851                                offsetof(struct fcf_record, vlan_bitmap));
1852        new_fcf_record->word137 = le32_to_cpu(new_fcf_record->word137);
1853        new_fcf_record->word138 = le32_to_cpu(new_fcf_record->word138);
1854
1855        return new_fcf_record;
1856}
1857
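/*
 * The parse routine above normalizes the DMA buffer in place: structures go
 * through lpfc_sli_pcimem_bcopy() and the trailing words through
 * le32_to_cpu(), since the adapter writes the FCF record little-endian
 * regardless of host byte order. A portable user-space sketch of the 32-bit
 * conversion; the kernel's le32_to_cpu() is the real interface:
 */
#include <stdint.h>

static uint32_t sketch_le32_to_host(const uint8_t b[4])
{
        /* Assemble from explicit byte positions so host endianness is moot */
        return (uint32_t)b[0] |
               ((uint32_t)b[1] << 8) |
               ((uint32_t)b[2] << 16) |
               ((uint32_t)b[3] << 24);
}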
1858/**
1859 * lpfc_sli4_log_fcf_record_info - Log the information of a fcf record
1860 * @phba: pointer to lpfc hba data structure.
1861 * @fcf_record: pointer to the fcf record.
1862 * @vlan_id: the lowest vlan identifier associated with this fcf record.
1863 * @next_fcf_index: the index to the next fcf record in hba's fcf table.
1864 *
1865 * This routine logs the detailed FCF record if the LOG_FIP logging is
1866 * enabled.
1867 **/
1868static void
1869lpfc_sli4_log_fcf_record_info(struct lpfc_hba *phba,
1870                              struct fcf_record *fcf_record,
1871                              uint16_t vlan_id,
1872                              uint16_t next_fcf_index)
1873{
1874        lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
1875                        "2764 READ_FCF_RECORD:\n"
1876                        "\tFCF_Index     : x%x\n"
1877                        "\tFCF_Avail     : x%x\n"
1878                        "\tFCF_Valid     : x%x\n"
1879                        "\tFCF_SOL       : x%x\n"
1880                        "\tFIP_Priority  : x%x\n"
1881                        "\tMAC_Provider  : x%x\n"
1882                        "\tLowest VLANID : x%x\n"
1883                        "\tFCF_MAC Addr  : x%x:%x:%x:%x:%x:%x\n"
1884                        "\tFabric_Name   : x%x:%x:%x:%x:%x:%x:%x:%x\n"
1885                        "\tSwitch_Name   : x%x:%x:%x:%x:%x:%x:%x:%x\n"
1886                        "\tNext_FCF_Index: x%x\n",
1887                        bf_get(lpfc_fcf_record_fcf_index, fcf_record),
1888                        bf_get(lpfc_fcf_record_fcf_avail, fcf_record),
1889                        bf_get(lpfc_fcf_record_fcf_valid, fcf_record),
1890                        bf_get(lpfc_fcf_record_fcf_sol, fcf_record),
1891                        fcf_record->fip_priority,
1892                        bf_get(lpfc_fcf_record_mac_addr_prov, fcf_record),
1893                        vlan_id,
1894                        bf_get(lpfc_fcf_record_mac_0, fcf_record),
1895                        bf_get(lpfc_fcf_record_mac_1, fcf_record),
1896                        bf_get(lpfc_fcf_record_mac_2, fcf_record),
1897                        bf_get(lpfc_fcf_record_mac_3, fcf_record),
1898                        bf_get(lpfc_fcf_record_mac_4, fcf_record),
1899                        bf_get(lpfc_fcf_record_mac_5, fcf_record),
1900                        bf_get(lpfc_fcf_record_fab_name_0, fcf_record),
1901                        bf_get(lpfc_fcf_record_fab_name_1, fcf_record),
1902                        bf_get(lpfc_fcf_record_fab_name_2, fcf_record),
1903                        bf_get(lpfc_fcf_record_fab_name_3, fcf_record),
1904                        bf_get(lpfc_fcf_record_fab_name_4, fcf_record),
1905                        bf_get(lpfc_fcf_record_fab_name_5, fcf_record),
1906                        bf_get(lpfc_fcf_record_fab_name_6, fcf_record),
1907                        bf_get(lpfc_fcf_record_fab_name_7, fcf_record),
1908                        bf_get(lpfc_fcf_record_switch_name_0, fcf_record),
1909                        bf_get(lpfc_fcf_record_switch_name_1, fcf_record),
1910                        bf_get(lpfc_fcf_record_switch_name_2, fcf_record),
1911                        bf_get(lpfc_fcf_record_switch_name_3, fcf_record),
1912                        bf_get(lpfc_fcf_record_switch_name_4, fcf_record),
1913                        bf_get(lpfc_fcf_record_switch_name_5, fcf_record),
1914                        bf_get(lpfc_fcf_record_switch_name_6, fcf_record),
1915                        bf_get(lpfc_fcf_record_switch_name_7, fcf_record),
1916                        next_fcf_index);
1917}
1918
1919/**
1920 * lpfc_sli4_fcf_record_match - testing new FCF record for matching existing FCF
1921 * @phba: pointer to lpfc hba data structure.
1922 * @fcf_rec: pointer to an existing FCF record.
1923 * @new_fcf_record: pointer to a new FCF record.
1924 * @new_vlan_id: vlan id from the new FCF record.
1925 *
1926 * This function performs a matching test of a new FCF record against an existing
1927 * FCF record. If the new_vlan_id passed in is LPFC_FCOE_IGNORE_VID, vlan id
1928 * will not be used as part of the FCF record matching criteria.
1929 *
1930 * Returns true if all the fields match, otherwise returns false.
1931 */
1932static bool
1933lpfc_sli4_fcf_record_match(struct lpfc_hba *phba,
1934                           struct lpfc_fcf_rec *fcf_rec,
1935                           struct fcf_record *new_fcf_record,
1936                           uint16_t new_vlan_id)
1937{
1938        if (new_vlan_id != LPFC_FCOE_IGNORE_VID)
1939                if (!lpfc_vlan_id_match(fcf_rec->vlan_id, new_vlan_id))
1940                        return false;
1941        if (!lpfc_mac_addr_match(fcf_rec->mac_addr, new_fcf_record))
1942                return false;
1943        if (!lpfc_sw_name_match(fcf_rec->switch_name, new_fcf_record))
1944                return false;
1945        if (!lpfc_fab_name_match(fcf_rec->fabric_name, new_fcf_record))
1946                return false;
1947        if (fcf_rec->priority != new_fcf_record->fip_priority)
1948                return false;
1949        return true;
1950}
1951
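/*
 * LPFC_FCOE_IGNORE_VID acts as a sentinel above: when the caller passes it,
 * the vlan id criterion is skipped and only the MAC address, switch name,
 * fabric name and priority must match. A minimal sketch of the sentinel
 * pattern; SKETCH_IGNORE_VID is an illustrative constant, not the driver's:
 */
#include <stdbool.h>
#include <stdint.h>

#define SKETCH_IGNORE_VID       0xFFFF

static bool sketch_vid_match(uint16_t cur_vid, uint16_t new_vid)
{
        /* The sentinel value disables this criterion entirely */
        if (new_vid == SKETCH_IGNORE_VID)
                return true;
        return cur_vid == new_vid;
}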
1952/**
1953 * lpfc_sli4_fcf_rr_next_proc - process the next roundrobin fcf
1954 * @vport: Pointer to vport object.
1955 * @fcf_index: index to next fcf.
1956 *
1957 * This function processes the roundrobin fcf failover to the next fcf index.
1958 * When this function is invoked, there will be a current fcf registered
1959 * for flogi.
1960 * Return: 0 for continue retrying flogi on currently registered fcf;
1961 *         1 for stop flogi on currently registered fcf;
1962 */
1963int lpfc_sli4_fcf_rr_next_proc(struct lpfc_vport *vport, uint16_t fcf_index)
1964{
1965        struct lpfc_hba *phba = vport->phba;
1966        int rc;
1967
1968        if (fcf_index == LPFC_FCOE_FCF_NEXT_NONE) {
1969                spin_lock_irq(&phba->hbalock);
1970                if (phba->hba_flag & HBA_DEVLOSS_TMO) {
1971                        spin_unlock_irq(&phba->hbalock);
1972                        lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
1973                                        "2872 Devloss tmo with no eligible "
1974                                        "FCF, unregister in-use FCF (x%x) "
1975                                        "and rescan FCF table\n",
1976                                        phba->fcf.current_rec.fcf_indx);
1977                        lpfc_unregister_fcf_rescan(phba);
1978                        goto stop_flogi_current_fcf;
1979                }
1980                /* Mark the end to FLOGI roundrobin failover */
1981                phba->hba_flag &= ~FCF_RR_INPROG;
1982                /* Allow action to new fcf asynchronous event */
1983                phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_SCAN_DONE);
1984                spin_unlock_irq(&phba->hbalock);
1985                lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
1986                                "2865 No FCF available, stop roundrobin FCF "
1987                                "failover and change port state:x%x/x%x\n",
1988                                phba->pport->port_state, LPFC_VPORT_UNKNOWN);
1989                phba->pport->port_state = LPFC_VPORT_UNKNOWN;
1990                goto stop_flogi_current_fcf;
1991        } else {
1992                lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_ELS,
1993                                "2794 Try FLOGI roundrobin FCF failover to "
1994                                "(x%x)\n", fcf_index);
1995                rc = lpfc_sli4_fcf_rr_read_fcf_rec(phba, fcf_index);
1996                if (rc)
1997                        lpfc_printf_log(phba, KERN_WARNING, LOG_FIP | LOG_ELS,
1998                                        "2761 FLOGI roundrobin FCF failover "
1999                                        "failed (rc:x%x) to read FCF (x%x)\n",
2000                                        rc, phba->fcf.current_rec.fcf_indx);
2001                else
2002                        goto stop_flogi_current_fcf;
2003        }
2004        return 0;
2005
2006stop_flogi_current_fcf:
2007        lpfc_can_disctmo(vport);
2008        return 1;
2009}
2010
2011/**
2012 * lpfc_sli4_fcf_pri_list_del - Remove an fcf record from the priority list
2013 * @phba: pointer to lpfc hba data structure.
2014 * @fcf_index: the index of the fcf record to delete
2015 * This routine checks the on-list flag of the fcf_index to be deleted.
2016 * If it is on the list, it is removed from the list and the flag
2017 * is cleared. This routine grabs the hbalock before removing the fcf
2018 * record from the list.
2019 **/
2020static void lpfc_sli4_fcf_pri_list_del(struct lpfc_hba *phba,
2021                        uint16_t fcf_index)
2022{
2023        struct lpfc_fcf_pri *new_fcf_pri;
2024
2025        new_fcf_pri = &phba->fcf.fcf_pri[fcf_index];
2026        lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
2027                "3058 deleting idx x%x pri x%x flg x%x\n",
2028                fcf_index, new_fcf_pri->fcf_rec.priority,
2029                 new_fcf_pri->fcf_rec.flag);
2030        spin_lock_irq(&phba->hbalock);
2031        if (new_fcf_pri->fcf_rec.flag & LPFC_FCF_ON_PRI_LIST) {
2032                if (phba->fcf.current_rec.priority ==
2033                                new_fcf_pri->fcf_rec.priority)
2034                        phba->fcf.eligible_fcf_cnt--;
2035                list_del_init(&new_fcf_pri->list);
2036                new_fcf_pri->fcf_rec.flag &= ~LPFC_FCF_ON_PRI_LIST;
2037        }
2038        spin_unlock_irq(&phba->hbalock);
2039}
2040
2041/**
2042 * lpfc_sli4_set_fcf_flogi_fail - Mark an fcf record as failed for flogi
2043 * @phba: pointer to lpfc hba data structure.
2044 * @fcf_index: the index of the fcf record to update
2045 * This routine acquires the hbalock and then sets the LPFC_FCF_FLOGI_FAILED
2046 * flag so that the round robin selection for the particular priority level
2047 * will try a different fcf record that does not have this bit set.
2048 * If the fcf record is re-read for any reason, this flag is cleared before
2049 * adding it to the priority list.
2050 **/
2051void
2052lpfc_sli4_set_fcf_flogi_fail(struct lpfc_hba *phba, uint16_t fcf_index)
2053{
2054        struct lpfc_fcf_pri *new_fcf_pri;
2055        new_fcf_pri = &phba->fcf.fcf_pri[fcf_index];
2056        spin_lock_irq(&phba->hbalock);
2057        new_fcf_pri->fcf_rec.flag |= LPFC_FCF_FLOGI_FAILED;
2058        spin_unlock_irq(&phba->hbalock);
2059}
2060
2061/**
2062 * lpfc_sli4_fcf_pri_list_add - Add an fcf record to the priority list
2063 * @phba: pointer to lpfc hba data structure.
2064 * @fcf_index: the index of the fcf record to add
2065 * This routine checks the priority of the fcf_index to be added.
2066 * If it is a lower priority than the current head of the fcf_pri list
2067 * then it is added to the list in the right order.
2068 * If it is the same priority as the current head of the list then it
2069 * is added to the head of the list and its bit in the rr_bmask is set.
2070 * If the fcf_index to be added is of a higher priority than the current
2071 * head of the list then the rr_bmask is cleared, its bit is set in the
2072 * rr_bmask and it is added to the head of the list.
2073 * returns:
2074 * 0=success 1=failure
2075 **/
2076int lpfc_sli4_fcf_pri_list_add(struct lpfc_hba *phba, uint16_t fcf_index,
2077        struct fcf_record *new_fcf_record)
2078{
2079        uint16_t current_fcf_pri;
2080        uint16_t last_index;
2081        struct lpfc_fcf_pri *fcf_pri;
2082        struct lpfc_fcf_pri *next_fcf_pri;
2083        struct lpfc_fcf_pri *new_fcf_pri;
2084        int ret;
2085
2086        new_fcf_pri = &phba->fcf.fcf_pri[fcf_index];
2087        lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
2088                "3059 adding idx x%x pri x%x flg x%x\n",
2089                fcf_index, new_fcf_record->fip_priority,
2090                 new_fcf_pri->fcf_rec.flag);
2091        spin_lock_irq(&phba->hbalock);
2092        if (new_fcf_pri->fcf_rec.flag & LPFC_FCF_ON_PRI_LIST)
2093                list_del_init(&new_fcf_pri->list);
2094        new_fcf_pri->fcf_rec.fcf_index = fcf_index;
2095        new_fcf_pri->fcf_rec.priority = new_fcf_record->fip_priority;
2096        if (list_empty(&phba->fcf.fcf_pri_list)) {
2097                list_add(&new_fcf_pri->list, &phba->fcf.fcf_pri_list);
2098                ret = lpfc_sli4_fcf_rr_index_set(phba,
2099                                new_fcf_pri->fcf_rec.fcf_index);
2100                goto out;
2101        }
2102
2103        last_index = find_first_bit(phba->fcf.fcf_rr_bmask,
2104                                LPFC_SLI4_FCF_TBL_INDX_MAX);
2105        if (last_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
2106                ret = 0; /* Empty rr list */
2107                goto out;
2108        }
2109        current_fcf_pri = phba->fcf.fcf_pri[last_index].fcf_rec.priority;
2110        if (new_fcf_pri->fcf_rec.priority <=  current_fcf_pri) {
2111                list_add(&new_fcf_pri->list, &phba->fcf.fcf_pri_list);
2112                if (new_fcf_pri->fcf_rec.priority <  current_fcf_pri) {
2113                        memset(phba->fcf.fcf_rr_bmask, 0,
2114                                sizeof(*phba->fcf.fcf_rr_bmask));
2115                        /* fcfs_at_this_priority_level = 1; */
2116                        phba->fcf.eligible_fcf_cnt = 1;
2117                } else
2118                        /* fcfs_at_this_priority_level++; */
2119                        phba->fcf.eligible_fcf_cnt++;
2120                ret = lpfc_sli4_fcf_rr_index_set(phba,
2121                                new_fcf_pri->fcf_rec.fcf_index);
2122                goto out;
2123        }
2124
2125        list_for_each_entry_safe(fcf_pri, next_fcf_pri,
2126                                &phba->fcf.fcf_pri_list, list) {
2127                if (new_fcf_pri->fcf_rec.priority <=
2128                                fcf_pri->fcf_rec.priority) {
2129                        if (fcf_pri->list.prev == &phba->fcf.fcf_pri_list)
2130                                list_add(&new_fcf_pri->list,
2131                                                &phba->fcf.fcf_pri_list);
2132                        else
2133                                list_add(&new_fcf_pri->list,
2134                                         &((struct lpfc_fcf_pri *)
2135                                        fcf_pri->list.prev)->list);
2136                        ret = 0;
2137                        goto out;
2138                } else if (fcf_pri->list.next == &phba->fcf.fcf_pri_list
2139                        || new_fcf_pri->fcf_rec.priority <
2140                                next_fcf_pri->fcf_rec.priority) {
2141                        list_add(&new_fcf_pri->list, &fcf_pri->list);
2142                        ret = 0;
2143                        goto out;
2144                }
2145                if (new_fcf_pri->fcf_rec.priority > fcf_pri->fcf_rec.priority)
2146                        continue;
2147
2148        }
2149        ret = 1;
2150out:
2151        /* we use = instead of |= to clear the FLOGI_FAILED flag. */
2152        new_fcf_pri->fcf_rec.flag = LPFC_FCF_ON_PRI_LIST;
2153        spin_unlock_irq(&phba->hbalock);
2154        return ret;
2155}
2156
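/*
 * lpfc_sli4_fcf_pri_list_add() keeps fcf_pri_list ordered by ascending
 * priority value (a lower value is preferred) and resets the roundrobin
 * bitmask whenever a strictly better priority reaches the head. A
 * standalone sketch of just the sorted-insert step, using a simple singly
 * linked list with illustrative types; the driver uses kernel list_head:
 */
#include <stdint.h>

struct sketch_node {
        uint16_t priority;              /* lower value = higher priority */
        struct sketch_node *next;
};

static void sketch_sorted_insert(struct sketch_node **head,
                                 struct sketch_node *node)
{
        struct sketch_node **pos = head;

        /* Walk until the first entry with a larger priority value */
        while (*pos && (*pos)->priority <= node->priority)
                pos = &(*pos)->next;
        node->next = *pos;
        *pos = node;
}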
2157/**
2158 * lpfc_mbx_cmpl_fcf_scan_read_fcf_rec - fcf scan read_fcf mbox cmpl handler.
2159 * @phba: pointer to lpfc hba data structure.
2160 * @mboxq: pointer to mailbox object.
2161 *
2162 * This function iterates through all the fcf records available in
2163 * HBA and chooses the optimal FCF record for discovery. After finding
2164 * the FCF for discovery it registers the FCF record and kick-starts
2165 * discovery.
2166 * If the FCF_IN_USE flag is set in the currently used FCF, the routine tries
2167 * to use an FCF record which matches the fabric name and mac address of the
2168 * currently used FCF record.
2169 * If the driver supports only one FCF, it will try to use the FCF record
2170 * used by BOOT_BIOS.
2171 */
2172void
2173lpfc_mbx_cmpl_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
2174{
2175        struct fcf_record *new_fcf_record;
2176        uint32_t boot_flag, addr_mode;
2177        uint16_t fcf_index, next_fcf_index;
2178        struct lpfc_fcf_rec *fcf_rec = NULL;
2179        uint32_t seed;
2180        uint16_t vlan_id = LPFC_FCOE_NULL_VID;
2181        bool select_new_fcf;
2182        int rc;
2183
2184        /* If there is pending FCoE event restart FCF table scan */
2185        if (lpfc_check_pending_fcoe_event(phba, LPFC_SKIP_UNREG_FCF)) {
2186                lpfc_sli4_mbox_cmd_free(phba, mboxq);
2187                return;
2188        }
2189
2190        /* Parse the FCF record from the non-embedded mailbox command */
2191        new_fcf_record = lpfc_sli4_fcf_rec_mbox_parse(phba, mboxq,
2192                                                      &next_fcf_index);
2193        if (!new_fcf_record) {
2194                lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
2195                                "2765 Mailbox command READ_FCF_RECORD "
2196                                "failed to retrieve a FCF record.\n");
2197                /* Let next new FCF event trigger fast failover */
2198                spin_lock_irq(&phba->hbalock);
2199                phba->hba_flag &= ~FCF_TS_INPROG;
2200                spin_unlock_irq(&phba->hbalock);
2201                lpfc_sli4_mbox_cmd_free(phba, mboxq);
2202                return;
2203        }
2204
2205        /* Check the FCF record against the connection list */
2206        rc = lpfc_match_fcf_conn_list(phba, new_fcf_record, &boot_flag,
2207                                      &addr_mode, &vlan_id);
2208
2209        /* Log the FCF record information if turned on */
2210        lpfc_sli4_log_fcf_record_info(phba, new_fcf_record, vlan_id,
2211                                      next_fcf_index);
2212
2213        /*
2214         * If the fcf record does not match with connect list entries
2215         * read the next entry; otherwise, this is an eligible FCF
2216         * record for roundrobin FCF failover.
2217         */
2218        if (!rc) {
2219                lpfc_sli4_fcf_pri_list_del(phba,
2220                                        bf_get(lpfc_fcf_record_fcf_index,
2221                                               new_fcf_record));
2222                lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
2223                                "2781 FCF (x%x) failed connection "
2224                                "list check: (x%x/x%x/%x)\n",
2225                                bf_get(lpfc_fcf_record_fcf_index,
2226                                       new_fcf_record),
2227                                bf_get(lpfc_fcf_record_fcf_avail,
2228                                       new_fcf_record),
2229                                bf_get(lpfc_fcf_record_fcf_valid,
2230                                       new_fcf_record),
2231                                bf_get(lpfc_fcf_record_fcf_sol,
2232                                       new_fcf_record));
2233                if ((phba->fcf.fcf_flag & FCF_IN_USE) &&
2234                    lpfc_sli4_fcf_record_match(phba, &phba->fcf.current_rec,
2235                    new_fcf_record, LPFC_FCOE_IGNORE_VID)) {
2236                        if (bf_get(lpfc_fcf_record_fcf_index, new_fcf_record) !=
2237                            phba->fcf.current_rec.fcf_indx) {
2238                                lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
2239                                        "2862 FCF (x%x) matches property "
2240                                        "of in-use FCF (x%x)\n",
2241                                        bf_get(lpfc_fcf_record_fcf_index,
2242                                               new_fcf_record),
2243                                        phba->fcf.current_rec.fcf_indx);
2244                                goto read_next_fcf;
2245                        }
2246                        /*
2247                         * In case the current in-use FCF record becomes
2248                         * invalid/unavailable during FCF discovery that
2249                         * was not triggered by fast FCF failover process,
2250                         * treat it as fast FCF failover.
2251                         */
2252                        if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND) &&
2253                            !(phba->fcf.fcf_flag & FCF_REDISC_FOV)) {
2254                                lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
2255                                                "2835 Invalid in-use FCF "
2256                                                "(x%x), enter FCF failover "
2257                                                "table scan.\n",
2258                                                phba->fcf.current_rec.fcf_indx);
2259                                spin_lock_irq(&phba->hbalock);
2260                                phba->fcf.fcf_flag |= FCF_REDISC_FOV;
2261                                spin_unlock_irq(&phba->hbalock);
2262                                lpfc_sli4_mbox_cmd_free(phba, mboxq);
2263                                lpfc_sli4_fcf_scan_read_fcf_rec(phba,
2264                                                LPFC_FCOE_FCF_GET_FIRST);
2265                                return;
2266                        }
2267                }
2268                goto read_next_fcf;
2269        } else {
2270                fcf_index = bf_get(lpfc_fcf_record_fcf_index, new_fcf_record);
2271                rc = lpfc_sli4_fcf_pri_list_add(phba, fcf_index,
2272                                                        new_fcf_record);
2273                if (rc)
2274                        goto read_next_fcf;
2275        }
2276
2277        /*
2278         * If this is not the first FCF discovery of the HBA, use the last
2279         * FCF record for the discovery. The condition that a rescan
2280         * matches the in-use FCF record: fabric name, switch name, mac
2281         * address, and vlan_id.
2282         */
2283        spin_lock_irq(&phba->hbalock);
2284        if (phba->fcf.fcf_flag & FCF_IN_USE) {
2285                if (phba->cfg_fcf_failover_policy == LPFC_FCF_FOV &&
2286                        lpfc_sli4_fcf_record_match(phba, &phba->fcf.current_rec,
2287                    new_fcf_record, vlan_id)) {
2288                        if (bf_get(lpfc_fcf_record_fcf_index, new_fcf_record) ==
2289                            phba->fcf.current_rec.fcf_indx) {
2290                                phba->fcf.fcf_flag |= FCF_AVAILABLE;
2291                                if (phba->fcf.fcf_flag & FCF_REDISC_PEND)
2292                                        /* Stop FCF redisc wait timer */
2293                                        __lpfc_sli4_stop_fcf_redisc_wait_timer(
2294                                                                        phba);
2295                                else if (phba->fcf.fcf_flag & FCF_REDISC_FOV)
2296                                        /* Fast failover, mark completed */
2297                                        phba->fcf.fcf_flag &= ~FCF_REDISC_FOV;
2298                                spin_unlock_irq(&phba->hbalock);
2299                                lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
2300                                                "2836 New FCF matches in-use "
2301                                                "FCF (x%x), port_state:x%x, "
2302                                                "fc_flag:x%x\n",
2303                                                phba->fcf.current_rec.fcf_indx,
2304                                                phba->pport->port_state,
2305                                                phba->pport->fc_flag);
2306                                goto out;
2307                        } else
2308                                lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
2309                                        "2863 New FCF (x%x) matches "
2310                                        "property of in-use FCF (x%x)\n",
2311                                        bf_get(lpfc_fcf_record_fcf_index,
2312                                               new_fcf_record),
2313                                        phba->fcf.current_rec.fcf_indx);
2314                }
2315                /*
2316                 * Read next FCF record from HBA searching for the matching
2317                 * with in-use record only if not during the fast failover
2318                 * period. In case of fast failover period, it shall try to
2319                 * determine whether the FCF record just read should be the
2320                 * next candidate.
2321                 */
2322                if (!(phba->fcf.fcf_flag & FCF_REDISC_FOV)) {
2323                        spin_unlock_irq(&phba->hbalock);
2324                        goto read_next_fcf;
2325                }
2326        }
2327        /*
2328         * Update on failover FCF record only if it's in FCF fast-failover
2329         * period; otherwise, update on current FCF record.
2330         */
2331        if (phba->fcf.fcf_flag & FCF_REDISC_FOV)
2332                fcf_rec = &phba->fcf.failover_rec;
2333        else
2334                fcf_rec = &phba->fcf.current_rec;
2335
2336        if (phba->fcf.fcf_flag & FCF_AVAILABLE) {
2337                /*
2338                 * If the driver FCF record does not have boot flag
2339                 * set and new hba fcf record has boot flag set, use
2340                 * the new hba fcf record.
2341                 */
2342                if (boot_flag && !(fcf_rec->flag & BOOT_ENABLE)) {
2343                        /* Choose this FCF record */
2344                        lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
2345                                        "2837 Update current FCF record "
2346                                        "(x%x) with new FCF record (x%x)\n",
2347                                        fcf_rec->fcf_indx,
2348                                        bf_get(lpfc_fcf_record_fcf_index,
2349                                        new_fcf_record));
2350                        __lpfc_update_fcf_record(phba, fcf_rec, new_fcf_record,
2351                                        addr_mode, vlan_id, BOOT_ENABLE);
2352                        spin_unlock_irq(&phba->hbalock);
2353                        goto read_next_fcf;
2354                }
2355                /*
2356                 * If the driver FCF record has boot flag set and the
2357                 * new hba FCF record does not have boot flag, read
2358                 * the next FCF record.
2359                 */
2360                if (!boot_flag && (fcf_rec->flag & BOOT_ENABLE)) {
2361                        spin_unlock_irq(&phba->hbalock);
2362                        goto read_next_fcf;
2363                }
2364                /*
2365                 * If the new hba FCF record has lower priority value
2366                 * than the driver FCF record, use the new record.
2367                 */
2368                if (new_fcf_record->fip_priority < fcf_rec->priority) {
2369                        /* Choose the new FCF record with lower priority */
2370                        lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
2371                                        "2838 Update current FCF record "
2372                                        "(x%x) with new FCF record (x%x)\n",
2373                                        fcf_rec->fcf_indx,
2374                                        bf_get(lpfc_fcf_record_fcf_index,
2375                                               new_fcf_record));
2376                        __lpfc_update_fcf_record(phba, fcf_rec, new_fcf_record,
2377                                        addr_mode, vlan_id, 0);
2378                        /* Reset running random FCF selection count */
2379                        phba->fcf.eligible_fcf_cnt = 1;
2380                } else if (new_fcf_record->fip_priority == fcf_rec->priority) {
2381                        /* Update running random FCF selection count */
2382                        phba->fcf.eligible_fcf_cnt++;
2383                        select_new_fcf = lpfc_sli4_new_fcf_random_select(phba,
2384                                                phba->fcf.eligible_fcf_cnt);
2385                        if (select_new_fcf) {
2386                                lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
2387                                        "2839 Update current FCF record "
2388                                        "(x%x) with new FCF record (x%x)\n",
2389                                        fcf_rec->fcf_indx,
2390                                        bf_get(lpfc_fcf_record_fcf_index,
2391                                               new_fcf_record));
2392                                /* Choose the new FCF by random selection */
2393                                __lpfc_update_fcf_record(phba, fcf_rec,
2394                                                         new_fcf_record,
2395                                                         addr_mode, vlan_id, 0);
2396                        }
2397                }
2398                spin_unlock_irq(&phba->hbalock);
2399                goto read_next_fcf;
2400        }
2401        /*
2402         * This is the first suitable FCF record, choose this record for
2403         * initial best-fit FCF.
2404         */
2405        if (fcf_rec) {
2406                lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
2407                                "2840 Update initial FCF candidate "
2408                                "with FCF (x%x)\n",
2409                                bf_get(lpfc_fcf_record_fcf_index,
2410                                       new_fcf_record));
2411                __lpfc_update_fcf_record(phba, fcf_rec, new_fcf_record,
2412                                         addr_mode, vlan_id, (boot_flag ?
2413                                         BOOT_ENABLE : 0));
2414                phba->fcf.fcf_flag |= FCF_AVAILABLE;
2415                /* Setup initial running random FCF selection count */
2416                phba->fcf.eligible_fcf_cnt = 1;
2417                /* Seeding the random number generator for random selection */
2418                seed = (uint32_t)(0xFFFFFFFF & jiffies);
2419                prandom_seed(seed);
2420        }
2421        spin_unlock_irq(&phba->hbalock);
2422        goto read_next_fcf;
2423
2424read_next_fcf:
2425        lpfc_sli4_mbox_cmd_free(phba, mboxq);
2426        if (next_fcf_index == LPFC_FCOE_FCF_NEXT_NONE || next_fcf_index == 0) {
2427                if (phba->fcf.fcf_flag & FCF_REDISC_FOV) {
2428                        /*
2429                         * Case of FCF fast failover scan
2430                         */
2431
2432                        /*
2433                         * It has not found any suitable FCF record; cancel
2434                         * the FCF scan in progress and do nothing.
2435                         */
2436                        if (!(phba->fcf.failover_rec.flag & RECORD_VALID)) {
2437                                lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
2438                                               "2782 No suitable FCF found: "
2439                                               "(x%x/x%x)\n",
2440                                               phba->fcoe_eventtag_at_fcf_scan,
2441                                               bf_get(lpfc_fcf_record_fcf_index,
2442                                                      new_fcf_record));
2443                                spin_lock_irq(&phba->hbalock);
2444                                if (phba->hba_flag & HBA_DEVLOSS_TMO) {
2445                                        phba->hba_flag &= ~FCF_TS_INPROG;
2446                                        spin_unlock_irq(&phba->hbalock);
2447                                        /* Unregister in-use FCF and rescan */
2448                                        lpfc_printf_log(phba, KERN_INFO,
2449                                                        LOG_FIP,
2450                                                        "2864 On devloss tmo "
2451                                                        "unreg in-use FCF and "
2452                                                        "rescan FCF table\n");
2453                                        lpfc_unregister_fcf_rescan(phba);
2454                                        return;
2455                                }
2456                                /*
2457                                 * Let next new FCF event trigger fast failover
2458                                 */
2459                                phba->hba_flag &= ~FCF_TS_INPROG;
2460                                spin_unlock_irq(&phba->hbalock);
2461                                return;
2462                        }
2463                        /*
2464                         * A suitable FCF record that differs from the
2465                         * in-use FCF record was found: unregister the
2466                         * in-use FCF record, replace it with the new
2467                         * FCF record, mark FCF fast failover completed,
2468                         * and then start registering the new FCF
2469                         * record.
2470                         */
2471
2472                        /* Unregister the current in-use FCF record */
2473                        lpfc_unregister_fcf(phba);
2474
2475                        /* Replace in-use record with the new record */
2476                        lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
2477                                        "2842 Replace in-use FCF (x%x) "
2478                                        "with failover FCF (x%x)\n",
2479                                        phba->fcf.current_rec.fcf_indx,
2480                                        phba->fcf.failover_rec.fcf_indx);
2481                        memcpy(&phba->fcf.current_rec,
2482                               &phba->fcf.failover_rec,
2483                               sizeof(struct lpfc_fcf_rec));
2484                        /*
2485                         * Mark the fast FCF failover rediscovery completed
2486                         * and the start of the first round of the roundrobin
2487                         * FCF failover.
2488                         */
2489                        spin_lock_irq(&phba->hbalock);
2490                        phba->fcf.fcf_flag &= ~FCF_REDISC_FOV;
2491                        spin_unlock_irq(&phba->hbalock);
2492                        /* Register to the new FCF record */
2493                        lpfc_register_fcf(phba);
2494                } else {
2495                        /*
2496                         * During the transition period to fast FCF failover, do
2497                         * nothing when the search reaches the end of the FCF table.
2498                         */
2499                        if ((phba->fcf.fcf_flag & FCF_REDISC_EVT) ||
2500                            (phba->fcf.fcf_flag & FCF_REDISC_PEND))
2501                                return;
2502
2503                        if (phba->cfg_fcf_failover_policy == LPFC_FCF_FOV &&
2504                                phba->fcf.fcf_flag & FCF_IN_USE) {
2505                                /*
2506                                 * If the current in-use FCF record no
2507                                 * longer exists during FCF discovery that
2508                                 * was not triggered by the fast FCF failover
2509                                 * process, treat it as a fast FCF failover.
2510                                 */
2511                                lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
2512                                                "2841 In-use FCF record (x%x) "
2513                                                "not reported, entering fast "
2514                                                "FCF failover mode scanning.\n",
2515                                                phba->fcf.current_rec.fcf_indx);
2516                                spin_lock_irq(&phba->hbalock);
2517                                phba->fcf.fcf_flag |= FCF_REDISC_FOV;
2518                                spin_unlock_irq(&phba->hbalock);
2519                                lpfc_sli4_fcf_scan_read_fcf_rec(phba,
2520                                                LPFC_FCOE_FCF_GET_FIRST);
2521                                return;
2522                        }
2523                        /* Register to the new FCF record */
2524                        lpfc_register_fcf(phba);
2525                }
2526        } else
2527                lpfc_sli4_fcf_scan_read_fcf_rec(phba, next_fcf_index);
2528        return;
2529
2530out:
2531        lpfc_sli4_mbox_cmd_free(phba, mboxq);
2532        lpfc_register_fcf(phba);
2533
2534        return;
2535}
2536
2537/**
2538 * lpfc_mbx_cmpl_fcf_rr_read_fcf_rec - fcf roundrobin read_fcf mbox cmpl hdler
2539 * @phba: pointer to lpfc hba data structure.
2540 * @mboxq: pointer to mailbox object.
2541 *
2542 * This is the callback function for the FLOGI-failure roundrobin FCF
2543 * failover read FCF record mailbox command from the eligible FCF record
2544 * bmask. If the FCF record read back is not valid/available, it falls
2545 * back to retrying FLOGI to the currently registered FCF again.
2546 * Otherwise, if the FCF read back is valid and available, it sets the
2547 * newly read FCF record as the failover FCF record, unregisters the
2548 * currently registered FCF record, copies the failover FCF record to the
2549 * current FCF record, and then registers the current FCF record before
2550 * proceeding to try FLOGI on the new failover FCF.
2551 */
2552void
2553lpfc_mbx_cmpl_fcf_rr_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
2554{
2555        struct fcf_record *new_fcf_record;
2556        uint32_t boot_flag, addr_mode;
2557        uint16_t next_fcf_index, fcf_index;
2558        uint16_t current_fcf_index;
2559        uint16_t vlan_id;
2560        int rc;
2561
2562        /* If link state is not up, stop the roundrobin failover process */
2563        if (phba->link_state < LPFC_LINK_UP) {
2564                spin_lock_irq(&phba->hbalock);
2565                phba->fcf.fcf_flag &= ~FCF_DISCOVERY;
2566                phba->hba_flag &= ~FCF_RR_INPROG;
2567                spin_unlock_irq(&phba->hbalock);
2568                goto out;
2569        }
2570
2571        /* Parse the FCF record from the non-embedded mailbox command */
2572        new_fcf_record = lpfc_sli4_fcf_rec_mbox_parse(phba, mboxq,
2573                                                      &next_fcf_index);
2574        if (!new_fcf_record) {
2575                lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
2576                                "2766 Mailbox command READ_FCF_RECORD "
2577                                "failed to retrieve a FCF record. "
2578                                "hba_flg x%x fcf_flg x%x\n", phba->hba_flag,
2579                                phba->fcf.fcf_flag);
2580                lpfc_unregister_fcf_rescan(phba);
2581                goto out;
2582        }
2583
2584        /* Get the needed parameters from FCF record */
2585        rc = lpfc_match_fcf_conn_list(phba, new_fcf_record, &boot_flag,
2586                                      &addr_mode, &vlan_id);
2587
2588        /* Log the FCF record information if turned on */
2589        lpfc_sli4_log_fcf_record_info(phba, new_fcf_record, vlan_id,
2590                                      next_fcf_index);
2591
2592        fcf_index = bf_get(lpfc_fcf_record_fcf_index, new_fcf_record);
2593        if (!rc) {
2594                lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
2595                                "2848 Remove ineligible FCF (x%x) "
2596                                "from roundrobin bmask\n", fcf_index);
2597                /* Clear roundrobin bmask bit for ineligible FCF */
2598                lpfc_sli4_fcf_rr_index_clear(phba, fcf_index);
2599                /* Perform next round of roundrobin FCF failover */
2600                fcf_index = lpfc_sli4_fcf_rr_next_index_get(phba);
2601                rc = lpfc_sli4_fcf_rr_next_proc(phba->pport, fcf_index);
2602                if (rc)
2603                        goto out;
2604                goto error_out;
2605        }
2606
2607        if (fcf_index == phba->fcf.current_rec.fcf_indx) {
2608                lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
2609                                "2760 Perform FLOGI roundrobin FCF failover: "
2610                                "FCF (x%x) back to FCF (x%x)\n",
2611                                phba->fcf.current_rec.fcf_indx, fcf_index);
2612                /* Wait 500 ms before retrying FLOGI to current FCF */
2613                msleep(500);
2614                lpfc_issue_init_vfi(phba->pport);
2615                goto out;
2616        }
2617
2618        /* Upload new FCF record to the failover FCF record */
2619        lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
2620                        "2834 Update current FCF (x%x) with new FCF (x%x)\n",
2621                        phba->fcf.failover_rec.fcf_indx, fcf_index);
2622        spin_lock_irq(&phba->hbalock);
2623        __lpfc_update_fcf_record(phba, &phba->fcf.failover_rec,
2624                                 new_fcf_record, addr_mode, vlan_id,
2625                                 (boot_flag ? BOOT_ENABLE : 0));
2626        spin_unlock_irq(&phba->hbalock);
2627
2628        current_fcf_index = phba->fcf.current_rec.fcf_indx;
2629
2630        /* Unregister the current in-use FCF record */
2631        lpfc_unregister_fcf(phba);
2632
2633        /* Replace in-use record with the new record */
2634        memcpy(&phba->fcf.current_rec, &phba->fcf.failover_rec,
2635               sizeof(struct lpfc_fcf_rec));
2636
2637        lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
2638                        "2783 Perform FLOGI roundrobin FCF failover: FCF "
2639                        "(x%x) to FCF (x%x)\n", current_fcf_index, fcf_index);
2640
2641error_out:
2642        lpfc_register_fcf(phba);
2643out:
2644        lpfc_sli4_mbox_cmd_free(phba, mboxq);
2645}
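
/*
 * Illustrative sketch only (kept out of the build with #if 0): how an
 * FLOGI-failure path might drive roundrobin failover with the helpers used
 * by the handler above.  The function name is hypothetical and the error
 * handling that the real driver performs is elided.
 */
#if 0
static void example_fcf_rr_advance(struct lpfc_hba *phba, uint16_t failed_fcf)
{
        uint16_t next_fcf;
        int rc;

        /* Drop the FCF that just failed from the roundrobin bmask */
        lpfc_sli4_fcf_rr_index_clear(phba, failed_fcf);

        /* Pick the next eligible index and start failover processing on it */
        next_fcf = lpfc_sli4_fcf_rr_next_index_get(phba);
        rc = lpfc_sli4_fcf_rr_next_proc(phba->pport, next_fcf);
        if (rc)
                return; /* the real driver handles this failure case */
}
#endif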
2646
2647/**
2648 * lpfc_mbx_cmpl_read_fcf_rec - read fcf completion handler.
2649 * @phba: pointer to lpfc hba data structure.
2650 * @mboxq: pointer to mailbox object.
2651 *
2652 * This is the callback function of the read FCF record mailbox command for
2653 * updating the eligible FCF bmask for FLOGI-failure roundrobin FCF
2654 * failover when a new FCF event happens. If the FCF record read back is
2655 * valid/available and passes the connection list check, it updates
2656 * the bmask for the eligible FCF record for roundrobin failover.
2657 */
2658void
2659lpfc_mbx_cmpl_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
2660{
2661        struct fcf_record *new_fcf_record;
2662        uint32_t boot_flag, addr_mode;
2663        uint16_t fcf_index, next_fcf_index;
2664        uint16_t vlan_id;
2665        int rc;
2666
2667        /* If link state is not up, no need to proceed */
2668        if (phba->link_state < LPFC_LINK_UP)
2669                goto out;
2670
2671        /* If FCF discovery period is over, no need to proceed */
2672        if (!(phba->fcf.fcf_flag & FCF_DISCOVERY))
2673                goto out;
2674
2675        /* Parse the FCF record from the non-embedded mailbox command */
2676        new_fcf_record = lpfc_sli4_fcf_rec_mbox_parse(phba, mboxq,
2677                                                      &next_fcf_index);
2678        if (!new_fcf_record) {
2679                lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
2680                                "2767 Mailbox command READ_FCF_RECORD "
2681                                "failed to retrieve a FCF record.\n");
2682                goto out;
2683        }
2684
2685        /* Check the connection list for eligibility */
2686        rc = lpfc_match_fcf_conn_list(phba, new_fcf_record, &boot_flag,
2687                                      &addr_mode, &vlan_id);
2688
2689        /* Log the FCF record information if turned on */
2690        lpfc_sli4_log_fcf_record_info(phba, new_fcf_record, vlan_id,
2691                                      next_fcf_index);
2692
2693        if (!rc)
2694                goto out;
2695
2696        /* Update the eligible FCF record index bmask */
2697        fcf_index = bf_get(lpfc_fcf_record_fcf_index, new_fcf_record);
2698
2699        rc = lpfc_sli4_fcf_pri_list_add(phba, fcf_index, new_fcf_record);
2700
2701out:
2702        lpfc_sli4_mbox_cmd_free(phba, mboxq);
2703}
2704
2705/**
2706 * lpfc_init_vfi_cmpl - Completion handler for init_vfi mbox command.
2707 * @phba: pointer to lpfc hba data structure.
2708 * @mboxq: pointer to mailbox data structure.
2709 *
2710 * This function handles the completion of the init_vfi mailbox command.
2711 */
2712void
2713lpfc_init_vfi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
2714{
2715        struct lpfc_vport *vport = mboxq->vport;
2716
2717        /*
2718         * VFI is not supported on interface type 0, so just do the FLOGI.
2719         * Also continue if the VFI is in use - just use the same one.
2720         */
2721        if (mboxq->u.mb.mbxStatus &&
2722            (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
2723                        LPFC_SLI_INTF_IF_TYPE_0) &&
2724            mboxq->u.mb.mbxStatus != MBX_VFI_IN_USE) {
2725                lpfc_printf_vlog(vport, KERN_ERR,
2726                                LOG_MBOX,
2727                                "2891 Init VFI mailbox failed 0x%x\n",
2728                                mboxq->u.mb.mbxStatus);
2729                mempool_free(mboxq, phba->mbox_mem_pool);
2730                lpfc_vport_set_state(vport, FC_VPORT_FAILED);
2731                return;
2732        }
2733
2734        lpfc_initial_flogi(vport);
2735        mempool_free(mboxq, phba->mbox_mem_pool);
2736        return;
2737}
2738
2739/**
2740 * lpfc_issue_init_vfi - Issue init_vfi mailbox command.
2741 * @vport: pointer to lpfc_vport data structure.
2742 *
2743 * This function issues an init_vfi mailbox command to initialize the VFI and
2744 * VPI for the physical port.
2745 */
2746void
2747lpfc_issue_init_vfi(struct lpfc_vport *vport)
2748{
2749        LPFC_MBOXQ_t *mboxq;
2750        int rc;
2751        struct lpfc_hba *phba = vport->phba;
2752
2753        mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
2754        if (!mboxq) {
2755                lpfc_printf_vlog(vport, KERN_ERR,
2756                        LOG_MBOX, "2892 Failed to allocate "
2757                        "init_vfi mailbox\n");
2758                return;
2759        }
2760        lpfc_init_vfi(mboxq, vport);
2761        mboxq->mbox_cmpl = lpfc_init_vfi_cmpl;
2762        rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
2763        if (rc == MBX_NOT_FINISHED) {
2764                lpfc_printf_vlog(vport, KERN_ERR,
2765                        LOG_MBOX, "2893 Failed to issue init_vfi mailbox\n");
2766                mempool_free(mboxq, vport->phba->mbox_mem_pool);
2767        }
2768}
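
/*
 * The function above follows the non-blocking mailbox idiom used throughout
 * this file.  A minimal sketch of the pattern (kept out of the build with
 * #if 0; example_cmpl is a hypothetical completion handler):
 */
#if 0
static void example_issue_mbox(struct lpfc_vport *vport)
{
        struct lpfc_hba *phba = vport->phba;
        LPFC_MBOXQ_t *mboxq;

        /* 1. Allocate the mailbox object from the HBA mempool */
        mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
        if (!mboxq)
                return;

        /* 2. Build the command, then attach vport and completion handler */
        lpfc_init_vfi(mboxq, vport);
        mboxq->vport = vport;
        mboxq->mbox_cmpl = example_cmpl;

        /* 3. Issue without blocking; the caller frees only on failure */
        if (lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT) == MBX_NOT_FINISHED)
                mempool_free(mboxq, phba->mbox_mem_pool);
        /* On success, example_cmpl() frees the mailbox when it completes */
}
#endif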
2769
2770/**
2771 * lpfc_init_vpi_cmpl - Completion handler for init_vpi mbox command.
2772 * @phba: pointer to lpfc hba data structure.
2773 * @mboxq: pointer to mailbox data structure.
2774 *
2775 * This function handles the completion of the init_vpi mailbox command.
2776 */
2777void
2778lpfc_init_vpi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
2779{
2780        struct lpfc_vport *vport = mboxq->vport;
2781        struct lpfc_nodelist *ndlp;
2782        struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2783
2784        if (mboxq->u.mb.mbxStatus) {
2785                lpfc_printf_vlog(vport, KERN_ERR,
2786                                LOG_MBOX,
2787                                "2609 Init VPI mailbox failed 0x%x\n",
2788                                mboxq->u.mb.mbxStatus);
2789                mempool_free(mboxq, phba->mbox_mem_pool);
2790                lpfc_vport_set_state(vport, FC_VPORT_FAILED);
2791                return;
2792        }
2793        spin_lock_irq(shost->host_lock);
2794        vport->fc_flag &= ~FC_VPORT_NEEDS_INIT_VPI;
2795        spin_unlock_irq(shost->host_lock);
2796
2797        /* If this port is physical port or FDISC is done, do reg_vpi */
2798        if ((phba->pport == vport) || (vport->port_state == LPFC_FDISC)) {
2799                ndlp = lpfc_findnode_did(vport, Fabric_DID);
2800                if (!ndlp)
2801                        lpfc_printf_vlog(vport, KERN_ERR,
2802                                LOG_DISCOVERY,
2803                                "2731 Cannot find fabric "
2804                                "controller node\n");
2805                else
2806                        lpfc_register_new_vport(phba, vport, ndlp);
2807                mempool_free(mboxq, phba->mbox_mem_pool);
2808                return;
2809        }
2810
2811        if (phba->link_flag & LS_NPIV_FAB_SUPPORTED)
2812                lpfc_initial_fdisc(vport);
2813        else {
2814                lpfc_vport_set_state(vport, FC_VPORT_NO_FABRIC_SUPP);
2815                lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
2816                                 "2606 No NPIV Fabric support\n");
2817        }
2818        mempool_free(mboxq, phba->mbox_mem_pool);
2819        return;
2820}
2821
2822/**
2823 * lpfc_issue_init_vpi - Issue init_vpi mailbox command.
2824 * @vport: pointer to lpfc_vport data structure.
2825 *
2826 * This function issues an init_vpi mailbox command to initialize the
2827 * VPI for the vport.
2828 */
2829void
2830lpfc_issue_init_vpi(struct lpfc_vport *vport)
2831{
2832        LPFC_MBOXQ_t *mboxq;
2833        int rc, vpi;
2834
2835        if ((vport->port_type != LPFC_PHYSICAL_PORT) && (!vport->vpi)) {
2836                vpi = lpfc_alloc_vpi(vport->phba);
2837                if (!vpi) {
2838                        lpfc_printf_vlog(vport, KERN_ERR,
2839                                         LOG_MBOX,
2840                                         "3303 Failed to obtain vport vpi\n");
2841                        lpfc_vport_set_state(vport, FC_VPORT_FAILED);
2842                        return;
2843                }
2844                vport->vpi = vpi;
2845        }
2846
2847        mboxq = mempool_alloc(vport->phba->mbox_mem_pool, GFP_KERNEL);
2848        if (!mboxq) {
2849                lpfc_printf_vlog(vport, KERN_ERR,
2850                        LOG_MBOX, "2607 Failed to allocate "
2851                        "init_vpi mailbox\n");
2852                return;
2853        }
2854        lpfc_init_vpi(vport->phba, mboxq, vport->vpi);
2855        mboxq->vport = vport;
2856        mboxq->mbox_cmpl = lpfc_init_vpi_cmpl;
2857        rc = lpfc_sli_issue_mbox(vport->phba, mboxq, MBX_NOWAIT);
2858        if (rc == MBX_NOT_FINISHED) {
2859                lpfc_printf_vlog(vport, KERN_ERR,
2860                        LOG_MBOX, "2608 Failed to issue init_vpi mailbox\n");
2861                mempool_free(mboxq, vport->phba->mbox_mem_pool);
2862        }
2863}
2864
2865/**
2866 * lpfc_start_fdiscs - send FDISCs for each vport on this port.
2867 * @phba: pointer to lpfc hba data structure.
2868 *
2869 * This function loops through the list of vports on the @phba and issues an
2870 * FDISC if possible.
2871 */
2872void
2873lpfc_start_fdiscs(struct lpfc_hba *phba)
2874{
2875        struct lpfc_vport **vports;
2876        int i;
2877
2878        vports = lpfc_create_vport_work_array(phba);
2879        if (vports != NULL) {
2880                for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
2881                        if (vports[i]->port_type == LPFC_PHYSICAL_PORT)
2882                                continue;
2883                        /* There is no vpi available for this vport */
2884                        if (vports[i]->vpi > phba->max_vpi) {
2885                                lpfc_vport_set_state(vports[i],
2886                                                     FC_VPORT_FAILED);
2887                                continue;
2888                        }
2889                        if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
2890                                lpfc_vport_set_state(vports[i],
2891                                                     FC_VPORT_LINKDOWN);
2892                                continue;
2893                        }
2894                        if (vports[i]->fc_flag & FC_VPORT_NEEDS_INIT_VPI) {
2895                                lpfc_issue_init_vpi(vports[i]);
2896                                continue;
2897                        }
2898                        if (phba->link_flag & LS_NPIV_FAB_SUPPORTED)
2899                                lpfc_initial_fdisc(vports[i]);
2900                        else {
2901                                lpfc_vport_set_state(vports[i],
2902                                                     FC_VPORT_NO_FABRIC_SUPP);
2903                                lpfc_printf_vlog(vports[i], KERN_ERR,
2904                                                 LOG_ELS,
2905                                                 "0259 No NPIV "
2906                                                 "Fabric support\n");
2907                        }
2908                }
2909        }
2910        lpfc_destroy_vport_work_array(phba, vports);
2911}
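
/*
 * lpfc_start_fdiscs() above uses the driver's standard vport-iteration
 * idiom.  A minimal sketch of just that idiom (kept out of the build with
 * #if 0): lpfc_create_vport_work_array() snapshots the vport list and
 * lpfc_destroy_vport_work_array() releases it again.
 */
#if 0
static void example_for_each_vport(struct lpfc_hba *phba)
{
        struct lpfc_vport **vports;
        int i;

        vports = lpfc_create_vport_work_array(phba);
        if (vports != NULL)
                for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
                        /* ... act on vports[i] ... */
                }
        lpfc_destroy_vport_work_array(phba, vports);
}
#endif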
2912
2913void
2914lpfc_mbx_cmpl_reg_vfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
2915{
2916        struct lpfc_dmabuf *dmabuf = mboxq->context1;
2917        struct lpfc_vport *vport = mboxq->vport;
2918        struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2919
2920        /*
2921         * VFI not supported for interface type 0, so ignore any mailbox
2922         * error (except VFI in use) and continue with the discovery.
2923         */
2924        if (mboxq->u.mb.mbxStatus &&
2925            (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
2926                        LPFC_SLI_INTF_IF_TYPE_0) &&
2927            mboxq->u.mb.mbxStatus != MBX_VFI_IN_USE) {
2928                lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
2929                         "2018 REG_VFI mbxStatus error x%x "
2930                         "HBA state x%x\n",
2931                         mboxq->u.mb.mbxStatus, vport->port_state);
2932                if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
2933                        /* FLOGI failed, use loop map to make discovery list */
2934                        lpfc_disc_list_loopmap(vport);
2935                        /* Start discovery */
2936                        lpfc_disc_start(vport);
2937                        goto out_free_mem;
2938                }
2939                lpfc_vport_set_state(vport, FC_VPORT_FAILED);
2940                goto out_free_mem;
2941        }
2942
2943        /* If the VFI is already registered, there is nothing else to do,
2944         * unless this was a VFI update and we are in PT2PT mode; then
2945         * we should drop through to set the port state to ready.
2946         */
2947        if (vport->fc_flag & FC_VFI_REGISTERED)
2948                if (!(phba->sli_rev == LPFC_SLI_REV4 &&
2949                      vport->fc_flag & FC_PT2PT))
2950                        goto out_free_mem;
2951
2952        /* The VPI is implicitly registered when the VFI is registered */
2953        spin_lock_irq(shost->host_lock);
2954        vport->vpi_state |= LPFC_VPI_REGISTERED;
2955        vport->fc_flag |= FC_VFI_REGISTERED;
2956        vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI;
2957        vport->fc_flag &= ~FC_VPORT_NEEDS_INIT_VPI;
2958        spin_unlock_irq(shost->host_lock);
2959
2960        /* In the case of an SLI4 FC loopback test, we are ready */
2961        if ((phba->sli_rev == LPFC_SLI_REV4) &&
2962            (phba->link_flag & LS_LOOPBACK_MODE)) {
2963                phba->link_state = LPFC_HBA_READY;
2964                goto out_free_mem;
2965        }
2966
2967        lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
2968                         "3313 cmpl reg vfi  port_state:%x fc_flag:%x myDid:%x "
2969                         "alpacnt:%d LinkState:%x topology:%x\n",
2970                         vport->port_state, vport->fc_flag, vport->fc_myDID,
2971                         vport->phba->alpa_map[0],
2972                         phba->link_state, phba->fc_topology);
2973
2974        if (vport->port_state == LPFC_FABRIC_CFG_LINK) {
2975                /*
2976                 * For private loop or for NPort pt2pt,
2977                 * just start discovery and we are done.
2978                 */
2979                if ((vport->fc_flag & FC_PT2PT) ||
2980                    ((phba->fc_topology == LPFC_TOPOLOGY_LOOP) &&
2981                    !(vport->fc_flag & FC_PUBLIC_LOOP))) {
2982
2983                        /* Use loop map to make discovery list */
2984                        lpfc_disc_list_loopmap(vport);
2985                        /* Start discovery */
2986                        if (vport->fc_flag & FC_PT2PT)
2987                                vport->port_state = LPFC_VPORT_READY;
2988                        else
2989                                lpfc_disc_start(vport);
2990                } else {
2991                        lpfc_start_fdiscs(phba);
2992                        lpfc_do_scr_ns_plogi(phba, vport);
2993                }
2994        }
2995
2996out_free_mem:
2997        mempool_free(mboxq, phba->mbox_mem_pool);
2998        if (dmabuf) {
2999                lpfc_mbuf_free(phba, dmabuf->virt, dmabuf->phys);
3000                kfree(dmabuf);
3001        }
3002        return;
3003}
3004
3005static void
3006lpfc_mbx_cmpl_read_sparam(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
3007{
3008        MAILBOX_t *mb = &pmb->u.mb;
3009        struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) pmb->context1;
3010        struct lpfc_vport  *vport = pmb->vport;
3011        struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
3012        struct serv_parm *sp = &vport->fc_sparam;
3013        uint32_t ed_tov;
3014
3015        /* Check for error */
3016        if (mb->mbxStatus) {
3017                /* READ_SPARAM mbox error <mbxStatus> state <hba_state> */
3018                lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
3019                                 "0319 READ_SPARAM mbxStatus error x%x "
3020                                 "hba state x%x>\n",
3021                                 mb->mbxStatus, vport->port_state);
3022                lpfc_linkdown(phba);
3023                goto out;
3024        }
3025
3026        memcpy((uint8_t *) &vport->fc_sparam, (uint8_t *) mp->virt,
3027               sizeof(struct serv_parm));
3028
3029        ed_tov = be32_to_cpu(sp->cmn.e_d_tov);
3030        if (sp->cmn.edtovResolution)    /* E_D_TOV ticks are in nanoseconds */
3031                ed_tov = (ed_tov + 999999) / 1000000;
3032
3033        phba->fc_edtov = ed_tov;
3034        phba->fc_ratov = (2 * ed_tov) / 1000;
3035        if (phba->fc_ratov < FF_DEF_RATOV) {
3036                /* RA_TOV should be at least 10 sec for the initial FLOGI */
3037                phba->fc_ratov = FF_DEF_RATOV;
3038        }
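        /*
         * Worked example of the timeout math above: with edtovResolution
         * set and e_d_tov = 2,000,000,000 ns ticks, ed_tov becomes
         * (2000000000 + 999999) / 1000000 = 2000 ms; fc_ratov is then
         * (2 * 2000) / 1000 = 4 seconds, which the check above raises to
         * the FF_DEF_RATOV floor required for the initial FLOGI.
         */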
3039
3040        lpfc_update_vport_wwn(vport);
3041        fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
3042        if (vport->port_type == LPFC_PHYSICAL_PORT) {
3043                memcpy(&phba->wwnn, &vport->fc_nodename, sizeof(phba->wwnn));
3044                memcpy(&phba->wwpn, &vport->fc_portname, sizeof(phba->wwpn));
3045        }
3046
3047        lpfc_mbuf_free(phba, mp->virt, mp->phys);
3048        kfree(mp);
3049        mempool_free(pmb, phba->mbox_mem_pool);
3050        return;
3051
3052out:
3053        pmb->context1 = NULL;
3054        lpfc_mbuf_free(phba, mp->virt, mp->phys);
3055        kfree(mp);
3056        lpfc_issue_clear_la(phba, vport);
3057        mempool_free(pmb, phba->mbox_mem_pool);
3058        return;
3059}
3060
3061static void
3062lpfc_mbx_process_link_up(struct lpfc_hba *phba, struct lpfc_mbx_read_top *la)
3063{
3064        struct lpfc_vport *vport = phba->pport;
3065        LPFC_MBOXQ_t *sparam_mbox, *cfglink_mbox = NULL;
3066        struct Scsi_Host *shost;
3067        int i;
3068        struct lpfc_dmabuf *mp;
3069        int rc;
3070        struct fcf_record *fcf_record;
3071        uint32_t fc_flags = 0;
3072
3073        spin_lock_irq(&phba->hbalock);
3074        phba->fc_linkspeed = bf_get(lpfc_mbx_read_top_link_spd, la);
3075
3076        if (!(phba->hba_flag & HBA_FCOE_MODE)) {
3077                switch (bf_get(lpfc_mbx_read_top_link_spd, la)) {
3078                case LPFC_LINK_SPEED_1GHZ:
3079                case LPFC_LINK_SPEED_2GHZ:
3080                case LPFC_LINK_SPEED_4GHZ:
3081                case LPFC_LINK_SPEED_8GHZ:
3082                case LPFC_LINK_SPEED_10GHZ:
3083                case LPFC_LINK_SPEED_16GHZ:
3084                case LPFC_LINK_SPEED_32GHZ:
3085                case LPFC_LINK_SPEED_64GHZ:
3086                        break;
3087                default:
3088                        phba->fc_linkspeed = LPFC_LINK_SPEED_UNKNOWN;
3089                        break;
3090                }
3091        }
3092
3093        if (phba->fc_topology &&
3094            phba->fc_topology != bf_get(lpfc_mbx_read_top_topology, la)) {
3095                lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
3096                                "3314 Topology changed was 0x%x is 0x%x\n",
3097                                phba->fc_topology,
3098                                bf_get(lpfc_mbx_read_top_topology, la));
3099                phba->fc_topology_changed = 1;
3100        }
3101
3102        phba->fc_topology = bf_get(lpfc_mbx_read_top_topology, la);
3103        phba->link_flag &= ~LS_NPIV_FAB_SUPPORTED;
3104
3105        shost = lpfc_shost_from_vport(vport);
3106        if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
3107                phba->sli3_options &= ~LPFC_SLI3_NPIV_ENABLED;
3108
3109                /* If NPIV is enabled and this adapter supports NPIV, log
3110                 * a message that NPIV is not supported in this topology.
3111                 */
3112                if (phba->cfg_enable_npiv && phba->max_vpi)
3113                        lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
3114                                "1309 Link Up Event npiv not supported in loop "
3115                                "topology\n");
3116                /* Get Loop Map information */
3117                if (bf_get(lpfc_mbx_read_top_il, la))
3118                        fc_flags |= FC_LBIT;
3119
3120                vport->fc_myDID = bf_get(lpfc_mbx_read_top_alpa_granted, la);
3121                i = la->lilpBde64.tus.f.bdeSize;
3122
3123                if (i == 0) {
3124                        phba->alpa_map[0] = 0;
3125                } else {
3126                        if (vport->cfg_log_verbose & LOG_LINK_EVENT) {
3127                                int numalpa, j, k;
3128                                union {
3129                                        uint8_t pamap[16];
3130                                        struct {
3131                                                uint32_t wd1;
3132                                                uint32_t wd2;
3133                                                uint32_t wd3;
3134                                                uint32_t wd4;
3135                                        } pa;
3136                                } un;
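                                /*
                                 * un aliases 16 ALPA-map bytes with four
                                 * 32-bit words so that each 16-entry chunk
                                 * can be logged as four x%x values below.
                                 */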
3137                                numalpa = phba->alpa_map[0];
3138                                j = 0;
3139                                while (j < numalpa) {
3140                                        memset(un.pamap, 0, 16);
3141                                        for (k = 1; j < numalpa; k++) {
3142                                                un.pamap[k - 1] =
3143                                                        phba->alpa_map[j + 1];
3144                                                j++;
3145                                                if (k == 16)
3146                                                        break;
3147                                        }
3148                                        /* Link Up Event ALPA map */
3149                                        lpfc_printf_log(phba,
3150                                                        KERN_WARNING,
3151                                                        LOG_LINK_EVENT,
3152                                                        "1304 Link Up Event "
3153                                                        "ALPA map Data: x%x "
3154                                                        "x%x x%x x%x\n",
3155                                                        un.pa.wd1, un.pa.wd2,
3156                                                        un.pa.wd3, un.pa.wd4);
3157                                }
3158                        }
3159                }
3160        } else {
3161                if (!(phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)) {
3162                        if (phba->max_vpi && phba->cfg_enable_npiv &&
3163                           (phba->sli_rev >= LPFC_SLI_REV3))
3164                                phba->sli3_options |= LPFC_SLI3_NPIV_ENABLED;
3165                }
3166                vport->fc_myDID = phba->fc_pref_DID;
3167                fc_flags |= FC_LBIT;
3168        }
3169        spin_unlock_irq(&phba->hbalock);
3170
3171        if (fc_flags) {
3172                spin_lock_irq(shost->host_lock);
3173                vport->fc_flag |= fc_flags;
3174                spin_unlock_irq(shost->host_lock);
3175        }
3176
3177        lpfc_linkup(phba);
3178        sparam_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
3179        if (!sparam_mbox)
3180                goto out;
3181
3182        rc = lpfc_read_sparam(phba, sparam_mbox, 0);
3183        if (rc) {
3184                mempool_free(sparam_mbox, phba->mbox_mem_pool);
3185                goto out;
3186        }
3187        sparam_mbox->vport = vport;
3188        sparam_mbox->mbox_cmpl = lpfc_mbx_cmpl_read_sparam;
3189        rc = lpfc_sli_issue_mbox(phba, sparam_mbox, MBX_NOWAIT);
3190        if (rc == MBX_NOT_FINISHED) {
3191                mp = (struct lpfc_dmabuf *) sparam_mbox->context1;
3192                lpfc_mbuf_free(phba, mp->virt, mp->phys);
3193                kfree(mp);
3194                mempool_free(sparam_mbox, phba->mbox_mem_pool);
3195                goto out;
3196        }
3197
3198        if (!(phba->hba_flag & HBA_FCOE_MODE)) {
3199                cfglink_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
3200                if (!cfglink_mbox)
3201                        goto out;
3202                vport->port_state = LPFC_LOCAL_CFG_LINK;
3203                lpfc_config_link(phba, cfglink_mbox);
3204                cfglink_mbox->vport = vport;
3205                cfglink_mbox->mbox_cmpl = lpfc_mbx_cmpl_local_config_link;
3206                rc = lpfc_sli_issue_mbox(phba, cfglink_mbox, MBX_NOWAIT);
3207                if (rc == MBX_NOT_FINISHED) {
3208                        mempool_free(cfglink_mbox, phba->mbox_mem_pool);
3209                        goto out;
3210                }
3211        } else {
3212                vport->port_state = LPFC_VPORT_UNKNOWN;
3213                /*
3214                 * Add the driver's default FCF record at FCF index 0 now. This
3215                 * is a phase-1 implementation that supports FCF index 0 and
3216                 * driver defaults.
3217                 */
3218                if (!(phba->hba_flag & HBA_FIP_SUPPORT)) {
3219                        fcf_record = kzalloc(sizeof(struct fcf_record),
3220                                        GFP_KERNEL);
3221                        if (unlikely(!fcf_record)) {
3222                                lpfc_printf_log(phba, KERN_ERR,
3223                                        LOG_MBOX | LOG_SLI,
3224                                        "2554 Could not allocate memory for "
3225                                        "fcf record\n");
3226                                rc = -ENODEV;
3227                                goto out;
3228                        }
3229
3230                        lpfc_sli4_build_dflt_fcf_record(phba, fcf_record,
3231                                                LPFC_FCOE_FCF_DEF_INDEX);
3232                        rc = lpfc_sli4_add_fcf_record(phba, fcf_record);
3233                        if (unlikely(rc)) {
3234                                lpfc_printf_log(phba, KERN_ERR,
3235                                        LOG_MBOX | LOG_SLI,
3236                                        "2013 Could not manually add FCF "
3237                                        "record 0, status %d\n", rc);
3238                                rc = -ENODEV;
3239                                kfree(fcf_record);
3240                                goto out;
3241                        }
3242                        kfree(fcf_record);
3243                }
3244                /*
3245                 * The driver is expected to do FIP/FCF. Call the port
3246                 * and get the FCF Table.
3247                 */
3248                spin_lock_irq(&phba->hbalock);
3249                if (phba->hba_flag & FCF_TS_INPROG) {
3250                        spin_unlock_irq(&phba->hbalock);
3251                        return;
3252                }
3253                /* This is the initial FCF discovery scan */
3254                phba->fcf.fcf_flag |= FCF_INIT_DISC;
3255                spin_unlock_irq(&phba->hbalock);
3256                lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
3257                                "2778 Start FCF table scan at linkup\n");
3258                rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba,
3259                                                     LPFC_FCOE_FCF_GET_FIRST);
3260                if (rc) {
3261                        spin_lock_irq(&phba->hbalock);
3262                        phba->fcf.fcf_flag &= ~FCF_INIT_DISC;
3263                        spin_unlock_irq(&phba->hbalock);
3264                        goto out;
3265                }
3266                /* Reset FCF roundrobin bmask for new discovery */
3267                lpfc_sli4_clear_fcf_rr_bmask(phba);
3268        }
3269
3270        return;
3271out:
3272        lpfc_vport_set_state(vport, FC_VPORT_FAILED);
3273        lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
3274                         "0263 Discovery Mailbox error: state: 0x%x : %p %p\n",
3275                         vport->port_state, sparam_mbox, cfglink_mbox);
3276        lpfc_issue_clear_la(phba, vport);
3277        return;
3278}
3279
3280static void
3281lpfc_enable_la(struct lpfc_hba *phba)
3282{
3283        uint32_t control;
3284        struct lpfc_sli *psli = &phba->sli;
3285        spin_lock_irq(&phba->hbalock);
3286        psli->sli_flag |= LPFC_PROCESS_LA;
3287        if (phba->sli_rev <= LPFC_SLI_REV3) {
3288                control = readl(phba->HCregaddr);
3289                control |= HC_LAINT_ENA;
3290                writel(control, phba->HCregaddr);
3291                readl(phba->HCregaddr); /* flush */
3292        }
3293        spin_unlock_irq(&phba->hbalock);
3294}
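
/*
 * Note on lpfc_enable_la() above: the trailing readl() after the writel()
 * is the usual MMIO idiom for flushing a posted write, so HC_LAINT_ENA is
 * known to have reached the adapter before the host lock is released.
 */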
3295
3296static void
3297lpfc_mbx_issue_link_down(struct lpfc_hba *phba)
3298{
3299        lpfc_linkdown(phba);
3300        lpfc_enable_la(phba);
3301        lpfc_unregister_unused_fcf(phba);
3302        /* turn on Link Attention interrupts - no CLEAR_LA needed */
3303}
3304
3305
3306/*
3307 * This routine handles processing a READ_TOPOLOGY mailbox
3308 * command upon completion. It is setup in the LPFC_MBOXQ
3309 * as the completion routine when the command is
3310 * handed off to the SLI layer. SLI4 only.
3311 */
3312void
3313lpfc_mbx_cmpl_read_topology(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
3314{
3315        struct lpfc_vport *vport = pmb->vport;
3316        struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
3317        struct lpfc_mbx_read_top *la;
3318        struct lpfc_sli_ring *pring;
3319        MAILBOX_t *mb = &pmb->u.mb;
3320        struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
3321        uint8_t attn_type;
3322
3323        /* Unblock ELS traffic */
3324        pring = lpfc_phba_elsring(phba);
3325        if (pring)
3326                pring->flag &= ~LPFC_STOP_IOCB_EVENT;
3327
3328        /* Check for error */
3329        if (mb->mbxStatus) {
3330                lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
3331                                "1307 READ_LA mbox error x%x state x%x\n",
3332                                mb->mbxStatus, vport->port_state);
3333                lpfc_mbx_issue_link_down(phba);
3334                phba->link_state = LPFC_HBA_ERROR;
3335                goto lpfc_mbx_cmpl_read_topology_free_mbuf;
3336        }
3337
3338        la = (struct lpfc_mbx_read_top *) &pmb->u.mb.un.varReadTop;
3339        attn_type = bf_get(lpfc_mbx_read_top_att_type, la);
3340
3341        memcpy(&phba->alpa_map[0], mp->virt, 128);
3342
3343        spin_lock_irq(shost->host_lock);
3344        if (bf_get(lpfc_mbx_read_top_pb, la))
3345                vport->fc_flag |= FC_BYPASSED_MODE;
3346        else
3347                vport->fc_flag &= ~FC_BYPASSED_MODE;
3348        spin_unlock_irq(shost->host_lock);
3349
3350        if (phba->fc_eventTag <= la->eventTag) {
3351                phba->fc_stat.LinkMultiEvent++;
3352                if (attn_type == LPFC_ATT_LINK_UP)
3353                        if (phba->fc_eventTag != 0)
3354                                lpfc_linkdown(phba);
3355        }
3356
3357        phba->fc_eventTag = la->eventTag;
3358        if (phba->sli_rev < LPFC_SLI_REV4) {
3359                spin_lock_irq(&phba->hbalock);
3360                if (bf_get(lpfc_mbx_read_top_mm, la))
3361                        phba->sli.sli_flag |= LPFC_MENLO_MAINT;
3362                else
3363                        phba->sli.sli_flag &= ~LPFC_MENLO_MAINT;
3364                spin_unlock_irq(&phba->hbalock);
3365        }
3366
3367        phba->link_events++;
3368        if ((attn_type == LPFC_ATT_LINK_UP) &&
3369            !(phba->sli.sli_flag & LPFC_MENLO_MAINT)) {
3370                phba->fc_stat.LinkUp++;
3371                if (phba->link_flag & LS_LOOPBACK_MODE) {
3372                        lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
3373                                        "1306 Link Up Event in loop back mode "
3374                                        "x%x received Data: x%x x%x x%x x%x\n",
3375                                        la->eventTag, phba->fc_eventTag,
3376                                        bf_get(lpfc_mbx_read_top_alpa_granted,
3377                                               la),
3378                                        bf_get(lpfc_mbx_read_top_link_spd, la),
3379                                        phba->alpa_map[0]);
3380                } else {
3381                        lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
3382                                        "1303 Link Up Event x%x received "
3383                                        "Data: x%x x%x x%x x%x x%x x%x %d\n",
3384                                        la->eventTag, phba->fc_eventTag,
3385                                        bf_get(lpfc_mbx_read_top_alpa_granted,
3386                                               la),
3387                                        bf_get(lpfc_mbx_read_top_link_spd, la),
3388                                        phba->alpa_map[0],
3389                                        bf_get(lpfc_mbx_read_top_mm, la),
3390                                        bf_get(lpfc_mbx_read_top_fa, la),
3391                                        phba->wait_4_mlo_maint_flg);
3392                }
3393                lpfc_mbx_process_link_up(phba, la);
3394        } else if (attn_type == LPFC_ATT_LINK_DOWN ||
3395                   attn_type == LPFC_ATT_UNEXP_WWPN) {
3396                phba->fc_stat.LinkDown++;
3397                if (phba->link_flag & LS_LOOPBACK_MODE)
3398                        lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
3399                                "1308 Link Down Event in loop back mode "
3400                                "x%x received "
3401                                "Data: x%x x%x x%x\n",
3402                                la->eventTag, phba->fc_eventTag,
3403                                phba->pport->port_state, vport->fc_flag);
3404                else if (attn_type == LPFC_ATT_UNEXP_WWPN)
3405                        lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
3406                                "1313 Link Down UNEXP WWPN Event x%x received "
3407                                "Data: x%x x%x x%x x%x x%x\n",
3408                                la->eventTag, phba->fc_eventTag,
3409                                phba->pport->port_state, vport->fc_flag,
3410                                bf_get(lpfc_mbx_read_top_mm, la),
3411                                bf_get(lpfc_mbx_read_top_fa, la));
3412                else
3413                        lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
3414                                "1305 Link Down Event x%x received "
3415                                "Data: x%x x%x x%x x%x x%x\n",
3416                                la->eventTag, phba->fc_eventTag,
3417                                phba->pport->port_state, vport->fc_flag,
3418                                bf_get(lpfc_mbx_read_top_mm, la),
3419                                bf_get(lpfc_mbx_read_top_fa, la));
3420                lpfc_mbx_issue_link_down(phba);
3421        }
3422        if (phba->sli.sli_flag & LPFC_MENLO_MAINT &&
3423            attn_type == LPFC_ATT_LINK_UP) {
3424                if (phba->link_state != LPFC_LINK_DOWN) {
3425                        phba->fc_stat.LinkDown++;
3426                        lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
3427                                "1312 Link Down Event x%x received "
3428                                "Data: x%x x%x x%x\n",
3429                                la->eventTag, phba->fc_eventTag,
3430                                phba->pport->port_state, vport->fc_flag);
3431                        lpfc_mbx_issue_link_down(phba);
3432                } else
3433                        lpfc_enable_la(phba);
3434
3435                lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
3436                                "1310 Menlo Maint Mode Link up Event x%x rcvd "
3437                                "Data: x%x x%x x%x\n",
3438                                la->eventTag, phba->fc_eventTag,
3439                                phba->pport->port_state, vport->fc_flag);
3440                /*
3441                 * The command that triggered this will be waiting for
3442                 * this signal: wake up the MENLO_SET_MODE or MENLO_RESET
3443                 * command.
3444                 */
3445                if (phba->wait_4_mlo_maint_flg) {
3446                        phba->wait_4_mlo_maint_flg = 0;
3447                        wake_up_interruptible(&phba->wait_4_mlo_m_q);
3448                }
3449        }
3450
3451        if ((phba->sli_rev < LPFC_SLI_REV4) &&
3452            bf_get(lpfc_mbx_read_top_fa, la)) {
3453                if (phba->sli.sli_flag & LPFC_MENLO_MAINT)
3454                        lpfc_issue_clear_la(phba, vport);
3455                lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
3456                                "1311 fa %d\n",
3457                                bf_get(lpfc_mbx_read_top_fa, la));
3458        }
3459
3460lpfc_mbx_cmpl_read_topology_free_mbuf:
3461        lpfc_mbuf_free(phba, mp->virt, mp->phys);
3462        kfree(mp);
3463        mempool_free(pmb, phba->mbox_mem_pool);
3464        return;
3465}
3466
3467/*
3468 * This routine handles processing a REG_LOGIN mailbox
3469 * command upon completion. It is setup in the LPFC_MBOXQ
3470 * as the completion routine when the command is
3471 * handed off to the SLI layer.
3472 */
3473void
3474lpfc_mbx_cmpl_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
3475{
3476        struct lpfc_vport  *vport = pmb->vport;
3477        struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
3478        struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2;
3479        struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
3480
3481        pmb->context1 = NULL;
3482        pmb->context2 = NULL;
3483
3484        lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
3485                         "0002 rpi:%x DID:%x flg:%x %d map:%x %p\n",
3486                         ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag,
3487                         kref_read(&ndlp->kref),
3488                         ndlp->nlp_usg_map, ndlp);
3489        if (ndlp->nlp_flag & NLP_REG_LOGIN_SEND)
3490                ndlp->nlp_flag &= ~NLP_REG_LOGIN_SEND;
3491
3492        if (ndlp->nlp_flag & NLP_IGNR_REG_CMPL ||
3493            ndlp->nlp_state != NLP_STE_REG_LOGIN_ISSUE) {
3494                /* We received an RSCN after issuing this
3495                 * mbox reg login; we may have cycled
3496                 * back through the state machine and be
3497                 * back at the reg login state, so this
3498                 * mbox needs to be ignored because
3499                 * there is another reg login in
3500                 * process.
3501                 */
3502                spin_lock_irq(shost->host_lock);
3503                ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
3504                spin_unlock_irq(shost->host_lock);
3505
3506                /*
3507                 * We cannot leave the RPI registered because
3508                 * if we go thru discovery again for this ndlp
3509                 * a subsequent REG_RPI will fail.
3510                 */
3511                ndlp->nlp_flag |= NLP_RPI_REGISTERED;
3512                lpfc_unreg_rpi(vport, ndlp);
3513        }
3514
3515        /* Call state machine */
3516        lpfc_disc_state_machine(vport, ndlp, pmb, NLP_EVT_CMPL_REG_LOGIN);
3517
3518        lpfc_mbuf_free(phba, mp->virt, mp->phys);
3519        kfree(mp);
3520        mempool_free(pmb, phba->mbox_mem_pool);
3521        /* decrement the node reference count held for this callback
3522         * function.
3523         */
3524        lpfc_nlp_put(ndlp);
3525
3526        return;
3527}
3528
3529static void
3530lpfc_mbx_cmpl_unreg_vpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
3531{
3532        MAILBOX_t *mb = &pmb->u.mb;
3533        struct lpfc_vport *vport = pmb->vport;
3534        struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
3535
3536        switch (mb->mbxStatus) {
3537        case 0x0011:
3538        case 0x0020:
3539                lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
3540                                 "0911 cmpl_unreg_vpi, mb status = 0x%x\n",
3541                                 mb->mbxStatus);
3542                break;
3543        /* If VPI is busy, reset the HBA */
3544        case 0x9700:
3545                lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE,
3546                        "2798 Unreg_vpi failed vpi 0x%x, mb status = 0x%x\n",
3547                        vport->vpi, mb->mbxStatus);
3548                if (!(phba->pport->load_flag & FC_UNLOADING))
3549                        lpfc_workq_post_event(phba, NULL, NULL,
3550                                LPFC_EVT_RESET_HBA);
3551        }
3552        spin_lock_irq(shost->host_lock);
3553        vport->vpi_state &= ~LPFC_VPI_REGISTERED;
3554        vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
3555        spin_unlock_irq(shost->host_lock);
3556        vport->unreg_vpi_cmpl = VPORT_OK;
3557        mempool_free(pmb, phba->mbox_mem_pool);
3558        lpfc_cleanup_vports_rrqs(vport, NULL);
3559        /*
3560         * This shost reference might have been taken at the beginning of
3561         * lpfc_vport_delete()
3562         */
3563        if ((vport->load_flag & FC_UNLOADING) && (vport != phba->pport))
3564                scsi_host_put(shost);
3565}
3566
3567int
3568lpfc_mbx_unreg_vpi(struct lpfc_vport *vport)
3569{
3570        struct lpfc_hba  *phba = vport->phba;
3571        LPFC_MBOXQ_t *mbox;
3572        int rc;
3573
3574        mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
3575        if (!mbox)
3576                return 1;
3577
3578        lpfc_unreg_vpi(phba, vport->vpi, mbox);
3579        mbox->vport = vport;
3580        mbox->mbox_cmpl = lpfc_mbx_cmpl_unreg_vpi;
3581        rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
3582        if (rc == MBX_NOT_FINISHED) {
3583                lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX | LOG_VPORT,
3584                                 "1800 Could not issue unreg_vpi\n");
3585                mempool_free(mbox, phba->mbox_mem_pool);
3586                vport->unreg_vpi_cmpl = VPORT_ERROR;
3587                return rc;
3588        }
3589        return 0;
3590}
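
/*
 * Usage sketch for lpfc_mbx_unreg_vpi() above (kept out of the build with
 * #if 0): it returns 0 once the UNREG_VPI mailbox has been issued, with
 * completion handled asynchronously by lpfc_mbx_cmpl_unreg_vpi(), and
 * nonzero when the mailbox could not be allocated or issued.
 */
#if 0
static void example_unreg_vpi(struct lpfc_vport *vport)
{
        if (lpfc_mbx_unreg_vpi(vport)) {
                /* Not issued; on issue failure unreg_vpi_cmpl is VPORT_ERROR */
                return;
        }
        /* Issued: lpfc_mbx_cmpl_unreg_vpi() will run on completion */
}
#endif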
3591
3592static void
3593lpfc_mbx_cmpl_reg_vpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
3594{
3595        struct lpfc_vport *vport = pmb->vport;
3596        struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
3597        MAILBOX_t *mb = &pmb->u.mb;
3598
3599        switch (mb->mbxStatus) {
3600        case 0x0011:
3601        case 0x9601:
3602        case 0x9602:
3603                lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
3604                                 "0912 cmpl_reg_vpi, mb status = 0x%x\n",
3605                                 mb->mbxStatus);
3606                lpfc_vport_set_state(vport, FC_VPORT_FAILED);
3607                spin_lock_irq(shost->host_lock);
3608                vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
3609                spin_unlock_irq(shost->host_lock);
3610                vport->fc_myDID = 0;
3611
3612                if ((phba->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) ||
3613                    (phba->cfg_enable_fc4_type == LPFC_ENABLE_NVME)) {
3614                        if (phba->nvmet_support)
3615                                lpfc_nvmet_update_targetport(phba);
3616                        else
3617                                lpfc_nvme_update_localport(vport);
3618                }
3619                goto out;
3620        }
3621
3622        spin_lock_irq(shost->host_lock);
3623        vport->vpi_state |= LPFC_VPI_REGISTERED;
3624        vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI;
3625        spin_unlock_irq(shost->host_lock);
3626        vport->num_disc_nodes = 0;
3627        /* go thru NPR list and issue ELS PLOGIs */
3628        if (vport->fc_npr_cnt)
3629                lpfc_els_disc_plogi(vport);
3630
3631        if (!vport->num_disc_nodes) {
3632                spin_lock_irq(shost->host_lock);
3633                vport->fc_flag &= ~FC_NDISC_ACTIVE;
3634                spin_unlock_irq(shost->host_lock);
3635                lpfc_can_disctmo(vport);
3636        }
3637        vport->port_state = LPFC_VPORT_READY;
3638
3639out:
3640        mempool_free(pmb, phba->mbox_mem_pool);
3641        return;
3642}
3643
3644/**
3645 * lpfc_create_static_vport - Read HBA config region to create static vports.
3646 * @phba: pointer to lpfc hba data structure.
3647 *
3648 * This routine issues a DUMP mailbox command for config region 22 to get
3649 * the list of static vports to be created. The function creates vports
3650 * based on the information returned from the HBA.
3651 **/
void
lpfc_create_static_vport(struct lpfc_hba *phba)
{
        LPFC_MBOXQ_t *pmb = NULL;
        MAILBOX_t *mb;
        struct static_vport_info *vport_info;
        int mbx_wait_rc = 0, i;
        struct fc_vport_identifiers vport_id;
        struct fc_vport *new_fc_vport;
        struct Scsi_Host *shost;
        struct lpfc_vport *vport;
        uint16_t offset = 0;
        uint8_t *vport_buff;
        struct lpfc_dmabuf *mp;
        uint32_t byte_count = 0;

        pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
        if (!pmb) {
                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                                "0542 lpfc_create_static_vport failed to"
                                " allocate mailbox memory\n");
                return;
        }
        memset(pmb, 0, sizeof(LPFC_MBOXQ_t));
        mb = &pmb->u.mb;

        vport_info = kzalloc(sizeof(struct static_vport_info), GFP_KERNEL);
        if (!vport_info) {
                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                                "0543 lpfc_create_static_vport failed to"
                                " allocate vport_info\n");
                mempool_free(pmb, phba->mbox_mem_pool);
                return;
        }

        vport_buff = (uint8_t *) vport_info;
        do {
                /* free dma buffer from previous round */
                if (pmb->context1) {
                        mp = (struct lpfc_dmabuf *)pmb->context1;
                        lpfc_mbuf_free(phba, mp->virt, mp->phys);
                        kfree(mp);
                }
                if (lpfc_dump_static_vport(phba, pmb, offset))
                        goto out;

                pmb->vport = phba->pport;
                mbx_wait_rc = lpfc_sli_issue_mbox_wait(phba, pmb,
                                                        LPFC_MBOX_TMO);

                if ((mbx_wait_rc != MBX_SUCCESS) || mb->mbxStatus) {
                        lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
                                "0544 lpfc_create_static_vport failed to"
                                " issue dump mailbox command ret 0x%x "
                                "status 0x%x\n",
                                mbx_wait_rc, mb->mbxStatus);
                        goto out;
                }

                if (phba->sli_rev == LPFC_SLI_REV4) {
                        byte_count = pmb->u.mqe.un.mb_words[5];
                        mp = (struct lpfc_dmabuf *)pmb->context1;
                        if (byte_count > sizeof(struct static_vport_info) -
                                        offset)
                                byte_count = sizeof(struct static_vport_info)
                                        - offset;
                        memcpy(vport_buff + offset, mp->virt, byte_count);
                        offset += byte_count;
                } else {
                        if (mb->un.varDmp.word_cnt >
                                sizeof(struct static_vport_info) - offset)
                                mb->un.varDmp.word_cnt =
                                        sizeof(struct static_vport_info)
                                                - offset;
                        byte_count = mb->un.varDmp.word_cnt;
                        lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
                                vport_buff + offset,
                                byte_count);

                        offset += byte_count;
                }

        } while (byte_count &&
                offset < sizeof(struct static_vport_info));

        if ((le32_to_cpu(vport_info->signature) != VPORT_INFO_SIG) ||
                ((le32_to_cpu(vport_info->rev) & VPORT_INFO_REV_MASK)
                        != VPORT_INFO_REV)) {
                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                        "0545 lpfc_create_static_vport bad"
                        " information header 0x%x 0x%x\n",
                        le32_to_cpu(vport_info->signature),
                        le32_to_cpu(vport_info->rev) & VPORT_INFO_REV_MASK);

                goto out;
        }

        shost = lpfc_shost_from_vport(phba->pport);

        for (i = 0; i < MAX_STATIC_VPORT_COUNT; i++) {
                memset(&vport_id, 0, sizeof(vport_id));
                vport_id.port_name = wwn_to_u64(vport_info->vport_list[i].wwpn);
                vport_id.node_name = wwn_to_u64(vport_info->vport_list[i].wwnn);
                if (!vport_id.port_name || !vport_id.node_name)
                        continue;

                vport_id.roles = FC_PORT_ROLE_FCP_INITIATOR;
                vport_id.vport_type = FC_PORTTYPE_NPIV;
                vport_id.disable = false;
                new_fc_vport = fc_vport_create(shost, 0, &vport_id);

                if (!new_fc_vport) {
                        lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
                                "0546 lpfc_create_static_vport failed to"
                                " create vport\n");
                        continue;
                }

                vport = *(struct lpfc_vport **)new_fc_vport->dd_data;
                vport->vport_flag |= STATIC_VPORT;
        }

out:
        kfree(vport_info);
        if (mbx_wait_rc != MBX_TIMEOUT) {
                if (pmb->context1) {
                        mp = (struct lpfc_dmabuf *)pmb->context1;
                        lpfc_mbuf_free(phba, mp->virt, mp->phys);
                        kfree(mp);
                }
                mempool_free(pmb, phba->mbox_mem_pool);
        }

        return;
}

/*
 * This routine handles processing a Fabric REG_LOGIN mailbox
 * command upon completion. It is set up in the LPFC_MBOXQ
 * as the completion routine when the command is
 * handed off to the SLI layer.
 */
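/*
 * Illustrative sketch (not driver code): the handler below is wired into
 * the mailbox before the command is handed to the SLI layer, and the
 * context1/context2 assignments mirror what it expects to find:
 *
 *	mbox->vport = vport;
 *	mbox->context1 = mp;
 *	mbox->context2 = lpfc_nlp_get(ndlp);
 *	mbox->mbox_cmpl = lpfc_mbx_cmpl_fabric_reg_login;
 *	if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT) == MBX_NOT_FINISHED) {
 *		lpfc_nlp_put(ndlp);
 *		mempool_free(mbox, phba->mbox_mem_pool);
 *	}
 *
 * Here mp is the lpfc_dmabuf holding the service parameters and ndlp is
 * the fabric node; on issue failure the caller must undo its references.
 */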
void
lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
        struct lpfc_vport *vport = pmb->vport;
        MAILBOX_t *mb = &pmb->u.mb;
        struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
        struct lpfc_nodelist *ndlp;
        struct Scsi_Host *shost;

        ndlp = (struct lpfc_nodelist *) pmb->context2;
        pmb->context1 = NULL;
        pmb->context2 = NULL;

        if (mb->mbxStatus) {
                lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
                                 "0258 Register Fabric login error: 0x%x\n",
                                 mb->mbxStatus);
                lpfc_mbuf_free(phba, mp->virt, mp->phys);
                kfree(mp);
                mempool_free(pmb, phba->mbox_mem_pool);

                if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
                        /* FLOGI failed, use loop map to make discovery list */
                        lpfc_disc_list_loopmap(vport);

                        /* Start discovery */
                        lpfc_disc_start(vport);
                        /* Decrement the reference count on the ndlp once
                         * all other references to it are done.
                         */
                        lpfc_nlp_put(ndlp);
                        return;
                }

                lpfc_vport_set_state(vport, FC_VPORT_FAILED);
                /* Decrement the reference count on the ndlp once all
                 * other references to it are done.
                 */
                lpfc_nlp_put(ndlp);
                return;
        }

        if (phba->sli_rev < LPFC_SLI_REV4)
                ndlp->nlp_rpi = mb->un.varWords[0];
        ndlp->nlp_flag |= NLP_RPI_REGISTERED;
        ndlp->nlp_type |= NLP_FABRIC;
        lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);

        if (vport->port_state == LPFC_FABRIC_CFG_LINK) {
                /* When the physical port received a LOGO, do not
                 * start vport discovery.
                 */
                if (!(vport->fc_flag & FC_LOGO_RCVD_DID_CHNG))
                        lpfc_start_fdiscs(phba);
                else {
                        shost = lpfc_shost_from_vport(vport);
                        spin_lock_irq(shost->host_lock);
                        vport->fc_flag &= ~FC_LOGO_RCVD_DID_CHNG;
                        spin_unlock_irq(shost->host_lock);
                }
                lpfc_do_scr_ns_plogi(phba, vport);
        }

        lpfc_mbuf_free(phba, mp->virt, mp->phys);
        kfree(mp);
        mempool_free(pmb, phba->mbox_mem_pool);

        /* Drop the reference count from the mbox at the end after
         * all the current references to the ndlp have been dropped.
         */
        lpfc_nlp_put(ndlp);
        return;
}

/*
 * This routine will issue a GID_FT for each FC4 Type supported
 * by the driver. ALL GID_FTs must complete before discovery is started.
 */
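/*
 * Sketch of the expected calling pattern, mirroring the use in
 * lpfc_mbx_cmpl_ns_reg_login() below (the surrounding caller is
 * hypothetical):
 *
 *	vport->fc_ns_retry = 0;
 *	if (lpfc_issue_gidft(vport) == 0)
 *		return;
 *
 * A return of 0 means no GID_FT could be issued (or none was needed);
 * otherwise vport->gidft_inp counts the GID_FTs now in flight, and
 * discovery proceeds only once their completions have been processed.
 */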
int
lpfc_issue_gidft(struct lpfc_vport *vport)
{
        struct lpfc_hba *phba = vport->phba;

        /* Good status, issue CT Request to NameServer */
        if ((phba->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) ||
            (phba->cfg_enable_fc4_type == LPFC_ENABLE_FCP)) {
                if (lpfc_ns_cmd(vport, SLI_CTNS_GID_FT, 0, SLI_CTPT_FCP)) {
                        /* Cannot issue NameServer FCP Query, so finish up
                         * discovery
                         */
                        lpfc_printf_vlog(vport, KERN_ERR, LOG_SLI,
                                         "0604 %s FC TYPE %x %s\n",
                                         "Failed to issue GID_FT to ",
                                         FC_TYPE_FCP,
                                         "Finishing discovery.");
                        return 0;
                }
                vport->gidft_inp++;
        }

        if ((phba->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) ||
            (phba->cfg_enable_fc4_type == LPFC_ENABLE_NVME)) {
                if (lpfc_ns_cmd(vport, SLI_CTNS_GID_FT, 0, SLI_CTPT_NVME)) {
                        /* Cannot issue NameServer NVME Query, so finish up
                         * discovery
                         */
                        lpfc_printf_vlog(vport, KERN_ERR, LOG_SLI,
                                         "0605 %s FC_TYPE %x %s %d\n",
                                         "Failed to issue GID_FT to ",
                                         FC_TYPE_NVME,
                                         "Finishing discovery: gidftinp ",
                                         vport->gidft_inp);
                        if (vport->gidft_inp == 0)
                                return 0;
                } else
                        vport->gidft_inp++;
        }
        return vport->gidft_inp;
}

/*
 * This routine handles processing a NameServer REG_LOGIN mailbox
 * command upon completion. It is set up in the LPFC_MBOXQ
 * as the completion routine when the command is
 * handed off to the SLI layer.
 */
void
lpfc_mbx_cmpl_ns_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
        MAILBOX_t *mb = &pmb->u.mb;
        struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
        struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2;
        struct lpfc_vport *vport = pmb->vport;

        pmb->context1 = NULL;
        pmb->context2 = NULL;
        vport->gidft_inp = 0;

        if (mb->mbxStatus) {
                lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
                                 "0260 Register NameServer error: 0x%x\n",
                                 mb->mbxStatus);

out:
                /* Decrement the node reference count held for this
                 * callback function.
                 */
                lpfc_nlp_put(ndlp);
                lpfc_mbuf_free(phba, mp->virt, mp->phys);
                kfree(mp);
                mempool_free(pmb, phba->mbox_mem_pool);

                /* If no other thread is using the ndlp, free it */
                lpfc_nlp_not_used(ndlp);

                if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
                        /*
                         * RegLogin failed, use loop map to make discovery
                         * list
                         */
                        lpfc_disc_list_loopmap(vport);

                        /* Start discovery */
                        lpfc_disc_start(vport);
                        return;
                }
                lpfc_vport_set_state(vport, FC_VPORT_FAILED);
                return;
        }

        if (phba->sli_rev < LPFC_SLI_REV4)
                ndlp->nlp_rpi = mb->un.varWords[0];
        ndlp->nlp_flag |= NLP_RPI_REGISTERED;
        ndlp->nlp_type |= NLP_FABRIC;
        lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
        lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
                         "0003 rpi:%x DID:%x flg:%x %d map:%x %p\n",
                         ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag,
                         kref_read(&ndlp->kref),
                         ndlp->nlp_usg_map, ndlp);

        if (vport->port_state < LPFC_VPORT_READY) {
                /* Link up discovery requires Fabric registration. */
                lpfc_ns_cmd(vport, SLI_CTNS_RNN_ID, 0, 0);
                lpfc_ns_cmd(vport, SLI_CTNS_RSNN_NN, 0, 0);
                lpfc_ns_cmd(vport, SLI_CTNS_RSPN_ID, 0, 0);
                lpfc_ns_cmd(vport, SLI_CTNS_RFT_ID, 0, 0);

                if ((phba->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) ||
                    (phba->cfg_enable_fc4_type == LPFC_ENABLE_FCP))
                        lpfc_ns_cmd(vport, SLI_CTNS_RFF_ID, 0, FC_TYPE_FCP);

                if ((phba->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) ||
                    (phba->cfg_enable_fc4_type == LPFC_ENABLE_NVME))
                        lpfc_ns_cmd(vport, SLI_CTNS_RFF_ID, 0,
                                    FC_TYPE_NVME);

                /* Issue SCR just before NameServer GID_FT Query */
                lpfc_issue_els_scr(vport, SCR_DID, 0);
        }

        vport->fc_ns_retry = 0;
        if (lpfc_issue_gidft(vport) == 0)
                goto out;

        /*
         * At this point we may need to wait for multiple
         * SLI_CTNS_GID_FT CT commands to complete before we start discovery.
         *
         * Decrement the node reference count held for this
         * callback function.
         */
        lpfc_nlp_put(ndlp);
        lpfc_mbuf_free(phba, mp->virt, mp->phys);
        kfree(mp);
        mempool_free(pmb, phba->mbox_mem_pool);

        return;
}

static void
lpfc_register_remote_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
        struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
        struct fc_rport  *rport;
        struct lpfc_rport_data *rdata;
        struct fc_rport_identifiers rport_ids;
        struct lpfc_hba  *phba = vport->phba;

        if (phba->cfg_enable_fc4_type == LPFC_ENABLE_NVME)
                return;

        /* Remote port has reappeared. Re-register w/ FC transport */
        rport_ids.node_name = wwn_to_u64(ndlp->nlp_nodename.u.wwn);
        rport_ids.port_name = wwn_to_u64(ndlp->nlp_portname.u.wwn);
        rport_ids.port_id = ndlp->nlp_DID;
        rport_ids.roles = FC_RPORT_ROLE_UNKNOWN;

        /*
         * We leave our node pointer in rport->dd_data when we unregister a
         * FCP target port.  But fc_remote_port_add zeros the space to which
         * rport->dd_data points.  So, if we're reusing a previously
         * registered port, drop the reference that we took the last time we
         * registered the port.
         */
        rport = ndlp->rport;
        if (rport) {
                rdata = rport->dd_data;
                /* break the link before dropping the ref */
                ndlp->rport = NULL;
                if (rdata) {
                        if (rdata->pnode == ndlp)
                                lpfc_nlp_put(ndlp);
                        rdata->pnode = NULL;
                }
                /* drop reference for earlier registration */
                put_device(&rport->dev);
        }

        lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT,
                "rport add:       did:x%x flg:x%x type x%x",
                ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type);

        /* Don't add the remote port if unloading. */
        if (vport->load_flag & FC_UNLOADING)
                return;

        ndlp->rport = rport = fc_remote_port_add(shost, 0, &rport_ids);
        if (!rport || !get_device(&rport->dev)) {
                dev_printk(KERN_WARNING, &phba->pcidev->dev,
                           "Warning: fc_remote_port_add failed\n");
                return;
        }

        /* initialize static port data */
        rport->maxframe_size = ndlp->nlp_maxframe;
        rport->supported_classes = ndlp->nlp_class_sup;
        rdata = rport->dd_data;
        rdata->pnode = lpfc_nlp_get(ndlp);

        if (ndlp->nlp_type & NLP_FCP_TARGET)
                rport_ids.roles |= FC_RPORT_ROLE_FCP_TARGET;
        if (ndlp->nlp_type & NLP_FCP_INITIATOR)
                rport_ids.roles |= FC_RPORT_ROLE_FCP_INITIATOR;

        if (rport_ids.roles != FC_RPORT_ROLE_UNKNOWN)
                fc_remote_port_rolechg(rport, rport_ids.roles);

        lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
                         "3183 rport register x%06x, rport %p role x%x\n",
                         ndlp->nlp_DID, rport, rport_ids.roles);

        if ((rport->scsi_target_id != -1) &&
            (rport->scsi_target_id < LPFC_MAX_TARGET)) {
                ndlp->nlp_sid = rport->scsi_target_id;
        }
        return;
}

static void
lpfc_unregister_remote_port(struct lpfc_nodelist *ndlp)
{
        struct fc_rport *rport = ndlp->rport;
        struct lpfc_vport *vport = ndlp->vport;
        struct lpfc_hba  *phba = vport->phba;

        if (phba->cfg_enable_fc4_type == LPFC_ENABLE_NVME)
                return;

        lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT,
                "rport delete:    did:x%x flg:x%x type x%x",
                ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type);

        lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
                         "3184 rport unregister x%06x, rport %p\n",
                         ndlp->nlp_DID, rport);

        fc_remote_port_delete(rport);

        return;
}

static void
lpfc_nlp_counters(struct lpfc_vport *vport, int state, int count)
{
        struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

        spin_lock_irq(shost->host_lock);
        switch (state) {
        case NLP_STE_UNUSED_NODE:
                vport->fc_unused_cnt += count;
                break;
        case NLP_STE_PLOGI_ISSUE:
                vport->fc_plogi_cnt += count;
                break;
        case NLP_STE_ADISC_ISSUE:
                vport->fc_adisc_cnt += count;
                break;
        case NLP_STE_REG_LOGIN_ISSUE:
                vport->fc_reglogin_cnt += count;
                break;
        case NLP_STE_PRLI_ISSUE:
                vport->fc_prli_cnt += count;
                break;
        case NLP_STE_UNMAPPED_NODE:
                vport->fc_unmap_cnt += count;
                break;
        case NLP_STE_MAPPED_NODE:
                vport->fc_map_cnt += count;
                break;
        case NLP_STE_NPR_NODE:
                /* Guard against the NPR count underflowing below zero */
                if (vport->fc_npr_cnt == 0 && count == -1)
                        vport->fc_npr_cnt = 0;
                else
                        vport->fc_npr_cnt += count;
                break;
        }
        spin_unlock_irq(shost->host_lock);
}

static void
lpfc_nlp_state_cleanup(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
                       int old_state, int new_state)
{
        struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

        if (new_state == NLP_STE_UNMAPPED_NODE) {
                ndlp->nlp_flag &= ~NLP_NODEV_REMOVE;
                ndlp->nlp_type |= NLP_FC_NODE;
        }
        if (new_state == NLP_STE_MAPPED_NODE)
                ndlp->nlp_flag &= ~NLP_NODEV_REMOVE;
        if (new_state == NLP_STE_NPR_NODE)
                ndlp->nlp_flag &= ~NLP_RCV_PLOGI;

        /* FCP and NVME Transport interfaces */
        if ((old_state == NLP_STE_MAPPED_NODE ||
             old_state == NLP_STE_UNMAPPED_NODE)) {
                if (ndlp->rport) {
                        vport->phba->nport_event_cnt++;
                        lpfc_unregister_remote_port(ndlp);
                }

                if (ndlp->nlp_fc4_type & NLP_FC4_NVME) {
                        vport->phba->nport_event_cnt++;
                        if (vport->phba->nvmet_support == 0) {
                                /* Start devloss if target. */
                                if (ndlp->nlp_type & NLP_NVME_TARGET)
                                        lpfc_nvme_unregister_port(vport, ndlp);
                        } else {
                                /* NVMET has no upcall. */
                                lpfc_nlp_put(ndlp);
                        }
                }
        }

        /* FCP and NVME Transport interfaces */

        if (new_state == NLP_STE_MAPPED_NODE ||
            new_state == NLP_STE_UNMAPPED_NODE) {
                if (ndlp->nlp_fc4_type & NLP_FC4_FCP ||
                    ndlp->nlp_DID == Fabric_DID ||
                    ndlp->nlp_DID == NameServer_DID ||
                    ndlp->nlp_DID == FDMI_DID) {
                        vport->phba->nport_event_cnt++;
                        /*
                         * Tell the fc transport about the port, if we haven't
                         * already. If we have, and it's a scsi entity, be
                         * sure to unblock any attached scsi devices
                         */
                        lpfc_register_remote_port(vport, ndlp);
                }
                /* Notify the NVME transport of this new rport. */
                if (vport->phba->sli_rev >= LPFC_SLI_REV4 &&
                    ndlp->nlp_fc4_type & NLP_FC4_NVME) {
                        if (vport->phba->nvmet_support == 0) {
                                /* Register this rport with the transport.
                                 * Only NVME Target Rports are registered with
                                 * the transport.
                                 */
                                if (ndlp->nlp_type & NLP_NVME_TARGET) {
                                        vport->phba->nport_event_cnt++;
                                        lpfc_nvme_register_port(vport, ndlp);
                                }
                        } else {
                                /* Just take an NDLP ref count since the
                                 * target does not register rports.
                                 */
                                lpfc_nlp_get(ndlp);
                        }
                }
        }

        if ((new_state == NLP_STE_MAPPED_NODE) &&
            (vport->stat_data_enabled)) {
                /*
                 * A new target is discovered; if there is no buffer for
                 * statistical data collection, allocate one.
                 */
                ndlp->lat_data = kcalloc(LPFC_MAX_BUCKET_COUNT,
                                         sizeof(struct lpfc_scsicmd_bkt),
                                         GFP_KERNEL);

                if (!ndlp->lat_data)
                        lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE,
                                "0286 lpfc_nlp_state_cleanup failed to "
                                "allocate statistical data buffer DID "
                                "0x%x\n", ndlp->nlp_DID);
        }
        /*
         * If the node just added to the Mapped list was an FCP target,
         * but the remote port registration failed or assigned a target
         * id outside the presentable range - move the node to the
         * Unmapped List.
         */
        if ((new_state == NLP_STE_MAPPED_NODE) &&
            (ndlp->nlp_type & NLP_FCP_TARGET) &&
            (!ndlp->rport ||
             ndlp->rport->scsi_target_id == -1 ||
             ndlp->rport->scsi_target_id >= LPFC_MAX_TARGET)) {
                spin_lock_irq(shost->host_lock);
                ndlp->nlp_flag |= NLP_TGT_NO_SCSIID;
                spin_unlock_irq(shost->host_lock);
                lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
        }
}

static char *
lpfc_nlp_state_name(char *buffer, size_t size, int state)
{
        static char *states[] = {
                [NLP_STE_UNUSED_NODE] = "UNUSED",
                [NLP_STE_PLOGI_ISSUE] = "PLOGI",
                [NLP_STE_ADISC_ISSUE] = "ADISC",
                [NLP_STE_REG_LOGIN_ISSUE] = "REGLOGIN",
                [NLP_STE_PRLI_ISSUE] = "PRLI",
                [NLP_STE_LOGO_ISSUE] = "LOGO",
                [NLP_STE_UNMAPPED_NODE] = "UNMAPPED",
                [NLP_STE_MAPPED_NODE] = "MAPPED",
                [NLP_STE_NPR_NODE] = "NPR",
        };

        if (state < NLP_STE_MAX_STATE && states[state])
                strlcpy(buffer, states[state], size);
        else
                snprintf(buffer, size, "unknown (%d)", state);
        return buffer;
}

void
lpfc_nlp_set_state(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
                   int state)
{
        struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
        int  old_state = ndlp->nlp_state;
        char name1[16], name2[16];

        lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
                         "0904 NPort state transition x%06x, %s -> %s\n",
                         ndlp->nlp_DID,
                         lpfc_nlp_state_name(name1, sizeof(name1), old_state),
                         lpfc_nlp_state_name(name2, sizeof(name2), state));

        lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_NODE,
                "node statechg    did:x%x old:%d ste:%d",
                ndlp->nlp_DID, old_state, state);

        if (old_state == NLP_STE_NPR_NODE &&
            state != NLP_STE_NPR_NODE)
                lpfc_cancel_retry_delay_tmo(vport, ndlp);
        if (old_state == NLP_STE_UNMAPPED_NODE) {
                ndlp->nlp_flag &= ~NLP_TGT_NO_SCSIID;
                ndlp->nlp_type &= ~NLP_FC_NODE;
        }

        if (list_empty(&ndlp->nlp_listp)) {
                spin_lock_irq(shost->host_lock);
                list_add_tail(&ndlp->nlp_listp, &vport->fc_nodes);
                spin_unlock_irq(shost->host_lock);
        } else if (old_state)
                lpfc_nlp_counters(vport, old_state, -1);

        ndlp->nlp_state = state;
        lpfc_nlp_counters(vport, state, 1);
        lpfc_nlp_state_cleanup(vport, ndlp, old_state, state);
}

void
lpfc_enqueue_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
        struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

        if (list_empty(&ndlp->nlp_listp)) {
                spin_lock_irq(shost->host_lock);
                list_add_tail(&ndlp->nlp_listp, &vport->fc_nodes);
                spin_unlock_irq(shost->host_lock);
        }
}

void
lpfc_dequeue_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
        struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

        lpfc_cancel_retry_delay_tmo(vport, ndlp);
        if (ndlp->nlp_state && !list_empty(&ndlp->nlp_listp))
                lpfc_nlp_counters(vport, ndlp->nlp_state, -1);
        spin_lock_irq(shost->host_lock);
        list_del_init(&ndlp->nlp_listp);
        spin_unlock_irq(shost->host_lock);
        lpfc_nlp_state_cleanup(vport, ndlp, ndlp->nlp_state,
                                NLP_STE_UNUSED_NODE);
}

static void
lpfc_disable_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
        lpfc_cancel_retry_delay_tmo(vport, ndlp);
        if (ndlp->nlp_state && !list_empty(&ndlp->nlp_listp))
                lpfc_nlp_counters(vport, ndlp->nlp_state, -1);
        lpfc_nlp_state_cleanup(vport, ndlp, ndlp->nlp_state,
                                NLP_STE_UNUSED_NODE);
}

/**
 * lpfc_initialize_node - Initialize all fields of node object
 * @vport: Pointer to Virtual Port object.
 * @ndlp: Pointer to FC node object.
 * @did: FC_ID of the node.
 *
 * This function is always called when a node object needs to be initialized.
 * It initializes all the fields of the node object. Although the reference
 * to phba from @ndlp can be obtained indirectly through its reference to
 * @vport, a direct reference to phba is taken here by @ndlp. This is done
 * because the life-span of @ndlp might go beyond the existence of @vport,
 * as the final release of the ndlp is determined by its reference count,
 * and operations on @ndlp need the reference to phba.
 **/
static inline void
lpfc_initialize_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
        uint32_t did)
{
        INIT_LIST_HEAD(&ndlp->els_retry_evt.evt_listp);
        INIT_LIST_HEAD(&ndlp->dev_loss_evt.evt_listp);
        setup_timer(&ndlp->nlp_delayfunc, lpfc_els_retry_delay,
                        (unsigned long)ndlp);
        ndlp->nlp_DID = did;
        ndlp->vport = vport;
        ndlp->phba = vport->phba;
        ndlp->nlp_sid = NLP_NO_SID;
        ndlp->nlp_fc4_type = NLP_FC4_NONE;
        kref_init(&ndlp->kref);
        NLP_INT_NODE_ACT(ndlp);
        atomic_set(&ndlp->cmd_pending, 0);
        ndlp->cmd_qdepth = vport->cfg_tgt_queue_depth;
}

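/*
 * Sketch of the intended use (the caller here is hypothetical; the
 * driver's node allocation and lpfc_enable_node() paths do the
 * equivalent): zero the object first, then initialize it, and only
 * then link it into the vport's node list:
 *
 *	memset(ndlp, 0, sizeof(*ndlp));
 *	lpfc_initialize_node(vport, ndlp, did);
 *	lpfc_enqueue_node(vport, ndlp);
 *
 * After lpfc_initialize_node() the kref is 1 and ndlp->phba is valid,
 * so the node can outlive @vport until the final lpfc_nlp_put().
 */
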
struct lpfc_nodelist *
lpfc_enable_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
                 int state)
{
        struct lpfc_hba *phba = vport->phba;
        uint32_t did;
        unsigned long flags;
        unsigned long *active_rrqs_xri_bitmap = NULL;
        int rpi = LPFC_RPI_ALLOC_ERROR;

        if (!ndlp)
                return NULL;

        if (phba->sli_rev == LPFC_SLI_REV4) {
                rpi = lpfc_sli4_alloc_rpi(vport->phba);
                if (rpi == LPFC_RPI_ALLOC_ERROR)
                        return NULL;
        }

        spin_lock_irqsave(&phba->ndlp_lock, flags);
        /* The ndlp should not be in memory free mode */
        if (NLP_CHK_FREE_REQ(ndlp)) {
                spin_unlock_irqrestore(&phba->ndlp_lock, flags);
                lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE,
                                "0277 lpfc_enable_node: ndlp:x%p "
                                "usgmap:x%x refcnt:%d\n",
                                (void *)ndlp, ndlp->nlp_usg_map,
                                kref_read(&ndlp->kref));
                goto free_rpi;
        }
        /* The ndlp should not already be in active mode */
        if (NLP_CHK_NODE_ACT(ndlp)) {
                spin_unlock_irqrestore(&phba->ndlp_lock, flags);
                lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE,
                                "0278 lpfc_enable_node: ndlp:x%p "
                                "usgmap:x%x refcnt:%d\n",
                                (void *)ndlp, ndlp->nlp_usg_map,
                                kref_read(&ndlp->kref));
                goto free_rpi;
        }

        /* Keep the original DID */
        did = ndlp->nlp_DID;
        if (phba->sli_rev == LPFC_SLI_REV4)
                active_rrqs_xri_bitmap = ndlp->active_rrqs_xri_bitmap;

        /* re-initialize ndlp except for the ndlp linked list pointer */
        memset((((char *)ndlp) + sizeof(struct list_head)), 0,
                sizeof(struct lpfc_nodelist) - sizeof(struct list_head));
        lpfc_initialize_node(vport, ndlp, did);

        if (phba->sli_rev == LPFC_SLI_REV4)
                ndlp->active_rrqs_xri_bitmap = active_rrqs_xri_bitmap;

        spin_unlock_irqrestore(&phba->ndlp_lock, flags);
        if (vport->phba->sli_rev == LPFC_SLI_REV4) {
                ndlp->nlp_rpi = rpi;
                lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
                                 "0008 rpi:%x DID:%x flg:%x refcnt:%d "
                                 "map:%x %p\n", ndlp->nlp_rpi, ndlp->nlp_DID,
                                 ndlp->nlp_flag,
                                 kref_read(&ndlp->kref),
                                 ndlp->nlp_usg_map, ndlp);
        }

        if (state != NLP_STE_UNUSED_NODE)
                lpfc_nlp_set_state(vport, ndlp, state);

        lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_NODE,
                "node enable:       did:x%x",
                ndlp->nlp_DID, 0, 0);
        return ndlp;

free_rpi:
        if (phba->sli_rev == LPFC_SLI_REV4)
                lpfc_sli4_free_rpi(vport->phba, rpi);
        return NULL;
}

void
lpfc_drop_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
        /*
         * Use of lpfc_drop_node and UNUSED list: lpfc_drop_node should
         * be used if we wish to issue the "last" lpfc_nlp_put() to remove
         * the ndlp from the vport. The ndlp stays marked as UNUSED on the
         * list until ALL other outstanding threads have completed. We check
         * that the ndlp is not already in the UNUSED state before we proceed.
         */
        if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
                return;
        lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNUSED_NODE);
        if (vport->phba->sli_rev == LPFC_SLI_REV4) {
                lpfc_cleanup_vports_rrqs(vport, ndlp);
                lpfc_unreg_rpi(vport, ndlp);
        }

        lpfc_nlp_put(ndlp);
        return;
}

/*
 * Start / ReStart rescue timer for Discovery / RSCN handling
 */
void
lpfc_set_disctmo(struct lpfc_vport *vport)
{
        struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
        struct lpfc_hba  *phba = vport->phba;
        uint32_t tmo;

        if (vport->port_state == LPFC_LOCAL_CFG_LINK) {
                /* For FAN, timeout should be greater than edtov */
                tmo = (((phba->fc_edtov + 999) / 1000) + 1);
        } else {
                /* Normal discovery timeout should be greater than the
                 * ELS/CT timeout; the FC spec states we need 3 * ratov
                 * for CT requests.
                 */
                tmo = ((phba->fc_ratov * 3) + 3);
        }
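
        /* Worked example (values illustrative, not driver defaults):
         * fc_ratov = 10 gives tmo = 10 * 3 + 3 = 33 seconds, while a
         * FAN case with fc_edtov = 2000 ms gives tmo = 2000 / 1000
         * rounded up, plus one, i.e. 3 seconds.
         */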

        if (!timer_pending(&vport->fc_disctmo)) {
                lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
                        "set disc timer:  tmo:x%x state:x%x flg:x%x",
                        tmo, vport->port_state, vport->fc_flag);
        }

        mod_timer(&vport->fc_disctmo, jiffies + msecs_to_jiffies(1000 * tmo));
        spin_lock_irq(shost->host_lock);
        vport->fc_flag |= FC_DISC_TMO;
        spin_unlock_irq(shost->host_lock);

        /* Start Discovery Timer state <hba_state> */
        lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
                         "0247 Start Discovery Timer state x%x "
                         "Data: x%x x%lx x%x x%x\n",
                         vport->port_state, tmo,
                         (unsigned long)&vport->fc_disctmo, vport->fc_plogi_cnt,
                         vport->fc_adisc_cnt);

        return;
}

/*
 * Cancel rescue timer for Discovery / RSCN handling
 */
int
lpfc_can_disctmo(struct lpfc_vport *vport)
{
        struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
        unsigned long iflags;

        lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
                "can disc timer:  state:x%x rtry:x%x flg:x%x",
                vport->port_state, vport->fc_ns_retry, vport->fc_flag);

        /* Turn off discovery timer if it's running */
        if (vport->fc_flag & FC_DISC_TMO) {
                spin_lock_irqsave(shost->host_lock, iflags);
                vport->fc_flag &= ~FC_DISC_TMO;
                spin_unlock_irqrestore(shost->host_lock, iflags);
                del_timer_sync(&vport->fc_disctmo);
                spin_lock_irqsave(&vport->work_port_lock, iflags);
                vport->work_port_events &= ~WORKER_DISC_TMO;
                spin_unlock_irqrestore(&vport->work_port_lock, iflags);
        }

        /* Cancel Discovery Timer state <hba_state> */
        lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
                         "0248 Cancel Discovery Timer state x%x "
                         "Data: x%x x%x x%x\n",
                         vport->port_state, vport->fc_flag,
                         vport->fc_plogi_cnt, vport->fc_adisc_cnt);
        return 0;
}

/*
 * Check specified ring for outstanding IOCB on the SLI queue
 * Return true if iocb matches the specified nport
 */
int
lpfc_check_sli_ndlp(struct lpfc_hba *phba,
                    struct lpfc_sli_ring *pring,
                    struct lpfc_iocbq *iocb,
                    struct lpfc_nodelist *ndlp)
{
        IOCB_t *icmd = &iocb->iocb;
        struct lpfc_vport    *vport = ndlp->vport;

        if (iocb->vport != vport)
                return 0;

        if (pring->ringno == LPFC_ELS_RING) {
                switch (icmd->ulpCommand) {
                case CMD_GEN_REQUEST64_CR:
                        if (iocb->context_un.ndlp == ndlp)
                                return 1;
                        /* fall through */
                case CMD_ELS_REQUEST64_CR:
                        if (icmd->un.elsreq64.remoteID == ndlp->nlp_DID)
                                return 1;
                        /* fall through */
                case CMD_XMIT_ELS_RSP64_CX:
                        if (iocb->context1 == (uint8_t *) ndlp)
                                return 1;
                }
        } else if (pring->ringno == LPFC_FCP_RING) {
                /* Skip match check if waiting to relogin to FCP target */
                if ((ndlp->nlp_type & NLP_FCP_TARGET) &&
                    (ndlp->nlp_flag & NLP_DELAY_TMO)) {
                        return 0;
                }
                if (icmd->ulpContext == (volatile ushort)ndlp->nlp_rpi) {
                        return 1;
                }
        }
        return 0;
}

static void
__lpfc_dequeue_nport_iocbs(struct lpfc_hba *phba,
                struct lpfc_nodelist *ndlp, struct lpfc_sli_ring *pring,
                struct list_head *dequeue_list)
{
        struct lpfc_iocbq *iocb, *next_iocb;

        list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) {
                /* Check to see if iocb matches the nport */
                if (lpfc_check_sli_ndlp(phba, pring, iocb, ndlp))
                        /* match, dequeue */
                        list_move_tail(&iocb->list, dequeue_list);
        }
}

static void
lpfc_sli3_dequeue_nport_iocbs(struct lpfc_hba *phba,
                struct lpfc_nodelist *ndlp, struct list_head *dequeue_list)
{
        struct lpfc_sli *psli = &phba->sli;
        uint32_t i;

        spin_lock_irq(&phba->hbalock);
        for (i = 0; i < psli->num_rings; i++)
                __lpfc_dequeue_nport_iocbs(phba, ndlp, &psli->sli3_ring[i],
                                                dequeue_list);
        spin_unlock_irq(&phba->hbalock);
}

static void
lpfc_sli4_dequeue_nport_iocbs(struct lpfc_hba *phba,
                struct lpfc_nodelist *ndlp, struct list_head *dequeue_list)
{
        struct lpfc_sli_ring *pring;
        struct lpfc_queue *qp = NULL;

        spin_lock_irq(&phba->hbalock);
        list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
                pring = qp->pring;
                if (!pring)
                        continue;
                spin_lock(&pring->ring_lock);
                __lpfc_dequeue_nport_iocbs(phba, ndlp, pring, dequeue_list);
                spin_unlock(&pring->ring_lock);
        }
        spin_unlock_irq(&phba->hbalock);
}

/*
 * Free resources / clean up outstanding I/Os
 * associated with nlp_rpi in the LPFC_NODELIST entry.
 */
static int
lpfc_no_rpi(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
{
        LIST_HEAD(completions);

        lpfc_fabric_abort_nport(ndlp);

        /*
         * Everything that matches on txcmplq will be returned
         * by firmware with a no rpi error.
         */
        if (ndlp->nlp_flag & NLP_RPI_REGISTERED) {
                if (phba->sli_rev != LPFC_SLI_REV4)
                        lpfc_sli3_dequeue_nport_iocbs(phba, ndlp, &completions);
                else
                        lpfc_sli4_dequeue_nport_iocbs(phba, ndlp, &completions);
        }

        /* Cancel all the IOCBs from the completions list */
        lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
                              IOERR_SLI_ABORTED);

        return 0;
}

/**
 * lpfc_nlp_logo_unreg - Unreg mailbox completion handler before LOGO
 * @phba: Pointer to HBA context object.
 * @pmb: Pointer to mailbox object.
 *
 * This function will issue an ELS LOGO command after completing
 * the UNREG_RPI.
 **/
void
lpfc_nlp_logo_unreg(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
        struct lpfc_vport  *vport = pmb->vport;
        struct lpfc_nodelist *ndlp;

        ndlp = (struct lpfc_nodelist *)(pmb->context1);
        if (!ndlp)
                return;
        lpfc_issue_els_logo(vport, ndlp, 0);
        mempool_free(pmb, phba->mbox_mem_pool);
}
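
/*
 * Sketch of the setup that selects the handler above; lpfc_unreg_rpi()
 * below does exactly this when NLP_ISSUE_LOGO is set on the node:
 *
 *	lpfc_unreg_login(phba, vport->vpi, rpi, mbox);
 *	mbox->vport = vport;
 *	mbox->context1 = ndlp;
 *	mbox->mbox_cmpl = lpfc_nlp_logo_unreg;
 */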

/*
 * Free rpi associated with LPFC_NODELIST entry.
 * This routine is called from lpfc_freenode(), when we are removing
 * a LPFC_NODELIST entry. It is also called if the driver initiates a
 * LOGO that completes successfully, and we are waiting to PLOGI back
 * to the remote NPort. In addition, it is called after we receive
 * an unsolicited ELS cmd, send back a rsp, and the rsp completes,
 * while we are waiting to PLOGI back to the remote NPort.
 */
int
lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
        struct lpfc_hba *phba = vport->phba;
        LPFC_MBOXQ_t    *mbox;
        int rc, acc_plogi = 1;
        uint16_t rpi;

        if (ndlp->nlp_flag & NLP_RPI_REGISTERED ||
            ndlp->nlp_flag & NLP_REG_LOGIN_SEND) {
                if (ndlp->nlp_flag & NLP_REG_LOGIN_SEND)
                        lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
                                         "3366 RPI x%x needs to be "
                                         "unregistered nlp_flag x%x "
                                         "did x%x\n",
                                         ndlp->nlp_rpi, ndlp->nlp_flag,
                                         ndlp->nlp_DID);
                mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
                if (mbox) {
                        /* SLI4 ports require the physical rpi value. */
                        rpi = ndlp->nlp_rpi;
                        if (phba->sli_rev == LPFC_SLI_REV4)
                                rpi = phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];

                        lpfc_unreg_login(phba, vport->vpi, rpi, mbox);
                        mbox->vport = vport;
                        if (ndlp->nlp_flag & NLP_ISSUE_LOGO) {
                                mbox->context1 = ndlp;
                                mbox->mbox_cmpl = lpfc_nlp_logo_unreg;
                        } else {
                                if (phba->sli_rev == LPFC_SLI_REV4 &&
                                    (!(vport->load_flag & FC_UNLOADING)) &&
                                    (bf_get(lpfc_sli_intf_if_type,
                                     &phba->sli4_hba.sli_intf) ==
                                      LPFC_SLI_INTF_IF_TYPE_2) &&
                                    (kref_read(&ndlp->kref) > 0)) {
                                        mbox->context1 = lpfc_nlp_get(ndlp);
                                        mbox->mbox_cmpl =
                                                lpfc_sli4_unreg_rpi_cmpl_clr;
                                        /*
                                         * accept PLOGIs after unreg_rpi_cmpl
                                         */
                                        acc_plogi = 0;
                                } else
                                        mbox->mbox_cmpl =
                                                lpfc_sli_def_mbox_cmpl;
                        }

                        rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
                        if (rc == MBX_NOT_FINISHED) {
                                mempool_free(mbox, phba->mbox_mem_pool);
                                acc_plogi = 1;
                        }
                }
                lpfc_no_rpi(phba, ndlp);

                if (phba->sli_rev != LPFC_SLI_REV4)
                        ndlp->nlp_rpi = 0;
                ndlp->nlp_flag &= ~NLP_RPI_REGISTERED;
                ndlp->nlp_flag &= ~NLP_NPR_ADISC;
                if (acc_plogi)
                        ndlp->nlp_flag &= ~NLP_LOGO_ACC;
                return 1;
        }
        ndlp->nlp_flag &= ~NLP_LOGO_ACC;
        return 0;
}

/**
 * lpfc_unreg_hba_rpis - Unregister rpis registered to the hba.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to unregister all the currently registered RPIs
 * to the HBA.
 **/
void
lpfc_unreg_hba_rpis(struct lpfc_hba *phba)
{
        struct lpfc_vport **vports;
        struct lpfc_nodelist *ndlp;
        struct Scsi_Host *shost;
        int i;

        vports = lpfc_create_vport_work_array(phba);
        if (!vports) {
                lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
                        "2884 Vport array allocation failed\n");
                return;
        }
        for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
                shost = lpfc_shost_from_vport(vports[i]);
                spin_lock_irq(shost->host_lock);
                list_for_each_entry(ndlp, &vports[i]->fc_nodes, nlp_listp) {
                        if (ndlp->nlp_flag & NLP_RPI_REGISTERED) {
                                /* The mempool_alloc might sleep */
                                spin_unlock_irq(shost->host_lock);
                                lpfc_unreg_rpi(vports[i], ndlp);
                                spin_lock_irq(shost->host_lock);
                        }
                }
                spin_unlock_irq(shost->host_lock);
        }
        lpfc_destroy_vport_work_array(phba, vports);
}

void
lpfc_unreg_all_rpis(struct lpfc_vport *vport)
{
        struct lpfc_hba  *phba  = vport->phba;
        LPFC_MBOXQ_t     *mbox;
        int rc;

        if (phba->sli_rev == LPFC_SLI_REV4) {
                lpfc_sli4_unreg_all_rpis(vport);
                return;
        }

        mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
        if (mbox) {
                lpfc_unreg_login(phba, vport->vpi, LPFC_UNREG_ALL_RPIS_VPORT,
                                 mbox);
                mbox->vport = vport;
                mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
                mbox->context1 = NULL;
                rc = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO);
                if (rc != MBX_TIMEOUT)
                        mempool_free(mbox, phba->mbox_mem_pool);

                if ((rc == MBX_TIMEOUT) || (rc == MBX_NOT_FINISHED))
                        lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX | LOG_VPORT,
                                "1836 Could not issue "
                                "unreg_login(all_rpis) status %d\n", rc);
        }
}

void
lpfc_unreg_default_rpis(struct lpfc_vport *vport)
{
        struct lpfc_hba  *phba  = vport->phba;
        LPFC_MBOXQ_t     *mbox;
        int rc;

        mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
        if (mbox) {
                lpfc_unreg_did(phba, vport->vpi, LPFC_UNREG_ALL_DFLT_RPIS,
                               mbox);
                mbox->vport = vport;
                mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
                mbox->context1 = NULL;
                rc = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO);
                if (rc != MBX_TIMEOUT)
                        mempool_free(mbox, phba->mbox_mem_pool);

                if ((rc == MBX_TIMEOUT) || (rc == MBX_NOT_FINISHED))
                        lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX | LOG_VPORT,
                                         "1815 Could not issue "
                                         "unreg_did (default rpis) status %d\n",
                                         rc);
        }
}

/*
 * Free resources associated with an LPFC_NODELIST entry
 * so that the node can be freed.
 */
static int
lpfc_cleanup_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
        struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
        struct lpfc_hba  *phba = vport->phba;
        LPFC_MBOXQ_t *mb, *nextmb;
        struct lpfc_dmabuf *mp;

        /* Cleanup node for NPort <nlp_DID> */
        lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
                         "0900 Cleanup node for NPort x%x "
                         "Data: x%x x%x x%x\n",
                         ndlp->nlp_DID, ndlp->nlp_flag,
                         ndlp->nlp_state, ndlp->nlp_rpi);
        if (NLP_CHK_FREE_REQ(ndlp)) {
                lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE,
                                "0280 lpfc_cleanup_node: ndlp:x%p "
                                "usgmap:x%x refcnt:%d\n",
                                (void *)ndlp, ndlp->nlp_usg_map,
                                kref_read(&ndlp->kref));
                lpfc_dequeue_node(vport, ndlp);
        } else {
                lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE,
                                "0281 lpfc_cleanup_node: ndlp:x%p "
                                "usgmap:x%x refcnt:%d\n",
                                (void *)ndlp, ndlp->nlp_usg_map,
                                kref_read(&ndlp->kref));
                lpfc_disable_node(vport, ndlp);
        }

        /* Don't need to clean up REG_LOGIN64 cmds for Default RPI cleanup */

        /* cleanup any ndlp on mbox q waiting for reglogin cmpl */
        if ((mb = phba->sli.mbox_active)) {
                if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) &&
                   !(mb->mbox_flag & LPFC_MBX_IMED_UNREG) &&
                   (ndlp == (struct lpfc_nodelist *) mb->context2)) {
                        mb->context2 = NULL;
                        mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
                }
        }

        spin_lock_irq(&phba->hbalock);
        /* Cleanup REG_LOGIN completions which are not yet processed */
        list_for_each_entry(mb, &phba->sli.mboxq_cmpl, list) {
                if ((mb->u.mb.mbxCommand != MBX_REG_LOGIN64) ||
                        (mb->mbox_flag & LPFC_MBX_IMED_UNREG) ||
                        (ndlp != (struct lpfc_nodelist *) mb->context2))
                        continue;

                mb->context2 = NULL;
                mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
        }

        list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) {
                if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) &&
                   !(mb->mbox_flag & LPFC_MBX_IMED_UNREG) &&
                    (ndlp == (struct lpfc_nodelist *) mb->context2)) {
                        mp = (struct lpfc_dmabuf *) (mb->context1);
                        if (mp) {
                                __lpfc_mbuf_free(phba, mp->virt, mp->phys);
                                kfree(mp);
                        }
                        list_del(&mb->list);
                        mempool_free(mb, phba->mbox_mem_pool);
                        /* We shall not invoke the lpfc_nlp_put to decrement
                         * the ndlp reference count as we are in the process
                         * of lpfc_nlp_release.
                         */
                }
        }
        spin_unlock_irq(&phba->hbalock);

        lpfc_els_abort(phba, ndlp);

        spin_lock_irq(shost->host_lock);
        ndlp->nlp_flag &= ~NLP_DELAY_TMO;
        spin_unlock_irq(shost->host_lock);

        ndlp->nlp_last_elscmd = 0;
        del_timer_sync(&ndlp->nlp_delayfunc);

        list_del_init(&ndlp->els_retry_evt.evt_listp);
        list_del_init(&ndlp->dev_loss_evt.evt_listp);
        lpfc_cleanup_vports_rrqs(vport, ndlp);
        lpfc_unreg_rpi(vport, ndlp);

        return 0;
}
4970
4971/*
4972 * Check to see if we can free the nlp back to the freelist.
4973 * If we are in the middle of using the nlp in the discovery state
4974 * machine, defer the free till we reach the end of the state machine.
4975 */
4976static void
4977lpfc_nlp_remove(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
4978{
4979        struct lpfc_hba  *phba = vport->phba;
4980        struct lpfc_rport_data *rdata;
4981        struct fc_rport *rport;
4982        LPFC_MBOXQ_t *mbox;
4983        int rc;
4984
4985        lpfc_cancel_retry_delay_tmo(vport, ndlp);
4986        if ((ndlp->nlp_flag & NLP_DEFER_RM) &&
4987            !(ndlp->nlp_flag & NLP_REG_LOGIN_SEND) &&
4988            !(ndlp->nlp_flag & NLP_RPI_REGISTERED) &&
4989            phba->sli_rev != LPFC_SLI_REV4) {
4990                /* For this case we need to clean up the default rpi
4991                 * allocated by the firmware.
4992                 */
4993                lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
4994                                 "0005 rpi:%x DID:%x flg:%x %d map:%x %p\n",
4995                                 ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag,
4996                                 kref_read(&ndlp->kref),
4997                                 ndlp->nlp_usg_map, ndlp);
4998                mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4999                if (mbox) {
5000                        rc = lpfc_reg_rpi(phba, vport->vpi, ndlp->nlp_DID,
5001                            (uint8_t *) &vport->fc_sparam, mbox, ndlp->nlp_rpi);
5002                        if (rc) {
5003                                mempool_free(mbox, phba->mbox_mem_pool);
5004                        } else {
5006                                mbox->mbox_flag |= LPFC_MBX_IMED_UNREG;
5007                                mbox->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi;
5008                                mbox->vport = vport;
5009                                mbox->context2 = ndlp;
5010                                rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
5011                                if (rc == MBX_NOT_FINISHED)
5012                                        mempool_free(mbox, phba->mbox_mem_pool);
5014                        }
5015                }
5016        }
5017        lpfc_cleanup_node(vport, ndlp);
5018
5019        /*
5020         * ndlp->rport must be set to NULL before it reaches here,
5021         * i.e. break the rport/node link before doing lpfc_nlp_put for
5022         * a registered rport, and then drop the rport reference.
5023         */
5024        if (ndlp->rport) {
5025                /*
5026                 * an extra lpfc_nlp_put dropped the ndlp reference for the
5027                 * registered rport, so the rport needs to be cleaned up
5028                 */
5029                lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE,
5030                                "0940 removed node x%p DID x%x "
5031                                "rport not null %p\n",
5032                                ndlp, ndlp->nlp_DID, ndlp->rport);
5033                rport = ndlp->rport;
5034                rdata = rport->dd_data;
5035                rdata->pnode = NULL;
5036                ndlp->rport = NULL;
5037                put_device(&rport->dev);
5038        }
5039}
5040
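/* Match a nodelist entry against a DID.  Besides an exact nlp_DID match,
 * a DID recorded as a bare loop ALPA (domain and area both zero) can
 * match the fabric-qualified DID for the same ALPA within the local
 * domain and area, and vice versa.
 */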
5041static int
5042lpfc_matchdid(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
5043              uint32_t did)
5044{
5045        D_ID mydid, ndlpdid, matchdid;
5046
5047        if (did == Bcast_DID)
5048                return 0;
5049
5050        /* First check for Direct match */
5051        if (ndlp->nlp_DID == did)
5052                return 1;
5053
5054        /* No domain/area in our own DID (private loop): direct match only */
5055        mydid.un.word = vport->fc_myDID;
5056        if ((mydid.un.b.domain == 0) && (mydid.un.b.area == 0)) {
5057                return 0;
5058        }
5059
5060        matchdid.un.word = did;
5061        ndlpdid.un.word = ndlp->nlp_DID;
5062        if (matchdid.un.b.id == ndlpdid.un.b.id) {
5063                if ((mydid.un.b.domain == matchdid.un.b.domain) &&
5064                    (mydid.un.b.area == matchdid.un.b.area)) {
5065                        /* This code is supposed to match the ID
5066                         * for a private loop device that is
5067                         * connected to an fl_port. But we need to
5068                         * check that the port did not just go
5069                         * from pt2pt to fabric or we could end
5070                         * up matching ndlp->nlp_DID 000001 to
5071                         * fabric DID 0x20101
5072                         */
5073                        if ((ndlpdid.un.b.domain == 0) &&
5074                            (ndlpdid.un.b.area == 0)) {
5075                                if (ndlpdid.un.b.id &&
5076                                    vport->phba->fc_topology ==
5077                                    LPFC_TOPOLOGY_LOOP)
5078                                        return 1;
5079                        }
5080                        return 0;
5081                }
5082
5083                matchdid.un.word = ndlp->nlp_DID;
5084                if ((mydid.un.b.domain == ndlpdid.un.b.domain) &&
5085                    (mydid.un.b.area == ndlpdid.un.b.area)) {
5086                        if ((matchdid.un.b.domain == 0) &&
5087                            (matchdid.un.b.area == 0)) {
5088                                if (matchdid.un.b.id)
5089                                        return 1;
5090                        }
5091                }
5092        }
5093        return 0;
5094}
5095
5096/* Search for a nodelist entry */
5097static struct lpfc_nodelist *
5098__lpfc_findnode_did(struct lpfc_vport *vport, uint32_t did)
5099{
5100        struct lpfc_nodelist *ndlp;
5101        uint32_t data1;
5102
5103        list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
5104                if (lpfc_matchdid(vport, ndlp, did)) {
5105                        data1 = (((uint32_t) ndlp->nlp_state << 24) |
5106                                 ((uint32_t) ndlp->nlp_xri << 16) |
5107                                 ((uint32_t) ndlp->nlp_type << 8) |
5108                                 ((uint32_t) ndlp->nlp_rpi & 0xff));
5109                        lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
5110                                         "0929 FIND node DID "
5111                                         "Data: x%p x%x x%x x%x %p\n",
5112                                         ndlp, ndlp->nlp_DID,
5113                                         ndlp->nlp_flag, data1,
5114                                         ndlp->active_rrqs_xri_bitmap);
5115                        return ndlp;
5116                }
5117        }
5118
5119        /* FIND node did <did> NOT FOUND */
5120        lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
5121                         "0932 FIND node did x%x NOT FOUND.\n", did);
5122        return NULL;
5123}
5124
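/* Locked wrapper around __lpfc_findnode_did: walks the vport's node
 * list under the SCSI host lock.
 */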
5125struct lpfc_nodelist *
5126lpfc_findnode_did(struct lpfc_vport *vport, uint32_t did)
5127{
5128        struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
5129        struct lpfc_nodelist *ndlp;
5130        unsigned long iflags;
5131
5132        spin_lock_irqsave(shost->host_lock, iflags);
5133        ndlp = __lpfc_findnode_did(vport, did);
5134        spin_unlock_irqrestore(shost->host_lock, iflags);
5135        return ndlp;
5136}
5137
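/* Find or create the nodelist entry for a DID that needs discovery.
 * On success the node is marked NLP_NPR_2B_DISC (and moved to NPR state
 * where appropriate); NULL means the DID needs no further discovery,
 * e.g. it fails the RSCN payload check or a PLOGI was already received.
 */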
5138struct lpfc_nodelist *
5139lpfc_setup_disc_node(struct lpfc_vport *vport, uint32_t did)
5140{
5141        struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
5142        struct lpfc_nodelist *ndlp;
5143
5144        ndlp = lpfc_findnode_did(vport, did);
5145        if (!ndlp) {
5146                if (vport->phba->nvmet_support)
5147                        return NULL;
5148                if ((vport->fc_flag & FC_RSCN_MODE) != 0 &&
5149                    lpfc_rscn_payload_check(vport, did) == 0)
5150                        return NULL;
5151                ndlp = lpfc_nlp_init(vport, did);
5152                if (!ndlp)
5153                        return NULL;
5154                lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
5155                spin_lock_irq(shost->host_lock);
5156                ndlp->nlp_flag |= NLP_NPR_2B_DISC;
5157                spin_unlock_irq(shost->host_lock);
5158                return ndlp;
5159        } else if (!NLP_CHK_NODE_ACT(ndlp)) {
5160                if (vport->phba->nvmet_support)
5161                        return NULL;
5162                ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_NPR_NODE);
5163                if (!ndlp)
5164                        return NULL;
5165                spin_lock_irq(shost->host_lock);
5166                ndlp->nlp_flag |= NLP_NPR_2B_DISC;
5167                spin_unlock_irq(shost->host_lock);
5168                return ndlp;
5169        }
5170
5171        /* The NVME Target does not want to actively manage an rport.
5172         * The goal is to allow the target to reset its state and clear
5173         * pending IO in preparation for the initiator to recover.
5174         */
5175        if ((vport->fc_flag & FC_RSCN_MODE) &&
5176            !(vport->fc_flag & FC_NDISC_ACTIVE)) {
5177                if (lpfc_rscn_payload_check(vport, did)) {
5178
5179                        /* Since this node is marked for discovery,
5180                         * delay timeout is not needed.
5181                         */
5182                        lpfc_cancel_retry_delay_tmo(vport, ndlp);
5183
5184                        /* NVME Target mode waits until rport is known to be
5185                         * impacted by the RSCN before it transitions.  No
5186                         * active management - just go to NPR provided the
5187                         * node had a valid login.
5188                         */
5189                        if (vport->phba->nvmet_support)
5190                                return ndlp;
5191
5192                        /* If we've already received a PLOGI from this NPort
5193                         * we don't need to try to discover it again.
5194                         */
5195                        if (ndlp->nlp_flag & NLP_RCV_PLOGI)
5196                                return NULL;
5197
5198                        spin_lock_irq(shost->host_lock);
5199                        ndlp->nlp_flag |= NLP_NPR_2B_DISC;
5200                        spin_unlock_irq(shost->host_lock);
5201                } else
5202                        ndlp = NULL;
5203        } else {
5204                /* If the initiator received a PLOGI from this NPort or if the
5205                 * initiator is already in the process of discovery on it,
5206                 * there's no need to try to discover it again.
5207                 */
5208                if (ndlp->nlp_state == NLP_STE_ADISC_ISSUE ||
5209                    ndlp->nlp_state == NLP_STE_PLOGI_ISSUE ||
5210                    (!vport->phba->nvmet_support &&
5211                     ndlp->nlp_flag & NLP_RCV_PLOGI))
5212                        return NULL;
5213
5214                if (vport->phba->nvmet_support)
5215                        return ndlp;
5216
5217                /* Moving to NPR state clears unsolicited flags and
5218                 * allows for rediscovery
5219                 */
5220                lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
5221
5222                spin_lock_irq(shost->host_lock);
5223                ndlp->nlp_flag |= NLP_NPR_2B_DISC;
5224                spin_unlock_irq(shost->host_lock);
5225        }
5226        return ndlp;
5227}
5228
5229/* Build a list of nodes to discover based on the loopmap */
5230void
5231lpfc_disc_list_loopmap(struct lpfc_vport *vport)
5232{
5233        struct lpfc_hba  *phba = vport->phba;
5234        int j;
5235        uint32_t alpa, index;
5236
5237        if (!lpfc_is_link_up(phba))
5238                return;
5239
5240        if (phba->fc_topology != LPFC_TOPOLOGY_LOOP)
5241                return;
5242
5243        /* Check for loop map present or not */
5244        if (phba->alpa_map[0]) {
5245                for (j = 1; j <= phba->alpa_map[0]; j++) {
5246                        alpa = phba->alpa_map[j];
5247                        if (((vport->fc_myDID & 0xff) == alpa) || (alpa == 0))
5248                                continue;
5249                        lpfc_setup_disc_node(vport, alpa);
5250                }
5251        } else {
5252                /* No alpamap, so try all alpa's */
5253                for (j = 0; j < FC_MAXLOOP; j++) {
5254                        /* If cfg_scan_down is set, start from highest
5255                         * ALPA (0xef) to lowest (0x1).
5256                         */
5257                        if (vport->cfg_scan_down)
5258                                index = j;
5259                        else
5260                                index = FC_MAXLOOP - j - 1;
5261                        alpa = lpfcAlpaArray[index];
5262                        if ((vport->fc_myDID & 0xff) == alpa)
5263                                continue;
5264                        lpfc_setup_disc_node(vport, alpa);
5265                }
5266        }
5267        return;
5268}
5269
5270/* SLI3 only */
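/* Issue a CLEAR_LA mailbox command to resume link attention processing.
 * It is sent only once per discovery cycle and only from the physical
 * port; if the mailbox cannot be issued, discovery is flushed and the
 * HBA is placed in error state.
 */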
5271void
5272lpfc_issue_clear_la(struct lpfc_hba *phba, struct lpfc_vport *vport)
5273{
5274        LPFC_MBOXQ_t *mbox;
5275        struct lpfc_sli *psli = &phba->sli;
5276        struct lpfc_sli_ring *extra_ring = &psli->sli3_ring[LPFC_EXTRA_RING];
5277        struct lpfc_sli_ring *fcp_ring   = &psli->sli3_ring[LPFC_FCP_RING];
5278        int  rc;
5279
5280        /*
5281         * If it's not a physical port, or if we already sent
5282         * CLEAR_LA, then don't send it again.
5283         */
5284        if ((phba->link_state >= LPFC_CLEAR_LA) ||
5285            (vport->port_type != LPFC_PHYSICAL_PORT) ||
5286                (phba->sli_rev == LPFC_SLI_REV4))
5287                return;
5288
5289        /* Link up discovery */
5290        if ((mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL)) != NULL) {
5291                phba->link_state = LPFC_CLEAR_LA;
5292                lpfc_clear_la(phba, mbox);
5293                mbox->mbox_cmpl = lpfc_mbx_cmpl_clear_la;
5294                mbox->vport = vport;
5295                rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
5296                if (rc == MBX_NOT_FINISHED) {
5297                        mempool_free(mbox, phba->mbox_mem_pool);
5298                        lpfc_disc_flush_list(vport);
5299                        extra_ring->flag &= ~LPFC_STOP_IOCB_EVENT;
5300                        fcp_ring->flag &= ~LPFC_STOP_IOCB_EVENT;
5301                        phba->link_state = LPFC_HBA_ERROR;
5302                }
5303        }
5304}
5305
5306/* Reg_vpi to tell firmware to resume normal operations */
5307void
5308lpfc_issue_reg_vpi(struct lpfc_hba *phba, struct lpfc_vport *vport)
5309{
5310        LPFC_MBOXQ_t *regvpimbox;
5311
5312        regvpimbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5313        if (regvpimbox) {
5314                lpfc_reg_vpi(vport, regvpimbox);
5315                regvpimbox->mbox_cmpl = lpfc_mbx_cmpl_reg_vpi;
5316                regvpimbox->vport = vport;
5317                if (lpfc_sli_issue_mbox(phba, regvpimbox, MBX_NOWAIT)
5318                                        == MBX_NOT_FINISHED) {
5319                        mempool_free(regvpimbox, phba->mbox_mem_pool);
5320                }
5321        }
5322}
5323
5324/* Start Link up / RSCN discovery on NPR nodes */
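/* The order of operations: ADISCs go out first for nodes that kept a
 * valid login; for SLI3 NPIV fabric mode a REG_VPI resumes the port;
 * otherwise CLEAR_LA is issued and PLOGIs are sent to the remaining NPR
 * nodes; finally, any RSCNs that arrived during this pass are handled.
 */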
5325void
5326lpfc_disc_start(struct lpfc_vport *vport)
5327{
5328        struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
5329        struct lpfc_hba  *phba = vport->phba;
5330        uint32_t num_sent;
5331        uint32_t clear_la_pending;
5332
5333        if (!lpfc_is_link_up(phba)) {
5334                lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
5335                                 "3315 Link is not up %x\n",
5336                                 phba->link_state);
5337                return;
5338        }
5339
5340        if (phba->link_state == LPFC_CLEAR_LA)
5341                clear_la_pending = 1;
5342        else
5343                clear_la_pending = 0;
5344
5345        if (vport->port_state < LPFC_VPORT_READY)
5346                vport->port_state = LPFC_DISC_AUTH;
5347
5348        lpfc_set_disctmo(vport);
5349
5350        vport->fc_prevDID = vport->fc_myDID;
5351        vport->num_disc_nodes = 0;
5352
5353        /* Start Discovery state <hba_state> */
5354        lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
5355                         "0202 Start Discovery hba state x%x "
5356                         "Data: x%x x%x x%x\n",
5357                         vport->port_state, vport->fc_flag, vport->fc_plogi_cnt,
5358                         vport->fc_adisc_cnt);
5359
5360        /* First do ADISCs - if any */
5361        num_sent = lpfc_els_disc_adisc(vport);
5362
5363        if (num_sent)
5364                return;
5365
5366        /* Register the VPI for SLI3, NPIV only. */
5367        if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
5368            !(vport->fc_flag & FC_PT2PT) &&
5369            !(vport->fc_flag & FC_RSCN_MODE) &&
5370            (phba->sli_rev < LPFC_SLI_REV4)) {
5371                lpfc_issue_clear_la(phba, vport);
5372                lpfc_issue_reg_vpi(phba, vport);
5373                return;
5374        }
5375
5376        /*
5377         * For SLI2, we need to set port_state to READY and continue
5378         * discovery.
5379         */
5380        if (vport->port_state < LPFC_VPORT_READY && !clear_la_pending) {
5381                /* If we get here, there is nothing to ADISC */
5382                lpfc_issue_clear_la(phba, vport);
5383
5384                if (!(vport->fc_flag & FC_ABORT_DISCOVERY)) {
5385                        vport->num_disc_nodes = 0;
5386                        /* go thru NPR nodes and issue ELS PLOGIs */
5387                        if (vport->fc_npr_cnt)
5388                                lpfc_els_disc_plogi(vport);
5389
5390                        if (!vport->num_disc_nodes) {
5391                                spin_lock_irq(shost->host_lock);
5392                                vport->fc_flag &= ~FC_NDISC_ACTIVE;
5393                                spin_unlock_irq(shost->host_lock);
5394                                lpfc_can_disctmo(vport);
5395                        }
5396                }
5397                vport->port_state = LPFC_VPORT_READY;
5398        } else {
5399                /* Next do PLOGIs - if any */
5400                num_sent = lpfc_els_disc_plogi(vport);
5401
5402                if (num_sent)
5403                        return;
5404
5405                if (vport->fc_flag & FC_RSCN_MODE) {
5406                        /* Check to see if more RSCNs came in while we
5407                         * were processing this one.
5408                         */
5409                        if ((vport->fc_rscn_id_cnt == 0) &&
5410                            (!(vport->fc_flag & FC_RSCN_DISCOVERY))) {
5411                                spin_lock_irq(shost->host_lock);
5412                                vport->fc_flag &= ~FC_RSCN_MODE;
5413                                spin_unlock_irq(shost->host_lock);
5414                                lpfc_can_disctmo(vport);
5415                        } else
5416                                lpfc_els_handle_rscn(vport);
5417                }
5418        }
5419        return;
5420}
5421
5422/*
5423 * Ignore completion for all IOCBs on the tx and txcmpl queues of the ELS
5424 * ring that match the specified nodelist.
5425 */
5426static void
5427lpfc_free_tx(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
5428{
5429        LIST_HEAD(completions);
5430        struct lpfc_sli *psli;
5431        IOCB_t     *icmd;
5432        struct lpfc_iocbq    *iocb, *next_iocb;
5433        struct lpfc_sli_ring *pring;
5434
5435        psli = &phba->sli;
5436        pring = lpfc_phba_elsring(phba);
5437        if (unlikely(!pring))
5438                return;
5439
5440        /* Error matching iocb on txq or txcmplq
5441         * First check the txq.
5442         */
5443        spin_lock_irq(&phba->hbalock);
5444        list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) {
5445                if (iocb->context1 != ndlp)
5446                        continue;
5448                icmd = &iocb->iocb;
5449                if ((icmd->ulpCommand == CMD_ELS_REQUEST64_CR) ||
5450                    (icmd->ulpCommand == CMD_XMIT_ELS_RSP64_CX)) {
5451
5452                        list_move_tail(&iocb->list, &completions);
5453                }
5454        }
5455
5456        /* Next check the txcmplq */
5457        list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) {
5458                if (iocb->context1 != ndlp)
5459                        continue;
5461                icmd = &iocb->iocb;
5462                if (icmd->ulpCommand == CMD_ELS_REQUEST64_CR ||
5463                    icmd->ulpCommand == CMD_XMIT_ELS_RSP64_CX) {
5464                        lpfc_sli_issue_abort_iotag(phba, pring, iocb);
5465                }
5466        }
5467        spin_unlock_irq(&phba->hbalock);
5468
5469        /* Cancel all the IOCBs from the completions list */
5470        lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
5471                              IOERR_SLI_ABORTED);
5472}
5473
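/* Flush outstanding ELS traffic for every node on this vport that is
 * still in PLOGI_ISSUE or ADISC_ISSUE state.
 */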
5474static void
5475lpfc_disc_flush_list(struct lpfc_vport *vport)
5476{
5477        struct lpfc_nodelist *ndlp, *next_ndlp;
5478        struct lpfc_hba *phba = vport->phba;
5479
5480        if (vport->fc_plogi_cnt || vport->fc_adisc_cnt) {
5481                list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes,
5482                                         nlp_listp) {
5483                        if (!NLP_CHK_NODE_ACT(ndlp))
5484                                continue;
5485                        if (ndlp->nlp_state == NLP_STE_PLOGI_ISSUE ||
5486                            ndlp->nlp_state == NLP_STE_ADISC_ISSUE) {
5487                                lpfc_free_tx(phba, ndlp);
5488                        }
5489                }
5490        }
5491}
5492
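/* Release every discovery resource held by the vport: pending RSCNs,
 * outstanding ELS commands, and nodes still being discovered.
 */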
5493void
5494lpfc_cleanup_discovery_resources(struct lpfc_vport *vport)
5495{
5496        lpfc_els_flush_rscn(vport);
5497        lpfc_els_flush_cmd(vport);
5498        lpfc_disc_flush_list(vport);
5499}
5500
5501/*****************************************************************************/
5502/*
5503 * NAME:     lpfc_disc_timeout
5504 *
5505 * FUNCTION: Fibre Channel driver discovery timeout routine.
5506 *
5507 * EXECUTION ENVIRONMENT: interrupt only
5508 *
5509 * CALLED FROM:
5510 *      Timer function
5511 *
5512 * RETURNS:
5513 *      none
5514 */
5515/*****************************************************************************/
5516void
5517lpfc_disc_timeout(unsigned long ptr)
5518{
5519        struct lpfc_vport *vport = (struct lpfc_vport *) ptr;
5520        struct lpfc_hba   *phba = vport->phba;
5521        uint32_t tmo_posted;
5522        unsigned long flags = 0;
5523
5524        if (unlikely(!phba))
5525                return;
5526
5527        spin_lock_irqsave(&vport->work_port_lock, flags);
5528        tmo_posted = vport->work_port_events & WORKER_DISC_TMO;
5529        if (!tmo_posted)
5530                vport->work_port_events |= WORKER_DISC_TMO;
5531        spin_unlock_irqrestore(&vport->work_port_lock, flags);
5532
5533        if (!tmo_posted)
5534                lpfc_worker_wake_up(phba);
5535        return;
5536}
5537
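/* Worker-thread side of the discovery timeout.  The recovery action is
 * chosen from the vport's port_state (FAN, FLOGI/FDISC, NameServer
 * login or query, node authentication, RSCN); if the HBA link itself is
 * in an unexpected state, discovery is flushed and the vport is forced
 * to READY.
 */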
5538static void
5539lpfc_disc_timeout_handler(struct lpfc_vport *vport)
5540{
5541        struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
5542        struct lpfc_hba  *phba = vport->phba;
5543        struct lpfc_sli  *psli = &phba->sli;
5544        struct lpfc_nodelist *ndlp, *next_ndlp;
5545        LPFC_MBOXQ_t *initlinkmbox;
5546        int rc, clrlaerr = 0;
5547
5548        if (!(vport->fc_flag & FC_DISC_TMO))
5549                return;
5550
5551        spin_lock_irq(shost->host_lock);
5552        vport->fc_flag &= ~FC_DISC_TMO;
5553        spin_unlock_irq(shost->host_lock);
5554
5555        lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
5556                "disc timeout:    state:x%x rtry:x%x flg:x%x",
5557                vport->port_state, vport->fc_ns_retry, vport->fc_flag);
5558
5559        switch (vport->port_state) {
5560
5561        case LPFC_LOCAL_CFG_LINK:
5562                /*
5563                 * port_state is identically LPFC_LOCAL_CFG_LINK while
5564                 * waiting for FAN timeout
5565                 */
5566                lpfc_printf_vlog(vport, KERN_WARNING, LOG_DISCOVERY,
5567                                 "0221 FAN timeout\n");
5568
5569                /* Start discovery by sending FLOGI, clean up old rpis */
5570                list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes,
5571                                         nlp_listp) {
5572                        if (!NLP_CHK_NODE_ACT(ndlp))
5573                                continue;
5574                        if (ndlp->nlp_state != NLP_STE_NPR_NODE)
5575                                continue;
5576                        if (ndlp->nlp_type & NLP_FABRIC) {
5577                                /* Clean up the ndlp on Fabric connections */
5578                                lpfc_drop_node(vport, ndlp);
5579
5580                        } else if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) {
5581                                /* Fail outstanding IO now since device
5582                                 * is marked for PLOGI.
5583                                 */
5584                                lpfc_unreg_rpi(vport, ndlp);
5585                        }
5586                }
5587                if (vport->port_state != LPFC_FLOGI) {
5588                        if (phba->sli_rev <= LPFC_SLI_REV3)
5589                                lpfc_initial_flogi(vport);
5590                        else
5591                                lpfc_issue_init_vfi(vport);
5592                        return;
5593                }
5594                break;
5595
5596        case LPFC_FDISC:
5597        case LPFC_FLOGI:
5598        /* port_state is identically LPFC_FLOGI while waiting for FLOGI cmpl */
5599                /* Initial FLOGI timeout */
5600                lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
5601                                 "0222 Initial %s timeout\n",
5602                                 vport->vpi ? "FDISC" : "FLOGI");
5603
5604                /* Assume no Fabric and go on with discovery.
5605                 * Check for outstanding ELS FLOGI to abort.
5606                 */
5607
5608                /* FLOGI failed, so just use loop map to make discovery list */
5609                lpfc_disc_list_loopmap(vport);
5610
5611                /* Start discovery */
5612                lpfc_disc_start(vport);
5613                break;
5614
5615        case LPFC_FABRIC_CFG_LINK:
5616        /* port_state is identically LPFC_FABRIC_CFG_LINK while
5617         * waiting for NameServer login */
5618                lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
5619                                 "0223 Timeout while waiting for "
5620                                 "NameServer login\n");
5621                /* Next look for NameServer ndlp */
5622                ndlp = lpfc_findnode_did(vport, NameServer_DID);
5623                if (ndlp && NLP_CHK_NODE_ACT(ndlp))
5624                        lpfc_els_abort(phba, ndlp);
5625
5626                /* ReStart discovery */
5627                goto restart_disc;
5628
5629        case LPFC_NS_QRY:
5630        /* Timed out waiting for NameServer response */
5631                lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
5632                                 "0224 NameServer Query timeout "
5633                                 "Data: x%x x%x\n",
5634                                 vport->fc_ns_retry, LPFC_MAX_NS_RETRY);
5635
5636                if (vport->fc_ns_retry < LPFC_MAX_NS_RETRY) {
5637                        /* Try it one more time */
5638                        vport->fc_ns_retry++;
5639                        vport->gidft_inp = 0;
5640                        rc = lpfc_issue_gidft(vport);
5641                        if (rc == 0)
5642                                break;
5643                }
5644                vport->fc_ns_retry = 0;
5645
5646restart_disc:
5647                /*
5648                 * Discovery is over.
5649                 * Set port_state to PORT_READY if SLI2.
5650                 * cmpl_reg_vpi will set port_state to READY for SLI3.
5651                 */
5652                if (phba->sli_rev < LPFC_SLI_REV4) {
5653                        if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
5654                                lpfc_issue_reg_vpi(phba, vport);
5655                        else  {
5656                                lpfc_issue_clear_la(phba, vport);
5657                                vport->port_state = LPFC_VPORT_READY;
5658                        }
5659                }
5660
5661                /* Setup and issue mailbox INITIALIZE LINK command */
5662                initlinkmbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5663                if (!initlinkmbox) {
5664                        lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
5665                                         "0206 Device Discovery "
5666                                         "completion error\n");
5667                        phba->link_state = LPFC_HBA_ERROR;
5668                        break;
5669                }
5670
5671                lpfc_linkdown(phba);
5672                lpfc_init_link(phba, initlinkmbox, phba->cfg_topology,
5673                               phba->cfg_link_speed);
5674                initlinkmbox->u.mb.un.varInitLnk.lipsr_AL_PA = 0;
5675                initlinkmbox->vport = vport;
5676                initlinkmbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
5677                rc = lpfc_sli_issue_mbox(phba, initlinkmbox, MBX_NOWAIT);
5678                lpfc_set_loopback_flag(phba);
5679                if (rc == MBX_NOT_FINISHED)
5680                        mempool_free(initlinkmbox, phba->mbox_mem_pool);
5681
5682                break;
5683
5684        case LPFC_DISC_AUTH:
5685        /* Node Authentication timeout */
5686                lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
5687                                 "0227 Node Authentication timeout\n");
5688                lpfc_disc_flush_list(vport);
5689
5690                /*
5691                 * Set port_state to PORT_READY if SLI2.
5692                 * cmpl_reg_vpi will set port_state to READY for SLI3.
5693                 */
5694                if (phba->sli_rev < LPFC_SLI_REV4) {
5695                        if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
5696                                lpfc_issue_reg_vpi(phba, vport);
5697                        else  { /* NPIV Not enabled */
5698                                lpfc_issue_clear_la(phba, vport);
5699                                vport->port_state = LPFC_VPORT_READY;
5700                        }
5701                }
5702                break;
5703
5704        case LPFC_VPORT_READY:
5705                if (vport->fc_flag & FC_RSCN_MODE) {
5706                        lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
5707                                         "0231 RSCN timeout Data: x%x "
5708                                         "x%x\n",
5709                                         vport->fc_ns_retry, LPFC_MAX_NS_RETRY);
5710
5711                        /* Cleanup any outstanding ELS commands */
5712                        lpfc_els_flush_cmd(vport);
5713
5714                        lpfc_els_flush_rscn(vport);
5715                        lpfc_disc_flush_list(vport);
5716                }
5717                break;
5718
5719        default:
5720                lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
5721                                 "0273 Unexpected discovery timeout, "
5722                                 "vport State x%x\n", vport->port_state);
5723                break;
5724        }
5725
5726        switch (phba->link_state) {
5727        case LPFC_CLEAR_LA:
5728                /* CLEAR LA timeout */
5729                lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
5730                                 "0228 CLEAR LA timeout\n");
5731                clrlaerr = 1;
5732                break;
5733
5734        case LPFC_LINK_UP:
5735                lpfc_issue_clear_la(phba, vport);
5736                /* Fall through */
5737        case LPFC_LINK_UNKNOWN:
5738        case LPFC_WARM_START:
5739        case LPFC_INIT_START:
5740        case LPFC_INIT_MBX_CMDS:
5741        case LPFC_LINK_DOWN:
5742        case LPFC_HBA_ERROR:
5743                lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
5744                                 "0230 Unexpected timeout, hba link "
5745                                 "state x%x\n", phba->link_state);
5746                clrlaerr = 1;
5747                break;
5748
5749        case LPFC_HBA_READY:
5750                break;
5751        }
5752
5753        if (clrlaerr) {
5754                lpfc_disc_flush_list(vport);
5755                if (phba->sli_rev != LPFC_SLI_REV4) {
5756                        psli->sli3_ring[(LPFC_EXTRA_RING)].flag &=
5757                                ~LPFC_STOP_IOCB_EVENT;
5758                        psli->sli3_ring[LPFC_FCP_RING].flag &=
5759                                ~LPFC_STOP_IOCB_EVENT;
5760                }
5761                vport->port_state = LPFC_VPORT_READY;
5762        }
5763        return;
5764}
5765
5766/*
5767 * This routine handles processing an FDMI REG_LOGIN mailbox
5768 * command upon completion. It is set up in the LPFC_MBOXQ
5769 * as the completion routine when the command is
5770 * handed off to the SLI layer.
5771 */
5772void
5773lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
5774{
5775        MAILBOX_t *mb = &pmb->u.mb;
5776        struct lpfc_dmabuf   *mp = (struct lpfc_dmabuf *) (pmb->context1);
5777        struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2;
5778        struct lpfc_vport    *vport = pmb->vport;
5779
5780        pmb->context1 = NULL;
5781        pmb->context2 = NULL;
5782
5783        if (phba->sli_rev < LPFC_SLI_REV4)
5784                ndlp->nlp_rpi = mb->un.varWords[0];
5785        ndlp->nlp_flag |= NLP_RPI_REGISTERED;
5786        ndlp->nlp_type |= NLP_FABRIC;
5787        lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
5788        lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
5789                         "0004 rpi:%x DID:%x flg:%x %d map:%x %p\n",
5790                         ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag,
5791                         kref_read(&ndlp->kref),
5792                         ndlp->nlp_usg_map, ndlp);
5793        /*
5794         * Start issuing Fabric-Device Management Interface (FDMI) command to
5795         * 0xfffffa (FDMI well known port).
5796         * DHBA -> DPRT -> RHBA -> RPA  (physical port)
5797         * DPRT -> RPRT (vports)
5798         */
5799        if (vport->port_type == LPFC_PHYSICAL_PORT)
5800                lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_DHBA, 0);
5801        else
5802                lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_DPRT, 0);
5803
5805        /* decrement the node reference count held for this callback
5806         * function.
5807         */
5808        lpfc_nlp_put(ndlp);
5809        lpfc_mbuf_free(phba, mp->virt, mp->phys);
5810        kfree(mp);
5811        mempool_free(pmb, phba->mbox_mem_pool);
5812
5813        return;
5814}
5815
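/* Node-list filter for __lpfc_find_node(): match an active node by RPI. */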
5816static int
5817lpfc_filter_by_rpi(struct lpfc_nodelist *ndlp, void *param)
5818{
5819        uint16_t *rpi = param;
5820
5821        /* check for active node */
5822        if (!NLP_CHK_NODE_ACT(ndlp))
5823                return 0;
5824
5825        return ndlp->nlp_rpi == *rpi;
5826}
5827
5828static int
5829lpfc_filter_by_wwpn(struct lpfc_nodelist *ndlp, void *param)
5830{
5831        return memcmp(&ndlp->nlp_portname, param,
5832                      sizeof(ndlp->nlp_portname)) == 0;
5833}
5834
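/* Walk the vport's node list and return the first entry accepted by
 * @filter.  The caller is expected to hold the host lock.
 */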
5835static struct lpfc_nodelist *
5836__lpfc_find_node(struct lpfc_vport *vport, node_filter filter, void *param)
5837{
5838        struct lpfc_nodelist *ndlp;
5839
5840        list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
5841                if (filter(ndlp, param)) {
5842                        lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
5843                                         "3185 FIND node filter %p DID "
5844                                         "ndlp %p did x%x flg x%x st x%x "
5845                                         "xri x%x type x%x rpi x%x\n",
5846                                         filter, ndlp, ndlp->nlp_DID,
5847                                         ndlp->nlp_flag, ndlp->nlp_state,
5848                                         ndlp->nlp_xri, ndlp->nlp_type,
5849                                         ndlp->nlp_rpi);
5850                        return ndlp;
5851                }
5852        }
5853        lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
5854                         "3186 FIND node filter %p NOT FOUND.\n", filter);
5855        return NULL;
5856}
5857
5858/*
5859 * This routine looks up the ndlp lists for the given RPI. If the RPI is
5860 * found, it returns the node list element pointer, else it returns NULL.
5861 */
5862struct lpfc_nodelist *
5863__lpfc_findnode_rpi(struct lpfc_vport *vport, uint16_t rpi)
5864{
5865        return __lpfc_find_node(vport, lpfc_filter_by_rpi, &rpi);
5866}
5867
5868/*
5869 * This routine looks up the ndlp lists for the given WWPN. If the WWPN is
5870 * found, it returns the node list element pointer, else it returns NULL.
5871 */
5872struct lpfc_nodelist *
5873lpfc_findnode_wwpn(struct lpfc_vport *vport, struct lpfc_name *wwpn)
5874{
5875        struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
5876        struct lpfc_nodelist *ndlp;
5877
5878        spin_lock_irq(shost->host_lock);
5879        ndlp = __lpfc_find_node(vport, lpfc_filter_by_wwpn, wwpn);
5880        spin_unlock_irq(shost->host_lock);
5881        return ndlp;
5882}
5883
5884/*
5885 * This routine looks up the ndlp lists for the given RPI. If the RPI
5886 * is found, the routine returns the node list element pointer, else it
5887 * returns NULL.
5888 */
5889struct lpfc_nodelist *
5890lpfc_findnode_rpi(struct lpfc_vport *vport, uint16_t rpi)
5891{
5892        struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
5893        struct lpfc_nodelist *ndlp;
5894
5895        spin_lock_irq(shost->host_lock);
5896        ndlp = __lpfc_findnode_rpi(vport, rpi);
5897        spin_unlock_irq(shost->host_lock);
5898        return ndlp;
5899}
5900
5901/**
5902 * lpfc_find_vport_by_vpid - Find a vport on a HBA through vport identifier
5903 * @phba: pointer to lpfc hba data structure.
5904 * @vpi: the physical host virtual N_Port identifier.
5905 *
5906 * This routine finds a vport on a HBA (referred by @phba) through a
5907 * @vpi. The function walks the HBA's vport list and returns the address
5908 * of the vport with the matching @vpi.
5909 *
5910 * Return code
5911 *    NULL - No vport with the matching @vpi found
5912 *    Otherwise - Address to the vport with the matching @vpi.
5913 **/
5914struct lpfc_vport *
5915lpfc_find_vport_by_vpid(struct lpfc_hba *phba, uint16_t vpi)
5916{
5917        struct lpfc_vport *vport;
5918        unsigned long flags;
5919        int i = 0;
5920
5921        /* The physical ports are always vpi 0 - translation is unnecessary. */
5922        if (vpi > 0) {
5923                /*
5924                 * Translate the physical vpi to the logical vpi.  The
5925                 * vport stores the logical vpi.
5926                 */
5927                for (i = 0; i < phba->max_vpi; i++) {
5928                        if (vpi == phba->vpi_ids[i])
5929                                break;
5930                }
5931
5932                if (i >= phba->max_vpi) {
5933                        lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
5934                                         "2936 Could not find Vport mapped "
5935                                         "to vpi %d\n", vpi);
5936                        return NULL;
5937                }
5938        }
5939
5940        spin_lock_irqsave(&phba->hbalock, flags);
5941        list_for_each_entry(vport, &phba->port_list, listentry) {
5942                if (vport->vpi == i) {
5943                        spin_unlock_irqrestore(&phba->hbalock, flags);
5944                        return vport;
5945                }
5946        }
5947        spin_unlock_irqrestore(&phba->hbalock, flags);
5948        return NULL;
5949}
5950
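/* Allocate and initialize a new nodelist entry for @did.  On SLI4 an
 * RPI and the active RRQ XRI bitmap are allocated here as well; NULL is
 * returned if the RPI or the node allocation fails.
 */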
5951struct lpfc_nodelist *
5952lpfc_nlp_init(struct lpfc_vport *vport, uint32_t did)
5953{
5954        struct lpfc_nodelist *ndlp;
5955        int rpi = LPFC_RPI_ALLOC_ERROR;
5956
5957        if (vport->phba->sli_rev == LPFC_SLI_REV4) {
5958                rpi = lpfc_sli4_alloc_rpi(vport->phba);
5959                if (rpi == LPFC_RPI_ALLOC_ERROR)
5960                        return NULL;
5961        }
5962
5963        ndlp = mempool_alloc(vport->phba->nlp_mem_pool, GFP_KERNEL);
5964        if (!ndlp) {
5965                if (vport->phba->sli_rev == LPFC_SLI_REV4)
5966                        lpfc_sli4_free_rpi(vport->phba, rpi);
5967                return NULL;
5968        }
5969
5970        memset(ndlp, 0, sizeof(struct lpfc_nodelist));
5971
5972        lpfc_initialize_node(vport, ndlp, did);
5973        INIT_LIST_HEAD(&ndlp->nlp_listp);
5974        if (vport->phba->sli_rev == LPFC_SLI_REV4) {
5975                ndlp->nlp_rpi = rpi;
5976                lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
5977                                 "0007 rpi:%x DID:%x flg:%x refcnt:%d "
5978                                 "map:%x %p\n", ndlp->nlp_rpi, ndlp->nlp_DID,
5979                                 ndlp->nlp_flag,
5980                                 kref_read(&ndlp->kref),
5981                                 ndlp->nlp_usg_map, ndlp);
5982
5983                ndlp->active_rrqs_xri_bitmap =
5984                                mempool_alloc(vport->phba->active_rrq_pool,
5985                                              GFP_KERNEL);
5986                if (ndlp->active_rrqs_xri_bitmap)
5987                        memset(ndlp->active_rrqs_xri_bitmap, 0,
5988                               ndlp->phba->cfg_rrq_xri_bitmap_sz);
5989        }
5990
5993        lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_NODE,
5994                "node init:       did:x%x",
5995                ndlp->nlp_DID, 0, 0);
5996
5997        return ndlp;
5998}
5999
6000/* This routine releases all resources associated with a specific NPort's ndlp
6001 * and frees the nodelist back to its mempool.
6002 */
6003static void
6004lpfc_nlp_release(struct kref *kref)
6005{
6006        struct lpfc_hba *phba;
6007        unsigned long flags;
6008        struct lpfc_nodelist *ndlp = container_of(kref, struct lpfc_nodelist,
6009                                                  kref);
6010
6011        lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE,
6012                "node release:    did:x%x flg:x%x type:x%x",
6013                ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type);
6014
6015        lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
6016                        "0279 lpfc_nlp_release: ndlp:x%p did %x "
6017                        "usgmap:x%x refcnt:%d rpi:%x\n",
6018                        (void *)ndlp, ndlp->nlp_DID, ndlp->nlp_usg_map,
6019                        kref_read(&ndlp->kref), ndlp->nlp_rpi);
6020
6021        /* remove ndlp from action. */
6022        lpfc_nlp_remove(ndlp->vport, ndlp);
6023
6024        /* clear the ndlp active flag for all release cases */
6025        phba = ndlp->phba;
6026        spin_lock_irqsave(&phba->ndlp_lock, flags);
6027        NLP_CLR_NODE_ACT(ndlp);
6028        spin_unlock_irqrestore(&phba->ndlp_lock, flags);
6029        if (phba->sli_rev == LPFC_SLI_REV4)
6030                lpfc_sli4_free_rpi(phba, ndlp->nlp_rpi);
6031
6032        /* free ndlp memory for final ndlp release */
6033        if (NLP_CHK_FREE_REQ(ndlp)) {
6034                kfree(ndlp->lat_data);
6035                if (phba->sli_rev == LPFC_SLI_REV4)
6036                        mempool_free(ndlp->active_rrqs_xri_bitmap,
6037                                     ndlp->phba->active_rrq_pool);
6038                mempool_free(ndlp, ndlp->phba->nlp_mem_pool);
6039        }
6040}
6041
6042/* This routine bumps the reference count for an ndlp structure to ensure
6043 * that one discovery thread won't free an ndlp while another discovery thread
6044 * is using it.
6045 */
6046struct lpfc_nodelist *
6047lpfc_nlp_get(struct lpfc_nodelist *ndlp)
6048{
6049        struct lpfc_hba *phba;
6050        unsigned long flags;
6051
6052        if (ndlp) {
6053                lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE,
6054                        "node get:        did:x%x flg:x%x refcnt:x%x",
6055                        ndlp->nlp_DID, ndlp->nlp_flag,
6056                        kref_read(&ndlp->kref));
6057                /* Check the ndlp usage map to avoid incrementing the
6058                 * reference count of an ndlp that is in the process of
6059                 * being released.
6060                 */
6061                phba = ndlp->phba;
6062                spin_lock_irqsave(&phba->ndlp_lock, flags);
6063                if (!NLP_CHK_NODE_ACT(ndlp) || NLP_CHK_FREE_ACK(ndlp)) {
6064                        spin_unlock_irqrestore(&phba->ndlp_lock, flags);
6065                        lpfc_printf_vlog(ndlp->vport, KERN_WARNING, LOG_NODE,
6066                                "0276 lpfc_nlp_get: ndlp:x%p "
6067                                "usgmap:x%x refcnt:%d\n",
6068                                (void *)ndlp, ndlp->nlp_usg_map,
6069                                kref_read(&ndlp->kref));
6070                        return NULL;
6071                } else
6072                        kref_get(&ndlp->kref);
6073                spin_unlock_irqrestore(&phba->ndlp_lock, flags);
6074        }
6075        return ndlp;
6076}
6077
6078/* This routine decrements the reference count for an ndlp structure. If the
6079 * count goes to 0, this indicates that the associated nodelist should be
6080 * freed. Returning 1 indicates the ndlp resource has been released; on the
6081 * other hand, returning 0 indicates the ndlp resource has not been released
6082 * yet.
6083 */
6084int
6085lpfc_nlp_put(struct lpfc_nodelist *ndlp)
6086{
6087        struct lpfc_hba *phba;
6088        unsigned long flags;
6089
6090        if (!ndlp)
6091                return 1;
6092
6093        lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE,
6094        "node put:        did:x%x flg:x%x refcnt:x%x",
6095                ndlp->nlp_DID, ndlp->nlp_flag,
6096                kref_read(&ndlp->kref));
6097        phba = ndlp->phba;
6098        spin_lock_irqsave(&phba->ndlp_lock, flags);
6099        /* Check the ndlp memory free acknowledge flag to avoid the
6100         * possible race condition where kref_put is invoked again
6101         * after a previous one has already freed the ndlp memory.
6102         */
6103        if (NLP_CHK_FREE_ACK(ndlp)) {
6104                spin_unlock_irqrestore(&phba->ndlp_lock, flags);
6105                lpfc_printf_vlog(ndlp->vport, KERN_WARNING, LOG_NODE,
6106                                "0274 lpfc_nlp_put: ndlp:x%p "
6107                                "usgmap:x%x refcnt:%d\n",
6108                                (void *)ndlp, ndlp->nlp_usg_map,
6109                                kref_read(&ndlp->kref));
6110                return 1;
6111        }
6112        /* Check the ndlp inactivate request flag to avoid the possible
6113         * race condition where kref_put is invoked again while the ndlp
6114         * is already being inactivated.
6115         */
6116        if (NLP_CHK_IACT_REQ(ndlp)) {
6117                spin_unlock_irqrestore(&phba->ndlp_lock, flags);
6118                lpfc_printf_vlog(ndlp->vport, KERN_WARNING, LOG_NODE,
6119                                "0275 lpfc_nlp_put: ndlp:x%p "
6120                                "usgmap:x%x refcnt:%d\n",
6121                                (void *)ndlp, ndlp->nlp_usg_map,
6122                                kref_read(&ndlp->kref));
6123                return 1;
6124        }
6125        /* For the last put, mark the ndlp usage flags to make sure no
6126         * other kref_get or kref_put on the same ndlp can slip in
6127         * while the final kref_put is being processed on this ndlp.
6129         */
6130        if (kref_read(&ndlp->kref) == 1) {
6131                /* Indicate ndlp is put to inactive state. */
6132                NLP_SET_IACT_REQ(ndlp);
6133                /* Acknowledge ndlp memory free has been seen. */
6134                if (NLP_CHK_FREE_REQ(ndlp))
6135                        NLP_SET_FREE_ACK(ndlp);
6136        }
6137        spin_unlock_irqrestore(&phba->ndlp_lock, flags);
6138        /* Note: kref_put returns 1 when it decrements a reference
6139         * count that was 1; it invokes the release callback function
6140         * but leaves the reference count at 1 (it does not actually
6141         * perform the final decrement). Otherwise, it simply
6142         * decrements the reference count and returns 0.
6143         */
6144        return kref_put(&ndlp->kref, lpfc_nlp_release);
6145}
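
/* A minimal caller-side sketch of the get/put pairing above (purely
 * illustrative; issue_some_cmd() is a hypothetical asynchronous
 * operation):
 *
 *	if (lpfc_nlp_get(ndlp))
 *		issue_some_cmd(ndlp);
 *
 * and later, from the command's completion handler:
 *
 *	lpfc_nlp_put(ndlp);
 */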
6146
6147/* This routine frees the specified nodelist if it is not in use
6148 * by any other discovery thread. This routine returns 1 if the
6149 * ndlp has been freed. A return value of 0 indicates the ndlp has
6150 * not yet been released.
6151 */
6152int
6153lpfc_nlp_not_used(struct lpfc_nodelist *ndlp)
6154{
6155        lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE,
6156                "node not used:   did:x%x flg:x%x refcnt:x%x",
6157                ndlp->nlp_DID, ndlp->nlp_flag,
6158                kref_read(&ndlp->kref));
6159        if (kref_read(&ndlp->kref) == 1)
6160                if (lpfc_nlp_put(ndlp))
6161                        return 1;
6162        return 0;
6163}
6164
6165/**
6166 * lpfc_fcf_inuse - Check if FCF can be unregistered.
6167 * @phba: Pointer to hba context object.
6168 *
6169 * This function iterate through all FC nodes associated
6170 * This function iterates through all FC nodes associated with all
6171 * vports to check if any node has an fc_rport associated with it.
6172 * If there is an fc_rport associated with the node, then the node
6173 * is either in a discovered state or its devloss_timer is pending.
6174 * Returns 1 if the FCF is still in use, otherwise 0.
6175static int
6176lpfc_fcf_inuse(struct lpfc_hba *phba)
6177{
6178        struct lpfc_vport **vports;
6179        int i, ret = 0;
6180        struct lpfc_nodelist *ndlp;
6181        struct Scsi_Host  *shost;
6182
6183        vports = lpfc_create_vport_work_array(phba);
6184
6185        /* If driver cannot allocate memory, indicate fcf is in use */
6186        if (!vports)
6187                return 1;
6188
6189        for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
6190                shost = lpfc_shost_from_vport(vports[i]);
6191                spin_lock_irq(shost->host_lock);
6192                /*
6193                 * If the CVL_RCVD bit is not set then we have sent the
6194                 * FLOGI.
6195                 * If dev_loss fires while we are waiting we do not want to
6196                 * unreg the fcf.
6197                 */
6198                if (!(vports[i]->fc_flag & FC_VPORT_CVL_RCVD)) {
6199                        spin_unlock_irq(shost->host_lock);
6200                        ret =  1;
6201                        goto out;
6202                }
6203                list_for_each_entry(ndlp, &vports[i]->fc_nodes, nlp_listp) {
6204                        if (NLP_CHK_NODE_ACT(ndlp) && ndlp->rport &&
6205                          (ndlp->rport->roles & FC_RPORT_ROLE_FCP_TARGET)) {
6206                                ret = 1;
6207                                spin_unlock_irq(shost->host_lock);
6208                                goto out;
6209                        } else if (ndlp->nlp_flag & NLP_RPI_REGISTERED) {
6210                                ret = 1;
6211                                lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
6212                                                "2624 RPI %x DID %x flag %x "
6213                                                "still logged in\n",
6214                                                ndlp->nlp_rpi, ndlp->nlp_DID,
6215                                                ndlp->nlp_flag);
6216                        }
6217                }
6218                spin_unlock_irq(shost->host_lock);
6219        }
6220out:
6221        lpfc_destroy_vport_work_array(phba, vports);
6222        return ret;
6223}
6224
6225/**
6226 * lpfc_unregister_vfi_cmpl - Completion handler for unreg vfi.
6227 * @phba: Pointer to hba context object.
6228 * @mboxq: Pointer to mailbox object.
6229 *
6230 * This function frees memory associated with the mailbox command.
6231 */
6232void
6233lpfc_unregister_vfi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
6234{
6235        struct lpfc_vport *vport = mboxq->vport;
6236        struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
6237
6238        if (mboxq->u.mb.mbxStatus) {
6239                lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
6240                        "2555 UNREG_VFI mbxStatus error x%x "
6241                        "HBA state x%x\n",
6242                        mboxq->u.mb.mbxStatus, vport->port_state);
6243        }
6244        spin_lock_irq(shost->host_lock);
6245        phba->pport->fc_flag &= ~FC_VFI_REGISTERED;
6246        spin_unlock_irq(shost->host_lock);
6247        mempool_free(mboxq, phba->mbox_mem_pool);
6248        return;
6249}
6250
6251/**
6252 * lpfc_unregister_fcfi_cmpl - Completion handler for unreg fcfi.
6253 * @phba: Pointer to hba context object.
6254 * @mboxq: Pointer to mailbox object.
6255 *
6256 * This function frees memory associated with the mailbox command.
6257 */
6258static void
6259lpfc_unregister_fcfi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
6260{
6261        struct lpfc_vport *vport = mboxq->vport;
6262
6263        if (mboxq->u.mb.mbxStatus) {
6264                lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
6265                        "2550 UNREG_FCFI mbxStatus error x%x "
6266                        "HBA state x%x\n",
6267                        mboxq->u.mb.mbxStatus, vport->port_state);
6268        }
6269        mempool_free(mboxq, phba->mbox_mem_pool);
6270        return;
6271}
6272
6273/**
6274 * lpfc_unregister_fcf_prep - Unregister fcf record preparation
6275 * @phba: Pointer to hba context object.
6276 *
6277 * This function prepares the HBA for unregistering the currently registered
6278 * FCF from the HBA. It unregisters, in that order, the RPIs, the VPIs, and
6279 * the VFI.
6280 */
int
lpfc_unregister_fcf_prep(struct lpfc_hba *phba)
{
        struct lpfc_vport **vports;
        struct lpfc_nodelist *ndlp;
        struct Scsi_Host *shost;
        int i = 0, rc;

        /* Unregister RPIs */
        if (lpfc_fcf_inuse(phba))
                lpfc_unreg_hba_rpis(phba);

        /* At this point, all discovery is aborted */
        phba->pport->port_state = LPFC_VPORT_UNKNOWN;

        /* Unregister VPIs */
        vports = lpfc_create_vport_work_array(phba);
        if (vports && (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED))
                for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
                        /* Stop FLOGI/FDISC retries */
                        ndlp = lpfc_findnode_did(vports[i], Fabric_DID);
                        if (ndlp)
                                lpfc_cancel_retry_delay_tmo(vports[i], ndlp);
                        lpfc_cleanup_pending_mbox(vports[i]);
                        if (phba->sli_rev == LPFC_SLI_REV4)
                                lpfc_sli4_unreg_all_rpis(vports[i]);
                        lpfc_mbx_unreg_vpi(vports[i]);
                        shost = lpfc_shost_from_vport(vports[i]);
                        spin_lock_irq(shost->host_lock);
                        vports[i]->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
                        vports[i]->vpi_state &= ~LPFC_VPI_REGISTERED;
                        spin_unlock_irq(shost->host_lock);
                }
        lpfc_destroy_vport_work_array(phba, vports);
        if (i == 0 && (!(phba->sli3_options & LPFC_SLI3_NPIV_ENABLED))) {
                ndlp = lpfc_findnode_did(phba->pport, Fabric_DID);
                if (ndlp)
                        lpfc_cancel_retry_delay_tmo(phba->pport, ndlp);
                lpfc_cleanup_pending_mbox(phba->pport);
                if (phba->sli_rev == LPFC_SLI_REV4)
                        lpfc_sli4_unreg_all_rpis(phba->pport);
                lpfc_mbx_unreg_vpi(phba->pport);
                shost = lpfc_shost_from_vport(phba->pport);
                spin_lock_irq(shost->host_lock);
                phba->pport->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
                phba->pport->vpi_state &= ~LPFC_VPI_REGISTERED;
                spin_unlock_irq(shost->host_lock);
        }

        /* Cleanup any outstanding ELS commands */
        lpfc_els_flush_all_cmd(phba);

        /* Unregister the physical port VFI */
        rc = lpfc_issue_unreg_vfi(phba->pport);
        return rc;
}

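/*
 * Illustrative usage sketch (not part of the driver): callers pair
 * lpfc_unregister_fcf_prep() with lpfc_sli4_unregister_fcf() and only
 * proceed when preparation succeeds, e.g.:
 *
 *        if (!lpfc_unregister_fcf_prep(phba))
 *                lpfc_sli4_unregister_fcf(phba);
 *
 * lpfc_unregister_fcf_rescan() and lpfc_unregister_fcf() below follow
 * this pattern.
 */
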
/**
 * lpfc_sli4_unregister_fcf - Unregister currently registered FCF record
 * @phba: Pointer to hba context object.
 *
 * This function issues the unregister FCF mailbox command to the HBA to
 * unregister the currently registered FCF record; completion is handled
 * asynchronously by lpfc_unregister_fcfi_cmpl(). The driver does not reset
 * the driver FCF usage state flags.
 *
 * Return 0 if successfully issued, non-zero otherwise.
 */
int
lpfc_sli4_unregister_fcf(struct lpfc_hba *phba)
{
        LPFC_MBOXQ_t *mbox;
        int rc;

        mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
        if (!mbox) {
                lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
                                "2551 UNREG_FCFI mbox allocation failed "
                                "HBA state x%x\n", phba->pport->port_state);
                return -ENOMEM;
        }
        lpfc_unreg_fcfi(mbox, phba->fcf.fcfi);
        mbox->vport = phba->pport;
        mbox->mbox_cmpl = lpfc_unregister_fcfi_cmpl;
        rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);

        if (rc == MBX_NOT_FINISHED) {
                lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
                                "2552 Unregister FCFI command failed rc x%x "
                                "HBA state x%x\n",
                                rc, phba->pport->port_state);
                /* Mailbox was never queued; free it here to avoid a leak */
                mempool_free(mbox, phba->mbox_mem_pool);
                return -EINVAL;
        }
        return 0;
}

/**
 * lpfc_unregister_fcf_rescan - Unregister currently registered fcf and rescan
 * @phba: Pointer to hba context object.
 *
 * This function unregisters the currently registered FCF. It also tries to
 * find another FCF for discovery by rescanning the HBA FCF table.
 */
void
lpfc_unregister_fcf_rescan(struct lpfc_hba *phba)
{
        int rc;

        /* Preparation for unregistering fcf */
        rc = lpfc_unregister_fcf_prep(phba);
        if (rc) {
                lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
                                "2748 Failed to prepare for unregistering "
                                "HBA's FCF record: rc=%d\n", rc);
                return;
        }

        /* Now, unregister FCF record and reset HBA FCF state */
        rc = lpfc_sli4_unregister_fcf(phba);
        if (rc)
                return;
        /* Reset HBA FCF states after successful unregister FCF */
        phba->fcf.fcf_flag = 0;
        phba->fcf.current_rec.flag = 0;

        /*
         * If driver is not unloading, check if there is any other
         * FCF record that can be used for discovery.
         */
        if ((phba->pport->load_flag & FC_UNLOADING) ||
            (phba->link_state < LPFC_LINK_UP))
                return;

        /* This is considered as the initial FCF discovery scan */
        spin_lock_irq(&phba->hbalock);
        phba->fcf.fcf_flag |= FCF_INIT_DISC;
        spin_unlock_irq(&phba->hbalock);

        /* Reset FCF roundrobin bmask for new discovery */
        lpfc_sli4_clear_fcf_rr_bmask(phba);

        rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST);

        if (rc) {
                spin_lock_irq(&phba->hbalock);
                phba->fcf.fcf_flag &= ~FCF_INIT_DISC;
                spin_unlock_irq(&phba->hbalock);
                lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
                                "2553 lpfc_unregister_unused_fcf failed "
                                "to read FCF record HBA state x%x\n",
                                phba->pport->port_state);
        }
}

/**
 * lpfc_unregister_fcf - Unregister the currently registered fcf record
 * @phba: Pointer to hba context object.
 *
 * This function just unregisters the currently registered FCF. It does not
 * try to find another FCF for discovery.
 */
void
lpfc_unregister_fcf(struct lpfc_hba *phba)
{
        int rc;

        /* Preparation for unregistering fcf */
        rc = lpfc_unregister_fcf_prep(phba);
        if (rc) {
                lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
                                "2749 Failed to prepare for unregistering "
                                "HBA's FCF record: rc=%d\n", rc);
                return;
        }

        /* Now, unregister FCF record and reset HBA FCF state */
        rc = lpfc_sli4_unregister_fcf(phba);
        if (rc)
                return;
        /* Set proper HBA FCF states after successful unregister FCF */
        spin_lock_irq(&phba->hbalock);
        phba->fcf.fcf_flag &= ~FCF_REGISTERED;
        spin_unlock_irq(&phba->hbalock);
}

/**
 * lpfc_unregister_unused_fcf - Unregister FCF if all devices are disconnected.
 * @phba: Pointer to hba context object.
 *
 * This function checks whether any remote ports are still connected through
 * the FCF; if all devices have disconnected, it unregisters the FCFI and
 * tries to find another FCF for discovery.
 */
void
lpfc_unregister_unused_fcf(struct lpfc_hba *phba)
{
        /*
         * If HBA is not running in FIP mode, if HBA does not support
         * FCoE, if FCF discovery is ongoing, if FCF has not been
         * registered, or if the physical port is still in FLOGI,
         * do nothing.
         */
        spin_lock_irq(&phba->hbalock);
        if (!(phba->hba_flag & HBA_FCOE_MODE) ||
            !(phba->fcf.fcf_flag & FCF_REGISTERED) ||
            !(phba->hba_flag & HBA_FIP_SUPPORT) ||
            (phba->fcf.fcf_flag & FCF_DISCOVERY) ||
            (phba->pport->port_state == LPFC_FLOGI)) {
                spin_unlock_irq(&phba->hbalock);
                return;
        }
        spin_unlock_irq(&phba->hbalock);

        if (lpfc_fcf_inuse(phba))
                return;

        lpfc_unregister_fcf_rescan(phba);
}

/**
 * lpfc_read_fcf_conn_tbl - Create driver FCF connection table.
 * @phba: Pointer to hba context object.
 * @buff: Buffer containing the FCF connection table as in the config
 *        region.
 *
 * This function creates the driver data structures for the FCF connection
 * record table read from config region 23.
 */
static void
lpfc_read_fcf_conn_tbl(struct lpfc_hba *phba,
        uint8_t *buff)
{
        struct lpfc_fcf_conn_entry *conn_entry, *next_conn_entry;
        struct lpfc_fcf_conn_hdr *conn_hdr;
        struct lpfc_fcf_conn_rec *conn_rec;
        uint32_t record_count;
        int i;

        /* Free the current connect table */
        list_for_each_entry_safe(conn_entry, next_conn_entry,
                &phba->fcf_conn_rec_list, list) {
                list_del_init(&conn_entry->list);
                kfree(conn_entry);
        }

        /* The header length field counts 32-bit words of record data */
        conn_hdr = (struct lpfc_fcf_conn_hdr *) buff;
        record_count = conn_hdr->length * sizeof(uint32_t) /
                sizeof(struct lpfc_fcf_conn_rec);

        conn_rec = (struct lpfc_fcf_conn_rec *)
                (buff + sizeof(struct lpfc_fcf_conn_hdr));

        for (i = 0; i < record_count; i++) {
                if (!(conn_rec[i].flags & FCFCNCT_VALID))
                        continue;
                conn_entry = kzalloc(sizeof(struct lpfc_fcf_conn_entry),
                        GFP_KERNEL);
                if (!conn_entry) {
                        lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                                "2566 Failed to allocate connection"
                                " table entry\n");
                        return;
                }

                memcpy(&conn_entry->conn_rec, &conn_rec[i],
                        sizeof(struct lpfc_fcf_conn_rec));
                list_add_tail(&conn_entry->list,
                        &phba->fcf_conn_rec_list);
        }

        if (!list_empty(&phba->fcf_conn_rec_list)) {
                i = 0;
                list_for_each_entry(conn_entry, &phba->fcf_conn_rec_list,
                                    list) {
                        conn_rec = &conn_entry->conn_rec;
                        lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
                                        "3345 FCF connection list rec[%02d]: "
                                        "flags:x%04x, vtag:x%04x, "
                                        "fabric_name:x%02x:%02x:%02x:%02x:"
                                        "%02x:%02x:%02x:%02x, "
                                        "switch_name:x%02x:%02x:%02x:%02x:"
                                        "%02x:%02x:%02x:%02x\n", i++,
                                        conn_rec->flags, conn_rec->vlan_tag,
                                        conn_rec->fabric_name[0],
                                        conn_rec->fabric_name[1],
                                        conn_rec->fabric_name[2],
                                        conn_rec->fabric_name[3],
                                        conn_rec->fabric_name[4],
                                        conn_rec->fabric_name[5],
                                        conn_rec->fabric_name[6],
                                        conn_rec->fabric_name[7],
                                        conn_rec->switch_name[0],
                                        conn_rec->switch_name[1],
                                        conn_rec->switch_name[2],
                                        conn_rec->switch_name[3],
                                        conn_rec->switch_name[4],
                                        conn_rec->switch_name[5],
                                        conn_rec->switch_name[6],
                                        conn_rec->switch_name[7]);
                }
        }
}

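/*
 * For reference, the connection table parsed above sits in region 23 as a
 * header followed by a packed array of records (a sketch; the structure
 * definitions live elsewhere in the driver headers):
 *
 *        struct lpfc_fcf_conn_hdr hdr;   // hdr.length = record data, words
 *        struct lpfc_fcf_conn_rec rec[]; // flags (FCFCNCT_VALID etc.),
 *                                        // vlan_tag, fabric_name[8],
 *                                        // switch_name[8]
 *
 * Only records with FCFCNCT_VALID set are copied onto fcf_conn_rec_list.
 */
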
/**
 * lpfc_read_fcoe_param - Read FCoE parameters from config region 23.
 * @phba: Pointer to hba context object.
 * @buff: Buffer containing the FCoE parameter data structure.
 *
 * This function updates the driver data structure with the config
 * parameters read from config region 23.
 */
static void
lpfc_read_fcoe_param(struct lpfc_hba *phba,
                        uint8_t *buff)
{
        struct lpfc_fip_param_hdr *fcoe_param_hdr;
        struct lpfc_fcoe_params *fcoe_param;

        fcoe_param_hdr = (struct lpfc_fip_param_hdr *)buff;
        fcoe_param = (struct lpfc_fcoe_params *)
                (buff + sizeof(struct lpfc_fip_param_hdr));

        if ((fcoe_param_hdr->parm_version != FIPP_VERSION) ||
            (fcoe_param_hdr->length != FCOE_PARAM_LENGTH))
                return;

        if (fcoe_param_hdr->parm_flags & FIPP_VLAN_VALID) {
                phba->valid_vlan = 1;
                phba->vlan_id = le16_to_cpu(fcoe_param->vlan_tag) & 0xFFF;
        }

        phba->fc_map[0] = fcoe_param->fc_map[0];
        phba->fc_map[1] = fcoe_param->fc_map[1];
        phba->fc_map[2] = fcoe_param->fc_map[2];
}

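/*
 * Illustrative layout of the FCoE parameter record consumed above (a
 * sketch; the field names follow the structures used in this file):
 *
 *        struct lpfc_fip_param_hdr hdr; // parm_version, length, parm_flags
 *        struct lpfc_fcoe_params p;     // vlan_tag (little endian; low 12
 *                                       // bits valid when FIPP_VLAN_VALID
 *                                       // is set), fc_map[3]
 */
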
/**
 * lpfc_get_rec_conf23 - Get a record type in config region data.
 * @buff: Buffer containing config region 23 data.
 * @size: Size of the data buffer.
 * @rec_type: Record type to be searched.
 *
 * This function searches the config region data for the beginning of the
 * record specified by @rec_type. If the record is found, it returns a
 * pointer to the record; otherwise it returns NULL.
 */
static uint8_t *
lpfc_get_rec_conf23(uint8_t *buff, uint32_t size, uint8_t rec_type)
{
        uint32_t offset = 0, rec_length;

        if ((buff[0] == LPFC_REGION23_LAST_REC) ||
            (size < sizeof(uint32_t)))
                return NULL;

        rec_length = buff[offset + 1];

        /*
         * One TLV record has a one-word header plus the number of data
         * words given in the rec_length field of the record header.
         */
        while ((offset + rec_length * sizeof(uint32_t) + sizeof(uint32_t))
                <= size) {
                if (buff[offset] == rec_type)
                        return &buff[offset];

                if (buff[offset] == LPFC_REGION23_LAST_REC)
                        return NULL;

                offset += rec_length * sizeof(uint32_t) + sizeof(uint32_t);
                rec_length = buff[offset + 1];
        }
        return NULL;
}

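/*
 * Example walk (illustrative values only): a record of type 0xA0 carrying
 * two data words occupies (2 + 1) words, so the next record header starts
 * at offset + (rec_length + 1) * sizeof(uint32_t):
 *
 *        buff[offset + 0] = 0xA0;  // rec_type byte
 *        buff[offset + 1] = 2;     // rec_length, in 32-bit data words
 *        // two words of record data follow the header word
 */
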
/**
 * lpfc_parse_fcoe_conf - Parse FCoE config data read from config region 23.
 * @phba: Pointer to lpfc_hba data structure.
 * @buff: Buffer containing config region 23 data.
 * @size: Size of the data buffer.
 *
 * This function parses the FCoE config parameters in config region 23 and
 * populates the driver data structures with those parameters.
 */
void
lpfc_parse_fcoe_conf(struct lpfc_hba *phba,
                uint8_t *buff,
                uint32_t size)
{
        uint32_t offset = 0;
        uint8_t *rec_ptr;

        /*
         * If the data size is less than 2 words, the signature and version
         * cannot be verified.
         */
        if (size < 2 * sizeof(uint32_t))
                return;

        /* Check the region signature first */
        if (memcmp(buff, LPFC_REGION23_SIGNATURE, 4)) {
                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                        "2567 Config region 23 has bad signature\n");
                return;
        }

        offset += 4;

        /* Check the data structure version */
        if (buff[offset] != LPFC_REGION23_VERSION) {
                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                        "2568 Config region 23 has bad version\n");
                return;
        }
        offset += 4;

        /* Read FCoE param record */
        rec_ptr = lpfc_get_rec_conf23(&buff[offset],
                        size - offset, FCOE_PARAM_TYPE);
        if (rec_ptr)
                lpfc_read_fcoe_param(phba, rec_ptr);

        /* Read FCF connection table */
        rec_ptr = lpfc_get_rec_conf23(&buff[offset],
                size - offset, FCOE_CONN_TBL_TYPE);
        if (rec_ptr)
                lpfc_read_fcf_conn_tbl(phba, rec_ptr);
}

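/*
 * Putting it together, config region 23 as consumed by
 * lpfc_parse_fcoe_conf() looks like this (a sketch; offsets taken from the
 * code above):
 *
 *        bytes 0..3   LPFC_REGION23_SIGNATURE
 *        bytes 4..7   version (LPFC_REGION23_VERSION in byte 4)
 *        bytes 8..    TLV records (e.g. FCOE_PARAM_TYPE,
 *                     FCOE_CONN_TBL_TYPE), terminated by a record whose
 *                     type byte is LPFC_REGION23_LAST_REC
 */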