linux/drivers/scsi/lpfc/lpfc_hbadisc.c
/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2004-2009 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.emulex.com                                                  *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/

#include <linux/blkdev.h>
#include <linux/pci.h>
#include <linux/kthread.h>
#include <linux/interrupt.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>

#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_scsi.h"
#include "lpfc.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_vport.h"
#include "lpfc_debugfs.h"

/* AlpaArray for assignment of scsid for scan-down and bind_method */
static uint8_t lpfcAlpaArray[] = {
        0xEF, 0xE8, 0xE4, 0xE2, 0xE1, 0xE0, 0xDC, 0xDA, 0xD9, 0xD6,
        0xD5, 0xD4, 0xD3, 0xD2, 0xD1, 0xCE, 0xCD, 0xCC, 0xCB, 0xCA,
        0xC9, 0xC7, 0xC6, 0xC5, 0xC3, 0xBC, 0xBA, 0xB9, 0xB6, 0xB5,
        0xB4, 0xB3, 0xB2, 0xB1, 0xAE, 0xAD, 0xAC, 0xAB, 0xAA, 0xA9,
        0xA7, 0xA6, 0xA5, 0xA3, 0x9F, 0x9E, 0x9D, 0x9B, 0x98, 0x97,
        0x90, 0x8F, 0x88, 0x84, 0x82, 0x81, 0x80, 0x7C, 0x7A, 0x79,
        0x76, 0x75, 0x74, 0x73, 0x72, 0x71, 0x6E, 0x6D, 0x6C, 0x6B,
        0x6A, 0x69, 0x67, 0x66, 0x65, 0x63, 0x5C, 0x5A, 0x59, 0x56,
        0x55, 0x54, 0x53, 0x52, 0x51, 0x4E, 0x4D, 0x4C, 0x4B, 0x4A,
        0x49, 0x47, 0x46, 0x45, 0x43, 0x3C, 0x3A, 0x39, 0x36, 0x35,
        0x34, 0x33, 0x32, 0x31, 0x2E, 0x2D, 0x2C, 0x2B, 0x2A, 0x29,
        0x27, 0x26, 0x25, 0x23, 0x1F, 0x1E, 0x1D, 0x1B, 0x18, 0x17,
        0x10, 0x0F, 0x08, 0x04, 0x02, 0x01
};

static void lpfc_disc_timeout_handler(struct lpfc_vport *);
static void lpfc_disc_flush_list(struct lpfc_vport *vport);
static void lpfc_unregister_fcfi_cmpl(struct lpfc_hba *, LPFC_MBOXQ_t *);

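/**
 * lpfc_terminate_rport_io - Terminate outstanding I/O for a remote port
 * @rport: Pointer to the fc transport remote port.
 *
 * This routine looks up the node associated with @rport and, if the
 * node is active and has a SCSI target id assigned, aborts all
 * outstanding FCP IOCBs issued to that target.
 **/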
void
lpfc_terminate_rport_io(struct fc_rport *rport)
{
        struct lpfc_rport_data *rdata;
        struct lpfc_nodelist *ndlp;
        struct lpfc_hba *phba;

        rdata = rport->dd_data;
        ndlp = rdata->pnode;

        if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
                if (rport->roles & FC_RPORT_ROLE_FCP_TARGET)
                        printk(KERN_ERR "Cannot find remote node"
                               " to terminate I/O Data x%x\n",
                               rport->port_id);
                return;
        }

        phba  = ndlp->phba;

        lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_RPORT,
                "rport terminate: sid:x%x did:x%x flg:x%x",
                ndlp->nlp_sid, ndlp->nlp_DID, ndlp->nlp_flag);

        if (ndlp->nlp_sid != NLP_NO_SID) {
                lpfc_sli_abort_iocb(ndlp->vport,
                        &phba->sli.ring[phba->sli.fcp_ring],
                        ndlp->nlp_sid, 0, LPFC_CTX_TGT);
        }
}

/*
 * This function will be called when dev_loss_tmo fires.
 */
void
lpfc_dev_loss_tmo_callbk(struct fc_rport *rport)
{
        struct lpfc_rport_data *rdata;
        struct lpfc_nodelist *ndlp;
        struct lpfc_vport *vport;
        struct lpfc_hba   *phba;
        struct lpfc_work_evt *evtp;
        int  put_node;
        int  put_rport;

        rdata = rport->dd_data;
        ndlp = rdata->pnode;
        if (!ndlp || !NLP_CHK_NODE_ACT(ndlp))
                return;

        vport = ndlp->vport;
        phba  = vport->phba;

        lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT,
                "rport devlosscb: sid:x%x did:x%x flg:x%x",
                ndlp->nlp_sid, ndlp->nlp_DID, ndlp->nlp_flag);

        /* Don't defer this if we are in the process of deleting the vport
         * or unloading the driver. The unload will clean up the node
         * appropriately; we just need to clean up the ndlp rport info here.
         */
        if (vport->load_flag & FC_UNLOADING) {
                put_node = rdata->pnode != NULL;
                put_rport = ndlp->rport != NULL;
                rdata->pnode = NULL;
                ndlp->rport = NULL;
                if (put_node)
                        lpfc_nlp_put(ndlp);
                if (put_rport)
                        put_device(&rport->dev);
                return;
        }

        if (ndlp->nlp_state == NLP_STE_MAPPED_NODE)
                return;

        evtp = &ndlp->dev_loss_evt;

        if (!list_empty(&evtp->evt_listp))
                return;

        spin_lock_irq(&phba->hbalock);
        /* We need to hold the node by incrementing the reference
         * count until this queued work is done
         */
        evtp->evt_arg1  = lpfc_nlp_get(ndlp);
        if (evtp->evt_arg1) {
                evtp->evt = LPFC_EVT_DEV_LOSS;
                list_add_tail(&evtp->evt_listp, &phba->work_list);
                lpfc_worker_wake_up(phba);
        }
        spin_unlock_irq(&phba->hbalock);

        return;
}

/*
 * This function is called from the worker thread when dev_loss_tmo
 * expires.
 */
static void
lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
{
        struct lpfc_rport_data *rdata;
        struct fc_rport   *rport;
        struct lpfc_vport *vport;
        struct lpfc_hba   *phba;
        uint8_t *name;
        int  put_node;
        int  put_rport;
        int warn_on = 0;

        rport = ndlp->rport;

        if (!rport)
                return;

        rdata = rport->dd_data;
        name = (uint8_t *) &ndlp->nlp_portname;
        vport = ndlp->vport;
        phba  = vport->phba;

        lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT,
                "rport devlosstmo:did:x%x type:x%x id:x%x",
                ndlp->nlp_DID, ndlp->nlp_type, rport->scsi_target_id);

        /* Don't defer this if we are in the process of deleting the vport
         * or unloading the driver. The unload will clean up the node
         * appropriately; we just need to clean up the ndlp rport info here.
         */
        if (vport->load_flag & FC_UNLOADING) {
                if (ndlp->nlp_sid != NLP_NO_SID) {
                        /* flush the target */
                        lpfc_sli_abort_iocb(vport,
                                        &phba->sli.ring[phba->sli.fcp_ring],
                                        ndlp->nlp_sid, 0, LPFC_CTX_TGT);
                }
                put_node = rdata->pnode != NULL;
                put_rport = ndlp->rport != NULL;
                rdata->pnode = NULL;
                ndlp->rport = NULL;
                if (put_node)
                        lpfc_nlp_put(ndlp);
                if (put_rport)
                        put_device(&rport->dev);
                return;
        }

        if (ndlp->nlp_state == NLP_STE_MAPPED_NODE) {
                lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
                                 "0284 Devloss timeout Ignored on "
                                 "WWPN %x:%x:%x:%x:%x:%x:%x:%x "
                                 "NPort x%x\n",
                                 *name, *(name+1), *(name+2), *(name+3),
                                 *(name+4), *(name+5), *(name+6), *(name+7),
                                 ndlp->nlp_DID);
                return;
        }

        if (ndlp->nlp_type & NLP_FABRIC) {
                /* We will clean up these Nodes in linkup */
                put_node = rdata->pnode != NULL;
                put_rport = ndlp->rport != NULL;
                rdata->pnode = NULL;
                ndlp->rport = NULL;
                if (put_node)
                        lpfc_nlp_put(ndlp);
                if (put_rport)
                        put_device(&rport->dev);
                return;
        }

        if (ndlp->nlp_sid != NLP_NO_SID) {
                warn_on = 1;
                /* flush the target */
                lpfc_sli_abort_iocb(vport, &phba->sli.ring[phba->sli.fcp_ring],
                                    ndlp->nlp_sid, 0, LPFC_CTX_TGT);
        }

        if (warn_on) {
                lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
                                 "0203 Devloss timeout on "
                                 "WWPN %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x "
                                 "NPort x%06x Data: x%x x%x x%x\n",
                                 *name, *(name+1), *(name+2), *(name+3),
                                 *(name+4), *(name+5), *(name+6), *(name+7),
                                 ndlp->nlp_DID, ndlp->nlp_flag,
                                 ndlp->nlp_state, ndlp->nlp_rpi);
        } else {
                lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
                                 "0204 Devloss timeout on "
                                 "WWPN %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x "
                                 "NPort x%06x Data: x%x x%x x%x\n",
                                 *name, *(name+1), *(name+2), *(name+3),
                                 *(name+4), *(name+5), *(name+6), *(name+7),
                                 ndlp->nlp_DID, ndlp->nlp_flag,
                                 ndlp->nlp_state, ndlp->nlp_rpi);
        }

        put_node = rdata->pnode != NULL;
        put_rport = ndlp->rport != NULL;
        rdata->pnode = NULL;
        ndlp->rport = NULL;
        if (put_node)
                lpfc_nlp_put(ndlp);
        if (put_rport)
                put_device(&rport->dev);

        if (!(vport->load_flag & FC_UNLOADING) &&
            !(ndlp->nlp_flag & NLP_DELAY_TMO) &&
            !(ndlp->nlp_flag & NLP_NPR_2B_DISC) &&
            (ndlp->nlp_state != NLP_STE_UNMAPPED_NODE))
                lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM);

        lpfc_unregister_unused_fcf(phba);
}

/**
 * lpfc_alloc_fast_evt - Allocates data structure for posting event
 * @phba: Pointer to hba context object.
 *
 * This function is called from the functions which need to post
 * events from interrupt context. This function allocates the data
 * structure required for posting an event. It also keeps track of
 * the number of pending events and prevents an event storm when
 * there are too many events.
 **/
struct lpfc_fast_path_event *
lpfc_alloc_fast_evt(struct lpfc_hba *phba)
{
        struct lpfc_fast_path_event *ret;

        /* If there are a lot of fast events, do not exhaust memory due to this */
        if (atomic_read(&phba->fast_event_count) > LPFC_MAX_EVT_COUNT)
                return NULL;

        ret = kzalloc(sizeof(struct lpfc_fast_path_event),
                        GFP_ATOMIC);
        if (ret) {
                atomic_inc(&phba->fast_event_count);
                INIT_LIST_HEAD(&ret->work_evt.evt_listp);
                ret->work_evt.evt = LPFC_EVT_FASTPATH_MGMT_EVT;
        }
        return ret;
}

/**
 * lpfc_free_fast_evt - Frees event data structure
 * @phba: Pointer to hba context object.
 * @evt:  Event object which needs to be freed.
 *
 * This function frees the data structure required for posting
 * events.
 **/
void
lpfc_free_fast_evt(struct lpfc_hba *phba,
                struct lpfc_fast_path_event *evt)
{
        atomic_dec(&phba->fast_event_count);
        kfree(evt);
}

/**
 * lpfc_send_fastpath_evt - Posts events generated from fast path
 * @phba: Pointer to hba context object.
 * @evtp: Event data structure.
 *
 * This function is called from the worker thread when the interrupt
 * context needs to post an event. This function posts the event
 * to the fc transport netlink interface.
 **/
static void
lpfc_send_fastpath_evt(struct lpfc_hba *phba,
                struct lpfc_work_evt *evtp)
{
        unsigned long evt_category, evt_sub_category;
        struct lpfc_fast_path_event *fast_evt_data;
        char *evt_data;
        uint32_t evt_data_size;
        struct Scsi_Host *shost;

        fast_evt_data = container_of(evtp, struct lpfc_fast_path_event,
                work_evt);

        evt_category = (unsigned long) fast_evt_data->un.fabric_evt.event_type;
        evt_sub_category = (unsigned long) fast_evt_data->un.
                        fabric_evt.subcategory;
        shost = lpfc_shost_from_vport(fast_evt_data->vport);
        if (evt_category == FC_REG_FABRIC_EVENT) {
                if (evt_sub_category == LPFC_EVENT_FCPRDCHKERR) {
                        evt_data = (char *) &fast_evt_data->un.read_check_error;
                        evt_data_size = sizeof(fast_evt_data->un.
                                read_check_error);
                } else if ((evt_sub_category == LPFC_EVENT_FABRIC_BUSY) ||
                        (evt_sub_category == LPFC_EVENT_PORT_BUSY)) {
                        evt_data = (char *) &fast_evt_data->un.fabric_evt;
                        evt_data_size = sizeof(fast_evt_data->un.fabric_evt);
                } else {
                        lpfc_free_fast_evt(phba, fast_evt_data);
                        return;
                }
        } else if (evt_category == FC_REG_SCSI_EVENT) {
                switch (evt_sub_category) {
                case LPFC_EVENT_QFULL:
                case LPFC_EVENT_DEVBSY:
                        evt_data = (char *) &fast_evt_data->un.scsi_evt;
                        evt_data_size = sizeof(fast_evt_data->un.scsi_evt);
                        break;
                case LPFC_EVENT_CHECK_COND:
                        evt_data = (char *) &fast_evt_data->un.check_cond_evt;
                        evt_data_size =  sizeof(fast_evt_data->un.
                                check_cond_evt);
                        break;
                case LPFC_EVENT_VARQUEDEPTH:
                        evt_data = (char *) &fast_evt_data->un.queue_depth_evt;
                        evt_data_size = sizeof(fast_evt_data->un.
                                queue_depth_evt);
                        break;
                default:
                        lpfc_free_fast_evt(phba, fast_evt_data);
                        return;
                }
        } else {
                lpfc_free_fast_evt(phba, fast_evt_data);
                return;
        }

        fc_host_post_vendor_event(shost,
                fc_get_event_number(),
                evt_data_size,
                evt_data,
                LPFC_NL_VENDOR_ID);

        lpfc_free_fast_evt(phba, fast_evt_data);
        return;
}

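/**
 * lpfc_work_list_done - Process all events on the hba work list
 * @phba: Pointer to hba context object.
 *
 * This routine drains phba->work_list, dispatching each queued event
 * to its handler. Events embedded in other objects (ELS retry,
 * dev-loss, fast path) are not freed here; standalone events are
 * freed after processing.
 **/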
static void
lpfc_work_list_done(struct lpfc_hba *phba)
{
        struct lpfc_work_evt  *evtp = NULL;
        struct lpfc_nodelist  *ndlp;
        int free_evt;

        spin_lock_irq(&phba->hbalock);
        while (!list_empty(&phba->work_list)) {
                list_remove_head((&phba->work_list), evtp, typeof(*evtp),
                                 evt_listp);
                spin_unlock_irq(&phba->hbalock);
                free_evt = 1;
                switch (evtp->evt) {
                case LPFC_EVT_ELS_RETRY:
                        ndlp = (struct lpfc_nodelist *) (evtp->evt_arg1);
                        lpfc_els_retry_delay_handler(ndlp);
                        free_evt = 0; /* evt is part of ndlp */
                        /* decrement the node reference count held
                         * for this queued work
                         */
                        lpfc_nlp_put(ndlp);
                        break;
                case LPFC_EVT_DEV_LOSS:
                        ndlp = (struct lpfc_nodelist *)(evtp->evt_arg1);
                        lpfc_dev_loss_tmo_handler(ndlp);
                        free_evt = 0;
                        /* decrement the node reference count held for
                         * this queued work
                         */
                        lpfc_nlp_put(ndlp);
                        break;
                case LPFC_EVT_ONLINE:
                        if (phba->link_state < LPFC_LINK_DOWN)
                                *(int *) (evtp->evt_arg1) = lpfc_online(phba);
                        else
                                *(int *) (evtp->evt_arg1) = 0;
                        complete((struct completion *)(evtp->evt_arg2));
                        break;
                case LPFC_EVT_OFFLINE_PREP:
                        if (phba->link_state >= LPFC_LINK_DOWN)
                                lpfc_offline_prep(phba);
                        *(int *)(evtp->evt_arg1) = 0;
                        complete((struct completion *)(evtp->evt_arg2));
                        break;
                case LPFC_EVT_OFFLINE:
                        lpfc_offline(phba);
                        lpfc_sli_brdrestart(phba);
                        *(int *)(evtp->evt_arg1) =
                                lpfc_sli_brdready(phba, HS_FFRDY | HS_MBRDY);
                        lpfc_unblock_mgmt_io(phba);
                        complete((struct completion *)(evtp->evt_arg2));
                        break;
                case LPFC_EVT_WARM_START:
                        lpfc_offline(phba);
                        lpfc_reset_barrier(phba);
                        lpfc_sli_brdreset(phba);
                        lpfc_hba_down_post(phba);
                        *(int *)(evtp->evt_arg1) =
                                lpfc_sli_brdready(phba, HS_MBRDY);
                        lpfc_unblock_mgmt_io(phba);
                        complete((struct completion *)(evtp->evt_arg2));
                        break;
                case LPFC_EVT_KILL:
                        lpfc_offline(phba);
                        *(int *)(evtp->evt_arg1)
                                = (phba->pport->stopped)
                                        ? 0 : lpfc_sli_brdkill(phba);
                        lpfc_unblock_mgmt_io(phba);
                        complete((struct completion *)(evtp->evt_arg2));
                        break;
                case LPFC_EVT_FASTPATH_MGMT_EVT:
                        lpfc_send_fastpath_evt(phba, evtp);
                        free_evt = 0;
                        break;
                }
                if (free_evt)
                        kfree(evtp);
                spin_lock_irq(&phba->hbalock);
        }
        spin_unlock_irq(&phba->hbalock);
}

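/**
 * lpfc_work_done - Worker thread main processing routine
 * @phba: Pointer to hba context object.
 *
 * This routine handles latched host attention events, SLI4
 * asynchronous events, per-vport timer events, deferred slow-path
 * (ELS) ring events, and finally the queued work list.
 **/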
static void
lpfc_work_done(struct lpfc_hba *phba)
{
        struct lpfc_sli_ring *pring;
        uint32_t ha_copy, status, control, work_port_events;
        struct lpfc_vport **vports;
        struct lpfc_vport *vport;
        int i;

        spin_lock_irq(&phba->hbalock);
        ha_copy = phba->work_ha;
        phba->work_ha = 0;
        spin_unlock_irq(&phba->hbalock);

        /* First, try to post the next mailbox command to SLI4 device */
        if (phba->pci_dev_grp == LPFC_PCI_DEV_OC)
                lpfc_sli4_post_async_mbox(phba);

        if (ha_copy & HA_ERATT)
                /* Handle the error attention event */
                lpfc_handle_eratt(phba);

        if (ha_copy & HA_MBATT)
                lpfc_sli_handle_mb_event(phba);

        if (ha_copy & HA_LATT)
                lpfc_handle_latt(phba);

        /* Process SLI4 events */
        if (phba->pci_dev_grp == LPFC_PCI_DEV_OC) {
                if (phba->hba_flag & FCP_XRI_ABORT_EVENT)
                        lpfc_sli4_fcp_xri_abort_event_proc(phba);
                if (phba->hba_flag & ELS_XRI_ABORT_EVENT)
                        lpfc_sli4_els_xri_abort_event_proc(phba);
                if (phba->hba_flag & ASYNC_EVENT)
                        lpfc_sli4_async_event_proc(phba);
                if (phba->hba_flag & HBA_POST_RECEIVE_BUFFER) {
                        spin_lock_irq(&phba->hbalock);
                        phba->hba_flag &= ~HBA_POST_RECEIVE_BUFFER;
                        spin_unlock_irq(&phba->hbalock);
                        lpfc_sli_hbqbuf_add_hbqs(phba, LPFC_ELS_HBQ);
                }
                if (phba->hba_flag & HBA_RECEIVE_BUFFER)
                        lpfc_sli4_handle_received_buffer(phba);
        }

        vports = lpfc_create_vport_work_array(phba);
        if (vports != NULL)
                for (i = 0; i <= phba->max_vports; i++) {
                        /*
                         * We could have no vports in array if unloading, so if
                         * this happens then just use the pport
                         */
                        if (vports[i] == NULL && i == 0)
                                vport = phba->pport;
                        else
                                vport = vports[i];
                        if (vport == NULL)
                                break;
                        spin_lock_irq(&vport->work_port_lock);
                        work_port_events = vport->work_port_events;
                        vport->work_port_events &= ~work_port_events;
                        spin_unlock_irq(&vport->work_port_lock);
                        if (work_port_events & WORKER_DISC_TMO)
                                lpfc_disc_timeout_handler(vport);
                        if (work_port_events & WORKER_ELS_TMO)
                                lpfc_els_timeout_handler(vport);
                        if (work_port_events & WORKER_HB_TMO)
                                lpfc_hb_timeout_handler(phba);
                        if (work_port_events & WORKER_MBOX_TMO)
                                lpfc_mbox_timeout_handler(phba);
                        if (work_port_events & WORKER_FABRIC_BLOCK_TMO)
                                lpfc_unblock_fabric_iocbs(phba);
                        if (work_port_events & WORKER_FDMI_TMO)
                                lpfc_fdmi_timeout_handler(vport);
                        if (work_port_events & WORKER_RAMP_DOWN_QUEUE)
                                lpfc_ramp_down_queue_handler(phba);
                        if (work_port_events & WORKER_RAMP_UP_QUEUE)
                                lpfc_ramp_up_queue_handler(phba);
                }
        lpfc_destroy_vport_work_array(phba, vports);

        pring = &phba->sli.ring[LPFC_ELS_RING];
        status = (ha_copy & (HA_RXMASK  << (4*LPFC_ELS_RING)));
        status >>= (4*LPFC_ELS_RING);
        if ((status & HA_RXMASK)
                || (pring->flag & LPFC_DEFERRED_RING_EVENT)) {
                if (pring->flag & LPFC_STOP_IOCB_EVENT) {
                        pring->flag |= LPFC_DEFERRED_RING_EVENT;
                        /* Set the lpfc data pending flag */
                        set_bit(LPFC_DATA_READY, &phba->data_flags);
                } else {
                        pring->flag &= ~LPFC_DEFERRED_RING_EVENT;
                        lpfc_sli_handle_slow_ring_event(phba, pring,
                                                        (status &
                                                         HA_RXMASK));
                }
                /*
                 * Turn on Ring interrupts
                 */
                if (phba->sli_rev <= LPFC_SLI_REV3) {
                        spin_lock_irq(&phba->hbalock);
                        control = readl(phba->HCregaddr);
                        if (!(control & (HC_R0INT_ENA << LPFC_ELS_RING))) {
                                lpfc_debugfs_slow_ring_trc(phba,
                                        "WRK Enable ring: cntl:x%x hacopy:x%x",
                                        control, ha_copy, 0);

                                control |= (HC_R0INT_ENA << LPFC_ELS_RING);
                                writel(control, phba->HCregaddr);
                                readl(phba->HCregaddr); /* flush */
                        } else {
                                lpfc_debugfs_slow_ring_trc(phba,
                                        "WRK Ring ok:     cntl:x%x hacopy:x%x",
                                        control, ha_copy, 0);
                        }
                        spin_unlock_irq(&phba->hbalock);
                }
        }
        lpfc_work_list_done(phba);
}

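/**
 * lpfc_do_work - Worker thread entry point
 * @p: Pointer to hba context object.
 *
 * This routine sleeps until LPFC_DATA_READY is set or the thread is
 * asked to stop, then calls lpfc_work_done() to attend to pending
 * work. It returns when the kthread is stopped or a signal is
 * received.
 **/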
int
lpfc_do_work(void *p)
{
        struct lpfc_hba *phba = p;
        int rc;

        set_user_nice(current, -20);
        phba->data_flags = 0;

        while (!kthread_should_stop()) {
                /* wait and check worker queue activities */
                rc = wait_event_interruptible(phba->work_waitq,
                                        (test_and_clear_bit(LPFC_DATA_READY,
                                                            &phba->data_flags)
                                         || kthread_should_stop()));
                /* Signal wakeup shall terminate the worker thread */
                if (rc) {
                        lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
                                        "0433 Wakeup on signal: rc=x%x\n", rc);
                        break;
                }

                /* Attend to pending lpfc data processing */
                lpfc_work_done(phba);
        }
        phba->worker_thread = NULL;
        lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
                        "0432 Worker thread stopped.\n");
        return 0;
}

/*
 * This is only called to handle FC worker events. Since this is a rare
 * occurrence, we allocate a struct lpfc_work_evt structure here instead of
 * embedding it in the IOCB.
 */
int
lpfc_workq_post_event(struct lpfc_hba *phba, void *arg1, void *arg2,
                      uint32_t evt)
{
        struct lpfc_work_evt  *evtp;
        unsigned long flags;

        /*
         * All Mailbox completions and LPFC_ELS_RING rcv ring IOCB events will
         * be queued to the worker thread for processing
         */
        evtp = kmalloc(sizeof(struct lpfc_work_evt), GFP_ATOMIC);
        if (!evtp)
                return 0;

        evtp->evt_arg1  = arg1;
        evtp->evt_arg2  = arg2;
        evtp->evt       = evt;

        spin_lock_irqsave(&phba->hbalock, flags);
        list_add_tail(&evtp->evt_listp, &phba->work_list);
        spin_unlock_irqrestore(&phba->hbalock, flags);

        lpfc_worker_wake_up(phba);

        return 1;
}

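/**
 * lpfc_cleanup_rpis - Unregister RPIs and recover or remove nodes
 * @vport: Pointer to virtual port object.
 * @remove: 1 to remove the nodes, 0 to place them in recovery.
 *
 * This routine walks the vport node list, unregistering RPIs where
 * required, and posts either a DEVICE_RM or DEVICE_RECOVERY event to
 * the discovery state machine for each node.
 **/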
void
lpfc_cleanup_rpis(struct lpfc_vport *vport, int remove)
{
        struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
        struct lpfc_hba  *phba = vport->phba;
        struct lpfc_nodelist *ndlp, *next_ndlp;
        int  rc;

        list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
                if (!NLP_CHK_NODE_ACT(ndlp))
                        continue;
                if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
                        continue;
                if ((phba->sli3_options & LPFC_SLI3_VPORT_TEARDOWN) ||
                        ((vport->port_type == LPFC_NPIV_PORT) &&
                        (ndlp->nlp_DID == NameServer_DID)))
                        lpfc_unreg_rpi(vport, ndlp);

                /* Leave Fabric nodes alone on link down */
                if (!remove && ndlp->nlp_type & NLP_FABRIC)
                        continue;
                rc = lpfc_disc_state_machine(vport, ndlp, NULL,
                                             remove
                                             ? NLP_EVT_DEVICE_RM
                                             : NLP_EVT_DEVICE_RECOVERY);
        }
        if (phba->sli3_options & LPFC_SLI3_VPORT_TEARDOWN) {
                lpfc_mbx_unreg_vpi(vport);
                spin_lock_irq(shost->host_lock);
                vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
                spin_unlock_irq(shost->host_lock);
        }
}

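/**
 * lpfc_port_link_failure - Clean up a vport after link failure
 * @vport: Pointer to virtual port object.
 *
 * This routine flushes outstanding RSCN activity and ELS commands,
 * places the vport's nodes into recovery, and stops the discovery
 * timer if it is running.
 **/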
void
lpfc_port_link_failure(struct lpfc_vport *vport)
{
        /* Cleanup any outstanding RSCN activity */
        lpfc_els_flush_rscn(vport);

        /* Cleanup any outstanding ELS commands */
        lpfc_els_flush_cmd(vport);

        lpfc_cleanup_rpis(vport, 0);

        /* Turn off discovery timer if it's running */
        lpfc_can_disctmo(vport);
}

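/**
 * lpfc_linkdown_port - Handle link down for a single vport
 * @vport: Pointer to virtual port object.
 *
 * This routine posts an FCH_EVT_LINKDOWN event to the fc transport
 * and performs link-failure cleanup for the vport.
 **/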
void
lpfc_linkdown_port(struct lpfc_vport *vport)
{
        struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);

        fc_host_post_event(shost, fc_get_event_number(), FCH_EVT_LINKDOWN, 0);

        lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
                "Link Down:       state:x%x rtry:x%x flg:x%x",
                vport->port_state, vport->fc_ns_retry, vport->fc_flag);

        lpfc_port_link_failure(vport);
}

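/**
 * lpfc_linkdown - Handle a link down event for the hba
 * @phba: Pointer to hba context object.
 *
 * This routine marks the link down, issues a link-down event to every
 * vport, cleans up firmware default RPIs, and resets point-to-point
 * state if it was in effect.
 **/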
int
lpfc_linkdown(struct lpfc_hba *phba)
{
        struct lpfc_vport *vport = phba->pport;
        struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
        struct lpfc_vport **vports;
        LPFC_MBOXQ_t          *mb;
        int i;

        if (phba->link_state == LPFC_LINK_DOWN)
                return 0;
        spin_lock_irq(&phba->hbalock);
        phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_DISCOVERED);
        if (phba->link_state > LPFC_LINK_DOWN) {
                phba->link_state = LPFC_LINK_DOWN;
                phba->pport->fc_flag &= ~FC_LBIT;
        }
        spin_unlock_irq(&phba->hbalock);
        vports = lpfc_create_vport_work_array(phba);
        if (vports != NULL)
                for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
                        /* Issue a LINK DOWN event to all nodes */
                        lpfc_linkdown_port(vports[i]);
                }
        lpfc_destroy_vport_work_array(phba, vports);
        /* Clean up any firmware default RPIs */
        mb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
        if (mb) {
                lpfc_unreg_did(phba, 0xffff, 0xffffffff, mb);
                mb->vport = vport;
                mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
                if (lpfc_sli_issue_mbox(phba, mb, MBX_NOWAIT)
                    == MBX_NOT_FINISHED) {
                        mempool_free(mb, phba->mbox_mem_pool);
                }
        }

        /* Setup myDID for link up if we are in pt2pt mode */
        if (phba->pport->fc_flag & FC_PT2PT) {
                phba->pport->fc_myDID = 0;
                mb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
                if (mb) {
                        lpfc_config_link(phba, mb);
                        mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
                        mb->vport = vport;
                        if (lpfc_sli_issue_mbox(phba, mb, MBX_NOWAIT)
                            == MBX_NOT_FINISHED) {
                                mempool_free(mb, phba->mbox_mem_pool);
                        }
                }
                spin_lock_irq(shost->host_lock);
                phba->pport->fc_flag &= ~(FC_PT2PT | FC_PT2PT_PLOGI);
                spin_unlock_irq(shost->host_lock);
        }

        return 0;
}

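/**
 * lpfc_linkup_cleanup_nodes - Clean up vport nodes at link up
 * @vport: Pointer to virtual port object.
 *
 * This routine unregisters RPIs for fabric nodes, placing them in NPR
 * state, and for non-fabric nodes marked for PLOGI, failing any I/O
 * still outstanding to them.
 **/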
static void
lpfc_linkup_cleanup_nodes(struct lpfc_vport *vport)
{
        struct lpfc_nodelist *ndlp;

        list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
                if (!NLP_CHK_NODE_ACT(ndlp))
                        continue;
                if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
                        continue;
                if (ndlp->nlp_type & NLP_FABRIC) {
                        /* On link up it's safe to clean up the ndlp
                         * from Fabric connections.
                         */
                        if (ndlp->nlp_DID != Fabric_DID)
                                lpfc_unreg_rpi(vport, ndlp);
                        lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
                } else if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) {
                        /* Fail outstanding IO now since the device is
                         * marked for PLOGI.
                         */
                        lpfc_unreg_rpi(vport, ndlp);
                }
        }
}

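/**
 * lpfc_linkup_port - Handle link up for a single vport
 * @vport: Pointer to virtual port object.
 *
 * This routine posts an FCH_EVT_LINKUP event to the fc transport,
 * resets the vport discovery flags, and cleans up nodes when the
 * link attention bit (FC_LBIT) is set.
 **/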
static void
lpfc_linkup_port(struct lpfc_vport *vport)
{
        struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
        struct lpfc_hba  *phba = vport->phba;

        if ((vport->load_flag & FC_UNLOADING) != 0)
                return;

        lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
                "Link Up:         top:x%x speed:x%x flg:x%x",
                phba->fc_topology, phba->fc_linkspeed, phba->link_flag);

        /* If NPIV is not enabled, only bring the physical port up */
        if (!(phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
                (vport != phba->pport))
                return;

        fc_host_post_event(shost, fc_get_event_number(), FCH_EVT_LINKUP, 0);

        spin_lock_irq(shost->host_lock);
        vport->fc_flag &= ~(FC_PT2PT | FC_PT2PT_PLOGI | FC_ABORT_DISCOVERY |
                            FC_RSCN_MODE | FC_NLP_MORE | FC_RSCN_DISCOVERY);
        vport->fc_flag |= FC_NDISC_ACTIVE;
        vport->fc_ns_retry = 0;
        spin_unlock_irq(shost->host_lock);

        if (vport->fc_flag & FC_LBIT)
                lpfc_linkup_cleanup_nodes(vport);
}

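/**
 * lpfc_linkup - Handle a link up event for the hba
 * @phba: Pointer to hba context object.
 *
 * This routine marks the link up, unblocks fabric iocbs, performs
 * link-up processing for every vport, and issues a CLEAR_LA for
 * NPIV-enabled HBAs prior to SLI-4.
 **/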
static int
lpfc_linkup(struct lpfc_hba *phba)
{
        struct lpfc_vport **vports;
        int i;

        phba->link_state = LPFC_LINK_UP;

        /* Unblock fabric iocbs if they are blocked */
        clear_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags);
        del_timer_sync(&phba->fabric_block_timer);

        vports = lpfc_create_vport_work_array(phba);
        if (vports != NULL)
                for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
                        lpfc_linkup_port(vports[i]);
        lpfc_destroy_vport_work_array(phba, vports);
        if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
            (phba->sli_rev < LPFC_SLI_REV4))
                lpfc_issue_clear_la(phba, phba->pport);

        return 0;
}

/*
 * This routine handles processing a CLEAR_LA mailbox
 * command upon completion. It is set up in the LPFC_MBOXQ
 * as the completion routine when the command is
 * handed off to the SLI layer.
 */
static void
lpfc_mbx_cmpl_clear_la(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
        struct lpfc_vport *vport = pmb->vport;
        struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
        struct lpfc_sli   *psli = &phba->sli;
        MAILBOX_t *mb = &pmb->u.mb;
        uint32_t control;

        /* Since we don't do discovery right now, turn these off here */
        psli->ring[psli->extra_ring].flag &= ~LPFC_STOP_IOCB_EVENT;
        psli->ring[psli->fcp_ring].flag &= ~LPFC_STOP_IOCB_EVENT;
        psli->ring[psli->next_ring].flag &= ~LPFC_STOP_IOCB_EVENT;

        /* Check for error */
        if ((mb->mbxStatus) && (mb->mbxStatus != 0x1601)) {
                /* CLEAR_LA mbox error <mbxStatus> state <hba_state> */
                lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
                                 "0320 CLEAR_LA mbxStatus error x%x hba "
                                 "state x%x\n",
                                 mb->mbxStatus, vport->port_state);
                phba->link_state = LPFC_HBA_ERROR;
                goto out;
        }

        if (vport->port_type == LPFC_PHYSICAL_PORT)
                phba->link_state = LPFC_HBA_READY;

        spin_lock_irq(&phba->hbalock);
        psli->sli_flag |= LPFC_PROCESS_LA;
        control = readl(phba->HCregaddr);
        control |= HC_LAINT_ENA;
        writel(control, phba->HCregaddr);
        readl(phba->HCregaddr); /* flush */
        spin_unlock_irq(&phba->hbalock);
        mempool_free(pmb, phba->mbox_mem_pool);
        return;

out:
        /* Device Discovery completes */
        lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
                         "0225 Device Discovery completes\n");
        mempool_free(pmb, phba->mbox_mem_pool);

        spin_lock_irq(shost->host_lock);
        vport->fc_flag &= ~FC_ABORT_DISCOVERY;
        spin_unlock_irq(shost->host_lock);

        lpfc_can_disctmo(vport);

        /* turn on Link Attention interrupts */
        spin_lock_irq(&phba->hbalock);
        psli->sli_flag |= LPFC_PROCESS_LA;
        control = readl(phba->HCregaddr);
        control |= HC_LAINT_ENA;
        writel(control, phba->HCregaddr);
        readl(phba->HCregaddr); /* flush */
        spin_unlock_irq(&phba->hbalock);

        return;
}

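/*
 * This routine handles processing a CONFIG_LINK mailbox command upon
 * completion. On success it either waits for FAN (public loop without
 * the link attention bit set) or starts discovery with a FLOGI; on
 * failure it takes the link down and issues a CLEAR_LA.
 */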
static void
lpfc_mbx_cmpl_local_config_link(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
        struct lpfc_vport *vport = pmb->vport;

        if (pmb->u.mb.mbxStatus)
                goto out;

        mempool_free(pmb, phba->mbox_mem_pool);

        if (phba->fc_topology == TOPOLOGY_LOOP &&
            vport->fc_flag & FC_PUBLIC_LOOP &&
            !(vport->fc_flag & FC_LBIT)) {
                /* Need to wait for FAN - use discovery timer
                 * for timeout.  port_state is identically
                 * LPFC_LOCAL_CFG_LINK while waiting for FAN
                 */
                lpfc_set_disctmo(vport);
                return;
        }

        /* Start discovery by sending a FLOGI. port_state is identically
         * LPFC_FLOGI while waiting for FLOGI cmpl
         */
        if (vport->port_state != LPFC_FLOGI)
                lpfc_initial_flogi(vport);
        return;

out:
        lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
                         "0306 CONFIG_LINK mbxStatus error x%x "
                         "HBA state x%x\n",
                         pmb->u.mb.mbxStatus, vport->port_state);
        mempool_free(pmb, phba->mbox_mem_pool);

        lpfc_linkdown(phba);

        lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
                         "0200 CONFIG_LINK bad hba state x%x\n",
                         vport->port_state);

        lpfc_issue_clear_la(phba, vport);
        return;
}

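/**
 * lpfc_mbx_cmpl_reg_fcfi - Completion handler for the REG_FCFI mailbox
 * @phba: pointer to lpfc hba data structure.
 * @mboxq: pointer to the mailbox command.
 *
 * This routine records the registered FCFI, marks the FCF as
 * registered, restarts the FCF table scan if an FCoE event is
 * pending, and otherwise starts discovery by issuing the initial
 * FLOGI.
 **/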
static void
lpfc_mbx_cmpl_reg_fcfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
        struct lpfc_vport *vport = mboxq->vport;
        unsigned long flags;

        if (mboxq->u.mb.mbxStatus) {
                lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
                         "2017 REG_FCFI mbxStatus error x%x "
                         "HBA state x%x\n",
                         mboxq->u.mb.mbxStatus, vport->port_state);
                mempool_free(mboxq, phba->mbox_mem_pool);
                return;
        }

        /* Start FCoE discovery by sending a FLOGI. */
        phba->fcf.fcfi = bf_get(lpfc_reg_fcfi_fcfi, &mboxq->u.mqe.un.reg_fcfi);
        /* Set the FCFI registered flag */
        spin_lock_irqsave(&phba->hbalock, flags);
        phba->fcf.fcf_flag |= FCF_REGISTERED;
        spin_unlock_irqrestore(&phba->hbalock, flags);
        /* If there is a pending FCoE event, restart FCF table scan. */
        if (lpfc_check_pending_fcoe_event(phba, 1)) {
                mempool_free(mboxq, phba->mbox_mem_pool);
                return;
        }
        if (vport->port_state != LPFC_FLOGI) {
                spin_lock_irqsave(&phba->hbalock, flags);
                phba->fcf.fcf_flag |= (FCF_DISCOVERED | FCF_IN_USE);
                phba->hba_flag &= ~FCF_DISC_INPROGRESS;
                spin_unlock_irqrestore(&phba->hbalock, flags);
                lpfc_initial_flogi(vport);
        }

        mempool_free(mboxq, phba->mbox_mem_pool);
        return;
}

/**
 * lpfc_fab_name_match - Check if the fcf fabric name matches.
 * @fab_name: pointer to fabric name.
 * @new_fcf_record: pointer to fcf record.
 *
 * This routine compares the fcf record's fabric name with the provided
 * fabric name. If the fabric names are identical this function
 * returns 1, else it returns 0.
 **/
static uint32_t
lpfc_fab_name_match(uint8_t *fab_name, struct fcf_record *new_fcf_record)
{
        if ((fab_name[0] ==
                bf_get(lpfc_fcf_record_fab_name_0, new_fcf_record)) &&
            (fab_name[1] ==
                bf_get(lpfc_fcf_record_fab_name_1, new_fcf_record)) &&
            (fab_name[2] ==
                bf_get(lpfc_fcf_record_fab_name_2, new_fcf_record)) &&
            (fab_name[3] ==
                bf_get(lpfc_fcf_record_fab_name_3, new_fcf_record)) &&
            (fab_name[4] ==
                bf_get(lpfc_fcf_record_fab_name_4, new_fcf_record)) &&
            (fab_name[5] ==
                bf_get(lpfc_fcf_record_fab_name_5, new_fcf_record)) &&
            (fab_name[6] ==
                bf_get(lpfc_fcf_record_fab_name_6, new_fcf_record)) &&
            (fab_name[7] ==
                bf_get(lpfc_fcf_record_fab_name_7, new_fcf_record)))
                return 1;
        else
                return 0;
}

/**
 * lpfc_sw_name_match - Check if the fcf switch name matches.
 * @sw_name: pointer to switch name.
 * @new_fcf_record: pointer to fcf record.
 *
 * This routine compares the fcf record's switch name with the provided
 * switch name. If the switch names are identical this function
 * returns 1, else it returns 0.
 **/
static uint32_t
lpfc_sw_name_match(uint8_t *sw_name, struct fcf_record *new_fcf_record)
{
        if ((sw_name[0] ==
                bf_get(lpfc_fcf_record_switch_name_0, new_fcf_record)) &&
            (sw_name[1] ==
                bf_get(lpfc_fcf_record_switch_name_1, new_fcf_record)) &&
            (sw_name[2] ==
                bf_get(lpfc_fcf_record_switch_name_2, new_fcf_record)) &&
            (sw_name[3] ==
                bf_get(lpfc_fcf_record_switch_name_3, new_fcf_record)) &&
            (sw_name[4] ==
                bf_get(lpfc_fcf_record_switch_name_4, new_fcf_record)) &&
            (sw_name[5] ==
                bf_get(lpfc_fcf_record_switch_name_5, new_fcf_record)) &&
            (sw_name[6] ==
                bf_get(lpfc_fcf_record_switch_name_6, new_fcf_record)) &&
            (sw_name[7] ==
                bf_get(lpfc_fcf_record_switch_name_7, new_fcf_record)))
                return 1;
        else
                return 0;
}

/**
 * lpfc_mac_addr_match - Check if the fcf mac address matches.
 * @phba: pointer to lpfc hba data structure.
 * @new_fcf_record: pointer to fcf record.
 *
 * This routine compares the fcf record's mac address with the HBA's
 * FCF mac address. If the mac addresses are identical this function
 * returns 1, else it returns 0.
 **/
static uint32_t
lpfc_mac_addr_match(struct lpfc_hba *phba, struct fcf_record *new_fcf_record)
{
        if ((phba->fcf.mac_addr[0] ==
                bf_get(lpfc_fcf_record_mac_0, new_fcf_record)) &&
            (phba->fcf.mac_addr[1] ==
                bf_get(lpfc_fcf_record_mac_1, new_fcf_record)) &&
            (phba->fcf.mac_addr[2] ==
                bf_get(lpfc_fcf_record_mac_2, new_fcf_record)) &&
            (phba->fcf.mac_addr[3] ==
                bf_get(lpfc_fcf_record_mac_3, new_fcf_record)) &&
            (phba->fcf.mac_addr[4] ==
                bf_get(lpfc_fcf_record_mac_4, new_fcf_record)) &&
            (phba->fcf.mac_addr[5] ==
                bf_get(lpfc_fcf_record_mac_5, new_fcf_record)))
                return 1;
        else
                return 0;
}

/**
 * lpfc_copy_fcf_record - Copy fcf information to lpfc_hba.
 * @phba: pointer to lpfc hba data structure.
 * @new_fcf_record: pointer to fcf record.
 *
 * This routine copies the FCF information from the FCF
 * record to the lpfc_hba data structure.
 **/
static void
lpfc_copy_fcf_record(struct lpfc_hba *phba, struct fcf_record *new_fcf_record)
{
        phba->fcf.fabric_name[0] =
                bf_get(lpfc_fcf_record_fab_name_0, new_fcf_record);
        phba->fcf.fabric_name[1] =
                bf_get(lpfc_fcf_record_fab_name_1, new_fcf_record);
        phba->fcf.fabric_name[2] =
                bf_get(lpfc_fcf_record_fab_name_2, new_fcf_record);
        phba->fcf.fabric_name[3] =
                bf_get(lpfc_fcf_record_fab_name_3, new_fcf_record);
        phba->fcf.fabric_name[4] =
                bf_get(lpfc_fcf_record_fab_name_4, new_fcf_record);
        phba->fcf.fabric_name[5] =
                bf_get(lpfc_fcf_record_fab_name_5, new_fcf_record);
        phba->fcf.fabric_name[6] =
                bf_get(lpfc_fcf_record_fab_name_6, new_fcf_record);
        phba->fcf.fabric_name[7] =
                bf_get(lpfc_fcf_record_fab_name_7, new_fcf_record);
        phba->fcf.mac_addr[0] =
                bf_get(lpfc_fcf_record_mac_0, new_fcf_record);
        phba->fcf.mac_addr[1] =
                bf_get(lpfc_fcf_record_mac_1, new_fcf_record);
        phba->fcf.mac_addr[2] =
                bf_get(lpfc_fcf_record_mac_2, new_fcf_record);
        phba->fcf.mac_addr[3] =
                bf_get(lpfc_fcf_record_mac_3, new_fcf_record);
        phba->fcf.mac_addr[4] =
                bf_get(lpfc_fcf_record_mac_4, new_fcf_record);
        phba->fcf.mac_addr[5] =
                bf_get(lpfc_fcf_record_mac_5, new_fcf_record);
        phba->fcf.fcf_indx = bf_get(lpfc_fcf_record_fcf_index, new_fcf_record);
        phba->fcf.priority = new_fcf_record->fip_priority;
        phba->fcf.switch_name[0] =
                bf_get(lpfc_fcf_record_switch_name_0, new_fcf_record);
        phba->fcf.switch_name[1] =
                bf_get(lpfc_fcf_record_switch_name_1, new_fcf_record);
        phba->fcf.switch_name[2] =
                bf_get(lpfc_fcf_record_switch_name_2, new_fcf_record);
        phba->fcf.switch_name[3] =
                bf_get(lpfc_fcf_record_switch_name_3, new_fcf_record);
        phba->fcf.switch_name[4] =
                bf_get(lpfc_fcf_record_switch_name_4, new_fcf_record);
        phba->fcf.switch_name[5] =
                bf_get(lpfc_fcf_record_switch_name_5, new_fcf_record);
        phba->fcf.switch_name[6] =
                bf_get(lpfc_fcf_record_switch_name_6, new_fcf_record);
        phba->fcf.switch_name[7] =
                bf_get(lpfc_fcf_record_switch_name_7, new_fcf_record);
}

/**
 * lpfc_register_fcf - Register the FCF with hba.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine issues a register fcfi mailbox command to register
 * the fcf with the HBA.
 **/
static void
lpfc_register_fcf(struct lpfc_hba *phba)
{
        LPFC_MBOXQ_t *fcf_mbxq;
        int rc;
        unsigned long flags;

        spin_lock_irqsave(&phba->hbalock, flags);

        /* If the FCF is not available, do nothing. */
        if (!(phba->fcf.fcf_flag & FCF_AVAILABLE)) {
                spin_unlock_irqrestore(&phba->hbalock, flags);
                return;
        }

        /* The FCF is already registered, start discovery */
        if (phba->fcf.fcf_flag & FCF_REGISTERED) {
                phba->fcf.fcf_flag |= (FCF_DISCOVERED | FCF_IN_USE);
                phba->hba_flag &= ~FCF_DISC_INPROGRESS;
                spin_unlock_irqrestore(&phba->hbalock, flags);
                if (phba->pport->port_state != LPFC_FLOGI)
                        lpfc_initial_flogi(phba->pport);
                return;
        }
        spin_unlock_irqrestore(&phba->hbalock, flags);

        fcf_mbxq = mempool_alloc(phba->mbox_mem_pool,
                GFP_KERNEL);
        if (!fcf_mbxq)
                return;

        lpfc_reg_fcfi(phba, fcf_mbxq);
        fcf_mbxq->vport = phba->pport;
        fcf_mbxq->mbox_cmpl = lpfc_mbx_cmpl_reg_fcfi;
        rc = lpfc_sli_issue_mbox(phba, fcf_mbxq, MBX_NOWAIT);
        if (rc == MBX_NOT_FINISHED)
                mempool_free(fcf_mbxq, phba->mbox_mem_pool);

        return;
}

1232/**
1233 * lpfc_match_fcf_conn_list - Check if the FCF record can be used for discovery.
1234 * @phba: pointer to lpfc hba data structure.
1235 * @new_fcf_record: pointer to fcf record.
1236 * @boot_flag: Indicates if this record used by boot bios.
1237 * @addr_mode: The address mode to be used by this FCF
1238 *
1239 * This routine compare the fcf record with connect list obtained from the
1240 * config region to decide if this FCF can be used for SAN discovery. It returns
1241 * 1 if this record can be used for SAN discovery else return zero. If this FCF
1242 * record can be used for SAN discovery, the boot_flag will indicate if this FCF
1243 * is used by boot bios and addr_mode will indicate the addressing mode to be
1244 * used for this FCF when the function returns.
1245 * If the FCF record need to be used with a particular vlan id, the vlan is
1246 * set in the vlan_id on return of the function. If not VLAN tagging need to
1247 * be used with the FCF vlan_id will be set to 0xFFFF;
1248 **/
1249static int
1250lpfc_match_fcf_conn_list(struct lpfc_hba *phba,
1251                        struct fcf_record *new_fcf_record,
1252                        uint32_t *boot_flag, uint32_t *addr_mode,
1253                        uint16_t *vlan_id)
1254{
1255        struct lpfc_fcf_conn_entry *conn_entry;
1256
1257        /* If FCF not available return 0 */
1258        if (!bf_get(lpfc_fcf_record_fcf_avail, new_fcf_record) ||
1259                !bf_get(lpfc_fcf_record_fcf_valid, new_fcf_record))
1260                return 0;
1261
1262        if (!phba->cfg_enable_fip) {
1263                *boot_flag = 0;
1264                *addr_mode = bf_get(lpfc_fcf_record_mac_addr_prov,
1265                                new_fcf_record);
1266                if (phba->valid_vlan)
1267                        *vlan_id = phba->vlan_id;
1268                else
1269                        *vlan_id = 0xFFFF;
1270                return 1;
1271        }
1272
1273        /*
1274         * If there are no FCF connection table entry, driver connect to all
1275         * FCFs.
1276         */
1277        if (list_empty(&phba->fcf_conn_rec_list)) {
1278                *boot_flag = 0;
1279                *addr_mode = bf_get(lpfc_fcf_record_mac_addr_prov,
1280                        new_fcf_record);
1281
1282                /*
1283                 * When there are no FCF connect entries, use driver's default
1284                 * addressing mode - FPMA.
1285                 */
1286                if (*addr_mode & LPFC_FCF_FPMA)
1287                        *addr_mode = LPFC_FCF_FPMA;
1288
1289                *vlan_id = 0xFFFF;
1290                return 1;
1291        }
1292
1293        list_for_each_entry(conn_entry, &phba->fcf_conn_rec_list, list) {
1294                if (!(conn_entry->conn_rec.flags & FCFCNCT_VALID))
1295                        continue;
1296
1297                if ((conn_entry->conn_rec.flags & FCFCNCT_FBNM_VALID) &&
1298                        !lpfc_fab_name_match(conn_entry->conn_rec.fabric_name,
1299                                             new_fcf_record))
1300                        continue;
1301                if ((conn_entry->conn_rec.flags & FCFCNCT_SWNM_VALID) &&
1302                        !lpfc_sw_name_match(conn_entry->conn_rec.switch_name,
1303                                            new_fcf_record))
1304                        continue;
1305                if (conn_entry->conn_rec.flags & FCFCNCT_VLAN_VALID) {
1306                        /*
1307                         * If the vlan bit map does not have the bit set for the
1308                         * vlan id to be used, then it is not a match.
1309                         */
1310                        if (!(new_fcf_record->vlan_bitmap
1311                                [conn_entry->conn_rec.vlan_tag / 8] &
1312                                (1 << (conn_entry->conn_rec.vlan_tag % 8))))
1313                                continue;
1314                }
1315
1316                /*
1317                 * If connection record does not support any addressing mode,
1318                 * skip the FCF record.
1319                 */
1320                if (!(bf_get(lpfc_fcf_record_mac_addr_prov, new_fcf_record)
1321                        & (LPFC_FCF_FPMA | LPFC_FCF_SPMA)))
1322                        continue;
1323
1324                /*
1325                 * Check if the connection record specifies a required
1326                 * addressing mode.
1327                 */
1328                if ((conn_entry->conn_rec.flags & FCFCNCT_AM_VALID) &&
1329                        !(conn_entry->conn_rec.flags & FCFCNCT_AM_PREFERRED)) {
1330
1331                        /*
1332                         * If SPMA is required but the FCF does not support it, continue.
1333                         */
1334                        if ((conn_entry->conn_rec.flags & FCFCNCT_AM_SPMA) &&
1335                                !(bf_get(lpfc_fcf_record_mac_addr_prov,
1336                                        new_fcf_record) & LPFC_FCF_SPMA))
1337                                continue;
1338
1339                        /*
1340                         * If FPMA is required but the FCF does not support it, continue.
1341                         */
1342                        if (!(conn_entry->conn_rec.flags & FCFCNCT_AM_SPMA) &&
1343                                !(bf_get(lpfc_fcf_record_mac_addr_prov,
1344                                new_fcf_record) & LPFC_FCF_FPMA))
1345                                continue;
1346                }
1347
1348                /*
1349                 * This FCF record matches the filtering criteria.
1350                 */
1351                if (conn_entry->conn_rec.flags & FCFCNCT_BOOT)
1352                        *boot_flag = 1;
1353                else
1354                        *boot_flag = 0;
1355
1356                /*
1357                 * If the user did not specify an addressing mode, or the
1358                 * preferred addressing mode specified by the user is not
1359                 * supported by the FCF, let the fabric pick the addressing mode.
1360                 */
1361                *addr_mode = bf_get(lpfc_fcf_record_mac_addr_prov,
1362                                new_fcf_record);
1363                /*
1364                 * If the user specified a required address mode, assign that
1365                 * address mode
1366                 */
1367                if ((conn_entry->conn_rec.flags & FCFCNCT_AM_VALID) &&
1368                        (!(conn_entry->conn_rec.flags & FCFCNCT_AM_PREFERRED)))
1369                        *addr_mode = (conn_entry->conn_rec.flags &
1370                                FCFCNCT_AM_SPMA) ?
1371                                LPFC_FCF_SPMA : LPFC_FCF_FPMA;
1372                /*
1373                 * If the user specified a preferred address mode, use it
1374                 * only if the FCF supports that mode.
1375                 */
1376                else if ((conn_entry->conn_rec.flags & FCFCNCT_AM_VALID) &&
1377                        (conn_entry->conn_rec.flags & FCFCNCT_AM_PREFERRED) &&
1378                        (conn_entry->conn_rec.flags & FCFCNCT_AM_SPMA) &&
1379                        (*addr_mode & LPFC_FCF_SPMA))
1380                                *addr_mode = LPFC_FCF_SPMA;
1381                else if ((conn_entry->conn_rec.flags & FCFCNCT_AM_VALID) &&
1382                        (conn_entry->conn_rec.flags & FCFCNCT_AM_PREFERRED) &&
1383                        !(conn_entry->conn_rec.flags & FCFCNCT_AM_SPMA) &&
1384                        (*addr_mode & LPFC_FCF_FPMA))
1385                                *addr_mode = LPFC_FCF_FPMA;
1386
1387                if (conn_entry->conn_rec.flags & FCFCNCT_VLAN_VALID)
1388                        *vlan_id = conn_entry->conn_rec.vlan_tag;
1389                else
1390                        *vlan_id = 0xFFFF;
1391
1392                return 1;
1393        }
1394
1395        return 0;
1396}
1397
1398/**
1399 * lpfc_check_pending_fcoe_event - Check if there is a pending FCoE event.
1400 * @phba: pointer to lpfc hba data structure.
1401 * @unreg_fcf: Unregister the FCF if the FCF table needs to be re-scanned.
1402 *
1403 * This function checks whether any FCoE event occurred while the driver
1404 * was scanning FCF entries. If there is a pending event, it restarts the
1405 * FCF scan and returns 1; otherwise it returns 0.
1406 */
1407int
1408lpfc_check_pending_fcoe_event(struct lpfc_hba *phba, uint8_t unreg_fcf)
1409{
1410        LPFC_MBOXQ_t *mbox;
1411        int rc;
1412        /*
1413         * If the Link is up and no FCoE events while in the
1414         * FCF discovery, no need to restart FCF discovery.
1415         */
1416        if ((phba->link_state  >= LPFC_LINK_UP) &&
1417                (phba->fcoe_eventtag == phba->fcoe_eventtag_at_fcf_scan))
1418                return 0;
1419
1420        spin_lock_irq(&phba->hbalock);
1421        phba->fcf.fcf_flag &= ~FCF_AVAILABLE;
1422        spin_unlock_irq(&phba->hbalock);
1423
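            /* Either the link bounced or new FCoE events arrived during
             * the scan; restart the FCF table read from the first entry.
             */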
1424        if (phba->link_state >= LPFC_LINK_UP)
1425                lpfc_sli4_read_fcf_record(phba, LPFC_FCOE_FCF_GET_FIRST);
1426
1427        if (unreg_fcf) {
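                    /* The caller asked for the current FCFI to be
                     * unregistered before the rescan.
                     */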
1428                spin_lock_irq(&phba->hbalock);
1429                phba->fcf.fcf_flag &= ~FCF_REGISTERED;
1430                spin_unlock_irq(&phba->hbalock);
1431                mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1432                if (!mbox) {
1433                        lpfc_printf_log(phba, KERN_ERR,
1434                                LOG_DISCOVERY|LOG_MBOX,
1435                                "2610 UNREG_FCFI mbox allocation failed\n");
1436                        return 1;
1437                }
1438                lpfc_unreg_fcfi(mbox, phba->fcf.fcfi);
1439                mbox->vport = phba->pport;
1440                mbox->mbox_cmpl = lpfc_unregister_fcfi_cmpl;
1441                rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
1442                if (rc == MBX_NOT_FINISHED) {
1443                        lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
1444                                "2611 UNREG_FCFI issue mbox failed\n");
1445                        mempool_free(mbox, phba->mbox_mem_pool);
1446                }
1447        }
1448
1449        return 1;
1450}
1451
1452/**
1453 * lpfc_mbx_cmpl_read_fcf_record - Completion handler for read_fcf mbox.
1454 * @phba: pointer to lpfc hba data structure.
1455 * @mboxq: pointer to mailbox object.
1456 *
1457 * This function iterates through all the FCF records available in the
1458 * HBA and chooses the optimal FCF record for discovery. After finding
1459 * the FCF for discovery, it registers the FCF record and kick-starts
1460 * discovery.
1461 * If the FCF_IN_USE flag is set for the currently used FCF, the routine
1462 * tries to use an FCF record whose fabric name and mac address match the
1463 * currently used FCF record.
1464 * If the driver supports only one FCF, it will try to use the FCF record
1465 * used by BOOT_BIOS.
1466 */
1467void
1468lpfc_mbx_cmpl_read_fcf_record(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
1469{
1470        void *virt_addr;
1471        dma_addr_t phys_addr;
1472        uint8_t *bytep;
1473        struct lpfc_mbx_sge sge;
1474        struct lpfc_mbx_read_fcf_tbl *read_fcf;
1475        uint32_t shdr_status, shdr_add_status;
1476        union lpfc_sli4_cfg_shdr *shdr;
1477        struct fcf_record *new_fcf_record;
1478        int rc;
1479        uint32_t boot_flag, addr_mode;
1480        uint32_t next_fcf_index;
1481        unsigned long flags;
1482        uint16_t vlan_id;
1483
1484        /* If there is a pending FCoE event, restart the FCF table scan */
1485        if (lpfc_check_pending_fcoe_event(phba, 0)) {
1486                lpfc_sli4_mbox_cmd_free(phba, mboxq);
1487                return;
1488        }
1489
1490        /* Get the first SGE entry from the non-embedded DMA memory. This
1491         * routine only uses a single SGE.
1492         */
1493        lpfc_sli4_mbx_sge_get(mboxq, 0, &sge);
1494        phys_addr = getPaddr(sge.pa_hi, sge.pa_lo);
1495        if (unlikely(!mboxq->sge_array)) {
1496                lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
1497                                "2524 Failed to get the non-embedded SGE "
1498                                "virtual address\n");
1499                goto out;
1500        }
1501        virt_addr = mboxq->sge_array->addr[0];
1502
1503        shdr = (union lpfc_sli4_cfg_shdr *)virt_addr;
1504        shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
1505        shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
1506                                 &shdr->response);
1507        /*
1508         * The FCF Record was read and there is no reason for the driver
1509         * to maintain the FCF record data or memory. Instead, it only
1510         * needs to track which FCFIs can be used.
1511         */
1512        if (shdr_status || shdr_add_status) {
1513                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1514                                "2521 READ_FCF_RECORD mailbox failed "
1515                                "with status x%x add_status x%x, mbx\n",
1516                                shdr_status, shdr_add_status);
1517                goto out;
1518        }
1519        /* Interpret the returned FCF record information */
1520        read_fcf = (struct lpfc_mbx_read_fcf_tbl *)virt_addr;
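            /* Calling lpfc_sli_pcimem_bcopy() with the same source and
             * destination byte-swaps the mailbox response in place.
             */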
1521        lpfc_sli_pcimem_bcopy(read_fcf, read_fcf,
1522                              sizeof(struct lpfc_mbx_read_fcf_tbl));
1523        next_fcf_index = bf_get(lpfc_mbx_read_fcf_tbl_nxt_vindx, read_fcf);
1524
1525        new_fcf_record = (struct fcf_record *)(virt_addr +
1526                          sizeof(struct lpfc_mbx_read_fcf_tbl));
1527        lpfc_sli_pcimem_bcopy(new_fcf_record, new_fcf_record,
1528                              sizeof(struct fcf_record));
1529        bytep = virt_addr + sizeof(union lpfc_sli4_cfg_shdr);
1530
1531        rc = lpfc_match_fcf_conn_list(phba, new_fcf_record,
1532                                      &boot_flag, &addr_mode,
1533                                        &vlan_id);
1534        /*
1535         * If the FCF record does not match the connect list entries,
1536         * read the next entry.
1537         */
1538        if (!rc)
1539                goto read_next_fcf;
1540        /*
1541         * If this is not the first FCF discovery of the HBA, reuse the
1542         * FCF record already in use for the discovery.
1543         */
1544        spin_lock_irqsave(&phba->hbalock, flags);
1545        if (phba->fcf.fcf_flag & FCF_IN_USE) {
1546                if (lpfc_fab_name_match(phba->fcf.fabric_name,
1547                                        new_fcf_record) &&
1548                    lpfc_sw_name_match(phba->fcf.switch_name,
1549                                        new_fcf_record) &&
1550                    lpfc_mac_addr_match(phba, new_fcf_record)) {
1551                        phba->fcf.fcf_flag |= FCF_AVAILABLE;
1552                        spin_unlock_irqrestore(&phba->hbalock, flags);
1553                        goto out;
1554                }
1555                spin_unlock_irqrestore(&phba->hbalock, flags);
1556                goto read_next_fcf;
1557        }
1558        if (phba->fcf.fcf_flag & FCF_AVAILABLE) {
1559                /*
1560                 * If the current FCF record does not have the boot flag
1561                 * set and the new FCF record does, use the new FCF
1562                 * record.
1563                 */
1564                if (boot_flag && !(phba->fcf.fcf_flag & FCF_BOOT_ENABLE)) {
1565                        /* Use this FCF record */
1566                        lpfc_copy_fcf_record(phba, new_fcf_record);
1567                        phba->fcf.addr_mode = addr_mode;
1568                        phba->fcf.fcf_flag |= FCF_BOOT_ENABLE;
1569                        if (vlan_id != 0xFFFF) {
1570                                phba->fcf.fcf_flag |= FCF_VALID_VLAN;
1571                                phba->fcf.vlan_id = vlan_id;
1572                        }
1573                        spin_unlock_irqrestore(&phba->hbalock, flags);
1574                        goto read_next_fcf;
1575                }
1576                /*
1577                 * If the current FCF record has boot flag set and the
1578                 * new FCF record does not have the boot flag, read the next
1579                 * FCF record.
1580                 */
1581                if (!boot_flag && (phba->fcf.fcf_flag & FCF_BOOT_ENABLE)) {
1582                        spin_unlock_irqrestore(&phba->hbalock, flags);
1583                        goto read_next_fcf;
1584                }
1585                /*
1586                 * If the new record has a lower priority value than the
1587                 * current FCF (same fabric), use the new record.
1588                 */
1589                if (lpfc_fab_name_match(phba->fcf.fabric_name,
1590                                        new_fcf_record) &&
1591                    (new_fcf_record->fip_priority < phba->fcf.priority)) {
1592                        /* Use this FCF record */
1593                        lpfc_copy_fcf_record(phba, new_fcf_record);
1594                        phba->fcf.addr_mode = addr_mode;
1595                        if (vlan_id != 0xFFFF) {
1596                                phba->fcf.fcf_flag |= FCF_VALID_VLAN;
1597                                phba->fcf.vlan_id = vlan_id;
1598                        }
1599                        spin_unlock_irqrestore(&phba->hbalock, flags);
1600                        goto read_next_fcf;
1601                }
1602                spin_unlock_irqrestore(&phba->hbalock, flags);
1603                goto read_next_fcf;
1604        }
1605        /*
1606         * This is the first available FCF record, use this
1607         * record.
1608         */
1609        lpfc_copy_fcf_record(phba, new_fcf_record);
1610        phba->fcf.addr_mode = addr_mode;
1611        if (boot_flag)
1612                phba->fcf.fcf_flag |= FCF_BOOT_ENABLE;
1613        phba->fcf.fcf_flag |= FCF_AVAILABLE;
1614        if (vlan_id != 0xFFFF) {
1615                phba->fcf.fcf_flag |= FCF_VALID_VLAN;
1616                phba->fcf.vlan_id = vlan_id;
1617        }
1618        spin_unlock_irqrestore(&phba->hbalock, flags);
1619        goto read_next_fcf;
1620
1621read_next_fcf:
1622        lpfc_sli4_mbox_cmd_free(phba, mboxq);
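            /* No valid next index means the table scan is complete;
             * register the FCF selected above.
             */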
1623        if (next_fcf_index == LPFC_FCOE_FCF_NEXT_NONE || next_fcf_index == 0)
1624                lpfc_register_fcf(phba);
1625        else
1626                lpfc_sli4_read_fcf_record(phba, next_fcf_index);
1627        return;
1628
1629out:
1630        lpfc_sli4_mbox_cmd_free(phba, mboxq);
1631        lpfc_register_fcf(phba);
1632
1633        return;
1634}
1635
1636/**
1637 * lpfc_init_vpi_cmpl - Completion handler for init_vpi mbox command.
1638 * @phba: pointer to lpfc hba data structure.
1639 * @mboxq: pointer to mailbox data structure.
1640 *
1641 * This function handles completion of the init_vpi mailbox command.
1642 */
1643static void
1644lpfc_init_vpi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
1645{
1646        struct lpfc_vport *vport = mboxq->vport;
1647        if (mboxq->u.mb.mbxStatus) {
1648                lpfc_printf_vlog(vport, KERN_ERR,
1649                                LOG_MBOX,
1650                                "2609 Init VPI mailbox failed 0x%x\n",
1651                                mboxq->u.mb.mbxStatus);
1652                mempool_free(mboxq, phba->mbox_mem_pool);
1653                lpfc_vport_set_state(vport, FC_VPORT_FAILED);
1654                return;
1655        }
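            /* INIT_VPI succeeded; clear the flag and send the FDISC if
             * the fabric supports NPIV.
             */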
1656        vport->fc_flag &= ~FC_VPORT_NEEDS_INIT_VPI;
1657
1658        if (phba->link_flag & LS_NPIV_FAB_SUPPORTED)
1659                lpfc_initial_fdisc(vport);
1660        else {
1661                lpfc_vport_set_state(vport, FC_VPORT_NO_FABRIC_SUPP);
1662                lpfc_printf_vlog(vport, KERN_ERR,
1663                        LOG_ELS,
1664                        "2606 No NPIV Fabric support\n");
1665        }
1666        return;
1667}
1668
1669/**
1670 * lpfc_start_fdiscs - send FDISCs for each vport on this port.
1671 * @phba: pointer to lpfc hba data structure.
1672 *
1673 * This function loops through the list of vports on the @phba and issues an
1674 * FDISC if possible.
1675 */
1676void
1677lpfc_start_fdiscs(struct lpfc_hba *phba)
1678{
1679        struct lpfc_vport **vports;
1680        int i;
1681        LPFC_MBOXQ_t *mboxq;
1682        int rc;
1683
1684        vports = lpfc_create_vport_work_array(phba);
1685        if (vports != NULL) {
1686                for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
1687                        if (vports[i]->port_type == LPFC_PHYSICAL_PORT)
1688                                continue;
1689                        /* No vpi is available for this vport */
1690                        if (vports[i]->vpi > phba->max_vpi) {
1691                                lpfc_vport_set_state(vports[i],
1692                                                     FC_VPORT_FAILED);
1693                                continue;
1694                        }
1695                        if (phba->fc_topology == TOPOLOGY_LOOP) {
1696                                lpfc_vport_set_state(vports[i],
1697                                                     FC_VPORT_LINKDOWN);
1698                                continue;
1699                        }
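                            /* This vport still needs INIT_VPI; issue it
                             * and let lpfc_init_vpi_cmpl() send the FDISC.
                             */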
1700                        if (vports[i]->fc_flag & FC_VPORT_NEEDS_INIT_VPI) {
1701                                mboxq = mempool_alloc(phba->mbox_mem_pool,
1702                                        GFP_KERNEL);
1703                                if (!mboxq) {
1704                                        lpfc_printf_vlog(vports[i], KERN_ERR,
1705                                        LOG_MBOX, "2607 Failed to allocate "
1706                                        "init_vpi mailbox\n");
1707                                        continue;
1708                                }
1709                                lpfc_init_vpi(phba, mboxq, vports[i]->vpi);
1710                                mboxq->vport = vports[i];
1711                                mboxq->mbox_cmpl = lpfc_init_vpi_cmpl;
1712                                rc = lpfc_sli_issue_mbox(phba, mboxq,
1713                                        MBX_NOWAIT);
1714                                if (rc == MBX_NOT_FINISHED) {
1715                                        lpfc_printf_vlog(vports[i], KERN_ERR,
1716                                        LOG_MBOX, "2608 Failed to issue "
1717                                        "init_vpi mailbox\n");
1718                                        mempool_free(mboxq,
1719                                                phba->mbox_mem_pool);
1720                                }
1721                                continue;
1722                        }
1723                        if (phba->link_flag & LS_NPIV_FAB_SUPPORTED)
1724                                lpfc_initial_fdisc(vports[i]);
1725                        else {
1726                                lpfc_vport_set_state(vports[i],
1727                                                     FC_VPORT_NO_FABRIC_SUPP);
1728                                lpfc_printf_vlog(vports[i], KERN_ERR,
1729                                                 LOG_ELS,
1730                                                 "0259 No NPIV "
1731                                                 "Fabric support\n");
1732                        }
1733                }
1734        }
1735        lpfc_destroy_vport_work_array(phba, vports);
1736}
1737
1738void
1739lpfc_mbx_cmpl_reg_vfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
1740{
1741        struct lpfc_dmabuf *dmabuf = mboxq->context1;
1742        struct lpfc_vport *vport = mboxq->vport;
1743
1744        if (mboxq->u.mb.mbxStatus) {
1745                lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
1746                         "2018 REG_VFI mbxStatus error x%x "
1747                         "HBA state x%x\n",
1748                         mboxq->u.mb.mbxStatus, vport->port_state);
1749                if (phba->fc_topology == TOPOLOGY_LOOP) {
1750                        /* FLOGI failed, use loop map to make discovery list */
1751                        lpfc_disc_list_loopmap(vport);
1752                        /* Start discovery */
1753                        lpfc_disc_start(vport);
1754                        goto fail_free_mem;
1755                }
1756                lpfc_vport_set_state(vport, FC_VPORT_FAILED);
1757                goto fail_free_mem;
1758        }
1759        /* Mark the vport as registered with its VFI */
1760        vport->vfi_state |= LPFC_VFI_REGISTERED;
1761
1762        if (vport->port_state == LPFC_FABRIC_CFG_LINK) {
1763                lpfc_start_fdiscs(phba);
1764                lpfc_do_scr_ns_plogi(phba, vport);
1765        }
1766
1767fail_free_mem:
1768        mempool_free(mboxq, phba->mbox_mem_pool);
1769        lpfc_mbuf_free(phba, dmabuf->virt, dmabuf->phys);
1770        kfree(dmabuf);
1771        return;
1772}
1773
1774static void
1775lpfc_mbx_cmpl_read_sparam(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
1776{
1777        MAILBOX_t *mb = &pmb->u.mb;
1778        struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) pmb->context1;
1779        struct lpfc_vport  *vport = pmb->vport;
1780
1782        /* Check for error */
1783        if (mb->mbxStatus) {
1784                /* READ_SPARAM mbox error <mbxStatus> state <hba_state> */
1785                lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
1786                                 "0319 READ_SPARAM mbxStatus error x%x "
1787                                 "hba state x%x>\n",
1788                                 mb->mbxStatus, vport->port_state);
1789                lpfc_linkdown(phba);
1790                goto out;
1791        }
1792
1793        memcpy((uint8_t *) &vport->fc_sparam, (uint8_t *) mp->virt,
1794               sizeof (struct serv_parm));
1795        if (phba->cfg_soft_wwnn)
1796                u64_to_wwn(phba->cfg_soft_wwnn,
1797                           vport->fc_sparam.nodeName.u.wwn);
1798        if (phba->cfg_soft_wwpn)
1799                u64_to_wwn(phba->cfg_soft_wwpn,
1800                           vport->fc_sparam.portName.u.wwn);
1801        memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName,
1802               sizeof(vport->fc_nodename));
1803        memcpy(&vport->fc_portname, &vport->fc_sparam.portName,
1804               sizeof(vport->fc_portname));
1805        if (vport->port_type == LPFC_PHYSICAL_PORT) {
1806                memcpy(&phba->wwnn, &vport->fc_nodename, sizeof(phba->wwnn));
1807                memcpy(&phba->wwpn, &vport->fc_portname, sizeof(phba->wwpn));
1808        }
1809
1810        lpfc_mbuf_free(phba, mp->virt, mp->phys);
1811        kfree(mp);
1812        mempool_free(pmb, phba->mbox_mem_pool);
1813        return;
1814
1815out:
1816        pmb->context1 = NULL;
1817        lpfc_mbuf_free(phba, mp->virt, mp->phys);
1818        kfree(mp);
1819        lpfc_issue_clear_la(phba, vport);
1820        mempool_free(pmb, phba->mbox_mem_pool);
1821        return;
1822}
1823
1824static void
1825lpfc_mbx_process_link_up(struct lpfc_hba *phba, READ_LA_VAR *la)
1826{
1827        struct lpfc_vport *vport = phba->pport;
1828        LPFC_MBOXQ_t *sparam_mbox, *cfglink_mbox = NULL;
1829        int i;
1830        struct lpfc_dmabuf *mp;
1831        int rc;
1832        struct fcf_record *fcf_record;
1833
1834        sparam_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1835
1836        spin_lock_irq(&phba->hbalock);
1837        switch (la->UlnkSpeed) {
1838        case LA_1GHZ_LINK:
1839                phba->fc_linkspeed = LA_1GHZ_LINK;
1840                break;
1841        case LA_2GHZ_LINK:
1842                phba->fc_linkspeed = LA_2GHZ_LINK;
1843                break;
1844        case LA_4GHZ_LINK:
1845                phba->fc_linkspeed = LA_4GHZ_LINK;
1846                break;
1847        case LA_8GHZ_LINK:
1848                phba->fc_linkspeed = LA_8GHZ_LINK;
1849                break;
1850        case LA_10GHZ_LINK:
1851                phba->fc_linkspeed = LA_10GHZ_LINK;
1852                break;
1853        default:
1854                phba->fc_linkspeed = LA_UNKNW_LINK;
1855                break;
1856        }
1857
1858        phba->fc_topology = la->topology;
1859        phba->link_flag &= ~LS_NPIV_FAB_SUPPORTED;
1860
1861        if (phba->fc_topology == TOPOLOGY_LOOP) {
1862                phba->sli3_options &= ~LPFC_SLI3_NPIV_ENABLED;
1863
1864                if (phba->cfg_enable_npiv)
1865                        lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
1866                                "1309 Link Up Event npiv not supported in loop "
1867                                "topology\n");
1868                /* Get Loop Map information */
1869                if (la->il)
1870                        vport->fc_flag |= FC_LBIT;
1871
1872                vport->fc_myDID = la->granted_AL_PA;
1873                i = la->un.lilpBde64.tus.f.bdeSize;
1874
1875                if (i == 0) {
1876                        phba->alpa_map[0] = 0;
1877                } else {
1878                        if (vport->cfg_log_verbose & LOG_LINK_EVENT) {
1879                                int numalpa, j, k;
1880                                union {
1881                                        uint8_t pamap[16];
1882                                        struct {
1883                                                uint32_t wd1;
1884                                                uint32_t wd2;
1885                                                uint32_t wd3;
1886                                                uint32_t wd4;
1887                                        } pa;
1888                                } un;
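                                    /* Pack up to 16 ALPAs per pass into
                                     * the union so the map can be logged
                                     * four words at a time.
                                     */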
1889                                numalpa = phba->alpa_map[0];
1890                                j = 0;
1891                                while (j < numalpa) {
1892                                        memset(un.pamap, 0, 16);
1893                                        for (k = 1; j < numalpa; k++) {
1894                                                un.pamap[k - 1] =
1895                                                        phba->alpa_map[j + 1];
1896                                                j++;
1897                                                if (k == 16)
1898                                                        break;
1899                                        }
1900                                        /* Link Up Event ALPA map */
1901                                        lpfc_printf_log(phba,
1902                                                        KERN_WARNING,
1903                                                        LOG_LINK_EVENT,
1904                                                        "1304 Link Up Event "
1905                                                        "ALPA map Data: x%x "
1906                                                        "x%x x%x x%x\n",
1907                                                        un.pa.wd1, un.pa.wd2,
1908                                                        un.pa.wd3, un.pa.wd4);
1909                                }
1910                        }
1911                }
1912        } else {
1913                if (!(phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)) {
1914                        if (phba->max_vpi && phba->cfg_enable_npiv &&
1915                           (phba->sli_rev == 3))
1916                                phba->sli3_options |= LPFC_SLI3_NPIV_ENABLED;
1917                }
1918                vport->fc_myDID = phba->fc_pref_DID;
1919                vport->fc_flag |= FC_LBIT;
1920        }
1921        spin_unlock_irq(&phba->hbalock);
1922
1923        lpfc_linkup(phba);
1924        if (sparam_mbox) {
1925                lpfc_read_sparam(phba, sparam_mbox, 0);
1926                sparam_mbox->vport = vport;
1927                sparam_mbox->mbox_cmpl = lpfc_mbx_cmpl_read_sparam;
1928                rc = lpfc_sli_issue_mbox(phba, sparam_mbox, MBX_NOWAIT);
1929                if (rc == MBX_NOT_FINISHED) {
1930                        mp = (struct lpfc_dmabuf *) sparam_mbox->context1;
1931                        lpfc_mbuf_free(phba, mp->virt, mp->phys);
1932                        kfree(mp);
1933                        mempool_free(sparam_mbox, phba->mbox_mem_pool);
1934                        goto out;
1935                }
1936        }
1937
1938        if (!(phba->hba_flag & HBA_FCOE_SUPPORT)) {
1939                cfglink_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1940                if (!cfglink_mbox)
1941                        goto out;
1942                vport->port_state = LPFC_LOCAL_CFG_LINK;
1943                lpfc_config_link(phba, cfglink_mbox);
1944                cfglink_mbox->vport = vport;
1945                cfglink_mbox->mbox_cmpl = lpfc_mbx_cmpl_local_config_link;
1946                rc = lpfc_sli_issue_mbox(phba, cfglink_mbox, MBX_NOWAIT);
1947                if (rc == MBX_NOT_FINISHED) {
1948                        mempool_free(cfglink_mbox, phba->mbox_mem_pool);
1949                        goto out;
1950                }
1951        } else {
1952                vport->port_state = LPFC_VPORT_UNKNOWN;
1953                /*
1954                 * Add the driver's default FCF record at FCF index 0 now. This
1955                 * is a phase 1 implementation that supports FCF index 0 and driver
1956                 * defaults.
1957                 */
1958                if (phba->cfg_enable_fip == 0) {
1959                        fcf_record = kzalloc(sizeof(struct fcf_record),
1960                                        GFP_KERNEL);
1961                        if (unlikely(!fcf_record)) {
1962                                lpfc_printf_log(phba, KERN_ERR,
1963                                        LOG_MBOX | LOG_SLI,
1964                                        "2554 Could not allocate memmory for "
1965                                        "fcf record\n");
1966                                rc = -ENODEV;
1967                                goto out;
1968                        }
1969
1970                        lpfc_sli4_build_dflt_fcf_record(phba, fcf_record,
1971                                                LPFC_FCOE_FCF_DEF_INDEX);
1972                        rc = lpfc_sli4_add_fcf_record(phba, fcf_record);
1973                        if (unlikely(rc)) {
1974                                lpfc_printf_log(phba, KERN_ERR,
1975                                        LOG_MBOX | LOG_SLI,
1976                                        "2013 Could not manually add FCF "
1977                                        "record 0, status %d\n", rc);
1978                                rc = -ENODEV;
1979                                kfree(fcf_record);
1980                                goto out;
1981                        }
1982                        kfree(fcf_record);
1983                }
1984                /*
1985                 * The driver is expected to do FIP/FCF. Call the port
1986                 * and get the FCF Table.
1987                 */
1988                spin_lock_irq(&phba->hbalock);
1989                if (phba->hba_flag & FCF_DISC_INPROGRESS) {
1990                        spin_unlock_irq(&phba->hbalock);
1991                        return;
1992                }
1993                spin_unlock_irq(&phba->hbalock);
1994                rc = lpfc_sli4_read_fcf_record(phba,
1995                                        LPFC_FCOE_FCF_GET_FIRST);
1996                if (rc)
1997                        goto out;
1998        }
1999
2000        return;
2001out:
2002        lpfc_vport_set_state(vport, FC_VPORT_FAILED);
2003        lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
2004                         "0263 Discovery Mailbox error: state: 0x%x : %p %p\n",
2005                         vport->port_state, sparam_mbox, cfglink_mbox);
2006        lpfc_issue_clear_la(phba, vport);
2007        return;
2008}
2009
2010static void
2011lpfc_enable_la(struct lpfc_hba *phba)
2012{
2013        uint32_t control;
2014        struct lpfc_sli *psli = &phba->sli;
2015        spin_lock_irq(&phba->hbalock);
2016        psli->sli_flag |= LPFC_PROCESS_LA;
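            /* Only SLI-3 and earlier enable link attention interrupts
             * through the HC register.
             */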
2017        if (phba->sli_rev <= LPFC_SLI_REV3) {
2018                control = readl(phba->HCregaddr);
2019                control |= HC_LAINT_ENA;
2020                writel(control, phba->HCregaddr);
2021                readl(phba->HCregaddr); /* flush */
2022        }
2023        spin_unlock_irq(&phba->hbalock);
2024}
2025
2026static void
2027lpfc_mbx_issue_link_down(struct lpfc_hba *phba)
2028{
2029        lpfc_linkdown(phba);
2030        /* turn on Link Attention interrupts - no CLEAR_LA needed */
2031        lpfc_enable_la(phba);
2032        lpfc_unregister_unused_fcf(phba);
2033}
2034
2036/*
2037 * This routine handles processing a READ_LA mailbox
2038 * command upon completion. It is setup in the LPFC_MBOXQ
2039 * as the completion routine when the command is
2040 * handed off to the SLI layer.
2041 */
2042void
2043lpfc_mbx_cmpl_read_la(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
2044{
2045        struct lpfc_vport *vport = pmb->vport;
2046        struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
2047        READ_LA_VAR *la;
2048        MAILBOX_t *mb = &pmb->u.mb;
2049        struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
2050
2051        /* Unblock ELS traffic */
2052        phba->sli.ring[LPFC_ELS_RING].flag &= ~LPFC_STOP_IOCB_EVENT;
2053        /* Check for error */
2054        if (mb->mbxStatus) {
2055                lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
2056                                "1307 READ_LA mbox error x%x state x%x\n",
2057                                mb->mbxStatus, vport->port_state);
2058                lpfc_mbx_issue_link_down(phba);
2059                phba->link_state = LPFC_HBA_ERROR;
2060                goto lpfc_mbx_cmpl_read_la_free_mbuf;
2061        }
2062
2063        la = (READ_LA_VAR *) &pmb->u.mb.un.varReadLA;
2064
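            /* alpa_map[0] holds the ALPA count; the remaining bytes of
             * the 128-byte buffer hold the ALPAs themselves.
             */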
2065        memcpy(&phba->alpa_map[0], mp->virt, 128);
2066
2067        spin_lock_irq(shost->host_lock);
2068        if (la->pb)
2069                vport->fc_flag |= FC_BYPASSED_MODE;
2070        else
2071                vport->fc_flag &= ~FC_BYPASSED_MODE;
2072        spin_unlock_irq(shost->host_lock);
2073
2074        if (phba->fc_eventTag <= la->eventTag) {
2076                phba->fc_stat.LinkMultiEvent++;
2077                if (la->attType == AT_LINK_UP)
2078                        if (phba->fc_eventTag != 0)
2079                                lpfc_linkdown(phba);
2080        }
2081
2082        phba->fc_eventTag = la->eventTag;
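            /* The mm bit reports Menlo maintenance mode; mirror it in
             * the SLI flags.
             */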
2083        if (la->mm)
2084                phba->sli.sli_flag |= LPFC_MENLO_MAINT;
2085        else
2086                phba->sli.sli_flag &= ~LPFC_MENLO_MAINT;
2087
2088        if (la->attType == AT_LINK_UP && (!la->mm)) {
2089                phba->fc_stat.LinkUp++;
2090                if (phba->link_flag & LS_LOOPBACK_MODE) {
2091                        lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
2092                                        "1306 Link Up Event in loop back mode "
2093                                        "x%x received Data: x%x x%x x%x x%x\n",
2094                                        la->eventTag, phba->fc_eventTag,
2095                                        la->granted_AL_PA, la->UlnkSpeed,
2096                                        phba->alpa_map[0]);
2097                } else {
2098                        lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
2099                                        "1303 Link Up Event x%x received "
2100                                        "Data: x%x x%x x%x x%x x%x x%x %d\n",
2101                                        la->eventTag, phba->fc_eventTag,
2102                                        la->granted_AL_PA, la->UlnkSpeed,
2103                                        phba->alpa_map[0],
2104                                        la->mm, la->fa,
2105                                        phba->wait_4_mlo_maint_flg);
2106                }
2107                lpfc_mbx_process_link_up(phba, la);
2108        } else if (la->attType == AT_LINK_DOWN) {
2109                phba->fc_stat.LinkDown++;
2110                if (phba->link_flag & LS_LOOPBACK_MODE) {
2111                        lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
2112                                "1308 Link Down Event in loop back mode "
2113                                "x%x received "
2114                                "Data: x%x x%x x%x\n",
2115                                la->eventTag, phba->fc_eventTag,
2116                                phba->pport->port_state, vport->fc_flag);
2117                }
2118                else {
2119                        lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
2120                                "1305 Link Down Event x%x received "
2121                                "Data: x%x x%x x%x x%x x%x\n",
2122                                la->eventTag, phba->fc_eventTag,
2123                                phba->pport->port_state, vport->fc_flag,
2124                                la->mm, la->fa);
2125                }
2126                lpfc_mbx_issue_link_down(phba);
2127        }
2128        if (la->mm && la->attType == AT_LINK_UP) {
2129                if (phba->link_state != LPFC_LINK_DOWN) {
2130                        phba->fc_stat.LinkDown++;
2131                        lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
2132                                "1312 Link Down Event x%x received "
2133                                "Data: x%x x%x x%x\n",
2134                                la->eventTag, phba->fc_eventTag,
2135                                phba->pport->port_state, vport->fc_flag);
2136                        lpfc_mbx_issue_link_down(phba);
2137                } else
2138                        lpfc_enable_la(phba);
2139
2140                lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
2141                                "1310 Menlo Maint Mode Link up Event x%x rcvd "
2142                                "Data: x%x x%x x%x\n",
2143                                la->eventTag, phba->fc_eventTag,
2144                                phba->pport->port_state, vport->fc_flag);
2145                /*
2146                 * The command (MENLO_SET_MODE or MENLO_RESET) that
2147                 * triggered this event is waiting for this signal;
2148                 * wake it up.
2149                 */
2150                if (phba->wait_4_mlo_maint_flg) {
2151                        phba->wait_4_mlo_maint_flg = 0;
2152                        wake_up_interruptible(&phba->wait_4_mlo_m_q);
2153                }
2154        }
2155
2156        if (la->fa) {
2157                if (la->mm)
2158                        lpfc_issue_clear_la(phba, vport);
2159                lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
2160                                "1311 fa %d\n", la->fa);
2161        }
2162
2163lpfc_mbx_cmpl_read_la_free_mbuf:
2164        lpfc_mbuf_free(phba, mp->virt, mp->phys);
2165        kfree(mp);
2166        mempool_free(pmb, phba->mbox_mem_pool);
2167        return;
2168}
2169
2170/*
2171 * This routine handles processing a REG_LOGIN mailbox
2172 * command upon completion. It is setup in the LPFC_MBOXQ
2173 * as the completion routine when the command is
2174 * handed off to the SLI layer.
2175 */
2176void
2177lpfc_mbx_cmpl_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
2178{
2179        struct lpfc_vport  *vport = pmb->vport;
2180        struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
2181        struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2;
2182
2183        pmb->context1 = NULL;
2184
2185        /* Good status, call state machine */
2186        lpfc_disc_state_machine(vport, ndlp, pmb, NLP_EVT_CMPL_REG_LOGIN);
2187        lpfc_mbuf_free(phba, mp->virt, mp->phys);
2188        kfree(mp);
2189        mempool_free(pmb, phba->mbox_mem_pool);
2190        /* decrement the node reference count held for this callback
2191         * function.
2192         */
2193        lpfc_nlp_put(ndlp);
2194
2195        return;
2196}
2197
2198static void
2199lpfc_mbx_cmpl_unreg_vpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
2200{
2201        MAILBOX_t *mb = &pmb->u.mb;
2202        struct lpfc_vport *vport = pmb->vport;
2203        struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
2204
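            /* These mailbox statuses are informational only; the
             * unreg_vpi is still treated as complete.
             */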
2205        switch (mb->mbxStatus) {
2206        case 0x0011:
2207        case 0x0020:
2208        case 0x9700:
2209                lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
2210                                 "0911 cmpl_unreg_vpi, mb status = 0x%x\n",
2211                                 mb->mbxStatus);
2212                break;
2213        }
2214        vport->unreg_vpi_cmpl = VPORT_OK;
2215        mempool_free(pmb, phba->mbox_mem_pool);
2216        /*
2217         * This shost reference might have been taken at the beginning of
2218         * lpfc_vport_delete()
2219         */
2220        if (vport->load_flag & FC_UNLOADING)
2221                scsi_host_put(shost);
2222}
2223
2224int
2225lpfc_mbx_unreg_vpi(struct lpfc_vport *vport)
2226{
2227        struct lpfc_hba  *phba = vport->phba;
2228        LPFC_MBOXQ_t *mbox;
2229        int rc;
2230
2231        mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
2232        if (!mbox)
2233                return 1;
2234
2235        lpfc_unreg_vpi(phba, vport->vpi, mbox);
2236        mbox->vport = vport;
2237        mbox->mbox_cmpl = lpfc_mbx_cmpl_unreg_vpi;
2238        rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
2239        if (rc == MBX_NOT_FINISHED) {
2240                lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX | LOG_VPORT,
2241                                 "1800 Could not issue unreg_vpi\n");
2242                mempool_free(mbox, phba->mbox_mem_pool);
2243                vport->unreg_vpi_cmpl = VPORT_ERROR;
2244                return rc;
2245        }
2246        return 0;
2247}
2248
2249static void
2250lpfc_mbx_cmpl_reg_vpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
2251{
2252        struct lpfc_vport *vport = pmb->vport;
2253        struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
2254        MAILBOX_t *mb = &pmb->u.mb;
2255
2256        switch (mb->mbxStatus) {
2257        case 0x0011:
2258        case 0x9601:
2259        case 0x9602:
2260                lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
2261                                 "0912 cmpl_reg_vpi, mb status = 0x%x\n",
2262                                 mb->mbxStatus);
2263                lpfc_vport_set_state(vport, FC_VPORT_FAILED);
2264                spin_lock_irq(shost->host_lock);
2265                vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
2266                spin_unlock_irq(shost->host_lock);
2267                vport->fc_myDID = 0;
2268                goto out;
2269        }
2270
2271        vport->num_disc_nodes = 0;
2272        /* go thru NPR list and issue ELS PLOGIs */
2273        if (vport->fc_npr_cnt)
2274                lpfc_els_disc_plogi(vport);
2275
2276        if (!vport->num_disc_nodes) {
2277                spin_lock_irq(shost->host_lock);
2278                vport->fc_flag &= ~FC_NDISC_ACTIVE;
2279                spin_unlock_irq(shost->host_lock);
2280                lpfc_can_disctmo(vport);
2281        }
2282        vport->port_state = LPFC_VPORT_READY;
2283
2284out:
2285        mempool_free(pmb, phba->mbox_mem_pool);
2286        return;
2287}
2288
2289/**
2290 * lpfc_create_static_vport - Read HBA config region to create static vports.
2291 * @phba: pointer to lpfc hba data structure.
2292 *
2293 * This routine issues a DUMP mailbox command for config region 22 to get
2294 * the list of static vports to be created. The function creates vports
2295 * based on the information returned from the HBA.
2296 **/
2297void
2298lpfc_create_static_vport(struct lpfc_hba *phba)
2299{
2300        LPFC_MBOXQ_t *pmb = NULL;
2301        MAILBOX_t *mb;
2302        struct static_vport_info *vport_info;
2303        int rc = 0, i;
2304        struct fc_vport_identifiers vport_id;
2305        struct fc_vport *new_fc_vport;
2306        struct Scsi_Host *shost;
2307        struct lpfc_vport *vport;
2308        uint16_t offset = 0;
2309        uint8_t *vport_buff;
2310        struct lpfc_dmabuf *mp;
2311        uint32_t byte_count = 0;
2312
2313        pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
2314        if (!pmb) {
2315                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2316                                "0542 lpfc_create_static_vport failed to"
2317                                " allocate mailbox memory\n");
2318                return;
2319        }
2320
2321        mb = &pmb->u.mb;
2322
2323        vport_info = kzalloc(sizeof(struct static_vport_info), GFP_KERNEL);
2324        if (!vport_info) {
2325                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2326                                "0543 lpfc_create_static_vport failed to"
2327                                " allocate vport_info\n");
2328                mempool_free(pmb, phba->mbox_mem_pool);
2329                return;
2330        }
2331
2332        vport_buff = (uint8_t *) vport_info;
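            /* Config region 22 may span several DUMP responses; the loop
             * accumulates the chunks into vport_info at increasing
             * offsets.
             */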
2333        do {
2334                if (lpfc_dump_static_vport(phba, pmb, offset))
2335                        goto out;
2336
2337                pmb->vport = phba->pport;
2338                rc = lpfc_sli_issue_mbox_wait(phba, pmb, LPFC_MBOX_TMO);
2339
2340                if ((rc != MBX_SUCCESS) || mb->mbxStatus) {
2341                        lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
2342                                "0544 lpfc_create_static_vport failed to"
2343                                " issue dump mailbox command ret 0x%x "
2344                                "status 0x%x\n",
2345                                rc, mb->mbxStatus);
2346                        goto out;
2347                }
2348
2349                if (phba->sli_rev == LPFC_SLI_REV4) {
2350                        byte_count = pmb->u.mqe.un.mb_words[5];
2351                        mp = (struct lpfc_dmabuf *) pmb->context2;
2352                        if (byte_count > sizeof(struct static_vport_info) -
2353                                        offset)
2354                                byte_count = sizeof(struct static_vport_info)
2355                                        - offset;
2356                        memcpy(vport_buff + offset, mp->virt, byte_count);
2357                        offset += byte_count;
2358                } else {
2359                        if (mb->un.varDmp.word_cnt >
2360                                sizeof(struct static_vport_info) - offset)
2361                                mb->un.varDmp.word_cnt =
2362                                        sizeof(struct static_vport_info)
2363                                                - offset;
2364                        byte_count = mb->un.varDmp.word_cnt;
2365                        lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
2366                                vport_buff + offset,
2367                                byte_count);
2368
2369                        offset += byte_count;
2370                }
2371
2372        } while (byte_count &&
2373                offset < sizeof(struct static_vport_info));
2374
2376        if ((le32_to_cpu(vport_info->signature) != VPORT_INFO_SIG) ||
2377                ((le32_to_cpu(vport_info->rev) & VPORT_INFO_REV_MASK)
2378                        != VPORT_INFO_REV)) {
2379                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2380                        "0545 lpfc_create_static_vport bad"
2381                        " information header 0x%x 0x%x\n",
2382                        le32_to_cpu(vport_info->signature),
2383                        le32_to_cpu(vport_info->rev) & VPORT_INFO_REV_MASK);
2384
2385                goto out;
2386        }
2387
2388        shost = lpfc_shost_from_vport(phba->pport);
2389
2390        for (i = 0; i < MAX_STATIC_VPORT_COUNT; i++) {
2391                memset(&vport_id, 0, sizeof(vport_id));
2392                vport_id.port_name = wwn_to_u64(vport_info->vport_list[i].wwpn);
2393                vport_id.node_name = wwn_to_u64(vport_info->vport_list[i].wwnn);
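                    /* Skip empty entries (zero WWPN or WWNN) */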
2394                if (!vport_id.port_name || !vport_id.node_name)
2395                        continue;
2396
2397                vport_id.roles = FC_PORT_ROLE_FCP_INITIATOR;
2398                vport_id.vport_type = FC_PORTTYPE_NPIV;
2399                vport_id.disable = false;
2400                new_fc_vport = fc_vport_create(shost, 0, &vport_id);
2401
2402                if (!new_fc_vport) {
2403                        lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
2404                                "0546 lpfc_create_static_vport failed to"
2405                                " create vport\n");
2406                        continue;
2407                }
2408
2409                vport = *(struct lpfc_vport **)new_fc_vport->dd_data;
2410                vport->vport_flag |= STATIC_VPORT;
2411        }
2412
2413out:
2414        kfree(vport_info);
2415        if (rc != MBX_TIMEOUT) {
2416                if (pmb->context2) {
2417                        mp = (struct lpfc_dmabuf *) pmb->context2;
2418                        lpfc_mbuf_free(phba, mp->virt, mp->phys);
2419                        kfree(mp);
2420                }
2421                mempool_free(pmb, phba->mbox_mem_pool);
2422        }
2423
2424        return;
2425}
2426
2427/*
2428 * This routine handles processing a Fabric REG_LOGIN mailbox
2429 * command upon completion. It is setup in the LPFC_MBOXQ
2430 * as the completion routine when the command is
2431 * handed off to the SLI layer.
2432 */
2433void
2434lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
2435{
2436        struct lpfc_vport *vport = pmb->vport;
2437        MAILBOX_t *mb = &pmb->u.mb;
2438        struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
2439        struct lpfc_nodelist *ndlp;
2440
2441        ndlp = (struct lpfc_nodelist *) pmb->context2;
2442        pmb->context1 = NULL;
2443        pmb->context2 = NULL;
2444        if (mb->mbxStatus) {
2445                lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
2446                                 "0258 Register Fabric login error: 0x%x\n",
2447                                 mb->mbxStatus);
2448                lpfc_mbuf_free(phba, mp->virt, mp->phys);
2449                kfree(mp);
2450                mempool_free(pmb, phba->mbox_mem_pool);
2451
2452                if (phba->fc_topology == TOPOLOGY_LOOP) {
2453                        /* FLOGI failed, use loop map to make discovery list */
2454                        lpfc_disc_list_loopmap(vport);
2455
2456                        /* Start discovery */
2457                        lpfc_disc_start(vport);
2458                        /* Decrement the reference count on the ndlp after
2459                         * all references to it are done.
2460                         */
2461                        lpfc_nlp_put(ndlp);
2462                        return;
2463                }
2464
2465                lpfc_vport_set_state(vport, FC_VPORT_FAILED);
2466                /* Decrement the reference count on the ndlp after all
2467                 * references to it are done.
2468                 */
2469                lpfc_nlp_put(ndlp);
2470                return;
2471        }
2472
2473        ndlp->nlp_rpi = mb->un.varWords[0];
2474        ndlp->nlp_flag |= NLP_RPI_VALID;
2475        ndlp->nlp_type |= NLP_FABRIC;
2476        lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
2477
2478        if (vport->port_state == LPFC_FABRIC_CFG_LINK) {
2479                lpfc_start_fdiscs(phba);
2480                lpfc_do_scr_ns_plogi(phba, vport);
2481        }
2482
2483        lpfc_mbuf_free(phba, mp->virt, mp->phys);
2484        kfree(mp);
2485        mempool_free(pmb, phba->mbox_mem_pool);
2486
2487        /* Drop the ndlp reference taken for the mbox at the end, after
2488         * all current references to the ndlp have been released.
2489         */
2490        lpfc_nlp_put(ndlp);
2491        return;
2492}
2493
2494/*
2495 * This routine handles processing a NameServer REG_LOGIN mailbox
2496 * command upon completion. It is setup in the LPFC_MBOXQ
2497 * as the completion routine when the command is
2498 * handed off to the SLI layer.
2499 */
2500void
2501lpfc_mbx_cmpl_ns_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
2502{
2503        MAILBOX_t *mb = &pmb->u.mb;
2504        struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
2505        struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2;
2506        struct lpfc_vport *vport = pmb->vport;
2507
2508        if (mb->mbxStatus) {
2509out:
2510                lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
2511                                 "0260 Register NameServer error: 0x%x\n",
2512                                 mb->mbxStatus);
2513                /* decrement the node reference count held for this
2514                 * callback function.
2515                 */
2516                lpfc_nlp_put(ndlp);
2517                lpfc_mbuf_free(phba, mp->virt, mp->phys);
2518                kfree(mp);
2519                mempool_free(pmb, phba->mbox_mem_pool);
2520
2521                /* If no other thread is using the ndlp, free it */
2522                lpfc_nlp_not_used(ndlp);
2523
2524                if (phba->fc_topology == TOPOLOGY_LOOP) {
2525                        /*
2526                         * RegLogin failed, use loop map to make discovery
2527                         * list
2528                         */
2529                        lpfc_disc_list_loopmap(vport);
2530
2531                        /* Start discovery */
2532                        lpfc_disc_start(vport);
2533                        return;
2534                }
2535                lpfc_vport_set_state(vport, FC_VPORT_FAILED);
2536                return;
2537        }
2538
2539        pmb->context1 = NULL;
2540
2541        ndlp->nlp_rpi = mb->un.varWords[0];
2542        ndlp->nlp_flag |= NLP_RPI_VALID;
2543        ndlp->nlp_type |= NLP_FABRIC;
2544        lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
2545
2546        if (vport->port_state < LPFC_VPORT_READY) {
2547                /* Link up discovery requires Fabric registration. */
2548                lpfc_ns_cmd(vport, SLI_CTNS_RFF_ID, 0, 0); /* Do this first! */
2549                lpfc_ns_cmd(vport, SLI_CTNS_RNN_ID, 0, 0);
2550                lpfc_ns_cmd(vport, SLI_CTNS_RSNN_NN, 0, 0);
2551                lpfc_ns_cmd(vport, SLI_CTNS_RSPN_ID, 0, 0);
2552                lpfc_ns_cmd(vport, SLI_CTNS_RFT_ID, 0, 0);
2553
2554                /* Issue SCR just before NameServer GID_FT Query */
2555                lpfc_issue_els_scr(vport, SCR_DID, 0);
2556        }
2557
2558        vport->fc_ns_retry = 0;
2559        /* Good status, issue CT Request to NameServer */
2560        if (lpfc_ns_cmd(vport, SLI_CTNS_GID_FT, 0, 0)) {
2561                /* Cannot issue NameServer Query, so finish up discovery */
2562                goto out;
2563        }
2564
2565        /* decrement the node reference count held for this
2566         * callback function.
2567         */
2568        lpfc_nlp_put(ndlp);
2569        lpfc_mbuf_free(phba, mp->virt, mp->phys);
2570        kfree(mp);
2571        mempool_free(pmb, phba->mbox_mem_pool);
2572
2573        return;
2574}
2575
2576static void
2577lpfc_register_remote_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
2578{
2579        struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2580        struct fc_rport  *rport;
2581        struct lpfc_rport_data *rdata;
2582        struct fc_rport_identifiers rport_ids;
2583        struct lpfc_hba  *phba = vport->phba;
2584
2585        /* Remote port has reappeared. Re-register w/ FC transport */
2586        rport_ids.node_name = wwn_to_u64(ndlp->nlp_nodename.u.wwn);
2587        rport_ids.port_name = wwn_to_u64(ndlp->nlp_portname.u.wwn);
2588        rport_ids.port_id = ndlp->nlp_DID;
2589        rport_ids.roles = FC_RPORT_ROLE_UNKNOWN;
2590
2591        /*
2592         * We leave our node pointer in rport->dd_data when we unregister a
2593         * FCP target port.  But fc_remote_port_add zeros the space to which
2594         * rport->dd_data points.  So, if we're reusing a previously
2595         * registered port, drop the reference that we took the last time we
2596         * registered the port.
2597         */
2598        if (ndlp->rport && ndlp->rport->dd_data &&
2599            ((struct lpfc_rport_data *) ndlp->rport->dd_data)->pnode == ndlp)
2600                lpfc_nlp_put(ndlp);
2601
2602        lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT,
2603                "rport add:       did:x%x flg:x%x type x%x",
2604                ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type);
2605
2606        ndlp->rport = rport = fc_remote_port_add(shost, 0, &rport_ids);
2607        if (!rport || !get_device(&rport->dev)) {
2608                dev_printk(KERN_WARNING, &phba->pcidev->dev,
2609                           "Warning: fc_remote_port_add failed\n");
2610                return;
2611        }
2612
2613        /* initialize static port data */
2614        rport->maxframe_size = ndlp->nlp_maxframe;
2615        rport->supported_classes = ndlp->nlp_class_sup;
2616        rdata = rport->dd_data;
2617        rdata->pnode = lpfc_nlp_get(ndlp);
2618
2619        if (ndlp->nlp_type & NLP_FCP_TARGET)
2620                rport_ids.roles |= FC_RPORT_ROLE_FCP_TARGET;
2621        if (ndlp->nlp_type & NLP_FCP_INITIATOR)
2622                rport_ids.roles |= FC_RPORT_ROLE_FCP_INITIATOR;
2623
2625        if (rport_ids.roles != FC_RPORT_ROLE_UNKNOWN)
2626                fc_remote_port_rolechg(rport, rport_ids.roles);
2627
2628        if ((rport->scsi_target_id != -1) &&
2629            (rport->scsi_target_id < LPFC_MAX_TARGET)) {
2630                ndlp->nlp_sid = rport->scsi_target_id;
2631        }
2632        return;
2633}
2634
2635static void
2636lpfc_unregister_remote_port(struct lpfc_nodelist *ndlp)
2637{
2638        struct fc_rport *rport = ndlp->rport;
2639
2640        lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_RPORT,
2641                "rport delete:    did:x%x flg:x%x type x%x",
2642                ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type);
2643
2644        fc_remote_port_delete(rport);
2645
2646        return;
2647}
2648
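    /*
     * Maintain the per-state node counters on the vport; count is the
     * adjustment (+1/-1) applied under the host lock.
     */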
2649static void
2650lpfc_nlp_counters(struct lpfc_vport *vport, int state, int count)
2651{
2652        struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2653
2654        spin_lock_irq(shost->host_lock);
2655        switch (state) {
2656        case NLP_STE_UNUSED_NODE:
2657                vport->fc_unused_cnt += count;
2658                break;
2659        case NLP_STE_PLOGI_ISSUE:
2660                vport->fc_plogi_cnt += count;
2661                break;
2662        case NLP_STE_ADISC_ISSUE:
2663                vport->fc_adisc_cnt += count;
2664                break;
2665        case NLP_STE_REG_LOGIN_ISSUE:
2666                vport->fc_reglogin_cnt += count;
2667                break;
2668        case NLP_STE_PRLI_ISSUE:
2669                vport->fc_prli_cnt += count;
2670                break;
2671        case NLP_STE_UNMAPPED_NODE:
2672                vport->fc_unmap_cnt += count;
2673                break;
2674        case NLP_STE_MAPPED_NODE:
2675                vport->fc_map_cnt += count;
2676                break;
2677        case NLP_STE_NPR_NODE:
2678                vport->fc_npr_cnt += count;
2679                break;
2680        }
2681        spin_unlock_irq(shost->host_lock);
2682}
2683
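    /*
     * Handle the side effects of a node state transition: update node
     * type/flags, register or unregister the remote port with the FC
     * transport, and set up statistical data buffers for new targets.
     */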
2684static void
2685lpfc_nlp_state_cleanup(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2686                       int old_state, int new_state)
2687{
2688        struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2689
2690        if (new_state == NLP_STE_UNMAPPED_NODE) {
2691                ndlp->nlp_type &= ~(NLP_FCP_TARGET | NLP_FCP_INITIATOR);
2692                ndlp->nlp_flag &= ~NLP_NODEV_REMOVE;
2693                ndlp->nlp_type |= NLP_FC_NODE;
2694        }
2695        if (new_state == NLP_STE_MAPPED_NODE)
2696                ndlp->nlp_flag &= ~NLP_NODEV_REMOVE;
2697        if (new_state == NLP_STE_NPR_NODE)
2698                ndlp->nlp_flag &= ~NLP_RCV_PLOGI;
2699
2700        /* Transport interface */
2701        if (ndlp->rport && (old_state == NLP_STE_MAPPED_NODE ||
2702                            old_state == NLP_STE_UNMAPPED_NODE)) {
2703                vport->phba->nport_event_cnt++;
2704                lpfc_unregister_remote_port(ndlp);
2705        }
2706
2707        if (new_state ==  NLP_STE_MAPPED_NODE ||
2708            new_state == NLP_STE_UNMAPPED_NODE) {
2709                vport->phba->nport_event_cnt++;
2710                /*
2711                 * Tell the fc transport about the port, if we haven't
2712                 * already. If we have, and it's a scsi entity, be
2713                 * sure to unblock any attached scsi devices
2714                 */
2715                lpfc_register_remote_port(vport, ndlp);
2716        }
2717        if ((new_state ==  NLP_STE_MAPPED_NODE) &&
2718                (vport->stat_data_enabled)) {
2719                /*
2720                 * A new target has been discovered; if there is no buffer
2721                 * for statistical data collection, allocate one.
2722                 */
2723                ndlp->lat_data = kcalloc(LPFC_MAX_BUCKET_COUNT,
2724                                         sizeof(struct lpfc_scsicmd_bkt),
2725                                         GFP_KERNEL);
2726
2727                if (!ndlp->lat_data)
2728                        lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE,
2729                                "0286 lpfc_nlp_state_cleanup failed to "
2730                                "allocate statistical data buffer DID "
2731                                "0x%x\n", ndlp->nlp_DID);
2732        }
2733        /*
2734         * If we added to the Mapped list, but the remote port
2735         * registration failed or assigned a target id outside
2736         * our presentable range, move the node to the
2737         * Unmapped list.
2738         */
2739        if (new_state == NLP_STE_MAPPED_NODE &&
2740            (!ndlp->rport ||
2741             ndlp->rport->scsi_target_id == -1 ||
2742             ndlp->rport->scsi_target_id >= LPFC_MAX_TARGET)) {
2743                spin_lock_irq(shost->host_lock);
2744                ndlp->nlp_flag |= NLP_TGT_NO_SCSIID;
2745                spin_unlock_irq(shost->host_lock);
2746                lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
2747        }
2748}
2749
2750static char *
2751lpfc_nlp_state_name(char *buffer, size_t size, int state)
2752{
2753        static char *states[] = {
2754                [NLP_STE_UNUSED_NODE] = "UNUSED",
2755                [NLP_STE_PLOGI_ISSUE] = "PLOGI",
2756                [NLP_STE_ADISC_ISSUE] = "ADISC",
2757                [NLP_STE_REG_LOGIN_ISSUE] = "REGLOGIN",
2758                [NLP_STE_PRLI_ISSUE] = "PRLI",
2759                [NLP_STE_UNMAPPED_NODE] = "UNMAPPED",
2760                [NLP_STE_MAPPED_NODE] = "MAPPED",
2761                [NLP_STE_NPR_NODE] = "NPR",
2762        };
2763
2764        if (state < NLP_STE_MAX_STATE && states[state])
2765                strlcpy(buffer, states[state], size);
2766        else
2767                snprintf(buffer, size, "unknown (%d)", state);
2768        return buffer;
2769}
2770
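    /*
     * Transition a node to a new discovery state: log the change, keep the
     * per-state counters consistent, and run the state cleanup actions.
     */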
2771void
2772lpfc_nlp_set_state(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2773                   int state)
2774{
2775        struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2776        int  old_state = ndlp->nlp_state;
2777        char name1[16], name2[16];
2778
2779        lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
2780                         "0904 NPort state transition x%06x, %s -> %s\n",
2781                         ndlp->nlp_DID,
2782                         lpfc_nlp_state_name(name1, sizeof(name1), old_state),
2783                         lpfc_nlp_state_name(name2, sizeof(name2), state));
2784
2785        lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_NODE,
2786                "node statechg    did:x%x old:%d ste:%d",
2787                ndlp->nlp_DID, old_state, state);
2788
2789        if (old_state == NLP_STE_NPR_NODE &&
2790            state != NLP_STE_NPR_NODE)
2791                lpfc_cancel_retry_delay_tmo(vport, ndlp);
2792        if (old_state == NLP_STE_UNMAPPED_NODE) {
2793                ndlp->nlp_flag &= ~NLP_TGT_NO_SCSIID;
2794                ndlp->nlp_type &= ~NLP_FC_NODE;
2795        }
2796
2797        if (list_empty(&ndlp->nlp_listp)) {
2798                spin_lock_irq(shost->host_lock);
2799                list_add_tail(&ndlp->nlp_listp, &vport->fc_nodes);
2800                spin_unlock_irq(shost->host_lock);
2801        } else if (old_state)
2802                lpfc_nlp_counters(vport, old_state, -1);
2803
2804        ndlp->nlp_state = state;
2805        lpfc_nlp_counters(vport, state, 1);
2806        lpfc_nlp_state_cleanup(vport, ndlp, old_state, state);
2807}
2808
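    /* Add the node to the vport's node list if it is not already there */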
2809void
2810lpfc_enqueue_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
2811{
2812        struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2813
2814        if (list_empty(&ndlp->nlp_listp)) {
2815                spin_lock_irq(shost->host_lock);
2816                list_add_tail(&ndlp->nlp_listp, &vport->fc_nodes);
2817                spin_unlock_irq(shost->host_lock);
2818        }
2819}
2820
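    /* Remove the node from the vport's node list and fix up the counters */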
2821void
2822lpfc_dequeue_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
2823{
2824        struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2825
2826        lpfc_cancel_retry_delay_tmo(vport, ndlp);
2827        if (ndlp->nlp_state && !list_empty(&ndlp->nlp_listp))
2828                lpfc_nlp_counters(vport, ndlp->nlp_state, -1);
2829        spin_lock_irq(shost->host_lock);
2830        list_del_init(&ndlp->nlp_listp);
2831        spin_unlock_irq(shost->host_lock);
2832        lpfc_nlp_state_cleanup(vport, ndlp, ndlp->nlp_state,
2833                                NLP_STE_UNUSED_NODE);
2834}
2835
2836static void
2837lpfc_disable_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
2838{
2839        lpfc_cancel_retry_delay_tmo(vport, ndlp);
2840        if (ndlp->nlp_state && !list_empty(&ndlp->nlp_listp))
2841                lpfc_nlp_counters(vport, ndlp->nlp_state, -1);
2842        lpfc_nlp_state_cleanup(vport, ndlp, ndlp->nlp_state,
2843                                NLP_STE_UNUSED_NODE);
2844}
2845/**
2846 * lpfc_initialize_node - Initialize all fields of node object
2847 * @vport: Pointer to Virtual Port object.
2848 * @ndlp: Pointer to FC node object.
2849 * @did: FC_ID of the node.
2850 *
2851 * This function is called whenever a node object needs to be initialized.
2852 * It initializes all the fields of the node object. Although the reference
2853 * to phba from @ndlp can be obtained indirectly through its reference to
2854 * @vport, a direct reference to phba is taken here by @ndlp. This is
2855 * because the life-span of @ndlp may extend beyond the existence of
2856 * @vport, since the final release of ndlp is determined by its reference
2857 * count, and operations on @ndlp need the reference to phba.
2858 **/
2859static inline void
2860lpfc_initialize_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2861        uint32_t did)
2862{
2863        INIT_LIST_HEAD(&ndlp->els_retry_evt.evt_listp);
2864        INIT_LIST_HEAD(&ndlp->dev_loss_evt.evt_listp);
2865        init_timer(&ndlp->nlp_delayfunc);
2866        ndlp->nlp_delayfunc.function = lpfc_els_retry_delay;
2867        ndlp->nlp_delayfunc.data = (unsigned long)ndlp;
2868        ndlp->nlp_DID = did;
2869        ndlp->vport = vport;
2870        ndlp->phba = vport->phba;
2871        ndlp->nlp_sid = NLP_NO_SID;
2872        kref_init(&ndlp->kref);
2873        NLP_INT_NODE_ACT(ndlp);
2874        atomic_set(&ndlp->cmd_pending, 0);
2875        ndlp->cmd_qdepth = LPFC_MAX_TGT_QDEPTH;
2876}
2877
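    /*
     * Bring an inactive ndlp back into use: everything except the list
     * linkage is re-initialized and the node is placed in @state.
     */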
2878struct lpfc_nodelist *
2879lpfc_enable_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2880                 int state)
2881{
2882        struct lpfc_hba *phba = vport->phba;
2883        uint32_t did;
2884        unsigned long flags;
2885
2886        if (!ndlp)
2887                return NULL;
2888
2889        spin_lock_irqsave(&phba->ndlp_lock, flags);
2890        /* The ndlp should not be in memory free mode */
2891        if (NLP_CHK_FREE_REQ(ndlp)) {
2892                spin_unlock_irqrestore(&phba->ndlp_lock, flags);
2893                lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE,
2894                                "0277 lpfc_enable_node: ndlp:x%p "
2895                                "usgmap:x%x refcnt:%d\n",
2896                                (void *)ndlp, ndlp->nlp_usg_map,
2897                                atomic_read(&ndlp->kref.refcount));
2898                return NULL;
2899        }
2900        /* The ndlp should not already be in active mode */
2901        if (NLP_CHK_NODE_ACT(ndlp)) {
2902                spin_unlock_irqrestore(&phba->ndlp_lock, flags);
2903                lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE,
2904                                "0278 lpfc_enable_node: ndlp:x%p "
2905                                "usgmap:x%x refcnt:%d\n",
2906                                (void *)ndlp, ndlp->nlp_usg_map,
2907                                atomic_read(&ndlp->kref.refcount));
2908                return NULL;
2909        }
2910
2911        /* Keep the original DID */
2912        did = ndlp->nlp_DID;
2913
2914        /* re-initialize ndlp except for the linked list pointer */
2915        memset((((char *)ndlp) + sizeof (struct list_head)), 0,
2916                sizeof (struct lpfc_nodelist) - sizeof (struct list_head));
2917        lpfc_initialize_node(vport, ndlp, did);
2918
2919        spin_unlock_irqrestore(&phba->ndlp_lock, flags);
2920
2921        if (state != NLP_STE_UNUSED_NODE)
2922                lpfc_nlp_set_state(vport, ndlp, state);
2923
2924        lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_NODE,
2925                "node enable:       did:x%x",
2926                ndlp->nlp_DID, 0, 0);
2927        return ndlp;
2928}
2929
2930void
2931lpfc_drop_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
2932{
2933        /*
2934         * Use of lpfc_drop_node and UNUSED list: lpfc_drop_node should
2935         * be used if we wish to issue the "last" lpfc_nlp_put() to remove
2936         * the ndlp from the vport. The ndlp stays marked as UNUSED on the
2937         * list until ALL other outstanding threads have completed. We check
2938         * that the ndlp is not already in the UNUSED state before we proceed.
2939         */
2940        if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
2941                return;
2942        lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNUSED_NODE);
2943        lpfc_nlp_put(ndlp);
2944        return;
2945}
2946
2947/*
2948 * Start / ReStart rescue timer for Discovery / RSCN handling
2949 */
2950void
2951lpfc_set_disctmo(struct lpfc_vport *vport)
2952{
2953        struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2954        struct lpfc_hba  *phba = vport->phba;
2955        uint32_t tmo;
2956
2957        if (vport->port_state == LPFC_LOCAL_CFG_LINK) {
2958                /* For FAN, timeout should be greater than edtov */
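                    /* fc_edtov is in ms: round up to whole seconds, plus 1 */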
2959                tmo = (((phba->fc_edtov + 999) / 1000) + 1);
2960        } else {
2961                /* Normal discovery timeout should be greater than ELS/CT
2962                 * timeout; FC spec states we need 3 * ratov for CT requests
2963                 */
2964                tmo = ((phba->fc_ratov * 3) + 3);
2965        }
2966
2968        if (!timer_pending(&vport->fc_disctmo)) {
2969                lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
2970                        "set disc timer:  tmo:x%x state:x%x flg:x%x",
2971                        tmo, vport->port_state, vport->fc_flag);
2972        }
2973
2974        mod_timer(&vport->fc_disctmo, jiffies + HZ * tmo);
2975        spin_lock_irq(shost->host_lock);
2976        vport->fc_flag |= FC_DISC_TMO;
2977        spin_unlock_irq(shost->host_lock);
2978
2979        /* Start Discovery Timer state <hba_state> */
2980        lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
2981                         "0247 Start Discovery Timer state x%x "
2982                         "Data: x%x x%lx x%x x%x\n",
2983                         vport->port_state, tmo,
2984                         (unsigned long)&vport->fc_disctmo, vport->fc_plogi_cnt,
2985                         vport->fc_adisc_cnt);
2986
2987        return;
2988}
2989
2990/*
2991 * Cancel rescue timer for Discovery / RSCN handling
2992 */
2993int
2994lpfc_can_disctmo(struct lpfc_vport *vport)
2995{
2996        struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2997        unsigned long iflags;
2998
2999        lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
3000                "can disc timer:  state:x%x rtry:x%x flg:x%x",
3001                vport->port_state, vport->fc_ns_retry, vport->fc_flag);
3002
3003        /* Turn off discovery timer if it's running */
3004        if (vport->fc_flag & FC_DISC_TMO) {
3005                spin_lock_irqsave(shost->host_lock, iflags);
3006                vport->fc_flag &= ~FC_DISC_TMO;
3007                spin_unlock_irqrestore(shost->host_lock, iflags);
3008                del_timer_sync(&vport->fc_disctmo);
3009                spin_lock_irqsave(&vport->work_port_lock, iflags);
3010                vport->work_port_events &= ~WORKER_DISC_TMO;
3011                spin_unlock_irqrestore(&vport->work_port_lock, iflags);
3012        }
3013
3014        /* Cancel Discovery Timer state <hba_state> */
3015        lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
3016                         "0248 Cancel Discovery Timer state x%x "
3017                         "Data: x%x x%x x%x\n",
3018                         vport->port_state, vport->fc_flag,
3019                         vport->fc_plogi_cnt, vport->fc_adisc_cnt);
3020        return 0;
3021}
3022
3023/*
3024 * Check specified ring for outstanding IOCB on the SLI queue
3025 * Return true if iocb matches the specified nport
3026 */
3027int
3028lpfc_check_sli_ndlp(struct lpfc_hba *phba,
3029                    struct lpfc_sli_ring *pring,
3030                    struct lpfc_iocbq *iocb,
3031                    struct lpfc_nodelist *ndlp)
3032{
3033        struct lpfc_sli *psli = &phba->sli;
3034        IOCB_t *icmd = &iocb->iocb;
3035        struct lpfc_vport    *vport = ndlp->vport;
3036
3037        if (iocb->vport != vport)
3038                return 0;
3039
3040        if (pring->ringno == LPFC_ELS_RING) {
3041                switch (icmd->ulpCommand) {
3042                case CMD_GEN_REQUEST64_CR:
3043                        if (iocb->context_un.ndlp == ndlp)
3044                                return 1;
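                            /* fall through */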
3045                case CMD_ELS_REQUEST64_CR:
3046                        if (icmd->un.elsreq64.remoteID == ndlp->nlp_DID)
3047                                return 1;
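                            /* fall through */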
3048                case CMD_XMIT_ELS_RSP64_CX:
3049                        if (iocb->context1 == (uint8_t *) ndlp)
3050                                return 1;
3051                }
3052        } else if (pring->ringno == psli->extra_ring) {
3053
3054        } else if (pring->ringno == psli->fcp_ring) {
3055                /* Skip match check if waiting to relogin to FCP target */
3056                if ((ndlp->nlp_type & NLP_FCP_TARGET) &&
3057                    (ndlp->nlp_flag & NLP_DELAY_TMO)) {
3058                        return 0;
3059                }
3060                if (icmd->ulpContext == (volatile ushort)ndlp->nlp_rpi) {
3061                        return 1;
3062                }
3063        } else if (pring->ringno == psli->next_ring) {
3064
3065        }
3066        return 0;
3067}
3068
3069/*
3070 * Free resources / clean up outstanding I/Os
3071 * associated with nlp_rpi in the LPFC_NODELIST entry.
3072 */
3073static int
3074lpfc_no_rpi(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
3075{
3076        LIST_HEAD(completions);
3077        struct lpfc_sli *psli;
3078        struct lpfc_sli_ring *pring;
3079        struct lpfc_iocbq *iocb, *next_iocb;
3080        uint32_t rpi, i;
3081
3082        lpfc_fabric_abort_nport(ndlp);
3083
3084        /*
3085         * Everything that matches on txcmplq will be returned
3086         * by firmware with a no rpi error.
3087         */
3088        psli = &phba->sli;
3089        rpi = ndlp->nlp_rpi;
3090        if (ndlp->nlp_flag & NLP_RPI_VALID) {
3091                /* Now process each ring */
3092                for (i = 0; i < psli->num_rings; i++) {
3093                        pring = &psli->ring[i];
3094
3095                        spin_lock_irq(&phba->hbalock);
3096                        list_for_each_entry_safe(iocb, next_iocb, &pring->txq,
3097                                                 list) {
3098                                /*
3099                                 * Check to see if iocb matches the nport we are
3100                                 * looking for
3101                                 */
3102                                if ((lpfc_check_sli_ndlp(phba, pring, iocb,
3103                                                         ndlp))) {
3104                                        /* It matches, so dequeue it and call
3105                                           the completion with an error */
3106                                        list_move_tail(&iocb->list,
3107                                                       &completions);
3108                                        pring->txq_cnt--;
3109                                }
3110                        }
3111                        spin_unlock_irq(&phba->hbalock);
3112                }
3113        }
3114
3115        /* Cancel all the IOCBs from the completions list */
3116        lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
3117                              IOERR_SLI_ABORTED);
3118
3119        return 0;
3120}
3121
3122/*
3123 * Free rpi associated with LPFC_NODELIST entry.
3124 * This routine is called from lpfc_freenode(), when we are removing
3125 * a LPFC_NODELIST entry. It is also called if the driver initiates a
3126 * LOGO that completes successfully, and we are waiting to PLOGI back
3127 * to the remote NPort. In addition, it is called after we receive
3128 * an unsolicited ELS cmd, send back a rsp, the rsp completes, and
3129 * we are waiting to PLOGI back to the remote NPort.
3130 */
3131int
3132lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
3133{
3134        struct lpfc_hba *phba = vport->phba;
3135        LPFC_MBOXQ_t    *mbox;
3136        int rc;
3137
3138        if (ndlp->nlp_flag & NLP_RPI_VALID) {
3139                mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
3140                if (mbox) {
3141                        lpfc_unreg_login(phba, vport->vpi, ndlp->nlp_rpi, mbox);
3142                        mbox->vport = vport;
3143                        mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
3144                        rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
3145                        if (rc == MBX_NOT_FINISHED)
3146                                mempool_free(mbox, phba->mbox_mem_pool);
3147                }
3148                lpfc_no_rpi(phba, ndlp);
3149                ndlp->nlp_rpi = 0;
3150                ndlp->nlp_flag &= ~NLP_RPI_VALID;
3151                ndlp->nlp_flag &= ~NLP_NPR_ADISC;
3152                return 1;
3153        }
3154        return 0;
3155}
3156
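    /* Unregister every RPI on the vport with one wildcard (0xffff) unreg_login */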
3157void
3158lpfc_unreg_all_rpis(struct lpfc_vport *vport)
3159{
3160        struct lpfc_hba  *phba  = vport->phba;
3161        LPFC_MBOXQ_t     *mbox;
3162        int rc;
3163
3164        mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
3165        if (mbox) {
3166                lpfc_unreg_login(phba, vport->vpi, 0xffff, mbox);
3167                mbox->vport = vport;
3168                mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
3169                mbox->context1 = NULL;
3170                rc = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO);
3171                if (rc != MBX_TIMEOUT)
3172                        mempool_free(mbox, phba->mbox_mem_pool);
3173
3174                if ((rc == MBX_TIMEOUT) || (rc == MBX_NOT_FINISHED))
3175                        lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX | LOG_VPORT,
3176                                "1836 Could not issue "
3177                                "unreg_login(all_rpis) status %d\n", rc);
3178        }
3179}
3180
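    /* Unregister the firmware-allocated default RPIs with a wildcard unreg_did */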
3181void
3182lpfc_unreg_default_rpis(struct lpfc_vport *vport)
3183{
3184        struct lpfc_hba  *phba  = vport->phba;
3185        LPFC_MBOXQ_t     *mbox;
3186        int rc;
3187
3188        mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
3189        if (mbox) {
3190                lpfc_unreg_did(phba, vport->vpi, 0xffffffff, mbox);
3191                mbox->vport = vport;
3192                mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
3193                mbox->context1 = NULL;
3194                rc = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO);
3195                if (rc != MBX_TIMEOUT)
3196                        mempool_free(mbox, phba->mbox_mem_pool);
3197
3198                if ((rc == MBX_TIMEOUT) || (rc == MBX_NOT_FINISHED))
3199                        lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX | LOG_VPORT,
3200                                         "1815 Could not issue "
3201                                         "unreg_did (default rpis) status %d\n",
3202                                         rc);
3203        }
3204}
3205
3206/*
3207 * Free resources associated with LPFC_NODELIST entry
3208 * so it can be freed.
3209 */
3210static int
3211lpfc_cleanup_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
3212{
3213        struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
3214        struct lpfc_hba  *phba = vport->phba;
3215        LPFC_MBOXQ_t *mb, *nextmb;
3216        struct lpfc_dmabuf *mp;
3217
3218        /* Cleanup node for NPort <nlp_DID> */
3219        lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
3220                         "0900 Cleanup node for NPort x%x "
3221                         "Data: x%x x%x x%x\n",
3222                         ndlp->nlp_DID, ndlp->nlp_flag,
3223                         ndlp->nlp_state, ndlp->nlp_rpi);
3224        if (NLP_CHK_FREE_REQ(ndlp)) {
3225                lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE,
3226                                "0280 lpfc_cleanup_node: ndlp:x%p "
3227                                "usgmap:x%x refcnt:%d\n",
3228                                (void *)ndlp, ndlp->nlp_usg_map,
3229                                atomic_read(&ndlp->kref.refcount));
3230                lpfc_dequeue_node(vport, ndlp);
3231        } else {
3232                lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE,
3233                                "0281 lpfc_cleanup_node: ndlp:x%p "
3234                                "usgmap:x%x refcnt:%d\n",
3235                                (void *)ndlp, ndlp->nlp_usg_map,
3236                                atomic_read(&ndlp->kref.refcount));
3237                lpfc_disable_node(vport, ndlp);
3238        }
3239
3240        /* cleanup any ndlp on mbox q waiting for reglogin cmpl */
3241        if ((mb = phba->sli.mbox_active)) {
3242                if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) &&
3243                   (ndlp == (struct lpfc_nodelist *) mb->context2)) {
3244                        mb->context2 = NULL;
3245                        mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
3246                }
3247        }
3248
3249        spin_lock_irq(&phba->hbalock);
3250        list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) {
3251                if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) &&
3252                    (ndlp == (struct lpfc_nodelist *) mb->context2)) {
3253                        mp = (struct lpfc_dmabuf *) (mb->context1);
3254                        if (mp) {
3255                                __lpfc_mbuf_free(phba, mp->virt, mp->phys);
3256                                kfree(mp);
3257                        }
3258                        list_del(&mb->list);
3259                        mempool_free(mb, phba->mbox_mem_pool);
3260                        /* We shall not invoke the lpfc_nlp_put to decrement
3261                         * the ndlp reference count as we are in the process
3262                         * of lpfc_nlp_release.
3263                         */
3264                }
3265        }
3266        spin_unlock_irq(&phba->hbalock);
3267
3268        lpfc_els_abort(phba, ndlp);
3269
3270        spin_lock_irq(shost->host_lock);
3271        ndlp->nlp_flag &= ~NLP_DELAY_TMO;
3272        spin_unlock_irq(shost->host_lock);
3273
3274        ndlp->nlp_last_elscmd = 0;
3275        del_timer_sync(&ndlp->nlp_delayfunc);
3276
3277        list_del_init(&ndlp->els_retry_evt.evt_listp);
3278        list_del_init(&ndlp->dev_loss_evt.evt_listp);
3279
3280        lpfc_unreg_rpi(vport, ndlp);
3281
3282        return 0;
3283}
3284
3285/*
3286 * Check to see if we can free the nlp back to the freelist.
3287 * If we are in the middle of using the nlp in the discovery state
3288 * machine, defer the free till we reach the end of the state machine.
3289 */
3290static void
3291lpfc_nlp_remove(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
3292{
3293        struct lpfc_hba  *phba = vport->phba;
3294        struct lpfc_rport_data *rdata;
3295        LPFC_MBOXQ_t *mbox;
3296        int rc;
3297
3298        lpfc_cancel_retry_delay_tmo(vport, ndlp);
3299        if ((ndlp->nlp_flag & NLP_DEFER_RM) &&
3300            !(ndlp->nlp_flag & NLP_RPI_VALID)) {
3301                /* For this case we need to cleanup the default rpi
3302                 * allocated by the firmware.
3303                 */
3304                if ((mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL))
3305                        != NULL) {
3306                        rc = lpfc_reg_rpi(phba, vport->vpi, ndlp->nlp_DID,
3307                            (uint8_t *) &vport->fc_sparam, mbox, 0);
3308                        if (rc) {
3309                                mempool_free(mbox, phba->mbox_mem_pool);
3310                        } else {
3312                                mbox->mbox_flag |= LPFC_MBX_IMED_UNREG;
3313                                mbox->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi;
3314                                mbox->vport = vport;
3315                                mbox->context2 = NULL;
3316                                rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
3317                                if (rc == MBX_NOT_FINISHED) {
3318                                        mempool_free(mbox, phba->mbox_mem_pool);
3319                                }
3320                        }
3321                }
3322        }
3323        lpfc_cleanup_node(vport, ndlp);
3324
3325        /*
3326         * We can get here with a non-NULL ndlp->rport because when we
3327         * unregister a rport we don't break the rport/node linkage.  So if
3328         * we do, make sure we don't leave any dangling pointers behind.
3329         */
3330        if (ndlp->rport) {
3331                rdata = ndlp->rport->dd_data;
3332                rdata->pnode = NULL;
3333                ndlp->rport = NULL;
3334        }
3335}
3336
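    /*
     * Match a DID against an ndlp: besides an exact match, handle the
     * cases where either DID carries a zero domain/area.
     */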
3337static int
3338lpfc_matchdid(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
3339              uint32_t did)
3340{
3341        D_ID mydid, ndlpdid, matchdid;
3342
3343        if (did == Bcast_DID)
3344                return 0;
3345
3346        /* First check for Direct match */
3347        if (ndlp->nlp_DID == did)
3348                return 1;
3349
3350        /* The wildcard matches below need a non-zero domain/area in my DID */
3351        mydid.un.word = vport->fc_myDID;
3352        if ((mydid.un.b.domain == 0) && (mydid.un.b.area == 0)) {
3353                return 0;
3354        }
3355
3356        matchdid.un.word = did;
3357        ndlpdid.un.word = ndlp->nlp_DID;
3358        if (matchdid.un.b.id == ndlpdid.un.b.id) {
3359                if ((mydid.un.b.domain == matchdid.un.b.domain) &&
3360                    (mydid.un.b.area == matchdid.un.b.area)) {
3361                        if ((ndlpdid.un.b.domain == 0) &&
3362                            (ndlpdid.un.b.area == 0)) {
3363                                if (ndlpdid.un.b.id)
3364                                        return 1;
3365                        }
3366                        return 0;
3367                }
3368
3369                matchdid.un.word = ndlp->nlp_DID;
3370                if ((mydid.un.b.domain == ndlpdid.un.b.domain) &&
3371                    (mydid.un.b.area == ndlpdid.un.b.area)) {
3372                        if ((matchdid.un.b.domain == 0) &&
3373                            (matchdid.un.b.area == 0)) {
3374                                if (matchdid.un.b.id)
3375                                        return 1;
3376                        }
3377                }
3378        }
3379        return 0;
3380}
3381
3382/* Search for a nodelist entry */
3383static struct lpfc_nodelist *
3384__lpfc_findnode_did(struct lpfc_vport *vport, uint32_t did)
3385{
3386        struct lpfc_nodelist *ndlp;
3387        uint32_t data1;
3388
3389        list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
3390                if (lpfc_matchdid(vport, ndlp, did)) {
3391                        data1 = (((uint32_t) ndlp->nlp_state << 24) |
3392                                 ((uint32_t) ndlp->nlp_xri << 16) |
3393                                 ((uint32_t) ndlp->nlp_type << 8) |
3394                                 ((uint32_t) ndlp->nlp_rpi & 0xff));
3395                        lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
3396                                         "0929 FIND node DID "
3397                                         "Data: x%p x%x x%x x%x\n",
3398                                         ndlp, ndlp->nlp_DID,
3399                                         ndlp->nlp_flag, data1);
3400                        return ndlp;
3401                }
3402        }
3403
3404        /* FIND node did <did> NOT FOUND */
3405        lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
3406                         "0932 FIND node did x%x NOT FOUND.\n", did);
3407        return NULL;
3408}
3409
3410struct lpfc_nodelist *
3411lpfc_findnode_did(struct lpfc_vport *vport, uint32_t did)
3412{
3413        struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
3414        struct lpfc_nodelist *ndlp;
3415
3416        spin_lock_irq(shost->host_lock);
3417        ndlp = __lpfc_findnode_did(vport, did);
3418        spin_unlock_irq(shost->host_lock);
3419        return ndlp;
3420}
3421
3422struct lpfc_nodelist *
3423lpfc_setup_disc_node(struct lpfc_vport *vport, uint32_t did)
3424{
3425        struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
3426        struct lpfc_nodelist *ndlp;
3427
3428        ndlp = lpfc_findnode_did(vport, did);
3429        if (!ndlp) {
3430                if ((vport->fc_flag & FC_RSCN_MODE) != 0 &&
3431                    lpfc_rscn_payload_check(vport, did) == 0)
3432                        return NULL;
3433                ndlp = (struct lpfc_nodelist *)
3434                     mempool_alloc(vport->phba->nlp_mem_pool, GFP_KERNEL);
3435                if (!ndlp)
3436                        return NULL;
3437                lpfc_nlp_init(vport, ndlp, did);
3438                lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
3439                spin_lock_irq(shost->host_lock);
3440                ndlp->nlp_flag |= NLP_NPR_2B_DISC;
3441                spin_unlock_irq(shost->host_lock);
3442                return ndlp;
3443        } else if (!NLP_CHK_NODE_ACT(ndlp)) {
3444                ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_NPR_NODE);
3445                if (!ndlp)
3446                        return NULL;
3447                spin_lock_irq(shost->host_lock);
3448                ndlp->nlp_flag |= NLP_NPR_2B_DISC;
3449                spin_unlock_irq(shost->host_lock);
3450                return ndlp;
3451        }
3452
3453        if ((vport->fc_flag & FC_RSCN_MODE) &&
3454            !(vport->fc_flag & FC_NDISC_ACTIVE)) {
3455                if (lpfc_rscn_payload_check(vport, did)) {
3456                        /* If we've already received a PLOGI from this NPort
3457                         * we don't need to try to discover it again.
3458                         */
3459                        if (ndlp->nlp_flag & NLP_RCV_PLOGI)
3460                                return NULL;
3461
3462                        /* Since this node is marked for discovery,
3463                         * delay timeout is not needed.
3464                         */
3465                        lpfc_cancel_retry_delay_tmo(vport, ndlp);
3466                        spin_lock_irq(shost->host_lock);
3467                        ndlp->nlp_flag |= NLP_NPR_2B_DISC;
3468                        spin_unlock_irq(shost->host_lock);
3469                } else
3470                        ndlp = NULL;
3471        } else {
3472                /* If we've already received a PLOGI from this NPort,
3473                 * or we are already in the process of discovery on it,
3474                 * we don't need to try to discover it again.
3475                 */
3476                if (ndlp->nlp_state == NLP_STE_ADISC_ISSUE ||
3477                    ndlp->nlp_state == NLP_STE_PLOGI_ISSUE ||
3478                    ndlp->nlp_flag & NLP_RCV_PLOGI)
3479                        return NULL;
3480                lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
3481                spin_lock_irq(shost->host_lock);
3482                ndlp->nlp_flag |= NLP_NPR_2B_DISC;
3483                spin_unlock_irq(shost->host_lock);
3484        }
3485        return ndlp;
3486}
3487
3488/* Build a list of nodes to discover based on the loopmap */
3489void
3490lpfc_disc_list_loopmap(struct lpfc_vport *vport)
3491{
3492        struct lpfc_hba  *phba = vport->phba;
3493        int j;
3494        uint32_t alpa, index;
3495
3496        if (!lpfc_is_link_up(phba))
3497                return;
3498
3499        if (phba->fc_topology != TOPOLOGY_LOOP)
3500                return;
3501
3502        /* Check for loop map present or not */
3503        if (phba->alpa_map[0]) {
3504                for (j = 1; j <= phba->alpa_map[0]; j++) {
3505                        alpa = phba->alpa_map[j];
3506                        if (((vport->fc_myDID & 0xff) == alpa) || (alpa == 0))
3507                                continue;
3508                        lpfc_setup_disc_node(vport, alpa);
3509                }
3510        } else {
3511                /* No alpamap, so try all alpa's */
3512                for (j = 0; j < FC_MAXLOOP; j++) {
3513                        /* If cfg_scan_down is set, start from highest
3514                         * ALPA (0xef) to lowest (0x1).
3515                         */
3516                        if (vport->cfg_scan_down)
3517                                index = j;
3518                        else
3519                                index = FC_MAXLOOP - j - 1;
3520                        alpa = lpfcAlpaArray[index];
3521                        if ((vport->fc_myDID & 0xff) == alpa)
3522                                continue;
3523                        lpfc_setup_disc_node(vport, alpa);
3524                }
3525        }
3526        return;
3527}
3528
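    /*
     * Issue a CLEAR_LA mailbox command from the physical port so the
     * firmware resumes link attention processing; on failure, flush the
     * discovery list and release the blocked rings.
     */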
3529void
3530lpfc_issue_clear_la(struct lpfc_hba *phba, struct lpfc_vport *vport)
3531{
3532        LPFC_MBOXQ_t *mbox;
3533        struct lpfc_sli *psli = &phba->sli;
3534        struct lpfc_sli_ring *extra_ring = &psli->ring[psli->extra_ring];
3535        struct lpfc_sli_ring *fcp_ring   = &psli->ring[psli->fcp_ring];
3536        struct lpfc_sli_ring *next_ring  = &psli->ring[psli->next_ring];
3537        int  rc;
3538
3539        /*
3540         * If it's not a physical port, if we have already sent clear_la,
3541         * or if this is an SLI4 HBA, don't send it.
3542         */
3543        if ((phba->link_state >= LPFC_CLEAR_LA) ||
3544            (vport->port_type != LPFC_PHYSICAL_PORT) ||
3545                (phba->sli_rev == LPFC_SLI_REV4))
3546                return;
3547
3548        /* Link up discovery */
3549        if ((mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL)) != NULL) {
3550                phba->link_state = LPFC_CLEAR_LA;
3551                lpfc_clear_la(phba, mbox);
3552                mbox->mbox_cmpl = lpfc_mbx_cmpl_clear_la;
3553                mbox->vport = vport;
3554                rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
3555                if (rc == MBX_NOT_FINISHED) {
3556                        mempool_free(mbox, phba->mbox_mem_pool);
3557                        lpfc_disc_flush_list(vport);
3558                        extra_ring->flag &= ~LPFC_STOP_IOCB_EVENT;
3559                        fcp_ring->flag &= ~LPFC_STOP_IOCB_EVENT;
3560                        next_ring->flag &= ~LPFC_STOP_IOCB_EVENT;
3561                        phba->link_state = LPFC_HBA_ERROR;
3562                }
3563        }
3564}
3565
3566/* Reg_vpi to tell firmware to resume normal operations */
3567void
3568lpfc_issue_reg_vpi(struct lpfc_hba *phba, struct lpfc_vport *vport)
3569{
3570        LPFC_MBOXQ_t *regvpimbox;
3571
3572        regvpimbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
3573        if (regvpimbox) {
3574                lpfc_reg_vpi(vport, regvpimbox);
3575                regvpimbox->mbox_cmpl = lpfc_mbx_cmpl_reg_vpi;
3576                regvpimbox->vport = vport;
3577                if (lpfc_sli_issue_mbox(phba, regvpimbox, MBX_NOWAIT)
3578                                        == MBX_NOT_FINISHED) {
3579                        mempool_free(regvpimbox, phba->mbox_mem_pool);
3580                }
3581        }
3582}
3583
3584/* Start Link up / RSCN discovery on NPR nodes */
3585void
3586lpfc_disc_start(struct lpfc_vport *vport)
3587{
3588        struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
3589        struct lpfc_hba  *phba = vport->phba;
3590        uint32_t num_sent;
3591        uint32_t clear_la_pending;
3592        int did_changed;
3593
3594        if (!lpfc_is_link_up(phba))
3595                return;
3596
3597        if (phba->link_state == LPFC_CLEAR_LA)
3598                clear_la_pending = 1;
3599        else
3600                clear_la_pending = 0;
3601
3602        if (vport->port_state < LPFC_VPORT_READY)
3603                vport->port_state = LPFC_DISC_AUTH;
3604
3605        lpfc_set_disctmo(vport);
3606
3607        if (vport->fc_prevDID == vport->fc_myDID)
3608                did_changed = 0;
3609        else
3610                did_changed = 1;
3611
3612        vport->fc_prevDID = vport->fc_myDID;
3613        vport->num_disc_nodes = 0;
3614
3615        /* Start Discovery state <hba_state> */
3616        lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
3617                         "0202 Start Discovery hba state x%x "
3618                         "Data: x%x x%x x%x\n",
3619                         vport->port_state, vport->fc_flag, vport->fc_plogi_cnt,
3620                         vport->fc_adisc_cnt);
3621
3622        /* First do ADISCs - if any */
3623        num_sent = lpfc_els_disc_adisc(vport);
3624
3625        if (num_sent)
3626                return;
3627
3628        /*
3629         * For SLI3, cmpl_reg_vpi will set port_state to READY, and
3630         * continue discovery.
3631         */
3632        if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
3633            !(vport->fc_flag & FC_PT2PT) &&
3634            !(vport->fc_flag & FC_RSCN_MODE) &&
3635            (phba->sli_rev < LPFC_SLI_REV4)) {
3636                lpfc_issue_reg_vpi(phba, vport);
3637                return;
3638        }
3639
3640        /*
3641         * For SLI2, we need to set port_state to READY and continue
3642         * discovery.
3643         */
3644        if (vport->port_state < LPFC_VPORT_READY && !clear_la_pending) {
3645                /* If we get here, there is nothing to ADISC */
3646                if (vport->port_type == LPFC_PHYSICAL_PORT)
3647                        lpfc_issue_clear_la(phba, vport);
3648
3649                if (!(vport->fc_flag & FC_ABORT_DISCOVERY)) {
3650                        vport->num_disc_nodes = 0;
3651                        /* go thru NPR nodes and issue ELS PLOGIs */
3652                        if (vport->fc_npr_cnt)
3653                                lpfc_els_disc_plogi(vport);
3654
3655                        if (!vport->num_disc_nodes) {
3656                                spin_lock_irq(shost->host_lock);
3657                                vport->fc_flag &= ~FC_NDISC_ACTIVE;
3658                                spin_unlock_irq(shost->host_lock);
3659                                lpfc_can_disctmo(vport);
3660                        }
3661                }
3662                vport->port_state = LPFC_VPORT_READY;
3663        } else {
3664                /* Next do PLOGIs - if any */
3665                num_sent = lpfc_els_disc_plogi(vport);
3666
3667                if (num_sent)
3668                        return;
3669
3670                if (vport->fc_flag & FC_RSCN_MODE) {
3671                        /* Check to see if more RSCNs came in while we
3672                         * were processing this one.
3673                         */
3674                        if ((vport->fc_rscn_id_cnt == 0) &&
3675                            (!(vport->fc_flag & FC_RSCN_DISCOVERY))) {
3676                                spin_lock_irq(shost->host_lock);
3677                                vport->fc_flag &= ~FC_RSCN_MODE;
3678                                spin_unlock_irq(shost->host_lock);
3679                                lpfc_can_disctmo(vport);
3680                        } else
3681                                lpfc_els_handle_rscn(vport);
3682                }
3683        }
3684        return;
3685}
3686
3687/*
3688 *  Ignore completion for all IOCBs on the tx and txcmpl queues of the
3689 *  ELS ring that match the specified nodelist.
3690 */
3691static void
3692lpfc_free_tx(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
3693{
3694        LIST_HEAD(completions);
3695        struct lpfc_sli *psli;
3696        IOCB_t     *icmd;
3697        struct lpfc_iocbq    *iocb, *next_iocb;
3698        struct lpfc_sli_ring *pring;
3699
3700        psli = &phba->sli;
3701        pring = &psli->ring[LPFC_ELS_RING];
3702
3703        /* Fail any matching iocbs on the txq or txcmplq.
3704         * First check the txq.
3705         */
3706        spin_lock_irq(&phba->hbalock);
3707        list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) {
3708                if (iocb->context1 != ndlp) {
3709                        continue;
3710                }
3711                icmd = &iocb->iocb;
3712                if ((icmd->ulpCommand == CMD_ELS_REQUEST64_CR) ||
3713                    (icmd->ulpCommand == CMD_XMIT_ELS_RSP64_CX)) {
3714
3715                        list_move_tail(&iocb->list, &completions);
3716                        pring->txq_cnt--;
3717                }
3718        }
3719
3720        /* Next check the txcmplq */
3721        list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) {
3722                if (iocb->context1 != ndlp) {
3723                        continue;
3724                }
3725                icmd = &iocb->iocb;
3726                if (icmd->ulpCommand == CMD_ELS_REQUEST64_CR ||
3727                    icmd->ulpCommand == CMD_XMIT_ELS_RSP64_CX) {
3728                        lpfc_sli_issue_abort_iotag(phba, pring, iocb);
3729                }
3730        }
3731        spin_unlock_irq(&phba->hbalock);
3732
3733        /* Cancel all the IOCBs from the completions list */
3734        lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
3735                              IOERR_SLI_ABORTED);
3736}
3737
3738static void
3739lpfc_disc_flush_list(struct lpfc_vport *vport)
3740{
3741        struct lpfc_nodelist *ndlp, *next_ndlp;
3742        struct lpfc_hba *phba = vport->phba;
3743
3744        if (vport->fc_plogi_cnt || vport->fc_adisc_cnt) {
3745                list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes,
3746                                         nlp_listp) {
3747                        if (!NLP_CHK_NODE_ACT(ndlp))
3748                                continue;
3749                        if (ndlp->nlp_state == NLP_STE_PLOGI_ISSUE ||
3750                            ndlp->nlp_state == NLP_STE_ADISC_ISSUE) {
3751                                lpfc_free_tx(phba, ndlp);
3752                        }
3753                }
3754        }
3755}
3756
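    /* Flush all discovery state for the vport: pending RSCNs, outstanding
     * ELS commands, and nodes still going through discovery.
     */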
3757void
3758lpfc_cleanup_discovery_resources(struct lpfc_vport *vport)
3759{
3760        lpfc_els_flush_rscn(vport);
3761        lpfc_els_flush_cmd(vport);
3762        lpfc_disc_flush_list(vport);
3763}
3764
3765/*****************************************************************************/
3766/*
3767 * NAME:     lpfc_disc_timeout
3768 *
3769 * FUNCTION: Fibre Channel driver discovery timeout routine.
3770 *
3771 * EXECUTION ENVIRONMENT: interrupt only
3772 *
3773 * CALLED FROM:
3774 *      Timer function
3775 *
3776 * RETURNS:
3777 *      none
3778 */
3779/*****************************************************************************/
3780void
3781lpfc_disc_timeout(unsigned long ptr)
3782{
3783        struct lpfc_vport *vport = (struct lpfc_vport *) ptr;
3784        struct lpfc_hba   *phba = vport->phba;
3785        uint32_t tmo_posted;
3786        unsigned long flags = 0;
3787
3788        if (unlikely(!phba))
3789                return;
3790
3791        spin_lock_irqsave(&vport->work_port_lock, flags);
3792        tmo_posted = vport->work_port_events & WORKER_DISC_TMO;
3793        if (!tmo_posted)
3794                vport->work_port_events |= WORKER_DISC_TMO;
3795        spin_unlock_irqrestore(&vport->work_port_lock, flags);
3796
3797        if (!tmo_posted)
3798                lpfc_worker_wake_up(phba);
3799        return;
3800}
3801
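    /*
     * Worker-thread side of the discovery timeout: the recovery action is
     * chosen by how far discovery had progressed (vport port_state and
     * hba link_state) when the timer fired.
     */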
3802static void
3803lpfc_disc_timeout_handler(struct lpfc_vport *vport)
3804{
3805        struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
3806        struct lpfc_hba  *phba = vport->phba;
3807        struct lpfc_sli  *psli = &phba->sli;
3808        struct lpfc_nodelist *ndlp, *next_ndlp;
3809        LPFC_MBOXQ_t *initlinkmbox;
3810        int rc, clrlaerr = 0;
3811
3812        if (!(vport->fc_flag & FC_DISC_TMO))
3813                return;
3814
3815        spin_lock_irq(shost->host_lock);
3816        vport->fc_flag &= ~FC_DISC_TMO;
3817        spin_unlock_irq(shost->host_lock);
3818
3819        lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
3820                "disc timeout:    state:x%x rtry:x%x flg:x%x",
3821                vport->port_state, vport->fc_ns_retry, vport->fc_flag);
3822
3823        switch (vport->port_state) {
3824
3825        case LPFC_LOCAL_CFG_LINK:
3826        /* port_state is identically LPFC_LOCAL_CFG_LINK while waiting
3827         * for FAN
3828         */
3829                /* FAN timeout */
3830                lpfc_printf_vlog(vport, KERN_WARNING, LOG_DISCOVERY,
3831                                 "0221 FAN timeout\n");
3832                /* Start discovery by sending FLOGI, clean up old rpis */
3833                list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes,
3834                                         nlp_listp) {
3835                        if (!NLP_CHK_NODE_ACT(ndlp))
3836                                continue;
3837                        if (ndlp->nlp_state != NLP_STE_NPR_NODE)
3838                                continue;
3839                        if (ndlp->nlp_type & NLP_FABRIC) {
3840                                /* Clean up the ndlp on Fabric connections */
3841                                lpfc_drop_node(vport, ndlp);
3842
3843                        } else if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) {
3844                                /* Fail outstanding IO now since device
3845                                 * is marked for PLOGI.
3846                                 */
3847                                lpfc_unreg_rpi(vport, ndlp);
3848                        }
3849                }
3850                if (vport->port_state != LPFC_FLOGI) {
3851                        lpfc_initial_flogi(vport);
3852                        return;
3853                }
3854                break;
3855
3856        case LPFC_FDISC:
3857        case LPFC_FLOGI:
3858        /* port_state is identically LPFC_FLOGI while waiting for FLOGI cmpl */
3859                /* Initial FLOGI timeout */
3860                lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
3861                                 "0222 Initial %s timeout\n",
3862                                 vport->vpi ? "FDISC" : "FLOGI");
3863
3864                /* Assume no Fabric and go on with discovery.
3865                 * Check for outstanding ELS FLOGI to abort.
3866                 */
3867
3868                /* FLOGI failed, so just use loop map to make discovery list */
3869                lpfc_disc_list_loopmap(vport);
3870
3871                /* Start discovery */
3872                lpfc_disc_start(vport);
3873                break;
3874
3875        case LPFC_FABRIC_CFG_LINK:
3876        /* port_state is identically LPFC_FABRIC_CFG_LINK while waiting
3877         * for NameServer login */
3878                lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
3879                                 "0223 Timeout while waiting for "
3880                                 "NameServer login\n");
3881                /* Next look for NameServer ndlp */
3882                ndlp = lpfc_findnode_did(vport, NameServer_DID);
3883                if (ndlp && NLP_CHK_NODE_ACT(ndlp))
3884                        lpfc_els_abort(phba, ndlp);
3885
3886                /* ReStart discovery */
3887                goto restart_disc;
3888
3889        case LPFC_NS_QRY:
3890        /* Check for wait for NameServer Rsp timeout */
3891                lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
3892                                 "0224 NameServer Query timeout "
3893                                 "Data: x%x x%x\n",
3894                                 vport->fc_ns_retry, LPFC_MAX_NS_RETRY);
3895
3896                if (vport->fc_ns_retry < LPFC_MAX_NS_RETRY) {
3897                        /* Try it one more time */
3898                        vport->fc_ns_retry++;
3899                        rc = lpfc_ns_cmd(vport, SLI_CTNS_GID_FT,
3900                                         vport->fc_ns_retry, 0);
3901                        if (rc == 0)
3902                                break;
3903                }
3904                vport->fc_ns_retry = 0;
3905
3906restart_disc:
3907                /*
3908                 * Discovery is over.
3909                 * Set port_state to VPORT_READY if SLI2 (NPIV disabled).
3910                 * cmpl_reg_vpi will set port_state to READY for SLI3.
3911                 */
3912                if (phba->sli_rev < LPFC_SLI_REV4) {
3913                        if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
3914                                lpfc_issue_reg_vpi(phba, vport);
3915                        else  { /* NPIV Not enabled */
3916                                lpfc_issue_clear_la(phba, vport);
3917                                vport->port_state = LPFC_VPORT_READY;
3918                        }
3919                }
3920
3921                /* Setup and issue mailbox INITIALIZE LINK command */
3922                initlinkmbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
3923                if (!initlinkmbox) {
3924                        lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
3925                                         "0206 Device Discovery "
3926                                         "completion error\n");
3927                        phba->link_state = LPFC_HBA_ERROR;
3928                        break;
3929                }
3930
3931                lpfc_linkdown(phba);
3932                lpfc_init_link(phba, initlinkmbox, phba->cfg_topology,
3933                               phba->cfg_link_speed);
3934                initlinkmbox->u.mb.un.varInitLnk.lipsr_AL_PA = 0;
3935                initlinkmbox->vport = vport;
3936                initlinkmbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
3937                rc = lpfc_sli_issue_mbox(phba, initlinkmbox, MBX_NOWAIT);
3938                lpfc_set_loopback_flag(phba);
3939                if (rc == MBX_NOT_FINISHED)
3940                        mempool_free(initlinkmbox, phba->mbox_mem_pool);
3941
3942                break;
3943
3944        case LPFC_DISC_AUTH:
3945        /* Node Authentication timeout */
3946                lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
3947                                 "0227 Node Authentication timeout\n");
3948                lpfc_disc_flush_list(vport);
3949
3950                /*
3951                 * Set port_state to VPORT_READY if SLI2 (NPIV disabled).
3952                 * cmpl_reg_vpi will set port_state to READY for SLI3.
3953                 */
3954                if (phba->sli_rev < LPFC_SLI_REV4) {
3955                        if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
3956                                lpfc_issue_reg_vpi(phba, vport);
3957                        else  { /* NPIV Not enabled */
3958                                lpfc_issue_clear_la(phba, vport);
3959                                vport->port_state = LPFC_VPORT_READY;
3960                        }
3961                }
3962                break;
3963
3964        case LPFC_VPORT_READY:
3965                if (vport->fc_flag & FC_RSCN_MODE) {
3966                        lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
3967                                         "0231 RSCN timeout Data: x%x "
3968                                         "x%x\n",
3969                                         vport->fc_ns_retry, LPFC_MAX_NS_RETRY);
3970
3971                        /* Cleanup any outstanding ELS commands */
3972                        lpfc_els_flush_cmd(vport);
3973
3974                        lpfc_els_flush_rscn(vport);
3975                        lpfc_disc_flush_list(vport);
3976                }
3977                break;
3978
3979        default:
3980                lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
3981                                 "0273 Unexpected discovery timeout, "
3982                                 "vport State x%x\n", vport->port_state);
3983                break;
3984        }
3985
3986        switch (phba->link_state) {
3987        case LPFC_CLEAR_LA:
3988                /* CLEAR LA timeout */
3989                lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
3990                                 "0228 CLEAR LA timeout\n");
3991                clrlaerr = 1;
3992                break;
3993
3994        case LPFC_LINK_UP:
3995                lpfc_issue_clear_la(phba, vport);
3996                /* Fall through */
3997        case LPFC_LINK_UNKNOWN:
3998        case LPFC_WARM_START:
3999        case LPFC_INIT_START:
4000        case LPFC_INIT_MBX_CMDS:
4001        case LPFC_LINK_DOWN:
4002        case LPFC_HBA_ERROR:
4003                lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
4004                                 "0230 Unexpected timeout, hba link "
4005                                 "state x%x\n", phba->link_state);
4006                clrlaerr = 1;
4007                break;
4008
4009        case LPFC_HBA_READY:
4010                break;
4011        }
4012
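        /*
         * Any of the error cases above set clrlaerr; recover by flushing
         * the discovery list, re-enabling IOCB processing on all three
         * SLI rings, and forcing the vport back to READY.
         */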
4013        if (clrlaerr) {
4014                lpfc_disc_flush_list(vport);
4015                psli->ring[(psli->extra_ring)].flag &= ~LPFC_STOP_IOCB_EVENT;
4016                psli->ring[(psli->fcp_ring)].flag &= ~LPFC_STOP_IOCB_EVENT;
4017                psli->ring[(psli->next_ring)].flag &= ~LPFC_STOP_IOCB_EVENT;
4018                vport->port_state = LPFC_VPORT_READY;
4019        }
4020
4021        return;
4022}
4023
4024/*
4025 * This routine handles processing a NameServer REG_LOGIN mailbox
4026 * command upon completion. It is setup in the LPFC_MBOXQ
4027 * as the completion routine when the command is
4028 * handed off to the SLI layer.
4029 */
4030void
4031lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
4032{
4033        MAILBOX_t *mb = &pmb->u.mb;
4034        struct lpfc_dmabuf   *mp = (struct lpfc_dmabuf *) (pmb->context1);
4035        struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2;
4036        struct lpfc_vport    *vport = pmb->vport;
4037
4038        pmb->context1 = NULL;
4039
4040        ndlp->nlp_rpi = mb->un.varWords[0];
4041        ndlp->nlp_flag |= NLP_RPI_VALID;
4042        ndlp->nlp_type |= NLP_FABRIC;
4043        lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
4044
4045        /*
4046         * Start issuing the Fabric-Device Management Interface (FDMI)
4047         * command to 0xfffffa (the FDMI well-known port), or delay issuing
4048         * the FDMI command if fdmi-on=2 (supporting RPA/hostname).
4049         */
4050
4051        if (vport->cfg_fdmi_on == 1)
4052                lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_DHBA);
4053        else
4054                mod_timer(&vport->fc_fdmitmo, jiffies + HZ * 60);
4055
4056        /* decrement the node reference count held for this callback
4057         * function.
4058         */
4059        lpfc_nlp_put(ndlp);
4060        lpfc_mbuf_free(phba, mp->virt, mp->phys);
4061        kfree(mp);
4062        mempool_free(pmb, phba->mbox_mem_pool);
4063
4064        return;
4065}
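
/*
 * Illustrative sketch (not part of the driver): pairing a mailbox command
 * with its completion routine follows the same pattern used for the
 * INIT_LINK command in the discovery timeout handler above. The issuer
 * fills in the vport and mbox_cmpl fields before handing the LPFC_MBOXQ_t
 * to the SLI layer, e.g.:
 *
 *      mbox->vport = vport;
 *      mbox->mbox_cmpl = lpfc_mbx_cmpl_fdmi_reg_login;
 *      if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT) == MBX_NOT_FINISHED)
 *              mempool_free(mbox, phba->mbox_mem_pool);
 *
 * On completion, the handler owns the mailbox and any buffers attached
 * through the context fields, and must free them as done above.
 */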
4066
4067static int
4068lpfc_filter_by_rpi(struct lpfc_nodelist *ndlp, void *param)
4069{
4070        uint16_t *rpi = param;
4071
4072        return ndlp->nlp_rpi == *rpi;
4073}
4074
4075static int
4076lpfc_filter_by_wwpn(struct lpfc_nodelist *ndlp, void *param)
4077{
4078        return memcmp(&ndlp->nlp_portname, param,
4079                      sizeof(ndlp->nlp_portname)) == 0;
4080}
4081
4082static struct lpfc_nodelist *
4083__lpfc_find_node(struct lpfc_vport *vport, node_filter filter, void *param)
4084{
4085        struct lpfc_nodelist *ndlp;
4086
4087        list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
4088                if (filter(ndlp, param))
4089                        return ndlp;
4090        }
4091        return NULL;
4092}
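
/*
 * The two wrappers below are the in-tree users of __lpfc_find_node().
 * Any predicate matching the node_filter signature can be plugged in;
 * for example (sketch only, lpfc_filter_by_did is hypothetical):
 *
 *      static int
 *      lpfc_filter_by_did(struct lpfc_nodelist *ndlp, void *param)
 *      {
 *              uint32_t *did = param;
 *              return ndlp->nlp_DID == *did;
 *      }
 *
 * Callers are expected to hold the host lock while the list is walked,
 * as lpfc_findnode_wwpn() does below.
 */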
4093
4094/*
4095 * This routine looks up the ndlp lists for the given RPI. If the RPI is
4096 * found, it returns the node list element pointer; otherwise it returns NULL.
4097 */
4098struct lpfc_nodelist *
4099__lpfc_findnode_rpi(struct lpfc_vport *vport, uint16_t rpi)
4100{
4101        return __lpfc_find_node(vport, lpfc_filter_by_rpi, &rpi);
4102}
4103
4104/*
4105 * This routine looks up the ndlp lists for the given WWPN. If the WWPN is
4106 * found, it returns the node list element pointer; otherwise it returns NULL.
4107 */
4108struct lpfc_nodelist *
4109lpfc_findnode_wwpn(struct lpfc_vport *vport, struct lpfc_name *wwpn)
4110{
4111        struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
4112        struct lpfc_nodelist *ndlp;
4113
4114        spin_lock_irq(shost->host_lock);
4115        ndlp = __lpfc_find_node(vport, lpfc_filter_by_wwpn, wwpn);
4116        spin_unlock_irq(shost->host_lock);
4117        return ndlp;
4118}
4119
4120void
4121lpfc_nlp_init(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
4122              uint32_t did)
4123{
4124        memset(ndlp, 0, sizeof (struct lpfc_nodelist));
4125
4126        lpfc_initialize_node(vport, ndlp, did);
4127        INIT_LIST_HEAD(&ndlp->nlp_listp);
4128
4129        lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_NODE,
4130                "node init:       did:x%x",
4131                ndlp->nlp_DID, 0, 0);
4132
4133        return;
4134}
4135
4136/* This routine releases all resources associated with a specific NPort's
4137 * ndlp and frees the nodelist back to the mempool.
4138 */
4139static void
4140lpfc_nlp_release(struct kref *kref)
4141{
4142        struct lpfc_hba *phba;
4143        unsigned long flags;
4144        struct lpfc_nodelist *ndlp = container_of(kref, struct lpfc_nodelist,
4145                                                  kref);
4146
4147        lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE,
4148                "node release:    did:x%x flg:x%x type:x%x",
4149                ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type);
4150
4151        lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
4152                        "0279 lpfc_nlp_release: ndlp:x%p "
4153                        "usgmap:x%x refcnt:%d\n",
4154                        (void *)ndlp, ndlp->nlp_usg_map,
4155                        atomic_read(&ndlp->kref.refcount));
4156
4157        /* Remove the ndlp from active use. */
4158        lpfc_nlp_remove(ndlp->vport, ndlp);
4159
4160        /* clear the ndlp active flag for all release cases */
4161        phba = ndlp->phba;
4162        spin_lock_irqsave(&phba->ndlp_lock, flags);
4163        NLP_CLR_NODE_ACT(ndlp);
4164        spin_unlock_irqrestore(&phba->ndlp_lock, flags);
4165
4166        /* free ndlp memory for final ndlp release */
4167        if (NLP_CHK_FREE_REQ(ndlp)) {
4168                kfree(ndlp->lat_data);
4169                mempool_free(ndlp, ndlp->phba->nlp_mem_pool);
4170        }
4171}
4172
4173/* This routine bumps the reference count for an ndlp structure to ensure
4174 * that one discovery thread won't free an ndlp while another discovery
4175 * thread is using it.
4176 */
4177struct lpfc_nodelist *
4178lpfc_nlp_get(struct lpfc_nodelist *ndlp)
4179{
4180        struct lpfc_hba *phba;
4181        unsigned long flags;
4182
4183        if (ndlp) {
4184                lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE,
4185                        "node get:        did:x%x flg:x%x refcnt:x%x",
4186                        ndlp->nlp_DID, ndlp->nlp_flag,
4187                        atomic_read(&ndlp->kref.refcount));
4188                /* Check the ndlp usage flags to avoid incrementing the
4189                 * reference count of an ndlp that is in the process of
4190                 * being released.
4191                 */
4192                phba = ndlp->phba;
4193                spin_lock_irqsave(&phba->ndlp_lock, flags);
4194                if (!NLP_CHK_NODE_ACT(ndlp) || NLP_CHK_FREE_ACK(ndlp)) {
4195                        spin_unlock_irqrestore(&phba->ndlp_lock, flags);
4196                        lpfc_printf_vlog(ndlp->vport, KERN_WARNING, LOG_NODE,
4197                                "0276 lpfc_nlp_get: ndlp:x%p "
4198                                "usgmap:x%x refcnt:%d\n",
4199                                (void *)ndlp, ndlp->nlp_usg_map,
4200                                atomic_read(&ndlp->kref.refcount));
4201                        return NULL;
4202                } else
4203                        kref_get(&ndlp->kref);
4204                spin_unlock_irqrestore(&phba->ndlp_lock, flags);
4205        }
4206        return ndlp;
4207}
4208
4209/* This routine decrements the reference count for an ndlp structure. If the
4210 * count goes to 0, this indicates that the associated nodelist should be
4211 * freed. Returning 1 indicates the ndlp resource has been released; on the
4212 * other hand, returning 0 indicates the ndlp resource has not been released
4213 * yet.
4214 */
4215int
4216lpfc_nlp_put(struct lpfc_nodelist *ndlp)
4217{
4218        struct lpfc_hba *phba;
4219        unsigned long flags;
4220
4221        if (!ndlp)
4222                return 1;
4223
4224        lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE,
4225        "node put:        did:x%x flg:x%x refcnt:x%x",
4226                ndlp->nlp_DID, ndlp->nlp_flag,
4227                atomic_read(&ndlp->kref.refcount));
4228        phba = ndlp->phba;
4229        spin_lock_irqsave(&phba->ndlp_lock, flags);
4230        /* Check the ndlp memory free acknowledge flag to avoid a
4231         * possible race in which kref_put is invoked again after a
4232         * previous call has already freed the ndlp memory.
4233         */
4234        if (NLP_CHK_FREE_ACK(ndlp)) {
4235                spin_unlock_irqrestore(&phba->ndlp_lock, flags);
4236                lpfc_printf_vlog(ndlp->vport, KERN_WARNING, LOG_NODE,
4237                                "0274 lpfc_nlp_put: ndlp:x%p "
4238                                "usgmap:x%x refcnt:%d\n",
4239                                (void *)ndlp, ndlp->nlp_usg_map,
4240                                atomic_read(&ndlp->kref.refcount));
4241                return 1;
4242        }
4243        /* Check the ndlp inactivate request flag to avoid a possible
4244         * race in which kref_put is invoked again while the ndlp is
4245         * already being inactivated.
4246         */
4247        if (NLP_CHK_IACT_REQ(ndlp)) {
4248                spin_unlock_irqrestore(&phba->ndlp_lock, flags);
4249                lpfc_printf_vlog(ndlp->vport, KERN_WARNING, LOG_NODE,
4250                                "0275 lpfc_nlp_put: ndlp:x%p "
4251                                "usgmap:x%x refcnt:%d\n",
4252                                (void *)ndlp, ndlp->nlp_usg_map,
4253                                atomic_read(&ndlp->kref.refcount));
4254                return 1;
4255        }
4256        /* For the last put, mark the ndlp usage flags so that no
4257         * other kref_get or kref_put on the same ndlp can slip in
4258         * between now and the completion of the final kref_put on
4259         * this ndlp.
4260         */
4261        if (atomic_read(&ndlp->kref.refcount) == 1) {
4262                /* Indicate ndlp is put to inactive state. */
4263                NLP_SET_IACT_REQ(ndlp);
4264                /* Acknowledge ndlp memory free has been seen. */
4265                if (NLP_CHK_FREE_REQ(ndlp))
4266                        NLP_SET_FREE_ACK(ndlp);
4267        }
4268        spin_unlock_irqrestore(&phba->ndlp_lock, flags);
4269        /* Note: kref_put returns 1 when it is passed a reference
4270         * count of 1; it invokes the release callback function but
4271         * leaves the reference count at 1 (it does not actually
4272         * perform the final decrement). Otherwise it decrements the
4273         * reference count and returns 0.
4274         */
4275        return kref_put(&ndlp->kref, lpfc_nlp_release);
4276}
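
/*
 * Illustrative sketch (not a prescribed API contract): code that keeps an
 * ndlp pointer across a blocking or asynchronous boundary should take its
 * own reference for the duration of use:
 *
 *      ndlp = lpfc_nlp_get(ndlp);
 *      if (ndlp) {
 *              ... use ndlp ...
 *              lpfc_nlp_put(ndlp);
 *      }
 *
 * A NULL return from lpfc_nlp_get() means the node is already being
 * released and must not be used.
 */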
4277
4278/* This routine frees the specified nodelist if it is not in use
4279 * by any other discovery thread. This routine returns 1 if the
4280 * ndlp has been freed. A return value of 0 indicates the ndlp
4281 * has not yet been released.
4282 */
4283int
4284lpfc_nlp_not_used(struct lpfc_nodelist *ndlp)
4285{
4286        lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE,
4287                "node not used:   did:x%x flg:x%x refcnt:x%x",
4288                ndlp->nlp_DID, ndlp->nlp_flag,
4289                atomic_read(&ndlp->kref.refcount));
4290        if (atomic_read(&ndlp->kref.refcount) == 1)
4291                if (lpfc_nlp_put(ndlp))
4292                        return 1;
4293        return 0;
4294}
4295
4296/**
4297 * lpfc_fcf_inuse - Check if FCF can be unregistered.
4298 * @phba: Pointer to hba context object.
4299 *
4300 * This function iterates through all FC nodes associated
4301 * with all vports to check if there is any node with an
4302 * fc_rport associated with it. If there is an fc_rport
4303 * associated with the node, then the node is either in
4304 * discovered state or its devloss_timer is pending.
4305 */
4306static int
4307lpfc_fcf_inuse(struct lpfc_hba *phba)
4308{
4309        struct lpfc_vport **vports;
4310        int i, ret = 0;
4311        struct lpfc_nodelist *ndlp;
4312        struct Scsi_Host  *shost;
4313
4314        vports = lpfc_create_vport_work_array(phba);
4315
4316        for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
4317                shost = lpfc_shost_from_vport(vports[i]);
4318                spin_lock_irq(shost->host_lock);
4319                list_for_each_entry(ndlp, &vports[i]->fc_nodes, nlp_listp) {
4320                        if (NLP_CHK_NODE_ACT(ndlp) && ndlp->rport &&
4321                          (ndlp->rport->roles & FC_RPORT_ROLE_FCP_TARGET)) {
4322                                ret = 1;
4323                                spin_unlock_irq(shost->host_lock);
4324                                goto out;
4325                        }
4326                }
4327                spin_unlock_irq(shost->host_lock);
4328        }
4329out:
4330        lpfc_destroy_vport_work_array(phba, vports);
4331        return ret;
4332}
4333
4334/**
4335 * lpfc_unregister_vfi_cmpl - Completion handler for unreg vfi.
4336 * @phba: Pointer to hba context object.
4337 * @mboxq: Pointer to mailbox object.
4338 *
4339 * This function frees memory associated with the mailbox command.
4340 */
4341static void
4342lpfc_unregister_vfi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
4343{
4344        struct lpfc_vport *vport = mboxq->vport;
4345
4346        if (mboxq->u.mb.mbxStatus) {
4347                lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
4348                        "2555 UNREG_VFI mbxStatus error x%x "
4349                        "HBA state x%x\n",
4350                        mboxq->u.mb.mbxStatus, vport->port_state);
4351        }
4352        mempool_free(mboxq, phba->mbox_mem_pool);
4353        return;
4354}
4355
4356/**
4357 * lpfc_unregister_fcfi_cmpl - Completion handler for unreg fcfi.
4358 * @phba: Pointer to hba context object.
4359 * @mboxq: Pointer to mailbox object.
4360 *
4361 * This function frees memory associated with the mailbox command.
4362 */
4363static void
4364lpfc_unregister_fcfi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
4365{
4366        struct lpfc_vport *vport = mboxq->vport;
4367
4368        if (mboxq->u.mb.mbxStatus) {
4369                lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
4370                        "2550 UNREG_FCFI mbxStatus error x%x "
4371                        "HBA state x%x\n",
4372                        mboxq->u.mb.mbxStatus, vport->port_state);
4373        }
4374        mempool_free(mboxq, phba->mbox_mem_pool);
4375        return;
4376}
4377
4378/**
4379 * lpfc_unregister_unused_fcf - Unregister FCF if all devices are disconnected.
4380 * @phba: Pointer to hba context object.
4381 *
4382 * This function checks if there are any connected remote ports for the FCF
4383 * and, if all the devices are disconnected, unregisters the FCFI.
4384 * This function also tries to use another FCF for discovery.
4385 */
4386void
4387lpfc_unregister_unused_fcf(struct lpfc_hba *phba)
4388{
4389        LPFC_MBOXQ_t *mbox;
4390        int rc;
4391        struct lpfc_vport **vports;
4392        int i;
4393
4394        spin_lock_irq(&phba->hbalock);
4395        /*
4396         * If the HBA is not running in FIP mode, or
4397         * the HBA does not support FCoE, or
4398         * the FCF is not registered,
4399         * then do nothing.
4400         */
4401        if (!(phba->hba_flag & HBA_FCOE_SUPPORT) ||
4402                !(phba->fcf.fcf_flag & FCF_REGISTERED) ||
4403                (phba->cfg_enable_fip == 0)) {
4404                spin_unlock_irq(&phba->hbalock);
4405                return;
4406        }
4407        spin_unlock_irq(&phba->hbalock);
4408
4409        if (lpfc_fcf_inuse(phba))
4410                return;
4411
4412
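        /*
         * Teardown proceeds outside-in: unregister every VPI first,
         * then the fabric VFI, and finally the FCFI itself, before
         * clearing the cached FCF state.
         */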
4413        /* Unregister VPIs */
4414        vports = lpfc_create_vport_work_array(phba);
4415        if (vports &&
4416                (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED))
4417                for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
4418                        lpfc_mbx_unreg_vpi(vports[i]);
4419                        vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
4420                        vports[i]->vfi_state &= ~LPFC_VFI_REGISTERED;
4421                }
4422        lpfc_destroy_vport_work_array(phba, vports);
4423
4424        /* Unregister VFI */
4425        mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4426        if (!mbox) {
4427                lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
4428                        "2556 UNREG_VFI mbox allocation failed, "
4429                        "HBA state x%x\n",
4430                        phba->pport->port_state);
4431                return;
4432        }
4433
4434        lpfc_unreg_vfi(mbox, phba->pport->vfi);
4435        mbox->vport = phba->pport;
4436        mbox->mbox_cmpl = lpfc_unregister_vfi_cmpl;
4437
4438        rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
4439        if (rc == MBX_NOT_FINISHED) {
4440                lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
4441                        "2557 UNREG_VFI issue mbox failed rc x%x "
4442                        "HBA state x%x\n",
4443                        rc, phba->pport->port_state);
4444                mempool_free(mbox, phba->mbox_mem_pool);
4445                return;
4446        }
4447
4448        /* Unregister FCF */
4449        mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4450        if (!mbox) {
4451                lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
4452                        "2551 UNREG_FCFI mbox allocation failed, "
4453                        "HBA state x%x\n",
4454                        phba->pport->port_state);
4455                return;
4456        }
4457
4458        lpfc_unreg_fcfi(mbox, phba->fcf.fcfi);
4459        mbox->vport = phba->pport;
4460        mbox->mbox_cmpl = lpfc_unregister_fcfi_cmpl;
4461        rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
4462
4463        if (rc == MBX_NOT_FINISHED) {
4464                lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
4465                        "2552 UNREG_FCFI issue mbox failed rc x%x "
4466                        "HBA state x%x\n",
4467                        rc, phba->pport->port_state);
4468                mempool_free(mbox, phba->mbox_mem_pool);
4469                return;
4470        }
4471
4472        spin_lock_irq(&phba->hbalock);
4473        phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_REGISTERED |
4474                FCF_DISCOVERED | FCF_BOOT_ENABLE | FCF_IN_USE |
4475                FCF_VALID_VLAN);
4476        spin_unlock_irq(&phba->hbalock);
4477
4478        /*
4479         * If driver is not unloading, check if there is any other
4480         * FCF record that can be used for discovery.
4481         */
4482        if ((phba->pport->load_flag & FC_UNLOADING) ||
4483                (phba->link_state < LPFC_LINK_UP))
4484                return;
4485
4486        rc = lpfc_sli4_read_fcf_record(phba, LPFC_FCOE_FCF_GET_FIRST);
4487
4488        if (rc)
4489                lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
4490                        "2553 lpfc_unregister_unused_fcf failed to read FCF"
4491                        " record HBA state x%x\n",
4492                        phba->pport->port_state);
4493}
4494
4495/**
4496 * lpfc_read_fcf_conn_tbl - Create driver FCF connection table.
4497 * @phba: Pointer to hba context object.
4498 * @buff: Buffer containing the FCF connection table as in the config
4499 *         region.
4500 * This function creates driver data structures for the FCF connection
4501 * record table read from config region 23.
4502 */
4503static void
4504lpfc_read_fcf_conn_tbl(struct lpfc_hba *phba,
4505        uint8_t *buff)
4506{
4507        struct lpfc_fcf_conn_entry *conn_entry, *next_conn_entry;
4508        struct lpfc_fcf_conn_hdr *conn_hdr;
4509        struct lpfc_fcf_conn_rec *conn_rec;
4510        uint32_t record_count;
4511        int i;
4512
4513        /* Free the current connect table */
4514        list_for_each_entry_safe(conn_entry, next_conn_entry,
4515                &phba->fcf_conn_rec_list, list)
4516                kfree(conn_entry);
4517
4518        conn_hdr = (struct lpfc_fcf_conn_hdr *) buff;
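        /*
         * The header's length field counts 32-bit words; converting
         * words to bytes and dividing by the fixed record size gives
         * the number of connection records that follow the header.
         */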
4519        record_count = conn_hdr->length * sizeof(uint32_t)/
4520                sizeof(struct lpfc_fcf_conn_rec);
4521
4522        conn_rec = (struct lpfc_fcf_conn_rec *)
4523                (buff + sizeof(struct lpfc_fcf_conn_hdr));
4524
4525        for (i = 0; i < record_count; i++) {
4526                if (!(conn_rec[i].flags & FCFCNCT_VALID))
4527                        continue;
4528                conn_entry = kzalloc(sizeof(struct lpfc_fcf_conn_entry),
4529                        GFP_KERNEL);
4530                if (!conn_entry) {
4531                        lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4532                                "2566 Failed to allocate connection"
4533                                " table entry\n");
4534                        return;
4535                }
4536
4537                memcpy(&conn_entry->conn_rec, &conn_rec[i],
4538                        sizeof(struct lpfc_fcf_conn_rec));
4539                conn_entry->conn_rec.vlan_tag =
4540                        le16_to_cpu(conn_entry->conn_rec.vlan_tag) & 0xFFF;
4541                conn_entry->conn_rec.flags =
4542                        le16_to_cpu(conn_entry->conn_rec.flags);
4543                list_add_tail(&conn_entry->list,
4544                        &phba->fcf_conn_rec_list);
4545        }
4546}
4547
4548/**
4549 * lpfc_read_fcoe_param - Read FCoE parameters from config region 23.
4550 * @phba: Pointer to hba context object.
4551 * @buff: Buffer containing the FCoE parameter data structure.
4552 *
4553 * This function updates the driver data structure with the config
4554 * parameters read from config region 23.
4555 */
4556static void
4557lpfc_read_fcoe_param(struct lpfc_hba *phba,
4558                        uint8_t *buff)
4559{
4560        struct lpfc_fip_param_hdr *fcoe_param_hdr;
4561        struct lpfc_fcoe_params *fcoe_param;
4562
4563        fcoe_param_hdr = (struct lpfc_fip_param_hdr *)
4564                buff;
4565        fcoe_param = (struct lpfc_fcoe_params *)
4566                (buff + sizeof(struct lpfc_fip_param_hdr));
4567
4568        if ((fcoe_param_hdr->parm_version != FIPP_VERSION) ||
4569                (fcoe_param_hdr->length != FCOE_PARAM_LENGTH))
4570                return;
4571
4572        if (bf_get(lpfc_fip_param_hdr_fipp_mode, fcoe_param_hdr) ==
4573                        FIPP_MODE_ON)
4574                phba->cfg_enable_fip = 1;
4575
4576        if (bf_get(lpfc_fip_param_hdr_fipp_mode, fcoe_param_hdr) ==
4577                FIPP_MODE_OFF)
4578                phba->cfg_enable_fip = 0;
4579
4580        if (fcoe_param_hdr->parm_flags & FIPP_VLAN_VALID) {
4581                phba->valid_vlan = 1;
4582                phba->vlan_id = le16_to_cpu(fcoe_param->vlan_tag) &
4583                        0xFFF;
4584        }
4585
4586        phba->fc_map[0] = fcoe_param->fc_map[0];
4587        phba->fc_map[1] = fcoe_param->fc_map[1];
4588        phba->fc_map[2] = fcoe_param->fc_map[2];
4589        return;
4590}
4591
4592/**
4593 * lpfc_get_rec_conf23 - Get a record type in config region data.
4594 * @buff: Buffer containing config region 23 data.
4595 * @size: Size of the data buffer.
4596 * @rec_type: Record type to be searched.
4597 *
4598 * This function searches config region data to find the beginning
4599 * of the record specified by rec_type. If the record is found, this
4600 * function returns a pointer to the record; otherwise it returns NULL.
4601 */
4602static uint8_t *
4603lpfc_get_rec_conf23(uint8_t *buff, uint32_t size, uint8_t rec_type)
4604{
4605        uint32_t offset = 0, rec_length;
4606
4607        if ((buff[0] == LPFC_REGION23_LAST_REC) ||
4608                (size < sizeof(uint32_t)))
4609                return NULL;
4610
4611        rec_length = buff[offset + 1];
4612
4613        /*
4614         * One TLV record has one word header and number of data words
4615         * specified in the rec_length field of the record header.
4616         */
4617        while ((offset + rec_length * sizeof(uint32_t) + sizeof(uint32_t))
4618                <= size) {
4619                if (buff[offset] == rec_type)
4620                        return &buff[offset];
4621
4622                if (buff[offset] == LPFC_REGION23_LAST_REC)
4623                        return NULL;
4624
4625                offset += rec_length * sizeof(uint32_t) + sizeof(uint32_t);
4626                rec_length = buff[offset + 1];
4627        }
4628        return NULL;
4629}
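
/*
 * Record layout assumed by lpfc_get_rec_conf23(), reconstructed from the
 * walking logic above: each TLV record begins with a one-word header whose
 * first byte is the record type and whose second byte is the data length
 * in 32-bit words, followed by that many data words:
 *
 *      byte 0: type    byte 1: length    bytes 2-3: (not examined)
 *      bytes 4 .. 4 + length * 4 - 1: data words
 *
 * The walk terminates on an LPFC_REGION23_LAST_REC type byte or when the
 * next record would extend past the end of the buffer.
 */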
4630
4631/**
4632 * lpfc_parse_fcoe_conf - Parse FCoE config data read from config region 23.
4633 * @phba: Pointer to lpfc_hba data structure.
4634 * @buff: Buffer containing config region 23 data.
4635 * @size: Size of the data buffer.
4636 *
4637 * This function parses the FCoE config parameters in config region 23 and
4638 * populates the driver data structures with the parameters.
4639 */
4640void
4641lpfc_parse_fcoe_conf(struct lpfc_hba *phba,
4642                uint8_t *buff,
4643                uint32_t size)
4644{
4645        uint32_t offset = 0, rec_length;
4646        uint8_t *rec_ptr;
4647
4648        /*
4649         * If the data size is less than 2 words, the signature and version
4650         * cannot be verified.
4651         */
4652        if (size < 2*sizeof(uint32_t))
4653                return;
4654
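        /*
         * Region 23 layout, as consumed below: a 4-byte signature,
         * a 4-byte version word (only the first byte is checked),
         * then a sequence of TLV records handled by lpfc_get_rec_conf23().
         */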
4655        /* Check the region signature first */
4656        if (memcmp(buff, LPFC_REGION23_SIGNATURE, 4)) {
4657                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4658                        "2567 Config region 23 has bad signature\n");
4659                return;
4660        }
4661
4662        offset += 4;
4663
4664        /* Check the data structure version */
4665        if (buff[offset] != LPFC_REGION23_VERSION) {
4666                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4667                        "2568 Config region 23 has bad version\n");
4668                return;
4669        }
4670        offset += 4;
4671
4672        rec_length = buff[offset + 1];
4673
4674        /* Read FCoE param record */
4675        rec_ptr = lpfc_get_rec_conf23(&buff[offset],
4676                        size - offset, FCOE_PARAM_TYPE);
4677        if (rec_ptr)
4678                lpfc_read_fcoe_param(phba, rec_ptr);
4679
4680        /* Read FCF connection table */
4681        rec_ptr = lpfc_get_rec_conf23(&buff[offset],
4682                size - offset, FCOE_CONN_TBL_TYPE);
4683        if (rec_ptr)
4684                lpfc_read_fcf_conn_tbl(phba, rec_ptr);
4685
4687