linux/drivers/scsi/lpfc/lpfc_sli.c
/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2017-2020 Broadcom. All Rights Reserved. The term *
 * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.     *
 * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.broadcom.com                                                *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/

#include <linux/blkdev.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/lockdep.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>
#include <scsi/fc/fc_fs.h>
#include <linux/aer.h>
#include <linux/crash_dump.h>
#ifdef CONFIG_X86
#include <asm/set_memory.h>
#endif

#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc.h"
#include "lpfc_scsi.h"
#include "lpfc_nvme.h"
#include "lpfc_crtn.h"
#include "lpfc_logmsg.h"
#include "lpfc_compat.h"
#include "lpfc_debugfs.h"
#include "lpfc_vport.h"
#include "lpfc_version.h"

/* There are only four IOCB completion types. */
typedef enum _lpfc_iocb_type {
        LPFC_UNKNOWN_IOCB,
        LPFC_UNSOL_IOCB,
        LPFC_SOL_IOCB,
        LPFC_ABORT_IOCB
} lpfc_iocb_type;


/* Provide function prototypes local to this module. */
static int lpfc_sli_issue_mbox_s4(struct lpfc_hba *, LPFC_MBOXQ_t *,
                                  uint32_t);
static int lpfc_sli4_read_rev(struct lpfc_hba *, LPFC_MBOXQ_t *,
                              uint8_t *, uint32_t *);
static struct lpfc_iocbq *lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *,
                                                         struct lpfc_iocbq *);
static void lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *,
                                      struct hbq_dmabuf *);
static void lpfc_sli4_handle_mds_loopback(struct lpfc_vport *vport,
                                          struct hbq_dmabuf *dmabuf);
static bool lpfc_sli4_fp_handle_cqe(struct lpfc_hba *phba,
                                    struct lpfc_queue *cq, struct lpfc_cqe *cqe);
static int lpfc_sli4_post_sgl_list(struct lpfc_hba *, struct list_head *,
                                   int);
static void lpfc_sli4_hba_handle_eqe(struct lpfc_hba *phba,
                                     struct lpfc_queue *eq,
                                     struct lpfc_eqe *eqe);
static bool lpfc_sli4_mbox_completions_pending(struct lpfc_hba *phba);
static bool lpfc_sli4_process_missed_mbox_completions(struct lpfc_hba *phba);
static struct lpfc_cqe *lpfc_sli4_cq_get(struct lpfc_queue *q);
static void __lpfc_sli4_consume_cqe(struct lpfc_hba *phba,
                                    struct lpfc_queue *cq,
                                    struct lpfc_cqe *cqe);

static IOCB_t *
lpfc_get_iocb_from_iocbq(struct lpfc_iocbq *iocbq)
{
        return &iocbq->iocb;
}

#if defined(CONFIG_64BIT) && defined(__LITTLE_ENDIAN)
/**
 * lpfc_sli4_pcimem_bcopy - SLI4 memory copy function
 * @srcp: Source memory pointer.
 * @destp: Destination memory pointer.
 * @cnt: Number of bytes to copy.
 *       Must be a multiple of sizeof(uint64_t).
 *
 * This function is used for copying data between driver memory
 * and the SLI WQ. It also changes the endianness of each word
 * when the native endianness differs from the SLI endianness.
 * It may be called with or without a lock held.
 **/
static void
lpfc_sli4_pcimem_bcopy(void *srcp, void *destp, uint32_t cnt)
{
        uint64_t *src = srcp;
        uint64_t *dest = destp;
        int i;

        for (i = 0; i < (int)cnt; i += sizeof(uint64_t))
                *dest++ = *src++;
}
#else
#define lpfc_sli4_pcimem_bcopy(a, b, c) lpfc_sli_pcimem_bcopy(a, b, c)
#endif
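
/*
 * A minimal usage sketch (illustrative, not part of the driver): @cnt is a
 * byte count, so copying one queue entry of q->entry_size bytes from driver
 * memory into the ring would look like:
 *
 *      union lpfc_wqe128 wqe;                          // source WQE
 *      void *qe = lpfc_sli4_qe(q, q->host_index);      // ring destination
 *
 *      lpfc_sli4_pcimem_bcopy(&wqe, qe, q->entry_size);
 *
 * where q and the wqe contents are assumptions for the sketch and
 * q->entry_size is a multiple of sizeof(uint64_t).
 */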

/**
 * lpfc_sli4_wq_put - Put a Work Queue Entry on a Work Queue
 * @q: The Work Queue to operate on.
 * @wqe: The Work Queue Entry to put on the Work Queue.
 *
 * This routine will copy the contents of @wqe to the next available entry on
 * the @q. This function will then ring the Work Queue Doorbell to signal the
 * HBA to start processing the Work Queue Entry. This function returns 0 if
 * successful, -ENOMEM if @q is invalid, -EBUSY if no entries are available
 * on @q, or -EINVAL if the doorbell format is unknown.
 * The caller is expected to hold the hbalock when calling this routine.
 **/
static int
lpfc_sli4_wq_put(struct lpfc_queue *q, union lpfc_wqe128 *wqe)
{
        union lpfc_wqe *temp_wqe;
        struct lpfc_register doorbell;
        uint32_t host_index;
        uint32_t idx;
        uint32_t i = 0;
        uint8_t *tmp;
        u32 if_type;

        /* sanity check on queue memory */
        if (unlikely(!q))
                return -ENOMEM;
        temp_wqe = lpfc_sli4_qe(q, q->host_index);

        /* If the host has not yet processed the next entry then we are done */
        idx = ((q->host_index + 1) % q->entry_count);
        if (idx == q->hba_index) {
                q->WQ_overflow++;
                return -EBUSY;
        }
        q->WQ_posted++;
        /* set consumption flag every once in a while */
        if (!((q->host_index + 1) % q->notify_interval))
                bf_set(wqe_wqec, &wqe->generic.wqe_com, 1);
        else
                bf_set(wqe_wqec, &wqe->generic.wqe_com, 0);
        if (q->phba->sli3_options & LPFC_SLI4_PHWQ_ENABLED)
                bf_set(wqe_wqid, &wqe->generic.wqe_com, q->queue_id);
        lpfc_sli4_pcimem_bcopy(wqe, temp_wqe, q->entry_size);
        if (q->dpp_enable && q->phba->cfg_enable_dpp) {
                /* write to DPP aperture taking advantage of Combined Writes */
                tmp = (uint8_t *)temp_wqe;
#ifdef __raw_writeq
                for (i = 0; i < q->entry_size; i += sizeof(uint64_t))
                        __raw_writeq(*((uint64_t *)(tmp + i)),
                                        q->dpp_regaddr + i);
#else
                for (i = 0; i < q->entry_size; i += sizeof(uint32_t))
                        __raw_writel(*((uint32_t *)(tmp + i)),
                                        q->dpp_regaddr + i);
#endif
        }
        /* ensure WQE bcopy and DPP flushed before doorbell write */
        wmb();

        /* Update the host index before invoking device */
        host_index = q->host_index;

        q->host_index = idx;

        /* Ring Doorbell */
        doorbell.word0 = 0;
        if (q->db_format == LPFC_DB_LIST_FORMAT) {
                if (q->dpp_enable && q->phba->cfg_enable_dpp) {
                        bf_set(lpfc_if6_wq_db_list_fm_num_posted, &doorbell, 1);
                        bf_set(lpfc_if6_wq_db_list_fm_dpp, &doorbell, 1);
                        bf_set(lpfc_if6_wq_db_list_fm_dpp_id, &doorbell,
                               q->dpp_id);
                        bf_set(lpfc_if6_wq_db_list_fm_id, &doorbell,
                               q->queue_id);
                } else {
                        bf_set(lpfc_wq_db_list_fm_num_posted, &doorbell, 1);
                        bf_set(lpfc_wq_db_list_fm_id, &doorbell, q->queue_id);

                        /* Leave bits <23:16> clear for if_type 6 dpp */
                        if_type = bf_get(lpfc_sli_intf_if_type,
                                         &q->phba->sli4_hba.sli_intf);
                        if (if_type != LPFC_SLI_INTF_IF_TYPE_6)
                                bf_set(lpfc_wq_db_list_fm_index, &doorbell,
                                       host_index);
                }
        } else if (q->db_format == LPFC_DB_RING_FORMAT) {
                bf_set(lpfc_wq_db_ring_fm_num_posted, &doorbell, 1);
                bf_set(lpfc_wq_db_ring_fm_id, &doorbell, q->queue_id);
        } else {
                return -EINVAL;
        }
        writel(doorbell.word0, q->db_regaddr);

        return 0;
}
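
/*
 * Usage sketch (illustrative, not part of the driver): callers serialize
 * WQE submission with the queue's ring lock and treat -EBUSY as a full
 * ring, e.g.:
 *
 *      spin_lock_irqsave(&pring->ring_lock, iflags);
 *      rc = lpfc_sli4_wq_put(wq, &wqe);
 *      spin_unlock_irqrestore(&pring->ring_lock, iflags);
 *      if (rc == -EBUSY)
 *              ... leave the request queued and retry later ...
 *
 * pring, wq, wqe, iflags and the retry policy here are assumptions for
 * the sketch.
 */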

/**
 * lpfc_sli4_wq_release - Updates internal hba index for WQ
 * @q: The Work Queue to operate on.
 * @index: The index to advance the hba index to.
 *
 * This routine will update the HBA index of a queue to reflect consumption of
 * Work Queue Entries by the HBA. When the HBA indicates that it has consumed
 * an entry the host calls this function to update the queue's internal
 * pointers.
 **/
static void
lpfc_sli4_wq_release(struct lpfc_queue *q, uint32_t index)
{
        /* sanity check on queue memory */
        if (unlikely(!q))
                return;

        q->hba_index = index;
}

/**
 * lpfc_sli4_mq_put - Put a Mailbox Queue Entry on a Mailbox Queue
 * @q: The Mailbox Queue to operate on.
 * @mqe: The Mailbox Queue Entry to put on the Mailbox Queue.
 *
 * This routine will copy the contents of @mqe to the next available entry on
 * the @q. This function will then ring the Mailbox Queue Doorbell to signal
 * the HBA to start processing the Mailbox Queue Entry. This function returns
 * 0 if successful. If no entries are available on @q then this function will
 * return -ENOMEM.
 * The caller is expected to hold the hbalock when calling this routine.
 **/
static int
lpfc_sli4_mq_put(struct lpfc_queue *q, struct lpfc_mqe *mqe)
{
        struct lpfc_mqe *temp_mqe;
        struct lpfc_register doorbell;

        /* sanity check on queue memory */
        if (unlikely(!q))
                return -ENOMEM;
        temp_mqe = lpfc_sli4_qe(q, q->host_index);

        /* If the host has not yet processed the next entry then we are done */
        if (((q->host_index + 1) % q->entry_count) == q->hba_index)
                return -ENOMEM;
        lpfc_sli4_pcimem_bcopy(mqe, temp_mqe, q->entry_size);
        /* Save off the mailbox pointer for completion */
        q->phba->mbox = (MAILBOX_t *)temp_mqe;

        /* Update the host index before invoking device */
        q->host_index = ((q->host_index + 1) % q->entry_count);

        /* Ring Doorbell */
        doorbell.word0 = 0;
        bf_set(lpfc_mq_doorbell_num_posted, &doorbell, 1);
        bf_set(lpfc_mq_doorbell_id, &doorbell, q->queue_id);
        writel(doorbell.word0, q->phba->sli4_hba.MQDBregaddr);
        return 0;
}

/**
 * lpfc_sli4_mq_release - Updates internal hba index for MQ
 * @q: The Mailbox Queue to operate on.
 *
 * This routine will update the HBA index of a queue to reflect consumption of
 * a Mailbox Queue Entry by the HBA. When the HBA indicates that it has consumed
 * an entry the host calls this function to update the queue's internal
 * pointers. This routine returns the number of entries that were consumed by
 * the HBA.
 **/
static uint32_t
lpfc_sli4_mq_release(struct lpfc_queue *q)
{
        /* sanity check on queue memory */
        if (unlikely(!q))
                return 0;

        /* Clear the mailbox pointer for completion */
        q->phba->mbox = NULL;
        q->hba_index = ((q->hba_index + 1) % q->entry_count);
        return 1;
}

/**
 * lpfc_sli4_eq_get - Gets the next valid EQE from an EQ
 * @q: The Event Queue to get the first valid EQE from
 *
 * This routine will get the first valid Event Queue Entry from @q, update
 * the queue's internal hba index, and return the EQE. If no valid EQEs are in
 * the Queue (no more work to do), or the Queue is full of EQEs that have been
 * processed, but not popped back to the HBA then this routine will return NULL.
 **/
static struct lpfc_eqe *
lpfc_sli4_eq_get(struct lpfc_queue *q)
{
        struct lpfc_eqe *eqe;

        /* sanity check on queue memory */
        if (unlikely(!q))
                return NULL;
        eqe = lpfc_sli4_qe(q, q->host_index);

        /* If the next EQE is not valid then we are done */
        if (bf_get_le32(lpfc_eqe_valid, eqe) != q->qe_valid)
                return NULL;

        /*
         * insert barrier for instruction interlock : data from the hardware
         * must have the valid bit checked before it can be copied and acted
         * upon. Speculative instructions were allowing a bcopy at the start
         * of lpfc_sli4_fp_handle_wcqe(), which is called immediately
         * after our return, to copy data before the valid bit check above
         * was done. As such, some of the copied data was stale. The barrier
         * ensures the check is before any data is copied.
         */
        mb();
        return eqe;
}

/**
 * lpfc_sli4_eq_clr_intr - Turn off interrupts from this EQ
 * @q: The Event Queue to disable interrupts
 *
 **/
void
lpfc_sli4_eq_clr_intr(struct lpfc_queue *q)
{
        struct lpfc_register doorbell;

        doorbell.word0 = 0;
        bf_set(lpfc_eqcq_doorbell_eqci, &doorbell, 1);
        bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_EVENT);
        bf_set(lpfc_eqcq_doorbell_eqid_hi, &doorbell,
               (q->queue_id >> LPFC_EQID_HI_FIELD_SHIFT));
        bf_set(lpfc_eqcq_doorbell_eqid_lo, &doorbell, q->queue_id);
        writel(doorbell.word0, q->phba->sli4_hba.EQDBregaddr);
}

/**
 * lpfc_sli4_if6_eq_clr_intr - Turn off interrupts from this EQ
 * @q: The Event Queue to disable interrupts
 *
 **/
void
lpfc_sli4_if6_eq_clr_intr(struct lpfc_queue *q)
{
        struct lpfc_register doorbell;

        doorbell.word0 = 0;
        bf_set(lpfc_if6_eq_doorbell_eqid, &doorbell, q->queue_id);
        writel(doorbell.word0, q->phba->sli4_hba.EQDBregaddr);
}

/**
 * lpfc_sli4_write_eq_db - write EQ DB for eqe's consumed or arm state
 * @phba: adapter with EQ
 * @q: The Event Queue that the host has completed processing for.
 * @count: Number of elements that have been consumed
 * @arm: Indicates whether the host wants to arm this EQ.
 *
 * This routine will notify the HBA, by ringing the doorbell, that count
 * number of EQEs have been processed. The @arm parameter indicates whether
 * the queue should be rearmed when ringing the doorbell.
 **/
void
lpfc_sli4_write_eq_db(struct lpfc_hba *phba, struct lpfc_queue *q,
                      uint32_t count, bool arm)
{
        struct lpfc_register doorbell;

        /* sanity check on queue memory */
        if (unlikely(!q || (count == 0 && !arm)))
                return;

        /* ring doorbell for number popped */
        doorbell.word0 = 0;
        if (arm) {
                bf_set(lpfc_eqcq_doorbell_arm, &doorbell, 1);
                bf_set(lpfc_eqcq_doorbell_eqci, &doorbell, 1);
        }
        bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, count);
        bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_EVENT);
        bf_set(lpfc_eqcq_doorbell_eqid_hi, &doorbell,
               (q->queue_id >> LPFC_EQID_HI_FIELD_SHIFT));
        bf_set(lpfc_eqcq_doorbell_eqid_lo, &doorbell, q->queue_id);
        writel(doorbell.word0, q->phba->sli4_hba.EQDBregaddr);
        /* PCI read to flush PCI pipeline on re-arming for INTx mode */
        if ((q->phba->intr_type == INTx) && (arm == LPFC_QUEUE_REARM))
                readl(q->phba->sli4_hba.EQDBregaddr);
}

/**
 * lpfc_sli4_if6_write_eq_db - write EQ DB for eqe's consumed or arm state
 * @phba: adapter with EQ
 * @q: The Event Queue that the host has completed processing for.
 * @count: Number of elements that have been consumed
 * @arm: Indicates whether the host wants to arm this EQ.
 *
 * This routine will notify the HBA, by ringing the doorbell, that count
 * number of EQEs have been processed. The @arm parameter indicates whether
 * the queue should be rearmed when ringing the doorbell.
 **/
void
lpfc_sli4_if6_write_eq_db(struct lpfc_hba *phba, struct lpfc_queue *q,
                          uint32_t count, bool arm)
{
        struct lpfc_register doorbell;

        /* sanity check on queue memory */
        if (unlikely(!q || (count == 0 && !arm)))
                return;

        /* ring doorbell for number popped */
        doorbell.word0 = 0;
        if (arm)
                bf_set(lpfc_if6_eq_doorbell_arm, &doorbell, 1);
        bf_set(lpfc_if6_eq_doorbell_num_released, &doorbell, count);
        bf_set(lpfc_if6_eq_doorbell_eqid, &doorbell, q->queue_id);
        writel(doorbell.word0, q->phba->sli4_hba.EQDBregaddr);
        /* PCI read to flush PCI pipeline on re-arming for INTx mode */
        if ((q->phba->intr_type == INTx) && (arm == LPFC_QUEUE_REARM))
                readl(q->phba->sli4_hba.EQDBregaddr);
}

static void
__lpfc_sli4_consume_eqe(struct lpfc_hba *phba, struct lpfc_queue *eq,
                        struct lpfc_eqe *eqe)
{
        if (!phba->sli4_hba.pc_sli4_params.eqav)
                bf_set_le32(lpfc_eqe_valid, eqe, 0);

        eq->host_index = ((eq->host_index + 1) % eq->entry_count);

        /* if the index wrapped around, toggle the valid bit */
        if (phba->sli4_hba.pc_sli4_params.eqav && !eq->host_index)
                eq->qe_valid = (eq->qe_valid) ? 0 : 1;
}
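
/*
 * Worked example (illustrative): on EQ-autovalid (eqav) hardware the EQE
 * valid bit is not cleared per entry, so validity is tracked by phase.
 * With eq->entry_count = 256, consuming the EQE at host_index 255 wraps
 * host_index to 0 and flips eq->qe_valid; entries written on the previous
 * lap then fail the lpfc_eqe_valid check in lpfc_sli4_eq_get() and are
 * treated as empty until the HBA rewrites them with the new phase.
 */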

static void
lpfc_sli4_eqcq_flush(struct lpfc_hba *phba, struct lpfc_queue *eq)
{
        struct lpfc_eqe *eqe = NULL;
        u32 eq_count = 0, cq_count = 0;
        struct lpfc_cqe *cqe = NULL;
        struct lpfc_queue *cq = NULL, *childq = NULL;
        int cqid = 0;

        /* walk all the EQ entries and drop on the floor */
        eqe = lpfc_sli4_eq_get(eq);
        while (eqe) {
                /* Get the reference to the corresponding CQ */
                cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);
                cq = NULL;

                list_for_each_entry(childq, &eq->child_list, list) {
                        if (childq->queue_id == cqid) {
                                cq = childq;
                                break;
                        }
                }
                /* If CQ is valid, iterate through it and drop all the CQEs */
                if (cq) {
                        cqe = lpfc_sli4_cq_get(cq);
                        while (cqe) {
                                __lpfc_sli4_consume_cqe(phba, cq, cqe);
                                cq_count++;
                                cqe = lpfc_sli4_cq_get(cq);
                        }
                        /* Clear and re-arm the CQ */
                        phba->sli4_hba.sli4_write_cq_db(phba, cq, cq_count,
                            LPFC_QUEUE_REARM);
                        cq_count = 0;
                }
                __lpfc_sli4_consume_eqe(phba, eq, eqe);
                eq_count++;
                eqe = lpfc_sli4_eq_get(eq);
        }

        /* Clear and re-arm the EQ */
        phba->sli4_hba.sli4_write_eq_db(phba, eq, eq_count, LPFC_QUEUE_REARM);
}

static int
lpfc_sli4_process_eq(struct lpfc_hba *phba, struct lpfc_queue *eq,
                     uint8_t rearm)
{
        struct lpfc_eqe *eqe;
        int count = 0, consumed = 0;

        if (cmpxchg(&eq->queue_claimed, 0, 1) != 0)
                goto rearm_and_exit;

        eqe = lpfc_sli4_eq_get(eq);
        while (eqe) {
                lpfc_sli4_hba_handle_eqe(phba, eq, eqe);
                __lpfc_sli4_consume_eqe(phba, eq, eqe);

                consumed++;
                if (!(++count % eq->max_proc_limit))
                        break;

                if (!(count % eq->notify_interval)) {
                        phba->sli4_hba.sli4_write_eq_db(phba, eq, consumed,
                                                        LPFC_QUEUE_NOARM);
                        consumed = 0;
                }

                eqe = lpfc_sli4_eq_get(eq);
        }
        eq->EQ_processed += count;

        /* Track the max number of EQEs processed in 1 intr */
        if (count > eq->EQ_max_eqe)
                eq->EQ_max_eqe = count;

        xchg(&eq->queue_claimed, 0);

rearm_and_exit:
        /* Always clear the EQ. */
        phba->sli4_hba.sli4_write_eq_db(phba, eq, consumed, rearm);

        return count;
}
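
/*
 * Worked example (illustrative): with eq->notify_interval = 16 and
 * eq->max_proc_limit = 256 (values assumed for the example),
 * lpfc_sli4_process_eq() rings a NOARM doorbell after every 16 consumed
 * EQEs, releasing them back to the HBA without re-enabling the interrupt,
 * and breaks out after 256 EQEs so one EQ cannot monopolize the CPU. The
 * final doorbell write under rearm_and_exit then releases any remainder
 * and applies the caller's @rearm policy.
 */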

/**
 * lpfc_sli4_cq_get - Gets the next valid CQE from a CQ
 * @q: The Completion Queue to get the first valid CQE from
 *
 * This routine will get the first valid Completion Queue Entry from @q, update
 * the queue's internal hba index, and return the CQE. If no valid CQEs are in
 * the Queue (no more work to do), or the Queue is full of CQEs that have been
 * processed, but not popped back to the HBA then this routine will return NULL.
 **/
static struct lpfc_cqe *
lpfc_sli4_cq_get(struct lpfc_queue *q)
{
        struct lpfc_cqe *cqe;

        /* sanity check on queue memory */
        if (unlikely(!q))
                return NULL;
        cqe = lpfc_sli4_qe(q, q->host_index);

        /* If the next CQE is not valid then we are done */
        if (bf_get_le32(lpfc_cqe_valid, cqe) != q->qe_valid)
                return NULL;

        /*
         * insert barrier for instruction interlock : data from the hardware
         * must have the valid bit checked before it can be copied and acted
         * upon. Given what was seen in lpfc_sli4_eq_get() of speculative
         * instructions allowing action on content before the valid bit was
         * checked, add the barrier here as well.
         */
        mb();
        return cqe;
}

static void
__lpfc_sli4_consume_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
                        struct lpfc_cqe *cqe)
{
        if (!phba->sli4_hba.pc_sli4_params.cqav)
                bf_set_le32(lpfc_cqe_valid, cqe, 0);

        cq->host_index = ((cq->host_index + 1) % cq->entry_count);

        /* if the index wrapped around, toggle the valid bit */
        if (phba->sli4_hba.pc_sli4_params.cqav && !cq->host_index)
                cq->qe_valid = (cq->qe_valid) ? 0 : 1;
}

/**
 * lpfc_sli4_write_cq_db - write cq DB for entries consumed or arm state.
 * @phba: the adapter with the CQ
 * @q: The Completion Queue that the host has completed processing for.
 * @count: the number of elements that were consumed
 * @arm: Indicates whether the host wants to arm this CQ.
 *
 * This routine will notify the HBA, by ringing the doorbell, that the
 * CQEs have been processed. The @arm parameter specifies whether the
 * queue should be rearmed when ringing the doorbell.
 **/
void
lpfc_sli4_write_cq_db(struct lpfc_hba *phba, struct lpfc_queue *q,
                      uint32_t count, bool arm)
{
        struct lpfc_register doorbell;

        /* sanity check on queue memory */
        if (unlikely(!q || (count == 0 && !arm)))
                return;

        /* ring doorbell for number popped */
        doorbell.word0 = 0;
        if (arm)
                bf_set(lpfc_eqcq_doorbell_arm, &doorbell, 1);
        bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, count);
        bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_COMPLETION);
        bf_set(lpfc_eqcq_doorbell_cqid_hi, &doorbell,
               (q->queue_id >> LPFC_CQID_HI_FIELD_SHIFT));
        bf_set(lpfc_eqcq_doorbell_cqid_lo, &doorbell, q->queue_id);
        writel(doorbell.word0, q->phba->sli4_hba.CQDBregaddr);
}

/**
 * lpfc_sli4_if6_write_cq_db - write cq DB for entries consumed or arm state.
 * @phba: the adapter with the CQ
 * @q: The Completion Queue that the host has completed processing for.
 * @count: the number of elements that were consumed
 * @arm: Indicates whether the host wants to arm this CQ.
 *
 * This routine will notify the HBA, by ringing the doorbell, that the
 * CQEs have been processed. The @arm parameter specifies whether the
 * queue should be rearmed when ringing the doorbell.
 **/
void
lpfc_sli4_if6_write_cq_db(struct lpfc_hba *phba, struct lpfc_queue *q,
                          uint32_t count, bool arm)
{
        struct lpfc_register doorbell;

        /* sanity check on queue memory */
        if (unlikely(!q || (count == 0 && !arm)))
                return;

        /* ring doorbell for number popped */
        doorbell.word0 = 0;
        if (arm)
                bf_set(lpfc_if6_cq_doorbell_arm, &doorbell, 1);
        bf_set(lpfc_if6_cq_doorbell_num_released, &doorbell, count);
        bf_set(lpfc_if6_cq_doorbell_cqid, &doorbell, q->queue_id);
        writel(doorbell.word0, q->phba->sli4_hba.CQDBregaddr);
}

/**
 * lpfc_sli4_rq_put - Put a Receive Buffer Queue Entry on a Receive Queue
 * @hq: The Header Receive Queue to operate on.
 * @dq: The Data Receive Queue to operate on.
 * @hrqe: The Header Receive Queue Entry to put on @hq.
 * @drqe: The Data Receive Queue Entry to put on @dq.
 *
 * This routine will copy the contents of @hrqe and @drqe to the next
 * available entries on @hq and @dq. This function will then ring the Receive
 * Queue Doorbell to signal the HBA to start processing the Receive Queue
 * Entries. This function returns the index that the hrqe was copied to if
 * successful, -ENOMEM if either queue is missing, -EINVAL if the queues are
 * mismatched, or -EBUSY if no entries are available.
 * The caller is expected to hold the hbalock when calling this routine.
 **/
int
lpfc_sli4_rq_put(struct lpfc_queue *hq, struct lpfc_queue *dq,
                 struct lpfc_rqe *hrqe, struct lpfc_rqe *drqe)
{
        struct lpfc_rqe *temp_hrqe;
        struct lpfc_rqe *temp_drqe;
        struct lpfc_register doorbell;
        int hq_put_index;
        int dq_put_index;

        /* sanity check on queue memory */
        if (unlikely(!hq) || unlikely(!dq))
                return -ENOMEM;
        hq_put_index = hq->host_index;
        dq_put_index = dq->host_index;
        temp_hrqe = lpfc_sli4_qe(hq, hq_put_index);
        temp_drqe = lpfc_sli4_qe(dq, dq_put_index);

        if (hq->type != LPFC_HRQ || dq->type != LPFC_DRQ)
                return -EINVAL;
        if (hq_put_index != dq_put_index)
                return -EINVAL;
        /* If the host has not yet processed the next entry then we are done */
        if (((hq_put_index + 1) % hq->entry_count) == hq->hba_index)
                return -EBUSY;
        lpfc_sli4_pcimem_bcopy(hrqe, temp_hrqe, hq->entry_size);
        lpfc_sli4_pcimem_bcopy(drqe, temp_drqe, dq->entry_size);

        /* Update the host index to point to the next slot */
        hq->host_index = ((hq_put_index + 1) % hq->entry_count);
        dq->host_index = ((dq_put_index + 1) % dq->entry_count);
        hq->RQ_buf_posted++;

        /* Ring The Header Receive Queue Doorbell */
        if (!(hq->host_index % hq->notify_interval)) {
                doorbell.word0 = 0;
                if (hq->db_format == LPFC_DB_RING_FORMAT) {
                        bf_set(lpfc_rq_db_ring_fm_num_posted, &doorbell,
                               hq->notify_interval);
                        bf_set(lpfc_rq_db_ring_fm_id, &doorbell, hq->queue_id);
                } else if (hq->db_format == LPFC_DB_LIST_FORMAT) {
                        bf_set(lpfc_rq_db_list_fm_num_posted, &doorbell,
                               hq->notify_interval);
                        bf_set(lpfc_rq_db_list_fm_index, &doorbell,
                               hq->host_index);
                        bf_set(lpfc_rq_db_list_fm_id, &doorbell, hq->queue_id);
                } else {
                        return -EINVAL;
                }
                writel(doorbell.word0, hq->db_regaddr);
        }
        return hq_put_index;
}
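
/*
 * Usage sketch (illustrative, not part of the driver): receive buffers are
 * posted as header/data pairs, with each RQE carrying the DMA address of
 * its buffer:
 *
 *      struct lpfc_rqe hrqe, drqe;
 *      int rc;
 *
 *      hrqe.address_lo = putPaddrLow(hbuf_dma);
 *      hrqe.address_hi = putPaddrHigh(hbuf_dma);
 *      drqe.address_lo = putPaddrLow(dbuf_dma);
 *      drqe.address_hi = putPaddrHigh(dbuf_dma);
 *      rc = lpfc_sli4_rq_put(hrq, drq, &hrqe, &drqe);
 *
 * hrq, drq, hbuf_dma and dbuf_dma are assumptions for the sketch; a
 * negative rc means the pair was not posted.
 */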

/**
 * lpfc_sli4_rq_release - Updates internal hba index for RQ
 * @hq: The Header Receive Queue to operate on.
 * @dq: The Data Receive Queue to operate on.
 *
 * This routine will update the HBA index of a queue to reflect consumption of
 * one Receive Queue Entry by the HBA. When the HBA indicates that it has
 * consumed an entry the host calls this function to update the queue's
 * internal pointers. This routine returns the number of entries that were
 * consumed by the HBA.
 **/
static uint32_t
lpfc_sli4_rq_release(struct lpfc_queue *hq, struct lpfc_queue *dq)
{
        /* sanity check on queue memory */
        if (unlikely(!hq) || unlikely(!dq))
                return 0;

        if ((hq->type != LPFC_HRQ) || (dq->type != LPFC_DRQ))
                return 0;
        hq->hba_index = ((hq->hba_index + 1) % hq->entry_count);
        dq->hba_index = ((dq->hba_index + 1) % dq->entry_count);
        return 1;
}

/**
 * lpfc_cmd_iocb - Get next command iocb entry in the ring
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function returns a pointer to the next command iocb entry
 * in the command ring. The caller must hold the hbalock to prevent
 * other threads from consuming the next command iocb.
 * SLI-2/SLI-3 provide different sized iocbs.
 **/
static inline IOCB_t *
lpfc_cmd_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
        return (IOCB_t *) (((char *) pring->sli.sli3.cmdringaddr) +
                           pring->sli.sli3.cmdidx * phba->iocb_cmd_size);
}

/**
 * lpfc_resp_iocb - Get next response iocb entry in the ring
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function returns a pointer to the next response iocb entry
 * in the response ring. The caller must hold the hbalock to make sure
 * that no other thread consumes the next response iocb.
 * SLI-2/SLI-3 provide different sized iocbs.
 **/
static inline IOCB_t *
lpfc_resp_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
        return (IOCB_t *) (((char *) pring->sli.sli3.rspringaddr) +
                           pring->sli.sli3.rspidx * phba->iocb_rsp_size);
}

/**
 * __lpfc_sli_get_iocbq - Allocates an iocb object from iocb pool
 * @phba: Pointer to HBA context object.
 *
 * This function is called with hbalock held. This function
 * allocates a new driver iocb object from the iocb pool. If the
 * allocation is successful, it returns a pointer to the newly
 * allocated iocb object; otherwise it returns NULL.
 **/
struct lpfc_iocbq *
__lpfc_sli_get_iocbq(struct lpfc_hba *phba)
{
        struct list_head *lpfc_iocb_list = &phba->lpfc_iocb_list;
        struct lpfc_iocbq *iocbq = NULL;

        lockdep_assert_held(&phba->hbalock);

        list_remove_head(lpfc_iocb_list, iocbq, struct lpfc_iocbq, list);
        if (iocbq)
                phba->iocb_cnt++;
        if (phba->iocb_cnt > phba->iocb_max)
                phba->iocb_max = phba->iocb_cnt;
        return iocbq;
}

/**
 * __lpfc_clear_active_sglq - Remove the active sglq for this XRI.
 * @phba: Pointer to HBA context object.
 * @xritag: XRI value.
 *
 * This function clears the sglq pointer from the array of active
 * sglq's. The xritag that is passed in is used to index into the
 * array. Before the xritag can be used it needs to be adjusted
 * by subtracting the xribase.
 *
 * Returns sglq pointer = success, NULL = Failure.
 **/
struct lpfc_sglq *
__lpfc_clear_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
{
        struct lpfc_sglq *sglq;

        sglq = phba->sli4_hba.lpfc_sglq_active_list[xritag];
        phba->sli4_hba.lpfc_sglq_active_list[xritag] = NULL;
        return sglq;
}

/**
 * __lpfc_get_active_sglq - Get the active sglq for this XRI.
 * @phba: Pointer to HBA context object.
 * @xritag: XRI value.
 *
 * This function returns the sglq pointer from the array of active
 * sglq's. The xritag that is passed in is used to index into the
 * array. Before the xritag can be used it needs to be adjusted
 * by subtracting the xribase.
 *
 * Returns sglq pointer = success, NULL = Failure.
 **/
struct lpfc_sglq *
__lpfc_get_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
{
        struct lpfc_sglq *sglq;

        sglq = phba->sli4_hba.lpfc_sglq_active_list[xritag];
        return sglq;
}

/**
 * lpfc_clr_rrq_active - Clears RRQ active bit in xri_bitmap.
 * @phba: Pointer to HBA context object.
 * @xritag: xri used in this exchange.
 * @rrq: The RRQ to be cleared.
 *
 **/
void
lpfc_clr_rrq_active(struct lpfc_hba *phba,
                    uint16_t xritag,
                    struct lpfc_node_rrq *rrq)
{
        struct lpfc_nodelist *ndlp = NULL;

        if ((rrq->vport) && NLP_CHK_NODE_ACT(rrq->ndlp))
                ndlp = lpfc_findnode_did(rrq->vport, rrq->nlp_DID);

        /* The target DID could have been swapped (cable swap),
         * so we should use the ndlp from the findnode if it is
         * available.
         */
        if ((!ndlp) && rrq->ndlp)
                ndlp = rrq->ndlp;

        if (!ndlp)
                goto out;

        if (test_and_clear_bit(xritag, ndlp->active_rrqs_xri_bitmap)) {
                rrq->send_rrq = 0;
                rrq->xritag = 0;
                rrq->rrq_stop_time = 0;
        }
out:
        mempool_free(rrq, phba->rrq_pool);
}

/**
 * lpfc_handle_rrq_active - Checks if RRQ has waited RATOV.
 * @phba: Pointer to HBA context object.
 *
 * This function checks whether the stop_time (ratov from setting rrq
 * active) has been reached for each active RRQ. If it has and the
 * send_rrq flag is set, it calls lpfc_send_rrq; otherwise it just calls
 * the routine to clear the rrq and free the rrq resource.
 * The timer is set to the next rrq that is going to expire before
 * leaving the routine.
 **/
void
lpfc_handle_rrq_active(struct lpfc_hba *phba)
{
        struct lpfc_node_rrq *rrq;
        struct lpfc_node_rrq *nextrrq;
        unsigned long next_time;
        unsigned long iflags;
        LIST_HEAD(send_rrq);

        spin_lock_irqsave(&phba->hbalock, iflags);
        phba->hba_flag &= ~HBA_RRQ_ACTIVE;
        next_time = jiffies + msecs_to_jiffies(1000 * (phba->fc_ratov + 1));
        list_for_each_entry_safe(rrq, nextrrq,
                                 &phba->active_rrq_list, list) {
                if (time_after(jiffies, rrq->rrq_stop_time))
                        list_move(&rrq->list, &send_rrq);
                else if (time_before(rrq->rrq_stop_time, next_time))
                        next_time = rrq->rrq_stop_time;
        }
        spin_unlock_irqrestore(&phba->hbalock, iflags);
        if ((!list_empty(&phba->active_rrq_list)) &&
            (!(phba->pport->load_flag & FC_UNLOADING)))
                mod_timer(&phba->rrq_tmr, next_time);
        list_for_each_entry_safe(rrq, nextrrq, &send_rrq, list) {
                list_del(&rrq->list);
                if (!rrq->send_rrq) {
                        /* this call will free the rrq */
                        lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
                } else if (lpfc_send_rrq(phba, rrq)) {
                        /* if the rrq was sent, the completion handler
                         * clears the bit in the xribitmap; on a send
                         * failure clear it here.
                         */
                        lpfc_clr_rrq_active(phba, rrq->xritag,
                                            rrq);
                }
        }
}

/**
 * lpfc_get_active_rrq - Get the active RRQ for this exchange.
 * @vport: Pointer to vport context object.
 * @xri: The xri used in the exchange.
 * @did: The target's DID for this exchange.
 *
 * returns NULL = rrq not found in the phba->active_rrq_list.
 *         rrq = rrq for this xri and target.
 **/
struct lpfc_node_rrq *
lpfc_get_active_rrq(struct lpfc_vport *vport, uint16_t xri, uint32_t did)
{
        struct lpfc_hba *phba = vport->phba;
        struct lpfc_node_rrq *rrq;
        struct lpfc_node_rrq *nextrrq;
        unsigned long iflags;

        if (phba->sli_rev != LPFC_SLI_REV4)
                return NULL;
        spin_lock_irqsave(&phba->hbalock, iflags);
        list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list) {
                if (rrq->vport == vport && rrq->xritag == xri &&
                    rrq->nlp_DID == did) {
                        list_del(&rrq->list);
                        spin_unlock_irqrestore(&phba->hbalock, iflags);
                        return rrq;
                }
        }
        spin_unlock_irqrestore(&phba->hbalock, iflags);
        return NULL;
}

/**
 * lpfc_cleanup_vports_rrqs - Remove and clear the active RRQs for this vport.
 * @vport: Pointer to vport context object.
 * @ndlp: Pointer to the lpfc_nodelist structure.
 *
 * If @ndlp is NULL, remove all active RRQs for this vport from the
 * phba->active_rrq_list and clear them.
 * If @ndlp is not NULL, only remove the RRQs for this vport and this @ndlp.
 **/
void
lpfc_cleanup_vports_rrqs(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
        struct lpfc_hba *phba = vport->phba;
        struct lpfc_node_rrq *rrq;
        struct lpfc_node_rrq *nextrrq;
        unsigned long iflags;
        LIST_HEAD(rrq_list);

        if (phba->sli_rev != LPFC_SLI_REV4)
                return;
        if (!ndlp) {
                lpfc_sli4_vport_delete_els_xri_aborted(vport);
                lpfc_sli4_vport_delete_fcp_xri_aborted(vport);
        }
        spin_lock_irqsave(&phba->hbalock, iflags);
        list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list)
                if ((rrq->vport == vport) && (!ndlp || rrq->ndlp == ndlp))
                        list_move(&rrq->list, &rrq_list);
        spin_unlock_irqrestore(&phba->hbalock, iflags);

        list_for_each_entry_safe(rrq, nextrrq, &rrq_list, list) {
                list_del(&rrq->list);
                lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
        }
}

/**
 * lpfc_test_rrq_active - Test RRQ bit in xri_bitmap.
 * @phba: Pointer to HBA context object.
 * @ndlp: Target's nodelist pointer for this exchange.
 * @xritag: the xri in the bitmap to test.
 *
 * This function returns:
 * 0 = rrq not active for this xri
 * 1 = rrq is valid for this xri.
 **/
int
lpfc_test_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
                     uint16_t xritag)
{
        if (!ndlp)
                return 0;
        if (!ndlp->active_rrqs_xri_bitmap)
                return 0;
        if (test_bit(xritag, ndlp->active_rrqs_xri_bitmap))
                return 1;
        else
                return 0;
}

/**
 * lpfc_set_rrq_active - set RRQ active bit in xri_bitmap.
 * @phba: Pointer to HBA context object.
 * @ndlp: nodelist pointer for this target.
 * @xritag: xri used in this exchange.
 * @rxid: Remote Exchange ID.
 * @send_rrq: Flag used to determine if we should send rrq els cmd.
 *
 * This function takes the hbalock.
 * The active bit is always set in the active rrq xri_bitmap even
 * if there is no slot available for the other rrq information.
 *
 * returns 0 = rrq activated for this xri
 *         < 0 = no memory or invalid ndlp.
 **/
int
lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
                    uint16_t xritag, uint16_t rxid, uint16_t send_rrq)
{
        unsigned long iflags;
        struct lpfc_node_rrq *rrq;
        int empty;

        if (!ndlp)
                return -EINVAL;

        if (!phba->cfg_enable_rrq)
                return -EINVAL;

        spin_lock_irqsave(&phba->hbalock, iflags);
        if (phba->pport->load_flag & FC_UNLOADING) {
                phba->hba_flag &= ~HBA_RRQ_ACTIVE;
                goto out;
        }

        /*
         * set the active bit even if there is no mem available.
         */
        if (NLP_CHK_FREE_REQ(ndlp))
                goto out;

        if (ndlp->vport && (ndlp->vport->load_flag & FC_UNLOADING))
                goto out;

        if (!ndlp->active_rrqs_xri_bitmap)
                goto out;

        if (test_and_set_bit(xritag, ndlp->active_rrqs_xri_bitmap))
                goto out;

        spin_unlock_irqrestore(&phba->hbalock, iflags);
        rrq = mempool_alloc(phba->rrq_pool, GFP_ATOMIC);
        if (!rrq) {
                lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
                                "3155 Unable to allocate RRQ xri:0x%x rxid:0x%x"
                                " DID:0x%x Send:%d\n",
                                xritag, rxid, ndlp->nlp_DID, send_rrq);
                return -EINVAL;
        }
        if (phba->cfg_enable_rrq == 1)
                rrq->send_rrq = send_rrq;
        else
                rrq->send_rrq = 0;
        rrq->xritag = xritag;
        rrq->rrq_stop_time = jiffies +
                                msecs_to_jiffies(1000 * (phba->fc_ratov + 1));
        rrq->ndlp = ndlp;
        rrq->nlp_DID = ndlp->nlp_DID;
        rrq->vport = ndlp->vport;
        rrq->rxid = rxid;
        spin_lock_irqsave(&phba->hbalock, iflags);
        empty = list_empty(&phba->active_rrq_list);
        list_add_tail(&rrq->list, &phba->active_rrq_list);
        phba->hba_flag |= HBA_RRQ_ACTIVE;
        if (empty)
                lpfc_worker_wake_up(phba);
        spin_unlock_irqrestore(&phba->hbalock, iflags);
        return 0;
out:
        spin_unlock_irqrestore(&phba->hbalock, iflags);
        lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
                        "2921 Can't set rrq active xri:0x%x rxid:0x%x"
                        " DID:0x%x Send:%d\n",
                        xritag, rxid, ndlp->nlp_DID, send_rrq);
        return -EINVAL;
}
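
/*
 * Lifecycle sketch (illustrative, not part of the driver): an exchange's
 * xri stays quarantined while its RRQ is outstanding:
 *
 *      lpfc_set_rrq_active(phba, ndlp, xritag, rxid, 1);
 *      ...
 *      if (lpfc_test_rrq_active(phba, ndlp, xritag))
 *              ... xri still quarantined, allocate a different one ...
 *
 * The bit is cleared through lpfc_clr_rrq_active(), either from
 * lpfc_handle_rrq_active() once the stop time expires or from the RRQ ELS
 * completion path.
 */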

/**
 * __lpfc_sli_get_els_sglq - Allocates an iocb object from sgl pool
 * @phba: Pointer to HBA context object.
 * @piocbq: Pointer to the iocbq.
 *
 * The driver calls this function with either the nvme ls ring lock
 * or the fc els ring lock held depending on the iocb usage. This function
 * gets a new driver sglq object from the sglq list. If the list is not
 * empty, it returns a pointer to the newly allocated sglq object;
 * otherwise it returns NULL.
 **/
static struct lpfc_sglq *
__lpfc_sli_get_els_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq)
{
        struct list_head *lpfc_els_sgl_list = &phba->sli4_hba.lpfc_els_sgl_list;
        struct lpfc_sglq *sglq = NULL;
        struct lpfc_sglq *start_sglq = NULL;
        struct lpfc_io_buf *lpfc_cmd;
        struct lpfc_nodelist *ndlp;
        struct lpfc_sli_ring *pring = NULL;
        int found = 0;

        if (piocbq->iocb_flag & LPFC_IO_NVME_LS)
                pring = phba->sli4_hba.nvmels_wq->pring;
        else
                pring = lpfc_phba_elsring(phba);

        lockdep_assert_held(&pring->ring_lock);

        if (piocbq->iocb_flag & LPFC_IO_FCP) {
                lpfc_cmd = (struct lpfc_io_buf *) piocbq->context1;
                ndlp = lpfc_cmd->rdata->pnode;
        } else if ((piocbq->iocb.ulpCommand == CMD_GEN_REQUEST64_CR) &&
                   !(piocbq->iocb_flag & LPFC_IO_LIBDFC)) {
                ndlp = piocbq->context_un.ndlp;
        } else if (piocbq->iocb_flag & LPFC_IO_LIBDFC) {
                if (piocbq->iocb_flag & LPFC_IO_LOOPBACK)
                        ndlp = NULL;
                else
                        ndlp = piocbq->context_un.ndlp;
        } else {
                ndlp = piocbq->context1;
        }

        spin_lock(&phba->sli4_hba.sgl_list_lock);
        list_remove_head(lpfc_els_sgl_list, sglq, struct lpfc_sglq, list);
        start_sglq = sglq;
        while (!found) {
                if (!sglq)
                        break;
                if (ndlp && ndlp->active_rrqs_xri_bitmap &&
                    test_bit(sglq->sli4_lxritag,
                             ndlp->active_rrqs_xri_bitmap)) {
                        /* This xri has an rrq outstanding for this DID.
                         * put it back in the list and get another xri.
                         */
                        list_add_tail(&sglq->list, lpfc_els_sgl_list);
                        sglq = NULL;
                        list_remove_head(lpfc_els_sgl_list, sglq,
                                         struct lpfc_sglq, list);
                        if (sglq == start_sglq) {
                                list_add_tail(&sglq->list, lpfc_els_sgl_list);
                                sglq = NULL;
                                break;
                        } else
                                continue;
                }
                sglq->ndlp = ndlp;
                found = 1;
                phba->sli4_hba.lpfc_sglq_active_list[sglq->sli4_lxritag] = sglq;
                sglq->state = SGL_ALLOCATED;
        }
        spin_unlock(&phba->sli4_hba.sgl_list_lock);
        return sglq;
}

/**
 * __lpfc_sli_get_nvmet_sglq - Allocates an iocb object from sgl pool
 * @phba: Pointer to HBA context object.
 * @piocbq: Pointer to the iocbq.
 *
 * This function is called with the sgl_list lock held. This function
 * gets a new driver sglq object from the sglq list. If the list is not
 * empty, it returns a pointer to the newly allocated sglq object;
 * otherwise it returns NULL.
 **/
struct lpfc_sglq *
__lpfc_sli_get_nvmet_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq)
{
        struct list_head *lpfc_nvmet_sgl_list;
        struct lpfc_sglq *sglq = NULL;

        lpfc_nvmet_sgl_list = &phba->sli4_hba.lpfc_nvmet_sgl_list;

        lockdep_assert_held(&phba->sli4_hba.sgl_list_lock);

        list_remove_head(lpfc_nvmet_sgl_list, sglq, struct lpfc_sglq, list);
        if (!sglq)
                return NULL;
        phba->sli4_hba.lpfc_sglq_active_list[sglq->sli4_lxritag] = sglq;
        sglq->state = SGL_ALLOCATED;
        return sglq;
}

/**
 * lpfc_sli_get_iocbq - Allocates an iocb object from iocb pool
 * @phba: Pointer to HBA context object.
 *
 * This function is called with no lock held. This function
 * allocates a new driver iocb object from the iocb pool. If the
 * allocation is successful, it returns a pointer to the newly
 * allocated iocb object; otherwise it returns NULL.
 **/
struct lpfc_iocbq *
lpfc_sli_get_iocbq(struct lpfc_hba *phba)
{
        struct lpfc_iocbq *iocbq = NULL;
        unsigned long iflags;

        spin_lock_irqsave(&phba->hbalock, iflags);
        iocbq = __lpfc_sli_get_iocbq(phba);
        spin_unlock_irqrestore(&phba->hbalock, iflags);
        return iocbq;
}
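
/*
 * Usage sketch (illustrative, not part of the driver): iocb objects are
 * borrowed from and returned to the pool around each request:
 *
 *      struct lpfc_iocbq *piocb = lpfc_sli_get_iocbq(phba);
 *
 *      if (!piocb)
 *              return -ENOMEM;
 *      ... build and issue the command ...
 *      lpfc_sli_release_iocbq(phba, piocb);
 *
 * The iotag assigned to the iocb persists across uses; release clears only
 * the volatile fields.
 */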

/**
 * __lpfc_sli_release_iocbq_s4 - Release iocb to the iocb pool
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * This function is called to release the driver iocb object
 * to the iocb pool. The iotag in the iocb object
 * does not change for each use of the iocb object. This function
 * clears all other fields of the iocb object when it is freed.
 * The sglq structure that holds the xritag and phys and virtual
 * mappings for the scatter gather list is retrieved from the
 * active array of sglq. The get of the sglq pointer also clears
 * the entry in the array. If the status of the IO indicates that
 * this IO was aborted then the sglq entry is put on the
 * lpfc_abts_els_sgl_list until the CQ_ABORTED_XRI is received. If the
 * IO has good status or fails for any other reason then the sglq
 * entry is added to the free list (lpfc_els_sgl_list). The hbalock is
 * asserted held in the code path calling this routine.
 **/
static void
__lpfc_sli_release_iocbq_s4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
        struct lpfc_sglq *sglq;
        size_t start_clean = offsetof(struct lpfc_iocbq, iocb);
        unsigned long iflag = 0;
        struct lpfc_sli_ring *pring;

        if (iocbq->sli4_xritag == NO_XRI)
                sglq = NULL;
        else
                sglq = __lpfc_clear_active_sglq(phba, iocbq->sli4_lxritag);

        if (sglq) {
                if (iocbq->iocb_flag & LPFC_IO_NVMET) {
                        spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock,
                                          iflag);
                        sglq->state = SGL_FREED;
                        sglq->ndlp = NULL;
                        list_add_tail(&sglq->list,
                                      &phba->sli4_hba.lpfc_nvmet_sgl_list);
                        spin_unlock_irqrestore(
                                &phba->sli4_hba.sgl_list_lock, iflag);
                        goto out;
                }

                pring = phba->sli4_hba.els_wq->pring;
                if ((iocbq->iocb_flag & LPFC_EXCHANGE_BUSY) &&
                    (sglq->state != SGL_XRI_ABORTED)) {
                        spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock,
                                          iflag);
                        list_add(&sglq->list,
                                 &phba->sli4_hba.lpfc_abts_els_sgl_list);
                        spin_unlock_irqrestore(
                                &phba->sli4_hba.sgl_list_lock, iflag);
                } else {
                        spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock,
                                          iflag);
                        sglq->state = SGL_FREED;
                        sglq->ndlp = NULL;
                        list_add_tail(&sglq->list,
                                      &phba->sli4_hba.lpfc_els_sgl_list);
                        spin_unlock_irqrestore(
                                &phba->sli4_hba.sgl_list_lock, iflag);

                        /* Check if TXQ queue needs to be serviced */
                        if (!list_empty(&pring->txq))
                                lpfc_worker_wake_up(phba);
                }
        }

out:
        /*
         * Clean all volatile data fields, preserve iotag and node struct.
         */
        memset((char *)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean);
        iocbq->sli4_lxritag = NO_XRI;
        iocbq->sli4_xritag = NO_XRI;
        iocbq->iocb_flag &= ~(LPFC_IO_NVME | LPFC_IO_NVMET |
                              LPFC_IO_NVME_LS);
        list_add_tail(&iocbq->list, &phba->lpfc_iocb_list);
}

/**
 * __lpfc_sli_release_iocbq_s3 - Release iocb to the iocb pool
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * This function is called to release the driver iocb object to the
 * iocb pool. The iotag in the iocb object does not change for each
 * use of the iocb object. This function clears all other fields of
 * the iocb object when it is freed. The hbalock is asserted held in
 * the code path calling this routine.
 **/
static void
__lpfc_sli_release_iocbq_s3(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
        size_t start_clean = offsetof(struct lpfc_iocbq, iocb);

        /*
         * Clean all volatile data fields, preserve iotag and node struct.
         */
        memset((char *)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean);
        iocbq->sli4_xritag = NO_XRI;
        list_add_tail(&iocbq->list, &phba->lpfc_iocb_list);
}
1348
1349/**
1350 * __lpfc_sli_release_iocbq - Release iocb to the iocb pool
1351 * @phba: Pointer to HBA context object.
1352 * @iocbq: Pointer to driver iocb object.
1353 *
1354 * This function is called with hbalock held to release driver
1355 * iocb object to the iocb pool. The iotag in the iocb object
1356 * does not change for each use of the iocb object. This function
1357 * clears all other fields of the iocb object when it is freed.
1358 **/
1359static void
1360__lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
1361{
1362        lockdep_assert_held(&phba->hbalock);
1363
1364        phba->__lpfc_sli_release_iocbq(phba, iocbq);
1365        phba->iocb_cnt--;
1366}
1367
1368/**
1369 * lpfc_sli_release_iocbq - Release iocb to the iocb pool
1370 * @phba: Pointer to HBA context object.
1371 * @iocbq: Pointer to driver iocb object.
1372 *
1373 * This function is called with no lock held to release the iocb to
1374 * iocb pool.
1375 **/
1376void
1377lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
1378{
1379        unsigned long iflags;
1380
1381        /*
1382         * Clean all volatile data fields, preserve iotag and node struct.
1383         */
1384        spin_lock_irqsave(&phba->hbalock, iflags);
1385        __lpfc_sli_release_iocbq(phba, iocbq);
1386        spin_unlock_irqrestore(&phba->hbalock, iflags);
1387}
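
/*
 * Usage sketch (illustrative only, not driver code): allocation and
 * release of an iocb are symmetric, and neither requires the caller to
 * hold a lock. lpfc_sli_get_iocbq() is defined earlier in this file.
 *
 *        struct lpfc_iocbq *iocbq = lpfc_sli_get_iocbq(phba);
 *
 *        if (!iocbq)
 *                return -ENOMEM;
 *        ... build and issue the command ...
 *        lpfc_sli_release_iocbq(phba, iocbq);
 */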
1388
1389/**
1390 * lpfc_sli_cancel_iocbs - Cancel all iocbs from a list.
1391 * @phba: Pointer to HBA context object.
1392 * @iocblist: List of IOCBs.
1393 * @ulpstatus: ULP status in IOCB command field.
1394 * @ulpWord4: ULP word-4 in IOCB command field.
1395 *
1396 * This function is called with a list of IOCBs to cancel. It cancels each
1397 * IOCB on the list by invoking the completion callback function associated
1398 * with the IOCB, with the provided @ulpstatus and @ulpWord4 set in the IOCB
1399 * command fields.
1400 **/
1401void
1402lpfc_sli_cancel_iocbs(struct lpfc_hba *phba, struct list_head *iocblist,
1403                      uint32_t ulpstatus, uint32_t ulpWord4)
1404{
1405        struct lpfc_iocbq *piocb;
1406
1407        while (!list_empty(iocblist)) {
1408                list_remove_head(iocblist, piocb, struct lpfc_iocbq, list);
1409                if (!piocb->iocb_cmpl) {
1410                        if (piocb->iocb_flag & LPFC_IO_NVME)
1411                                lpfc_nvme_cancel_iocb(phba, piocb);
1412                        else
1413                                lpfc_sli_release_iocbq(phba, piocb);
1414                } else {
1415                        piocb->iocb.ulpStatus = ulpstatus;
1416                        piocb->iocb.un.ulpWord[4] = ulpWord4;
1417                        piocb->iocb_cmpl(phba, piocb, piocb);
1418                }
1419        }
1420        return;
1421}
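
/*
 * Usage sketch (illustrative only): callers typically splice a ring's
 * pending queue onto a private list under the lock and then cancel the
 * whole list with a local-reject status, e.g. when flushing ELS
 * commands. The list name "completions" and the ring pointer "pring"
 * are hypothetical here.
 *
 *        LIST_HEAD(completions);
 *
 *        spin_lock_irq(&phba->hbalock);
 *        list_splice_init(&pring->txq, &completions);
 *        spin_unlock_irq(&phba->hbalock);
 *
 *        lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
 *                              IOERR_SLI_ABORTED);
 */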
1422
1423/**
1424 * lpfc_sli_iocb_cmd_type - Get the iocb type
1425 * @iocb_cmnd: iocb command code.
1426 *
1427 * This function is called by ring event handler function to get the iocb type.
1428 * This function translates the iocb command to an iocb command type used to
1429 * decide the final disposition of each completed IOCB.
1430 * The function returns
1431 * LPFC_UNKNOWN_IOCB if it is an unsupported iocb
1432 * LPFC_SOL_IOCB     if it is a solicited iocb completion
1433 * LPFC_ABORT_IOCB   if it is an abort iocb
1434 * LPFC_UNSOL_IOCB   if it is an unsolicited iocb
1435 *
1436 * The caller is not required to hold any lock.
1437 **/
1438static lpfc_iocb_type
1439lpfc_sli_iocb_cmd_type(uint8_t iocb_cmnd)
1440{
1441        lpfc_iocb_type type = LPFC_UNKNOWN_IOCB;
1442
1443        if (iocb_cmnd > CMD_MAX_IOCB_CMD)
1444                return LPFC_UNKNOWN_IOCB;
1445
1446        switch (iocb_cmnd) {
1447        case CMD_XMIT_SEQUENCE_CR:
1448        case CMD_XMIT_SEQUENCE_CX:
1449        case CMD_XMIT_BCAST_CN:
1450        case CMD_XMIT_BCAST_CX:
1451        case CMD_ELS_REQUEST_CR:
1452        case CMD_ELS_REQUEST_CX:
1453        case CMD_CREATE_XRI_CR:
1454        case CMD_CREATE_XRI_CX:
1455        case CMD_GET_RPI_CN:
1456        case CMD_XMIT_ELS_RSP_CX:
1457        case CMD_GET_RPI_CR:
1458        case CMD_FCP_IWRITE_CR:
1459        case CMD_FCP_IWRITE_CX:
1460        case CMD_FCP_IREAD_CR:
1461        case CMD_FCP_IREAD_CX:
1462        case CMD_FCP_ICMND_CR:
1463        case CMD_FCP_ICMND_CX:
1464        case CMD_FCP_TSEND_CX:
1465        case CMD_FCP_TRSP_CX:
1466        case CMD_FCP_TRECEIVE_CX:
1467        case CMD_FCP_AUTO_TRSP_CX:
1468        case CMD_ADAPTER_MSG:
1469        case CMD_ADAPTER_DUMP:
1470        case CMD_XMIT_SEQUENCE64_CR:
1471        case CMD_XMIT_SEQUENCE64_CX:
1472        case CMD_XMIT_BCAST64_CN:
1473        case CMD_XMIT_BCAST64_CX:
1474        case CMD_ELS_REQUEST64_CR:
1475        case CMD_ELS_REQUEST64_CX:
1476        case CMD_FCP_IWRITE64_CR:
1477        case CMD_FCP_IWRITE64_CX:
1478        case CMD_FCP_IREAD64_CR:
1479        case CMD_FCP_IREAD64_CX:
1480        case CMD_FCP_ICMND64_CR:
1481        case CMD_FCP_ICMND64_CX:
1482        case CMD_FCP_TSEND64_CX:
1483        case CMD_FCP_TRSP64_CX:
1484        case CMD_FCP_TRECEIVE64_CX:
1485        case CMD_GEN_REQUEST64_CR:
1486        case CMD_GEN_REQUEST64_CX:
1487        case CMD_XMIT_ELS_RSP64_CX:
1488        case DSSCMD_IWRITE64_CR:
1489        case DSSCMD_IWRITE64_CX:
1490        case DSSCMD_IREAD64_CR:
1491        case DSSCMD_IREAD64_CX:
1492        case CMD_SEND_FRAME:
1493                type = LPFC_SOL_IOCB;
1494                break;
1495        case CMD_ABORT_XRI_CN:
1496        case CMD_ABORT_XRI_CX:
1497        case CMD_CLOSE_XRI_CN:
1498        case CMD_CLOSE_XRI_CX:
1499        case CMD_XRI_ABORTED_CX:
1500        case CMD_ABORT_MXRI64_CN:
1501        case CMD_XMIT_BLS_RSP64_CX:
1502                type = LPFC_ABORT_IOCB;
1503                break;
1504        case CMD_RCV_SEQUENCE_CX:
1505        case CMD_RCV_ELS_REQ_CX:
1506        case CMD_RCV_SEQUENCE64_CX:
1507        case CMD_RCV_ELS_REQ64_CX:
1508        case CMD_ASYNC_STATUS:
1509        case CMD_IOCB_RCV_SEQ64_CX:
1510        case CMD_IOCB_RCV_ELS64_CX:
1511        case CMD_IOCB_RCV_CONT64_CX:
1512        case CMD_IOCB_RET_XRI64_CX:
1513                type = LPFC_UNSOL_IOCB;
1514                break;
1515        case CMD_IOCB_XMIT_MSEQ64_CR:
1516        case CMD_IOCB_XMIT_MSEQ64_CX:
1517        case CMD_IOCB_RCV_SEQ_LIST64_CX:
1518        case CMD_IOCB_RCV_ELS_LIST64_CX:
1519        case CMD_IOCB_CLOSE_EXTENDED_CN:
1520        case CMD_IOCB_ABORT_EXTENDED_CN:
1521        case CMD_IOCB_RET_HBQE64_CN:
1522        case CMD_IOCB_FCP_IBIDIR64_CR:
1523        case CMD_IOCB_FCP_IBIDIR64_CX:
1524        case CMD_IOCB_FCP_ITASKMGT64_CX:
1525        case CMD_IOCB_LOGENTRY_CN:
1526        case CMD_IOCB_LOGENTRY_ASYNC_CN:
1527                printk(KERN_ERR "%s - Unhandled SLI-3 Command x%x\n",
1528                       __func__, iocb_cmnd);
1529                type = LPFC_UNKNOWN_IOCB;
1530                break;
1531        default:
1532                type = LPFC_UNKNOWN_IOCB;
1533                break;
1534        }
1535
1536        return type;
1537}
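
/*
 * Illustrative dispatch on the returned type (a sketch only; the real
 * ring event handlers appear later in this file):
 *
 *        type = lpfc_sli_iocb_cmd_type(irsp->ulpCommand);
 *        switch (type) {
 *        case LPFC_SOL_IOCB:
 *                look up the originating command by its iotag, complete it
 *        case LPFC_UNSOL_IOCB:
 *                hand the received sequence to the upper layer protocol
 *        case LPFC_ABORT_IOCB:
 *                complete or discard the aborted exchange
 *        default:
 *                log and drop the entry
 *        }
 */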
1538
1539/**
1540 * lpfc_sli_ring_map - Issue config_ring mbox for all rings
1541 * @phba: Pointer to HBA context object.
1542 *
1543 * This function is called from SLI initialization code
1544 * to configure every ring of the HBA's SLI interface. The
1545 * caller is not required to hold any lock. This function issues
1546 * a config_ring mailbox command for each ring.
1547 * This function returns zero if successful else returns a negative
1548 * error code.
1549 **/
1550static int
1551lpfc_sli_ring_map(struct lpfc_hba *phba)
1552{
1553        struct lpfc_sli *psli = &phba->sli;
1554        LPFC_MBOXQ_t *pmb;
1555        MAILBOX_t *pmbox;
1556        int i, rc, ret = 0;
1557
1558        pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1559        if (!pmb)
1560                return -ENOMEM;
1561        pmbox = &pmb->u.mb;
1562        phba->link_state = LPFC_INIT_MBX_CMDS;
1563        for (i = 0; i < psli->num_rings; i++) {
1564                lpfc_config_ring(phba, i, pmb);
1565                rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
1566                if (rc != MBX_SUCCESS) {
1567                        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1568                                        "0446 Adapter failed to init (%d), "
1569                                        "mbxCmd x%x CFG_RING, mbxStatus x%x, "
1570                                        "ring %d\n",
1571                                        rc, pmbox->mbxCommand,
1572                                        pmbox->mbxStatus, i);
1573                        phba->link_state = LPFC_HBA_ERROR;
1574                        ret = -ENXIO;
1575                        break;
1576                }
1577        }
1578        mempool_free(pmb, phba->mbox_mem_pool);
1579        return ret;
1580}
1581
1582/**
1583 * lpfc_sli_ringtxcmpl_put - Adds new iocb to the txcmplq
1584 * @phba: Pointer to HBA context object.
1585 * @pring: Pointer to driver SLI ring object.
1586 * @piocb: Pointer to the driver iocb object.
1587 *
1588 * The driver calls this function with the hbalock held for SLI3 ports or
1589 * the ring lock held for SLI4 ports. The function adds the new iocb to
1590 * the txcmplq of the given ring and always returns 0. If the iocb is
1591 * posted to the ELS ring, the function verifies that a vport is
1592 * associated with the command and restarts the els_tmofunc timer unless
1593 * the vport is unloading.
1594 **/
1595static int
1596lpfc_sli_ringtxcmpl_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
1597                        struct lpfc_iocbq *piocb)
1598{
1599        if (phba->sli_rev == LPFC_SLI_REV4)
1600                lockdep_assert_held(&pring->ring_lock);
1601        else
1602                lockdep_assert_held(&phba->hbalock);
1603
1604        BUG_ON(!piocb);
1605
1606        list_add_tail(&piocb->list, &pring->txcmplq);
1607        piocb->iocb_flag |= LPFC_IO_ON_TXCMPLQ;
1608        pring->txcmplq_cnt++;
1609
1610        if ((unlikely(pring->ringno == LPFC_ELS_RING)) &&
1611           (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) &&
1612           (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN)) {
1613                BUG_ON(!piocb->vport);
1614                if (!(piocb->vport->load_flag & FC_UNLOADING))
1615                        mod_timer(&piocb->vport->els_tmofunc,
1616                                  jiffies +
1617                                  msecs_to_jiffies(1000 * (phba->fc_ratov << 1)));
1618        }
1619
1620        return 0;
1621}
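
/*
 * Note (illustrative): the ELS timeout armed above is twice the fabric
 * R_A_TOV. With a typical fc_ratov of 10 seconds, the els_tmofunc timer
 * fires 20 seconds after the command is queued:
 *
 *        timeout = jiffies +
 *                  msecs_to_jiffies(1000 * (phba->fc_ratov << 1));
 */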
1622
1623/**
1624 * lpfc_sli_ringtx_get - Get first element of the txq
1625 * @phba: Pointer to HBA context object.
1626 * @pring: Pointer to driver SLI ring object.
1627 *
1628 * This function is called with hbalock held to get the next
1629 * iocb in the txq of the given ring. If there is any iocb in
1630 * the txq, the function returns the first iocb in the list after
1631 * removing it from the list, else it returns NULL.
1632 **/
1633struct lpfc_iocbq *
1634lpfc_sli_ringtx_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
1635{
1636        struct lpfc_iocbq *cmd_iocb;
1637
1638        lockdep_assert_held(&phba->hbalock);
1639
1640        list_remove_head((&pring->txq), cmd_iocb, struct lpfc_iocbq, list);
1641        return cmd_iocb;
1642}
1643
1644/**
1645 * lpfc_sli_next_iocb_slot - Get next iocb slot in the ring
1646 * @phba: Pointer to HBA context object.
1647 * @pring: Pointer to driver SLI ring object.
1648 *
1649 * This function is called with hbalock held and the caller must post the
1650 * iocb without releasing the lock. If the caller releases the lock,
1651 * the iocb slot returned by the function is no longer guaranteed to be
1652 * available. The function returns a pointer to the next available iocb
1653 * slot if there is one in the ring, else it returns NULL.
1654 * If the get index of the ring is ahead of the put index, the function
1655 * will post an error attention event to the worker thread to take the
1656 * HBA to offline state.
1657 **/
1658static IOCB_t *
1659lpfc_sli_next_iocb_slot(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
1660{
1661        struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
1662        uint32_t  max_cmd_idx = pring->sli.sli3.numCiocb;
1663
1664        lockdep_assert_held(&phba->hbalock);
1665
1666        if ((pring->sli.sli3.next_cmdidx == pring->sli.sli3.cmdidx) &&
1667           (++pring->sli.sli3.next_cmdidx >= max_cmd_idx))
1668                pring->sli.sli3.next_cmdidx = 0;
1669
1670        if (unlikely(pring->sli.sli3.local_getidx ==
1671                pring->sli.sli3.next_cmdidx)) {
1672
1673                pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx);
1674
1675                if (unlikely(pring->sli.sli3.local_getidx >= max_cmd_idx)) {
1676                        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1677                                        "0315 Ring %d issue: portCmdGet %d "
1678                                        "is bigger than cmd ring %d\n",
1679                                        pring->ringno,
1680                                        pring->sli.sli3.local_getidx,
1681                                        max_cmd_idx);
1682
1683                        phba->link_state = LPFC_HBA_ERROR;
1684                        /*
1685                         * All error attention handlers are posted to
1686                         * worker thread
1687                         */
1688                        phba->work_ha |= HA_ERATT;
1689                        phba->work_hs = HS_FFER3;
1690
1691                        lpfc_worker_wake_up(phba);
1692
1693                        return NULL;
1694                }
1695
1696                if (pring->sli.sli3.local_getidx == pring->sli.sli3.next_cmdidx)
1697                        return NULL;
1698        }
1699
1700        return lpfc_cmd_iocb(phba, pring);
1701}
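
/*
 * The command ring is a classic one-slot-open circular queue; an
 * illustrative restatement of the index arithmetic above:
 *
 *        next = (pring->sli.sli3.cmdidx + 1) % pring->sli.sli3.numCiocb;
 *        full = (next == pring->sli.sli3.local_getidx);
 *
 * The slot at cmdidx can only be reused once the port's get index has
 * advanced past it.
 */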
1702
1703/**
1704 * lpfc_sli_next_iotag - Get an iotag for the iocb
1705 * @phba: Pointer to HBA context object.
1706 * @iocbq: Pointer to driver iocb object.
1707 *
1708 * This function gets an iotag for the iocb. If there is no unused iotag and
1709 * the iocbq_lookup_len < 0xffff, this function allocates a bigger iocbq_lookup
1710 * array and assigns a new iotag.
1711 * The function returns the allocated iotag if successful, else returns zero.
1712 * Zero is not a valid iotag.
1713 * The caller is not required to hold any lock.
1714 **/
1715uint16_t
1716lpfc_sli_next_iotag(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
1717{
1718        struct lpfc_iocbq **new_arr;
1719        struct lpfc_iocbq **old_arr;
1720        size_t new_len;
1721        struct lpfc_sli *psli = &phba->sli;
1722        uint16_t iotag;
1723
1724        spin_lock_irq(&phba->hbalock);
1725        iotag = psli->last_iotag;
1726        if (++iotag < psli->iocbq_lookup_len) {
1727                psli->last_iotag = iotag;
1728                psli->iocbq_lookup[iotag] = iocbq;
1729                spin_unlock_irq(&phba->hbalock);
1730                iocbq->iotag = iotag;
1731                return iotag;
1732        } else if (psli->iocbq_lookup_len < (0xffff
1733                                           - LPFC_IOCBQ_LOOKUP_INCREMENT)) {
1734                new_len = psli->iocbq_lookup_len + LPFC_IOCBQ_LOOKUP_INCREMENT;
1735                spin_unlock_irq(&phba->hbalock);
1736                new_arr = kcalloc(new_len, sizeof(struct lpfc_iocbq *),
1737                                  GFP_KERNEL);
1738                if (new_arr) {
1739                        spin_lock_irq(&phba->hbalock);
1740                        old_arr = psli->iocbq_lookup;
1741                        if (new_len <= psli->iocbq_lookup_len) {
1742                                /* highly improbable case */
1743                                kfree(new_arr);
1744                                iotag = psli->last_iotag;
1745                                if (++iotag < psli->iocbq_lookup_len) {
1746                                        psli->last_iotag = iotag;
1747                                        psli->iocbq_lookup[iotag] = iocbq;
1748                                        spin_unlock_irq(&phba->hbalock);
1749                                        iocbq->iotag = iotag;
1750                                        return iotag;
1751                                }
1752                                spin_unlock_irq(&phba->hbalock);
1753                                return 0;
1754                        }
1755                        if (psli->iocbq_lookup)
1756                                memcpy(new_arr, old_arr,
1757                                       ((psli->last_iotag + 1) *
1758                                        sizeof(struct lpfc_iocbq *)));
1759                        psli->iocbq_lookup = new_arr;
1760                        psli->iocbq_lookup_len = new_len;
1761                        psli->last_iotag = iotag;
1762                        psli->iocbq_lookup[iotag] = iocbq;
1763                        spin_unlock_irq(&phba->hbalock);
1764                        iocbq->iotag = iotag;
1765                        kfree(old_arr);
1766                        return iotag;
1767                }
1768        } else
1769                spin_unlock_irq(&phba->hbalock);
1770
1771        lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
1772                        "0318 Failed to allocate IOTAG.last IOTAG is %d\n",
1773                        psli->last_iotag);
1774
1775        return 0;
1776}
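
/*
 * Typical caller pattern (sketch; the cleanup label is hypothetical):
 * a returned iotag of zero means the lookup array could not be grown,
 * so the iocb cannot be tracked and must not be issued.
 *
 *        iotag = lpfc_sli_next_iotag(phba, iocbq);
 *        if (!iotag)
 *                goto out_free_iocbq;
 */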
1777
1778/**
1779 * lpfc_sli_submit_iocb - Submit an iocb to the firmware
1780 * @phba: Pointer to HBA context object.
1781 * @pring: Pointer to driver SLI ring object.
1782 * @iocb: Pointer to iocb slot in the ring.
1783 * @nextiocb: Pointer to driver iocb object which needs to be
1784 *            posted to firmware.
1785 *
1786 * This function is called to post a new iocb to the firmware. This
1787 * function copies the new iocb to ring iocb slot and updates the
1788 * ring pointers. It adds the new iocb to txcmplq if there is
1789 * a completion call back for this iocb else the function will free the
1790 * iocb object.  The hbalock is asserted held in the code path calling
1791 * this routine.
1792 **/
1793static void
1794lpfc_sli_submit_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
1795                IOCB_t *iocb, struct lpfc_iocbq *nextiocb)
1796{
1797        /*
1798         * Set up an iotag
1799         */
1800        nextiocb->iocb.ulpIoTag = (nextiocb->iocb_cmpl) ? nextiocb->iotag : 0;
1801
1802
1803        if (pring->ringno == LPFC_ELS_RING) {
1804                lpfc_debugfs_slow_ring_trc(phba,
1805                        "IOCB cmd ring:   wd4:x%08x wd6:x%08x wd7:x%08x",
1806                        *(((uint32_t *) &nextiocb->iocb) + 4),
1807                        *(((uint32_t *) &nextiocb->iocb) + 6),
1808                        *(((uint32_t *) &nextiocb->iocb) + 7));
1809        }
1810
1811        /*
1812         * Issue iocb command to adapter
1813         */
1814        lpfc_sli_pcimem_bcopy(&nextiocb->iocb, iocb, phba->iocb_cmd_size);
1815        wmb();
1816        pring->stats.iocb_cmd++;
1817
1818        /*
1819         * If there is no completion routine to call, we can release the
1820         * IOCB buffer back right now. For IOCBs, like QUE_RING_BUF,
1821         * that have no rsp ring completion, iocb_cmpl MUST be NULL.
1822         */
1823        if (nextiocb->iocb_cmpl)
1824                lpfc_sli_ringtxcmpl_put(phba, pring, nextiocb);
1825        else
1826                __lpfc_sli_release_iocbq(phba, nextiocb);
1827
1828        /*
1829         * Let the HBA know what IOCB slot will be the next one the
1830         * driver will put a command into.
1831         */
1832        pring->sli.sli3.cmdidx = pring->sli.sli3.next_cmdidx;
1833        writel(pring->sli.sli3.cmdidx, &phba->host_gp[pring->ringno].cmdPutInx);
1834}
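
/*
 * Design note on lpfc_sli_submit_iocb(): the wmb() orders the IOCB copy
 * into the ring slot before the cmdPutInx doorbell write, so the HBA can
 * never observe the new put index before the command contents are
 * visible.
 */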
1835
1836/**
1837 * lpfc_sli_update_full_ring - Update the chip attention register
1838 * @phba: Pointer to HBA context object.
1839 * @pring: Pointer to driver SLI ring object.
1840 *
1841 * The caller is not required to hold any lock for calling this function.
1842 * This function updates the chip attention bits for the ring to inform the
1843 * firmware that there is pending work to be done for this ring and requests
1844 * an interrupt when space becomes available in the ring. This function is
1845 * called when the driver is unable to post more iocbs to the ring due
1846 * to unavailability of space in the ring.
1847 **/
1848static void
1849lpfc_sli_update_full_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
1850{
1851        int ringno = pring->ringno;
1852
1853        pring->flag |= LPFC_CALL_RING_AVAILABLE;
1854
1855        wmb();
1856
1857        /*
1858         * Set ring 'ringno' to SET R0CE_REQ in Chip Att register.
1859         * The HBA will tell us when an IOCB entry is available.
1860         */
1861        writel((CA_R0ATT|CA_R0CE_REQ) << (ringno*4), phba->CAregaddr);
1862        readl(phba->CAregaddr); /* flush */
1863
1864        pring->stats.iocb_cmd_full++;
1865}
1866
1867/**
1868 * lpfc_sli_update_ring - Update chip attention register
1869 * @phba: Pointer to HBA context object.
1870 * @pring: Pointer to driver SLI ring object.
1871 *
1872 * This function updates the chip attention register bit for the
1873 * given ring to inform HBA that there is more work to be done
1874 * in this ring. The caller is not required to hold any lock.
1875 **/
1876static void
1877lpfc_sli_update_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
1878{
1879        int ringno = pring->ringno;
1880
1881        /*
1882         * Tell the HBA that there is work to do in this ring.
1883         */
1884        if (!(phba->sli3_options & LPFC_SLI3_CRP_ENABLED)) {
1885                wmb();
1886                writel(CA_R0ATT << (ringno * 4), phba->CAregaddr);
1887                readl(phba->CAregaddr); /* flush */
1888        }
1889}
1890
1891/**
1892 * lpfc_sli_resume_iocb - Process iocbs in the txq
1893 * @phba: Pointer to HBA context object.
1894 * @pring: Pointer to driver SLI ring object.
1895 *
1896 * This function is called with hbalock held to post pending iocbs
1897 * in the txq to the firmware. It is called when the driver detects
1898 * space available in the ring.
1899 **/
1900static void
1901lpfc_sli_resume_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
1902{
1903        IOCB_t *iocb;
1904        struct lpfc_iocbq *nextiocb;
1905
1906        lockdep_assert_held(&phba->hbalock);
1907
1908        /*
1909         * Check to see if:
1910         *  (a) there is anything on the txq to send
1911         *  (b) link is up
1912         *  (c) link attention events can be processed (fcp ring only)
1913         *  (d) IOCB processing is not blocked by the outstanding mbox command.
1914         */
1915
1916        if (lpfc_is_link_up(phba) &&
1917            (!list_empty(&pring->txq)) &&
1918            (pring->ringno != LPFC_FCP_RING ||
1919             phba->sli.sli_flag & LPFC_PROCESS_LA)) {
1920
1921                while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) &&
1922                       (nextiocb = lpfc_sli_ringtx_get(phba, pring)))
1923                        lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb);
1924
1925                if (iocb)
1926                        lpfc_sli_update_ring(phba, pring);
1927                else
1928                        lpfc_sli_update_full_ring(phba, pring);
1929        }
1930
1931        return;
1932}
1933
1934/**
1935 * lpfc_sli_next_hbq_slot - Get next hbq entry for the HBQ
1936 * @phba: Pointer to HBA context object.
1937 * @hbqno: HBQ number.
1938 *
1939 * This function is called with hbalock held to get the next
1940 * available slot for the given HBQ. If there is a free slot
1941 * available for the HBQ it will return a pointer to the next available
1942 * HBQ entry, else it will return NULL.
1943 **/
1944static struct lpfc_hbq_entry *
1945lpfc_sli_next_hbq_slot(struct lpfc_hba *phba, uint32_t hbqno)
1946{
1947        struct hbq_s *hbqp = &phba->hbqs[hbqno];
1948
1949        lockdep_assert_held(&phba->hbalock);
1950
1951        if (hbqp->next_hbqPutIdx == hbqp->hbqPutIdx &&
1952            ++hbqp->next_hbqPutIdx >= hbqp->entry_count)
1953                hbqp->next_hbqPutIdx = 0;
1954
1955        if (unlikely(hbqp->local_hbqGetIdx == hbqp->next_hbqPutIdx)) {
1956                uint32_t raw_index = phba->hbq_get[hbqno];
1957                uint32_t getidx = le32_to_cpu(raw_index);
1958
1959                hbqp->local_hbqGetIdx = getidx;
1960
1961                if (unlikely(hbqp->local_hbqGetIdx >= hbqp->entry_count)) {
1962                        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1963                                        "1802 HBQ %d: local_hbqGetIdx "
1964                                        "%u is > than hbqp->entry_count %u\n",
1965                                        hbqno, hbqp->local_hbqGetIdx,
1966                                        hbqp->entry_count);
1967
1968                        phba->link_state = LPFC_HBA_ERROR;
1969                        return NULL;
1970                }
1971
1972                if (hbqp->local_hbqGetIdx == hbqp->next_hbqPutIdx)
1973                        return NULL;
1974        }
1975
1976        return (struct lpfc_hbq_entry *) phba->hbqs[hbqno].hbq_virt +
1977                        hbqp->hbqPutIdx;
1978}
1979
1980/**
1981 * lpfc_sli_hbqbuf_free_all - Free all the hbq buffers
1982 * @phba: Pointer to HBA context object.
1983 *
1984 * This function is called with no lock held to free all the
1985 * hbq buffers while uninitializing the SLI interface. It also
1986 * frees the HBQ buffers returned by the firmware but not yet
1987 * processed by the upper layers.
1988 **/
1989void
1990lpfc_sli_hbqbuf_free_all(struct lpfc_hba *phba)
1991{
1992        struct lpfc_dmabuf *dmabuf, *next_dmabuf;
1993        struct hbq_dmabuf *hbq_buf;
1994        unsigned long flags;
1995        int i, hbq_count;
1996
1997        hbq_count = lpfc_sli_hbq_count();
1998        /* Return all memory used by all HBQs */
1999        spin_lock_irqsave(&phba->hbalock, flags);
2000        for (i = 0; i < hbq_count; ++i) {
2001                list_for_each_entry_safe(dmabuf, next_dmabuf,
2002                                &phba->hbqs[i].hbq_buffer_list, list) {
2003                        hbq_buf = container_of(dmabuf, struct hbq_dmabuf, dbuf);
2004                        list_del(&hbq_buf->dbuf.list);
2005                        (phba->hbqs[i].hbq_free_buffer)(phba, hbq_buf);
2006                }
2007                phba->hbqs[i].buffer_count = 0;
2008        }
2009
2010        /* Mark the HBQs not in use */
2011        phba->hbq_in_use = 0;
2012        spin_unlock_irqrestore(&phba->hbalock, flags);
2013}
2014
2015/**
2016 * lpfc_sli_hbq_to_firmware - Post the hbq buffer to firmware
2017 * @phba: Pointer to HBA context object.
2018 * @hbqno: HBQ number.
2019 * @hbq_buf: Pointer to HBQ buffer.
2020 *
2021 * This function is called with the hbalock held to post an
2022 * hbq buffer to the firmware. If the function finds an empty
2023 * slot in the HBQ, it will post the buffer. The function returns
2024 * zero if it successfully posts the buffer, else it returns an
2025 * error code.
2026 **/
2027static int
2028lpfc_sli_hbq_to_firmware(struct lpfc_hba *phba, uint32_t hbqno,
2029                         struct hbq_dmabuf *hbq_buf)
2030{
2031        lockdep_assert_held(&phba->hbalock);
2032        return phba->lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buf);
2033}
2034
2035/**
2036 * lpfc_sli_hbq_to_firmware_s3 - Post the hbq buffer to SLI3 firmware
2037 * @phba: Pointer to HBA context object.
2038 * @hbqno: HBQ number.
2039 * @hbq_buf: Pointer to HBQ buffer.
2040 *
2041 * This function is called with the hbalock held to post an hbq buffer to the
2042 * firmware. If the function finds an empty slot in the HBQ, it will post the
2043 * buffer and place it on the hbq_buffer_list. The function returns zero if
2044 * it successfully posts the buffer, else it returns an error code.
2045 **/
2046static int
2047lpfc_sli_hbq_to_firmware_s3(struct lpfc_hba *phba, uint32_t hbqno,
2048                            struct hbq_dmabuf *hbq_buf)
2049{
2050        struct lpfc_hbq_entry *hbqe;
2051        dma_addr_t physaddr = hbq_buf->dbuf.phys;
2052
2053        lockdep_assert_held(&phba->hbalock);
2054        /* Get next HBQ entry slot to use */
2055        hbqe = lpfc_sli_next_hbq_slot(phba, hbqno);
2056        if (hbqe) {
2057                struct hbq_s *hbqp = &phba->hbqs[hbqno];
2058
2059                hbqe->bde.addrHigh = le32_to_cpu(putPaddrHigh(physaddr));
2060                hbqe->bde.addrLow  = le32_to_cpu(putPaddrLow(physaddr));
2061                hbqe->bde.tus.f.bdeSize = hbq_buf->total_size;
2062                hbqe->bde.tus.f.bdeFlags = 0;
2063                hbqe->bde.tus.w = le32_to_cpu(hbqe->bde.tus.w);
2064                hbqe->buffer_tag = le32_to_cpu(hbq_buf->tag);
2065                /* Sync SLIM */
2066                hbqp->hbqPutIdx = hbqp->next_hbqPutIdx;
2067                writel(hbqp->hbqPutIdx, phba->hbq_put + hbqno);
2068                /* flush */
2069                readl(phba->hbq_put + hbqno);
2070                list_add_tail(&hbq_buf->dbuf.list, &hbqp->hbq_buffer_list);
2071                return 0;
2072        }
2073        return -ENOMEM;
2074}
2075
2076/**
2077 * lpfc_sli_hbq_to_firmware_s4 - Post the hbq buffer to SLI4 firmware
2078 * @phba: Pointer to HBA context object.
2079 * @hbqno: HBQ number.
2080 * @hbq_buf: Pointer to HBQ buffer.
2081 *
2082 * This function is called with the hbalock held to post an RQE to the SLI4
2083 * firmware. If able to post the RQE to the RQ it will queue the hbq entry to
2084 * the hbq_buffer_list and return zero, otherwise it will return an error.
2085 **/
2086static int
2087lpfc_sli_hbq_to_firmware_s4(struct lpfc_hba *phba, uint32_t hbqno,
2088                            struct hbq_dmabuf *hbq_buf)
2089{
2090        int rc;
2091        struct lpfc_rqe hrqe;
2092        struct lpfc_rqe drqe;
2093        struct lpfc_queue *hrq;
2094        struct lpfc_queue *drq;
2095
2096        if (hbqno != LPFC_ELS_HBQ)
2097                return 1;
2098        hrq = phba->sli4_hba.hdr_rq;
2099        drq = phba->sli4_hba.dat_rq;
2100
2101        lockdep_assert_held(&phba->hbalock);
2102        hrqe.address_lo = putPaddrLow(hbq_buf->hbuf.phys);
2103        hrqe.address_hi = putPaddrHigh(hbq_buf->hbuf.phys);
2104        drqe.address_lo = putPaddrLow(hbq_buf->dbuf.phys);
2105        drqe.address_hi = putPaddrHigh(hbq_buf->dbuf.phys);
2106        rc = lpfc_sli4_rq_put(hrq, drq, &hrqe, &drqe);
2107        if (rc < 0)
2108                return rc;
2109        hbq_buf->tag = (rc | (hbqno << 16));
2110        list_add_tail(&hbq_buf->dbuf.list, &phba->hbqs[hbqno].hbq_buffer_list);
2111        return 0;
2112}
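
/*
 * The buffer tag assigned above packs the HBQ number into the upper
 * 16 bits and the RQE index into the lower 16 bits (sketch):
 *
 *        tag   = rqe_index | (hbqno << 16);
 *        hbqno = tag >> 16;
 *        index = tag & 0xffff;
 */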
2113
2114/* HBQ for ELS and CT traffic. */
2115static struct lpfc_hbq_init lpfc_els_hbq = {
2116        .rn = 1,
2117        .entry_count = 256,
2118        .mask_count = 0,
2119        .profile = 0,
2120        .ring_mask = (1 << LPFC_ELS_RING),
2121        .buffer_count = 0,
2122        .init_count = 40,
2123        .add_count = 40,
2124};
2125
2126/* Array of HBQs */
2127struct lpfc_hbq_init *lpfc_hbq_defs[] = {
2128        &lpfc_els_hbq,
2129};
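
/*
 * With the values above, the SLI3 path initially posts init_count (40)
 * buffers and each replenish adds up to add_count (40) more, never
 * exceeding entry_count (256); the SLI4 path posts the full entry_count
 * up front (see lpfc_sli_hbqbuf_init_hbqs() below).
 */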
2130
2131/**
2132 * lpfc_sli_hbqbuf_fill_hbqs - Post more hbq buffers to HBQ
2133 * @phba: Pointer to HBA context object.
2134 * @hbqno: HBQ number.
2135 * @count: Number of HBQ buffers to be posted.
2136 *
2137 * This function is called with no lock held to post more hbq buffers to the
2138 * given HBQ. The function returns the number of HBQ buffers successfully
2139 * posted.
2140 **/
2141static int
2142lpfc_sli_hbqbuf_fill_hbqs(struct lpfc_hba *phba, uint32_t hbqno, uint32_t count)
2143{
2144        uint32_t i, posted = 0;
2145        unsigned long flags;
2146        struct hbq_dmabuf *hbq_buffer;
2147        LIST_HEAD(hbq_buf_list);
2148        if (!phba->hbqs[hbqno].hbq_alloc_buffer)
2149                return 0;
2150
2151        if ((phba->hbqs[hbqno].buffer_count + count) >
2152            lpfc_hbq_defs[hbqno]->entry_count)
2153                count = lpfc_hbq_defs[hbqno]->entry_count -
2154                                        phba->hbqs[hbqno].buffer_count;
2155        if (!count)
2156                return 0;
2157        /* Allocate HBQ entries */
2158        for (i = 0; i < count; i++) {
2159                hbq_buffer = (phba->hbqs[hbqno].hbq_alloc_buffer)(phba);
2160                if (!hbq_buffer)
2161                        break;
2162                list_add_tail(&hbq_buffer->dbuf.list, &hbq_buf_list);
2163        }
2164        /* Check whether HBQ is still in use */
2165        spin_lock_irqsave(&phba->hbalock, flags);
2166        if (!phba->hbq_in_use)
2167                goto err;
2168        while (!list_empty(&hbq_buf_list)) {
2169                list_remove_head(&hbq_buf_list, hbq_buffer, struct hbq_dmabuf,
2170                                 dbuf.list);
2171                hbq_buffer->tag = (phba->hbqs[hbqno].buffer_count |
2172                                      (hbqno << 16));
2173                if (!lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer)) {
2174                        phba->hbqs[hbqno].buffer_count++;
2175                        posted++;
2176                } else
2177                        (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
2178        }
2179        spin_unlock_irqrestore(&phba->hbalock, flags);
2180        return posted;
2181err:
2182        spin_unlock_irqrestore(&phba->hbalock, flags);
2183        while (!list_empty(&hbq_buf_list)) {
2184                list_remove_head(&hbq_buf_list, hbq_buffer, struct hbq_dmabuf,
2185                                 dbuf.list);
2186                (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
2187        }
2188        return 0;
2189}
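
/*
 * Design note: lpfc_sli_hbqbuf_fill_hbqs() allocates the buffers outside
 * the hbalock and only posts them to the firmware under the lock; if the
 * HBQs were taken out of use in between (hbq_in_use cleared), the error
 * path frees the entire temporary list instead.
 */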
2190
2191/**
2192 * lpfc_sli_hbqbuf_add_hbqs - Post more HBQ buffers to firmware
2193 * @phba: Pointer to HBA context object.
2194 * @qno: HBQ number.
2195 *
2196 * This function posts more buffers to the HBQ. This function
2197 * is called with no lock held. The function returns the number of HBQ entries
2198 * successfully posted.
2199 **/
2200int
2201lpfc_sli_hbqbuf_add_hbqs(struct lpfc_hba *phba, uint32_t qno)
2202{
2203        if (phba->sli_rev == LPFC_SLI_REV4)
2204                return 0;
2205        else
2206                return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
2207                                         lpfc_hbq_defs[qno]->add_count);
2208}
2209
2210/**
2211 * lpfc_sli_hbqbuf_init_hbqs - Post initial buffers to the HBQ
2212 * @phba: Pointer to HBA context object.
2213 * @qno:  HBQ queue number.
2214 *
2215 * This function is called from SLI initialization code path with
2216 * no lock held to post initial HBQ buffers to firmware. The
2217 * function returns the number of HBQ entries successfully posted.
2218 **/
2219static int
2220lpfc_sli_hbqbuf_init_hbqs(struct lpfc_hba *phba, uint32_t qno)
2221{
2222        if (phba->sli_rev == LPFC_SLI_REV4)
2223                return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
2224                                        lpfc_hbq_defs[qno]->entry_count);
2225        else
2226                return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
2227                                         lpfc_hbq_defs[qno]->init_count);
2228}
2229
2230/**
2231 * lpfc_sli_hbqbuf_get - Remove the first hbq buffer off of an hbq list
2232 * @rb_list: Pointer to the hbq buffer list.
2233 *
2234 * Removes and returns the first hbq buffer on the list, or NULL if empty.
2235 **/
2236static struct hbq_dmabuf *
2237lpfc_sli_hbqbuf_get(struct list_head *rb_list)
2238{
2239        struct lpfc_dmabuf *d_buf;
2240
2241        list_remove_head(rb_list, d_buf, struct lpfc_dmabuf, list);
2242        if (!d_buf)
2243                return NULL;
2244        return container_of(d_buf, struct hbq_dmabuf, dbuf);
2245}
2246
2247/**
2248 * lpfc_sli_rqbuf_get - Remove the first dma buffer off of an RQ list
2249 * @phba: Pointer to HBA context object.
2250 * @hrq: Pointer to the header receive queue.
2251 *
2252 * This function removes the first RQ buffer on an RQ buffer list and returns a
2253 * pointer to that buffer. If it finds no buffers on the list it returns NULL.
2254 **/
2255static struct rqb_dmabuf *
2256lpfc_sli_rqbuf_get(struct lpfc_hba *phba, struct lpfc_queue *hrq)
2257{
2258        struct lpfc_dmabuf *h_buf;
2259        struct lpfc_rqb *rqbp;
2260
2261        rqbp = hrq->rqbp;
2262        list_remove_head(&rqbp->rqb_buffer_list, h_buf,
2263                         struct lpfc_dmabuf, list);
2264        if (!h_buf)
2265                return NULL;
2266        rqbp->buffer_count--;
2267        return container_of(h_buf, struct rqb_dmabuf, hbuf);
2268}
2269
2270/**
2271 * lpfc_sli_hbqbuf_find - Find the hbq buffer associated with a tag
2272 * @phba: Pointer to HBA context object.
2273 * @tag: Tag of the hbq buffer.
2274 *
2275 * This function searches for the hbq buffer associated with the given tag in
2276 * the hbq buffer list. If it finds the hbq buffer, it returns the hbq_buffer
2277 * otherwise it returns NULL.
2278 **/
2279static struct hbq_dmabuf *
2280lpfc_sli_hbqbuf_find(struct lpfc_hba *phba, uint32_t tag)
2281{
2282        struct lpfc_dmabuf *d_buf;
2283        struct hbq_dmabuf *hbq_buf;
2284        uint32_t hbqno;
2285
2286        hbqno = tag >> 16;
2287        if (hbqno >= LPFC_MAX_HBQS)
2288                return NULL;
2289
2290        spin_lock_irq(&phba->hbalock);
2291        list_for_each_entry(d_buf, &phba->hbqs[hbqno].hbq_buffer_list, list) {
2292                hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
2293                if (hbq_buf->tag == tag) {
2294                        spin_unlock_irq(&phba->hbalock);
2295                        return hbq_buf;
2296                }
2297        }
2298        spin_unlock_irq(&phba->hbalock);
2299        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2300                        "1803 Bad hbq tag. Data: x%x x%x\n",
2301                        tag, phba->hbqs[tag >> 16].buffer_count);
2302        return NULL;
2303}
2304
2305/**
2306 * lpfc_sli_free_hbq - Give back the hbq buffer to firmware
2307 * @phba: Pointer to HBA context object.
2308 * @hbq_buffer: Pointer to HBQ buffer.
2309 *
2310 * This function is called with the hbalock held. It gives back
2311 * the hbq buffer to the firmware. If the HBQ does not have space to
2312 * post the buffer, it will free the buffer.
2313 **/
2314void
2315lpfc_sli_free_hbq(struct lpfc_hba *phba, struct hbq_dmabuf *hbq_buffer)
2316{
2317        uint32_t hbqno;
2318
2319        if (hbq_buffer) {
2320                hbqno = hbq_buffer->tag >> 16;
2321                if (lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer))
2322                        (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
2323        }
2324}
2325
2326/**
2327 * lpfc_sli_chk_mbx_command - Check if the mailbox is a legitimate mailbox
2328 * @mbxCommand: mailbox command code.
2329 *
2330 * This function is called by the mailbox event handler function to verify
2331 * that the completed mailbox command is a legitimate mailbox command. If the
2332 * completed mailbox is not known to the function, it will return MBX_SHUTDOWN
2333 * and the mailbox event handler will take the HBA offline.
2334 **/
2335static int
2336lpfc_sli_chk_mbx_command(uint8_t mbxCommand)
2337{
2338        uint8_t ret;
2339
2340        switch (mbxCommand) {
2341        case MBX_LOAD_SM:
2342        case MBX_READ_NV:
2343        case MBX_WRITE_NV:
2344        case MBX_WRITE_VPARMS:
2345        case MBX_RUN_BIU_DIAG:
2346        case MBX_INIT_LINK:
2347        case MBX_DOWN_LINK:
2348        case MBX_CONFIG_LINK:
2349        case MBX_CONFIG_RING:
2350        case MBX_RESET_RING:
2351        case MBX_READ_CONFIG:
2352        case MBX_READ_RCONFIG:
2353        case MBX_READ_SPARM:
2354        case MBX_READ_STATUS:
2355        case MBX_READ_RPI:
2356        case MBX_READ_XRI:
2357        case MBX_READ_REV:
2358        case MBX_READ_LNK_STAT:
2359        case MBX_REG_LOGIN:
2360        case MBX_UNREG_LOGIN:
2361        case MBX_CLEAR_LA:
2362        case MBX_DUMP_MEMORY:
2363        case MBX_DUMP_CONTEXT:
2364        case MBX_RUN_DIAGS:
2365        case MBX_RESTART:
2366        case MBX_UPDATE_CFG:
2367        case MBX_DOWN_LOAD:
2368        case MBX_DEL_LD_ENTRY:
2369        case MBX_RUN_PROGRAM:
2370        case MBX_SET_MASK:
2371        case MBX_SET_VARIABLE:
2372        case MBX_UNREG_D_ID:
2373        case MBX_KILL_BOARD:
2374        case MBX_CONFIG_FARP:
2375        case MBX_BEACON:
2376        case MBX_LOAD_AREA:
2377        case MBX_RUN_BIU_DIAG64:
2378        case MBX_CONFIG_PORT:
2379        case MBX_READ_SPARM64:
2380        case MBX_READ_RPI64:
2381        case MBX_REG_LOGIN64:
2382        case MBX_READ_TOPOLOGY:
2383        case MBX_WRITE_WWN:
2384        case MBX_SET_DEBUG:
2385        case MBX_LOAD_EXP_ROM:
2386        case MBX_ASYNCEVT_ENABLE:
2387        case MBX_REG_VPI:
2388        case MBX_UNREG_VPI:
2389        case MBX_HEARTBEAT:
2390        case MBX_PORT_CAPABILITIES:
2391        case MBX_PORT_IOV_CONTROL:
2392        case MBX_SLI4_CONFIG:
2393        case MBX_SLI4_REQ_FTRS:
2394        case MBX_REG_FCFI:
2395        case MBX_UNREG_FCFI:
2396        case MBX_REG_VFI:
2397        case MBX_UNREG_VFI:
2398        case MBX_INIT_VPI:
2399        case MBX_INIT_VFI:
2400        case MBX_RESUME_RPI:
2401        case MBX_READ_EVENT_LOG_STATUS:
2402        case MBX_READ_EVENT_LOG:
2403        case MBX_SECURITY_MGMT:
2404        case MBX_AUTH_PORT:
2405        case MBX_ACCESS_VDATA:
2406                ret = mbxCommand;
2407                break;
2408        default:
2409                ret = MBX_SHUTDOWN;
2410                break;
2411        }
2412        return ret;
2413}
2414
2415/**
2416 * lpfc_sli_wake_mbox_wait - lpfc_sli_issue_mbox_wait mbox completion handler
2417 * @phba: Pointer to HBA context object.
2418 * @pmboxq: Pointer to mailbox command.
2419 *
2420 * This is the completion handler function for mailbox commands issued
2421 * from the lpfc_sli_issue_mbox_wait function. It is called by the
2422 * mailbox event handler function with no lock held. It fires the
2423 * completion pointed to by context3 of the mailbox to wake up the
2424 * waiting thread.
2425 **/
2426void
2427lpfc_sli_wake_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
2428{
2429        unsigned long drvr_flag;
2430        struct completion *pmbox_done;
2431
2432        /*
2433         * If pmbox_done is empty, the driver thread gave up waiting and
2434         * continued running.
2435         */
2436        pmboxq->mbox_flag |= LPFC_MBX_WAKE;
2437        spin_lock_irqsave(&phba->hbalock, drvr_flag);
2438        pmbox_done = (struct completion *)pmboxq->context3;
2439        if (pmbox_done)
2440                complete(pmbox_done);
2441        spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
2442        return;
2443}
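
/*
 * Waiter-side sketch (illustrative, following the pattern used by
 * lpfc_sli_issue_mbox_wait() elsewhere in this file): context3 points
 * at an on-stack completion that this handler fires.
 *
 *        DECLARE_COMPLETION_ONSTACK(mbox_done);
 *
 *        pmboxq->mbox_cmpl = lpfc_sli_wake_mbox_wait;
 *        pmboxq->context3 = &mbox_done;
 *        rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
 *        if (rc == MBX_BUSY || rc == MBX_SUCCESS)
 *                wait_for_completion_timeout(&mbox_done,
 *                                msecs_to_jiffies(timeout * 1000));
 */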
2444
2445static void
2446__lpfc_sli_rpi_release(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
2447{
2448        unsigned long iflags;
2449
2450        if (ndlp->nlp_flag & NLP_RELEASE_RPI) {
2451                lpfc_sli4_free_rpi(vport->phba, ndlp->nlp_rpi);
2452                spin_lock_irqsave(&vport->phba->ndlp_lock, iflags);
2453                ndlp->nlp_flag &= ~NLP_RELEASE_RPI;
2454                ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR;
2455                spin_unlock_irqrestore(&vport->phba->ndlp_lock, iflags);
2456        }
2457        ndlp->nlp_flag &= ~NLP_UNREG_INP;
2458}
2459
2460/**
2461 * lpfc_sli_def_mbox_cmpl - Default mailbox completion handler
2462 * @phba: Pointer to HBA context object.
2463 * @pmb: Pointer to mailbox object.
2464 *
2465 * This function is the default mailbox completion handler. It
2466 * frees the memory resources associated with the completed mailbox
2467 * command. If the completed command is a REG_LOGIN mailbox command,
2468 * this function will issue a UREG_LOGIN to re-claim the RPI.
2469 **/
2470void
2471lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
2472{
2473        struct lpfc_vport  *vport = pmb->vport;
2474        struct lpfc_dmabuf *mp;
2475        struct lpfc_nodelist *ndlp;
2476        struct Scsi_Host *shost;
2477        uint16_t rpi, vpi;
2478        int rc;
2479
2480        mp = (struct lpfc_dmabuf *)(pmb->ctx_buf);
2481
2482        if (mp) {
2483                lpfc_mbuf_free(phba, mp->virt, mp->phys);
2484                kfree(mp);
2485        }
2486
2487        /*
2488         * If a REG_LOGIN succeeded after the node was destroyed or the
2489         * node is in re-discovery, the driver needs to clean up the RPI.
2490         */
2491        if (!(phba->pport->load_flag & FC_UNLOADING) &&
2492            pmb->u.mb.mbxCommand == MBX_REG_LOGIN64 &&
2493            !pmb->u.mb.mbxStatus) {
2494                rpi = pmb->u.mb.un.varWords[0];
2495                vpi = pmb->u.mb.un.varRegLogin.vpi;
2496                if (phba->sli_rev == LPFC_SLI_REV4)
2497                        vpi -= phba->sli4_hba.max_cfg_param.vpi_base;
2498                lpfc_unreg_login(phba, vpi, rpi, pmb);
2499                pmb->vport = vport;
2500                pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
2501                rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
2502                if (rc != MBX_NOT_FINISHED)
2503                        return;
2504        }
2505
2506        if ((pmb->u.mb.mbxCommand == MBX_REG_VPI) &&
2507                !(phba->pport->load_flag & FC_UNLOADING) &&
2508                !pmb->u.mb.mbxStatus) {
2509                shost = lpfc_shost_from_vport(vport);
2510                spin_lock_irq(shost->host_lock);
2511                vport->vpi_state |= LPFC_VPI_REGISTERED;
2512                vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI;
2513                spin_unlock_irq(shost->host_lock);
2514        }
2515
2516        if (pmb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
2517                ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
2518                lpfc_nlp_put(ndlp);
2519                pmb->ctx_buf = NULL;
2520                pmb->ctx_ndlp = NULL;
2521        }
2522
2523        if (pmb->u.mb.mbxCommand == MBX_UNREG_LOGIN) {
2524                ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
2525
2526                /* Check to see if there are any deferred events to process */
2527                if (ndlp) {
2528                        lpfc_printf_vlog(
2529                                vport,
2530                                KERN_INFO, LOG_MBOX | LOG_DISCOVERY,
2531                                "1438 UNREG cmpl deferred mbox x%x "
2532                                "on NPort x%x Data: x%x x%x %px\n",
2533                                ndlp->nlp_rpi, ndlp->nlp_DID,
2534                                ndlp->nlp_flag, ndlp->nlp_defer_did, ndlp);
2535
2536                        if ((ndlp->nlp_flag & NLP_UNREG_INP) &&
2537                            (ndlp->nlp_defer_did != NLP_EVT_NOTHING_PENDING)) {
2538                                ndlp->nlp_flag &= ~NLP_UNREG_INP;
2539                                ndlp->nlp_defer_did = NLP_EVT_NOTHING_PENDING;
2540                                lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
2541                        } else {
2542                                __lpfc_sli_rpi_release(vport, ndlp);
2543                        }
2544                        if (vport->load_flag & FC_UNLOADING)
2545                                lpfc_nlp_put(ndlp);
2546                        pmb->ctx_ndlp = NULL;
2547                }
2548        }
2549
2550        /* Check security permission status on INIT_LINK mailbox command */
2551        if ((pmb->u.mb.mbxCommand == MBX_INIT_LINK) &&
2552            (pmb->u.mb.mbxStatus == MBXERR_SEC_NO_PERMISSION))
2553                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2554                                "2860 SLI authentication is required "
2555                                "for INIT_LINK but has not done yet\n");
2556
2557        if (bf_get(lpfc_mqe_command, &pmb->u.mqe) == MBX_SLI4_CONFIG)
2558                lpfc_sli4_mbox_cmd_free(phba, pmb);
2559        else
2560                mempool_free(pmb, phba->mbox_mem_pool);
2561}
2562/**
2563 * lpfc_sli4_unreg_rpi_cmpl_clr - mailbox completion handler
2564 * @phba: Pointer to HBA context object.
2565 * @pmb: Pointer to mailbox object.
2566 *
2567 * This function is the unreg rpi mailbox completion handler. It
2568 * frees the memory resources associated with the completed mailbox
2569 * command. An additional reference is taken on the ndlp to prevent
2570 * lpfc_nlp_release from freeing the rpi bit in the bitmask before
2571 * the unreg mailbox command completes; this routine puts that
2572 * reference back.
2573 *
2574 **/
2575void
2576lpfc_sli4_unreg_rpi_cmpl_clr(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
2577{
2578        struct lpfc_vport  *vport = pmb->vport;
2579        struct lpfc_nodelist *ndlp;
2580
2581        ndlp = pmb->ctx_ndlp;
2582        if (pmb->u.mb.mbxCommand == MBX_UNREG_LOGIN) {
2583                if (phba->sli_rev == LPFC_SLI_REV4 &&
2584                    (bf_get(lpfc_sli_intf_if_type,
2585                     &phba->sli4_hba.sli_intf) >=
2586                     LPFC_SLI_INTF_IF_TYPE_2)) {
2587                        if (ndlp) {
2588                                lpfc_printf_vlog(
2589                                        vport, KERN_INFO, LOG_MBOX | LOG_SLI,
2590                                         "0010 UNREG_LOGIN vpi:%x "
2591                                         "rpi:%x DID:%x defer x%x flg x%x "
2592                                         "map:%x %px\n",
2593                                         vport->vpi, ndlp->nlp_rpi,
2594                                         ndlp->nlp_DID, ndlp->nlp_defer_did,
2595                                         ndlp->nlp_flag,
2596                                         ndlp->nlp_usg_map, ndlp);
2597                                ndlp->nlp_flag &= ~NLP_LOGO_ACC;
2598                                lpfc_nlp_put(ndlp);
2599
2600                                /* Check to see if there are any deferred
2601                                 * events to process
2602                                 */
2603                                if ((ndlp->nlp_flag & NLP_UNREG_INP) &&
2604                                    (ndlp->nlp_defer_did !=
2605                                    NLP_EVT_NOTHING_PENDING)) {
2606                                        lpfc_printf_vlog(
2607                                                vport, KERN_INFO, LOG_DISCOVERY,
2608                                                "4111 UNREG cmpl deferred "
2609                                                "clr x%x on "
2610                                                "NPort x%x Data: x%x x%px\n",
2611                                                ndlp->nlp_rpi, ndlp->nlp_DID,
2612                                                ndlp->nlp_defer_did, ndlp);
2613                                        ndlp->nlp_flag &= ~NLP_UNREG_INP;
2614                                        ndlp->nlp_defer_did =
2615                                                NLP_EVT_NOTHING_PENDING;
2616                                        lpfc_issue_els_plogi(
2617                                                vport, ndlp->nlp_DID, 0);
2618                                } else {
2619                                        __lpfc_sli_rpi_release(vport, ndlp);
2620                                }
2621                        }
2622                }
2623        }
2624
2625        mempool_free(pmb, phba->mbox_mem_pool);
2626}
2627
2628/**
2629 * lpfc_sli_handle_mb_event - Handle mailbox completions from firmware
2630 * @phba: Pointer to HBA context object.
2631 *
2632 * This function is called with no lock held. This function processes all
2633 * the completed mailbox commands and gives them to the upper layers. The
2634 * interrupt service routine processes the mailbox completion interrupt and
2635 * adds completed mailbox commands to the mboxq_cmpl queue, then signals the
2636 * worker thread. The worker thread calls lpfc_sli_handle_mb_event, which
2637 * returns the completed mailbox commands in the mboxq_cmpl queue to the
2638 * upper layers by calling the completion handler function of each
2639 * mailbox.
2640 **/
2641int
2642lpfc_sli_handle_mb_event(struct lpfc_hba *phba)
2643{
2644        MAILBOX_t *pmbox;
2645        LPFC_MBOXQ_t *pmb;
2646        int rc;
2647        LIST_HEAD(cmplq);
2648
2649        phba->sli.slistat.mbox_event++;
2650
2651        /* Get all completed mailbox buffers into the cmplq */
2652        spin_lock_irq(&phba->hbalock);
2653        list_splice_init(&phba->sli.mboxq_cmpl, &cmplq);
2654        spin_unlock_irq(&phba->hbalock);
2655
2656        /* Get a Mailbox buffer to setup mailbox commands for callback */
2657        do {
2658                list_remove_head(&cmplq, pmb, LPFC_MBOXQ_t, list);
2659                if (pmb == NULL)
2660                        break;
2661
2662                pmbox = &pmb->u.mb;
2663
2664                if (pmbox->mbxCommand != MBX_HEARTBEAT) {
2665                        if (pmb->vport) {
2666                                lpfc_debugfs_disc_trc(pmb->vport,
2667                                        LPFC_DISC_TRC_MBOX_VPORT,
2668                                        "MBOX cmpl vport: cmd:x%x mb:x%x x%x",
2669                                        (uint32_t)pmbox->mbxCommand,
2670                                        pmbox->un.varWords[0],
2671                                        pmbox->un.varWords[1]);
2672                        }
2673                        else {
2674                                lpfc_debugfs_disc_trc(phba->pport,
2675                                        LPFC_DISC_TRC_MBOX,
2676                                        "MBOX cmpl:       cmd:x%x mb:x%x x%x",
2677                                        (uint32_t)pmbox->mbxCommand,
2678                                        pmbox->un.varWords[0],
2679                                        pmbox->un.varWords[1]);
2680                        }
2681                }
2682
2683                /*
2684                 * It is a fatal error if an unknown mbox command completes.
2685                 */
2686                if (lpfc_sli_chk_mbx_command(pmbox->mbxCommand) ==
2687                    MBX_SHUTDOWN) {
2688                        /* Unknown mailbox command compl */
2689                        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2690                                        "(%d):0323 Unknown Mailbox command "
2691                                        "x%x (x%x/x%x) Cmpl\n",
2692                                        pmb->vport ? pmb->vport->vpi :
2693                                        LPFC_VPORT_UNKNOWN,
2694                                        pmbox->mbxCommand,
2695                                        lpfc_sli_config_mbox_subsys_get(phba,
2696                                                                        pmb),
2697                                        lpfc_sli_config_mbox_opcode_get(phba,
2698                                                                        pmb));
2699                        phba->link_state = LPFC_HBA_ERROR;
2700                        phba->work_hs = HS_FFER3;
2701                        lpfc_handle_eratt(phba);
2702                        continue;
2703                }
2704
2705                if (pmbox->mbxStatus) {
2706                        phba->sli.slistat.mbox_stat_err++;
2707                        if (pmbox->mbxStatus == MBXERR_NO_RESOURCES) {
2708                                /* Mbox cmd cmpl error - RETRYing */
2709                                lpfc_printf_log(phba, KERN_INFO,
2710                                        LOG_MBOX | LOG_SLI,
2711                                        "(%d):0305 Mbox cmd cmpl "
2712                                        "error - RETRYing Data: x%x "
2713                                        "(x%x/x%x) x%x x%x x%x\n",
2714                                        pmb->vport ? pmb->vport->vpi :
2715                                        LPFC_VPORT_UNKNOWN,
2716                                        pmbox->mbxCommand,
2717                                        lpfc_sli_config_mbox_subsys_get(phba,
2718                                                                        pmb),
2719                                        lpfc_sli_config_mbox_opcode_get(phba,
2720                                                                        pmb),
2721                                        pmbox->mbxStatus,
2722                                        pmbox->un.varWords[0],
2723                                        pmb->vport ? pmb->vport->port_state :
2724                                        LPFC_VPORT_UNKNOWN);
2725                                pmbox->mbxStatus = 0;
2726                                pmbox->mbxOwner = OWN_HOST;
2727                                rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
2728                                if (rc != MBX_NOT_FINISHED)
2729                                        continue;
2730                        }
2731                }
2732
2733                /* Mailbox cmd <cmd> Cmpl <cmpl> */
2734                lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
2735                                "(%d):0307 Mailbox cmd x%x (x%x/x%x) Cmpl %ps "
2736                                "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x "
2737                                "x%x x%x x%x\n",
2738                                pmb->vport ? pmb->vport->vpi : 0,
2739                                pmbox->mbxCommand,
2740                                lpfc_sli_config_mbox_subsys_get(phba, pmb),
2741                                lpfc_sli_config_mbox_opcode_get(phba, pmb),
2742                                pmb->mbox_cmpl,
2743                                *((uint32_t *) pmbox),
2744                                pmbox->un.varWords[0],
2745                                pmbox->un.varWords[1],
2746                                pmbox->un.varWords[2],
2747                                pmbox->un.varWords[3],
2748                                pmbox->un.varWords[4],
2749                                pmbox->un.varWords[5],
2750                                pmbox->un.varWords[6],
2751                                pmbox->un.varWords[7],
2752                                pmbox->un.varWords[8],
2753                                pmbox->un.varWords[9],
2754                                pmbox->un.varWords[10]);
2755
2756                if (pmb->mbox_cmpl)
2757                        pmb->mbox_cmpl(phba, pmb);
2758        } while (1);
2759        return 0;
2760}
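
/*
 * Illustrative sketch, not part of the driver: a minimal mailbox
 * completion handler of the kind invoked through pmb->mbox_cmpl in the
 * loop above. The handler name and log number are hypothetical; real
 * handlers (e.g. lpfc_sli_def_mbox_cmpl) also release any DMA buffers
 * attached to the command before freeing the mailbox object.
 */
static void __maybe_unused
lpfc_example_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	MAILBOX_t *mb = &pmb->u.mb;

	/* Report a non-zero completion status. */
	if (mb->mbxStatus)
		lpfc_printf_log(phba, KERN_INFO, LOG_MBOX,
				"0000 Example mbox x%x cmpl status x%x\n",
				mb->mbxCommand, mb->mbxStatus);

	/* Return the mailbox object to the driver's free pool. */
	mempool_free(pmb, phba->mbox_mem_pool);
}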
2761
2762/**
2763 * lpfc_sli_get_buff - Get the buffer associated with the buffer tag
2764 * @phba: Pointer to HBA context object.
2765 * @pring: Pointer to driver SLI ring object.
2766 * @tag: buffer tag.
2767 *
2768 * This function is called with no lock held. When the QUE_BUFTAG_BIT
2769 * bit is set in the tag, the buffer was posted for a particular
2770 * exchange and the function returns it without posting a replacement.
2771 * If the buffer is for unsolicited ELS or CT traffic, this function
2772 * returns the buffer and also posts another buffer to the firmware.
2773 **/
2774static struct lpfc_dmabuf *
2775lpfc_sli_get_buff(struct lpfc_hba *phba,
2776                  struct lpfc_sli_ring *pring,
2777                  uint32_t tag)
2778{
2779        struct hbq_dmabuf *hbq_entry;
2780
2781        if (tag & QUE_BUFTAG_BIT)
2782                return lpfc_sli_ring_taggedbuf_get(phba, pring, tag);
2783        hbq_entry = lpfc_sli_hbqbuf_find(phba, tag);
2784        if (!hbq_entry)
2785                return NULL;
2786        return &hbq_entry->dbuf;
2787}
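
/*
 * Illustrative sketch, not part of the driver: the typical life cycle of
 * a buffer returned by lpfc_sli_get_buff(). The helper name is
 * hypothetical; the point is that consumers inspect the DMA buffer and
 * then recycle it with lpfc_in_buf_free(), which reposts it as needed.
 */
static void __maybe_unused
lpfc_example_consume_buff(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
			  uint32_t tag)
{
	struct lpfc_dmabuf *mp;

	mp = lpfc_sli_get_buff(phba, pring, tag);
	if (!mp)
		return;

	/* ... examine mp->virt (CPU address) / mp->phys (DMA address) ... */

	/* Recycle the buffer. */
	lpfc_in_buf_free(phba, mp);
}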
2788
2789/**
2790 * lpfc_nvme_unsol_ls_handler - Process an unsolicited event data buffer
2791 *                              containing an NVME LS request.
2792 * @phba: pointer to lpfc hba data structure.
2793 * @piocb: pointer to the iocbq struct representing the sequence starting
2794 *        frame.
2795 *
2796 * This routine initially validates the NVME LS, validates that there is
2797 * a login with the port that sent the LS, and then calls the appropriate
2798 * nvme host or target LS request handler.
2799 **/
2800static void
2801lpfc_nvme_unsol_ls_handler(struct lpfc_hba *phba, struct lpfc_iocbq *piocb)
2802{
2803        struct lpfc_nodelist *ndlp;
2804        struct lpfc_dmabuf *d_buf;
2805        struct hbq_dmabuf *nvmebuf;
2806        struct fc_frame_header *fc_hdr;
2807        struct lpfc_async_xchg_ctx *axchg = NULL;
2808        char *failwhy = NULL;
2809        uint32_t oxid, sid, did, fctl, size;
2810        int ret = 1;
2811
2812        d_buf = piocb->context2;
2813
2814        nvmebuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
2815        fc_hdr = nvmebuf->hbuf.virt;
2816        oxid = be16_to_cpu(fc_hdr->fh_ox_id);
2817        sid = sli4_sid_from_fc_hdr(fc_hdr);
2818        did = sli4_did_from_fc_hdr(fc_hdr);
2819        fctl = (fc_hdr->fh_f_ctl[0] << 16 |
2820                fc_hdr->fh_f_ctl[1] << 8 |
2821                fc_hdr->fh_f_ctl[2]);
2822        size = bf_get(lpfc_rcqe_length, &nvmebuf->cq_event.cqe.rcqe_cmpl);
2823
2824        lpfc_nvmeio_data(phba, "NVME LS    RCV: xri x%x sz %d from %06x\n",
2825                         oxid, size, sid);
2826
2827        if (phba->pport->load_flag & FC_UNLOADING) {
2828                failwhy = "Driver Unloading";
2829        } else if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)) {
2830                failwhy = "NVME FC4 Disabled";
2831        } else if (!phba->nvmet_support && !phba->pport->localport) {
2832                failwhy = "No Localport";
2833        } else if (phba->nvmet_support && !phba->targetport) {
2834                failwhy = "No Targetport";
2835        } else if (unlikely(fc_hdr->fh_r_ctl != FC_RCTL_ELS4_REQ)) {
2836                failwhy = "Bad NVME LS R_CTL";
2837        } else if (unlikely((fctl & 0x00FF0000) !=
2838                        (FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT))) {
2839                failwhy = "Bad NVME LS F_CTL";
2840        } else {
2841                axchg = kzalloc(sizeof(*axchg), GFP_ATOMIC);
2842                if (!axchg)
2843                        failwhy = "No CTX memory";
2844        }
2845
2846        if (unlikely(failwhy)) {
2847                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2848                                "6154 Drop NVME LS: SID %06X OXID x%X: %s\n",
2849                                sid, oxid, failwhy);
2850                goto out_fail;
2851        }
2852
2853        /* validate that the source of the LS is logged in */
2854        ndlp = lpfc_findnode_did(phba->pport, sid);
2855        if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) ||
2856            ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
2857             (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
2858                lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC,
2859                                "6216 NVME Unsol rcv: No ndlp: "
2860                                "NPort_ID x%x oxid x%x\n",
2861                                sid, oxid);
2862                goto out_fail;
2863        }
2864
2865        axchg->phba = phba;
2866        axchg->ndlp = ndlp;
2867        axchg->size = size;
2868        axchg->oxid = oxid;
2869        axchg->sid = sid;
2870        axchg->wqeq = NULL;
2871        axchg->state = LPFC_NVME_STE_LS_RCV;
2872        axchg->entry_cnt = 1;
2873        axchg->rqb_buffer = (void *)nvmebuf;
2874        axchg->hdwq = &phba->sli4_hba.hdwq[0];
2875        axchg->payload = nvmebuf->dbuf.virt;
2876        INIT_LIST_HEAD(&axchg->list);
2877
2878        if (phba->nvmet_support)
2879                ret = lpfc_nvmet_handle_lsreq(phba, axchg);
2880        else
2881                ret = lpfc_nvme_handle_lsreq(phba, axchg);
2882
2883        /* if zero, LS was successfully handled. If non-zero, LS not handled */
2884        if (!ret)
2885                return;
2886
2887        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2888                        "6155 Drop NVME LS from DID %06X: SID %06X OXID x%X "
2889                        "NVMe%s handler failed %d\n",
2890                        did, sid, oxid,
2891                        (phba->nvmet_support) ? "T" : "I", ret);
2892
2893out_fail:
2894
2895        /* recycle receive buffer */
2896        lpfc_in_buf_free(phba, &nvmebuf->dbuf);
2897
2898        /* If start of new exchange, abort it */
2899        if (axchg && (fctl & FC_FC_FIRST_SEQ && !(fctl & FC_FC_EX_CTX)))
2900                ret = lpfc_nvme_unsol_ls_issue_abort(phba, axchg, sid, oxid);
2901
2902        if (ret)
2903                kfree(axchg);
2904}
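
/*
 * Illustrative sketch, not part of the driver: F_CTL is a 24-bit
 * big-endian field carried as three bytes in the FC frame header, which
 * the handler above reassembles before checking the FIRST_SEQ/END_SEQ/
 * SEQ_INIT bits. A hypothetical helper making that step explicit:
 */
static uint32_t __maybe_unused
lpfc_example_fctl(struct fc_frame_header *fc_hdr)
{
	/* Byte 0 is the most significant byte of the 24-bit field. */
	return (fc_hdr->fh_f_ctl[0] << 16) |
	       (fc_hdr->fh_f_ctl[1] << 8) |
		fc_hdr->fh_f_ctl[2];
}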
2905
2906/**
2907 * lpfc_complete_unsol_iocb - Complete an unsolicited sequence
2908 * @phba: Pointer to HBA context object.
2909 * @pring: Pointer to driver SLI ring object.
2910 * @saveq: Pointer to the iocbq struct representing the sequence starting frame.
2911 * @fch_r_ctl: the r_ctl for the first frame of the sequence.
2912 * @fch_type: the type for the first frame of the sequence.
2913 *
2914 * This function is called with no lock held. It uses the r_ctl and
2915 * type of the received sequence to find the correct callback function
2916 * with which to process the sequence.
2917 **/
2918static int
2919lpfc_complete_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2920                         struct lpfc_iocbq *saveq, uint32_t fch_r_ctl,
2921                         uint32_t fch_type)
2922{
2923        int i;
2924
2925        switch (fch_type) {
2926        case FC_TYPE_NVME:
2927                lpfc_nvme_unsol_ls_handler(phba, saveq);
2928                return 1;
2929        default:
2930                break;
2931        }
2932
2933        /* Unsolicited Responses */
2934        if (pring->prt[0].profile) {
2935                if (pring->prt[0].lpfc_sli_rcv_unsol_event)
2936                        (pring->prt[0].lpfc_sli_rcv_unsol_event) (phba, pring,
2937                                                                        saveq);
2938                return 1;
2939        }
2940        /* We must search, based on rctl/type, for the right routine */
2942        for (i = 0; i < pring->num_mask; i++) {
2943                if ((pring->prt[i].rctl == fch_r_ctl) &&
2944                    (pring->prt[i].type == fch_type)) {
2945                        if (pring->prt[i].lpfc_sli_rcv_unsol_event)
2946                                (pring->prt[i].lpfc_sli_rcv_unsol_event)
2947                                                (phba, pring, saveq);
2948                        return 1;
2949                }
2950        }
2951        return 0;
2952}
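
/*
 * Illustrative sketch, not part of the driver: pring->prt[] entries pair
 * an (rctl, type) key with an unsolicited-event callback, which is how
 * lpfc_complete_unsol_iocb() above locates its handler. The values below
 * are hypothetical; the real tables are populated during ring setup for
 * ELS and CT traffic.
 */
static void __maybe_unused
lpfc_example_register_unsol(struct lpfc_sli_ring *pring)
{
	pring->num_mask = 1;
	pring->prt[0].profile = 0;	/* 0 = dispatch on rctl/type match */
	pring->prt[0].rctl = FC_RCTL_ELS_REQ;
	pring->prt[0].type = FC_TYPE_ELS;
	pring->prt[0].lpfc_sli_rcv_unsol_event = NULL;	/* hypothetical handler */
}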
2953
2954/**
2955 * lpfc_sli_process_unsol_iocb - Unsolicited iocb handler
2956 * @phba: Pointer to HBA context object.
2957 * @pring: Pointer to driver SLI ring object.
2958 * @saveq: Pointer to the unsolicited iocb.
2959 *
2960 * This function is called with no lock held by the ring event handler
2961 * when there is an unsolicited iocb posted to the response ring by the
2962 * firmware. This function gets the buffer associated with the iocbs
2963 * and calls the event handler for the ring. This function handles both
2964 * qring buffers and hbq buffers.
2965 * When the function returns 1, the caller can free the iocb object;
2966 * otherwise, upper layer functions will free the iocb objects.
2967 **/
2968static int
2969lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2970                            struct lpfc_iocbq *saveq)
2971{
2972        IOCB_t           *irsp;
2973        WORD5            *w5p;
2974        uint32_t           Rctl, Type;
2975        struct lpfc_iocbq *iocbq;
2976        struct lpfc_dmabuf *dmzbuf;
2977
2978        irsp = &(saveq->iocb);
2979
2980        if (irsp->ulpCommand == CMD_ASYNC_STATUS) {
2981                if (pring->lpfc_sli_rcv_async_status)
2982                        pring->lpfc_sli_rcv_async_status(phba, pring, saveq);
2983                else
2984                        lpfc_printf_log(phba,
2985                                        KERN_WARNING,
2986                                        LOG_SLI,
2987                                        "0316 Ring %d handler: unexpected "
2988                                        "ASYNC_STATUS iocb received evt_code "
2989                                        "0x%x\n",
2990                                        pring->ringno,
2991                                        irsp->un.asyncstat.evt_code);
2992                return 1;
2993        }
2994
2995        if ((irsp->ulpCommand == CMD_IOCB_RET_XRI64_CX) &&
2996                (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)) {
2997                if (irsp->ulpBdeCount > 0) {
2998                        dmzbuf = lpfc_sli_get_buff(phba, pring,
2999                                        irsp->un.ulpWord[3]);
3000                        lpfc_in_buf_free(phba, dmzbuf);
3001                }
3002
3003                if (irsp->ulpBdeCount > 1) {
3004                        dmzbuf = lpfc_sli_get_buff(phba, pring,
3005                                        irsp->unsli3.sli3Words[3]);
3006                        lpfc_in_buf_free(phba, dmzbuf);
3007                }
3008
3009                if (irsp->ulpBdeCount > 2) {
3010                        dmzbuf = lpfc_sli_get_buff(phba, pring,
3011                                irsp->unsli3.sli3Words[7]);
3012                        lpfc_in_buf_free(phba, dmzbuf);
3013                }
3014
3015                return 1;
3016        }
3017
3018        if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
3019                if (irsp->ulpBdeCount != 0) {
3020                        saveq->context2 = lpfc_sli_get_buff(phba, pring,
3021                                                irsp->un.ulpWord[3]);
3022                        if (!saveq->context2)
3023                                lpfc_printf_log(phba,
3024                                        KERN_ERR,
3025                                        LOG_SLI,
3026                                        "0341 Ring %d Cannot find buffer for "
3027                                        "an unsolicited iocb. tag 0x%x\n",
3028                                        pring->ringno,
3029                                        irsp->un.ulpWord[3]);
3030                }
3031                if (irsp->ulpBdeCount == 2) {
3032                        saveq->context3 = lpfc_sli_get_buff(phba, pring,
3033                                                irsp->unsli3.sli3Words[7]);
3034                        if (!saveq->context3)
3035                                lpfc_printf_log(phba,
3036                                        KERN_ERR,
3037                                        LOG_SLI,
3038                                        "0342 Ring %d Cannot find buffer for an"
3039                                        " unsolicited iocb. tag 0x%x\n",
3040                                        pring->ringno,
3041                                        irsp->unsli3.sli3Words[7]);
3042                }
3043                list_for_each_entry(iocbq, &saveq->list, list) {
3044                        irsp = &(iocbq->iocb);
3045                        if (irsp->ulpBdeCount != 0) {
3046                                iocbq->context2 = lpfc_sli_get_buff(phba, pring,
3047                                                        irsp->un.ulpWord[3]);
3048                                if (!iocbq->context2)
3049                                        lpfc_printf_log(phba,
3050                                                KERN_ERR,
3051                                                LOG_SLI,
3052                                                "0343 Ring %d Cannot find "
3053                                                "buffer for an unsolicited iocb"
3054                                                ". tag 0x%x\n", pring->ringno,
3055                                                irsp->un.ulpWord[3]);
3056                        }
3057                        if (irsp->ulpBdeCount == 2) {
3058                                iocbq->context3 = lpfc_sli_get_buff(phba, pring,
3059                                                irsp->unsli3.sli3Words[7]);
3060                                if (!iocbq->context3)
3061                                        lpfc_printf_log(phba,
3062                                                KERN_ERR,
3063                                                LOG_SLI,
3064                                                "0344 Ring %d Cannot find "
3065                                                "buffer for an unsolicited "
3066                                                "iocb. tag 0x%x\n",
3067                                                pring->ringno,
3068                                                irsp->unsli3.sli3Words[7]);
3069                        }
3070                }
3071        }
3072        if (irsp->ulpBdeCount != 0 &&
3073            (irsp->ulpCommand == CMD_IOCB_RCV_CONT64_CX ||
3074             irsp->ulpStatus == IOSTAT_INTERMED_RSP)) {
3075                int found = 0;
3076
3077                /* search continue save q for same XRI */
3078                list_for_each_entry(iocbq, &pring->iocb_continue_saveq, clist) {
3079                        if (iocbq->iocb.unsli3.rcvsli3.ox_id ==
3080                                saveq->iocb.unsli3.rcvsli3.ox_id) {
3081                                list_add_tail(&saveq->list, &iocbq->list);
3082                                found = 1;
3083                                break;
3084                        }
3085                }
3086                if (!found)
3087                        list_add_tail(&saveq->clist,
3088                                      &pring->iocb_continue_saveq);
3089                if (saveq->iocb.ulpStatus != IOSTAT_INTERMED_RSP) {
3090                        list_del_init(&iocbq->clist);
3091                        saveq = iocbq;
3092                        irsp = &(saveq->iocb);
3093                } else
3094                        return 0;
3095        }
3096        if ((irsp->ulpCommand == CMD_RCV_ELS_REQ64_CX) ||
3097            (irsp->ulpCommand == CMD_RCV_ELS_REQ_CX) ||
3098            (irsp->ulpCommand == CMD_IOCB_RCV_ELS64_CX)) {
3099                Rctl = FC_RCTL_ELS_REQ;
3100                Type = FC_TYPE_ELS;
3101        } else {
3102                w5p = (WORD5 *)&(saveq->iocb.un.ulpWord[5]);
3103                Rctl = w5p->hcsw.Rctl;
3104                Type = w5p->hcsw.Type;
3105
3106                /* Firmware Workaround */
3107                if ((Rctl == 0) && (pring->ringno == LPFC_ELS_RING) &&
3108                        (irsp->ulpCommand == CMD_RCV_SEQUENCE64_CX ||
3109                         irsp->ulpCommand == CMD_IOCB_RCV_SEQ64_CX)) {
3110                        Rctl = FC_RCTL_ELS_REQ;
3111                        Type = FC_TYPE_ELS;
3112                        w5p->hcsw.Rctl = Rctl;
3113                        w5p->hcsw.Type = Type;
3114                }
3115        }
3116
3117        if (!lpfc_complete_unsol_iocb(phba, pring, saveq, Rctl, Type))
3118                lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
3119                                "0313 Ring %d handler: unexpected Rctl x%x "
3120                                "Type x%x received\n",
3121                                pring->ringno, Rctl, Type);
3122
3123        return 1;
3124}
3125
3126/**
3127 * lpfc_sli_iocbq_lookup - Find command iocb for the given response iocb
3128 * @phba: Pointer to HBA context object.
3129 * @pring: Pointer to driver SLI ring object.
3130 * @prspiocb: Pointer to response iocb object.
3131 *
3132 * This function looks up the iocb_lookup table to get the command iocb
3133 * corresponding to the given response iocb, using the iotag of the
3134 * response iocb. The function itself takes the hbalock (SLI3 ports) or
3135 * the ring lock (SLI4 ports), so the caller must not hold either lock.
3136 * This function returns the command iocb object if it finds the command
3137 * iocb; otherwise it returns NULL.
3138 **/
3139static struct lpfc_iocbq *
3140lpfc_sli_iocbq_lookup(struct lpfc_hba *phba,
3141                      struct lpfc_sli_ring *pring,
3142                      struct lpfc_iocbq *prspiocb)
3143{
3144        struct lpfc_iocbq *cmd_iocb = NULL;
3145        uint16_t iotag;
3146        spinlock_t *temp_lock = NULL;
3147        unsigned long iflag = 0;
3148
3149        if (phba->sli_rev == LPFC_SLI_REV4)
3150                temp_lock = &pring->ring_lock;
3151        else
3152                temp_lock = &phba->hbalock;
3153
3154        spin_lock_irqsave(temp_lock, iflag);
3155        iotag = prspiocb->iocb.ulpIoTag;
3156
3157        if (iotag != 0 && iotag <= phba->sli.last_iotag) {
3158                cmd_iocb = phba->sli.iocbq_lookup[iotag];
3159                if (cmd_iocb->iocb_flag & LPFC_IO_ON_TXCMPLQ) {
3160                        /* remove from txcmpl queue list */
3161                        list_del_init(&cmd_iocb->list);
3162                        cmd_iocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
3163                        pring->txcmplq_cnt--;
3164                        spin_unlock_irqrestore(temp_lock, iflag);
3165                        return cmd_iocb;
3166                }
3167        }
3168
3169        spin_unlock_irqrestore(temp_lock, iflag);
3170        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3171                        "0317 iotag x%x is out of "
3172                        "range: max iotag x%x wd0 x%x\n",
3173                        iotag, phba->sli.last_iotag,
3174                        *(((uint32_t *) &prspiocb->iocb) + 7));
3175        return NULL;
3176}
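
/*
 * Illustrative sketch, not part of the driver: the O(1) lookup above
 * relies on every command iocb being assigned an iotag that indexes
 * phba->sli.iocbq_lookup[] when the command is issued. The helper
 * lpfc_sli_next_iotag() is the driver's real allocator; the surrounding
 * flow here is simplified and hypothetical.
 */
static void __maybe_unused
lpfc_example_tag_cmd_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *piocb)
{
	/* Allocate an iotag; this also records piocb in iocbq_lookup[]. */
	piocb->iocb.ulpIoTag = lpfc_sli_next_iotag(phba, piocb);

	/*
	 * The response iocb later carries the same ulpIoTag, letting
	 * lpfc_sli_iocbq_lookup() recover the command iocb directly.
	 */
}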
3177
3178/**
3179 * lpfc_sli_iocbq_lookup_by_tag - Find command iocb for the iotag
3180 * @phba: Pointer to HBA context object.
3181 * @pring: Pointer to driver SLI ring object.
3182 * @iotag: IOCB tag.
3183 *
3184 * This function looks up the iocb_lookup table to get the command iocb
3185 * corresponding to the given iotag. Although primarily an SLI4 helper,
3186 * the function itself takes the appropriate lock (hbalock for SLI3 or
3187 * ring lock for SLI4), so the caller must not hold it. This function
3188 * returns the command iocb object if found; otherwise it returns NULL.
3189 **/
3190static struct lpfc_iocbq *
3191lpfc_sli_iocbq_lookup_by_tag(struct lpfc_hba *phba,
3192                             struct lpfc_sli_ring *pring, uint16_t iotag)
3193{
3194        struct lpfc_iocbq *cmd_iocb = NULL;
3195        spinlock_t *temp_lock = NULL;
3196        unsigned long iflag = 0;
3197
3198        if (phba->sli_rev == LPFC_SLI_REV4)
3199                temp_lock = &pring->ring_lock;
3200        else
3201                temp_lock = &phba->hbalock;
3202
3203        spin_lock_irqsave(temp_lock, iflag);
3204        if (iotag != 0 && iotag <= phba->sli.last_iotag) {
3205                cmd_iocb = phba->sli.iocbq_lookup[iotag];
3206                if (cmd_iocb->iocb_flag & LPFC_IO_ON_TXCMPLQ) {
3207                        /* remove from txcmpl queue list */
3208                        list_del_init(&cmd_iocb->list);
3209                        cmd_iocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
3210                        pring->txcmplq_cnt--;
3211                        spin_unlock_irqrestore(temp_lock, iflag);
3212                        return cmd_iocb;
3213                }
3214        }
3215
3216        spin_unlock_irqrestore(temp_lock, iflag);
3217        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3218                        "0372 iotag x%x lookup error: max iotag (x%x) "
3219                        "iocb_flag x%x\n",
3220                        iotag, phba->sli.last_iotag,
3221                        cmd_iocb ? cmd_iocb->iocb_flag : 0xffff);
3222        return NULL;
3223}
3224
3225/**
3226 * lpfc_sli_process_sol_iocb - process solicited iocb completion
3227 * @phba: Pointer to HBA context object.
3228 * @pring: Pointer to driver SLI ring object.
3229 * @saveq: Pointer to the response iocb to be processed.
3230 *
3231 * This function is called by the ring event handler for non-fcp
3232 * rings when there is a new response iocb in the response ring.
3233 * The caller is not required to hold any locks. This function
3234 * gets the command iocb associated with the response iocb and
3235 * calls the completion handler for the command iocb. If there
3236 * is no completion handler, the function will free the resources
3237 * associated with the command iocb. If the response iocb is for
3238 * an already aborted command iocb, the status of the completion
3239 * is changed to IOSTAT_LOCAL_REJECT/IOERR_SLI_ABORTED.
3240 * This function always returns 1.
3241 **/
3242static int
3243lpfc_sli_process_sol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
3244                          struct lpfc_iocbq *saveq)
3245{
3246        struct lpfc_iocbq *cmdiocbp;
3247        int rc = 1;
3248        unsigned long iflag;
3249
3250        cmdiocbp = lpfc_sli_iocbq_lookup(phba, pring, saveq);
3251        if (cmdiocbp) {
3252                if (cmdiocbp->iocb_cmpl) {
3253                        /*
3254                         * If an ELS command failed send an event to mgmt
3255                         * application.
3256                         */
3257                        if (saveq->iocb.ulpStatus &&
3258                             (pring->ringno == LPFC_ELS_RING) &&
3259                             (cmdiocbp->iocb.ulpCommand ==
3260                                CMD_ELS_REQUEST64_CR))
3261                                lpfc_send_els_failure_event(phba,
3262                                        cmdiocbp, saveq);
3263
3264                        /*
3265                         * Post all ELS completions to the worker thread.
3266                         * All other are passed to the completion callback.
3267                         */
3268                        if (pring->ringno == LPFC_ELS_RING) {
3269                                if ((phba->sli_rev < LPFC_SLI_REV4) &&
3270                                    (cmdiocbp->iocb_flag &
3271                                                        LPFC_DRIVER_ABORTED)) {
3272                                        spin_lock_irqsave(&phba->hbalock,
3273                                                          iflag);
3274                                        cmdiocbp->iocb_flag &=
3275                                                ~LPFC_DRIVER_ABORTED;
3276                                        spin_unlock_irqrestore(&phba->hbalock,
3277                                                               iflag);
3278                                        saveq->iocb.ulpStatus =
3279                                                IOSTAT_LOCAL_REJECT;
3280                                        saveq->iocb.un.ulpWord[4] =
3281                                                IOERR_SLI_ABORTED;
3282
3283                                        /* Firmware could still be in progress
3284                                         * of DMAing payload, so don't free data
3285                                         * buffer till after a hbeat.
3286                                         */
3287                                        spin_lock_irqsave(&phba->hbalock,
3288                                                          iflag);
3289                                        saveq->iocb_flag |= LPFC_DELAY_MEM_FREE;
3290                                        spin_unlock_irqrestore(&phba->hbalock,
3291                                                               iflag);
3292                                }
3293                                if (phba->sli_rev == LPFC_SLI_REV4) {
3294                                        if (saveq->iocb_flag &
3295                                            LPFC_EXCHANGE_BUSY) {
3296                                                /* Set cmdiocb flag for the
3297                                                 * exchange busy so sgl (xri)
3298                                                 * will not be released until
3299                                                 * the abort xri is received
3300                                                 * from hba.
3301                                                 */
3302                                                spin_lock_irqsave(
3303                                                        &phba->hbalock, iflag);
3304                                                cmdiocbp->iocb_flag |=
3305                                                        LPFC_EXCHANGE_BUSY;
3306                                                spin_unlock_irqrestore(
3307                                                        &phba->hbalock, iflag);
3308                                        }
3309                                        if (cmdiocbp->iocb_flag &
3310                                            LPFC_DRIVER_ABORTED) {
3311                                                /*
3312                                                 * Clear LPFC_DRIVER_ABORTED
3313                                                 * bit in case it was driver
3314                                                 * initiated abort.
3315                                                 */
3316                                                spin_lock_irqsave(
3317                                                        &phba->hbalock, iflag);
3318                                                cmdiocbp->iocb_flag &=
3319                                                        ~LPFC_DRIVER_ABORTED;
3320                                                spin_unlock_irqrestore(
3321                                                        &phba->hbalock, iflag);
3322                                                cmdiocbp->iocb.ulpStatus =
3323                                                        IOSTAT_LOCAL_REJECT;
3324                                                cmdiocbp->iocb.un.ulpWord[4] =
3325                                                        IOERR_ABORT_REQUESTED;
3326                                                /*
3327                                                 * For SLI4, irsiocb contains
3328                                                 * NO_XRI in sli_xritag, it
3329                                                 * shall not affect releasing
3330                                                 * sgl (xri) process.
3331                                                 */
3332                                                saveq->iocb.ulpStatus =
3333                                                        IOSTAT_LOCAL_REJECT;
3334                                                saveq->iocb.un.ulpWord[4] =
3335                                                        IOERR_SLI_ABORTED;
3336                                                spin_lock_irqsave(
3337                                                        &phba->hbalock, iflag);
3338                                                saveq->iocb_flag |=
3339                                                        LPFC_DELAY_MEM_FREE;
3340                                                spin_unlock_irqrestore(
3341                                                        &phba->hbalock, iflag);
3342                                        }
3343                                }
3344                        }
3345                        (cmdiocbp->iocb_cmpl) (phba, cmdiocbp, saveq);
3346                } else
3347                        lpfc_sli_release_iocbq(phba, cmdiocbp);
3348        } else {
3349                /*
3350                 * Unknown initiating command based on the response iotag.
3351                 * This could be the case on the ELS ring because of
3352                 * lpfc_els_abort().
3353                 */
3354                if (pring->ringno != LPFC_ELS_RING) {
3355                        /*
3356                         * Ring <ringno> handler: unexpected completion IoTag
3357                         * <IoTag>
3358                         */
3359                        lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
3360                                         "0322 Ring %d handler: "
3361                                         "unexpected completion IoTag x%x "
3362                                         "Data: x%x x%x x%x x%x\n",
3363                                         pring->ringno,
3364                                         saveq->iocb.ulpIoTag,
3365                                         saveq->iocb.ulpStatus,
3366                                         saveq->iocb.un.ulpWord[4],
3367                                         saveq->iocb.ulpCommand,
3368                                         saveq->iocb.ulpContext);
3369                }
3370        }
3371
3372        return rc;
3373}
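
/*
 * Illustrative sketch, not part of the driver: iocb_flag updates in the
 * completion path above are always made under phba->hbalock because the
 * flags are shared with the submission and abort paths. A minimal
 * restatement of that locking pattern (the flag choice is arbitrary):
 */
static void __maybe_unused
lpfc_example_clear_abort_flag(struct lpfc_hba *phba, struct lpfc_iocbq *piocb)
{
	unsigned long iflag;

	spin_lock_irqsave(&phba->hbalock, iflag);
	piocb->iocb_flag &= ~LPFC_DRIVER_ABORTED;
	spin_unlock_irqrestore(&phba->hbalock, iflag);
}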
3374
3375/**
3376 * lpfc_sli_rsp_pointers_error - Response ring pointer error handler
3377 * @phba: Pointer to HBA context object.
3378 * @pring: Pointer to driver SLI ring object.
3379 *
3380 * This function is called from the iocb ring event handlers when the
3381 * put pointer is ahead of the get pointer for a ring. This function signals
3382 * an error attention condition to the worker thread, and the worker
3383 * thread will transition the HBA to the offline state.
3384 **/
3385static void
3386lpfc_sli_rsp_pointers_error(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
3387{
3388        struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
3389        /*
3390         * Ring <ringno> handler: portRspPut <portRspPut> is bigger than
3391         * rsp ring <portRspMax>
3392         */
3393        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3394                        "0312 Ring %d handler: portRspPut %d "
3395                        "is bigger than rsp ring %d\n",
3396                        pring->ringno, le32_to_cpu(pgp->rspPutInx),
3397                        pring->sli.sli3.numRiocb);
3398
3399        phba->link_state = LPFC_HBA_ERROR;
3400
3401        /*
3402         * All error attention handlers are posted to
3403         * worker thread
3404         */
3405        phba->work_ha |= HA_ERATT;
3406        phba->work_hs = HS_FFER3;
3407
3408        lpfc_worker_wake_up(phba);
3409
3410        return;
3411}
3412
3413/**
3414 * lpfc_poll_eratt - Error attention polling timer timeout handler
3415 * @t: Context to fetch pointer to address of HBA context object from.
3416 *
3417 * This function is invoked by the Error Attention polling timer when the
3418 * timer times out. It will check the SLI Error Attention register for
3419 * possible attention events. If so, it will post an Error Attention event
3420 * and wake up worker thread to process it. Otherwise, it will set up the
3421 * Error Attention polling timer for the next poll.
3422 **/
3423void lpfc_poll_eratt(struct timer_list *t)
3424{
3425        struct lpfc_hba *phba;
3426        uint32_t eratt = 0;
3427        uint64_t sli_intr, cnt;
3428
3429        phba = from_timer(phba, t, eratt_poll);
3430
3431        /* Here we also keep track of the hba's interrupts per second */
3432        sli_intr = phba->sli.slistat.sli_intr;
3433
3434        if (phba->sli.slistat.sli_prev_intr > sli_intr)
3435                cnt = (((uint64_t)(-1) - phba->sli.slistat.sli_prev_intr) +
3436                        sli_intr);
3437        else
3438                cnt = (sli_intr - phba->sli.slistat.sli_prev_intr);
3439
3440        /* 64-bit integer division not supported on 32-bit x86 - use do_div */
3441        do_div(cnt, phba->eratt_poll_interval);
3442        phba->sli.slistat.sli_ips = cnt;
3443
3444        phba->sli.slistat.sli_prev_intr = sli_intr;
3445
3446        /* Check chip HA register for error event */
3447        eratt = lpfc_sli_check_eratt(phba);
3448
3449        if (eratt)
3450                /* Tell the worker thread there is work to do */
3451                lpfc_worker_wake_up(phba);
3452        else
3453                /* Restart the timer for next eratt poll */
3454                mod_timer(&phba->eratt_poll,
3455                          jiffies +
3456                          msecs_to_jiffies(1000 * phba->eratt_poll_interval));
3457        return;
3458}
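
/*
 * Illustrative sketch, not part of the driver: the interrupts-per-second
 * computation above must tolerate the 64-bit counter wrapping and must
 * avoid the 64-by-32 '/' operator, which 32-bit kernels do not provide.
 * A hedged, self-contained restatement of that arithmetic:
 */
static uint64_t __maybe_unused
lpfc_example_intr_rate(uint64_t prev, uint64_t now, uint32_t interval_secs)
{
	uint64_t cnt;

	/* Wrap-safe delta: account for at most one counter wraparound. */
	if (prev > now)
		cnt = ((uint64_t)(-1) - prev) + now;
	else
		cnt = now - prev;

	/* do_div() divides a u64 by a u32 in place; 32-bit safe. */
	do_div(cnt, interval_secs);
	return cnt;
}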
3459
3460
3461/**
3462 * lpfc_sli_handle_fast_ring_event - Handle ring events on FCP ring
3463 * @phba: Pointer to HBA context object.
3464 * @pring: Pointer to driver SLI ring object.
3465 * @mask: Host attention register mask for this ring.
3466 *
3467 * This function is called from the interrupt context when there is a ring
3468 * event for the fcp ring. The caller does not hold any lock.
3469 * The function processes each response iocb in the response ring until it
3470 * finds an iocb with the LE bit set, chaining all the iocbs up to that one.
3471 * The function will call the completion handler of the command iocb
3472 * if the response iocb indicates a completion for a command iocb or it is
3473 * an abort completion. The function will call lpfc_sli_process_unsol_iocb
3474 * function if this is an unsolicited iocb.
3475 * This routine presumes LPFC_FCP_RING handling and doesn't bother
3476 * to check it explicitly.
3477 */
3478int
3479lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba,
3480                                struct lpfc_sli_ring *pring, uint32_t mask)
3481{
3482        struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
3483        IOCB_t *irsp = NULL;
3484        IOCB_t *entry = NULL;
3485        struct lpfc_iocbq *cmdiocbq = NULL;
3486        struct lpfc_iocbq rspiocbq;
3487        uint32_t status;
3488        uint32_t portRspPut, portRspMax;
3489        int rc = 1;
3490        lpfc_iocb_type type;
3491        unsigned long iflag;
3492        uint32_t rsp_cmpl = 0;
3493
3494        spin_lock_irqsave(&phba->hbalock, iflag);
3495        pring->stats.iocb_event++;
3496
3497        /*
3498         * The next available response entry should never exceed the maximum
3499         * entries.  If it does, treat it as an adapter hardware error.
3500         */
3501        portRspMax = pring->sli.sli3.numRiocb;
3502        portRspPut = le32_to_cpu(pgp->rspPutInx);
3503        if (unlikely(portRspPut >= portRspMax)) {
3504                lpfc_sli_rsp_pointers_error(phba, pring);
3505                spin_unlock_irqrestore(&phba->hbalock, iflag);
3506                return 1;
3507        }
3508        if (phba->fcp_ring_in_use) {
3509                spin_unlock_irqrestore(&phba->hbalock, iflag);
3510                return 1;
3511        } else
3512                phba->fcp_ring_in_use = 1;
3513
3514        rmb();
3515        while (pring->sli.sli3.rspidx != portRspPut) {
3516                /*
3517                 * Fetch an entry off the ring and copy it into a local data
3518                 * structure.  The copy involves a byte-swap since the
3519                 * network byte order and pci byte orders are different.
3520                 */
3521                entry = lpfc_resp_iocb(phba, pring);
3522                phba->last_completion_time = jiffies;
3523
3524                if (++pring->sli.sli3.rspidx >= portRspMax)
3525                        pring->sli.sli3.rspidx = 0;
3526
3527                lpfc_sli_pcimem_bcopy((uint32_t *) entry,
3528                                      (uint32_t *) &rspiocbq.iocb,
3529                                      phba->iocb_rsp_size);
3530                INIT_LIST_HEAD(&(rspiocbq.list));
3531                irsp = &rspiocbq.iocb;
3532
3533                type = lpfc_sli_iocb_cmd_type(irsp->ulpCommand & CMD_IOCB_MASK);
3534                pring->stats.iocb_rsp++;
3535                rsp_cmpl++;
3536
3537                if (unlikely(irsp->ulpStatus)) {
3538                        /*
3539                         * If resource errors reported from HBA, reduce
3540                         * queuedepths of the SCSI device.
3541                         */
3542                        if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
3543                            ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) ==
3544                             IOERR_NO_RESOURCES)) {
3545                                spin_unlock_irqrestore(&phba->hbalock, iflag);
3546                                phba->lpfc_rampdown_queue_depth(phba);
3547                                spin_lock_irqsave(&phba->hbalock, iflag);
3548                        }
3549
3550                        /* Rsp ring <ringno> error: IOCB */
3551                        lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
3552                                        "0336 Rsp Ring %d error: IOCB Data: "
3553                                        "x%x x%x x%x x%x x%x x%x x%x x%x\n",
3554                                        pring->ringno,
3555                                        irsp->un.ulpWord[0],
3556                                        irsp->un.ulpWord[1],
3557                                        irsp->un.ulpWord[2],
3558                                        irsp->un.ulpWord[3],
3559                                        irsp->un.ulpWord[4],
3560                                        irsp->un.ulpWord[5],
3561                                        *(uint32_t *)&irsp->un1,
3562                                        *((uint32_t *)&irsp->un1 + 1));
3563                }
3564
3565                switch (type) {
3566                case LPFC_ABORT_IOCB:
3567                case LPFC_SOL_IOCB:
3568                        /*
3569                         * Idle exchange closed via ABTS from port.  No iocb
3570                         * resources need to be recovered.
3571                         */
3572                        if (unlikely(irsp->ulpCommand == CMD_XRI_ABORTED_CX)) {
3573                                lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3574                                                "0333 IOCB cmd 0x%x"
3575                                                " processed. Skipping"
3576                                                " completion\n",
3577                                                irsp->ulpCommand);
3578                                break;
3579                        }
3580
3581                        spin_unlock_irqrestore(&phba->hbalock, iflag);
3582                        cmdiocbq = lpfc_sli_iocbq_lookup(phba, pring,
3583                                                         &rspiocbq);
3584                        spin_lock_irqsave(&phba->hbalock, iflag);
3585                        if (unlikely(!cmdiocbq))
3586                                break;
3587                        if (cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED)
3588                                cmdiocbq->iocb_flag &= ~LPFC_DRIVER_ABORTED;
3589                        if (cmdiocbq->iocb_cmpl) {
3590                                spin_unlock_irqrestore(&phba->hbalock, iflag);
3591                                (cmdiocbq->iocb_cmpl)(phba, cmdiocbq,
3592                                                      &rspiocbq);
3593                                spin_lock_irqsave(&phba->hbalock, iflag);
3594                        }
3595                        break;
3596                case LPFC_UNSOL_IOCB:
3597                        spin_unlock_irqrestore(&phba->hbalock, iflag);
3598                        lpfc_sli_process_unsol_iocb(phba, pring, &rspiocbq);
3599                        spin_lock_irqsave(&phba->hbalock, iflag);
3600                        break;
3601                default:
3602                        if (irsp->ulpCommand == CMD_ADAPTER_MSG) {
3603                                char adaptermsg[LPFC_MAX_ADPTMSG];
3604                                memset(adaptermsg, 0, LPFC_MAX_ADPTMSG);
3605                                memcpy(&adaptermsg[0], (uint8_t *) irsp,
3606                                       MAX_MSG_DATA);
3607                                dev_warn(&((phba->pcidev)->dev),
3608                                         "lpfc%d: %s\n",
3609                                         phba->brd_no, adaptermsg);
3610                        } else {
3611                                /* Unknown IOCB command */
3612                                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3613                                                "0334 Unknown IOCB command "
3614                                                "Data: x%x, x%x x%x x%x x%x\n",
3615                                                type, irsp->ulpCommand,
3616                                                irsp->ulpStatus,
3617                                                irsp->ulpIoTag,
3618                                                irsp->ulpContext);
3619                        }
3620                        break;
3621                }
3622
3623                /*
3624                 * The response IOCB has been processed.  Update the ring
3625                 * pointer in SLIM.  If we have caught up with the port
3626                 * response put pointer, re-read pgp->rspPutInx to pick up
3627                 * any entries the port posted in the meantime.
3628                 */
3629                writel(pring->sli.sli3.rspidx,
3630                        &phba->host_gp[pring->ringno].rspGetInx);
3631
3632                if (pring->sli.sli3.rspidx == portRspPut)
3633                        portRspPut = le32_to_cpu(pgp->rspPutInx);
3634        }
3635
3636        if ((rsp_cmpl > 0) && (mask & HA_R0RE_REQ)) {
3637                pring->stats.iocb_rsp_full++;
3638                status = ((CA_R0ATT | CA_R0RE_RSP) << (pring->ringno * 4));
3639                writel(status, phba->CAregaddr);
3640                readl(phba->CAregaddr);
3641        }
3642        if ((mask & HA_R0CE_RSP) && (pring->flag & LPFC_CALL_RING_AVAILABLE)) {
3643                pring->flag &= ~LPFC_CALL_RING_AVAILABLE;
3644                pring->stats.iocb_cmd_empty++;
3645
3646                /* Force update of the local copy of cmdGetInx */
3647                pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx);
3648                lpfc_sli_resume_iocb(phba, pring);
3649
3650                if ((pring->lpfc_sli_cmd_available))
3651                        (pring->lpfc_sli_cmd_available) (phba, pring);
3652
3653        }
3654
3655        phba->fcp_ring_in_use = 0;
3656        spin_unlock_irqrestore(&phba->hbalock, iflag);
3657        return rc;
3658}
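
/*
 * Illustrative sketch, not part of the driver: the fast-path loop above
 * is a producer/consumer ring walk. The port advances a put index in
 * host memory; the host consumes entries, wraps its get index at the
 * ring size, and publishes it so the port can reuse the slots. All
 * names here are hypothetical.
 */
static void __maybe_unused
lpfc_example_ring_walk(uint32_t *get, uint32_t put, uint32_t ring_size)
{
	while (*get != put) {
		/* ... consume the entry at index *get ... */

		/* Advance and wrap the host get index. */
		if (++(*get) >= ring_size)
			*get = 0;
	}
}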
3659
3660/**
3661 * lpfc_sli_sp_handle_rspiocb - Handle slow-path response iocb
3662 * @phba: Pointer to HBA context object.
3663 * @pring: Pointer to driver SLI ring object.
3664 * @rspiocbp: Pointer to driver response IOCB object.
3665 *
3666 * This function is called from the worker thread when there is a slow-path
3667 * response IOCB to process. This function chains all the response iocbs until
3668 * seeing the iocb with the LE bit set. The function will call
3669 * lpfc_sli_process_sol_iocb function if the response iocb indicates a
3670 * completion of a command iocb. The function will call the
3671 * lpfc_sli_process_unsol_iocb function if this is an unsolicited iocb.
3672 * The function frees the resources or calls the completion handler if this
3673 * iocb is an abort completion. The function returns NULL when the response
3674 * iocb has the LE bit set and all the chained iocbs are processed, otherwise
3675 * this function shall chain the iocb on to the iocb_continueq and return the
3676 * response iocb passed in.
3677 **/
3678static struct lpfc_iocbq *
3679lpfc_sli_sp_handle_rspiocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
3680                        struct lpfc_iocbq *rspiocbp)
3681{
3682        struct lpfc_iocbq *saveq;
3683        struct lpfc_iocbq *cmdiocbp;
3684        struct lpfc_iocbq *next_iocb;
3685        IOCB_t *irsp = NULL;
3686        uint32_t free_saveq;
3687        uint8_t iocb_cmd_type;
3688        lpfc_iocb_type type;
3689        unsigned long iflag;
3690        int rc;
3691
3692        spin_lock_irqsave(&phba->hbalock, iflag);
3693        /* First add the response iocb to the continueq list */
3694        list_add_tail(&rspiocbp->list, &(pring->iocb_continueq));
3695        pring->iocb_continueq_cnt++;
3696
3697        /* Now, determine whether the list is completed for processing */
3698        irsp = &rspiocbp->iocb;
3699        if (irsp->ulpLe) {
3700                /*
3701                 * By default, the driver expects to free all resources
3702                 * associated with this iocb completion.
3703                 */
3704                free_saveq = 1;
3705                saveq = list_get_first(&pring->iocb_continueq,
3706                                       struct lpfc_iocbq, list);
3707                irsp = &(saveq->iocb);
3708                list_del_init(&pring->iocb_continueq);
3709                pring->iocb_continueq_cnt = 0;
3710
3711                pring->stats.iocb_rsp++;
3712
3713                /*
3714                 * If resource errors reported from HBA, reduce
3715                 * queuedepths of the SCSI device.
3716                 */
3717                if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
3718                    ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) ==
3719                     IOERR_NO_RESOURCES)) {
3720                        spin_unlock_irqrestore(&phba->hbalock, iflag);
3721                        phba->lpfc_rampdown_queue_depth(phba);
3722                        spin_lock_irqsave(&phba->hbalock, iflag);
3723                }
3724
3725                if (irsp->ulpStatus) {
3726                        /* Rsp ring <ringno> error: IOCB */
3727                        lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
3728                                        "0328 Rsp Ring %d error: "
3729                                        "IOCB Data: "
3730                                        "x%x x%x x%x x%x "
3731                                        "x%x x%x x%x x%x "
3732                                        "x%x x%x x%x x%x "
3733                                        "x%x x%x x%x x%x\n",
3734                                        pring->ringno,
3735                                        irsp->un.ulpWord[0],
3736                                        irsp->un.ulpWord[1],
3737                                        irsp->un.ulpWord[2],
3738                                        irsp->un.ulpWord[3],
3739                                        irsp->un.ulpWord[4],
3740                                        irsp->un.ulpWord[5],
3741                                        *(((uint32_t *) irsp) + 6),
3742                                        *(((uint32_t *) irsp) + 7),
3743                                        *(((uint32_t *) irsp) + 8),
3744                                        *(((uint32_t *) irsp) + 9),
3745                                        *(((uint32_t *) irsp) + 10),
3746                                        *(((uint32_t *) irsp) + 11),
3747                                        *(((uint32_t *) irsp) + 12),
3748                                        *(((uint32_t *) irsp) + 13),
3749                                        *(((uint32_t *) irsp) + 14),
3750                                        *(((uint32_t *) irsp) + 15));
3751                }
3752
3753                /*
3754                 * Fetch the IOCB command type and call the correct completion
3755                 * routine. Solicited and Unsolicited IOCBs on the ELS ring
3756                 * get freed back to the lpfc_iocb_list by the discovery
3757                 * kernel thread.
3758                 */
3759                iocb_cmd_type = irsp->ulpCommand & CMD_IOCB_MASK;
3760                type = lpfc_sli_iocb_cmd_type(iocb_cmd_type);
3761                switch (type) {
3762                case LPFC_SOL_IOCB:
3763                        spin_unlock_irqrestore(&phba->hbalock, iflag);
3764                        rc = lpfc_sli_process_sol_iocb(phba, pring, saveq);
3765                        spin_lock_irqsave(&phba->hbalock, iflag);
3766                        break;
3767
3768                case LPFC_UNSOL_IOCB:
3769                        spin_unlock_irqrestore(&phba->hbalock, iflag);
3770                        rc = lpfc_sli_process_unsol_iocb(phba, pring, saveq);
3771                        spin_lock_irqsave(&phba->hbalock, iflag);
3772                        if (!rc)
3773                                free_saveq = 0;
3774                        break;
3775
3776                case LPFC_ABORT_IOCB:
3777                        cmdiocbp = NULL;
3778                        if (irsp->ulpCommand != CMD_XRI_ABORTED_CX) {
3779                                spin_unlock_irqrestore(&phba->hbalock, iflag);
3780                                cmdiocbp = lpfc_sli_iocbq_lookup(phba, pring,
3781                                                                 saveq);
3782                                spin_lock_irqsave(&phba->hbalock, iflag);
3783                        }
3784                        if (cmdiocbp) {
3785                                /* Call the specified completion routine */
3786                                if (cmdiocbp->iocb_cmpl) {
3787                                        spin_unlock_irqrestore(&phba->hbalock,
3788                                                               iflag);
3789                                        (cmdiocbp->iocb_cmpl)(phba, cmdiocbp,
3790                                                              saveq);
3791                                        spin_lock_irqsave(&phba->hbalock,
3792                                                          iflag);
3793                                } else
3794                                        __lpfc_sli_release_iocbq(phba,
3795                                                                 cmdiocbp);
3796                        }
3797                        break;
3798
3799                case LPFC_UNKNOWN_IOCB:
3800                        if (irsp->ulpCommand == CMD_ADAPTER_MSG) {
3801                                char adaptermsg[LPFC_MAX_ADPTMSG];
3802                                memset(adaptermsg, 0, LPFC_MAX_ADPTMSG);
3803                                memcpy(&adaptermsg[0], (uint8_t *)irsp,
3804                                       MAX_MSG_DATA);
3805                                dev_warn(&((phba->pcidev)->dev),
3806                                         "lpfc%d: %s\n",
3807                                         phba->brd_no, adaptermsg);
3808                        } else {
3809                                /* Unknown IOCB command */
3810                                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3811                                                "0335 Unknown IOCB "
3812                                                "command Data: x%x "
3813                                                "x%x x%x x%x\n",
3814                                                irsp->ulpCommand,
3815                                                irsp->ulpStatus,
3816                                                irsp->ulpIoTag,
3817                                                irsp->ulpContext);
3818                        }
3819                        break;
3820                }
3821
3822                if (free_saveq) {
3823                        list_for_each_entry_safe(rspiocbp, next_iocb,
3824                                                 &saveq->list, list) {
3825                                list_del_init(&rspiocbp->list);
3826                                __lpfc_sli_release_iocbq(phba, rspiocbp);
3827                        }
3828                        __lpfc_sli_release_iocbq(phba, saveq);
3829                }
3830                rspiocbp = NULL;
3831        }
3832        spin_unlock_irqrestore(&phba->hbalock, iflag);
3833        return rspiocbp;
3834}
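
/*
 * Illustrative sketch, not part of the driver: the continuation-queue
 * pattern used above. Response entries are chained onto a list until one
 * arrives with the LE (list end) bit set; only then is the accumulated
 * sequence handed off, headed by its first entry. The helper name is
 * hypothetical; the list primitives are the ones used above.
 */
static __maybe_unused struct lpfc_iocbq *
lpfc_example_chain_until_le(struct lpfc_sli_ring *pring,
			    struct lpfc_iocbq *rsp)
{
	struct lpfc_iocbq *head;

	list_add_tail(&rsp->list, &pring->iocb_continueq);
	pring->iocb_continueq_cnt++;

	if (!rsp->iocb.ulpLe)
		return NULL;	/* sequence not yet complete */

	/* LE seen: detach the whole chain, headed by its first iocb. */
	head = list_get_first(&pring->iocb_continueq, struct lpfc_iocbq, list);
	list_del_init(&pring->iocb_continueq);
	pring->iocb_continueq_cnt = 0;
	return head;
}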
3835
3836/**
3837 * lpfc_sli_handle_slow_ring_event - Wrapper func for handling slow-path iocbs
3838 * @phba: Pointer to HBA context object.
3839 * @pring: Pointer to driver SLI ring object.
3840 * @mask: Host attention register mask for this ring.
3841 *
3842 * This routine wraps the actual slow_ring event process routine from the
3843 * API jump table function pointer from the lpfc_hba struct.
3844 **/
3845void
3846lpfc_sli_handle_slow_ring_event(struct lpfc_hba *phba,
3847                                struct lpfc_sli_ring *pring, uint32_t mask)
3848{
3849        phba->lpfc_sli_handle_slow_ring_event(phba, pring, mask);
3850}
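
/* Forward declaration so the sketch below can reference the SLI3 variant. */
static void lpfc_sli_handle_slow_ring_event_s3(struct lpfc_hba *phba,
					       struct lpfc_sli_ring *pring,
					       uint32_t mask);

/*
 * Illustrative sketch, not part of the driver: the wrapper above
 * dispatches through a per-HBA function pointer bound to the SLI3 or
 * SLI4 variant when the driver's API jump table is set up. Only the
 * SLI3 branch is shown; the SLI4 path binds an _s4 variant the same way
 * during API table setup.
 */
static void __maybe_unused
lpfc_example_bind_slow_ring_api(struct lpfc_hba *phba)
{
	if (phba->sli_rev < LPFC_SLI_REV4)
		phba->lpfc_sli_handle_slow_ring_event =
					lpfc_sli_handle_slow_ring_event_s3;
}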
3851
3852/**
3853 * lpfc_sli_handle_slow_ring_event_s3 - Handle SLI3 ring event for non-FCP rings
3854 * @phba: Pointer to HBA context object.
3855 * @pring: Pointer to driver SLI ring object.
3856 * @mask: Host attention register mask for this ring.
3857 *
3858 * This function is called from the worker thread when there is a ring event
3859 * for non-fcp rings. The caller does not hold any lock. The function
3860 * removes each response iocb from the response ring and calls
3861 * lpfc_sli_sp_handle_rspiocb to process it.
3862 **/
3863static void
3864lpfc_sli_handle_slow_ring_event_s3(struct lpfc_hba *phba,
3865                                   struct lpfc_sli_ring *pring, uint32_t mask)
3866{
3867        struct lpfc_pgp *pgp;
3868        IOCB_t *entry;
3869        IOCB_t *irsp = NULL;
3870        struct lpfc_iocbq *rspiocbp = NULL;
3871        uint32_t portRspPut, portRspMax;
3872        unsigned long iflag;
3873        uint32_t status;
3874
3875        pgp = &phba->port_gp[pring->ringno];
3876        spin_lock_irqsave(&phba->hbalock, iflag);
3877        pring->stats.iocb_event++;
3878
3879        /*
3880         * The next available response entry should never exceed the maximum
3881         * entries.  If it does, treat it as an adapter hardware error.
3882         */
3883        portRspMax = pring->sli.sli3.numRiocb;
3884        portRspPut = le32_to_cpu(pgp->rspPutInx);
3885        if (portRspPut >= portRspMax) {
3886                /*
3887                 * Ring <ringno> handler: portRspPut <portRspPut> is bigger than
3888                 * rsp ring <portRspMax>
3889                 */
3890                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3891                                "0303 Ring %d handler: portRspPut %d "
3892                                "is bigger than rsp ring %d\n",
3893                                pring->ringno, portRspPut, portRspMax);
3894
3895                phba->link_state = LPFC_HBA_ERROR;
3896                spin_unlock_irqrestore(&phba->hbalock, iflag);
3897
3898                phba->work_hs = HS_FFER3;
3899                lpfc_handle_eratt(phba);
3900
3901                return;
3902        }
3903
3904        rmb();
3905        while (pring->sli.sli3.rspidx != portRspPut) {
3906                /*
3907                 * Build a completion list and call the appropriate handler.
3908                 * The process is to get the next available response iocb, get
3909                 * a free iocb from the list, copy the response data into the
3910                 * free iocb, insert it into the continuation list, and
3911                 * update the next response index to SLIM.  This process
3912                 * makes response iocbs in the ring available to DMA as fast
3913                 * as possible but pays a penalty for the copy operation.
3914                 * Since the iocb is only 32 bytes, this penalty is considered
3915                 * small relative to the PCI reads for register values and a
3916                 * SLIM write.  When the ulpLe field is set, the entire
3917                 * command has been received.
3918                 */
3919                entry = lpfc_resp_iocb(phba, pring);
3920
3921                phba->last_completion_time = jiffies;
3922                rspiocbp = __lpfc_sli_get_iocbq(phba);
3923                if (rspiocbp == NULL) {
3924                        printk(KERN_ERR "%s: out of buffers! Failing "
3925                               "completion.\n", __func__);
3926                        break;
3927                }
3928
3929                lpfc_sli_pcimem_bcopy(entry, &rspiocbp->iocb,
3930                                      phba->iocb_rsp_size);
3931                irsp = &rspiocbp->iocb;
3932
3933                if (++pring->sli.sli3.rspidx >= portRspMax)
3934                        pring->sli.sli3.rspidx = 0;
3935
3936                if (pring->ringno == LPFC_ELS_RING) {
3937                        lpfc_debugfs_slow_ring_trc(phba,
3938                        "IOCB rsp ring:   wd4:x%08x wd6:x%08x wd7:x%08x",
3939                                *(((uint32_t *) irsp) + 4),
3940                                *(((uint32_t *) irsp) + 6),
3941                                *(((uint32_t *) irsp) + 7));
3942                }
3943
3944                writel(pring->sli.sli3.rspidx,
3945                        &phba->host_gp[pring->ringno].rspGetInx);
3946
3947                spin_unlock_irqrestore(&phba->hbalock, iflag);
3948                /* Handle the response IOCB */
3949                rspiocbp = lpfc_sli_sp_handle_rspiocb(phba, pring, rspiocbp);
3950                spin_lock_irqsave(&phba->hbalock, iflag);
3951
3952                /*
3953                 * If the port response put pointer has not been updated, sync
3954                 * the pgp->rspPutInx in the MAILBOX_t and fetch the new port
3955                 * response put pointer.
3956                 */
3957                if (pring->sli.sli3.rspidx == portRspPut) {
3958                        portRspPut = le32_to_cpu(pgp->rspPutInx);
3959                }
3960        } /* while (pring->sli.sli3.rspidx != portRspPut) */
3961
3962        if ((rspiocbp != NULL) && (mask & HA_R0RE_REQ)) {
3963                /* At least one response entry has been freed */
3964                pring->stats.iocb_rsp_full++;
3965                /* SET RxRE_RSP in Chip Att register */
3966                status = ((CA_R0ATT | CA_R0RE_RSP) << (pring->ringno * 4));
3967                writel(status, phba->CAregaddr);
3968                readl(phba->CAregaddr); /* flush */
3969        }
3970        if ((mask & HA_R0CE_RSP) && (pring->flag & LPFC_CALL_RING_AVAILABLE)) {
3971                pring->flag &= ~LPFC_CALL_RING_AVAILABLE;
3972                pring->stats.iocb_cmd_empty++;
3973
3974                /* Force update of the local copy of cmdGetInx */
3975                pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx);
3976                lpfc_sli_resume_iocb(phba, pring);
3977
3978                if ((pring->lpfc_sli_cmd_available))
3979                        (pring->lpfc_sli_cmd_available) (phba, pring);
3980
3981        }
3982
3983        spin_unlock_irqrestore(&phba->hbalock, iflag);
3984        return;
3985}
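
    /*
     * The loop above implements the standard circular-ring handshake; a
     * compact restatement of the invariants it relies on:
     *
     *	empty:      rspidx == rspPutInx
     *	consume:    rspidx = (rspidx + 1) mod portRspMax
     *	port side:  advances pgp->rspPutInx (read via le32_to_cpu)
     *	host side:  acknowledges by writing rspidx to
     *	            host_gp[ringno].rspGetInx
     */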
3986
3987/**
3988 * lpfc_sli_handle_slow_ring_event_s4 - Handle SLI4 slow-path els events
3989 * @phba: Pointer to HBA context object.
3990 * @pring: Pointer to driver SLI ring object.
3991 * @mask: Host attention register mask for this ring.
3992 *
3993 * This function is called from the worker thread when there is a pending
3994 * ELS response iocb on the driver internal slow-path response iocb worker
3995 * queue. The caller does not hold any lock. The function removes each
3996 * response iocb from the response worker queue and calls the handle-response
3997 * iocb routine (lpfc_sli_sp_handle_rspiocb) to process it.
3998 **/
3999static void
4000lpfc_sli_handle_slow_ring_event_s4(struct lpfc_hba *phba,
4001                                   struct lpfc_sli_ring *pring, uint32_t mask)
4002{
4003        struct lpfc_iocbq *irspiocbq;
4004        struct hbq_dmabuf *dmabuf;
4005        struct lpfc_cq_event *cq_event;
4006        unsigned long iflag;
4007        int count = 0;
4008
4009        spin_lock_irqsave(&phba->hbalock, iflag);
4010        phba->hba_flag &= ~HBA_SP_QUEUE_EVT;
4011        spin_unlock_irqrestore(&phba->hbalock, iflag);
4012        while (!list_empty(&phba->sli4_hba.sp_queue_event)) {
4013                /* Get the response iocb from the head of work queue */
4014                spin_lock_irqsave(&phba->hbalock, iflag);
4015                list_remove_head(&phba->sli4_hba.sp_queue_event,
4016                                 cq_event, struct lpfc_cq_event, list);
4017                spin_unlock_irqrestore(&phba->hbalock, iflag);
4018
4019                switch (bf_get(lpfc_wcqe_c_code, &cq_event->cqe.wcqe_cmpl)) {
4020                case CQE_CODE_COMPL_WQE:
4021                        irspiocbq = container_of(cq_event, struct lpfc_iocbq,
4022                                                 cq_event);
4023                        /* Translate ELS WCQE to response IOCBQ */
4024                        irspiocbq = lpfc_sli4_els_wcqe_to_rspiocbq(phba,
4025                                                                   irspiocbq);
4026                        if (irspiocbq)
4027                                lpfc_sli_sp_handle_rspiocb(phba, pring,
4028                                                           irspiocbq);
4029                        count++;
4030                        break;
4031                case CQE_CODE_RECEIVE:
4032                case CQE_CODE_RECEIVE_V1:
4033                        dmabuf = container_of(cq_event, struct hbq_dmabuf,
4034                                              cq_event);
4035                        lpfc_sli4_handle_received_buffer(phba, dmabuf);
4036                        count++;
4037                        break;
4038                default:
4039                        break;
4040                }
4041
4042                /* Limit the number of events to 64 to avoid soft lockups */
4043                if (count == 64)
4044                        break;
4045        }
4046}
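
    /*
     * For context, a hedged sketch of the producer side of this queue
     * (the exact code lives in the CQ handlers, e.g. the ELS WCQE path;
     * treat names and locking details as assumptions): an event is queued
     * and the worker flagged roughly as
     *
     *	spin_lock_irqsave(&phba->hbalock, iflags);
     *	list_add_tail(&cq_event->list, &phba->sli4_hba.sp_queue_event);
     *	phba->hba_flag |= HBA_SP_QUEUE_EVT;
     *	spin_unlock_irqrestore(&phba->hbalock, iflags);
     */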
4047
4048/**
4049 * lpfc_sli_abort_iocb_ring - Abort all iocbs in the ring
4050 * @phba: Pointer to HBA context object.
4051 * @pring: Pointer to driver SLI ring object.
4052 *
4053 * This function aborts all iocbs in the given ring and frees all the iocb
4054 * objects in txq. This function issues an abort iocb for all the iocb commands
4055 * in txcmplq. The iocbs in the txcmplq are not guaranteed to complete before
4056 * the return of this function. The caller is not required to hold any locks.
4057 **/
4058void
4059lpfc_sli_abort_iocb_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
4060{
4061        LIST_HEAD(completions);
4062        struct lpfc_iocbq *iocb, *next_iocb;
4063
4064        if (pring->ringno == LPFC_ELS_RING) {
4065                lpfc_fabric_abort_hba(phba);
4066        }
4067
4068        /* Error everything on txq and txcmplq
4069         * First do the txq.
4070         */
4071        if (phba->sli_rev >= LPFC_SLI_REV4) {
4072                spin_lock_irq(&pring->ring_lock);
4073                list_splice_init(&pring->txq, &completions);
4074                pring->txq_cnt = 0;
4075                spin_unlock_irq(&pring->ring_lock);
4076
4077                spin_lock_irq(&phba->hbalock);
4078                /* Next issue ABTS for everything on the txcmplq */
4079                list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list)
4080                        lpfc_sli_issue_abort_iotag(phba, pring, iocb);
4081                spin_unlock_irq(&phba->hbalock);
4082        } else {
4083                spin_lock_irq(&phba->hbalock);
4084                list_splice_init(&pring->txq, &completions);
4085                pring->txq_cnt = 0;
4086
4087                /* Next issue ABTS for everything on the txcmplq */
4088                list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list)
4089                        lpfc_sli_issue_abort_iotag(phba, pring, iocb);
4090                spin_unlock_irq(&phba->hbalock);
4091        }
4092
4093        /* Cancel all the IOCBs from the completions list */
4094        lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
4095                              IOERR_SLI_ABORTED);
4096}
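
    /*
     * Fate of the two populations handled above: txq entries never reached
     * the HBA, so they are completed locally with IOSTAT_LOCAL_REJECT /
     * IOERR_SLI_ABORTED; txcmplq entries are outstanding on the wire, so
     * they receive an ABTS and complete later via their normal handlers.
     */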
4097
4098/**
4099 * lpfc_sli_abort_fcp_rings - Abort all iocbs in all FCP rings
4100 * @phba: Pointer to HBA context object.
4101 *
4102 * This function aborts all iocbs in FCP rings and frees all the iocb
4103 * objects in txq. This function issues an abort iocb for all the iocb commands
4104 * in txcmplq. The iocbs in the txcmplq are not guaranteed to complete before
4105 * the return of this function. The caller is not required to hold any locks.
4106 **/
4107void
4108lpfc_sli_abort_fcp_rings(struct lpfc_hba *phba)
4109{
4110        struct lpfc_sli *psli = &phba->sli;
4111        struct lpfc_sli_ring  *pring;
4112        uint32_t i;
4113
4114        /* Look on all the FCP Rings for the iotag */
4115        if (phba->sli_rev >= LPFC_SLI_REV4) {
4116                for (i = 0; i < phba->cfg_hdw_queue; i++) {
4117                        pring = phba->sli4_hba.hdwq[i].io_wq->pring;
4118                        lpfc_sli_abort_iocb_ring(phba, pring);
4119                }
4120        } else {
4121                pring = &psli->sli3_ring[LPFC_FCP_RING];
4122                lpfc_sli_abort_iocb_ring(phba, pring);
4123        }
4124}
4125
4126/**
4127 * lpfc_sli_flush_io_rings - flush all iocbs in the IO ring
4128 * @phba: Pointer to HBA context object.
4129 *
4130 * This function flushes all iocbs in the IO ring and frees all the iocb
4131 * objects in txq and txcmplq. This function will not issue abort iocbs
4132 * for the iocb commands in txcmplq; they will just be returned with
4133 * IOERR_SLI_DOWN. This function is invoked by EEH when the device's PCI
4134 * slot has been permanently disabled.
4135 **/
4136void
4137lpfc_sli_flush_io_rings(struct lpfc_hba *phba)
4138{
4139        LIST_HEAD(txq);
4140        LIST_HEAD(txcmplq);
4141        struct lpfc_sli *psli = &phba->sli;
4142        struct lpfc_sli_ring  *pring;
4143        uint32_t i;
4144        struct lpfc_iocbq *piocb, *next_iocb;
4145
4146        spin_lock_irq(&phba->hbalock);
4147        if (phba->hba_flag & HBA_IOQ_FLUSH ||
4148            !phba->sli4_hba.hdwq) {
4149                spin_unlock_irq(&phba->hbalock);
4150                return;
4151        }
4152        /* Indicate the I/O queues are flushed */
4153        phba->hba_flag |= HBA_IOQ_FLUSH;
4154        spin_unlock_irq(&phba->hbalock);
4155
4156        /* Look on all the FCP Rings for the iotag */
4157        if (phba->sli_rev >= LPFC_SLI_REV4) {
4158                for (i = 0; i < phba->cfg_hdw_queue; i++) {
4159                        pring = phba->sli4_hba.hdwq[i].io_wq->pring;
4160
4161                        spin_lock_irq(&pring->ring_lock);
4162                        /* Retrieve everything on txq */
4163                        list_splice_init(&pring->txq, &txq);
4164                        list_for_each_entry_safe(piocb, next_iocb,
4165                                                 &pring->txcmplq, list)
4166                                piocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
4167                        /* Retrieve everything on the txcmplq */
4168                        list_splice_init(&pring->txcmplq, &txcmplq);
4169                        pring->txq_cnt = 0;
4170                        pring->txcmplq_cnt = 0;
4171                        spin_unlock_irq(&pring->ring_lock);
4172
4173                        /* Flush the txq */
4174                        lpfc_sli_cancel_iocbs(phba, &txq,
4175                                              IOSTAT_LOCAL_REJECT,
4176                                              IOERR_SLI_DOWN);
4177                        /* Flush the txcmpq */
4178                        lpfc_sli_cancel_iocbs(phba, &txcmplq,
4179                                              IOSTAT_LOCAL_REJECT,
4180                                              IOERR_SLI_DOWN);
4181                }
4182        } else {
4183                pring = &psli->sli3_ring[LPFC_FCP_RING];
4184
4185                spin_lock_irq(&phba->hbalock);
4186                /* Retrieve everything on txq */
4187                list_splice_init(&pring->txq, &txq);
4188                list_for_each_entry_safe(piocb, next_iocb,
4189                                         &pring->txcmplq, list)
4190                        piocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
4191                /* Retrieve everything on the txcmplq */
4192                list_splice_init(&pring->txcmplq, &txcmplq);
4193                pring->txq_cnt = 0;
4194                pring->txcmplq_cnt = 0;
4195                spin_unlock_irq(&phba->hbalock);
4196
4197                /* Flush the txq */
4198                lpfc_sli_cancel_iocbs(phba, &txq, IOSTAT_LOCAL_REJECT,
4199                                      IOERR_SLI_DOWN);
4200                /* Flush the txcmpq */
4201                lpfc_sli_cancel_iocbs(phba, &txcmplq, IOSTAT_LOCAL_REJECT,
4202                                      IOERR_SLI_DOWN);
4203        }
4204}
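
    /*
     * A hedged usage sketch (assumed from the flag's role here, not a
     * quote of a specific call site): once HBA_IOQ_FLUSH is set, I/O
     * submission paths are expected to bail out before queuing new work,
     * e.g.
     *
     *	if (phba->hba_flag & HBA_IOQ_FLUSH)
     *		return IOCB_ERROR;	(reject rather than queue)
     */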
4205
4206/**
4207 * lpfc_sli_brdready_s3 - Check for sli3 host ready status
4208 * @phba: Pointer to HBA context object.
4209 * @mask: Bit mask to be checked.
4210 *
4211 * This function reads the host status register and compares it
4212 * with the provided bit mask to check if the HBA completed
4213 * the restart. This function will wait in a loop for the
4214 * HBA to complete the restart. If the HBA does not restart within
4215 * 15 iterations, the function will reset the HBA again. The
4216 * function returns 1 when the HBA fails to restart, otherwise it
4217 * returns zero.
4218 **/
4219static int
4220lpfc_sli_brdready_s3(struct lpfc_hba *phba, uint32_t mask)
4221{
4222        uint32_t status;
4223        int i = 0;
4224        int retval = 0;
4225
4226        /* Read the HBA Host Status Register */
4227        if (lpfc_readl(phba->HSregaddr, &status))
4228                return 1;
4229
4230        /*
4231         * Check the status register every 10ms for 5 retries, then every
4232         * 500ms for 5, then every 2.5 sec for 5, then reset the board and
4233         * check every 2.5 sec for 5 more.
4234         * Break out of the loop if errors occurred during init.
4235         */
4236        while (((status & mask) != mask) &&
4237               !(status & HS_FFERM) &&
4238               i++ < 20) {
4239
4240                if (i <= 5)
4241                        msleep(10);
4242                else if (i <= 10)
4243                        msleep(500);
4244                else
4245                        msleep(2500);
4246
4247                if (i == 15) {
4248                                /* Do post */
4249                        phba->pport->port_state = LPFC_VPORT_UNKNOWN;
4250                        lpfc_sli_brdrestart(phba);
4251                }
4252                /* Read the HBA Host Status Register */
4253                if (lpfc_readl(phba->HSregaddr, &status)) {
4254                        retval = 1;
4255                        break;
4256                }
4257        }
4258
4259        /* Check to see if any errors occurred during init */
4260        if ((status & HS_FFERM) || (i >= 20)) {
4261                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
4262                                "2751 Adapter failed to restart, "
4263                                "status reg x%x, FW Data: A8 x%x AC x%x\n",
4264                                status,
4265                                readl(phba->MBslimaddr + 0xa8),
4266                                readl(phba->MBslimaddr + 0xac));
4267                phba->link_state = LPFC_HBA_ERROR;
4268                retval = 1;
4269        }
4270
4271        return retval;
4272}
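
    /*
     * Worst-case polling budget of lpfc_sli_brdready_s3(), from the sleeps
     * above: 5 x 10 ms + 5 x 500 ms + 10 x 2.5 s ~= 27.6 s, with one board
     * restart attempted at iteration 15.
     */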
4273
4274/**
4275 * lpfc_sli_brdready_s4 - Check for sli4 host ready status
4276 * @phba: Pointer to HBA context object.
4277 * @mask: Bit mask to be checked.
4278 *
4279 * This function checks the host status register to see if the HBA is
4280 * ready. This function will wait in a loop for the HBA to become ready.
4281 * If the HBA is not ready, the function will reset the HBA PCI
4282 * function again. The function returns 1 when the HBA fails to become
4283 * ready, otherwise it returns zero.
4284 **/
4285static int
4286lpfc_sli_brdready_s4(struct lpfc_hba *phba, uint32_t mask)
4287{
4288        uint32_t status;
4289        int retval = 0;
4290
4291        /* Read the HBA Host Status Register */
4292        status = lpfc_sli4_post_status_check(phba);
4293
4294        if (status) {
4295                phba->pport->port_state = LPFC_VPORT_UNKNOWN;
4296                lpfc_sli_brdrestart(phba);
4297                status = lpfc_sli4_post_status_check(phba);
4298        }
4299
4300        /* Check to see if any errors occurred during init */
4301        if (status) {
4302                phba->link_state = LPFC_HBA_ERROR;
4303                retval = 1;
4304        } else
4305                phba->sli4_hba.intr_enable = 0;
4306
4307        return retval;
4308}
4309
4310/**
4311 * lpfc_sli_brdready - Wrapper func for checking the hba readiness
4312 * @phba: Pointer to HBA context object.
4313 * @mask: Bit mask to be checked.
4314 *
4315 * This routine wraps the actual SLI3 or SLI4 hba readiness check routine
4316 * from the API jump table function pointer in the lpfc_hba struct.
4317 **/
4318int
4319lpfc_sli_brdready(struct lpfc_hba *phba, uint32_t mask)
4320{
4321        return phba->lpfc_sli_brdready(phba, mask);
4322}
4323
4324#define BARRIER_TEST_PATTERN (0xdeadbeef)
4325
4326/**
4327 * lpfc_reset_barrier - Make HBA ready for HBA reset
4328 * @phba: Pointer to HBA context object.
4329 *
4330 * This function is called, with hbalock held, before resetting an HBA. It
4331 * requests the HBA to quiesce DMA activity before the reset.
4332 **/
4333void lpfc_reset_barrier(struct lpfc_hba *phba)
4334{
4335        uint32_t __iomem *resp_buf;
4336        uint32_t __iomem *mbox_buf;
4337        volatile uint32_t mbox;
4338        uint32_t hc_copy, ha_copy, resp_data;
4339        int  i;
4340        uint8_t hdrtype;
4341
4342        lockdep_assert_held(&phba->hbalock);
4343
4344        pci_read_config_byte(phba->pcidev, PCI_HEADER_TYPE, &hdrtype);
4345        if (hdrtype != 0x80 ||
4346            (FC_JEDEC_ID(phba->vpd.rev.biuRev) != HELIOS_JEDEC_ID &&
4347             FC_JEDEC_ID(phba->vpd.rev.biuRev) != THOR_JEDEC_ID))
4348                return;
4349
4350        /*
4351         * Tell the other part of the chip to suspend temporarily all
4352         * its DMA activity.
4353         */
4354        resp_buf = phba->MBslimaddr;
4355
4356        /* Disable the error attention */
4357        if (lpfc_readl(phba->HCregaddr, &hc_copy))
4358                return;
4359        writel((hc_copy & ~HC_ERINT_ENA), phba->HCregaddr);
4360        readl(phba->HCregaddr); /* flush */
4361        phba->link_flag |= LS_IGNORE_ERATT;
4362
4363        if (lpfc_readl(phba->HAregaddr, &ha_copy))
4364                return;
4365        if (ha_copy & HA_ERATT) {
4366                /* Clear Chip error bit */
4367                writel(HA_ERATT, phba->HAregaddr);
4368                phba->pport->stopped = 1;
4369        }
4370
4371        mbox = 0;
4372        ((MAILBOX_t *)&mbox)->mbxCommand = MBX_KILL_BOARD;
4373        ((MAILBOX_t *)&mbox)->mbxOwner = OWN_CHIP;
4374
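        	/*
        	 * Handshake: post the test pattern in the second SLIM word,
        	 * then hand the KILL_BOARD mailbox to the chip.  The chip
        	 * signals that its DMA is quiesced by overwriting the pattern
        	 * with its one's complement (hence the ~BARRIER_TEST_PATTERN
        	 * checks below).
        	 */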
4375        writel(BARRIER_TEST_PATTERN, (resp_buf + 1));
4376        mbox_buf = phba->MBslimaddr;
4377        writel(mbox, mbox_buf);
4378
4379        for (i = 0; i < 50; i++) {
4380                if (lpfc_readl((resp_buf + 1), &resp_data))
4381                        return;
4382                if (resp_data != ~(BARRIER_TEST_PATTERN))
4383                        mdelay(1);
4384                else
4385                        break;
4386        }
4387        resp_data = 0;
4388        if (lpfc_readl((resp_buf + 1), &resp_data))
4389                return;
4390        if (resp_data  != ~(BARRIER_TEST_PATTERN)) {
4391                if (phba->sli.sli_flag & LPFC_SLI_ACTIVE ||
4392                    phba->pport->stopped)
4393                        goto restore_hc;
4394                else
4395                        goto clear_errat;
4396        }
4397
4398        ((MAILBOX_t *)&mbox)->mbxOwner = OWN_HOST;
4399        resp_data = 0;
4400        for (i = 0; i < 500; i++) {
4401                if (lpfc_readl(resp_buf, &resp_data))
4402                        return;
4403                if (resp_data != mbox)
4404                        mdelay(1);
4405                else
4406                        break;
4407        }
4408
4409clear_errat:
4410
4411        while (++i < 500) {
4412                if (lpfc_readl(phba->HAregaddr, &ha_copy))
4413                        return;
4414                if (!(ha_copy & HA_ERATT))
4415                        mdelay(1);
4416                else
4417                        break;
4418        }
4419
4420        if (readl(phba->HAregaddr) & HA_ERATT) {
4421                writel(HA_ERATT, phba->HAregaddr);
4422                phba->pport->stopped = 1;
4423        }
4424
4425restore_hc:
4426        phba->link_flag &= ~LS_IGNORE_ERATT;
4427        writel(hc_copy, phba->HCregaddr);
4428        readl(phba->HCregaddr); /* flush */
4429}
4430
4431/**
4432 * lpfc_sli_brdkill - Issue a kill_board mailbox command
4433 * @phba: Pointer to HBA context object.
4434 *
4435 * This function issues a kill_board mailbox command and waits for
4436 * the error attention interrupt. This function is called for stopping
4437 * the firmware processing. The caller is not required to hold any
4438 * locks. This function calls lpfc_hba_down_post function to free
4439 * any pending commands after the kill. The function will return 1 when it
4440 * fails to kill the board else will return 0.
4441 **/
4442int
4443lpfc_sli_brdkill(struct lpfc_hba *phba)
4444{
4445        struct lpfc_sli *psli;
4446        LPFC_MBOXQ_t *pmb;
4447        uint32_t status;
4448        uint32_t ha_copy;
4449        int retval;
4450        int i = 0;
4451
4452        psli = &phba->sli;
4453
4454        /* Kill HBA */
4455        lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4456                        "0329 Kill HBA Data: x%x x%x\n",
4457                        phba->pport->port_state, psli->sli_flag);
4458
4459        pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4460        if (!pmb)
4461                return 1;
4462
4463        /* Disable the error attention */
4464        spin_lock_irq(&phba->hbalock);
4465        if (lpfc_readl(phba->HCregaddr, &status)) {
4466                spin_unlock_irq(&phba->hbalock);
4467                mempool_free(pmb, phba->mbox_mem_pool);
4468                return 1;
4469        }
4470        status &= ~HC_ERINT_ENA;
4471        writel(status, phba->HCregaddr);
4472        readl(phba->HCregaddr); /* flush */
4473        phba->link_flag |= LS_IGNORE_ERATT;
4474        spin_unlock_irq(&phba->hbalock);
4475
4476        lpfc_kill_board(phba, pmb);
4477        pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
4478        retval = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
4479
4480        if (retval != MBX_SUCCESS) {
4481                if (retval != MBX_BUSY)
4482                        mempool_free(pmb, phba->mbox_mem_pool);
4483                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
4484                                "2752 KILL_BOARD command failed retval %d\n",
4485                                retval);
4486                spin_lock_irq(&phba->hbalock);
4487                phba->link_flag &= ~LS_IGNORE_ERATT;
4488                spin_unlock_irq(&phba->hbalock);
4489                return 1;
4490        }
4491
4492        spin_lock_irq(&phba->hbalock);
4493        psli->sli_flag &= ~LPFC_SLI_ACTIVE;
4494        spin_unlock_irq(&phba->hbalock);
4495
4496        mempool_free(pmb, phba->mbox_mem_pool);
4497
4498        /* There is no completion for a KILL_BOARD mbox cmd. Check for an error
4499         * attention every 100ms for 3 seconds. If we don't get ERATT after
4500         * 3 seconds we still set HBA_ERROR state because the status of the
4501         * board is now undefined.
4502         */
4503        if (lpfc_readl(phba->HAregaddr, &ha_copy))
4504                return 1;
4505        while ((i++ < 30) && !(ha_copy & HA_ERATT)) {
4506                mdelay(100);
4507                if (lpfc_readl(phba->HAregaddr, &ha_copy))
4508                        return 1;
4509        }
4510
4511        del_timer_sync(&psli->mbox_tmo);
4512        if (ha_copy & HA_ERATT) {
4513                writel(HA_ERATT, phba->HAregaddr);
4514                phba->pport->stopped = 1;
4515        }
4516        spin_lock_irq(&phba->hbalock);
4517        psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
4518        psli->mbox_active = NULL;
4519        phba->link_flag &= ~LS_IGNORE_ERATT;
4520        spin_unlock_irq(&phba->hbalock);
4521
4522        lpfc_hba_down_post(phba);
4523        phba->link_state = LPFC_HBA_ERROR;
4524
4525        return ha_copy & HA_ERATT ? 0 : 1;
4526}
4527
4528/**
4529 * lpfc_sli_brdreset - Reset a sli-2 or sli-3 HBA
4530 * @phba: Pointer to HBA context object.
4531 *
4532 * This function resets the HBA by writing HC_INITFF to the control
4533 * register. After the HBA resets, this function resets all the iocb ring
4534 * indices. This function disables PCI layer parity checking during
4535 * the reset.
4536 * This function returns 0 always.
4537 * The caller is not required to hold any locks.
4538 **/
4539int
4540lpfc_sli_brdreset(struct lpfc_hba *phba)
4541{
4542        struct lpfc_sli *psli;
4543        struct lpfc_sli_ring *pring;
4544        uint16_t cfg_value;
4545        int i;
4546
4547        psli = &phba->sli;
4548
4549        /* Reset HBA */
4550        lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4551                        "0325 Reset HBA Data: x%x x%x\n",
4552                        (phba->pport) ? phba->pport->port_state : 0,
4553                        psli->sli_flag);
4554
4555        /* perform board reset */
4556        phba->fc_eventTag = 0;
4557        phba->link_events = 0;
4558        if (phba->pport) {
4559                phba->pport->fc_myDID = 0;
4560                phba->pport->fc_prevDID = 0;
4561        }
4562
4563        /* Turn off parity checking and serr during the physical reset */
4564        if (pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value))
4565                return -EIO;
4566
4567        pci_write_config_word(phba->pcidev, PCI_COMMAND,
4568                              (cfg_value &
4569                               ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR)));
4570
4571        psli->sli_flag &= ~(LPFC_SLI_ACTIVE | LPFC_PROCESS_LA);
4572
4573        /* Now toggle INITFF bit in the Host Control Register */
4574        writel(HC_INITFF, phba->HCregaddr);
4575        mdelay(1);
4576        readl(phba->HCregaddr); /* flush */
4577        writel(0, phba->HCregaddr);
4578        readl(phba->HCregaddr); /* flush */
4579
4580        /* Restore PCI cmd register */
4581        pci_write_config_word(phba->pcidev, PCI_COMMAND, cfg_value);
4582
4583        /* Initialize relevant SLI info */
4584        for (i = 0; i < psli->num_rings; i++) {
4585                pring = &psli->sli3_ring[i];
4586                pring->flag = 0;
4587                pring->sli.sli3.rspidx = 0;
4588                pring->sli.sli3.next_cmdidx  = 0;
4589                pring->sli.sli3.local_getidx = 0;
4590                pring->sli.sli3.cmdidx = 0;
4591                pring->missbufcnt = 0;
4592        }
4593
4594        phba->link_state = LPFC_WARM_START;
4595        return 0;
4596}
4597
4598/**
4599 * lpfc_sli4_brdreset - Reset a sli-4 HBA
4600 * @phba: Pointer to HBA context object.
4601 *
4602 * This function resets a SLI4 HBA. This function disables PCI layer parity
4603 * checking while it resets the device. The caller is not required to hold
4604 * any locks.
4605 *
4606 * This function returns 0 on success else returns negative error code.
4607 **/
4608int
4609lpfc_sli4_brdreset(struct lpfc_hba *phba)
4610{
4611        struct lpfc_sli *psli = &phba->sli;
4612        uint16_t cfg_value;
4613        int rc = 0;
4614
4615        /* Reset HBA */
4616        lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4617                        "0295 Reset HBA Data: x%x x%x x%x\n",
4618                        phba->pport->port_state, psli->sli_flag,
4619                        phba->hba_flag);
4620
4621        /* perform board reset */
4622        phba->fc_eventTag = 0;
4623        phba->link_events = 0;
4624        phba->pport->fc_myDID = 0;
4625        phba->pport->fc_prevDID = 0;
4626
4627        spin_lock_irq(&phba->hbalock);
4628        psli->sli_flag &= ~(LPFC_PROCESS_LA);
4629        phba->fcf.fcf_flag = 0;
4630        spin_unlock_irq(&phba->hbalock);
4631
4632        /* SLI4 INTF 2: if FW dump is being taken skip INIT_PORT */
4633        if (phba->hba_flag & HBA_FW_DUMP_OP) {
4634                phba->hba_flag &= ~HBA_FW_DUMP_OP;
4635                return rc;
4636        }
4637
4638        /* Now physically reset the device */
4639        lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4640                        "0389 Performing PCI function reset!\n");
4641
4642        /* Turn off parity checking and serr during the physical reset */
4643        if (pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value)) {
4644                lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4645                                "3205 PCI read Config failed\n");
4646                return -EIO;
4647        }
4648
4649        pci_write_config_word(phba->pcidev, PCI_COMMAND, (cfg_value &
4650                              ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR)));
4651
4652        /* Perform FCoE PCI function reset before freeing queue memory */
4653        rc = lpfc_pci_function_reset(phba);
4654
4655        /* Restore PCI cmd register */
4656        pci_write_config_word(phba->pcidev, PCI_COMMAND, cfg_value);
4657
4658        return rc;
4659}
4660
4661/**
4662 * lpfc_sli_brdrestart_s3 - Restart a sli-3 hba
4663 * @phba: Pointer to HBA context object.
4664 *
4665 * This function is called in the SLI initialization code path to
4666 * restart the HBA. The caller is not required to hold any lock.
4667 * This function writes MBX_RESTART mailbox command to the SLIM and
4668 * resets the HBA. At the end of the function, it calls lpfc_hba_down_post
4669 * function to free any pending commands. The function enables
4670 * POST only during the first initialization. The function returns zero.
4671 * The function does not guarantee completion of MBX_RESTART mailbox
4672 * command before the return of this function.
4673 **/
4674static int
4675lpfc_sli_brdrestart_s3(struct lpfc_hba *phba)
4676{
4677        MAILBOX_t *mb;
4678        struct lpfc_sli *psli;
4679        volatile uint32_t word0;
4680        void __iomem *to_slim;
4681        uint32_t hba_aer_enabled;
4682
4683        spin_lock_irq(&phba->hbalock);
4684
4685        /* Save the PCIe device Advanced Error Reporting (AER) state */
4686        hba_aer_enabled = phba->hba_flag & HBA_AER_ENABLED;
4687
4688        psli = &phba->sli;
4689
4690        /* Restart HBA */
4691        lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4692                        "0337 Restart HBA Data: x%x x%x\n",
4693                        (phba->pport) ? phba->pport->port_state : 0,
4694                        psli->sli_flag);
4695
4696        word0 = 0;
4697        mb = (MAILBOX_t *) &word0;
4698        mb->mbxCommand = MBX_RESTART;
4699        mb->mbxHc = 1;
4700
4701        lpfc_reset_barrier(phba);
4702
4703        to_slim = phba->MBslimaddr;
4704        writel(*(uint32_t *) mb, to_slim);
4705        readl(to_slim); /* flush */
4706
4707        /* word0 is reused to set up word1; skip POST only after fc_ffinit */
4708        if (phba->pport && phba->pport->port_state)
4709                word0 = 1;      /* word1 = 1: skip POST */
4710        else
4711                word0 = 0;      /* word1 = 0: run POST */
4712        to_slim = phba->MBslimaddr + sizeof (uint32_t);
4713        writel(*(uint32_t *) mb, to_slim);
4714        readl(to_slim); /* flush */
4715
4716        lpfc_sli_brdreset(phba);
4717        if (phba->pport)
4718                phba->pport->stopped = 0;
4719        phba->link_state = LPFC_INIT_START;
4720        phba->hba_flag = 0;
4721        spin_unlock_irq(&phba->hbalock);
4722
4723        memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets));
4724        psli->stats_start = ktime_get_seconds();
4725
4726        /* Give the INITFF and Post time to settle. */
4727        mdelay(100);
4728
4729        /* Reset HBA AER if it was enabled, note hba_flag was reset above */
4730        if (hba_aer_enabled)
4731                pci_disable_pcie_error_reporting(phba->pcidev);
4732
4733        lpfc_hba_down_post(phba);
4734
4735        return 0;
4736}
4737
4738/**
4739 * lpfc_sli_brdrestart_s4 - Restart the sli-4 hba
4740 * @phba: Pointer to HBA context object.
4741 *
4742 * This function is called in the SLI initialization code path to restart
4743 * a SLI4 HBA. The caller is not required to hold any lock.
4744 * At the end of the function, it calls lpfc_hba_down_post function to
4745 * free any pending commands.
4746 **/
4747static int
4748lpfc_sli_brdrestart_s4(struct lpfc_hba *phba)
4749{
4750        struct lpfc_sli *psli = &phba->sli;
4751        uint32_t hba_aer_enabled;
4752        int rc;
4753
4754        /* Restart HBA */
4755        lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4756                        "0296 Restart HBA Data: x%x x%x\n",
4757                        phba->pport->port_state, psli->sli_flag);
4758
4759        /* Save the PCIe device Advanced Error Reporting (AER) state */
4760        hba_aer_enabled = phba->hba_flag & HBA_AER_ENABLED;
4761
4762        rc = lpfc_sli4_brdreset(phba);
4763        if (rc) {
4764                phba->link_state = LPFC_HBA_ERROR;
4765                goto hba_down_queue;
4766        }
4767
4768        spin_lock_irq(&phba->hbalock);
4769        phba->pport->stopped = 0;
4770        phba->link_state = LPFC_INIT_START;
4771        phba->hba_flag = 0;
4772        spin_unlock_irq(&phba->hbalock);
4773
4774        memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets));
4775        psli->stats_start = ktime_get_seconds();
4776
4777        /* Reset HBA AER if it was enabled, note hba_flag was reset above */
4778        if (hba_aer_enabled)
4779                pci_disable_pcie_error_reporting(phba->pcidev);
4780
4781hba_down_queue:
4782        lpfc_hba_down_post(phba);
4783        lpfc_sli4_queue_destroy(phba);
4784
4785        return rc;
4786}
4787
4788/**
4789 * lpfc_sli_brdrestart - Wrapper func for restarting hba
4790 * @phba: Pointer to HBA context object.
4791 *
4792 * This routine wraps the actual SLI3 or SLI4 hba restart routine from the
4793 * API jump table function pointer in the lpfc_hba struct.
4794 **/
4795int
4796lpfc_sli_brdrestart(struct lpfc_hba *phba)
4797{
4798        return phba->lpfc_sli_brdrestart(phba);
4799}
4800
4801/**
4802 * lpfc_sli_chipset_init - Wait for the restart of the HBA after a restart
4803 * @phba: Pointer to HBA context object.
4804 *
4805 * This function is called after a HBA restart to wait for successful
4806 * restart of the HBA. Successful restart of the HBA is indicated by
4807 * HS_FFRDY and HS_MBRDY bits. If the HBA fails to restart even after 15
4808 * iteration, the function will restart the HBA again. The function returns
4809 * zero if HBA successfully restarted else returns negative error code.
4810 **/
4811int
4812lpfc_sli_chipset_init(struct lpfc_hba *phba)
4813{
4814        uint32_t status, i = 0;
4815
4816        /* Read the HBA Host Status Register */
4817        if (lpfc_readl(phba->HSregaddr, &status))
4818                return -EIO;
4819
4820        /* Check status register to see what current state is */
4821        i = 0;
4822        while ((status & (HS_FFRDY | HS_MBRDY)) != (HS_FFRDY | HS_MBRDY)) {
4823
4824                /* Check every 10ms for 10 retries, then every 100ms for 90
4825                 * retries, then every 1 sec for 50 retries (10 x 10 ms +
4826                 * 90 x 100 ms + 50 x 1 s ~= 59 s) before resetting the board
4827                 * again and checking every 1 sec for 50 more retries. Up to
4828                 * 60 seconds must be allowed for Falcon FIPS zeroization to
4829                 * complete before the board is ready; any board reset in
4830                 * between restarts zeroization and further delays readiness.
4831                 */
4832                if (i++ >= 200) {
4833                        /* Adapter failed to init, timeout, status reg
4834                           <status> */
4835                        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
4836                                        "0436 Adapter failed to init, "
4837                                        "timeout, status reg x%x, "
4838                                        "FW Data: A8 x%x AC x%x\n", status,
4839                                        readl(phba->MBslimaddr + 0xa8),
4840                                        readl(phba->MBslimaddr + 0xac));
4841                        phba->link_state = LPFC_HBA_ERROR;
4842                        return -ETIMEDOUT;
4843                }
4844
4845                /* Check to see if any errors occurred during init */
4846                if (status & HS_FFERM) {
4847                        /* ERROR: During chipset initialization */
4848                        /* Adapter failed to init, chipset, status reg
4849                           <status> */
4850                        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
4851                                        "0437 Adapter failed to init, "
4852                                        "chipset, status reg x%x, "
4853                                        "FW Data: A8 x%x AC x%x\n", status,
4854                                        readl(phba->MBslimaddr + 0xa8),
4855                                        readl(phba->MBslimaddr + 0xac));
4856                        phba->link_state = LPFC_HBA_ERROR;
4857                        return -EIO;
4858                }
4859
4860                if (i <= 10)
4861                        msleep(10);
4862                else if (i <= 100)
4863                        msleep(100);
4864                else
4865                        msleep(1000);
4866
4867                if (i == 150) {
4868                        /* Do post */
4869                        phba->pport->port_state = LPFC_VPORT_UNKNOWN;
4870                        lpfc_sli_brdrestart(phba);
4871                }
4872                /* Read the HBA Host Status Register */
4873                if (lpfc_readl(phba->HSregaddr, &status))
4874                        return -EIO;
4875        }
4876
4877        /* Check to see if any errors occurred during init */
4878        if (status & HS_FFERM) {
4879                /* ERROR: During chipset initialization */
4880                /* Adapter failed to init, chipset, status reg <status> */
4881                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
4882                                "0438 Adapter failed to init, chipset, "
4883                                "status reg x%x, "
4884                                "FW Data: A8 x%x AC x%x\n", status,
4885                                readl(phba->MBslimaddr + 0xa8),
4886                                readl(phba->MBslimaddr + 0xac));
4887                phba->link_state = LPFC_HBA_ERROR;
4888                return -EIO;
4889        }
4890
4891        /* Clear all interrupt enable conditions */
4892        writel(0, phba->HCregaddr);
4893        readl(phba->HCregaddr); /* flush */
4894
4895        /* setup host attn register */
4896        writel(0xffffffff, phba->HAregaddr);
4897        readl(phba->HAregaddr); /* flush */
4898        return 0;
4899}
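
    /*
     * Total worst-case wait in lpfc_sli_chipset_init(), from the sleeps
     * above: 10 x 10 ms + 90 x 100 ms + 100 x 1 s ~= 109 s, i.e. ~59 s up
     * to the board restart at iteration 150 and up to 50 s more after it.
     */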
4900
4901/**
4902 * lpfc_sli_hbq_count - Get the number of HBQs to be configured
4903 *
4904 * This function calculates and returns the number of HBQs required to be
4905 * configured.
4906 **/
4907int
4908lpfc_sli_hbq_count(void)
4909{
4910        return ARRAY_SIZE(lpfc_hbq_defs);
4911}
4912
4913/**
4914 * lpfc_sli_hbq_entry_count - Calculate total number of hbq entries
4915 *
4916 * This function adds the number of hbq entries in every HBQ to get
4917 * the total number of hbq entries required for the HBA and returns
4918 * the total count.
4919 **/
4920static int
4921lpfc_sli_hbq_entry_count(void)
4922{
4923        int  hbq_count = lpfc_sli_hbq_count();
4924        int  count = 0;
4925        int  i;
4926
4927        for (i = 0; i < hbq_count; ++i)
4928                count += lpfc_hbq_defs[i]->entry_count;
4929        return count;
4930}
4931
4932/**
4933 * lpfc_sli_hbq_size - Calculate memory required for all hbq entries
4934 *
4935 * This function calculates amount of memory required for all hbq entries
4936 * to be configured and returns the total memory required.
4937 **/
4938int
4939lpfc_sli_hbq_size(void)
4940{
4941        return lpfc_sli_hbq_entry_count() * sizeof(struct lpfc_hbq_entry);
4942}
4943
4944/**
4945 * lpfc_sli_hbq_setup - configure and initialize HBQs
4946 * @phba: Pointer to HBA context object.
4947 *
4948 * This function is called during the SLI initialization to configure
4949 * all the HBQs and post buffers to the HBQ. The caller is not
4950 * required to hold any locks. This function will return zero if successful
4951 * else it will return a negative error code.
4952 **/
4953static int
4954lpfc_sli_hbq_setup(struct lpfc_hba *phba)
4955{
4956        int  hbq_count = lpfc_sli_hbq_count();
4957        LPFC_MBOXQ_t *pmb;
4958        MAILBOX_t *pmbox;
4959        uint32_t hbqno;
4960        uint32_t hbq_entry_index;
4961
4962        /* Get a Mailbox buffer to setup mailbox
4963         * commands for HBA initialization
4964         */
4965        pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4966
4967        if (!pmb)
4968                return -ENOMEM;
4969
4970        pmbox = &pmb->u.mb;
4971
4972        /* Initialize the struct lpfc_sli_hbq structure for each hbq */
4973        phba->link_state = LPFC_INIT_MBX_CMDS;
4974        phba->hbq_in_use = 1;
4975
4976        hbq_entry_index = 0;
4977        for (hbqno = 0; hbqno < hbq_count; ++hbqno) {
4978                phba->hbqs[hbqno].next_hbqPutIdx = 0;
4979                phba->hbqs[hbqno].hbqPutIdx      = 0;
4980                phba->hbqs[hbqno].local_hbqGetIdx   = 0;
4981                phba->hbqs[hbqno].entry_count =
4982                        lpfc_hbq_defs[hbqno]->entry_count;
4983                lpfc_config_hbq(phba, hbqno, lpfc_hbq_defs[hbqno],
4984                        hbq_entry_index, pmb);
4985                hbq_entry_index += phba->hbqs[hbqno].entry_count;
4986
4987                if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
4988                        /* Adapter failed to init, mbxCmd <cmd> CFG_RING,
4989                           mbxStatus <status>, ring <num> */
4990
4991                        lpfc_printf_log(phba, KERN_ERR,
4992                                        LOG_SLI | LOG_VPORT,
4993                                        "1805 Adapter failed to init. "
4994                                        "Data: x%x x%x x%x\n",
4995                                        pmbox->mbxCommand,
4996                                        pmbox->mbxStatus, hbqno);
4997
4998                        phba->link_state = LPFC_HBA_ERROR;
4999                        mempool_free(pmb, phba->mbox_mem_pool);
5000                        return -ENXIO;
5001                }
5002        }
5003        phba->hbq_count = hbq_count;
5004
5005        mempool_free(pmb, phba->mbox_mem_pool);
5006
5007        /* Initially populate or replenish the HBQs */
5008        for (hbqno = 0; hbqno < hbq_count; ++hbqno)
5009                lpfc_sli_hbqbuf_init_hbqs(phba, hbqno);
5010        return 0;
5011}
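
    /*
     * Layout produced by the setup loop above (illustrative): HBQ entries
     * occupy one contiguous region, each HBQ starting where the previous
     * one ended:
     *
     *	hbq 0: entries [0, n0)
     *	hbq 1: entries [n0, n0 + n1)
     *	...    where n_i = lpfc_hbq_defs[i]->entry_count
     */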
5012
5013/**
5014 * lpfc_sli4_rb_setup - Initialize and post RBs to HBA
5015 * @phba: Pointer to HBA context object.
5016 *
5017 * This function is called during the SLI initialization to configure
5018 * the ELS HBQ and post receive buffers to it. The caller is not
5019 * required to hold any locks. This function will return zero if successful
5020 * else it will return a negative error code.
5021 **/
5022static int
5023lpfc_sli4_rb_setup(struct lpfc_hba *phba)
5024{
5025        phba->hbq_in_use = 1;
5026        /*
5027         * Specific case when MDS diagnostics are enabled and supported.
5028         * The receive buffer count is truncated to manage the incoming
5029         * traffic.
5030         */
5031        if (phba->cfg_enable_mds_diags && phba->mds_diags_support)
5032                phba->hbqs[LPFC_ELS_HBQ].entry_count =
5033                        lpfc_hbq_defs[LPFC_ELS_HBQ]->entry_count >> 1;
5034        else
5035                phba->hbqs[LPFC_ELS_HBQ].entry_count =
5036                        lpfc_hbq_defs[LPFC_ELS_HBQ]->entry_count;
5037        phba->hbq_count = 1;
5038        lpfc_sli_hbqbuf_init_hbqs(phba, LPFC_ELS_HBQ);
5039        /* Initially populate or replenish the HBQs */
5040        return 0;
5041}
5042
5043/**
5044 * lpfc_sli_config_port - Issue config port mailbox command
5045 * @phba: Pointer to HBA context object.
5046 * @sli_mode: sli mode - 2/3
5047 *
5048 * This function is called by the sli initialization code path
5049 * to issue config_port mailbox command. This function restarts the
5050 * HBA firmware and issues a config_port mailbox command to configure
5051 * the SLI interface in the mode specified by the sli_mode
5052 * parameter. The caller is not required to hold any locks.
5053 * The function returns 0 if successful, else returns negative error
5054 * code.
5055 **/
5056int
5057lpfc_sli_config_port(struct lpfc_hba *phba, int sli_mode)
5058{
5059        LPFC_MBOXQ_t *pmb;
5060        uint32_t resetcount = 0, rc = 0, done = 0;
5061
5062        pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5063        if (!pmb) {
5064                phba->link_state = LPFC_HBA_ERROR;
5065                return -ENOMEM;
5066        }
5067
5068        phba->sli_rev = sli_mode;
5069        while (resetcount < 2 && !done) {
5070                spin_lock_irq(&phba->hbalock);
5071                phba->sli.sli_flag |= LPFC_SLI_MBOX_ACTIVE;
5072                spin_unlock_irq(&phba->hbalock);
5073                phba->pport->port_state = LPFC_VPORT_UNKNOWN;
5074                lpfc_sli_brdrestart(phba);
5075                rc = lpfc_sli_chipset_init(phba);
5076                if (rc)
5077                        break;
5078
5079                spin_lock_irq(&phba->hbalock);
5080                phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
5081                spin_unlock_irq(&phba->hbalock);
5082                resetcount++;
5083
5084                /* Call pre CONFIG_PORT mailbox command initialization.  A
5085                 * value of 0 means the call was successful.  Any
5086                 * nonzero value is a failure, but if ERESTART is returned,
5087                 * the driver may reset the HBA and try again.
5088                 */
5089                rc = lpfc_config_port_prep(phba);
5090                if (rc == -ERESTART) {
5091                        phba->link_state = LPFC_LINK_UNKNOWN;
5092                        continue;
5093                } else if (rc)
5094                        break;
5095
5096                phba->link_state = LPFC_INIT_MBX_CMDS;
5097                lpfc_config_port(phba, pmb);
5098                rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
5099                phba->sli3_options &= ~(LPFC_SLI3_NPIV_ENABLED |
5100                                        LPFC_SLI3_HBQ_ENABLED |
5101                                        LPFC_SLI3_CRP_ENABLED |
5102                                        LPFC_SLI3_DSS_ENABLED);
5103                if (rc != MBX_SUCCESS) {
5104                        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5105                                "0442 Adapter failed to init, mbxCmd x%x "
5106                                "CONFIG_PORT, mbxStatus x%x Data: x%x\n",
5107                                pmb->u.mb.mbxCommand, pmb->u.mb.mbxStatus, 0);
5108                        spin_lock_irq(&phba->hbalock);
5109                        phba->sli.sli_flag &= ~LPFC_SLI_ACTIVE;
5110                        spin_unlock_irq(&phba->hbalock);
5111                        rc = -ENXIO;
5112                } else {
5113                        /* Allow asynchronous mailbox command to go through */
5114                        spin_lock_irq(&phba->hbalock);
5115                        phba->sli.sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
5116                        spin_unlock_irq(&phba->hbalock);
5117                        done = 1;
5118
5119                        if ((pmb->u.mb.un.varCfgPort.casabt == 1) &&
5120                            (pmb->u.mb.un.varCfgPort.gasabt == 0))
5121                                lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
5122                                        "3110 Port did not grant ASABT\n");
5123                }
5124        }
5125        if (!done) {
5126                rc = -EINVAL;
5127                goto do_prep_failed;
5128        }
5129        if (pmb->u.mb.un.varCfgPort.sli_mode == 3) {
5130                if (!pmb->u.mb.un.varCfgPort.cMA) {
5131                        rc = -ENXIO;
5132                        goto do_prep_failed;
5133                }
5134                if (phba->max_vpi && pmb->u.mb.un.varCfgPort.gmv) {
5135                        phba->sli3_options |= LPFC_SLI3_NPIV_ENABLED;
5136                        phba->max_vpi = pmb->u.mb.un.varCfgPort.max_vpi;
5137                        phba->max_vports = (phba->max_vpi > phba->max_vports) ?
5138                                phba->max_vpi : phba->max_vports;
5139
5140                } else
5141                        phba->max_vpi = 0;
5142                if (pmb->u.mb.un.varCfgPort.gerbm)
5143                        phba->sli3_options |= LPFC_SLI3_HBQ_ENABLED;
5144                if (pmb->u.mb.un.varCfgPort.gcrp)
5145                        phba->sli3_options |= LPFC_SLI3_CRP_ENABLED;
5146
5147                phba->hbq_get = phba->mbox->us.s3_pgp.hbq_get;
5148                phba->port_gp = phba->mbox->us.s3_pgp.port;
5149
5150                if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) {
5151                        if (pmb->u.mb.un.varCfgPort.gbg == 0) {
5152                                phba->cfg_enable_bg = 0;
5153                                phba->sli3_options &= ~LPFC_SLI3_BG_ENABLED;
5154                                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5155                                                "0443 Adapter did not grant "
5156                                                "BlockGuard\n");
5157                        }
5158                }
5159        } else {
5160                phba->hbq_get = NULL;
5161                phba->port_gp = phba->mbox->us.s2.port;
5162                phba->max_vpi = 0;
5163        }
5164do_prep_failed:
5165        mempool_free(pmb, phba->mbox_mem_pool);
5166        return rc;
5167}
5168
5169
5170/**
5171 * lpfc_sli_hba_setup - SLI initialization function
5172 * @phba: Pointer to HBA context object.
5173 *
5174 * This function is the main SLI initialization function. This function
5175 * is called by the HBA initialization code, HBA reset code and HBA
5176 * error attention handler code. Caller is not required to hold any
5177 * locks. This function issues config_port mailbox command to configure
5178 * the SLI, setup iocb rings and HBQ rings. In the end the function
5179 * calls the config_port_post function to issue init_link mailbox
5180 * command and to start the discovery. The function will return zero
5181 * if successful, else it will return negative error code.
5182 **/
5183int
5184lpfc_sli_hba_setup(struct lpfc_hba *phba)
5185{
5186        uint32_t rc;
5187        int  mode = 3, i;
5188        int longs;
5189
5190        switch (phba->cfg_sli_mode) {
5191        case 2:
5192                if (phba->cfg_enable_npiv) {
5193                        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5194                                "1824 NPIV enabled: Override sli_mode "
5195                                "parameter (%d) to auto (0).\n",
5196                                phba->cfg_sli_mode);
5197                        break;
5198                }
5199                mode = 2;
5200                break;
5201        case 0:
5202        case 3:
5203                break;
5204        default:
5205                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5206                                "1819 Unrecognized sli_mode parameter: %d.\n",
5207                                phba->cfg_sli_mode);
5208
5209                break;
5210        }
5211        phba->fcp_embed_io = 0; /* SLI4 FC support only */
5212
5213        rc = lpfc_sli_config_port(phba, mode);
5214
5215        if (rc && phba->cfg_sli_mode == 3)
5216                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5217                                "1820 Unable to select SLI-3.  "
5218                                "Not supported by adapter.\n");
5219        if (rc && mode != 2)
5220                rc = lpfc_sli_config_port(phba, 2);
5221        else if (rc && mode == 2)
5222                rc = lpfc_sli_config_port(phba, 3);
5223        if (rc)
5224                goto lpfc_sli_hba_setup_error;
5225
5226        /* Enable PCIe device Advanced Error Reporting (AER) if configured */
5227        if (phba->cfg_aer_support == 1 && !(phba->hba_flag & HBA_AER_ENABLED)) {
5228                rc = pci_enable_pcie_error_reporting(phba->pcidev);
5229                if (!rc) {
5230                        lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5231                                        "2709 This device supports "
5232                                        "Advanced Error Reporting (AER)\n");
5233                        spin_lock_irq(&phba->hbalock);
5234                        phba->hba_flag |= HBA_AER_ENABLED;
5235                        spin_unlock_irq(&phba->hbalock);
5236                } else {
5237                        lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5238                                        "2708 This device does not support "
5239                                        "Advanced Error Reporting (AER): %d\n",
5240                                        rc);
5241                        phba->cfg_aer_support = 0;
5242                }
5243        }
5244
5245        if (phba->sli_rev == 3) {
5246                phba->iocb_cmd_size = SLI3_IOCB_CMD_SIZE;
5247                phba->iocb_rsp_size = SLI3_IOCB_RSP_SIZE;
5248        } else {
5249                phba->iocb_cmd_size = SLI2_IOCB_CMD_SIZE;
5250                phba->iocb_rsp_size = SLI2_IOCB_RSP_SIZE;
5251                phba->sli3_options = 0;
5252        }
5253
5254        lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5255                        "0444 Firmware in SLI %x mode. Max_vpi %d\n",
5256                        phba->sli_rev, phba->max_vpi);
5257        rc = lpfc_sli_ring_map(phba);
5258
5259        if (rc)
5260                goto lpfc_sli_hba_setup_error;
5261
5262        /* Initialize VPIs. */
5263        if (phba->sli_rev == LPFC_SLI_REV3) {
5264                /*
5265                 * The VPI bitmask and physical ID array are allocated
5266                 * and initialized once only - at driver load.  A port
5267                 * reset doesn't need to reinitialize this memory.
5268                 */
5269                if ((phba->vpi_bmask == NULL) && (phba->vpi_ids == NULL)) {
5270                        longs = (phba->max_vpi + BITS_PER_LONG) / BITS_PER_LONG;
5271                        phba->vpi_bmask = kcalloc(longs,
5272                                                  sizeof(unsigned long),
5273                                                  GFP_KERNEL);
5274                        if (!phba->vpi_bmask) {
5275                                rc = -ENOMEM;
5276                                goto lpfc_sli_hba_setup_error;
5277                        }
5278
5279                        phba->vpi_ids = kcalloc(phba->max_vpi + 1,
5280                                                sizeof(uint16_t),
5281                                                GFP_KERNEL);
5282                        if (!phba->vpi_ids) {
5283                                kfree(phba->vpi_bmask);
5284                                rc = -ENOMEM;
5285                                goto lpfc_sli_hba_setup_error;
5286                        }
5287                        for (i = 0; i < phba->max_vpi; i++)
5288                                phba->vpi_ids[i] = i;
5289                }
5290        }
5291
5292        /* Init HBQs */
5293        if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
5294                rc = lpfc_sli_hbq_setup(phba);
5295                if (rc)
5296                        goto lpfc_sli_hba_setup_error;
5297        }
5298        spin_lock_irq(&phba->hbalock);
5299        phba->sli.sli_flag |= LPFC_PROCESS_LA;
5300        spin_unlock_irq(&phba->hbalock);
5301
5302        rc = lpfc_config_port_post(phba);
5303        if (rc)
5304                goto lpfc_sli_hba_setup_error;
5305
5306        return rc;
5307
5308lpfc_sli_hba_setup_error:
5309        phba->link_state = LPFC_HBA_ERROR;
5310        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5311                        "0445 Firmware initialization failed\n");
5312        return rc;
5313}
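
/*
 * Illustrative caller sketch (not part of this file): the bring-up path
 * is assumed to dispatch on sli_rev, invoking the SLI-4 counterpart for
 * newer ports and the routine above otherwise.  Hedged example only.
 */
#if 0	/* example only */
	int rc;

	if (phba->sli_rev == LPFC_SLI_REV4)
		rc = lpfc_sli4_hba_setup(phba);
	else
		rc = lpfc_sli_hba_setup(phba);
	if (rc)
		return rc;	/* the routine above flags LPFC_HBA_ERROR on failure */
#endif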
5314
5315/**
5316 * lpfc_sli4_read_fcoe_params - Read fcoe params from conf region
5317 * @phba: Pointer to HBA context object.
5318 *
5319 * This function issues a dump mailbox command to read config region
5320 * 23, parses the records in the region, and populates the driver
5321 * data structure.
5322 **/
5323static int
5324lpfc_sli4_read_fcoe_params(struct lpfc_hba *phba)
5325{
5326        LPFC_MBOXQ_t *mboxq;
5327        struct lpfc_dmabuf *mp;
5328        struct lpfc_mqe *mqe;
5329        uint32_t data_length;
5330        int rc;
5331
5332        /* Program the default values of vlan_id and fc_map */
5333        phba->valid_vlan = 0;
5334        phba->fc_map[0] = LPFC_FCOE_FCF_MAP0;
5335        phba->fc_map[1] = LPFC_FCOE_FCF_MAP1;
5336        phba->fc_map[2] = LPFC_FCOE_FCF_MAP2;
5337
5338        mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5339        if (!mboxq)
5340                return -ENOMEM;
5341
5342        mqe = &mboxq->u.mqe;
5343        if (lpfc_sli4_dump_cfg_rg23(phba, mboxq)) {
5344                rc = -ENOMEM;
5345                goto out_free_mboxq;
5346        }
5347
5348        mp = (struct lpfc_dmabuf *)mboxq->ctx_buf;
5349        rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
5350
5351        lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
5352                        "(%d):2571 Mailbox cmd x%x Status x%x "
5353                        "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x "
5354                        "x%x x%x x%x x%x x%x x%x x%x x%x x%x "
5355                        "CQ: x%x x%x x%x x%x\n",
5356                        mboxq->vport ? mboxq->vport->vpi : 0,
5357                        bf_get(lpfc_mqe_command, mqe),
5358                        bf_get(lpfc_mqe_status, mqe),
5359                        mqe->un.mb_words[0], mqe->un.mb_words[1],
5360                        mqe->un.mb_words[2], mqe->un.mb_words[3],
5361                        mqe->un.mb_words[4], mqe->un.mb_words[5],
5362                        mqe->un.mb_words[6], mqe->un.mb_words[7],
5363                        mqe->un.mb_words[8], mqe->un.mb_words[9],
5364                        mqe->un.mb_words[10], mqe->un.mb_words[11],
5365                        mqe->un.mb_words[12], mqe->un.mb_words[13],
5366                        mqe->un.mb_words[14], mqe->un.mb_words[15],
5367                        mqe->un.mb_words[16], mqe->un.mb_words[50],
5368                        mboxq->mcqe.word0,
5369                        mboxq->mcqe.mcqe_tag0,  mboxq->mcqe.mcqe_tag1,
5370                        mboxq->mcqe.trailer);
5371
5372        if (rc) {
5373                lpfc_mbuf_free(phba, mp->virt, mp->phys);
5374                kfree(mp);
5375                rc = -EIO;
5376                goto out_free_mboxq;
5377        }
5378        data_length = mqe->un.mb_words[5];
5379        if (data_length > DMP_RGN23_SIZE) {
5380                lpfc_mbuf_free(phba, mp->virt, mp->phys);
5381                kfree(mp);
5382                rc = -EIO;
5383                goto out_free_mboxq;
5384        }
5385
5386        lpfc_parse_fcoe_conf(phba, mp->virt, data_length);
5387        lpfc_mbuf_free(phba, mp->virt, mp->phys);
5388        kfree(mp);
5389        rc = 0;
5390
5391out_free_mboxq:
5392        mempool_free(mboxq, phba->mbox_mem_pool);
5393        return rc;
5394}
5395
5396/**
5397 * lpfc_sli4_read_rev - Issue READ_REV and collect vpd data
5398 * @phba: pointer to lpfc hba data structure.
5399 * @mboxq: pointer to the LPFC_MBOXQ_t structure.
5400 * @vpd: pointer to the memory to hold resulting port vpd data.
5401 * @vpd_size: On input, the number of bytes allocated to @vpd.
5402 *            On output, the number of data bytes in @vpd.
5403 *
5404 * This routine executes a READ_REV SLI4 mailbox command.  In
5405 * addition, this routine gets the port vpd data.
5406 *
5407 * Return codes
5408 *      0 - successful
5409 *      -ENOMEM - could not allocate memory.
5410 **/
5411static int
5412lpfc_sli4_read_rev(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
5413                    uint8_t *vpd, uint32_t *vpd_size)
5414{
5415        int rc = 0;
5416        uint32_t dma_size;
5417        struct lpfc_dmabuf *dmabuf;
5418        struct lpfc_mqe *mqe;
5419
5420        dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
5421        if (!dmabuf)
5422                return -ENOMEM;
5423
5424        /*
5425         * Get a DMA buffer for the vpd data resulting from the READ_REV
5426         * mailbox command.
5427         */
5428        dma_size = *vpd_size;
5429        dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, dma_size,
5430                                          &dmabuf->phys, GFP_KERNEL);
5431        if (!dmabuf->virt) {
5432                kfree(dmabuf);
5433                return -ENOMEM;
5434        }
5435
5436        /*
5437         * The SLI4 implementation of READ_REV conflicts at word1,
5438         * bits 31:16 and SLI4 adds vpd functionality not present
5439         * in SLI3.  This code corrects the conflicts.
5440         */
5441        lpfc_read_rev(phba, mboxq);
5442        mqe = &mboxq->u.mqe;
5443        mqe->un.read_rev.vpd_paddr_high = putPaddrHigh(dmabuf->phys);
5444        mqe->un.read_rev.vpd_paddr_low = putPaddrLow(dmabuf->phys);
5445        mqe->un.read_rev.word1 &= 0x0000FFFF;
5446        bf_set(lpfc_mbx_rd_rev_vpd, &mqe->un.read_rev, 1);
5447        bf_set(lpfc_mbx_rd_rev_avail_len, &mqe->un.read_rev, dma_size);
5448
5449        rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
5450        if (rc) {
5451                dma_free_coherent(&phba->pcidev->dev, dma_size,
5452                                  dmabuf->virt, dmabuf->phys);
5453                kfree(dmabuf);
5454                return -EIO;
5455        }
5456
5457        /*
5458         * The available vpd length cannot be bigger than the
5459         * DMA buffer passed to the port.  Catch the case where the
5460         * port reports less data and update the caller's size.
5461         */
5462        if (mqe->un.read_rev.avail_vpd_len < *vpd_size)
5463                *vpd_size = mqe->un.read_rev.avail_vpd_len;
5464
5465        memcpy(vpd, dmabuf->virt, *vpd_size);
5466
5467        dma_free_coherent(&phba->pcidev->dev, dma_size,
5468                          dmabuf->virt, dmabuf->phys);
5469        kfree(dmabuf);
5470        return 0;
5471}
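
/*
 * Illustrative usage sketch: how a setup path might size and consume
 * the vpd buffer.  Sizing to SLI4_PAGE_SIZE and handing the result to
 * lpfc_parse_vpd() are assumptions made for the example.
 */
#if 0	/* example only */
	uint32_t vpd_size = SLI4_PAGE_SIZE;
	uint8_t *vpd = kzalloc(vpd_size, GFP_KERNEL);

	if (vpd && !lpfc_sli4_read_rev(phba, mboxq, vpd, &vpd_size)) {
		/* on success, vpd_size holds the number of valid bytes */
		lpfc_parse_vpd(phba, vpd, vpd_size);
	}
	kfree(vpd);
#endif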
5472
5473/**
5474 * lpfc_sli4_get_ctl_attr - Retrieve SLI4 device controller attributes
5475 * @phba: pointer to lpfc hba data structure.
5476 *
5477 * This routine retrieves the controller attributes of the SLI4 device
5478 * this PCI function is attached to.
5479 *
5480 * Return codes
5481 *      0 - successful
5482 *      otherwise - failed to retrieve controller attributes
5483 **/
5484static int
5485lpfc_sli4_get_ctl_attr(struct lpfc_hba *phba)
5486{
5487        LPFC_MBOXQ_t *mboxq;
5488        struct lpfc_mbx_get_cntl_attributes *mbx_cntl_attr;
5489        struct lpfc_controller_attribute *cntl_attr;
5490        void *virtaddr = NULL;
5491        uint32_t alloclen, reqlen;
5492        uint32_t shdr_status, shdr_add_status;
5493        union lpfc_sli4_cfg_shdr *shdr;
5494        int rc;
5495
5496        mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5497        if (!mboxq)
5498                return -ENOMEM;
5499
5500        /* Send COMMON_GET_CNTL_ATTRIBUTES mbox cmd */
5501        reqlen = sizeof(struct lpfc_mbx_get_cntl_attributes);
5502        alloclen = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
5503                        LPFC_MBOX_OPCODE_GET_CNTL_ATTRIBUTES, reqlen,
5504                        LPFC_SLI4_MBX_NEMBED);
5505
5506        if (alloclen < reqlen) {
5507                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5508                                "3084 Allocated DMA memory size (%d) is "
5509                                "less than the requested DMA memory size "
5510                                "(%d)\n", alloclen, reqlen);
5511                rc = -ENOMEM;
5512                goto out_free_mboxq;
5513        }
5514        rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
5515        virtaddr = mboxq->sge_array->addr[0];
5516        mbx_cntl_attr = (struct lpfc_mbx_get_cntl_attributes *)virtaddr;
5517        shdr = &mbx_cntl_attr->cfg_shdr;
5518        shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
5519        shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
5520        if (shdr_status || shdr_add_status || rc) {
5521                lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
5522                                "3085 Mailbox x%x (x%x/x%x) failed, "
5523                                "rc:x%x, status:x%x, add_status:x%x\n",
5524                                bf_get(lpfc_mqe_command, &mboxq->u.mqe),
5525                                lpfc_sli_config_mbox_subsys_get(phba, mboxq),
5526                                lpfc_sli_config_mbox_opcode_get(phba, mboxq),
5527                                rc, shdr_status, shdr_add_status);
5528                rc = -ENXIO;
5529                goto out_free_mboxq;
5530        }
5531
5532        cntl_attr = &mbx_cntl_attr->cntl_attr;
5533        phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_VAL;
5534        phba->sli4_hba.lnk_info.lnk_tp =
5535                bf_get(lpfc_cntl_attr_lnk_type, cntl_attr);
5536        phba->sli4_hba.lnk_info.lnk_no =
5537                bf_get(lpfc_cntl_attr_lnk_numb, cntl_attr);
5538
5539        memset(phba->BIOSVersion, 0, sizeof(phba->BIOSVersion));
5540        strlcat(phba->BIOSVersion, (char *)cntl_attr->bios_ver_str,
5541                sizeof(phba->BIOSVersion));
5542
5543        lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
5544                        "3086 lnk_type:%d, lnk_numb:%d, bios_ver:%s\n",
5545                        phba->sli4_hba.lnk_info.lnk_tp,
5546                        phba->sli4_hba.lnk_info.lnk_no,
5547                        phba->BIOSVersion);
5548out_free_mboxq:
5549        if (rc != MBX_TIMEOUT) {
5550                if (bf_get(lpfc_mqe_command, &mboxq->u.mqe) == MBX_SLI4_CONFIG)
5551                        lpfc_sli4_mbox_cmd_free(phba, mboxq);
5552                else
5553                        mempool_free(mboxq, phba->mbox_mem_pool);
5554        }
5555        return rc;
5556}
5557
5558/**
5559 * lpfc_sli4_retrieve_pport_name - Retrieve SLI4 device physical port name
5560 * @phba: pointer to lpfc hba data structure.
5561 *
5562 * This routine retrieves the physical port name of the SLI4 device
5563 * this PCI function is attached to.
5564 *
5565 * Return codes
5566 *      0 - successful
5567 *      otherwise - failed to retrieve physical port name
5568 **/
5569static int
5570lpfc_sli4_retrieve_pport_name(struct lpfc_hba *phba)
5571{
5572        LPFC_MBOXQ_t *mboxq;
5573        struct lpfc_mbx_get_port_name *get_port_name;
5574        uint32_t shdr_status, shdr_add_status;
5575        union lpfc_sli4_cfg_shdr *shdr;
5576        char cport_name = 0;
5577        int rc;
5578
5579        /* We assume nothing at this point */
5580        phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_INVAL;
5581        phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_NON;
5582
5583        mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5584        if (!mboxq)
5585                return -ENOMEM;
5586        /* obtain link type and link number via READ_CONFIG */
5587        phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_INVAL;
5588        lpfc_sli4_read_config(phba);
5589        if (phba->sli4_hba.lnk_info.lnk_dv == LPFC_LNK_DAT_VAL)
5590                goto retrieve_ppname;
5591
5592        /* obtain link type and link number via COMMON_GET_CNTL_ATTRIBUTES */
5593        rc = lpfc_sli4_get_ctl_attr(phba);
5594        if (rc)
5595                goto out_free_mboxq;
5596
5597retrieve_ppname:
5598        lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
5599                LPFC_MBOX_OPCODE_GET_PORT_NAME,
5600                sizeof(struct lpfc_mbx_get_port_name) -
5601                sizeof(struct lpfc_sli4_cfg_mhdr),
5602                LPFC_SLI4_MBX_EMBED);
5603        get_port_name = &mboxq->u.mqe.un.get_port_name;
5604        shdr = (union lpfc_sli4_cfg_shdr *)&get_port_name->header.cfg_shdr;
5605        bf_set(lpfc_mbox_hdr_version, &shdr->request, LPFC_OPCODE_VERSION_1);
5606        bf_set(lpfc_mbx_get_port_name_lnk_type, &get_port_name->u.request,
5607                phba->sli4_hba.lnk_info.lnk_tp);
5608        rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
5609        shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
5610        shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
5611        if (shdr_status || shdr_add_status || rc) {
5612                lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
5613                                "3087 Mailbox x%x (x%x/x%x) failed: "
5614                                "rc:x%x, status:x%x, add_status:x%x\n",
5615                                bf_get(lpfc_mqe_command, &mboxq->u.mqe),
5616                                lpfc_sli_config_mbox_subsys_get(phba, mboxq),
5617                                lpfc_sli_config_mbox_opcode_get(phba, mboxq),
5618                                rc, shdr_status, shdr_add_status);
5619                rc = -ENXIO;
5620                goto out_free_mboxq;
5621        }
5622        switch (phba->sli4_hba.lnk_info.lnk_no) {
5623        case LPFC_LINK_NUMBER_0:
5624                cport_name = bf_get(lpfc_mbx_get_port_name_name0,
5625                                &get_port_name->u.response);
5626                phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
5627                break;
5628        case LPFC_LINK_NUMBER_1:
5629                cport_name = bf_get(lpfc_mbx_get_port_name_name1,
5630                                &get_port_name->u.response);
5631                phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
5632                break;
5633        case LPFC_LINK_NUMBER_2:
5634                cport_name = bf_get(lpfc_mbx_get_port_name_name2,
5635                                &get_port_name->u.response);
5636                phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
5637                break;
5638        case LPFC_LINK_NUMBER_3:
5639                cport_name = bf_get(lpfc_mbx_get_port_name_name3,
5640                                &get_port_name->u.response);
5641                phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
5642                break;
5643        default:
5644                break;
5645        }
5646
5647        if (phba->sli4_hba.pport_name_sta == LPFC_SLI4_PPNAME_GET) {
5648                phba->Port[0] = cport_name;
5649                phba->Port[1] = '\0';
5650                lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
5651                                "3091 SLI get port name: %s\n", phba->Port);
5652        }
5653
5654out_free_mboxq:
5655        if (rc != MBX_TIMEOUT) {
5656                if (bf_get(lpfc_mqe_command, &mboxq->u.mqe) == MBX_SLI4_CONFIG)
5657                        lpfc_sli4_mbox_cmd_free(phba, mboxq);
5658                else
5659                        mempool_free(mboxq, phba->mbox_mem_pool);
5660        }
5661        return rc;
5662}
5663
5664/**
5665 * lpfc_sli4_arm_cqeq_intr - Arm sli-4 device completion and event queues
5666 * @phba: pointer to lpfc hba data structure.
5667 *
5668 * This routine is called to explicitly arm the SLI4 device's completion and
5669 * event queues.
5670 **/
5671static void
5672lpfc_sli4_arm_cqeq_intr(struct lpfc_hba *phba)
5673{
5674        int qidx;
5675        struct lpfc_sli4_hba *sli4_hba = &phba->sli4_hba;
5676        struct lpfc_sli4_hdw_queue *qp;
5677        struct lpfc_queue *eq;
5678
5679        sli4_hba->sli4_write_cq_db(phba, sli4_hba->mbx_cq, 0, LPFC_QUEUE_REARM);
5680        sli4_hba->sli4_write_cq_db(phba, sli4_hba->els_cq, 0, LPFC_QUEUE_REARM);
5681        if (sli4_hba->nvmels_cq)
5682                sli4_hba->sli4_write_cq_db(phba, sli4_hba->nvmels_cq, 0,
5683                                           LPFC_QUEUE_REARM);
5684
5685        if (sli4_hba->hdwq) {
5686                /* Loop thru all Hardware Queues */
5687                for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
5688                        qp = &sli4_hba->hdwq[qidx];
5689                        /* ARM the corresponding CQ */
5690                        sli4_hba->sli4_write_cq_db(phba, qp->io_cq, 0,
5691                                                LPFC_QUEUE_REARM);
5692                }
5693
5694                /* Loop thru all IRQ vectors */
5695                for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) {
5696                        eq = sli4_hba->hba_eq_hdl[qidx].eq;
5697                        /* ARM the corresponding EQ */
5698                        sli4_hba->sli4_write_eq_db(phba, eq,
5699                                                   0, LPFC_QUEUE_REARM);
5700                }
5701        }
5702
5703        if (phba->nvmet_support) {
5704                for (qidx = 0; qidx < phba->cfg_nvmet_mrq; qidx++) {
5705                        sli4_hba->sli4_write_cq_db(phba,
5706                                sli4_hba->nvmet_cqset[qidx], 0,
5707                                LPFC_QUEUE_REARM);
5708                }
5709        }
5710}
5711
5712/**
5713 * lpfc_sli4_get_avail_extnt_rsrc - Get available resource extent count.
5714 * @phba: Pointer to HBA context object.
5715 * @type: The resource extent type.
5716 * @extnt_count: buffer to hold port available extent count.
5717 * @extnt_size: buffer to hold element count per extent.
5718 *
5719 * This function calls the port and retrieves the number of available
5720 * extents and their size for a particular extent type.
5721 *
5722 * Returns: 0 if successful.  Nonzero otherwise.
5723 **/
5724int
5725lpfc_sli4_get_avail_extnt_rsrc(struct lpfc_hba *phba, uint16_t type,
5726                               uint16_t *extnt_count, uint16_t *extnt_size)
5727{
5728        int rc = 0;
5729        uint32_t length;
5730        uint32_t mbox_tmo;
5731        struct lpfc_mbx_get_rsrc_extent_info *rsrc_info;
5732        LPFC_MBOXQ_t *mbox;
5733
5734        mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5735        if (!mbox)
5736                return -ENOMEM;
5737
5738        /* Find out how many extents are available for this resource type */
5739        length = (sizeof(struct lpfc_mbx_get_rsrc_extent_info) -
5740                  sizeof(struct lpfc_sli4_cfg_mhdr));
5741        lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
5742                         LPFC_MBOX_OPCODE_GET_RSRC_EXTENT_INFO,
5743                         length, LPFC_SLI4_MBX_EMBED);
5744
5745        /* Send an extents count of 0 - the GET doesn't use it. */
5746        rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, 0, type,
5747                                        LPFC_SLI4_MBX_EMBED);
5748        if (unlikely(rc)) {
5749                rc = -EIO;
5750                goto err_exit;
5751        }
5752
5753        if (!phba->sli4_hba.intr_enable)
5754                rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
5755        else {
5756                mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
5757                rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
5758        }
5759        if (unlikely(rc)) {
5760                rc = -EIO;
5761                goto err_exit;
5762        }
5763
5764        rsrc_info = &mbox->u.mqe.un.rsrc_extent_info;
5765        if (bf_get(lpfc_mbox_hdr_status,
5766                   &rsrc_info->header.cfg_shdr.response)) {
5767                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5768                                "2930 Failed to get resource extents "
5769                                "Status 0x%x Add'l Status 0x%x\n",
5770                                bf_get(lpfc_mbox_hdr_status,
5771                                       &rsrc_info->header.cfg_shdr.response),
5772                                bf_get(lpfc_mbox_hdr_add_status,
5773                                       &rsrc_info->header.cfg_shdr.response));
5774                rc = -EIO;
5775                goto err_exit;
5776        }
5777
5778        *extnt_count = bf_get(lpfc_mbx_get_rsrc_extent_info_cnt,
5779                              &rsrc_info->u.rsp);
5780        *extnt_size = bf_get(lpfc_mbx_get_rsrc_extent_info_size,
5781                             &rsrc_info->u.rsp);
5782
5783        lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
5784                        "3162 Retrieved extents type-%d from port: count:%d, "
5785                        "size:%d\n", type, *extnt_count, *extnt_size);
5786
5787err_exit:
5788        mempool_free(mbox, phba->mbox_mem_pool);
5789        return rc;
5790}
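
/*
 * Illustrative usage sketch: querying XRI extent geometry.  The
 * total_xri variable is hypothetical, introduced only for the example.
 */
#if 0	/* example only */
	uint16_t ext_cnt, ext_size;
	uint32_t total_xri;

	if (!lpfc_sli4_get_avail_extnt_rsrc(phba, LPFC_RSC_TYPE_FCOE_XRI,
					    &ext_cnt, &ext_size))
		total_xri = ext_cnt * ext_size;	/* ids the port can provide */
#endif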
5791
5792/**
5793 * lpfc_sli4_chk_avail_extnt_rsrc - Check for available SLI4 resource extents.
5794 * @phba: Pointer to HBA context object.
5795 * @type: The extent type to check.
5796 *
5797 * This function reads the current available extents from the port and checks
5798 * if the extent count or extent size has changed since the last access.
5799 * Callers use this routine after a port reset to understand if there is an
5800 * extent reprovisioning requirement.
5801 *
5802 * Returns:
5803 *   -EIO: error querying the port for available extents.
5804 *   1: Extent count or size has changed.
5805 *   0: No changes.
5806 **/
5807static int
5808lpfc_sli4_chk_avail_extnt_rsrc(struct lpfc_hba *phba, uint16_t type)
5809{
5810        uint16_t curr_ext_cnt, rsrc_ext_cnt;
5811        uint16_t size_diff, rsrc_ext_size;
5812        int rc = 0;
5813        struct lpfc_rsrc_blks *rsrc_entry;
5814        struct list_head *rsrc_blk_list = NULL;
5815
5816        size_diff = 0;
5817        curr_ext_cnt = 0;
5818        rc = lpfc_sli4_get_avail_extnt_rsrc(phba, type,
5819                                            &rsrc_ext_cnt,
5820                                            &rsrc_ext_size);
5821        if (unlikely(rc))
5822                return -EIO;
5823
5824        switch (type) {
5825        case LPFC_RSC_TYPE_FCOE_RPI:
5826                rsrc_blk_list = &phba->sli4_hba.lpfc_rpi_blk_list;
5827                break;
5828        case LPFC_RSC_TYPE_FCOE_VPI:
5829                rsrc_blk_list = &phba->lpfc_vpi_blk_list;
5830                break;
5831        case LPFC_RSC_TYPE_FCOE_XRI:
5832                rsrc_blk_list = &phba->sli4_hba.lpfc_xri_blk_list;
5833                break;
5834        case LPFC_RSC_TYPE_FCOE_VFI:
5835                rsrc_blk_list = &phba->sli4_hba.lpfc_vfi_blk_list;
5836                break;
5837        default:
5838                break;
5839        }
5840
5841        list_for_each_entry(rsrc_entry, rsrc_blk_list, list) {
5842                curr_ext_cnt++;
5843                if (rsrc_entry->rsrc_size != rsrc_ext_size)
5844                        size_diff++;
5845        }
5846
5847        if (curr_ext_cnt != rsrc_ext_cnt || size_diff != 0)
5848                rc = 1;
5849
5850        return rc;
5851}
5852
5853/**
5854 * lpfc_sli4_cfg_post_extnts - Post an extent allocation request to the port
5855 * @phba: Pointer to HBA context object.
5856 * @extnt_cnt: number of extents to request.
5857 * @type: the extent type (rpi, xri, vfi, vpi).
5858 * @emb: buffer to hold either MBX_EMBED or MBX_NEMBED operation.
5859 * @mbox: pointer to the caller's allocated mailbox structure.
5860 *
5861 * This function executes the extents allocation request.  It also
5862 * takes care of the amount of memory needed to allocate or get the
5863 * allocated extents. It is the caller's responsibility to evaluate
5864 * the response.
5865 *
5866 * Returns:
5867 *   -Error:  Error value describes the condition found.
5868 *   0: if successful
5869 **/
5870static int
5871lpfc_sli4_cfg_post_extnts(struct lpfc_hba *phba, uint16_t extnt_cnt,
5872                          uint16_t type, bool *emb, LPFC_MBOXQ_t *mbox)
5873{
5874        int rc = 0;
5875        uint32_t req_len;
5876        uint32_t emb_len;
5877        uint32_t alloc_len, mbox_tmo;
5878
5879        /* Calculate the total requested length of the dma memory */
5880        req_len = extnt_cnt * sizeof(uint16_t);
5881
5882        /*
5883         * Calculate the usable size of an embedded mailbox.  The uint32_t
5884         * accounts for the extent-specific word.
5885         */
5886        emb_len = sizeof(MAILBOX_t) - sizeof(struct mbox_header) -
5887                sizeof(uint32_t);
5888
5889        /*
5890         * Presume the allocation and response will fit into an embedded
5891         * mailbox.  If not true, reconfigure to a non-embedded mailbox.
5892         */
5893        *emb = LPFC_SLI4_MBX_EMBED;
5894        if (req_len > emb_len) {
5895                req_len = extnt_cnt * sizeof(uint16_t) +
5896                        sizeof(union lpfc_sli4_cfg_shdr) +
5897                        sizeof(uint32_t);
5898                *emb = LPFC_SLI4_MBX_NEMBED;
5899        }
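
        /*
         * Illustrative: a request for N ids needs N * sizeof(uint16_t)
         * bytes.  Once that exceeds the embedded payload, req_len is
         * re-derived to also cover the non-embedded cfg_shdr and the
         * extent word that the non-embedded format carries.
         */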
5900
5901        alloc_len = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
5902                                     LPFC_MBOX_OPCODE_ALLOC_RSRC_EXTENT,
5903                                     req_len, *emb);
5904        if (alloc_len < req_len) {
5905                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5906                        "2982 Allocated DMA memory size (x%x) is "
5907                        "less than the requested DMA memory "
5908                        "size (x%x)\n", alloc_len, req_len);
5909                return -ENOMEM;
5910        }
5911        rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, extnt_cnt, type, *emb);
5912        if (unlikely(rc))
5913                return -EIO;
5914
5915        if (!phba->sli4_hba.intr_enable)
5916                rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
5917        else {
5918                mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
5919                rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
5920        }
5921
5922        if (unlikely(rc))
5923                rc = -EIO;
5924        return rc;
5925}
5926
5927/**
5928 * lpfc_sli4_alloc_extent - Allocate an SLI4 resource extent.
5929 * @phba: Pointer to HBA context object.
5930 * @type:  The resource extent type to allocate.
5931 *
5932 * This function allocates the number of elements for the specified
5933 * resource type.
5934 **/
5935static int
5936lpfc_sli4_alloc_extent(struct lpfc_hba *phba, uint16_t type)
5937{
5938        bool emb = false;
5939        uint16_t rsrc_id_cnt, rsrc_cnt, rsrc_size;
5940        uint16_t rsrc_id, rsrc_start, j, k;
5941        uint16_t *ids;
5942        int i, rc;
5943        unsigned long longs;
5944        unsigned long *bmask;
5945        struct lpfc_rsrc_blks *rsrc_blks;
5946        LPFC_MBOXQ_t *mbox;
5947        uint32_t length;
5948        struct lpfc_id_range *id_array = NULL;
5949        void *virtaddr = NULL;
5950        struct lpfc_mbx_nembed_rsrc_extent *n_rsrc;
5951        struct lpfc_mbx_alloc_rsrc_extents *rsrc_ext;
5952        struct list_head *ext_blk_list;
5953
5954        rc = lpfc_sli4_get_avail_extnt_rsrc(phba, type,
5955                                            &rsrc_cnt,
5956                                            &rsrc_size);
5957        if (unlikely(rc))
5958                return -EIO;
5959
5960        if ((rsrc_cnt == 0) || (rsrc_size == 0)) {
5961                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5962                        "3009 No available Resource Extents "
5963                        "for resource type 0x%x: Count: 0x%x, "
5964                        "Size 0x%x\n", type, rsrc_cnt,
5965                        rsrc_size);
5966                return -ENOMEM;
5967        }
5968
5969        lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_INIT | LOG_SLI,
5970                        "2903 Post resource extents type-0x%x: "
5971                        "count:%d, size %d\n", type, rsrc_cnt, rsrc_size);
5972
5973        mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5974        if (!mbox)
5975                return -ENOMEM;
5976
5977        rc = lpfc_sli4_cfg_post_extnts(phba, rsrc_cnt, type, &emb, mbox);
5978        if (unlikely(rc)) {
5979                rc = -EIO;
5980                goto err_exit;
5981        }
5982
5983        /*
5984         * Figure out where the response is located.  Then get local pointers
5985         * to the response data.  The port is not guaranteed to satisfy
5986         * the full requested extent count, so update the local variable
5987         * with the count actually allocated by the port.
5988         */
5989        if (emb == LPFC_SLI4_MBX_EMBED) {
5990                rsrc_ext = &mbox->u.mqe.un.alloc_rsrc_extents;
5991                id_array = &rsrc_ext->u.rsp.id[0];
5992                rsrc_cnt = bf_get(lpfc_mbx_rsrc_cnt, &rsrc_ext->u.rsp);
5993        } else {
5994                virtaddr = mbox->sge_array->addr[0];
5995                n_rsrc = (struct lpfc_mbx_nembed_rsrc_extent *) virtaddr;
5996                rsrc_cnt = bf_get(lpfc_mbx_rsrc_cnt, n_rsrc);
5997                id_array = &n_rsrc->id;
5998        }
5999
6000        longs = ((rsrc_cnt * rsrc_size) + BITS_PER_LONG - 1) / BITS_PER_LONG;
6001        rsrc_id_cnt = rsrc_cnt * rsrc_size;
6002
6003        /*
6004         * Based on the resource size and count, correct the base and max
6005         * resource values.
6006         */
6007        length = sizeof(struct lpfc_rsrc_blks);
6008        switch (type) {
6009        case LPFC_RSC_TYPE_FCOE_RPI:
6010                phba->sli4_hba.rpi_bmask = kcalloc(longs,
6011                                                   sizeof(unsigned long),
6012                                                   GFP_KERNEL);
6013                if (unlikely(!phba->sli4_hba.rpi_bmask)) {
6014                        rc = -ENOMEM;
6015                        goto err_exit;
6016                }
6017                phba->sli4_hba.rpi_ids = kcalloc(rsrc_id_cnt,
6018                                                 sizeof(uint16_t),
6019                                                 GFP_KERNEL);
6020                if (unlikely(!phba->sli4_hba.rpi_ids)) {
6021                        kfree(phba->sli4_hba.rpi_bmask);
6022                        rc = -ENOMEM;
6023                        goto err_exit;
6024                }
6025
6026                /*
6027                 * The next_rpi was initialized with the maximum available
6028                 * count but the port may allocate a smaller number.  Catch
6029                 * that case and update the next_rpi.
6030                 */
6031                phba->sli4_hba.next_rpi = rsrc_id_cnt;
6032
6033                /* Initialize local ptrs for common extent processing later. */
6034                bmask = phba->sli4_hba.rpi_bmask;
6035                ids = phba->sli4_hba.rpi_ids;
6036                ext_blk_list = &phba->sli4_hba.lpfc_rpi_blk_list;
6037                break;
6038        case LPFC_RSC_TYPE_FCOE_VPI:
6039                phba->vpi_bmask = kcalloc(longs, sizeof(unsigned long),
6040                                          GFP_KERNEL);
6041                if (unlikely(!phba->vpi_bmask)) {
6042                        rc = -ENOMEM;
6043                        goto err_exit;
6044                }
6045                phba->vpi_ids = kcalloc(rsrc_id_cnt, sizeof(uint16_t),
6046                                         GFP_KERNEL);
6047                if (unlikely(!phba->vpi_ids)) {
6048                        kfree(phba->vpi_bmask);
6049                        rc = -ENOMEM;
6050                        goto err_exit;
6051                }
6052
6053                /* Initialize local ptrs for common extent processing later. */
6054                bmask = phba->vpi_bmask;
6055                ids = phba->vpi_ids;
6056                ext_blk_list = &phba->lpfc_vpi_blk_list;
6057                break;
6058        case LPFC_RSC_TYPE_FCOE_XRI:
6059                phba->sli4_hba.xri_bmask = kcalloc(longs,
6060                                                   sizeof(unsigned long),
6061                                                   GFP_KERNEL);
6062                if (unlikely(!phba->sli4_hba.xri_bmask)) {
6063                        rc = -ENOMEM;
6064                        goto err_exit;
6065                }
6066                phba->sli4_hba.max_cfg_param.xri_used = 0;
6067                phba->sli4_hba.xri_ids = kcalloc(rsrc_id_cnt,
6068                                                 sizeof(uint16_t),
6069                                                 GFP_KERNEL);
6070                if (unlikely(!phba->sli4_hba.xri_ids)) {
6071                        kfree(phba->sli4_hba.xri_bmask);
6072                        rc = -ENOMEM;
6073                        goto err_exit;
6074                }
6075
6076                /* Initialize local ptrs for common extent processing later. */
6077                bmask = phba->sli4_hba.xri_bmask;
6078                ids = phba->sli4_hba.xri_ids;
6079                ext_blk_list = &phba->sli4_hba.lpfc_xri_blk_list;
6080                break;
6081        case LPFC_RSC_TYPE_FCOE_VFI:
6082                phba->sli4_hba.vfi_bmask = kcalloc(longs,
6083                                                   sizeof(unsigned long),
6084                                                   GFP_KERNEL);
6085                if (unlikely(!phba->sli4_hba.vfi_bmask)) {
6086                        rc = -ENOMEM;
6087                        goto err_exit;
6088                }
6089                phba->sli4_hba.vfi_ids = kcalloc(rsrc_id_cnt,
6090                                                 sizeof(uint16_t),
6091                                                 GFP_KERNEL);
6092                if (unlikely(!phba->sli4_hba.vfi_ids)) {
6093                        kfree(phba->sli4_hba.vfi_bmask);
6094                        rc = -ENOMEM;
6095                        goto err_exit;
6096                }
6097
6098                /* Initialize local ptrs for common extent processing later. */
6099                bmask = phba->sli4_hba.vfi_bmask;
6100                ids = phba->sli4_hba.vfi_ids;
6101                ext_blk_list = &phba->sli4_hba.lpfc_vfi_blk_list;
6102                break;
6103        default:
6104                /* Unsupported Opcode.  Fail call. */
6105                id_array = NULL;
6106                bmask = NULL;
6107                ids = NULL;
6108                ext_blk_list = NULL;
6109                goto err_exit;
6110        }
6111
6112        /*
6113         * Complete initializing the extent configuration with the
6114         * allocated ids assigned to this function.  The bitmask serves
6115         * as an index into the array and manages the available ids.  The
6116         * array just stores the ids communicated to the port via the wqes.
6117         */
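        /*
         * Worked example (illustrative): each lpfc_id_range word packs
         * two 16-bit base ids (word4_0 and word4_1), so consecutive i
         * values share id_array[k] and k advances only after odd i.
         * With rsrc_cnt = 2 and rsrc_size = 4, base ids 100 and 200
         * expand to ids[] = { 100, 101, 102, 103, 200, 201, 202, 203 },
         * and bmask tracks which of those eight ids are in use.
         */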
6118        for (i = 0, j = 0, k = 0; i < rsrc_cnt; i++) {
6119                if ((i % 2) == 0)
6120                        rsrc_id = bf_get(lpfc_mbx_rsrc_id_word4_0,
6121                                         &id_array[k]);
6122                else
6123                        rsrc_id = bf_get(lpfc_mbx_rsrc_id_word4_1,
6124                                         &id_array[k]);
6125
6126                rsrc_blks = kzalloc(length, GFP_KERNEL);
6127                if (unlikely(!rsrc_blks)) {
6128                        rc = -ENOMEM;
6129                        kfree(bmask);
6130                        kfree(ids);
6131                        goto err_exit;
6132                }
6133                rsrc_blks->rsrc_start = rsrc_id;
6134                rsrc_blks->rsrc_size = rsrc_size;
6135                list_add_tail(&rsrc_blks->list, ext_blk_list);
6136                rsrc_start = rsrc_id;
6137                if ((type == LPFC_RSC_TYPE_FCOE_XRI) && (j == 0)) {
6138                        phba->sli4_hba.io_xri_start = rsrc_start +
6139                                lpfc_sli4_get_iocb_cnt(phba);
6140                }
6141
6142                while (rsrc_id < (rsrc_start + rsrc_size)) {
6143                        ids[j] = rsrc_id;
6144                        rsrc_id++;
6145                        j++;
6146                }
6147                /* Entire word processed.  Get next word.*/
6148                if ((i % 2) == 1)
6149                        k++;
6150        }
6151 err_exit:
6152        lpfc_sli4_mbox_cmd_free(phba, mbox);
6153        return rc;
6154}
6155
6156
6157
6158/**
6159 * lpfc_sli4_dealloc_extent - Deallocate an SLI4 resource extent.
6160 * @phba: Pointer to HBA context object.
6161 * @type: the extent's type.
6162 *
6163 * This function deallocates all extents of a particular resource type.
6164 * SLI4 does not allow for deallocating a particular extent range.  It
6165 * is the caller's responsibility to release all kernel memory resources.
6166 **/
6167static int
6168lpfc_sli4_dealloc_extent(struct lpfc_hba *phba, uint16_t type)
6169{
6170        int rc;
6171        uint32_t length, mbox_tmo = 0;
6172        LPFC_MBOXQ_t *mbox;
6173        struct lpfc_mbx_dealloc_rsrc_extents *dealloc_rsrc;
6174        struct lpfc_rsrc_blks *rsrc_blk, *rsrc_blk_next;
6175
6176        mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6177        if (!mbox)
6178                return -ENOMEM;
6179
6180        /*
6181         * This function sends an embedded mailbox because it only sends
6182         * the resource type.  All extents of this type are released by the
6183         * port.
6184         */
6185        length = (sizeof(struct lpfc_mbx_dealloc_rsrc_extents) -
6186                  sizeof(struct lpfc_sli4_cfg_mhdr));
6187        lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
6188                         LPFC_MBOX_OPCODE_DEALLOC_RSRC_EXTENT,
6189                         length, LPFC_SLI4_MBX_EMBED);
6190
6191        /* Send an extents count of 0 - the dealloc doesn't use it. */
6192        rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, 0, type,
6193                                        LPFC_SLI4_MBX_EMBED);
6194        if (unlikely(rc)) {
6195                rc = -EIO;
6196                goto out_free_mbox;
6197        }
6198        if (!phba->sli4_hba.intr_enable)
6199                rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
6200        else {
6201                mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
6202                rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
6203        }
6204        if (unlikely(rc)) {
6205                rc = -EIO;
6206                goto out_free_mbox;
6207        }
6208
6209        dealloc_rsrc = &mbox->u.mqe.un.dealloc_rsrc_extents;
6210        if (bf_get(lpfc_mbox_hdr_status,
6211                   &dealloc_rsrc->header.cfg_shdr.response)) {
6212                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6213                                "2919 Failed to release resource extents "
6214                                "for type %d - Status 0x%x Add'l Status 0x%x. "
6215                                "Resource memory not released.\n",
6216                                type,
6217                                bf_get(lpfc_mbox_hdr_status,
6218                                    &dealloc_rsrc->header.cfg_shdr.response),
6219                                bf_get(lpfc_mbox_hdr_add_status,
6220                                    &dealloc_rsrc->header.cfg_shdr.response));
6221                rc = -EIO;
6222                goto out_free_mbox;
6223        }
6224
6225        /* Release kernel memory resources for the specific type. */
6226        switch (type) {
6227        case LPFC_RSC_TYPE_FCOE_VPI:
6228                kfree(phba->vpi_bmask);
6229                kfree(phba->vpi_ids);
6230                bf_set(lpfc_vpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
6231                list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
6232                                    &phba->lpfc_vpi_blk_list, list) {
6233                        list_del_init(&rsrc_blk->list);
6234                        kfree(rsrc_blk);
6235                }
6236                phba->sli4_hba.max_cfg_param.vpi_used = 0;
6237                break;
6238        case LPFC_RSC_TYPE_FCOE_XRI:
6239                kfree(phba->sli4_hba.xri_bmask);
6240                kfree(phba->sli4_hba.xri_ids);
6241                list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
6242                                    &phba->sli4_hba.lpfc_xri_blk_list, list) {
6243                        list_del_init(&rsrc_blk->list);
6244                        kfree(rsrc_blk);
6245                }
6246                break;
6247        case LPFC_RSC_TYPE_FCOE_VFI:
6248                kfree(phba->sli4_hba.vfi_bmask);
6249                kfree(phba->sli4_hba.vfi_ids);
6250                bf_set(lpfc_vfi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
6251                list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
6252                                    &phba->sli4_hba.lpfc_vfi_blk_list, list) {
6253                        list_del_init(&rsrc_blk->list);
6254                        kfree(rsrc_blk);
6255                }
6256                break;
6257        case LPFC_RSC_TYPE_FCOE_RPI:
6258                /* RPI bitmask and physical id array are cleaned up earlier. */
6259                list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
6260                                    &phba->sli4_hba.lpfc_rpi_blk_list, list) {
6261                        list_del_init(&rsrc_blk->list);
6262                        kfree(rsrc_blk);
6263                }
6264                break;
6265        default:
6266                break;
6267        }
6268
6269        bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
6270
6271 out_free_mbox:
6272        mempool_free(mbox, phba->mbox_mem_pool);
6273        return rc;
6274}
6275
6276static void
6277lpfc_set_features(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox,
6278                  uint32_t feature)
6279{
6280        uint32_t len;
6281
6282        len = sizeof(struct lpfc_mbx_set_feature) -
6283                sizeof(struct lpfc_sli4_cfg_mhdr);
6284        lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
6285                         LPFC_MBOX_OPCODE_SET_FEATURES, len,
6286                         LPFC_SLI4_MBX_EMBED);
6287
6288        switch (feature) {
6289        case LPFC_SET_UE_RECOVERY:
6290                bf_set(lpfc_mbx_set_feature_UER,
6291                       &mbox->u.mqe.un.set_feature, 1);
6292                mbox->u.mqe.un.set_feature.feature = LPFC_SET_UE_RECOVERY;
6293                mbox->u.mqe.un.set_feature.param_len = 8;
6294                break;
6295        case LPFC_SET_MDS_DIAGS:
6296                bf_set(lpfc_mbx_set_feature_mds,
6297                       &mbox->u.mqe.un.set_feature, 1);
6298                bf_set(lpfc_mbx_set_feature_mds_deep_loopbk,
6299                       &mbox->u.mqe.un.set_feature, 1);
6300                mbox->u.mqe.un.set_feature.feature = LPFC_SET_MDS_DIAGS;
6301                mbox->u.mqe.un.set_feature.param_len = 8;
6302                break;
6303        case LPFC_SET_DUAL_DUMP:
6304                bf_set(lpfc_mbx_set_feature_dd,
6305                       &mbox->u.mqe.un.set_feature, LPFC_ENABLE_DUAL_DUMP);
6306                bf_set(lpfc_mbx_set_feature_ddquery,
6307                       &mbox->u.mqe.un.set_feature, 0);
6308                mbox->u.mqe.un.set_feature.feature = LPFC_SET_DUAL_DUMP;
6309                mbox->u.mqe.un.set_feature.param_len = 4;
6310                break;
6311        }
6312
6313        return;
6314}
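
/*
 * Illustrative usage sketch: lpfc_set_features() only builds the
 * request; the caller issues it.  Polled issue and unconditional free
 * are assumptions for the example, not a verbatim driver call site.
 */
#if 0	/* example only */
	LPFC_MBOXQ_t *mboxq;

	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (mboxq) {
		lpfc_set_features(phba, mboxq, LPFC_SET_DUAL_DUMP);
		/* if the port rejects it, the feature stays disabled */
		lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
		mempool_free(mboxq, phba->mbox_mem_pool);
	}
#endif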
6315
6316/**
6317 * lpfc_ras_stop_fwlog - Disable FW logging by the adapter
6318 * @phba: Pointer to HBA context object.
6319 *
6320 * Disable FW logging into host memory on the adapter. To
6321 * be done before reading logs from the host memory.
6322 **/
6323void
6324lpfc_ras_stop_fwlog(struct lpfc_hba *phba)
6325{
6326        struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
6327
6328        spin_lock_irq(&phba->hbalock);
6329        ras_fwlog->state = INACTIVE;
6330        spin_unlock_irq(&phba->hbalock);
6331
6332        /* Disable FW logging to host memory */
6333        writel(LPFC_CTL_PDEV_CTL_DDL_RAS,
6334               phba->sli4_hba.conf_regs_memmap_p + LPFC_CTL_PDEV_CTL_OFFSET);
6335
6336        /* Wait 10ms for firmware to stop using DMA buffer */
6337        usleep_range(10 * 1000, 20 * 1000);
6338}
6339
6340/**
6341 * lpfc_sli4_ras_dma_free - Free memory allocated for FW logging.
6342 * @phba: Pointer to HBA context object.
6343 *
6344 * This function is called to free memory allocated for RAS FW logging
6345 * support in the driver.
6346 **/
6347void
6348lpfc_sli4_ras_dma_free(struct lpfc_hba *phba)
6349{
6350        struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
6351        struct lpfc_dmabuf *dmabuf, *next;
6352
6353        if (!list_empty(&ras_fwlog->fwlog_buff_list)) {
6354                list_for_each_entry_safe(dmabuf, next,
6355                                    &ras_fwlog->fwlog_buff_list,
6356                                    list) {
6357                        list_del(&dmabuf->list);
6358                        dma_free_coherent(&phba->pcidev->dev,
6359                                          LPFC_RAS_MAX_ENTRY_SIZE,
6360                                          dmabuf->virt, dmabuf->phys);
6361                        kfree(dmabuf);
6362                }
6363        }
6364
6365        if (ras_fwlog->lwpd.virt) {
6366                dma_free_coherent(&phba->pcidev->dev,
6367                                  sizeof(uint32_t) * 2,
6368                                  ras_fwlog->lwpd.virt,
6369                                  ras_fwlog->lwpd.phys);
6370                ras_fwlog->lwpd.virt = NULL;
6371        }
6372
6373        spin_lock_irq(&phba->hbalock);
6374        ras_fwlog->state = INACTIVE;
6375        spin_unlock_irq(&phba->hbalock);
6376}
6377
6378/**
6379 * lpfc_sli4_ras_dma_alloc - Allocate DMA memory for FW logging support
6380 * @phba: Pointer to HBA context object.
6381 * @fwlog_buff_count: Count of buffers to be created.
6382 *
6383 * This routine allocates DMA memory for the Log Write Position Data
6384 * (LWPD) and for the buffers into which the adapter posts the FW log.
6385 * The buffer count is calculated from the module parameter
6386 * ras_fwlog_buffsize; each buffer posted to the FW is 64K.
6387 **/
6388
6389static int
6390lpfc_sli4_ras_dma_alloc(struct lpfc_hba *phba,
6391                        uint32_t fwlog_buff_count)
6392{
6393        struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
6394        struct lpfc_dmabuf *dmabuf;
6395        int rc = 0, i = 0;
6396
6397        /* Initialize List */
6398        INIT_LIST_HEAD(&ras_fwlog->fwlog_buff_list);
6399
6400        /* Allocate memory for the LWPD */
6401        ras_fwlog->lwpd.virt = dma_alloc_coherent(&phba->pcidev->dev,
6402                                            sizeof(uint32_t) * 2,
6403                                            &ras_fwlog->lwpd.phys,
6404                                            GFP_KERNEL);
6405        if (!ras_fwlog->lwpd.virt) {
6406                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6407                                "6185 LWPD Memory Alloc Failed\n");
6408
6409                return -ENOMEM;
6410        }
6411
6412        ras_fwlog->fw_buffcount = fwlog_buff_count;
6413        for (i = 0; i < ras_fwlog->fw_buffcount; i++) {
6414                dmabuf = kzalloc(sizeof(struct lpfc_dmabuf),
6415                                 GFP_KERNEL);
6416                if (!dmabuf) {
6417                        rc = -ENOMEM;
6418                        lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6419                                        "6186 Memory Alloc failed FW logging");
6420                        goto free_mem;
6421                }
6422
6423                dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
6424                                                  LPFC_RAS_MAX_ENTRY_SIZE,
6425                                                  &dmabuf->phys, GFP_KERNEL);
6426                if (!dmabuf->virt) {
6427                        kfree(dmabuf);
6428                        rc = -ENOMEM;
6429                        lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6430                                        "6187 DMA Alloc Failed FW logging");
6431                        goto free_mem;
6432                }
6433                dmabuf->buffer_tag = i;
6434                list_add_tail(&dmabuf->list, &ras_fwlog->fwlog_buff_list);
6435        }
6436
6437free_mem:
6438        if (rc)
6439                lpfc_sli4_ras_dma_free(phba);
6440
6441        return rc;
6442}
6443
6444/**
6445 * lpfc_sli4_ras_mbox_cmpl - Completion handler for RAS MBX command
6446 * @phba: pointer to lpfc hba data structure.
6447 * @pmb: pointer to the driver internal queue element for mailbox command.
6448 *
6449 * Completion handler for driver's RAS MBX command to the device.
6450 **/
6451static void
6452lpfc_sli4_ras_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
6453{
6454        MAILBOX_t *mb;
6455        union lpfc_sli4_cfg_shdr *shdr;
6456        uint32_t shdr_status, shdr_add_status;
6457        struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
6458
6459        mb = &pmb->u.mb;
6460
6461        shdr = (union lpfc_sli4_cfg_shdr *)
6462                &pmb->u.mqe.un.ras_fwlog.header.cfg_shdr;
6463        shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
6464        shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
6465
6466        if (mb->mbxStatus != MBX_SUCCESS || shdr_status) {
6467                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6468                                "6188 FW LOG mailbox "
6469                                "completed with status x%x add_status x%x,"
6470                                " mbx status x%x\n",
6471                                shdr_status, shdr_add_status, mb->mbxStatus);
6472
6473                ras_fwlog->ras_hwsupport = false;
6474                goto disable_ras;
6475        }
6476
6477        spin_lock_irq(&phba->hbalock);
6478        ras_fwlog->state = ACTIVE;
6479        spin_unlock_irq(&phba->hbalock);
6480        mempool_free(pmb, phba->mbox_mem_pool);
6481
6482        return;
6483
6484disable_ras:
6485        /* Free RAS DMA memory */
6486        lpfc_sli4_ras_dma_free(phba);
6487        mempool_free(pmb, phba->mbox_mem_pool);
6488}
6489
6490/**
6491 * lpfc_sli4_ras_fwlog_init - Initialize memory and post RAS MBX command
6492 * @phba: pointer to lpfc hba data structure.
6493 * @fwlog_level: Logging verbosity level.
6494 * @fwlog_enable: Enable/Disable logging.
6495 *
6496 * Initialize memory and post mailbox command to enable FW logging in host
6497 * memory.
6498 **/
6499int
6500lpfc_sli4_ras_fwlog_init(struct lpfc_hba *phba,
6501                         uint32_t fwlog_level,
6502                         uint32_t fwlog_enable)
6503{
6504        struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
6505        struct lpfc_mbx_set_ras_fwlog *mbx_fwlog = NULL;
6506        struct lpfc_dmabuf *dmabuf;
6507        LPFC_MBOXQ_t *mbox;
6508        uint32_t len = 0, fwlog_buffsize, fwlog_entry_count;
6509        int rc = 0;
6510
6511        spin_lock_irq(&phba->hbalock);
6512        ras_fwlog->state = INACTIVE;
6513        spin_unlock_irq(&phba->hbalock);
6514
6515        fwlog_buffsize = (LPFC_RAS_MIN_BUFF_POST_SIZE *
6516                          phba->cfg_ras_fwlog_buffsize);
6517        fwlog_entry_count = (fwlog_buffsize/LPFC_RAS_MAX_ENTRY_SIZE);
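        /*
         * Illustrative sizing (assuming a 256K minimum post size and
         * the 64K entry size noted above): cfg_ras_fwlog_buffsize = 2
         * would yield 512K of log space posted as eight 64K buffers.
         */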
6518
6519        /*
6520         * If re-enabling FW logging support, reuse the previously
6521         * allocated DMA buffers while posting the MBX command.
6522         */
6523        if (!ras_fwlog->lwpd.virt) {
6524                rc = lpfc_sli4_ras_dma_alloc(phba, fwlog_entry_count);
6525                if (rc) {
6526                        lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6527                                        "6189 FW Log Memory Allocation Failed");
6528                        return rc;
6529                }
6530        }
6531
6532        /* Setup Mailbox command */
6533        mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6534        if (!mbox) {
6535                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6536                                "6190 RAS MBX Alloc Failed");
6537                rc = -ENOMEM;
6538                goto mem_free;
6539        }
6540
6541        ras_fwlog->fw_loglevel = fwlog_level;
6542        len = (sizeof(struct lpfc_mbx_set_ras_fwlog) -
6543                sizeof(struct lpfc_sli4_cfg_mhdr));
6544
6545        lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_LOWLEVEL,
6546                         LPFC_MBOX_OPCODE_SET_DIAG_LOG_OPTION,
6547                         len, LPFC_SLI4_MBX_EMBED);
6548
6549        mbx_fwlog = (struct lpfc_mbx_set_ras_fwlog *)&mbox->u.mqe.un.ras_fwlog;
6550        bf_set(lpfc_fwlog_enable, &mbx_fwlog->u.request,
6551               fwlog_enable);
6552        bf_set(lpfc_fwlog_loglvl, &mbx_fwlog->u.request,
6553               ras_fwlog->fw_loglevel);
6554        bf_set(lpfc_fwlog_buffcnt, &mbx_fwlog->u.request,
6555               ras_fwlog->fw_buffcount);
6556        bf_set(lpfc_fwlog_buffsz, &mbx_fwlog->u.request,
6557               LPFC_RAS_MAX_ENTRY_SIZE/SLI4_PAGE_SIZE);
6558
6559        /* Update DMA buffer address */
6560        list_for_each_entry(dmabuf, &ras_fwlog->fwlog_buff_list, list) {
6561                memset(dmabuf->virt, 0, LPFC_RAS_MAX_ENTRY_SIZE);
6562
6563                mbx_fwlog->u.request.buff_fwlog[dmabuf->buffer_tag].addr_lo =
6564                        putPaddrLow(dmabuf->phys);
6565
6566                mbx_fwlog->u.request.buff_fwlog[dmabuf->buffer_tag].addr_hi =
6567                        putPaddrHigh(dmabuf->phys);
6568        }
6569
6570        /* Update LWPD address */
6571        mbx_fwlog->u.request.lwpd.addr_lo = putPaddrLow(ras_fwlog->lwpd.phys);
6572        mbx_fwlog->u.request.lwpd.addr_hi = putPaddrHigh(ras_fwlog->lwpd.phys);
6573
6574        spin_lock_irq(&phba->hbalock);
6575        ras_fwlog->state = REG_INPROGRESS;
6576        spin_unlock_irq(&phba->hbalock);
6577        mbox->vport = phba->pport;
6578        mbox->mbox_cmpl = lpfc_sli4_ras_mbox_cmpl;
6579
6580        rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
6581
6582        if (rc == MBX_NOT_FINISHED) {
6583                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6584                                "6191 FW-Log Mailbox failed. "
6585                                "status %d mbxStatus: x%x", rc,
6586                                bf_get(lpfc_mqe_status, &mbox->u.mqe));
6587                mempool_free(mbox, phba->mbox_mem_pool);
6588                rc = -EIO;
6589                goto mem_free;
6590        } else
6591                rc = 0;
6592mem_free:
6593        if (rc)
6594                lpfc_sli4_ras_dma_free(phba);
6595
6596        return rc;
6597}
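
/*
 * Worked example of the sizing math above (a sketch; the constant
 * values shown are assumptions for illustration, the real definitions
 * live in the driver headers):
 *
 *	// Assume LPFC_RAS_MIN_BUFF_POST_SIZE == 256 KiB and
 *	// LPFC_RAS_MAX_ENTRY_SIZE == 64 KiB (assumed values).
 *	// With cfg_ras_fwlog_buffsize == 4:
 *	//	fwlog_buffsize    = 256 KiB * 4    = 1 MiB
 *	//	fwlog_entry_count = 1 MiB / 64 KiB = 16 DMA buffers
 *
 * Each of those buffers is then described to the port in buff_fwlog[]
 * by its DMA address split into addr_lo/addr_hi, exactly as the
 * list_for_each_entry() loop above does.
 */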
6598
6599/**
6600 * lpfc_sli4_ras_setup - Check if RAS is supported on the adapter
6601 * @phba: Pointer to HBA context object.
6602 *
6603 * Check if RAS is supported on the adapter and initialize it.
6604 **/
6605void
6606lpfc_sli4_ras_setup(struct lpfc_hba *phba)
6607{
6608        /* Check whether the RAS FW Log needs to be enabled */
6609        if (lpfc_check_fwlog_support(phba))
6610                return;
6611
6612        lpfc_sli4_ras_fwlog_init(phba, phba->cfg_ras_fwlog_level,
6613                                 LPFC_RAS_ENABLE_LOGGING);
6614}
6615
6616/**
6617 * lpfc_sli4_alloc_resource_identifiers - Allocate all SLI4 resource extents.
6618 * @phba: Pointer to HBA context object.
6619 *
6620 * This function allocates all SLI4 resource identifiers.
6621 **/
6622int
6623lpfc_sli4_alloc_resource_identifiers(struct lpfc_hba *phba)
6624{
6625        int i, rc, error = 0;
6626        uint16_t count, base;
6627        unsigned long longs;
6628
6629        if (!phba->sli4_hba.rpi_hdrs_in_use)
6630                phba->sli4_hba.next_rpi = phba->sli4_hba.max_cfg_param.max_rpi;
6631        if (phba->sli4_hba.extents_in_use) {
6632                /*
6633                 * The port supports resource extents. The XRI, VPI, VFI, RPI
6634                 * resource extent count must be read and allocated before
6635                 * provisioning the resource id arrays.
6636                 */
6637                if (bf_get(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags) ==
6638                    LPFC_IDX_RSRC_RDY) {
6639                        /*
6640                         * Extent-based resources are set - the driver could
6641                         * be in a port reset. Figure out if any corrective
6642                         * actions need to be taken.
6643                         */
6644                        rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
6645                                                 LPFC_RSC_TYPE_FCOE_VFI);
6646                        if (rc != 0)
6647                                error++;
6648                        rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
6649                                                 LPFC_RSC_TYPE_FCOE_VPI);
6650                        if (rc != 0)
6651                                error++;
6652                        rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
6653                                                 LPFC_RSC_TYPE_FCOE_XRI);
6654                        if (rc != 0)
6655                                error++;
6656                        rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
6657                                                 LPFC_RSC_TYPE_FCOE_RPI);
6658                        if (rc != 0)
6659                                error++;
6660
6661                        /*
6662                         * It's possible that the number of resources
6663                         * provided to this port instance changed between
6664                         * resets.  Detect this condition and reallocate
6665                         * resources.  Otherwise, there is no action.
6666                         */
6667                        if (error) {
6668                                lpfc_printf_log(phba, KERN_INFO,
6669                                                LOG_MBOX | LOG_INIT,
6670                                                "2931 Detected extent resource "
6671                                                "change.  Reallocating all "
6672                                                "extents.\n");
6673                                rc = lpfc_sli4_dealloc_extent(phba,
6674                                                 LPFC_RSC_TYPE_FCOE_VFI);
6675                                rc = lpfc_sli4_dealloc_extent(phba,
6676                                                 LPFC_RSC_TYPE_FCOE_VPI);
6677                                rc = lpfc_sli4_dealloc_extent(phba,
6678                                                 LPFC_RSC_TYPE_FCOE_XRI);
6679                                rc = lpfc_sli4_dealloc_extent(phba,
6680                                                 LPFC_RSC_TYPE_FCOE_RPI);
6681                        } else
6682                                return 0;
6683                }
6684
6685                rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_VFI);
6686                if (unlikely(rc))
6687                        goto err_exit;
6688
6689                rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_VPI);
6690                if (unlikely(rc))
6691                        goto err_exit;
6692
6693                rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_RPI);
6694                if (unlikely(rc))
6695                        goto err_exit;
6696
6697                rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_XRI);
6698                if (unlikely(rc))
6699                        goto err_exit;
6700                bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags,
6701                       LPFC_IDX_RSRC_RDY);
6702                return rc;
6703        } else {
6704                /*
6705                 * The port does not support resource extents.  The XRI, VPI,
6706                 * VFI, RPI resource ids were determined from READ_CONFIG.
6707                 * Just allocate the bitmasks and provision the resource id
6708                 * arrays.  If a port reset is active, the resources don't
6709                 * need any action - just exit.
6710                 */
6711                if (bf_get(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags) ==
6712                    LPFC_IDX_RSRC_RDY) {
6713                        lpfc_sli4_dealloc_resource_identifiers(phba);
6714                        lpfc_sli4_remove_rpis(phba);
6715                }
6716                /* RPIs. */
6717                count = phba->sli4_hba.max_cfg_param.max_rpi;
6718                if (count <= 0) {
6719                        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6720                                        "3279 Invalid provisioning of "
6721                                        "rpi:%d\n", count);
6722                        rc = -EINVAL;
6723                        goto err_exit;
6724                }
6725                base = phba->sli4_hba.max_cfg_param.rpi_base;
6726                longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
6727                phba->sli4_hba.rpi_bmask = kcalloc(longs,
6728                                                   sizeof(unsigned long),
6729                                                   GFP_KERNEL);
6730                if (unlikely(!phba->sli4_hba.rpi_bmask)) {
6731                        rc = -ENOMEM;
6732                        goto err_exit;
6733                }
6734                phba->sli4_hba.rpi_ids = kcalloc(count, sizeof(uint16_t),
6735                                                 GFP_KERNEL);
6736                if (unlikely(!phba->sli4_hba.rpi_ids)) {
6737                        rc = -ENOMEM;
6738                        goto free_rpi_bmask;
6739                }
6740
6741                for (i = 0; i < count; i++)
6742                        phba->sli4_hba.rpi_ids[i] = base + i;
6743
6744                /* VPIs. */
6745                count = phba->sli4_hba.max_cfg_param.max_vpi;
6746                if (count <= 0) {
6747                        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6748                                        "3280 Invalid provisioning of "
6749                                        "vpi:%d\n", count);
6750                        rc = -EINVAL;
6751                        goto free_rpi_ids;
6752                }
6753                base = phba->sli4_hba.max_cfg_param.vpi_base;
6754                longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
6755                phba->vpi_bmask = kcalloc(longs, sizeof(unsigned long),
6756                                          GFP_KERNEL);
6757                if (unlikely(!phba->vpi_bmask)) {
6758                        rc = -ENOMEM;
6759                        goto free_rpi_ids;
6760                }
6761                phba->vpi_ids = kcalloc(count, sizeof(uint16_t),
6762                                        GFP_KERNEL);
6763                if (unlikely(!phba->vpi_ids)) {
6764                        rc = -ENOMEM;
6765                        goto free_vpi_bmask;
6766                }
6767
6768                for (i = 0; i < count; i++)
6769                        phba->vpi_ids[i] = base + i;
6770
6771                /* XRIs. */
6772                count = phba->sli4_hba.max_cfg_param.max_xri;
6773                if (count <= 0) {
6774                        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6775                                        "3281 Invalid provisioning of "
6776                                        "xri:%d\n", count);
6777                        rc = -EINVAL;
6778                        goto free_vpi_ids;
6779                }
6780                base = phba->sli4_hba.max_cfg_param.xri_base;
6781                longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
6782                phba->sli4_hba.xri_bmask = kcalloc(longs,
6783                                                   sizeof(unsigned long),
6784                                                   GFP_KERNEL);
6785                if (unlikely(!phba->sli4_hba.xri_bmask)) {
6786                        rc = -ENOMEM;
6787                        goto free_vpi_ids;
6788                }
6789                phba->sli4_hba.max_cfg_param.xri_used = 0;
6790                phba->sli4_hba.xri_ids = kcalloc(count, sizeof(uint16_t),
6791                                                 GFP_KERNEL);
6792                if (unlikely(!phba->sli4_hba.xri_ids)) {
6793                        rc = -ENOMEM;
6794                        goto free_xri_bmask;
6795                }
6796
6797                for (i = 0; i < count; i++)
6798                        phba->sli4_hba.xri_ids[i] = base + i;
6799
6800                /* VFIs. */
6801                count = phba->sli4_hba.max_cfg_param.max_vfi;
6802                if (count <= 0) {
6803                        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6804                                        "3282 Invalid provisioning of "
6805                                        "vfi:%d\n", count);
6806                        rc = -EINVAL;
6807                        goto free_xri_ids;
6808                }
6809                base = phba->sli4_hba.max_cfg_param.vfi_base;
6810                longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
6811                phba->sli4_hba.vfi_bmask = kcalloc(longs,
6812                                                   sizeof(unsigned long),
6813                                                   GFP_KERNEL);
6814                if (unlikely(!phba->sli4_hba.vfi_bmask)) {
6815                        rc = -ENOMEM;
6816                        goto free_xri_ids;
6817                }
6818                phba->sli4_hba.vfi_ids = kcalloc(count, sizeof(uint16_t),
6819                                                 GFP_KERNEL);
6820                if (unlikely(!phba->sli4_hba.vfi_ids)) {
6821                        rc = -ENOMEM;
6822                        goto free_vfi_bmask;
6823                }
6824
6825                for (i = 0; i < count; i++)
6826                        phba->sli4_hba.vfi_ids[i] = base + i;
6827
6828                /*
6829                 * Mark all resources ready.  An HBA reset doesn't need
6830                 * to reset the initialization.
6831                 */
6832                bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags,
6833                       LPFC_IDX_RSRC_RDY);
6834                return 0;
6835        }
6836
6837 free_vfi_bmask:
6838        kfree(phba->sli4_hba.vfi_bmask);
6839        phba->sli4_hba.vfi_bmask = NULL;
6840 free_xri_ids:
6841        kfree(phba->sli4_hba.xri_ids);
6842        phba->sli4_hba.xri_ids = NULL;
6843 free_xri_bmask:
6844        kfree(phba->sli4_hba.xri_bmask);
6845        phba->sli4_hba.xri_bmask = NULL;
6846 free_vpi_ids:
6847        kfree(phba->vpi_ids);
6848        phba->vpi_ids = NULL;
6849 free_vpi_bmask:
6850        kfree(phba->vpi_bmask);
6851        phba->vpi_bmask = NULL;
6852 free_rpi_ids:
6853        kfree(phba->sli4_hba.rpi_ids);
6854        phba->sli4_hba.rpi_ids = NULL;
6855 free_rpi_bmask:
6856        kfree(phba->sli4_hba.rpi_bmask);
6857        phba->sli4_hba.rpi_bmask = NULL;
6858 err_exit:
6859        return rc;
6860}
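
/*
 * Minimal sketch of the bitmask/id-array provisioning pattern repeated
 * above for RPIs, VPIs, XRIs and VFIs. provision_ids() is hypothetical
 * standalone code, not a driver API:
 *
 *	static int provision_ids(u16 base, u16 count,
 *				 unsigned long **bmask, u16 **ids)
 *	{
 *		unsigned long longs = (count + BITS_PER_LONG - 1) /
 *				      BITS_PER_LONG;
 *		int i;
 *
 *		*bmask = kcalloc(longs, sizeof(unsigned long), GFP_KERNEL);
 *		*ids = kcalloc(count, sizeof(u16), GFP_KERNEL);
 *		if (!*bmask || !*ids)
 *			return -ENOMEM;		// caller frees both
 *		for (i = 0; i < count; i++)
 *			(*ids)[i] = base + i;	// index -> physical id
 *		return 0;
 *	}
 *
 * The bitmask tracks which slots are in use; the ids[] array maps a
 * slot index to the physical identifier (base + index) reported by
 * READ_CONFIG.
 */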
6861
6862/**
6863 * lpfc_sli4_dealloc_resource_identifiers - Deallocate all SLI4 resource extents.
6864 * @phba: Pointer to HBA context object.
6865 *
6866 * This function releases all SLI4 resource identifiers, freeing either
6867 * the extents or the bitmask/id arrays depending on the port's mode.
6868 **/
6869int
6870lpfc_sli4_dealloc_resource_identifiers(struct lpfc_hba *phba)
6871{
6872        if (phba->sli4_hba.extents_in_use) {
6873                lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_VPI);
6874                lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_RPI);
6875                lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_XRI);
6876                lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_VFI);
6877        } else {
6878                kfree(phba->vpi_bmask);
6879                phba->sli4_hba.max_cfg_param.vpi_used = 0;
6880                kfree(phba->vpi_ids);
6881                bf_set(lpfc_vpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
6882                kfree(phba->sli4_hba.xri_bmask);
6883                kfree(phba->sli4_hba.xri_ids);
6884                kfree(phba->sli4_hba.vfi_bmask);
6885                kfree(phba->sli4_hba.vfi_ids);
6886                bf_set(lpfc_vfi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
6887                bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
6888        }
6889
6890        return 0;
6891}
6892
6893/**
6894 * lpfc_sli4_get_allocated_extnts - Get the port's allocated extents.
6895 * @phba: Pointer to HBA context object.
6896 * @type: The resource extent type.
6897 * @extnt_cnt: buffer to hold port extent count response
6898 * @extnt_size: buffer to hold port extent size response.
6899 *
6900 * This function calls the port to read the host allocated extents
6901 * for a particular type.
6902 **/
6903int
6904lpfc_sli4_get_allocated_extnts(struct lpfc_hba *phba, uint16_t type,
6905                               uint16_t *extnt_cnt, uint16_t *extnt_size)
6906{
6907        bool emb;
6908        int rc = 0;
6909        uint16_t curr_blks = 0;
6910        uint32_t req_len, emb_len;
6911        uint32_t alloc_len, mbox_tmo;
6912        struct list_head *blk_list_head;
6913        struct lpfc_rsrc_blks *rsrc_blk;
6914        LPFC_MBOXQ_t *mbox;
6915        void *virtaddr = NULL;
6916        struct lpfc_mbx_nembed_rsrc_extent *n_rsrc;
6917        struct lpfc_mbx_alloc_rsrc_extents *rsrc_ext;
6918        union  lpfc_sli4_cfg_shdr *shdr;
6919
6920        switch (type) {
6921        case LPFC_RSC_TYPE_FCOE_VPI:
6922                blk_list_head = &phba->lpfc_vpi_blk_list;
6923                break;
6924        case LPFC_RSC_TYPE_FCOE_XRI:
6925                blk_list_head = &phba->sli4_hba.lpfc_xri_blk_list;
6926                break;
6927        case LPFC_RSC_TYPE_FCOE_VFI:
6928                blk_list_head = &phba->sli4_hba.lpfc_vfi_blk_list;
6929                break;
6930        case LPFC_RSC_TYPE_FCOE_RPI:
6931                blk_list_head = &phba->sli4_hba.lpfc_rpi_blk_list;
6932                break;
6933        default:
6934                return -EIO;
6935        }
6936
6937        /* Count the number of extents currently allocated for this type. */
6938        list_for_each_entry(rsrc_blk, blk_list_head, list) {
6939                if (curr_blks == 0) {
6940                        /*
6941                         * The GET_ALLOCATED mailbox does not return the size,
6942                         * just the count.  All extents of a given type
6943                         * are the same size, so take the size stored in
6944                         * this first allocated block and set the return
6945                         * value now.
6946                         */
6947                        *extnt_size = rsrc_blk->rsrc_size;
6948                }
6949                curr_blks++;
6950        }
6951
6952        /*
6953         * Calculate the size of an embedded mailbox.  The uint32_t
6954         * accounts for the extent-specific word.
6955         */
6956        emb_len = sizeof(MAILBOX_t) - sizeof(struct mbox_header) -
6957                sizeof(uint32_t);
6958
6959        /*
6960         * Presume the allocation and response will fit into an embedded
6961         * mailbox.  If not true, reconfigure to a non-embedded mailbox.
6962         */
6963        emb = LPFC_SLI4_MBX_EMBED;
6964        req_len = curr_blks * sizeof(uint16_t);
6965        if (req_len > emb_len) {
6966                req_len = curr_blks * sizeof(uint16_t) +
6967                        sizeof(union lpfc_sli4_cfg_shdr) +
6968                        sizeof(uint32_t);
6969                emb = LPFC_SLI4_MBX_NEMBED;
6970        }
6971
6972        mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6973        if (!mbox)
6974                return -ENOMEM;
6975        memset(mbox, 0, sizeof(LPFC_MBOXQ_t));
6976
6977        alloc_len = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
6978                                     LPFC_MBOX_OPCODE_GET_ALLOC_RSRC_EXTENT,
6979                                     req_len, emb);
6980        if (alloc_len < req_len) {
6981                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6982                        "2983 Allocated DMA memory size (x%x) is "
6983                        "less than the requested DMA memory "
6984                        "size (x%x)\n", alloc_len, req_len);
6985                rc = -ENOMEM;
6986                goto err_exit;
6987        }
6988        rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, curr_blks, type, emb);
6989        if (unlikely(rc)) {
6990                rc = -EIO;
6991                goto err_exit;
6992        }
6993
6994        if (!phba->sli4_hba.intr_enable)
6995                rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
6996        else {
6997                mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
6998                rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
6999        }
7000
7001        if (unlikely(rc)) {
7002                rc = -EIO;
7003                goto err_exit;
7004        }
7005
7006        /*
7007         * Figure out where the response is located.  Then get local pointers
7008         * to the response data.  The port does not guarantee a response
7009         * for every requested extent count, so update the local variable
7010         * with the allocated count returned by the port.
7011         */
7012        if (emb == LPFC_SLI4_MBX_EMBED) {
7013                rsrc_ext = &mbox->u.mqe.un.alloc_rsrc_extents;
7014                shdr = &rsrc_ext->header.cfg_shdr;
7015                *extnt_cnt = bf_get(lpfc_mbx_rsrc_cnt, &rsrc_ext->u.rsp);
7016        } else {
7017                virtaddr = mbox->sge_array->addr[0];
7018                n_rsrc = (struct lpfc_mbx_nembed_rsrc_extent *) virtaddr;
7019                shdr = &n_rsrc->cfg_shdr;
7020                *extnt_cnt = bf_get(lpfc_mbx_rsrc_cnt, n_rsrc);
7021        }
7022
7023        if (bf_get(lpfc_mbox_hdr_status, &shdr->response)) {
7024                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7025                        "2984 Failed to read allocated resources "
7026                        "for type %d - Status 0x%x Add'l Status 0x%x.\n",
7027                        type,
7028                        bf_get(lpfc_mbox_hdr_status, &shdr->response),
7029                        bf_get(lpfc_mbox_hdr_add_status, &shdr->response));
7030                rc = -EIO;
7031                goto err_exit;
7032        }
7033 err_exit:
7034        lpfc_sli4_mbox_cmd_free(phba, mbox);
7035        return rc;
7036}
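
/*
 * Worked example of the embedded/non-embedded decision above, using
 * assumed (hypothetical) structure sizes purely for illustration:
 *
 *	// Suppose sizeof(MAILBOX_t) == 256 and
 *	// sizeof(struct mbox_header) == 8, so
 *	//	emb_len = 256 - 8 - 4 = 244 bytes of embedded payload.
 *	// Reading 64 allocated extents needs
 *	//	req_len = 64 * sizeof(uint16_t) = 128 bytes -> embedded.
 *	// Reading 512 extents needs 1024 bytes > emb_len, so the
 *	// request is rebuilt with the cfg_shdr + extent word included
 *	// and issued as a non-embedded (external SGE) mailbox.
 */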
7037
7038/**
7039 * lpfc_sli4_repost_sgl_list - Repost the buffers sgl pages as block
7040 * @phba: pointer to lpfc hba data structure.
7041 * @sgl_list: linked list of sgl buffers to post
7042 * @cnt: number of linked list buffers
7043 *
7044 * This routine walks the list of buffers that have been allocated and
7045 * reposts them to the port by using SGL block post. This is needed after a
7046 * pci_function_reset/warm_start or start. It attempts to construct blocks
7047 * of buffer sgls which contain contiguous xris and uses the non-embedded
7048 * SGL block post mailbox command to post them to the port. Any single
7049 * buffer sgl with a non-contiguous xri is posted with the embedded SGL
7050 * post mailbox command instead.
7051 *
7052 * Returns: the number of XRIs actually posted on success, -EIO on failure.
7053 **/
7054static int
7055lpfc_sli4_repost_sgl_list(struct lpfc_hba *phba,
7056                          struct list_head *sgl_list, int cnt)
7057{
7058        struct lpfc_sglq *sglq_entry = NULL;
7059        struct lpfc_sglq *sglq_entry_next = NULL;
7060        struct lpfc_sglq *sglq_entry_first = NULL;
7061        int status, total_cnt;
7062        int post_cnt = 0, num_posted = 0, block_cnt = 0;
7063        int last_xritag = NO_XRI;
7064        LIST_HEAD(prep_sgl_list);
7065        LIST_HEAD(blck_sgl_list);
7066        LIST_HEAD(allc_sgl_list);
7067        LIST_HEAD(post_sgl_list);
7068        LIST_HEAD(free_sgl_list);
7069
7070        spin_lock_irq(&phba->hbalock);
7071        spin_lock(&phba->sli4_hba.sgl_list_lock);
7072        list_splice_init(sgl_list, &allc_sgl_list);
7073        spin_unlock(&phba->sli4_hba.sgl_list_lock);
7074        spin_unlock_irq(&phba->hbalock);
7075
7076        total_cnt = cnt;
7077        list_for_each_entry_safe(sglq_entry, sglq_entry_next,
7078                                 &allc_sgl_list, list) {
7079                list_del_init(&sglq_entry->list);
7080                block_cnt++;
7081                if ((last_xritag != NO_XRI) &&
7082                    (sglq_entry->sli4_xritag != last_xritag + 1)) {
7083                        /* a hole in xri block, form a sgl posting block */
7084                        list_splice_init(&prep_sgl_list, &blck_sgl_list);
7085                        post_cnt = block_cnt - 1;
7086                        /* prepare list for next posting block */
7087                        list_add_tail(&sglq_entry->list, &prep_sgl_list);
7088                        block_cnt = 1;
7089                } else {
7090                        /* prepare list for next posting block */
7091                        list_add_tail(&sglq_entry->list, &prep_sgl_list);
7092                        /* enough sgls for non-embed sgl mbox command */
7093                        if (block_cnt == LPFC_NEMBED_MBOX_SGL_CNT) {
7094                                list_splice_init(&prep_sgl_list,
7095                                                 &blck_sgl_list);
7096                                post_cnt = block_cnt;
7097                                block_cnt = 0;
7098                        }
7099                }
7100                num_posted++;
7101
7102                /* keep track of last sgl's xritag */
7103                last_xritag = sglq_entry->sli4_xritag;
7104
7105                /* end of repost sgl list condition for buffers */
7106                if (num_posted == total_cnt) {
7107                        if (post_cnt == 0) {
7108                                list_splice_init(&prep_sgl_list,
7109                                                 &blck_sgl_list);
7110                                post_cnt = block_cnt;
7111                        } else if (block_cnt == 1) {
7112                                status = lpfc_sli4_post_sgl(phba,
7113                                                sglq_entry->phys, 0,
7114                                                sglq_entry->sli4_xritag);
7115                                if (!status) {
7116                                        /* successful, put sgl to posted list */
7117                                        list_add_tail(&sglq_entry->list,
7118                                                      &post_sgl_list);
7119                                } else {
7120                                        /* Failure, put sgl to free list */
7121                                        lpfc_printf_log(phba, KERN_WARNING,
7122                                                LOG_SLI,
7123                                                "3159 Failed to post "
7124                                                "sgl, xritag:x%x\n",
7125                                                sglq_entry->sli4_xritag);
7126                                        list_add_tail(&sglq_entry->list,
7127                                                      &free_sgl_list);
7128                                        total_cnt--;
7129                                }
7130                        }
7131                }
7132
7133                /* continue until a nembed page worth of sgls */
7134                if (post_cnt == 0)
7135                        continue;
7136
7137                /* post the buffer list sgls as a block */
7138                status = lpfc_sli4_post_sgl_list(phba, &blck_sgl_list,
7139                                                 post_cnt);
7140
7141                if (!status) {
7142                        /* success, put sgl list to posted sgl list */
7143                        list_splice_init(&blck_sgl_list, &post_sgl_list);
7144                } else {
7145                        /* Failure, put sgl list to free sgl list */
7146                        sglq_entry_first = list_first_entry(&blck_sgl_list,
7147                                                            struct lpfc_sglq,
7148                                                            list);
7149                        lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
7150                                        "3160 Failed to post sgl-list, "
7151                                        "xritag:x%x-x%x\n",
7152                                        sglq_entry_first->sli4_xritag,
7153                                        (sglq_entry_first->sli4_xritag +
7154                                         post_cnt - 1));
7155                        list_splice_init(&blck_sgl_list, &free_sgl_list);
7156                        total_cnt -= post_cnt;
7157                }
7158
7159                /* don't reset xritag due to hole in xri block */
7160                if (block_cnt == 0)
7161                        last_xritag = NO_XRI;
7162
7163                /* reset sgl post count for next round of posting */
7164                post_cnt = 0;
7165        }
7166
7167        /* free the sgls failed to post */
7168        lpfc_free_sgl_list(phba, &free_sgl_list);
7169
7170        /* push sgls posted to the available list */
7171        if (!list_empty(&post_sgl_list)) {
7172                spin_lock_irq(&phba->hbalock);
7173                spin_lock(&phba->sli4_hba.sgl_list_lock);
7174                list_splice_init(&post_sgl_list, sgl_list);
7175                spin_unlock(&phba->sli4_hba.sgl_list_lock);
7176                spin_unlock_irq(&phba->hbalock);
7177        } else {
7178                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7179                                "3161 Failure to post sgl to port.\n");
7180                return -EIO;
7181        }
7182
7183        /* return the number of XRIs actually posted */
7184        return total_cnt;
7185}
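
/*
 * Sketch of the xri batching walked above, as hypothetical standalone
 * code (not a driver API): split a sorted tag array into contiguous
 * runs of at most max_cnt entries, one run per block post.
 *
 *	static int next_block_len(const u16 *tag, int n, int start,
 *				  int max_cnt)
 *	{
 *		int len = 1;
 *
 *		while (start + len < n && len < max_cnt &&
 *		       tag[start + len] == tag[start + len - 1] + 1)
 *			len++;
 *		return len;	// sgls to post in this block
 *	}
 *
 * For tags 100,101,102,200,201 this yields blocks [100..102] and
 * [200..201]; the routine above additionally falls back to a single
 * embedded post when the final block degenerates to one sgl.
 */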
7186
7187/**
7188 * lpfc_sli4_repost_io_sgl_list - Repost all the allocated nvme buffer sgls
7189 * @phba: pointer to lpfc hba data structure.
7190 *
7191 * This routine walks the list of nvme buffers that have been allocated and
7192 * reposts them to the port by using SGL block post. This is needed after a
7193 * pci_function_reset/warm_start or start. The lpfc_hba_down_post_s4 routine
7194 * is responsible for moving all nvme buffers on the lpfc_abts_nvme_sgl_list
7195 * to the lpfc_io_buf_list. If the repost fails, reject all nvme buffers.
7196 *
7197 * Returns: 0 = success, non-zero failure.
7198 **/
7199static int
7200lpfc_sli4_repost_io_sgl_list(struct lpfc_hba *phba)
7201{
7202        LIST_HEAD(post_nblist);
7203        int num_posted, rc = 0;
7204
7205        /* move all NVME buffers that need reposting onto a local list */
7206        lpfc_io_buf_flush(phba, &post_nblist);
7207
7208        /* post the list of nvme buffer sgls to port if available */
7209        if (!list_empty(&post_nblist)) {
7210                num_posted = lpfc_sli4_post_io_sgl_list(
7211                        phba, &post_nblist, phba->sli4_hba.io_xri_cnt);
7212                /* failed to post any nvme buffer, return error */
7213                if (num_posted == 0)
7214                        rc = -EIO;
7215        }
7216        return rc;
7217}
7218
7219static void
7220lpfc_set_host_data(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
7221{
7222        uint32_t len;
7223
7224        len = sizeof(struct lpfc_mbx_set_host_data) -
7225                sizeof(struct lpfc_sli4_cfg_mhdr);
7226        lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
7227                         LPFC_MBOX_OPCODE_SET_HOST_DATA, len,
7228                         LPFC_SLI4_MBX_EMBED);
7229
7230        mbox->u.mqe.un.set_host_data.param_id = LPFC_SET_HOST_OS_DRIVER_VERSION;
7231        mbox->u.mqe.un.set_host_data.param_len =
7232                                        LPFC_HOST_OS_DRIVER_VERSION_SIZE;
7233        snprintf(mbox->u.mqe.un.set_host_data.data,
7234                 LPFC_HOST_OS_DRIVER_VERSION_SIZE,
7235                 "Linux %s v"LPFC_DRIVER_VERSION,
7236                 (phba->hba_flag & HBA_FCOE_MODE) ? "FCoE" : "FC");
7237}
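
/*
 * The SET_HOST_DATA payload built above is just an identification
 * string. For example, an FC (non-FCoE) port driven by a hypothetical
 * driver version 12.8.0.0 would receive:
 *
 *	"Linux FC v12.8.0.0"
 *
 * with snprintf() truncating the string to
 * LPFC_HOST_OS_DRIVER_VERSION_SIZE bytes if necessary.
 */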
7238
7239int
7240lpfc_post_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *hrq,
7241                    struct lpfc_queue *drq, int count, int idx)
7242{
7243        int rc, i;
7244        struct lpfc_rqe hrqe;
7245        struct lpfc_rqe drqe;
7246        struct lpfc_rqb *rqbp;
7247        unsigned long flags;
7248        struct rqb_dmabuf *rqb_buffer;
7249        LIST_HEAD(rqb_buf_list);
7250
7251        spin_lock_irqsave(&phba->hbalock, flags);
7252        rqbp = hrq->rqbp;
7253        for (i = 0; i < count; i++) {
7254                /* If the RQ is already full, don't bother */
7255                if (rqbp->buffer_count + i >= rqbp->entry_count - 1)
7256                        break;
7257                rqb_buffer = rqbp->rqb_alloc_buffer(phba);
7258                if (!rqb_buffer)
7259                        break;
7260                rqb_buffer->hrq = hrq;
7261                rqb_buffer->drq = drq;
7262                rqb_buffer->idx = idx;
7263                list_add_tail(&rqb_buffer->hbuf.list, &rqb_buf_list);
7264        }
7265        while (!list_empty(&rqb_buf_list)) {
7266                list_remove_head(&rqb_buf_list, rqb_buffer, struct rqb_dmabuf,
7267                                 hbuf.list);
7268
7269                hrqe.address_lo = putPaddrLow(rqb_buffer->hbuf.phys);
7270                hrqe.address_hi = putPaddrHigh(rqb_buffer->hbuf.phys);
7271                drqe.address_lo = putPaddrLow(rqb_buffer->dbuf.phys);
7272                drqe.address_hi = putPaddrHigh(rqb_buffer->dbuf.phys);
7273                rc = lpfc_sli4_rq_put(hrq, drq, &hrqe, &drqe);
7274                if (rc < 0) {
7275                        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7276                                        "6421 Cannot post to HRQ %d: %x %x %x "
7277                                        "DRQ %x %x\n",
7278                                        hrq->queue_id,
7279                                        hrq->host_index,
7280                                        hrq->hba_index,
7281                                        hrq->entry_count,
7282                                        drq->host_index,
7283                                        drq->hba_index);
7284                        rqbp->rqb_free_buffer(phba, rqb_buffer);
7285                } else {
7286                        list_add_tail(&rqb_buffer->hbuf.list,
7287                                      &rqbp->rqb_buffer_list);
7288                        rqbp->buffer_count++;
7289                }
7290        }
7291        spin_unlock_irqrestore(&phba->hbalock, flags);
7292        return 1;
7293}
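
/*
 * Sketch of the RQE address split used above, assuming the usual
 * putPaddrLow()/putPaddrHigh() semantics of extracting the low and
 * high 32 bits of a 64-bit DMA address (example address hypothetical):
 *
 *	dma_addr_t phys = 0x123456000ULL;
 *	hrqe.address_lo = putPaddrLow(phys);	// 0x23456000
 *	hrqe.address_hi = putPaddrHigh(phys);	// 0x00000001
 *
 * Header and data RQEs are always put as a pair so the port can match
 * a received frame header with its payload buffer.
 */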
7294
7295/**
7296 * lpfc_init_idle_stat_hb - Initialize idle_stat tracking
7297 * @phba: pointer to lpfc hba data structure.
7298 *
7299 * This routine initializes the per-cq idle_stat to dynamically dictate
7300 * polling decisions.
7301 *
7302 * Return codes:
7303 *   None
7304 **/
7305static void lpfc_init_idle_stat_hb(struct lpfc_hba *phba)
7306{
7307        int i;
7308        struct lpfc_sli4_hdw_queue *hdwq;
7309        struct lpfc_queue *cq;
7310        struct lpfc_idle_stat *idle_stat;
7311        u64 wall;
7312
7313        for_each_present_cpu(i) {
7314                hdwq = &phba->sli4_hba.hdwq[phba->sli4_hba.cpu_map[i].hdwq];
7315                cq = hdwq->io_cq;
7316
7317                /* Skip if we've already handled this cq's primary CPU */
7318                if (cq->chann != i)
7319                        continue;
7320
7321                idle_stat = &phba->sli4_hba.idle_stat[i];
7322
7323                idle_stat->prev_idle = get_cpu_idle_time(i, &wall, 1);
7324                idle_stat->prev_wall = wall;
7325
7326                if (phba->nvmet_support)
7327                        cq->poll_mode = LPFC_QUEUE_WORK;
7328                else
7329                        cq->poll_mode = LPFC_IRQ_POLL;
7330        }
7331
7332        if (!phba->nvmet_support)
7333                schedule_delayed_work(&phba->idle_stat_delay_work,
7334                                      msecs_to_jiffies(LPFC_IDLE_STAT_DELAY));
7335}
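
/*
 * Sketch of how the snapshot captured above can later be turned into a
 * busy percentage (assumed arithmetic; the driver's actual computation
 * lives in the idle_stat delayed work, not shown here):
 *
 *	u64 wall, idle = get_cpu_idle_time(cpu, &wall, 1);
 *	u64 diff_idle = idle - idle_stat->prev_idle;
 *	u64 diff_wall = wall - idle_stat->prev_wall;
 *	u64 busy_pct = diff_wall ?
 *		div64_u64(100 * (diff_wall - diff_idle), diff_wall) : 0;
 *
 * A mostly idle CPU can afford LPFC_IRQ_POLL completions; a busy CPU
 * is better served deferring to LPFC_QUEUE_WORK.
 */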
7336
7337static void lpfc_sli4_dip(struct lpfc_hba *phba)
7338{
7339        uint32_t if_type;
7340
7341        if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
7342        if (if_type == LPFC_SLI_INTF_IF_TYPE_2 ||
7343            if_type == LPFC_SLI_INTF_IF_TYPE_6) {
7344                struct lpfc_register reg_data;
7345
7346                if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
7347                               &reg_data.word0))
7348                        return;
7349
7350                if (bf_get(lpfc_sliport_status_dip, &reg_data))
7351                        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7352                                        "2904 Firmware Dump Image Present"
7353                                        " on Adapter");
7354        }
7355}
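
/*
 * bf_get()/bf_set(), used in the DIP check above and throughout this
 * file, are the driver's bit-field accessors. Conceptually (a rough
 * sketch; see lpfc_hw4.h for the real macro definitions):
 *
 *	// bf_get(field, ptr) expands to roughly:
 *	//	(((ptr)->field_WORD >> field_SHIFT) & field_MASK)
 *
 * so the DIP test just isolates the lpfc_sliport_status_dip bit from
 * the status word read with lpfc_readl().
 */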
7356
7357/**
7358 * lpfc_sli4_hba_setup - SLI4 device initialization PCI function
7359 * @phba: Pointer to HBA context object.
7360 *
7361 * This function is the main SLI4 device initialization PCI function. This
7362 * function is called by the HBA initialization code, HBA reset code and
7363 * HBA error attention handler code. Caller is not required to hold any
7364 * locks.
7365 **/
7366int
7367lpfc_sli4_hba_setup(struct lpfc_hba *phba)
7368{
7369        int rc, i, cnt, len, dd;
7370        LPFC_MBOXQ_t *mboxq;
7371        struct lpfc_mqe *mqe;
7372        uint8_t *vpd;
7373        uint32_t vpd_size;
7374        uint32_t ftr_rsp = 0;
7375        struct Scsi_Host *shost = lpfc_shost_from_vport(phba->pport);
7376        struct lpfc_vport *vport = phba->pport;
7377        struct lpfc_dmabuf *mp;
7378        struct lpfc_rqb *rqbp;
7379
7380        /* Perform a PCI function reset to start from clean */
7381        rc = lpfc_pci_function_reset(phba);
7382        if (unlikely(rc))
7383                return -ENODEV;
7384
7385        /* Check the HBA Host Status Register for readiness */
7386        rc = lpfc_sli4_post_status_check(phba);
7387        if (unlikely(rc))
7388                return -ENODEV;
7389        else {
7390                spin_lock_irq(&phba->hbalock);
7391                phba->sli.sli_flag |= LPFC_SLI_ACTIVE;
7392                spin_unlock_irq(&phba->hbalock);
7393        }
7394
7395        lpfc_sli4_dip(phba);
7396
7397        /*
7398         * Allocate a single mailbox container for initializing the
7399         * port.
7400         */
7401        mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
7402        if (!mboxq)
7403                return -ENOMEM;
7404
7405        /* Issue READ_REV to collect vpd and FW information. */
7406        vpd_size = SLI4_PAGE_SIZE;
7407        vpd = kzalloc(vpd_size, GFP_KERNEL);
7408        if (!vpd) {
7409                rc = -ENOMEM;
7410                goto out_free_mbox;
7411        }
7412
7413        rc = lpfc_sli4_read_rev(phba, mboxq, vpd, &vpd_size);
7414        if (unlikely(rc)) {
7415                kfree(vpd);
7416                goto out_free_mbox;
7417        }
7418
7419        mqe = &mboxq->u.mqe;
7420        phba->sli_rev = bf_get(lpfc_mbx_rd_rev_sli_lvl, &mqe->un.read_rev);
7421        if (bf_get(lpfc_mbx_rd_rev_fcoe, &mqe->un.read_rev)) {
7422                phba->hba_flag |= HBA_FCOE_MODE;
7423                phba->fcp_embed_io = 0; /* SLI4 FC support only */
7424        } else {
7425                phba->hba_flag &= ~HBA_FCOE_MODE;
7426        }
7427
7428        if (bf_get(lpfc_mbx_rd_rev_cee_ver, &mqe->un.read_rev) ==
7429                LPFC_DCBX_CEE_MODE)
7430                phba->hba_flag |= HBA_FIP_SUPPORT;
7431        else
7432                phba->hba_flag &= ~HBA_FIP_SUPPORT;
7433
7434        phba->hba_flag &= ~HBA_IOQ_FLUSH;
7435
7436        if (phba->sli_rev != LPFC_SLI_REV4) {
7437                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7438                        "0376 READ_REV Error. SLI Level %d "
7439                        "FCoE enabled %d\n",
7440                        phba->sli_rev, phba->hba_flag & HBA_FCOE_MODE);
7441                rc = -EIO;
7442                kfree(vpd);
7443                goto out_free_mbox;
7444        }
7445
7446        /*
7447         * Continue initialization with default values even if the driver
7448         * failed to read FCoE param config regions; only read the
7449         * parameters if the board is FCoE.
7450         */
7451        if (phba->hba_flag & HBA_FCOE_MODE &&
7452            lpfc_sli4_read_fcoe_params(phba))
7453                lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_INIT,
7454                        "2570 Failed to read FCoE parameters\n");
7455
7456        /*
7457         * Retrieve the sli4 device physical port name; failure to do so
7458         * is considered non-fatal.
7459         */
7460        rc = lpfc_sli4_retrieve_pport_name(phba);
7461        if (!rc)
7462                lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
7463                                "3080 Successful retrieving SLI4 device "
7464                                "physical port name: %s.\n", phba->Port);
7465
7466        rc = lpfc_sli4_get_ctl_attr(phba);
7467        if (!rc)
7468                lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
7469                                "8351 Successful retrieving SLI4 device "
7470                                "CTL ATTR\n");
7471
7472        /*
7473         * Evaluate the read rev and vpd data. Populate the driver
7474         * state with the results. If this routine fails, the failure
7475         * is not fatal as the driver will use generic values.
7476         */
7477        rc = lpfc_parse_vpd(phba, vpd, vpd_size);
7478        if (unlikely(!rc)) {
7479                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7480                                "0377 Error %d parsing vpd. "
7481                                "Using defaults.\n", rc);
7482                rc = 0;
7483        }
7484        kfree(vpd);
7485
7486        /* Save information as VPD data */
7487        phba->vpd.rev.biuRev = mqe->un.read_rev.first_hw_rev;
7488        phba->vpd.rev.smRev = mqe->un.read_rev.second_hw_rev;
7489
7490        /*
7491         * This is because the first G7 ASIC doesn't support the standard
7492         * 0x5a NVME cmd descriptor type/subtype.
7493         */
7494        if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
7495                        LPFC_SLI_INTF_IF_TYPE_6) &&
7496            (phba->vpd.rev.biuRev == LPFC_G7_ASIC_1) &&
7497            (phba->vpd.rev.smRev == 0) &&
7498            (phba->cfg_nvme_embed_cmd == 1))
7499                phba->cfg_nvme_embed_cmd = 0;
7500
7501        phba->vpd.rev.endecRev = mqe->un.read_rev.third_hw_rev;
7502        phba->vpd.rev.fcphHigh = bf_get(lpfc_mbx_rd_rev_fcph_high,
7503                                         &mqe->un.read_rev);
7504        phba->vpd.rev.fcphLow = bf_get(lpfc_mbx_rd_rev_fcph_low,
7505                                       &mqe->un.read_rev);
7506        phba->vpd.rev.feaLevelHigh = bf_get(lpfc_mbx_rd_rev_ftr_lvl_high,
7507                                            &mqe->un.read_rev);
7508        phba->vpd.rev.feaLevelLow = bf_get(lpfc_mbx_rd_rev_ftr_lvl_low,
7509                                           &mqe->un.read_rev);
7510        phba->vpd.rev.sli1FwRev = mqe->un.read_rev.fw_id_rev;
7511        memcpy(phba->vpd.rev.sli1FwName, mqe->un.read_rev.fw_name, 16);
7512        phba->vpd.rev.sli2FwRev = mqe->un.read_rev.ulp_fw_id_rev;
7513        memcpy(phba->vpd.rev.sli2FwName, mqe->un.read_rev.ulp_fw_name, 16);
7514        phba->vpd.rev.opFwRev = mqe->un.read_rev.fw_id_rev;
7515        memcpy(phba->vpd.rev.opFwName, mqe->un.read_rev.fw_name, 16);
7516        lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
7517                        "(%d):0380 READ_REV Status x%x "
7518                        "fw_rev:%s fcphHi:%x fcphLo:%x flHi:%x flLo:%x\n",
7519                        mboxq->vport ? mboxq->vport->vpi : 0,
7520                        bf_get(lpfc_mqe_status, mqe),
7521                        phba->vpd.rev.opFwName,
7522                        phba->vpd.rev.fcphHigh, phba->vpd.rev.fcphLow,
7523                        phba->vpd.rev.feaLevelHigh, phba->vpd.rev.feaLevelLow);
7524
7525        if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
7526            LPFC_SLI_INTF_IF_TYPE_0) {
7527                lpfc_set_features(phba, mboxq, LPFC_SET_UE_RECOVERY);
7528                rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7529                if (rc == MBX_SUCCESS) {
7530                        phba->hba_flag |= HBA_RECOVERABLE_UE;
7531                        /* Set 1Sec interval to detect UE */
7532                        phba->eratt_poll_interval = 1;
7533                        phba->sli4_hba.ue_to_sr = bf_get(
7534                                        lpfc_mbx_set_feature_UESR,
7535                                        &mboxq->u.mqe.un.set_feature);
7536                        phba->sli4_hba.ue_to_rp = bf_get(
7537                                        lpfc_mbx_set_feature_UERP,
7538                                        &mboxq->u.mqe.un.set_feature);
7539                }
7540        }
7541
7542        if (phba->cfg_enable_mds_diags && phba->mds_diags_support) {
7543                /* Enable MDS Diagnostics only if the SLI Port supports it */
7544                lpfc_set_features(phba, mboxq, LPFC_SET_MDS_DIAGS);
7545                rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7546                if (rc != MBX_SUCCESS)
7547                        phba->mds_diags_support = 0;
7548        }
7549
7550        /*
7551         * Discover the port's supported feature set and match it against the
7552         * host's requests.
7553         */
7554        lpfc_request_features(phba, mboxq);
7555        rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7556        if (unlikely(rc)) {
7557                rc = -EIO;
7558                goto out_free_mbox;
7559        }
7560
7561        /*
7562         * The port must support FCP initiator mode as this is the
7563         * only mode running in the host.
7564         */
7565        if (!(bf_get(lpfc_mbx_rq_ftr_rsp_fcpi, &mqe->un.req_ftrs))) {
7566                lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
7567                                "0378 No support for fcpi mode.\n");
7568                ftr_rsp++;
7569        }
7570
7571        /* Performance Hints are ONLY for FCoE */
7572        if (phba->hba_flag & HBA_FCOE_MODE) {
7573                if (bf_get(lpfc_mbx_rq_ftr_rsp_perfh, &mqe->un.req_ftrs))
7574                        phba->sli3_options |= LPFC_SLI4_PERFH_ENABLED;
7575                else
7576                        phba->sli3_options &= ~LPFC_SLI4_PERFH_ENABLED;
7577        }
7578
7579        /*
7580         * If the port cannot support the host's requested features
7581         * then turn off the global config parameters to disable the
7582         * feature in the driver.  This is not a fatal error.
7583         */
7584        if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) {
7585                if (!(bf_get(lpfc_mbx_rq_ftr_rsp_dif, &mqe->un.req_ftrs))) {
7586                        phba->cfg_enable_bg = 0;
7587                        phba->sli3_options &= ~LPFC_SLI3_BG_ENABLED;
7588                        ftr_rsp++;
7589                }
7590        }
7591
7592        if (phba->max_vpi && phba->cfg_enable_npiv &&
7593            !(bf_get(lpfc_mbx_rq_ftr_rsp_npiv, &mqe->un.req_ftrs)))
7594                ftr_rsp++;
7595
7596        if (ftr_rsp) {
7597                lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
7598                                "0379 Feature Mismatch Data: x%08x %08x "
7599                                "x%x x%x x%x\n", mqe->un.req_ftrs.word2,
7600                                mqe->un.req_ftrs.word3, phba->cfg_enable_bg,
7601                                phba->cfg_enable_npiv, phba->max_vpi);
7602                if (!(bf_get(lpfc_mbx_rq_ftr_rsp_dif, &mqe->un.req_ftrs)))
7603                        phba->cfg_enable_bg = 0;
7604                if (!(bf_get(lpfc_mbx_rq_ftr_rsp_npiv, &mqe->un.req_ftrs)))
7605                        phba->cfg_enable_npiv = 0;
7606        }
7607
7608        /* These SLI3 features are assumed in SLI4 */
7609        spin_lock_irq(&phba->hbalock);
7610        phba->sli3_options |= (LPFC_SLI3_NPIV_ENABLED | LPFC_SLI3_HBQ_ENABLED);
7611        spin_unlock_irq(&phba->hbalock);
7612
7613        /* Always try to enable dual dump feature if we can */
7614        lpfc_set_features(phba, mboxq, LPFC_SET_DUAL_DUMP);
7615        rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7616        dd = bf_get(lpfc_mbx_set_feature_dd, &mboxq->u.mqe.un.set_feature);
7617        if ((rc == MBX_SUCCESS) && (dd == LPFC_ENABLE_DUAL_DUMP))
7618                lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
7619                                "6448 Dual Dump is enabled\n");
7620        else
7621                lpfc_printf_log(phba, KERN_INFO, LOG_SLI | LOG_INIT,
7622                                "6447 Dual Dump Mailbox x%x (x%x/x%x) failed, "
7623                                "rc:x%x dd:x%x\n",
7624                                bf_get(lpfc_mqe_command, &mboxq->u.mqe),
7625                                lpfc_sli_config_mbox_subsys_get(
7626                                        phba, mboxq),
7627                                lpfc_sli_config_mbox_opcode_get(
7628                                        phba, mboxq),
7629                                rc, dd);
7630        /*
7631         * Allocate all resources (xri,rpi,vpi,vfi) now.  Subsequent
7632         * calls depend on these resources to complete port setup.
7633         */
7634        rc = lpfc_sli4_alloc_resource_identifiers(phba);
7635        if (rc) {
7636                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7637                                "2920 Failed to alloc Resource IDs "
7638                                "rc = x%x\n", rc);
7639                goto out_free_mbox;
7640        }
7641
7642        lpfc_set_host_data(phba, mboxq);
7643
7644        rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7645        if (rc) {
7646                lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
7647                                "2134 Failed to set host os driver version %x",
7648                                rc);
7649        }
7650
7651        /* Read the port's service parameters. */
7652        rc = lpfc_read_sparam(phba, mboxq, vport->vpi);
7653        if (rc) {
7654                phba->link_state = LPFC_HBA_ERROR;
7655                rc = -ENOMEM;
7656                goto out_free_mbox;
7657        }
7658
7659        mboxq->vport = vport;
7660        rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7661        mp = (struct lpfc_dmabuf *)mboxq->ctx_buf;
7662        if (rc == MBX_SUCCESS) {
7663                memcpy(&vport->fc_sparam, mp->virt, sizeof(struct serv_parm));
7664                rc = 0;
7665        }
7666
7667        /*
7668         * This memory was allocated by the lpfc_read_sparam routine. Release
7669         * it to the mbuf pool.
7670         */
7671        lpfc_mbuf_free(phba, mp->virt, mp->phys);
7672        kfree(mp);
7673        mboxq->ctx_buf = NULL;
7674        if (unlikely(rc)) {
7675                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7676                                "0382 READ_SPARAM command failed "
7677                                "status %d, mbxStatus x%x\n",
7678                                rc, bf_get(lpfc_mqe_status, mqe));
7679                phba->link_state = LPFC_HBA_ERROR;
7680                rc = -EIO;
7681                goto out_free_mbox;
7682        }
7683
7684        lpfc_update_vport_wwn(vport);
7685
7686        /* Update the fc_host data structures with new wwn. */
7687        fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
7688        fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
7689
7690        /* Create all the SLI4 queues */
7691        rc = lpfc_sli4_queue_create(phba);
7692        if (rc) {
7693                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7694                                "3089 Failed to allocate queues\n");
7695                rc = -ENODEV;
7696                goto out_free_mbox;
7697        }
7698        /* Set up all the queues to the device */
7699        rc = lpfc_sli4_queue_setup(phba);
7700        if (unlikely(rc)) {
7701                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7702                                "0381 Error %d during queue setup.\n", rc);
7703                goto out_stop_timers;
7704        }
7705        /* Initialize the driver internal SLI layer lists. */
7706        lpfc_sli4_setup(phba);
7707        lpfc_sli4_queue_init(phba);
7708
7709        /* update host els xri-sgl sizes and mappings */
7710        rc = lpfc_sli4_els_sgl_update(phba);
7711        if (unlikely(rc)) {
7712                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7713                                "1400 Failed to update xri-sgl size and "
7714                                "mapping: %d\n", rc);
7715                goto out_destroy_queue;
7716        }
7717
7718        /* register the els sgl pool to the port */
7719        rc = lpfc_sli4_repost_sgl_list(phba, &phba->sli4_hba.lpfc_els_sgl_list,
7720                                       phba->sli4_hba.els_xri_cnt);
7721        if (unlikely(rc < 0)) {
7722                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7723                                "0582 Error %d during els sgl post "
7724                                "operation\n", rc);
7725                rc = -ENODEV;
7726                goto out_destroy_queue;
7727        }
7728        phba->sli4_hba.els_xri_cnt = rc;
7729
7730        if (phba->nvmet_support) {
7731                /* update host nvmet xri-sgl sizes and mappings */
7732                rc = lpfc_sli4_nvmet_sgl_update(phba);
7733                if (unlikely(rc)) {
7734                        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7735                                        "6308 Failed to update nvmet-sgl size "
7736                                        "and mapping: %d\n", rc);
7737                        goto out_destroy_queue;
7738                }
7739
7740                /* register the nvmet sgl pool to the port */
7741                rc = lpfc_sli4_repost_sgl_list(
7742                        phba,
7743                        &phba->sli4_hba.lpfc_nvmet_sgl_list,
7744                        phba->sli4_hba.nvmet_xri_cnt);
7745                if (unlikely(rc < 0)) {
7746                        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7747                                        "3117 Error %d during nvmet "
7748                                        "sgl post\n", rc);
7749                        rc = -ENODEV;
7750                        goto out_destroy_queue;
7751                }
7752                phba->sli4_hba.nvmet_xri_cnt = rc;
7753
7754                /* We allocate an iocbq for every receive context SGL.
7755                 * The additional allocation is for abort and ls handling.
7756                 */
7757                cnt = phba->sli4_hba.nvmet_xri_cnt +
7758                        phba->sli4_hba.max_cfg_param.max_xri;
7759        } else {
7760                /* update host common xri-sgl sizes and mappings */
7761                rc = lpfc_sli4_io_sgl_update(phba);
7762                if (unlikely(rc)) {
7763                        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7764                                        "6082 Failed to update nvme-sgl size "
7765                                        "and mapping: %d\n", rc);
7766                        goto out_destroy_queue;
7767                }
7768
7769                /* register the allocated common sgl pool to the port */
7770                rc = lpfc_sli4_repost_io_sgl_list(phba);
7771                if (unlikely(rc)) {
7772                        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7773                                        "6116 Error %d during nvme sgl post "
7774                                        "operation\n", rc);
7775                        /* Some NVME buffers were moved to abort nvme list */
7776                        /* A pci function reset will repost them */
7777                        rc = -ENODEV;
7778                        goto out_destroy_queue;
7779                }
7780                /* Each lpfc_io_buf job structure has an iocbq element.
7781                 * This cnt provides for abort, els, ct and ls requests.
7782                 */
7783                cnt = phba->sli4_hba.max_cfg_param.max_xri;
7784        }
7785
7786        if (!phba->sli.iocbq_lookup) {
7787                /* Initialize and populate the iocb list per host */
7788                lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7789                                "2821 initialize iocb list with %d entries\n",
7790                                cnt);
7791                rc = lpfc_init_iocb_list(phba, cnt);
7792                if (rc) {
7793                        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7794                                        "1413 Failed to init iocb list.\n");
7795                        goto out_destroy_queue;
7796                }
7797        }
7798
7799        if (phba->nvmet_support)
7800                lpfc_nvmet_create_targetport(phba);
7801
7802        if (phba->nvmet_support && phba->cfg_nvmet_mrq) {
7803                /* Post initial buffers to all RQs created */
7804                for (i = 0; i < phba->cfg_nvmet_mrq; i++) {
7805                        rqbp = phba->sli4_hba.nvmet_mrq_hdr[i]->rqbp;
7806                        INIT_LIST_HEAD(&rqbp->rqb_buffer_list);
7807                        rqbp->rqb_alloc_buffer = lpfc_sli4_nvmet_alloc;
7808                        rqbp->rqb_free_buffer = lpfc_sli4_nvmet_free;
7809                        rqbp->entry_count = LPFC_NVMET_RQE_DEF_COUNT;
7810                        rqbp->buffer_count = 0;
7811
7812                        lpfc_post_rq_buffer(
7813                                phba, phba->sli4_hba.nvmet_mrq_hdr[i],
7814                                phba->sli4_hba.nvmet_mrq_data[i],
7815                                phba->cfg_nvmet_mrq_post, i);
7816                }
7817        }
7818
7819        /* Post the rpi header region to the device. */
7820        rc = lpfc_sli4_post_all_rpi_hdrs(phba);
7821        if (unlikely(rc)) {
7822                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7823                                "0393 Error %d during rpi post operation\n",
7824                                rc);
7825                rc = -ENODEV;
7826                goto out_destroy_queue;
7827        }
7828        lpfc_sli4_node_prep(phba);
7829
7830        if (!(phba->hba_flag & HBA_FCOE_MODE)) {
7831                if ((phba->nvmet_support == 0) || (phba->cfg_nvmet_mrq == 1)) {
7832                        /*
7833                         * The FC Port needs to register FCFI (index 0)
7834                         */
7835                        lpfc_reg_fcfi(phba, mboxq);
7836                        mboxq->vport = phba->pport;
7837                        rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7838                        if (rc != MBX_SUCCESS)
7839                                goto out_unset_queue;
7840                        rc = 0;
7841                        phba->fcf.fcfi = bf_get(lpfc_reg_fcfi_fcfi,
7842                                                &mboxq->u.mqe.un.reg_fcfi);
7843                } else {
7844                        /* We are in NVME Target mode with MRQ > 1 */
7845
7846                        /* First register the FCFI */
7847                        lpfc_reg_fcfi_mrq(phba, mboxq, 0);
7848                        mboxq->vport = phba->pport;
7849                        rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7850                        if (rc != MBX_SUCCESS)
7851                                goto out_unset_queue;
7852                        rc = 0;
7853                        phba->fcf.fcfi = bf_get(lpfc_reg_fcfi_mrq_fcfi,
7854                                                &mboxq->u.mqe.un.reg_fcfi_mrq);
7855
7856                        /* Next register the MRQs */
7857                        lpfc_reg_fcfi_mrq(phba, mboxq, 1);
7858                        mboxq->vport = phba->pport;
7859                        rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7860                        if (rc != MBX_SUCCESS)
7861                                goto out_unset_queue;
7862                        rc = 0;
7863                }
7864                /* Check if the port is configured to be disabled */
7865                lpfc_sli_read_link_ste(phba);
7866        }
7867
7868        /* Don't post more new bufs if repost already recovered
7869         * the nvme sgls.
7870         */
7871        if (phba->nvmet_support == 0) {
7872                if (phba->sli4_hba.io_xri_cnt == 0) {
7873                        len = lpfc_new_io_buf(
7874                                              phba, phba->sli4_hba.io_xri_max);
7875                        if (len == 0) {
7876                                rc = -ENOMEM;
7877                                goto out_unset_queue;
7878                        }
7879
7880                        if (phba->cfg_xri_rebalancing)
7881                                lpfc_create_multixri_pools(phba);
7882                }
7883        } else {
7884                phba->cfg_xri_rebalancing = 0;
7885        }
7886
7887        /* Allow asynchronous mailbox command to go through */
7888        spin_lock_irq(&phba->hbalock);
7889        phba->sli.sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
7890        spin_unlock_irq(&phba->hbalock);
7891
7892        /* Post receive buffers to the device */
7893        lpfc_sli4_rb_setup(phba);
7894
7895        /* Reset HBA FCF states after HBA reset */
7896        phba->fcf.fcf_flag = 0;
7897        phba->fcf.current_rec.flag = 0;
7898
7899        /* Start the ELS watchdog timer */
7900        mod_timer(&vport->els_tmofunc,
7901                  jiffies + msecs_to_jiffies(1000 * (phba->fc_ratov * 2)));
7902
7903        /* Start heart beat timer */
7904        mod_timer(&phba->hb_tmofunc,
7905                  jiffies + msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
7906        phba->hb_outstanding = 0;
7907        phba->last_completion_time = jiffies;
7908
7909        /* start eq_delay heartbeat */
7910        if (phba->cfg_auto_imax)
7911                queue_delayed_work(phba->wq, &phba->eq_delay_work,
7912                                   msecs_to_jiffies(LPFC_EQ_DELAY_MSECS));
7913
7914        /* start per phba idle_stat_delay heartbeat */
7915        lpfc_init_idle_stat_hb(phba);
7916
7917        /* Start error attention (ERATT) polling timer */
7918        mod_timer(&phba->eratt_poll,
7919                  jiffies + msecs_to_jiffies(1000 * phba->eratt_poll_interval));
7920
7921        /* Enable PCIe device Advanced Error Reporting (AER) if configured */
7922        if (phba->cfg_aer_support == 1 && !(phba->hba_flag & HBA_AER_ENABLED)) {
7923                rc = pci_enable_pcie_error_reporting(phba->pcidev);
7924                if (!rc) {
7925                        lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7926                                        "2829 This device supports "
7927                                        "Advanced Error Reporting (AER)\n");
7928                        spin_lock_irq(&phba->hbalock);
7929                        phba->hba_flag |= HBA_AER_ENABLED;
7930                        spin_unlock_irq(&phba->hbalock);
7931                } else {
7932                        lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7933                                        "2830 This device does not support "
7934                                        "Advanced Error Reporting (AER)\n");
7935                        phba->cfg_aer_support = 0;
7936                }
7937                rc = 0;
7938        }
7939
7940        /*
7941         * The port is ready, set the host's link state to LINK_DOWN
7942         * in preparation for link interrupts.
7943         */
7944        spin_lock_irq(&phba->hbalock);
7945        phba->link_state = LPFC_LINK_DOWN;
7946
7947        /* Check if physical ports are trunked */
7948        if (bf_get(lpfc_conf_trunk_port0, &phba->sli4_hba))
7949                phba->trunk_link.link0.state = LPFC_LINK_DOWN;
7950        if (bf_get(lpfc_conf_trunk_port1, &phba->sli4_hba))
7951                phba->trunk_link.link1.state = LPFC_LINK_DOWN;
7952        if (bf_get(lpfc_conf_trunk_port2, &phba->sli4_hba))
7953                phba->trunk_link.link2.state = LPFC_LINK_DOWN;
7954        if (bf_get(lpfc_conf_trunk_port3, &phba->sli4_hba))
7955                phba->trunk_link.link3.state = LPFC_LINK_DOWN;
7956        spin_unlock_irq(&phba->hbalock);
7957
7958        /* Arm the CQs and then EQs on device */
7959        lpfc_sli4_arm_cqeq_intr(phba);
7960
7961        /* Indicate device interrupt mode */
7962        phba->sli4_hba.intr_enable = 1;
7963
7964        if (!(phba->hba_flag & HBA_FCOE_MODE) &&
7965            (phba->hba_flag & LINK_DISABLED)) {
7966                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7967                                "3103 Adapter Link is disabled.\n");
7968                lpfc_down_link(phba, mboxq);
7969                rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7970                if (rc != MBX_SUCCESS) {
7971                        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7972                                        "3104 Adapter failed to issue "
7973                                        "DOWN_LINK mbox cmd, rc:x%x\n", rc);
7974                        goto out_io_buff_free;
7975                }
7976        } else if (phba->cfg_suppress_link_up == LPFC_INITIALIZE_LINK) {
7977                /* don't perform init_link on SLI4 FC port loopback test */
7978                if (!(phba->link_flag & LS_LOOPBACK_MODE)) {
7979                        rc = phba->lpfc_hba_init_link(phba, MBX_NOWAIT);
7980                        if (rc)
7981                                goto out_io_buff_free;
7982                }
7983        }
7984        mempool_free(mboxq, phba->mbox_mem_pool);
7985        return rc;
7986out_io_buff_free:
7987        /* Free allocated IO Buffers */
7988        lpfc_io_free(phba);
7989out_unset_queue:
7990        /* Unset all the queues set up in this routine on error */
7991        lpfc_sli4_queue_unset(phba);
7992out_destroy_queue:
7993        lpfc_free_iocb_list(phba);
7994        lpfc_sli4_queue_destroy(phba);
7995out_stop_timers:
7996        lpfc_stop_hba_timers(phba);
7997out_free_mbox:
7998        mempool_free(mboxq, phba->mbox_mem_pool);
7999        return rc;
8000}
8001
8002/**
8003 * lpfc_mbox_timeout - Timeout call back function for mbox timer
8004 * @t: Context to fetch pointer to hba structure from.
8005 *
8006 * This is the callback function for the mailbox timer. The mailbox
8007 * timer is armed when a new mailbox command is issued and deleted
8008 * when the mailbox command completes. The function is called by the
8009 * kernel timer code when a mailbox command does not complete within
8010 * the expected time. This function wakes up the worker thread to
8011 * process the mailbox timeout and returns. All the processing is
8012 * done by the worker thread function lpfc_mbox_timeout_handler.
8013 **/
8014void
8015lpfc_mbox_timeout(struct timer_list *t)
8016{
8017        struct lpfc_hba  *phba = from_timer(phba, t, sli.mbox_tmo);
8018        unsigned long iflag;
8019        uint32_t tmo_posted;
8020
8021        spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
8022        tmo_posted = phba->pport->work_port_events & WORKER_MBOX_TMO;
8023        if (!tmo_posted)
8024                phba->pport->work_port_events |= WORKER_MBOX_TMO;
8025        spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);
8026
8027        if (!tmo_posted)
8028                lpfc_worker_wake_up(phba);
8030}
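
/*
 * Illustrative sketch (not part of the driver): how the mailbox timer
 * that fires lpfc_mbox_timeout() is armed when a command is issued.
 * This mirrors the MBX_NOWAIT arming done in lpfc_sli_issue_mbox_s3();
 * the function name "example_arm_mbox_timer" is hypothetical.
 */
static void example_arm_mbox_timer(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox)
{
        unsigned long timeout;

        /* Per-command timeout in seconds, converted to jiffies */
        timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, pmbox) * 1000);
        mod_timer(&phba->sli.mbox_tmo, jiffies + timeout);
}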
8031
8032/**
8033 * lpfc_sli4_mbox_completions_pending - check to see if any mailbox completions
8034 *                                    are pending
8035 * @phba: Pointer to HBA context object.
8036 *
8037 * This function checks if any mailbox completions are present on the mailbox
8038 * completion queue.
8039 **/
8040static bool
8041lpfc_sli4_mbox_completions_pending(struct lpfc_hba *phba)
8042{
8044        uint32_t idx;
8045        struct lpfc_queue *mcq;
8046        struct lpfc_mcqe *mcqe;
8047        bool pending_completions = false;
8048        uint8_t qe_valid;
8049
8050        if (unlikely(!phba) || (phba->sli_rev != LPFC_SLI_REV4))
8051                return false;
8052
8053        /* Check for completions on mailbox completion queue */
8054
8055        mcq = phba->sli4_hba.mbx_cq;
8056        idx = mcq->hba_index;
8057        qe_valid = mcq->qe_valid;
8058        while (bf_get_le32(lpfc_cqe_valid,
8059               (struct lpfc_cqe *)lpfc_sli4_qe(mcq, idx)) == qe_valid) {
8060                mcqe = (struct lpfc_mcqe *)(lpfc_sli4_qe(mcq, idx));
8061                if (bf_get_le32(lpfc_trailer_completed, mcqe) &&
8062                    (!bf_get_le32(lpfc_trailer_async, mcqe))) {
8063                        pending_completions = true;
8064                        break;
8065                }
8066                idx = (idx + 1) % mcq->entry_count;
8067                if (mcq->hba_index == idx)
8068                        break;
8069
8070                /* if the index wrapped around, toggle the valid bit */
8071                if (phba->sli4_hba.pc_sli4_params.cqav && !idx)
8072                        qe_valid = (qe_valid) ? 0 : 1;
8073        }
8074        return pending_completions;
8076}
8077
8078/**
8079 * lpfc_sli4_process_missed_mbox_completions - process mbox completions
8080 *                                            that were missed.
8081 * @phba: Pointer to HBA context object.
8082 *
8083 * For SLI4, it is possible to miss an interrupt. As such, mbox completions
8084 * may be missed, causing erroneous mailbox timeouts to occur. This function
8085 * checks to see if mbox completions are on the mailbox completion queue
8086 * and, if so, will process all the completions associated with the EQ for
8087 * the mailbox completion queue.
8088 **/
8089static bool
8090lpfc_sli4_process_missed_mbox_completions(struct lpfc_hba *phba)
8091{
8092        struct lpfc_sli4_hba *sli4_hba = &phba->sli4_hba;
8093        uint32_t eqidx;
8094        struct lpfc_queue *fpeq = NULL;
8095        struct lpfc_queue *eq;
8096        bool mbox_pending;
8097
8098        if (unlikely(!phba) || (phba->sli_rev != LPFC_SLI_REV4))
8099                return false;
8100
8101        /* Find the EQ associated with the mbox CQ */
8102        if (sli4_hba->hdwq) {
8103                for (eqidx = 0; eqidx < phba->cfg_irq_chann; eqidx++) {
8104                        eq = phba->sli4_hba.hba_eq_hdl[eqidx].eq;
8105                        if (eq && eq->queue_id == sli4_hba->mbx_cq->assoc_qid) {
8106                                fpeq = eq;
8107                                break;
8108                        }
8109                }
8110        }
8111        if (!fpeq)
8112                return false;
8113
8114        /* Turn off interrupts from this EQ */
8115
8116        sli4_hba->sli4_eq_clr_intr(fpeq);
8117
8118        /* Check to see if a mbox completion is pending */
8119
8120        mbox_pending = lpfc_sli4_mbox_completions_pending(phba);
8121
8122        /*
8123         * If a mbox completion is pending, process all the events on EQ
8124         * associated with the mbox completion queue (this could include
8125         * mailbox commands, async events, els commands, receive queue data
8126         * and fcp commands)
8127         */
8128
8129        if (mbox_pending)
8130                /* process and rearm the EQ */
8131                lpfc_sli4_process_eq(phba, fpeq, LPFC_QUEUE_REARM);
8132        else
8133                /* Always clear and re-arm the EQ */
8134                sli4_hba->sli4_write_eq_db(phba, fpeq, 0, LPFC_QUEUE_REARM);
8135
8136        return mbox_pending;
8138}
8139
8140/**
8141 * lpfc_mbox_timeout_handler - Worker thread function to handle mailbox timeout
8142 * @phba: Pointer to HBA context object.
8143 *
8144 * This function is called from worker thread when a mailbox command times out.
8145 * The caller is not required to hold any locks. This function will reset the
8146 * HBA and recover all the pending commands.
8147 **/
8148void
8149lpfc_mbox_timeout_handler(struct lpfc_hba *phba)
8150{
8151        LPFC_MBOXQ_t *pmbox = phba->sli.mbox_active;
8152        MAILBOX_t *mb = NULL;
8153
8154        struct lpfc_sli *psli = &phba->sli;
8155
8156        /* If the mailbox completed, process the completion and return */
8157        if (lpfc_sli4_process_missed_mbox_completions(phba))
8158                return;
8159
8160        if (pmbox != NULL)
8161                mb = &pmbox->u.mb;
8162        /* Check the pmbox pointer first.  There is a race condition
8163         * between the mbox timeout handler getting executed in the
8164         * worklist and the mailbox actually completing. When this
8165         * race condition occurs, the mbox_active will be NULL.
8166         */
8167        spin_lock_irq(&phba->hbalock);
8168        if (pmbox == NULL) {
8169                lpfc_printf_log(phba, KERN_WARNING,
8170                                LOG_MBOX | LOG_SLI,
8171                                "0353 Active Mailbox cleared - mailbox timeout "
8172                                "exiting\n");
8173                spin_unlock_irq(&phba->hbalock);
8174                return;
8175        }
8176
8177        /* Mbox cmd <mbxCommand> timeout */
8178        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8179                        "0310 Mailbox command x%x timeout Data: x%x x%x x%px\n",
8180                        mb->mbxCommand,
8181                        phba->pport->port_state,
8182                        phba->sli.sli_flag,
8183                        phba->sli.mbox_active);
8184        spin_unlock_irq(&phba->hbalock);
8185
8186        /* Setting state unknown so lpfc_sli_abort_iocb_ring
8187         * would get IOCB_ERROR from lpfc_sli_issue_iocb, allowing
8188         * it to fail all outstanding SCSI IO.
8189         */
8190        spin_lock_irq(&phba->pport->work_port_lock);
8191        phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
8192        spin_unlock_irq(&phba->pport->work_port_lock);
8193        spin_lock_irq(&phba->hbalock);
8194        phba->link_state = LPFC_LINK_UNKNOWN;
8195        psli->sli_flag &= ~LPFC_SLI_ACTIVE;
8196        spin_unlock_irq(&phba->hbalock);
8197
8198        lpfc_sli_abort_fcp_rings(phba);
8199
8200        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8201                        "0345 Resetting board due to mailbox timeout\n");
8202
8203        /* Reset the HBA device */
8204        lpfc_reset_hba(phba);
8205}
8206
8207/**
8208 * lpfc_sli_issue_mbox_s3 - Issue an SLI3 mailbox command to firmware
8209 * @phba: Pointer to HBA context object.
8210 * @pmbox: Pointer to mailbox object.
8211 * @flag: Flag indicating how the mailbox need to be processed.
8212 *
8213 * This function is called by discovery code and HBA management code
8214 * to submit a mailbox command to firmware with the SLI-3 interface spec.
8215 * This function gets the hbalock to protect the data structures.
8216 * The mailbox command can be submitted in polling mode, in which case
8217 * this function will wait in a polling loop for the completion of the
8218 * mailbox.
8219 * If the mailbox is submitted in no_wait mode (not polling) the
8220 * function will submit the command and return immediately without waiting
8221 * for the mailbox completion. The no_wait mode is supported only when the
8222 * HBA is in SLI2/SLI3 mode, with interrupts enabled.
8223 * The SLI interface allows only one mailbox pending at a time. If the
8224 * mailbox is issued in polling mode and there is already a mailbox
8225 * pending, then the function will return an error. If the mailbox is issued
8226 * in NO_WAIT mode and there is a mailbox pending already, the function
8227 * will return MBX_BUSY after queuing the mailbox into the mailbox queue.
8228 * The SLI layer owns the mailbox object until the completion of the mailbox
8229 * command if this function returns MBX_BUSY or MBX_SUCCESS. For all other
8230 * return codes the caller owns the mailbox command after the return of
8231 * the function.
8232 **/
8233static int
8234lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
8235                       uint32_t flag)
8236{
8237        MAILBOX_t *mbx;
8238        struct lpfc_sli *psli = &phba->sli;
8239        uint32_t status, evtctr;
8240        uint32_t ha_copy, hc_copy;
8241        int i;
8242        unsigned long timeout;
8243        unsigned long drvr_flag = 0;
8244        uint32_t word0, ldata;
8245        void __iomem *to_slim;
8246        int processing_queue = 0;
8247
8248        spin_lock_irqsave(&phba->hbalock, drvr_flag);
8249        if (!pmbox) {
8250                phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
8251                /* processing mbox queue from intr_handler */
8252                if (unlikely(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
8253                        spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
8254                        return MBX_SUCCESS;
8255                }
8256                processing_queue = 1;
8257                pmbox = lpfc_mbox_get(phba);
8258                if (!pmbox) {
8259                        spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
8260                        return MBX_SUCCESS;
8261                }
8262        }
8263
8264        if (pmbox->mbox_cmpl && pmbox->mbox_cmpl != lpfc_sli_def_mbox_cmpl &&
8265                pmbox->mbox_cmpl != lpfc_sli_wake_mbox_wait) {
8266                if (!pmbox->vport) {
8267                        spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
8268                        lpfc_printf_log(phba, KERN_ERR,
8269                                        LOG_MBOX | LOG_VPORT,
8270                                        "1806 Mbox x%x failed. No vport\n",
8271                                        pmbox->u.mb.mbxCommand);
8272                        dump_stack();
8273                        goto out_not_finished;
8274                }
8275        }
8276
8277        /* If the PCI channel is in offline state, do not post mbox. */
8278        if (unlikely(pci_channel_offline(phba->pcidev))) {
8279                spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
8280                goto out_not_finished;
8281        }
8282
8283        /* If HBA has a deferred error attention, fail the command. */
8284        if (unlikely(phba->hba_flag & DEFER_ERATT)) {
8285                spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
8286                goto out_not_finished;
8287        }
8288
8289        psli = &phba->sli;
8290
8291        mbx = &pmbox->u.mb;
8292        status = MBX_SUCCESS;
8293
8294        if (phba->link_state == LPFC_HBA_ERROR) {
8295                spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
8296
8297                /* Mbox command <mbxCommand> cannot issue */
8298                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8299                                "(%d):0311 Mailbox command x%x cannot "
8300                                "issue Data: x%x x%x\n",
8301                                pmbox->vport ? pmbox->vport->vpi : 0,
8302                                pmbox->u.mb.mbxCommand, psli->sli_flag, flag);
8303                goto out_not_finished;
8304        }
8305
8306        if (mbx->mbxCommand != MBX_KILL_BOARD && flag & MBX_NOWAIT) {
8307                if (lpfc_readl(phba->HCregaddr, &hc_copy) ||
8308                        !(hc_copy & HC_MBINT_ENA)) {
8309                        spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
8310                        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8311                                "(%d):2528 Mailbox command x%x cannot "
8312                                "issue Data: x%x x%x\n",
8313                                pmbox->vport ? pmbox->vport->vpi : 0,
8314                                pmbox->u.mb.mbxCommand, psli->sli_flag, flag);
8315                        goto out_not_finished;
8316                }
8317        }
8318
8319        if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
8320                /* Polling for a mbox command when another one is already active
8321                 * is not allowed in SLI. Also, the driver must have established
8322                 * SLI2 mode to queue and process multiple mbox commands.
8323                 */
8324
8325                if (flag & MBX_POLL) {
8326                        spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
8327
8328                        /* Mbox command <mbxCommand> cannot issue */
8329                        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8330                                        "(%d):2529 Mailbox command x%x "
8331                                        "cannot issue Data: x%x x%x\n",
8332                                        pmbox->vport ? pmbox->vport->vpi : 0,
8333                                        pmbox->u.mb.mbxCommand,
8334                                        psli->sli_flag, flag);
8335                        goto out_not_finished;
8336                }
8337
8338                if (!(psli->sli_flag & LPFC_SLI_ACTIVE)) {
8339                        spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
8340                        /* Mbox command <mbxCommand> cannot issue */
8341                        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8342                                        "(%d):2530 Mailbox command x%x "
8343                                        "cannot issue Data: x%x x%x\n",
8344                                        pmbox->vport ? pmbox->vport->vpi : 0,
8345                                        pmbox->u.mb.mbxCommand,
8346                                        psli->sli_flag, flag);
8347                        goto out_not_finished;
8348                }
8349
8350                /* Another mailbox command is still being processed, queue this
8351                 * command to be processed later.
8352                 */
8353                lpfc_mbox_put(phba, pmbox);
8354
8355                /* Mbox cmd issue - BUSY */
8356                lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
8357                                "(%d):0308 Mbox cmd issue - BUSY Data: "
8358                                "x%x x%x x%x x%x\n",
8359                                pmbox->vport ? pmbox->vport->vpi : 0xffffff,
8360                                mbx->mbxCommand,
8361                                phba->pport ? phba->pport->port_state : 0xff,
8362                                psli->sli_flag, flag);
8363
8364                psli->slistat.mbox_busy++;
8365                spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
8366
8367                if (pmbox->vport) {
8368                        lpfc_debugfs_disc_trc(pmbox->vport,
8369                                LPFC_DISC_TRC_MBOX_VPORT,
8370                                "MBOX Bsy vport:  cmd:x%x mb:x%x x%x",
8371                                (uint32_t)mbx->mbxCommand,
8372                                mbx->un.varWords[0], mbx->un.varWords[1]);
8373                } else {
8375                        lpfc_debugfs_disc_trc(phba->pport,
8376                                LPFC_DISC_TRC_MBOX,
8377                                "MBOX Bsy:        cmd:x%x mb:x%x x%x",
8378                                (uint32_t)mbx->mbxCommand,
8379                                mbx->un.varWords[0], mbx->un.varWords[1]);
8380                }
8381
8382                return MBX_BUSY;
8383        }
8384
8385        psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;
8386
8387        /* If we are not polling, we MUST be in SLI2 mode */
8388        if (flag != MBX_POLL) {
8389                if (!(psli->sli_flag & LPFC_SLI_ACTIVE) &&
8390                    (mbx->mbxCommand != MBX_KILL_BOARD)) {
8391                        psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
8392                        spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
8393                        /* Mbox command <mbxCommand> cannot issue */
8394                        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8395                                        "(%d):2531 Mailbox command x%x "
8396                                        "cannot issue Data: x%x x%x\n",
8397                                        pmbox->vport ? pmbox->vport->vpi : 0,
8398                                        pmbox->u.mb.mbxCommand,
8399                                        psli->sli_flag, flag);
8400                        goto out_not_finished;
8401                }
8402                /* timeout active mbox command */
8403                timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, pmbox) *
8404                                           1000);
8405                mod_timer(&psli->mbox_tmo, jiffies + timeout);
8406        }
8407
8408        /* Mailbox cmd <cmd> issue */
8409        lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
8410                        "(%d):0309 Mailbox cmd x%x issue Data: x%x x%x "
8411                        "x%x\n",
8412                        pmbox->vport ? pmbox->vport->vpi : 0,
8413                        mbx->mbxCommand,
8414                        phba->pport ? phba->pport->port_state : 0xff,
8415                        psli->sli_flag, flag);
8416
8417        if (mbx->mbxCommand != MBX_HEARTBEAT) {
8418                if (pmbox->vport) {
8419                        lpfc_debugfs_disc_trc(pmbox->vport,
8420                                LPFC_DISC_TRC_MBOX_VPORT,
8421                                "MBOX Send vport: cmd:x%x mb:x%x x%x",
8422                                (uint32_t)mbx->mbxCommand,
8423                                mbx->un.varWords[0], mbx->un.varWords[1]);
8424                } else {
8426                        lpfc_debugfs_disc_trc(phba->pport,
8427                                LPFC_DISC_TRC_MBOX,
8428                                "MBOX Send:       cmd:x%x mb:x%x x%x",
8429                                (uint32_t)mbx->mbxCommand,
8430                                mbx->un.varWords[0], mbx->un.varWords[1]);
8431                }
8432        }
8433
8434        psli->slistat.mbox_cmd++;
8435        evtctr = psli->slistat.mbox_event;
8436
8437        /* next set own bit for the adapter and copy over command word */
8438        mbx->mbxOwner = OWN_CHIP;
8439
8440        if (psli->sli_flag & LPFC_SLI_ACTIVE) {
8441                /* Populate mbox extension offset word. */
8442                if (pmbox->in_ext_byte_len || pmbox->out_ext_byte_len) {
8443                        *(((uint32_t *)mbx) + pmbox->mbox_offset_word)
8444                                = (uint8_t *)phba->mbox_ext
8445                                  - (uint8_t *)phba->mbox;
8446                }
8447
8448                /* Copy the mailbox extension data */
8449                if (pmbox->in_ext_byte_len && pmbox->ctx_buf) {
8450                        lpfc_sli_pcimem_bcopy(pmbox->ctx_buf,
8451                                              (uint8_t *)phba->mbox_ext,
8452                                              pmbox->in_ext_byte_len);
8453                }
8454                /* Copy command data to host SLIM area */
8455                lpfc_sli_pcimem_bcopy(mbx, phba->mbox, MAILBOX_CMD_SIZE);
8456        } else {
8457                /* Populate mbox extension offset word. */
8458                if (pmbox->in_ext_byte_len || pmbox->out_ext_byte_len)
8459                        *(((uint32_t *)mbx) + pmbox->mbox_offset_word)
8460                                = MAILBOX_HBA_EXT_OFFSET;
8461
8462                /* Copy the mailbox extension data */
8463                if (pmbox->in_ext_byte_len && pmbox->ctx_buf)
8464                        lpfc_memcpy_to_slim(phba->MBslimaddr +
8465                                MAILBOX_HBA_EXT_OFFSET,
8466                                pmbox->ctx_buf, pmbox->in_ext_byte_len);
8467
8468                if (mbx->mbxCommand == MBX_CONFIG_PORT)
8469                        /* copy command data into host mbox for cmpl */
8470                        lpfc_sli_pcimem_bcopy(mbx, phba->mbox,
8471                                              MAILBOX_CMD_SIZE);
8472
8473                /* First copy mbox command data to HBA SLIM, skip past first
8474                   word */
8475                to_slim = phba->MBslimaddr + sizeof(uint32_t);
8476                lpfc_memcpy_to_slim(to_slim, &mbx->un.varWords[0],
8477                            MAILBOX_CMD_SIZE - sizeof(uint32_t));
8478
8479                /* Next copy over first word, with mbxOwner set */
8480                ldata = *((uint32_t *)mbx);
8481                to_slim = phba->MBslimaddr;
8482                writel(ldata, to_slim);
8483                readl(to_slim); /* flush */
8484
8485                if (mbx->mbxCommand == MBX_CONFIG_PORT)
8486                        /* switch over to host mailbox */
8487                        psli->sli_flag |= LPFC_SLI_ACTIVE;
8488        }
8489
8490        wmb();
8491
8492        switch (flag) {
8493        case MBX_NOWAIT:
8494                /* Set up reference to mailbox command */
8495                psli->mbox_active = pmbox;
8496                /* Interrupt board to do it */
8497                writel(CA_MBATT, phba->CAregaddr);
8498                readl(phba->CAregaddr); /* flush */
8499                /* Don't wait for it to finish, just return */
8500                break;
8501
8502        case MBX_POLL:
8503                /* Set up null reference to mailbox command */
8504                psli->mbox_active = NULL;
8505                /* Interrupt board to do it */
8506                writel(CA_MBATT, phba->CAregaddr);
8507                readl(phba->CAregaddr); /* flush */
8508
8509                if (psli->sli_flag & LPFC_SLI_ACTIVE) {
8510                        /* First read mbox status word */
8511                        word0 = *((uint32_t *)phba->mbox);
8512                        word0 = le32_to_cpu(word0);
8513                } else {
8514                        /* First read mbox status word */
8515                        if (lpfc_readl(phba->MBslimaddr, &word0)) {
8516                                spin_unlock_irqrestore(&phba->hbalock,
8517                                                       drvr_flag);
8518                                goto out_not_finished;
8519                        }
8520                }
8521
8522                /* Read the HBA Host Attention Register */
8523                if (lpfc_readl(phba->HAregaddr, &ha_copy)) {
8524                        spin_unlock_irqrestore(&phba->hbalock,
8525                                                       drvr_flag);
8526                        goto out_not_finished;
8527                }
8528                timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, pmbox) *
8529                                                        1000) + jiffies;
8530                i = 0;
8531                /* Wait for command to complete */
8532                while (((word0 & OWN_CHIP) == OWN_CHIP) ||
8533                       (!(ha_copy & HA_MBATT) &&
8534                        (phba->link_state > LPFC_WARM_START))) {
8535                        if (time_after(jiffies, timeout)) {
8536                                psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
8537                                spin_unlock_irqrestore(&phba->hbalock,
8538                                                       drvr_flag);
8539                                goto out_not_finished;
8540                        }
8541
8542                        /* Check if we took a mbox interrupt while we were
8543                           polling */
8544                        if (((word0 & OWN_CHIP) != OWN_CHIP)
8545                            && (evtctr != psli->slistat.mbox_event))
8546                                break;
8547
8548                        if (i++ > 10) {
8549                                spin_unlock_irqrestore(&phba->hbalock,
8550                                                       drvr_flag);
8551                                msleep(1);
8552                                spin_lock_irqsave(&phba->hbalock, drvr_flag);
8553                        }
8554
8555                        if (psli->sli_flag & LPFC_SLI_ACTIVE) {
8556                                /* First copy command data */
8557                                word0 = *((uint32_t *)phba->mbox);
8558                                word0 = le32_to_cpu(word0);
8559                                if (mbx->mbxCommand == MBX_CONFIG_PORT) {
8560                                        MAILBOX_t *slimmb;
8561                                        uint32_t slimword0;
8562                                        /* Check real SLIM for any errors */
8563                                        slimword0 = readl(phba->MBslimaddr);
8564                                        slimmb = (MAILBOX_t *)&slimword0;
8565                                        if (((slimword0 & OWN_CHIP) != OWN_CHIP)
8566                                            && slimmb->mbxStatus) {
8567                                                psli->sli_flag &=
8568                                                    ~LPFC_SLI_ACTIVE;
8569                                                word0 = slimword0;
8570                                        }
8571                                }
8572                        } else {
8573                                /* First copy command data */
8574                                word0 = readl(phba->MBslimaddr);
8575                        }
8576                        /* Read the HBA Host Attention Register */
8577                        if (lpfc_readl(phba->HAregaddr, &ha_copy)) {
8578                                spin_unlock_irqrestore(&phba->hbalock,
8579                                                       drvr_flag);
8580                                goto out_not_finished;
8581                        }
8582                }
8583
8584                if (psli->sli_flag & LPFC_SLI_ACTIVE) {
8585                        /* copy results back to user */
8586                        lpfc_sli_pcimem_bcopy(phba->mbox, mbx,
8587                                                MAILBOX_CMD_SIZE);
8588                        /* Copy the mailbox extension data */
8589                        if (pmbox->out_ext_byte_len && pmbox->ctx_buf) {
8590                                lpfc_sli_pcimem_bcopy(phba->mbox_ext,
8591                                                      pmbox->ctx_buf,
8592                                                      pmbox->out_ext_byte_len);
8593                        }
8594                } else {
8595                        /* First copy command data */
8596                        lpfc_memcpy_from_slim(mbx, phba->MBslimaddr,
8597                                                MAILBOX_CMD_SIZE);
8598                        /* Copy the mailbox extension data */
8599                        if (pmbox->out_ext_byte_len && pmbox->ctx_buf) {
8600                                lpfc_memcpy_from_slim(
8601                                        pmbox->ctx_buf,
8602                                        phba->MBslimaddr +
8603                                        MAILBOX_HBA_EXT_OFFSET,
8604                                        pmbox->out_ext_byte_len);
8605                        }
8606                }
8607
8608                writel(HA_MBATT, phba->HAregaddr);
8609                readl(phba->HAregaddr); /* flush */
8610
8611                psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
8612                status = mbx->mbxStatus;
8613        }
8614
8615        spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
8616        return status;
8617
8618out_not_finished:
8619        if (processing_queue) {
8620                pmbox->u.mb.mbxStatus = MBX_NOT_FINISHED;
8621                lpfc_mbox_cmpl_put(phba, pmbox);
8622        }
8623        return MBX_NOT_FINISHED;
8624}
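
/*
 * Illustrative caller sketch (not part of the driver) for the ownership
 * rules documented above: in polling mode the command has completed (or
 * failed) by the time lpfc_sli_issue_mbox() returns, so the caller still
 * owns and frees the mailbox. lpfc_down_link() is used here only because
 * it needs no extra setup; "example_issue_polled_mbox" is hypothetical.
 */
static int example_issue_polled_mbox(struct lpfc_hba *phba)
{
        LPFC_MBOXQ_t *mboxq;
        int rc;

        mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
        if (!mboxq)
                return -ENOMEM;

        lpfc_down_link(phba, mboxq);
        mboxq->vport = phba->pport;

        rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);

        /* Polling mode: the caller retains ownership, so free it here */
        mempool_free(mboxq, phba->mbox_mem_pool);
        return (rc == MBX_SUCCESS) ? 0 : -EIO;
}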
8625
8626/**
8627 * lpfc_sli4_async_mbox_block - Block posting SLI4 asynchronous mailbox command
8628 * @phba: Pointer to HBA context object.
8629 *
8630 * The function blocks the posting of SLI4 asynchronous mailbox commands from
8631 * the driver internal pending mailbox queue. It will then try to wait out the
8632 * possible outstanding mailbox command before returning.
8633 *
8634 * Returns:
8635 *      0 - the outstanding mailbox command completed; otherwise, the wait for
8636 *      the outstanding mailbox command timed out.
8637 **/
8638static int
8639lpfc_sli4_async_mbox_block(struct lpfc_hba *phba)
8640{
8641        struct lpfc_sli *psli = &phba->sli;
8642        int rc = 0;
8643        unsigned long timeout = 0;
8644
8645        /* Mark the asynchronous mailbox command posting as blocked */
8646        spin_lock_irq(&phba->hbalock);
8647        psli->sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
8648        /* Determine how long we might wait for the active mailbox
8649         * command to be gracefully completed by firmware.
8650         */
8651        if (phba->sli.mbox_active)
8652                timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba,
8653                                                phba->sli.mbox_active) *
8654                                                1000) + jiffies;
8655        spin_unlock_irq(&phba->hbalock);
8656
8657        /* Make sure the mailbox is really active */
8658        if (timeout)
8659                lpfc_sli4_process_missed_mbox_completions(phba);
8660
8661        /* Wait for the outstanding mailbox command to complete */
8662        while (phba->sli.mbox_active) {
8663                /* Check active mailbox complete status every 2ms */
8664                msleep(2);
8665                if (time_after(jiffies, timeout)) {
8666                        /* Timeout, mark the outstanding cmd as not complete */
8667                        rc = 1;
8668                        break;
8669                }
8670        }
8671
8672        /* Cannot cleanly block async mailbox commands, fail it */
8673        if (rc) {
8674                spin_lock_irq(&phba->hbalock);
8675                psli->sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
8676                spin_unlock_irq(&phba->hbalock);
8677        }
8678        return rc;
8679}
8680
8681/**
8682 * lpfc_sli4_async_mbox_unblock - Unblock posting SLI4 async mailbox command
8683 * @phba: Pointer to HBA context object.
8684 *
8685 * The function unblocks and resumes posting of SLI4 asynchronous mailbox
8686 * commands from the driver internal pending mailbox queue. It makes sure
8687 * that there is no outstanding mailbox command before resuming posting
8688 * asynchronous mailbox commands. If, for any reason, there is an
8689 * outstanding mailbox command, it will try to wait it out before resuming
8690 * asynchronous mailbox command posting.
8691 **/
8692static void
8693lpfc_sli4_async_mbox_unblock(struct lpfc_hba *phba)
8694{
8695        struct lpfc_sli *psli = &phba->sli;
8696
8697        spin_lock_irq(&phba->hbalock);
8698        if (!(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
8699                /* Asynchronous mailbox posting is not blocked, do nothing */
8700                spin_unlock_irq(&phba->hbalock);
8701                return;
8702        }
8703
8704        /* The outstanding synchronous mailbox command is guaranteed to be
8705         * done, whether it succeeded or timed out; after a timeout the
8706         * outstanding command is always removed, so just unblock posting of
8707         * async mailbox commands and resume.
8708         */
8709        psli->sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
8710        spin_unlock_irq(&phba->hbalock);
8711
8712        /* wake up worker thread to post asynchronous mailbox command */
8713        lpfc_worker_wake_up(phba);
8714}
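
/*
 * Illustrative sketch (not part of the driver): how the block/unblock
 * pair brackets a synchronous operation, as the MBX_POLL path of
 * lpfc_sli_issue_mbox_s4() below does. Async posting is resumed only if
 * it was successfully blocked; "example_sync_mbox_window" is hypothetical.
 */
static int example_sync_mbox_window(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
        int rc;

        /* Quiesce async mailbox posting; non-zero means the wait timed out */
        if (lpfc_sli4_async_mbox_block(phba))
                return MBXERR_ERROR;

        rc = lpfc_sli4_post_sync_mbox(phba, mboxq);

        /* Resume async posting regardless of the sync command outcome */
        lpfc_sli4_async_mbox_unblock(phba);
        return rc;
}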
8715
8716/**
8717 * lpfc_sli4_wait_bmbx_ready - Wait for bootstrap mailbox register ready
8718 * @phba: Pointer to HBA context object.
8719 * @mboxq: Pointer to mailbox object.
8720 *
8721 * The function waits for the bootstrap mailbox register ready bit on the
8722 * port for up to the regular mailbox command timeout value.
8723 * Returns:
8724 *      0 - no timeout on waiting for bootstrap mailbox register ready.
8725 *      MBXERR_ERROR - wait for bootstrap mailbox register timed out.
8726 **/
8727static int
8728lpfc_sli4_wait_bmbx_ready(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
8729{
8730        uint32_t db_ready;
8731        unsigned long timeout;
8732        struct lpfc_register bmbx_reg;
8733
8734        timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, mboxq)
8735                                   * 1000) + jiffies;
8736
8737        do {
8738                bmbx_reg.word0 = readl(phba->sli4_hba.BMBXregaddr);
8739                db_ready = bf_get(lpfc_bmbx_rdy, &bmbx_reg);
8740                if (!db_ready)
8741                        mdelay(2);
8742
8743                if (time_after(jiffies, timeout))
8744                        return MBXERR_ERROR;
8745        } while (!db_ready);
8746
8747        return 0;
8748}
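
/*
 * Illustrative sketch (not part of the driver): the doorbell handshake
 * that lpfc_sli4_post_sync_mbox() below builds on this helper. Each half
 * of the bootstrap mailbox physical address is written to the BMBX
 * register, with the ready bit polled before the next step. The name
 * "example_bmbx_doorbell" is hypothetical.
 */
static int example_bmbx_doorbell(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
        struct dma_address *dma_address = &phba->sli4_hba.bmbx.dma_address;
        int rc;

        /* Post the high half of the bmbx dma address, then wait for ready */
        writel(dma_address->addr_hi, phba->sli4_hba.BMBXregaddr);
        rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq);
        if (rc)
                return rc;

        /* Post the low half, which kicks off the mailbox operation */
        writel(dma_address->addr_lo, phba->sli4_hba.BMBXregaddr);
        return lpfc_sli4_wait_bmbx_ready(phba, mboxq);
}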
8749
8750/**
8751 * lpfc_sli4_post_sync_mbox - Post an SLI4 mailbox to the bootstrap mailbox
8752 * @phba: Pointer to HBA context object.
8753 * @mboxq: Pointer to mailbox object.
8754 *
8755 * The function posts a mailbox to the port.  The mailbox is expected
8756 * to be completely filled in and ready for the port to operate on it.
8757 * This routine executes a synchronous completion operation on the
8758 * mailbox by polling for its completion.
8759 *
8760 * The caller must not be holding any locks when calling this routine.
8761 *
8762 * Returns:
8763 *      MBX_SUCCESS - mailbox posted successfully
8764 *      Any of the MBX error values.
8765 **/
8766static int
8767lpfc_sli4_post_sync_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
8768{
8769        int rc = MBX_SUCCESS;
8770        unsigned long iflag;
8771        uint32_t mcqe_status;
8772        uint32_t mbx_cmnd;
8773        struct lpfc_sli *psli = &phba->sli;
8774        struct lpfc_mqe *mb = &mboxq->u.mqe;
8775        struct lpfc_bmbx_create *mbox_rgn;
8776        struct dma_address *dma_address;
8777
8778        /*
8779         * Only one mailbox can be active to the bootstrap mailbox region
8780         * at a time and there is no queueing provided.
8781         */
8782        spin_lock_irqsave(&phba->hbalock, iflag);
8783        if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
8784                spin_unlock_irqrestore(&phba->hbalock, iflag);
8785                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8786                                "(%d):2532 Mailbox command x%x (x%x/x%x) "
8787                                "cannot issue Data: x%x x%x\n",
8788                                mboxq->vport ? mboxq->vport->vpi : 0,
8789                                mboxq->u.mb.mbxCommand,
8790                                lpfc_sli_config_mbox_subsys_get(phba, mboxq),
8791                                lpfc_sli_config_mbox_opcode_get(phba, mboxq),
8792                                psli->sli_flag, MBX_POLL);
8793                return MBXERR_ERROR;
8794        }
8795        /* The server grabs the token and owns it until release */
8796        psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;
8797        phba->sli.mbox_active = mboxq;
8798        spin_unlock_irqrestore(&phba->hbalock, iflag);
8799
8800        /* wait for bootstrap mbox register readiness */
8801        rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq);
8802        if (rc)
8803                goto exit;
8804        /*
8805         * Initialize the bootstrap memory region to avoid stale data areas
8806         * in the mailbox post.  Then copy the caller's mailbox contents to
8807         * the bmbx mailbox region.
8808         */
8809        mbx_cmnd = bf_get(lpfc_mqe_command, mb);
8810        memset(phba->sli4_hba.bmbx.avirt, 0, sizeof(struct lpfc_bmbx_create));
8811        lpfc_sli4_pcimem_bcopy(mb, phba->sli4_hba.bmbx.avirt,
8812                               sizeof(struct lpfc_mqe));
8813
8814        /* Post the high mailbox dma address to the port and wait for ready. */
8815        dma_address = &phba->sli4_hba.bmbx.dma_address;
8816        writel(dma_address->addr_hi, phba->sli4_hba.BMBXregaddr);
8817
8818        /* wait for bootstrap mbox register for hi-address write done */
8819        rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq);
8820        if (rc)
8821                goto exit;
8822
8823        /* Post the low mailbox dma address to the port. */
8824        writel(dma_address->addr_lo, phba->sli4_hba.BMBXregaddr);
8825
8826        /* wait for bootstrap mbox register for low address write done */
8827        rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq);
8828        if (rc)
8829                goto exit;
8830
8831        /*
8832         * Read the CQ to ensure the mailbox has completed.
8833         * If so, update the mailbox status so that the upper layers
8834         * can complete the request normally.
8835         */
8836        lpfc_sli4_pcimem_bcopy(phba->sli4_hba.bmbx.avirt, mb,
8837                               sizeof(struct lpfc_mqe));
8838        mbox_rgn = (struct lpfc_bmbx_create *) phba->sli4_hba.bmbx.avirt;
8839        lpfc_sli4_pcimem_bcopy(&mbox_rgn->mcqe, &mboxq->mcqe,
8840                               sizeof(struct lpfc_mcqe));
8841        mcqe_status = bf_get(lpfc_mcqe_status, &mbox_rgn->mcqe);
8842        /*
8843         * When the CQE status indicates a failure and the mailbox status
8844         * indicates success then copy the CQE status into the mailbox status
8845         * (and prefix it with x4000).
8846         */
8847        if (mcqe_status != MB_CQE_STATUS_SUCCESS) {
8848                if (bf_get(lpfc_mqe_status, mb) == MBX_SUCCESS)
8849                        bf_set(lpfc_mqe_status, mb,
8850                               (LPFC_MBX_ERROR_RANGE | mcqe_status));
8851                rc = MBXERR_ERROR;
8852        } else
8853                lpfc_sli4_swap_str(phba, mboxq);
8854
8855        lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
8856                        "(%d):0356 Mailbox cmd x%x (x%x/x%x) Status x%x "
8857                        "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x x%x x%x"
8858                        " x%x x%x CQ: x%x x%x x%x x%x\n",
8859                        mboxq->vport ? mboxq->vport->vpi : 0, mbx_cmnd,
8860                        lpfc_sli_config_mbox_subsys_get(phba, mboxq),
8861                        lpfc_sli_config_mbox_opcode_get(phba, mboxq),
8862                        bf_get(lpfc_mqe_status, mb),
8863                        mb->un.mb_words[0], mb->un.mb_words[1],
8864                        mb->un.mb_words[2], mb->un.mb_words[3],
8865                        mb->un.mb_words[4], mb->un.mb_words[5],
8866                        mb->un.mb_words[6], mb->un.mb_words[7],
8867                        mb->un.mb_words[8], mb->un.mb_words[9],
8868                        mb->un.mb_words[10], mb->un.mb_words[11],
8869                        mb->un.mb_words[12], mboxq->mcqe.word0,
8870                        mboxq->mcqe.mcqe_tag0,  mboxq->mcqe.mcqe_tag1,
8871                        mboxq->mcqe.trailer);
8872exit:
8873        /* We are holding the token, no lock needed when releasing it */
8874        spin_lock_irqsave(&phba->hbalock, iflag);
8875        psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
8876        phba->sli.mbox_active = NULL;
8877        spin_unlock_irqrestore(&phba->hbalock, iflag);
8878        return rc;
8879}
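
/*
 * Illustrative sketch (not part of the driver): undoing the status
 * folding above. Assuming LPFC_MBX_ERROR_RANGE is the x4000 prefix named
 * in the comment, masking it off recovers the original MCQE status code.
 * "example_decode_mqe_status" is a hypothetical helper name.
 */
static uint32_t example_decode_mqe_status(struct lpfc_mqe *mb)
{
        uint32_t status = bf_get(lpfc_mqe_status, mb);

        /* Strip the error-range prefix to expose the MCQE status */
        if (status & LPFC_MBX_ERROR_RANGE)
                status &= ~LPFC_MBX_ERROR_RANGE;
        return status;
}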
8880
8881/**
8882 * lpfc_sli_issue_mbox_s4 - Issue an SLI4 mailbox command to firmware
8883 * @phba: Pointer to HBA context object.
8884 * @mboxq: Pointer to mailbox object.
8885 * @flag: Flag indicating how the mailbox need to be processed.
8886 *
8887 * This function is called by discovery code and HBA management code to submit
8888 * a mailbox command to firmware with the SLI-4 interface spec.
8889 *
8890 * Return codes: the caller owns the mailbox command after the return of
8891 * the function.
8892 **/
8893static int
8894lpfc_sli_issue_mbox_s4(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
8895                       uint32_t flag)
8896{
8897        struct lpfc_sli *psli = &phba->sli;
8898        unsigned long iflags;
8899        int rc;
8900
8901        /* dump the mailbox command being issued, if dump is set up */
8902        lpfc_idiag_mbxacc_dump_issue_mbox(phba, &mboxq->u.mb);
8903
8904        rc = lpfc_mbox_dev_check(phba);
8905        if (unlikely(rc)) {
8906                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8907                                "(%d):2544 Mailbox command x%x (x%x/x%x) "
8908                                "cannot issue Data: x%x x%x\n",
8909                                mboxq->vport ? mboxq->vport->vpi : 0,
8910                                mboxq->u.mb.mbxCommand,
8911                                lpfc_sli_config_mbox_subsys_get(phba, mboxq),
8912                                lpfc_sli_config_mbox_opcode_get(phba, mboxq),
8913                                psli->sli_flag, flag);
8914                goto out_not_finished;
8915        }
8916
8917        /* Detect polling mode and jump to a handler */
8918        if (!phba->sli4_hba.intr_enable) {
8919                if (flag == MBX_POLL)
8920                        rc = lpfc_sli4_post_sync_mbox(phba, mboxq);
8921                else
8922                        rc = -EIO;
8923                if (rc != MBX_SUCCESS)
8924                        lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
8925                                        "(%d):2541 Mailbox command x%x "
8926                                        "(x%x/x%x) failure: "
8927                                        "mqe_sta: x%x mcqe_sta: x%x/x%x "
8928                                        "Data: x%x x%x\n",
8929                                        mboxq->vport ? mboxq->vport->vpi : 0,
8930                                        mboxq->u.mb.mbxCommand,
8931                                        lpfc_sli_config_mbox_subsys_get(phba,
8932                                                                        mboxq),
8933                                        lpfc_sli_config_mbox_opcode_get(phba,
8934                                                                        mboxq),
8935                                        bf_get(lpfc_mqe_status, &mboxq->u.mqe),
8936                                        bf_get(lpfc_mcqe_status, &mboxq->mcqe),
8937                                        bf_get(lpfc_mcqe_ext_status,
8938                                               &mboxq->mcqe),
8939                                        psli->sli_flag, flag);
8940                return rc;
8941        } else if (flag == MBX_POLL) {
8942                lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
8943                                "(%d):2542 Try to issue mailbox command "
8944                                "x%x (x%x/x%x) synchronously ahead of async "
8945                                "mailbox command queue: x%x x%x\n",
8946                                mboxq->vport ? mboxq->vport->vpi : 0,
8947                                mboxq->u.mb.mbxCommand,
8948                                lpfc_sli_config_mbox_subsys_get(phba, mboxq),
8949                                lpfc_sli_config_mbox_opcode_get(phba, mboxq),
8950                                psli->sli_flag, flag);
8951                /* Try to block the asynchronous mailbox posting */
8952                rc = lpfc_sli4_async_mbox_block(phba);
8953                if (!rc) {
8954                        /* Successfully blocked, now issue sync mbox cmd */
8955                        rc = lpfc_sli4_post_sync_mbox(phba, mboxq);
8956                        if (rc != MBX_SUCCESS)
8957                                lpfc_printf_log(phba, KERN_WARNING,
8958                                        LOG_MBOX | LOG_SLI,
8959                                        "(%d):2597 Sync Mailbox command "
8960                                        "x%x (x%x/x%x) failure: "
8961                                        "mqe_sta: x%x mcqe_sta: x%x/x%x "
8962                                        "Data: x%x x%x\n",
8963                                        mboxq->vport ? mboxq->vport->vpi : 0,
8964                                        mboxq->u.mb.mbxCommand,
8965                                        lpfc_sli_config_mbox_subsys_get(phba,
8966                                                                        mboxq),
8967                                        lpfc_sli_config_mbox_opcode_get(phba,
8968                                                                        mboxq),
8969                                        bf_get(lpfc_mqe_status, &mboxq->u.mqe),
8970                                        bf_get(lpfc_mcqe_status, &mboxq->mcqe),
8971                                        bf_get(lpfc_mcqe_ext_status,
8972                                               &mboxq->mcqe),
8973                                        psli->sli_flag, flag);
8974                        /* Unblock the async mailbox posting afterward */
8975                        lpfc_sli4_async_mbox_unblock(phba);
8976                }
8977                return rc;
8978        }
8979
8980        /* Now, interrupt mode asynchronous mailbox command */
8981        rc = lpfc_mbox_cmd_check(phba, mboxq);
8982        if (rc) {
8983                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8984                                "(%d):2543 Mailbox command x%x (x%x/x%x) "
8985                                "cannot issue Data: x%x x%x\n",
8986                                mboxq->vport ? mboxq->vport->vpi : 0,
8987                                mboxq->u.mb.mbxCommand,
8988                                lpfc_sli_config_mbox_subsys_get(phba, mboxq),
8989                                lpfc_sli_config_mbox_opcode_get(phba, mboxq),
8990                                psli->sli_flag, flag);
8991                goto out_not_finished;
8992        }
8993
8994        /* Put the mailbox command to the driver internal FIFO */
8995        psli->slistat.mbox_busy++;
8996        spin_lock_irqsave(&phba->hbalock, iflags);
8997        lpfc_mbox_put(phba, mboxq);
8998        spin_unlock_irqrestore(&phba->hbalock, iflags);
8999        lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
9000                        "(%d):0354 Mbox cmd issue - Enqueue Data: "
9001                        "x%x (x%x/x%x) x%x x%x x%x\n",
9002                        mboxq->vport ? mboxq->vport->vpi : 0xffffff,
9003                        bf_get(lpfc_mqe_command, &mboxq->u.mqe),
9004                        lpfc_sli_config_mbox_subsys_get(phba, mboxq),
9005                        lpfc_sli_config_mbox_opcode_get(phba, mboxq),
9006                        phba->pport->port_state,
9007                        psli->sli_flag, MBX_NOWAIT);
9008        /* Wake up worker thread to transport mailbox command from head */
9009        lpfc_worker_wake_up(phba);
9010
9011        return MBX_BUSY;
9012
9013out_not_finished:
9014        return MBX_NOT_FINISHED;
9015}
9016
9017/**
9018 * lpfc_sli4_post_async_mbox - Post an SLI4 mailbox command to device
9019 * @phba: Pointer to HBA context object.
9020 *
9021 * This function is called by the worker thread to send a mailbox command
9022 * to the SLI4 HBA firmware.
9023 *
9024 **/
9025int
9026lpfc_sli4_post_async_mbox(struct lpfc_hba *phba)
9027{
9028        struct lpfc_sli *psli = &phba->sli;
9029        LPFC_MBOXQ_t *mboxq;
9030        int rc = MBX_SUCCESS;
9031        unsigned long iflags;
9032        struct lpfc_mqe *mqe;
9033        uint32_t mbx_cmnd;
9034
9035        /* Check interrupt mode before posting async mailbox command */
9036        if (unlikely(!phba->sli4_hba.intr_enable))
9037                return MBX_NOT_FINISHED;
9038
9039        /* Check for mailbox command service token */
9040        spin_lock_irqsave(&phba->hbalock, iflags);
9041        if (unlikely(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
9042                spin_unlock_irqrestore(&phba->hbalock, iflags);
9043                return MBX_NOT_FINISHED;
9044        }
9045        if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
9046                spin_unlock_irqrestore(&phba->hbalock, iflags);
9047                return MBX_NOT_FINISHED;
9048        }
9049        if (unlikely(phba->sli.mbox_active)) {
9050                spin_unlock_irqrestore(&phba->hbalock, iflags);
9051                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9052                                "0384 There is pending active mailbox cmd\n");
9053                return MBX_NOT_FINISHED;
9054        }
9055        /* Take the mailbox command service token */
9056        psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;
9057
9058        /* Get the next mailbox command from head of queue */
9059        mboxq = lpfc_mbox_get(phba);
9060
9061        /* If no more mailbox commands are waiting to post, we're done */
9062        if (!mboxq) {
9063                psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
9064                spin_unlock_irqrestore(&phba->hbalock, iflags);
9065                return MBX_SUCCESS;
9066        }
9067        phba->sli.mbox_active = mboxq;
9068        spin_unlock_irqrestore(&phba->hbalock, iflags);
9069
9070        /* Check device readiness for posting mailbox command */
9071        rc = lpfc_mbox_dev_check(phba);
9072        if (unlikely(rc))
9073                /* Driver clean routine will clean up pending mailbox */
9074                goto out_not_finished;
9075
9076        /* Prepare the mbox command to be posted */
9077        mqe = &mboxq->u.mqe;
9078        mbx_cmnd = bf_get(lpfc_mqe_command, mqe);
9079
9080        /* Start timer for the mbox_tmo and log some mailbox post messages */
9081        mod_timer(&psli->mbox_tmo, (jiffies +
9082                  msecs_to_jiffies(1000 * lpfc_mbox_tmo_val(phba, mboxq))));
9083
9084        lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
9085                        "(%d):0355 Mailbox cmd x%x (x%x/x%x) issue Data: "
9086                        "x%x x%x\n",
9087                        mboxq->vport ? mboxq->vport->vpi : 0, mbx_cmnd,
9088                        lpfc_sli_config_mbox_subsys_get(phba, mboxq),
9089                        lpfc_sli_config_mbox_opcode_get(phba, mboxq),
9090                        phba->pport->port_state, psli->sli_flag);
9091
9092        if (mbx_cmnd != MBX_HEARTBEAT) {
9093                if (mboxq->vport) {
9094                        lpfc_debugfs_disc_trc(mboxq->vport,
9095                                LPFC_DISC_TRC_MBOX_VPORT,
9096                                "MBOX Send vport: cmd:x%x mb:x%x x%x",
9097                                mbx_cmnd, mqe->un.mb_words[0],
9098                                mqe->un.mb_words[1]);
9099                } else {
9100                        lpfc_debugfs_disc_trc(phba->pport,
9101                                LPFC_DISC_TRC_MBOX,
9102                                "MBOX Send: cmd:x%x mb:x%x x%x",
9103                                mbx_cmnd, mqe->un.mb_words[0],
9104                                mqe->un.mb_words[1]);
9105                }
9106        }
9107        psli->slistat.mbox_cmd++;
9108
9109        /* Post the mailbox command to the port */
9110        rc = lpfc_sli4_mq_put(phba->sli4_hba.mbx_wq, mqe);
9111        if (rc != MBX_SUCCESS) {
9112                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9113                                "(%d):2533 Mailbox command x%x (x%x/x%x) "
9114                                "cannot issue Data: x%x x%x\n",
9115                                mboxq->vport ? mboxq->vport->vpi : 0,
9116                                mboxq->u.mb.mbxCommand,
9117                                lpfc_sli_config_mbox_subsys_get(phba, mboxq),
9118                                lpfc_sli_config_mbox_opcode_get(phba, mboxq),
9119                                psli->sli_flag, MBX_NOWAIT);
9120                goto out_not_finished;
9121        }
9122
9123        return rc;
9124
9125out_not_finished:
9126        spin_lock_irqsave(&phba->hbalock, iflags);
9127        if (phba->sli.mbox_active) {
9128                mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED;
9129                __lpfc_mbox_cmpl_put(phba, mboxq);
9130                /* Release the token */
9131                psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
9132                phba->sli.mbox_active = NULL;
9133        }
9134        spin_unlock_irqrestore(&phba->hbalock, iflags);
9135
9136        return MBX_NOT_FINISHED;
9137}
9138
9139/**
9140 * lpfc_sli_issue_mbox - Wrapper func for issuing mailbox command
9141 * @phba: Pointer to HBA context object.
9142 * @pmbox: Pointer to mailbox object.
9143 * @flag: Flag indicating how the mailbox needs to be processed.
9144 *
9145 * This routine wraps the actual SLI3 or SLI4 mailbox issuing routine
9146 * through the API jump table function pointer in the lpfc_hba struct;
9147 * an illustrative usage sketch follows the function body.
9148 *
9149 * Return codes: the caller owns the mailbox command after the function returns.
9150 **/
9151int
9152lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
9153{
9154        return phba->lpfc_sli_issue_mbox(phba, pmbox, flag);
9155}
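/*
 * Example (editor's illustrative sketch, not part of the driver and not
 * compiled): a caller allocates a mailbox from the driver's mempool,
 * builds a HEARTBEAT command, and issues it through the wrapper with
 * MBX_NOWAIT. On an SLI4 port this enqueues the command to the internal
 * FIFO (MBX_BUSY) and the worker thread later posts it via
 * lpfc_sli4_post_async_mbox(). The function name is hypothetical.
 */
#if 0
static int lpfc_example_send_heartbeat(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *mboxq;
	int rc;

	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq)
		return -ENOMEM;

	lpfc_heart_beat(phba, mboxq);	/* build the MBX_HEARTBEAT command */
	mboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	mboxq->vport = phba->pport;

	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
	if (rc != MBX_BUSY && rc != MBX_SUCCESS) {
		mempool_free(mboxq, phba->mbox_mem_pool);
		return -EIO;
	}
	/* mboxq is returned to the mempool by the mbox_cmpl handler */
	return 0;
}
#endif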
9156
9157/**
9158 * lpfc_mbox_api_table_setup - Set up mbox api function jump table
9159 * @phba: The hba struct for which this call is being executed.
9160 * @dev_grp: The HBA PCI-Device group number.
9161 *
9162 * This routine sets up the mbox interface API function jump table in the
9163 * @phba struct; a usage sketch follows the function body.
9164 * Returns: 0 - success, -ENODEV - failure.
9165 **/
9166int
9167lpfc_mbox_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
9168{
9169
9170        switch (dev_grp) {
9171        case LPFC_PCI_DEV_LP:
9172                phba->lpfc_sli_issue_mbox = lpfc_sli_issue_mbox_s3;
9173                phba->lpfc_sli_handle_slow_ring_event =
9174                                lpfc_sli_handle_slow_ring_event_s3;
9175                phba->lpfc_sli_hbq_to_firmware = lpfc_sli_hbq_to_firmware_s3;
9176                phba->lpfc_sli_brdrestart = lpfc_sli_brdrestart_s3;
9177                phba->lpfc_sli_brdready = lpfc_sli_brdready_s3;
9178                break;
9179        case LPFC_PCI_DEV_OC:
9180                phba->lpfc_sli_issue_mbox = lpfc_sli_issue_mbox_s4;
9181                phba->lpfc_sli_handle_slow_ring_event =
9182                                lpfc_sli_handle_slow_ring_event_s4;
9183                phba->lpfc_sli_hbq_to_firmware = lpfc_sli_hbq_to_firmware_s4;
9184                phba->lpfc_sli_brdrestart = lpfc_sli_brdrestart_s4;
9185                phba->lpfc_sli_brdready = lpfc_sli_brdready_s4;
9186                break;
9187        default:
9188                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9189                                "1420 Invalid HBA PCI-device group: 0x%x\n",
9190                                dev_grp);
9191                return -ENODEV;
9193        }
9194        return 0;
9195}
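/*
 * Example (editor's illustrative sketch, not compiled): selecting the
 * mailbox API set at attach time. LPFC_PCI_DEV_OC denotes an SLI4 port,
 * so the jump table binds lpfc_sli_issue_mbox_s4() behind the generic
 * phba->lpfc_sli_issue_mbox pointer.
 */
#if 0
static int lpfc_example_bind_mbox_api(struct lpfc_hba *phba)
{
	if (lpfc_mbox_api_table_setup(phba, LPFC_PCI_DEV_OC))
		return -ENODEV;
	/* From here on, lpfc_sli_issue_mbox() dispatches to the s4 routine */
	return 0;
}
#endif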
9196
9197/**
9198 * __lpfc_sli_ringtx_put - Add an iocb to the txq
9199 * @phba: Pointer to HBA context object.
9200 * @pring: Pointer to driver SLI ring object.
9201 * @piocb: Pointer to the command iocb to be added to the txq.
9202 *
9203 * This function is called with the hbalock held for SLI3 ports, or
9204 * the ring lock held for SLI4 ports, to add a command iocb to the txq
9205 * when the SLI layer cannot submit the command iocb to the ring. A
9206 * locking sketch follows the function body.
9207 **/
9208void
9209__lpfc_sli_ringtx_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
9210                    struct lpfc_iocbq *piocb)
9211{
9212        if (phba->sli_rev == LPFC_SLI_REV4)
9213                lockdep_assert_held(&pring->ring_lock);
9214        else
9215                lockdep_assert_held(&phba->hbalock);
9216        /* Insert the caller's iocb in the txq tail for later processing. */
9217        list_add_tail(&piocb->list, &pring->txq);
9218}
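/*
 * Example (editor's illustrative sketch, not compiled): satisfying the
 * locking contract asserted above. SLI4 ports serialize the txq with the
 * per-ring lock; SLI3 ports use the global hbalock.
 */
#if 0
static void lpfc_example_defer_iocb(struct lpfc_hba *phba,
				    struct lpfc_sli_ring *pring,
				    struct lpfc_iocbq *piocb)
{
	unsigned long iflags;

	if (phba->sli_rev == LPFC_SLI_REV4) {
		spin_lock_irqsave(&pring->ring_lock, iflags);
		__lpfc_sli_ringtx_put(phba, pring, piocb);
		spin_unlock_irqrestore(&pring->ring_lock, iflags);
	} else {
		spin_lock_irqsave(&phba->hbalock, iflags);
		__lpfc_sli_ringtx_put(phba, pring, piocb);
		spin_unlock_irqrestore(&phba->hbalock, iflags);
	}
}
#endif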
9219
9220/**
9221 * lpfc_sli_next_iocb - Get the next iocb in the txq
9222 * @phba: Pointer to HBA context object.
9223 * @pring: Pointer to driver SLI ring object.
9224 * @piocb: Pointer to address of newly added command iocb.
9225 *
9226 * This function is called with the hbalock held before a new
9227 * iocb is submitted to the firmware. This function checks the
9228 * txq so that iocbs already queued there are flushed to the
9229 * firmware before any new iocbs are submitted.
9230 * If there are iocbs in the txq which need to be submitted
9231 * to the firmware, lpfc_sli_next_iocb returns the first element
9232 * of the txq after dequeuing it from the txq.
9233 * If there is no iocb in the txq then the function returns
9234 * *piocb and sets *piocb to NULL. The caller needs to check
9235 * *piocb to find if there are more commands in the txq.
9236 **/
9237static struct lpfc_iocbq *
9238lpfc_sli_next_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
9239                   struct lpfc_iocbq **piocb)
9240{
9241        struct lpfc_iocbq *nextiocb;
9242
9243        lockdep_assert_held(&phba->hbalock);
9244
9245        nextiocb = lpfc_sli_ringtx_get(phba, pring);
9246        if (!nextiocb) {
9247                nextiocb = *piocb;
9248                *piocb = NULL;
9249        }
9250
9251        return nextiocb;
9252}
9253
9254/**
9255 * __lpfc_sli_issue_iocb_s3 - SLI3 device lockless ver of lpfc_sli_issue_iocb
9256 * @phba: Pointer to HBA context object.
9257 * @ring_number: SLI ring number to issue iocb on.
9258 * @piocb: Pointer to command iocb.
9259 * @flag: Flag indicating if this command can be put into txq.
9260 *
9261 * __lpfc_sli_issue_iocb_s3 is used by other functions in the driver to issue
9262 * an iocb command to an HBA with SLI-3 interface spec. If the PCI slot is
9263 * recovering from error state, if HBA is resetting or if LPFC_STOP_IOCB_EVENT
9264 * flag is turned on, the function returns IOCB_ERROR. When the link is down,
9265 * this function allows only iocbs for posting buffers. This function finds
9266 * next available slot in the command ring and posts the command to the
9267 * available slot and writes the port attention register to request HBA start
9268 * processing new iocb. If there is no slot available in the ring and
9269 * flag & SLI_IOCB_RET_IOCB is set, the new iocb is added to the txq, otherwise
9270 * the function returns IOCB_BUSY.
9271 *
9272 * This function is called with hbalock held. The function will return
9273 * success after it successfully submits the iocb to the firmware or
9274 * after adding it to the txq.
9275 **/
9276static int
9277__lpfc_sli_issue_iocb_s3(struct lpfc_hba *phba, uint32_t ring_number,
9278                    struct lpfc_iocbq *piocb, uint32_t flag)
9279{
9280        struct lpfc_iocbq *nextiocb;
9281        IOCB_t *iocb;
9282        struct lpfc_sli_ring *pring = &phba->sli.sli3_ring[ring_number];
9283
9284        lockdep_assert_held(&phba->hbalock);
9285
9286        if (piocb->iocb_cmpl && (!piocb->vport) &&
9287           (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) &&
9288           (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN)) {
9289                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9290                                "1807 IOCB x%x failed. No vport\n",
9291                                piocb->iocb.ulpCommand);
9292                dump_stack();
9293                return IOCB_ERROR;
9294        }
9295
9296
9297        /* If the PCI channel is in offline state, do not post iocbs. */
9298        if (unlikely(pci_channel_offline(phba->pcidev)))
9299                return IOCB_ERROR;
9300
9301        /* If HBA has a deferred error attention, fail the iocb. */
9302        if (unlikely(phba->hba_flag & DEFER_ERATT))
9303                return IOCB_ERROR;
9304
9305        /*
9306         * We should never get an IOCB if we are in a < LINK_DOWN state
9307         */
9308        if (unlikely(phba->link_state < LPFC_LINK_DOWN))
9309                return IOCB_ERROR;
9310
9311        /*
9312         * Check to see if we are blocking IOCB processing because of an
9313         * outstanding event.
9314         */
9315        if (unlikely(pring->flag & LPFC_STOP_IOCB_EVENT))
9316                goto iocb_busy;
9317
9318        if (unlikely(phba->link_state == LPFC_LINK_DOWN)) {
9319                /*
9320                 * Only CREATE_XRI, CLOSE_XRI, and QUE_RING_BUF
9321                 * can be issued if the link is not up.
9322                 */
9323                switch (piocb->iocb.ulpCommand) {
9324                case CMD_GEN_REQUEST64_CR:
9325                case CMD_GEN_REQUEST64_CX:
9326                        if (!(phba->sli.sli_flag & LPFC_MENLO_MAINT) ||
9327                                (piocb->iocb.un.genreq64.w5.hcsw.Rctl !=
9328                                        FC_RCTL_DD_UNSOL_CMD) ||
9329                                (piocb->iocb.un.genreq64.w5.hcsw.Type !=
9330                                        MENLO_TRANSPORT_TYPE))
9331
9332                                goto iocb_busy;
9333                        break;
9334                case CMD_QUE_RING_BUF_CN:
9335                case CMD_QUE_RING_BUF64_CN:
9336                        /*
9337                         * For IOCBs, like QUE_RING_BUF, that have no rsp ring
9338                         * completion, iocb_cmpl MUST be 0.
9339                         */
9340                        if (piocb->iocb_cmpl)
9341                                piocb->iocb_cmpl = NULL;
9342                        fallthrough;
9343                case CMD_CREATE_XRI_CR:
9344                case CMD_CLOSE_XRI_CN:
9345                case CMD_CLOSE_XRI_CX:
9346                        break;
9347                default:
9348                        goto iocb_busy;
9349                }
9350
9351        /*
9352         * For FCP commands, we must be in a state where we can process link
9353         * attention events.
9354         */
9355        } else if (unlikely(pring->ringno == LPFC_FCP_RING &&
9356                            !(phba->sli.sli_flag & LPFC_PROCESS_LA))) {
9357                goto iocb_busy;
9358        }
9359
9360        while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) &&
9361               (nextiocb = lpfc_sli_next_iocb(phba, pring, &piocb)))
9362                lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb);
9363
9364        if (iocb)
9365                lpfc_sli_update_ring(phba, pring);
9366        else
9367                lpfc_sli_update_full_ring(phba, pring);
9368
9369        if (!piocb)
9370                return IOCB_SUCCESS;
9371
9372        goto out_busy;
9373
9374 iocb_busy:
9375        pring->stats.iocb_cmd_delay++;
9376
9377 out_busy:
9378
9379        if (!(flag & SLI_IOCB_RET_IOCB)) {
9380                __lpfc_sli_ringtx_put(phba, pring, piocb);
9381                return IOCB_SUCCESS;
9382        }
9383
9384        return IOCB_BUSY;
9385}
9386
9387/**
9388 * lpfc_sli4_bpl2sgl - Convert the bpl/bde to a sgl.
9389 * @phba: Pointer to HBA context object.
9390 * @piocbq: Pointer to command iocb.
9391 * @sglq: Pointer to the scatter gather queue object.
9392 *
9393 * This routine converts the bpl or bde that is in the IOCB
9394 * to a sgl list for the sli4 hardware. The physical address
9395 * of the bpl/bde is converted back to a virtual address.
9396 * If the IOCB contains a BPL then the list of BDEs is
9397 * converted to sli4_sges. If the IOCB contains a single
9398 * BDE then it is converted to a single sli4_sge.
9399 * The IOCB is still in CPU endianness so the contents of
9400 * the bpl can be used without byte swapping.
9401 *
9402 * Returns valid XRI = Success, NO_XRI = Failure.
9403 **/
9404static uint16_t
9405lpfc_sli4_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq,
9406                struct lpfc_sglq *sglq)
9407{
9408        uint16_t xritag = NO_XRI;
9409        struct ulp_bde64 *bpl = NULL;
9410        struct ulp_bde64 bde;
9411        struct sli4_sge *sgl  = NULL;
9412        struct lpfc_dmabuf *dmabuf;
9413        IOCB_t *icmd;
9414        int numBdes = 0;
9415        int i = 0;
9416        uint32_t offset = 0; /* accumulated offset in the sg request list */
9417        int inbound = 0; /* number of sg reply entries inbound from firmware */
9418
9419        if (!piocbq || !sglq)
9420                return xritag;
9421
9422        sgl  = (struct sli4_sge *)sglq->sgl;
9423        icmd = &piocbq->iocb;
9424        if (icmd->ulpCommand == CMD_XMIT_BLS_RSP64_CX)
9425                return sglq->sli4_xritag;
9426        if (icmd->un.genreq64.bdl.bdeFlags == BUFF_TYPE_BLP_64) {
9427                numBdes = icmd->un.genreq64.bdl.bdeSize /
9428                                sizeof(struct ulp_bde64);
9429                /* The addrHigh and addrLow fields within the IOCB
9430                 * have not been byteswapped yet so there is no
9431                 * need to swap them back.
9432                 */
9433                if (piocbq->context3)
9434                        dmabuf = (struct lpfc_dmabuf *)piocbq->context3;
9435                else
9436                        return xritag;
9437
9438                bpl  = (struct ulp_bde64 *)dmabuf->virt;
9439                if (!bpl)
9440                        return xritag;
9441
9442                for (i = 0; i < numBdes; i++) {
9443                        /* Should already be byte swapped. */
9444                        sgl->addr_hi = bpl->addrHigh;
9445                        sgl->addr_lo = bpl->addrLow;
9446
9447                        sgl->word2 = le32_to_cpu(sgl->word2);
9448                        if ((i+1) == numBdes)
9449                                bf_set(lpfc_sli4_sge_last, sgl, 1);
9450                        else
9451                                bf_set(lpfc_sli4_sge_last, sgl, 0);
9452                        /* swap the size field back to the cpu so we
9453                         * can assign it to the sgl.
9454                         */
9455                        bde.tus.w = le32_to_cpu(bpl->tus.w);
9456                        sgl->sge_len = cpu_to_le32(bde.tus.f.bdeSize);
9457                        /* The offsets in the sgl need to be accumulated
9458                         * separately for the request and reply lists.
9459                         * The request is always first, the reply follows.
9460                         */
9461                        if (piocbq->iocb.ulpCommand == CMD_GEN_REQUEST64_CR) {
9462                                /* add up the reply sg entries */
9463                                if (bpl->tus.f.bdeFlags == BUFF_TYPE_BDE_64I)
9464                                        inbound++;
9465                                /* first inbound? reset the offset */
9466                                if (inbound == 1)
9467                                        offset = 0;
9468                                bf_set(lpfc_sli4_sge_offset, sgl, offset);
9469                                bf_set(lpfc_sli4_sge_type, sgl,
9470                                        LPFC_SGE_TYPE_DATA);
9471                                offset += bde.tus.f.bdeSize;
9472                        }
9473                        sgl->word2 = cpu_to_le32(sgl->word2);
9474                        bpl++;
9475                        sgl++;
9476                }
9477        } else if (icmd->un.genreq64.bdl.bdeFlags == BUFF_TYPE_BDE_64) {
9478                        /* The addrHigh and addrLow fields of the BDE have not
9479                         * been byteswapped yet so they need to be swapped
9480                         * before putting them in the sgl.
9481                         */
9482                        sgl->addr_hi =
9483                                cpu_to_le32(icmd->un.genreq64.bdl.addrHigh);
9484                        sgl->addr_lo =
9485                                cpu_to_le32(icmd->un.genreq64.bdl.addrLow);
9486                        sgl->word2 = le32_to_cpu(sgl->word2);
9487                        bf_set(lpfc_sli4_sge_last, sgl, 1);
9488                        sgl->word2 = cpu_to_le32(sgl->word2);
9489                        sgl->sge_len =
9490                                cpu_to_le32(icmd->un.genreq64.bdl.bdeSize);
9491        }
9492        return sglq->sli4_xritag;
9493}
9494
9495/**
9496 * lpfc_sli4_iocb2wqe - Convert the IOCB to a work queue entry.
9497 * @phba: Pointer to HBA context object.
9498 * @iocbq: Pointer to command iocb.
9499 * @wqe: Pointer to the work queue entry.
9500 *
9501 * This routine converts the iocb command to its Work Queue Entry
9502 * equivalent. The wqe pointer should not have any fields set when
9503 * this routine is called because it will memcpy over them.
9504 * This routine does not set the CQ_ID or the WQEC bits in the
9505 * wqe.
9506 *
9507 * Returns: 0 = Success, IOCB_ERROR = Failure.
9508 **/
9509static int
9510lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
9511                union lpfc_wqe128 *wqe)
9512{
9513        uint32_t xmit_len = 0, total_len = 0;
9514        uint8_t ct = 0;
9515        uint32_t fip;
9516        uint32_t abort_tag;
9517        uint8_t command_type = ELS_COMMAND_NON_FIP;
9518        uint8_t cmnd;
9519        uint16_t xritag;
9520        uint16_t abrt_iotag;
9521        struct lpfc_iocbq *abrtiocbq;
9522        struct ulp_bde64 *bpl = NULL;
9523        uint32_t els_id = LPFC_ELS_ID_DEFAULT;
9524        int numBdes, i;
9525        struct ulp_bde64 bde;
9526        struct lpfc_nodelist *ndlp;
9527        uint32_t *pcmd;
9528        uint32_t if_type;
9529
9530        fip = phba->hba_flag & HBA_FIP_SUPPORT;
9531        /* The fcp commands will set command type */
9532        if (iocbq->iocb_flag &  LPFC_IO_FCP)
9533                command_type = FCP_COMMAND;
9534        else if (fip && (iocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK))
9535                command_type = ELS_COMMAND_FIP;
9536        else
9537                command_type = ELS_COMMAND_NON_FIP;
9538
9539        if (phba->fcp_embed_io)
9540                memset(wqe, 0, sizeof(union lpfc_wqe128));
9541        /* Some of the fields are in the right position already */
9542        memcpy(wqe, &iocbq->iocb, sizeof(union lpfc_wqe));
9543        /* The ct field has moved so reset */
9544        wqe->generic.wqe_com.word7 = 0;
9545        wqe->generic.wqe_com.word10 = 0;
9546
9547        abort_tag = (uint32_t) iocbq->iotag;
9548        xritag = iocbq->sli4_xritag;
9549        /* words0-2 bpl convert bde */
9550        if (iocbq->iocb.un.genreq64.bdl.bdeFlags == BUFF_TYPE_BLP_64) {
9551                numBdes = iocbq->iocb.un.genreq64.bdl.bdeSize /
9552                                sizeof(struct ulp_bde64);
9553                bpl  = (struct ulp_bde64 *)
9554                        ((struct lpfc_dmabuf *)iocbq->context3)->virt;
9555                if (!bpl)
9556                        return IOCB_ERROR;
9557
9558                /* Should already be byte swapped. */
9559                wqe->generic.bde.addrHigh =  le32_to_cpu(bpl->addrHigh);
9560                wqe->generic.bde.addrLow =  le32_to_cpu(bpl->addrLow);
9561                /* swap the size field back to the cpu so we
9562                 * can assign it to the wqe.
9563                 */
9564                wqe->generic.bde.tus.w  = le32_to_cpu(bpl->tus.w);
9565                xmit_len = wqe->generic.bde.tus.f.bdeSize;
9566                total_len = 0;
9567                for (i = 0; i < numBdes; i++) {
9568                        bde.tus.w  = le32_to_cpu(bpl[i].tus.w);
9569                        total_len += bde.tus.f.bdeSize;
9570                }
9571        } else
9572                xmit_len = iocbq->iocb.un.fcpi64.bdl.bdeSize;
9573
9574        iocbq->iocb.ulpIoTag = iocbq->iotag;
9575        cmnd = iocbq->iocb.ulpCommand;
9576
9577        switch (iocbq->iocb.ulpCommand) {
9578        case CMD_ELS_REQUEST64_CR:
9579                if (iocbq->iocb_flag & LPFC_IO_LIBDFC)
9580                        ndlp = iocbq->context_un.ndlp;
9581                else
9582                        ndlp = (struct lpfc_nodelist *)iocbq->context1;
9583                if (!iocbq->iocb.ulpLe) {
9584                        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9585                                "2007 Only Limited Edition cmd Format"
9586                                " supported 0x%x\n",
9587                                iocbq->iocb.ulpCommand);
9588                        return IOCB_ERROR;
9589                }
9590
9591                wqe->els_req.payload_len = xmit_len;
9592                /* Els_request64 has a TMO */
9593                bf_set(wqe_tmo, &wqe->els_req.wqe_com,
9594                        iocbq->iocb.ulpTimeout);
9595                /* Need a VF for word 4; set the vf bit */
9596                bf_set(els_req64_vf, &wqe->els_req, 0);
9597                /* And a VFID for word 12 */
9598                bf_set(els_req64_vfid, &wqe->els_req, 0);
9599                ct = ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l);
9600                bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
9601                       iocbq->iocb.ulpContext);
9602                bf_set(wqe_ct, &wqe->els_req.wqe_com, ct);
9603                bf_set(wqe_pu, &wqe->els_req.wqe_com, 0);
9604                /* CCP CCPE PV PRI in word10 were set in the memcpy */
9605                if (command_type == ELS_COMMAND_FIP)
9606                        els_id = ((iocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK)
9607                                        >> LPFC_FIP_ELS_ID_SHIFT);
9608                pcmd = (uint32_t *) (((struct lpfc_dmabuf *)
9609                                        iocbq->context2)->virt);
9610                if_type = bf_get(lpfc_sli_intf_if_type,
9611                                        &phba->sli4_hba.sli_intf);
9612                if (if_type >= LPFC_SLI_INTF_IF_TYPE_2) {
9613                        if (pcmd && (*pcmd == ELS_CMD_FLOGI ||
9614                                *pcmd == ELS_CMD_SCR ||
9615                                *pcmd == ELS_CMD_RDF ||
9616                                *pcmd == ELS_CMD_RSCN_XMT ||
9617                                *pcmd == ELS_CMD_FDISC ||
9618                                *pcmd == ELS_CMD_LOGO ||
9619                                *pcmd == ELS_CMD_PLOGI)) {
9620                                bf_set(els_req64_sp, &wqe->els_req, 1);
9621                                bf_set(els_req64_sid, &wqe->els_req,
9622                                        iocbq->vport->fc_myDID);
9623                                if ((*pcmd == ELS_CMD_FLOGI) &&
9624                                        !(phba->fc_topology ==
9625                                                LPFC_TOPOLOGY_LOOP))
9626                                        bf_set(els_req64_sid, &wqe->els_req, 0);
9627                                bf_set(wqe_ct, &wqe->els_req.wqe_com, 1);
9628                                bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
9629                                        phba->vpi_ids[iocbq->vport->vpi]);
9630                        } else if (pcmd && iocbq->context1) {
9631                                bf_set(wqe_ct, &wqe->els_req.wqe_com, 0);
9632                                bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
9633                                        phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
9634                        }
9635                }
9636                bf_set(wqe_temp_rpi, &wqe->els_req.wqe_com,
9637                       phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
9638                bf_set(wqe_els_id, &wqe->els_req.wqe_com, els_id);
9639                bf_set(wqe_dbde, &wqe->els_req.wqe_com, 1);
9640                bf_set(wqe_iod, &wqe->els_req.wqe_com, LPFC_WQE_IOD_READ);
9641                bf_set(wqe_qosd, &wqe->els_req.wqe_com, 1);
9642                bf_set(wqe_lenloc, &wqe->els_req.wqe_com, LPFC_WQE_LENLOC_NONE);
9643                bf_set(wqe_ebde_cnt, &wqe->els_req.wqe_com, 0);
9644                wqe->els_req.max_response_payload_len = total_len - xmit_len;
9645                break;
9646        case CMD_XMIT_SEQUENCE64_CX:
9647                bf_set(wqe_ctxt_tag, &wqe->xmit_sequence.wqe_com,
9648                       iocbq->iocb.un.ulpWord[3]);
9649                bf_set(wqe_rcvoxid, &wqe->xmit_sequence.wqe_com,
9650                       iocbq->iocb.unsli3.rcvsli3.ox_id);
9651                /* The entire sequence is transmitted for this IOCB */
9652                xmit_len = total_len;
9653                cmnd = CMD_XMIT_SEQUENCE64_CR;
9654                if (phba->link_flag & LS_LOOPBACK_MODE)
9655                        bf_set(wqe_xo, &wqe->xmit_sequence.wge_ctl, 1);
9656                fallthrough;
9657        case CMD_XMIT_SEQUENCE64_CR:
9658                /* word3 iocb=io_tag32 wqe=reserved */
9659                wqe->xmit_sequence.rsvd3 = 0;
9660                /* word4 relative_offset memcpy */
9661                /* word5 r_ctl/df_ctl memcpy */
9662                bf_set(wqe_pu, &wqe->xmit_sequence.wqe_com, 0);
9663                bf_set(wqe_dbde, &wqe->xmit_sequence.wqe_com, 1);
9664                bf_set(wqe_iod, &wqe->xmit_sequence.wqe_com,
9665                       LPFC_WQE_IOD_WRITE);
9666                bf_set(wqe_lenloc, &wqe->xmit_sequence.wqe_com,
9667                       LPFC_WQE_LENLOC_WORD12);
9668                bf_set(wqe_ebde_cnt, &wqe->xmit_sequence.wqe_com, 0);
9669                wqe->xmit_sequence.xmit_len = xmit_len;
9670                command_type = OTHER_COMMAND;
9671                break;
9672        case CMD_XMIT_BCAST64_CN:
9673                /* word3 iocb=iotag32 wqe=seq_payload_len */
9674                wqe->xmit_bcast64.seq_payload_len = xmit_len;
9675                /* word4 iocb=rsvd wqe=rsvd */
9676                /* word5 iocb=rctl/type/df_ctl wqe=rctl/type/df_ctl memcpy */
9677                /* word6 iocb=ctxt_tag/io_tag wqe=ctxt_tag/xri */
9678                bf_set(wqe_ct, &wqe->xmit_bcast64.wqe_com,
9679                        ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
9680                bf_set(wqe_dbde, &wqe->xmit_bcast64.wqe_com, 1);
9681                bf_set(wqe_iod, &wqe->xmit_bcast64.wqe_com, LPFC_WQE_IOD_WRITE);
9682                bf_set(wqe_lenloc, &wqe->xmit_bcast64.wqe_com,
9683                       LPFC_WQE_LENLOC_WORD3);
9684                bf_set(wqe_ebde_cnt, &wqe->xmit_bcast64.wqe_com, 0);
9685                break;
9686        case CMD_FCP_IWRITE64_CR:
9687                command_type = FCP_COMMAND_DATA_OUT;
9688                /* word3 iocb=iotag wqe=payload_offset_len */
9689                /* Add the FCP_CMD and FCP_RSP sizes to get the offset */
9690                bf_set(payload_offset_len, &wqe->fcp_iwrite,
9691                       xmit_len + sizeof(struct fcp_rsp));
9692                bf_set(cmd_buff_len, &wqe->fcp_iwrite,
9693                       0);
9694                /* word4 iocb=parameter wqe=total_xfer_length memcpy */
9695                /* word5 iocb=initial_xfer_len wqe=initial_xfer_len memcpy */
9696                bf_set(wqe_erp, &wqe->fcp_iwrite.wqe_com,
9697                       iocbq->iocb.ulpFCP2Rcvy);
9698                bf_set(wqe_lnk, &wqe->fcp_iwrite.wqe_com, iocbq->iocb.ulpXS);
9699                /* Always open the exchange */
9700                bf_set(wqe_iod, &wqe->fcp_iwrite.wqe_com, LPFC_WQE_IOD_WRITE);
9701                bf_set(wqe_lenloc, &wqe->fcp_iwrite.wqe_com,
9702                       LPFC_WQE_LENLOC_WORD4);
9703                bf_set(wqe_pu, &wqe->fcp_iwrite.wqe_com, iocbq->iocb.ulpPU);
9704                bf_set(wqe_dbde, &wqe->fcp_iwrite.wqe_com, 1);
9705                if (iocbq->iocb_flag & LPFC_IO_OAS) {
9706                        bf_set(wqe_oas, &wqe->fcp_iwrite.wqe_com, 1);
9707                        bf_set(wqe_ccpe, &wqe->fcp_iwrite.wqe_com, 1);
9708                        if (iocbq->priority) {
9709                                bf_set(wqe_ccp, &wqe->fcp_iwrite.wqe_com,
9710                                       (iocbq->priority << 1));
9711                        } else {
9712                                bf_set(wqe_ccp, &wqe->fcp_iwrite.wqe_com,
9713                                       (phba->cfg_XLanePriority << 1));
9714                        }
9715                }
9716                /* Note, word 10 is already initialized to 0 */
9717
9718                /* Don't set PBDE for Perf hints, just lpfc_enable_pbde */
9719                if (phba->cfg_enable_pbde)
9720                        bf_set(wqe_pbde, &wqe->fcp_iwrite.wqe_com, 1);
9721                else
9722                        bf_set(wqe_pbde, &wqe->fcp_iwrite.wqe_com, 0);
9723
9724                if (phba->fcp_embed_io) {
9725                        struct lpfc_io_buf *lpfc_cmd;
9726                        struct sli4_sge *sgl;
9727                        struct fcp_cmnd *fcp_cmnd;
9728                        uint32_t *ptr;
9729
9730                        /* 128 byte wqe support here */
9731
9732                        lpfc_cmd = iocbq->context1;
9733                        sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl;
9734                        fcp_cmnd = lpfc_cmd->fcp_cmnd;
9735
9736                        /* Word 0-2 - FCP_CMND */
9737                        wqe->generic.bde.tus.f.bdeFlags =
9738                                BUFF_TYPE_BDE_IMMED;
9739                        wqe->generic.bde.tus.f.bdeSize = sgl->sge_len;
9740                        wqe->generic.bde.addrHigh = 0;
9741                        wqe->generic.bde.addrLow =  88;  /* Word 22 */
9742
9743                        bf_set(wqe_wqes, &wqe->fcp_iwrite.wqe_com, 1);
9744                        bf_set(wqe_dbde, &wqe->fcp_iwrite.wqe_com, 0);
9745
9746                        /* Word 22-29  FCP CMND Payload */
9747                        ptr = &wqe->words[22];
9748                        memcpy(ptr, fcp_cmnd, sizeof(struct fcp_cmnd));
9749                }
9750                break;
9751        case CMD_FCP_IREAD64_CR:
9752                /* word3 iocb=iotag wqe=payload_offset_len */
9753                /* Add the FCP_CMD and FCP_RSP sizes to get the offset */
9754                bf_set(payload_offset_len, &wqe->fcp_iread,
9755                       xmit_len + sizeof(struct fcp_rsp));
9756                bf_set(cmd_buff_len, &wqe->fcp_iread,
9757                       0);
9758                /* word4 iocb=parameter wqe=total_xfer_length memcpy */
9759                /* word5 iocb=initial_xfer_len wqe=initial_xfer_len memcpy */
9760                bf_set(wqe_erp, &wqe->fcp_iread.wqe_com,
9761                       iocbq->iocb.ulpFCP2Rcvy);
9762                bf_set(wqe_lnk, &wqe->fcp_iread.wqe_com, iocbq->iocb.ulpXS);
9763                /* Always open the exchange */
9764                bf_set(wqe_iod, &wqe->fcp_iread.wqe_com, LPFC_WQE_IOD_READ);
9765                bf_set(wqe_lenloc, &wqe->fcp_iread.wqe_com,
9766                       LPFC_WQE_LENLOC_WORD4);
9767                bf_set(wqe_pu, &wqe->fcp_iread.wqe_com, iocbq->iocb.ulpPU);
9768                bf_set(wqe_dbde, &wqe->fcp_iread.wqe_com, 1);
9769                if (iocbq->iocb_flag & LPFC_IO_OAS) {
9770                        bf_set(wqe_oas, &wqe->fcp_iread.wqe_com, 1);
9771                        bf_set(wqe_ccpe, &wqe->fcp_iread.wqe_com, 1);
9772                        if (iocbq->priority) {
9773                                bf_set(wqe_ccp, &wqe->fcp_iread.wqe_com,
9774                                       (iocbq->priority << 1));
9775                        } else {
9776                                bf_set(wqe_ccp, &wqe->fcp_iread.wqe_com,
9777                                       (phba->cfg_XLanePriority << 1));
9778                        }
9779                }
9780                /* Note, word 10 is already initialized to 0 */
9781
9782                /* Don't set PBDE for Perf hints, just lpfc_enable_pbde */
9783                if (phba->cfg_enable_pbde)
9784                        bf_set(wqe_pbde, &wqe->fcp_iread.wqe_com, 1);
9785                else
9786                        bf_set(wqe_pbde, &wqe->fcp_iread.wqe_com, 0);
9787
9788                if (phba->fcp_embed_io) {
9789                        struct lpfc_io_buf *lpfc_cmd;
9790                        struct sli4_sge *sgl;
9791                        struct fcp_cmnd *fcp_cmnd;
9792                        uint32_t *ptr;
9793
9794                        /* 128 byte wqe support here */
9795
9796                        lpfc_cmd = iocbq->context1;
9797                        sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl;
9798                        fcp_cmnd = lpfc_cmd->fcp_cmnd;
9799
9800                        /* Word 0-2 - FCP_CMND */
9801                        wqe->generic.bde.tus.f.bdeFlags =
9802                                BUFF_TYPE_BDE_IMMED;
9803                        wqe->generic.bde.tus.f.bdeSize = sgl->sge_len;
9804                        wqe->generic.bde.addrHigh = 0;
9805                        wqe->generic.bde.addrLow =  88;  /* Word 22 */
9806
9807                        bf_set(wqe_wqes, &wqe->fcp_iread.wqe_com, 1);
9808                        bf_set(wqe_dbde, &wqe->fcp_iread.wqe_com, 0);
9809
9810                        /* Word 22-29  FCP CMND Payload */
9811                        ptr = &wqe->words[22];
9812                        memcpy(ptr, fcp_cmnd, sizeof(struct fcp_cmnd));
9813                }
9814                break;
9815        case CMD_FCP_ICMND64_CR:
9816                /* word3 iocb=iotag wqe=payload_offset_len */
9817                /* Add the FCP_CMD and FCP_RSP sizes to get the offset */
9818                bf_set(payload_offset_len, &wqe->fcp_icmd,
9819                       xmit_len + sizeof(struct fcp_rsp));
9820                bf_set(cmd_buff_len, &wqe->fcp_icmd,
9821                       0);
9822                /* word3 iocb=IO_TAG wqe=reserved */
9823                bf_set(wqe_pu, &wqe->fcp_icmd.wqe_com, 0);
9824                /* Always open the exchange */
9825                bf_set(wqe_dbde, &wqe->fcp_icmd.wqe_com, 1);
9826                bf_set(wqe_iod, &wqe->fcp_icmd.wqe_com, LPFC_WQE_IOD_WRITE);
9827                bf_set(wqe_qosd, &wqe->fcp_icmd.wqe_com, 1);
9828                bf_set(wqe_lenloc, &wqe->fcp_icmd.wqe_com,
9829                       LPFC_WQE_LENLOC_NONE);
9830                bf_set(wqe_erp, &wqe->fcp_icmd.wqe_com,
9831                       iocbq->iocb.ulpFCP2Rcvy);
9832                if (iocbq->iocb_flag & LPFC_IO_OAS) {
9833                        bf_set(wqe_oas, &wqe->fcp_icmd.wqe_com, 1);
9834                        bf_set(wqe_ccpe, &wqe->fcp_icmd.wqe_com, 1);
9835                        if (iocbq->priority) {
9836                                bf_set(wqe_ccp, &wqe->fcp_icmd.wqe_com,
9837                                       (iocbq->priority << 1));
9838                        } else {
9839                                bf_set(wqe_ccp, &wqe->fcp_icmd.wqe_com,
9840                                       (phba->cfg_XLanePriority << 1));
9841                        }
9842                }
9843                /* Note, word 10 is already initialized to 0 */
9844
9845                if (phba->fcp_embed_io) {
9846                        struct lpfc_io_buf *lpfc_cmd;
9847                        struct sli4_sge *sgl;
9848                        struct fcp_cmnd *fcp_cmnd;
9849                        uint32_t *ptr;
9850
9851                        /* 128 byte wqe support here */
9852
9853                        lpfc_cmd = iocbq->context1;
9854                        sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl;
9855                        fcp_cmnd = lpfc_cmd->fcp_cmnd;
9856
9857                        /* Word 0-2 - FCP_CMND */
9858                        wqe->generic.bde.tus.f.bdeFlags =
9859                                BUFF_TYPE_BDE_IMMED;
9860                        wqe->generic.bde.tus.f.bdeSize = sgl->sge_len;
9861                        wqe->generic.bde.addrHigh = 0;
9862                        wqe->generic.bde.addrLow =  88;  /* Word 22 */
9863
9864                        bf_set(wqe_wqes, &wqe->fcp_icmd.wqe_com, 1);
9865                        bf_set(wqe_dbde, &wqe->fcp_icmd.wqe_com, 0);
9866
9867                        /* Word 22-29  FCP CMND Payload */
9868                        ptr = &wqe->words[22];
9869                        memcpy(ptr, fcp_cmnd, sizeof(struct fcp_cmnd));
9870                }
9871                break;
9872        case CMD_GEN_REQUEST64_CR:
9873                /* For this command calculate the xmit length of the
9874                 * request bde.
9875                 */
9876                xmit_len = 0;
9877                numBdes = iocbq->iocb.un.genreq64.bdl.bdeSize /
9878                        sizeof(struct ulp_bde64);
9879                for (i = 0; i < numBdes; i++) {
9880                        bde.tus.w = le32_to_cpu(bpl[i].tus.w);
9881                        if (bde.tus.f.bdeFlags != BUFF_TYPE_BDE_64)
9882                                break;
9883                        xmit_len += bde.tus.f.bdeSize;
9884                }
9885                /* word3 iocb=IO_TAG wqe=request_payload_len */
9886                wqe->gen_req.request_payload_len = xmit_len;
9887                /* word4 iocb=parameter wqe=relative_offset memcpy */
9888                /* word5 [rctl, type, df_ctl, la] copied in memcpy */
9889                /* word6 context tag copied in memcpy */
9890                if (iocbq->iocb.ulpCt_h  || iocbq->iocb.ulpCt_l) {
9891                        ct = ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l);
9892                        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9893                                "2015 Invalid CT %x command 0x%x\n",
9894                                ct, iocbq->iocb.ulpCommand);
9895                        return IOCB_ERROR;
9896                }
9897                bf_set(wqe_ct, &wqe->gen_req.wqe_com, 0);
9898                bf_set(wqe_tmo, &wqe->gen_req.wqe_com, iocbq->iocb.ulpTimeout);
9899                bf_set(wqe_pu, &wqe->gen_req.wqe_com, iocbq->iocb.ulpPU);
9900                bf_set(wqe_dbde, &wqe->gen_req.wqe_com, 1);
9901                bf_set(wqe_iod, &wqe->gen_req.wqe_com, LPFC_WQE_IOD_READ);
9902                bf_set(wqe_qosd, &wqe->gen_req.wqe_com, 1);
9903                bf_set(wqe_lenloc, &wqe->gen_req.wqe_com, LPFC_WQE_LENLOC_NONE);
9904                bf_set(wqe_ebde_cnt, &wqe->gen_req.wqe_com, 0);
9905                wqe->gen_req.max_response_payload_len = total_len - xmit_len;
9906                command_type = OTHER_COMMAND;
9907                break;
9908        case CMD_XMIT_ELS_RSP64_CX:
9909                ndlp = (struct lpfc_nodelist *)iocbq->context1;
9910                /* words0-2 BDE memcpy */
9911                /* word3 iocb=iotag32 wqe=response_payload_len */
9912                wqe->xmit_els_rsp.response_payload_len = xmit_len;
9913                /* word4 */
9914                wqe->xmit_els_rsp.word4 = 0;
9915        /* word5 iocb=rsvd wqe=did */
9916                bf_set(wqe_els_did, &wqe->xmit_els_rsp.wqe_dest,
9917                         iocbq->iocb.un.xseq64.xmit_els_remoteID);
9918
9919                if_type = bf_get(lpfc_sli_intf_if_type,
9920                                        &phba->sli4_hba.sli_intf);
9921                if (if_type >= LPFC_SLI_INTF_IF_TYPE_2) {
9922                        if (iocbq->vport->fc_flag & FC_PT2PT) {
9923                                bf_set(els_rsp64_sp, &wqe->xmit_els_rsp, 1);
9924                                bf_set(els_rsp64_sid, &wqe->xmit_els_rsp,
9925                                        iocbq->vport->fc_myDID);
9926                                if (iocbq->vport->fc_myDID == Fabric_DID) {
9927                                        bf_set(wqe_els_did,
9928                                                &wqe->xmit_els_rsp.wqe_dest, 0);
9929                                }
9930                        }
9931                }
9932                bf_set(wqe_ct, &wqe->xmit_els_rsp.wqe_com,
9933                       ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
9934                bf_set(wqe_pu, &wqe->xmit_els_rsp.wqe_com, iocbq->iocb.ulpPU);
9935                bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com,
9936                       iocbq->iocb.unsli3.rcvsli3.ox_id);
9937                if (!iocbq->iocb.ulpCt_h && iocbq->iocb.ulpCt_l)
9938                        bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com,
9939                               phba->vpi_ids[iocbq->vport->vpi]);
9940                bf_set(wqe_dbde, &wqe->xmit_els_rsp.wqe_com, 1);
9941                bf_set(wqe_iod, &wqe->xmit_els_rsp.wqe_com, LPFC_WQE_IOD_WRITE);
9942                bf_set(wqe_qosd, &wqe->xmit_els_rsp.wqe_com, 1);
9943                bf_set(wqe_lenloc, &wqe->xmit_els_rsp.wqe_com,
9944                       LPFC_WQE_LENLOC_WORD3);
9945                bf_set(wqe_ebde_cnt, &wqe->xmit_els_rsp.wqe_com, 0);
9946                bf_set(wqe_rsp_temp_rpi, &wqe->xmit_els_rsp,
9947                       phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
9948                pcmd = (uint32_t *) (((struct lpfc_dmabuf *)
9949                                        iocbq->context2)->virt);
9950                if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
9951                                bf_set(els_rsp64_sp, &wqe->xmit_els_rsp, 1);
9952                                bf_set(els_rsp64_sid, &wqe->xmit_els_rsp,
9953                                        iocbq->vport->fc_myDID);
9954                                bf_set(wqe_ct, &wqe->xmit_els_rsp.wqe_com, 1);
9955                                bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com,
9956                                        phba->vpi_ids[phba->pport->vpi]);
9957                }
9958                command_type = OTHER_COMMAND;
9959                break;
9960        case CMD_CLOSE_XRI_CN:
9961        case CMD_ABORT_XRI_CN:
9962        case CMD_ABORT_XRI_CX:
9963                /* words 0-2 of the memcpy are reserved and should be 0 */
9964                /* port will send abts */
9965                abrt_iotag = iocbq->iocb.un.acxri.abortContextTag;
9966                if (abrt_iotag != 0 && abrt_iotag <= phba->sli.last_iotag) {
9967                        abrtiocbq = phba->sli.iocbq_lookup[abrt_iotag];
9968                        fip = abrtiocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK;
9969                } else
9970                        fip = 0;
9971
9972                if ((iocbq->iocb.ulpCommand == CMD_CLOSE_XRI_CN) || fip)
9973                        /*
9974                         * The link is down, or the command was ELS_FIP
9975                         * so the fw does not need to send abts
9976                         * on the wire.
9977                         */
9978                        bf_set(abort_cmd_ia, &wqe->abort_cmd, 1);
9979                else
9980                        bf_set(abort_cmd_ia, &wqe->abort_cmd, 0);
9981                bf_set(abort_cmd_criteria, &wqe->abort_cmd, T_XRI_TAG);
9982                /* word5 iocb=CONTEXT_TAG|IO_TAG wqe=reserved */
9983                wqe->abort_cmd.rsrvd5 = 0;
9984                bf_set(wqe_ct, &wqe->abort_cmd.wqe_com,
9985                        ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
9986                abort_tag = iocbq->iocb.un.acxri.abortIoTag;
9987                /*
9988                 * The abort handler will send us CMD_ABORT_XRI_CN or
9989                 * CMD_CLOSE_XRI_CN and the fw only accepts CMD_ABORT_XRI_CX
9990                 */
9991                bf_set(wqe_cmnd, &wqe->abort_cmd.wqe_com, CMD_ABORT_XRI_CX);
9992                bf_set(wqe_qosd, &wqe->abort_cmd.wqe_com, 1);
9993                bf_set(wqe_lenloc, &wqe->abort_cmd.wqe_com,
9994                       LPFC_WQE_LENLOC_NONE);
9995                cmnd = CMD_ABORT_XRI_CX;
9996                command_type = OTHER_COMMAND;
9997                xritag = 0;
9998                break;
9999        case CMD_XMIT_BLS_RSP64_CX:
10000                ndlp = (struct lpfc_nodelist *)iocbq->context1;
10001                /* As BLS ABTS RSP WQE is very different from other WQEs,
10002                 * we re-construct this WQE here based on information in
10003                 * iocbq from scratch.
10004                 */
10005                memset(wqe, 0, sizeof(*wqe));
10006                /* OX_ID is unchanged regardless of who sent ABTS to the CT exchange */
10007                bf_set(xmit_bls_rsp64_oxid, &wqe->xmit_bls_rsp,
10008                       bf_get(lpfc_abts_oxid, &iocbq->iocb.un.bls_rsp));
10009                if (bf_get(lpfc_abts_orig, &iocbq->iocb.un.bls_rsp) ==
10010                    LPFC_ABTS_UNSOL_INT) {
10011                        /* ABTS sent by initiator to CT exchange, the
10012                         * RX_ID field will be filled with the newly
10013                         * allocated responder XRI.
10014                         */
10015                        bf_set(xmit_bls_rsp64_rxid, &wqe->xmit_bls_rsp,
10016                               iocbq->sli4_xritag);
10017                } else {
10018                        /* ABTS sent by responder to CT exchange, the
10019                         * RX_ID field will be filled with the responder
10020                         * RX_ID from ABTS.
10021                         */
10022                        bf_set(xmit_bls_rsp64_rxid, &wqe->xmit_bls_rsp,
10023                               bf_get(lpfc_abts_rxid, &iocbq->iocb.un.bls_rsp));
10024                }
10025                bf_set(xmit_bls_rsp64_seqcnthi, &wqe->xmit_bls_rsp, 0xffff);
10026                bf_set(wqe_xmit_bls_pt, &wqe->xmit_bls_rsp.wqe_dest, 0x1);
10027
10028                /* Use CT=VPI */
10029                bf_set(wqe_els_did, &wqe->xmit_bls_rsp.wqe_dest,
10030                        ndlp->nlp_DID);
10031                bf_set(xmit_bls_rsp64_temprpi, &wqe->xmit_bls_rsp,
10032                        iocbq->iocb.ulpContext);
10033                bf_set(wqe_ct, &wqe->xmit_bls_rsp.wqe_com, 1);
10034                bf_set(wqe_ctxt_tag, &wqe->xmit_bls_rsp.wqe_com,
10035                        phba->vpi_ids[phba->pport->vpi]);
10036                bf_set(wqe_qosd, &wqe->xmit_bls_rsp.wqe_com, 1);
10037                bf_set(wqe_lenloc, &wqe->xmit_bls_rsp.wqe_com,
10038                       LPFC_WQE_LENLOC_NONE);
10039                /* Overwrite the pre-set command type with OTHER_COMMAND */
10040                command_type = OTHER_COMMAND;
10041                if (iocbq->iocb.un.xseq64.w5.hcsw.Rctl == FC_RCTL_BA_RJT) {
10042                        bf_set(xmit_bls_rsp64_rjt_vspec, &wqe->xmit_bls_rsp,
10043                               bf_get(lpfc_vndr_code, &iocbq->iocb.un.bls_rsp));
10044                        bf_set(xmit_bls_rsp64_rjt_expc, &wqe->xmit_bls_rsp,
10045                               bf_get(lpfc_rsn_expln, &iocbq->iocb.un.bls_rsp));
10046                        bf_set(xmit_bls_rsp64_rjt_rsnc, &wqe->xmit_bls_rsp,
10047                               bf_get(lpfc_rsn_code, &iocbq->iocb.un.bls_rsp));
10048                }
10049
10050                break;
10051        case CMD_SEND_FRAME:
10052                bf_set(wqe_cmnd, &wqe->generic.wqe_com, CMD_SEND_FRAME);
10053                bf_set(wqe_sof, &wqe->generic.wqe_com, 0x2E); /* SOF byte */
10054                bf_set(wqe_eof, &wqe->generic.wqe_com, 0x41); /* EOF byte */
10055                bf_set(wqe_lenloc, &wqe->generic.wqe_com, 1);
10056                bf_set(wqe_xbl, &wqe->generic.wqe_com, 1);
10057                bf_set(wqe_dbde, &wqe->generic.wqe_com, 1);
10058                bf_set(wqe_xc, &wqe->generic.wqe_com, 1);
10059                bf_set(wqe_cmd_type, &wqe->generic.wqe_com, 0xA);
10060                bf_set(wqe_cqid, &wqe->generic.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
10061                bf_set(wqe_xri_tag, &wqe->generic.wqe_com, xritag);
10062                bf_set(wqe_reqtag, &wqe->generic.wqe_com, iocbq->iotag);
10063                return 0;
10064        case CMD_XRI_ABORTED_CX:
10065        case CMD_CREATE_XRI_CR: /* Do we expect to use this? */
10066        case CMD_IOCB_FCP_IBIDIR64_CR: /* bidirectional xfer */
10067        case CMD_FCP_TSEND64_CX: /* Target mode send xfer-ready */
10068        case CMD_FCP_TRSP64_CX: /* Target mode rcv */
10069        case CMD_FCP_AUTO_TRSP_CX: /* Auto target rsp */
10070        default:
10071                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10072                                "2014 Invalid command 0x%x\n",
10073                                iocbq->iocb.ulpCommand);
10074                return IOCB_ERROR;
10076        }
10077
10078        if (iocbq->iocb_flag & LPFC_IO_DIF_PASS)
10079                bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_PASSTHRU);
10080        else if (iocbq->iocb_flag & LPFC_IO_DIF_STRIP)
10081                bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_STRIP);
10082        else if (iocbq->iocb_flag & LPFC_IO_DIF_INSERT)
10083                bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_INSERT);
10084        iocbq->iocb_flag &= ~(LPFC_IO_DIF_PASS | LPFC_IO_DIF_STRIP |
10085                              LPFC_IO_DIF_INSERT);
10086        bf_set(wqe_xri_tag, &wqe->generic.wqe_com, xritag);
10087        bf_set(wqe_reqtag, &wqe->generic.wqe_com, iocbq->iotag);
10088        wqe->generic.wqe_com.abort_tag = abort_tag;
10089        bf_set(wqe_cmd_type, &wqe->generic.wqe_com, command_type);
10090        bf_set(wqe_cmnd, &wqe->generic.wqe_com, cmnd);
10091        bf_set(wqe_class, &wqe->generic.wqe_com, iocbq->iocb.ulpClass);
10092        bf_set(wqe_cqid, &wqe->generic.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
10093        return 0;
10094}
10095
10096/**
10097 * __lpfc_sli_issue_iocb_s4 - SLI4 device lockless ver of lpfc_sli_issue_iocb
10098 * @phba: Pointer to HBA context object.
10099 * @ring_number: SLI ring number to issue iocb on.
10100 * @piocb: Pointer to command iocb.
10101 * @flag: Flag indicating if this command can be put into txq.
10102 *
10103 * __lpfc_sli_issue_iocb_s4 is used by other functions in the driver to issue
10104 * an iocb command to an HBA with SLI-4 interface spec.
10105 *
10106 * This function is called with the ring lock held. The function will return
10107 * success after it successfully submits the iocb to the firmware or after
10108 * adding it to the txq.
10109 **/
10110static int
10111__lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number,
10112                         struct lpfc_iocbq *piocb, uint32_t flag)
10113{
10114        struct lpfc_sglq *sglq;
10115        union lpfc_wqe128 wqe;
10116        struct lpfc_queue *wq;
10117        struct lpfc_sli_ring *pring;
10118
10119        /* Get the WQ */
10120        if ((piocb->iocb_flag & LPFC_IO_FCP) ||
10121            (piocb->iocb_flag & LPFC_USE_FCPWQIDX)) {
10122                wq = phba->sli4_hba.hdwq[piocb->hba_wqidx].io_wq;
10123        } else {
10124                wq = phba->sli4_hba.els_wq;
10125        }
10126
10127        /* Get corresponding ring */
10128        pring = wq->pring;
10129
10130        /*
10131         * The WQE can be either 64 or 128 bytes.
10132         */
10133
10134        lockdep_assert_held(&pring->ring_lock);
10135
10136        if (piocb->sli4_xritag == NO_XRI) {
10137                if (piocb->iocb.ulpCommand == CMD_ABORT_XRI_CN ||
10138                    piocb->iocb.ulpCommand == CMD_CLOSE_XRI_CN)
10139                        sglq = NULL;
10140                else {
10141                        if (!list_empty(&pring->txq)) {
10142                                if (!(flag & SLI_IOCB_RET_IOCB)) {
10143                                        __lpfc_sli_ringtx_put(phba,
10144                                                pring, piocb);
10145                                        return IOCB_SUCCESS;
10146                                } else {
10147                                        return IOCB_BUSY;
10148                                }
10149                        } else {
10150                                sglq = __lpfc_sli_get_els_sglq(phba, piocb);
10151                                if (!sglq) {
10152                                        if (!(flag & SLI_IOCB_RET_IOCB)) {
10153                                                __lpfc_sli_ringtx_put(phba,
10154                                                                pring,
10155                                                                piocb);
10156                                                return IOCB_SUCCESS;
10157                                        } else
10158                                                return IOCB_BUSY;
10159                                }
10160                        }
10161                }
10162        } else if (piocb->iocb_flag &  LPFC_IO_FCP)
10163                /* These IOs already have an XRI and a mapped sgl. */
10164                sglq = NULL;
10165        else {
10166                /*
10167                 * This is a continuation of a command (CX), so this
10168                 * sglq is on the active list
10169                 */
10170                sglq = __lpfc_get_active_sglq(phba, piocb->sli4_lxritag);
10171                if (!sglq)
10172                        return IOCB_ERROR;
10173        }
10174
10175        if (sglq) {
10176                piocb->sli4_lxritag = sglq->sli4_lxritag;
10177                piocb->sli4_xritag = sglq->sli4_xritag;
10178                if (NO_XRI == lpfc_sli4_bpl2sgl(phba, piocb, sglq))
10179                        return IOCB_ERROR;
10180        }
10181
10182        if (lpfc_sli4_iocb2wqe(phba, piocb, &wqe))
10183                return IOCB_ERROR;
10184
10185        if (lpfc_sli4_wq_put(wq, &wqe))
10186                return IOCB_ERROR;
10187        lpfc_sli_ringtxcmpl_put(phba, pring, piocb);
10188
10189        return 0;
10190}
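
/*
 * Sketch of the SLI_IOCB_RET_IOCB contract implemented above
 * (illustrative, not a call site in this file): without the flag a
 * congested path queues the iocb on the txq and still reports success;
 * with the flag the caller gets IOCB_BUSY back and keeps ownership:
 *
 *      rc = __lpfc_sli_issue_iocb_s4(phba, LPFC_ELS_RING, piocb,
 *                                    SLI_IOCB_RET_IOCB);
 *      if (rc == IOCB_BUSY)
 *              ... retry later; piocb was not queued ...
 */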
10191
10192/*
10193 * __lpfc_sli_issue_iocb - Wrapper func of lockless version for issuing iocb
10194 *
10195 * This routine wraps the actual lockless version of the issue iocb
10196 * function, dispatching through the function pointer in the lpfc_hba struct.
10197 *
10198 * Return codes:
10199 * IOCB_ERROR - Error
10200 * IOCB_SUCCESS - Success
10201 * IOCB_BUSY - Busy
10202 **/
10203int
10204__lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number,
10205                struct lpfc_iocbq *piocb, uint32_t flag)
10206{
10207        return phba->__lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
10208}
10209
10210/**
10211 * lpfc_sli_api_table_setup - Set up sli api function jump table
10212 * @phba: The hba struct for which this call is being executed.
10213 * @dev_grp: The HBA PCI-Device group number.
10214 *
10215 * This routine sets up the SLI interface API function jump table in @phba
10216 * struct.
10217 * Returns: 0 - success, -ENODEV - failure.
10218 **/
10219int
10220lpfc_sli_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
10221{
10222
10223        switch (dev_grp) {
10224        case LPFC_PCI_DEV_LP:
10225                phba->__lpfc_sli_issue_iocb = __lpfc_sli_issue_iocb_s3;
10226                phba->__lpfc_sli_release_iocbq = __lpfc_sli_release_iocbq_s3;
10227                break;
10228        case LPFC_PCI_DEV_OC:
10229                phba->__lpfc_sli_issue_iocb = __lpfc_sli_issue_iocb_s4;
10230                phba->__lpfc_sli_release_iocbq = __lpfc_sli_release_iocbq_s4;
10231                break;
10232        default:
10233                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10234                                "1419 Invalid HBA PCI-device group: 0x%x\n",
10235                                dev_grp);
10236                return -ENODEV;
10238        }
10239        phba->lpfc_get_iocb_from_iocbq = lpfc_get_iocb_from_iocbq;
10240        return 0;
10241}
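
/*
 * Dispatch sketch (assumed pattern, mirroring __lpfc_sli_issue_iocb
 * above): once this table is set up, callers do not branch on the SLI
 * revision; they simply go through the per-device-group pointers:
 *
 *      rc = phba->__lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
 *      phba->__lpfc_sli_release_iocbq(phba, iocbq);
 */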
10242
10243/**
10244 * lpfc_sli4_calc_ring - Calculates which ring to use
10245 * @phba: Pointer to HBA context object.
10246 * @piocb: Pointer to command iocb.
10247 *
10248 * For SLI4 only, FCP IO can be deferred to one of many WQs, based on
10249 * hba_wqidx, thus we need to calculate the corresponding ring.
10250 * Since ABORTS must go on the same WQ as the command they are
10251 * aborting, we use the command's hba_wqidx.
10252 */
10253struct lpfc_sli_ring *
10254lpfc_sli4_calc_ring(struct lpfc_hba *phba, struct lpfc_iocbq *piocb)
10255{
10256        struct lpfc_io_buf *lpfc_cmd;
10257
10258        if (piocb->iocb_flag & (LPFC_IO_FCP | LPFC_USE_FCPWQIDX)) {
10259                if (unlikely(!phba->sli4_hba.hdwq))
10260                        return NULL;
10261                /*
10262                 * for abort iocb hba_wqidx should already
10263                 * be setup based on what work queue we used.
10264                 */
10265                if (!(piocb->iocb_flag & LPFC_USE_FCPWQIDX)) {
10266                        lpfc_cmd = (struct lpfc_io_buf *)piocb->context1;
10267                        piocb->hba_wqidx = lpfc_cmd->hdwq_no;
10268                }
10269                return phba->sli4_hba.hdwq[piocb->hba_wqidx].io_wq->pring;
10270        } else {
10271                if (unlikely(!phba->sli4_hba.els_wq))
10272                        return NULL;
10273                piocb->hba_wqidx = 0;
10274                return phba->sli4_hba.els_wq->pring;
10275        }
10276}
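
/*
 * Minimal usage sketch (mirrors the SLI4 path in lpfc_sli_issue_iocb
 * below): resolve the ring first, then take its ring_lock to issue:
 *
 *      pring = lpfc_sli4_calc_ring(phba, piocb);
 *      if (unlikely(pring == NULL))
 *              return IOCB_ERROR;
 *      spin_lock_irqsave(&pring->ring_lock, iflags);
 *      rc = __lpfc_sli_issue_iocb(phba, pring->ringno, piocb, 0);
 *      spin_unlock_irqrestore(&pring->ring_lock, iflags);
 */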
10277
10278/**
10279 * lpfc_sli_issue_iocb - Wrapper function for __lpfc_sli_issue_iocb
10280 * @phba: Pointer to HBA context object.
10281 * @ring_number: Ring number
10282 * @piocb: Pointer to command iocb.
10283 * @flag: Flag indicating if this command can be put into txq.
10284 *
10285 * lpfc_sli_issue_iocb is a wrapper around the __lpfc_sli_issue_iocb
10286 * function. It takes the appropriate lock (the ring_lock for SLI4,
10287 * the hbalock for SLI2/3), calls __lpfc_sli_issue_iocb, and returns
10288 * whatever __lpfc_sli_issue_iocb returns. This wrapper is used by
10289 * callers that do not already hold the lock.
10290 **/
10291int
10292lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number,
10293                    struct lpfc_iocbq *piocb, uint32_t flag)
10294{
10295        struct lpfc_sli_ring *pring;
10296        struct lpfc_queue *eq;
10297        unsigned long iflags;
10298        int rc;
10299
10300        if (phba->sli_rev == LPFC_SLI_REV4) {
10301                eq = phba->sli4_hba.hdwq[piocb->hba_wqidx].hba_eq;
10302
10303                pring = lpfc_sli4_calc_ring(phba, piocb);
10304                if (unlikely(pring == NULL))
10305                        return IOCB_ERROR;
10306
10307                spin_lock_irqsave(&pring->ring_lock, iflags);
10308                rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
10309                spin_unlock_irqrestore(&pring->ring_lock, iflags);
10310
10311                lpfc_sli4_poll_eq(eq, LPFC_POLL_FASTPATH);
10312        } else {
10313                /* For now, SLI2/3 will still use hbalock */
10314                spin_lock_irqsave(&phba->hbalock, iflags);
10315                rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
10316                spin_unlock_irqrestore(&phba->hbalock, iflags);
10317        }
10318        return rc;
10319}
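
/*
 * Usage sketch, assuming a caller that holds no SLI locks: the wrapper
 * picks and takes the right lock itself, so a typical submission is
 * just (error handling abbreviated; lpfc_els_free_iocb as in the ELS
 * paths elsewhere in this driver):
 *
 *      rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
 *      if (rc == IOCB_ERROR)
 *              lpfc_els_free_iocb(phba, elsiocb);
 */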
10320
10321/**
10322 * lpfc_extra_ring_setup - Extra ring setup function
10323 * @phba: Pointer to HBA context object.
10324 *
10325 * This function is called while the driver attaches to the
10326 * HBA to set up the extra ring. The extra ring is used
10327 * only when the driver needs to support target mode
10328 * or IP over FC functionality.
10329 *
10330 * This function is called with no lock held. SLI3 only.
10331 **/
10332static int
10333lpfc_extra_ring_setup( struct lpfc_hba *phba)
10334{
10335        struct lpfc_sli *psli;
10336        struct lpfc_sli_ring *pring;
10337
10338        psli = &phba->sli;
10339
10340        /* Adjust cmd/rsp ring iocb entries more evenly */
10341
10342        /* Take some away from the FCP ring */
10343        pring = &psli->sli3_ring[LPFC_FCP_RING];
10344        pring->sli.sli3.numCiocb -= SLI2_IOCB_CMD_R1XTRA_ENTRIES;
10345        pring->sli.sli3.numRiocb -= SLI2_IOCB_RSP_R1XTRA_ENTRIES;
10346        pring->sli.sli3.numCiocb -= SLI2_IOCB_CMD_R3XTRA_ENTRIES;
10347        pring->sli.sli3.numRiocb -= SLI2_IOCB_RSP_R3XTRA_ENTRIES;
10348
10349        /* and give them to the extra ring */
10350        pring = &psli->sli3_ring[LPFC_EXTRA_RING];
10351
10352        pring->sli.sli3.numCiocb += SLI2_IOCB_CMD_R1XTRA_ENTRIES;
10353        pring->sli.sli3.numRiocb += SLI2_IOCB_RSP_R1XTRA_ENTRIES;
10354        pring->sli.sli3.numCiocb += SLI2_IOCB_CMD_R3XTRA_ENTRIES;
10355        pring->sli.sli3.numRiocb += SLI2_IOCB_RSP_R3XTRA_ENTRIES;
10356
10357        /* Setup default profile for this ring */
10358        pring->iotag_max = 4096;
10359        pring->num_mask = 1;
10360        pring->prt[0].profile = 0;      /* Mask 0 */
10361        pring->prt[0].rctl = phba->cfg_multi_ring_rctl;
10362        pring->prt[0].type = phba->cfg_multi_ring_type;
10363        pring->prt[0].lpfc_sli_rcv_unsol_event = NULL;
10364        return 0;
10365}
10366
10367/**
 * lpfc_sli_abts_err_handler - handle a failed ABTS request from an SLI3 port.
10368 * @phba: Pointer to HBA context object.
10369 * @iocbq: Pointer to iocb object.
10370 *
10371 * The async_event handler calls this routine when it receives
10372 * an ASYNC_STATUS_CN event from the port.  The port generates
10373 * this event when an Abort Sequence request to an rport fails
10374 * twice in succession.  The abort could be originated by the
10375 * driver or by the port.  The ABTS could have been for an ELS
10376 * or FCP IO.  The port only generates this event when an ABTS
10377 * fails to complete after one retry.
10378 */
10379static void
10380lpfc_sli_abts_err_handler(struct lpfc_hba *phba,
10381                          struct lpfc_iocbq *iocbq)
10382{
10383        struct lpfc_nodelist *ndlp = NULL;
10384        uint16_t rpi = 0, vpi = 0;
10385        struct lpfc_vport *vport = NULL;
10386
10387        /* The rpi in the ulpContext is vport-sensitive. */
10388        vpi = iocbq->iocb.un.asyncstat.sub_ctxt_tag;
10389        rpi = iocbq->iocb.ulpContext;
10390
10391        lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
10392                        "3092 Port generated ABTS async event "
10393                        "on vpi %d rpi %d status 0x%x\n",
10394                        vpi, rpi, iocbq->iocb.ulpStatus);
10395
10396        vport = lpfc_find_vport_by_vpid(phba, vpi);
10397        if (!vport)
10398                goto err_exit;
10399        ndlp = lpfc_findnode_rpi(vport, rpi);
10400        if (!ndlp || !NLP_CHK_NODE_ACT(ndlp))
10401                goto err_exit;
10402
10403        if (iocbq->iocb.ulpStatus == IOSTAT_LOCAL_REJECT)
10404                lpfc_sli_abts_recover_port(vport, ndlp);
10405        return;
10406
10407 err_exit:
10408        lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
10409                        "3095 Event Context not found, no "
10410                        "action on vpi %d rpi %d status 0x%x\n",
10411                        vpi, rpi, iocbq->iocb.ulpStatus);
10413}
10414
10415/**
 * lpfc_sli4_abts_err_handler - handle a failed ABTS request from an SLI4 port.
10416 * @phba: pointer to HBA context object.
10417 * @ndlp: nodelist pointer for the impacted rport.
10418 * @axri: pointer to the wcqe containing the failed exchange.
10419 *
10420 * The driver calls this routine when it receives an ABORT_XRI_FCP CQE from the
10421 * port.  The port generates this event when an abort exchange request to an
10422 * rport fails twice in succession with no reply.  The abort could be originated
10423 * by the driver or by the port.  The ABTS could have been for an ELS or FCP IO.
10424 */
10425void
10426lpfc_sli4_abts_err_handler(struct lpfc_hba *phba,
10427                           struct lpfc_nodelist *ndlp,
10428                           struct sli4_wcqe_xri_aborted *axri)
10429{
10430        struct lpfc_vport *vport;
10431        uint32_t ext_status = 0;
10432
10433        if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
10434                lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
10435                                "3115 Node Context not found, driver "
10436                                "ignoring abts err event\n");
10437                return;
10438        }
10439
10440        vport = ndlp->vport;
10441        lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
10442                        "3116 Port generated FCP XRI ABORT event on "
10443                        "vpi %d rpi %d xri x%x status 0x%x parameter x%x\n",
10444                        ndlp->vport->vpi, phba->sli4_hba.rpi_ids[ndlp->nlp_rpi],
10445                        bf_get(lpfc_wcqe_xa_xri, axri),
10446                        bf_get(lpfc_wcqe_xa_status, axri),
10447                        axri->parameter);
10448
10449        /*
10450         * Catch the ABTS protocol failure case.  Older OCe FW releases returned
10451         * LOCAL_REJECT and 0 for a failed ABTS exchange and later OCe and
10452         * LPe FW releases returned LOCAL_REJECT and SEQUENCE_TIMEOUT.
10453         */
10454        ext_status = axri->parameter & IOERR_PARAM_MASK;
10455        if ((bf_get(lpfc_wcqe_xa_status, axri) == IOSTAT_LOCAL_REJECT) &&
10456            ((ext_status == IOERR_SEQUENCE_TIMEOUT) || (ext_status == 0)))
10457                lpfc_sli_abts_recover_port(vport, ndlp);
10458}
10459
10460/**
10461 * lpfc_sli_async_event_handler - ASYNC iocb handler function
10462 * @phba: Pointer to HBA context object.
10463 * @pring: Pointer to driver SLI ring object.
10464 * @iocbq: Pointer to iocb object.
10465 *
10466 * This function is called by the slow ring event handler
10467 * function when there is an ASYNC event iocb in the ring.
10468 * This function is called with no lock held.
10469 * Currently this function handles temperature and ABTS-error
10470 * (ASYNC_STATUS_CN) ASYNC events. It decodes the temperature sensor
10471 * event message and posts events for the management applications.
10472 **/
10473static void
10474lpfc_sli_async_event_handler(struct lpfc_hba * phba,
10475        struct lpfc_sli_ring * pring, struct lpfc_iocbq * iocbq)
10476{
10477        IOCB_t *icmd;
10478        uint16_t evt_code;
10479        struct temp_event temp_event_data;
10480        struct Scsi_Host *shost;
10481        uint32_t *iocb_w;
10482
10483        icmd = &iocbq->iocb;
10484        evt_code = icmd->un.asyncstat.evt_code;
10485
10486        switch (evt_code) {
10487        case ASYNC_TEMP_WARN:
10488        case ASYNC_TEMP_SAFE:
10489                temp_event_data.data = (uint32_t) icmd->ulpContext;
10490                temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
10491                if (evt_code == ASYNC_TEMP_WARN) {
10492                        temp_event_data.event_code = LPFC_THRESHOLD_TEMP;
10493                        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10494                                "0347 Adapter is very hot, please take "
10495                                "corrective action. temperature : %d Celsius\n",
10496                                (uint32_t) icmd->ulpContext);
10497                } else {
10498                        temp_event_data.event_code = LPFC_NORMAL_TEMP;
10499                        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10500                                "0340 Adapter temperature is OK now. "
10501                                "temperature : %d Celsius\n",
10502                                (uint32_t) icmd->ulpContext);
10503                }
10504
10505                /* Send temperature change event to applications */
10506                shost = lpfc_shost_from_vport(phba->pport);
10507                fc_host_post_vendor_event(shost, fc_get_event_number(),
10508                        sizeof(temp_event_data), (char *) &temp_event_data,
10509                        LPFC_NL_VENDOR_ID);
10510                break;
10511        case ASYNC_STATUS_CN:
10512                lpfc_sli_abts_err_handler(phba, iocbq);
10513                break;
10514        default:
10515                iocb_w = (uint32_t *) icmd;
10516                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10517                        "0346 Ring %d handler: unexpected ASYNC_STATUS"
10518                        " evt_code 0x%x\n"
10519                        "W0  0x%08x W1  0x%08x W2  0x%08x W3  0x%08x\n"
10520                        "W4  0x%08x W5  0x%08x W6  0x%08x W7  0x%08x\n"
10521                        "W8  0x%08x W9  0x%08x W10 0x%08x W11 0x%08x\n"
10522                        "W12 0x%08x W13 0x%08x W14 0x%08x W15 0x%08x\n",
10523                        pring->ringno, icmd->un.asyncstat.evt_code,
10524                        iocb_w[0], iocb_w[1], iocb_w[2], iocb_w[3],
10525                        iocb_w[4], iocb_w[5], iocb_w[6], iocb_w[7],
10526                        iocb_w[8], iocb_w[9], iocb_w[10], iocb_w[11],
10527                        iocb_w[12], iocb_w[13], iocb_w[14], iocb_w[15]);
10528
10529                break;
10530        }
10531}
10532
10533
10534/**
10535 * lpfc_sli4_setup - SLI ring setup function
10536 * @phba: Pointer to HBA context object.
10537 *
10538 * lpfc_sli4_setup sets up the unsolicited receive event masks for the
10539 * SLI4 ELS ring. This function is called while the driver attaches to
10540 * the HBA and before the interrupts are enabled, so there is no need
10541 * for locking.
10542 *
10543 * This function always returns 0.
10544 **/
10545int
10546lpfc_sli4_setup(struct lpfc_hba *phba)
10547{
10548        struct lpfc_sli_ring *pring;
10549
10550        pring = phba->sli4_hba.els_wq->pring;
10551        pring->num_mask = LPFC_MAX_RING_MASK;
10552        pring->prt[0].profile = 0;      /* Mask 0 */
10553        pring->prt[0].rctl = FC_RCTL_ELS_REQ;
10554        pring->prt[0].type = FC_TYPE_ELS;
10555        pring->prt[0].lpfc_sli_rcv_unsol_event =
10556            lpfc_els_unsol_event;
10557        pring->prt[1].profile = 0;      /* Mask 1 */
10558        pring->prt[1].rctl = FC_RCTL_ELS_REP;
10559        pring->prt[1].type = FC_TYPE_ELS;
10560        pring->prt[1].lpfc_sli_rcv_unsol_event =
10561            lpfc_els_unsol_event;
10562        pring->prt[2].profile = 0;      /* Mask 2 */
10563        /* NameServer Inquiry */
10564        pring->prt[2].rctl = FC_RCTL_DD_UNSOL_CTL;
10565        /* NameServer */
10566        pring->prt[2].type = FC_TYPE_CT;
10567        pring->prt[2].lpfc_sli_rcv_unsol_event =
10568            lpfc_ct_unsol_event;
10569        pring->prt[3].profile = 0;      /* Mask 3 */
10570        /* NameServer response */
10571        pring->prt[3].rctl = FC_RCTL_DD_SOL_CTL;
10572        /* NameServer */
10573        pring->prt[3].type = FC_TYPE_CT;
10574        pring->prt[3].lpfc_sli_rcv_unsol_event =
10575            lpfc_ct_unsol_event;
10576        return 0;
10577}
10578
10579/**
10580 * lpfc_sli_setup - SLI ring setup function
10581 * @phba: Pointer to HBA context object.
10582 *
10583 * lpfc_sli_setup sets up the rings of the SLI interface with the
10584 * number of iocbs per ring and the iotags. This function is
10585 * called while the driver attaches to the HBA and before the
10586 * interrupts are enabled, so there is no need for locking.
10587 *
10588 * This function always returns 0. SLI3 only.
10589 **/
10590int
10591lpfc_sli_setup(struct lpfc_hba *phba)
10592{
10593        int i, totiocbsize = 0;
10594        struct lpfc_sli *psli = &phba->sli;
10595        struct lpfc_sli_ring *pring;
10596
10597        psli->num_rings = MAX_SLI3_CONFIGURED_RINGS;
10598        psli->sli_flag = 0;
10599
10600        psli->iocbq_lookup = NULL;
10601        psli->iocbq_lookup_len = 0;
10602        psli->last_iotag = 0;
10603
10604        for (i = 0; i < psli->num_rings; i++) {
10605                pring = &psli->sli3_ring[i];
10606                switch (i) {
10607                case LPFC_FCP_RING:     /* ring 0 - FCP */
10608                        /* numCiocb and numRiocb are used in config_port */
10609                        pring->sli.sli3.numCiocb = SLI2_IOCB_CMD_R0_ENTRIES;
10610                        pring->sli.sli3.numRiocb = SLI2_IOCB_RSP_R0_ENTRIES;
10611                        pring->sli.sli3.numCiocb +=
10612                                SLI2_IOCB_CMD_R1XTRA_ENTRIES;
10613                        pring->sli.sli3.numRiocb +=
10614                                SLI2_IOCB_RSP_R1XTRA_ENTRIES;
10615                        pring->sli.sli3.numCiocb +=
10616                                SLI2_IOCB_CMD_R3XTRA_ENTRIES;
10617                        pring->sli.sli3.numRiocb +=
10618                                SLI2_IOCB_RSP_R3XTRA_ENTRIES;
10619                        pring->sli.sli3.sizeCiocb = (phba->sli_rev == 3) ?
10620                                                        SLI3_IOCB_CMD_SIZE :
10621                                                        SLI2_IOCB_CMD_SIZE;
10622                        pring->sli.sli3.sizeRiocb = (phba->sli_rev == 3) ?
10623                                                        SLI3_IOCB_RSP_SIZE :
10624                                                        SLI2_IOCB_RSP_SIZE;
10625                        pring->iotag_ctr = 0;
10626                        pring->iotag_max =
10627                            (phba->cfg_hba_queue_depth * 2);
10628                        pring->fast_iotag = pring->iotag_max;
10629                        pring->num_mask = 0;
10630                        break;
10631                case LPFC_EXTRA_RING:   /* ring 1 - EXTRA */
10632                        /* numCiocb and numRiocb are used in config_port */
10633                        pring->sli.sli3.numCiocb = SLI2_IOCB_CMD_R1_ENTRIES;
10634                        pring->sli.sli3.numRiocb = SLI2_IOCB_RSP_R1_ENTRIES;
10635                        pring->sli.sli3.sizeCiocb = (phba->sli_rev == 3) ?
10636                                                        SLI3_IOCB_CMD_SIZE :
10637                                                        SLI2_IOCB_CMD_SIZE;
10638                        pring->sli.sli3.sizeRiocb = (phba->sli_rev == 3) ?
10639                                                        SLI3_IOCB_RSP_SIZE :
10640                                                        SLI2_IOCB_RSP_SIZE;
10641                        pring->iotag_max = phba->cfg_hba_queue_depth;
10642                        pring->num_mask = 0;
10643                        break;
10644                case LPFC_ELS_RING:     /* ring 2 - ELS / CT */
10645                        /* numCiocb and numRiocb are used in config_port */
10646                        pring->sli.sli3.numCiocb = SLI2_IOCB_CMD_R2_ENTRIES;
10647                        pring->sli.sli3.numRiocb = SLI2_IOCB_RSP_R2_ENTRIES;
10648                        pring->sli.sli3.sizeCiocb = (phba->sli_rev == 3) ?
10649                                                        SLI3_IOCB_CMD_SIZE :
10650                                                        SLI2_IOCB_CMD_SIZE;
10651                        pring->sli.sli3.sizeRiocb = (phba->sli_rev == 3) ?
10652                                                        SLI3_IOCB_RSP_SIZE :
10653                                                        SLI2_IOCB_RSP_SIZE;
10654                        pring->fast_iotag = 0;
10655                        pring->iotag_ctr = 0;
10656                        pring->iotag_max = 4096;
10657                        pring->lpfc_sli_rcv_async_status =
10658                                lpfc_sli_async_event_handler;
10659                        pring->num_mask = LPFC_MAX_RING_MASK;
10660                        pring->prt[0].profile = 0;      /* Mask 0 */
10661                        pring->prt[0].rctl = FC_RCTL_ELS_REQ;
10662                        pring->prt[0].type = FC_TYPE_ELS;
10663                        pring->prt[0].lpfc_sli_rcv_unsol_event =
10664                            lpfc_els_unsol_event;
10665                        pring->prt[1].profile = 0;      /* Mask 1 */
10666                        pring->prt[1].rctl = FC_RCTL_ELS_REP;
10667                        pring->prt[1].type = FC_TYPE_ELS;
10668                        pring->prt[1].lpfc_sli_rcv_unsol_event =
10669                            lpfc_els_unsol_event;
10670                        pring->prt[2].profile = 0;      /* Mask 2 */
10671                        /* NameServer Inquiry */
10672                        pring->prt[2].rctl = FC_RCTL_DD_UNSOL_CTL;
10673                        /* NameServer */
10674                        pring->prt[2].type = FC_TYPE_CT;
10675                        pring->prt[2].lpfc_sli_rcv_unsol_event =
10676                            lpfc_ct_unsol_event;
10677                        pring->prt[3].profile = 0;      /* Mask 3 */
10678                        /* NameServer response */
10679                        pring->prt[3].rctl = FC_RCTL_DD_SOL_CTL;
10680                        /* NameServer */
10681                        pring->prt[3].type = FC_TYPE_CT;
10682                        pring->prt[3].lpfc_sli_rcv_unsol_event =
10683                            lpfc_ct_unsol_event;
10684                        break;
10685                }
10686                totiocbsize += (pring->sli.sli3.numCiocb *
10687                        pring->sli.sli3.sizeCiocb) +
10688                        (pring->sli.sli3.numRiocb * pring->sli.sli3.sizeRiocb);
10689        }
10690        if (totiocbsize > MAX_SLIM_IOCB_SIZE) {
10691                /* Too many cmd / rsp ring entries in SLI2 SLIM */
10692                printk(KERN_ERR "%d:0462 Too many cmd / rsp ring entries in "
10693                       "SLI2 SLIM Data: x%x x%lx\n",
10694                       phba->brd_no, totiocbsize,
10695                       (unsigned long) MAX_SLIM_IOCB_SIZE);
10696        }
10697        if (phba->cfg_multi_ring_support == 2)
10698                lpfc_extra_ring_setup(phba);
10699
10700        return 0;
10701}
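
/*
 * Worked example of the SLIM budget check above (the numbers here are
 * illustrative, not the actual SLI2_IOCB_* values): with three rings of
 * 128 command and 128 response entries at 32 bytes each,
 *
 *      totiocbsize = 3 * (128 * 32 + 128 * 32) = 24576 bytes,
 *
 * which must fit within MAX_SLIM_IOCB_SIZE or the 0462 message above is
 * logged.
 */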
10702
10703/**
10704 * lpfc_sli4_queue_init - Queue initialization function
10705 * @phba: Pointer to HBA context object.
10706 *
10707 * lpfc_sli4_queue_init sets up the mailbox queue lists and the txq and
10708 * txcmplq lists for each work queue ring. This function is called
10709 * during the initialization of the SLI interface of an HBA.
10710 * It is called with no lock held.
10713 **/
10714void
10715lpfc_sli4_queue_init(struct lpfc_hba *phba)
10716{
10717        struct lpfc_sli *psli;
10718        struct lpfc_sli_ring *pring;
10719        int i;
10720
10721        psli = &phba->sli;
10722        spin_lock_irq(&phba->hbalock);
10723        INIT_LIST_HEAD(&psli->mboxq);
10724        INIT_LIST_HEAD(&psli->mboxq_cmpl);
10725        /* Initialize list headers for txq and txcmplq as doubly linked lists */
10726        for (i = 0; i < phba->cfg_hdw_queue; i++) {
10727                pring = phba->sli4_hba.hdwq[i].io_wq->pring;
10728                pring->flag = 0;
10729                pring->ringno = LPFC_FCP_RING;
10730                pring->txcmplq_cnt = 0;
10731                INIT_LIST_HEAD(&pring->txq);
10732                INIT_LIST_HEAD(&pring->txcmplq);
10733                INIT_LIST_HEAD(&pring->iocb_continueq);
10734                spin_lock_init(&pring->ring_lock);
10735        }
10736        pring = phba->sli4_hba.els_wq->pring;
10737        pring->flag = 0;
10738        pring->ringno = LPFC_ELS_RING;
10739        pring->txcmplq_cnt = 0;
10740        INIT_LIST_HEAD(&pring->txq);
10741        INIT_LIST_HEAD(&pring->txcmplq);
10742        INIT_LIST_HEAD(&pring->iocb_continueq);
10743        spin_lock_init(&pring->ring_lock);
10744
10745        if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
10746                pring = phba->sli4_hba.nvmels_wq->pring;
10747                pring->flag = 0;
10748                pring->ringno = LPFC_ELS_RING;
10749                pring->txcmplq_cnt = 0;
10750                INIT_LIST_HEAD(&pring->txq);
10751                INIT_LIST_HEAD(&pring->txcmplq);
10752                INIT_LIST_HEAD(&pring->iocb_continueq);
10753                spin_lock_init(&pring->ring_lock);
10754        }
10755
10756        spin_unlock_irq(&phba->hbalock);
10757}
10758
10759/**
10760 * lpfc_sli_queue_init - Queue initialization function
10761 * @phba: Pointer to HBA context object.
10762 *
10763 * lpfc_sli_queue_init sets up the mailbox queue lists and the iocb
10764 * queues for each ring, and initializes the ring indices of each
10765 * ring. This function is called during the initialization of the
10766 * SLI interface of an HBA. It is called with no lock held.
10769 **/
10770void
10771lpfc_sli_queue_init(struct lpfc_hba *phba)
10772{
10773        struct lpfc_sli *psli;
10774        struct lpfc_sli_ring *pring;
10775        int i;
10776
10777        psli = &phba->sli;
10778        spin_lock_irq(&phba->hbalock);
10779        INIT_LIST_HEAD(&psli->mboxq);
10780        INIT_LIST_HEAD(&psli->mboxq_cmpl);
10781        /* Initialize list headers for txq and txcmplq as doubly linked lists */
10782        for (i = 0; i < psli->num_rings; i++) {
10783                pring = &psli->sli3_ring[i];
10784                pring->ringno = i;
10785                pring->sli.sli3.next_cmdidx  = 0;
10786                pring->sli.sli3.local_getidx = 0;
10787                pring->sli.sli3.cmdidx = 0;
10788                INIT_LIST_HEAD(&pring->iocb_continueq);
10789                INIT_LIST_HEAD(&pring->iocb_continue_saveq);
10790                INIT_LIST_HEAD(&pring->postbufq);
10791                pring->flag = 0;
10792                INIT_LIST_HEAD(&pring->txq);
10793                INIT_LIST_HEAD(&pring->txcmplq);
10794                spin_lock_init(&pring->ring_lock);
10795        }
10796        spin_unlock_irq(&phba->hbalock);
10797}
10798
10799/**
10800 * lpfc_sli_mbox_sys_flush - Flush mailbox command sub-system
10801 * @phba: Pointer to HBA context object.
10802 *
10803 * This routine flushes the mailbox command subsystem. It will unconditionally
10804 * flush all the mailbox commands in the three possible stages in the mailbox
10805 * command sub-system: pending mailbox command queue; the outstanding mailbox
10806 * command; and the completed mailbox command queue. It is the caller's
10807 * responsibility to make sure that the driver is in the proper state to
10808 * flush the mailbox command sub-system. Namely, the posting of mailbox
10809 * commands into the pending mailbox command queue from the various
10810 * clients must be stopped; either the HBA is in a state in which it will
10811 * never work on the outstanding mailbox command (such as in EEH or ERATT
10812 * conditions) or the outstanding mailbox command has been completed.
10813 **/
10814static void
10815lpfc_sli_mbox_sys_flush(struct lpfc_hba *phba)
10816{
10817        LIST_HEAD(completions);
10818        struct lpfc_sli *psli = &phba->sli;
10819        LPFC_MBOXQ_t *pmb;
10820        unsigned long iflag;
10821
10822        /* Disable softirqs, including timers from obtaining phba->hbalock */
10823        local_bh_disable();
10824
10825        /* Flush all the mailbox commands in the mbox system */
10826        spin_lock_irqsave(&phba->hbalock, iflag);
10827
10828        /* The pending mailbox command queue */
10829        list_splice_init(&phba->sli.mboxq, &completions);
10830        /* The outstanding active mailbox command */
10831        if (psli->mbox_active) {
10832                list_add_tail(&psli->mbox_active->list, &completions);
10833                psli->mbox_active = NULL;
10834                psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
10835        }
10836        /* The completed mailbox command queue */
10837        list_splice_init(&phba->sli.mboxq_cmpl, &completions);
10838        spin_unlock_irqrestore(&phba->hbalock, iflag);
10839
10840        /* Enable softirqs again, done with phba->hbalock */
10841        local_bh_enable();
10842
10843        /* Return all flushed mailbox commands with MBX_NOT_FINISHED status */
10844        while (!list_empty(&completions)) {
10845                list_remove_head(&completions, pmb, LPFC_MBOXQ_t, list);
10846                pmb->u.mb.mbxStatus = MBX_NOT_FINISHED;
10847                if (pmb->mbox_cmpl)
10848                        pmb->mbox_cmpl(phba, pmb);
10849        }
10850}
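
/*
 * Sketch of what a flushed client sees (an assumed handler shape, not a
 * handler defined in this file): every command returned by the flush
 * carries MBX_NOT_FINISHED, so a typical mbox_cmpl releases its
 * resources on that status:
 *
 *      static void example_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
 *      {
 *              if (pmb->u.mb.mbxStatus == MBX_NOT_FINISHED)
 *                      ... undo state tied to this command ...
 *              mempool_free(pmb, phba->mbox_mem_pool);
 *      }
 */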
10851
10852/**
10853 * lpfc_sli_host_down - Vport cleanup function
10854 * @vport: Pointer to virtual port object.
10855 *
10856 * lpfc_sli_host_down is called to clean up the resources
10857 * associated with a vport before destroying virtual
10858 * port data structures.
10859 * This function does the following operations:
10860 * - Free discovery resources associated with this virtual
10861 *   port.
10862 * - Free iocbs associated with this virtual port in
10863 *   the txq.
10864 * - Send abort for all iocb commands associated with this
10865 *   vport in txcmplq.
10866 *
10867 * This function is called with no lock held and always returns 1.
10868 **/
10869int
10870lpfc_sli_host_down(struct lpfc_vport *vport)
10871{
10872        LIST_HEAD(completions);
10873        struct lpfc_hba *phba = vport->phba;
10874        struct lpfc_sli *psli = &phba->sli;
10875        struct lpfc_queue *qp = NULL;
10876        struct lpfc_sli_ring *pring;
10877        struct lpfc_iocbq *iocb, *next_iocb;
10878        int i;
10879        unsigned long flags = 0;
10880        uint16_t prev_pring_flag;
10881
10882        lpfc_cleanup_discovery_resources(vport);
10883
10884        spin_lock_irqsave(&phba->hbalock, flags);
10885
10886        /*
10887         * Error everything on the txq since these iocbs
10888         * have not been given to the FW yet.
10889         * Also issue ABTS for everything on the txcmplq
10890         */
10891        if (phba->sli_rev != LPFC_SLI_REV4) {
10892                for (i = 0; i < psli->num_rings; i++) {
10893                        pring = &psli->sli3_ring[i];
10894                        prev_pring_flag = pring->flag;
10895                        /* Only slow rings */
10896                        if (pring->ringno == LPFC_ELS_RING) {
10897                                pring->flag |= LPFC_DEFERRED_RING_EVENT;
10898                                /* Set the lpfc data pending flag */
10899                                set_bit(LPFC_DATA_READY, &phba->data_flags);
10900                        }
10901                        list_for_each_entry_safe(iocb, next_iocb,
10902                                                 &pring->txq, list) {
10903                                if (iocb->vport != vport)
10904                                        continue;
10905                                list_move_tail(&iocb->list, &completions);
10906                        }
10907                        list_for_each_entry_safe(iocb, next_iocb,
10908                                                 &pring->txcmplq, list) {
10909                                if (iocb->vport != vport)
10910                                        continue;
10911                                lpfc_sli_issue_abort_iotag(phba, pring, iocb);
10912                        }
10913                        pring->flag = prev_pring_flag;
10914                }
10915        } else {
10916                list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
10917                        pring = qp->pring;
10918                        if (!pring)
10919                                continue;
10920                        if (pring == phba->sli4_hba.els_wq->pring) {
10921                                pring->flag |= LPFC_DEFERRED_RING_EVENT;
10922                                /* Set the lpfc data pending flag */
10923                                set_bit(LPFC_DATA_READY, &phba->data_flags);
10924                        }
10925                        prev_pring_flag = pring->flag;
10926                        spin_lock(&pring->ring_lock);
10927                        list_for_each_entry_safe(iocb, next_iocb,
10928                                                 &pring->txq, list) {
10929                                if (iocb->vport != vport)
10930                                        continue;
10931                                list_move_tail(&iocb->list, &completions);
10932                        }
10933                        spin_unlock(&pring->ring_lock);
10934                        list_for_each_entry_safe(iocb, next_iocb,
10935                                                 &pring->txcmplq, list) {
10936                                if (iocb->vport != vport)
10937                                        continue;
10938                                lpfc_sli_issue_abort_iotag(phba, pring, iocb);
10939                        }
10940                        pring->flag = prev_pring_flag;
10941                }
10942        }
10943        spin_unlock_irqrestore(&phba->hbalock, flags);
10944
10945        /* Cancel all the IOCBs from the completions list */
10946        lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
10947                              IOERR_SLI_DOWN);
10948        return 1;
10949}
10950
10951/**
10952 * lpfc_sli_hba_down - Resource cleanup function for the HBA
10953 * @phba: Pointer to HBA context object.
10954 *
10955 * This function cleans up all iocbs, buffers, and mailbox commands
10956 * while shutting down the HBA. This function is called with no
10957 * lock held and always returns 1.
10958 * This function does the following to cleanup driver resources:
10959 * - Free discovery resources for each virtual port
10960 * - Cleanup any pending fabric iocbs
10961 * - Iterate through the iocb txq and free each entry
10962 *   in the list.
10963 * - Free up any buffer posted to the HBA
10964 * - Free mailbox commands in the mailbox queue.
10965 **/
10966int
10967lpfc_sli_hba_down(struct lpfc_hba *phba)
10968{
10969        LIST_HEAD(completions);
10970        struct lpfc_sli *psli = &phba->sli;
10971        struct lpfc_queue *qp = NULL;
10972        struct lpfc_sli_ring *pring;
10973        struct lpfc_dmabuf *buf_ptr;
10974        unsigned long flags = 0;
10975        int i;
10976
10977        /* Shutdown the mailbox command sub-system */
10978        lpfc_sli_mbox_sys_shutdown(phba, LPFC_MBX_WAIT);
10979
10980        lpfc_hba_down_prep(phba);
10981
10982        /* Disable softirqs, including timers from obtaining phba->hbalock */
10983        local_bh_disable();
10984
10985        lpfc_fabric_abort_hba(phba);
10986
10987        spin_lock_irqsave(&phba->hbalock, flags);
10988
10989        /*
10990         * Error everything on the txq since these iocbs
10991         * have not been given to the FW yet.
10992         */
10993        if (phba->sli_rev != LPFC_SLI_REV4) {
10994                for (i = 0; i < psli->num_rings; i++) {
10995                        pring = &psli->sli3_ring[i];
10996                        /* Only slow rings */
10997                        if (pring->ringno == LPFC_ELS_RING) {
10998                                pring->flag |= LPFC_DEFERRED_RING_EVENT;
10999                                /* Set the lpfc data pending flag */
11000                                set_bit(LPFC_DATA_READY, &phba->data_flags);
11001                        }
11002                        list_splice_init(&pring->txq, &completions);
11003                }
11004        } else {
11005                list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
11006                        pring = qp->pring;
11007                        if (!pring)
11008                                continue;
11009                        spin_lock(&pring->ring_lock);
11010                        list_splice_init(&pring->txq, &completions);
11011                        spin_unlock(&pring->ring_lock);
11012                        if (pring == phba->sli4_hba.els_wq->pring) {
11013                                pring->flag |= LPFC_DEFERRED_RING_EVENT;
11014                                /* Set the lpfc data pending flag */
11015                                set_bit(LPFC_DATA_READY, &phba->data_flags);
11016                        }
11017                }
11018        }
11019        spin_unlock_irqrestore(&phba->hbalock, flags);
11020
11021        /* Cancel all the IOCBs from the completions list */
11022        lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
11023                              IOERR_SLI_DOWN);
11024
11025        spin_lock_irqsave(&phba->hbalock, flags);
11026        list_splice_init(&phba->elsbuf, &completions);
11027        phba->elsbuf_cnt = 0;
11028        phba->elsbuf_prev_cnt = 0;
11029        spin_unlock_irqrestore(&phba->hbalock, flags);
11030
11031        while (!list_empty(&completions)) {
11032                list_remove_head(&completions, buf_ptr,
11033                        struct lpfc_dmabuf, list);
11034                lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
11035                kfree(buf_ptr);
11036        }
11037
11038        /* Enable softirqs again, done with phba->hbalock */
11039        local_bh_enable();
11040
11041        /* Return any active mbox cmds */
11042        del_timer_sync(&psli->mbox_tmo);
11043
11044        spin_lock_irqsave(&phba->pport->work_port_lock, flags);
11045        phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
11046        spin_unlock_irqrestore(&phba->pport->work_port_lock, flags);
11047
11048        return 1;
11049}
11050
11051/**
11052 * lpfc_sli_pcimem_bcopy - SLI memory copy function
11053 * @srcp: Source memory pointer.
11054 * @destp: Destination memory pointer.
11055 * @cnt: Number of bytes to be copied (stepped one 32-bit word at a time).
11056 *
11057 * This function is used for copying data between driver memory
11058 * and the SLI memory. This function also changes the endianness
11059 * of each word if native endianness is different from SLI
11060 * endianness. This function can be called with or without
11061 * lock.
11062 **/
11063void
11064lpfc_sli_pcimem_bcopy(void *srcp, void *destp, uint32_t cnt)
11065{
11066        uint32_t *src = srcp;
11067        uint32_t *dest = destp;
11068        uint32_t ldata;
11069        int i;
11070
11071        for (i = 0; i < (int)cnt; i += sizeof (uint32_t)) {
11072                ldata = *src;
11073                ldata = le32_to_cpu(ldata);
11074                *dest = ldata;
11075                src++;
11076                dest++;
11077        }
11078}
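
/*
 * Usage sketch (hypothetical buffers; note the count is in bytes, not
 * words): copying a mailbox-sized region out of SLIM while converting
 * little-endian words might look like:
 *
 *      lpfc_sli_pcimem_bcopy(slim_mbox, local_mbox, sizeof(MAILBOX_t));
 */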
11079
11080
11081/**
11082 * lpfc_sli_bemem_bcopy - SLI memory copy function
11083 * @srcp: Source memory pointer.
11084 * @destp: Destination memory pointer.
11085 * @cnt: Number of bytes to be copied (stepped one 32-bit word at a time).
11086 *
11087 * This function is used for copying data from a data structure with
11088 * big endian representation to the local endianness.
11089 * This function can be called with or without lock.
11090 **/
11091void
11092lpfc_sli_bemem_bcopy(void *srcp, void *destp, uint32_t cnt)
11093{
11094        uint32_t *src = srcp;
11095        uint32_t *dest = destp;
11096        uint32_t ldata;
11097        int i;
11098
11099        for (i = 0; i < (int)cnt; i += sizeof(uint32_t)) {
11100                ldata = *src;
11101                ldata = be32_to_cpu(ldata);
11102                *dest = ldata;
11103                src++;
11104                dest++;
11105        }
11106}
11107
11108/**
11109 * lpfc_sli_ringpostbuf_put - Function to add a buffer to postbufq
11110 * @phba: Pointer to HBA context object.
11111 * @pring: Pointer to driver SLI ring object.
11112 * @mp: Pointer to driver buffer object.
11113 *
11114 * This function is called with no lock held.
11115 * It always returns zero after adding the buffer to the postbufq
11116 * buffer list.
11117 **/
11118int
11119lpfc_sli_ringpostbuf_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
11120                         struct lpfc_dmabuf *mp)
11121{
11122        /* Stick struct lpfc_dmabuf at the end of postbufq so the driver
11123         * can look it up later.
11124         */
11124        spin_lock_irq(&phba->hbalock);
11125        list_add_tail(&mp->list, &pring->postbufq);
11126        pring->postbufq_cnt++;
11127        spin_unlock_irq(&phba->hbalock);
11128        return 0;
11129}
11130
11131/**
11132 * lpfc_sli_get_buffer_tag - allocates a tag for a CMD_QUE_XRI64_CX buffer
11133 * @phba: Pointer to HBA context object.
11134 *
11135 * When HBQ is enabled, buffers are searched based on tags. This function
11136 * allocates a tag for a buffer posted using the CMD_QUE_XRI64_CX iocb. The
11137 * tag is bitwise OR-ed with QUE_BUFTAG_BIT to make sure that the tag
11138 * does not conflict with tags of buffer posted for unsolicited events.
11139 * The function returns the allocated tag. The function is called with
11140 * no locks held.
11141 **/
11142uint32_t
11143lpfc_sli_get_buffer_tag(struct lpfc_hba *phba)
11144{
11145        spin_lock_irq(&phba->hbalock);
11146        phba->buffer_tag_count++;
11147        /*
11148         * Always set the QUE_BUFTAG_BIT to distinguish this tag
11149         * from a tag assigned by HBQ.
11150         */
11151        phba->buffer_tag_count |= QUE_BUFTAG_BIT;
11152        spin_unlock_irq(&phba->hbalock);
11153        return phba->buffer_tag_count;
11154}
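
/*
 * Round-trip sketch (hypothetical caller; tag_from_iocb stands in for
 * the tag carried by the CMD_IOCB_RET_XRI64_CX completion): allocate a
 * tag, record it in the buffer, post it, then recover the buffer when
 * the completion arrives:
 *
 *      mp->buffer_tag = lpfc_sli_get_buffer_tag(phba);
 *      lpfc_sli_ringpostbuf_put(phba, pring, mp);
 *      ...
 *      mp = lpfc_sli_ring_taggedbuf_get(phba, pring, tag_from_iocb);
 */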
11155
11156/**
11157 * lpfc_sli_ring_taggedbuf_get - find HBQ buffer associated with given tag
11158 * @phba: Pointer to HBA context object.
11159 * @pring: Pointer to driver SLI ring object.
11160 * @tag: Buffer tag.
11161 *
11162 * Buffers posted using CMD_QUE_XRI64_CX iocb are in pring->postbufq
11163 * list. After HBA DMA data to these buffers, CMD_IOCB_RET_XRI64_CX
11164 * iocb is posted to the response ring with the tag of the buffer.
11165 * This function searches the pring->postbufq list using the tag
11166 * to find the buffer associated with the CMD_IOCB_RET_XRI64_CX
11167 * iocb. If the buffer is found, the lpfc_dmabuf object of the
11168 * buffer is returned to the caller; otherwise NULL is returned.
11169 * This function is called with no lock held.
11170 **/
11171struct lpfc_dmabuf *
11172lpfc_sli_ring_taggedbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
11173                        uint32_t tag)
11174{
11175        struct lpfc_dmabuf *mp, *next_mp;
11176        struct list_head *slp = &pring->postbufq;
11177
11178        /* Search postbufq, from the beginning, looking for a match on tag */
11179        spin_lock_irq(&phba->hbalock);
11180        list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
11181                if (mp->buffer_tag == tag) {
11182                        list_del_init(&mp->list);
11183                        pring->postbufq_cnt--;
11184                        spin_unlock_irq(&phba->hbalock);
11185                        return mp;
11186                }
11187        }
11188
11189        spin_unlock_irq(&phba->hbalock);
11190        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11191                        "0402 Cannot find virtual addr for buffer tag on "
11192                        "ring %d Data x%lx x%px x%px x%x\n",
11193                        pring->ringno, (unsigned long) tag,
11194                        slp->next, slp->prev, pring->postbufq_cnt);
11195
11196        return NULL;
11197}
11198
11199/**
11200 * lpfc_sli_ringpostbuf_get - search buffers for unsolicited CT and ELS events
11201 * @phba: Pointer to HBA context object.
11202 * @pring: Pointer to driver SLI ring object.
11203 * @phys: DMA address of the buffer.
11204 *
11205 * This function searches the buffer list using the dma_address
11206 * of the unsolicited event to find the driver's lpfc_dmabuf object
11207 * corresponding to the dma_address. The function returns the
11208 * lpfc_dmabuf object if a buffer is found; otherwise it returns NULL.
11209 * This function is called by the ct and els unsolicited event
11210 * handlers to get the buffer associated with the unsolicited
11211 * event.
11212 *
11213 * This function is called with no lock held.
11214 **/
11215struct lpfc_dmabuf *
11216lpfc_sli_ringpostbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
11217                         dma_addr_t phys)
11218{
11219        struct lpfc_dmabuf *mp, *next_mp;
11220        struct list_head *slp = &pring->postbufq;
11221
11222        /* Search postbufq, from the beginning, looking for a match on phys */
11223        spin_lock_irq(&phba->hbalock);
11224        list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
11225                if (mp->phys == phys) {
11226                        list_del_init(&mp->list);
11227                        pring->postbufq_cnt--;
11228                        spin_unlock_irq(&phba->hbalock);
11229                        return mp;
11230                }
11231        }
11232
11233        spin_unlock_irq(&phba->hbalock);
11234        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11235                        "0410 Cannot find virtual addr for mapped buf on "
11236                        "ring %d Data x%llx x%px x%px x%x\n",
11237                        pring->ringno, (unsigned long long)phys,
11238                        slp->next, slp->prev, pring->postbufq_cnt);
11239        return NULL;
11240}
11241
11242/**
11243 * lpfc_sli_abort_els_cmpl - Completion handler for the els abort iocbs
11244 * @phba: Pointer to HBA context object.
11245 * @cmdiocb: Pointer to driver command iocb object.
11246 * @rspiocb: Pointer to driver response iocb object.
11247 *
11248 * This function is the completion handler for the abort iocbs for
11249 * ELS commands. This function is called from the ELS ring event
11250 * handler with no lock held. This function frees memory resources
11251 * associated with the abort iocb.
11252 **/
11253static void
11254lpfc_sli_abort_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
11255                        struct lpfc_iocbq *rspiocb)
11256{
11257        IOCB_t *irsp = &rspiocb->iocb;
11258        uint16_t abort_iotag, abort_context;
11259        struct lpfc_iocbq *abort_iocb = NULL;
11260
11261        if (irsp->ulpStatus) {
11262
11263                /*
11264                 * Assume that the port already completed and returned, or
11265                 * will return the iocb. Just Log the message.
11266                 */
11267                abort_context = cmdiocb->iocb.un.acxri.abortContextTag;
11268                abort_iotag = cmdiocb->iocb.un.acxri.abortIoTag;
11269
11270                spin_lock_irq(&phba->hbalock);
11271                if (phba->sli_rev < LPFC_SLI_REV4) {
11272                        if (irsp->ulpCommand == CMD_ABORT_XRI_CX &&
11273                            irsp->ulpStatus == IOSTAT_LOCAL_REJECT &&
11274                            irsp->un.ulpWord[4] == IOERR_ABORT_REQUESTED) {
11275                                spin_unlock_irq(&phba->hbalock);
11276                                goto release_iocb;
11277                        }
11278                        if (abort_iotag != 0 &&
11279                                abort_iotag <= phba->sli.last_iotag)
11280                                abort_iocb =
11281                                        phba->sli.iocbq_lookup[abort_iotag];
11282                } else
11283                        /* For sli4 the abort_tag is the XRI,
11284                         * so the abort routine puts the iotag  of the iocb
11285                         * being aborted in the context field of the abort
11286                         * IOCB.
11287                         */
11288                        abort_iocb = phba->sli.iocbq_lookup[abort_context];
11289
11290                lpfc_printf_log(phba, KERN_WARNING, LOG_ELS | LOG_SLI,
11291                                "0327 Cannot abort els iocb x%px "
11292                                "with tag %x context %x, abort status %x, "
11293                                "abort code %x\n",
11294                                abort_iocb, abort_iotag, abort_context,
11295                                irsp->ulpStatus, irsp->un.ulpWord[4]);
11296
11297                spin_unlock_irq(&phba->hbalock);
11298        }
11299release_iocb:
11300        lpfc_sli_release_iocbq(phba, cmdiocb);
11301        return;
11302}
11303
11304/**
11305 * lpfc_ignore_els_cmpl - Completion handler for aborted ELS command
11306 * @phba: Pointer to HBA context object.
11307 * @cmdiocb: Pointer to driver command iocb object.
11308 * @rspiocb: Pointer to driver response iocb object.
11309 *
11310 * The function is called from SLI ring event handler with no
11311 * lock held. This function is the completion handler for ELS commands
11312 * which are aborted. The function frees memory resources used for
11313 * the aborted ELS commands.
11314 **/
11315static void
11316lpfc_ignore_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
11317                     struct lpfc_iocbq *rspiocb)
11318{
11319        IOCB_t *irsp = &rspiocb->iocb;
11320
11321        /* ELS cmd tag <ulpIoTag> completes */
11322        lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
11323                        "0139 Ignoring ELS cmd tag x%x completion Data: "
11324                        "x%x x%x x%x\n",
11325                        irsp->ulpIoTag, irsp->ulpStatus,
11326                        irsp->un.ulpWord[4], irsp->ulpTimeout);
11327        if (cmdiocb->iocb.ulpCommand == CMD_GEN_REQUEST64_CR)
11328                lpfc_ct_free_iocb(phba, cmdiocb);
11329        else
11330                lpfc_els_free_iocb(phba, cmdiocb);
11331        return;
11332}
11333
11334/**
11335 * lpfc_sli_abort_iotag_issue - Issue abort for a command iocb
11336 * @phba: Pointer to HBA context object.
11337 * @pring: Pointer to driver SLI ring object.
11338 * @cmdiocb: Pointer to driver command iocb object.
11339 *
11340 * This function issues an abort iocb for the provided command iocb down to
11341 * the port. Unless the outstanding command iocb is itself an abort
11342 * request, this function issues the abort unconditionally. This function is
11343 * called with hbalock held. The function returns 0 when it fails due to
11344 * memory allocation failure or when the command iocb is an abort request.
11345 * The hbalock is asserted held in the code path calling this routine.
11346 **/
11347static int
11348lpfc_sli_abort_iotag_issue(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
11349                           struct lpfc_iocbq *cmdiocb)
11350{
11351        struct lpfc_vport *vport = cmdiocb->vport;
11352        struct lpfc_iocbq *abtsiocbp;
11353        IOCB_t *icmd = NULL;
11354        IOCB_t *iabt = NULL;
11355        int retval;
11356        unsigned long iflags;
11357        struct lpfc_nodelist *ndlp;
11358
11359        /*
11360         * There are certain command types we don't want to abort.  And we
11361         * don't want to abort commands that are already in the process of
11362         * being aborted.
11363         */
11364        icmd = &cmdiocb->iocb;
11365        if (icmd->ulpCommand == CMD_ABORT_XRI_CN ||
11366            icmd->ulpCommand == CMD_CLOSE_XRI_CN ||
11367            (cmdiocb->iocb_flag & LPFC_DRIVER_ABORTED) != 0)
11368                return 0;
11369
11370        /* issue ABTS for this IOCB based on iotag */
11371        abtsiocbp = __lpfc_sli_get_iocbq(phba);
11372        if (abtsiocbp == NULL)
11373                return 0;
11374
11375        /* This flag signals the response path to set the correct status
11376         * before calling the completion handler.
11377         */
11378        cmdiocb->iocb_flag |= LPFC_DRIVER_ABORTED;
11379
11380        iabt = &abtsiocbp->iocb;
11381        iabt->un.acxri.abortType = ABORT_TYPE_ABTS;
11382        iabt->un.acxri.abortContextTag = icmd->ulpContext;
11383        if (phba->sli_rev == LPFC_SLI_REV4) {
11384                iabt->un.acxri.abortIoTag = cmdiocb->sli4_xritag;
11385                iabt->un.acxri.abortContextTag = cmdiocb->iotag;
11386        } else {
11387                iabt->un.acxri.abortIoTag = icmd->ulpIoTag;
11388                if (pring->ringno == LPFC_ELS_RING) {
11389                        ndlp = (struct lpfc_nodelist *)(cmdiocb->context1);
11390                        iabt->un.acxri.abortContextTag = ndlp->nlp_rpi;
11391                }
11392        }
11393        iabt->ulpLe = 1;
11394        iabt->ulpClass = icmd->ulpClass;
11395
11396        /* ABTS WQE must go to the same WQ as the WQE to be aborted */
11397        abtsiocbp->hba_wqidx = cmdiocb->hba_wqidx;
11398        if (cmdiocb->iocb_flag & LPFC_IO_FCP)
11399                abtsiocbp->iocb_flag |= LPFC_USE_FCPWQIDX;
11400        if (cmdiocb->iocb_flag & LPFC_IO_FOF)
11401                abtsiocbp->iocb_flag |= LPFC_IO_FOF;
11402
11403        if (phba->link_state >= LPFC_LINK_UP)
11404                iabt->ulpCommand = CMD_ABORT_XRI_CN;
11405        else
11406                iabt->ulpCommand = CMD_CLOSE_XRI_CN;
11407
11408        abtsiocbp->iocb_cmpl = lpfc_sli_abort_els_cmpl;
11409        abtsiocbp->vport = vport;
11410
11411        lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
11412                         "0339 Abort xri x%x, original iotag x%x, "
11413                         "abort cmd iotag x%x\n",
11414                         iabt->un.acxri.abortIoTag,
11415                         iabt->un.acxri.abortContextTag,
11416                         abtsiocbp->iotag);
11417
11418        if (phba->sli_rev == LPFC_SLI_REV4) {
11419                pring = lpfc_sli4_calc_ring(phba, abtsiocbp);
11420                if (unlikely(pring == NULL))
11421                        return 0;
11422                /* Note: both the hbalock and the ring_lock must be held here */
11423                spin_lock_irqsave(&pring->ring_lock, iflags);
11424                retval = __lpfc_sli_issue_iocb(phba, pring->ringno,
11425                        abtsiocbp, 0);
11426                spin_unlock_irqrestore(&pring->ring_lock, iflags);
11427        } else {
11428                retval = __lpfc_sli_issue_iocb(phba, pring->ringno,
11429                        abtsiocbp, 0);
11430        }
11431
11432        if (retval)
11433                __lpfc_sli_release_iocbq(phba, abtsiocbp);
11434
11435        /*
11436         * Caller to this routine should check for IOCB_ERROR
11437         * and handle it properly.  This routine no longer removes the
11438         * iocb from the txcmplq or calls compl in case of IOCB_ERROR.
11439         */
11440        return retval;
11441}
11442
11443/**
11444 * lpfc_sli_issue_abort_iotag - Abort function for a command iocb
11445 * @phba: Pointer to HBA context object.
11446 * @pring: Pointer to driver SLI ring object.
11447 * @cmdiocb: Pointer to driver command iocb object.
11448 *
11449 * This function issues an abort iocb for the provided command iocb. While
11450 * the HBA is unloading, the abort iocb is not issued for commands on the
11451 * ELS ring; instead, the completion callback of those commands is replaced
11452 * so that nothing happens when they finish. This function is called with
11453 * the hbalock held. The function returns 0 when the command iocb is an
11454 * abort request.
11455 **/
11456int
11457lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
11458                           struct lpfc_iocbq *cmdiocb)
11459{
11460        struct lpfc_vport *vport = cmdiocb->vport;
11461        int retval = IOCB_ERROR;
11462        IOCB_t *icmd = NULL;
11463
11464        lockdep_assert_held(&phba->hbalock);
11465
11466        /*
11467         * There are certain command types we don't want to abort.  And we
11468         * don't want to abort commands that are already in the process of
11469         * being aborted.
11470         */
11471        icmd = &cmdiocb->iocb;
11472        if (icmd->ulpCommand == CMD_ABORT_XRI_CN ||
11473            icmd->ulpCommand == CMD_CLOSE_XRI_CN ||
11474            (cmdiocb->iocb_flag & LPFC_DRIVER_ABORTED) != 0)
11475                return 0;
11476
11477        if (!pring) {
11478                if (cmdiocb->iocb_flag & LPFC_IO_FABRIC)
11479                        cmdiocb->fabric_iocb_cmpl = lpfc_ignore_els_cmpl;
11480                else
11481                        cmdiocb->iocb_cmpl = lpfc_ignore_els_cmpl;
11482                goto abort_iotag_exit;
11483        }
11484
11485        /*
11486         * If we're unloading, don't abort iocb on the ELS ring, but change
11487         * the callback so that nothing happens when it finishes.
11488         */
11489        if ((vport->load_flag & FC_UNLOADING) &&
11490            (pring->ringno == LPFC_ELS_RING)) {
11491                if (cmdiocb->iocb_flag & LPFC_IO_FABRIC)
11492                        cmdiocb->fabric_iocb_cmpl = lpfc_ignore_els_cmpl;
11493                else
11494                        cmdiocb->iocb_cmpl = lpfc_ignore_els_cmpl;
11495                goto abort_iotag_exit;
11496        }
11497
11498        /* Now, try to issue the abort for the cmdiocb */
11499        retval = lpfc_sli_abort_iotag_issue(phba, pring, cmdiocb);
11500
11501abort_iotag_exit:
11502        /*
11503         * Caller to this routine should check for IOCB_ERROR
11504         * and handle it properly.  This routine no longer removes the
11505         * iocb from the txcmplq or calls compl in case of IOCB_ERROR.
11506         */
11507        return retval;
11508}
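
/*
 * Example (illustrative sketch only, not part of the driver): a
 * hypothetical caller that walks a ring's txcmplq and requests an abort
 * for each outstanding iocb. The hbalock must be held around
 * lpfc_sli_issue_abort_iotag(), and an IOCB_ERROR return is left for the
 * caller to handle; the routine neither removes the iocb from the
 * txcmplq nor calls its completion on failure.
 *
 *	struct lpfc_iocbq *iocb, *next_iocb;
 *
 *	spin_lock_irq(&phba->hbalock);
 *	list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list)
 *		lpfc_sli_issue_abort_iotag(phba, pring, iocb);
 *	spin_unlock_irq(&phba->hbalock);
 */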
11509
11510/**
11511 * lpfc_sli_hba_iocb_abort - Abort all iocbs to an hba.
11512 * @phba: pointer to lpfc HBA data structure.
11513 *
11514 * This routine will abort all pending and outstanding iocbs to an HBA.
11515 **/
11516void
11517lpfc_sli_hba_iocb_abort(struct lpfc_hba *phba)
11518{
11519        struct lpfc_sli *psli = &phba->sli;
11520        struct lpfc_sli_ring *pring;
11521        struct lpfc_queue *qp = NULL;
11522        int i;
11523
11524        if (phba->sli_rev != LPFC_SLI_REV4) {
11525                for (i = 0; i < psli->num_rings; i++) {
11526                        pring = &psli->sli3_ring[i];
11527                        lpfc_sli_abort_iocb_ring(phba, pring);
11528                }
11529                return;
11530        }
11531        list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
11532                pring = qp->pring;
11533                if (!pring)
11534                        continue;
11535                lpfc_sli_abort_iocb_ring(phba, pring);
11536        }
11537}
11538
11539/**
11540 * lpfc_sli_validate_fcp_iocb - find commands associated with a vport or LUN
11541 * @iocbq: Pointer to driver iocb object.
11542 * @vport: Pointer to driver virtual port object.
11543 * @tgt_id: SCSI ID of the target.
11544 * @lun_id: LUN ID of the scsi device.
11545 * @ctx_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST
11546 *
11547 * This function acts as an iocb filter for functions which abort or count
11548 * all FCP iocbs pending on a lun/SCSI target/SCSI host. It returns
11549 * 0 if the filtering criteria are met for the given iocb and
11550 * 1 if they are not.
11551 * If ctx_cmd == LPFC_CTX_LUN, the function returns 0 only if the
11552 * given iocb is for the SCSI device specified by vport, tgt_id and
11553 * lun_id parameter.
11554 * If ctx_cmd == LPFC_CTX_TGT,  the function returns 0 only if the
11555 * given iocb is for the SCSI target specified by vport and tgt_id
11556 * parameters.
11557 * If ctx_cmd == LPFC_CTX_HOST, the function returns 0 only if the
11558 * given iocb is for the SCSI host associated with the given vport.
11559 * This function is called with no locks held.
11560 **/
11561static int
11562lpfc_sli_validate_fcp_iocb(struct lpfc_iocbq *iocbq, struct lpfc_vport *vport,
11563                           uint16_t tgt_id, uint64_t lun_id,
11564                           lpfc_ctx_cmd ctx_cmd)
11565{
11566        struct lpfc_io_buf *lpfc_cmd;
11567        int rc = 1;
11568
11569        if (iocbq->vport != vport)
11570                return rc;
11571
11572        if (!(iocbq->iocb_flag & LPFC_IO_FCP) ||
11573            !(iocbq->iocb_flag & LPFC_IO_ON_TXCMPLQ))
11574                return rc;
11575
11576        lpfc_cmd = container_of(iocbq, struct lpfc_io_buf, cur_iocbq);
11577
11578        if (lpfc_cmd->pCmd == NULL)
11579                return rc;
11580
11581        switch (ctx_cmd) {
11582        case LPFC_CTX_LUN:
11583                if ((lpfc_cmd->rdata) && (lpfc_cmd->rdata->pnode) &&
11584                    (lpfc_cmd->rdata->pnode->nlp_sid == tgt_id) &&
11585                    (scsilun_to_int(&lpfc_cmd->fcp_cmnd->fcp_lun) == lun_id))
11586                        rc = 0;
11587                break;
11588        case LPFC_CTX_TGT:
11589                if ((lpfc_cmd->rdata) && (lpfc_cmd->rdata->pnode) &&
11590                    (lpfc_cmd->rdata->pnode->nlp_sid == tgt_id))
11591                        rc = 0;
11592                break;
11593        case LPFC_CTX_HOST:
11594                rc = 0;
11595                break;
11596        default:
11597                printk(KERN_ERR "%s: Unknown context cmd type, value %d\n",
11598                        __func__, ctx_cmd);
11599                break;
11600        }
11601
11602        return rc;
11603}
11604
11605/**
11606 * lpfc_sli_sum_iocb - Function to count the number of FCP iocbs pending
11607 * @vport: Pointer to virtual port.
11608 * @tgt_id: SCSI ID of the target.
11609 * @lun_id: LUN ID of the scsi device.
11610 * @ctx_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST.
11611 *
11612 * This function returns the number of FCP commands pending for the vport.
11613 * When ctx_cmd == LPFC_CTX_LUN, the function returns the number of FCP
11614 * commands pending on the vport associated with the SCSI device specified
11615 * by the tgt_id and lun_id parameters.
11616 * When ctx_cmd == LPFC_CTX_TGT, the function returns the number of FCP
11617 * commands pending on the vport associated with the SCSI target specified
11618 * by the tgt_id parameter.
11619 * When ctx_cmd == LPFC_CTX_HOST, the function returns the number of FCP
11620 * commands pending on the vport.
11621 * In all cases, only the iocbs which satisfy the filter are counted.
11622 * This function is called without any lock held.
11623 **/
11624int
11625lpfc_sli_sum_iocb(struct lpfc_vport *vport, uint16_t tgt_id, uint64_t lun_id,
11626                  lpfc_ctx_cmd ctx_cmd)
11627{
11628        struct lpfc_hba *phba = vport->phba;
11629        struct lpfc_iocbq *iocbq;
11630        int sum, i;
11631
11632        spin_lock_irq(&phba->hbalock);
11633        for (i = 1, sum = 0; i <= phba->sli.last_iotag; i++) {
11634                iocbq = phba->sli.iocbq_lookup[i];
11635
11636                if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id,
11637                                                ctx_cmd) == 0)
11638                        sum++;
11639        }
11640        spin_unlock_irq(&phba->hbalock);
11641
11642        return sum;
11643}
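
/*
 * Example (illustrative sketch only): counting the FCP commands still
 * pending on one LUN, e.g. from an error-handler path; "cmnd" is a
 * hypothetical struct scsi_cmnd pointer. The caller must hold no locks;
 * the routine takes the hbalock itself.
 *
 *	int pending;
 *
 *	pending = lpfc_sli_sum_iocb(vport, cmnd->device->id,
 *				    cmnd->device->lun, LPFC_CTX_LUN);
 *	if (pending)
 *		... wait for the outstanding commands to drain ...
 */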
11644
11645/**
11646 * lpfc_sli_abort_fcp_cmpl - Completion handler function for aborted FCP IOCBs
11647 * @phba: Pointer to HBA context object
11648 * @cmdiocb: Pointer to command iocb object.
11649 * @rspiocb: Pointer to response iocb object.
11650 *
11651 * This function is called when an aborted FCP iocb completes. This
11652 * function is called by the ring event handler with no lock held.
11653 * This function frees the iocb.
11654 **/
11655void
11656lpfc_sli_abort_fcp_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
11657                        struct lpfc_iocbq *rspiocb)
11658{
11659        lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
11660                        "3096 ABORT_XRI_CN completing on rpi x%x "
11661                        "original iotag x%x, abort cmd iotag x%x "
11662                        "status 0x%x, reason 0x%x\n",
11663                        cmdiocb->iocb.un.acxri.abortContextTag,
11664                        cmdiocb->iocb.un.acxri.abortIoTag,
11665                        cmdiocb->iotag, rspiocb->iocb.ulpStatus,
11666                        rspiocb->iocb.un.ulpWord[4]);
11667        lpfc_sli_release_iocbq(phba, cmdiocb);
11668        return;
11669}
11670
11671/**
11672 * lpfc_sli_abort_iocb - issue abort for all commands on a host/target/LUN
11673 * @vport: Pointer to virtual port.
11674 * @pring: Pointer to driver SLI ring object.
11675 * @tgt_id: SCSI ID of the target.
11676 * @lun_id: LUN ID of the scsi device.
11677 * @abort_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST.
11678 *
11679 * This function sends an abort command for every SCSI command
11680 * associated with the given virtual port pending on the ring
11681 * filtered by lpfc_sli_validate_fcp_iocb function.
11682 * When abort_cmd == LPFC_CTX_LUN, the function sends an abort only to the
11683 * FCP iocbs associated with the lun specified by the tgt_id and lun_id
11684 * parameters.
11685 * When abort_cmd == LPFC_CTX_TGT, the function sends an abort only to the
11686 * FCP iocbs associated with the SCSI target specified by tgt_id.
11687 * When abort_cmd == LPFC_CTX_HOST, the function sends an abort to all
11688 * FCP iocbs associated with the virtual port.
11689 * This function returns the number of iocbs it failed to abort.
11690 * This function is called with no locks held.
11691 **/
11692int
11693lpfc_sli_abort_iocb(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
11694                    uint16_t tgt_id, uint64_t lun_id, lpfc_ctx_cmd abort_cmd)
11695{
11696        struct lpfc_hba *phba = vport->phba;
11697        struct lpfc_iocbq *iocbq;
11698        struct lpfc_iocbq *abtsiocb;
11699        struct lpfc_sli_ring *pring_s4;
11700        IOCB_t *cmd = NULL;
11701        int errcnt = 0, ret_val = 0;
11702        int i;
11703
11704        /* all I/Os are in the process of being flushed */
11705        if (phba->hba_flag & HBA_IOQ_FLUSH)
11706                return errcnt;
11707
11708        for (i = 1; i <= phba->sli.last_iotag; i++) {
11709                iocbq = phba->sli.iocbq_lookup[i];
11710
11711                if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id,
11712                                               abort_cmd) != 0)
11713                        continue;
11714
11715                /*
11716                 * If the iocbq is already being aborted, don't take a second
11717                 * action, but do count it.
11718                 */
11719                if (iocbq->iocb_flag & LPFC_DRIVER_ABORTED)
11720                        continue;
11721
11722                /* issue ABTS for this IOCB based on iotag */
11723                abtsiocb = lpfc_sli_get_iocbq(phba);
11724                if (abtsiocb == NULL) {
11725                        errcnt++;
11726                        continue;
11727                }
11728
11729                /* indicate the IO is being aborted by the driver. */
11730                iocbq->iocb_flag |= LPFC_DRIVER_ABORTED;
11731
11732                cmd = &iocbq->iocb;
11733                abtsiocb->iocb.un.acxri.abortType = ABORT_TYPE_ABTS;
11734                abtsiocb->iocb.un.acxri.abortContextTag = cmd->ulpContext;
11735                if (phba->sli_rev == LPFC_SLI_REV4)
11736                        abtsiocb->iocb.un.acxri.abortIoTag = iocbq->sli4_xritag;
11737                else
11738                        abtsiocb->iocb.un.acxri.abortIoTag = cmd->ulpIoTag;
11739                abtsiocb->iocb.ulpLe = 1;
11740                abtsiocb->iocb.ulpClass = cmd->ulpClass;
11741                abtsiocb->vport = vport;
11742
11743                /* ABTS WQE must go to the same WQ as the WQE to be aborted */
11744                abtsiocb->hba_wqidx = iocbq->hba_wqidx;
11745                if (iocbq->iocb_flag & LPFC_IO_FCP)
11746                        abtsiocb->iocb_flag |= LPFC_USE_FCPWQIDX;
11747                if (iocbq->iocb_flag & LPFC_IO_FOF)
11748                        abtsiocb->iocb_flag |= LPFC_IO_FOF;
11749
11750                if (lpfc_is_link_up(phba))
11751                        abtsiocb->iocb.ulpCommand = CMD_ABORT_XRI_CN;
11752                else
11753                        abtsiocb->iocb.ulpCommand = CMD_CLOSE_XRI_CN;
11754
11755                /* Setup callback routine and issue the command. */
11756                abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;
11757                if (phba->sli_rev == LPFC_SLI_REV4) {
11758                        pring_s4 = lpfc_sli4_calc_ring(phba, iocbq);
11759                        if (!pring_s4)
11760                                continue;
11761                        ret_val = lpfc_sli_issue_iocb(phba, pring_s4->ringno,
11762                                                      abtsiocb, 0);
11763                } else
11764                        ret_val = lpfc_sli_issue_iocb(phba, pring->ringno,
11765                                                      abtsiocb, 0);
11766                if (ret_val == IOCB_ERROR) {
11767                        lpfc_sli_release_iocbq(phba, abtsiocb);
11768                        errcnt++;
11769                        continue;
11770                }
11771        }
11772
11773        return errcnt;
11774}
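
/*
 * Example (illustrative sketch only): after a LUN reset, a hypothetical
 * error handler could request aborts for every FCP iocb still pending on
 * that LUN. The return value counts the iocbs for which no abort could
 * be issued, so a non-zero result suggests escalating the recovery.
 *
 *	int failed;
 *
 *	failed = lpfc_sli_abort_iocb(vport, pring, tgt_id, lun_id,
 *				     LPFC_CTX_LUN);
 *	if (failed)
 *		... escalate, e.g. to a target or host reset ...
 */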
11775
11776/**
11777 * lpfc_sli_abort_taskmgmt - issue abort for all commands on a host/target/LUN
11778 * @vport: Pointer to virtual port.
11779 * @pring: Pointer to driver SLI ring object.
11780 * @tgt_id: SCSI ID of the target.
11781 * @lun_id: LUN ID of the scsi device.
11782 * @cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST.
11783 *
11784 * This function sends an abort command for every SCSI command
11785 * associated with the given virtual port pending on the ring
11786 * filtered by lpfc_sli_validate_fcp_iocb function.
11787 * When cmd == LPFC_CTX_LUN, the function sends an abort only to the
11788 * FCP iocbs associated with the lun specified by the tgt_id and lun_id
11789 * parameters.
11790 * When cmd == LPFC_CTX_TGT, the function sends an abort only to the
11791 * FCP iocbs associated with the SCSI target specified by tgt_id.
11792 * When cmd == LPFC_CTX_HOST, the function sends an abort to all
11793 * FCP iocbs associated with the virtual port.
11794 * This function returns the number of iocbs it aborted.
11795 * This function is called with no locks held right after a taskmgmt
11796 * command is sent.
11797 **/
11798int
11799lpfc_sli_abort_taskmgmt(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
11800                        uint16_t tgt_id, uint64_t lun_id, lpfc_ctx_cmd cmd)
11801{
11802        struct lpfc_hba *phba = vport->phba;
11803        struct lpfc_io_buf *lpfc_cmd;
11804        struct lpfc_iocbq *abtsiocbq;
11805        struct lpfc_nodelist *ndlp;
11806        struct lpfc_iocbq *iocbq;
11807        IOCB_t *icmd;
11808        int sum, i, ret_val;
11809        unsigned long iflags;
11810        struct lpfc_sli_ring *pring_s4 = NULL;
11811
11812        spin_lock_irqsave(&phba->hbalock, iflags);
11813
11814        /* all I/Os are in the process of being flushed */
11815        if (phba->hba_flag & HBA_IOQ_FLUSH) {
11816                spin_unlock_irqrestore(&phba->hbalock, iflags);
11817                return 0;
11818        }
11819        sum = 0;
11820
11821        for (i = 1; i <= phba->sli.last_iotag; i++) {
11822                iocbq = phba->sli.iocbq_lookup[i];
11823
11824                if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id,
11825                                               cmd) != 0)
11826                        continue;
11827
11828                /* Guard against IO completion being called at the same time */
11829                lpfc_cmd = container_of(iocbq, struct lpfc_io_buf, cur_iocbq);
11830                spin_lock(&lpfc_cmd->buf_lock);
11831
11832                if (!lpfc_cmd->pCmd) {
11833                        spin_unlock(&lpfc_cmd->buf_lock);
11834                        continue;
11835                }
11836
11837                if (phba->sli_rev == LPFC_SLI_REV4) {
11838                        pring_s4 =
11839                            phba->sli4_hba.hdwq[iocbq->hba_wqidx].io_wq->pring;
11840                        if (!pring_s4) {
11841                                spin_unlock(&lpfc_cmd->buf_lock);
11842                                continue;
11843                        }
11844                        /* Note: both the hbalock and the ring_lock must be held here */
11845                        spin_lock(&pring_s4->ring_lock);
11846                }
11847
11848                /*
11849                 * If the iocbq is already being aborted, don't take a second
11850                 * action, but do count it.
11851                 */
11852                if ((iocbq->iocb_flag & LPFC_DRIVER_ABORTED) ||
11853                    !(iocbq->iocb_flag & LPFC_IO_ON_TXCMPLQ)) {
11854                        if (phba->sli_rev == LPFC_SLI_REV4)
11855                                spin_unlock(&pring_s4->ring_lock);
11856                        spin_unlock(&lpfc_cmd->buf_lock);
11857                        continue;
11858                }
11859
11860                /* issue ABTS for this IOCB based on iotag */
11861                abtsiocbq = __lpfc_sli_get_iocbq(phba);
11862                if (!abtsiocbq) {
11863                        if (phba->sli_rev == LPFC_SLI_REV4)
11864                                spin_unlock(&pring_s4->ring_lock);
11865                        spin_unlock(&lpfc_cmd->buf_lock);
11866                        continue;
11867                }
11868
11869                icmd = &iocbq->iocb;
11870                abtsiocbq->iocb.un.acxri.abortType = ABORT_TYPE_ABTS;
11871                abtsiocbq->iocb.un.acxri.abortContextTag = icmd->ulpContext;
11872                if (phba->sli_rev == LPFC_SLI_REV4)
11873                        abtsiocbq->iocb.un.acxri.abortIoTag =
11874                                                         iocbq->sli4_xritag;
11875                else
11876                        abtsiocbq->iocb.un.acxri.abortIoTag = icmd->ulpIoTag;
11877                abtsiocbq->iocb.ulpLe = 1;
11878                abtsiocbq->iocb.ulpClass = icmd->ulpClass;
11879                abtsiocbq->vport = vport;
11880
11881                /* ABTS WQE must go to the same WQ as the WQE to be aborted */
11882                abtsiocbq->hba_wqidx = iocbq->hba_wqidx;
11883                if (iocbq->iocb_flag & LPFC_IO_FCP)
11884                        abtsiocbq->iocb_flag |= LPFC_USE_FCPWQIDX;
11885                if (iocbq->iocb_flag & LPFC_IO_FOF)
11886                        abtsiocbq->iocb_flag |= LPFC_IO_FOF;
11887
11888                ndlp = lpfc_cmd->rdata->pnode;
11889
11890                if (lpfc_is_link_up(phba) &&
11891                    (ndlp && ndlp->nlp_state == NLP_STE_MAPPED_NODE))
11892                        abtsiocbq->iocb.ulpCommand = CMD_ABORT_XRI_CN;
11893                else
11894                        abtsiocbq->iocb.ulpCommand = CMD_CLOSE_XRI_CN;
11895
11896                /* Setup callback routine and issue the command. */
11897                abtsiocbq->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;
11898
11899                /*
11900                 * Indicate the IO is being aborted by the driver and set
11901                 * the caller's flag into the aborted IO.
11902                 */
11903                iocbq->iocb_flag |= LPFC_DRIVER_ABORTED;
11904
11905                if (phba->sli_rev == LPFC_SLI_REV4) {
11906                        ret_val = __lpfc_sli_issue_iocb(phba, pring_s4->ringno,
11907                                                        abtsiocbq, 0);
11908                        spin_unlock(&pring_s4->ring_lock);
11909                } else {
11910                        ret_val = __lpfc_sli_issue_iocb(phba, pring->ringno,
11911                                                        abtsiocbq, 0);
11912                }
11913
11914                spin_unlock(&lpfc_cmd->buf_lock);
11915
11916                if (ret_val == IOCB_ERROR)
11917                        __lpfc_sli_release_iocbq(phba, abtsiocbq);
11918                else
11919                        sum++;
11920        }
11921        spin_unlock_irqrestore(&phba->hbalock, iflags);
11922        return sum;
11923}
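
/*
 * Example (illustrative sketch only): right after sending a target reset,
 * abort everything still outstanding on that target. Unlike
 * lpfc_sli_abort_iocb(), the return value here is the number of iocbs
 * successfully aborted; with LPFC_CTX_TGT the lun_id argument is not used.
 *
 *	int aborted;
 *
 *	aborted = lpfc_sli_abort_taskmgmt(vport, pring, tgt_id, 0,
 *					  LPFC_CTX_TGT);
 */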
11924
11925/**
11926 * lpfc_sli_wake_iocb_wait - lpfc_sli_issue_iocb_wait's completion handler
11927 * @phba: Pointer to HBA context object.
11928 * @cmdiocbq: Pointer to command iocb.
11929 * @rspiocbq: Pointer to response iocb.
11930 *
11931 * This function is the completion handler for iocbs issued using
11932 * lpfc_sli_issue_iocb_wait function. This function is called by the
11933 * ring event handler function without any lock held. This function
11934 * can be called from both worker thread context and interrupt
11935 * context. This function also can be called from other thread which
11936 * cleans up the SLI layer objects.
11937 * This function copies the contents of the response iocb to the
11938 * response iocb memory object provided by the caller of
11939 * lpfc_sli_issue_iocb_wait and then wakes up the thread which
11940 * sleeps waiting for the iocb completion.
11941 **/
11942static void
11943lpfc_sli_wake_iocb_wait(struct lpfc_hba *phba,
11944                        struct lpfc_iocbq *cmdiocbq,
11945                        struct lpfc_iocbq *rspiocbq)
11946{
11947        wait_queue_head_t *pdone_q;
11948        unsigned long iflags;
11949        struct lpfc_io_buf *lpfc_cmd;
11950
11951        spin_lock_irqsave(&phba->hbalock, iflags);
11952        if (cmdiocbq->iocb_flag & LPFC_IO_WAKE_TMO) {
11953
11954                /*
11955                 * A time out has occurred for the iocb.  If a time out
11956                 * completion handler has been supplied, call it.  Otherwise,
11957                 * just free the iocbq.
11958                 */
11959
11960                spin_unlock_irqrestore(&phba->hbalock, iflags);
11961                cmdiocbq->iocb_cmpl = cmdiocbq->wait_iocb_cmpl;
11962                cmdiocbq->wait_iocb_cmpl = NULL;
11963                if (cmdiocbq->iocb_cmpl)
11964                        (cmdiocbq->iocb_cmpl)(phba, cmdiocbq, NULL);
11965                else
11966                        lpfc_sli_release_iocbq(phba, cmdiocbq);
11967                return;
11968        }
11969
11970        cmdiocbq->iocb_flag |= LPFC_IO_WAKE;
11971        if (cmdiocbq->context2 && rspiocbq)
11972                memcpy(&((struct lpfc_iocbq *)cmdiocbq->context2)->iocb,
11973                       &rspiocbq->iocb, sizeof(IOCB_t));
11974
11975        /* Set the exchange busy flag for task management commands */
11976        if ((cmdiocbq->iocb_flag & LPFC_IO_FCP) &&
11977                !(cmdiocbq->iocb_flag & LPFC_IO_LIBDFC)) {
11978                lpfc_cmd = container_of(cmdiocbq, struct lpfc_io_buf,
11979                        cur_iocbq);
11980                if (rspiocbq && (rspiocbq->iocb_flag & LPFC_EXCHANGE_BUSY))
11981                        lpfc_cmd->flags |= LPFC_SBUF_XBUSY;
11982                else
11983                        lpfc_cmd->flags &= ~LPFC_SBUF_XBUSY;
11984        }
11985
11986        pdone_q = cmdiocbq->context_un.wait_queue;
11987        if (pdone_q)
11988                wake_up(pdone_q);
11989        spin_unlock_irqrestore(&phba->hbalock, iflags);
11990        return;
11991}
11992
11993/**
11994 * lpfc_chk_iocb_flg - Test IOCB flag with lock held.
11995 * @phba: Pointer to HBA context object.
11996 * @piocbq: Pointer to command iocb.
11997 * @flag: Flag to test.
11998 *
11999 * This routine grabs the hbalock and then tests the iocb_flag to
12000 * see if the passed in flag is set.
12001 * Returns:
12002 * 1 if flag is set.
12003 * 0 if flag is not set.
12004 **/
12005static int
12006lpfc_chk_iocb_flg(struct lpfc_hba *phba,
12007                 struct lpfc_iocbq *piocbq, uint32_t flag)
12008{
12009        unsigned long iflags;
12010        int ret;
12011
12012        spin_lock_irqsave(&phba->hbalock, iflags);
12013        ret = piocbq->iocb_flag & flag;
12014        spin_unlock_irqrestore(&phba->hbalock, iflags);
12015        return ret;
12016
12017}
12018
12019/**
12020 * lpfc_sli_issue_iocb_wait - Synchronous function to issue iocb commands
12021 * @phba: Pointer to HBA context object.
12022 * @ring_number: Ring number
12023 * @piocb: Pointer to command iocb.
12024 * @prspiocbq: Pointer to response iocb.
12025 * @timeout: Timeout in number of seconds.
12026 *
12027 * This function issues the iocb to firmware and waits for the
12028 * iocb to complete. The iocb_cmpl field of the iocb shall be used
12029 * to handle iocbs which time out. If the field is NULL, the
12030 * function shall free the iocbq structure.  If more clean up is
12031 * needed, the caller is expected to provide a completion function
12032 * that will provide the needed clean up.  If the iocb command is
12033 * not completed within timeout seconds, the function will either
12034 * free the iocbq structure (if iocb_cmpl == NULL) or execute the
12035 * completion function set in the iocb_cmpl field and then return
12036 * a status of IOCB_TIMEDOUT.  The caller should not free the iocb
12037 * resources if this function returns IOCB_TIMEDOUT.
12038 * The function waits for the iocb completion using a
12039 * non-interruptible wait.
12040 * This function will sleep while waiting for iocb completion.
12041 * So, this function should not be called from any context which
12042 * does not allow sleeping. For the same reason, this function
12043 * cannot be called with interrupts disabled.
12044 * This function assumes that the iocb completions occur while
12045 * this function sleeps. So, this function cannot be called from
12046 * the thread which processes iocb completions for this ring.
12047 * This function clears the iocb_flag of the iocb object before
12048 * issuing the iocb and the iocb completion handler sets this
12049 * flag and wakes this thread when the iocb completes.
12050 * The contents of the response iocb will be copied to prspiocbq
12051 * by the completion handler when the command completes.
12052 * This function returns IOCB_SUCCESS on success.
12053 * This function is called with no lock held.
12054 **/
12055int
12056lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba,
12057                         uint32_t ring_number,
12058                         struct lpfc_iocbq *piocb,
12059                         struct lpfc_iocbq *prspiocbq,
12060                         uint32_t timeout)
12061{
12062        DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_q);
12063        long timeleft, timeout_req = 0;
12064        int retval = IOCB_SUCCESS;
12065        uint32_t creg_val;
12066        struct lpfc_iocbq *iocb;
12067        int txq_cnt = 0;
12068        int txcmplq_cnt = 0;
12069        struct lpfc_sli_ring *pring;
12070        unsigned long iflags;
12071        bool iocb_completed = true;
12072
12073        if (phba->sli_rev >= LPFC_SLI_REV4)
12074                pring = lpfc_sli4_calc_ring(phba, piocb);
12075        else
12076                pring = &phba->sli.sli3_ring[ring_number];
12077        /*
12078         * If the caller has provided a response iocbq buffer, context2 must
12079         * be NULL or it is an error.
12080         */
12081        if (prspiocbq) {
12082                if (piocb->context2)
12083                        return IOCB_ERROR;
12084                piocb->context2 = prspiocbq;
12085        }
12086
12087        piocb->wait_iocb_cmpl = piocb->iocb_cmpl;
12088        piocb->iocb_cmpl = lpfc_sli_wake_iocb_wait;
12089        piocb->context_un.wait_queue = &done_q;
12090        piocb->iocb_flag &= ~(LPFC_IO_WAKE | LPFC_IO_WAKE_TMO);
12091
12092        if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
12093                if (lpfc_readl(phba->HCregaddr, &creg_val))
12094                        return IOCB_ERROR;
12095                creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
12096                writel(creg_val, phba->HCregaddr);
12097                readl(phba->HCregaddr); /* flush */
12098        }
12099
12100        retval = lpfc_sli_issue_iocb(phba, ring_number, piocb,
12101                                     SLI_IOCB_RET_IOCB);
12102        if (retval == IOCB_SUCCESS) {
12103                timeout_req = msecs_to_jiffies(timeout * 1000);
12104                timeleft = wait_event_timeout(done_q,
12105                                lpfc_chk_iocb_flg(phba, piocb, LPFC_IO_WAKE),
12106                                timeout_req);
12107                spin_lock_irqsave(&phba->hbalock, iflags);
12108                if (!(piocb->iocb_flag & LPFC_IO_WAKE)) {
12109
12110                        /*
12111                         * IOCB timed out.  Inform the wake iocb wait
12112                         * completion function and set local status
12113                         */
12114
12115                        iocb_completed = false;
12116                        piocb->iocb_flag |= LPFC_IO_WAKE_TMO;
12117                }
12118                spin_unlock_irqrestore(&phba->hbalock, iflags);
12119                if (iocb_completed) {
12120                        lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
12121                                        "0331 IOCB wake signaled\n");
12122                        /* Note: we are not indicating if the IOCB has a success
12123                         * status or not - that's for the caller to check.
12124                         * IOCB_SUCCESS means just that the command was sent and
12125                         * completed. Not that it completed successfully.
12126                         */
12127                } else if (timeleft == 0) {
12128                        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
12129                                        "0338 IOCB wait timeout error - no "
12130                                        "wake response Data x%x\n", timeout);
12131                        retval = IOCB_TIMEDOUT;
12132                } else {
12133                        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
12134                                        "0330 IOCB wake NOT set, "
12135                                        "Data x%x x%lx\n",
12136                                        timeout, (timeleft / jiffies));
12137                        retval = IOCB_TIMEDOUT;
12138                }
12139        } else if (retval == IOCB_BUSY) {
12140                if (phba->cfg_log_verbose & LOG_SLI) {
12141                        list_for_each_entry(iocb, &pring->txq, list) {
12142                                txq_cnt++;
12143                        }
12144                        list_for_each_entry(iocb, &pring->txcmplq, list) {
12145                                txcmplq_cnt++;
12146                        }
12147                        lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
12148                                "2818 Max IOCBs %d txq cnt %d txcmplq cnt %d\n",
12149                                phba->iocb_cnt, txq_cnt, txcmplq_cnt);
12150                }
12151                return retval;
12152        } else {
12153                lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
12154                                "0332 IOCB wait issue failed, Data x%x\n",
12155                                retval);
12156                retval = IOCB_ERROR;
12157        }
12158
12159        if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
12160                if (lpfc_readl(phba->HCregaddr, &creg_val))
12161                        return IOCB_ERROR;
12162                creg_val &= ~(HC_R0INT_ENA << LPFC_FCP_RING);
12163                writel(creg_val, phba->HCregaddr);
12164                readl(phba->HCregaddr); /* flush */
12165        }
12166
12167        if (prspiocbq)
12168                piocb->context2 = NULL;
12169
12170        piocb->context_un.wait_queue = NULL;
12171        piocb->iocb_cmpl = NULL;
12172        return retval;
12173}
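
/*
 * Example (illustrative sketch only): issuing a previously prepared ELS
 * iocb synchronously; "cmdiocbq" and "rspiocbq" are hypothetical iocbs
 * obtained from lpfc_sli_get_iocbq(). On IOCB_TIMEDOUT the iocb now
 * belongs to the completion path and must not be freed here.
 *
 *	int rc;
 *
 *	rc = lpfc_sli_issue_iocb_wait(phba, LPFC_ELS_RING, cmdiocbq,
 *				      rspiocbq, 30);
 *	if (rc == IOCB_TIMEDOUT)
 *		return;
 *	if (rc == IOCB_SUCCESS)
 *		... inspect rspiocbq->iocb.ulpStatus for the real outcome ...
 *	lpfc_sli_release_iocbq(phba, cmdiocbq);
 */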
12174
12175/**
12176 * lpfc_sli_issue_mbox_wait - Synchronous function to issue mailbox
12177 * @phba: Pointer to HBA context object.
12178 * @pmboxq: Pointer to driver mailbox object.
12179 * @timeout: Timeout in number of seconds.
12180 *
12181 * This function issues the mailbox to firmware and waits for the
12182 * mailbox command to complete. If the mailbox command is not
12183 * completed within timeout seconds, it returns MBX_TIMEOUT.
12184 * The function waits for the mailbox completion using a
12185 * non-interruptible wait. If the mailbox does not complete within
12186 * the timeout, MBX_TIMEOUT is returned to the caller. The caller
12187 * should not free the mailbox resources if this function returns
12188 * MBX_TIMEOUT.
12189 * This function will sleep while waiting for mailbox completion.
12190 * So, this function should not be called from any context which
12191 * does not allow sleeping. Due to the same reason, this function
12192 * cannot be called with interrupts disabled.
12193 * This function assumes that the mailbox completion occurs while
12194 * this function sleeps. So, this function cannot be called from
12195 * the worker thread which processes mailbox completion.
12196 * This function is called in the context of HBA management
12197 * applications.
12198 * This function returns MBX_SUCCESS when successful.
12199 * This function is called with no lock held.
12200 **/
12201int
12202lpfc_sli_issue_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq,
12203                         uint32_t timeout)
12204{
12205        struct completion mbox_done;
12206        int retval;
12207        unsigned long flag;
12208
12209        pmboxq->mbox_flag &= ~LPFC_MBX_WAKE;
12210        /* set up the wake routine as the mailbox completion callback */
12211        pmboxq->mbox_cmpl = lpfc_sli_wake_mbox_wait;
12212
12213        /* setup context3 field to pass the completion pointer to the wake function */
12214        init_completion(&mbox_done);
12215        pmboxq->context3 = &mbox_done;
12216        /* now issue the command */
12217        retval = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
12218        if (retval == MBX_BUSY || retval == MBX_SUCCESS) {
12219                wait_for_completion_timeout(&mbox_done,
12220                                            msecs_to_jiffies(timeout * 1000));
12221
12222                spin_lock_irqsave(&phba->hbalock, flag);
12223                pmboxq->context3 = NULL;
12224                /*
12225                 * If the LPFC_MBX_WAKE flag is set, the mailbox completed.
12226                 * Otherwise report a timeout; the default completion handler
12227                 * will free the resources when the command finally finishes.
12228                 */
12228                if (pmboxq->mbox_flag & LPFC_MBX_WAKE) {
12229                        retval = MBX_SUCCESS;
12230                } else {
12231                        retval = MBX_TIMEOUT;
12232                        pmboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
12233                }
12234                spin_unlock_irqrestore(&phba->hbalock, flag);
12235        }
12236        return retval;
12237}
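
/*
 * Example (illustrative sketch only): issuing a READ_REV mailbox command
 * synchronously. On MBX_TIMEOUT the mailbox memory must not be freed by
 * the caller; the default completion handler releases it when the command
 * eventually finishes.
 *
 *	LPFC_MBOXQ_t *pmb;
 *	int rc;
 *
 *	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
 *	if (!pmb)
 *		return -ENOMEM;
 *	lpfc_read_rev(phba, pmb);
 *	rc = lpfc_sli_issue_mbox_wait(phba, pmb, LPFC_MBOX_TMO);
 *	if (rc != MBX_TIMEOUT)
 *		mempool_free(pmb, phba->mbox_mem_pool);
 *	return (rc == MBX_SUCCESS) ? 0 : -EIO;
 */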
12238
12239/**
12240 * lpfc_sli_mbox_sys_shutdown - shutdown mailbox command sub-system
12241 * @phba: Pointer to HBA context.
12242 * @mbx_action: Mailbox shutdown options.
12243 *
12244 * This function is called to shutdown the driver's mailbox sub-system.
12245 * It first puts the mailbox sub-system into a blocked state to prevent
12246 * asynchronous mailbox commands from being issued off the pending mailbox
12247 * command queue. If the mailbox sub-system shutdown is due to an HBA
12248 * error condition such as EEH or ERATT, this routine invokes the
12249 * mailbox sub-system flush routine to forcefully bring down the
12250 * mailbox sub-system. Otherwise, if it is due to a normal condition (such
12251 * as offline or HBA function reset), this routine waits for the
12252 * outstanding mailbox command to complete before invoking the mailbox
12253 * sub-system flush routine to gracefully bring down the mailbox sub-system.
12254 **/
12255void
12256lpfc_sli_mbox_sys_shutdown(struct lpfc_hba *phba, int mbx_action)
12257{
12258        struct lpfc_sli *psli = &phba->sli;
12259        unsigned long timeout;
12260
12261        if (mbx_action == LPFC_MBX_NO_WAIT) {
12262                /* delay 100ms for port state */
12263                msleep(100);
12264                lpfc_sli_mbox_sys_flush(phba);
12265                return;
12266        }
12267        timeout = msecs_to_jiffies(LPFC_MBOX_TMO * 1000) + jiffies;
12268
12269        /* Disable softirqs, including timers from obtaining phba->hbalock */
12270        local_bh_disable();
12271
12272        spin_lock_irq(&phba->hbalock);
12273        psli->sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
12274
12275        if (psli->sli_flag & LPFC_SLI_ACTIVE) {
12276                /* Determine how long we might wait for the active mailbox
12277                 * command to be gracefully completed by firmware.
12278                 */
12279                if (phba->sli.mbox_active)
12280                        timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba,
12281                                                phba->sli.mbox_active) *
12282                                                1000) + jiffies;
12283                spin_unlock_irq(&phba->hbalock);
12284
12285                /* Enable softirqs again, done with phba->hbalock */
12286                local_bh_enable();
12287
12288                while (phba->sli.mbox_active) {
12289                        /* Check active mailbox complete status every 2ms */
12290                        msleep(2);
12291                        if (time_after(jiffies, timeout))
12292                                /* Timed out; let the mailbox flush routine
12293                                 * forcefully release the active mailbox command
12294                                 */
12295                                break;
12296                }
12297        } else {
12298                spin_unlock_irq(&phba->hbalock);
12299
12300                /* Enable softirqs again, done with phba->hbalock */
12301                local_bh_enable();
12302        }
12303
12304        lpfc_sli_mbox_sys_flush(phba);
12305}
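
/*
 * Example (illustrative sketch only): a graceful offline waits for the
 * active mailbox command, while an error path (e.g. EEH) must not wait.
 *
 *	lpfc_sli_mbox_sys_shutdown(phba, LPFC_MBX_WAIT);
 *	lpfc_sli_mbox_sys_shutdown(phba, LPFC_MBX_NO_WAIT);
 */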
12306
12307/**
12308 * lpfc_sli_eratt_read - read sli-3 error attention events
12309 * @phba: Pointer to HBA context.
12310 *
12311 * This function is called to read the SLI3 device error attention registers
12312 * for possible error attention events. The caller must hold the hbalock
12313 * with spin_lock_irq().
12314 *
12315 * This function returns 1 when there is Error Attention in the Host Attention
12316 * Register and returns 0 otherwise.
12317 **/
12318static int
12319lpfc_sli_eratt_read(struct lpfc_hba *phba)
12320{
12321        uint32_t ha_copy;
12322
12323        /* Read chip Host Attention (HA) register */
12324        if (lpfc_readl(phba->HAregaddr, &ha_copy))
12325                goto unplug_err;
12326
12327        if (ha_copy & HA_ERATT) {
12328                /* Read host status register to retrieve error event */
12329                if (lpfc_sli_read_hs(phba))
12330                        goto unplug_err;
12331
12332                /* Check if a deferred error condition is active */
12333                if ((HS_FFER1 & phba->work_hs) &&
12334                    ((HS_FFER2 | HS_FFER3 | HS_FFER4 | HS_FFER5 |
12335                      HS_FFER6 | HS_FFER7 | HS_FFER8) & phba->work_hs)) {
12336                        phba->hba_flag |= DEFER_ERATT;
12337                        /* Clear all interrupt enable conditions */
12338                        writel(0, phba->HCregaddr);
12339                        readl(phba->HCregaddr);
12340                }
12341
12342                /* Set the driver HA work bitmap */
12343                phba->work_ha |= HA_ERATT;
12344                /* Indicate polling handles this ERATT */
12345                phba->hba_flag |= HBA_ERATT_HANDLED;
12346                return 1;
12347        }
12348        return 0;
12349
12350unplug_err:
12351        /* Set the driver HS work bitmap */
12352        phba->work_hs |= UNPLUG_ERR;
12353        /* Set the driver HA work bitmap */
12354        phba->work_ha |= HA_ERATT;
12355        /* Indicate polling handles this ERATT */
12356        phba->hba_flag |= HBA_ERATT_HANDLED;
12357        return 1;
12358}
12359
12360/**
12361 * lpfc_sli4_eratt_read - read sli-4 error attention events
12362 * @phba: Pointer to HBA context.
12363 *
12364 * This function is called to read the SLI4 device error attention registers
12365 * for possible error attention events. The caller must hold the hbalock
12366 * with spin_lock_irq().
12367 *
12368 * This function returns 1 when there is Error Attention in the Host Attention
12369 * Register and returns 0 otherwise.
12370 **/
12371static int
12372lpfc_sli4_eratt_read(struct lpfc_hba *phba)
12373{
12374        uint32_t uerr_sta_hi, uerr_sta_lo;
12375        uint32_t if_type, portsmphr;
12376        struct lpfc_register portstat_reg;
12377
12378        /*
12379         * For now, use the SLI4 device internal unrecoverable error
12380         * registers for error attention. This can be changed later.
12381         */
12382        if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
12383        switch (if_type) {
12384        case LPFC_SLI_INTF_IF_TYPE_0:
12385                if (lpfc_readl(phba->sli4_hba.u.if_type0.UERRLOregaddr,
12386                        &uerr_sta_lo) ||
12387                        lpfc_readl(phba->sli4_hba.u.if_type0.UERRHIregaddr,
12388                        &uerr_sta_hi)) {
12389                        phba->work_hs |= UNPLUG_ERR;
12390                        phba->work_ha |= HA_ERATT;
12391                        phba->hba_flag |= HBA_ERATT_HANDLED;
12392                        return 1;
12393                }
12394                if ((~phba->sli4_hba.ue_mask_lo & uerr_sta_lo) ||
12395                    (~phba->sli4_hba.ue_mask_hi & uerr_sta_hi)) {
12396                        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
12397                                        "1423 HBA Unrecoverable error: "
12398                                        "uerr_lo_reg=0x%x, uerr_hi_reg=0x%x, "
12399                                        "ue_mask_lo_reg=0x%x, "
12400                                        "ue_mask_hi_reg=0x%x\n",
12401                                        uerr_sta_lo, uerr_sta_hi,
12402                                        phba->sli4_hba.ue_mask_lo,
12403                                        phba->sli4_hba.ue_mask_hi);
12404                        phba->work_status[0] = uerr_sta_lo;
12405                        phba->work_status[1] = uerr_sta_hi;
12406                        phba->work_ha |= HA_ERATT;
12407                        phba->hba_flag |= HBA_ERATT_HANDLED;
12408                        return 1;
12409                }
12410                break;
12411        case LPFC_SLI_INTF_IF_TYPE_2:
12412        case LPFC_SLI_INTF_IF_TYPE_6:
12413                if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
12414                        &portstat_reg.word0) ||
12415                        lpfc_readl(phba->sli4_hba.PSMPHRregaddr,
12416                        &portsmphr)) {
12417                        phba->work_hs |= UNPLUG_ERR;
12418                        phba->work_ha |= HA_ERATT;
12419                        phba->hba_flag |= HBA_ERATT_HANDLED;
12420                        return 1;
12421                }
12422                if (bf_get(lpfc_sliport_status_err, &portstat_reg)) {
12423                        phba->work_status[0] =
12424                                readl(phba->sli4_hba.u.if_type2.ERR1regaddr);
12425                        phba->work_status[1] =
12426                                readl(phba->sli4_hba.u.if_type2.ERR2regaddr);
12427                        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
12428                                        "2885 Port Status Event: "
12429                                        "port status reg 0x%x, "
12430                                        "port smphr reg 0x%x, "
12431                                        "error 1=0x%x, error 2=0x%x\n",
12432                                        portstat_reg.word0,
12433                                        portsmphr,
12434                                        phba->work_status[0],
12435                                        phba->work_status[1]);
12436                        phba->work_ha |= HA_ERATT;
12437                        phba->hba_flag |= HBA_ERATT_HANDLED;
12438                        return 1;
12439                }
12440                break;
12441        case LPFC_SLI_INTF_IF_TYPE_1:
12442        default:
12443                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
12444                                "2886 HBA Error Attention on unsupported "
12445                                "if type %d.", if_type);
12446                return 1;
12447        }
12448
12449        return 0;
12450}
12451
12452/**
12453 * lpfc_sli_check_eratt - check error attention events
12454 * @phba: Pointer to HBA context.
12455 *
12456 * This function is called from timer soft interrupt context to check HBA's
12457 * error attention register bit for error attention events.
12458 *
12459 * This function returns 1 when there is Error Attention in the Host Attention
12460 * Register and returns 0 otherwise.
12461 **/
12462int
12463lpfc_sli_check_eratt(struct lpfc_hba *phba)
12464{
12465        uint32_t ha_copy;
12466
12467        /* If somebody is waiting to handle an eratt, don't process it
12468         * here. The brdkill function will do this.
12469         */
12470        if (phba->link_flag & LS_IGNORE_ERATT)
12471                return 0;
12472
12473        /* Check if interrupt handler handles this ERATT */
12474        spin_lock_irq(&phba->hbalock);
12475        if (phba->hba_flag & HBA_ERATT_HANDLED) {
12476                /* Interrupt handler has handled ERATT */
12477                spin_unlock_irq(&phba->hbalock);
12478                return 0;
12479        }
12480
12481        /*
12482         * If there is deferred error attention, do not check for error
12483         * attention
12484         */
12485        if (unlikely(phba->hba_flag & DEFER_ERATT)) {
12486                spin_unlock_irq(&phba->hbalock);
12487                return 0;
12488        }
12489
12490        /* If PCI channel is offline, don't process it */
12491        if (unlikely(pci_channel_offline(phba->pcidev))) {
12492                spin_unlock_irq(&phba->hbalock);
12493                return 0;
12494        }
12495
12496        switch (phba->sli_rev) {
12497        case LPFC_SLI_REV2:
12498        case LPFC_SLI_REV3:
12499                /* Read chip Host Attention (HA) register */
12500                ha_copy = lpfc_sli_eratt_read(phba);
12501                break;
12502        case LPFC_SLI_REV4:
12503                /* Read device Unrecoverable Error (UERR) registers */
12504                ha_copy = lpfc_sli4_eratt_read(phba);
12505                break;
12506        default:
12507                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
12508                                "0299 Invalid SLI revision (%d)\n",
12509                                phba->sli_rev);
12510                ha_copy = 0;
12511                break;
12512        }
12513        spin_unlock_irq(&phba->hbalock);
12514
12515        return ha_copy;
12516}
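
/*
 * Example (illustrative sketch only): a poll-timer body could use this
 * check and kick the worker thread when an error attention is found.
 *
 *	if (lpfc_sli_check_eratt(phba))
 *		lpfc_worker_wake_up(phba);
 */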
12517
12518/**
12519 * lpfc_intr_state_check - Check device state for interrupt handling
12520 * @phba: Pointer to HBA context.
12521 *
12522 * This inline routine checks whether a device or its PCI slot is in a state
12523 * in which the interrupt should be handled.
12524 *
12525 * This function returns 0 if the device or the PCI slot is in a state in
12526 * which the interrupt should be handled, otherwise -EIO.
12527 */
12528static inline int
12529lpfc_intr_state_check(struct lpfc_hba *phba)
12530{
12531        /* If the pci channel is offline, ignore all the interrupts */
12532        if (unlikely(pci_channel_offline(phba->pcidev)))
12533                return -EIO;
12534
12535        /* Update device level interrupt statistics */
12536        phba->sli.slistat.sli_intr++;
12537
12538        /* Ignore all interrupts during initialization. */
12539        if (unlikely(phba->link_state < LPFC_LINK_DOWN))
12540                return -EIO;
12541
12542        return 0;
12543}
12544
12545/**
12546 * lpfc_sli_sp_intr_handler - Slow-path interrupt handler to SLI-3 device
12547 * @irq: Interrupt number.
12548 * @dev_id: The device context pointer.
12549 *
12550 * This function is directly called from the PCI layer as an interrupt
12551 * service routine when device with SLI-3 interface spec is enabled with
12552 * MSI-X multi-message interrupt mode and there are slow-path events in
12553 * the HBA. However, when the device is enabled with either MSI or Pin-IRQ
12554 * interrupt mode, this function is called as part of the device-level
12555 * interrupt handler. When the PCI slot is in error recovery or the HBA
12556 * is undergoing initialization, the interrupt handler will not process
12557 * the interrupt. The link attention and ELS ring attention events are
12558 * handled by the worker thread. The interrupt handler signals the worker
12559 * thread and returns for these events. This function is called without
12560 * any lock held. It gets the hbalock to access and update SLI data
12561 * structures.
12562 *
12563 * This function returns IRQ_HANDLED when interrupt is handled else it
12564 * returns IRQ_NONE.
12565 **/
12566irqreturn_t
12567lpfc_sli_sp_intr_handler(int irq, void *dev_id)
12568{
12569        struct lpfc_hba  *phba;
12570        uint32_t ha_copy, hc_copy;
12571        uint32_t work_ha_copy;
12572        unsigned long status;
12573        unsigned long iflag;
12574        uint32_t control;
12575
12576        MAILBOX_t *mbox, *pmbox;
12577        struct lpfc_vport *vport;
12578        struct lpfc_nodelist *ndlp;
12579        struct lpfc_dmabuf *mp;
12580        LPFC_MBOXQ_t *pmb;
12581        int rc;
12582
12583        /*
12584         * Get the driver's phba structure from the dev_id and
12585         * assume the HBA is not interrupting.
12586         */
12587        phba = (struct lpfc_hba *)dev_id;
12588
12589        if (unlikely(!phba))
12590                return IRQ_NONE;
12591
12592        /*
12593         * Several things need attention when this function is invoked as an
12594         * individual interrupt handler in MSI-X multi-message interrupt mode
12595         */
12596        if (phba->intr_type == MSIX) {
12597                /* Check device state for handling interrupt */
12598                if (lpfc_intr_state_check(phba))
12599                        return IRQ_NONE;
12600                /* Need to read HA REG for slow-path events */
12601                spin_lock_irqsave(&phba->hbalock, iflag);
12602                if (lpfc_readl(phba->HAregaddr, &ha_copy))
12603                        goto unplug_error;
12604                /* If somebody is waiting to handle an eratt don't process it
12605                 * here. The brdkill function will do this.
12606                 */
12607                if (phba->link_flag & LS_IGNORE_ERATT)
12608                        ha_copy &= ~HA_ERATT;
12609                /* Check the need for handling ERATT in interrupt handler */
12610                if (ha_copy & HA_ERATT) {
12611                        if (phba->hba_flag & HBA_ERATT_HANDLED)
12612                                /* ERATT polling has handled ERATT */
12613                                ha_copy &= ~HA_ERATT;
12614                        else
12615                                /* Indicate interrupt handler handles ERATT */
12616                                phba->hba_flag |= HBA_ERATT_HANDLED;
12617                }
12618
12619                /*
12620                 * If there is deferred error attention, do not check for any
12621                 * interrupt.
12622                 */
12623                if (unlikely(phba->hba_flag & DEFER_ERATT)) {
12624                        spin_unlock_irqrestore(&phba->hbalock, iflag);
12625                        return IRQ_NONE;
12626                }
12627
12628                /* Clear only the attention sources related to the slow path */
12629                if (lpfc_readl(phba->HCregaddr, &hc_copy))
12630                        goto unplug_error;
12631
12632                writel(hc_copy & ~(HC_MBINT_ENA | HC_R2INT_ENA |
12633                        HC_LAINT_ENA | HC_ERINT_ENA),
12634                        phba->HCregaddr);
12635                writel((ha_copy & (HA_MBATT | HA_R2_CLR_MSK)),
12636                        phba->HAregaddr);
12637                writel(hc_copy, phba->HCregaddr);
12638                readl(phba->HAregaddr); /* flush */
12639                spin_unlock_irqrestore(&phba->hbalock, iflag);
12640        } else
12641                ha_copy = phba->ha_copy;
12642
12643        work_ha_copy = ha_copy & phba->work_ha_mask;
12644
12645        if (work_ha_copy) {
12646                if (work_ha_copy & HA_LATT) {
12647                        if (phba->sli.sli_flag & LPFC_PROCESS_LA) {
12648                                /*
12649                                 * Turn off Link Attention interrupts
12650                                 * until CLEAR_LA done
12651                                 */
12652                                spin_lock_irqsave(&phba->hbalock, iflag);
12653                                phba->sli.sli_flag &= ~LPFC_PROCESS_LA;
12654                                if (lpfc_readl(phba->HCregaddr, &control))
12655                                        goto unplug_error;
12656                                control &= ~HC_LAINT_ENA;
12657                                writel(control, phba->HCregaddr);
12658                                readl(phba->HCregaddr); /* flush */
12659                                spin_unlock_irqrestore(&phba->hbalock, iflag);
12660                        }
12661                        else
12662                                work_ha_copy &= ~HA_LATT;
12663                }
12664
12665                if (work_ha_copy & ~(HA_ERATT | HA_MBATT | HA_LATT)) {
12666                        /*
12667                         * Turn off Slow Rings interrupts, LPFC_ELS_RING is
12668                         * the only slow ring.
12669                         */
12670                        status = (work_ha_copy &
12671                                (HA_RXMASK  << (4*LPFC_ELS_RING)));
12672                        status >>= (4*LPFC_ELS_RING);
12673                        if (status & HA_RXMASK) {
12674                                spin_lock_irqsave(&phba->hbalock, iflag);
12675                                if (lpfc_readl(phba->HCregaddr, &control))
12676                                        goto unplug_error;
12677
12678                                lpfc_debugfs_slow_ring_trc(phba,
12679                                "ISR slow ring:   ctl:x%x stat:x%x isrcnt:x%x",
12680                                control, status,
12681                                (uint32_t)phba->sli.slistat.sli_intr);
12682
12683                                if (control & (HC_R0INT_ENA << LPFC_ELS_RING)) {
12684                                        lpfc_debugfs_slow_ring_trc(phba,
12685                                                "ISR Disable ring:"
12686                                                "pwork:x%x hawork:x%x wait:x%x",
12687                                                phba->work_ha, work_ha_copy,
12688                                                (uint32_t)((unsigned long)
12689                                                &phba->work_waitq));
12690
12691                                        control &=
12692                                            ~(HC_R0INT_ENA << LPFC_ELS_RING);
12693                                        writel(control, phba->HCregaddr);
12694                                        readl(phba->HCregaddr); /* flush */
12695                                }
12696                                else {
12697                                        lpfc_debugfs_slow_ring_trc(phba,
12698                                                "ISR slow ring:   pwork:"
12699                                                "x%x hawork:x%x wait:x%x",
12700                                                phba->work_ha, work_ha_copy,
12701                                                (uint32_t)((unsigned long)
12702                                                &phba->work_waitq));
12703                                }
12704                                spin_unlock_irqrestore(&phba->hbalock, iflag);
12705                        }
12706                }
12707                spin_lock_irqsave(&phba->hbalock, iflag);
12708                if (work_ha_copy & HA_ERATT) {
12709                        if (lpfc_sli_read_hs(phba))
12710                                goto unplug_error;
12711                        /*
12712                         * Check if a deferred error condition is active
12714                         */
12715                        if ((HS_FFER1 & phba->work_hs) &&
12716                                ((HS_FFER2 | HS_FFER3 | HS_FFER4 | HS_FFER5 |
12717                                  HS_FFER6 | HS_FFER7 | HS_FFER8) &
12718                                  phba->work_hs)) {
12719                                phba->hba_flag |= DEFER_ERATT;
12720                                /* Clear all interrupt enable conditions */
12721                                writel(0, phba->HCregaddr);
12722                                readl(phba->HCregaddr);
12723                        }
12724                }
12725
12726                if ((work_ha_copy & HA_MBATT) && (phba->sli.mbox_active)) {
12727                        pmb = phba->sli.mbox_active;
12728                        pmbox = &pmb->u.mb;
12729                        mbox = phba->mbox;
12730                        vport = pmb->vport;
12731
12732                        /* First check out the status word */
12733                        lpfc_sli_pcimem_bcopy(mbox, pmbox, sizeof(uint32_t));
12734                        if (pmbox->mbxOwner != OWN_HOST) {
12735                                spin_unlock_irqrestore(&phba->hbalock, iflag);
12736                                /*
12737                                 * Stray Mailbox Interrupt, mbxCommand <cmd>
12738                                 * mbxStatus <status>
12739                                 */
12740                                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
12741                                                "(%d):0304 Stray Mailbox "
12742                                                "Interrupt mbxCommand x%x "
12743                                                "mbxStatus x%x\n",
12744                                                (vport ? vport->vpi : 0),
12745                                                pmbox->mbxCommand,
12746                                                pmbox->mbxStatus);
12747                                /* clear mailbox attention bit */
12748                                work_ha_copy &= ~HA_MBATT;
12749                        } else {
12750                                phba->sli.mbox_active = NULL;
12751                                spin_unlock_irqrestore(&phba->hbalock, iflag);
12752                                phba->last_completion_time = jiffies;
12753                                del_timer(&phba->sli.mbox_tmo);
12754                                if (pmb->mbox_cmpl) {
12755                                        lpfc_sli_pcimem_bcopy(mbox, pmbox,
12756                                                        MAILBOX_CMD_SIZE);
12757                                        if (pmb->out_ext_byte_len &&
12758                                                pmb->ctx_buf)
12759                                                lpfc_sli_pcimem_bcopy(
12760                                                phba->mbox_ext,
12761                                                pmb->ctx_buf,
12762                                                pmb->out_ext_byte_len);
12763                                }
12764                                if (pmb->mbox_flag & LPFC_MBX_IMED_UNREG) {
12765                                        pmb->mbox_flag &= ~LPFC_MBX_IMED_UNREG;
12766
12767                                        lpfc_debugfs_disc_trc(vport,
12768                                                LPFC_DISC_TRC_MBOX_VPORT,
12769                                                "MBOX dflt rpi: : "
12770                                                "status:x%x rpi:x%x",
12771                                                (uint32_t)pmbox->mbxStatus,
12772                                                pmbox->un.varWords[0], 0);
12773
12774                                        if (!pmbox->mbxStatus) {
12775                                                mp = (struct lpfc_dmabuf *)
12776                                                        (pmb->ctx_buf);
12777                                                ndlp = (struct lpfc_nodelist *)
12778                                                        pmb->ctx_ndlp;
12779
12780                                                /* Reg_LOGIN of dflt RPI was
12781                                                 * successful. Now let's get
12782                                                 * rid of the RPI using the
12783                                                 * same mbox buffer.
12784                                                 */
12785                                                lpfc_unreg_login(phba,
12786                                                        vport->vpi,
12787                                                        pmbox->un.varWords[0],
12788                                                        pmb);
12789                                                pmb->mbox_cmpl =
12790                                                        lpfc_mbx_cmpl_dflt_rpi;
12791                                                pmb->ctx_buf = mp;
12792                                                pmb->ctx_ndlp = ndlp;
12793                                                pmb->vport = vport;
12794                                                rc = lpfc_sli_issue_mbox(phba,
12795                                                                pmb,
12796                                                                MBX_NOWAIT);
12797                                                if (rc != MBX_BUSY)
12798                                                        lpfc_printf_log(phba,
12799                                                        KERN_ERR,
12800                                                        LOG_TRACE_EVENT,
12801                                                        "0350 rc should have"
12802                                                        "been MBX_BUSY\n");
12803                                                if (rc != MBX_NOT_FINISHED)
12804                                                        goto send_current_mbox;
12805                                        }
12806                                }
12807                                spin_lock_irqsave(
12808                                                &phba->pport->work_port_lock,
12809                                                iflag);
12810                                phba->pport->work_port_events &=
12811                                        ~WORKER_MBOX_TMO;
12812                                spin_unlock_irqrestore(
12813                                                &phba->pport->work_port_lock,
12814                                                iflag);
12815                                lpfc_mbox_cmpl_put(phba, pmb);
12816                        }
12817                } else
12818                        spin_unlock_irqrestore(&phba->hbalock, iflag);
12819
12820                if ((work_ha_copy & HA_MBATT) &&
12821                    (phba->sli.mbox_active == NULL)) {
12822send_current_mbox:
12823                        /* Process next mailbox command if there is one */
12824                        do {
12825                                rc = lpfc_sli_issue_mbox(phba, NULL,
12826                                                         MBX_NOWAIT);
12827                        } while (rc == MBX_NOT_FINISHED);
12828                        if (rc != MBX_SUCCESS)
12829                                lpfc_printf_log(phba, KERN_ERR,
12830                                                LOG_TRACE_EVENT,
12831                                                "0349 rc should be "
12832                                                "MBX_SUCCESS\n");
12833                }
12834
12835                spin_lock_irqsave(&phba->hbalock, iflag);
12836                phba->work_ha |= work_ha_copy;
12837                spin_unlock_irqrestore(&phba->hbalock, iflag);
12838                lpfc_worker_wake_up(phba);
12839        }
12840        return IRQ_HANDLED;
12841unplug_error:
12842        spin_unlock_irqrestore(&phba->hbalock, iflag);
12843        return IRQ_HANDLED;
12844
12845} /* lpfc_sli_sp_intr_handler */
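
/*
 * Illustrative sketch only -- this helper is hypothetical and is not
 * called anywhere in the driver.  It restates the mask/ack/restore
 * idiom used by the MSI-X branch of the slow-path handler above: mask
 * the slow-path enables in the HC register, acknowledge the pending
 * mailbox and ring-2 attentions in the HA register, then restore the
 * original HC value and flush.  In the real handler this sequence
 * runs under hbalock.
 */
static void
lpfc_sketch_ack_slow_path(struct lpfc_hba *phba, uint32_t ha_copy)
{
        uint32_t hc_copy;

        if (lpfc_readl(phba->HCregaddr, &hc_copy))
                return; /* adapter unplugged */
        /* Mask slow-path interrupt sources so they cannot refire mid-ack */
        writel(hc_copy & ~(HC_MBINT_ENA | HC_R2INT_ENA |
                HC_LAINT_ENA | HC_ERINT_ENA), phba->HCregaddr);
        /* Acknowledge only the mailbox and ring-2 attention bits */
        writel(ha_copy & (HA_MBATT | HA_R2_CLR_MSK), phba->HAregaddr);
        /* Restore the saved enables and flush the posted writes */
        writel(hc_copy, phba->HCregaddr);
        readl(phba->HAregaddr); /* flush */
}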
12846
12847/**
12848 * lpfc_sli_fp_intr_handler - Fast-path interrupt handler to SLI-3 device.
12849 * @irq: Interrupt number.
12850 * @dev_id: The device context pointer.
12851 *
12852 * This function is directly called from the PCI layer as an interrupt
12853 * service routine when device with SLI-3 interface spec is enabled with
12854 * MSI-X multi-message interrupt mode and there is a fast-path FCP IOCB
12855 * ring event in the HBA. However, when the device is enabled with either
12856 * MSI or Pin-IRQ interrupt mode, this function is called as part of the
12857 * device-level interrupt handler. When the PCI slot is in error recovery
12858 * or the HBA is undergoing initialization, the interrupt handler will not
12859 * process the interrupt. The SCSI FCP fast-path ring events are handled in
12860 * the interrupt context. This function is called without any lock held.
12861 * It gets the hbalock to access and update SLI data structures.
12862 *
12863 * This function returns IRQ_HANDLED when interrupt is handled, else it
12864 * returns IRQ_NONE.
12865 **/
12866irqreturn_t
12867lpfc_sli_fp_intr_handler(int irq, void *dev_id)
12868{
12869        struct lpfc_hba  *phba;
12870        uint32_t ha_copy;
12871        unsigned long status;
12872        unsigned long iflag;
12873        struct lpfc_sli_ring *pring;
12874
12875        /* Get the driver's phba structure from the dev_id and
12876         * assume the HBA is not interrupting.
12877         */
12878        phba = (struct lpfc_hba *) dev_id;
12879
12880        if (unlikely(!phba))
12881                return IRQ_NONE;
12882
12883        /*
12884         * Stuff needs to be attended to when this function is invoked as an
12885         * individual interrupt handler in MSI-X multi-message interrupt mode
12886         */
12887        if (phba->intr_type == MSIX) {
12888                /* Check device state for handling interrupt */
12889                if (lpfc_intr_state_check(phba))
12890                        return IRQ_NONE;
12891                /* Need to read HA REG for FCP ring and other ring events */
12892                if (lpfc_readl(phba->HAregaddr, &ha_copy))
12893                        return IRQ_HANDLED;
12894                /* Clear up only attention source related to fast-path */
12895                spin_lock_irqsave(&phba->hbalock, iflag);
12896                /*
12897                 * If there is deferred error attention, do not check for
12898                 * any interrupt.
12899                 */
12900                if (unlikely(phba->hba_flag & DEFER_ERATT)) {
12901                        spin_unlock_irqrestore(&phba->hbalock, iflag);
12902                        return IRQ_NONE;
12903                }
12904                writel((ha_copy & (HA_R0_CLR_MSK | HA_R1_CLR_MSK)),
12905                        phba->HAregaddr);
12906                readl(phba->HAregaddr); /* flush */
12907                spin_unlock_irqrestore(&phba->hbalock, iflag);
12908        } else
12909                ha_copy = phba->ha_copy;
12910
12911        /*
12912         * Process all events on FCP ring. Take the optimized path for FCP IO.
12913         */
12914        ha_copy &= ~(phba->work_ha_mask);
12915
12916        status = (ha_copy & (HA_RXMASK << (4*LPFC_FCP_RING)));
12917        status >>= (4*LPFC_FCP_RING);
12918        pring = &phba->sli.sli3_ring[LPFC_FCP_RING];
12919        if (status & HA_RXMASK)
12920                lpfc_sli_handle_fast_ring_event(phba, pring, status);
12921
12922        if (phba->cfg_multi_ring_support == 2) {
12923                /*
12924                 * Process all events on extra ring. Take the optimized path
12925                 * for extra ring IO.
12926                 */
12927                status = (ha_copy & (HA_RXMASK << (4*LPFC_EXTRA_RING)));
12928                status >>= (4*LPFC_EXTRA_RING);
12929                if (status & HA_RXMASK) {
12930                        lpfc_sli_handle_fast_ring_event(phba,
12931                                        &phba->sli.sli3_ring[LPFC_EXTRA_RING],
12932                                        status);
12933                }
12934        }
12935        return IRQ_HANDLED;
12936}  /* lpfc_sli_fp_intr_handler */
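
/*
 * Minimal sketch, assuming the 4-bit-per-ring layout of the host
 * attention word used throughout this file: a ring's RX attention
 * bits are recovered by shifting by 4*ring and masking with
 * HA_RXMASK.  This helper is hypothetical and exists only to
 * illustrate the decode performed inline above.
 */
static inline unsigned long
lpfc_sketch_ring_rx_status(uint32_t ha_copy, int ring)
{
        /* e.g. ring == LPFC_FCP_RING or LPFC_EXTRA_RING */
        return (ha_copy >> (4 * ring)) & HA_RXMASK;
}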
12937
12938/**
12939 * lpfc_sli_intr_handler - Device-level interrupt handler to SLI-3 device
12940 * @irq: Interrupt number.
12941 * @dev_id: The device context pointer.
12942 *
12943 * This function is the HBA device-level interrupt handler to device with
12944 * SLI-3 interface spec, called from the PCI layer when either MSI or
12945 * Pin-IRQ interrupt mode is enabled and there is an event in the HBA which
12946 * requires driver attention. This function invokes the slow-path interrupt
12947 * attention handling function and fast-path interrupt attention handling
12948 * function in turn to process the relevant HBA attention events. This
12949 * function is called without any lock held. It gets the hbalock to access
12950 * and update SLI data structures.
12951 *
12952 * This function returns IRQ_HANDLED when interrupt is handled, else it
12953 * returns IRQ_NONE.
12954 **/
12955irqreturn_t
12956lpfc_sli_intr_handler(int irq, void *dev_id)
12957{
12958        struct lpfc_hba  *phba;
12959        irqreturn_t sp_irq_rc, fp_irq_rc;
12960        unsigned long status1, status2;
12961        uint32_t hc_copy;
12962
12963        /*
12964         * Get the driver's phba structure from the dev_id and
12965         * assume the HBA is not interrupting.
12966         */
12967        phba = (struct lpfc_hba *) dev_id;
12968
12969        if (unlikely(!phba))
12970                return IRQ_NONE;
12971
12972        /* Check device state for handling interrupt */
12973        if (lpfc_intr_state_check(phba))
12974                return IRQ_NONE;
12975
12976        spin_lock(&phba->hbalock);
12977        if (lpfc_readl(phba->HAregaddr, &phba->ha_copy)) {
12978                spin_unlock(&phba->hbalock);
12979                return IRQ_HANDLED;
12980        }
12981
12982        if (unlikely(!phba->ha_copy)) {
12983                spin_unlock(&phba->hbalock);
12984                return IRQ_NONE;
12985        } else if (phba->ha_copy & HA_ERATT) {
12986                if (phba->hba_flag & HBA_ERATT_HANDLED)
12987                        /* ERATT polling has handled ERATT */
12988                        phba->ha_copy &= ~HA_ERATT;
12989                else
12990                        /* Indicate interrupt handler handles ERATT */
12991                        phba->hba_flag |= HBA_ERATT_HANDLED;
12992        }
12993
12994        /*
12995         * If there is deferred error attention, do not check for any interrupt.
12996         */
12997        if (unlikely(phba->hba_flag & DEFER_ERATT)) {
12998                spin_unlock(&phba->hbalock);
12999                return IRQ_NONE;
13000        }
13001
13002        /* Clear attention sources except link and error attentions */
13003        if (lpfc_readl(phba->HCregaddr, &hc_copy)) {
13004                spin_unlock(&phba->hbalock);
13005                return IRQ_HANDLED;
13006        }
13007        writel(hc_copy & ~(HC_MBINT_ENA | HC_R0INT_ENA | HC_R1INT_ENA
13008                | HC_R2INT_ENA | HC_LAINT_ENA | HC_ERINT_ENA),
13009                phba->HCregaddr);
13010        writel((phba->ha_copy & ~(HA_LATT | HA_ERATT)), phba->HAregaddr);
13011        writel(hc_copy, phba->HCregaddr);
13012        readl(phba->HAregaddr); /* flush */
13013        spin_unlock(&phba->hbalock);
13014
13015        /*
13016         * Invoke slow-path host attention interrupt handling as appropriate.
13017         */
13018
13019        /* status of events with mailbox and link attention */
13020        status1 = phba->ha_copy & (HA_MBATT | HA_LATT | HA_ERATT);
13021
13022        /* status of events with ELS ring */
13023        status2 = (phba->ha_copy & (HA_RXMASK  << (4*LPFC_ELS_RING)));
13024        status2 >>= (4*LPFC_ELS_RING);
13025
13026        if (status1 || (status2 & HA_RXMASK))
13027                sp_irq_rc = lpfc_sli_sp_intr_handler(irq, dev_id);
13028        else
13029                sp_irq_rc = IRQ_NONE;
13030
13031        /*
13032         * Invoke fast-path host attention interrupt handling as appropriate.
13033         */
13034
13035        /* status of events with FCP ring */
13036        status1 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_FCP_RING)));
13037        status1 >>= (4*LPFC_FCP_RING);
13038
13039        /* status of events with extra ring */
13040        if (phba->cfg_multi_ring_support == 2) {
13041                status2 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_EXTRA_RING)));
13042                status2 >>= (4*LPFC_EXTRA_RING);
13043        } else
13044                status2 = 0;
13045
13046        if ((status1 & HA_RXMASK) || (status2 & HA_RXMASK))
13047                fp_irq_rc = lpfc_sli_fp_intr_handler(irq, dev_id);
13048        else
13049                fp_irq_rc = IRQ_NONE;
13050
13051        /* Return device-level interrupt handling status */
13052        return (sp_irq_rc == IRQ_HANDLED) ? sp_irq_rc : fp_irq_rc;
13053}  /* lpfc_sli_intr_handler */
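
/*
 * Sketch of the return-code merge done by the device-level handler
 * above: the interrupt is reported as handled if either the slow-path
 * or the fast-path handler consumed work.  Hypothetical helper shown
 * for illustration only.
 */
static inline irqreturn_t
lpfc_sketch_merge_irq_rc(irqreturn_t sp_irq_rc, irqreturn_t fp_irq_rc)
{
        return (sp_irq_rc == IRQ_HANDLED) ? sp_irq_rc : fp_irq_rc;
}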
13054
13055/**
13056 * lpfc_sli4_els_xri_abort_event_proc - Process els xri abort event
13057 * @phba: pointer to lpfc hba data structure.
13058 *
13059 * This routine is invoked by the worker thread to process all the pending
13060 * SLI4 els abort xri events.
13061 **/
13062void lpfc_sli4_els_xri_abort_event_proc(struct lpfc_hba *phba)
13063{
13064        struct lpfc_cq_event *cq_event;
13065
13066        /* First, declare the els xri abort event has been handled */
13067        spin_lock_irq(&phba->hbalock);
13068        phba->hba_flag &= ~ELS_XRI_ABORT_EVENT;
13069        spin_unlock_irq(&phba->hbalock);
13070        /* Now, handle all the els xri abort events */
13071        while (!list_empty(&phba->sli4_hba.sp_els_xri_aborted_work_queue)) {
13072                /* Get the first event from the head of the event queue */
13073                spin_lock_irq(&phba->hbalock);
13074                list_remove_head(&phba->sli4_hba.sp_els_xri_aborted_work_queue,
13075                                 cq_event, struct lpfc_cq_event, list);
13076                spin_unlock_irq(&phba->hbalock);
13077                /* Notify aborted XRI for ELS work queue */
13078                lpfc_sli4_els_xri_aborted(phba, &cq_event->cqe.wcqe_axri);
13079                /* Free the event processed back to the free pool */
13080                lpfc_sli4_cq_event_release(phba, cq_event);
13081        }
13082}
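
/*
 * The loop above is an instance of a common drain pattern: detach one
 * node under the lock, then process it with the lock dropped so the
 * per-event work cannot deadlock against producers.  A minimal sketch
 * of the same pattern using the generic list helpers; the helper name
 * is hypothetical.
 */
static void
lpfc_sketch_drain_events(struct lpfc_hba *phba, struct list_head *q)
{
        struct lpfc_cq_event *cq_event;

        for (;;) {
                spin_lock_irq(&phba->hbalock);
                if (list_empty(q)) {
                        spin_unlock_irq(&phba->hbalock);
                        break;
                }
                cq_event = list_first_entry(q, struct lpfc_cq_event, list);
                list_del_init(&cq_event->list);
                spin_unlock_irq(&phba->hbalock);
                /* Process outside the lock, then return to the free pool */
                lpfc_sli4_cq_event_release(phba, cq_event);
        }
}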
13083
13084/**
13085 * lpfc_sli4_iocb_param_transfer - Transfer pIocbOut and cmpl status to pIocbIn
13086 * @phba: pointer to lpfc hba data structure
13087 * @pIocbIn: pointer to the rspiocbq
13088 * @pIocbOut: pointer to the cmdiocbq
13089 * @wcqe: pointer to the complete wcqe
13090 *
13091 * This routine transfers the fields of a command iocbq to a response iocbq
13092 * by copying all the IOCB fields from command iocbq and transferring the
13093 * completion status information from the complete wcqe.
13094 **/
13095static void
13096lpfc_sli4_iocb_param_transfer(struct lpfc_hba *phba,
13097                              struct lpfc_iocbq *pIocbIn,
13098                              struct lpfc_iocbq *pIocbOut,
13099                              struct lpfc_wcqe_complete *wcqe)
13100{
13101        int numBdes, i;
13102        unsigned long iflags;
13103        uint32_t status, max_response;
13104        struct lpfc_dmabuf *dmabuf;
13105        struct ulp_bde64 *bpl, bde;
13106        size_t offset = offsetof(struct lpfc_iocbq, iocb);
13107
13108        memcpy((char *)pIocbIn + offset, (char *)pIocbOut + offset,
13109               sizeof(struct lpfc_iocbq) - offset);
13110        /* Map WCQE parameters into irspiocb parameters */
13111        status = bf_get(lpfc_wcqe_c_status, wcqe);
13112        pIocbIn->iocb.ulpStatus = (status & LPFC_IOCB_STATUS_MASK);
13113        if (pIocbOut->iocb_flag & LPFC_IO_FCP)
13114                if (pIocbIn->iocb.ulpStatus == IOSTAT_FCP_RSP_ERROR)
13115                        pIocbIn->iocb.un.fcpi.fcpi_parm =
13116                                        pIocbOut->iocb.un.fcpi.fcpi_parm -
13117                                        wcqe->total_data_placed;
13118                else
13119                        pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter;
13120        else {
13121                pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter;
13122                switch (pIocbOut->iocb.ulpCommand) {
13123                case CMD_ELS_REQUEST64_CR:
13124                        dmabuf = (struct lpfc_dmabuf *)pIocbOut->context3;
13125                        bpl  = (struct ulp_bde64 *)dmabuf->virt;
13126                        bde.tus.w = le32_to_cpu(bpl[1].tus.w);
13127                        max_response = bde.tus.f.bdeSize;
13128                        break;
13129                case CMD_GEN_REQUEST64_CR:
13130                        max_response = 0;
13131                        if (!pIocbOut->context3)
13132                                break;
13133                        numBdes = pIocbOut->iocb.un.genreq64.bdl.bdeSize/
13134                                        sizeof(struct ulp_bde64);
13135                        dmabuf = (struct lpfc_dmabuf *)pIocbOut->context3;
13136                        bpl = (struct ulp_bde64 *)dmabuf->virt;
13137                        for (i = 0; i < numBdes; i++) {
13138                                bde.tus.w = le32_to_cpu(bpl[i].tus.w);
13139                                if (bde.tus.f.bdeFlags != BUFF_TYPE_BDE_64)
13140                                        max_response += bde.tus.f.bdeSize;
13141                        }
13142                        break;
13143                default:
13144                        max_response = wcqe->total_data_placed;
13145                        break;
13146                }
13147                if (max_response < wcqe->total_data_placed)
13148                        pIocbIn->iocb.un.genreq64.bdl.bdeSize = max_response;
13149                else
13150                        pIocbIn->iocb.un.genreq64.bdl.bdeSize =
13151                                wcqe->total_data_placed;
13152        }
13153
13154        /* Convert BG errors for completion status */
13155        if (status == CQE_STATUS_DI_ERROR) {
13156                pIocbIn->iocb.ulpStatus = IOSTAT_LOCAL_REJECT;
13157
13158                if (bf_get(lpfc_wcqe_c_bg_edir, wcqe))
13159                        pIocbIn->iocb.un.ulpWord[4] = IOERR_RX_DMA_FAILED;
13160                else
13161                        pIocbIn->iocb.un.ulpWord[4] = IOERR_TX_DMA_FAILED;
13162
13163                pIocbIn->iocb.unsli3.sli3_bg.bgstat = 0;
13164                if (bf_get(lpfc_wcqe_c_bg_ge, wcqe)) /* Guard Check failed */
13165                        pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
13166                                BGS_GUARD_ERR_MASK;
13167                if (bf_get(lpfc_wcqe_c_bg_ae, wcqe)) /* App Tag Check failed */
13168                        pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
13169                                BGS_APPTAG_ERR_MASK;
13170                if (bf_get(lpfc_wcqe_c_bg_re, wcqe)) /* Ref Tag Check failed */
13171                        pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
13172                                BGS_REFTAG_ERR_MASK;
13173
13174                /* Check to see if there was any good data before the error */
13175                if (bf_get(lpfc_wcqe_c_bg_tdpv, wcqe)) {
13176                        pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
13177                                BGS_HI_WATER_MARK_PRESENT_MASK;
13178                        pIocbIn->iocb.unsli3.sli3_bg.bghm =
13179                                wcqe->total_data_placed;
13180                }
13181
13182                /*
13183                 * Set ALL the error bits to indicate we don't know what
13184                 * type of error it is.
13185                 */
13186                if (!pIocbIn->iocb.unsli3.sli3_bg.bgstat)
13187                        pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
13188                                (BGS_REFTAG_ERR_MASK | BGS_APPTAG_ERR_MASK |
13189                                BGS_GUARD_ERR_MASK);
13190        }
13191
13192        /* Pick up HBA exchange busy condition */
13193        if (bf_get(lpfc_wcqe_c_xb, wcqe)) {
13194                spin_lock_irqsave(&phba->hbalock, iflags);
13195                pIocbIn->iocb_flag |= LPFC_EXCHANGE_BUSY;
13196                spin_unlock_irqrestore(&phba->hbalock, iflags);
13197        }
13198}
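
/*
 * The memcpy at the top of lpfc_sli4_iocb_param_transfer() uses an
 * offsetof()-based partial copy: everything at or after the iocb
 * member is copied, while the leading bookkeeping members (list head,
 * etc.) of the destination are preserved.  A minimal sketch of the
 * same idiom on a hypothetical structure:
 */
struct lpfc_sketch_obj {
        struct list_head list;  /* preserved across the copy */
        uint32_t a;             /* copied */
        uint32_t b;             /* copied */
};

static inline void
lpfc_sketch_copy_tail(struct lpfc_sketch_obj *dst,
                      const struct lpfc_sketch_obj *src)
{
        size_t offset = offsetof(struct lpfc_sketch_obj, a);

        memcpy((char *)dst + offset, (const char *)src + offset,
               sizeof(*src) - offset);
}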
13199
13200/**
13201 * lpfc_sli4_els_wcqe_to_rspiocbq - Get response iocbq from els wcqe
13202 * @phba: Pointer to HBA context object.
13203 * @irspiocbq: Pointer to work-queue completion queue entry.
13204 *
13205 * This routine handles an ELS work-queue completion event and constructs
13206 * a pseudo response ELS IOCBQ from the SLI4 ELS WCQE for the common
13207 * discovery engine to handle.
13208 *
13209 * Return: Pointer to the receive IOCBQ, NULL otherwise.
13210 **/
13211static struct lpfc_iocbq *
13212lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *phba,
13213                               struct lpfc_iocbq *irspiocbq)
13214{
13215        struct lpfc_sli_ring *pring;
13216        struct lpfc_iocbq *cmdiocbq;
13217        struct lpfc_wcqe_complete *wcqe;
13218        unsigned long iflags;
13219
13220        pring = lpfc_phba_elsring(phba);
13221        if (unlikely(!pring))
13222                return NULL;
13223
13224        wcqe = &irspiocbq->cq_event.cqe.wcqe_cmpl;
13225        pring->stats.iocb_event++;
13226        /* Look up the ELS command IOCB and create pseudo response IOCB */
13227        cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring,
13228                                bf_get(lpfc_wcqe_c_request_tag, wcqe));
13229        if (unlikely(!cmdiocbq)) {
13230                lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
13231                                "0386 ELS complete with no corresponding "
13232                                "cmdiocb: 0x%x 0x%x 0x%x 0x%x\n",
13233                                wcqe->word0, wcqe->total_data_placed,
13234                                wcqe->parameter, wcqe->word3);
13235                lpfc_sli_release_iocbq(phba, irspiocbq);
13236                return NULL;
13237        }
13238
13239        spin_lock_irqsave(&pring->ring_lock, iflags);
13240        /* Put the iocb back on the txcmplq */
13241        lpfc_sli_ringtxcmpl_put(phba, pring, cmdiocbq);
13242        spin_unlock_irqrestore(&pring->ring_lock, iflags);
13243
13244        /* Fake the irspiocbq and copy necessary response information */
13245        lpfc_sli4_iocb_param_transfer(phba, irspiocbq, cmdiocbq, wcqe);
13246
13247        return irspiocbq;
13248}
13249
13250inline struct lpfc_cq_event *
13251lpfc_cq_event_setup(struct lpfc_hba *phba, void *entry, int size)
13252{
13253        struct lpfc_cq_event *cq_event;
13254
13255        /* Allocate a new internal CQ_EVENT entry */
13256        cq_event = lpfc_sli4_cq_event_alloc(phba);
13257        if (!cq_event) {
13258                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13259                                "0602 Failed to alloc CQ_EVENT entry\n");
13260                return NULL;
13261        }
13262
13263        /* Move the CQE into the event */
13264        memcpy(&cq_event->cqe, entry, size);
13265        return cq_event;
13266}
13267
13268/**
13269 * lpfc_sli4_sp_handle_async_event - Handle an asynchronous event
13270 * @phba: Pointer to HBA context object.
13271 * @mcqe: Pointer to mailbox completion queue entry.
13272 *
13273 * This routine processes a mailbox completion queue entry with an
13274 * asynchronous event.
13275 *
13276 * Return: true if work posted to worker thread, otherwise false.
13277 **/
13278static bool
13279lpfc_sli4_sp_handle_async_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe)
13280{
13281        struct lpfc_cq_event *cq_event;
13282        unsigned long iflags;
13283
13284        lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
13285                        "0392 Async Event: word0:x%x, word1:x%x, "
13286                        "word2:x%x, word3:x%x\n", mcqe->word0,
13287                        mcqe->mcqe_tag0, mcqe->mcqe_tag1, mcqe->trailer);
13288
13289        cq_event = lpfc_cq_event_setup(phba, mcqe, sizeof(struct lpfc_mcqe));
13290        if (!cq_event)
13291                return false;
13292        spin_lock_irqsave(&phba->hbalock, iflags);
13293        list_add_tail(&cq_event->list, &phba->sli4_hba.sp_asynce_work_queue);
13294        /* Set the async event flag */
13295        phba->hba_flag |= ASYNC_EVENT;
13296        spin_unlock_irqrestore(&phba->hbalock, iflags);
13297
13298        return true;
13299}
13300
13301/**
13302 * lpfc_sli4_sp_handle_mbox_event - Handle a mailbox completion event
13303 * @phba: Pointer to HBA context object.
13304 * @mcqe: Pointer to mailbox completion queue entry.
13305 *
13306 * This routine processes a mailbox completion queue entry with a mailbox
13307 * completion event.
13308 *
13309 * Return: true if work posted to worker thread, otherwise false.
13310 **/
13311static bool
13312lpfc_sli4_sp_handle_mbox_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe)
13313{
13314        uint32_t mcqe_status;
13315        MAILBOX_t *mbox, *pmbox;
13316        struct lpfc_mqe *mqe;
13317        struct lpfc_vport *vport;
13318        struct lpfc_nodelist *ndlp;
13319        struct lpfc_dmabuf *mp;
13320        unsigned long iflags;
13321        LPFC_MBOXQ_t *pmb;
13322        bool workposted = false;
13323        int rc;
13324
13325        /* If not a mailbox complete MCQE, bail out after checking mailbox consume */
13326        if (!bf_get(lpfc_trailer_completed, mcqe))
13327                goto out_no_mqe_complete;
13328
13329        /* Get the reference to the active mbox command */
13330        spin_lock_irqsave(&phba->hbalock, iflags);
13331        pmb = phba->sli.mbox_active;
13332        if (unlikely(!pmb)) {
13333                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13334                                "1832 No pending MBOX command to handle\n");
13335                spin_unlock_irqrestore(&phba->hbalock, iflags);
13336                goto out_no_mqe_complete;
13337        }
13338        spin_unlock_irqrestore(&phba->hbalock, iflags);
13339        mqe = &pmb->u.mqe;
13340        pmbox = (MAILBOX_t *)&pmb->u.mqe;
13341        mbox = phba->mbox;
13342        vport = pmb->vport;
13343
13344        /* Reset heartbeat timer */
13345        phba->last_completion_time = jiffies;
13346        del_timer(&phba->sli.mbox_tmo);
13347
13348        /* Move mbox data to caller's mailbox region, do endian swapping */
13349        if (pmb->mbox_cmpl && mbox)
13350                lpfc_sli4_pcimem_bcopy(mbox, mqe, sizeof(struct lpfc_mqe));
13351
13352        /*
13353         * For mcqe errors, conditionally move a modified error code to
13354         * the mbox so that the error will not be missed.
13355         */
13356        mcqe_status = bf_get(lpfc_mcqe_status, mcqe);
13357        if (mcqe_status != MB_CQE_STATUS_SUCCESS) {
13358                if (bf_get(lpfc_mqe_status, mqe) == MBX_SUCCESS)
13359                        bf_set(lpfc_mqe_status, mqe,
13360                               (LPFC_MBX_ERROR_RANGE | mcqe_status));
13361        }
13362        if (pmb->mbox_flag & LPFC_MBX_IMED_UNREG) {
13363                pmb->mbox_flag &= ~LPFC_MBX_IMED_UNREG;
13364                lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_MBOX_VPORT,
13365                                      "MBOX dflt rpi: status:x%x rpi:x%x",
13366                                      mcqe_status,
13367                                      pmbox->un.varWords[0], 0);
13368                if (mcqe_status == MB_CQE_STATUS_SUCCESS) {
13369                        mp = (struct lpfc_dmabuf *)(pmb->ctx_buf);
13370                        ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
13371                        /* Reg_LOGIN of dflt RPI was successful. Now let's get
13372                         * rid of the RPI using the same mbox buffer.
13373                         */
13374                        lpfc_unreg_login(phba, vport->vpi,
13375                                         pmbox->un.varWords[0], pmb);
13376                        pmb->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi;
13377                        pmb->ctx_buf = mp;
13378                        pmb->ctx_ndlp = ndlp;
13379                        pmb->vport = vport;
13380                        rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
13381                        if (rc != MBX_BUSY)
13382                                lpfc_printf_log(phba, KERN_ERR,
13383                                                LOG_TRACE_EVENT,
13384                                                "0385 rc should "
13385                                                "have been MBX_BUSY\n");
13386                        if (rc != MBX_NOT_FINISHED)
13387                                goto send_current_mbox;
13388                }
13389        }
13390        spin_lock_irqsave(&phba->pport->work_port_lock, iflags);
13391        phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
13392        spin_unlock_irqrestore(&phba->pport->work_port_lock, iflags);
13393
13394        /* There is mailbox completion work to do */
13395        spin_lock_irqsave(&phba->hbalock, iflags);
13396        __lpfc_mbox_cmpl_put(phba, pmb);
13397        phba->work_ha |= HA_MBATT;
13398        spin_unlock_irqrestore(&phba->hbalock, iflags);
13399        workposted = true;
13400
13401send_current_mbox:
13402        spin_lock_irqsave(&phba->hbalock, iflags);
13403        /* Release the mailbox command posting token */
13404        phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
13405        /* Setting active mailbox pointer needs to be in sync with flag clear */
13406        phba->sli.mbox_active = NULL;
13407        if (bf_get(lpfc_trailer_consumed, mcqe))
13408                lpfc_sli4_mq_release(phba->sli4_hba.mbx_wq);
13409        spin_unlock_irqrestore(&phba->hbalock, iflags);
13410        /* Wake up worker thread to post the next pending mailbox command */
13411        lpfc_worker_wake_up(phba);
13412        return workposted;
13413
13414out_no_mqe_complete:
13415        spin_lock_irqsave(&phba->hbalock, iflags);
13416        if (bf_get(lpfc_trailer_consumed, mcqe))
13417                lpfc_sli4_mq_release(phba->sli4_hba.mbx_wq);
13418        spin_unlock_irqrestore(&phba->hbalock, iflags);
13419        return false;
13420}
13421
13422/**
13423 * lpfc_sli4_sp_handle_mcqe - Process a mailbox completion queue entry
13424 * @phba: Pointer to HBA context object.
13425 * @cq: Pointer to associated CQ
13426 * @cqe: Pointer to mailbox completion queue entry.
13427 *
13428 * This routine processes a mailbox completion queue entry. It invokes the
13429 * proper mailbox completion handling or asynchronous event handling routine
13430 * according to the MCQE's async bit.
13431 *
13432 * Return: true if work posted to worker thread, otherwise false.
13433 **/
13434static bool
13435lpfc_sli4_sp_handle_mcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
13436                         struct lpfc_cqe *cqe)
13437{
13438        struct lpfc_mcqe mcqe;
13439        bool workposted;
13440
13441        cq->CQ_mbox++;
13442
13443        /* Copy the mailbox MCQE and convert endian order as needed */
13444        lpfc_sli4_pcimem_bcopy(cqe, &mcqe, sizeof(struct lpfc_mcqe));
13445
13446        /* Invoke the proper event handling routine */
13447        if (!bf_get(lpfc_trailer_async, &mcqe))
13448                workposted = lpfc_sli4_sp_handle_mbox_event(phba, &mcqe);
13449        else
13450                workposted = lpfc_sli4_sp_handle_async_event(phba, &mcqe);
13451        return workposted;
13452}
13453
13454/**
13455 * lpfc_sli4_sp_handle_els_wcqe - Handle els work-queue completion event
13456 * @phba: Pointer to HBA context object.
13457 * @cq: Pointer to associated CQ
13458 * @wcqe: Pointer to work-queue completion queue entry.
13459 *
13460 * This routine handles an ELS work-queue completion event.
13461 *
13462 * Return: true if work posted to worker thread, otherwise false.
13463 **/
13464static bool
13465lpfc_sli4_sp_handle_els_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
13466                             struct lpfc_wcqe_complete *wcqe)
13467{
13468        struct lpfc_iocbq *irspiocbq;
13469        unsigned long iflags;
13470        struct lpfc_sli_ring *pring = cq->pring;
13471        int txq_cnt = 0;
13472        int txcmplq_cnt = 0;
13473
13474        /* Check for response status */
13475        if (unlikely(bf_get(lpfc_wcqe_c_status, wcqe))) {
13476                /* Log the error status */
13477                lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
13478                                "0357 ELS CQE error: status=x%x: "
13479                                "CQE: %08x %08x %08x %08x\n",
13480                                bf_get(lpfc_wcqe_c_status, wcqe),
13481                                wcqe->word0, wcqe->total_data_placed,
13482                                wcqe->parameter, wcqe->word3);
13483        }
13484
13485        /* Get an irspiocbq for later ELS response processing use */
13486        irspiocbq = lpfc_sli_get_iocbq(phba);
13487        if (!irspiocbq) {
13488                if (!list_empty(&pring->txq))
13489                        txq_cnt++;
13490                if (!list_empty(&pring->txcmplq))
13491                        txcmplq_cnt++;
13492                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13493                        "0387 NO IOCBQ data: txq_cnt=%d iocb_cnt=%d "
13494                        "els_txcmplq_cnt=%d\n",
13495                        txq_cnt, phba->iocb_cnt,
13496                        txcmplq_cnt);
13497                return false;
13498        }
13499
13500        /* Save off the slow-path queue event for work thread to process */
13501        memcpy(&irspiocbq->cq_event.cqe.wcqe_cmpl, wcqe, sizeof(*wcqe));
13502        spin_lock_irqsave(&phba->hbalock, iflags);
13503        list_add_tail(&irspiocbq->cq_event.list,
13504                      &phba->sli4_hba.sp_queue_event);
13505        phba->hba_flag |= HBA_SP_QUEUE_EVT;
13506        spin_unlock_irqrestore(&phba->hbalock, iflags);
13507
13508        return true;
13509}
13510
13511/**
13512 * lpfc_sli4_sp_handle_rel_wcqe - Handle slow-path WQ entry consumed event
13513 * @phba: Pointer to HBA context object.
13514 * @wcqe: Pointer to work-queue completion queue entry.
13515 *
13516 * This routine handles slow-path WQ entry consumed event by invoking the
13517 * proper WQ release routine to the slow-path WQ.
13518 **/
13519static void
13520lpfc_sli4_sp_handle_rel_wcqe(struct lpfc_hba *phba,
13521                             struct lpfc_wcqe_release *wcqe)
13522{
13523        /* sanity check on queue memory */
13524        if (unlikely(!phba->sli4_hba.els_wq))
13525                return;
13526        /* Check for the slow-path ELS work queue */
13527        if (bf_get(lpfc_wcqe_r_wq_id, wcqe) == phba->sli4_hba.els_wq->queue_id)
13528                lpfc_sli4_wq_release(phba->sli4_hba.els_wq,
13529                                     bf_get(lpfc_wcqe_r_wqe_index, wcqe));
13530        else
13531                lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
13532                                "2579 Slow-path wqe consume event carries "
13533                                "miss-matched qid: wcqe-qid=x%x, sp-qid=x%x\n",
13534                                bf_get(lpfc_wcqe_r_wqe_index, wcqe),
13535                                phba->sli4_hba.els_wq->queue_id);
13536}
13537
13538/**
13539 * lpfc_sli4_sp_handle_abort_xri_wcqe - Handle a xri abort event
13540 * @phba: Pointer to HBA context object.
13541 * @cq: Pointer to a WQ completion queue.
13542 * @wcqe: Pointer to work-queue completion queue entry.
13543 *
13544 * This routine handles an XRI abort event.
13545 *
13546 * Return: true if work posted to worker thread, otherwise false.
13547 **/
13548static bool
13549lpfc_sli4_sp_handle_abort_xri_wcqe(struct lpfc_hba *phba,
13550                                   struct lpfc_queue *cq,
13551                                   struct sli4_wcqe_xri_aborted *wcqe)
13552{
13553        bool workposted = false;
13554        struct lpfc_cq_event *cq_event;
13555        unsigned long iflags;
13556
13557        switch (cq->subtype) {
13558        case LPFC_IO:
13559                lpfc_sli4_io_xri_aborted(phba, wcqe, cq->hdwq);
13560                if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
13561                        /* Notify aborted XRI for NVME work queue */
13562                        if (phba->nvmet_support)
13563                                lpfc_sli4_nvmet_xri_aborted(phba, wcqe);
13564                }
13565                workposted = false;
13566                break;
13567        case LPFC_NVME_LS: /* NVME LS uses ELS resources */
13568        case LPFC_ELS:
13569                cq_event = lpfc_cq_event_setup(
13570                        phba, wcqe, sizeof(struct sli4_wcqe_xri_aborted));
13571                if (!cq_event)
13572                        return false;
13573                cq_event->hdwq = cq->hdwq;
13574                spin_lock_irqsave(&phba->hbalock, iflags);
13575                list_add_tail(&cq_event->list,
13576                              &phba->sli4_hba.sp_els_xri_aborted_work_queue);
13577                /* Set the els xri abort event flag */
13578                phba->hba_flag |= ELS_XRI_ABORT_EVENT;
13579                spin_unlock_irqrestore(&phba->hbalock, iflags);
13580                workposted = true;
13581                break;
13582        default:
13583                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13584                                "0603 Invalid CQ subtype %d: "
13585                                "%08x %08x %08x %08x\n",
13586                                cq->subtype, wcqe->word0, wcqe->parameter,
13587                                wcqe->word2, wcqe->word3);
13588                workposted = false;
13589                break;
13590        }
13591        return workposted;
13592}
13593
13594#define FC_RCTL_MDS_DIAGS       0xF4
13595
13596/**
13597 * lpfc_sli4_sp_handle_rcqe - Process a receive-queue completion queue entry
13598 * @phba: Pointer to HBA context object.
13599 * @rcqe: Pointer to receive-queue completion queue entry.
13600 *
13601 * This routine processes a receive-queue completion queue entry.
13602 *
13603 * Return: true if work posted to worker thread, otherwise false.
13604 **/
13605static bool
13606lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_rcqe *rcqe)
13607{
13608        bool workposted = false;
13609        struct fc_frame_header *fc_hdr;
13610        struct lpfc_queue *hrq = phba->sli4_hba.hdr_rq;
13611        struct lpfc_queue *drq = phba->sli4_hba.dat_rq;
13612        struct lpfc_nvmet_tgtport *tgtp;
13613        struct hbq_dmabuf *dma_buf;
13614        uint32_t status, rq_id;
13615        unsigned long iflags;
13616
13617        /* sanity check on queue memory */
13618        if (unlikely(!hrq) || unlikely(!drq))
13619                return workposted;
13620
13621        if (bf_get(lpfc_cqe_code, rcqe) == CQE_CODE_RECEIVE_V1)
13622                rq_id = bf_get(lpfc_rcqe_rq_id_v1, rcqe);
13623        else
13624                rq_id = bf_get(lpfc_rcqe_rq_id, rcqe);
13625        if (rq_id != hrq->queue_id)
13626                goto out;
13627
13628        status = bf_get(lpfc_rcqe_status, rcqe);
13629        switch (status) {
13630        case FC_STATUS_RQ_BUF_LEN_EXCEEDED:
13631                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13632                                "2537 Receive Frame Truncated!!\n");
13633                fallthrough;
13634        case FC_STATUS_RQ_SUCCESS:
13635                spin_lock_irqsave(&phba->hbalock, iflags);
13636                lpfc_sli4_rq_release(hrq, drq);
13637                dma_buf = lpfc_sli_hbqbuf_get(&phba->hbqs[0].hbq_buffer_list);
13638                if (!dma_buf) {
13639                        hrq->RQ_no_buf_found++;
13640                        spin_unlock_irqrestore(&phba->hbalock, iflags);
13641                        goto out;
13642                }
13643                hrq->RQ_rcv_buf++;
13644                hrq->RQ_buf_posted--;
13645                memcpy(&dma_buf->cq_event.cqe.rcqe_cmpl, rcqe, sizeof(*rcqe));
13646
13647                fc_hdr = (struct fc_frame_header *)dma_buf->hbuf.virt;
13648
13649                if (fc_hdr->fh_r_ctl == FC_RCTL_MDS_DIAGS ||
13650                    fc_hdr->fh_r_ctl == FC_RCTL_DD_UNSOL_DATA) {
13651                        spin_unlock_irqrestore(&phba->hbalock, iflags);
13652                        /* Handle MDS Loopback frames */
13653                        if  (!(phba->pport->load_flag & FC_UNLOADING))
13654                                lpfc_sli4_handle_mds_loopback(phba->pport,
13655                                                              dma_buf);
13656                        else
13657                                lpfc_in_buf_free(phba, &dma_buf->dbuf);
13658                        break;
13659                }
13660
13661                /* save off the frame for the work thread to process */
13662                list_add_tail(&dma_buf->cq_event.list,
13663                              &phba->sli4_hba.sp_queue_event);
13664                /* Frame received */
13665                phba->hba_flag |= HBA_SP_QUEUE_EVT;
13666                spin_unlock_irqrestore(&phba->hbalock, iflags);
13667                workposted = true;
13668                break;
13669        case FC_STATUS_INSUFF_BUF_FRM_DISC:
13670                if (phba->nvmet_support) {
13671                        tgtp = phba->targetport->private;
13672                        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13673                                        "6402 RQE Error x%x, posted %d err_cnt "
13674                                        "%d: %x %x %x\n",
13675                                        status, hrq->RQ_buf_posted,
13676                                        hrq->RQ_no_posted_buf,
13677                                        atomic_read(&tgtp->rcv_fcp_cmd_in),
13678                                        atomic_read(&tgtp->rcv_fcp_cmd_out),
13679                                        atomic_read(&tgtp->xmt_fcp_release));
13680                }
13681                fallthrough;
13682
13683        case FC_STATUS_INSUFF_BUF_NEED_BUF:
13684                hrq->RQ_no_posted_buf++;
13685                /* Post more buffers if possible */
13686                spin_lock_irqsave(&phba->hbalock, iflags);
13687                phba->hba_flag |= HBA_POST_RECEIVE_BUFFER;
13688                spin_unlock_irqrestore(&phba->hbalock, iflags);
13689                workposted = true;
13690                break;
13691        }
13692out:
13693        return workposted;
13694}
13695
13696/**
13697 * lpfc_sli4_sp_handle_cqe - Process a slow path completion queue entry
13698 * @phba: Pointer to HBA context object.
13699 * @cq: Pointer to the completion queue.
13700 * @cqe: Pointer to a completion queue entry.
13701 *
13702 * This routine processes a slow-path work-queue or receive-queue completion
13703 * queue entry.
13704 *
13705 * Return: true if work posted to worker thread, otherwise false.
13706 **/
13707static bool
13708lpfc_sli4_sp_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
13709                         struct lpfc_cqe *cqe)
13710{
13711        struct lpfc_cqe cqevt;
13712        bool workposted = false;
13713
13714        /* Copy the work queue CQE and convert endian order if needed */
13715        lpfc_sli4_pcimem_bcopy(cqe, &cqevt, sizeof(struct lpfc_cqe));
13716
13717        /* Check and process for different type of WCQE and dispatch */
13718        switch (bf_get(lpfc_cqe_code, &cqevt)) {
13719        case CQE_CODE_COMPL_WQE:
13720                /* Process the WQ/RQ complete event */
13721                phba->last_completion_time = jiffies;
13722                workposted = lpfc_sli4_sp_handle_els_wcqe(phba, cq,
13723                                (struct lpfc_wcqe_complete *)&cqevt);
13724                break;
13725        case CQE_CODE_RELEASE_WQE:
13726                /* Process the WQ release event */
13727                lpfc_sli4_sp_handle_rel_wcqe(phba,
13728                                (struct lpfc_wcqe_release *)&cqevt);
13729                break;
13730        case CQE_CODE_XRI_ABORTED:
13731                /* Process the WQ XRI abort event */
13732                phba->last_completion_time = jiffies;
13733                workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq,
13734                                (struct sli4_wcqe_xri_aborted *)&cqevt);
13735                break;
13736        case CQE_CODE_RECEIVE:
13737        case CQE_CODE_RECEIVE_V1:
13738                /* Process the RQ event */
13739                phba->last_completion_time = jiffies;
13740                workposted = lpfc_sli4_sp_handle_rcqe(phba,
13741                                (struct lpfc_rcqe *)&cqevt);
13742                break;
13743        default:
13744                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13745                                "0388 Not a valid WCQE code: x%x\n",
13746                                bf_get(lpfc_cqe_code, &cqevt));
13747                break;
13748        }
13749        return workposted;
13750}
13751
13752/**
13753 * lpfc_sli4_sp_handle_eqe - Process a slow-path event queue entry
13754 * @phba: Pointer to HBA context object.
13755 * @eqe: Pointer to fast-path event queue entry.
13756 * @speq: Pointer to slow-path event queue.
13757 *
13758 * This routine processes an event queue entry from the slow-path event queue.
13759 * It will check the MajorCode and MinorCode to determine whether this is a
13760 * completion event on a completion queue; if not, an error shall be logged
13761 * and the routine will just return. Otherwise, it will get the corresponding
13762 * completion queue and process all the entries on that completion queue,
13763 * rearm the completion queue, and then return.
13764 *
13765 **/
13766static void
13767lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe,
13768        struct lpfc_queue *speq)
13769{
13770        struct lpfc_queue *cq = NULL, *childq;
13771        uint16_t cqid;
13772        int ret = 0;
13773
13774        /* Get the reference to the corresponding CQ */
13775        cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);
13776
13777        list_for_each_entry(childq, &speq->child_list, list) {
13778                if (childq->queue_id == cqid) {
13779                        cq = childq;
13780                        break;
13781                }
13782        }
13783        if (unlikely(!cq)) {
13784                if (phba->sli.sli_flag & LPFC_SLI_ACTIVE)
13785                        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13786                                        "0365 Slow-path CQ identifier "
13787                                        "(%d) does not exist\n", cqid);
13788                return;
13789        }
13790
13791        /* Save EQ associated with this CQ */
13792        cq->assoc_qp = speq;
13793
13794        if (is_kdump_kernel())
13795                ret = queue_work(phba->wq, &cq->spwork);
13796        else
13797                ret = queue_work_on(cq->chann, phba->wq, &cq->spwork);
13798
13799        if (!ret)
13800                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13801                                "0390 Cannot schedule queue work "
13802                                "for CQ eqcqid=%d, cqid=%d on CPU %d\n",
13803                                cqid, cq->queue_id, raw_smp_processor_id());
13804}
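
/*
 * Sketch of the child-CQ lookup performed above: an EQ keeps its CQs
 * on a child list, and the EQE carries only the CQ id, so the handler
 * walks the list to resolve it.  Hypothetical helper for illustration.
 */
static struct lpfc_queue *
lpfc_sketch_find_child_cq(struct lpfc_queue *eq, uint16_t cqid)
{
        struct lpfc_queue *childq;

        list_for_each_entry(childq, &eq->child_list, list)
                if (childq->queue_id == cqid)
                        return childq;
        return NULL;
}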
13805
13806/**
13807 * __lpfc_sli4_process_cq - Process elements of a CQ
13808 * @phba: Pointer to HBA context object.
13809 * @cq: Pointer to CQ to be processed
13810 * @handler: Routine to process each cqe
13811 * @delay: Pointer to usdelay to set in case of rescheduling of the handler
13812 * @poll_mode: Polling mode we were called from
13813 *
13814 * This routine processes completion queue entries in a CQ. While a valid
13815 * queue element is found, the handler is called. During processing, checks
13816 * are made for periodic doorbell writes to let the hardware know of
13817 * element consumption.
13818 *
13819 * If the max limit on cqes to process is hit, or there are no more valid
13820 * entries, the loop stops. If we processed a sufficient number of elements,
13821 * meaning there is sufficient load, rather than rearming and generating
13822 * another interrupt, a cq rescheduling delay will be set. A delay of 0
13823 * indicates no rescheduling.
13824 *
13825 * Returns True if work scheduled, False otherwise.
13826 **/
13827static bool
13828__lpfc_sli4_process_cq(struct lpfc_hba *phba, struct lpfc_queue *cq,
13829        bool (*handler)(struct lpfc_hba *, struct lpfc_queue *,
13830                        struct lpfc_cqe *), unsigned long *delay,
13831                        enum lpfc_poll_mode poll_mode)
13832{
13833        struct lpfc_cqe *cqe;
13834        bool workposted = false;
13835        int count = 0, consumed = 0;
13836        bool arm = true;
13837
13838        /* default - no reschedule */
13839        *delay = 0;
13840
13841        if (cmpxchg(&cq->queue_claimed, 0, 1) != 0)
13842                goto rearm_and_exit;
13843
13844        /* Process all the entries to the CQ */
13845        cq->q_flag = 0;
13846        cqe = lpfc_sli4_cq_get(cq);
13847        while (cqe) {
13848                workposted |= handler(phba, cq, cqe);
13849                __lpfc_sli4_consume_cqe(phba, cq, cqe);
13850
13851                consumed++;
13852                if (!(++count % cq->max_proc_limit))
13853                        break;
13854
13855                if (!(count % cq->notify_interval)) {
13856                        phba->sli4_hba.sli4_write_cq_db(phba, cq, consumed,
13857                                                LPFC_QUEUE_NOARM);
13858                        consumed = 0;
13859                        cq->assoc_qp->q_flag |= HBA_EQ_DELAY_CHK;
13860                }
13861
13862                if (count == LPFC_NVMET_CQ_NOTIFY)
13863                        cq->q_flag |= HBA_NVMET_CQ_NOTIFY;
13864
13865                cqe = lpfc_sli4_cq_get(cq);
13866        }
13867        if (count >= phba->cfg_cq_poll_threshold) {
13868                *delay = 1;
13869                arm = false;
13870        }
13871
13872        /* Note: complete the irq_poll softirq before rearming CQ */
13873        if (poll_mode == LPFC_IRQ_POLL)
13874                irq_poll_complete(&cq->iop);
13875
13876        /* Track the max number of CQEs processed in 1 EQ */
13877        if (count > cq->CQ_max_cqe)
13878                cq->CQ_max_cqe = count;
13879
13880        cq->assoc_qp->EQ_cqe_cnt += count;
13881
13882        /* Catch the no cq entry condition */
13883        if (unlikely(count == 0))
13884                lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
13885                                "0369 No entry from completion queue "
13886                                "qid=%d\n", cq->queue_id);
13887
13888        xchg(&cq->queue_claimed, 0);
13889
13890rearm_and_exit:
13891        phba->sli4_hba.sli4_write_cq_db(phba, cq, consumed,
13892                        arm ?  LPFC_QUEUE_REARM : LPFC_QUEUE_NOARM);
13893
13894        return workposted;
13895}
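
/*
 * Illustrative sketch (not part of the driver): the cmpxchg()/xchg()
 * pair on cq->queue_claimed above is a simple single-owner guard, so
 * that only one context drains a given CQ at a time.  The hypothetical
 * example_* helpers below show the bare pattern in isolation:
 */
static inline bool example_try_claim(unsigned long *claimed)
{
        /* true only for the single caller that flips 0 -> 1 */
        return cmpxchg(claimed, 0, 1) == 0;
}

static inline void example_release_claim(unsigned long *claimed)
{
        /* xchg() is a full barrier; prior CQE updates are visible first */
        xchg(claimed, 0);
}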
13896
13897/**
13898 * __lpfc_sli4_sp_process_cq - Process a slow-path completion queue
13899 * @cq: pointer to CQ to process
13900 *
13901 * This routine calls the cq processing routine with a handler specific
13902 * to the type of queue bound to it.
13903 *
13904 * The CQ routine returns two values: the first is the calling status,
13905 * which indicates whether work was queued to the background discovery
13906 * thread. If true, the routine should wake up the discovery thread;
13907 * the second is the delay parameter. If non-zero, rather than rearming
13908 * the CQ and generating yet another interrupt, the CQ handler should be
13909 * queued so that it is processed in a subsequent polling action. The value
13910 * of the delay indicates when to reschedule it.
13911 **/
13912static void
13913__lpfc_sli4_sp_process_cq(struct lpfc_queue *cq)
13914{
13915        struct lpfc_hba *phba = cq->phba;
13916        unsigned long delay;
13917        bool workposted = false;
13918        int ret = 0;
13919
13920        /* Process and rearm the CQ */
13921        switch (cq->type) {
13922        case LPFC_MCQ:
13923                workposted |= __lpfc_sli4_process_cq(phba, cq,
13924                                                lpfc_sli4_sp_handle_mcqe,
13925                                                &delay, LPFC_QUEUE_WORK);
13926                break;
13927        case LPFC_WCQ:
13928                if (cq->subtype == LPFC_IO)
13929                        workposted |= __lpfc_sli4_process_cq(phba, cq,
13930                                                lpfc_sli4_fp_handle_cqe,
13931                                                &delay, LPFC_QUEUE_WORK);
13932                else
13933                        workposted |= __lpfc_sli4_process_cq(phba, cq,
13934                                                lpfc_sli4_sp_handle_cqe,
13935                                                &delay, LPFC_QUEUE_WORK);
13936                break;
13937        default:
13938                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13939                                "0370 Invalid completion queue type (%d)\n",
13940                                cq->type);
13941                return;
13942        }
13943
13944        if (delay) {
13945                if (is_kdump_kernel())
13946                        ret = queue_delayed_work(phba->wq, &cq->sched_spwork,
13947                                                delay);
13948                else
13949                        ret = queue_delayed_work_on(cq->chann, phba->wq,
13950                                                &cq->sched_spwork, delay);
13951                if (!ret)
13952                        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13953                                "0394 Cannot schedule queue work "
13954                                "for cqid=%d on CPU %d\n",
13955                                cq->queue_id, cq->chann);
13956        }
13957
13958        /* wake up worker thread if there are works to be done */
13959        if (workposted)
13960                lpfc_worker_wake_up(phba);
13961}
13962
13963/**
13964 * lpfc_sli4_sp_process_cq - slow-path work handler when started by
13965 *   interrupt
13966 * @work: pointer to work element
13967 *
13968 * Translates from the work element and calls the slow-path handler.
13969 **/
13970static void
13971lpfc_sli4_sp_process_cq(struct work_struct *work)
13972{
13973        struct lpfc_queue *cq = container_of(work, struct lpfc_queue, spwork);
13974
13975        __lpfc_sli4_sp_process_cq(cq);
13976}
13977
13978/**
13979 * lpfc_sli4_dly_sp_process_cq - slow-path work handler when started by timer
13980 * @work: pointer to work element
13981 *
13982 * Translates from the work element and calls the slow-path handler.
13983 **/
13984static void
13985lpfc_sli4_dly_sp_process_cq(struct work_struct *work)
13986{
13987        struct lpfc_queue *cq = container_of(to_delayed_work(work),
13988                                        struct lpfc_queue, sched_spwork);
13989
13990        __lpfc_sli4_sp_process_cq(cq);
13991}
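
/*
 * Illustrative sketch (not part of the driver): the two handlers above
 * recover the owning struct lpfc_queue from the work element via
 * container_of(); the delayed variant first maps the work_struct back to
 * its struct delayed_work with to_delayed_work().  The scheduling side
 * pairs with them as sketched below; the helper name and the millisecond
 * argument are assumptions for illustration (the driver passes jiffies):
 */
static inline void example_schedule_sp_cq(struct lpfc_hba *phba,
                                          struct lpfc_queue *cq,
                                          unsigned long delay_ms)
{
        if (!delay_ms)
                /* immediate: ends up in lpfc_sli4_sp_process_cq() */
                queue_work_on(cq->chann, phba->wq, &cq->spwork);
        else
                /* deferred: ends up in lpfc_sli4_dly_sp_process_cq() */
                queue_delayed_work_on(cq->chann, phba->wq, &cq->sched_spwork,
                                      msecs_to_jiffies(delay_ms));
}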
13992
13993/**
13994 * lpfc_sli4_fp_handle_fcp_wcqe - Process fast-path work queue completion entry
13995 * @phba: Pointer to HBA context object.
13996 * @cq: Pointer to associated CQ
13997 * @wcqe: Pointer to work-queue completion queue entry.
13998 *
13999 * This routine processes a fast-path work queue completion entry from a
14000 * fast-path event queue for FCP command response completion.
14001 **/
14002static void
14003lpfc_sli4_fp_handle_fcp_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
14004                             struct lpfc_wcqe_complete *wcqe)
14005{
14006        struct lpfc_sli_ring *pring = cq->pring;
14007        struct lpfc_iocbq *cmdiocbq;
14008        struct lpfc_iocbq irspiocbq;
14009        unsigned long iflags;
14010
14011        /* Check for response status */
14012        if (unlikely(bf_get(lpfc_wcqe_c_status, wcqe))) {
14013                /* If resource errors reported from HBA, reduce queue
14014                 * depth of the SCSI device.
14015                 */
14016                if (((bf_get(lpfc_wcqe_c_status, wcqe) ==
14017                     IOSTAT_LOCAL_REJECT)) &&
14018                    ((wcqe->parameter & IOERR_PARAM_MASK) ==
14019                     IOERR_NO_RESOURCES))
14020                        phba->lpfc_rampdown_queue_depth(phba);
14021
14022                /* Log the cmpl status */
14023                lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
14024                                "0373 FCP CQE cmpl: status=x%x: "
14025                                "CQE: %08x %08x %08x %08x\n",
14026                                bf_get(lpfc_wcqe_c_status, wcqe),
14027                                wcqe->word0, wcqe->total_data_placed,
14028                                wcqe->parameter, wcqe->word3);
14029        }
14030
14031        /* Look up the FCP command IOCB and create pseudo response IOCB */
14032        spin_lock_irqsave(&pring->ring_lock, iflags);
14033        pring->stats.iocb_event++;
14034        spin_unlock_irqrestore(&pring->ring_lock, iflags);
14035        cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring,
14036                                bf_get(lpfc_wcqe_c_request_tag, wcqe));
14037        if (unlikely(!cmdiocbq)) {
14038                lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
14039                                "0374 FCP complete with no corresponding "
14040                                "cmdiocb: iotag (%d)\n",
14041                                bf_get(lpfc_wcqe_c_request_tag, wcqe));
14042                return;
14043        }
14044#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
14045        cmdiocbq->isr_timestamp = cq->isr_timestamp;
14046#endif
14047        if (cmdiocbq->iocb_cmpl == NULL) {
14048                if (cmdiocbq->wqe_cmpl) {
14049                        if (cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED) {
14050                                spin_lock_irqsave(&phba->hbalock, iflags);
14051                                cmdiocbq->iocb_flag &= ~LPFC_DRIVER_ABORTED;
14052                                spin_unlock_irqrestore(&phba->hbalock, iflags);
14053                        }
14054
14055                        /* Pass the cmd_iocb and the wcqe to the upper layer */
14056                        (cmdiocbq->wqe_cmpl)(phba, cmdiocbq, wcqe);
14057                        return;
14058                }
14059                lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
14060                                "0375 FCP cmdiocb not callback function "
14061                                "iotag: (%d)\n",
14062                                bf_get(lpfc_wcqe_c_request_tag, wcqe));
14063                return;
14064        }
14065
14066        /* Fake the irspiocb and copy necessary response information */
14067        lpfc_sli4_iocb_param_transfer(phba, &irspiocbq, cmdiocbq, wcqe);
14068
14069        if (cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED) {
14070                spin_lock_irqsave(&phba->hbalock, iflags);
14071                cmdiocbq->iocb_flag &= ~LPFC_DRIVER_ABORTED;
14072                spin_unlock_irqrestore(&phba->hbalock, iflags);
14073        }
14074
14075        /* Pass the cmd_iocb and the rsp state to the upper layer */
14076        (cmdiocbq->iocb_cmpl)(phba, cmdiocbq, &irspiocbq);
14077}
14078
14079/**
14080 * lpfc_sli4_fp_handle_rel_wcqe - Handle fast-path WQ entry consumed event
14081 * @phba: Pointer to HBA context object.
14082 * @cq: Pointer to completion queue.
14083 * @wcqe: Pointer to work-queue completion queue entry.
14084 *
14085 * This routine handles a fast-path WQ entry consumed event by invoking the
14086 * proper WQ release routine on the matching work queue.
14087 **/
14088static void
14089lpfc_sli4_fp_handle_rel_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
14090                             struct lpfc_wcqe_release *wcqe)
14091{
14092        struct lpfc_queue *childwq;
14093        bool wqid_matched = false;
14094        uint16_t hba_wqid;
14095
14096        /* Check for fast-path FCP work queue release */
14097        hba_wqid = bf_get(lpfc_wcqe_r_wq_id, wcqe);
14098        list_for_each_entry(childwq, &cq->child_list, list) {
14099                if (childwq->queue_id == hba_wqid) {
14100                        lpfc_sli4_wq_release(childwq,
14101                                        bf_get(lpfc_wcqe_r_wqe_index, wcqe));
14102                        if (childwq->q_flag & HBA_NVMET_WQFULL)
14103                                lpfc_nvmet_wqfull_process(phba, childwq);
14104                        wqid_matched = true;
14105                        break;
14106                }
14107        }
14108        /* Report warning log message if no match found */
14109        if (!wqid_matched)
14110                lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
14111                                "2580 Fast-path wqe consume event carries "
14112                                "miss-matched qid: wcqe-qid=x%x\n", hba_wqid);
14113}
14114
14115/**
14116 * lpfc_sli4_nvmet_handle_rcqe - Process a receive-queue completion queue entry
14117 * @phba: Pointer to HBA context object.
14118 * @cq: Pointer to completion queue.
14119 * @rcqe: Pointer to receive-queue completion queue entry.
14120 *
14121 * This routine processes a receive-queue completion queue entry.
14122 *
14123 * Return: true if work posted to worker thread, otherwise false.
14124 **/
14125static bool
14126lpfc_sli4_nvmet_handle_rcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
14127                            struct lpfc_rcqe *rcqe)
14128{
14129        bool workposted = false;
14130        struct lpfc_queue *hrq;
14131        struct lpfc_queue *drq;
14132        struct rqb_dmabuf *dma_buf;
14133        struct fc_frame_header *fc_hdr;
14134        struct lpfc_nvmet_tgtport *tgtp;
14135        uint32_t status, rq_id;
14136        unsigned long iflags;
14137        uint32_t fctl, idx;
14138
14139        if ((phba->nvmet_support == 0) ||
14140            (phba->sli4_hba.nvmet_cqset == NULL))
14141                return workposted;
14142
14143        idx = cq->queue_id - phba->sli4_hba.nvmet_cqset[0]->queue_id;
14144        hrq = phba->sli4_hba.nvmet_mrq_hdr[idx];
14145        drq = phba->sli4_hba.nvmet_mrq_data[idx];
14146
14147        /* sanity check on queue memory */
14148        if (unlikely(!hrq) || unlikely(!drq))
14149                return workposted;
14150
14151        if (bf_get(lpfc_cqe_code, rcqe) == CQE_CODE_RECEIVE_V1)
14152                rq_id = bf_get(lpfc_rcqe_rq_id_v1, rcqe);
14153        else
14154                rq_id = bf_get(lpfc_rcqe_rq_id, rcqe);
14155
14156        if ((phba->nvmet_support == 0) ||
14157            (rq_id != hrq->queue_id))
14158                return workposted;
14159
14160        status = bf_get(lpfc_rcqe_status, rcqe);
14161        switch (status) {
14162        case FC_STATUS_RQ_BUF_LEN_EXCEEDED:
14163                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14164                                "6126 Receive Frame Truncated!!\n");
14165                fallthrough;
14166        case FC_STATUS_RQ_SUCCESS:
14167                spin_lock_irqsave(&phba->hbalock, iflags);
14168                lpfc_sli4_rq_release(hrq, drq);
14169                dma_buf = lpfc_sli_rqbuf_get(phba, hrq);
14170                if (!dma_buf) {
14171                        hrq->RQ_no_buf_found++;
14172                        spin_unlock_irqrestore(&phba->hbalock, iflags);
14173                        goto out;
14174                }
14175                spin_unlock_irqrestore(&phba->hbalock, iflags);
14176                hrq->RQ_rcv_buf++;
14177                hrq->RQ_buf_posted--;
14178                fc_hdr = (struct fc_frame_header *)dma_buf->hbuf.virt;
14179
14180                /* Just some basic sanity checks on FCP Command frame */
14181                fctl = (fc_hdr->fh_f_ctl[0] << 16 |
14182                        fc_hdr->fh_f_ctl[1] << 8 |
14183                        fc_hdr->fh_f_ctl[2]);
14184                if (((fctl &
14185                    (FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT)) !=
14186                    (FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT)) ||
14187                    (fc_hdr->fh_seq_cnt != 0)) /* 0 byte swapped is still 0 */
14188                        goto drop;
14189
14190                if (fc_hdr->fh_type == FC_TYPE_FCP) {
14191                        dma_buf->bytes_recv = bf_get(lpfc_rcqe_length, rcqe);
14192                        lpfc_nvmet_unsol_fcp_event(
14193                                phba, idx, dma_buf, cq->isr_timestamp,
14194                                cq->q_flag & HBA_NVMET_CQ_NOTIFY);
14195                        return false;
14196                }
14197drop:
14198                lpfc_rq_buf_free(phba, &dma_buf->hbuf);
14199                break;
14200        case FC_STATUS_INSUFF_BUF_FRM_DISC:
14201                if (phba->nvmet_support) {
14202                        tgtp = phba->targetport->private;
14203                        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14204                                        "6401 RQE Error x%x, posted %d err_cnt "
14205                                        "%d: %x %x %x\n",
14206                                        status, hrq->RQ_buf_posted,
14207                                        hrq->RQ_no_posted_buf,
14208                                        atomic_read(&tgtp->rcv_fcp_cmd_in),
14209                                        atomic_read(&tgtp->rcv_fcp_cmd_out),
14210                                        atomic_read(&tgtp->xmt_fcp_release));
14211                }
14212                fallthrough;
14213
14214        case FC_STATUS_INSUFF_BUF_NEED_BUF:
14215                hrq->RQ_no_posted_buf++;
14216                /* Post more buffers if possible */
14217                break;
14218        }
14219out:
14220        return workposted;
14221}
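
/*
 * Illustrative sketch (not part of the driver): the F_CTL field of a
 * Fibre Channel frame header is three bytes, assembled above into a
 * 24-bit value MSB-first.  A single-sequence command must carry all of
 * FC_FC_FIRST_SEQ, FC_FC_END_SEQ and FC_FC_SEQ_INIT; this hypothetical
 * helper restates the check made in the FC_STATUS_RQ_SUCCESS path:
 */
static inline bool example_fctl_single_seq(struct fc_frame_header *fc_hdr)
{
        uint32_t fctl = (fc_hdr->fh_f_ctl[0] << 16 |
                         fc_hdr->fh_f_ctl[1] << 8 |
                         fc_hdr->fh_f_ctl[2]);
        uint32_t req = FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT;

        return (fctl & req) == req;
}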
14222
14223/**
14224 * lpfc_sli4_fp_handle_cqe - Process fast-path work queue completion entry
14225 * @phba: adapter with cq
14226 * @cq: Pointer to the completion queue.
14227 * @cqe: Pointer to fast-path completion queue entry.
14228 *
14229 * This routine processes a fast-path work queue completion entry from a
14230 * fast-path event queue for FCP command response completion.
14231 *
14232 * Return: true if work posted to worker thread, otherwise false.
14233 **/
14234static bool
14235lpfc_sli4_fp_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
14236                         struct lpfc_cqe *cqe)
14237{
14238        struct lpfc_wcqe_release wcqe;
14239        bool workposted = false;
14240
14241        /* Copy the work queue CQE and convert endian order if needed */
14242        lpfc_sli4_pcimem_bcopy(cqe, &wcqe, sizeof(struct lpfc_cqe));
14243
14244        /* Check and process for different type of WCQE and dispatch */
14245        switch (bf_get(lpfc_wcqe_c_code, &wcqe)) {
14246        case CQE_CODE_COMPL_WQE:
14247        case CQE_CODE_NVME_ERSP:
14248                cq->CQ_wq++;
14249                /* Process the WQ complete event */
14250                phba->last_completion_time = jiffies;
14251                if (cq->subtype == LPFC_IO || cq->subtype == LPFC_NVME_LS)
14252                        lpfc_sli4_fp_handle_fcp_wcqe(phba, cq,
14253                                (struct lpfc_wcqe_complete *)&wcqe);
14254                break;
14255        case CQE_CODE_RELEASE_WQE:
14256                cq->CQ_release_wqe++;
14257                /* Process the WQ release event */
14258                lpfc_sli4_fp_handle_rel_wcqe(phba, cq,
14259                                (struct lpfc_wcqe_release *)&wcqe);
14260                break;
14261        case CQE_CODE_XRI_ABORTED:
14262                cq->CQ_xri_aborted++;
14263                /* Process the WQ XRI abort event */
14264                phba->last_completion_time = jiffies;
14265                workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq,
14266                                (struct sli4_wcqe_xri_aborted *)&wcqe);
14267                break;
14268        case CQE_CODE_RECEIVE_V1:
14269        case CQE_CODE_RECEIVE:
14270                phba->last_completion_time = jiffies;
14271                if (cq->subtype == LPFC_NVMET) {
14272                        workposted = lpfc_sli4_nvmet_handle_rcqe(
14273                                phba, cq, (struct lpfc_rcqe *)&wcqe);
14274                }
14275                break;
14276        default:
14277                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14278                                "0144 Not a valid CQE code: x%x\n",
14279                                bf_get(lpfc_wcqe_c_code, &wcqe));
14280                break;
14281        }
14282        return workposted;
14283}
14284
14285/**
14286 * lpfc_sli4_sched_cq_work - Schedules cq work
14287 * @phba: Pointer to HBA context object.
14288 * @cq: Pointer to CQ
14289 * @cqid: CQ ID
14290 *
14291 * This routine checks the poll mode of the CQ, then either schedules a
14292 * softirq (irq_poll) or queue_work on cq->chann to complete the
14293 * CQ work.
14294 *
14295 * The queue_work path is taken if in NVMET mode, or if the poll mode is
14296 * LPFC_QUEUE_WORK.  Otherwise, the softirq path is taken.
14297 *
14298 **/
14299static void lpfc_sli4_sched_cq_work(struct lpfc_hba *phba,
14300                                    struct lpfc_queue *cq, uint16_t cqid)
14301{
14302        int ret = 0;
14303
14304        switch (cq->poll_mode) {
14305        case LPFC_IRQ_POLL:
14306                irq_poll_sched(&cq->iop);
14307                break;
14308        case LPFC_QUEUE_WORK:
14309        default:
14310                if (is_kdump_kernel())
14311                        ret = queue_work(phba->wq, &cq->irqwork);
14312                else
14313                        ret = queue_work_on(cq->chann, phba->wq, &cq->irqwork);
14314                if (!ret)
14315                        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14316                                        "0383 Cannot schedule queue work "
14317                                        "for CQ eqcqid=%d, cqid=%d on CPU %d\n",
14318                                        cqid, cq->queue_id,
14319                                        raw_smp_processor_id());
14320        }
14321}
14322
14323/**
14324 * lpfc_sli4_hba_handle_eqe - Process a fast-path event queue entry
14325 * @phba: Pointer to HBA context object.
14326 * @eq: Pointer to the queue structure.
14327 * @eqe: Pointer to fast-path event queue entry.
14328 *
14329 * This routine processes an event queue entry from the fast-path event queue.
14330 * It checks the MajorCode and MinorCode to determine whether this is a
14331 * completion event on a completion queue; if not, an error is logged and
14332 * the routine returns. Otherwise, it gets the corresponding completion
14333 * queue, processes all the entries on the completion queue, rearms the
14334 * completion queue, and then returns.
14335 **/
14336static void
14337lpfc_sli4_hba_handle_eqe(struct lpfc_hba *phba, struct lpfc_queue *eq,
14338                         struct lpfc_eqe *eqe)
14339{
14340        struct lpfc_queue *cq = NULL;
14341        uint32_t qidx = eq->hdwq;
14342        uint16_t cqid, id;
14343
14344        if (unlikely(bf_get_le32(lpfc_eqe_major_code, eqe) != 0)) {
14345                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14346                                "0366 Not a valid completion "
14347                                "event: majorcode=x%x, minorcode=x%x\n",
14348                                bf_get_le32(lpfc_eqe_major_code, eqe),
14349                                bf_get_le32(lpfc_eqe_minor_code, eqe));
14350                return;
14351        }
14352
14353        /* Get the reference to the corresponding CQ */
14354        cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);
14355
14356        /* Use the fast lookup method first */
14357        if (cqid <= phba->sli4_hba.cq_max) {
14358                cq = phba->sli4_hba.cq_lookup[cqid];
14359                if (cq)
14360                        goto  work_cq;
14361        }
14362
14363        /* Next check for NVMET completion */
14364        if (phba->cfg_nvmet_mrq && phba->sli4_hba.nvmet_cqset) {
14365                id = phba->sli4_hba.nvmet_cqset[0]->queue_id;
14366                if ((cqid >= id) && (cqid < (id + phba->cfg_nvmet_mrq))) {
14367                        /* Process NVMET unsol rcv */
14368                        cq = phba->sli4_hba.nvmet_cqset[cqid - id];
14369                        goto  process_cq;
14370                }
14371        }
14372
14373        if (phba->sli4_hba.nvmels_cq &&
14374            (cqid == phba->sli4_hba.nvmels_cq->queue_id)) {
14375                /* Process NVME unsol rcv */
14376                cq = phba->sli4_hba.nvmels_cq;
14377        }
14378
14379        /* Otherwise this is a Slow path event */
14380        if (cq == NULL) {
14381                lpfc_sli4_sp_handle_eqe(phba, eqe,
14382                                        phba->sli4_hba.hdwq[qidx].hba_eq);
14383                return;
14384        }
14385
14386process_cq:
14387        if (unlikely(cqid != cq->queue_id)) {
14388                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14389                                "0368 Miss-matched fast-path completion "
14390                                "queue identifier: eqcqid=%d, fcpcqid=%d\n",
14391                                cqid, cq->queue_id);
14392                return;
14393        }
14394
14395work_cq:
14396#if defined(CONFIG_SCSI_LPFC_DEBUG_FS)
14397        if (phba->ktime_on)
14398                cq->isr_timestamp = ktime_get_ns();
14399        else
14400                cq->isr_timestamp = 0;
14401#endif
14402        lpfc_sli4_sched_cq_work(phba, cq, cqid);
14403}
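
/*
 * Illustrative sketch (not part of the driver): the "fast lookup" above
 * indexes a flat cqid-to-cq table (phba->sli4_hba.cq_lookup) populated at
 * queue setup, instead of walking an EQ's child list the way
 * lpfc_sli4_sp_handle_eqe() does.  The two lookups, side by side, as
 * hypothetical example_* helpers:
 */
static inline struct lpfc_queue *
example_cq_fast_lookup(struct lpfc_hba *phba, uint16_t cqid)
{
        /* O(1), valid only for cqids covered by the lookup table */
        return (cqid <= phba->sli4_hba.cq_max) ?
                phba->sli4_hba.cq_lookup[cqid] : NULL;
}

static inline struct lpfc_queue *
example_cq_list_lookup(struct lpfc_queue *eq, uint16_t cqid)
{
        struct lpfc_queue *childq;

        /* O(n), walk the EQ's children as the slow path does */
        list_for_each_entry(childq, &eq->child_list, list) {
                if (childq->queue_id == cqid)
                        return childq;
        }
        return NULL;
}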
14404
14405/**
14406 * __lpfc_sli4_hba_process_cq - Process a fast-path completion queue
14407 * @cq: Pointer to CQ to be processed
14408 * @poll_mode: Enum lpfc_poll_mode to determine poll mode
14409 *
14410 * This routine calls the cq processing routine with the handler for
14411 * fast path CQEs.
14412 *
14413 * The CQ routine returns two values: the first is the calling status,
14414 * which indicates whether work was queued to the background discovery
14415 * thread. If true, the routine should wake up the discovery thread;
14416 * the second is the delay parameter. If non-zero, rather than rearming
14417 * the CQ and generating yet another interrupt, the CQ handler should be
14418 * queued so that it is processed in a subsequent polling action. The value
14419 * of the delay indicates when to reschedule it.
14420 **/
14421static void
14422__lpfc_sli4_hba_process_cq(struct lpfc_queue *cq,
14423                           enum lpfc_poll_mode poll_mode)
14424{
14425        struct lpfc_hba *phba = cq->phba;
14426        unsigned long delay;
14427        bool workposted = false;
14428        int ret = 0;
14429
14430        /* process and rearm the CQ */
14431        workposted |= __lpfc_sli4_process_cq(phba, cq, lpfc_sli4_fp_handle_cqe,
14432                                             &delay, poll_mode);
14433
14434        if (delay) {
14435                if (is_kdump_kernel())
14436                        ret = queue_delayed_work(phba->wq, &cq->sched_irqwork,
14437                                                delay);
14438                else
14439                        ret = queue_delayed_work_on(cq->chann, phba->wq,
14440                                                &cq->sched_irqwork, delay);
14441                if (!ret)
14442                        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14443                                        "0367 Cannot schedule queue work "
14444                                        "for cqid=%d on CPU %d\n",
14445                                        cq->queue_id, cq->chann);
14446        }
14447
14448        /* wake up worker thread if there are works to be done */
14449        if (workposted)
14450                lpfc_worker_wake_up(phba);
14451}
14452
14453/**
14454 * lpfc_sli4_hba_process_cq - fast-path work handler when started by
14455 *   interrupt
14456 * @work: pointer to work element
14457 *
14458 * Translates from the work element and calls the fast-path handler.
14459 **/
14460static void
14461lpfc_sli4_hba_process_cq(struct work_struct *work)
14462{
14463        struct lpfc_queue *cq = container_of(work, struct lpfc_queue, irqwork);
14464
14465        __lpfc_sli4_hba_process_cq(cq, LPFC_QUEUE_WORK);
14466}
14467
14468/**
14469 * lpfc_sli4_dly_hba_process_cq - fast-path work handler when started by timer
14470 * @work: pointer to work element
14471 *
14472 * Translates from the work element and calls the fast-path handler.
14473 **/
14474static void
14475lpfc_sli4_dly_hba_process_cq(struct work_struct *work)
14476{
14477        struct lpfc_queue *cq = container_of(to_delayed_work(work),
14478                                        struct lpfc_queue, sched_irqwork);
14479
14480        __lpfc_sli4_hba_process_cq(cq, LPFC_QUEUE_WORK);
14481}
14482
14483/**
14484 * lpfc_sli4_hba_intr_handler - HBA interrupt handler to SLI-4 device
14485 * @irq: Interrupt number.
14486 * @dev_id: The device context pointer.
14487 *
14488 * This function is directly called from the PCI layer as an interrupt
14489 * service routine when device with SLI-4 interface spec is enabled with
14490 * MSI-X multi-message interrupt mode and there is a fast-path FCP IOCB
14491 * ring event in the HBA. However, when the device is enabled with either
14492 * MSI or Pin-IRQ interrupt mode, this function is called as part of the
14493 * device-level interrupt handler. When the PCI slot is in error recovery
14494 * or the HBA is undergoing initialization, the interrupt handler will not
14495 * process the interrupt. The SCSI FCP fast-path ring event are handled in
14496 * the intrrupt context. This function is called without any lock held.
14497 * It gets the hbalock to access and update SLI data structures. Note that,
14498 * the FCP EQ to FCP CQ are one-to-one map such that the FCP EQ index is
14499 * equal to that of FCP CQ index.
14500 *
14501 * The link attention and ELS ring attention events are handled
14502 * by the worker thread. The interrupt handler signals the worker thread
14503 * and returns for these events. This function is called without any lock
14504 * held. It gets the hbalock to access and update SLI data structures.
14505 *
14506 * This function returns IRQ_HANDLED when interrupt is handled else it
14507 * returns IRQ_NONE.
14508 **/
14509irqreturn_t
14510lpfc_sli4_hba_intr_handler(int irq, void *dev_id)
14511{
14512        struct lpfc_hba *phba;
14513        struct lpfc_hba_eq_hdl *hba_eq_hdl;
14514        struct lpfc_queue *fpeq;
14515        unsigned long iflag;
14516        int ecount = 0;
14517        int hba_eqidx;
14518        struct lpfc_eq_intr_info *eqi;
14519
14520        /* Get the driver's phba structure from the dev_id */
14521        hba_eq_hdl = (struct lpfc_hba_eq_hdl *)dev_id;
14522        phba = hba_eq_hdl->phba;
14523        hba_eqidx = hba_eq_hdl->idx;
14524
14525        if (unlikely(!phba))
14526                return IRQ_NONE;
14527        if (unlikely(!phba->sli4_hba.hdwq))
14528                return IRQ_NONE;
14529
14530        /* Get to the EQ struct associated with this vector */
14531        fpeq = phba->sli4_hba.hba_eq_hdl[hba_eqidx].eq;
14532        if (unlikely(!fpeq))
14533                return IRQ_NONE;
14534
14535        /* Check device state for handling interrupt */
14536        if (unlikely(lpfc_intr_state_check(phba))) {
14537                /* Check again for link_state with lock held */
14538                spin_lock_irqsave(&phba->hbalock, iflag);
14539                if (phba->link_state < LPFC_LINK_DOWN)
14540                        /* Flush, clear interrupt, and rearm the EQ */
14541                        lpfc_sli4_eqcq_flush(phba, fpeq);
14542                spin_unlock_irqrestore(&phba->hbalock, iflag);
14543                return IRQ_NONE;
14544        }
14545
14546        eqi = this_cpu_ptr(phba->sli4_hba.eq_info);
14547        eqi->icnt++;
14548
14549        fpeq->last_cpu = raw_smp_processor_id();
14550
14551        if (eqi->icnt > LPFC_EQD_ISR_TRIGGER &&
14552            fpeq->q_flag & HBA_EQ_DELAY_CHK &&
14553            phba->cfg_auto_imax &&
14554            fpeq->q_mode != LPFC_MAX_AUTO_EQ_DELAY &&
14555            phba->sli.sli_flag & LPFC_SLI_USE_EQDR)
14556                lpfc_sli4_mod_hba_eq_delay(phba, fpeq, LPFC_MAX_AUTO_EQ_DELAY);
14557
14558        /* process and rearm the EQ */
14559        ecount = lpfc_sli4_process_eq(phba, fpeq, LPFC_QUEUE_REARM);
14560
14561        if (unlikely(ecount == 0)) {
14562                fpeq->EQ_no_entry++;
14563                if (phba->intr_type == MSIX)
14564                        /* MSI-X treated interrupt served as no EQ share INT */
14565                        lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
14566                                        "0358 MSI-X interrupt with no EQE\n");
14567                else
14568                        /* Non MSI-X treated on interrupt as EQ share INT */
14569                        return IRQ_NONE;
14570        }
14571
14572        return IRQ_HANDLED;
14573} /* lpfc_sli4_hba_intr_handler */
14574
14575/**
14576 * lpfc_sli4_intr_handler - Device-level interrupt handler for SLI-4 device
14577 * @irq: Interrupt number.
14578 * @dev_id: The device context pointer.
14579 *
14580 * This function is the device-level interrupt handler to device with SLI-4
14581 * interface spec, called from the PCI layer when either MSI or Pin-IRQ
14582 * interrupt mode is enabled and there is an event in the HBA which requires
14583 * driver attention. This function invokes the slow-path interrupt attention
14584 * handling function and fast-path interrupt attention handling function in
14585 * turn to process the relevant HBA attention events. This function is called
14586 * without any lock held. It gets the hbalock to access and update SLI data
14587 * structures.
14588 *
14589 * This function returns IRQ_HANDLED when interrupt is handled, else it
14590 * returns IRQ_NONE.
14591 **/
14592irqreturn_t
14593lpfc_sli4_intr_handler(int irq, void *dev_id)
14594{
14595        struct lpfc_hba  *phba;
14596        irqreturn_t hba_irq_rc;
14597        bool hba_handled = false;
14598        int qidx;
14599
14600        /* Get the driver's phba structure from the dev_id */
14601        phba = (struct lpfc_hba *)dev_id;
14602
14603        if (unlikely(!phba))
14604                return IRQ_NONE;
14605
14606        /*
14607         * Invoke fast-path host attention interrupt handling as appropriate.
14608         */
14609        for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) {
14610                hba_irq_rc = lpfc_sli4_hba_intr_handler(irq,
14611                                        &phba->sli4_hba.hba_eq_hdl[qidx]);
14612                if (hba_irq_rc == IRQ_HANDLED)
14613                        hba_handled |= true;
14614        }
14615
14616        return (hba_handled == true) ? IRQ_HANDLED : IRQ_NONE;
14617} /* lpfc_sli4_intr_handler */
14618
14619void lpfc_sli4_poll_hbtimer(struct timer_list *t)
14620{
14621        struct lpfc_hba *phba = from_timer(phba, t, cpuhp_poll_timer);
14622        struct lpfc_queue *eq;
14623        int i = 0;
14624
14625        rcu_read_lock();
14626
14627        list_for_each_entry_rcu(eq, &phba->poll_list, _poll_list)
14628                i += lpfc_sli4_poll_eq(eq, LPFC_POLL_SLOWPATH);
14629        if (!list_empty(&phba->poll_list))
14630                mod_timer(&phba->cpuhp_poll_timer,
14631                          jiffies + msecs_to_jiffies(LPFC_POLL_HB));
14632
14633        rcu_read_unlock();
14634}
14635
14636inline int lpfc_sli4_poll_eq(struct lpfc_queue *eq, uint8_t path)
14637{
14638        struct lpfc_hba *phba = eq->phba;
14639        int i = 0;
14640
14641        /*
14642         * Unlocking an irq is one of the entry points to check
14643         * for re-schedule, but we are good for the io submission
14644         * path as the midlayer does a get_cpu to glue us in. Flush
14645         * out the invalidation queue so we can see the updated
14646         * value for the flag.
14647         */
14648        smp_rmb();
14649
14650        if (READ_ONCE(eq->mode) == LPFC_EQ_POLL)
14651                /* We will likely not get the completion for the caller
14652                 * during this iteration, but that is fine.
14653                 * Future io's coming on this eq should be able to
14654                 * pick it up.  As for the case of single io's, they
14655                 * will be handled through a sched from the polling timer
14656                 * function, which is currently triggered every 1 msec.
14657                 */
14658                i = lpfc_sli4_process_eq(phba, eq, LPFC_QUEUE_NOARM);
14659
14660        return i;
14661}
14662
14663static inline void lpfc_sli4_add_to_poll_list(struct lpfc_queue *eq)
14664{
14665        struct lpfc_hba *phba = eq->phba;
14666
14667        /* kickstart slowpath processing if needed */
14668        if (list_empty(&phba->poll_list))
14669                mod_timer(&phba->cpuhp_poll_timer,
14670                          jiffies + msecs_to_jiffies(LPFC_POLL_HB));
14671
14672        list_add_rcu(&eq->_poll_list, &phba->poll_list);
14673        synchronize_rcu();
14674}
14675
14676static inline void lpfc_sli4_remove_from_poll_list(struct lpfc_queue *eq)
14677{
14678        struct lpfc_hba *phba = eq->phba;
14679
14680        /* Disable slowpath processing for this eq.  Kick start the eq
14681         * by RE-ARMING the eq's ASAP
14682         */
14683        list_del_rcu(&eq->_poll_list);
14684        synchronize_rcu();
14685
14686        if (list_empty(&phba->poll_list))
14687                del_timer_sync(&phba->cpuhp_poll_timer);
14688}
14689
14690void lpfc_sli4_cleanup_poll_list(struct lpfc_hba *phba)
14691{
14692        struct lpfc_queue *eq, *next;
14693
14694        list_for_each_entry_safe(eq, next, &phba->poll_list, _poll_list)
14695                list_del(&eq->_poll_list);
14696
14697        INIT_LIST_HEAD(&phba->poll_list);
14698        synchronize_rcu();
14699}
14700
14701static inline void
14702__lpfc_sli4_switch_eqmode(struct lpfc_queue *eq, uint8_t mode)
14703{
14704        if (mode == eq->mode)
14705                return;
14706        /*
14707         * Currently this function is only called during a hotplug
14708         * event and the cpu on which this function is executing
14709         * is going offline.  By now the hotplug has instructed
14710         * the scheduler to remove this cpu from the cpu active mask,
14711         * so we don't need to worry about being put aside by the
14712         * scheduler for a high priority process.  Yes, interrupts
14713         * could still come in, but they are known to retire ASAP.
14714         */
14715
14716        /* Disable polling in the fastpath */
14717        WRITE_ONCE(eq->mode, mode);
14718        /* flush out the store buffer */
14719        smp_wmb();
14720
14721        /*
14722         * Add this eq to the polling list and start polling. For
14723         * a grace period both interrupt handler and poller will
14724         * try to process the eq _but_ that's fine.  We have a
14725         * synchronization mechanism in place (queue_claimed) to
14726         * deal with it.  This is just a draining phase for int-
14727         * errupt handler (not eq's) as we have guranteed through
14728         * barrier that all the CPUs have seen the new CQ_POLLED
14729         * state. which will effectively disable the REARMING of
14730         * the EQ.  The whole idea is eq's die off eventually as
14731         * we are not rearming EQ's anymore.
14732         */
14733        mode ? lpfc_sli4_add_to_poll_list(eq) :
14734               lpfc_sli4_remove_from_poll_list(eq);
14735}
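
/*
 * Illustrative sketch (not part of the driver): the WRITE_ONCE()/smp_wmb()
 * above pairs with the smp_rmb()/READ_ONCE() in lpfc_sli4_poll_eq(): the
 * writer publishes the new mode before the poll-list update, and the
 * reader orders its loads so it observes the mode before deciding whether
 * to poll.  The bare pattern, as hypothetical example_* helpers:
 */
static inline void example_publish_eq_mode(struct lpfc_queue *eq, uint8_t mode)
{
        WRITE_ONCE(eq->mode, mode);     /* store the new mode */
        smp_wmb();                      /* order it before later stores */
}

static inline bool example_eq_is_polled(struct lpfc_queue *eq)
{
        smp_rmb();                      /* pairs with the smp_wmb() above */
        return READ_ONCE(eq->mode) == LPFC_EQ_POLL;
}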
14736
14737void lpfc_sli4_start_polling(struct lpfc_queue *eq)
14738{
14739        __lpfc_sli4_switch_eqmode(eq, LPFC_EQ_POLL);
14740}
14741
14742void lpfc_sli4_stop_polling(struct lpfc_queue *eq)
14743{
14744        struct lpfc_hba *phba = eq->phba;
14745
14746        __lpfc_sli4_switch_eqmode(eq, LPFC_EQ_INTERRUPT);
14747
14748        /* Kick start the pending io's in h/w.
14749         * Once we switch back to interrupt processing on an eq,
14750         * the io path completion will only arm eq's when it
14751         * receives a completion.  But since the eq's are in a
14752         * disarmed state, it doesn't receive a completion.  This
14753         * creates a deadlock scenario.
14754         */
14755        phba->sli4_hba.sli4_write_eq_db(phba, eq, 0, LPFC_QUEUE_REARM);
14756}
14757
14758/**
14759 * lpfc_sli4_queue_free - free a queue structure and associated memory
14760 * @queue: The queue structure to free.
14761 *
14762 * This function frees a queue structure and the DMAable memory used for
14763 * the host resident queue. This function must be called after destroying the
14764 * queue on the HBA.
14765 **/
14766void
14767lpfc_sli4_queue_free(struct lpfc_queue *queue)
14768{
14769        struct lpfc_dmabuf *dmabuf;
14770
14771        if (!queue)
14772                return;
14773
14774        if (!list_empty(&queue->wq_list))
14775                list_del(&queue->wq_list);
14776
14777        while (!list_empty(&queue->page_list)) {
14778                list_remove_head(&queue->page_list, dmabuf, struct lpfc_dmabuf,
14779                                 list);
14780                dma_free_coherent(&queue->phba->pcidev->dev, queue->page_size,
14781                                  dmabuf->virt, dmabuf->phys);
14782                kfree(dmabuf);
14783        }
14784        if (queue->rqbp) {
14785                lpfc_free_rq_buffer(queue->phba, queue);
14786                kfree(queue->rqbp);
14787        }
14788
14789        if (!list_empty(&queue->cpu_list))
14790                list_del(&queue->cpu_list);
14791
14792        kfree(queue);
14793        return;
14794}
14795
14796/**
14797 * lpfc_sli4_queue_alloc - Allocate and initialize a queue structure
14798 * @phba: The HBA that this queue is being created on.
14799 * @page_size: The size of a queue page
14800 * @entry_size: The size of each queue entry for this queue.
14801 * @entry_count: The number of entries that this queue will handle.
14802 * @cpu: The cpu that will primarily utilize this queue.
14803 *
14804 * This function allocates a queue structure and the DMAable memory used for
14805 * the host resident queue. This function must be called before creating the
14806 * queue on the HBA.
14807 **/
14808struct lpfc_queue *
14809lpfc_sli4_queue_alloc(struct lpfc_hba *phba, uint32_t page_size,
14810                      uint32_t entry_size, uint32_t entry_count, int cpu)
14811{
14812        struct lpfc_queue *queue;
14813        struct lpfc_dmabuf *dmabuf;
14814        uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
14815        uint16_t x, pgcnt;
14816
14817        if (!phba->sli4_hba.pc_sli4_params.supported)
14818                hw_page_size = page_size;
14819
14820        pgcnt = ALIGN(entry_size * entry_count, hw_page_size) / hw_page_size;
14821
14822        /* If needed, adjust the page count to match the max the adapter supports */
14823        if (pgcnt > phba->sli4_hba.pc_sli4_params.wqpcnt)
14824                pgcnt = phba->sli4_hba.pc_sli4_params.wqpcnt;
14825
14826        queue = kzalloc_node(sizeof(*queue) + (sizeof(void *) * pgcnt),
14827                             GFP_KERNEL, cpu_to_node(cpu));
14828        if (!queue)
14829                return NULL;
14830
14831        INIT_LIST_HEAD(&queue->list);
14832        INIT_LIST_HEAD(&queue->_poll_list);
14833        INIT_LIST_HEAD(&queue->wq_list);
14834        INIT_LIST_HEAD(&queue->wqfull_list);
14835        INIT_LIST_HEAD(&queue->page_list);
14836        INIT_LIST_HEAD(&queue->child_list);
14837        INIT_LIST_HEAD(&queue->cpu_list);
14838
14839        /* Set queue parameters now.  If the system cannot provide memory
14840         * resources, the free routine needs to know what was allocated.
14841         */
14842        queue->page_count = pgcnt;
14843        queue->q_pgs = (void **)&queue[1];
14844        queue->entry_cnt_per_pg = hw_page_size / entry_size;
14845        queue->entry_size = entry_size;
14846        queue->entry_count = entry_count;
14847        queue->page_size = hw_page_size;
14848        queue->phba = phba;
14849
14850        for (x = 0; x < queue->page_count; x++) {
14851                dmabuf = kzalloc_node(sizeof(*dmabuf), GFP_KERNEL,
14852                                      dev_to_node(&phba->pcidev->dev));
14853                if (!dmabuf)
14854                        goto out_fail;
14855                dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
14856                                                  hw_page_size, &dmabuf->phys,
14857                                                  GFP_KERNEL);
14858                if (!dmabuf->virt) {
14859                        kfree(dmabuf);
14860                        goto out_fail;
14861                }
14862                dmabuf->buffer_tag = x;
14863                list_add_tail(&dmabuf->list, &queue->page_list);
14864                /* use lpfc_sli4_qe to index a particular entry in this page */
14865                queue->q_pgs[x] = dmabuf->virt;
14866        }
14867        INIT_WORK(&queue->irqwork, lpfc_sli4_hba_process_cq);
14868        INIT_WORK(&queue->spwork, lpfc_sli4_sp_process_cq);
14869        INIT_DELAYED_WORK(&queue->sched_irqwork, lpfc_sli4_dly_hba_process_cq);
14870        INIT_DELAYED_WORK(&queue->sched_spwork, lpfc_sli4_dly_sp_process_cq);
14871
14872        /* notify_interval will be set during q creation */
14873
14874        return queue;
14875out_fail:
14876        lpfc_sli4_queue_free(queue);
14877        return NULL;
14878}
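
/*
 * Worked example (illustrative, not part of the driver): with 4096-byte
 * hardware pages, a queue of 1024 entries at 64 bytes each needs
 * ALIGN(64 * 1024, 4096) / 4096 = 16 pages and holds 4096 / 64 = 64
 * entries per page.  A hypothetical helper restating the computation
 * used above:
 */
static inline uint16_t example_queue_pgcnt(uint32_t entry_size,
                                           uint32_t entry_count,
                                           uint32_t hw_page_size)
{
        /* round the total byte size up to a whole number of pages */
        return ALIGN(entry_size * entry_count, hw_page_size) / hw_page_size;
}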
14879
14880/**
14881 * lpfc_dual_chute_pci_bar_map - Map pci base address register to host memory
14882 * @phba: HBA structure that indicates port to create a queue on.
14883 * @pci_barset: PCI BAR set flag.
14884 *
14885 * This function performs an iomap of the specified PCI BAR address to a host
14886 * memory address, if not already done so, and returns it. The returned host
14887 * memory address can be NULL.
14888 */
14889static void __iomem *
14890lpfc_dual_chute_pci_bar_map(struct lpfc_hba *phba, uint16_t pci_barset)
14891{
14892        if (!phba->pcidev)
14893                return NULL;
14894
14895        switch (pci_barset) {
14896        case WQ_PCI_BAR_0_AND_1:
14897                return phba->pci_bar0_memmap_p;
14898        case WQ_PCI_BAR_2_AND_3:
14899                return phba->pci_bar2_memmap_p;
14900        case WQ_PCI_BAR_4_AND_5:
14901                return phba->pci_bar4_memmap_p;
14902        default:
14903                break;
14904        }
14905        return NULL;
14906}
14907
14908/**
14909 * lpfc_modify_hba_eq_delay - Modify Delay Multiplier on EQs
14910 * @phba: HBA structure that EQs are on.
14911 * @startq: The starting EQ index to modify
14912 * @numq: The number of EQs (consecutive indexes) to modify
14913 * @usdelay: amount of delay
14914 *
14915 * This function revises the EQ delay on 1 or more EQs. The EQ delay
14916 * is set either by writing to a register (if supported by the SLI Port)
14917 * or by mailbox command. The mailbox command allows several EQs to be
14918 * updated at once.
14919 *
14920 * The @phba struct is used to send a mailbox command to HBA. The @startq
14921 * is used to get the starting EQ index to change. The @numq value is
14922 * used to specify how many consecutive EQ indexes, starting at EQ index,
14923 * are to be changed. This function is synchronous and waits for any
14924 * mailbox commands to finish before returning.
14925 *
14926 * This function returns no value. If unable to allocate enough memory,
14927 * or if a mailbox command fails, an error is logged and EQ delay
14928 * programming stops; in the mailbox-failure case, some EQs may already
14929 * have had their delay multiplier changed.
14930 **/
14931void
14932lpfc_modify_hba_eq_delay(struct lpfc_hba *phba, uint32_t startq,
14933                         uint32_t numq, uint32_t usdelay)
14934{
14935        struct lpfc_mbx_modify_eq_delay *eq_delay;
14936        LPFC_MBOXQ_t *mbox;
14937        struct lpfc_queue *eq;
14938        int cnt = 0, rc, length;
14939        uint32_t shdr_status, shdr_add_status;
14940        uint32_t dmult;
14941        int qidx;
14942        union lpfc_sli4_cfg_shdr *shdr;
14943
14944        if (startq >= phba->cfg_irq_chann)
14945                return;
14946
14947        if (usdelay > 0xFFFF) {
14948                lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP | LOG_NVME,
14949                                "6429 usdelay %d too large. Scaled down to "
14950                                "0xFFFF.\n", usdelay);
14951                usdelay = 0xFFFF;
14952        }
14953
14954        /* set values by EQ_DELAY register if supported */
14955        if (phba->sli.sli_flag & LPFC_SLI_USE_EQDR) {
14956                for (qidx = startq; qidx < phba->cfg_irq_chann; qidx++) {
14957                        eq = phba->sli4_hba.hba_eq_hdl[qidx].eq;
14958                        if (!eq)
14959                                continue;
14960
14961                        lpfc_sli4_mod_hba_eq_delay(phba, eq, usdelay);
14962
14963                        if (++cnt >= numq)
14964                                break;
14965                }
14966                return;
14967        }
14968
14969        /* Otherwise, set values by mailbox cmd */
14970
14971        mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
14972        if (!mbox) {
14973                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14974                                "6428 Failed allocating mailbox cmd buffer."
14975                                " EQ delay was not set.\n");
14976                return;
14977        }
14978        length = (sizeof(struct lpfc_mbx_modify_eq_delay) -
14979                  sizeof(struct lpfc_sli4_cfg_mhdr));
14980        lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
14981                         LPFC_MBOX_OPCODE_MODIFY_EQ_DELAY,
14982                         length, LPFC_SLI4_MBX_EMBED);
14983        eq_delay = &mbox->u.mqe.un.eq_delay;
14984
14985        /* Calculate the delay multiplier from the maximum interrupts per second */
14986        dmult = (usdelay * LPFC_DMULT_CONST) / LPFC_SEC_TO_USEC;
14987        if (dmult)
14988                dmult--;
14989        if (dmult > LPFC_DMULT_MAX)
14990                dmult = LPFC_DMULT_MAX;
14991
14992        for (qidx = startq; qidx < phba->cfg_irq_chann; qidx++) {
14993                eq = phba->sli4_hba.hba_eq_hdl[qidx].eq;
14994                if (!eq)
14995                        continue;
14996                eq->q_mode = usdelay;
14997                eq_delay->u.request.eq[cnt].eq_id = eq->queue_id;
14998                eq_delay->u.request.eq[cnt].phase = 0;
14999                eq_delay->u.request.eq[cnt].delay_multi = dmult;
15000
15001                if (++cnt >= numq)
15002                        break;
15003        }
15004        eq_delay->u.request.num_eq = cnt;
15005
15006        mbox->vport = phba->pport;
15007        mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
15008        mbox->ctx_buf = NULL;
15009        mbox->ctx_ndlp = NULL;
15010        rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
15011        shdr = (union lpfc_sli4_cfg_shdr *) &eq_delay->header.cfg_shdr;
15012        shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
15013        shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
15014        if (shdr_status || shdr_add_status || rc) {
15015                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15016                                "2512 MODIFY_EQ_DELAY mailbox failed with "
15017                                "status x%x add_status x%x, mbx status x%x\n",
15018                                shdr_status, shdr_add_status, rc);
15019        }
15020        mempool_free(mbox, phba->mbox_mem_pool);
15021        return;
15022}
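
/*
 * Illustrative sketch (not part of the driver): the mailbox path above
 * converts the requested microsecond delay into the hardware delay
 * multiplier.  This hypothetical helper restates that arithmetic; the
 * clamp and decrement mirror the code above:
 */
static inline uint32_t example_usdelay_to_dmult(uint32_t usdelay)
{
        uint32_t dmult = (usdelay * LPFC_DMULT_CONST) / LPFC_SEC_TO_USEC;

        if (dmult)
                dmult--;        /* same adjustment as above */
        return min_t(uint32_t, dmult, LPFC_DMULT_MAX);
}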
15023
15024/**
15025 * lpfc_eq_create - Create an Event Queue on the HBA
15026 * @phba: HBA structure that indicates port to create a queue on.
15027 * @eq: The queue structure to use to create the event queue.
15028 * @imax: The maximum interrupt per second limit.
15029 *
15030 * This function creates an event queue, as detailed in @eq, on a port,
15031 * described by @phba by sending an EQ_CREATE mailbox command to the HBA.
15032 *
15033 * The @phba struct is used to send mailbox command to HBA. The @eq struct
15034 * is used to get the entry count and entry size that are necessary to
15035 * determine the number of pages to allocate and use for this queue. This
15036 * function will send the EQ_CREATE mailbox command to the HBA to setup the
15037 * event queue. This function is synchronous and waits for the mailbox
15038 * command to finish before continuing.
15039 *
15040 * On success this function will return a zero. If unable to allocate enough
15041 * memory this function will return -ENOMEM. If the queue create mailbox command
15042 * fails this function will return -ENXIO.
15043 **/
15044int
15045lpfc_eq_create(struct lpfc_hba *phba, struct lpfc_queue *eq, uint32_t imax)
15046{
15047        struct lpfc_mbx_eq_create *eq_create;
15048        LPFC_MBOXQ_t *mbox;
15049        int rc, length, status = 0;
15050        struct lpfc_dmabuf *dmabuf;
15051        uint32_t shdr_status, shdr_add_status;
15052        union lpfc_sli4_cfg_shdr *shdr;
15053        uint16_t dmult;
15054        uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
15055
15056        /* sanity check on queue memory */
15057        if (!eq)
15058                return -ENODEV;
15059        if (!phba->sli4_hba.pc_sli4_params.supported)
15060                hw_page_size = SLI4_PAGE_SIZE;
15061
15062        mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
15063        if (!mbox)
15064                return -ENOMEM;
15065        length = (sizeof(struct lpfc_mbx_eq_create) -
15066                  sizeof(struct lpfc_sli4_cfg_mhdr));
15067        lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
15068                         LPFC_MBOX_OPCODE_EQ_CREATE,
15069                         length, LPFC_SLI4_MBX_EMBED);
15070        eq_create = &mbox->u.mqe.un.eq_create;
15071        shdr = (union lpfc_sli4_cfg_shdr *) &eq_create->header.cfg_shdr;
15072        bf_set(lpfc_mbx_eq_create_num_pages, &eq_create->u.request,
15073               eq->page_count);
15074        bf_set(lpfc_eq_context_size, &eq_create->u.request.context,
15075               LPFC_EQE_SIZE);
15076        bf_set(lpfc_eq_context_valid, &eq_create->u.request.context, 1);
15077
15078        /* Use version 2 of CREATE_EQ if eqav is set */
15079        if (phba->sli4_hba.pc_sli4_params.eqav) {
15080                bf_set(lpfc_mbox_hdr_version, &shdr->request,
15081                       LPFC_Q_CREATE_VERSION_2);
15082                bf_set(lpfc_eq_context_autovalid, &eq_create->u.request.context,
15083                       phba->sli4_hba.pc_sli4_params.eqav);
15084        }
15085
15086        /* don't setup delay multiplier using EQ_CREATE */
15087        dmult = 0;
15088        bf_set(lpfc_eq_context_delay_multi, &eq_create->u.request.context,
15089               dmult);
15090        switch (eq->entry_count) {
15091        default:
15092                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15093                                "0360 Unsupported EQ count. (%d)\n",
15094                                eq->entry_count);
15095                if (eq->entry_count < 256) {
15096                        status = -EINVAL;
15097                        goto out;
15098                }
15099                fallthrough;    /* otherwise default to smallest count */
15100        case 256:
15101                bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
15102                       LPFC_EQ_CNT_256);
15103                break;
15104        case 512:
15105                bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
15106                       LPFC_EQ_CNT_512);
15107                break;
15108        case 1024:
15109                bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
15110                       LPFC_EQ_CNT_1024);
15111                break;
15112        case 2048:
15113                bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
15114                       LPFC_EQ_CNT_2048);
15115                break;
15116        case 4096:
15117                bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
15118                       LPFC_EQ_CNT_4096);
15119                break;
15120        }
15121        list_for_each_entry(dmabuf, &eq->page_list, list) {
15122                memset(dmabuf->virt, 0, hw_page_size);
15123                eq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
15124                                        putPaddrLow(dmabuf->phys);
15125                eq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
15126                                        putPaddrHigh(dmabuf->phys);
15127        }
15128        mbox->vport = phba->pport;
15129        mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
15130        mbox->ctx_buf = NULL;
15131        mbox->ctx_ndlp = NULL;
15132        rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
15133        shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
15134        shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
15135        if (shdr_status || shdr_add_status || rc) {
15136                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15137                                "2500 EQ_CREATE mailbox failed with "
15138                                "status x%x add_status x%x, mbx status x%x\n",
15139                                shdr_status, shdr_add_status, rc);
15140                status = -ENXIO;
15141        }
15142        eq->type = LPFC_EQ;
15143        eq->subtype = LPFC_NONE;
15144        eq->queue_id = bf_get(lpfc_mbx_eq_create_q_id, &eq_create->u.response);
15145        if (eq->queue_id == 0xFFFF)
15146                status = -ENXIO;
15147        eq->host_index = 0;
15148        eq->notify_interval = LPFC_EQ_NOTIFY_INTRVL;
15149        eq->max_proc_limit = LPFC_EQ_MAX_PROC_LIMIT;
15150out:
15151        mempool_free(mbox, phba->mbox_mem_pool);
15152        return status;
15153}
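/*
 * Example usage (a minimal sketch, not part of the driver): callers such
 * as lpfc_sli4_queue_setup() allocate the queue memory first and then
 * issue EQ_CREATE.  The helper below is hypothetical and compiled out;
 * it assumes the lpfc_sli4_queue_alloc() signature used elsewhere in
 * this driver and the default EQ sizing kept in phba->sli4_hba.
 */
#ifdef LPFC_EXAMPLE_SNIPPETS
static int lpfc_example_setup_eq(struct lpfc_hba *phba, int cpu,
				 struct lpfc_queue **eqp)
{
	struct lpfc_queue *eq;

	eq = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
				   phba->sli4_hba.eq_esize,
				   phba->sli4_hba.eq_ecount, cpu);
	if (!eq)
		return -ENOMEM;

	*eqp = eq;
	/* cfg_fcp_imax is the configured interrupt coalescing ceiling */
	return lpfc_eq_create(phba, eq, phba->cfg_fcp_imax);
}
#endif /* LPFC_EXAMPLE_SNIPPETS */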
15154
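/**
 * lpfc_cq_poll_handler - irq_poll callback for deferred CQ processing
 * @iop: irq_poll context embedded in the completion queue to service.
 * @budget: Maximum work the callback may do (unused here; the queue's
 *          own max_proc_limit bounds processing).
 *
 * Runs __lpfc_sli4_hba_process_cq() in LPFC_IRQ_POLL context and
 * reports one unit of work consumed back to the irq_poll core.
 */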
15155static int lpfc_cq_poll_handler(struct irq_poll *iop, int budget)
15156{
15157        struct lpfc_queue *cq = container_of(iop, struct lpfc_queue, iop);
15158
15159        __lpfc_sli4_hba_process_cq(cq, LPFC_IRQ_POLL);
15160
15161        return 1;
15162}
15163
15164/**
15165 * lpfc_cq_create - Create a Completion Queue on the HBA
15166 * @phba: HBA structure that indicates port to create a queue on.
15167 * @cq: The queue structure to use to create the completion queue.
15168 * @eq: The event queue to bind this completion queue to.
15169 * @type: Type of queue (EQ, GCQ, MCQ, WCQ, etc).
15170 * @subtype: Functional purpose of the queue (MBOX, IO, ELS, NVMET, etc).
15171 *
15172 * This function creates a completion queue, as detailed in @cq, on a port,
15173 * described by @phba by sending a CQ_CREATE mailbox command to the HBA.
15174 *
15175 * The @phba struct is used to send mailbox command to HBA. The @cq struct
15176 * is used to get the entry count and entry size that are necessary to
15177 * determine the number of pages to allocate and use for this queue. The @eq
15178 * is used to indicate which event queue to bind this completion queue to. This
15179 * function will send the CQ_CREATE mailbox command to the HBA to setup the
15180 * completion queue. This function is asynchronous and will wait for the mailbox
15181 * command to finish before continuing.
15182 *
15183 * On success this function will return a zero. If unable to allocate enough
15184 * memory this function will return -ENOMEM. If the queue create mailbox command
15185 * fails this function will return -ENXIO.
15186 **/
15187int
15188lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq,
15189               struct lpfc_queue *eq, uint32_t type, uint32_t subtype)
15190{
15191        struct lpfc_mbx_cq_create *cq_create;
15192        struct lpfc_dmabuf *dmabuf;
15193        LPFC_MBOXQ_t *mbox;
15194        int rc, length, status = 0;
15195        uint32_t shdr_status, shdr_add_status;
15196        union lpfc_sli4_cfg_shdr *shdr;
15197
15198        /* sanity check on queue memory */
15199        if (!cq || !eq)
15200                return -ENODEV;
15201
15202        mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
15203        if (!mbox)
15204                return -ENOMEM;
15205        length = (sizeof(struct lpfc_mbx_cq_create) -
15206                  sizeof(struct lpfc_sli4_cfg_mhdr));
15207        lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
15208                         LPFC_MBOX_OPCODE_CQ_CREATE,
15209                         length, LPFC_SLI4_MBX_EMBED);
15210        cq_create = &mbox->u.mqe.un.cq_create;
15211        shdr = (union lpfc_sli4_cfg_shdr *) &cq_create->header.cfg_shdr;
15212        bf_set(lpfc_mbx_cq_create_num_pages, &cq_create->u.request,
15213                    cq->page_count);
15214        bf_set(lpfc_cq_context_event, &cq_create->u.request.context, 1);
15215        bf_set(lpfc_cq_context_valid, &cq_create->u.request.context, 1);
15216        bf_set(lpfc_mbox_hdr_version, &shdr->request,
15217               phba->sli4_hba.pc_sli4_params.cqv);
15218        if (phba->sli4_hba.pc_sli4_params.cqv == LPFC_Q_CREATE_VERSION_2) {
15219                bf_set(lpfc_mbx_cq_create_page_size, &cq_create->u.request,
15220                       (cq->page_size / SLI4_PAGE_SIZE));
15221                bf_set(lpfc_cq_eq_id_2, &cq_create->u.request.context,
15222                       eq->queue_id);
15223                bf_set(lpfc_cq_context_autovalid, &cq_create->u.request.context,
15224                       phba->sli4_hba.pc_sli4_params.cqav);
15225        } else {
15226                bf_set(lpfc_cq_eq_id, &cq_create->u.request.context,
15227                       eq->queue_id);
15228        }
15229        switch (cq->entry_count) {
15230        case 2048:
15231        case 4096:
15232                if (phba->sli4_hba.pc_sli4_params.cqv ==
15233                    LPFC_Q_CREATE_VERSION_2) {
15234                        cq_create->u.request.context.lpfc_cq_context_count =
15235                                cq->entry_count;
15236                        bf_set(lpfc_cq_context_count,
15237                               &cq_create->u.request.context,
15238                               LPFC_CQ_CNT_WORD7);
15239                        break;
15240                }
15241                fallthrough;
15242        default:
15243                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15244                                "0361 Unsupported CQ count: "
15245                                "entry cnt %d sz %d pg cnt %d\n",
15246                                cq->entry_count, cq->entry_size,
15247                                cq->page_count);
15248                if (cq->entry_count < 256) {
15249                        status = -EINVAL;
15250                        goto out;
15251                }
15252                fallthrough;    /* otherwise default to smallest count */
15253        case 256:
15254                bf_set(lpfc_cq_context_count, &cq_create->u.request.context,
15255                       LPFC_CQ_CNT_256);
15256                break;
15257        case 512:
15258                bf_set(lpfc_cq_context_count, &cq_create->u.request.context,
15259                       LPFC_CQ_CNT_512);
15260                break;
15261        case 1024:
15262                bf_set(lpfc_cq_context_count, &cq_create->u.request.context,
15263                       LPFC_CQ_CNT_1024);
15264                break;
15265        }
15266        list_for_each_entry(dmabuf, &cq->page_list, list) {
15267                memset(dmabuf->virt, 0, cq->page_size);
15268                cq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
15269                                        putPaddrLow(dmabuf->phys);
15270                cq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
15271                                        putPaddrHigh(dmabuf->phys);
15272        }
15273        rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
15274
15275        /* The IOCTL status is embedded in the mailbox subheader. */
15276        shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
15277        shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
15278        if (shdr_status || shdr_add_status || rc) {
15279                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15280                                "2501 CQ_CREATE mailbox failed with "
15281                                "status x%x add_status x%x, mbx status x%x\n",
15282                                shdr_status, shdr_add_status, rc);
15283                status = -ENXIO;
15284                goto out;
15285        }
15286        cq->queue_id = bf_get(lpfc_mbx_cq_create_q_id, &cq_create->u.response);
15287        if (cq->queue_id == 0xFFFF) {
15288                status = -ENXIO;
15289                goto out;
15290        }
15291        /* link the cq onto the parent eq child list */
15292        list_add_tail(&cq->list, &eq->child_list);
15293        /* Set up completion queue's type and subtype */
15294        cq->type = type;
15295        cq->subtype = subtype;
15297        cq->assoc_qid = eq->queue_id;
15298        cq->assoc_qp = eq;
15299        cq->host_index = 0;
15300        cq->notify_interval = LPFC_CQ_NOTIFY_INTRVL;
15301        cq->max_proc_limit = min(phba->cfg_cq_max_proc_limit, cq->entry_count);
15302
15303        if (cq->queue_id > phba->sli4_hba.cq_max)
15304                phba->sli4_hba.cq_max = cq->queue_id;
15305
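        /*
         * Register the irq_poll callback used when CQ servicing is
         * deferred out of hard-irq context.
         */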
15306        irq_poll_init(&cq->iop, LPFC_IRQ_POLL_WEIGHT, lpfc_cq_poll_handler);
15307out:
15308        mempool_free(mbox, phba->mbox_mem_pool);
15309        return status;
15310}
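/*
 * Example usage (hypothetical sketch, compiled out): binding a new CQ to
 * an existing EQ.  Entry counts other than 256/512/1024 (or 2048/4096
 * when the port reports CQ version 2) are rejected above, so the sketch
 * sizes the queue from the defaults kept in phba->sli4_hba.
 */
#ifdef LPFC_EXAMPLE_SNIPPETS
static int lpfc_example_setup_cq(struct lpfc_hba *phba,
				 struct lpfc_queue *eq,
				 struct lpfc_queue **cqp, int cpu)
{
	struct lpfc_queue *cq;

	cq = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
				   phba->sli4_hba.cq_esize,
				   phba->sli4_hba.cq_ecount, cpu);
	if (!cq)
		return -ENOMEM;

	*cqp = cq;
	/* LPFC_WCQ/LPFC_IO mirrors the fast-path work completion queues */
	return lpfc_cq_create(phba, cq, eq, LPFC_WCQ, LPFC_IO);
}
#endif /* LPFC_EXAMPLE_SNIPPETS */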
15311
15312/**
15313 * lpfc_cq_create_set - Create a set of Completion Queues on the HBA for MRQ
15314 * @phba: HBA structure that indicates port to create a queue on.
15315 * @cqp: The queue structure array to use to create the completion queues.
15316 * @hdwq: The hardware queue array with the EQs to bind completion queues to.
15317 * @type: Type of queue (EQ, GCQ, MCQ, WCQ, etc).
15318 * @subtype: Functional purpose of the queue (MBOX, IO, ELS, NVMET, etc).
15319 *
15320 * This function creates a set of completion queues to support MRQ,
15321 * as detailed in @cqp, on a port described by @phba, by sending a
15322 * CREATE_CQ_SET mailbox command to the HBA.
15323 *
15324 * The @phba struct is used to send mailbox command to HBA. Each queue in
15325 * @cqp is used to get the entry count and entry size that are necessary to
15326 * determine the number of pages to allocate and use for that queue. The EQ
15327 * in each @hdwq entry indicates which event queue to bind that completion
15328 * queue to. This function will send the CREATE_CQ_SET mailbox command to
15329 * the HBA to set up the completion queues. The call is synchronous: it
15330 * polls and waits for the mailbox command to finish before continuing.
15331 *
15332 * On success this function will return a zero. If unable to allocate enough
15333 * memory this function will return -ENOMEM. If the queue create mailbox command
15334 * fails this function will return -ENXIO.
15335 **/
15336int
15337lpfc_cq_create_set(struct lpfc_hba *phba, struct lpfc_queue **cqp,
15338                   struct lpfc_sli4_hdw_queue *hdwq, uint32_t type,
15339                   uint32_t subtype)
15340{
15341        struct lpfc_queue *cq;
15342        struct lpfc_queue *eq;
15343        struct lpfc_mbx_cq_create_set *cq_set;
15344        struct lpfc_dmabuf *dmabuf;
15345        LPFC_MBOXQ_t *mbox;
15346        int rc, length, alloclen, status = 0;
15347        int cnt, idx, numcq, page_idx = 0;
15348        uint32_t shdr_status, shdr_add_status;
15349        union lpfc_sli4_cfg_shdr *shdr;
15350        uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
15351
15352        /* sanity check on queue memory */
15353        numcq = phba->cfg_nvmet_mrq;
15354        if (!cqp || !hdwq || !numcq)
15355                return -ENODEV;
15356
15357        mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
15358        if (!mbox)
15359                return -ENOMEM;
15360
15361        length = sizeof(struct lpfc_mbx_cq_create_set);
15362        length += ((numcq * cqp[0]->page_count) *
15363                   sizeof(struct dma_address));
15364        alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
15365                        LPFC_MBOX_OPCODE_FCOE_CQ_CREATE_SET, length,
15366                        LPFC_SLI4_MBX_NEMBED);
15367        if (alloclen < length) {
15368                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15369                                "3098 Allocated DMA memory size (%d) is "
15370                                "less than the requested DMA memory size "
15371                                "(%d)\n", alloclen, length);
15372                status = -ENOMEM;
15373                goto out;
15374        }
15375        cq_set = mbox->sge_array->addr[0];
15376        shdr = (union lpfc_sli4_cfg_shdr *)&cq_set->cfg_shdr;
15377        bf_set(lpfc_mbox_hdr_version, &shdr->request, 0);
15378
15379        for (idx = 0; idx < numcq; idx++) {
15380                cq = cqp[idx];
15381                eq = hdwq[idx].hba_eq;
15382                if (!cq || !eq) {
15383                        status = -ENOMEM;
15384                        goto out;
15385                }
15386                if (!phba->sli4_hba.pc_sli4_params.supported)
15387                        hw_page_size = cq->page_size;
15388
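                /*
                 * The CQ-to-EQ binding is carried in per-index eq_idN
                 * fields; index 0 additionally carries the parameters
                 * common to the whole set.
                 */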
15389                switch (idx) {
15390                case 0:
15391                        bf_set(lpfc_mbx_cq_create_set_page_size,
15392                               &cq_set->u.request,
15393                               (hw_page_size / SLI4_PAGE_SIZE));
15394                        bf_set(lpfc_mbx_cq_create_set_num_pages,
15395                               &cq_set->u.request, cq->page_count);
15396                        bf_set(lpfc_mbx_cq_create_set_evt,
15397                               &cq_set->u.request, 1);
15398                        bf_set(lpfc_mbx_cq_create_set_valid,
15399                               &cq_set->u.request, 1);
15400                        bf_set(lpfc_mbx_cq_create_set_cqe_size,
15401                               &cq_set->u.request, 0);
15402                        bf_set(lpfc_mbx_cq_create_set_num_cq,
15403                               &cq_set->u.request, numcq);
15404                        bf_set(lpfc_mbx_cq_create_set_autovalid,
15405                               &cq_set->u.request,
15406                               phba->sli4_hba.pc_sli4_params.cqav);
15407                        switch (cq->entry_count) {
15408                        case 2048:
15409                        case 4096:
15410                                if (phba->sli4_hba.pc_sli4_params.cqv ==
15411                                    LPFC_Q_CREATE_VERSION_2) {
15412                                        bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
15413                                               &cq_set->u.request,
15414                                                cq->entry_count);
15415                                        bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
15416                                               &cq_set->u.request,
15417                                               LPFC_CQ_CNT_WORD7);
15418                                        break;
15419                                }
15420                                fallthrough;
15421                        default:
15422                                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15423                                                "3118 Bad CQ count. (%d)\n",
15424                                                cq->entry_count);
15425                                if (cq->entry_count < 256) {
15426                                        status = -EINVAL;
15427                                        goto out;
15428                                }
15429                                fallthrough;    /* otherwise default to smallest */
15430                        case 256:
15431                                bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
15432                                       &cq_set->u.request, LPFC_CQ_CNT_256);
15433                                break;
15434                        case 512:
15435                                bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
15436                                       &cq_set->u.request, LPFC_CQ_CNT_512);
15437                                break;
15438                        case 1024:
15439                                bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
15440                                       &cq_set->u.request, LPFC_CQ_CNT_1024);
15441                                break;
15442                        }
15443                        bf_set(lpfc_mbx_cq_create_set_eq_id0,
15444                               &cq_set->u.request, eq->queue_id);
15445                        break;
15446                case 1:
15447                        bf_set(lpfc_mbx_cq_create_set_eq_id1,
15448                               &cq_set->u.request, eq->queue_id);
15449                        break;
15450                case 2:
15451                        bf_set(lpfc_mbx_cq_create_set_eq_id2,
15452                               &cq_set->u.request, eq->queue_id);
15453                        break;
15454                case 3:
15455                        bf_set(lpfc_mbx_cq_create_set_eq_id3,
15456                               &cq_set->u.request, eq->queue_id);
15457                        break;
15458                case 4:
15459                        bf_set(lpfc_mbx_cq_create_set_eq_id4,
15460                               &cq_set->u.request, eq->queue_id);
15461                        break;
15462                case 5:
15463                        bf_set(lpfc_mbx_cq_create_set_eq_id5,
15464                               &cq_set->u.request, eq->queue_id);
15465                        break;
15466                case 6:
15467                        bf_set(lpfc_mbx_cq_create_set_eq_id6,
15468                               &cq_set->u.request, eq->queue_id);
15469                        break;
15470                case 7:
15471                        bf_set(lpfc_mbx_cq_create_set_eq_id7,
15472                               &cq_set->u.request, eq->queue_id);
15473                        break;
15474                case 8:
15475                        bf_set(lpfc_mbx_cq_create_set_eq_id8,
15476                               &cq_set->u.request, eq->queue_id);
15477                        break;
15478                case 9:
15479                        bf_set(lpfc_mbx_cq_create_set_eq_id9,
15480                               &cq_set->u.request, eq->queue_id);
15481                        break;
15482                case 10:
15483                        bf_set(lpfc_mbx_cq_create_set_eq_id10,
15484                               &cq_set->u.request, eq->queue_id);
15485                        break;
15486                case 11:
15487                        bf_set(lpfc_mbx_cq_create_set_eq_id11,
15488                               &cq_set->u.request, eq->queue_id);
15489                        break;
15490                case 12:
15491                        bf_set(lpfc_mbx_cq_create_set_eq_id12,
15492                               &cq_set->u.request, eq->queue_id);
15493                        break;
15494                case 13:
15495                        bf_set(lpfc_mbx_cq_create_set_eq_id13,
15496                               &cq_set->u.request, eq->queue_id);
15497                        break;
15498                case 14:
15499                        bf_set(lpfc_mbx_cq_create_set_eq_id14,
15500                               &cq_set->u.request, eq->queue_id);
15501                        break;
15502                case 15:
15503                        bf_set(lpfc_mbx_cq_create_set_eq_id15,
15504                               &cq_set->u.request, eq->queue_id);
15505                        break;
15506                }
15507
15508                /* link the cq onto the parent eq child list */
15509                list_add_tail(&cq->list, &eq->child_list);
15510                /* Set up completion queue's type and subtype */
15511                cq->type = type;
15512                cq->subtype = subtype;
15513                cq->assoc_qid = eq->queue_id;
15514                cq->assoc_qp = eq;
15515                cq->host_index = 0;
15516                cq->notify_interval = LPFC_CQ_NOTIFY_INTRVL;
15517                cq->max_proc_limit = min(phba->cfg_cq_max_proc_limit,
15518                                         cq->entry_count);
15519                cq->chann = idx;
15520
15521                rc = 0;
15522                list_for_each_entry(dmabuf, &cq->page_list, list) {
15523                        memset(dmabuf->virt, 0, hw_page_size);
15524                        cnt = page_idx + dmabuf->buffer_tag;
15525                        cq_set->u.request.page[cnt].addr_lo =
15526                                        putPaddrLow(dmabuf->phys);
15527                        cq_set->u.request.page[cnt].addr_hi =
15528                                        putPaddrHigh(dmabuf->phys);
15529                        rc++;
15530                }
15531                page_idx += rc;
15532        }
15533
15534        rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
15535
15536        /* The IOCTL status is embedded in the mailbox subheader. */
15537        shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
15538        shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
15539        if (shdr_status || shdr_add_status || rc) {
15540                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15541                                "3119 CQ_CREATE_SET mailbox failed with "
15542                                "status x%x add_status x%x, mbx status x%x\n",
15543                                shdr_status, shdr_add_status, rc);
15544                status = -ENXIO;
15545                goto out;
15546        }
15547        rc = bf_get(lpfc_mbx_cq_create_set_base_id, &cq_set->u.response);
15548        if (rc == 0xFFFF) {
15549                status = -ENXIO;
15550                goto out;
15551        }
15552
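        /*
         * CREATE_CQ_SET returns a single base queue id; each CQ in the
         * set takes a consecutive id from that base.
         */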
15553        for (idx = 0; idx < numcq; idx++) {
15554                cq = cqp[idx];
15555                cq->queue_id = rc + idx;
15556                if (cq->queue_id > phba->sli4_hba.cq_max)
15557                        phba->sli4_hba.cq_max = cq->queue_id;
15558        }
15559
15560out:
15561        lpfc_sli4_mbox_cmd_free(phba, mbox);
15562        return status;
15563}
15564
15565/**
15566 * lpfc_mq_create_fb_init - Send MQ_CREATE without async events registration
15567 * @phba: HBA structure that indicates port to create a queue on.
15568 * @mq: The queue structure to use to create the mailbox queue.
15569 * @mbox: An allocated pointer to type LPFC_MBOXQ_t
15570 * @cq: The completion queue to associate with this mq.
15571 *
15572 * This function provides fallback (fb) functionality when the
15573 * mq_create_ext fails on older FW generations. Its purpose is identical
15574 * to mq_create_ext otherwise.
15575 *
15576 * This routine cannot fail as all attributes were previously accessed and
15577 * initialized in mq_create_ext.
15578 **/
15579static void
15580lpfc_mq_create_fb_init(struct lpfc_hba *phba, struct lpfc_queue *mq,
15581                       LPFC_MBOXQ_t *mbox, struct lpfc_queue *cq)
15582{
15583        struct lpfc_mbx_mq_create *mq_create;
15584        struct lpfc_dmabuf *dmabuf;
15585        int length;
15586
15587        length = (sizeof(struct lpfc_mbx_mq_create) -
15588                  sizeof(struct lpfc_sli4_cfg_mhdr));
15589        lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
15590                         LPFC_MBOX_OPCODE_MQ_CREATE,
15591                         length, LPFC_SLI4_MBX_EMBED);
15592        mq_create = &mbox->u.mqe.un.mq_create;
15593        bf_set(lpfc_mbx_mq_create_num_pages, &mq_create->u.request,
15594               mq->page_count);
15595        bf_set(lpfc_mq_context_cq_id, &mq_create->u.request.context,
15596               cq->queue_id);
15597        bf_set(lpfc_mq_context_valid, &mq_create->u.request.context, 1);
15598        switch (mq->entry_count) {
15599        case 16:
15600                bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
15601                       LPFC_MQ_RING_SIZE_16);
15602                break;
15603        case 32:
15604                bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
15605                       LPFC_MQ_RING_SIZE_32);
15606                break;
15607        case 64:
15608                bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
15609                       LPFC_MQ_RING_SIZE_64);
15610                break;
15611        case 128:
15612                bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
15613                       LPFC_MQ_RING_SIZE_128);
15614                break;
15615        }
15616        list_for_each_entry(dmabuf, &mq->page_list, list) {
15617                mq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
15618                        putPaddrLow(dmabuf->phys);
15619                mq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
15620                        putPaddrHigh(dmabuf->phys);
15621        }
15622}
15623
15624/**
15625 * lpfc_mq_create - Create a mailbox Queue on the HBA
15626 * @phba: HBA structure that indicates port to create a queue on.
15627 * @mq: The queue structure to use to create the mailbox queue.
15628 * @cq: The completion queue to associate with this mq.
15629 * @subtype: The queue's subtype.
15630 *
15631 * This function creates a mailbox queue, as detailed in @mq, on a port,
15632 * described by @phba by sending a MQ_CREATE mailbox command to the HBA.
15633 *
15634 * The @phba struct is used to send mailbox command to HBA. The @mq struct
15635 * is used to get the entry count and entry size that are necessary to
15636 * determine the number of pages to allocate and use for this queue. This
15637 * function will send the MQ_CREATE mailbox command to the HBA to setup the
15638 * mailbox queue. This function is synchronous: it polls and waits for the
15639 * mailbox command to finish before continuing.
15640 *
15641 * On success this function will return a zero. If unable to allocate enough
15642 * memory this function will return -ENOMEM. If the queue create mailbox command
15643 * fails this function will return -ENXIO.
15644 **/
15645int32_t
15646lpfc_mq_create(struct lpfc_hba *phba, struct lpfc_queue *mq,
15647               struct lpfc_queue *cq, uint32_t subtype)
15648{
15649        struct lpfc_mbx_mq_create *mq_create;
15650        struct lpfc_mbx_mq_create_ext *mq_create_ext;
15651        struct lpfc_dmabuf *dmabuf;
15652        LPFC_MBOXQ_t *mbox;
15653        int rc, length, status = 0;
15654        uint32_t shdr_status, shdr_add_status;
15655        union lpfc_sli4_cfg_shdr *shdr;
15656        uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
15657
15658        /* sanity check on queue memory */
15659        if (!mq || !cq)
15660                return -ENODEV;
15661        if (!phba->sli4_hba.pc_sli4_params.supported)
15662                hw_page_size = SLI4_PAGE_SIZE;
15663
15664        mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
15665        if (!mbox)
15666                return -ENOMEM;
15667        length = (sizeof(struct lpfc_mbx_mq_create_ext) -
15668                  sizeof(struct lpfc_sli4_cfg_mhdr));
15669        lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
15670                         LPFC_MBOX_OPCODE_MQ_CREATE_EXT,
15671                         length, LPFC_SLI4_MBX_EMBED);
15672
15673        mq_create_ext = &mbox->u.mqe.un.mq_create_ext;
15674        shdr = (union lpfc_sli4_cfg_shdr *) &mq_create_ext->header.cfg_shdr;
15675        bf_set(lpfc_mbx_mq_create_ext_num_pages,
15676               &mq_create_ext->u.request, mq->page_count);
15677        bf_set(lpfc_mbx_mq_create_ext_async_evt_link,
15678               &mq_create_ext->u.request, 1);
15679        bf_set(lpfc_mbx_mq_create_ext_async_evt_fip,
15680               &mq_create_ext->u.request, 1);
15681        bf_set(lpfc_mbx_mq_create_ext_async_evt_group5,
15682               &mq_create_ext->u.request, 1);
15683        bf_set(lpfc_mbx_mq_create_ext_async_evt_fc,
15684               &mq_create_ext->u.request, 1);
15685        bf_set(lpfc_mbx_mq_create_ext_async_evt_sli,
15686               &mq_create_ext->u.request, 1);
15687        bf_set(lpfc_mq_context_valid, &mq_create_ext->u.request.context, 1);
15688        bf_set(lpfc_mbox_hdr_version, &shdr->request,
15689               phba->sli4_hba.pc_sli4_params.mqv);
15690        if (phba->sli4_hba.pc_sli4_params.mqv == LPFC_Q_CREATE_VERSION_1)
15691                bf_set(lpfc_mbx_mq_create_ext_cq_id, &mq_create_ext->u.request,
15692                       cq->queue_id);
15693        else
15694                bf_set(lpfc_mq_context_cq_id, &mq_create_ext->u.request.context,
15695                       cq->queue_id);
15696        switch (mq->entry_count) {
15697        default:
15698                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15699                                "0362 Unsupported MQ count. (%d)\n",
15700                                mq->entry_count);
15701                if (mq->entry_count < 16) {
15702                        status = -EINVAL;
15703                        goto out;
15704                }
15705                fallthrough;    /* otherwise default to smallest count */
15706        case 16:
15707                bf_set(lpfc_mq_context_ring_size,
15708                       &mq_create_ext->u.request.context,
15709                       LPFC_MQ_RING_SIZE_16);
15710                break;
15711        case 32:
15712                bf_set(lpfc_mq_context_ring_size,
15713                       &mq_create_ext->u.request.context,
15714                       LPFC_MQ_RING_SIZE_32);
15715                break;
15716        case 64:
15717                bf_set(lpfc_mq_context_ring_size,
15718                       &mq_create_ext->u.request.context,
15719                       LPFC_MQ_RING_SIZE_64);
15720                break;
15721        case 128:
15722                bf_set(lpfc_mq_context_ring_size,
15723                       &mq_create_ext->u.request.context,
15724                       LPFC_MQ_RING_SIZE_128);
15725                break;
15726        }
15727        list_for_each_entry(dmabuf, &mq->page_list, list) {
15728                memset(dmabuf->virt, 0, hw_page_size);
15729                mq_create_ext->u.request.page[dmabuf->buffer_tag].addr_lo =
15730                                        putPaddrLow(dmabuf->phys);
15731                mq_create_ext->u.request.page[dmabuf->buffer_tag].addr_hi =
15732                                        putPaddrHigh(dmabuf->phys);
15733        }
15734        rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
15735        mq->queue_id = bf_get(lpfc_mbx_mq_create_q_id,
15736                              &mq_create_ext->u.response);
15737        if (rc != MBX_SUCCESS) {
15738                lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
15739                                "2795 MQ_CREATE_EXT failed with "
15740                                "status x%x. Failback to MQ_CREATE.\n",
15741                                rc);
15742                lpfc_mq_create_fb_init(phba, mq, mbox, cq);
15743                mq_create = &mbox->u.mqe.un.mq_create;
15744                rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
15745                shdr = (union lpfc_sli4_cfg_shdr *) &mq_create->header.cfg_shdr;
15746                mq->queue_id = bf_get(lpfc_mbx_mq_create_q_id,
15747                                      &mq_create->u.response);
15748        }
15749
15750        /* The IOCTL status is embedded in the mailbox subheader. */
15751        shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
15752        shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
15753        if (shdr_status || shdr_add_status || rc) {
15754                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15755                                "2502 MQ_CREATE mailbox failed with "
15756                                "status x%x add_status x%x, mbx status x%x\n",
15757                                shdr_status, shdr_add_status, rc);
15758                status = -ENXIO;
15759                goto out;
15760        }
15761        if (mq->queue_id == 0xFFFF) {
15762                status = -ENXIO;
15763                goto out;
15764        }
15765        mq->type = LPFC_MQ;
15766        mq->assoc_qid = cq->queue_id;
15767        mq->subtype = subtype;
15768        mq->host_index = 0;
15769        mq->hba_index = 0;
15770
15771        /* link the mq onto the parent cq child list */
15772        list_add_tail(&mq->list, &cq->child_list);
15773out:
15774        mempool_free(mbox, phba->mbox_mem_pool);
15775        return status;
15776}
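/*
 * Example usage (sketch): the driver creates a single mailbox queue
 * against the mailbox CQ during SLI4 queue setup, along the lines of:
 *
 *	rc = lpfc_mq_create(phba, phba->sli4_hba.mbx_wq,
 *			    phba->sli4_hba.mbx_cq, LPFC_MBOX);
 *
 * Ring sizes other than 16/32/64/128 entries are rejected above.
 */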
15777
15778/**
15779 * lpfc_wq_create - Create a Work Queue on the HBA
15780 * @phba: HBA structure that indicates port to create a queue on.
15781 * @wq: The queue structure to use to create the work queue.
15782 * @cq: The completion queue to bind this work queue to.
15783 * @subtype: The subtype of the work queue indicating its functionality.
15784 *
15785 * This function creates a work queue, as detailed in @wq, on a port, described
15786 * by @phba by sending a WQ_CREATE mailbox command to the HBA.
15787 *
15788 * The @phba struct is used to send mailbox command to HBA. The @wq struct
15789 * is used to get the entry count and entry size that are necessary to
15790 * determine the number of pages to allocate and use for this queue. The @cq
15791 * is used to indicate which completion queue to bind this work queue to. This
15792 * function will send the WQ_CREATE mailbox command to the HBA to setup the
15793 * work queue. This function is synchronous: it polls and waits for the
15794 * mailbox command to finish before continuing.
15795 *
15796 * On success this function will return a zero. If unable to allocate enough
15797 * memory this function will return -ENOMEM. If the queue create mailbox command
15798 * fails this function will return -ENXIO.
15799 **/
15800int
15801lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq,
15802               struct lpfc_queue *cq, uint32_t subtype)
15803{
15804        struct lpfc_mbx_wq_create *wq_create;
15805        struct lpfc_dmabuf *dmabuf;
15806        LPFC_MBOXQ_t *mbox;
15807        int rc, length, status = 0;
15808        uint32_t shdr_status, shdr_add_status;
15809        union lpfc_sli4_cfg_shdr *shdr;
15810        uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
15811        struct dma_address *page;
15812        void __iomem *bar_memmap_p;
15813        uint32_t db_offset;
15814        uint16_t pci_barset;
15815        uint8_t dpp_barset;
15816        uint32_t dpp_offset;
15817        uint8_t wq_create_version;
15818#ifdef CONFIG_X86
15819        unsigned long pg_addr;
15820#endif
15821
15822        /* sanity check on queue memory */
15823        if (!wq || !cq)
15824                return -ENODEV;
15825        if (!phba->sli4_hba.pc_sli4_params.supported)
15826                hw_page_size = wq->page_size;
15827
15828        mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
15829        if (!mbox)
15830                return -ENOMEM;
15831        length = (sizeof(struct lpfc_mbx_wq_create) -
15832                  sizeof(struct lpfc_sli4_cfg_mhdr));
15833        lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
15834                         LPFC_MBOX_OPCODE_FCOE_WQ_CREATE,
15835                         length, LPFC_SLI4_MBX_EMBED);
15836        wq_create = &mbox->u.mqe.un.wq_create;
15837        shdr = (union lpfc_sli4_cfg_shdr *) &wq_create->header.cfg_shdr;
15838        bf_set(lpfc_mbx_wq_create_num_pages, &wq_create->u.request,
15839                    wq->page_count);
15840        bf_set(lpfc_mbx_wq_create_cq_id, &wq_create->u.request,
15841                    cq->queue_id);
15842
15843        /* wqv is the earliest version supported, NOT the latest */
15844        bf_set(lpfc_mbox_hdr_version, &shdr->request,
15845               phba->sli4_hba.pc_sli4_params.wqv);
15846
15847        if ((phba->sli4_hba.pc_sli4_params.wqsize & LPFC_WQ_SZ128_SUPPORT) ||
15848            (wq->page_size > SLI4_PAGE_SIZE))
15849                wq_create_version = LPFC_Q_CREATE_VERSION_1;
15850        else
15851                wq_create_version = LPFC_Q_CREATE_VERSION_0;
15852
15859        switch (wq_create_version) {
15860        case LPFC_Q_CREATE_VERSION_1:
15861                bf_set(lpfc_mbx_wq_create_wqe_count, &wq_create->u.request_1,
15862                       wq->entry_count);
15863                bf_set(lpfc_mbox_hdr_version, &shdr->request,
15864                       LPFC_Q_CREATE_VERSION_1);
15865
15866                switch (wq->entry_size) {
15867                default:
15868                case 64:
15869                        bf_set(lpfc_mbx_wq_create_wqe_size,
15870                               &wq_create->u.request_1,
15871                               LPFC_WQ_WQE_SIZE_64);
15872                        break;
15873                case 128:
15874                        bf_set(lpfc_mbx_wq_create_wqe_size,
15875                               &wq_create->u.request_1,
15876                               LPFC_WQ_WQE_SIZE_128);
15877                        break;
15878                }
15879                /* Request DPP by default */
15880                bf_set(lpfc_mbx_wq_create_dpp_req, &wq_create->u.request_1, 1);
15881                bf_set(lpfc_mbx_wq_create_page_size,
15882                       &wq_create->u.request_1,
15883                       (wq->page_size / SLI4_PAGE_SIZE));
15884                page = wq_create->u.request_1.page;
15885                break;
15886        default:
15887                page = wq_create->u.request.page;
15888                break;
15889        }
15890
15891        list_for_each_entry(dmabuf, &wq->page_list, list) {
15892                memset(dmabuf->virt, 0, hw_page_size);
15893                page[dmabuf->buffer_tag].addr_lo = putPaddrLow(dmabuf->phys);
15894                page[dmabuf->buffer_tag].addr_hi = putPaddrHigh(dmabuf->phys);
15895        }
15896
15897        if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE)
15898                bf_set(lpfc_mbx_wq_create_dua, &wq_create->u.request, 1);
15899
15900        rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
15901        /* The IOCTL status is embedded in the mailbox subheader. */
15902        shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
15903        shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
15904        if (shdr_status || shdr_add_status || rc) {
15905                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15906                                "2503 WQ_CREATE mailbox failed with "
15907                                "status x%x add_status x%x, mbx status x%x\n",
15908                                shdr_status, shdr_add_status, rc);
15909                status = -ENXIO;
15910                goto out;
15911        }
15912
15913        if (wq_create_version == LPFC_Q_CREATE_VERSION_0)
15914                wq->queue_id = bf_get(lpfc_mbx_wq_create_q_id,
15915                                        &wq_create->u.response);
15916        else
15917                wq->queue_id = bf_get(lpfc_mbx_wq_create_v1_q_id,
15918                                        &wq_create->u.response_1);
15919
15920        if (wq->queue_id == 0xFFFF) {
15921                status = -ENXIO;
15922                goto out;
15923        }
15924
15925        wq->db_format = LPFC_DB_LIST_FORMAT;
15926        if (wq_create_version == LPFC_Q_CREATE_VERSION_0) {
15927                if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE) {
15928                        wq->db_format = bf_get(lpfc_mbx_wq_create_db_format,
15929                                               &wq_create->u.response);
15930                        if ((wq->db_format != LPFC_DB_LIST_FORMAT) &&
15931                            (wq->db_format != LPFC_DB_RING_FORMAT)) {
15932                                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15933                                                "3265 WQ[%d] doorbell format "
15934                                                "not supported: x%x\n",
15935                                                wq->queue_id, wq->db_format);
15936                                status = -EINVAL;
15937                                goto out;
15938                        }
15939                        pci_barset = bf_get(lpfc_mbx_wq_create_bar_set,
15940                                            &wq_create->u.response);
15941                        bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba,
15942                                                                   pci_barset);
15943                        if (!bar_memmap_p) {
15944                                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15945                                                "3263 WQ[%d] failed to memmap "
15946                                                "pci barset:x%x\n",
15947                                                wq->queue_id, pci_barset);
15948                                status = -ENOMEM;
15949                                goto out;
15950                        }
15951                        db_offset = wq_create->u.response.doorbell_offset;
15952                        if ((db_offset != LPFC_ULP0_WQ_DOORBELL) &&
15953                            (db_offset != LPFC_ULP1_WQ_DOORBELL)) {
15954                                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15955                                                "3252 WQ[%d] doorbell offset "
15956                                                "not supported: x%x\n",
15957                                                wq->queue_id, db_offset);
15958                                status = -EINVAL;
15959                                goto out;
15960                        }
15961                        wq->db_regaddr = bar_memmap_p + db_offset;
15962                        lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
15963                                        "3264 WQ[%d]: barset:x%x, offset:x%x, "
15964                                        "format:x%x\n", wq->queue_id,
15965                                        pci_barset, db_offset, wq->db_format);
15966                } else
15967                        wq->db_regaddr = phba->sli4_hba.WQDBregaddr;
15968        } else {
15969                /* Check if DPP was honored by the firmware */
15970                wq->dpp_enable = bf_get(lpfc_mbx_wq_create_dpp_rsp,
15971                                    &wq_create->u.response_1);
15972                if (wq->dpp_enable) {
15973                        pci_barset = bf_get(lpfc_mbx_wq_create_v1_bar_set,
15974                                            &wq_create->u.response_1);
15975                        bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba,
15976                                                                   pci_barset);
15977                        if (!bar_memmap_p) {
15978                                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15979                                                "3267 WQ[%d] failed to memmap "
15980                                                "pci barset:x%x\n",
15981                                                wq->queue_id, pci_barset);
15982                                status = -ENOMEM;
15983                                goto out;
15984                        }
15985                        db_offset = wq_create->u.response_1.doorbell_offset;
15986                        wq->db_regaddr = bar_memmap_p + db_offset;
15987                        wq->dpp_id = bf_get(lpfc_mbx_wq_create_dpp_id,
15988                                            &wq_create->u.response_1);
15989                        dpp_barset = bf_get(lpfc_mbx_wq_create_dpp_bar,
15990                                            &wq_create->u.response_1);
15991                        bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba,
15992                                                                   dpp_barset);
15993                        if (!bar_memmap_p) {
15994                                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15995                                                "3268 WQ[%d] failed to memmap "
15996                                                "pci barset:x%x\n",
15997                                                wq->queue_id, dpp_barset);
15998                                status = -ENOMEM;
15999                                goto out;
16000                        }
16001                        dpp_offset = wq_create->u.response_1.dpp_offset;
16002                        wq->dpp_regaddr = bar_memmap_p + dpp_offset;
16003                        lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
16004                                        "3271 WQ[%d]: barset:x%x, offset:x%x, "
16005                                        "dpp_id:x%x dpp_barset:x%x "
16006                                        "dpp_offset:x%x\n",
16007                                        wq->queue_id, pci_barset, db_offset,
16008                                        wq->dpp_id, dpp_barset, dpp_offset);
16009
16010#ifdef CONFIG_X86
16011                        /* Enable combined writes for DPP aperture */
16012                        pg_addr = (unsigned long)(wq->dpp_regaddr) & PAGE_MASK;
16013                        rc = set_memory_wc(pg_addr, 1);
16014                        if (rc) {
16015                                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16016                                        "3272 Cannot setup Combined "
16017                                        "Write on WQ[%d] - disable DPP\n",
16018                                        wq->queue_id);
16019                                phba->cfg_enable_dpp = 0;
16020                        }
16021#else
16022                        phba->cfg_enable_dpp = 0;
16023#endif
16024                } else
16025                        wq->db_regaddr = phba->sli4_hba.WQDBregaddr;
16026        }
16027        wq->pring = kzalloc(sizeof(struct lpfc_sli_ring), GFP_KERNEL);
16028        if (wq->pring == NULL) {
16029                status = -ENOMEM;
16030                goto out;
16031        }
16032        wq->type = LPFC_WQ;
16033        wq->assoc_qid = cq->queue_id;
16034        wq->subtype = subtype;
16035        wq->host_index = 0;
16036        wq->hba_index = 0;
16037        wq->notify_interval = LPFC_WQ_NOTIFY_INTRVL;
16038
16039        /* link the wq onto the parent cq child list */
16040        list_add_tail(&wq->list, &cq->child_list);
16041out:
16042        mempool_free(mbox, phba->mbox_mem_pool);
16043        return status;
16044}
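/*
 * Example usage (hypothetical sketch, compiled out): creating a work
 * queue bound to an existing fast-path CQ.  Assumes the same queue
 * allocator and default WQ sizing used elsewhere in the driver.
 */
#ifdef LPFC_EXAMPLE_SNIPPETS
static int lpfc_example_setup_wq(struct lpfc_hba *phba,
				 struct lpfc_queue *cq,
				 struct lpfc_queue **wqp, int cpu)
{
	struct lpfc_queue *wq;

	wq = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
				   phba->sli4_hba.wq_esize,
				   phba->sli4_hba.wq_ecount, cpu);
	if (!wq)
		return -ENOMEM;

	*wqp = wq;
	return lpfc_wq_create(phba, wq, cq, LPFC_IO);
}
#endif /* LPFC_EXAMPLE_SNIPPETS */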
16045
16046/**
16047 * lpfc_rq_create - Create a Receive Queue on the HBA
16048 * @phba: HBA structure that indicates port to create a queue on.
16049 * @hrq: The queue structure to use to create the header receive queue.
16050 * @drq: The queue structure to use to create the data receive queue.
16051 * @cq: The completion queue to bind this receive queue pair to.
16052 * @subtype: The subtype of the receive queues indicating their functionality.
16053 *
16054 * This function creates a receive buffer queue pair, as detailed in @hrq and
16055 * @drq, on a port, described by @phba by sending a RQ_CREATE mailbox command
16056 * to the HBA.
16057 *
16058 * The @phba struct is used to send mailbox command to HBA. The @drq and @hrq
16059 * struct is used to get the entry count that is necessary to determine the
16060 * number of pages to use for this queue. The @cq is used to indicate which
16061 * completion queue to bind received buffers that are posted to these queues to.
16062 * This function will send the RQ_CREATE mailbox command to the HBA to setup the
16063 * receive queue pair. This function is synchronous: it polls and waits
16064 * for the mailbox command to finish before continuing.
16065 *
16066 * On success this function will return a zero. If unable to allocate enough
16067 * memory this function will return -ENOMEM. If the queue create mailbox command
16068 * fails this function will return -ENXIO.
16069 **/
16070int
16071lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq,
16072               struct lpfc_queue *drq, struct lpfc_queue *cq, uint32_t subtype)
16073{
16074        struct lpfc_mbx_rq_create *rq_create;
16075        struct lpfc_dmabuf *dmabuf;
16076        LPFC_MBOXQ_t *mbox;
16077        int rc, length, status = 0;
16078        uint32_t shdr_status, shdr_add_status;
16079        union lpfc_sli4_cfg_shdr *shdr;
16080        uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
16081        void __iomem *bar_memmap_p;
16082        uint32_t db_offset;
16083        uint16_t pci_barset;
16084
16085        /* sanity check on queue memory */
16086        if (!hrq || !drq || !cq)
16087                return -ENODEV;
16088        if (!phba->sli4_hba.pc_sli4_params.supported)
16089                hw_page_size = SLI4_PAGE_SIZE;
16090
16091        if (hrq->entry_count != drq->entry_count)
16092                return -EINVAL;
16093        mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
16094        if (!mbox)
16095                return -ENOMEM;
16096        length = (sizeof(struct lpfc_mbx_rq_create) -
16097                  sizeof(struct lpfc_sli4_cfg_mhdr));
16098        lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
16099                         LPFC_MBOX_OPCODE_FCOE_RQ_CREATE,
16100                         length, LPFC_SLI4_MBX_EMBED);
16101        rq_create = &mbox->u.mqe.un.rq_create;
16102        shdr = (union lpfc_sli4_cfg_shdr *) &rq_create->header.cfg_shdr;
16103        bf_set(lpfc_mbox_hdr_version, &shdr->request,
16104               phba->sli4_hba.pc_sli4_params.rqv);
16105        if (phba->sli4_hba.pc_sli4_params.rqv == LPFC_Q_CREATE_VERSION_1) {
16106                bf_set(lpfc_rq_context_rqe_count_1,
16107                       &rq_create->u.request.context,
16108                       hrq->entry_count);
16109                rq_create->u.request.context.buffer_size = LPFC_HDR_BUF_SIZE;
16110                bf_set(lpfc_rq_context_rqe_size,
16111                       &rq_create->u.request.context,
16112                       LPFC_RQE_SIZE_8);
16113                bf_set(lpfc_rq_context_page_size,
16114                       &rq_create->u.request.context,
16115                       LPFC_RQ_PAGE_SIZE_4096);
16116        } else {
16117                switch (hrq->entry_count) {
16118                default:
16119                        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16120                                        "2535 Unsupported RQ count. (%d)\n",
16121                                        hrq->entry_count);
16122                        if (hrq->entry_count < 512) {
16123                                status = -EINVAL;
16124                                goto out;
16125                        }
16126                        fallthrough;    /* otherwise default to smallest count */
16127                case 512:
16128                        bf_set(lpfc_rq_context_rqe_count,
16129                               &rq_create->u.request.context,
16130                               LPFC_RQ_RING_SIZE_512);
16131                        break;
16132                case 1024:
16133                        bf_set(lpfc_rq_context_rqe_count,
16134                               &rq_create->u.request.context,
16135                               LPFC_RQ_RING_SIZE_1024);
16136                        break;
16137                case 2048:
16138                        bf_set(lpfc_rq_context_rqe_count,
16139                               &rq_create->u.request.context,
16140                               LPFC_RQ_RING_SIZE_2048);
16141                        break;
16142                case 4096:
16143                        bf_set(lpfc_rq_context_rqe_count,
16144                               &rq_create->u.request.context,
16145                               LPFC_RQ_RING_SIZE_4096);
16146                        break;
16147                }
16148                bf_set(lpfc_rq_context_buf_size, &rq_create->u.request.context,
16149                       LPFC_HDR_BUF_SIZE);
16150        }
16151        bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context,
16152               cq->queue_id);
16153        bf_set(lpfc_mbx_rq_create_num_pages, &rq_create->u.request,
16154               hrq->page_count);
16155        list_for_each_entry(dmabuf, &hrq->page_list, list) {
16156                memset(dmabuf->virt, 0, hw_page_size);
16157                rq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
16158                                        putPaddrLow(dmabuf->phys);
16159                rq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
16160                                        putPaddrHigh(dmabuf->phys);
16161        }
16162        if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE)
16163                bf_set(lpfc_mbx_rq_create_dua, &rq_create->u.request, 1);
16164
16165        rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
16166        /* The IOCTL status is embedded in the mailbox subheader. */
16167        shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16168        shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16169        if (shdr_status || shdr_add_status || rc) {
16170                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16171                                "2504 RQ_CREATE mailbox failed with "
16172                                "status x%x add_status x%x, mbx status x%x\n",
16173                                shdr_status, shdr_add_status, rc);
16174                status = -ENXIO;
16175                goto out;
16176        }
16177        hrq->queue_id = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response);
16178        if (hrq->queue_id == 0xFFFF) {
16179                status = -ENXIO;
16180                goto out;
16181        }
16182
16183        if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE) {
16184                hrq->db_format = bf_get(lpfc_mbx_rq_create_db_format,
16185                                        &rq_create->u.response);
16186                if ((hrq->db_format != LPFC_DB_LIST_FORMAT) &&
16187                    (hrq->db_format != LPFC_DB_RING_FORMAT)) {
16188                        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16189                                        "3262 RQ [%d] doorbell format not "
16190                                        "supported: x%x\n", hrq->queue_id,
16191                                        hrq->db_format);
16192                        status = -EINVAL;
16193                        goto out;
16194                }
16195
16196                pci_barset = bf_get(lpfc_mbx_rq_create_bar_set,
16197                                    &rq_create->u.response);
16198                bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba, pci_barset);
16199                if (!bar_memmap_p) {
16200                        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16201                                        "3269 RQ[%d] failed to memmap pci "
16202                                        "barset:x%x\n", hrq->queue_id,
16203                                        pci_barset);
16204                        status = -ENOMEM;
16205                        goto out;
16206                }
16207
16208                db_offset = rq_create->u.response.doorbell_offset;
16209                if ((db_offset != LPFC_ULP0_RQ_DOORBELL) &&
16210                    (db_offset != LPFC_ULP1_RQ_DOORBELL)) {
16211                        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16212                                        "3270 RQ[%d] doorbell offset not "
16213                                        "supported: x%x\n", hrq->queue_id,
16214                                        db_offset);
16215                        status = -EINVAL;
16216                        goto out;
16217                }
16218                hrq->db_regaddr = bar_memmap_p + db_offset;
16219                lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
16220                                "3266 RQ[qid:%d]: barset:x%x, offset:x%x, "
16221                                "format:x%x\n", hrq->queue_id, pci_barset,
16222                                db_offset, hrq->db_format);
16223        } else {
16224                hrq->db_format = LPFC_DB_RING_FORMAT;
16225                hrq->db_regaddr = phba->sli4_hba.RQDBregaddr;
16226        }
16227        hrq->type = LPFC_HRQ;
16228        hrq->assoc_qid = cq->queue_id;
16229        hrq->subtype = subtype;
16230        hrq->host_index = 0;
16231        hrq->hba_index = 0;
16232        hrq->notify_interval = LPFC_RQ_NOTIFY_INTRVL;
16233
16234        /* now create the data queue */
16235        lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
16236                         LPFC_MBOX_OPCODE_FCOE_RQ_CREATE,
16237                         length, LPFC_SLI4_MBX_EMBED);
16238        bf_set(lpfc_mbox_hdr_version, &shdr->request,
16239               phba->sli4_hba.pc_sli4_params.rqv);
16240        if (phba->sli4_hba.pc_sli4_params.rqv == LPFC_Q_CREATE_VERSION_1) {
16241                bf_set(lpfc_rq_context_rqe_count_1,
16242                       &rq_create->u.request.context, hrq->entry_count);
16243                if (subtype == LPFC_NVMET)
16244                        rq_create->u.request.context.buffer_size =
16245                                LPFC_NVMET_DATA_BUF_SIZE;
16246                else
16247                        rq_create->u.request.context.buffer_size =
16248                                LPFC_DATA_BUF_SIZE;
16249                bf_set(lpfc_rq_context_rqe_size, &rq_create->u.request.context,
16250                       LPFC_RQE_SIZE_8);
16251                bf_set(lpfc_rq_context_page_size, &rq_create->u.request.context,
16252                       (PAGE_SIZE/SLI4_PAGE_SIZE));
16253        } else {
16254                switch (drq->entry_count) {
16255                default:
16256                        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16257                                        "2536 Unsupported RQ count. (%d)\n",
16258                                        drq->entry_count);
16259                        if (drq->entry_count < 512) {
16260                                status = -EINVAL;
16261                                goto out;
16262                        }
16263                        fallthrough;    /* otherwise default to smallest count */
16264                case 512:
16265                        bf_set(lpfc_rq_context_rqe_count,
16266                               &rq_create->u.request.context,
16267                               LPFC_RQ_RING_SIZE_512);
16268                        break;
16269                case 1024:
16270                        bf_set(lpfc_rq_context_rqe_count,
16271                               &rq_create->u.request.context,
16272                               LPFC_RQ_RING_SIZE_1024);
16273                        break;
16274                case 2048:
16275                        bf_set(lpfc_rq_context_rqe_count,
16276                               &rq_create->u.request.context,
16277                               LPFC_RQ_RING_SIZE_2048);
16278                        break;
16279                case 4096:
16280                        bf_set(lpfc_rq_context_rqe_count,
16281                               &rq_create->u.request.context,
16282                               LPFC_RQ_RING_SIZE_4096);
16283                        break;
16284                }
16285                if (subtype == LPFC_NVMET)
16286                        bf_set(lpfc_rq_context_buf_size,
16287                               &rq_create->u.request.context,
16288                               LPFC_NVMET_DATA_BUF_SIZE);
16289                else
16290                        bf_set(lpfc_rq_context_buf_size,
16291                               &rq_create->u.request.context,
16292                               LPFC_DATA_BUF_SIZE);
16293        }
16294        bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context,
16295               cq->queue_id);
16296        bf_set(lpfc_mbx_rq_create_num_pages, &rq_create->u.request,
16297               drq->page_count);
16298        list_for_each_entry(dmabuf, &drq->page_list, list) {
16299                rq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
16300                                        putPaddrLow(dmabuf->phys);
16301                rq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
16302                                        putPaddrHigh(dmabuf->phys);
16303        }
16304        if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE)
16305                bf_set(lpfc_mbx_rq_create_dua, &rq_create->u.request, 1);
16306        rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
16307        /* The IOCTL status is embedded in the mailbox subheader. */
16308        shdr = (union lpfc_sli4_cfg_shdr *) &rq_create->header.cfg_shdr;
16309        shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16310        shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16311        if (shdr_status || shdr_add_status || rc) {
16312                status = -ENXIO;
16313                goto out;
16314        }
16315        drq->queue_id = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response);
16316        if (drq->queue_id == 0xFFFF) {
16317                status = -ENXIO;
16318                goto out;
16319        }
16320        drq->type = LPFC_DRQ;
16321        drq->assoc_qid = cq->queue_id;
16322        drq->subtype = subtype;
16323        drq->host_index = 0;
16324        drq->hba_index = 0;
16325        drq->notify_interval = LPFC_RQ_NOTIFY_INTRVL;
16326
16327        /* link the header and data RQs onto the parent cq child list */
16328        list_add_tail(&hrq->list, &cq->child_list);
16329        list_add_tail(&drq->list, &cq->child_list);
16330
16331out:
16332        mempool_free(mbox, phba->mbox_mem_pool);
16333        return status;
16334}
16335
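/*
 * Illustrative sketch only, not driver code: for pre-v1 RQ_CREATE
 * requests the RQE count is carried as an encoded ring size rather
 * than a raw entry count, as in the switch above.  The helper name
 * below is hypothetical; note the driver additionally falls back to
 * the 512 encoding for unsupported counts of 512 or more.
 */
static inline int __maybe_unused
lpfc_example_rq_ring_encoding(int entry_count)
{
        switch (entry_count) {
        case 512:
                return LPFC_RQ_RING_SIZE_512;
        case 1024:
                return LPFC_RQ_RING_SIZE_1024;
        case 2048:
                return LPFC_RQ_RING_SIZE_2048;
        case 4096:
                return LPFC_RQ_RING_SIZE_4096;
        default:
                return -EINVAL; /* unsupported pre-v1 ring size */
        }
}
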
16336/**
16337 * lpfc_mrq_create - Create MRQ Receive Queues on the HBA
16338 * @phba: HBA structure that indicates port to create a queue on.
16339 * @hrqp: The queue structure array to use to create the header receive queues.
16340 * @drqp: The queue structure array to use to create the data receive queues.
16341 * @cqp: The completion queue array to bind these receive queues to.
16342 * @subtype: Functional purpose of the queue (MBOX, IO, ELS, NVMET, etc.).
16343 *
16344 * This function creates the receive buffer queue pairs, as detailed in @hrqp
16345 * and @drqp, on a port described by @phba, by sending a RQ_CREATE mailbox
16346 * command to the HBA.
16347 *
16348 * The @phba struct is used to send the mailbox command to the HBA. The @drqp
16349 * and @hrqp structs are used to get the entry counts that determine the
16350 * number of pages to use for each queue. The @cqp array indicates which
16351 * completion queue to bind received buffers that are posted to these queues to.
16352 * This function sends the RQ_CREATE mailbox command to the HBA to set up the
16353 * receive queue pairs. This function is synchronous and waits for the
16354 * mailbox command to finish before returning.
16355 *
16356 * On success this function will return a zero. If unable to allocate enough
16357 * memory this function will return -ENOMEM. If the queue create mailbox command
16358 * fails this function will return -ENXIO.
16359 **/
16360int
16361lpfc_mrq_create(struct lpfc_hba *phba, struct lpfc_queue **hrqp,
16362                struct lpfc_queue **drqp, struct lpfc_queue **cqp,
16363                uint32_t subtype)
16364{
16365        struct lpfc_queue *hrq, *drq, *cq;
16366        struct lpfc_mbx_rq_create_v2 *rq_create;
16367        struct lpfc_dmabuf *dmabuf;
16368        LPFC_MBOXQ_t *mbox;
16369        int rc, length, alloclen, status = 0;
16370        int cnt, idx, numrq, page_idx = 0;
16371        uint32_t shdr_status, shdr_add_status;
16372        union lpfc_sli4_cfg_shdr *shdr;
16373        uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
16374
16375        numrq = phba->cfg_nvmet_mrq;
16376        /* sanity check on array memory */
16377        if (!hrqp || !drqp || !cqp || !numrq)
16378                return -ENODEV;
16379        if (!phba->sli4_hba.pc_sli4_params.supported)
16380                hw_page_size = SLI4_PAGE_SIZE;
16381
16382        mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
16383        if (!mbox)
16384                return -ENOMEM;
16385
16386        length = sizeof(struct lpfc_mbx_rq_create_v2);
16387        length += ((2 * numrq * hrqp[0]->page_count) *
16388                   sizeof(struct dma_address));
16389
16390        alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
16391                                    LPFC_MBOX_OPCODE_FCOE_RQ_CREATE, length,
16392                                    LPFC_SLI4_MBX_NEMBED);
16393        if (alloclen < length) {
16394                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16395                                "3099 Allocated DMA memory size (%d) is "
16396                                "less than the requested DMA memory size "
16397                                "(%d)\n", alloclen, length);
16398                status = -ENOMEM;
16399                goto out;
16400        }
16401
16404        rq_create = mbox->sge_array->addr[0];
16405        shdr = (union lpfc_sli4_cfg_shdr *)&rq_create->cfg_shdr;
16406
16407        bf_set(lpfc_mbox_hdr_version, &shdr->request, LPFC_Q_CREATE_VERSION_2);
16408        cnt = 0;
16409
16410        for (idx = 0; idx < numrq; idx++) {
16411                hrq = hrqp[idx];
16412                drq = drqp[idx];
16413                cq  = cqp[idx];
16414
16415                /* sanity check on queue memory */
16416                if (!hrq || !drq || !cq) {
16417                        status = -ENODEV;
16418                        goto out;
16419                }
16420
16421                if (hrq->entry_count != drq->entry_count) {
16422                        status = -EINVAL;
16423                        goto out;
16424                }
16425
16426                if (idx == 0) {
16427                        bf_set(lpfc_mbx_rq_create_num_pages,
16428                               &rq_create->u.request,
16429                               hrq->page_count);
16430                        bf_set(lpfc_mbx_rq_create_rq_cnt,
16431                               &rq_create->u.request, (numrq * 2));
16432                        bf_set(lpfc_mbx_rq_create_dnb, &rq_create->u.request,
16433                               1);
16434                        bf_set(lpfc_rq_context_base_cq,
16435                               &rq_create->u.request.context,
16436                               cq->queue_id);
16437                        bf_set(lpfc_rq_context_data_size,
16438                               &rq_create->u.request.context,
16439                               LPFC_NVMET_DATA_BUF_SIZE);
16440                        bf_set(lpfc_rq_context_hdr_size,
16441                               &rq_create->u.request.context,
16442                               LPFC_HDR_BUF_SIZE);
16443                        bf_set(lpfc_rq_context_rqe_count_1,
16444                               &rq_create->u.request.context,
16445                               hrq->entry_count);
16446                        bf_set(lpfc_rq_context_rqe_size,
16447                               &rq_create->u.request.context,
16448                               LPFC_RQE_SIZE_8);
16449                        bf_set(lpfc_rq_context_page_size,
16450                               &rq_create->u.request.context,
16451                               (PAGE_SIZE/SLI4_PAGE_SIZE));
16452                }
16453                rc = 0;
16454                list_for_each_entry(dmabuf, &hrq->page_list, list) {
16455                        memset(dmabuf->virt, 0, hw_page_size);
16456                        cnt = page_idx + dmabuf->buffer_tag;
16457                        rq_create->u.request.page[cnt].addr_lo =
16458                                        putPaddrLow(dmabuf->phys);
16459                        rq_create->u.request.page[cnt].addr_hi =
16460                                        putPaddrHigh(dmabuf->phys);
16461                        rc++;
16462                }
16463                page_idx += rc;
16464
16465                rc = 0;
16466                list_for_each_entry(dmabuf, &drq->page_list, list) {
16467                        memset(dmabuf->virt, 0, hw_page_size);
16468                        cnt = page_idx + dmabuf->buffer_tag;
16469                        rq_create->u.request.page[cnt].addr_lo =
16470                                        putPaddrLow(dmabuf->phys);
16471                        rq_create->u.request.page[cnt].addr_hi =
16472                                        putPaddrHigh(dmabuf->phys);
16473                        rc++;
16474                }
16475                page_idx += rc;
16476
16477                hrq->db_format = LPFC_DB_RING_FORMAT;
16478                hrq->db_regaddr = phba->sli4_hba.RQDBregaddr;
16479                hrq->type = LPFC_HRQ;
16480                hrq->assoc_qid = cq->queue_id;
16481                hrq->subtype = subtype;
16482                hrq->host_index = 0;
16483                hrq->hba_index = 0;
16484                hrq->notify_interval = LPFC_RQ_NOTIFY_INTRVL;
16485
16486                drq->db_format = LPFC_DB_RING_FORMAT;
16487                drq->db_regaddr = phba->sli4_hba.RQDBregaddr;
16488                drq->type = LPFC_DRQ;
16489                drq->assoc_qid = cq->queue_id;
16490                drq->subtype = subtype;
16491                drq->host_index = 0;
16492                drq->hba_index = 0;
16493                drq->notify_interval = LPFC_RQ_NOTIFY_INTRVL;
16494
16495                list_add_tail(&hrq->list, &cq->child_list);
16496                list_add_tail(&drq->list, &cq->child_list);
16497        }
16498
16499        rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
16500        /* The IOCTL status is embedded in the mailbox subheader. */
16501        shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16502        shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16503        if (shdr_status || shdr_add_status || rc) {
16504                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16505                                "3120 RQ_CREATE mailbox failed with "
16506                                "status x%x add_status x%x, mbx status x%x\n",
16507                                shdr_status, shdr_add_status, rc);
16508                status = -ENXIO;
16509                goto out;
16510        }
16511        rc = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response);
16512        if (rc == 0xFFFF) {
16513                status = -ENXIO;
16514                goto out;
16515        }
16516
16517        /* Initialize all RQs with associated queue id */
16518        for (idx = 0; idx < numrq; idx++) {
16519                hrq = hrqp[idx];
16520                hrq->queue_id = rc + (2 * idx);
16521                drq = drqp[idx];
16522                drq->queue_id = rc + (2 * idx) + 1;
16523        }
16524
16525out:
16526        lpfc_sli4_mbox_cmd_free(phba, mbox);
16527        return status;
16528}
16529
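/*
 * Illustrative sketch only, not driver code: lpfc_mrq_create() above
 * assigns consecutive queue ids to the created pairs, header RQ first.
 * Hypothetical helpers making that layout explicit:
 */
static inline uint16_t __maybe_unused
lpfc_example_mrq_hrq_qid(uint16_t base_qid, int idx)
{
        return base_qid + (2 * idx);            /* header RQ of pair idx */
}

static inline uint16_t __maybe_unused
lpfc_example_mrq_drq_qid(uint16_t base_qid, int idx)
{
        return base_qid + (2 * idx) + 1;        /* data RQ of pair idx */
}
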
16530/**
16531 * lpfc_eq_destroy - Destroy an Event Queue on the HBA
16532 * @phba: HBA structure that indicates port to destroy a queue on.
16533 * @eq: The queue structure associated with the queue to destroy.
16534 *
16535 * This function destroys a queue, as detailed in @eq, by sending a mailbox
16536 * command, specific to the type of queue, to the HBA.
16537 *
16538 * The @eq struct is used to get the queue ID of the queue to destroy.
16539 *
16540 * On success this function will return a zero. If the queue destroy mailbox
16541 * command fails this function will return -ENXIO.
16542 **/
16543int
16544lpfc_eq_destroy(struct lpfc_hba *phba, struct lpfc_queue *eq)
16545{
16546        LPFC_MBOXQ_t *mbox;
16547        int rc, length, status = 0;
16548        uint32_t shdr_status, shdr_add_status;
16549        union lpfc_sli4_cfg_shdr *shdr;
16550
16551        /* sanity check on queue memory */
16552        if (!eq)
16553                return -ENODEV;
16554
16555        mbox = mempool_alloc(eq->phba->mbox_mem_pool, GFP_KERNEL);
16556        if (!mbox)
16557                return -ENOMEM;
16558        length = (sizeof(struct lpfc_mbx_eq_destroy) -
16559                  sizeof(struct lpfc_sli4_cfg_mhdr));
16560        lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
16561                         LPFC_MBOX_OPCODE_EQ_DESTROY,
16562                         length, LPFC_SLI4_MBX_EMBED);
16563        bf_set(lpfc_mbx_eq_destroy_q_id, &mbox->u.mqe.un.eq_destroy.u.request,
16564               eq->queue_id);
16565        mbox->vport = eq->phba->pport;
16566        mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
16567
16568        rc = lpfc_sli_issue_mbox(eq->phba, mbox, MBX_POLL);
16569        /* The IOCTL status is embedded in the mailbox subheader. */
16570        shdr = (union lpfc_sli4_cfg_shdr *)
16571                &mbox->u.mqe.un.eq_destroy.header.cfg_shdr;
16572        shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16573        shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16574        if (shdr_status || shdr_add_status || rc) {
16575                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16576                                "2505 EQ_DESTROY mailbox failed with "
16577                                "status x%x add_status x%x, mbx status x%x\n",
16578                                shdr_status, shdr_add_status, rc);
16579                status = -ENXIO;
16580        }
16581
16582        /* Remove eq from any list */
16583        list_del_init(&eq->list);
16584        mempool_free(mbox, eq->phba->mbox_mem_pool);
16585        return status;
16586}
16587
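/*
 * Note: the queue destroy helpers in this file (EQ/CQ/MQ/WQ/RQ) all
 * follow the shape of lpfc_eq_destroy() above:
 *   1. mempool_alloc() a mailbox from the mbox_mem_pool
 *   2. lpfc_sli4_config() an embedded *_DESTROY request
 *   3. bf_set() the queue id into the request
 *   4. lpfc_sli_issue_mbox(..., MBX_POLL)
 *   5. decode the cfg_shdr status words; any nonzero status => -ENXIO
 *   6. list_del_init() the queue and mempool_free() the mailbox
 */
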
16588/**
16589 * lpfc_cq_destroy - Destroy a Completion Queue on the HBA
16590 * @phba: HBA structure that indicates port to destroy a queue on.
16591 * @cq: The queue structure associated with the queue to destroy.
16592 *
16593 * This function destroys a queue, as detailed in @cq, by sending a mailbox
16594 * command, specific to the type of queue, to the HBA.
16595 *
16596 * The @cq struct is used to get the queue ID of the queue to destroy.
16597 *
16598 * On success this function will return a zero. If the queue destroy mailbox
16599 * command fails this function will return -ENXIO.
16600 **/
16601int
16602lpfc_cq_destroy(struct lpfc_hba *phba, struct lpfc_queue *cq)
16603{
16604        LPFC_MBOXQ_t *mbox;
16605        int rc, length, status = 0;
16606        uint32_t shdr_status, shdr_add_status;
16607        union lpfc_sli4_cfg_shdr *shdr;
16608
16609        /* sanity check on queue memory */
16610        if (!cq)
16611                return -ENODEV;
16612        mbox = mempool_alloc(cq->phba->mbox_mem_pool, GFP_KERNEL);
16613        if (!mbox)
16614                return -ENOMEM;
16615        length = (sizeof(struct lpfc_mbx_cq_destroy) -
16616                  sizeof(struct lpfc_sli4_cfg_mhdr));
16617        lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
16618                         LPFC_MBOX_OPCODE_CQ_DESTROY,
16619                         length, LPFC_SLI4_MBX_EMBED);
16620        bf_set(lpfc_mbx_cq_destroy_q_id, &mbox->u.mqe.un.cq_destroy.u.request,
16621               cq->queue_id);
16622        mbox->vport = cq->phba->pport;
16623        mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
16624        rc = lpfc_sli_issue_mbox(cq->phba, mbox, MBX_POLL);
16625        /* The IOCTL status is embedded in the mailbox subheader. */
16626        shdr = (union lpfc_sli4_cfg_shdr *)
16627                &mbox->u.mqe.un.cq_destroy.header.cfg_shdr;
16628        shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16629        shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16630        if (shdr_status || shdr_add_status || rc) {
16631                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16632                                "2506 CQ_DESTROY mailbox failed with "
16633                                "status x%x add_status x%x, mbx status x%x\n",
16634                                shdr_status, shdr_add_status, rc);
16635                status = -ENXIO;
16636        }
16637        /* Remove cq from any list */
16638        list_del_init(&cq->list);
16639        mempool_free(mbox, cq->phba->mbox_mem_pool);
16640        return status;
16641}
16642
16643/**
16644 * lpfc_mq_destroy - Destroy a Mailbox Queue on the HBA
16645 * @phba: HBA structure that indicates port to destroy a queue on.
16646 * @mq: The queue structure associated with the queue to destroy.
16647 *
16648 * This function destroys a queue, as detailed in @mq, by sending a mailbox
16649 * command, specific to the type of queue, to the HBA.
16650 *
16651 * The @mq struct is used to get the queue ID of the queue to destroy.
16652 *
16653 * On success this function will return a zero. If the queue destroy mailbox
16654 * command fails this function will return -ENXIO.
16655 **/
16656int
16657lpfc_mq_destroy(struct lpfc_hba *phba, struct lpfc_queue *mq)
16658{
16659        LPFC_MBOXQ_t *mbox;
16660        int rc, length, status = 0;
16661        uint32_t shdr_status, shdr_add_status;
16662        union lpfc_sli4_cfg_shdr *shdr;
16663
16664        /* sanity check on queue memory */
16665        if (!mq)
16666                return -ENODEV;
16667        mbox = mempool_alloc(mq->phba->mbox_mem_pool, GFP_KERNEL);
16668        if (!mbox)
16669                return -ENOMEM;
16670        length = (sizeof(struct lpfc_mbx_mq_destroy) -
16671                  sizeof(struct lpfc_sli4_cfg_mhdr));
16672        lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
16673                         LPFC_MBOX_OPCODE_MQ_DESTROY,
16674                         length, LPFC_SLI4_MBX_EMBED);
16675        bf_set(lpfc_mbx_mq_destroy_q_id, &mbox->u.mqe.un.mq_destroy.u.request,
16676               mq->queue_id);
16677        mbox->vport = mq->phba->pport;
16678        mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
16679        rc = lpfc_sli_issue_mbox(mq->phba, mbox, MBX_POLL);
16680        /* The IOCTL status is embedded in the mailbox subheader. */
16681        shdr = (union lpfc_sli4_cfg_shdr *)
16682                &mbox->u.mqe.un.mq_destroy.header.cfg_shdr;
16683        shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16684        shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16685        if (shdr_status || shdr_add_status || rc) {
16686                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16687                                "2507 MQ_DESTROY mailbox failed with "
16688                                "status x%x add_status x%x, mbx status x%x\n",
16689                                shdr_status, shdr_add_status, rc);
16690                status = -ENXIO;
16691        }
16692        /* Remove mq from any list */
16693        list_del_init(&mq->list);
16694        mempool_free(mbox, mq->phba->mbox_mem_pool);
16695        return status;
16696}
16697
16698/**
16699 * lpfc_wq_destroy - Destroy a Work Queue on the HBA
16700 * @phba: HBA structure that indicates port to destroy a queue on.
16701 * @wq: The queue structure associated with the queue to destroy.
16702 *
16703 * This function destroys a queue, as detailed in @wq, by sending a mailbox
16704 * command, specific to the type of queue, to the HBA.
16705 *
16706 * The @wq struct is used to get the queue ID of the queue to destroy.
16707 *
16708 * On success this function will return a zero. If the queue destroy mailbox
16709 * command fails this function will return -ENXIO.
16710 **/
16711int
16712lpfc_wq_destroy(struct lpfc_hba *phba, struct lpfc_queue *wq)
16713{
16714        LPFC_MBOXQ_t *mbox;
16715        int rc, length, status = 0;
16716        uint32_t shdr_status, shdr_add_status;
16717        union lpfc_sli4_cfg_shdr *shdr;
16718
16719        /* sanity check on queue memory */
16720        if (!wq)
16721                return -ENODEV;
16722        mbox = mempool_alloc(wq->phba->mbox_mem_pool, GFP_KERNEL);
16723        if (!mbox)
16724                return -ENOMEM;
16725        length = (sizeof(struct lpfc_mbx_wq_destroy) -
16726                  sizeof(struct lpfc_sli4_cfg_mhdr));
16727        lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
16728                         LPFC_MBOX_OPCODE_FCOE_WQ_DESTROY,
16729                         length, LPFC_SLI4_MBX_EMBED);
16730        bf_set(lpfc_mbx_wq_destroy_q_id, &mbox->u.mqe.un.wq_destroy.u.request,
16731               wq->queue_id);
16732        mbox->vport = wq->phba->pport;
16733        mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
16734        rc = lpfc_sli_issue_mbox(wq->phba, mbox, MBX_POLL);
16735        shdr = (union lpfc_sli4_cfg_shdr *)
16736                &mbox->u.mqe.un.wq_destroy.header.cfg_shdr;
16737        shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16738        shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16739        if (shdr_status || shdr_add_status || rc) {
16740                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16741                                "2508 WQ_DESTROY mailbox failed with "
16742                                "status x%x add_status x%x, mbx status x%x\n",
16743                                shdr_status, shdr_add_status, rc);
16744                status = -ENXIO;
16745        }
16746        /* Remove wq from any list */
16747        list_del_init(&wq->list);
16748        kfree(wq->pring);
16749        wq->pring = NULL;
16750        mempool_free(mbox, wq->phba->mbox_mem_pool);
16751        return status;
16752}
16753
16754/**
16755 * lpfc_rq_destroy - Destroy a Receive Queue on the HBA
16756 * @phba: HBA structure that indicates port to destroy a queue on.
16757 * @hrq: The queue structure associated with the queue to destroy.
16758 * @drq: The queue structure associated with the queue to destroy.
16759 *
16760 * This function destroys the receive queue pair, as detailed in @hrq and
16761 * @drq, by sending mailbox commands, specific to the type of queue, to the HBA.
16762 *
16763 * The @hrq and @drq structs are used to get the queue IDs of the queues to destroy.
16764 *
16765 * On success this function will return a zero. If the queue destroy mailbox
16766 * command fails this function will return -ENXIO.
16767 **/
16768int
16769lpfc_rq_destroy(struct lpfc_hba *phba, struct lpfc_queue *hrq,
16770                struct lpfc_queue *drq)
16771{
16772        LPFC_MBOXQ_t *mbox;
16773        int rc, length, status = 0;
16774        uint32_t shdr_status, shdr_add_status;
16775        union lpfc_sli4_cfg_shdr *shdr;
16776
16777        /* sanity check on queue memory */
16778        if (!hrq || !drq)
16779                return -ENODEV;
16780        mbox = mempool_alloc(hrq->phba->mbox_mem_pool, GFP_KERNEL);
16781        if (!mbox)
16782                return -ENOMEM;
16783        length = (sizeof(struct lpfc_mbx_rq_destroy) -
16784                  sizeof(struct lpfc_sli4_cfg_mhdr));
16785        lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
16786                         LPFC_MBOX_OPCODE_FCOE_RQ_DESTROY,
16787                         length, LPFC_SLI4_MBX_EMBED);
16788        bf_set(lpfc_mbx_rq_destroy_q_id, &mbox->u.mqe.un.rq_destroy.u.request,
16789               hrq->queue_id);
16790        mbox->vport = hrq->phba->pport;
16791        mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
16792        rc = lpfc_sli_issue_mbox(hrq->phba, mbox, MBX_POLL);
16793        /* The IOCTL status is embedded in the mailbox subheader. */
16794        shdr = (union lpfc_sli4_cfg_shdr *)
16795                &mbox->u.mqe.un.rq_destroy.header.cfg_shdr;
16796        shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16797        shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16798        if (shdr_status || shdr_add_status || rc) {
16799                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16800                                "2509 RQ_DESTROY mailbox failed with "
16801                                "status x%x add_status x%x, mbx status x%x\n",
16802                                shdr_status, shdr_add_status, rc);
16803                if (rc != MBX_TIMEOUT)
16804                        mempool_free(mbox, hrq->phba->mbox_mem_pool);
16805                return -ENXIO;
16806        }
16807        bf_set(lpfc_mbx_rq_destroy_q_id, &mbox->u.mqe.un.rq_destroy.u.request,
16808               drq->queue_id);
16809        rc = lpfc_sli_issue_mbox(drq->phba, mbox, MBX_POLL);
16810        shdr = (union lpfc_sli4_cfg_shdr *)
16811                &mbox->u.mqe.un.rq_destroy.header.cfg_shdr;
16812        shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16813        shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16814        if (shdr_status || shdr_add_status || rc) {
16815                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16816                                "2510 RQ_DESTROY mailbox failed with "
16817                                "status x%x add_status x%x, mbx status x%x\n",
16818                                shdr_status, shdr_add_status, rc);
16819                status = -ENXIO;
16820        }
16821        list_del_init(&hrq->list);
16822        list_del_init(&drq->list);
16823        mempool_free(mbox, hrq->phba->mbox_mem_pool);
16824        return status;
16825}
16826
16827/**
16828 * lpfc_sli4_post_sgl - Post scatter gather list for an XRI to HBA
16829 * @phba: pointer to lpfc hba data structure.
16830 * @pdma_phys_addr0: Physical address of the 1st SGL page.
16831 * @pdma_phys_addr1: Physical address of the 2nd SGL page.
16832 * @xritag: the xritag that ties this io to the SGL pages.
16833 *
16834 * This routine will post the sgl pages for the IO that has the xritag
16835 * that is in the iocbq structure. The xritag is assigned during iocbq
16836 * creation and persists for as long as the driver is loaded.
16837 * If the caller has fewer than 256 scatter gather segments to map, then
16838 * pdma_phys_addr1 should be 0.
16839 * If the caller needs to map more than 256 scatter gather segments, then
16840 * pdma_phys_addr1 should be a valid physical address.
16841 * Physical addresses for SGLs must be 64 byte aligned.
16842 * If two SGLs are mapped, the first one must have 256 entries and the
16843 * second can have between 1 and 256 entries.
16844 *
16845 * Return codes:
16846 *      0 - Success
16847 *      -ENXIO, -ENOMEM - Failure
16848 **/
16849int
16850lpfc_sli4_post_sgl(struct lpfc_hba *phba,
16851                dma_addr_t pdma_phys_addr0,
16852                dma_addr_t pdma_phys_addr1,
16853                uint16_t xritag)
16854{
16855        struct lpfc_mbx_post_sgl_pages *post_sgl_pages;
16856        LPFC_MBOXQ_t *mbox;
16857        int rc;
16858        uint32_t shdr_status, shdr_add_status;
16859        uint32_t mbox_tmo;
16860        union lpfc_sli4_cfg_shdr *shdr;
16861
16862        if (xritag == NO_XRI) {
16863                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16864                                "0364 Invalid param:\n");
16865                return -EINVAL;
16866        }
16867
16868        mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
16869        if (!mbox)
16870                return -ENOMEM;
16871
16872        lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
16873                        LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES,
16874                        sizeof(struct lpfc_mbx_post_sgl_pages) -
16875                        sizeof(struct lpfc_sli4_cfg_mhdr), LPFC_SLI4_MBX_EMBED);
16876
16877        post_sgl_pages = (struct lpfc_mbx_post_sgl_pages *)
16878                                &mbox->u.mqe.un.post_sgl_pages;
16879        bf_set(lpfc_post_sgl_pages_xri, post_sgl_pages, xritag);
16880        bf_set(lpfc_post_sgl_pages_xricnt, post_sgl_pages, 1);
16881
16882        post_sgl_pages->sgl_pg_pairs[0].sgl_pg0_addr_lo =
16883                                cpu_to_le32(putPaddrLow(pdma_phys_addr0));
16884        post_sgl_pages->sgl_pg_pairs[0].sgl_pg0_addr_hi =
16885                                cpu_to_le32(putPaddrHigh(pdma_phys_addr0));
16886
16887        post_sgl_pages->sgl_pg_pairs[0].sgl_pg1_addr_lo =
16888                                cpu_to_le32(putPaddrLow(pdma_phys_addr1));
16889        post_sgl_pages->sgl_pg_pairs[0].sgl_pg1_addr_hi =
16890                                cpu_to_le32(putPaddrHigh(pdma_phys_addr1));
16891        if (!phba->sli4_hba.intr_enable)
16892                rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
16893        else {
16894                mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
16895                rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
16896        }
16897        /* The IOCTL status is embedded in the mailbox subheader. */
16898        shdr = (union lpfc_sli4_cfg_shdr *) &post_sgl_pages->header.cfg_shdr;
16899        shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16900        shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16901        if (rc != MBX_TIMEOUT)
16902                mempool_free(mbox, phba->mbox_mem_pool);
16903        if (shdr_status || shdr_add_status || rc) {
16904                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16905                                "2511 POST_SGL mailbox failed with "
16906                                "status x%x add_status x%x, mbx status x%x\n",
16907                                shdr_status, shdr_add_status, rc);
16908                rc = -ENXIO;
16908        }
16909        return rc;
16910}
16911
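/*
 * Illustrative sketch only, not driver code: posting the SGL for an
 * xri that maps fewer than 256 scatter gather entries, in which case
 * the second page address must be 0 as documented above.  The helper
 * name is hypothetical.
 */
static int __maybe_unused
lpfc_example_post_single_page_sgl(struct lpfc_hba *phba,
                                  dma_addr_t sgl_phys, uint16_t xritag)
{
        /* one 64 byte aligned SGL page, no second page */
        return lpfc_sli4_post_sgl(phba, sgl_phys, 0, xritag);
}
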
16912/**
16913 * lpfc_sli4_alloc_xri - Get an available xri in the device's range
16914 * @phba: pointer to lpfc hba data structure.
16915 *
16916 * This routine is invoked to allocate the next available xri from the
16917 * driver's xri bitmask, consistent with the SLI-4 interface spec.
16918 *
16919 * Returns
16920 *      An xri in the range 0 <= xri < max_xri if successful,
16921 *      NO_XRI if no xris are available.
16924 **/
16925static uint16_t
16926lpfc_sli4_alloc_xri(struct lpfc_hba *phba)
16927{
16928        unsigned long xri;
16929
16930        /*
16931         * Fetch the next logical xri.  Because this index is logical,
16932         * the driver starts at 0 each time.
16933         */
16934        spin_lock_irq(&phba->hbalock);
16935        xri = find_next_zero_bit(phba->sli4_hba.xri_bmask,
16936                                 phba->sli4_hba.max_cfg_param.max_xri, 0);
16937        if (xri >= phba->sli4_hba.max_cfg_param.max_xri) {
16938                spin_unlock_irq(&phba->hbalock);
16939                return NO_XRI;
16940        } else {
16941                set_bit(xri, phba->sli4_hba.xri_bmask);
16942                phba->sli4_hba.max_cfg_param.xri_used++;
16943        }
16944        spin_unlock_irq(&phba->hbalock);
16945        return xri;
16946}
16947
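/*
 * Note: lpfc_sli4_alloc_xri() above is a find-first-zero bitmap
 * allocator run under hbalock; __lpfc_sli4_free_xri() below is its
 * inverse, clearing the bit and decrementing the usage counter.
 */
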
16948/**
16949 * __lpfc_sli4_free_xri - Release an xri for reuse.
16950 * @phba: pointer to lpfc hba data structure.
16951 * @xri: xri to release.
16952 *
16953 * This routine is invoked to release an xri to the pool of
16954 * available xris maintained by the driver. The caller must hold hbalock.
16955 **/
16956static void
16957__lpfc_sli4_free_xri(struct lpfc_hba *phba, int xri)
16958{
16959        if (test_and_clear_bit(xri, phba->sli4_hba.xri_bmask)) {
16960                phba->sli4_hba.max_cfg_param.xri_used--;
16961        }
16962}
16963
16964/**
16965 * lpfc_sli4_free_xri - Release an xri for reuse.
16966 * @phba: pointer to lpfc hba data structure.
16967 * @xri: xri to release.
16968 *
16969 * This routine is invoked to release an xri to the pool of
16970 * available xris maintained by the driver.
16971 **/
16972void
16973lpfc_sli4_free_xri(struct lpfc_hba *phba, int xri)
16974{
16975        spin_lock_irq(&phba->hbalock);
16976        __lpfc_sli4_free_xri(phba, xri);
16977        spin_unlock_irq(&phba->hbalock);
16978}
16979
16980/**
16981 * lpfc_sli4_next_xritag - Get an xritag for the io
16982 * @phba: Pointer to HBA context object.
16983 *
16984 * This function gets an xritag for the iocb. If there is no unused xritag
16985 * it will return NO_XRI (0xffff), which is not a valid xritag.
16986 * The function returns the allocated xritag on success.
16987 * The caller is not required to hold any lock.
16989 **/
16990uint16_t
16991lpfc_sli4_next_xritag(struct lpfc_hba *phba)
16992{
16993        uint16_t xri_index;
16994
16995        xri_index = lpfc_sli4_alloc_xri(phba);
16996        if (xri_index == NO_XRI)
16997                lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
16998                                "2004 Failed to allocate XRI. Last XRITAG is %d"
16999                                " Max XRI is %d, Used XRI is %d\n",
17000                                xri_index,
17001                                phba->sli4_hba.max_cfg_param.max_xri,
17002                                phba->sli4_hba.max_cfg_param.xri_used);
17003        return xri_index;
17004}
17005
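/*
 * Illustrative sketch only, not driver code: callers must treat
 * NO_XRI as allocation failure.  The helper name is hypothetical.
 */
static int __maybe_unused
lpfc_example_take_xritag(struct lpfc_hba *phba, uint16_t *xritag)
{
        *xritag = lpfc_sli4_next_xritag(phba);
        if (*xritag == NO_XRI)
                return -ENOMEM; /* xri pool exhausted */
        return 0;
}
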
17006/**
17007 * lpfc_sli4_post_sgl_list - post a block of ELS sgls to the port.
17008 * @phba: pointer to lpfc hba data structure.
17009 * @post_sgl_list: pointer to els sgl entry list.
17010 * @post_cnt: number of els sgl entries on the list.
17011 *
17012 * This routine is invoked to post a block of driver's sgl pages to the
17013 * HBA using non-embedded mailbox command. No Lock is held. This routine
17014 * is only called when the driver is loading and after all IO has been
17015 * stopped.
17016 **/
17017static int
17018lpfc_sli4_post_sgl_list(struct lpfc_hba *phba,
17019                            struct list_head *post_sgl_list,
17020                            int post_cnt)
17021{
17022        struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
17023        struct lpfc_mbx_post_uembed_sgl_page1 *sgl;
17024        struct sgl_page_pairs *sgl_pg_pairs;
17025        void *viraddr;
17026        LPFC_MBOXQ_t *mbox;
17027        uint32_t reqlen, alloclen, pg_pairs;
17028        uint32_t mbox_tmo;
17029        uint16_t xritag_start = 0;
17030        int rc = 0;
17031        uint32_t shdr_status, shdr_add_status;
17032        union lpfc_sli4_cfg_shdr *shdr;
17033
17034        reqlen = post_cnt * sizeof(struct sgl_page_pairs) +
17035                 sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
17036        if (reqlen > SLI4_PAGE_SIZE) {
17037                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17038                                "2559 Block sgl registration required DMA "
17039                                "size (%d) greater than a page\n", reqlen);
17040                return -ENOMEM;
17041        }
17042
17043        mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
17044        if (!mbox)
17045                return -ENOMEM;
17046
17047        /* Allocate DMA memory and set up the non-embedded mailbox command */
17048        alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
17049                         LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES, reqlen,
17050                         LPFC_SLI4_MBX_NEMBED);
17051
17052        if (alloclen < reqlen) {
17053                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17054                                "0285 Allocated DMA memory size (%d) is "
17055                                "less than the requested DMA memory "
17056                                "size (%d)\n", alloclen, reqlen);
17057                lpfc_sli4_mbox_cmd_free(phba, mbox);
17058                return -ENOMEM;
17059        }
17060        /* Set up the SGL pages in the non-embedded DMA pages */
17061        viraddr = mbox->sge_array->addr[0];
17062        sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr;
17063        sgl_pg_pairs = &sgl->sgl_pg_pairs;
17064
17065        pg_pairs = 0;
17066        list_for_each_entry_safe(sglq_entry, sglq_next, post_sgl_list, list) {
17067                /* Set up the sge entry */
17068                sgl_pg_pairs->sgl_pg0_addr_lo =
17069                                cpu_to_le32(putPaddrLow(sglq_entry->phys));
17070                sgl_pg_pairs->sgl_pg0_addr_hi =
17071                                cpu_to_le32(putPaddrHigh(sglq_entry->phys));
17072                sgl_pg_pairs->sgl_pg1_addr_lo =
17073                                cpu_to_le32(putPaddrLow(0));
17074                sgl_pg_pairs->sgl_pg1_addr_hi =
17075                                cpu_to_le32(putPaddrHigh(0));
17076
17077                /* Keep the first xritag on the list */
17078                if (pg_pairs == 0)
17079                        xritag_start = sglq_entry->sli4_xritag;
17080                sgl_pg_pairs++;
17081                pg_pairs++;
17082        }
17083
17084        /* Complete initialization and perform endian conversion. */
17085        bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start);
17086        bf_set(lpfc_post_sgl_pages_xricnt, sgl, post_cnt);
17087        sgl->word0 = cpu_to_le32(sgl->word0);
17088
17089        if (!phba->sli4_hba.intr_enable)
17090                rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
17091        else {
17092                mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
17093                rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
17094        }
17095        shdr = (union lpfc_sli4_cfg_shdr *) &sgl->cfg_shdr;
17096        shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
17097        shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
17098        if (rc != MBX_TIMEOUT)
17099                lpfc_sli4_mbox_cmd_free(phba, mbox);
17100        if (shdr_status || shdr_add_status || rc) {
17101                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17102                                "2513 POST_SGL_BLOCK mailbox command failed "
17103                                "status x%x add_status x%x mbx status x%x\n",
17104                                shdr_status, shdr_add_status, rc);
17105                rc = -ENXIO;
17106        }
17107        return rc;
17108}
17109
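/*
 * Illustrative sketch only, not driver code: the reqlen check above
 * bounds a non-embedded POST_SGL_PAGES command to one SLI4 page, i.e.
 * at most this many sgl page pairs per mailbox command.  The helper
 * name is hypothetical.
 */
static inline uint32_t __maybe_unused
lpfc_example_max_sgl_pg_pairs(void)
{
        return (SLI4_PAGE_SIZE - sizeof(union lpfc_sli4_cfg_shdr) -
                sizeof(uint32_t)) / sizeof(struct sgl_page_pairs);
}
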
17110/**
17111 * lpfc_sli4_post_io_sgl_block - post a block of io buffer sgls to firmware
17112 * @phba: pointer to lpfc hba data structure.
17113 * @nblist: pointer to the io buffer list.
17114 * @count: number of io buffers on the list.
17115 *
17116 * This routine is invoked to post a block of @count io buffer sgl pages from
17117 * the buffer list @nblist to the HBA using a non-embedded mailbox command.
17118 * No Lock is held.
17119 *
17120 **/
17121static int
17122lpfc_sli4_post_io_sgl_block(struct lpfc_hba *phba, struct list_head *nblist,
17123                            int count)
17124{
17125        struct lpfc_io_buf *lpfc_ncmd;
17126        struct lpfc_mbx_post_uembed_sgl_page1 *sgl;
17127        struct sgl_page_pairs *sgl_pg_pairs;
17128        void *viraddr;
17129        LPFC_MBOXQ_t *mbox;
17130        uint32_t reqlen, alloclen, pg_pairs;
17131        uint32_t mbox_tmo;
17132        uint16_t xritag_start = 0;
17133        int rc = 0;
17134        uint32_t shdr_status, shdr_add_status;
17135        dma_addr_t pdma_phys_bpl1;
17136        union lpfc_sli4_cfg_shdr *shdr;
17137
17138        /* Calculate the requested length of the dma memory */
17139        reqlen = count * sizeof(struct sgl_page_pairs) +
17140                 sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
17141        if (reqlen > SLI4_PAGE_SIZE) {
17142                lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
17143                                "6118 Block sgl registration required DMA "
17144                                "size (%d) greater than a page\n", reqlen);
17145                return -ENOMEM;
17146        }
17147        mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
17148        if (!mbox) {
17149                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17150                                "6119 Failed to allocate mbox cmd memory\n");
17151                return -ENOMEM;
17152        }
17153
17154        /* Allocate DMA memory and set up the non-embedded mailbox command */
17155        alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
17156                                    LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES,
17157                                    reqlen, LPFC_SLI4_MBX_NEMBED);
17158
17159        if (alloclen < reqlen) {
17160                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17161                                "6120 Allocated DMA memory size (%d) is "
17162                                "less than the requested DMA memory "
17163                                "size (%d)\n", alloclen, reqlen);
17164                lpfc_sli4_mbox_cmd_free(phba, mbox);
17165                return -ENOMEM;
17166        }
17167
17168        /* Get the first SGE entry from the non-embedded DMA memory */
17169        viraddr = mbox->sge_array->addr[0];
17170
17171        /* Set up the SGL pages in the non-embedded DMA pages */
17172        sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr;
17173        sgl_pg_pairs = &sgl->sgl_pg_pairs;
17174
17175        pg_pairs = 0;
17176        list_for_each_entry(lpfc_ncmd, nblist, list) {
17177                /* Set up the sge entry */
17178                sgl_pg_pairs->sgl_pg0_addr_lo =
17179                        cpu_to_le32(putPaddrLow(lpfc_ncmd->dma_phys_sgl));
17180                sgl_pg_pairs->sgl_pg0_addr_hi =
17181                        cpu_to_le32(putPaddrHigh(lpfc_ncmd->dma_phys_sgl));
17182                if (phba->cfg_sg_dma_buf_size > SGL_PAGE_SIZE)
17183                        pdma_phys_bpl1 = lpfc_ncmd->dma_phys_sgl +
17184                                                SGL_PAGE_SIZE;
17185                else
17186                        pdma_phys_bpl1 = 0;
17187                sgl_pg_pairs->sgl_pg1_addr_lo =
17188                        cpu_to_le32(putPaddrLow(pdma_phys_bpl1));
17189                sgl_pg_pairs->sgl_pg1_addr_hi =
17190                        cpu_to_le32(putPaddrHigh(pdma_phys_bpl1));
17191                /* Keep the first xritag on the list */
17192                if (pg_pairs == 0)
17193                        xritag_start = lpfc_ncmd->cur_iocbq.sli4_xritag;
17194                sgl_pg_pairs++;
17195                pg_pairs++;
17196        }
17197        bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start);
17198        bf_set(lpfc_post_sgl_pages_xricnt, sgl, pg_pairs);
17199        /* Perform endian conversion if necessary */
17200        sgl->word0 = cpu_to_le32(sgl->word0);
17201
17202        if (!phba->sli4_hba.intr_enable) {
17203                rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
17204        } else {
17205                mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
17206                rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
17207        }
17208        shdr = (union lpfc_sli4_cfg_shdr *)&sgl->cfg_shdr;
17209        shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
17210        shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
17211        if (rc != MBX_TIMEOUT)
17212                lpfc_sli4_mbox_cmd_free(phba, mbox);
17213        if (shdr_status || shdr_add_status || rc) {
17214                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17215                                "6125 POST_SGL_BLOCK mailbox command failed "
17216                                "status x%x add_status x%x mbx status x%x\n",
17217                                shdr_status, shdr_add_status, rc);
17218                rc = -ENXIO;
17219        }
17220        return rc;
17221}
17222
17223/**
17224 * lpfc_sli4_post_io_sgl_list - Post blocks of nvme buffer sgls from a list
17225 * @phba: pointer to lpfc hba data structure.
17226 * @post_nblist: pointer to the nvme buffer list.
17227 * @sb_count: number of nvme buffers.
17228 *
17229 * This routine walks a list of nvme buffers that was passed in. It attempts
17230 * to construct blocks of nvme buffer sgls which contains contiguous xris and
17231 * uses the non-embedded SGL block post mailbox commands to post to the port.
17232 * For a single NVME buffer sgl with a non-contiguous xri, if any, it uses
17233 * the embedded SGL post mailbox command for posting. The @post_nblist passed
17234 * in must be a local list, so no lock is needed while manipulating the list.
17235 *
17236 * Returns: 0 on failure, otherwise the number of successfully posted buffers.
17237 **/
17238int
17239lpfc_sli4_post_io_sgl_list(struct lpfc_hba *phba,
17240                           struct list_head *post_nblist, int sb_count)
17241{
17242        struct lpfc_io_buf *lpfc_ncmd, *lpfc_ncmd_next;
17243        int status, sgl_size;
17244        int post_cnt = 0, block_cnt = 0, num_posting = 0, num_posted = 0;
17245        dma_addr_t pdma_phys_sgl1;
17246        int last_xritag = NO_XRI;
17247        int cur_xritag;
17248        LIST_HEAD(prep_nblist);
17249        LIST_HEAD(blck_nblist);
17250        LIST_HEAD(nvme_nblist);
17251
17252        /* sanity check */
17253        if (sb_count <= 0)
17254                return -EINVAL;
17255
17256        sgl_size = phba->cfg_sg_dma_buf_size;
17257        list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next, post_nblist, list) {
17258                list_del_init(&lpfc_ncmd->list);
17259                block_cnt++;
17260                if ((last_xritag != NO_XRI) &&
17261                    (lpfc_ncmd->cur_iocbq.sli4_xritag != last_xritag + 1)) {
17262                        /* a hole in xri block, form a sgl posting block */
17263                        list_splice_init(&prep_nblist, &blck_nblist);
17264                        post_cnt = block_cnt - 1;
17265                        /* prepare list for next posting block */
17266                        list_add_tail(&lpfc_ncmd->list, &prep_nblist);
17267                        block_cnt = 1;
17268                } else {
17269                        /* prepare list for next posting block */
17270                        list_add_tail(&lpfc_ncmd->list, &prep_nblist);
17271                        /* enough sgls for non-embed sgl mbox command */
17272                        if (block_cnt == LPFC_NEMBED_MBOX_SGL_CNT) {
17273                                list_splice_init(&prep_nblist, &blck_nblist);
17274                                post_cnt = block_cnt;
17275                                block_cnt = 0;
17276                        }
17277                }
17278                num_posting++;
17279                last_xritag = lpfc_ncmd->cur_iocbq.sli4_xritag;
17280
17281                /* end of repost sgl list condition for NVME buffers */
17282                if (num_posting == sb_count) {
17283                        if (post_cnt == 0) {
17284                                /* last sgl posting block */
17285                                list_splice_init(&prep_nblist, &blck_nblist);
17286                                post_cnt = block_cnt;
17287                        } else if (block_cnt == 1) {
17288                                /* last single sgl with non-contiguous xri */
17289                                if (sgl_size > SGL_PAGE_SIZE)
17290                                        pdma_phys_sgl1 =
17291                                                lpfc_ncmd->dma_phys_sgl +
17292                                                SGL_PAGE_SIZE;
17293                                else
17294                                        pdma_phys_sgl1 = 0;
17295                                cur_xritag = lpfc_ncmd->cur_iocbq.sli4_xritag;
17296                                status = lpfc_sli4_post_sgl(
17297                                                phba, lpfc_ncmd->dma_phys_sgl,
17298                                                pdma_phys_sgl1, cur_xritag);
17299                                if (status) {
17300                                        /* Post error.  Buffer unavailable. */
17301                                        lpfc_ncmd->flags |=
17302                                                LPFC_SBUF_NOT_POSTED;
17303                                } else {
17304                                        /* Post success. Buffer available. */
17305                                        lpfc_ncmd->flags &=
17306                                                ~LPFC_SBUF_NOT_POSTED;
17307                                        lpfc_ncmd->status = IOSTAT_SUCCESS;
17308                                        num_posted++;
17309                                }
17310                                /* either way, put on NVME buffer sgl list */
17311                                list_add_tail(&lpfc_ncmd->list, &nvme_nblist);
17312                        }
17313                }
17314
17315                /* continue until a nembed page worth of sgls */
17316                if (post_cnt == 0)
17317                        continue;
17318
17319                /* post block of NVME buffer list sgls */
17320                status = lpfc_sli4_post_io_sgl_block(phba, &blck_nblist,
17321                                                     post_cnt);
17322
17323                /* don't reset xritag due to hole in xri block */
17324                if (block_cnt == 0)
17325                        last_xritag = NO_XRI;
17326
17327                /* reset NVME buffer post count for next round of posting */
17328                post_cnt = 0;
17329
17330                /* put sgl-posted NVME buffers on the NVME buffer sgl list */
17331                while (!list_empty(&blck_nblist)) {
17332                        list_remove_head(&blck_nblist, lpfc_ncmd,
17333                                         struct lpfc_io_buf, list);
17334                        if (status) {
17335                                /* Post error.  Mark buffer unavailable. */
17336                                lpfc_ncmd->flags |= LPFC_SBUF_NOT_POSTED;
17337                        } else {
17338                                /* Post success, Mark buffer available. */
17339                                lpfc_ncmd->flags &= ~LPFC_SBUF_NOT_POSTED;
17340                                lpfc_ncmd->status = IOSTAT_SUCCESS;
17341                                num_posted++;
17342                        }
17343                        list_add_tail(&lpfc_ncmd->list, &nvme_nblist);
17344                }
17345        }
17346        /* Push NVME buffers with sgl posted to the available list */
17347        lpfc_io_buf_replenish(phba, &nvme_nblist);
17348
17349        return num_posted;
17350}
17351
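/*
 * Note: the posting loop in lpfc_sli4_post_io_sgl_list() above batches
 * buffers whose xris are contiguous:
 *   - contiguous xri: keep accumulating buffers on prep_nblist
 *   - hole in the xri sequence: flush the accumulated block (block_cnt
 *     minus the new buffer) via lpfc_sli4_post_io_sgl_block() and start
 *     a new block
 *   - LPFC_NEMBED_MBOX_SGL_CNT buffers accumulated: flush a full block
 *   - a final single buffer with a non-contiguous xri: fall back to the
 *     embedded lpfc_sli4_post_sgl() path
 */
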
17352/**
17353 * lpfc_fc_frame_check - Check that this frame is a valid frame to handle
17354 * @phba: pointer to lpfc_hba struct that the frame was received on
17355 * @fc_hdr: A pointer to the FC Header data (In Big Endian Format)
17356 *
17357 * This function checks the fields in the @fc_hdr to see if the FC frame is a
17358 * valid type of frame that the LPFC driver will handle. This function will
17359 * return zero if the frame is a valid frame or a nonzero value when the
17360 * frame does not pass the check.
17361 **/
17362static int
17363lpfc_fc_frame_check(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr)
17364{
17366        struct fc_vft_header *fc_vft_hdr;
17367        uint32_t *header = (uint32_t *) fc_hdr;
17368
17369#define FC_RCTL_MDS_DIAGS       0xF4
17370
17371        switch (fc_hdr->fh_r_ctl) {
17372        case FC_RCTL_DD_UNCAT:          /* uncategorized information */
17373        case FC_RCTL_DD_SOL_DATA:       /* solicited data */
17374        case FC_RCTL_DD_UNSOL_CTL:      /* unsolicited control */
17375        case FC_RCTL_DD_SOL_CTL:        /* solicited control or reply */
17376        case FC_RCTL_DD_UNSOL_DATA:     /* unsolicited data */
17377        case FC_RCTL_DD_DATA_DESC:      /* data descriptor */
17378        case FC_RCTL_DD_UNSOL_CMD:      /* unsolicited command */
17379        case FC_RCTL_DD_CMD_STATUS:     /* command status */
17380        case FC_RCTL_ELS_REQ:   /* extended link services request */
17381        case FC_RCTL_ELS_REP:   /* extended link services reply */
17382        case FC_RCTL_ELS4_REQ:  /* FC-4 ELS request */
17383        case FC_RCTL_ELS4_REP:  /* FC-4 ELS reply */
17384        case FC_RCTL_BA_NOP:    /* basic link service NOP */
17385        case FC_RCTL_BA_ABTS:   /* basic link service abort */
17386        case FC_RCTL_BA_RMC:    /* remove connection */
17387        case FC_RCTL_BA_ACC:    /* basic accept */
17388        case FC_RCTL_BA_RJT:    /* basic reject */
17389        case FC_RCTL_BA_PRMT:
17390        case FC_RCTL_ACK_1:     /* acknowledge_1 */
17391        case FC_RCTL_ACK_0:     /* acknowledge_0 */
17392        case FC_RCTL_P_RJT:     /* port reject */
17393        case FC_RCTL_F_RJT:     /* fabric reject */
17394        case FC_RCTL_P_BSY:     /* port busy */
17395        case FC_RCTL_F_BSY:     /* fabric busy to data frame */
17396        case FC_RCTL_F_BSYL:    /* fabric busy to link control frame */
17397        case FC_RCTL_LCR:       /* link credit reset */
17398        case FC_RCTL_MDS_DIAGS: /* MDS Diagnostics */
17399        case FC_RCTL_END:       /* end */
17400                break;
17401        case FC_RCTL_VFTH:      /* Virtual Fabric tagging Header */
17402                fc_vft_hdr = (struct fc_vft_header *)fc_hdr;
17403                fc_hdr = &((struct fc_frame_header *)fc_vft_hdr)[1];
17404                return lpfc_fc_frame_check(phba, fc_hdr);
17405        default:
17406                goto drop;
17407        }
17408
17409        switch (fc_hdr->fh_type) {
17410        case FC_TYPE_BLS:
17411        case FC_TYPE_ELS:
17412        case FC_TYPE_FCP:
17413        case FC_TYPE_CT:
17414        case FC_TYPE_NVME:
17415                break;
17416        case FC_TYPE_IP:
17417        case FC_TYPE_ILS:
17418        default:
17419                goto drop;
17420        }
17421
17422        lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
17423                        "2538 Received frame rctl:x%x, type:x%x, "
17424                        "frame Data:%08x %08x %08x %08x %08x %08x %08x\n",
17425                        fc_hdr->fh_r_ctl, fc_hdr->fh_type,
17426                        be32_to_cpu(header[0]), be32_to_cpu(header[1]),
17427                        be32_to_cpu(header[2]), be32_to_cpu(header[3]),
17428                        be32_to_cpu(header[4]), be32_to_cpu(header[5]),
17429                        be32_to_cpu(header[6]));
17430        return 0;
17431drop:
17432        lpfc_printf_log(phba, KERN_WARNING, LOG_ELS,
17433                        "2539 Dropped frame rctl:x%x type:x%x\n",
17434                        fc_hdr->fh_r_ctl, fc_hdr->fh_type);
17435        return 1;
17436}
17437
17438/**
17439 * lpfc_fc_hdr_get_vfi - Get the VFI from an FC frame
17440 * @fc_hdr: A pointer to the FC Header data (In Big Endian Format)
17441 *
17442 * This function processes the FC header to retrieve the VFI from the VFT
17443 * header, if one exists. This function will return the VFI if a VFT header
17444 * exists or 0 if no VFT header exists.
17445 **/
17446static uint32_t
17447lpfc_fc_hdr_get_vfi(struct fc_frame_header *fc_hdr)
17448{
17449        struct fc_vft_header *fc_vft_hdr = (struct fc_vft_header *)fc_hdr;
17450
17451        if (fc_hdr->fh_r_ctl != FC_RCTL_VFTH)
17452                return 0;
17453        return bf_get(fc_vft_hdr_vf_id, fc_vft_hdr);
17454}
17455
17456/**
17457 * lpfc_fc_frame_to_vport - Finds the vport that a frame is destined to
17458 * @phba: Pointer to the HBA structure to search for the vport on
17459 * @fc_hdr: A pointer to the FC Header data (In Big Endian Format)
17460 * @fcfi: The FC Fabric ID that the frame came from
17461 * @did: Destination ID to match against
17462 *
17463 * This function searches the @phba for a vport that matches the content of the
17464 * @fc_hdr passed in and the @fcfi. This function uses the @fc_hdr to fetch the
17465 * VFI, if the Virtual Fabric Tagging Header exists, and the DID. This function
17466 * returns the matching vport pointer or NULL if unable to match frame to a
17467 * vport.
17468 **/
17469static struct lpfc_vport *
17470lpfc_fc_frame_to_vport(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr,
17471                       uint16_t fcfi, uint32_t did)
17472{
17473        struct lpfc_vport **vports;
17474        struct lpfc_vport *vport = NULL;
17475        int i;
17476
17477        if (did == Fabric_DID)
17478                return phba->pport;
17479        if ((phba->pport->fc_flag & FC_PT2PT) &&
17480                !(phba->link_state == LPFC_HBA_READY))
17481                return phba->pport;
17482
17483        vports = lpfc_create_vport_work_array(phba);
17484        if (vports != NULL) {
17485                for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
17486                        if (phba->fcf.fcfi == fcfi &&
17487                            vports[i]->vfi == lpfc_fc_hdr_get_vfi(fc_hdr) &&
17488                            vports[i]->fc_myDID == did) {
17489                                vport = vports[i];
17490                                break;
17491                        }
17492                }
17493        }
17494        lpfc_destroy_vport_work_array(phba, vports);
17495        return vport;
17496}
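
/*
 * Editorial sketch (not part of the driver): the lookup above binds a
 * frame to a vport only when the fabric id (FCFI), the virtual fabric
 * id (VFI) and the destination id (DID) all agree.  A hypothetical
 * predicate form of that triple comparison:
 */
static inline bool
lpfc_example_vport_match(uint16_t frame_fcfi, uint32_t frame_vfi,
                         uint32_t frame_did, uint16_t port_fcfi,
                         uint32_t port_vfi, uint32_t port_did)
{
        return frame_fcfi == port_fcfi &&
               frame_vfi == port_vfi &&
               frame_did == port_did;
}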
17497
17498/**
17499 * lpfc_update_rcv_time_stamp - Update vport's rcv seq time stamp
17500 * @vport: The vport to work on.
17501 *
17502 * This function updates the receive sequence time stamp for this vport. The
17503 * receive sequence time stamp indicates the time that the last frame of the
17504 * sequence that has been idle for the longest amount of time was received.
17505 * The driver uses this time stamp to determine if any received sequences have
17506 * timed out.
17507 **/
17508static void
17509lpfc_update_rcv_time_stamp(struct lpfc_vport *vport)
17510{
17511        struct lpfc_dmabuf *h_buf;
17512        struct hbq_dmabuf *dmabuf = NULL;
17513
17514        /* get the oldest sequence on the rcv list */
17515        h_buf = list_get_first(&vport->rcv_buffer_list,
17516                               struct lpfc_dmabuf, list);
17517        if (!h_buf)
17518                return;
17519        dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
17520        vport->rcv_buffer_time_stamp = dmabuf->time_stamp;
17521}
17522
17523/**
17524 * lpfc_cleanup_rcv_buffers - Cleans up all outstanding receive sequences.
17525 * @vport: The vport that the received sequences were sent to.
17526 *
17527 * This function cleans up all outstanding received sequences. This is called
17528 * by the driver when a link event or user action invalidates all the received
17529 * sequences.
17530 **/
17531void
17532lpfc_cleanup_rcv_buffers(struct lpfc_vport *vport)
17533{
17534        struct lpfc_dmabuf *h_buf, *hnext;
17535        struct lpfc_dmabuf *d_buf, *dnext;
17536        struct hbq_dmabuf *dmabuf = NULL;
17537
17538        /* start with the oldest sequence on the rcv list */
17539        list_for_each_entry_safe(h_buf, hnext, &vport->rcv_buffer_list, list) {
17540                dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
17541                list_del_init(&dmabuf->hbuf.list);
17542                list_for_each_entry_safe(d_buf, dnext,
17543                                         &dmabuf->dbuf.list, list) {
17544                        list_del_init(&d_buf->list);
17545                        lpfc_in_buf_free(vport->phba, d_buf);
17546                }
17547                lpfc_in_buf_free(vport->phba, &dmabuf->dbuf);
17548        }
17549}
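
/*
 * Editorial sketch (generic pattern, not driver code): both loops above
 * use the _safe list iterator because every entry is unlinked and freed
 * while the list is being walked.  The inner teardown reduces to:
 */
static inline void
lpfc_example_free_dbuf_list(struct lpfc_hba *phba, struct list_head *head)
{
        struct lpfc_dmabuf *d_buf, *next;

        /* _safe variant: entries may be deleted while iterating */
        list_for_each_entry_safe(d_buf, next, head, list) {
                list_del_init(&d_buf->list);
                lpfc_in_buf_free(phba, d_buf);
        }
}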
17550
17551/**
17552 * lpfc_rcv_seq_check_edtov - Cleans up timed out receive sequences.
17553 * @vport: The vport that the received sequences were sent to.
17554 *
17555 * This function determines whether any received sequences have timed out by
17556 * first checking the vport's rcv_buffer_time_stamp. If this time_stamp
17557 * indicates that there is at least one timed out sequence this routine will
17558 * go through the received sequences one at a time from most inactive to most
17559 * active to determine which ones need to be cleaned up. Once it has determined
17560 * that a sequence needs to be cleaned up it will simply free up the resources
17561 * without sending an abort.
17562 **/
17563void
17564lpfc_rcv_seq_check_edtov(struct lpfc_vport *vport)
17565{
17566        struct lpfc_dmabuf *h_buf, *hnext;
17567        struct lpfc_dmabuf *d_buf, *dnext;
17568        struct hbq_dmabuf *dmabuf = NULL;
17569        unsigned long timeout;
17570        int abort_count = 0;
17571
17572        timeout = (msecs_to_jiffies(vport->phba->fc_edtov) +
17573                   vport->rcv_buffer_time_stamp);
17574        if (list_empty(&vport->rcv_buffer_list) ||
17575            time_before(jiffies, timeout))
17576                return;
17577        /* start with the oldest sequence on the rcv list */
17578        list_for_each_entry_safe(h_buf, hnext, &vport->rcv_buffer_list, list) {
17579                dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
17580                timeout = (msecs_to_jiffies(vport->phba->fc_edtov) +
17581                           dmabuf->time_stamp);
17582                if (time_before(jiffies, timeout))
17583                        break;
17584                abort_count++;
17585                list_del_init(&dmabuf->hbuf.list);
17586                list_for_each_entry_safe(d_buf, dnext,
17587                                         &dmabuf->dbuf.list, list) {
17588                        list_del_init(&d_buf->list);
17589                        lpfc_in_buf_free(vport->phba, d_buf);
17590                }
17591                lpfc_in_buf_free(vport->phba, &dmabuf->dbuf);
17592        }
17593        if (abort_count)
17594                lpfc_update_rcv_time_stamp(vport);
17595}
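
/*
 * Editorial sketch: a sequence is considered timed out once E_D_TOV
 * milliseconds (converted to jiffies) have elapsed since its time
 * stamp, which is the jiffies arithmetic used twice above.
 * Illustrative helper only:
 */
static inline bool
lpfc_example_seq_timed_out(unsigned long stamp, uint32_t edtov_msecs)
{
        return time_after_eq(jiffies, stamp + msecs_to_jiffies(edtov_msecs));
}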
17596
17597/**
17598 * lpfc_fc_frame_add - Adds a frame to the vport's list of received sequences
17599 * @vport: pointer to a virtual port
17600 * @dmabuf: pointer to a dmabuf that describes the hdr and data of the FC frame
17601 *
17602 * This function searches through the existing incomplete sequences that have
17603 * been sent to this @vport. If the frame matches one of the incomplete
17604 * sequences then the dbuf in the @dmabuf is added to the list of frames that
17605 * make up that sequence. If no sequence is found that matches this frame then
17606 * the function will add the hbuf in the @dmabuf to the @vport's rcv_buffer_list.
17607 * This function returns a pointer to the first dmabuf in the sequence list that
17608 * the frame was linked to.
17609 **/
17610static struct hbq_dmabuf *
17611lpfc_fc_frame_add(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf)
17612{
17613        struct fc_frame_header *new_hdr;
17614        struct fc_frame_header *temp_hdr;
17615        struct lpfc_dmabuf *d_buf;
17616        struct lpfc_dmabuf *h_buf;
17617        struct hbq_dmabuf *seq_dmabuf = NULL;
17618        struct hbq_dmabuf *temp_dmabuf = NULL;
17619        uint8_t found = 0;
17620
17621        INIT_LIST_HEAD(&dmabuf->dbuf.list);
17622        dmabuf->time_stamp = jiffies;
17623        new_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
17624
17625        /* Use the hdr_buf to find the sequence that this frame belongs to */
17626        list_for_each_entry(h_buf, &vport->rcv_buffer_list, list) {
17627                temp_hdr = (struct fc_frame_header *)h_buf->virt;
17628                if ((temp_hdr->fh_seq_id != new_hdr->fh_seq_id) ||
17629                    (temp_hdr->fh_ox_id != new_hdr->fh_ox_id) ||
17630                    (memcmp(&temp_hdr->fh_s_id, &new_hdr->fh_s_id, 3)))
17631                        continue;
17632                /* found a pending sequence that matches this frame */
17633                seq_dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
17634                break;
17635        }
17636        if (!seq_dmabuf) {
17637                /*
17638                 * This indicates first frame received for this sequence.
17639                 * Queue the buffer on the vport's rcv_buffer_list.
17640                 */
17641                list_add_tail(&dmabuf->hbuf.list, &vport->rcv_buffer_list);
17642                lpfc_update_rcv_time_stamp(vport);
17643                return dmabuf;
17644        }
17645        temp_hdr = seq_dmabuf->hbuf.virt;
17646        if (be16_to_cpu(new_hdr->fh_seq_cnt) <
17647                be16_to_cpu(temp_hdr->fh_seq_cnt)) {
17648                list_del_init(&seq_dmabuf->hbuf.list);
17649                list_add_tail(&dmabuf->hbuf.list, &vport->rcv_buffer_list);
17650                list_add_tail(&dmabuf->dbuf.list, &seq_dmabuf->dbuf.list);
17651                lpfc_update_rcv_time_stamp(vport);
17652                return dmabuf;
17653        }
17654        /* move this sequence to the tail to indicate a young sequence */
17655        list_move_tail(&seq_dmabuf->hbuf.list, &vport->rcv_buffer_list);
17656        seq_dmabuf->time_stamp = jiffies;
17657        lpfc_update_rcv_time_stamp(vport);
17658        if (list_empty(&seq_dmabuf->dbuf.list)) {
17659                temp_hdr = dmabuf->hbuf.virt;
17660                list_add_tail(&dmabuf->dbuf.list, &seq_dmabuf->dbuf.list);
17661                return seq_dmabuf;
17662        }
17663        /* find the correct place in the sequence to insert this frame */
17664        d_buf = list_entry(seq_dmabuf->dbuf.list.prev, typeof(*d_buf), list);
17665        while (!found) {
17666                temp_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
17667                temp_hdr = (struct fc_frame_header *)temp_dmabuf->hbuf.virt;
17668                /*
17669                 * If the frame's sequence count is greater than the frame on
17670                 * the list then insert the frame right after this frame
17671                 */
17672                if (be16_to_cpu(new_hdr->fh_seq_cnt) >
17673                        be16_to_cpu(temp_hdr->fh_seq_cnt)) {
17674                        list_add(&dmabuf->dbuf.list, &temp_dmabuf->dbuf.list);
17675                        found = 1;
17676                        break;
17677                }
17678
17679                if (&d_buf->list == &seq_dmabuf->dbuf.list)
17680                        break;
17681                d_buf = list_entry(d_buf->list.prev, typeof(*d_buf), list);
17682        }
17683
17684        if (found)
17685                return seq_dmabuf;
17686        return NULL;
17687}
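
/*
 * Editorial sketch: frames are assigned to a pending sequence by the
 * (SEQ_ID, OX_ID, S_ID) triple, the same fields compared in
 * lpfc_fc_frame_add() above and lpfc_sli4_abort_partial_seq() below.
 * Hypothetical predicate form:
 */
static inline bool
lpfc_example_same_sequence(const struct fc_frame_header *a,
                           const struct fc_frame_header *b)
{
        return a->fh_seq_id == b->fh_seq_id &&
               a->fh_ox_id == b->fh_ox_id &&
               !memcmp(a->fh_s_id, b->fh_s_id, 3);
}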
17688
17689/**
17690 * lpfc_sli4_abort_partial_seq - Abort partially assembled unsol sequence
17691 * @vport: pointer to a virtual port
17692 * @dmabuf: pointer to a dmabuf that describes the FC sequence
17693 *
17694 * This function tries to abort the partially assembled sequence described
17695 * by the information in the basic abort @dmabuf. It checks to see whether such
17696 * a partially assembled sequence is held by the driver. If so, it frees up all
17697 * the frames from the partially assembled sequence.
17698 *
17699 * Return
17700 * true  -- if there is matching partially assembled sequence present and all
17701 *          the frames freed with the sequence;
17702 * false -- if there is no matching partially assembled sequence present so
17703 *          nothing got aborted in the lower layer driver
17704 **/
17705static bool
17706lpfc_sli4_abort_partial_seq(struct lpfc_vport *vport,
17707                            struct hbq_dmabuf *dmabuf)
17708{
17709        struct fc_frame_header *new_hdr;
17710        struct fc_frame_header *temp_hdr;
17711        struct lpfc_dmabuf *d_buf, *n_buf, *h_buf;
17712        struct hbq_dmabuf *seq_dmabuf = NULL;
17713
17714        /* Use the hdr_buf to find the sequence that matches this frame */
17715        INIT_LIST_HEAD(&dmabuf->dbuf.list);
17716        INIT_LIST_HEAD(&dmabuf->hbuf.list);
17717        new_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
17718        list_for_each_entry(h_buf, &vport->rcv_buffer_list, list) {
17719                temp_hdr = (struct fc_frame_header *)h_buf->virt;
17720                if ((temp_hdr->fh_seq_id != new_hdr->fh_seq_id) ||
17721                    (temp_hdr->fh_ox_id != new_hdr->fh_ox_id) ||
17722                    (memcmp(&temp_hdr->fh_s_id, &new_hdr->fh_s_id, 3)))
17723                        continue;
17724                /* found a pending sequence that matches this frame */
17725                seq_dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
17726                break;
17727        }
17728
17729        /* Free up all the frames from the partially assembled sequence */
17730        if (seq_dmabuf) {
17731                list_for_each_entry_safe(d_buf, n_buf,
17732                                         &seq_dmabuf->dbuf.list, list) {
17733                        list_del_init(&d_buf->list);
17734                        lpfc_in_buf_free(vport->phba, d_buf);
17735                }
17736                return true;
17737        }
17738        return false;
17739}
17740
17741/**
17742 * lpfc_sli4_abort_ulp_seq - Abort assembled unsol sequence from ulp
17743 * @vport: pointer to a virtual port
17744 * @dmabuf: pointer to a dmabuf that describes the FC sequence
17745 *
17746 * This function tries to abort the assembled sequence passed to the upper
17747 * level protocol, described by the information in the basic abort @dmabuf. It
17748 * checks to see whether such a pending context exists at the upper level
17749 * protocol. If so, it cleans up the pending context.
17750 *
17751 * Return
17752 * true  -- if there is matching pending context of the sequence cleaned
17753 *          at ulp;
17754 * false -- if there is no matching pending context of the sequence present
17755 *          at ulp.
17756 **/
17757static bool
17758lpfc_sli4_abort_ulp_seq(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf)
17759{
17760        struct lpfc_hba *phba = vport->phba;
17761        int handled;
17762
17763        /* Accepting abort at ulp with SLI4 only */
17764        if (phba->sli_rev < LPFC_SLI_REV4)
17765                return false;
17766
17767        /* Give all interested upper level protocols a chance to handle it */
17768        handled = lpfc_ct_handle_unsol_abort(phba, dmabuf);
17769        if (handled)
17770                return true;
17771
17772        return false;
17773}
17774
17775/**
17776 * lpfc_sli4_seq_abort_rsp_cmpl - BLS ABORT RSP seq abort iocb complete handler
17777 * @phba: Pointer to HBA context object.
17778 * @cmd_iocbq: pointer to the command iocbq structure.
17779 * @rsp_iocbq: pointer to the response iocbq structure.
17780 *
17781 * This function handles the sequence abort response iocb command complete
17782 * event. It properly releases the memory allocated to the sequence abort
17783 * accept iocb.
17784 **/
17785static void
17786lpfc_sli4_seq_abort_rsp_cmpl(struct lpfc_hba *phba,
17787                             struct lpfc_iocbq *cmd_iocbq,
17788                             struct lpfc_iocbq *rsp_iocbq)
17789{
17790        struct lpfc_nodelist *ndlp;
17791
17792        if (cmd_iocbq) {
17793                ndlp = (struct lpfc_nodelist *)cmd_iocbq->context1;
17794                lpfc_nlp_put(ndlp);
17795                lpfc_nlp_not_used(ndlp);
17796                lpfc_sli_release_iocbq(phba, cmd_iocbq);
17797        }
17798
17799        /* Failure means BLS ABORT RSP did not get delivered to remote node*/
17800        if (rsp_iocbq && rsp_iocbq->iocb.ulpStatus)
17801                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17802                        "3154 BLS ABORT RSP failed, data:  x%x/x%x\n",
17803                        rsp_iocbq->iocb.ulpStatus,
17804                        rsp_iocbq->iocb.un.ulpWord[4]);
17805}
17806
17807/**
17808 * lpfc_sli4_xri_inrange - check xri is in range of xris owned by driver.
17809 * @phba: Pointer to HBA context object.
17810 * @xri: xri id in transaction.
17811 *
17812 * This function validates that the xri maps to the known range of XRIs
17813 * allocated and used by the driver.
17814 **/
17815uint16_t
17816lpfc_sli4_xri_inrange(struct lpfc_hba *phba,
17817                      uint16_t xri)
17818{
17819        uint16_t i;
17820
17821        for (i = 0; i < phba->sli4_hba.max_cfg_param.max_xri; i++) {
17822                if (xri == phba->sli4_hba.xri_ids[i])
17823                        return i;
17824        }
17825        return NO_XRI;
17826}
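
/*
 * Usage sketch (mirrors the caller below): translate the XRI named in
 * an ABTS back to the driver's logical index before consulting
 * per-exchange state.
 *
 *	lxri = lpfc_sli4_xri_inrange(phba, xri);
 *	if (lxri != NO_XRI)
 *		lpfc_set_rrq_active(phba, ndlp, lxri, ...);
 */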
17827
17828/**
17829 * lpfc_sli4_seq_abort_rsp - bls rsp to sequence abort
17830 * @vport: pointer to a virtual port.
17831 * @fc_hdr: pointer to a FC frame header.
17832 * @aborted: was the partially assembled receive sequence successfully aborted
17833 *
17834 * This function sends a basic response to a previous unsol sequence abort
17835 * event after aborting the sequence handling.
17836 **/
17837void
17838lpfc_sli4_seq_abort_rsp(struct lpfc_vport *vport,
17839                        struct fc_frame_header *fc_hdr, bool aborted)
17840{
17841        struct lpfc_hba *phba = vport->phba;
17842        struct lpfc_iocbq *ctiocb = NULL;
17843        struct lpfc_nodelist *ndlp;
17844        uint16_t oxid, rxid, xri, lxri;
17845        uint32_t sid, fctl;
17846        IOCB_t *icmd;
17847        int rc;
17848
17849        if (!lpfc_is_link_up(phba))
17850                return;
17851
17852        sid = sli4_sid_from_fc_hdr(fc_hdr);
17853        oxid = be16_to_cpu(fc_hdr->fh_ox_id);
17854        rxid = be16_to_cpu(fc_hdr->fh_rx_id);
17855
17856        ndlp = lpfc_findnode_did(vport, sid);
17857        if (!ndlp) {
17858                ndlp = lpfc_nlp_init(vport, sid);
17859                if (!ndlp) {
17860                        lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS,
17861                                         "1268 Failed to allocate ndlp for "
17862                                         "oxid:x%x SID:x%x\n", oxid, sid);
17863                        return;
17864                }
17865                /* Put ndlp onto pport node list */
17866                lpfc_enqueue_node(vport, ndlp);
17867        } else if (!NLP_CHK_NODE_ACT(ndlp)) {
17868                /* re-setup ndlp without removing from node list */
17869                ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
17870                if (!ndlp) {
17871                        lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS,
17872                                         "3275 Failed to activate ndlp found "
17873                                         "for oxid:x%x SID:x%x\n", oxid, sid);
17874                        return;
17875                }
17876        }
17877
17878        /* Allocate buffer for rsp iocb */
17879        ctiocb = lpfc_sli_get_iocbq(phba);
17880        if (!ctiocb)
17881                return;
17882
17883        /* Extract the F_CTL field from FC_HDR */
17884        fctl = sli4_fctl_from_fc_hdr(fc_hdr);
17885
17886        icmd = &ctiocb->iocb;
17887        icmd->un.xseq64.bdl.bdeSize = 0;
17888        icmd->un.xseq64.bdl.ulpIoTag32 = 0;
17889        icmd->un.xseq64.w5.hcsw.Dfctl = 0;
17890        icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_ACC;
17891        icmd->un.xseq64.w5.hcsw.Type = FC_TYPE_BLS;
17892
17893        /* Fill in the rest of iocb fields */
17894        icmd->ulpCommand = CMD_XMIT_BLS_RSP64_CX;
17895        icmd->ulpBdeCount = 0;
17896        icmd->ulpLe = 1;
17897        icmd->ulpClass = CLASS3;
17898        icmd->ulpContext = phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
17899        ctiocb->context1 = lpfc_nlp_get(ndlp);
17900
17901        ctiocb->vport = phba->pport;
17902        ctiocb->iocb_cmpl = lpfc_sli4_seq_abort_rsp_cmpl;
17903        ctiocb->sli4_lxritag = NO_XRI;
17904        ctiocb->sli4_xritag = NO_XRI;
17905
17906        if (fctl & FC_FC_EX_CTX)
17907                /* Exchange responder sent the abort so we
17908                 * own the oxid.
17909                 */
17910                xri = oxid;
17911        else
17912                xri = rxid;
17913        lxri = lpfc_sli4_xri_inrange(phba, xri);
17914        if (lxri != NO_XRI)
17915                lpfc_set_rrq_active(phba, ndlp, lxri,
17916                        (xri == oxid) ? rxid : oxid, 0);
17917        /* For BA_ABTS from exchange responder, if the logical xri with
17918         * the oxid maps to the FCP XRI range, the port no longer has
17919         * that exchange context, send a BLS_RJT. Override the IOCB for
17920         * a BA_RJT.
17921         */
17922        if ((fctl & FC_FC_EX_CTX) &&
17923            (lxri > lpfc_sli4_get_iocb_cnt(phba))) {
17924                icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_RJT;
17925                bf_set(lpfc_vndr_code, &icmd->un.bls_rsp, 0);
17926                bf_set(lpfc_rsn_expln, &icmd->un.bls_rsp, FC_BA_RJT_INV_XID);
17927                bf_set(lpfc_rsn_code, &icmd->un.bls_rsp, FC_BA_RJT_UNABLE);
17928        }
17929
17930        /* If BA_ABTS failed to abort a partially assembled receive sequence,
17931         * the driver no longer has that exchange, send a BLS_RJT. Override
17932         * the IOCB for a BA_RJT.
17933         */
17934        if (!aborted) {
17935                icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_RJT;
17936                bf_set(lpfc_vndr_code, &icmd->un.bls_rsp, 0);
17937                bf_set(lpfc_rsn_expln, &icmd->un.bls_rsp, FC_BA_RJT_INV_XID);
17938                bf_set(lpfc_rsn_code, &icmd->un.bls_rsp, FC_BA_RJT_UNABLE);
17939        }
17940
17941        if (fctl & FC_FC_EX_CTX) {
17942                /* ABTS sent by responder to CT exchange, construction
17943                 * of BA_ACC will use OX_ID from ABTS for the XRI_TAG
17944                 * field and RX_ID from ABTS for RX_ID field.
17945                 */
17946                bf_set(lpfc_abts_orig, &icmd->un.bls_rsp, LPFC_ABTS_UNSOL_RSP);
17947        } else {
17948                /* ABTS sent by initiator to CT exchange, construction
17949                 * of BA_ACC will need to allocate a new XRI as for the
17950                 * XRI_TAG field.
17951                 */
17952                bf_set(lpfc_abts_orig, &icmd->un.bls_rsp, LPFC_ABTS_UNSOL_INT);
17953        }
17954        bf_set(lpfc_abts_rxid, &icmd->un.bls_rsp, rxid);
17955        bf_set(lpfc_abts_oxid, &icmd->un.bls_rsp, oxid);
17956
17957        /* Xmit CT abts response on exchange <xid> */
17958        lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
17959                         "1200 Send BLS cmd x%x on oxid x%x Data: x%x\n",
17960                         icmd->un.xseq64.w5.hcsw.Rctl, oxid, phba->link_state);
17961
17962        rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, ctiocb, 0);
17963        if (rc == IOCB_ERROR) {
17964                lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
17965                                 "2925 Failed to issue CT ABTS RSP x%x on "
17966                                 "xri x%x, Data x%x\n",
17967                                 icmd->un.xseq64.w5.hcsw.Rctl, oxid,
17968                                 phba->link_state);
17969                lpfc_nlp_put(ndlp);
17970                ctiocb->context1 = NULL;
17971                lpfc_sli_release_iocbq(phba, ctiocb);
17972        }
17973}
17974
17975/**
17976 * lpfc_sli4_handle_unsol_abort - Handle sli-4 unsolicited abort event
17977 * @vport: Pointer to the vport on which this sequence was received
17978 * @dmabuf: pointer to a dmabuf that describes the FC sequence
17979 *
17980 * This function handles an SLI-4 unsolicited abort event. If the unsolicited
17981 * receive sequence is only partially assembled by the driver, it shall abort
17982 * the partially assembled frames for the sequence. Otherwise, if the
17983 * unsolicited receive sequence has been completely assembled and passed to
17984 * the Upper Layer Protocol (ULP), it then marks the per-oxid status to
17985 * indicate that the unsolicited sequence has been aborted. After that, it
17986 * will issue a basic accept (BA_ACC) to accept the abort.
17987 **/
17988static void
17989lpfc_sli4_handle_unsol_abort(struct lpfc_vport *vport,
17990                             struct hbq_dmabuf *dmabuf)
17991{
17992        struct lpfc_hba *phba = vport->phba;
17993        struct fc_frame_header fc_hdr;
17994        uint32_t fctl;
17995        bool aborted;
17996
17997        /* Make a copy of fc_hdr before the dmabuf being released */
17998        memcpy(&fc_hdr, dmabuf->hbuf.virt, sizeof(struct fc_frame_header));
17999        fctl = sli4_fctl_from_fc_hdr(&fc_hdr);
18000
18001        if (fctl & FC_FC_EX_CTX) {
18002                /* ABTS by responder to exchange, no cleanup needed */
18003                aborted = true;
18004        } else {
18005                /* ABTS by initiator to exchange, need to do cleanup */
18006                aborted = lpfc_sli4_abort_partial_seq(vport, dmabuf);
18007                if (!aborted)
18008                        aborted = lpfc_sli4_abort_ulp_seq(vport, dmabuf);
18009        }
18010        lpfc_in_buf_free(phba, &dmabuf->dbuf);
18011
18012        if (phba->nvmet_support) {
18013                lpfc_nvmet_rcv_unsol_abort(vport, &fc_hdr);
18014                return;
18015        }
18016
18017        /* Respond with BA_ACC or BA_RJT accordingly */
18018        lpfc_sli4_seq_abort_rsp(vport, &fc_hdr, aborted);
18019}
18020
18021/**
18022 * lpfc_seq_complete - Indicates if a sequence is complete
18023 * @dmabuf: pointer to a dmabuf that describes the FC sequence
18024 *
18025 * This function checks the sequence, starting with the frame described by
18026 * @dmabuf, to see if all the frames associated with this sequence are present.
18027 * The frames associated with this sequence are linked to the @dmabuf using
18028 * the dbuf list. This function looks for three major things. 1) That the
18029 * first frame has a sequence count of zero. 2) That there is a frame with the
18030 * last-frame-of-sequence bit set. 3) That there are no holes in the sequence
18031 * count. The function returns 1 when the sequence is complete, otherwise 0.
18032 **/
18033static int
18034lpfc_seq_complete(struct hbq_dmabuf *dmabuf)
18035{
18036        struct fc_frame_header *hdr;
18037        struct lpfc_dmabuf *d_buf;
18038        struct hbq_dmabuf *seq_dmabuf;
18039        uint32_t fctl;
18040        int seq_count = 0;
18041
18042        hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
18043        /* make sure first frame of sequence has a sequence count of zero */
18044        if (hdr->fh_seq_cnt != seq_count)
18045                return 0;
18046        fctl = (hdr->fh_f_ctl[0] << 16 |
18047                hdr->fh_f_ctl[1] << 8 |
18048                hdr->fh_f_ctl[2]);
18049        /* If last frame of sequence we can return success. */
18050        if (fctl & FC_FC_END_SEQ)
18051                return 1;
18052        list_for_each_entry(d_buf, &dmabuf->dbuf.list, list) {
18053                seq_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
18054                hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
18055                /* If there is a hole in the sequence count then fail. */
18056                if (++seq_count != be16_to_cpu(hdr->fh_seq_cnt))
18057                        return 0;
18058                fctl = (hdr->fh_f_ctl[0] << 16 |
18059                        hdr->fh_f_ctl[1] << 8 |
18060                        hdr->fh_f_ctl[2]);
18061                /* If last frame of sequence we can return success. */
18062                if (fctl & FC_FC_END_SEQ)
18063                        return 1;
18064        }
18065        return 0;
18066}
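
/*
 * Editorial sketch: F_CTL is a 24-bit field carried as three bytes in
 * the FC header, so it is rebuilt by shifting the bytes together as
 * done twice above.  Hypothetical helper form:
 */
static inline uint32_t
lpfc_example_fctl(const struct fc_frame_header *hdr)
{
        return (hdr->fh_f_ctl[0] << 16) |
               (hdr->fh_f_ctl[1] << 8) |
                hdr->fh_f_ctl[2];
}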
18067
18068/**
18069 * lpfc_prep_seq - Prep sequence for ULP processing
18070 * @vport: Pointer to the vport on which this sequence was received
18071 * @seq_dmabuf: pointer to a dmabuf that describes the FC sequence
18072 *
18073 * This function takes a sequence, described by a list of frames, and creates
18074 * a list of iocbq structures to describe the sequence. This iocbq list will be
18075 * used to issue to the generic unsolicited sequence handler. This routine
18076 * returns a pointer to the first iocbq in the list. If the function is unable
18077 * to allocate an iocbq then it throws out the received frames that were not
18078 * able to be described and returns a pointer to the first iocbq. If unable to
18079 * allocate any iocbqs (including the first) this function will return NULL.
18080 **/
18081static struct lpfc_iocbq *
18082lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf)
18083{
18084        struct hbq_dmabuf *hbq_buf;
18085        struct lpfc_dmabuf *d_buf, *n_buf;
18086        struct lpfc_iocbq *first_iocbq, *iocbq;
18087        struct fc_frame_header *fc_hdr;
18088        uint32_t sid;
18089        uint32_t len, tot_len;
18090        struct ulp_bde64 *pbde;
18091
18092        fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
18093        /* remove from receive buffer list */
18094        list_del_init(&seq_dmabuf->hbuf.list);
18095        lpfc_update_rcv_time_stamp(vport);
18096        /* get the Remote Port's SID */
18097        sid = sli4_sid_from_fc_hdr(fc_hdr);
18098        tot_len = 0;
18099        /* Get an iocbq struct to fill in. */
18100        first_iocbq = lpfc_sli_get_iocbq(vport->phba);
18101        if (first_iocbq) {
18102                /* Initialize the first IOCB. */
18103                first_iocbq->iocb.unsli3.rcvsli3.acc_len = 0;
18104                first_iocbq->iocb.ulpStatus = IOSTAT_SUCCESS;
18105                first_iocbq->vport = vport;
18106
18107                /* Check FC Header to see what TYPE of frame we are rcv'ing */
18108                if (sli4_type_from_fc_hdr(fc_hdr) == FC_TYPE_ELS) {
18109                        first_iocbq->iocb.ulpCommand = CMD_IOCB_RCV_ELS64_CX;
18110                        first_iocbq->iocb.un.rcvels.parmRo =
18111                                sli4_did_from_fc_hdr(fc_hdr);
18112                        first_iocbq->iocb.ulpPU = PARM_NPIV_DID;
18113                } else
18114                        first_iocbq->iocb.ulpCommand = CMD_IOCB_RCV_SEQ64_CX;
18115                first_iocbq->iocb.ulpContext = NO_XRI;
18116                first_iocbq->iocb.unsli3.rcvsli3.ox_id =
18117                        be16_to_cpu(fc_hdr->fh_ox_id);
18118                /* iocbq is prepped for internal consumption.  Physical vpi. */
18119                first_iocbq->iocb.unsli3.rcvsli3.vpi =
18120                        vport->phba->vpi_ids[vport->vpi];
18121                /* put the first buffer into the first IOCBq */
18122                tot_len = bf_get(lpfc_rcqe_length,
18123                                       &seq_dmabuf->cq_event.cqe.rcqe_cmpl);
18124
18125                first_iocbq->context2 = &seq_dmabuf->dbuf;
18126                first_iocbq->context3 = NULL;
18127                first_iocbq->iocb.ulpBdeCount = 1;
18128                if (tot_len > LPFC_DATA_BUF_SIZE)
18129                        first_iocbq->iocb.un.cont64[0].tus.f.bdeSize =
18130                                                        LPFC_DATA_BUF_SIZE;
18131                else
18132                        first_iocbq->iocb.un.cont64[0].tus.f.bdeSize = tot_len;
18133
18134                first_iocbq->iocb.un.rcvels.remoteID = sid;
18135
18136                first_iocbq->iocb.unsli3.rcvsli3.acc_len = tot_len;
18137        }
18138        iocbq = first_iocbq;
18139        /*
18140         * Each IOCBq can have two Buffers assigned, so go through the list
18141         * of buffers for this sequence and save two buffers in each IOCBq
18142         */
18143        list_for_each_entry_safe(d_buf, n_buf, &seq_dmabuf->dbuf.list, list) {
18144                if (!iocbq) {
18145                        lpfc_in_buf_free(vport->phba, d_buf);
18146                        continue;
18147                }
18148                if (!iocbq->context3) {
18149                        iocbq->context3 = d_buf;
18150                        iocbq->iocb.ulpBdeCount++;
18151                        /* We need to get the size out of the right CQE */
18152                        hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
18153                        len = bf_get(lpfc_rcqe_length,
18154                                       &hbq_buf->cq_event.cqe.rcqe_cmpl);
18155                        pbde = (struct ulp_bde64 *)
18156                                        &iocbq->iocb.unsli3.sli3Words[4];
18157                        if (len > LPFC_DATA_BUF_SIZE)
18158                                pbde->tus.f.bdeSize = LPFC_DATA_BUF_SIZE;
18159                        else
18160                                pbde->tus.f.bdeSize = len;
18161
18162                        iocbq->iocb.unsli3.rcvsli3.acc_len += len;
18163                        tot_len += len;
18164                } else {
18165                        iocbq = lpfc_sli_get_iocbq(vport->phba);
18166                        if (!iocbq) {
18167                                if (first_iocbq) {
18168                                        first_iocbq->iocb.ulpStatus =
18169                                                        IOSTAT_FCP_RSP_ERROR;
18170                                        first_iocbq->iocb.un.ulpWord[4] =
18171                                                        IOERR_NO_RESOURCES;
18172                                }
18173                                lpfc_in_buf_free(vport->phba, d_buf);
18174                                continue;
18175                        }
18176                        /* We need to get the size out of the right CQE */
18177                        hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
18178                        len = bf_get(lpfc_rcqe_length,
18179                                       &hbq_buf->cq_event.cqe.rcqe_cmpl);
18180                        iocbq->context2 = d_buf;
18181                        iocbq->context3 = NULL;
18182                        iocbq->iocb.ulpBdeCount = 1;
18183                        if (len > LPFC_DATA_BUF_SIZE)
18184                                iocbq->iocb.un.cont64[0].tus.f.bdeSize =
18185                                                        LPFC_DATA_BUF_SIZE;
18186                        else
18187                                iocbq->iocb.un.cont64[0].tus.f.bdeSize = len;
18188
18189                        tot_len += len;
18190                        iocbq->iocb.unsli3.rcvsli3.acc_len = tot_len;
18191
18192                        iocbq->iocb.un.rcvels.remoteID = sid;
18193                        list_add_tail(&iocbq->list, &first_iocbq->list);
18194                }
18195        }
18196        /* Free the sequence's header buffer */
18197        if (!first_iocbq)
18198                lpfc_in_buf_free(vport->phba, &seq_dmabuf->dbuf);
18199
18200        return first_iocbq;
18201}
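
/*
 * Editorial sketch: every BDE above is clamped to the receive buffer
 * size; payload beyond one buffer is carried by additional buffers and
 * additional IOCBs.  The clamp itself (illustrative only):
 */
static inline uint32_t
lpfc_example_bde_size(uint32_t len)
{
        return min_t(uint32_t, len, LPFC_DATA_BUF_SIZE);
}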
18202
18203static void
18204lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *vport,
18205                          struct hbq_dmabuf *seq_dmabuf)
18206{
18207        struct fc_frame_header *fc_hdr;
18208        struct lpfc_iocbq *iocbq, *curr_iocb, *next_iocb;
18209        struct lpfc_hba *phba = vport->phba;
18210
18211        fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
18212        iocbq = lpfc_prep_seq(vport, seq_dmabuf);
18213        if (!iocbq) {
18214                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
18215                                "2707 Ring %d handler: Failed to allocate "
18216                                "iocb Rctl x%x Type x%x received\n",
18217                                LPFC_ELS_RING,
18218                                fc_hdr->fh_r_ctl, fc_hdr->fh_type);
18219                return;
18220        }
18221        if (!lpfc_complete_unsol_iocb(phba,
18222                                      phba->sli4_hba.els_wq->pring,
18223                                      iocbq, fc_hdr->fh_r_ctl,
18224                                      fc_hdr->fh_type))
18225                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
18226                                "2540 Ring %d handler: unexpected Rctl "
18227                                "x%x Type x%x received\n",
18228                                LPFC_ELS_RING,
18229                                fc_hdr->fh_r_ctl, fc_hdr->fh_type);
18230
18231        /* Free iocb created in lpfc_prep_seq */
18232        list_for_each_entry_safe(curr_iocb, next_iocb,
18233                &iocbq->list, list) {
18234                list_del_init(&curr_iocb->list);
18235                lpfc_sli_release_iocbq(phba, curr_iocb);
18236        }
18237        lpfc_sli_release_iocbq(phba, iocbq);
18238}
18239
18240static void
18241lpfc_sli4_mds_loopback_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
18242                            struct lpfc_iocbq *rspiocb)
18243{
18244        struct lpfc_dmabuf *pcmd = cmdiocb->context2;
18245
18246        if (pcmd && pcmd->virt)
18247                dma_pool_free(phba->lpfc_drb_pool, pcmd->virt, pcmd->phys);
18248        kfree(pcmd);
18249        lpfc_sli_release_iocbq(phba, cmdiocb);
18250        lpfc_drain_txq(phba);
18251}
18252
18253static void
18254lpfc_sli4_handle_mds_loopback(struct lpfc_vport *vport,
18255                              struct hbq_dmabuf *dmabuf)
18256{
18257        struct fc_frame_header *fc_hdr;
18258        struct lpfc_hba *phba = vport->phba;
18259        struct lpfc_iocbq *iocbq = NULL;
18260        union  lpfc_wqe *wqe;
18261        struct lpfc_dmabuf *pcmd = NULL;
18262        uint32_t frame_len;
18263        int rc;
18264        unsigned long iflags;
18265
18266        fc_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
18267        frame_len = bf_get(lpfc_rcqe_length, &dmabuf->cq_event.cqe.rcqe_cmpl);
18268
18269        /* Send the received frame back */
18270        iocbq = lpfc_sli_get_iocbq(phba);
18271        if (!iocbq) {
18272                /* Queue cq event and wakeup worker thread to process it */
18273                spin_lock_irqsave(&phba->hbalock, iflags);
18274                list_add_tail(&dmabuf->cq_event.list,
18275                              &phba->sli4_hba.sp_queue_event);
18276                phba->hba_flag |= HBA_SP_QUEUE_EVT;
18277                spin_unlock_irqrestore(&phba->hbalock, iflags);
18278                lpfc_worker_wake_up(phba);
18279                return;
18280        }
18281
18282        /* Allocate buffer for command payload */
18283        pcmd = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
18284        if (pcmd)
18285                pcmd->virt = dma_pool_alloc(phba->lpfc_drb_pool, GFP_KERNEL,
18286                                            &pcmd->phys);
18287        if (!pcmd || !pcmd->virt)
18288                goto exit;
18289
18290        INIT_LIST_HEAD(&pcmd->list);
18291
18292        /* copyin the payload */
18293        memcpy(pcmd->virt, dmabuf->dbuf.virt, frame_len);
18294
18295        /* fill in BDE's for command */
18296        iocbq->iocb.un.xseq64.bdl.addrHigh = putPaddrHigh(pcmd->phys);
18297        iocbq->iocb.un.xseq64.bdl.addrLow = putPaddrLow(pcmd->phys);
18298        iocbq->iocb.un.xseq64.bdl.bdeFlags = BUFF_TYPE_BDE_64;
18299        iocbq->iocb.un.xseq64.bdl.bdeSize = frame_len;
18300
18301        iocbq->context2 = pcmd;
18302        iocbq->vport = vport;
18303        iocbq->iocb_flag &= ~LPFC_FIP_ELS_ID_MASK;
18304        iocbq->iocb_flag |= LPFC_USE_FCPWQIDX;
18305
18306        /*
18307         * Setup rest of the iocb as though it were a WQE
18308         * Build the SEND_FRAME WQE
18309         */
18310        wqe = (union lpfc_wqe *)&iocbq->iocb;
18311
18312        wqe->send_frame.frame_len = frame_len;
18313        wqe->send_frame.fc_hdr_wd0 = be32_to_cpu(*((uint32_t *)fc_hdr));
18314        wqe->send_frame.fc_hdr_wd1 = be32_to_cpu(*((uint32_t *)fc_hdr + 1));
18315        wqe->send_frame.fc_hdr_wd2 = be32_to_cpu(*((uint32_t *)fc_hdr + 2));
18316        wqe->send_frame.fc_hdr_wd3 = be32_to_cpu(*((uint32_t *)fc_hdr + 3));
18317        wqe->send_frame.fc_hdr_wd4 = be32_to_cpu(*((uint32_t *)fc_hdr + 4));
18318        wqe->send_frame.fc_hdr_wd5 = be32_to_cpu(*((uint32_t *)fc_hdr + 5));
18319
18320        iocbq->iocb.ulpCommand = CMD_SEND_FRAME;
18321        iocbq->iocb.ulpLe = 1;
18322        iocbq->iocb_cmpl = lpfc_sli4_mds_loopback_cmpl;
18323        rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, iocbq, 0);
18324        if (rc == IOCB_ERROR)
18325                goto exit;
18326
18327        lpfc_in_buf_free(phba, &dmabuf->dbuf);
18328        return;
18329
18330exit:
18331        lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
18332                        "2023 Unable to process MDS loopback frame\n");
18333        if (pcmd && pcmd->virt)
18334                dma_pool_free(phba->lpfc_drb_pool, pcmd->virt, pcmd->phys);
18335        kfree(pcmd);
18336        if (iocbq)
18337                lpfc_sli_release_iocbq(phba, iocbq);
18338        lpfc_in_buf_free(phba, &dmabuf->dbuf);
18339}
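
/*
 * Editorial sketch: the FC frame header is exactly six 32-bit words
 * (24 bytes); the SEND_FRAME WQE setup above byte-swaps each word into
 * fc_hdr_wd0..fc_hdr_wd5.  Equivalent loop form into a scratch array
 * (hypothetical helper, not driver code):
 */
static inline void
lpfc_example_hdr_to_words(const struct fc_frame_header *hdr, uint32_t wd[6])
{
        const __be32 *src = (const __be32 *)hdr;
        int i;

        for (i = 0; i < 6; i++)
                wd[i] = be32_to_cpu(src[i]);
}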
18340
18341/**
18342 * lpfc_sli4_handle_received_buffer - Handle received buffers from firmware
18343 * @phba: Pointer to HBA context object.
18344 * @dmabuf: Pointer to a dmabuf that describes the FC sequence.
18345 *
18346 * This function is called with no lock held. This function processes all
18347 * the received buffers and gives it to upper layers when a received buffer
18348 * indicates that it is the final frame in the sequence. The interrupt
18349 * service routine processes received buffers at interrupt contexts.
18350 * Worker thread calls lpfc_sli4_handle_received_buffer, which will call the
18351 * appropriate receive function when the final frame in a sequence is received.
18352 **/
18353void
18354lpfc_sli4_handle_received_buffer(struct lpfc_hba *phba,
18355                                 struct hbq_dmabuf *dmabuf)
18356{
18357        struct hbq_dmabuf *seq_dmabuf;
18358        struct fc_frame_header *fc_hdr;
18359        struct lpfc_vport *vport;
18360        uint32_t fcfi;
18361        uint32_t did;
18362
18363        /* Process each received buffer */
18364        fc_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
18365
18366        if (fc_hdr->fh_r_ctl == FC_RCTL_MDS_DIAGS ||
18367            fc_hdr->fh_r_ctl == FC_RCTL_DD_UNSOL_DATA) {
18368                vport = phba->pport;
18369                /* Handle MDS Loopback frames */
18370                if (!(phba->pport->load_flag & FC_UNLOADING))
18371                        lpfc_sli4_handle_mds_loopback(vport, dmabuf);
18372                else
18373                        lpfc_in_buf_free(phba, &dmabuf->dbuf);
18374                return;
18375        }
18376
18377        /* check to see if this a valid type of frame */
18378        if (lpfc_fc_frame_check(phba, fc_hdr)) {
18379                lpfc_in_buf_free(phba, &dmabuf->dbuf);
18380                return;
18381        }
18382
18383        if ((bf_get(lpfc_cqe_code,
18384                    &dmabuf->cq_event.cqe.rcqe_cmpl) == CQE_CODE_RECEIVE_V1))
18385                fcfi = bf_get(lpfc_rcqe_fcf_id_v1,
18386                              &dmabuf->cq_event.cqe.rcqe_cmpl);
18387        else
18388                fcfi = bf_get(lpfc_rcqe_fcf_id,
18389                              &dmabuf->cq_event.cqe.rcqe_cmpl);
18390
18391        if (fc_hdr->fh_r_ctl == FC_RCTL_MDS_DIAGS && fc_hdr->fh_type == 0xFF) {
18392                vport = phba->pport;
18393                lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
18394                                "2023 MDS Loopback %d bytes\n",
18395                                bf_get(lpfc_rcqe_length,
18396                                       &dmabuf->cq_event.cqe.rcqe_cmpl));
18397                /* Handle MDS Loopback frames */
18398                lpfc_sli4_handle_mds_loopback(vport, dmabuf);
18399                return;
18400        }
18401
18402        /* d_id this frame is directed to */
18403        did = sli4_did_from_fc_hdr(fc_hdr);
18404
18405        vport = lpfc_fc_frame_to_vport(phba, fc_hdr, fcfi, did);
18406        if (!vport) {
18407                /* throw out the frame */
18408                lpfc_in_buf_free(phba, &dmabuf->dbuf);
18409                return;
18410        }
18411
18412        /* vport is registered unless we rcv a FLOGI directed to Fabric_DID */
18413        if (!(vport->vpi_state & LPFC_VPI_REGISTERED) &&
18414                (did != Fabric_DID)) {
18415                /*
18416                 * Throw out the frame if we are not pt2pt.
18417                 * The pt2pt protocol allows for discovery frames
18418                 * to be received without a registered VPI.
18419                 */
18420                if (!(vport->fc_flag & FC_PT2PT) ||
18421                        (phba->link_state == LPFC_HBA_READY)) {
18422                        lpfc_in_buf_free(phba, &dmabuf->dbuf);
18423                        return;
18424                }
18425        }
18426
18427        /* Handle the basic abort sequence (BA_ABTS) event */
18428        if (fc_hdr->fh_r_ctl == FC_RCTL_BA_ABTS) {
18429                lpfc_sli4_handle_unsol_abort(vport, dmabuf);
18430                return;
18431        }
18432
18433        /* Link this frame */
18434        seq_dmabuf = lpfc_fc_frame_add(vport, dmabuf);
18435        if (!seq_dmabuf) {
18436                /* unable to add frame to vport - throw it out */
18437                lpfc_in_buf_free(phba, &dmabuf->dbuf);
18438                return;
18439        }
18440        /* If not last frame in sequence continue processing frames. */
18441        if (!lpfc_seq_complete(seq_dmabuf))
18442                return;
18443
18444        /* Send the complete sequence to the upper layer protocol */
18445        lpfc_sli4_send_seq_to_ulp(vport, seq_dmabuf);
18446}
18447
18448/**
18449 * lpfc_sli4_post_all_rpi_hdrs - Post the rpi header memory region to the port
18450 * @phba: pointer to lpfc hba data structure.
18451 *
18452 * This routine is invoked to post rpi header templates to the
18453 * HBA consistent with the SLI-4 interface spec.  This routine
18454 * posts a SLI4_PAGE_SIZE memory region to the port to hold up to
18455 * SLI4_PAGE_SIZE / 64 rpi context headers.
18456 *
18457 * This routine does not require any locks.  Its usage is expected
18458 * to be at driver load or reset recovery, when driver execution is
18459 * sequential.
18460 *
18461 * Return codes
18462 *      0 - successful
18463 *      -EIO - The mailbox failed to complete successfully.
18464 *      When this error occurs, the driver is not guaranteed
18465 *      to have any rpi regions posted to the device and
18466 *      must either attempt to repost the regions or take a
18467 *      fatal error.
18468 **/
18469int
18470lpfc_sli4_post_all_rpi_hdrs(struct lpfc_hba *phba)
18471{
18472        struct lpfc_rpi_hdr *rpi_page;
18473        uint32_t rc = 0;
18474        uint16_t lrpi = 0;
18475
18476        /* SLI4 ports that support extents do not require RPI headers. */
18477        if (!phba->sli4_hba.rpi_hdrs_in_use)
18478                goto exit;
18479        if (phba->sli4_hba.extents_in_use)
18480                return -EIO;
18481
18482        list_for_each_entry(rpi_page, &phba->sli4_hba.lpfc_rpi_hdr_list, list) {
18483                /*
18484                 * Assign the rpi headers a physical rpi only if the driver
18485                 * has not initialized those resources.  A port reset only
18486                 * needs the headers posted.
18487                 */
18488                if (bf_get(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags) !=
18489                    LPFC_RPI_RSRC_RDY)
18490                        rpi_page->start_rpi = phba->sli4_hba.rpi_ids[lrpi];
18491
18492                rc = lpfc_sli4_post_rpi_hdr(phba, rpi_page);
18493                if (rc != MBX_SUCCESS) {
18494                        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
18495                                        "2008 Error %d posting all rpi "
18496                                        "headers\n", rc);
18497                        rc = -EIO;
18498                        break;
18499                }
18500        }
18501
18502 exit:
18503        bf_set(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags,
18504               LPFC_RPI_RSRC_RDY);
18505        return rc;
18506}
18507
18508/**
18509 * lpfc_sli4_post_rpi_hdr - Post an rpi header memory region to the port
18510 * @phba: pointer to lpfc hba data structure.
18511 * @rpi_page:  pointer to the rpi memory region.
18512 *
18513 * This routine is invoked to post a single rpi header to the
18514 * HBA consistent with the SLI-4 interface spec.  This memory region
18515 * maps up to 64 rpi context regions.
18516 *
18517 * Return codes
18518 *      0 - successful
18519 *      -ENOMEM - No available memory
18520 *      -EIO - The mailbox failed to complete successfully.
18521 **/
18522int
18523lpfc_sli4_post_rpi_hdr(struct lpfc_hba *phba, struct lpfc_rpi_hdr *rpi_page)
18524{
18525        LPFC_MBOXQ_t *mboxq;
18526        struct lpfc_mbx_post_hdr_tmpl *hdr_tmpl;
18527        uint32_t rc = 0;
18528        uint32_t shdr_status, shdr_add_status;
18529        union lpfc_sli4_cfg_shdr *shdr;
18530
18531        /* SLI4 ports that support extents do not require RPI headers. */
18532        if (!phba->sli4_hba.rpi_hdrs_in_use)
18533                return rc;
18534        if (phba->sli4_hba.extents_in_use)
18535                return -EIO;
18536
18537        /* The port is notified of the header region via a mailbox command. */
18538        mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
18539        if (!mboxq) {
18540                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
18541                                "2001 Unable to allocate memory for issuing "
18542                                "SLI_CONFIG_SPECIAL mailbox command\n");
18543                return -ENOMEM;
18544        }
18545
18546        /* Post all rpi memory regions to the port. */
18547        hdr_tmpl = &mboxq->u.mqe.un.hdr_tmpl;
18548        lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
18549                         LPFC_MBOX_OPCODE_FCOE_POST_HDR_TEMPLATE,
18550                         sizeof(struct lpfc_mbx_post_hdr_tmpl) -
18551                         sizeof(struct lpfc_sli4_cfg_mhdr),
18552                         LPFC_SLI4_MBX_EMBED);
18553
18554
18555        /* Post the physical rpi to the port for this rpi header. */
18556        bf_set(lpfc_mbx_post_hdr_tmpl_rpi_offset, hdr_tmpl,
18557               rpi_page->start_rpi);
18558        bf_set(lpfc_mbx_post_hdr_tmpl_page_cnt,
18559               hdr_tmpl, rpi_page->page_count);
18560
18561        hdr_tmpl->rpi_paddr_lo = putPaddrLow(rpi_page->dmabuf->phys);
18562        hdr_tmpl->rpi_paddr_hi = putPaddrHigh(rpi_page->dmabuf->phys);
18563        rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
18564        shdr = (union lpfc_sli4_cfg_shdr *) &hdr_tmpl->header.cfg_shdr;
18565        shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
18566        shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
18567        if (rc != MBX_TIMEOUT)
18568                mempool_free(mboxq, phba->mbox_mem_pool);
18569        if (shdr_status || shdr_add_status || rc) {
18570                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
18571                                "2514 POST_RPI_HDR mailbox failed with "
18572                                "status x%x add_status x%x, mbx status x%x\n",
18573                                shdr_status, shdr_add_status, rc);
18574                rc = -ENXIO;
18575        } else {
18576                /*
18577                 * The next_rpi stores the next logical module-64 rpi value used
18578                 * to post physical rpis in subsequent rpi postings.
18579                 */
18580                spin_lock_irq(&phba->hbalock);
18581                phba->sli4_hba.next_rpi = rpi_page->next_rpi;
18582                spin_unlock_irq(&phba->hbalock);
18583        }
18584        return rc;
18585}
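
/*
 * Editorial sketch: SLI4 config mailboxes report status in two places,
 * the mailbox return code and the embedded cfg_shdr words; the check
 * above fails if any of the three is non-zero.  Predicate form
 * (illustrative only):
 */
static inline bool
lpfc_example_mbx_ok(uint32_t rc, uint32_t shdr_status,
                    uint32_t shdr_add_status)
{
        return !rc && !shdr_status && !shdr_add_status;
}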
18586
18587/**
18588 * lpfc_sli4_alloc_rpi - Get an available rpi in the device's range
18589 * @phba: pointer to lpfc hba data structure.
18590 *
18591 * This routine is invoked to allocate an available rpi from the pool
18592 * of rpis maintained by the driver. If the driver is running low on
18593 * rpi resources, it also attempts to post another rpi header page to
18594 * the port.
18595 *
18596 * Returns
18597 *      A nonzero rpi defined as rpi_base <= rpi < max_rpi if successful
18598 *      LPFC_RPI_ALLOC_ERROR if no rpis are available.
18599 **/
18600int
18601lpfc_sli4_alloc_rpi(struct lpfc_hba *phba)
18602{
18603        unsigned long rpi;
18604        uint16_t max_rpi, rpi_limit;
18605        uint16_t rpi_remaining, lrpi = 0;
18606        struct lpfc_rpi_hdr *rpi_hdr;
18607        unsigned long iflag;
18608
18609        /*
18610         * Fetch the next logical rpi.  Because this index is logical,
18611         * the  driver starts at 0 each time.
18612         */
18613        spin_lock_irqsave(&phba->hbalock, iflag);
18614        max_rpi = phba->sli4_hba.max_cfg_param.max_rpi;
18615        rpi_limit = phba->sli4_hba.next_rpi;
18616
18617        rpi = find_next_zero_bit(phba->sli4_hba.rpi_bmask, rpi_limit, 0);
18618        if (rpi >= rpi_limit)
18619                rpi = LPFC_RPI_ALLOC_ERROR;
18620        else {
18621                set_bit(rpi, phba->sli4_hba.rpi_bmask);
18622                phba->sli4_hba.max_cfg_param.rpi_used++;
18623                phba->sli4_hba.rpi_count++;
18624        }
18625        lpfc_printf_log(phba, KERN_INFO,
18626                        LOG_NODE | LOG_DISCOVERY,
18627                        "0001 Allocated rpi:x%x max:x%x lim:x%x\n",
18628                        (int) rpi, max_rpi, rpi_limit);
18629
18630        /*
18631         * Don't try to allocate more rpi header regions if the device limit
18632         * has been exhausted.
18633         */
18634        if ((rpi == LPFC_RPI_ALLOC_ERROR) &&
18635            (phba->sli4_hba.rpi_count >= max_rpi)) {
18636                spin_unlock_irqrestore(&phba->hbalock, iflag);
18637                return rpi;
18638        }
18639
18640        /*
18641         * RPI header postings are not required for SLI4 ports capable of
18642         * extents.
18643         */
18644        if (!phba->sli4_hba.rpi_hdrs_in_use) {
18645                spin_unlock_irqrestore(&phba->hbalock, iflag);
18646                return rpi;
18647        }
18648
18649        /*
18650         * If the driver is running low on rpi resources, allocate another
18651         * page now.  Note that the next_rpi value is used because
18652         * it represents how many rpis are currently posted, whereas
18653         * max_rpi is the maximum number the device supports.
18654         */
18655        rpi_remaining = phba->sli4_hba.next_rpi - phba->sli4_hba.rpi_count;
18656        spin_unlock_irqrestore(&phba->hbalock, iflag);
18657        if (rpi_remaining < LPFC_RPI_LOW_WATER_MARK) {
18658                rpi_hdr = lpfc_sli4_create_rpi_hdr(phba);
18659                if (!rpi_hdr) {
18660                        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
18661                                        "2002 Error Could not grow rpi "
18662                                        "count\n");
18663                } else {
18664                        lrpi = rpi_hdr->start_rpi;
18665                        rpi_hdr->start_rpi = phba->sli4_hba.rpi_ids[lrpi];
18666                        lpfc_sli4_post_rpi_hdr(phba, rpi_hdr);
18667                }
18668        }
18669
18670        return rpi;
18671}
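
/*
 * Illustrative sketch (not driver code): lpfc_sli4_alloc_rpi() above follows
 * the kernel's generic bitmap-allocator pattern.  Assuming a bitmap 'bmask'
 * sized for 'limit' ids and guarded by 'lock' (ALLOC_ERROR standing in for
 * LPFC_RPI_ALLOC_ERROR), the lock keeps the find/set pair atomic with
 * respect to other allocators:
 *
 *	spin_lock_irqsave(&lock, flags);
 *	id = find_next_zero_bit(bmask, limit, 0);
 *	if (id < limit)
 *		set_bit(id, bmask);
 *	spin_unlock_irqrestore(&lock, flags);
 *	return (id < limit) ? id : ALLOC_ERROR;
 *
 * The low-water check afterwards grows the backing rpi header pages before
 * the bitmap runs dry.
 */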
18672
18673/**
18674 * __lpfc_sli4_free_rpi - Release an rpi for reuse.
18675 * @phba: pointer to lpfc hba data structure.
18676 * @rpi: rpi to free
18677 *
18678 * This routine is invoked to release an rpi to the pool of
18679 * available rpis maintained by the driver.  The hbalock must be held.
18680 **/
18681static void
18682__lpfc_sli4_free_rpi(struct lpfc_hba *phba, int rpi)
18683{
18684        /*
18685         * if the rpi value indicates a prior unreg has already
18686         * been done, skip the unreg.
18687         */
18688        if (rpi == LPFC_RPI_ALLOC_ERROR)
18689                return;
18690
18691        if (test_and_clear_bit(rpi, phba->sli4_hba.rpi_bmask)) {
18692                phba->sli4_hba.rpi_count--;
18693                phba->sli4_hba.max_cfg_param.rpi_used--;
18694        } else {
18695                lpfc_printf_log(phba, KERN_INFO,
18696                                LOG_NODE | LOG_DISCOVERY,
18697                                "2016 rpi %x not inuse\n",
18698                                rpi);
18699        }
18700}
18701
18702/**
18703 * lpfc_sli4_free_rpi - Release an rpi for reuse.
18704 * @phba: pointer to lpfc hba data structure.
18705 * @rpi: rpi to free
18706 *
18707 * This routine is invoked to release an rpi to the pool of
18708 * available rpis maintained by the driver.
18709 **/
18710void
18711lpfc_sli4_free_rpi(struct lpfc_hba *phba, int rpi)
18712{
18713        spin_lock_irq(&phba->hbalock);
18714        __lpfc_sli4_free_rpi(phba, rpi);
18715        spin_unlock_irq(&phba->hbalock);
18716}
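
/*
 * Sketch of the matching release path above (names illustrative): a single
 * test_and_clear_bit() both frees the id and detects double frees, so the
 * usage counters only move when the id was really allocated:
 *
 *	if (test_and_clear_bit(id, bmask))
 *		count--;
 *	else
 *		pr_info("id %d not in use\n", id);
 */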
18717
18718/**
18719 * lpfc_sli4_remove_rpis - Remove the rpi bitmask region
18720 * @phba: pointer to lpfc hba data structure.
18721 *
18722 * This routine is invoked to free the rpi bitmask and rpi id table
18723 * maintained by the driver and to mark the rpi resources as not ready.
18724 **/
18725void
18726lpfc_sli4_remove_rpis(struct lpfc_hba *phba)
18727{
18728        kfree(phba->sli4_hba.rpi_bmask);
18729        kfree(phba->sli4_hba.rpi_ids);
18730        bf_set(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
18731}
18732
18733/**
18734 * lpfc_sli4_resume_rpi - Resume an rpi with the port
18735 * @ndlp: pointer to lpfc nodelist data structure.
18736 * @cmpl: completion call-back.
18737 * @arg: data to load as mbox 'caller buffer information'
18738 *
18739 * This routine is invoked to issue a RESUME_RPI mailbox command for the
18740 * rpi associated with @ndlp.  It returns 0 on success, negative errno otherwise.
18741 **/
18742int
18743lpfc_sli4_resume_rpi(struct lpfc_nodelist *ndlp,
18744        void (*cmpl)(struct lpfc_hba *, LPFC_MBOXQ_t *), void *arg)
18745{
18746        LPFC_MBOXQ_t *mboxq;
18747        struct lpfc_hba *phba = ndlp->phba;
18748        int rc;
18749
18750        /* The rpi is resumed via a RESUME_RPI mailbox command. */
18751        mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
18752        if (!mboxq)
18753                return -ENOMEM;
18754
18755        /* Build the RESUME_RPI mailbox command. */
18756        lpfc_resume_rpi(mboxq, ndlp);
18757        if (cmpl) {
18758                mboxq->mbox_cmpl = cmpl;
18759                mboxq->ctx_buf = arg;
18760                mboxq->ctx_ndlp = ndlp;
18761        } else
18762                mboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
18763        mboxq->vport = ndlp->vport;
18764        rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
18765        if (rc == MBX_NOT_FINISHED) {
18766                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
18767                                "2010 Resume RPI Mailbox failed "
18768                                "status %d, mbxStatus x%x\n", rc,
18769                                bf_get(lpfc_mqe_status, &mboxq->u.mqe));
18770                mempool_free(mboxq, phba->mbox_mem_pool);
18771                return -EIO;
18772        }
18773        return 0;
18774}
18775
18776/**
18777 * lpfc_sli4_init_vpi - Initialize a vpi with the port
18778 * @vport: Pointer to the vport for which the vpi is being initialized
18779 *
18780 * This routine is invoked to activate a vpi with the port.
18781 *
18782 * Returns:
18783 *    0 on success
18784 *    negative errno otherwise
18785 **/
18786int
18787lpfc_sli4_init_vpi(struct lpfc_vport *vport)
18788{
18789        LPFC_MBOXQ_t *mboxq;
18790        int rc = 0;
18791        int retval = MBX_SUCCESS;
18792        uint32_t mbox_tmo;
18793        struct lpfc_hba *phba = vport->phba;
18794        mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
18795        if (!mboxq)
18796                return -ENOMEM;
18797        lpfc_init_vpi(phba, mboxq, vport->vpi);
18798        mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq);
18799        rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
18800        if (rc != MBX_SUCCESS) {
18801                lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
18802                                "2022 INIT VPI Mailbox failed "
18803                                "status %d, mbxStatus x%x\n", rc,
18804                                bf_get(lpfc_mqe_status, &mboxq->u.mqe));
18805                retval = -EIO;
18806        }
18807        if (rc != MBX_TIMEOUT)
18808                mempool_free(mboxq, vport->phba->mbox_mem_pool);
18809
18810        return retval;
18811}
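
/*
 * Sketch of the synchronous mailbox idiom used by lpfc_sli4_init_vpi()
 * above (shape inferred from this file, not a formal API contract):
 * allocate from the mailbox mempool, issue with a timeout, and free the
 * command unless the wait timed out -- on MBX_TIMEOUT the completion
 * path still owns the mailbox and frees it later:
 *
 *	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
 *	if (!mboxq)
 *		return -ENOMEM;
 *	...build the command in mboxq...
 *	mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq);
 *	rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
 *	if (rc != MBX_TIMEOUT)
 *		mempool_free(mboxq, phba->mbox_mem_pool);
 */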
18812
18813/**
18814 * lpfc_mbx_cmpl_add_fcf_record - add fcf mbox completion handler.
18815 * @phba: pointer to lpfc hba data structure.
18816 * @mboxq: Pointer to mailbox object.
18817 *
18818 * This routine is the completion handler for the ADD_FCF_RECORD
18819 * non-embedded mailbox command.  It checks the subheader status, logs
18820 * failures other than STATUS_FCF_IN_USE, and frees the mailbox.
18821 **/
18822static void
18823lpfc_mbx_cmpl_add_fcf_record(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
18824{
18825        void *virt_addr;
18826        union lpfc_sli4_cfg_shdr *shdr;
18827        uint32_t shdr_status, shdr_add_status;
18828
18829        virt_addr = mboxq->sge_array->addr[0];
18830        /* The IOCTL status is embedded in the mailbox subheader. */
18831        shdr = (union lpfc_sli4_cfg_shdr *) virt_addr;
18832        shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
18833        shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
18834
18835        if ((shdr_status || shdr_add_status) &&
18836                (shdr_status != STATUS_FCF_IN_USE))
18837                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
18838                        "2558 ADD_FCF_RECORD mailbox failed with "
18839                        "status x%x add_status x%x\n",
18840                        shdr_status, shdr_add_status);
18841
18842        lpfc_sli4_mbox_cmd_free(phba, mboxq);
18843}
18844
18845/**
18846 * lpfc_sli4_add_fcf_record - Manually add an FCF Record.
18847 * @phba: pointer to lpfc hba data structure.
18848 * @fcf_record:  pointer to the initialized fcf record to add.
18849 *
18850 * This routine is invoked to manually add a single FCF record. The caller
18851 * must pass a completely initialized FCF_Record.  This routine takes
18852 * care of the nonembedded mailbox operations.
18853 **/
18854int
18855lpfc_sli4_add_fcf_record(struct lpfc_hba *phba, struct fcf_record *fcf_record)
18856{
18857        int rc = 0;
18858        LPFC_MBOXQ_t *mboxq;
18859        uint8_t *bytep;
18860        void *virt_addr;
18861        struct lpfc_mbx_sge sge;
18862        uint32_t alloc_len, req_len;
18863        uint32_t fcfindex;
18864
18865        mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
18866        if (!mboxq) {
18867                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
18868                        "2009 Failed to allocate mbox for ADD_FCF cmd\n");
18869                return -ENOMEM;
18870        }
18871
18872        req_len = sizeof(struct fcf_record) + sizeof(union lpfc_sli4_cfg_shdr) +
18873                  sizeof(uint32_t);
18874
18875        /* Allocate DMA memory and set up the non-embedded mailbox command */
18876        alloc_len = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
18877                                     LPFC_MBOX_OPCODE_FCOE_ADD_FCF,
18878                                     req_len, LPFC_SLI4_MBX_NEMBED);
18879        if (alloc_len < req_len) {
18880                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
18881                        "2523 Allocated DMA memory size (x%x) is "
18882                        "less than the requested DMA memory "
18883                        "size (x%x)\n", alloc_len, req_len);
18884                lpfc_sli4_mbox_cmd_free(phba, mboxq);
18885                return -ENOMEM;
18886        }
18887
18888        /*
18889         * Get the first SGE entry from the non-embedded DMA memory.  This
18890         * routine only uses a single SGE.
18891         */
18892        lpfc_sli4_mbx_sge_get(mboxq, 0, &sge);
18893        virt_addr = mboxq->sge_array->addr[0];
18894        /*
18895         * Configure the FCF record for FCFI 0.  This is the driver's
18896         * hardcoded default and gets used in non-FIP mode.
18897         */
18898        fcfindex = bf_get(lpfc_fcf_record_fcf_index, fcf_record);
18899        bytep = virt_addr + sizeof(union lpfc_sli4_cfg_shdr);
18900        lpfc_sli_pcimem_bcopy(&fcfindex, bytep, sizeof(uint32_t));
18901
18902        /*
18903         * Copy the fcf_index and the FCF Record Data. The data starts after
18904         * the FCoE header plus word10. The data copy needs to be endian
18905         * correct.
18906         */
18907        bytep += sizeof(uint32_t);
18908        lpfc_sli_pcimem_bcopy(fcf_record, bytep, sizeof(struct fcf_record));
18909        mboxq->vport = phba->pport;
18910        mboxq->mbox_cmpl = lpfc_mbx_cmpl_add_fcf_record;
18911        rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
18912        if (rc == MBX_NOT_FINISHED) {
18913                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
18914                        "2515 ADD_FCF_RECORD mailbox failed with "
18915                        "status 0x%x\n", rc);
18916                lpfc_sli4_mbox_cmd_free(phba, mboxq);
18917                rc = -EIO;
18918        } else
18919                rc = 0;
18920
18921        return rc;
18922}
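
/*
 * Layout of the non-embedded mailbox payload assumed by
 * lpfc_sli4_add_fcf_record() above, reconstructed from the pointer
 * arithmetic it performs on the first SGE:
 *
 *	+-------------------------------+  virt_addr
 *	| union lpfc_sli4_cfg_shdr      |
 *	+-------------------------------+  + sizeof(cfg_shdr)
 *	| uint32_t fcf_index            |
 *	+-------------------------------+  + sizeof(uint32_t)
 *	| struct fcf_record             |
 *	+-------------------------------+
 *
 * lpfc_sli_pcimem_bcopy() is used for both copies so the payload reaches
 * the port with the correct endianness.
 */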
18923
18924/**
18925 * lpfc_sli4_build_dflt_fcf_record - Build the driver's default FCF Record.
18926 * @phba: pointer to lpfc hba data structure.
18927 * @fcf_record:  pointer to the fcf record to write the default data.
18928 * @fcf_index: FCF table entry index.
18929 *
18930 * This routine is invoked to build the driver's default FCF record.  The
18931 * values used are hardcoded.  This routine handles memory initialization.
18932 *
18933 **/
18934void
18935lpfc_sli4_build_dflt_fcf_record(struct lpfc_hba *phba,
18936                                struct fcf_record *fcf_record,
18937                                uint16_t fcf_index)
18938{
18939        memset(fcf_record, 0, sizeof(struct fcf_record));
18940        fcf_record->max_rcv_size = LPFC_FCOE_MAX_RCV_SIZE;
18941        fcf_record->fka_adv_period = LPFC_FCOE_FKA_ADV_PER;
18942        fcf_record->fip_priority = LPFC_FCOE_FIP_PRIORITY;
18943        bf_set(lpfc_fcf_record_mac_0, fcf_record, phba->fc_map[0]);
18944        bf_set(lpfc_fcf_record_mac_1, fcf_record, phba->fc_map[1]);
18945        bf_set(lpfc_fcf_record_mac_2, fcf_record, phba->fc_map[2]);
18946        bf_set(lpfc_fcf_record_mac_3, fcf_record, LPFC_FCOE_FCF_MAC3);
18947        bf_set(lpfc_fcf_record_mac_4, fcf_record, LPFC_FCOE_FCF_MAC4);
18948        bf_set(lpfc_fcf_record_mac_5, fcf_record, LPFC_FCOE_FCF_MAC5);
18949        bf_set(lpfc_fcf_record_fc_map_0, fcf_record, phba->fc_map[0]);
18950        bf_set(lpfc_fcf_record_fc_map_1, fcf_record, phba->fc_map[1]);
18951        bf_set(lpfc_fcf_record_fc_map_2, fcf_record, phba->fc_map[2]);
18952        bf_set(lpfc_fcf_record_fcf_valid, fcf_record, 1);
18953        bf_set(lpfc_fcf_record_fcf_avail, fcf_record, 1);
18954        bf_set(lpfc_fcf_record_fcf_index, fcf_record, fcf_index);
18955        bf_set(lpfc_fcf_record_mac_addr_prov, fcf_record,
18956                LPFC_FCF_FPMA | LPFC_FCF_SPMA);
18957        /* Set the VLAN bit map */
18958        if (phba->valid_vlan) {
18959                fcf_record->vlan_bitmap[phba->vlan_id / 8]
18960                        = 1 << (phba->vlan_id % 8);
18961        }
18962}
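
/*
 * Worked example of the VLAN bit map math above: each byte of
 * vlan_bitmap[] covers eight VLAN ids, so for a hypothetical
 * phba->vlan_id of 20 the record sets bit (20 % 8) == 4 of byte
 * (20 / 8) == 2:
 *
 *	fcf_record->vlan_bitmap[2] = 1 << 4;
 *
 * i.e. byte 2 becomes 0x10.
 */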
18963
18964/**
18965 * lpfc_sli4_fcf_scan_read_fcf_rec - Read hba fcf record for fcf scan.
18966 * @phba: pointer to lpfc hba data structure.
18967 * @fcf_index: FCF table entry offset.
18968 *
18969 * This routine is invoked to scan the entire FCF table by reading FCF
18970 * record and processing it one at a time starting from the @fcf_index
18971 * for initial FCF discovery or fast FCF failover rediscovery.
18972 *
18973 * Return 0 if the mailbox command is submitted successfully, non-zero
18974 * otherwise.
18975 **/
18976int
18977lpfc_sli4_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
18978{
18979        int rc = 0, error;
18980        LPFC_MBOXQ_t *mboxq;
18981
18982        phba->fcoe_eventtag_at_fcf_scan = phba->fcoe_eventtag;
18983        phba->fcoe_cvl_eventtag_attn = phba->fcoe_cvl_eventtag;
18984        mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
18985        if (!mboxq) {
18986                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
18987                                "2000 Failed to allocate mbox for "
18988                                "READ_FCF cmd\n");
18989                error = -ENOMEM;
18990                goto fail_fcf_scan;
18991        }
18992        /* Construct the read FCF record mailbox command */
18993        rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index);
18994        if (rc) {
18995                error = -EINVAL;
18996                goto fail_fcf_scan;
18997        }
18998        /* Issue the mailbox command asynchronously */
18999        mboxq->vport = phba->pport;
19000        mboxq->mbox_cmpl = lpfc_mbx_cmpl_fcf_scan_read_fcf_rec;
19001
19002        spin_lock_irq(&phba->hbalock);
19003        phba->hba_flag |= FCF_TS_INPROG;
19004        spin_unlock_irq(&phba->hbalock);
19005
19006        rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
19007        if (rc == MBX_NOT_FINISHED)
19008                error = -EIO;
19009        else {
19010                /* Reset eligible FCF count for new scan */
19011                if (fcf_index == LPFC_FCOE_FCF_GET_FIRST)
19012                        phba->fcf.eligible_fcf_cnt = 0;
19013                error = 0;
19014        }
19015fail_fcf_scan:
19016        if (error) {
19017                if (mboxq)
19018                        lpfc_sli4_mbox_cmd_free(phba, mboxq);
19019                /* FCF scan failed, clear FCF_TS_INPROG flag */
19020                spin_lock_irq(&phba->hbalock);
19021                phba->hba_flag &= ~FCF_TS_INPROG;
19022                spin_unlock_irq(&phba->hbalock);
19023        }
19024        return error;
19025}
19026
19027/**
19028 * lpfc_sli4_fcf_rr_read_fcf_rec - Read hba fcf record for roundrobin fcf.
19029 * @phba: pointer to lpfc hba data structure.
19030 * @fcf_index: FCF table entry offset.
19031 *
19032 * This routine is invoked to read an FCF record indicated by @fcf_index
19033 * and to use it for FLOGI roundrobin FCF failover.
19034 *
19035 * Return 0 if the mailbox command is submitted successfully, non-zero
19036 * otherwise.
19037 **/
19038int
19039lpfc_sli4_fcf_rr_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
19040{
19041        int rc = 0, error;
19042        LPFC_MBOXQ_t *mboxq;
19043
19044        mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
19045        if (!mboxq) {
19046                lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_INIT,
19047                                "2763 Failed to allocate mbox for "
19048                                "READ_FCF cmd\n");
19049                error = -ENOMEM;
19050                goto fail_fcf_read;
19051        }
19052        /* Construct the read FCF record mailbox command */
19053        rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index);
19054        if (rc) {
19055                error = -EINVAL;
19056                goto fail_fcf_read;
19057        }
19058        /* Issue the mailbox command asynchronously */
19059        mboxq->vport = phba->pport;
19060        mboxq->mbox_cmpl = lpfc_mbx_cmpl_fcf_rr_read_fcf_rec;
19061        rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
19062        if (rc == MBX_NOT_FINISHED)
19063                error = -EIO;
19064        else
19065                error = 0;
19066
19067fail_fcf_read:
19068        if (error && mboxq)
19069                lpfc_sli4_mbox_cmd_free(phba, mboxq);
19070        return error;
19071}
19072
19073/**
19074 * lpfc_sli4_read_fcf_rec - Read hba fcf record for update eligible fcf bmask.
19075 * @phba: pointer to lpfc hba data structure.
19076 * @fcf_index: FCF table entry offset.
19077 *
19078 * This routine is invoked to read an FCF record indicated by @fcf_index to
19079 * determine whether it's eligible for FLOGI roundrobin failover list.
19080 *
19081 * Return 0 if the mailbox command is submitted successfully, non-zero
19082 * otherwise.
19083 **/
19084int
19085lpfc_sli4_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
19086{
19087        int rc = 0, error;
19088        LPFC_MBOXQ_t *mboxq;
19089
19090        mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
19091        if (!mboxq) {
19092                lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_INIT,
19093                                "2758 Failed to allocate mbox for "
19094                                "READ_FCF cmd\n");
19095                error = -ENOMEM;
19096                goto fail_fcf_read;
19097        }
19098        /* Construct the read FCF record mailbox command */
19099        rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index);
19100        if (rc) {
19101                error = -EINVAL;
19102                goto fail_fcf_read;
19103        }
19104        /* Issue the mailbox command asynchronously */
19105        mboxq->vport = phba->pport;
19106        mboxq->mbox_cmpl = lpfc_mbx_cmpl_read_fcf_rec;
19107        rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
19108        if (rc == MBX_NOT_FINISHED)
19109                error = -EIO;
19110        else
19111                error = 0;
19112
19113fail_fcf_read:
19114        if (error && mboxq)
19115                lpfc_sli4_mbox_cmd_free(phba, mboxq);
19116        return error;
19117}
19118
19119/**
19120 * lpfc_check_next_fcf_pri_level
19121 * @phba: pointer to the lpfc_hba struct for this port.
19122 * This routine is called from lpfc_sli4_fcf_rr_next_index_get when the
19123 * rr_bmask is empty.  The FCF indices are put into the rr_bmask based
19124 * on their priority level, starting from the highest priority and
19125 * working down to the lowest; the most likely FCF candidate will be in
19126 * the highest priority group.  When called, this routine searches the
19127 * fcf_pri list for the next lowest priority group and repopulates the
19128 * rr_bmask with only those fcf indexes.
19129 * Returns:
19130 * 1=success 0=failure
19131 **/
19132static int
19133lpfc_check_next_fcf_pri_level(struct lpfc_hba *phba)
19134{
19135        uint16_t next_fcf_pri;
19136        uint16_t last_index;
19137        struct lpfc_fcf_pri *fcf_pri;
19138        int rc;
19139        int ret = 0;
19140
19141        last_index = find_first_bit(phba->fcf.fcf_rr_bmask,
19142                        LPFC_SLI4_FCF_TBL_INDX_MAX);
19143        lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
19144                        "3060 Last IDX %d\n", last_index);
19145
19146        /* Verify the priority list has 2 or more entries */
19147        spin_lock_irq(&phba->hbalock);
19148        if (list_empty(&phba->fcf.fcf_pri_list) ||
19149            list_is_singular(&phba->fcf.fcf_pri_list)) {
19150                spin_unlock_irq(&phba->hbalock);
19151                lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
19152                        "3061 Last IDX %d\n", last_index);
19153                return 0; /* Empty rr list */
19154        }
19155        spin_unlock_irq(&phba->hbalock);
19156
19157        next_fcf_pri = 0;
19158        /*
19159         * Clear the rr_bmask and set all of the bits that are at this
19160         * priority.
19161         */
19162        memset(phba->fcf.fcf_rr_bmask, 0,
19163                        sizeof(*phba->fcf.fcf_rr_bmask));
19164        spin_lock_irq(&phba->hbalock);
19165        list_for_each_entry(fcf_pri, &phba->fcf.fcf_pri_list, list) {
19166                if (fcf_pri->fcf_rec.flag & LPFC_FCF_FLOGI_FAILED)
19167                        continue;
19168                /*
19169                 * The first priority that has not had a FLOGI failure
19170                 * will be the highest.
19171                 */
19172                if (!next_fcf_pri)
19173                        next_fcf_pri = fcf_pri->fcf_rec.priority;
19174                spin_unlock_irq(&phba->hbalock);
19175                if (fcf_pri->fcf_rec.priority == next_fcf_pri) {
19176                        rc = lpfc_sli4_fcf_rr_index_set(phba,
19177                                                fcf_pri->fcf_rec.fcf_index);
19178                        if (rc)
19179                                return 0;
19180                }
19181                spin_lock_irq(&phba->hbalock);
19182        }
19183        /*
19184         * If next_fcf_pri was not set above and the list is not empty,
19185         * then we have failed FLOGIs on all of them.  So reset the FLOGI
19186         * failed flag on every entry and start at the beginning.
19187         */
19188        if (!next_fcf_pri && !list_empty(&phba->fcf.fcf_pri_list)) {
19189                list_for_each_entry(fcf_pri, &phba->fcf.fcf_pri_list, list) {
19190                        fcf_pri->fcf_rec.flag &= ~LPFC_FCF_FLOGI_FAILED;
19191                        /*
19192                         * The first priority that has not had a FLOGI
19193                         * failure will be the highest.
19194                         */
19195                        if (!next_fcf_pri)
19196                                next_fcf_pri = fcf_pri->fcf_rec.priority;
19197                        spin_unlock_irq(&phba->hbalock);
19198                        if (fcf_pri->fcf_rec.priority == next_fcf_pri) {
19199                                rc = lpfc_sli4_fcf_rr_index_set(phba,
19200                                                fcf_pri->fcf_rec.fcf_index);
19201                                if (rc)
19202                                        return 0;
19203                        }
19204                        spin_lock_irq(&phba->hbalock);
19205                }
19206        } else
19207                ret = 1;
19208        spin_unlock_irq(&phba->hbalock);
19209
19210        return ret;
19211}
19212/**
19213 * lpfc_sli4_fcf_rr_next_index_get - Get next eligible fcf record index
19214 * @phba: pointer to lpfc hba data structure.
19215 *
19216 * This routine is to get the next eligible FCF record index in a round
19217 * robin fashion. If the next eligible FCF record index equals the
19218 * initial roundrobin FCF record index, LPFC_FCOE_FCF_NEXT_NONE (0xFFFF)
19219 * shall be returned, otherwise, the next eligible FCF record's index
19220 * shall be returned.
19221 **/
19222uint16_t
19223lpfc_sli4_fcf_rr_next_index_get(struct lpfc_hba *phba)
19224{
19225        uint16_t next_fcf_index;
19226
19227initial_priority:
19228        /* Search start from next bit of currently registered FCF index */
19229        next_fcf_index = phba->fcf.current_rec.fcf_indx;
19230
19231next_priority:
19232        /* Determine the next fcf index to check */
19233        next_fcf_index = (next_fcf_index + 1) % LPFC_SLI4_FCF_TBL_INDX_MAX;
19234        next_fcf_index = find_next_bit(phba->fcf.fcf_rr_bmask,
19235                                       LPFC_SLI4_FCF_TBL_INDX_MAX,
19236                                       next_fcf_index);
19237
19238        /* Wrap around condition on phba->fcf.fcf_rr_bmask */
19239        if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
19240                /*
19241                 * The search ran past the end of the bitmap; retry it
19242                 * from bit 0 so that an empty bitmask or a completed
19243                 * cycle can be detected below.
19244                 */
19245                next_fcf_index = find_next_bit(phba->fcf.fcf_rr_bmask,
19246                                               LPFC_SLI4_FCF_TBL_INDX_MAX, 0);
19247        }
19248
19249
19250        /* Check roundrobin failover list empty condition */
19251        if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX ||
19252                next_fcf_index == phba->fcf.current_rec.fcf_indx) {
19253                /*
19254                 * If the next fcf index is not found, check if there are
19255                 * lower priority level fcf's in the fcf_priority list.
19256                 * Set up the rr_bmask with all of the available fcf bits
19257                 * at that level and continue the selection process.
19258                 */
19259                if (lpfc_check_next_fcf_pri_level(phba))
19260                        goto initial_priority;
19261                lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
19262                                "2844 No roundrobin failover FCF available\n");
19263
19264                return LPFC_FCOE_FCF_NEXT_NONE;
19265        }
19266
19267        if (next_fcf_index < LPFC_SLI4_FCF_TBL_INDX_MAX &&
19268                phba->fcf.fcf_pri[next_fcf_index].fcf_rec.flag &
19269                LPFC_FCF_FLOGI_FAILED) {
19270                if (list_is_singular(&phba->fcf.fcf_pri_list))
19271                        return LPFC_FCOE_FCF_NEXT_NONE;
19272
19273                goto next_priority;
19274        }
19275
19276        lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
19277                        "2845 Get next roundrobin failover FCF (x%x)\n",
19278                        next_fcf_index);
19279
19280        return next_fcf_index;
19281}
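
/*
 * Sketch of the wrap-around search above (illustrative): the scan starts
 * one past the currently registered index, and when find_next_bit() runs
 * off the end of the bitmap it is retried from bit 0:
 *
 *	next = (cur + 1) % LPFC_SLI4_FCF_TBL_INDX_MAX;
 *	next = find_next_bit(bmask, LPFC_SLI4_FCF_TBL_INDX_MAX, next);
 *	if (next >= LPFC_SLI4_FCF_TBL_INDX_MAX)
 *		next = find_next_bit(bmask, LPFC_SLI4_FCF_TBL_INDX_MAX, 0);
 *
 * Landing back on the current index after the wrap means every eligible
 * FCF at this priority level has been tried, at which point the next
 * lower priority level is loaded into the bitmap.
 */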
19282
19283/**
19284 * lpfc_sli4_fcf_rr_index_set - Set bmask with eligible fcf record index
19285 * @phba: pointer to lpfc hba data structure.
19286 * @fcf_index: index into the FCF table to 'set'
19287 *
19288 * This routine sets the FCF record index in to the eligible bmask for
19289 * roundrobin failover search. It checks to make sure that the index
19290 * does not go beyond the range of the driver allocated bmask dimension
19291 * before setting the bit.
19292 *
19293 * Returns 0 if the index bit successfully set, otherwise, it returns
19294 * -EINVAL.
19295 **/
19296int
19297lpfc_sli4_fcf_rr_index_set(struct lpfc_hba *phba, uint16_t fcf_index)
19298{
19299        if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
19300                lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
19301                                "2610 FCF (x%x) reached driver's book "
19302                                "keeping dimension:x%x\n",
19303                                fcf_index, LPFC_SLI4_FCF_TBL_INDX_MAX);
19304                return -EINVAL;
19305        }
19306        /* Set the eligible FCF record index bmask */
19307        set_bit(fcf_index, phba->fcf.fcf_rr_bmask);
19308
19309        lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
19310                        "2790 Set FCF (x%x) to roundrobin FCF failover "
19311                        "bmask\n", fcf_index);
19312
19313        return 0;
19314}
19315
19316/**
19317 * lpfc_sli4_fcf_rr_index_clear - Clear bmask from eligible fcf record index
19318 * @phba: pointer to lpfc hba data structure.
19319 * @fcf_index: index into the FCF table to 'clear'
19320 *
19321 * This routine clears the FCF record index from the eligible bmask for
19322 * roundrobin failover search. It checks to make sure that the index
19323 * does not go beyond the range of the driver allocated bmask dimension
19324 * before clearing the bit.
19325 **/
19326void
19327lpfc_sli4_fcf_rr_index_clear(struct lpfc_hba *phba, uint16_t fcf_index)
19328{
19329        struct lpfc_fcf_pri *fcf_pri, *fcf_pri_next;
19330        if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
19331                lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
19332                                "2762 FCF (x%x) reached driver's book "
19333                                "keeping dimension:x%x\n",
19334                                fcf_index, LPFC_SLI4_FCF_TBL_INDX_MAX);
19335                return;
19336        }
19337        /* Clear the eligible FCF record index bmask */
19338        spin_lock_irq(&phba->hbalock);
19339        list_for_each_entry_safe(fcf_pri, fcf_pri_next, &phba->fcf.fcf_pri_list,
19340                                 list) {
19341                if (fcf_pri->fcf_rec.fcf_index == fcf_index) {
19342                        list_del_init(&fcf_pri->list);
19343                        break;
19344                }
19345        }
19346        spin_unlock_irq(&phba->hbalock);
19347        clear_bit(fcf_index, phba->fcf.fcf_rr_bmask);
19348
19349        lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
19350                        "2791 Clear FCF (x%x) from roundrobin failover "
19351                        "bmask\n", fcf_index);
19352}
19353
19354/**
19355 * lpfc_mbx_cmpl_redisc_fcf_table - completion routine for rediscover FCF table
19356 * @phba: pointer to lpfc hba data structure.
19357 * @mbox: An allocated pointer to type LPFC_MBOXQ_t
19358 *
19359 * This routine is the completion routine for the rediscover FCF table mailbox
19360 * command. If the mailbox command returned failure, it will try to stop the
19361 * FCF rediscover wait timer.
19362 **/
19363static void
19364lpfc_mbx_cmpl_redisc_fcf_table(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
19365{
19366        struct lpfc_mbx_redisc_fcf_tbl *redisc_fcf;
19367        uint32_t shdr_status, shdr_add_status;
19368
19369        redisc_fcf = &mbox->u.mqe.un.redisc_fcf_tbl;
19370
19371        shdr_status = bf_get(lpfc_mbox_hdr_status,
19372                             &redisc_fcf->header.cfg_shdr.response);
19373        shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
19374                             &redisc_fcf->header.cfg_shdr.response);
19375        if (shdr_status || shdr_add_status) {
19376                lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
19377                                "2746 Requesting for FCF rediscovery failed "
19378                                "status x%x add_status x%x\n",
19379                                shdr_status, shdr_add_status);
19380                if (phba->fcf.fcf_flag & FCF_ACVL_DISC) {
19381                        spin_lock_irq(&phba->hbalock);
19382                        phba->fcf.fcf_flag &= ~FCF_ACVL_DISC;
19383                        spin_unlock_irq(&phba->hbalock);
19384                        /*
19385                         * CVL event triggered FCF rediscover request failed,
19386                         * last resort to re-try current registered FCF entry.
19387                         */
19388                        lpfc_retry_pport_discovery(phba);
19389                } else {
19390                        spin_lock_irq(&phba->hbalock);
19391                        phba->fcf.fcf_flag &= ~FCF_DEAD_DISC;
19392                        spin_unlock_irq(&phba->hbalock);
19393                        /*
19394                         * DEAD FCF event triggered FCF rediscover request
19395                         * failed, last resort to fail over as a link down
19396                         * to FCF registration.
19397                         */
19398                        lpfc_sli4_fcf_dead_failthrough(phba);
19399                }
19400        } else {
19401                lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
19402                                "2775 Start FCF rediscover quiescent timer\n");
19403                /*
19404                 * Start the FCF rediscovery wait timer for pending FCF
19405                 * before rescanning the FCF record table.
19406                 */
19407                lpfc_fcf_redisc_wait_start_timer(phba);
19408        }
19409
19410        mempool_free(mbox, phba->mbox_mem_pool);
19411}
19412
19413/**
19414 * lpfc_sli4_redisc_fcf_table - Request to rediscover entire FCF table by port.
19415 * @phba: pointer to lpfc hba data structure.
19416 *
19417 * This routine is invoked to request rediscovery of the entire FCF table
19418 * by the port.
19419 **/
19420int
19421lpfc_sli4_redisc_fcf_table(struct lpfc_hba *phba)
19422{
19423        LPFC_MBOXQ_t *mbox;
19424        struct lpfc_mbx_redisc_fcf_tbl *redisc_fcf;
19425        int rc, length;
19426
19427        /* Cancel retry delay timers to all vports before FCF rediscover */
19428        lpfc_cancel_all_vport_retry_delay_timer(phba);
19429
19430        mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
19431        if (!mbox) {
19432                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
19433                                "2745 Failed to allocate mbox for "
19434                                "requesting FCF rediscover.\n");
19435                return -ENOMEM;
19436        }
19437
19438        length = (sizeof(struct lpfc_mbx_redisc_fcf_tbl) -
19439                  sizeof(struct lpfc_sli4_cfg_mhdr));
19440        lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
19441                         LPFC_MBOX_OPCODE_FCOE_REDISCOVER_FCF,
19442                         length, LPFC_SLI4_MBX_EMBED);
19443
19444        redisc_fcf = &mbox->u.mqe.un.redisc_fcf_tbl;
19445        /* Set count to 0 for invalidating the entire FCF database */
19446        bf_set(lpfc_mbx_redisc_fcf_count, redisc_fcf, 0);
19447
19448        /* Issue the mailbox command asynchronously */
19449        mbox->vport = phba->pport;
19450        mbox->mbox_cmpl = lpfc_mbx_cmpl_redisc_fcf_table;
19451        rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
19452
19453        if (rc == MBX_NOT_FINISHED) {
19454                mempool_free(mbox, phba->mbox_mem_pool);
19455                return -EIO;
19456        }
19457        return 0;
19458}
19459
19460/**
19461 * lpfc_sli4_fcf_dead_failthrough - Failthrough routine to fcf dead event
19462 * @phba: pointer to lpfc hba data structure.
19463 *
19464 * This function is the failover routine as a last resort to the FCF DEAD
19465 * event when driver failed to perform fast FCF failover.
19466 **/
19467void
19468lpfc_sli4_fcf_dead_failthrough(struct lpfc_hba *phba)
19469{
19470        uint32_t link_state;
19471
19472        /*
19473         * Last resort as FCF DEAD event failover will treat this as
19474         * a link down, but save the link state because we don't want
19475         * it to be changed to Link Down unless it is already down.
19476         */
19477        link_state = phba->link_state;
19478        lpfc_linkdown(phba);
19479        phba->link_state = link_state;
19480
19481        /* Unregister FCF if no devices connected to it */
19482        lpfc_unregister_unused_fcf(phba);
19483}
19484
19485/**
19486 * lpfc_sli_get_config_region23 - Get sli3 port region 23 data.
19487 * @phba: pointer to lpfc hba data structure.
19488 * @rgn23_data: pointer to configure region 23 data.
19489 *
19490 * This function gets SLI3 port configure region 23 data through memory dump
19491 * mailbox command. When it successfully retrieves data, the size of the data
19492 * will be returned, otherwise, 0 will be returned.
19493 **/
19494static uint32_t
19495lpfc_sli_get_config_region23(struct lpfc_hba *phba, char *rgn23_data)
19496{
19497        LPFC_MBOXQ_t *pmb = NULL;
19498        MAILBOX_t *mb;
19499        uint32_t offset = 0;
19500        int i, rc;
19501
19502        if (!rgn23_data)
19503                return 0;
19504
19505        pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
19506        if (!pmb) {
19507                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
19508                                "2600 failed to allocate mailbox memory\n");
19509                return 0;
19510        }
19511        mb = &pmb->u.mb;
19512
19513        do {
19514                lpfc_dump_mem(phba, pmb, offset, DMP_REGION_23);
19515                rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
19516
19517                if (rc != MBX_SUCCESS) {
19518                        lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
19519                                        "2601 failed to read config "
19520                                        "region 23, rc 0x%x Status 0x%x\n",
19521                                        rc, mb->mbxStatus);
19522                        mb->un.varDmp.word_cnt = 0;
19523                }
19524                /*
19525                 * dump mem may return a zero word count when finished, or
19526                 * we may have got a mailbox error; either way we are done.
19527                 */
19528                if (mb->un.varDmp.word_cnt == 0)
19529                        break;
19530
19531                i = mb->un.varDmp.word_cnt * sizeof(uint32_t);
19532                if (offset + i > DMP_RGN23_SIZE)
19533                        i = DMP_RGN23_SIZE - offset;
19534                lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
19535                                      rgn23_data + offset, i);
19536                offset += i;
19537        } while (offset < DMP_RGN23_SIZE);
19538
19539        mempool_free(pmb, phba->mbox_mem_pool);
19540        return offset;
19541}
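
/*
 * Worked shape of the chunked dump loop above: region 23 is read in
 * mailbox-sized pieces and each copy is clamped so the DMP_RGN23_SIZE
 * destination buffer can never overflow:
 *
 *	i = mb->un.varDmp.word_cnt * sizeof(uint32_t);
 *	if (offset + i > DMP_RGN23_SIZE)
 *		i = DMP_RGN23_SIZE - offset;
 *	offset += i;
 *
 * The loop ends when word_cnt is zero (end of region or mailbox error)
 * or when offset reaches DMP_RGN23_SIZE.
 */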
19542
19543/**
19544 * lpfc_sli4_get_config_region23 - Get sli4 port region 23 data.
19545 * @phba: pointer to lpfc hba data structure.
19546 * @rgn23_data: pointer to configure region 23 data.
19547 *
19548 * This function gets SLI4 port configure region 23 data through memory dump
19549 * mailbox command. When it successfully retrieves data, the size of the data
19550 * will be returned, otherwise, 0 will be returned.
19551 **/
19552static uint32_t
19553lpfc_sli4_get_config_region23(struct lpfc_hba *phba, char *rgn23_data)
19554{
19555        LPFC_MBOXQ_t *mboxq = NULL;
19556        struct lpfc_dmabuf *mp = NULL;
19557        struct lpfc_mqe *mqe;
19558        uint32_t data_length = 0;
19559        int rc;
19560
19561        if (!rgn23_data)
19562                return 0;
19563
19564        mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
19565        if (!mboxq) {
19566                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
19567                                "3105 failed to allocate mailbox memory\n");
19568                return 0;
19569        }
19570
19571        if (lpfc_sli4_dump_cfg_rg23(phba, mboxq))
19572                goto out;
19573        mqe = &mboxq->u.mqe;
19574        mp = (struct lpfc_dmabuf *)mboxq->ctx_buf;
19575        rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
19576        if (rc)
19577                goto out;
19578        data_length = mqe->un.mb_words[5];
19579        if (data_length == 0)
19580                goto out;
19581        if (data_length > DMP_RGN23_SIZE) {
19582                data_length = 0;
19583                goto out;
19584        }
19585        lpfc_sli_pcimem_bcopy((char *)mp->virt, rgn23_data, data_length);
19586out:
19587        mempool_free(mboxq, phba->mbox_mem_pool);
19588        if (mp) {
19589                lpfc_mbuf_free(phba, mp->virt, mp->phys);
19590                kfree(mp);
19591        }
19592        return data_length;
19593}
19594
19595/**
19596 * lpfc_sli_read_link_ste - Read region 23 to decide if link is disabled.
19597 * @phba: pointer to lpfc hba data structure.
19598 *
19599 * This function reads region 23 and parses the TLVs for port status to
19600 * decide if the user disabled the port. If the TLV indicates the
19601 * port is disabled, the hba_flag is set accordingly.
19602 **/
19603void
19604lpfc_sli_read_link_ste(struct lpfc_hba *phba)
19605{
19606        uint8_t *rgn23_data = NULL;
19607        uint32_t if_type, data_size, sub_tlv_len, tlv_offset;
19608        uint32_t offset = 0;
19609
19610        /* Get adapter Region 23 data */
19611        rgn23_data = kzalloc(DMP_RGN23_SIZE, GFP_KERNEL);
19612        if (!rgn23_data)
19613                goto out;
19614
19615        if (phba->sli_rev < LPFC_SLI_REV4)
19616                data_size = lpfc_sli_get_config_region23(phba, rgn23_data);
19617        else {
19618                if_type = bf_get(lpfc_sli_intf_if_type,
19619                                 &phba->sli4_hba.sli_intf);
19620                if (if_type == LPFC_SLI_INTF_IF_TYPE_0)
19621                        goto out;
19622                data_size = lpfc_sli4_get_config_region23(phba, rgn23_data);
19623        }
19624
19625        if (!data_size)
19626                goto out;
19627
19628        /* Check the region signature first */
19629        if (memcmp(&rgn23_data[offset], LPFC_REGION23_SIGNATURE, 4)) {
19630                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
19631                        "2619 Config region 23 has bad signature\n");
19632                goto out;
19633        }
19634        offset += 4;
19635
19636        /* Check the data structure version */
19637        if (rgn23_data[offset] != LPFC_REGION23_VERSION) {
19638                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
19639                        "2620 Config region 23 has bad version\n");
19640                goto out;
19641        }
19642        offset += 4;
19643
19644        /* Parse TLV entries in the region */
19645        while (offset < data_size) {
19646                if (rgn23_data[offset] == LPFC_REGION23_LAST_REC)
19647                        break;
19648                /*
19649                 * If the TLV is not a driver specific TLV or the driver
19650                 * id is not the Linux driver id, skip the record.
19651                 */
19652                if ((rgn23_data[offset] != DRIVER_SPECIFIC_TYPE) ||
19653                    (rgn23_data[offset + 2] != LINUX_DRIVER_ID) ||
19654                    (rgn23_data[offset + 3] != 0)) {
19655                        offset += rgn23_data[offset + 1] * 4 + 4;
19656                        continue;
19657                }
19658
19659                /* Driver found a driver specific TLV in the config region */
19660                sub_tlv_len = rgn23_data[offset + 1] * 4;
19661                offset += 4;
19662                tlv_offset = 0;
19663
19664                /*
19665                 * Search for configured port state sub-TLV.
19666                 */
19667                while ((offset < data_size) &&
19668                        (tlv_offset < sub_tlv_len)) {
19669                        if (rgn23_data[offset] == LPFC_REGION23_LAST_REC) {
19670                                offset += 4;
19671                                tlv_offset += 4;
19672                                break;
19673                        }
19674                        if (rgn23_data[offset] != PORT_STE_TYPE) {
19675                                tlv_offset += rgn23_data[offset + 1] * 4 + 4;
19676                                offset += rgn23_data[offset + 1] * 4 + 4;
19677                                continue;
19678                        }
19679
19680                        /* This HBA contains PORT_STE configured */
19681                        if (!rgn23_data[offset + 2])
19682                                phba->hba_flag |= LINK_DISABLED;
19683
19684                        goto out;
19685                }
19686        }
19687
19688out:
19689        kfree(rgn23_data);
19690        return;
19691}
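
/*
 * Region 23 TLV layout assumed by the parser above, reconstructed from
 * the offsets it uses (field names are illustrative):
 *
 *	byte 0: type	(DRIVER_SPECIFIC_TYPE, PORT_STE_TYPE, ...)
 *	byte 1: length	in 32-bit words, excluding this 4-byte header
 *	byte 2: first data byte (LINUX_DRIVER_ID, or the port state)
 *	byte 3: second data byte
 *
 * Hence the stride of rgn23_data[offset + 1] * 4 + 4 when skipping a
 * record; the port is treated as disabled when the PORT_STE sub-TLV
 * carries a zero in its first data byte.
 */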
19692
19693/**
19694 * lpfc_wr_object - write an object to the firmware
19695 * @phba: HBA structure that indicates port to create a queue on.
19696 * @dmabuf_list: list of dmabufs to write to the port.
19697 * @size: the total byte value of the objects to write to the port.
19698 * @offset: the current offset to be used to start the transfer.
19699 *
19700 * This routine will create a wr_object mailbox command to send to the port.
19701 * The mailbox command will be constructed using the dma buffers described in
19702 * @dmabuf_list to create a list of BDEs. This routine will fill in as many
19703 * BDEs as the embedded mailbox can support. The @offset variable will be
19704 * used to indicate the starting offset of the transfer and will also return
19705 * the offset after the write object mailbox has completed. @size is used to
19706 * determine the end of the object and whether the eof bit should be set.
19707 *
19708 * Return 0 if successful, and @offset will contain the new offset to use
19709 * for the next write.
19710 * Return negative value for error cases.
19711 **/
19712int
19713lpfc_wr_object(struct lpfc_hba *phba, struct list_head *dmabuf_list,
19714               uint32_t size, uint32_t *offset)
19715{
19716        struct lpfc_mbx_wr_object *wr_object;
19717        LPFC_MBOXQ_t *mbox;
19718        int rc = 0, i = 0;
19719        uint32_t shdr_status, shdr_add_status, shdr_change_status, shdr_csf;
19720        uint32_t mbox_tmo;
19721        struct lpfc_dmabuf *dmabuf;
19722        uint32_t written = 0;
19723        bool check_change_status = false;
19724
19725        mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
19726        if (!mbox)
19727                return -ENOMEM;
19728
19729        lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
19730                        LPFC_MBOX_OPCODE_WRITE_OBJECT,
19731                        sizeof(struct lpfc_mbx_wr_object) -
19732                        sizeof(struct lpfc_sli4_cfg_mhdr), LPFC_SLI4_MBX_EMBED);
19733
19734        wr_object = (struct lpfc_mbx_wr_object *)&mbox->u.mqe.un.wr_object;
19735        wr_object->u.request.write_offset = *offset;
19736        sprintf((uint8_t *)wr_object->u.request.object_name, "/");
19737        wr_object->u.request.object_name[0] =
19738                cpu_to_le32(wr_object->u.request.object_name[0]);
19739        bf_set(lpfc_wr_object_eof, &wr_object->u.request, 0);
19740        list_for_each_entry(dmabuf, dmabuf_list, list) {
19741                if (i >= LPFC_MBX_WR_CONFIG_MAX_BDE || written >= size)
19742                        break;
19743                wr_object->u.request.bde[i].addrLow = putPaddrLow(dmabuf->phys);
19744                wr_object->u.request.bde[i].addrHigh =
19745                        putPaddrHigh(dmabuf->phys);
19746                if (written + SLI4_PAGE_SIZE >= size) {
19747                        wr_object->u.request.bde[i].tus.f.bdeSize =
19748                                (size - written);
19749                        written += (size - written);
19750                        bf_set(lpfc_wr_object_eof, &wr_object->u.request, 1);
19751                        bf_set(lpfc_wr_object_eas, &wr_object->u.request, 1);
19752                        check_change_status = true;
19753                } else {
19754                        wr_object->u.request.bde[i].tus.f.bdeSize =
19755                                SLI4_PAGE_SIZE;
19756                        written += SLI4_PAGE_SIZE;
19757                }
19758                i++;
19759        }
19760        wr_object->u.request.bde_count = i;
19761        bf_set(lpfc_wr_object_write_length, &wr_object->u.request, written);
19762        if (!phba->sli4_hba.intr_enable)
19763                rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
19764        else {
19765                mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
19766                rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
19767        }
19768        /* The IOCTL status is embedded in the mailbox subheader. */
19769        shdr_status = bf_get(lpfc_mbox_hdr_status,
19770                             &wr_object->header.cfg_shdr.response);
19771        shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
19772                                 &wr_object->header.cfg_shdr.response);
19773        if (check_change_status) {
19774                shdr_change_status = bf_get(lpfc_wr_object_change_status,
19775                                            &wr_object->u.response);
19776
19777                if (shdr_change_status == LPFC_CHANGE_STATUS_FW_RESET ||
19778                    shdr_change_status == LPFC_CHANGE_STATUS_PORT_MIGRATION) {
19779                        shdr_csf = bf_get(lpfc_wr_object_csf,
19780                                          &wr_object->u.response);
19781                        if (shdr_csf)
19782                                shdr_change_status =
19783                                                   LPFC_CHANGE_STATUS_PCI_RESET;
19784                }
19785
19786                switch (shdr_change_status) {
19787                case (LPFC_CHANGE_STATUS_PHYS_DEV_RESET):
19788                        lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
19789                                        "3198 Firmware write complete: System "
19790                                        "reboot required to instantiate\n");
19791                        break;
19792                case (LPFC_CHANGE_STATUS_FW_RESET):
19793                        lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
19794                                        "3199 Firmware write complete: Firmware"
19795                                        " reset required to instantiate\n");
19796                        break;
19797                case (LPFC_CHANGE_STATUS_PORT_MIGRATION):
19798                        lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
19799                                        "3200 Firmware write complete: Port "
19800                                        "Migration or PCI Reset required to "
19801                                        "instantiate\n");
19802                        break;
19803                case (LPFC_CHANGE_STATUS_PCI_RESET):
19804                        lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
19805                                        "3201 Firmware write complete: PCI "
19806                                        "Reset required to instantiate\n");
19807                        break;
19808                default:
19809                        break;
19810                }
19811        }
19812        if (rc != MBX_TIMEOUT)
19813                mempool_free(mbox, phba->mbox_mem_pool);
19814        if (shdr_status || shdr_add_status || rc) {
19815                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
19816                                "3025 Write Object mailbox failed with "
19817                                "status x%x add_status x%x, mbx status x%x\n",
19818                                shdr_status, shdr_add_status, rc);
19819                rc = -ENXIO;
19820                *offset = shdr_add_status;
19821        } else
19822                *offset += wr_object->u.response.actual_write_length;
19823        return rc;
19824}
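
/*
 * Worked example of the BDE chunking above (sizes assumed for
 * illustration): writing a 20 KB object from 4 KB (SLI4_PAGE_SIZE)
 * buffers produces four full-page BDEs followed by a final 4 KB BDE
 * that carries the eof/eas bits:
 *
 *	passes 1-4: bdeSize = SLI4_PAGE_SIZE, written += SLI4_PAGE_SIZE
 *	pass 5:     written + SLI4_PAGE_SIZE >= size, so
 *	            bdeSize = size - written and eof/eas are set
 *
 * On success *offset advances by the port's actual_write_length; on
 * failure it is overwritten with the additional status from the shdr.
 */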
19825
19826/**
19827 * lpfc_cleanup_pending_mbox - Free up vport discovery mailbox commands.
19828 * @vport: pointer to vport data structure.
19829 *
19830 * This function iterates through the mailboxq and cleans up all REG_LOGIN
19831 * and REG_VPI mailbox commands associated with the vport. This function
19832 * is called when the driver wants to restart discovery of the vport due to
19833 * a Clear Virtual Link event.
19834 **/
19835void
19836lpfc_cleanup_pending_mbox(struct lpfc_vport *vport)
19837{
19838        struct lpfc_hba *phba = vport->phba;
19839        LPFC_MBOXQ_t *mb, *nextmb;
19840        struct lpfc_dmabuf *mp;
19841        struct lpfc_nodelist *ndlp;
19842        struct lpfc_nodelist *act_mbx_ndlp = NULL;
19843        struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
19844        LIST_HEAD(mbox_cmd_list);
19845        uint8_t restart_loop;
19846
19847        /* Clean up internally queued mailbox commands with the vport */
19848        spin_lock_irq(&phba->hbalock);
19849        list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) {
19850                if (mb->vport != vport)
19851                        continue;
19852
19853                if ((mb->u.mb.mbxCommand != MBX_REG_LOGIN64) &&
19854                        (mb->u.mb.mbxCommand != MBX_REG_VPI))
19855                        continue;
19856
19857                list_del(&mb->list);
19858                list_add_tail(&mb->list, &mbox_cmd_list);
19859        }
19860        /* Clean up active mailbox command with the vport */
19861        mb = phba->sli.mbox_active;
19862        if (mb && (mb->vport == vport)) {
19863                if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) ||
19864                        (mb->u.mb.mbxCommand == MBX_REG_VPI))
19865                        mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
19866                if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
19867                        act_mbx_ndlp = (struct lpfc_nodelist *)mb->ctx_ndlp;
19868                        /* Put reference count for delayed processing */
19869                        act_mbx_ndlp = lpfc_nlp_get(act_mbx_ndlp);
19870                        /* Unregister the RPI when mailbox complete */
19871                        mb->mbox_flag |= LPFC_MBX_IMED_UNREG;
19872                }
19873        }
19874        /* Cleanup any mailbox completions which are not yet processed */
19875        do {
19876                restart_loop = 0;
19877                list_for_each_entry(mb, &phba->sli.mboxq_cmpl, list) {
19878                        /*
19879                         * If this mailbox is already processed or it is
19880                         * for another vport, ignore it.
19881                         */
19882                        if ((mb->vport != vport) ||
19883                                (mb->mbox_flag & LPFC_MBX_IMED_UNREG))
19884                                continue;
19885
19886                        if ((mb->u.mb.mbxCommand != MBX_REG_LOGIN64) &&
19887                                (mb->u.mb.mbxCommand != MBX_REG_VPI))
19888                                continue;
19889
19890                        mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
19891                        if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
19892                                ndlp = (struct lpfc_nodelist *)mb->ctx_ndlp;
19893                                /* Unregister the RPI when mailbox complete */
19894                                mb->mbox_flag |= LPFC_MBX_IMED_UNREG;
19895                                restart_loop = 1;
19896                                spin_unlock_irq(&phba->hbalock);
19897                                spin_lock(shost->host_lock);
19898                                ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
19899                                spin_unlock(shost->host_lock);
19900                                spin_lock_irq(&phba->hbalock);
19901                                break;
19902                        }
19903                }
19904        } while (restart_loop);
19905
19906        spin_unlock_irq(&phba->hbalock);
19907
19908        /* Release the cleaned-up mailbox commands */
19909        while (!list_empty(&mbox_cmd_list)) {
19910                list_remove_head(&mbox_cmd_list, mb, LPFC_MBOXQ_t, list);
19911                if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
19912                        mp = (struct lpfc_dmabuf *)(mb->ctx_buf);
19913                        if (mp) {
19914                                __lpfc_mbuf_free(phba, mp->virt, mp->phys);
19915                                kfree(mp);
19916                        }
19917                        mb->ctx_buf = NULL;
19918                        ndlp = (struct lpfc_nodelist *)mb->ctx_ndlp;
19919                        mb->ctx_ndlp = NULL;
19920                        if (ndlp) {
19921                                spin_lock(shost->host_lock);
19922                                ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
19923                                spin_unlock(shost->host_lock);
19924                                lpfc_nlp_put(ndlp);
19925                        }
19926                }
19927                mempool_free(mb, phba->mbox_mem_pool);
19928        }
19929
19930        /* Release the ndlp with the cleaned-up active mailbox command */
19931        if (act_mbx_ndlp) {
19932                spin_lock(shost->host_lock);
19933                act_mbx_ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
19934                spin_unlock(shost->host_lock);
19935                lpfc_nlp_put(act_mbx_ndlp);
19936        }
19937}
19938
19939/**
19940 * lpfc_drain_txq - Drain the txq
19941 * @phba: Pointer to HBA context object.
19942 *
19943 * This function attempts to submit IOCBs from the txq
19944 * to the adapter.  For SLI4 adapters, the txq contains
19945 * ELS IOCBs that have been deferred because there
19946 * are no SGLs.  This congestion can occur with large
19947 * vport counts during node discovery.
19948 **/
19949
19950uint32_t
19951lpfc_drain_txq(struct lpfc_hba *phba)
19952{
19953        LIST_HEAD(completions);
19954        struct lpfc_sli_ring *pring;
19955        struct lpfc_iocbq *piocbq = NULL;
19956        unsigned long iflags = 0;
19957        char *fail_msg = NULL;
19958        struct lpfc_sglq *sglq;
19959        union lpfc_wqe128 wqe;
19960        uint32_t txq_cnt = 0;
19961        struct lpfc_queue *wq;
19962
19963        if (phba->link_flag & LS_MDS_LOOPBACK) {
19964                /* MDS WQEs are posted only to the first WQ */
19965                wq = phba->sli4_hba.hdwq[0].io_wq;
19966                if (unlikely(!wq))
19967                        return 0;
19968                pring = wq->pring;
19969        } else {
19970                wq = phba->sli4_hba.els_wq;
19971                if (unlikely(!wq))
19972                        return 0;
19973                pring = lpfc_phba_elsring(phba);
19974        }
19975
19976        if (unlikely(!pring) || list_empty(&pring->txq))
19977                return 0;
19978
19979        spin_lock_irqsave(&pring->ring_lock, iflags);
19980        list_for_each_entry(piocbq, &pring->txq, list) {
19981                txq_cnt++;
19982        }
19983
19984        if (txq_cnt > pring->txq_max)
19985                pring->txq_max = txq_cnt;
19986
19987        spin_unlock_irqrestore(&pring->ring_lock, iflags);
19988
19989        while (!list_empty(&pring->txq)) {
19990                spin_lock_irqsave(&pring->ring_lock, iflags);
19991
19992                piocbq = lpfc_sli_ringtx_get(phba, pring);
19993                if (!piocbq) {
19994                        spin_unlock_irqrestore(&pring->ring_lock, iflags);
19995                        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
19996                                "2823 txq empty and txq_cnt is %d\n",
19997                                txq_cnt);
19998                        break;
19999                }
20000                sglq = __lpfc_sli_get_els_sglq(phba, piocbq);
20001                if (!sglq) {
20002                        __lpfc_sli_ringtx_put(phba, pring, piocbq);
20003                        spin_unlock_irqrestore(&pring->ring_lock, iflags);
20004                        break;
20005                }
20006                txq_cnt--;
20007
20008                /* The XRI and IOCB resources are secured,
20009                 * attempt to issue the request
20010                 */
20011                piocbq->sli4_lxritag = sglq->sli4_lxritag;
20012                piocbq->sli4_xritag = sglq->sli4_xritag;
20013                if (lpfc_sli4_bpl2sgl(phba, piocbq, sglq) == NO_XRI)
20014                        fail_msg = "to convert bpl to sgl";
20015                else if (lpfc_sli4_iocb2wqe(phba, piocbq, &wqe))
20016                        fail_msg = "to convert iocb to wqe";
20017                else if (lpfc_sli4_wq_put(wq, &wqe))
20018                        fail_msg = " - Wq is full";
20019                else
20020                        lpfc_sli_ringtxcmpl_put(phba, pring, piocbq);
20021
20022                if (fail_msg) {
20023                        /* Failed means we can't issue and need to cancel */
20024                        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
20025                                        "2822 IOCB failed %s iotag 0x%x "
20026                                        "xri 0x%x\n",
20027                                        fail_msg,
20028                                        piocbq->iotag, piocbq->sli4_xritag);
20029                        list_add_tail(&piocbq->list, &completions);
20030                }
20031                spin_unlock_irqrestore(&pring->ring_lock, iflags);
20032        }
20033
20034        /* Cancel all the IOCBs that cannot be issued */
20035        lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
20036                                IOERR_SLI_ABORTED);
20037
20038        return txq_cnt;
20039}
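
/*
 * Usage sketch (hypothetical; lpfc_example_retry_els_txq() is not a real
 * driver entry point): a deferred-work path could retry the ELS txq once
 * SGLs may have been freed, using the return value to decide whether
 * another pass is needed.
 */
static inline bool lpfc_example_retry_els_txq(struct lpfc_hba *phba)
{
        /* Non-zero means ELS IOCBs are still waiting on the txq. */
        return lpfc_drain_txq(phba) != 0;
}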
20040
20041/**
20042 * lpfc_wqe_bpl2sgl - Convert the bpl/bde to a sgl.
20043 * @phba: Pointer to HBA context object.
20044 * @pwqeq: Pointer to command WQE.
20045 * @sglq: Pointer to the scatter gather queue object.
20046 *
20047 * This routine converts the bpl or bde that is in the WQE
20048 * to a sgl list for the sli4 hardware. The physical address
20049 * of the bpl/bde is converted back to a virtual address.
20050 * If the WQE contains a BPL then the list of BDEs is
20051 * converted to sli4_sges. If the WQE contains a single
20052 * BDE then it is converted to a single sli4_sge.
20053 * The WQE is still in CPU endianness so the contents of
20054 * the bpl can be used without byte swapping.
20055 *
20056 * Returns valid XRI = Success, NO_XRI = Failure.
20057 */
20058static uint16_t
20059lpfc_wqe_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeq,
20060                 struct lpfc_sglq *sglq)
20061{
20062        uint16_t xritag = NO_XRI;
20063        struct ulp_bde64 *bpl = NULL;
20064        struct ulp_bde64 bde;
20065        struct sli4_sge *sgl  = NULL;
20066        struct lpfc_dmabuf *dmabuf;
20067        union lpfc_wqe128 *wqe;
20068        int numBdes = 0;
20069        int i = 0;
20070        uint32_t offset = 0; /* accumulated offset in the sg request list */
20071        int inbound = 0; /* number of sg reply entries inbound from firmware */
20072        uint32_t cmd;
20073
20074        if (!pwqeq || !sglq)
20075                return xritag;
20076
20077        sgl  = (struct sli4_sge *)sglq->sgl;
20078        wqe = &pwqeq->wqe;
20079        pwqeq->iocb.ulpIoTag = pwqeq->iotag;
20080
20081        cmd = bf_get(wqe_cmnd, &wqe->generic.wqe_com);
20082        if (cmd == CMD_XMIT_BLS_RSP64_WQE)
20083                return sglq->sli4_xritag;
20084        numBdes = pwqeq->rsvd2;
20085        if (numBdes) {
20086                /* The addrHigh and addrLow fields within the WQE
20087                 * have not been byteswapped yet so there is no
20088                 * need to swap them back.
20089                 */
20090                if (pwqeq->context3)
20091                        dmabuf = (struct lpfc_dmabuf *)pwqeq->context3;
20092                else
20093                        return xritag;
20094
20095                bpl  = (struct ulp_bde64 *)dmabuf->virt;
20096                if (!bpl)
20097                        return xritag;
20098
20099                for (i = 0; i < numBdes; i++) {
20100                        /* Should already be byte swapped. */
20101                        sgl->addr_hi = bpl->addrHigh;
20102                        sgl->addr_lo = bpl->addrLow;
20103
20104                        sgl->word2 = le32_to_cpu(sgl->word2);
20105                        if ((i + 1) == numBdes)
20106                                bf_set(lpfc_sli4_sge_last, sgl, 1);
20107                        else
20108                                bf_set(lpfc_sli4_sge_last, sgl, 0);
20109                        /* swap the size field back to the cpu so we
20110                         * can assign it to the sgl.
20111                         */
20112                        bde.tus.w = le32_to_cpu(bpl->tus.w);
20113                        sgl->sge_len = cpu_to_le32(bde.tus.f.bdeSize);
20114                        /* The offsets in the sgl need to be accumulated
20115                         * separately for the request and reply lists.
20116                         * The request is always first, the reply follows.
20117                         */
20118                        switch (cmd) {
20119                        case CMD_GEN_REQUEST64_WQE:
20120                                /* add up the reply sg entries */
20121                                if (bpl->tus.f.bdeFlags == BUFF_TYPE_BDE_64I)
20122                                        inbound++;
20123                                /* first inbound? reset the offset */
20124                                if (inbound == 1)
20125                                        offset = 0;
20126                                bf_set(lpfc_sli4_sge_offset, sgl, offset);
20127                                bf_set(lpfc_sli4_sge_type, sgl,
20128                                        LPFC_SGE_TYPE_DATA);
20129                                offset += bde.tus.f.bdeSize;
20130                                break;
20131                        case CMD_FCP_TRSP64_WQE:
20132                                bf_set(lpfc_sli4_sge_offset, sgl, 0);
20133                                bf_set(lpfc_sli4_sge_type, sgl,
20134                                        LPFC_SGE_TYPE_DATA);
20135                                break;
20136                        case CMD_FCP_TSEND64_WQE:
20137                        case CMD_FCP_TRECEIVE64_WQE:
20138                                bf_set(lpfc_sli4_sge_type, sgl,
20139                                        bpl->tus.f.bdeFlags);
20140                                if (i < 3)
20141                                        offset = 0;
20142                                else
20143                                        offset += bde.tus.f.bdeSize;
20144                                bf_set(lpfc_sli4_sge_offset, sgl, offset);
20145                                break;
20146                        }
20147                        sgl->word2 = cpu_to_le32(sgl->word2);
20148                        bpl++;
20149                        sgl++;
20150                }
20151        } else if (wqe->gen_req.bde.tus.f.bdeFlags == BUFF_TYPE_BDE_64) {
20152                /* The addrHigh and addrLow fields of the BDE have not
20153                 * been byteswapped yet so they need to be swapped
20154                 * before putting them in the sgl.
20155                 */
20156                sgl->addr_hi = cpu_to_le32(wqe->gen_req.bde.addrHigh);
20157                sgl->addr_lo = cpu_to_le32(wqe->gen_req.bde.addrLow);
20158                sgl->word2 = le32_to_cpu(sgl->word2);
20159                bf_set(lpfc_sli4_sge_last, sgl, 1);
20160                sgl->word2 = cpu_to_le32(sgl->word2);
20161                sgl->sge_len = cpu_to_le32(wqe->gen_req.bde.tus.f.bdeSize);
20162        }
20163        return sglq->sli4_xritag;
20164}
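
/*
 * Illustration (hypothetical helper, not used by the driver): the per-SGE
 * conversion pattern from the loop above - copy the already little-endian
 * address words, edit word2 in CPU order, then swap word2 back, and take
 * the SGE length from the CPU-order copy of the BDE descriptor word.
 */
static inline void lpfc_example_bde_to_sge(struct ulp_bde64 *bpl,
                                           struct sli4_sge *sgl, int last)
{
        struct ulp_bde64 bde;

        sgl->addr_hi = bpl->addrHigh;   /* already little endian */
        sgl->addr_lo = bpl->addrLow;

        sgl->word2 = le32_to_cpu(sgl->word2);
        bf_set(lpfc_sli4_sge_last, sgl, last ? 1 : 0);
        sgl->word2 = cpu_to_le32(sgl->word2);

        bde.tus.w = le32_to_cpu(bpl->tus.w);
        sgl->sge_len = cpu_to_le32(bde.tus.f.bdeSize);
}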
20165
20166/**
20167 * lpfc_sli4_issue_wqe - Issue an SLI4 Work Queue Entry (WQE)
20168 * @phba: Pointer to HBA context object.
20169 * @qp: Pointer to HDW queue.
20170 * @pwqe: Pointer to command WQE.
20171 **/
20172int
20173lpfc_sli4_issue_wqe(struct lpfc_hba *phba, struct lpfc_sli4_hdw_queue *qp,
20174                    struct lpfc_iocbq *pwqe)
20175{
20176        union lpfc_wqe128 *wqe = &pwqe->wqe;
20177        struct lpfc_async_xchg_ctx *ctxp;
20178        struct lpfc_queue *wq;
20179        struct lpfc_sglq *sglq;
20180        struct lpfc_sli_ring *pring;
20181        unsigned long iflags;
20182        uint32_t ret = 0;
20183
20184        /* NVME_LS and NVME_LS ABTS requests. */
20185        if (pwqe->iocb_flag & LPFC_IO_NVME_LS) {
20186                pring =  phba->sli4_hba.nvmels_wq->pring;
20187                lpfc_qp_spin_lock_irqsave(&pring->ring_lock, iflags,
20188                                          qp, wq_access);
20189                sglq = __lpfc_sli_get_els_sglq(phba, pwqe);
20190                if (!sglq) {
20191                        spin_unlock_irqrestore(&pring->ring_lock, iflags);
20192                        return WQE_BUSY;
20193                }
20194                pwqe->sli4_lxritag = sglq->sli4_lxritag;
20195                pwqe->sli4_xritag = sglq->sli4_xritag;
20196                if (lpfc_wqe_bpl2sgl(phba, pwqe, sglq) == NO_XRI) {
20197                        spin_unlock_irqrestore(&pring->ring_lock, iflags);
20198                        return WQE_ERROR;
20199                }
20200                bf_set(wqe_xri_tag, &pwqe->wqe.xmit_bls_rsp.wqe_com,
20201                       pwqe->sli4_xritag);
20202                ret = lpfc_sli4_wq_put(phba->sli4_hba.nvmels_wq, wqe);
20203                if (ret) {
20204                        spin_unlock_irqrestore(&pring->ring_lock, iflags);
20205                        return ret;
20206                }
20207
20208                lpfc_sli_ringtxcmpl_put(phba, pring, pwqe);
20209                spin_unlock_irqrestore(&pring->ring_lock, iflags);
20210
20211                lpfc_sli4_poll_eq(qp->hba_eq, LPFC_POLL_FASTPATH);
20212                return 0;
20213        }
20214
20215        /* NVME_FCREQ and NVME_ABTS requests */
20216        if (pwqe->iocb_flag & LPFC_IO_NVME) {
20217                /* Get the IO distribution (hba_wqidx) for WQ assignment. */
20218                wq = qp->io_wq;
20219                pring = wq->pring;
20220
20221                bf_set(wqe_cqid, &wqe->generic.wqe_com, qp->io_cq_map);
20222
20223                lpfc_qp_spin_lock_irqsave(&pring->ring_lock, iflags,
20224                                          qp, wq_access);
20225                ret = lpfc_sli4_wq_put(wq, wqe);
20226                if (ret) {
20227                        spin_unlock_irqrestore(&pring->ring_lock, iflags);
20228                        return ret;
20229                }
20230                lpfc_sli_ringtxcmpl_put(phba, pring, pwqe);
20231                spin_unlock_irqrestore(&pring->ring_lock, iflags);
20232
20233                lpfc_sli4_poll_eq(qp->hba_eq, LPFC_POLL_FASTPATH);
20234                return 0;
20235        }
20236
20237        /* NVMET requests */
20238        if (pwqe->iocb_flag & LPFC_IO_NVMET) {
20239                /* Get the IO distribution (hba_wqidx) for WQ assignment. */
20240                wq = qp->io_wq;
20241                pring = wq->pring;
20242
20243                ctxp = pwqe->context2;
20244                sglq = ctxp->ctxbuf->sglq;
20245                if (pwqe->sli4_xritag == NO_XRI) {
20246                        pwqe->sli4_lxritag = sglq->sli4_lxritag;
20247                        pwqe->sli4_xritag = sglq->sli4_xritag;
20248                }
20249                bf_set(wqe_xri_tag, &pwqe->wqe.xmit_bls_rsp.wqe_com,
20250                       pwqe->sli4_xritag);
20251                bf_set(wqe_cqid, &wqe->generic.wqe_com, qp->io_cq_map);
20252
20253                lpfc_qp_spin_lock_irqsave(&pring->ring_lock, iflags,
20254                                          qp, wq_access);
20255                ret = lpfc_sli4_wq_put(wq, wqe);
20256                if (ret) {
20257                        spin_unlock_irqrestore(&pring->ring_lock, iflags);
20258                        return ret;
20259                }
20260                lpfc_sli_ringtxcmpl_put(phba, pring, pwqe);
20261                spin_unlock_irqrestore(&pring->ring_lock, iflags);
20262
20263                lpfc_sli4_poll_eq(qp->hba_eq, LPFC_POLL_FASTPATH);
20264                return 0;
20265        }
20266        return WQE_ERROR;
20267}
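
/*
 * Usage sketch (hypothetical caller; not part of the driver): an NVME
 * fast-path submitter picks its hardware queue and issues through
 * lpfc_sli4_issue_wqe(); non-zero returns (e.g. WQE_BUSY, WQE_ERROR)
 * are collapsed into a single error here.
 */
static inline int lpfc_example_issue_nvme_wqe(struct lpfc_hba *phba,
                                              struct lpfc_iocbq *pwqe,
                                              u32 hwqid)
{
        struct lpfc_sli4_hdw_queue *qp = &phba->sli4_hba.hdwq[hwqid];

        pwqe->iocb_flag |= LPFC_IO_NVME;
        return lpfc_sli4_issue_wqe(phba, qp, pwqe) ? -EIO : 0;
}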
20268
20269#ifdef LPFC_MXP_STAT
20270/**
20271 * lpfc_snapshot_mxp - Snapshot pbl, pvt and busy count
20272 * @phba: pointer to lpfc hba data structure.
20273 * @hwqid: index of the HWQ this snapshot belongs to.
20274 *
20275 * The purpose of this routine is to take a snapshot of pbl, pvt and busy count
20276 * 15 seconds after a test case starts running.
20277 *
20278 * The user should call lpfc_debugfs_multixripools_write before running a test
20279 * case to clear stat_snapshot_taken, then start the test case. While the test
20280 * case is running, stat_snapshot_taken is incremented by 1 each time this
20281 * routine is called from the heartbeat timer. When stat_snapshot_taken equals
20282 * LPFC_MXP_SNAPSHOT_TAKEN, a snapshot is taken.
20283 **/
20284void lpfc_snapshot_mxp(struct lpfc_hba *phba, u32 hwqid)
20285{
20286        struct lpfc_sli4_hdw_queue *qp;
20287        struct lpfc_multixri_pool *multixri_pool;
20288        struct lpfc_pvt_pool *pvt_pool;
20289        struct lpfc_pbl_pool *pbl_pool;
20290        u32 txcmplq_cnt;
20291
20292        qp = &phba->sli4_hba.hdwq[hwqid];
20293        multixri_pool = qp->p_multixri_pool;
20294        if (!multixri_pool)
20295                return;
20296
20297        if (multixri_pool->stat_snapshot_taken == LPFC_MXP_SNAPSHOT_TAKEN) {
20298                pvt_pool = &qp->p_multixri_pool->pvt_pool;
20299                pbl_pool = &qp->p_multixri_pool->pbl_pool;
20300                txcmplq_cnt = qp->io_wq->pring->txcmplq_cnt;
20301
20302                multixri_pool->stat_pbl_count = pbl_pool->count;
20303                multixri_pool->stat_pvt_count = pvt_pool->count;
20304                multixri_pool->stat_busy_count = txcmplq_cnt;
20305        }
20306
20307        multixri_pool->stat_snapshot_taken++;
20308}
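
/*
 * Example timing (illustrative assumptions: a 5-second heartbeat and
 * LPFC_MXP_SNAPSHOT_TAKEN == 3): after debugfs clears stat_snapshot_taken,
 * the 3rd heartbeat (~15 s into the test) records the pbl/pvt/busy counts;
 * later heartbeats only increment stat_snapshot_taken past the trigger.
 */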
20309#endif
20310
20311/**
20312 * lpfc_adjust_pvt_pool_count - Adjust private pool count
20313 * @phba: pointer to lpfc hba data structure.
20314 * @hwqid: index of the HWQ this pool belongs to.
20315 *
20316 * This routine moves some XRIs from the private to the public pool when the
20317 * private pool is not busy.
20318 **/
20319void lpfc_adjust_pvt_pool_count(struct lpfc_hba *phba, u32 hwqid)
20320{
20321        struct lpfc_multixri_pool *multixri_pool;
20322        u32 io_req_count;
20323        u32 prev_io_req_count;
20324
20325        multixri_pool = phba->sli4_hba.hdwq[hwqid].p_multixri_pool;
20326        if (!multixri_pool)
20327                return;
20328        io_req_count = multixri_pool->io_req_count;
20329        prev_io_req_count = multixri_pool->prev_io_req_count;
20330
20331        if (prev_io_req_count != io_req_count) {
20332                /* Private pool is busy */
20333                multixri_pool->prev_io_req_count = io_req_count;
20334        } else {
20335                /* Private pool is not busy.
20336                 * Move XRIs from private to public pool.
20337                 */
20338                lpfc_move_xri_pvt_to_pbl(phba, hwqid);
20339        }
20340}
20341
20342/**
20343 * lpfc_adjust_high_watermark - Adjust high watermark
20344 * @phba: pointer to lpfc hba data structure.
20345 * @hwqid: index of the HWQ this pool belongs to.
20346 *
20347 * This routine sets the high watermark to the number of outstanding XRIs,
20348 * clamped so that the new value stays between xri_limit/2 and xri_limit.
20349 **/
20350void lpfc_adjust_high_watermark(struct lpfc_hba *phba, u32 hwqid)
20351{
20352        u32 new_watermark;
20353        u32 watermark_max;
20354        u32 watermark_min;
20355        u32 xri_limit;
20356        u32 txcmplq_cnt;
20357        u32 abts_io_bufs;
20358        struct lpfc_multixri_pool *multixri_pool;
20359        struct lpfc_sli4_hdw_queue *qp;
20360
20361        qp = &phba->sli4_hba.hdwq[hwqid];
20362        multixri_pool = qp->p_multixri_pool;
20363        if (!multixri_pool)
20364                return;
20365        xri_limit = multixri_pool->xri_limit;
20366
20367        watermark_max = xri_limit;
20368        watermark_min = xri_limit / 2;
20369
20370        txcmplq_cnt = qp->io_wq->pring->txcmplq_cnt;
20371        abts_io_bufs = qp->abts_scsi_io_bufs;
20372        abts_io_bufs += qp->abts_nvme_io_bufs;
20373
20374        new_watermark = txcmplq_cnt + abts_io_bufs;
20375        new_watermark = min(watermark_max, new_watermark);
20376        new_watermark = max(watermark_min, new_watermark);
20377        multixri_pool->pvt_pool.high_watermark = new_watermark;
20378
20379#ifdef LPFC_MXP_STAT
20380        multixri_pool->stat_max_hwm = max(multixri_pool->stat_max_hwm,
20381                                          new_watermark);
20382#endif
20383}
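
/*
 * Illustration (hypothetical numbers): with xri_limit = 512 the watermark
 * is clamped to [256, 512]; e.g. txcmplq_cnt + abts_io_bufs = 700 yields
 * 512, 100 yields 256, and 300 is kept as-is. A stand-alone sketch of the
 * same clamp:
 */
static inline u32 lpfc_example_clamp_hwm(u32 outstanding, u32 xri_limit)
{
        u32 hwm = min(xri_limit, outstanding);

        return max(xri_limit / 2, hwm);
}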
20384
20385/**
20386 * lpfc_move_xri_pvt_to_pbl - Move some XRIs from private to public pool
20387 * @phba: pointer to lpfc hba data structure.
20388 * @hwqid: index of the HWQ this pool belongs to.
20389 *
20390 * This routine is called from the heartbeat timer when pvt_pool is idle.
20391 * All free XRIs are moved from the private to the public pool on hwqid in
20392 * two steps: the first step moves (all - low_watermark) XRIs, and the
20393 * second step moves the rest of the XRIs.
20394 **/
20395void lpfc_move_xri_pvt_to_pbl(struct lpfc_hba *phba, u32 hwqid)
20396{
20397        struct lpfc_pbl_pool *pbl_pool;
20398        struct lpfc_pvt_pool *pvt_pool;
20399        struct lpfc_sli4_hdw_queue *qp;
20400        struct lpfc_io_buf *lpfc_ncmd;
20401        struct lpfc_io_buf *lpfc_ncmd_next;
20402        unsigned long iflag;
20403        struct list_head tmp_list;
20404        u32 tmp_count;
20405
20406        qp = &phba->sli4_hba.hdwq[hwqid];
20407        pbl_pool = &qp->p_multixri_pool->pbl_pool;
20408        pvt_pool = &qp->p_multixri_pool->pvt_pool;
20409        tmp_count = 0;
20410
20411        lpfc_qp_spin_lock_irqsave(&pbl_pool->lock, iflag, qp, mv_to_pub_pool);
20412        lpfc_qp_spin_lock(&pvt_pool->lock, qp, mv_from_pvt_pool);
20413
20414        if (pvt_pool->count > pvt_pool->low_watermark) {
20415                /* Step 1: move (all - low_watermark) from pvt_pool
20416                 * to pbl_pool
20417                 */
20418
20419                /* Move low watermark of bufs from pvt_pool to tmp_list */
20420                INIT_LIST_HEAD(&tmp_list);
20421                list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
20422                                         &pvt_pool->list, list) {
20423                        list_move_tail(&lpfc_ncmd->list, &tmp_list);
20424                        tmp_count++;
20425                        if (tmp_count >= pvt_pool->low_watermark)
20426                                break;
20427                }
20428
20429                /* Move all bufs from pvt_pool to pbl_pool */
20430                list_splice_init(&pvt_pool->list, &pbl_pool->list);
20431
20432                /* Move all bufs from tmp_list to pvt_pool */
20433                list_splice(&tmp_list, &pvt_pool->list);
20434
20435                pbl_pool->count += (pvt_pool->count - tmp_count);
20436                pvt_pool->count = tmp_count;
20437        } else {
20438                /* Step 2: move the rest from pvt_pool to pbl_pool */
20439                list_splice_init(&pvt_pool->list, &pbl_pool->list);
20440                pbl_pool->count += pvt_pool->count;
20441                pvt_pool->count = 0;
20442        }
20443
20444        spin_unlock(&pvt_pool->lock);
20445        spin_unlock_irqrestore(&pbl_pool->lock, iflag);
20446}
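
/*
 * Example (hypothetical counts): with low_watermark = 16 and
 * pvt_pool->count = 40, step 1 keeps the first 16 bufs in pvt_pool and
 * moves the other 24 to pbl_pool; if the pool is still idle at the next
 * heartbeat, step 2 moves the remaining 16, leaving pvt_pool->count = 0.
 */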
20447
20448/**
20449 * _lpfc_move_xri_pbl_to_pvt - Move some XRIs from public to private pool
20450 * @phba: pointer to lpfc hba data structure
20451 * @qp: pointer to HDW queue
20452 * @pbl_pool: specified public free XRI pool
20453 * @pvt_pool: specified private free XRI pool
20454 * @count: number of XRIs to move
20455 *
20456 * This routine tries to move some free common bufs from the specified pbl_pool
20457 * to the specified pvt_pool. It might move fewer than count XRIs if there are
20458 * not enough in the public pool.
20459 *
20460 * Return:
20461 *   true - if XRIs are successfully moved from the specified pbl_pool to the
20462 *          specified pvt_pool
20463 *   false - if the specified pbl_pool is empty or locked by someone else
20464 **/
20465static bool
20466_lpfc_move_xri_pbl_to_pvt(struct lpfc_hba *phba, struct lpfc_sli4_hdw_queue *qp,
20467                          struct lpfc_pbl_pool *pbl_pool,
20468                          struct lpfc_pvt_pool *pvt_pool, u32 count)
20469{
20470        struct lpfc_io_buf *lpfc_ncmd;
20471        struct lpfc_io_buf *lpfc_ncmd_next;
20472        unsigned long iflag;
20473        int ret;
20474
20475        ret = spin_trylock_irqsave(&pbl_pool->lock, iflag);
20476        if (ret) {
20477                if (pbl_pool->count) {
20478                        /* Move a batch of XRIs from public to private pool */
20479                        lpfc_qp_spin_lock(&pvt_pool->lock, qp, mv_to_pvt_pool);
20480                        list_for_each_entry_safe(lpfc_ncmd,
20481                                                 lpfc_ncmd_next,
20482                                                 &pbl_pool->list,
20483                                                 list) {
20484                                list_move_tail(&lpfc_ncmd->list,
20485                                               &pvt_pool->list);
20486                                pvt_pool->count++;
20487                                pbl_pool->count--;
20488                                count--;
20489                                if (count == 0)
20490                                        break;
20491                        }
20492
20493                        spin_unlock(&pvt_pool->lock);
20494                        spin_unlock_irqrestore(&pbl_pool->lock, iflag);
20495                        return true;
20496                }
20497                spin_unlock_irqrestore(&pbl_pool->lock, iflag);
20498        }
20499
20500        return false;
20501}
20502
20503/**
20504 * lpfc_move_xri_pbl_to_pvt - Move some XRIs from public to private pool
20505 * @phba: pointer to lpfc hba data structure.
20506 * @hwqid: index of the HWQ this pool belongs to.
20507 * @count: number of XRIs to move
20508 *
20509 * This routine tries to find some free common bufs in one of the public pools
20510 * using a round-robin method. The search starts with the local hwqid, then
20511 * continues from the HWQ found last time (rrb_next_hwqid). Once a public pool
20512 * is found, a batch of free common bufs is moved to the private pool on hwqid.
20513 * It might move fewer than count XRIs if there are not enough in the public pool.
20514 **/
20515void lpfc_move_xri_pbl_to_pvt(struct lpfc_hba *phba, u32 hwqid, u32 count)
20516{
20517        struct lpfc_multixri_pool *multixri_pool;
20518        struct lpfc_multixri_pool *next_multixri_pool;
20519        struct lpfc_pvt_pool *pvt_pool;
20520        struct lpfc_pbl_pool *pbl_pool;
20521        struct lpfc_sli4_hdw_queue *qp;
20522        u32 next_hwqid;
20523        u32 hwq_count;
20524        int ret;
20525
20526        qp = &phba->sli4_hba.hdwq[hwqid];
20527        multixri_pool = qp->p_multixri_pool;
20528        pvt_pool = &multixri_pool->pvt_pool;
20529        pbl_pool = &multixri_pool->pbl_pool;
20530
20531        /* Check if local pbl_pool is available */
20532        ret = _lpfc_move_xri_pbl_to_pvt(phba, qp, pbl_pool, pvt_pool, count);
20533        if (ret) {
20534#ifdef LPFC_MXP_STAT
20535                multixri_pool->local_pbl_hit_count++;
20536#endif
20537                return;
20538        }
20539
20540        hwq_count = phba->cfg_hdw_queue;
20541
20542        /* Resume the search from the hwqid saved last time */
20543        next_hwqid = multixri_pool->rrb_next_hwqid;
20544
20545        do {
20546                /* Go to next hwq */
20547                next_hwqid = (next_hwqid + 1) % hwq_count;
20548
20549                next_multixri_pool =
20550                        phba->sli4_hba.hdwq[next_hwqid].p_multixri_pool;
20551                pbl_pool = &next_multixri_pool->pbl_pool;
20552
20553                /* Check if the public free xri pool is available */
20554                ret = _lpfc_move_xri_pbl_to_pvt(
20555                        phba, qp, pbl_pool, pvt_pool, count);
20556
20557                /* Exit while-loop on success or once all hwqids are checked */
20558        } while (!ret && next_hwqid != multixri_pool->rrb_next_hwqid);
20559
20560        /* Starting point for the next time */
20561        multixri_pool->rrb_next_hwqid = next_hwqid;
20562
20563        if (!ret) {
20564                /* stats: all public pools are empty */
20565                multixri_pool->pbl_empty_count++;
20566        }
20567
20568#ifdef LPFC_MXP_STAT
20569        if (ret) {
20570                if (next_hwqid == hwqid)
20571                        multixri_pool->local_pbl_hit_count++;
20572                else
20573                        multixri_pool->other_pbl_hit_count++;
20574        }
20575#endif
20576}
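
/*
 * Example (hypothetical): with cfg_hdw_queue = 4 and rrb_next_hwqid = 2,
 * a failed local attempt probes the public pools in the order 3, 0, 1, 2
 * and stops at the first pool that is non-empty and not locked;
 * rrb_next_hwqid then records where to resume the search next time.
 */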
20577
20578/**
20579 * lpfc_keep_pvt_pool_above_lowwm - Keep pvt_pool above low watermark
20580 * @phba: pointer to lpfc hba data structure.
20581 * @hwqid: index of the HWQ this pool belongs to.
20582 *
20583 * This routine gets a batch of XRIs from the pbl_pool if the pvt_pool count
20584 * is below the low watermark.
20585 **/
20586void lpfc_keep_pvt_pool_above_lowwm(struct lpfc_hba *phba, u32 hwqid)
20587{
20588        struct lpfc_multixri_pool *multixri_pool;
20589        struct lpfc_pvt_pool *pvt_pool;
20590
20591        multixri_pool = phba->sli4_hba.hdwq[hwqid].p_multixri_pool;
20592        pvt_pool = &multixri_pool->pvt_pool;
20593
20594        if (pvt_pool->count < pvt_pool->low_watermark)
20595                lpfc_move_xri_pbl_to_pvt(phba, hwqid, XRI_BATCH);
20596}
20597
20598/**
20599 * lpfc_release_io_buf - Return one IO buf back to free pool
20600 * @phba: pointer to lpfc hba data structure.
20601 * @lpfc_ncmd: IO buf to be returned.
20602 * @qp: pointer to the HWQ this buf belongs to.
20603 *
20604 * This routine returns one IO buf back to the free pool. If this is an urgent
20605 * IO, the IO buf is returned to the expedite pool. If cfg_xri_rebalancing==1,
20606 * the IO buf is returned to pbl_pool or pvt_pool based on watermark and
20607 * xri_limit.  If cfg_xri_rebalancing==0, the IO buf is returned to
20608 * lpfc_io_buf_list_put.
20609 **/
20610void lpfc_release_io_buf(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_ncmd,
20611                         struct lpfc_sli4_hdw_queue *qp)
20612{
20613        unsigned long iflag;
20614        struct lpfc_pbl_pool *pbl_pool;
20615        struct lpfc_pvt_pool *pvt_pool;
20616        struct lpfc_epd_pool *epd_pool;
20617        u32 txcmplq_cnt;
20618        u32 xri_owned;
20619        u32 xri_limit;
20620        u32 abts_io_bufs;
20621
20622        /* MUST zero fields if buffer is reused by another protocol */
20623        lpfc_ncmd->nvmeCmd = NULL;
20624        lpfc_ncmd->cur_iocbq.wqe_cmpl = NULL;
20625        lpfc_ncmd->cur_iocbq.iocb_cmpl = NULL;
20626
20627        if (phba->cfg_xpsgl && !phba->nvmet_support &&
20628            !list_empty(&lpfc_ncmd->dma_sgl_xtra_list))
20629                lpfc_put_sgl_per_hdwq(phba, lpfc_ncmd);
20630
20631        if (!list_empty(&lpfc_ncmd->dma_cmd_rsp_list))
20632                lpfc_put_cmd_rsp_buf_per_hdwq(phba, lpfc_ncmd);
20633
20634        if (phba->cfg_xri_rebalancing) {
20635                if (lpfc_ncmd->expedite) {
20636                        /* Return to expedite pool */
20637                        epd_pool = &phba->epd_pool;
20638                        spin_lock_irqsave(&epd_pool->lock, iflag);
20639                        list_add_tail(&lpfc_ncmd->list, &epd_pool->list);
20640                        epd_pool->count++;
20641                        spin_unlock_irqrestore(&epd_pool->lock, iflag);
20642                        return;
20643                }
20644
20645                /* Avoid invalid access if an IO sneaks in and is being rejected
20646                 * just _after_ xri pools are destroyed in lpfc_offline.
20647                 * Nothing much can be done at this point.
20648                 */
20649                if (!qp->p_multixri_pool)
20650                        return;
20651
20652                pbl_pool = &qp->p_multixri_pool->pbl_pool;
20653                pvt_pool = &qp->p_multixri_pool->pvt_pool;
20654
20655                txcmplq_cnt = qp->io_wq->pring->txcmplq_cnt;
20656                abts_io_bufs = qp->abts_scsi_io_bufs;
20657                abts_io_bufs += qp->abts_nvme_io_bufs;
20658
20659                xri_owned = pvt_pool->count + txcmplq_cnt + abts_io_bufs;
20660                xri_limit = qp->p_multixri_pool->xri_limit;
20661
20662#ifdef LPFC_MXP_STAT
20663                if (xri_owned <= xri_limit)
20664                        qp->p_multixri_pool->below_limit_count++;
20665                else
20666                        qp->p_multixri_pool->above_limit_count++;
20667#endif
20668
20669                /* XRI goes to either public or private free xri pool
20670                 *     based on watermark and xri_limit
20671                 */
20672                if ((pvt_pool->count < pvt_pool->low_watermark) ||
20673                    (xri_owned < xri_limit &&
20674                     pvt_pool->count < pvt_pool->high_watermark)) {
20675                        lpfc_qp_spin_lock_irqsave(&pvt_pool->lock, iflag,
20676                                                  qp, free_pvt_pool);
20677                        list_add_tail(&lpfc_ncmd->list,
20678                                      &pvt_pool->list);
20679                        pvt_pool->count++;
20680                        spin_unlock_irqrestore(&pvt_pool->lock, iflag);
20681                } else {
20682                        lpfc_qp_spin_lock_irqsave(&pbl_pool->lock, iflag,
20683                                                  qp, free_pub_pool);
20684                        list_add_tail(&lpfc_ncmd->list,
20685                                      &pbl_pool->list);
20686                        pbl_pool->count++;
20687                        spin_unlock_irqrestore(&pbl_pool->lock, iflag);
20688                }
20689        } else {
20690                lpfc_qp_spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag,
20691                                          qp, free_xri);
20692                list_add_tail(&lpfc_ncmd->list,
20693                              &qp->lpfc_io_buf_list_put);
20694                qp->put_io_bufs++;
20695                spin_unlock_irqrestore(&qp->io_buf_list_put_lock,
20696                                       iflag);
20697        }
20698}
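
/*
 * Example (hypothetical numbers): with low_watermark = 16,
 * high_watermark = 64 and xri_limit = 128, a freed buf goes back to
 * pvt_pool when pvt_pool->count < 16, or when xri_owned < 128 and
 * pvt_pool->count < 64; otherwise it lands in pbl_pool.
 */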
20699
20700/**
20701 * lpfc_get_io_buf_from_private_pool - Get one free IO buf from private pool
20702 * @phba: pointer to lpfc hba data structure.
20703 * @qp: pointer to HDW queue
20704 * @pvt_pool: pointer to private pool data structure.
20705 * @ndlp: pointer to lpfc nodelist data structure.
20706 *
20707 * This routine tries to get one free IO buf from private pool.
20708 *
20709 * Return:
20710 *   pointer to one free IO buf - if private pool is not empty
20711 *   NULL - if private pool is empty
20712 **/
20713static struct lpfc_io_buf *
20714lpfc_get_io_buf_from_private_pool(struct lpfc_hba *phba,
20715                                  struct lpfc_sli4_hdw_queue *qp,
20716                                  struct lpfc_pvt_pool *pvt_pool,
20717                                  struct lpfc_nodelist *ndlp)
20718{
20719        struct lpfc_io_buf *lpfc_ncmd;
20720        struct lpfc_io_buf *lpfc_ncmd_next;
20721        unsigned long iflag;
20722
20723        lpfc_qp_spin_lock_irqsave(&pvt_pool->lock, iflag, qp, alloc_pvt_pool);
20724        list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
20725                                 &pvt_pool->list, list) {
20726                if (lpfc_test_rrq_active(
20727                        phba, ndlp, lpfc_ncmd->cur_iocbq.sli4_lxritag))
20728                        continue;
20729                list_del(&lpfc_ncmd->list);
20730                pvt_pool->count--;
20731                spin_unlock_irqrestore(&pvt_pool->lock, iflag);
20732                return lpfc_ncmd;
20733        }
20734        spin_unlock_irqrestore(&pvt_pool->lock, iflag);
20735
20736        return NULL;
20737}
20738
20739/**
20740 * lpfc_get_io_buf_from_expedite_pool - Get one free IO buf from expedite pool
20741 * @phba: pointer to lpfc hba data structure.
20742 *
20743 * This routine tries to get one free IO buf from expedite pool.
20744 *
20745 * Return:
20746 *   pointer to one free IO buf - if expedite pool is not empty
20747 *   NULL - if expedite pool is empty
20748 **/
20749static struct lpfc_io_buf *
20750lpfc_get_io_buf_from_expedite_pool(struct lpfc_hba *phba)
20751{
20752        struct lpfc_io_buf *lpfc_ncmd;
20753        struct lpfc_io_buf *lpfc_ncmd_next;
20754        unsigned long iflag;
20755        struct lpfc_epd_pool *epd_pool;
20756
20757        epd_pool = &phba->epd_pool;
20758        lpfc_ncmd = NULL;
20759
20760        spin_lock_irqsave(&epd_pool->lock, iflag);
20761        if (epd_pool->count > 0) {
20762                list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
20763                                         &epd_pool->list, list) {
20764                        list_del(&lpfc_ncmd->list);
20765                        epd_pool->count--;
20766                        break;
20767                }
20768        }
20769        spin_unlock_irqrestore(&epd_pool->lock, iflag);
20770
20771        return lpfc_ncmd;
20772}
20773
20774/**
20775 * lpfc_get_io_buf_from_multixri_pools - Get one free IO buf
20776 * @phba: pointer to lpfc hba data structure.
20777 * @ndlp: pointer to lpfc nodelist data structure.
20778 * @hwqid: index of the HWQ this pool belongs to
20779 * @expedite: 1 means this request is urgent.
20780 *
20781 * This routine will do the following actions and then return a pointer to
20782 * one free IO buf.
20783 *
20784 * 1. If the private free xri pool is empty, move some XRIs from the public
20785 *    to the private pool.
20786 * 2. Get one XRI from private free xri pool.
20787 * 3. If we fail to get one from pvt_pool and this is an expedite request,
20788 *    get one free xri from expedite pool.
20789 *
20790 * Note: ndlp is only used on SCSI side for RRQ testing.
20791 *       The caller should pass NULL for ndlp on NVME side.
20792 *
20793 * Return:
20794 *   pointer to one free IO buf - on success
20795 *   NULL - if no free IO buf is available
20796 **/
20797static struct lpfc_io_buf *
20798lpfc_get_io_buf_from_multixri_pools(struct lpfc_hba *phba,
20799                                    struct lpfc_nodelist *ndlp,
20800                                    int hwqid, int expedite)
20801{
20802        struct lpfc_sli4_hdw_queue *qp;
20803        struct lpfc_multixri_pool *multixri_pool;
20804        struct lpfc_pvt_pool *pvt_pool;
20805        struct lpfc_io_buf *lpfc_ncmd;
20806
20807        qp = &phba->sli4_hba.hdwq[hwqid];
20808        lpfc_ncmd = NULL;
20809        multixri_pool = qp->p_multixri_pool;
20810        pvt_pool = &multixri_pool->pvt_pool;
20811        multixri_pool->io_req_count++;
20812
20813        /* If pvt_pool is empty, move some XRIs from public to private pool */
20814        if (pvt_pool->count == 0)
20815                lpfc_move_xri_pbl_to_pvt(phba, hwqid, XRI_BATCH);
20816
20817        /* Get one XRI from private free xri pool */
20818        lpfc_ncmd = lpfc_get_io_buf_from_private_pool(phba, qp, pvt_pool, ndlp);
20819
20820        if (lpfc_ncmd) {
20821                lpfc_ncmd->hdwq = qp;
20822                lpfc_ncmd->hdwq_no = hwqid;
20823        } else if (expedite) {
20824                /* If we fail to get one from pvt_pool and this is an expedite
20825                 * request, get one free xri from expedite pool.
20826                 */
20827                lpfc_ncmd = lpfc_get_io_buf_from_expedite_pool(phba);
20828        }
20829
20830        return lpfc_ncmd;
20831}
20832
20833static inline struct lpfc_io_buf *
20834lpfc_io_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp, int idx)
20835{
20836        struct lpfc_sli4_hdw_queue *qp;
20837        struct lpfc_io_buf *lpfc_cmd, *lpfc_cmd_next;
20838
20839        qp = &phba->sli4_hba.hdwq[idx];
20840        list_for_each_entry_safe(lpfc_cmd, lpfc_cmd_next,
20841                                 &qp->lpfc_io_buf_list_get, list) {
20842                if (lpfc_test_rrq_active(phba, ndlp,
20843                                         lpfc_cmd->cur_iocbq.sli4_lxritag))
20844                        continue;
20845
20846                if (lpfc_cmd->flags & LPFC_SBUF_NOT_POSTED)
20847                        continue;
20848
20849                list_del_init(&lpfc_cmd->list);
20850                qp->get_io_bufs--;
20851                lpfc_cmd->hdwq = qp;
20852                lpfc_cmd->hdwq_no = idx;
20853                return lpfc_cmd;
20854        }
20855        return NULL;
20856}
20857
20858/**
20859 * lpfc_get_io_buf - Get one IO buffer from free pool
20860 * @phba: The HBA for which this call is being executed.
20861 * @ndlp: pointer to lpfc nodelist data structure.
20862 * @hwqid: index of the HWQ this request maps to
20863 * @expedite: 1 means this request is urgent.
20864 *
20865 * This routine gets one IO buffer from the free pool. If cfg_xri_rebalancing==1,
20866 * it removes an IO buffer from the multiXRI pools. If cfg_xri_rebalancing==0, it
20867 * removes an IO buffer from the head of the @hwqid io_buf_list and returns it.
20868 *
20869 * Note: ndlp is only used on SCSI side for RRQ testing.
20870 *       The caller should pass NULL for ndlp on NVME side.
20871 *
20872 * Return codes:
20873 *   NULL - Error
20874 *   Pointer to lpfc_io_buf - Success
20875 **/
20876struct lpfc_io_buf *lpfc_get_io_buf(struct lpfc_hba *phba,
20877                                    struct lpfc_nodelist *ndlp,
20878                                    u32 hwqid, int expedite)
20879{
20880        struct lpfc_sli4_hdw_queue *qp;
20881        unsigned long iflag;
20882        struct lpfc_io_buf *lpfc_cmd;
20883
20884        qp = &phba->sli4_hba.hdwq[hwqid];
20885        lpfc_cmd = NULL;
20886
20887        if (phba->cfg_xri_rebalancing)
20888                lpfc_cmd = lpfc_get_io_buf_from_multixri_pools(
20889                        phba, ndlp, hwqid, expedite);
20890        else {
20891                lpfc_qp_spin_lock_irqsave(&qp->io_buf_list_get_lock, iflag,
20892                                          qp, alloc_xri_get);
20893                if (qp->get_io_bufs > LPFC_NVME_EXPEDITE_XRICNT || expedite)
20894                        lpfc_cmd = lpfc_io_buf(phba, ndlp, hwqid);
20895                if (!lpfc_cmd) {
20896                        lpfc_qp_spin_lock(&qp->io_buf_list_put_lock,
20897                                          qp, alloc_xri_put);
20898                        list_splice(&qp->lpfc_io_buf_list_put,
20899                                    &qp->lpfc_io_buf_list_get);
20900                        qp->get_io_bufs += qp->put_io_bufs;
20901                        INIT_LIST_HEAD(&qp->lpfc_io_buf_list_put);
20902                        qp->put_io_bufs = 0;
20903                        spin_unlock(&qp->io_buf_list_put_lock);
20904                        if (qp->get_io_bufs > LPFC_NVME_EXPEDITE_XRICNT ||
20905                            expedite)
20906                                lpfc_cmd = lpfc_io_buf(phba, ndlp, hwqid);
20907                }
20908                spin_unlock_irqrestore(&qp->io_buf_list_get_lock, iflag);
20909        }
20910
20911        return lpfc_cmd;
20912}
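
/*
 * Usage sketch (hypothetical; lpfc_example_io_buf_cycle() is not a real
 * driver function): reserve a buffer, back off if none is available, and
 * hand it back on completion via lpfc_release_io_buf().
 */
static inline int lpfc_example_io_buf_cycle(struct lpfc_hba *phba, u32 hwqid)
{
        struct lpfc_io_buf *buf;

        /* NULL ndlp: RRQ filtering only applies on the SCSI side. */
        buf = lpfc_get_io_buf(phba, NULL, hwqid, 0);
        if (!buf)
                return -EBUSY;

        /* ... a real caller would build and issue the command here ... */

        lpfc_release_io_buf(phba, buf, buf->hdwq);
        return 0;
}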
20913
20914/**
20915 * lpfc_get_sgl_per_hdwq - Get one SGL chunk from hdwq's pool
20916 * @phba: The HBA for which this call is being executed.
20917 * @lpfc_buf: IO buf structure to append the SGL chunk
20918 *
20919 * This routine gets one SGL chunk buffer from hdwq's SGL chunk pool,
20920 * and will allocate an SGL chunk if the pool is empty.
20921 *
20922 * Return codes:
20923 *   NULL - Error
20924 *   Pointer to sli4_hybrid_sgl - Success
20925 **/
20926struct sli4_hybrid_sgl *
20927lpfc_get_sgl_per_hdwq(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_buf)
20928{
20929        struct sli4_hybrid_sgl *list_entry = NULL;
20930        struct sli4_hybrid_sgl *tmp = NULL;
20931        struct sli4_hybrid_sgl *allocated_sgl = NULL;
20932        struct lpfc_sli4_hdw_queue *hdwq = lpfc_buf->hdwq;
20933        struct list_head *buf_list = &hdwq->sgl_list;
20934        unsigned long iflags;
20935
20936        spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
20937
20938        if (likely(!list_empty(buf_list))) {
20939                /* break off 1 chunk from the sgl_list */
20940                list_for_each_entry_safe(list_entry, tmp,
20941                                         buf_list, list_node) {
20942                        list_move_tail(&list_entry->list_node,
20943                                       &lpfc_buf->dma_sgl_xtra_list);
20944                        break;
20945                }
20946        } else {
20947                /* allocate more */
20948                spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
20949                tmp = kmalloc_node(sizeof(*tmp), GFP_ATOMIC,
20950                                   cpu_to_node(hdwq->io_wq->chann));
20951                if (!tmp) {
20952                        lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
20953                                        "8353 error kmalloc memory for HDWQ "
20954                                        "%d %s\n",
20955                                        lpfc_buf->hdwq_no, __func__);
20956                        return NULL;
20957                }
20958
20959                tmp->dma_sgl = dma_pool_alloc(phba->lpfc_sg_dma_buf_pool,
20960                                              GFP_ATOMIC, &tmp->dma_phys_sgl);
20961                if (!tmp->dma_sgl) {
20962                        lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
20963                                        "8354 error pool_alloc memory for HDWQ "
20964                                        "%d %s\n",
20965                                        lpfc_buf->hdwq_no, __func__);
20966                        kfree(tmp);
20967                        return NULL;
20968                }
20969
20970                spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
20971                list_add_tail(&tmp->list_node, &lpfc_buf->dma_sgl_xtra_list);
20972        }
20973
20974        allocated_sgl = list_last_entry(&lpfc_buf->dma_sgl_xtra_list,
20975                                        struct sli4_hybrid_sgl,
20976                                        list_node);
20977
20978        spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
20979
20980        return allocated_sgl;
20981}
20982
20983/**
20984 * lpfc_put_sgl_per_hdwq - Put one SGL chunk into hdwq pool
20985 * @phba: The HBA for which this call is being executed.
20986 * @lpfc_buf: IO buf structure with the SGL chunk
20987 *
20988 * This routine puts one SGL chunk buffer into hdwq's SGL chunk pool.
20989 *
20990 * Return codes:
20991 *   0 - Success
20992 *   -EINVAL - Error
20993 **/
20994int
20995lpfc_put_sgl_per_hdwq(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_buf)
20996{
20997        int rc = 0;
20998        struct sli4_hybrid_sgl *list_entry = NULL;
20999        struct sli4_hybrid_sgl *tmp = NULL;
21000        struct lpfc_sli4_hdw_queue *hdwq = lpfc_buf->hdwq;
21001        struct list_head *buf_list = &hdwq->sgl_list;
21002        unsigned long iflags;
21003
21004        spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
21005
21006        if (likely(!list_empty(&lpfc_buf->dma_sgl_xtra_list))) {
21007                list_for_each_entry_safe(list_entry, tmp,
21008                                         &lpfc_buf->dma_sgl_xtra_list,
21009                                         list_node) {
21010                        list_move_tail(&list_entry->list_node,
21011                                       buf_list);
21012                }
21013        } else {
21014                rc = -EINVAL;
21015        }
21016
21017        spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
21018        return rc;
21019}
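
/*
 * Usage sketch (hypothetical): extend an IO with an extra SGL chunk and
 * return it when done; in between, the chunk stays linked on
 * lpfc_buf->dma_sgl_xtra_list.
 */
static inline int lpfc_example_sgl_chunk_cycle(struct lpfc_hba *phba,
                                               struct lpfc_io_buf *lpfc_buf)
{
        struct sli4_hybrid_sgl *sgl;

        sgl = lpfc_get_sgl_per_hdwq(phba, lpfc_buf);
        if (!sgl)
                return -ENOMEM;

        /* ... point the WQE at sgl->dma_phys_sgl and build sgl->dma_sgl ... */

        return lpfc_put_sgl_per_hdwq(phba, lpfc_buf);
}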
21020
21021/**
21022 * lpfc_free_sgl_per_hdwq - Free all SGL chunks of hdwq pool
21023 * @phba: phba object
21024 * @hdwq: hdwq to cleanup sgl buff resources on
21025 *
21026 * This routine frees all SGL chunks of hdwq SGL chunk pool.
21027 *
21028 * Return codes:
21029 *   None
21030 **/
21031void
21032lpfc_free_sgl_per_hdwq(struct lpfc_hba *phba,
21033                       struct lpfc_sli4_hdw_queue *hdwq)
21034{
21035        struct list_head *buf_list = &hdwq->sgl_list;
21036        struct sli4_hybrid_sgl *list_entry = NULL;
21037        struct sli4_hybrid_sgl *tmp = NULL;
21038        unsigned long iflags;
21039
21040        spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
21041
21042        /* Free sgl pool */
21043        list_for_each_entry_safe(list_entry, tmp,
21044                                 buf_list, list_node) {
21045                dma_pool_free(phba->lpfc_sg_dma_buf_pool,
21046                              list_entry->dma_sgl,
21047                              list_entry->dma_phys_sgl);
21048                list_del(&list_entry->list_node);
21049                kfree(list_entry);
21050        }
21051
21052        spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
21053}
21054
21055/**
21056 * lpfc_get_cmd_rsp_buf_per_hdwq - Get one CMD/RSP buffer from hdwq
21057 * @phba: The HBA for which this call is being executed.
21058 * @lpfc_buf: IO buf structure to attach the CMD/RSP buffer
21059 *
21060 * This routine gets one CMD/RSP buffer from hdwq's CMD/RSP pool,
21061 * and will allocate a CMD/RSP buffer if the pool is empty.
21062 *
21063 * Return codes:
21064 *   NULL - Error
21065 *   Pointer to fcp_cmd_rsp_buf - Success
21066 **/
21067struct fcp_cmd_rsp_buf *
21068lpfc_get_cmd_rsp_buf_per_hdwq(struct lpfc_hba *phba,
21069                              struct lpfc_io_buf *lpfc_buf)
21070{
21071        struct fcp_cmd_rsp_buf *list_entry = NULL;
21072        struct fcp_cmd_rsp_buf *tmp = NULL;
21073        struct fcp_cmd_rsp_buf *allocated_buf = NULL;
21074        struct lpfc_sli4_hdw_queue *hdwq = lpfc_buf->hdwq;
21075        struct list_head *buf_list = &hdwq->cmd_rsp_buf_list;
21076        unsigned long iflags;
21077
21078        spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
21079
21080        if (likely(!list_empty(buf_list))) {
21081                /* break off 1 chunk from the list */
21082                list_for_each_entry_safe(list_entry, tmp,
21083                                         buf_list,
21084                                         list_node) {
21085                        list_move_tail(&list_entry->list_node,
21086                                       &lpfc_buf->dma_cmd_rsp_list);
21087                        break;
21088                }
21089        } else {
21090                /* allocate more */
21091                spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
21092                tmp = kmalloc_node(sizeof(*tmp), GFP_ATOMIC,
21093                                   cpu_to_node(hdwq->io_wq->chann));
21094                if (!tmp) {
21095                        lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
21096                                        "8355 error kmalloc memory for HDWQ "
21097                                        "%d %s\n",
21098                                        lpfc_buf->hdwq_no, __func__);
21099                        return NULL;
21100                }
21101
21102                tmp->fcp_cmnd = dma_pool_alloc(phba->lpfc_cmd_rsp_buf_pool,
21103                                                GFP_ATOMIC,
21104                                                &tmp->fcp_cmd_rsp_dma_handle);
21105
21106                if (!tmp->fcp_cmnd) {
21107                        lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
21108                                        "8356 error pool_alloc memory for HDWQ "
21109                                        "%d %s\n",
21110                                        lpfc_buf->hdwq_no, __func__);
21111                        kfree(tmp);
21112                        return NULL;
21113                }
21114
21115                tmp->fcp_rsp = (struct fcp_rsp *)((uint8_t *)tmp->fcp_cmnd +
21116                                sizeof(struct fcp_cmnd));
21117
21118                spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
21119                list_add_tail(&tmp->list_node, &lpfc_buf->dma_cmd_rsp_list);
21120        }
21121
21122        allocated_buf = list_last_entry(&lpfc_buf->dma_cmd_rsp_list,
21123                                        struct fcp_cmd_rsp_buf,
21124                                        list_node);
21125
21126        spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
21127
21128        return allocated_buf;
21129}
21130
21131/**
21132 * lpfc_put_cmd_rsp_buf_per_hdwq - Put one CMD/RSP buffer into hdwq pool
21133 * @phba: The HBA for which this call is being executed.
21134 * @lpfc_buf: IO buf structure with the CMD/RSP buf
21135 *
21136 * This routine puts one CMD/RSP buffer back into the hdwq's CMD/RSP pool.
21137 *
21138 * Return codes:
21139 *   0 - Success
21140 *   -EINVAL - Error
21141 **/
21142int
21143lpfc_put_cmd_rsp_buf_per_hdwq(struct lpfc_hba *phba,
21144                              struct lpfc_io_buf *lpfc_buf)
21145{
21146        int rc = 0;
21147        struct fcp_cmd_rsp_buf *list_entry = NULL;
21148        struct fcp_cmd_rsp_buf *tmp = NULL;
21149        struct lpfc_sli4_hdw_queue *hdwq = lpfc_buf->hdwq;
21150        struct list_head *buf_list = &hdwq->cmd_rsp_buf_list;
21151        unsigned long iflags;
21152
21153        spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
21154
21155        if (likely(!list_empty(&lpfc_buf->dma_cmd_rsp_list))) {
21156                list_for_each_entry_safe(list_entry, tmp,
21157                                         &lpfc_buf->dma_cmd_rsp_list,
21158                                         list_node) {
21159                        list_move_tail(&list_entry->list_node,
21160                                       buf_list);
21161                }
21162        } else {
21163                rc = -EINVAL;
21164        }
21165
21166        spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
21167        return rc;
21168}
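
/*
 * Usage sketch (hypothetical): borrow a DMA-able CMD/RSP pair for one
 * command and return it to the hdwq pool afterwards.
 */
static inline int lpfc_example_cmd_rsp_cycle(struct lpfc_hba *phba,
                                             struct lpfc_io_buf *lpfc_buf)
{
        struct fcp_cmd_rsp_buf *crb;

        crb = lpfc_get_cmd_rsp_buf_per_hdwq(phba, lpfc_buf);
        if (!crb)
                return -ENOMEM;

        /* crb->fcp_cmnd and crb->fcp_rsp are ready to fill. */

        return lpfc_put_cmd_rsp_buf_per_hdwq(phba, lpfc_buf);
}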
21169
21170/**
21171 * lpfc_free_cmd_rsp_buf_per_hdwq - Free all CMD/RSP chunks of hdwq pool
21172 * @phba: phba object
21173 * @hdwq: hdwq to cleanup cmd rsp buff resources on
21174 *
21175 * This routine frees all CMD/RSP buffers of hdwq's CMD/RSP buf pool.
21176 *
21177 * Return codes:
21178 *   None
21179 **/
21180void
21181lpfc_free_cmd_rsp_buf_per_hdwq(struct lpfc_hba *phba,
21182                               struct lpfc_sli4_hdw_queue *hdwq)
21183{
21184        struct list_head *buf_list = &hdwq->cmd_rsp_buf_list;
21185        struct fcp_cmd_rsp_buf *list_entry = NULL;
21186        struct fcp_cmd_rsp_buf *tmp = NULL;
21187        unsigned long iflags;
21188
21189        spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
21190
21191        /* Free cmd_rsp buf pool */
21192        list_for_each_entry_safe(list_entry, tmp,
21193                                 buf_list,
21194                                 list_node) {
21195                dma_pool_free(phba->lpfc_cmd_rsp_buf_pool,
21196                              list_entry->fcp_cmnd,
21197                              list_entry->fcp_cmd_rsp_dma_handle);
21198                list_del(&list_entry->list_node);
21199                kfree(list_entry);
21200        }
21201
21202        spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
21203}
21204