linux/drivers/scsi/lpfc/lpfc_sli.c
/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2017-2021 Broadcom. All Rights Reserved. The term *
 * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.     *
 * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.broadcom.com                                                *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/

#include <linux/blkdev.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/lockdep.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>
#include <scsi/fc/fc_fs.h>
#include <linux/aer.h>
#include <linux/crash_dump.h>
#ifdef CONFIG_X86
#include <asm/set_memory.h>
#endif

#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc.h"
#include "lpfc_scsi.h"
#include "lpfc_nvme.h"
#include "lpfc_crtn.h"
#include "lpfc_logmsg.h"
#include "lpfc_compat.h"
#include "lpfc_debugfs.h"
#include "lpfc_vport.h"
#include "lpfc_version.h"

/* There are only four IOCB completion types. */
typedef enum _lpfc_iocb_type {
        LPFC_UNKNOWN_IOCB,
        LPFC_UNSOL_IOCB,
        LPFC_SOL_IOCB,
        LPFC_ABORT_IOCB
} lpfc_iocb_type;


/* Provide function prototypes local to this module. */
static int lpfc_sli_issue_mbox_s4(struct lpfc_hba *, LPFC_MBOXQ_t *,
                                  uint32_t);
static int lpfc_sli4_read_rev(struct lpfc_hba *, LPFC_MBOXQ_t *,
                              uint8_t *, uint32_t *);
static struct lpfc_iocbq *lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *,
                                                         struct lpfc_iocbq *);
static void lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *,
                                      struct hbq_dmabuf *);
static void lpfc_sli4_handle_mds_loopback(struct lpfc_vport *vport,
                                          struct hbq_dmabuf *dmabuf);
static bool lpfc_sli4_fp_handle_cqe(struct lpfc_hba *phba,
                                    struct lpfc_queue *cq, struct lpfc_cqe *cqe);
static int lpfc_sli4_post_sgl_list(struct lpfc_hba *, struct list_head *,
                                   int);
static void lpfc_sli4_hba_handle_eqe(struct lpfc_hba *phba,
                                     struct lpfc_queue *eq,
                                     struct lpfc_eqe *eqe);
static bool lpfc_sli4_mbox_completions_pending(struct lpfc_hba *phba);
static bool lpfc_sli4_process_missed_mbox_completions(struct lpfc_hba *phba);
static struct lpfc_cqe *lpfc_sli4_cq_get(struct lpfc_queue *q);
static void __lpfc_sli4_consume_cqe(struct lpfc_hba *phba,
                                    struct lpfc_queue *cq,
                                    struct lpfc_cqe *cqe);

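/* WQE templates, initialized once by lpfc_wqe_cmd_template() and used as
 * the starting contents of each FCP IREAD, IWRITE, and ICMND work queue
 * entry built by the driver.
 */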
union lpfc_wqe128 lpfc_iread_cmd_template;
union lpfc_wqe128 lpfc_iwrite_cmd_template;
union lpfc_wqe128 lpfc_icmnd_cmd_template;

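/**
 * lpfc_get_iocb_from_iocbq - Get the embedded IOCB of an iocbq
 * @iocbq: Pointer to driver iocb object.
 *
 * Returns a pointer to the IOCB_t embedded within @iocbq.
 **/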
static IOCB_t *
lpfc_get_iocb_from_iocbq(struct lpfc_iocbq *iocbq)
{
        return &iocbq->iocb;
}

/* Setup WQE templates for IOs */
void lpfc_wqe_cmd_template(void)
{
        union lpfc_wqe128 *wqe;

        /* IREAD template */
        wqe = &lpfc_iread_cmd_template;
        memset(wqe, 0, sizeof(union lpfc_wqe128));

        /* Word 0, 1, 2 - BDE is variable */

        /* Word 3 - cmd_buff_len, payload_offset_len is zero */

        /* Word 4 - total_xfer_len is variable */

        /* Word 5 - is zero */

        /* Word 6 - ctxt_tag, xri_tag is variable */

        /* Word 7 */
        bf_set(wqe_cmnd, &wqe->fcp_iread.wqe_com, CMD_FCP_IREAD64_WQE);
        bf_set(wqe_pu, &wqe->fcp_iread.wqe_com, PARM_READ_CHECK);
        bf_set(wqe_class, &wqe->fcp_iread.wqe_com, CLASS3);
        bf_set(wqe_ct, &wqe->fcp_iread.wqe_com, SLI4_CT_RPI);

        /* Word 8 - abort_tag is variable */

        /* Word 9 - reqtag is variable */

        /* Word 10 - dbde, wqes is variable */
        bf_set(wqe_qosd, &wqe->fcp_iread.wqe_com, 0);
        bf_set(wqe_iod, &wqe->fcp_iread.wqe_com, LPFC_WQE_IOD_READ);
        bf_set(wqe_lenloc, &wqe->fcp_iread.wqe_com, LPFC_WQE_LENLOC_WORD4);
        bf_set(wqe_dbde, &wqe->fcp_iread.wqe_com, 0);
        bf_set(wqe_wqes, &wqe->fcp_iread.wqe_com, 1);

        /* Word 11 - pbde is variable */
        bf_set(wqe_cmd_type, &wqe->fcp_iread.wqe_com, COMMAND_DATA_IN);
        bf_set(wqe_cqid, &wqe->fcp_iread.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
        bf_set(wqe_pbde, &wqe->fcp_iread.wqe_com, 0);

        /* Word 12 - is zero */

        /* Word 13, 14, 15 - PBDE is variable */

        /* IWRITE template */
        wqe = &lpfc_iwrite_cmd_template;
        memset(wqe, 0, sizeof(union lpfc_wqe128));

        /* Word 0, 1, 2 - BDE is variable */

        /* Word 3 - cmd_buff_len, payload_offset_len is zero */

        /* Word 4 - total_xfer_len is variable */

        /* Word 5 - initial_xfer_len is variable */

        /* Word 6 - ctxt_tag, xri_tag is variable */

        /* Word 7 */
        bf_set(wqe_cmnd, &wqe->fcp_iwrite.wqe_com, CMD_FCP_IWRITE64_WQE);
        bf_set(wqe_pu, &wqe->fcp_iwrite.wqe_com, PARM_READ_CHECK);
        bf_set(wqe_class, &wqe->fcp_iwrite.wqe_com, CLASS3);
        bf_set(wqe_ct, &wqe->fcp_iwrite.wqe_com, SLI4_CT_RPI);

        /* Word 8 - abort_tag is variable */

        /* Word 9 - reqtag is variable */

        /* Word 10 - dbde, wqes is variable */
        bf_set(wqe_qosd, &wqe->fcp_iwrite.wqe_com, 0);
        bf_set(wqe_iod, &wqe->fcp_iwrite.wqe_com, LPFC_WQE_IOD_WRITE);
        bf_set(wqe_lenloc, &wqe->fcp_iwrite.wqe_com, LPFC_WQE_LENLOC_WORD4);
        bf_set(wqe_dbde, &wqe->fcp_iwrite.wqe_com, 0);
        bf_set(wqe_wqes, &wqe->fcp_iwrite.wqe_com, 1);

        /* Word 11 - pbde is variable */
        bf_set(wqe_cmd_type, &wqe->fcp_iwrite.wqe_com, COMMAND_DATA_OUT);
        bf_set(wqe_cqid, &wqe->fcp_iwrite.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
        bf_set(wqe_pbde, &wqe->fcp_iwrite.wqe_com, 0);

        /* Word 12 - is zero */

        /* Word 13, 14, 15 - PBDE is variable */

        /* ICMND template */
        wqe = &lpfc_icmnd_cmd_template;
        memset(wqe, 0, sizeof(union lpfc_wqe128));

        /* Word 0, 1, 2 - BDE is variable */

        /* Word 3 - payload_offset_len is variable */

        /* Word 4, 5 - is zero */

        /* Word 6 - ctxt_tag, xri_tag is variable */

        /* Word 7 */
        bf_set(wqe_cmnd, &wqe->fcp_icmd.wqe_com, CMD_FCP_ICMND64_WQE);
        bf_set(wqe_pu, &wqe->fcp_icmd.wqe_com, 0);
        bf_set(wqe_class, &wqe->fcp_icmd.wqe_com, CLASS3);
        bf_set(wqe_ct, &wqe->fcp_icmd.wqe_com, SLI4_CT_RPI);

        /* Word 8 - abort_tag is variable */

        /* Word 9 - reqtag is variable */

        /* Word 10 - dbde, wqes is variable */
        bf_set(wqe_qosd, &wqe->fcp_icmd.wqe_com, 1);
        bf_set(wqe_iod, &wqe->fcp_icmd.wqe_com, LPFC_WQE_IOD_NONE);
        bf_set(wqe_lenloc, &wqe->fcp_icmd.wqe_com, LPFC_WQE_LENLOC_NONE);
        bf_set(wqe_dbde, &wqe->fcp_icmd.wqe_com, 0);
        bf_set(wqe_wqes, &wqe->fcp_icmd.wqe_com, 1);

        /* Word 11 */
        bf_set(wqe_cmd_type, &wqe->fcp_icmd.wqe_com, COMMAND_DATA_IN);
        bf_set(wqe_cqid, &wqe->fcp_icmd.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
        bf_set(wqe_pbde, &wqe->fcp_icmd.wqe_com, 0);

        /* Word 12, 13, 14, 15 - is zero */
}

#if defined(CONFIG_64BIT) && defined(__LITTLE_ENDIAN)
/**
 * lpfc_sli4_pcimem_bcopy - SLI4 memory copy function
 * @srcp: Source memory pointer.
 * @destp: Destination memory pointer.
 * @cnt: Number of bytes to copy.
 *       Must be a multiple of sizeof(uint64_t).
 *
 * This function is used for copying data between driver memory
 * and the SLI WQ. On 64-bit little-endian hosts the native endianness
 * matches the SLI endianness, so the copy is a straight 64-bit copy;
 * other configurations fall back to lpfc_sli_pcimem_bcopy(), which
 * performs the per-word endian conversion. This function can be
 * called with or without lock.
 **/
static void
lpfc_sli4_pcimem_bcopy(void *srcp, void *destp, uint32_t cnt)
{
        uint64_t *src = srcp;
        uint64_t *dest = destp;
        int i;

        for (i = 0; i < (int)cnt; i += sizeof(uint64_t))
                *dest++ = *src++;
}
#else
#define lpfc_sli4_pcimem_bcopy(a, b, c) lpfc_sli_pcimem_bcopy(a, b, c)
#endif

/**
 * lpfc_sli4_wq_put - Put a Work Queue Entry on a Work Queue
 * @q: The Work Queue to operate on.
 * @wqe: The Work Queue Entry to put on the Work Queue.
 *
 * This routine will copy the contents of @wqe to the next available entry on
 * the @q. This function will then ring the Work Queue Doorbell to signal the
 * HBA to start processing the Work Queue Entry. This function returns 0 if
 * successful, -ENOMEM if the queue memory is invalid, -EBUSY if no entries
 * are available on @q, and -EINVAL if the queue's doorbell format is unknown.
 * The caller is expected to hold the hbalock when calling this routine.
 **/
static int
lpfc_sli4_wq_put(struct lpfc_queue *q, union lpfc_wqe128 *wqe)
{
        union lpfc_wqe *temp_wqe;
        struct lpfc_register doorbell;
        uint32_t host_index;
        uint32_t idx;
        uint32_t i = 0;
        uint8_t *tmp;
        u32 if_type;

        /* sanity check on queue memory */
        if (unlikely(!q))
                return -ENOMEM;

        temp_wqe = lpfc_sli4_qe(q, q->host_index);

        /* If the host has not yet processed the next entry then we are done */
        idx = ((q->host_index + 1) % q->entry_count);
        if (idx == q->hba_index) {
                q->WQ_overflow++;
                return -EBUSY;
        }
        q->WQ_posted++;
        /* set consumption flag every once in a while */
        if (!((q->host_index + 1) % q->notify_interval))
                bf_set(wqe_wqec, &wqe->generic.wqe_com, 1);
        else
                bf_set(wqe_wqec, &wqe->generic.wqe_com, 0);
        if (q->phba->sli3_options & LPFC_SLI4_PHWQ_ENABLED)
                bf_set(wqe_wqid, &wqe->generic.wqe_com, q->queue_id);
        lpfc_sli4_pcimem_bcopy(wqe, temp_wqe, q->entry_size);
        if (q->dpp_enable && q->phba->cfg_enable_dpp) {
                /* write to DPP aperture taking advantage of Combined Writes */
                tmp = (uint8_t *)temp_wqe;
#ifdef __raw_writeq
                for (i = 0; i < q->entry_size; i += sizeof(uint64_t))
                        __raw_writeq(*((uint64_t *)(tmp + i)),
                                     q->dpp_regaddr + i);
#else
                for (i = 0; i < q->entry_size; i += sizeof(uint32_t))
                        __raw_writel(*((uint32_t *)(tmp + i)),
                                     q->dpp_regaddr + i);
#endif
        }
        /* ensure WQE bcopy and DPP flushed before doorbell write */
        wmb();

        /* Update the host index before invoking device */
        host_index = q->host_index;

        q->host_index = idx;

        /* Ring Doorbell */
        doorbell.word0 = 0;
        if (q->db_format == LPFC_DB_LIST_FORMAT) {
                if (q->dpp_enable && q->phba->cfg_enable_dpp) {
                        bf_set(lpfc_if6_wq_db_list_fm_num_posted, &doorbell, 1);
                        bf_set(lpfc_if6_wq_db_list_fm_dpp, &doorbell, 1);
                        bf_set(lpfc_if6_wq_db_list_fm_dpp_id, &doorbell,
                               q->dpp_id);
                        bf_set(lpfc_if6_wq_db_list_fm_id, &doorbell,
                               q->queue_id);
                } else {
                        bf_set(lpfc_wq_db_list_fm_num_posted, &doorbell, 1);
                        bf_set(lpfc_wq_db_list_fm_id, &doorbell, q->queue_id);

                        /* Leave bits <23:16> clear for if_type 6 dpp */
                        if_type = bf_get(lpfc_sli_intf_if_type,
                                         &q->phba->sli4_hba.sli_intf);
                        if (if_type != LPFC_SLI_INTF_IF_TYPE_6)
                                bf_set(lpfc_wq_db_list_fm_index, &doorbell,
                                       host_index);
                }
        } else if (q->db_format == LPFC_DB_RING_FORMAT) {
                bf_set(lpfc_wq_db_ring_fm_num_posted, &doorbell, 1);
                bf_set(lpfc_wq_db_ring_fm_id, &doorbell, q->queue_id);
        } else {
                return -EINVAL;
        }
        writel(doorbell.word0, q->db_regaddr);

        return 0;
}

/**
 * lpfc_sli4_wq_release - Updates internal hba index for WQ
 * @q: The Work Queue to operate on.
 * @index: The index to advance the hba index to.
 *
 * This routine will update the HBA index of a queue to reflect consumption of
 * Work Queue Entries by the HBA. When the HBA indicates that it has consumed
 * an entry the host calls this function to update the queue's internal
 * pointers.
 **/
static void
lpfc_sli4_wq_release(struct lpfc_queue *q, uint32_t index)
{
        /* sanity check on queue memory */
        if (unlikely(!q))
                return;

        q->hba_index = index;
}

/**
 * lpfc_sli4_mq_put - Put a Mailbox Queue Entry on a Mailbox Queue
 * @q: The Mailbox Queue to operate on.
 * @mqe: The Mailbox Queue Entry to put on the Mailbox Queue.
 *
 * This routine will copy the contents of @mqe to the next available entry on
 * the @q. This function will then ring the Mailbox Queue Doorbell to signal
 * the HBA to start processing the Mailbox Queue Entry. This function returns
 * 0 if successful. If no entries are available on @q then this function will
 * return -ENOMEM.
 * The caller is expected to hold the hbalock when calling this routine.
 **/
static int
lpfc_sli4_mq_put(struct lpfc_queue *q, struct lpfc_mqe *mqe)
{
        struct lpfc_mqe *temp_mqe;
        struct lpfc_register doorbell;

        /* sanity check on queue memory */
        if (unlikely(!q))
                return -ENOMEM;
        temp_mqe = lpfc_sli4_qe(q, q->host_index);

        /* If the host has not yet processed the next entry then we are done */
        if (((q->host_index + 1) % q->entry_count) == q->hba_index)
                return -ENOMEM;
        lpfc_sli4_pcimem_bcopy(mqe, temp_mqe, q->entry_size);
        /* Save off the mailbox pointer for completion */
        q->phba->mbox = (MAILBOX_t *)temp_mqe;

        /* Update the host index before invoking device */
        q->host_index = ((q->host_index + 1) % q->entry_count);

        /* Ring Doorbell */
        doorbell.word0 = 0;
        bf_set(lpfc_mq_doorbell_num_posted, &doorbell, 1);
        bf_set(lpfc_mq_doorbell_id, &doorbell, q->queue_id);
        writel(doorbell.word0, q->phba->sli4_hba.MQDBregaddr);
        return 0;
}

/**
 * lpfc_sli4_mq_release - Updates internal hba index for MQ
 * @q: The Mailbox Queue to operate on.
 *
 * This routine will update the HBA index of a queue to reflect consumption of
 * a Mailbox Queue Entry by the HBA. When the HBA indicates that it has consumed
 * an entry the host calls this function to update the queue's internal
 * pointers. This routine returns the number of entries that were consumed by
 * the HBA.
 **/
static uint32_t
lpfc_sli4_mq_release(struct lpfc_queue *q)
{
        /* sanity check on queue memory */
        if (unlikely(!q))
                return 0;

        /* Clear the mailbox pointer for completion */
        q->phba->mbox = NULL;
        q->hba_index = ((q->hba_index + 1) % q->entry_count);
        return 1;
}

/**
 * lpfc_sli4_eq_get - Gets the next valid EQE from an EQ
 * @q: The Event Queue to get the first valid EQE from
 *
 * This routine will get the first valid Event Queue Entry from @q and
 * return it. The entry is not consumed here; __lpfc_sli4_consume_eqe()
 * advances the host index. If no valid EQEs are in the Queue (no more
 * work to do), or the Queue is full of EQEs that have been processed but
 * not popped back to the HBA, then this routine will return NULL.
 **/
static struct lpfc_eqe *
lpfc_sli4_eq_get(struct lpfc_queue *q)
{
        struct lpfc_eqe *eqe;

        /* sanity check on queue memory */
        if (unlikely(!q))
                return NULL;
        eqe = lpfc_sli4_qe(q, q->host_index);

        /* If the next EQE is not valid then we are done */
        if (bf_get_le32(lpfc_eqe_valid, eqe) != q->qe_valid)
                return NULL;

        /*
         * insert barrier for instruction interlock : data from the hardware
         * must have the valid bit checked before it can be copied and acted
         * upon. Speculative instructions were allowing a bcopy at the start
         * of lpfc_sli4_fp_handle_wcqe(), which is called immediately
         * after our return, to copy data before the valid bit check above
         * was done. As such, some of the copied data was stale. The barrier
         * ensures the check is before any data is copied.
         */
        mb();
        return eqe;
}

/**
 * lpfc_sli4_eq_clr_intr - Turn off interrupts from this EQ
 * @q: The Event Queue to disable interrupts
 *
 **/
void
lpfc_sli4_eq_clr_intr(struct lpfc_queue *q)
{
        struct lpfc_register doorbell;

        doorbell.word0 = 0;
        bf_set(lpfc_eqcq_doorbell_eqci, &doorbell, 1);
        bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_EVENT);
        bf_set(lpfc_eqcq_doorbell_eqid_hi, &doorbell,
               (q->queue_id >> LPFC_EQID_HI_FIELD_SHIFT));
        bf_set(lpfc_eqcq_doorbell_eqid_lo, &doorbell, q->queue_id);
        writel(doorbell.word0, q->phba->sli4_hba.EQDBregaddr);
}

/**
 * lpfc_sli4_if6_eq_clr_intr - Turn off interrupts from this EQ
 * @q: The Event Queue to disable interrupts
 *
 **/
void
lpfc_sli4_if6_eq_clr_intr(struct lpfc_queue *q)
{
        struct lpfc_register doorbell;

        doorbell.word0 = 0;
        bf_set(lpfc_if6_eq_doorbell_eqid, &doorbell, q->queue_id);
        writel(doorbell.word0, q->phba->sli4_hba.EQDBregaddr);
}

/**
 * lpfc_sli4_write_eq_db - write EQ DB for eqe's consumed or arm state
 * @phba: adapter with EQ
 * @q: The Event Queue that the host has completed processing for.
 * @count: Number of elements that have been consumed
 * @arm: Indicates whether the host wants to arm this EQ.
 *
 * This routine will notify the HBA, by ringing the doorbell, that count
 * number of EQEs have been processed. The @arm parameter indicates whether
 * the queue should be rearmed when ringing the doorbell.
 **/
void
lpfc_sli4_write_eq_db(struct lpfc_hba *phba, struct lpfc_queue *q,
                      uint32_t count, bool arm)
{
        struct lpfc_register doorbell;

        /* sanity check on queue memory */
        if (unlikely(!q || (count == 0 && !arm)))
                return;

        /* ring doorbell for number popped */
        doorbell.word0 = 0;
        if (arm) {
                bf_set(lpfc_eqcq_doorbell_arm, &doorbell, 1);
                bf_set(lpfc_eqcq_doorbell_eqci, &doorbell, 1);
        }
        bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, count);
        bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_EVENT);
        bf_set(lpfc_eqcq_doorbell_eqid_hi, &doorbell,
               (q->queue_id >> LPFC_EQID_HI_FIELD_SHIFT));
        bf_set(lpfc_eqcq_doorbell_eqid_lo, &doorbell, q->queue_id);
        writel(doorbell.word0, q->phba->sli4_hba.EQDBregaddr);
        /* PCI read to flush PCI pipeline on re-arming for INTx mode */
        if ((q->phba->intr_type == INTx) && (arm == LPFC_QUEUE_REARM))
                readl(q->phba->sli4_hba.EQDBregaddr);
}

/**
 * lpfc_sli4_if6_write_eq_db - write EQ DB for eqe's consumed or arm state
 * @phba: adapter with EQ
 * @q: The Event Queue that the host has completed processing for.
 * @count: Number of elements that have been consumed
 * @arm: Indicates whether the host wants to arm this EQ.
 *
 * This routine will notify the HBA, by ringing the doorbell, that count
 * number of EQEs have been processed. The @arm parameter indicates whether
 * the queue should be rearmed when ringing the doorbell.
 **/
void
lpfc_sli4_if6_write_eq_db(struct lpfc_hba *phba, struct lpfc_queue *q,
                          uint32_t count, bool arm)
{
        struct lpfc_register doorbell;

        /* sanity check on queue memory */
        if (unlikely(!q || (count == 0 && !arm)))
                return;

        /* ring doorbell for number popped */
        doorbell.word0 = 0;
        if (arm)
                bf_set(lpfc_if6_eq_doorbell_arm, &doorbell, 1);
        bf_set(lpfc_if6_eq_doorbell_num_released, &doorbell, count);
        bf_set(lpfc_if6_eq_doorbell_eqid, &doorbell, q->queue_id);
        writel(doorbell.word0, q->phba->sli4_hba.EQDBregaddr);
        /* PCI read to flush PCI pipeline on re-arming for INTx mode */
        if ((q->phba->intr_type == INTx) && (arm == LPFC_QUEUE_REARM))
                readl(q->phba->sli4_hba.EQDBregaddr);
}

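/**
 * __lpfc_sli4_consume_eqe - Mark an EQE consumed and advance the host index
 * @phba: Pointer to HBA context object.
 * @eq: The Event Queue the entry belongs to.
 * @eqe: The Event Queue Entry that was just processed.
 *
 * On EQs without autovalid (eqav) the valid bit of the consumed EQE is
 * cleared so it is not processed again; on EQs with autovalid the expected
 * valid-bit phase is toggled whenever the host index wraps around.
 **/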
static void
__lpfc_sli4_consume_eqe(struct lpfc_hba *phba, struct lpfc_queue *eq,
                        struct lpfc_eqe *eqe)
{
        if (!phba->sli4_hba.pc_sli4_params.eqav)
                bf_set_le32(lpfc_eqe_valid, eqe, 0);

        eq->host_index = ((eq->host_index + 1) % eq->entry_count);

        /* if the index wrapped around, toggle the valid bit */
        if (phba->sli4_hba.pc_sli4_params.eqav && !eq->host_index)
                eq->qe_valid = (eq->qe_valid) ? 0 : 1;
}

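/**
 * lpfc_sli4_eqcq_flush - Drop all pending entries on an EQ and its CQs
 * @phba: Pointer to HBA context object.
 * @eq: The Event Queue to flush.
 *
 * Walks every valid EQE on @eq and, for each one, drains all valid CQEs on
 * the child CQ it references, consuming the entries without acting on them.
 * Each drained queue is then cleared and re-armed via its doorbell.
 **/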
static void
lpfc_sli4_eqcq_flush(struct lpfc_hba *phba, struct lpfc_queue *eq)
{
        struct lpfc_eqe *eqe = NULL;
        u32 eq_count = 0, cq_count = 0;
        struct lpfc_cqe *cqe = NULL;
        struct lpfc_queue *cq = NULL, *childq = NULL;
        int cqid = 0;

        /* walk all the EQ entries and drop on the floor */
        eqe = lpfc_sli4_eq_get(eq);
        while (eqe) {
                /* Get the reference to the corresponding CQ */
                cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);
                cq = NULL;

                list_for_each_entry(childq, &eq->child_list, list) {
                        if (childq->queue_id == cqid) {
                                cq = childq;
                                break;
                        }
                }
                /* If CQ is valid, iterate through it and drop all the CQEs */
                if (cq) {
                        cqe = lpfc_sli4_cq_get(cq);
                        while (cqe) {
                                __lpfc_sli4_consume_cqe(phba, cq, cqe);
                                cq_count++;
                                cqe = lpfc_sli4_cq_get(cq);
                        }
                        /* Clear and re-arm the CQ */
                        phba->sli4_hba.sli4_write_cq_db(phba, cq, cq_count,
                                                        LPFC_QUEUE_REARM);
                        cq_count = 0;
                }
                __lpfc_sli4_consume_eqe(phba, eq, eqe);
                eq_count++;
                eqe = lpfc_sli4_eq_get(eq);
        }

        /* Clear and re-arm the EQ */
        phba->sli4_hba.sli4_write_eq_db(phba, eq, eq_count, LPFC_QUEUE_REARM);
}

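/**
 * lpfc_sli4_process_eq - Process the entries on an EQ
 * @phba: Pointer to HBA context object.
 * @eq: The Event Queue to process.
 * @rearm: LPFC_QUEUE_REARM or LPFC_QUEUE_NOARM for the final doorbell write.
 *
 * Claims @eq so that only one context processes it at a time, then handles
 * and consumes EQEs, releasing them back to the HBA every
 * eq->notify_interval entries and stopping after eq->max_proc_limit
 * entries. Returns the number of EQEs processed.
 **/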
static int
lpfc_sli4_process_eq(struct lpfc_hba *phba, struct lpfc_queue *eq,
                     uint8_t rearm)
{
        struct lpfc_eqe *eqe;
        int count = 0, consumed = 0;

        if (cmpxchg(&eq->queue_claimed, 0, 1) != 0)
                goto rearm_and_exit;

        eqe = lpfc_sli4_eq_get(eq);
        while (eqe) {
                lpfc_sli4_hba_handle_eqe(phba, eq, eqe);
                __lpfc_sli4_consume_eqe(phba, eq, eqe);

                consumed++;
                if (!(++count % eq->max_proc_limit))
                        break;

                if (!(count % eq->notify_interval)) {
                        phba->sli4_hba.sli4_write_eq_db(phba, eq, consumed,
                                                        LPFC_QUEUE_NOARM);
                        consumed = 0;
                }

                eqe = lpfc_sli4_eq_get(eq);
        }
        eq->EQ_processed += count;

        /* Track the max number of EQEs processed in 1 intr */
        if (count > eq->EQ_max_eqe)
                eq->EQ_max_eqe = count;

        xchg(&eq->queue_claimed, 0);

rearm_and_exit:
        /* Always clear the EQ. */
        phba->sli4_hba.sli4_write_eq_db(phba, eq, consumed, rearm);

        return count;
}

/**
 * lpfc_sli4_cq_get - Gets the next valid CQE from a CQ
 * @q: The Completion Queue to get the first valid CQE from
 *
 * This routine will get the first valid Completion Queue Entry from @q and
 * return it. The entry is not consumed here; __lpfc_sli4_consume_cqe()
 * advances the host index. If no valid CQEs are in the Queue (no more work
 * to do), or the Queue is full of CQEs that have been processed but not
 * popped back to the HBA, then this routine will return NULL.
 **/
static struct lpfc_cqe *
lpfc_sli4_cq_get(struct lpfc_queue *q)
{
        struct lpfc_cqe *cqe;

        /* sanity check on queue memory */
        if (unlikely(!q))
                return NULL;
        cqe = lpfc_sli4_qe(q, q->host_index);

        /* If the next CQE is not valid then we are done */
        if (bf_get_le32(lpfc_cqe_valid, cqe) != q->qe_valid)
                return NULL;

        /*
         * insert barrier for instruction interlock : data from the hardware
         * must have the valid bit checked before it can be copied and acted
         * upon. Given what was seen in lpfc_sli4_eq_get() of speculative
         * instructions allowing action on content before the valid bit was
         * checked, add a barrier here as well.
         */
        mb();
        return cqe;
}

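/**
 * __lpfc_sli4_consume_cqe - Mark a CQE consumed and advance the host index
 * @phba: Pointer to HBA context object.
 * @cq: The Completion Queue the entry belongs to.
 * @cqe: The Completion Queue Entry that was just processed.
 *
 * On CQs without autovalid (cqav) the valid bit of the consumed CQE is
 * cleared so it is not processed again; on CQs with autovalid the expected
 * valid-bit phase is toggled whenever the host index wraps around.
 **/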
static void
__lpfc_sli4_consume_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
                        struct lpfc_cqe *cqe)
{
        if (!phba->sli4_hba.pc_sli4_params.cqav)
                bf_set_le32(lpfc_cqe_valid, cqe, 0);

        cq->host_index = ((cq->host_index + 1) % cq->entry_count);

        /* if the index wrapped around, toggle the valid bit */
        if (phba->sli4_hba.pc_sli4_params.cqav && !cq->host_index)
                cq->qe_valid = (cq->qe_valid) ? 0 : 1;
}

/**
 * lpfc_sli4_write_cq_db - write cq DB for entries consumed or arm state.
 * @phba: the adapter with the CQ
 * @q: The Completion Queue that the host has completed processing for.
 * @count: the number of elements that were consumed
 * @arm: Indicates whether the host wants to arm this CQ.
 *
 * This routine will notify the HBA, by ringing the doorbell, that the
 * CQEs have been processed. The @arm parameter specifies whether the
 * queue should be rearmed when ringing the doorbell.
 **/
void
lpfc_sli4_write_cq_db(struct lpfc_hba *phba, struct lpfc_queue *q,
                      uint32_t count, bool arm)
{
        struct lpfc_register doorbell;

        /* sanity check on queue memory */
        if (unlikely(!q || (count == 0 && !arm)))
                return;

        /* ring doorbell for number popped */
        doorbell.word0 = 0;
        if (arm)
                bf_set(lpfc_eqcq_doorbell_arm, &doorbell, 1);
        bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, count);
        bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_COMPLETION);
        bf_set(lpfc_eqcq_doorbell_cqid_hi, &doorbell,
               (q->queue_id >> LPFC_CQID_HI_FIELD_SHIFT));
        bf_set(lpfc_eqcq_doorbell_cqid_lo, &doorbell, q->queue_id);
        writel(doorbell.word0, q->phba->sli4_hba.CQDBregaddr);
}

/**
 * lpfc_sli4_if6_write_cq_db - write cq DB for entries consumed or arm state.
 * @phba: the adapter with the CQ
 * @q: The Completion Queue that the host has completed processing for.
 * @count: the number of elements that were consumed
 * @arm: Indicates whether the host wants to arm this CQ.
 *
 * This routine will notify the HBA, by ringing the doorbell, that the
 * CQEs have been processed. The @arm parameter specifies whether the
 * queue should be rearmed when ringing the doorbell.
 **/
void
lpfc_sli4_if6_write_cq_db(struct lpfc_hba *phba, struct lpfc_queue *q,
                          uint32_t count, bool arm)
{
        struct lpfc_register doorbell;

        /* sanity check on queue memory */
        if (unlikely(!q || (count == 0 && !arm)))
                return;

        /* ring doorbell for number popped */
        doorbell.word0 = 0;
        if (arm)
                bf_set(lpfc_if6_cq_doorbell_arm, &doorbell, 1);
        bf_set(lpfc_if6_cq_doorbell_num_released, &doorbell, count);
        bf_set(lpfc_if6_cq_doorbell_cqid, &doorbell, q->queue_id);
        writel(doorbell.word0, q->phba->sli4_hba.CQDBregaddr);
}

/**
 * lpfc_sli4_rq_put - Put a Receive Buffer Queue Entry on a Receive Queue
 * @hq: The Header Receive Queue to operate on.
 * @dq: The Data Receive Queue to operate on.
 * @hrqe: The Header Receive Queue Entry to put on the header queue.
 * @drqe: The Data Receive Queue Entry to put on the data queue.
 *
 * This routine will copy the contents of @hrqe and @drqe to the next
 * available entries on @hq and @dq. It will then ring the Receive Queue
 * Doorbell to signal the HBA to start processing the Receive Queue Entries.
 * This function returns the index that the rqe was copied to if successful.
 * It returns -ENOMEM if the queue memory is invalid, -EINVAL if the queues
 * are mismatched, and -EBUSY if no entries are available.
 * The caller is expected to hold the hbalock when calling this routine.
 **/
int
lpfc_sli4_rq_put(struct lpfc_queue *hq, struct lpfc_queue *dq,
                 struct lpfc_rqe *hrqe, struct lpfc_rqe *drqe)
{
        struct lpfc_rqe *temp_hrqe;
        struct lpfc_rqe *temp_drqe;
        struct lpfc_register doorbell;
        int hq_put_index;
        int dq_put_index;

        /* sanity check on queue memory */
        if (unlikely(!hq) || unlikely(!dq))
                return -ENOMEM;
        hq_put_index = hq->host_index;
        dq_put_index = dq->host_index;
        temp_hrqe = lpfc_sli4_qe(hq, hq_put_index);
        temp_drqe = lpfc_sli4_qe(dq, dq_put_index);

        if (hq->type != LPFC_HRQ || dq->type != LPFC_DRQ)
                return -EINVAL;
        if (hq_put_index != dq_put_index)
                return -EINVAL;
        /* If the host has not yet processed the next entry then we are done */
        if (((hq_put_index + 1) % hq->entry_count) == hq->hba_index)
                return -EBUSY;
        lpfc_sli4_pcimem_bcopy(hrqe, temp_hrqe, hq->entry_size);
        lpfc_sli4_pcimem_bcopy(drqe, temp_drqe, dq->entry_size);

        /* Update the host index to point to the next slot */
        hq->host_index = ((hq_put_index + 1) % hq->entry_count);
        dq->host_index = ((dq_put_index + 1) % dq->entry_count);
        hq->RQ_buf_posted++;

        /* Ring The Header Receive Queue Doorbell */
        if (!(hq->host_index % hq->notify_interval)) {
                doorbell.word0 = 0;
                if (hq->db_format == LPFC_DB_RING_FORMAT) {
                        bf_set(lpfc_rq_db_ring_fm_num_posted, &doorbell,
                               hq->notify_interval);
                        bf_set(lpfc_rq_db_ring_fm_id, &doorbell, hq->queue_id);
                } else if (hq->db_format == LPFC_DB_LIST_FORMAT) {
                        bf_set(lpfc_rq_db_list_fm_num_posted, &doorbell,
                               hq->notify_interval);
                        bf_set(lpfc_rq_db_list_fm_index, &doorbell,
                               hq->host_index);
                        bf_set(lpfc_rq_db_list_fm_id, &doorbell, hq->queue_id);
                } else {
                        return -EINVAL;
                }
                writel(doorbell.word0, hq->db_regaddr);
        }
        return hq_put_index;
}

/**
 * lpfc_sli4_rq_release - Updates internal hba index for RQ
 * @hq: The Header Receive Queue to operate on.
 * @dq: The Data Receive Queue to operate on.
 *
 * This routine will update the HBA index of a queue to reflect consumption of
 * one Receive Queue Entry by the HBA. When the HBA indicates that it has
 * consumed an entry the host calls this function to update the queue's
 * internal pointers. This routine returns the number of entries that were
 * consumed by the HBA.
 **/
static uint32_t
lpfc_sli4_rq_release(struct lpfc_queue *hq, struct lpfc_queue *dq)
{
        /* sanity check on queue memory */
        if (unlikely(!hq) || unlikely(!dq))
                return 0;

        if ((hq->type != LPFC_HRQ) || (dq->type != LPFC_DRQ))
                return 0;
        hq->hba_index = ((hq->hba_index + 1) % hq->entry_count);
        dq->hba_index = ((dq->hba_index + 1) % dq->entry_count);
        return 1;
}

/**
 * lpfc_cmd_iocb - Get next command iocb entry in the ring
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function returns a pointer to the next command iocb entry
 * in the command ring. The caller must hold hbalock to prevent
 * other threads from consuming the next command iocb.
 * SLI-2/SLI-3 provide different sized iocbs.
 **/
static inline IOCB_t *
lpfc_cmd_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
        return (IOCB_t *) (((char *) pring->sli.sli3.cmdringaddr) +
                           pring->sli.sli3.cmdidx * phba->iocb_cmd_size);
}

/**
 * lpfc_resp_iocb - Get next response iocb entry in the ring
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function returns a pointer to the next response iocb entry
 * in the response ring. The caller must hold hbalock to make sure
 * that no other thread consumes the next response iocb.
 * SLI-2/SLI-3 provide different sized iocbs.
 **/
static inline IOCB_t *
lpfc_resp_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
        return (IOCB_t *) (((char *) pring->sli.sli3.rspringaddr) +
                           pring->sli.sli3.rspidx * phba->iocb_rsp_size);
}

/**
 * __lpfc_sli_get_iocbq - Allocates an iocb object from iocb pool
 * @phba: Pointer to HBA context object.
 *
 * This function is called with hbalock held. This function
 * allocates a new driver iocb object from the iocb pool. If the
 * allocation is successful, it returns a pointer to the newly
 * allocated iocb object; otherwise it returns NULL.
 **/
struct lpfc_iocbq *
__lpfc_sli_get_iocbq(struct lpfc_hba *phba)
{
        struct list_head *lpfc_iocb_list = &phba->lpfc_iocb_list;
        struct lpfc_iocbq *iocbq = NULL;

        lockdep_assert_held(&phba->hbalock);

        list_remove_head(lpfc_iocb_list, iocbq, struct lpfc_iocbq, list);
        if (iocbq)
                phba->iocb_cnt++;
        if (phba->iocb_cnt > phba->iocb_max)
                phba->iocb_max = phba->iocb_cnt;
        return iocbq;
}

/**
 * __lpfc_clear_active_sglq - Remove the active sglq for this XRI.
 * @phba: Pointer to HBA context object.
 * @xritag: XRI value.
 *
 * This function clears the sglq pointer from the array of active
 * sglq's. The xritag that is passed in is used to index into the
 * array. Before the xritag can be used it needs to be adjusted
 * by subtracting the xribase.
 *
 * Returns the sglq pointer on success, NULL on failure.
 **/
struct lpfc_sglq *
__lpfc_clear_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
{
        struct lpfc_sglq *sglq;

        sglq = phba->sli4_hba.lpfc_sglq_active_list[xritag];
        phba->sli4_hba.lpfc_sglq_active_list[xritag] = NULL;
        return sglq;
}

/**
 * __lpfc_get_active_sglq - Get the active sglq for this XRI.
 * @phba: Pointer to HBA context object.
 * @xritag: XRI value.
 *
 * This function returns the sglq pointer from the array of active
 * sglq's. The xritag that is passed in is used to index into the
 * array. Before the xritag can be used it needs to be adjusted
 * by subtracting the xribase.
 *
 * Returns the sglq pointer on success, NULL on failure.
 **/
struct lpfc_sglq *
__lpfc_get_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
{
        struct lpfc_sglq *sglq;

        sglq = phba->sli4_hba.lpfc_sglq_active_list[xritag];
        return sglq;
}

/**
 * lpfc_clr_rrq_active - Clears RRQ active bit in xri_bitmap.
 * @phba: Pointer to HBA context object.
 * @xritag: xri used in this exchange.
 * @rrq: The RRQ to be cleared.
 *
 * Clears the bit for @xritag in the active_rrqs_xri_bitmap of the node
 * that @rrq refers to, then frees @rrq back to the rrq mempool.
 **/
void
lpfc_clr_rrq_active(struct lpfc_hba *phba,
                    uint16_t xritag,
                    struct lpfc_node_rrq *rrq)
{
        struct lpfc_nodelist *ndlp = NULL;

        /* Lookup did to verify if did is still active on this vport */
        if (rrq->vport)
                ndlp = lpfc_findnode_did(rrq->vport, rrq->nlp_DID);

        if (!ndlp)
                goto out;

        if (test_and_clear_bit(xritag, ndlp->active_rrqs_xri_bitmap)) {
                rrq->send_rrq = 0;
                rrq->xritag = 0;
                rrq->rrq_stop_time = 0;
        }
out:
        mempool_free(rrq, phba->rrq_pool);
}

/**
 * lpfc_handle_rrq_active - Checks if RRQ has waited RATOV.
 * @phba: Pointer to HBA context object.
 *
 * This function takes the hbalock. It checks whether stop_time
 * (ratov from setting the rrq active) has been reached for each
 * active rrq. If it has and the send_rrq flag is set then it will
 * call lpfc_send_rrq. If the send_rrq flag is not set then it will
 * just call the routine to clear the rrq and free the rrq resource.
 * The timer is set to the next rrq that is going to expire before
 * leaving the routine.
 **/
void
lpfc_handle_rrq_active(struct lpfc_hba *phba)
{
        struct lpfc_node_rrq *rrq;
        struct lpfc_node_rrq *nextrrq;
        unsigned long next_time;
        unsigned long iflags;
        LIST_HEAD(send_rrq);

        spin_lock_irqsave(&phba->hbalock, iflags);
        phba->hba_flag &= ~HBA_RRQ_ACTIVE;
        next_time = jiffies + msecs_to_jiffies(1000 * (phba->fc_ratov + 1));
        list_for_each_entry_safe(rrq, nextrrq,
                                 &phba->active_rrq_list, list) {
                if (time_after(jiffies, rrq->rrq_stop_time))
                        list_move(&rrq->list, &send_rrq);
                else if (time_before(rrq->rrq_stop_time, next_time))
                        next_time = rrq->rrq_stop_time;
        }
        spin_unlock_irqrestore(&phba->hbalock, iflags);
        if ((!list_empty(&phba->active_rrq_list)) &&
            (!(phba->pport->load_flag & FC_UNLOADING)))
                mod_timer(&phba->rrq_tmr, next_time);
        list_for_each_entry_safe(rrq, nextrrq, &send_rrq, list) {
                list_del(&rrq->list);
                if (!rrq->send_rrq) {
                        /* this call will free the rrq */
                        lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
                } else if (lpfc_send_rrq(phba, rrq)) {
                        /* if we send the rrq then the completion handler
                         * will clear the bit in the xribitmap.
                         */
                        lpfc_clr_rrq_active(phba, rrq->xritag,
                                            rrq);
                }
        }
}

/**
 * lpfc_get_active_rrq - Get the active RRQ for this exchange.
 * @vport: Pointer to vport context object.
 * @xri: The xri used in the exchange.
 * @did: The target's DID for this exchange.
 *
 * Returns NULL if no rrq for this xri and target is found in the
 * phba->active_rrq_list; otherwise removes the matching rrq from
 * the list and returns it.
 **/
struct lpfc_node_rrq *
lpfc_get_active_rrq(struct lpfc_vport *vport, uint16_t xri, uint32_t did)
{
        struct lpfc_hba *phba = vport->phba;
        struct lpfc_node_rrq *rrq;
        struct lpfc_node_rrq *nextrrq;
        unsigned long iflags;

        if (phba->sli_rev != LPFC_SLI_REV4)
                return NULL;
        spin_lock_irqsave(&phba->hbalock, iflags);
        list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list) {
                if (rrq->vport == vport && rrq->xritag == xri &&
                    rrq->nlp_DID == did) {
                        list_del(&rrq->list);
                        spin_unlock_irqrestore(&phba->hbalock, iflags);
                        return rrq;
                }
        }
        spin_unlock_irqrestore(&phba->hbalock, iflags);
        return NULL;
}

/**
 * lpfc_cleanup_vports_rrqs - Remove and clear the active RRQs for this vport.
 * @vport: Pointer to vport context object.
 * @ndlp: Pointer to the lpfc_nodelist structure.
 *
 * If @ndlp is NULL, remove all active RRQs for this vport from the
 * phba->active_rrq_list and clear the rrq.
 * If @ndlp is not NULL, then only remove rrqs for this vport and this ndlp.
 **/
void
lpfc_cleanup_vports_rrqs(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
        struct lpfc_hba *phba = vport->phba;
        struct lpfc_node_rrq *rrq;
        struct lpfc_node_rrq *nextrrq;
        unsigned long iflags;
        LIST_HEAD(rrq_list);

        if (phba->sli_rev != LPFC_SLI_REV4)
                return;
        if (!ndlp) {
                lpfc_sli4_vport_delete_els_xri_aborted(vport);
                lpfc_sli4_vport_delete_fcp_xri_aborted(vport);
        }
        spin_lock_irqsave(&phba->hbalock, iflags);
        list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list) {
                if (rrq->vport != vport)
                        continue;

                if (!ndlp || ndlp == lpfc_findnode_did(vport, rrq->nlp_DID))
                        list_move(&rrq->list, &rrq_list);

        }
        spin_unlock_irqrestore(&phba->hbalock, iflags);

        list_for_each_entry_safe(rrq, nextrrq, &rrq_list, list) {
                list_del(&rrq->list);
                lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
        }
}

/**
 * lpfc_test_rrq_active - Test RRQ bit in xri_bitmap.
 * @phba: Pointer to HBA context object.
 * @ndlp: Target's nodelist pointer for this exchange.
 * @xritag: the xri in the bitmap to test.
 *
 * This function returns:
 * 0 = rrq not active for this xri
 * 1 = rrq is valid for this xri.
 **/
int
lpfc_test_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
                     uint16_t xritag)
{
        if (!ndlp)
                return 0;
        if (!ndlp->active_rrqs_xri_bitmap)
                return 0;
        if (test_bit(xritag, ndlp->active_rrqs_xri_bitmap))
                return 1;
        else
                return 0;
}

/**
 * lpfc_set_rrq_active - set RRQ active bit in xri_bitmap.
 * @phba: Pointer to HBA context object.
 * @ndlp: nodelist pointer for this target.
 * @xritag: xri used in this exchange.
 * @rxid: Remote Exchange ID.
 * @send_rrq: Flag used to determine if we should send rrq els cmd.
 *
 * This function takes the hbalock.
 * The active bit is always set in the active rrq xri_bitmap even
 * if there is no slot available for the other rrq information.
 *
 * Returns 0 if the rrq was activated for this xri, or
 * < 0 on no memory or invalid ndlp.
 **/
int
lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
                    uint16_t xritag, uint16_t rxid, uint16_t send_rrq)
{
        unsigned long iflags;
        struct lpfc_node_rrq *rrq;
        int empty;

        if (!ndlp)
                return -EINVAL;

        if (!phba->cfg_enable_rrq)
                return -EINVAL;

        spin_lock_irqsave(&phba->hbalock, iflags);
        if (phba->pport->load_flag & FC_UNLOADING) {
                phba->hba_flag &= ~HBA_RRQ_ACTIVE;
                goto out;
        }

        if (ndlp->vport && (ndlp->vport->load_flag & FC_UNLOADING))
                goto out;

        if (!ndlp->active_rrqs_xri_bitmap)
                goto out;

        if (test_and_set_bit(xritag, ndlp->active_rrqs_xri_bitmap))
                goto out;

        spin_unlock_irqrestore(&phba->hbalock, iflags);
        rrq = mempool_alloc(phba->rrq_pool, GFP_ATOMIC);
        if (!rrq) {
                lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
                                "3155 Unable to allocate RRQ xri:0x%x rxid:0x%x"
                                " DID:0x%x Send:%d\n",
                                xritag, rxid, ndlp->nlp_DID, send_rrq);
                return -EINVAL;
        }
        if (phba->cfg_enable_rrq == 1)
                rrq->send_rrq = send_rrq;
        else
                rrq->send_rrq = 0;
        rrq->xritag = xritag;
        rrq->rrq_stop_time = jiffies +
                                msecs_to_jiffies(1000 * (phba->fc_ratov + 1));
        rrq->nlp_DID = ndlp->nlp_DID;
        rrq->vport = ndlp->vport;
        rrq->rxid = rxid;
        spin_lock_irqsave(&phba->hbalock, iflags);
        empty = list_empty(&phba->active_rrq_list);
        list_add_tail(&rrq->list, &phba->active_rrq_list);
        phba->hba_flag |= HBA_RRQ_ACTIVE;
        if (empty)
                lpfc_worker_wake_up(phba);
        spin_unlock_irqrestore(&phba->hbalock, iflags);
        return 0;
out:
        spin_unlock_irqrestore(&phba->hbalock, iflags);
        lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
                        "2921 Can't set rrq active xri:0x%x rxid:0x%x"
                        " DID:0x%x Send:%d\n",
                        xritag, rxid, ndlp->nlp_DID, send_rrq);
        return -EINVAL;
}

/**
 * __lpfc_sli_get_els_sglq - Allocates an iocb object from sgl pool
 * @phba: Pointer to HBA context object.
 * @piocbq: Pointer to the iocbq.
 *
 * The driver calls this function with either the nvme ls ring lock
 * or the fc els ring lock held depending on the iocb usage. This function
 * gets a new driver sglq object from the sglq list. If the list is not
 * empty the allocation is successful and this function returns a pointer
 * to the newly allocated sglq object; otherwise it returns NULL.
 **/
static struct lpfc_sglq *
__lpfc_sli_get_els_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq)
{
        struct list_head *lpfc_els_sgl_list = &phba->sli4_hba.lpfc_els_sgl_list;
        struct lpfc_sglq *sglq = NULL;
        struct lpfc_sglq *start_sglq = NULL;
        struct lpfc_io_buf *lpfc_cmd;
        struct lpfc_nodelist *ndlp;
        struct lpfc_sli_ring *pring = NULL;
        int found = 0;

        if (piocbq->iocb_flag & LPFC_IO_NVME_LS)
                pring = phba->sli4_hba.nvmels_wq->pring;
        else
                pring = lpfc_phba_elsring(phba);

        lockdep_assert_held(&pring->ring_lock);

        if (piocbq->iocb_flag & LPFC_IO_FCP) {
                lpfc_cmd = (struct lpfc_io_buf *)piocbq->context1;
                ndlp = lpfc_cmd->rdata->pnode;
        } else if ((piocbq->iocb.ulpCommand == CMD_GEN_REQUEST64_CR) &&
                   !(piocbq->iocb_flag & LPFC_IO_LIBDFC)) {
                ndlp = piocbq->context_un.ndlp;
        } else if (piocbq->iocb_flag & LPFC_IO_LIBDFC) {
                if (piocbq->iocb_flag & LPFC_IO_LOOPBACK)
                        ndlp = NULL;
                else
                        ndlp = piocbq->context_un.ndlp;
        } else {
                ndlp = piocbq->context1;
        }

        spin_lock(&phba->sli4_hba.sgl_list_lock);
        list_remove_head(lpfc_els_sgl_list, sglq, struct lpfc_sglq, list);
        start_sglq = sglq;
        while (!found) {
                if (!sglq)
                        break;
                if (ndlp && ndlp->active_rrqs_xri_bitmap &&
                    test_bit(sglq->sli4_lxritag,
                             ndlp->active_rrqs_xri_bitmap)) {
                        /* This xri has an rrq outstanding for this DID.
                         * put it back in the list and get another xri.
                         */
                        list_add_tail(&sglq->list, lpfc_els_sgl_list);
                        sglq = NULL;
                        list_remove_head(lpfc_els_sgl_list, sglq,
                                         struct lpfc_sglq, list);
                        if (sglq == start_sglq) {
                                list_add_tail(&sglq->list, lpfc_els_sgl_list);
                                sglq = NULL;
                                break;
                        } else
                                continue;
                }
                sglq->ndlp = ndlp;
                found = 1;
                phba->sli4_hba.lpfc_sglq_active_list[sglq->sli4_lxritag] = sglq;
                sglq->state = SGL_ALLOCATED;
        }
        spin_unlock(&phba->sli4_hba.sgl_list_lock);
        return sglq;
}

/**
 * __lpfc_sli_get_nvmet_sglq - Allocates an iocb object from sgl pool
 * @phba: Pointer to HBA context object.
 * @piocbq: Pointer to the iocbq.
 *
 * This function is called with the sgl_list lock held. This function
 * gets a new driver sglq object from the sglq list. If the list is not
 * empty the allocation is successful and this function returns a pointer
 * to the newly allocated sglq object; otherwise it returns NULL.
 **/
struct lpfc_sglq *
__lpfc_sli_get_nvmet_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq)
{
        struct list_head *lpfc_nvmet_sgl_list;
        struct lpfc_sglq *sglq = NULL;

        lpfc_nvmet_sgl_list = &phba->sli4_hba.lpfc_nvmet_sgl_list;

        lockdep_assert_held(&phba->sli4_hba.sgl_list_lock);

        list_remove_head(lpfc_nvmet_sgl_list, sglq, struct lpfc_sglq, list);
        if (!sglq)
                return NULL;
        phba->sli4_hba.lpfc_sglq_active_list[sglq->sli4_lxritag] = sglq;
        sglq->state = SGL_ALLOCATED;
        return sglq;
}

/**
 * lpfc_sli_get_iocbq - Allocates an iocb object from iocb pool
 * @phba: Pointer to HBA context object.
 *
 * This function is called with no lock held. This function
 * allocates a new driver iocb object from the iocb pool. If the
 * allocation is successful, it returns a pointer to the newly
 * allocated iocb object; otherwise it returns NULL.
 **/
struct lpfc_iocbq *
lpfc_sli_get_iocbq(struct lpfc_hba *phba)
{
        struct lpfc_iocbq *iocbq = NULL;
        unsigned long iflags;

        spin_lock_irqsave(&phba->hbalock, iflags);
        iocbq = __lpfc_sli_get_iocbq(phba);
        spin_unlock_irqrestore(&phba->hbalock, iflags);
        return iocbq;
}

1360/**
1361 * __lpfc_sli_release_iocbq_s4 - Release iocb to the iocb pool
1362 * @phba: Pointer to HBA context object.
1363 * @iocbq: Pointer to driver iocb object.
1364 *
1365 * This function is called to release the driver iocb object
1366 * to the iocb pool. The iotag in the iocb object
1367 * does not change for each use of the iocb object. This function
1368 * clears all other fields of the iocb object when it is freed.
1369 * The sqlq structure that holds the xritag and phys and virtual
1370 * mappings for the scatter gather list is retrieved from the
1371 * active array of sglq. The get of the sglq pointer also clears
1372 * the entry in the array. If the status of the IO indiactes that
1373 * this IO was aborted then the sglq entry it put on the
1374 * lpfc_abts_els_sgl_list until the CQ_ABORTED_XRI is received. If the
1375 * IO has good status or fails for any other reason then the sglq
1376 * entry is added to the free list (lpfc_els_sgl_list). The hbalock is
1377 *  asserted held in the code path calling this routine.
1378 **/
1379static void
1380__lpfc_sli_release_iocbq_s4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
1381{
1382        struct lpfc_sglq *sglq;
1383        size_t start_clean = offsetof(struct lpfc_iocbq, iocb);
1384        unsigned long iflag = 0;
1385        struct lpfc_sli_ring *pring;
1386
1387        if (iocbq->sli4_xritag == NO_XRI)
1388                sglq = NULL;
1389        else
1390                sglq = __lpfc_clear_active_sglq(phba, iocbq->sli4_lxritag);
1391
1392
1393        if (sglq) {
1394                if (iocbq->iocb_flag & LPFC_IO_NVMET) {
1395                        spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock,
1396                                          iflag);
1397                        sglq->state = SGL_FREED;
1398                        sglq->ndlp = NULL;
1399                        list_add_tail(&sglq->list,
1400                                      &phba->sli4_hba.lpfc_nvmet_sgl_list);
1401                        spin_unlock_irqrestore(
1402                                &phba->sli4_hba.sgl_list_lock, iflag);
1403                        goto out;
1404                }
1405
1406                if ((iocbq->iocb_flag & LPFC_EXCHANGE_BUSY) &&
1407                        (sglq->state != SGL_XRI_ABORTED)) {
1408                        spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock,
1409                                          iflag);
1410
1411                        /* Check if we can get a reference on ndlp */
1412                        if (sglq->ndlp && !lpfc_nlp_get(sglq->ndlp))
1413                                sglq->ndlp = NULL;
1414
1415                        list_add(&sglq->list,
1416                                 &phba->sli4_hba.lpfc_abts_els_sgl_list);
1417                        spin_unlock_irqrestore(
1418                                &phba->sli4_hba.sgl_list_lock, iflag);
1419                } else {
1420                        spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock,
1421                                          iflag);
1422                        sglq->state = SGL_FREED;
1423                        sglq->ndlp = NULL;
1424                        list_add_tail(&sglq->list,
1425                                      &phba->sli4_hba.lpfc_els_sgl_list);
1426                        spin_unlock_irqrestore(
1427                                &phba->sli4_hba.sgl_list_lock, iflag);
1428                        pring = lpfc_phba_elsring(phba);
1429                        /* Check if TXQ queue needs to be serviced */
1430                        if (pring && (!list_empty(&pring->txq)))
1431                                lpfc_worker_wake_up(phba);
1432                }
1433        }
1434
1435out:
1436        /*
1437         * Clean all volatile data fields, preserve iotag and node struct.
1438         */
1439        memset((char *)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean);
1440        iocbq->sli4_lxritag = NO_XRI;
1441        iocbq->sli4_xritag = NO_XRI;
1442        iocbq->iocb_flag &= ~(LPFC_IO_NVME | LPFC_IO_NVMET |
1443                              LPFC_IO_NVME_LS);
1444        list_add_tail(&iocbq->list, &phba->lpfc_iocb_list);
1445}
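/*
 * Editorial note: the sglq handling above implies this lifecycle:
 *   SGL_ALLOCATED -> lpfc_abts_els_sgl_list     (LPFC_EXCHANGE_BUSY set
 *                                                and abort not yet seen)
 *   SGL_ALLOCATED / SGL_XRI_ABORTED -> SGL_FREED on lpfc_els_sgl_list
 * NVMET sglqs always return directly to lpfc_nvmet_sgl_list.
 */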
1446
1447
1448/**
1449 * __lpfc_sli_release_iocbq_s3 - Release iocb to the iocb pool
1450 * @phba: Pointer to HBA context object.
1451 * @iocbq: Pointer to driver iocb object.
1452 *
1453 * This function is called to release the driver iocb object to the
1454 * iocb pool. The iotag in the iocb object does not change for each
1455 * use of the iocb object. This function clears all other fields of
1456 * the iocb object when it is freed. The hbalock is asserted held in
1457 * the code path calling this routine.
1458 **/
1459static void
1460__lpfc_sli_release_iocbq_s3(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
1461{
1462        size_t start_clean = offsetof(struct lpfc_iocbq, iocb);
1463
1464        /*
1465         * Clean all volatile data fields, preserve iotag and node struct.
1466         */
1467        memset((char *)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean);
1468        iocbq->sli4_xritag = NO_XRI;
1469        list_add_tail(&iocbq->list, &phba->lpfc_iocb_list);
1470}
1471
1472/**
1473 * __lpfc_sli_release_iocbq - Release iocb to the iocb pool
1474 * @phba: Pointer to HBA context object.
1475 * @iocbq: Pointer to driver iocb object.
1476 *
1477 * This function is called with hbalock held to release driver
1478 * iocb object to the iocb pool. The iotag in the iocb object
1479 * does not change for each use of the iocb object. This function
1480 * clears all other fields of the iocb object when it is freed.
1481 **/
1482static void
1483__lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
1484{
1485        lockdep_assert_held(&phba->hbalock);
1486
1487        phba->__lpfc_sli_release_iocbq(phba, iocbq);
1488        phba->iocb_cnt--;
1489}
1490
1491/**
1492 * lpfc_sli_release_iocbq - Release iocb to the iocb pool
1493 * @phba: Pointer to HBA context object.
1494 * @iocbq: Pointer to driver iocb object.
1495 *
1496 * This function is called with no lock held to release the iocb to
1497 * iocb pool.
1498 **/
1499void
1500lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
1501{
1502        unsigned long iflags;
1503
1504        /*
1505         * Clean all volatile data fields, preserve iotag and node struct.
1506         */
1507        spin_lock_irqsave(&phba->hbalock, iflags);
1508        __lpfc_sli_release_iocbq(phba, iocbq);
1509        spin_unlock_irqrestore(&phba->hbalock, iflags);
1510}
1511
1512/**
1513 * lpfc_sli_cancel_iocbs - Cancel all iocbs from a list.
1514 * @phba: Pointer to HBA context object.
1515 * @iocblist: List of IOCBs.
1516 * @ulpstatus: ULP status in IOCB command field.
1517 * @ulpWord4: ULP word-4 in IOCB command field.
1518 *
1519 * This function is called with a list of IOCBs to cancel. It cancels each IOCB
1520 * on the list by invoking the completion callback function associated with the
1521 * IOCB, with the provided @ulpstatus and @ulpWord4 set in the IOCB command
1522 * fields.
1523 **/
1524void
1525lpfc_sli_cancel_iocbs(struct lpfc_hba *phba, struct list_head *iocblist,
1526                      uint32_t ulpstatus, uint32_t ulpWord4)
1527{
1528        struct lpfc_iocbq *piocb;
1529
1530        while (!list_empty(iocblist)) {
1531                list_remove_head(iocblist, piocb, struct lpfc_iocbq, list);
1532                if (piocb->wqe_cmpl) {
1533                        if (piocb->iocb_flag & LPFC_IO_NVME)
1534                                lpfc_nvme_cancel_iocb(phba, piocb,
1535                                                      ulpstatus, ulpWord4);
1536                        else
1537                                lpfc_sli_release_iocbq(phba, piocb);
1538
1539                } else if (piocb->iocb_cmpl) {
1540                        piocb->iocb.ulpStatus = ulpstatus;
1541                        piocb->iocb.un.ulpWord[4] = ulpWord4;
1542                        (piocb->iocb_cmpl) (phba, piocb, piocb);
1543                } else {
1544                        lpfc_sli_release_iocbq(phba, piocb);
1545                }
1546        }
1547        return;
1548}
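/*
 * Illustrative sketch (editorial, not driver code): a typical caller
 * splices a ring's txq onto a local list and fails everything on it
 * with a local-reject status:
 *
 *	LIST_HEAD(completions);
 *
 *	spin_lock_irq(&phba->hbalock);
 *	list_splice_init(&pring->txq, &completions);
 *	spin_unlock_irq(&phba->hbalock);
 *	lpfc_sli_cancel_iocbs(phba, &completions,
 *			      IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED);
 */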
1549
1550/**
1551 * lpfc_sli_iocb_cmd_type - Get the iocb type
1552 * @iocb_cmnd: iocb command code.
1553 *
1554 * This function is called by the ring event handler function to get the iocb type.
1555 * This function translates the iocb command to an iocb command type used to
1556 * decide the final disposition of each completed IOCB.
1557 * The function returns
1558 * LPFC_UNKNOWN_IOCB if it is an unsupported iocb
1559 * LPFC_SOL_IOCB     if it is a solicited iocb completion
1560 * LPFC_ABORT_IOCB   if it is an abort iocb
1561 * LPFC_UNSOL_IOCB   if it is an unsolicited iocb
1562 *
1563 * The caller is not required to hold any lock.
1564 **/
1565static lpfc_iocb_type
1566lpfc_sli_iocb_cmd_type(uint8_t iocb_cmnd)
1567{
1568        lpfc_iocb_type type = LPFC_UNKNOWN_IOCB;
1569
1570        if (iocb_cmnd > CMD_MAX_IOCB_CMD)
1571                return LPFC_UNKNOWN_IOCB;
1572
1573        switch (iocb_cmnd) {
1574        case CMD_XMIT_SEQUENCE_CR:
1575        case CMD_XMIT_SEQUENCE_CX:
1576        case CMD_XMIT_BCAST_CN:
1577        case CMD_XMIT_BCAST_CX:
1578        case CMD_ELS_REQUEST_CR:
1579        case CMD_ELS_REQUEST_CX:
1580        case CMD_CREATE_XRI_CR:
1581        case CMD_CREATE_XRI_CX:
1582        case CMD_GET_RPI_CN:
1583        case CMD_XMIT_ELS_RSP_CX:
1584        case CMD_GET_RPI_CR:
1585        case CMD_FCP_IWRITE_CR:
1586        case CMD_FCP_IWRITE_CX:
1587        case CMD_FCP_IREAD_CR:
1588        case CMD_FCP_IREAD_CX:
1589        case CMD_FCP_ICMND_CR:
1590        case CMD_FCP_ICMND_CX:
1591        case CMD_FCP_TSEND_CX:
1592        case CMD_FCP_TRSP_CX:
1593        case CMD_FCP_TRECEIVE_CX:
1594        case CMD_FCP_AUTO_TRSP_CX:
1595        case CMD_ADAPTER_MSG:
1596        case CMD_ADAPTER_DUMP:
1597        case CMD_XMIT_SEQUENCE64_CR:
1598        case CMD_XMIT_SEQUENCE64_CX:
1599        case CMD_XMIT_BCAST64_CN:
1600        case CMD_XMIT_BCAST64_CX:
1601        case CMD_ELS_REQUEST64_CR:
1602        case CMD_ELS_REQUEST64_CX:
1603        case CMD_FCP_IWRITE64_CR:
1604        case CMD_FCP_IWRITE64_CX:
1605        case CMD_FCP_IREAD64_CR:
1606        case CMD_FCP_IREAD64_CX:
1607        case CMD_FCP_ICMND64_CR:
1608        case CMD_FCP_ICMND64_CX:
1609        case CMD_FCP_TSEND64_CX:
1610        case CMD_FCP_TRSP64_CX:
1611        case CMD_FCP_TRECEIVE64_CX:
1612        case CMD_GEN_REQUEST64_CR:
1613        case CMD_GEN_REQUEST64_CX:
1614        case CMD_XMIT_ELS_RSP64_CX:
1615        case DSSCMD_IWRITE64_CR:
1616        case DSSCMD_IWRITE64_CX:
1617        case DSSCMD_IREAD64_CR:
1618        case DSSCMD_IREAD64_CX:
1619        case CMD_SEND_FRAME:
1620                type = LPFC_SOL_IOCB;
1621                break;
1622        case CMD_ABORT_XRI_CN:
1623        case CMD_ABORT_XRI_CX:
1624        case CMD_CLOSE_XRI_CN:
1625        case CMD_CLOSE_XRI_CX:
1626        case CMD_XRI_ABORTED_CX:
1627        case CMD_ABORT_MXRI64_CN:
1628        case CMD_XMIT_BLS_RSP64_CX:
1629                type = LPFC_ABORT_IOCB;
1630                break;
1631        case CMD_RCV_SEQUENCE_CX:
1632        case CMD_RCV_ELS_REQ_CX:
1633        case CMD_RCV_SEQUENCE64_CX:
1634        case CMD_RCV_ELS_REQ64_CX:
1635        case CMD_ASYNC_STATUS:
1636        case CMD_IOCB_RCV_SEQ64_CX:
1637        case CMD_IOCB_RCV_ELS64_CX:
1638        case CMD_IOCB_RCV_CONT64_CX:
1639        case CMD_IOCB_RET_XRI64_CX:
1640                type = LPFC_UNSOL_IOCB;
1641                break;
1642        case CMD_IOCB_XMIT_MSEQ64_CR:
1643        case CMD_IOCB_XMIT_MSEQ64_CX:
1644        case CMD_IOCB_RCV_SEQ_LIST64_CX:
1645        case CMD_IOCB_RCV_ELS_LIST64_CX:
1646        case CMD_IOCB_CLOSE_EXTENDED_CN:
1647        case CMD_IOCB_ABORT_EXTENDED_CN:
1648        case CMD_IOCB_RET_HBQE64_CN:
1649        case CMD_IOCB_FCP_IBIDIR64_CR:
1650        case CMD_IOCB_FCP_IBIDIR64_CX:
1651        case CMD_IOCB_FCP_ITASKMGT64_CX:
1652        case CMD_IOCB_LOGENTRY_CN:
1653        case CMD_IOCB_LOGENTRY_ASYNC_CN:
1654                pr_err("%s - Unhandled SLI-3 Command x%x\n",
1655                       __func__, iocb_cmnd);
1656                type = LPFC_UNKNOWN_IOCB;
1657                break;
1658        default:
1659                type = LPFC_UNKNOWN_IOCB;
1660                break;
1661        }
1662
1663        return type;
1664}
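/*
 * Illustrative sketch (editorial, not driver code): the ring event
 * handlers dispatch on the returned type roughly as follows:
 *
 *	switch (lpfc_sli_iocb_cmd_type(irsp->ulpCommand & CMD_IOCB_MASK)) {
 *	case LPFC_SOL_IOCB:	... match txcmplq entry, run its cmpl ...
 *	case LPFC_UNSOL_IOCB:	... hand the sequence to the ULP ...
 *	case LPFC_ABORT_IOCB:	... finish the aborted command ...
 *	default:		... log and drop ...
 *	}
 */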
1665
1666/**
1667 * lpfc_sli_ring_map - Issue config_ring mbox for all rings
1668 * @phba: Pointer to HBA context object.
1669 *
1670 * This function is called from SLI initialization code
1671 * to configure every ring of the HBA's SLI interface. The
1672 * caller is not required to hold any lock. This function issues
1673 * a config_ring mailbox command for each ring.
1674 * This function returns zero if successful else returns a negative
1675 * error code.
1676 **/
1677static int
1678lpfc_sli_ring_map(struct lpfc_hba *phba)
1679{
1680        struct lpfc_sli *psli = &phba->sli;
1681        LPFC_MBOXQ_t *pmb;
1682        MAILBOX_t *pmbox;
1683        int i, rc, ret = 0;
1684
1685        pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1686        if (!pmb)
1687                return -ENOMEM;
1688        pmbox = &pmb->u.mb;
1689        phba->link_state = LPFC_INIT_MBX_CMDS;
1690        for (i = 0; i < psli->num_rings; i++) {
1691                lpfc_config_ring(phba, i, pmb);
1692                rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
1693                if (rc != MBX_SUCCESS) {
1694                        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1695                                        "0446 Adapter failed to init (%d), "
1696                                        "mbxCmd x%x CFG_RING, mbxStatus x%x, "
1697                                        "ring %d\n",
1698                                        rc, pmbox->mbxCommand,
1699                                        pmbox->mbxStatus, i);
1700                        phba->link_state = LPFC_HBA_ERROR;
1701                        ret = -ENXIO;
1702                        break;
1703                }
1704        }
1705        mempool_free(pmb, phba->mbox_mem_pool);
1706        return ret;
1707}
1708
1709/**
1710 * lpfc_sli_ringtxcmpl_put - Adds new iocb to the txcmplq
1711 * @phba: Pointer to HBA context object.
1712 * @pring: Pointer to driver SLI ring object.
1713 * @piocb: Pointer to the driver iocb object.
1714 *
1715 * The driver calls this function with the hbalock held for SLI3 ports or
1716 * the ring lock held for SLI4 ports. The function adds the
1717 * new iocb to txcmplq of the given ring. This function always returns
1718 * 0. If this function is called for the ELS ring, it checks whether
1719 * there is a vport associated with the ELS command. This function also
1720 * starts the els_tmofunc timer if this is an ELS command.
1721 **/
1722static int
1723lpfc_sli_ringtxcmpl_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
1724                        struct lpfc_iocbq *piocb)
1725{
1726        if (phba->sli_rev == LPFC_SLI_REV4)
1727                lockdep_assert_held(&pring->ring_lock);
1728        else
1729                lockdep_assert_held(&phba->hbalock);
1730
1731        BUG_ON(!piocb);
1732
1733        list_add_tail(&piocb->list, &pring->txcmplq);
1734        piocb->iocb_flag |= LPFC_IO_ON_TXCMPLQ;
1735        pring->txcmplq_cnt++;
1736
1737        if ((unlikely(pring->ringno == LPFC_ELS_RING)) &&
1738           (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) &&
1739           (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN)) {
1740                BUG_ON(!piocb->vport);
1741                if (!(piocb->vport->load_flag & FC_UNLOADING))
1742                        mod_timer(&piocb->vport->els_tmofunc,
1743                                  jiffies +
1744                                  msecs_to_jiffies(1000 * (phba->fc_ratov << 1)));
1745        }
1746
1747        return 0;
1748}
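/*
 * Editorial note: the ELS timeout armed above is twice the resource
 * allocation timeout value, e.g. fc_ratov == 10 gives
 * msecs_to_jiffies(1000 * (10 << 1)), a 20 second timer.
 */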
1749
1750/**
1751 * lpfc_sli_ringtx_get - Get first element of the txq
1752 * @phba: Pointer to HBA context object.
1753 * @pring: Pointer to driver SLI ring object.
1754 *
1755 * This function is called with hbalock held to get the next
1756 * iocb in the txq of the given ring. If there is any iocb in
1757 * the txq, the function returns first iocb in the list after
1758 * removing the iocb from the list, else it returns NULL.
1759 **/
1760struct lpfc_iocbq *
1761lpfc_sli_ringtx_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
1762{
1763        struct lpfc_iocbq *cmd_iocb;
1764
1765        lockdep_assert_held(&phba->hbalock);
1766
1767        list_remove_head((&pring->txq), cmd_iocb, struct lpfc_iocbq, list);
1768        return cmd_iocb;
1769}
1770
1771/**
1772 * lpfc_sli_next_iocb_slot - Get next iocb slot in the ring
1773 * @phba: Pointer to HBA context object.
1774 * @pring: Pointer to driver SLI ring object.
1775 *
1776 * This function is called with hbalock held and the caller must post the
1777 * iocb without releasing the lock. If the caller releases the lock,
1778 * the iocb slot returned by the function is not guaranteed to be available.
1779 * The function returns a pointer to the next available iocb slot if there
1780 * is an available slot in the ring, else it returns NULL.
1781 * If the get index of the ring is ahead of the put index, the function
1782 * will post an error attention event to the worker thread to take the
1783 * HBA to offline state.
1784 **/
1785static IOCB_t *
1786lpfc_sli_next_iocb_slot (struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
1787{
1788        struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
1789        uint32_t  max_cmd_idx = pring->sli.sli3.numCiocb;
1790
1791        lockdep_assert_held(&phba->hbalock);
1792
1793        if ((pring->sli.sli3.next_cmdidx == pring->sli.sli3.cmdidx) &&
1794           (++pring->sli.sli3.next_cmdidx >= max_cmd_idx))
1795                pring->sli.sli3.next_cmdidx = 0;
1796
1797        if (unlikely(pring->sli.sli3.local_getidx ==
1798                pring->sli.sli3.next_cmdidx)) {
1799
1800                pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx);
1801
1802                if (unlikely(pring->sli.sli3.local_getidx >= max_cmd_idx)) {
1803                        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1804                                        "0315 Ring %d issue: portCmdGet %d "
1805                                        "is bigger than cmd ring %d\n",
1806                                        pring->ringno,
1807                                        pring->sli.sli3.local_getidx,
1808                                        max_cmd_idx);
1809
1810                        phba->link_state = LPFC_HBA_ERROR;
1811                        /*
1812                         * All error attention handlers are posted to
1813                         * worker thread
1814                         */
1815                        phba->work_ha |= HA_ERATT;
1816                        phba->work_hs = HS_FFER3;
1817
1818                        lpfc_worker_wake_up(phba);
1819
1820                        return NULL;
1821                }
1822
1823                if (pring->sli.sli3.local_getidx == pring->sli.sli3.next_cmdidx)
1824                        return NULL;
1825        }
1826
1827        return lpfc_cmd_iocb(phba, pring);
1828}
1829
1830/**
1831 * lpfc_sli_next_iotag - Get an iotag for the iocb
1832 * @phba: Pointer to HBA context object.
1833 * @iocbq: Pointer to driver iocb object.
1834 *
1835 * This function gets an iotag for the iocb. If there is no unused iotag and
1836 * the iocbq_lookup_len < 0xffff, this function allocates a bigger iocbq_lookup
1837 * array and assigns a new iotag.
1838 * The function returns the allocated iotag if successful, else returns zero.
1839 * Zero is not a valid iotag.
1840 * The caller is not required to hold any lock.
1841 **/
1842uint16_t
1843lpfc_sli_next_iotag(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
1844{
1845        struct lpfc_iocbq **new_arr;
1846        struct lpfc_iocbq **old_arr;
1847        size_t new_len;
1848        struct lpfc_sli *psli = &phba->sli;
1849        uint16_t iotag;
1850
1851        spin_lock_irq(&phba->hbalock);
1852        iotag = psli->last_iotag;
1853        if (++iotag < psli->iocbq_lookup_len) {
1854                psli->last_iotag = iotag;
1855                psli->iocbq_lookup[iotag] = iocbq;
1856                spin_unlock_irq(&phba->hbalock);
1857                iocbq->iotag = iotag;
1858                return iotag;
1859        } else if (psli->iocbq_lookup_len < (0xffff
1860                                           - LPFC_IOCBQ_LOOKUP_INCREMENT)) {
1861                new_len = psli->iocbq_lookup_len + LPFC_IOCBQ_LOOKUP_INCREMENT;
1862                spin_unlock_irq(&phba->hbalock);
1863                new_arr = kcalloc(new_len, sizeof(struct lpfc_iocbq *),
1864                                  GFP_KERNEL);
1865                if (new_arr) {
1866                        spin_lock_irq(&phba->hbalock);
1867                        old_arr = psli->iocbq_lookup;
1868                        if (new_len <= psli->iocbq_lookup_len) {
1869                                /* highly improbable case */
1870                                kfree(new_arr);
1871                                iotag = psli->last_iotag;
1872                                if (++iotag < psli->iocbq_lookup_len) {
1873                                        psli->last_iotag = iotag;
1874                                        psli->iocbq_lookup[iotag] = iocbq;
1875                                        spin_unlock_irq(&phba->hbalock);
1876                                        iocbq->iotag = iotag;
1877                                        return iotag;
1878                                }
1879                                spin_unlock_irq(&phba->hbalock);
1880                                return 0;
1881                        }
1882                        if (psli->iocbq_lookup)
1883                                memcpy(new_arr, old_arr,
1884                                       ((psli->last_iotag  + 1) *
1885                                        sizeof(struct lpfc_iocbq *)));
1886                        psli->iocbq_lookup = new_arr;
1887                        psli->iocbq_lookup_len = new_len;
1888                        psli->last_iotag = iotag;
1889                        psli->iocbq_lookup[iotag] = iocbq;
1890                        spin_unlock_irq(&phba->hbalock);
1891                        iocbq->iotag = iotag;
1892                        kfree(old_arr);
1893                        return iotag;
1894                }
1895        } else
1896                spin_unlock_irq(&phba->hbalock);
1897
1898        lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
1899                        "0318 Failed to allocate IOTAG.last IOTAG is %d\n",
1900                        psli->last_iotag);
1901
1902        return 0;
1903}
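/*
 * Illustrative sketch (editorial, not driver code): the function
 * stores the tag in iocbq->iotag itself, so a caller only checks for
 * the zero failure value:
 *
 *	if (!lpfc_sli_next_iotag(phba, iocbq))
 *		... fail the iocb setup path ...
 */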
1904
1905/**
1906 * lpfc_sli_submit_iocb - Submit an iocb to the firmware
1907 * @phba: Pointer to HBA context object.
1908 * @pring: Pointer to driver SLI ring object.
1909 * @iocb: Pointer to iocb slot in the ring.
1910 * @nextiocb: Pointer to driver iocb object which needs to be
1911 *            posted to firmware.
1912 *
1913 * This function is called to post a new iocb to the firmware. This
1914 * function copies the new iocb to ring iocb slot and updates the
1915 * ring pointers. It adds the new iocb to the txcmplq if there is
1916 * a completion callback for this iocb; otherwise the function will free the
1917 * iocb object.  The hbalock is asserted held in the code path calling
1918 * this routine.
1919 **/
1920static void
1921lpfc_sli_submit_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
1922                IOCB_t *iocb, struct lpfc_iocbq *nextiocb)
1923{
1924        /*
1925         * Set up an iotag
1926         */
1927        nextiocb->iocb.ulpIoTag = (nextiocb->iocb_cmpl) ? nextiocb->iotag : 0;
1928
1929
1930        if (pring->ringno == LPFC_ELS_RING) {
1931                lpfc_debugfs_slow_ring_trc(phba,
1932                        "IOCB cmd ring:   wd4:x%08x wd6:x%08x wd7:x%08x",
1933                        *(((uint32_t *) &nextiocb->iocb) + 4),
1934                        *(((uint32_t *) &nextiocb->iocb) + 6),
1935                        *(((uint32_t *) &nextiocb->iocb) + 7));
1936        }
1937
1938        /*
1939         * Issue iocb command to adapter
1940         */
1941        lpfc_sli_pcimem_bcopy(&nextiocb->iocb, iocb, phba->iocb_cmd_size);
1942        wmb();
1943        pring->stats.iocb_cmd++;
1944
1945        /*
1946         * If there is no completion routine to call, we can release the
1947         * IOCB buffer back right now. For IOCBs, like QUE_RING_BUF,
1948         * that have no rsp ring completion, iocb_cmpl MUST be NULL.
1949         */
1950        if (nextiocb->iocb_cmpl)
1951                lpfc_sli_ringtxcmpl_put(phba, pring, nextiocb);
1952        else
1953                __lpfc_sli_release_iocbq(phba, nextiocb);
1954
1955        /*
1956         * Let the HBA know what IOCB slot will be the next one the
1957         * driver will put a command into.
1958         */
1959        pring->sli.sli3.cmdidx = pring->sli.sli3.next_cmdidx;
1960        writel(pring->sli.sli3.cmdidx, &phba->host_gp[pring->ringno].cmdPutInx);
1961}
1962
1963/**
1964 * lpfc_sli_update_full_ring - Update the chip attention register
1965 * @phba: Pointer to HBA context object.
1966 * @pring: Pointer to driver SLI ring object.
1967 *
1968 * The caller is not required to hold any lock for calling this function.
1969 * This function updates the chip attention bits for the ring to inform firmware
1970 * that there is pending work to be done for this ring and requests an
1971 * interrupt when there is space available in the ring. This function is
1972 * called when the driver is unable to post more iocbs to the ring due
1973 * to unavailability of space in the ring.
1974 **/
1975static void
1976lpfc_sli_update_full_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
1977{
1978        int ringno = pring->ringno;
1979
1980        pring->flag |= LPFC_CALL_RING_AVAILABLE;
1981
1982        wmb();
1983
1984        /*
1985         * Set ring 'ringno' to SET R0CE_REQ in Chip Att register.
1986         * The HBA will tell us when an IOCB entry is available.
1987         */
1988        writel((CA_R0ATT|CA_R0CE_REQ) << (ringno*4), phba->CAregaddr);
1989        readl(phba->CAregaddr); /* flush */
1990
1991        pring->stats.iocb_cmd_full++;
1992}
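/*
 * Editorial note: each ring owns a 4-bit field in the Chip Attention
 * register, so the shift above selects the ring's nibble; for
 * ringno == 2, for example, the write is (CA_R0ATT | CA_R0CE_REQ) << 8.
 */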
1993
1994/**
1995 * lpfc_sli_update_ring - Update chip attention register
1996 * @phba: Pointer to HBA context object.
1997 * @pring: Pointer to driver SLI ring object.
1998 *
1999 * This function updates the chip attention register bit for the
2000 * given ring to inform HBA that there is more work to be done
2001 * in this ring. The caller is not required to hold any lock.
2002 **/
2003static void
2004lpfc_sli_update_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
2005{
2006        int ringno = pring->ringno;
2007
2008        /*
2009         * Tell the HBA that there is work to do in this ring.
2010         */
2011        if (!(phba->sli3_options & LPFC_SLI3_CRP_ENABLED)) {
2012                wmb();
2013                writel(CA_R0ATT << (ringno * 4), phba->CAregaddr);
2014                readl(phba->CAregaddr); /* flush */
2015        }
2016}
2017
2018/**
2019 * lpfc_sli_resume_iocb - Process iocbs in the txq
2020 * @phba: Pointer to HBA context object.
2021 * @pring: Pointer to driver SLI ring object.
2022 *
2023 * This function is called with hbalock held to post pending iocbs
2024 * in the txq to the firmware. This function is called when the driver
2025 * detects space available in the ring.
2026 **/
2027static void
2028lpfc_sli_resume_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
2029{
2030        IOCB_t *iocb;
2031        struct lpfc_iocbq *nextiocb;
2032
2033        lockdep_assert_held(&phba->hbalock);
2034
2035        /*
2036         * Check to see if:
2037         *  (a) there is anything on the txq to send
2038         *  (b) link is up
2039         *  (c) link attention events can be processed (fcp ring only)
2040         *  (d) IOCB processing is not blocked by the outstanding mbox command.
2041         */
2042
2043        if (lpfc_is_link_up(phba) &&
2044            (!list_empty(&pring->txq)) &&
2045            (pring->ringno != LPFC_FCP_RING ||
2046             phba->sli.sli_flag & LPFC_PROCESS_LA)) {
2047
2048                while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) &&
2049                       (nextiocb = lpfc_sli_ringtx_get(phba, pring)))
2050                        lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb);
2051
2052                if (iocb)
2053                        lpfc_sli_update_ring(phba, pring);
2054                else
2055                        lpfc_sli_update_full_ring(phba, pring);
2056        }
2057
2058        return;
2059}
2060
2061/**
2062 * lpfc_sli_next_hbq_slot - Get next hbq entry for the HBQ
2063 * @phba: Pointer to HBA context object.
2064 * @hbqno: HBQ number.
2065 *
2066 * This function is called with hbalock held to get the next
2067 * available slot for the given HBQ. If there is a free slot
2068 * available for the HBQ it will return a pointer to the next available
2069 * HBQ entry else it will return NULL.
2070 **/
2071static struct lpfc_hbq_entry *
2072lpfc_sli_next_hbq_slot(struct lpfc_hba *phba, uint32_t hbqno)
2073{
2074        struct hbq_s *hbqp = &phba->hbqs[hbqno];
2075
2076        lockdep_assert_held(&phba->hbalock);
2077
2078        if (hbqp->next_hbqPutIdx == hbqp->hbqPutIdx &&
2079            ++hbqp->next_hbqPutIdx >= hbqp->entry_count)
2080                hbqp->next_hbqPutIdx = 0;
2081
2082        if (unlikely(hbqp->local_hbqGetIdx == hbqp->next_hbqPutIdx)) {
2083                uint32_t raw_index = phba->hbq_get[hbqno];
2084                uint32_t getidx = le32_to_cpu(raw_index);
2085
2086                hbqp->local_hbqGetIdx = getidx;
2087
2088                if (unlikely(hbqp->local_hbqGetIdx >= hbqp->entry_count)) {
2089                        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2090                                        "1802 HBQ %d: local_hbqGetIdx "
2091                                        "%u is > than hbqp->entry_count %u\n",
2092                                        hbqno, hbqp->local_hbqGetIdx,
2093                                        hbqp->entry_count);
2094
2095                        phba->link_state = LPFC_HBA_ERROR;
2096                        return NULL;
2097                }
2098
2099                if (hbqp->local_hbqGetIdx == hbqp->next_hbqPutIdx)
2100                        return NULL;
2101        }
2102
2103        return (struct lpfc_hbq_entry *) phba->hbqs[hbqno].hbq_virt +
2104                        hbqp->hbqPutIdx;
2105}
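/*
 * Editorial worked example: with entry_count == 4, hbqPutIdx == 3 and
 * next_hbqPutIdx == 3, the increment above wraps next_hbqPutIdx to 0.
 * The HBQ is treated as full only if that new value catches up with
 * the firmware's get index, in which case NULL is returned.
 */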
2106
2107/**
2108 * lpfc_sli_hbqbuf_free_all - Free all the hbq buffers
2109 * @phba: Pointer to HBA context object.
2110 *
2111 * This function is called with no lock held to free all the
2112 * hbq buffers while uninitializing the SLI interface. It also
2113 * frees the HBQ buffers returned by the firmware but not yet
2114 * processed by the upper layers.
2115 **/
2116void
2117lpfc_sli_hbqbuf_free_all(struct lpfc_hba *phba)
2118{
2119        struct lpfc_dmabuf *dmabuf, *next_dmabuf;
2120        struct hbq_dmabuf *hbq_buf;
2121        unsigned long flags;
2122        int i, hbq_count;
2123
2124        hbq_count = lpfc_sli_hbq_count();
2125        /* Return all memory used by all HBQs */
2126        spin_lock_irqsave(&phba->hbalock, flags);
2127        for (i = 0; i < hbq_count; ++i) {
2128                list_for_each_entry_safe(dmabuf, next_dmabuf,
2129                                &phba->hbqs[i].hbq_buffer_list, list) {
2130                        hbq_buf = container_of(dmabuf, struct hbq_dmabuf, dbuf);
2131                        list_del(&hbq_buf->dbuf.list);
2132                        (phba->hbqs[i].hbq_free_buffer)(phba, hbq_buf);
2133                }
2134                phba->hbqs[i].buffer_count = 0;
2135        }
2136
2137        /* Mark the HBQs not in use */
2138        phba->hbq_in_use = 0;
2139        spin_unlock_irqrestore(&phba->hbalock, flags);
2140}
2141
2142/**
2143 * lpfc_sli_hbq_to_firmware - Post the hbq buffer to firmware
2144 * @phba: Pointer to HBA context object.
2145 * @hbqno: HBQ number.
2146 * @hbq_buf: Pointer to HBQ buffer.
2147 *
2148 * This function is called with the hbalock held to post a
2149 * hbq buffer to the firmware. If the function finds an empty
2150 * slot in the HBQ, it will post the buffer. The function returns
2151 * zero if it successfully posts the buffer, otherwise it returns
2152 * an error.
2153 **/
2154static int
2155lpfc_sli_hbq_to_firmware(struct lpfc_hba *phba, uint32_t hbqno,
2156                         struct hbq_dmabuf *hbq_buf)
2157{
2158        lockdep_assert_held(&phba->hbalock);
2159        return phba->lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buf);
2160}
2161
2162/**
2163 * lpfc_sli_hbq_to_firmware_s3 - Post the hbq buffer to SLI3 firmware
2164 * @phba: Pointer to HBA context object.
2165 * @hbqno: HBQ number.
2166 * @hbq_buf: Pointer to HBQ buffer.
2167 *
2168 * This function is called with the hbalock held to post a hbq buffer to the
2169 * firmware. If the function finds an empty slot in the HBQ, it will post the
2170 * buffer and place it on the hbq_buffer_list. The function will return zero if
2171 * it successfully posts the buffer, otherwise it will return an error.
2172 **/
2173static int
2174lpfc_sli_hbq_to_firmware_s3(struct lpfc_hba *phba, uint32_t hbqno,
2175                            struct hbq_dmabuf *hbq_buf)
2176{
2177        struct lpfc_hbq_entry *hbqe;
2178        dma_addr_t physaddr = hbq_buf->dbuf.phys;
2179
2180        lockdep_assert_held(&phba->hbalock);
2181        /* Get next HBQ entry slot to use */
2182        hbqe = lpfc_sli_next_hbq_slot(phba, hbqno);
2183        if (hbqe) {
2184                struct hbq_s *hbqp = &phba->hbqs[hbqno];
2185
2186                hbqe->bde.addrHigh = le32_to_cpu(putPaddrHigh(physaddr));
2187                hbqe->bde.addrLow  = le32_to_cpu(putPaddrLow(physaddr));
2188                hbqe->bde.tus.f.bdeSize = hbq_buf->total_size;
2189                hbqe->bde.tus.f.bdeFlags = 0;
2190                hbqe->bde.tus.w = le32_to_cpu(hbqe->bde.tus.w);
2191                hbqe->buffer_tag = le32_to_cpu(hbq_buf->tag);
2192                                /* Sync SLIM */
2193                hbqp->hbqPutIdx = hbqp->next_hbqPutIdx;
2194                writel(hbqp->hbqPutIdx, phba->hbq_put + hbqno);
2195                                /* flush */
2196                readl(phba->hbq_put + hbqno);
2197                list_add_tail(&hbq_buf->dbuf.list, &hbqp->hbq_buffer_list);
2198                return 0;
2199        }
2200        return -ENOMEM;
2201}
2202
2203/**
2204 * lpfc_sli_hbq_to_firmware_s4 - Post the hbq buffer to SLI4 firmware
2205 * @phba: Pointer to HBA context object.
2206 * @hbqno: HBQ number.
2207 * @hbq_buf: Pointer to HBQ buffer.
2208 *
2209 * This function is called with the hbalock held to post an RQE to the SLI4
2210 * firmware. If able to post the RQE to the RQ it will queue the hbq entry to
2211 * the hbq_buffer_list and return zero, otherwise it will return an error.
2212 **/
2213static int
2214lpfc_sli_hbq_to_firmware_s4(struct lpfc_hba *phba, uint32_t hbqno,
2215                            struct hbq_dmabuf *hbq_buf)
2216{
2217        int rc;
2218        struct lpfc_rqe hrqe;
2219        struct lpfc_rqe drqe;
2220        struct lpfc_queue *hrq;
2221        struct lpfc_queue *drq;
2222
2223        if (hbqno != LPFC_ELS_HBQ)
2224                return 1;
2225        hrq = phba->sli4_hba.hdr_rq;
2226        drq = phba->sli4_hba.dat_rq;
2227
2228        lockdep_assert_held(&phba->hbalock);
2229        hrqe.address_lo = putPaddrLow(hbq_buf->hbuf.phys);
2230        hrqe.address_hi = putPaddrHigh(hbq_buf->hbuf.phys);
2231        drqe.address_lo = putPaddrLow(hbq_buf->dbuf.phys);
2232        drqe.address_hi = putPaddrHigh(hbq_buf->dbuf.phys);
2233        rc = lpfc_sli4_rq_put(hrq, drq, &hrqe, &drqe);
2234        if (rc < 0)
2235                return rc;
2236        hbq_buf->tag = (rc | (hbqno << 16));
2237        list_add_tail(&hbq_buf->dbuf.list, &phba->hbqs[hbqno].hbq_buffer_list);
2238        return 0;
2239}
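/*
 * Editorial note on the tag layout: the RQE index returned by
 * lpfc_sli4_rq_put() occupies the low 16 bits and the HBQ number the
 * high 16 bits, which is why lpfc_sli_hbqbuf_find() below recovers the
 * queue number with (tag >> 16) and matches buffers on the full tag.
 */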
2240
2241/* HBQ for ELS and CT traffic. */
2242static struct lpfc_hbq_init lpfc_els_hbq = {
2243        .rn = 1,
2244        .entry_count = 256,
2245        .mask_count = 0,
2246        .profile = 0,
2247        .ring_mask = (1 << LPFC_ELS_RING),
2248        .buffer_count = 0,
2249        .init_count = 40,
2250        .add_count = 40,
2251};
2252
2253/* Array of HBQs */
2254struct lpfc_hbq_init *lpfc_hbq_defs[] = {
2255        &lpfc_els_hbq,
2256};
2257
2258/**
2259 * lpfc_sli_hbqbuf_fill_hbqs - Post more hbq buffers to HBQ
2260 * @phba: Pointer to HBA context object.
2261 * @hbqno: HBQ number.
2262 * @count: Number of HBQ buffers to be posted.
2263 *
2264 * This function is called with no lock held to post more hbq buffers to the
2265 * given HBQ. The function returns the number of HBQ buffers successfully
2266 * posted.
2267 **/
2268static int
2269lpfc_sli_hbqbuf_fill_hbqs(struct lpfc_hba *phba, uint32_t hbqno, uint32_t count)
2270{
2271        uint32_t i, posted = 0;
2272        unsigned long flags;
2273        struct hbq_dmabuf *hbq_buffer;
2274        LIST_HEAD(hbq_buf_list);
2275        if (!phba->hbqs[hbqno].hbq_alloc_buffer)
2276                return 0;
2277
2278        if ((phba->hbqs[hbqno].buffer_count + count) >
2279            lpfc_hbq_defs[hbqno]->entry_count)
2280                count = lpfc_hbq_defs[hbqno]->entry_count -
2281                                        phba->hbqs[hbqno].buffer_count;
2282        if (!count)
2283                return 0;
2284        /* Allocate HBQ entries */
2285        for (i = 0; i < count; i++) {
2286                hbq_buffer = (phba->hbqs[hbqno].hbq_alloc_buffer)(phba);
2287                if (!hbq_buffer)
2288                        break;
2289                list_add_tail(&hbq_buffer->dbuf.list, &hbq_buf_list);
2290        }
2291        /* Check whether HBQ is still in use */
2292        spin_lock_irqsave(&phba->hbalock, flags);
2293        if (!phba->hbq_in_use)
2294                goto err;
2295        while (!list_empty(&hbq_buf_list)) {
2296                list_remove_head(&hbq_buf_list, hbq_buffer, struct hbq_dmabuf,
2297                                 dbuf.list);
2298                hbq_buffer->tag = (phba->hbqs[hbqno].buffer_count |
2299                                      (hbqno << 16));
2300                if (!lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer)) {
2301                        phba->hbqs[hbqno].buffer_count++;
2302                        posted++;
2303                } else
2304                        (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
2305        }
2306        spin_unlock_irqrestore(&phba->hbalock, flags);
2307        return posted;
2308err:
2309        spin_unlock_irqrestore(&phba->hbalock, flags);
2310        while (!list_empty(&hbq_buf_list)) {
2311                list_remove_head(&hbq_buf_list, hbq_buffer, struct hbq_dmabuf,
2312                                 dbuf.list);
2313                (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
2314        }
2315        return 0;
2316}
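/*
 * Editorial note on the pattern above: buffers are allocated with no
 * lock held, then posted under the hbalock only if the HBQ is still in
 * use; buffers left over on either failure path are handed back to
 * hbq_free_buffer. This keeps the lock hold time independent of the
 * allocator.
 */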
2317
2318/**
2319 * lpfc_sli_hbqbuf_add_hbqs - Post more HBQ buffers to firmware
2320 * @phba: Pointer to HBA context object.
2321 * @qno: HBQ number.
2322 *
2323 * This function posts more buffers to the HBQ. This function
2324 * is called with no lock held. The function returns the number of HBQ entries
2325 * successfully posted.
2326 **/
2327int
2328lpfc_sli_hbqbuf_add_hbqs(struct lpfc_hba *phba, uint32_t qno)
2329{
2330        if (phba->sli_rev == LPFC_SLI_REV4)
2331                return 0;
2332        else
2333                return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
2334                                         lpfc_hbq_defs[qno]->add_count);
2335}
2336
2337/**
2338 * lpfc_sli_hbqbuf_init_hbqs - Post initial buffers to the HBQ
2339 * @phba: Pointer to HBA context object.
2340 * @qno:  HBQ queue number.
2341 *
2342 * This function is called from SLI initialization code path with
2343 * no lock held to post initial HBQ buffers to firmware. The
2344 * function returns the number of HBQ entries successfully posted.
2345 **/
2346static int
2347lpfc_sli_hbqbuf_init_hbqs(struct lpfc_hba *phba, uint32_t qno)
2348{
2349        if (phba->sli_rev == LPFC_SLI_REV4)
2350                return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
2351                                        lpfc_hbq_defs[qno]->entry_count);
2352        else
2353                return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
2354                                         lpfc_hbq_defs[qno]->init_count);
2355}
2356
2357/**
2358 * lpfc_sli_hbqbuf_get - Remove the first hbq off of an hbq list
2359 * @rb_list: Pointer to the hbq buffer list.
2360 * This function removes the first hbq buffer on an hbq list and returns a
2361 * pointer to that buffer. If it finds no buffers on the list it returns NULL.
2362 **/
2363static struct hbq_dmabuf *
2364lpfc_sli_hbqbuf_get(struct list_head *rb_list)
2365{
2366        struct lpfc_dmabuf *d_buf;
2367
2368        list_remove_head(rb_list, d_buf, struct lpfc_dmabuf, list);
2369        if (!d_buf)
2370                return NULL;
2371        return container_of(d_buf, struct hbq_dmabuf, dbuf);
2372}
2373
2374/**
2375 * lpfc_sli_rqbuf_get - Remove the first dma buffer off of an RQ list
2376 * @phba: Pointer to HBA context object.
2377 * @hrq: Pointer to the header receive queue.
2378 *
2379 * This function removes the first RQ buffer on an RQ buffer list and returns a
2380 * pointer to that buffer. If it finds no buffers on the list it returns NULL.
2381 **/
2382static struct rqb_dmabuf *
2383lpfc_sli_rqbuf_get(struct lpfc_hba *phba, struct lpfc_queue *hrq)
2384{
2385        struct lpfc_dmabuf *h_buf;
2386        struct lpfc_rqb *rqbp;
2387
2388        rqbp = hrq->rqbp;
2389        list_remove_head(&rqbp->rqb_buffer_list, h_buf,
2390                         struct lpfc_dmabuf, list);
2391        if (!h_buf)
2392                return NULL;
2393        rqbp->buffer_count--;
2394        return container_of(h_buf, struct rqb_dmabuf, hbuf);
2395}
2396
2397/**
2398 * lpfc_sli_hbqbuf_find - Find the hbq buffer associated with a tag
2399 * @phba: Pointer to HBA context object.
2400 * @tag: Tag of the hbq buffer.
2401 *
2402 * This function searches for the hbq buffer associated with the given tag in
2403 * the hbq buffer list. If it finds the hbq buffer, it returns the hbq_buffer
2404 * otherwise it returns NULL.
2405 **/
2406static struct hbq_dmabuf *
2407lpfc_sli_hbqbuf_find(struct lpfc_hba *phba, uint32_t tag)
2408{
2409        struct lpfc_dmabuf *d_buf;
2410        struct hbq_dmabuf *hbq_buf;
2411        uint32_t hbqno;
2412
2413        hbqno = tag >> 16;
2414        if (hbqno >= LPFC_MAX_HBQS)
2415                return NULL;
2416
2417        spin_lock_irq(&phba->hbalock);
2418        list_for_each_entry(d_buf, &phba->hbqs[hbqno].hbq_buffer_list, list) {
2419                hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
2420                if (hbq_buf->tag == tag) {
2421                        spin_unlock_irq(&phba->hbalock);
2422                        return hbq_buf;
2423                }
2424        }
2425        spin_unlock_irq(&phba->hbalock);
2426        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2427                        "1803 Bad hbq tag. Data: x%x x%x\n",
2428                        tag, phba->hbqs[tag >> 16].buffer_count);
2429        return NULL;
2430}
2431
2432/**
2433 * lpfc_sli_free_hbq - Give back the hbq buffer to firmware
2434 * @phba: Pointer to HBA context object.
2435 * @hbq_buffer: Pointer to HBQ buffer.
2436 *
2437 * This function is called with the hbalock held. It gives back
2438 * the hbq buffer to firmware. If the HBQ does not have space to
2439 * post the buffer, it will free the buffer.
2440 **/
2441void
2442lpfc_sli_free_hbq(struct lpfc_hba *phba, struct hbq_dmabuf *hbq_buffer)
2443{
2444        uint32_t hbqno;
2445
2446        if (hbq_buffer) {
2447                hbqno = hbq_buffer->tag >> 16;
2448                if (lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer))
2449                        (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
2450        }
2451}
2452
2453/**
2454 * lpfc_sli_chk_mbx_command - Check if the mailbox is a legitimate mailbox
2455 * @mbxCommand: mailbox command code.
2456 *
2457 * This function is called by the mailbox event handler function to verify
2458 * that the completed mailbox command is a legitimate mailbox command. If the
2459 * completed mailbox is not known to the function, it will return MBX_SHUTDOWN
2460 * and the mailbox event handler will take the HBA offline.
2461 **/
2462static int
2463lpfc_sli_chk_mbx_command(uint8_t mbxCommand)
2464{
2465        uint8_t ret;
2466
2467        switch (mbxCommand) {
2468        case MBX_LOAD_SM:
2469        case MBX_READ_NV:
2470        case MBX_WRITE_NV:
2471        case MBX_WRITE_VPARMS:
2472        case MBX_RUN_BIU_DIAG:
2473        case MBX_INIT_LINK:
2474        case MBX_DOWN_LINK:
2475        case MBX_CONFIG_LINK:
2476        case MBX_CONFIG_RING:
2477        case MBX_RESET_RING:
2478        case MBX_READ_CONFIG:
2479        case MBX_READ_RCONFIG:
2480        case MBX_READ_SPARM:
2481        case MBX_READ_STATUS:
2482        case MBX_READ_RPI:
2483        case MBX_READ_XRI:
2484        case MBX_READ_REV:
2485        case MBX_READ_LNK_STAT:
2486        case MBX_REG_LOGIN:
2487        case MBX_UNREG_LOGIN:
2488        case MBX_CLEAR_LA:
2489        case MBX_DUMP_MEMORY:
2490        case MBX_DUMP_CONTEXT:
2491        case MBX_RUN_DIAGS:
2492        case MBX_RESTART:
2493        case MBX_UPDATE_CFG:
2494        case MBX_DOWN_LOAD:
2495        case MBX_DEL_LD_ENTRY:
2496        case MBX_RUN_PROGRAM:
2497        case MBX_SET_MASK:
2498        case MBX_SET_VARIABLE:
2499        case MBX_UNREG_D_ID:
2500        case MBX_KILL_BOARD:
2501        case MBX_CONFIG_FARP:
2502        case MBX_BEACON:
2503        case MBX_LOAD_AREA:
2504        case MBX_RUN_BIU_DIAG64:
2505        case MBX_CONFIG_PORT:
2506        case MBX_READ_SPARM64:
2507        case MBX_READ_RPI64:
2508        case MBX_REG_LOGIN64:
2509        case MBX_READ_TOPOLOGY:
2510        case MBX_WRITE_WWN:
2511        case MBX_SET_DEBUG:
2512        case MBX_LOAD_EXP_ROM:
2513        case MBX_ASYNCEVT_ENABLE:
2514        case MBX_REG_VPI:
2515        case MBX_UNREG_VPI:
2516        case MBX_HEARTBEAT:
2517        case MBX_PORT_CAPABILITIES:
2518        case MBX_PORT_IOV_CONTROL:
2519        case MBX_SLI4_CONFIG:
2520        case MBX_SLI4_REQ_FTRS:
2521        case MBX_REG_FCFI:
2522        case MBX_UNREG_FCFI:
2523        case MBX_REG_VFI:
2524        case MBX_UNREG_VFI:
2525        case MBX_INIT_VPI:
2526        case MBX_INIT_VFI:
2527        case MBX_RESUME_RPI:
2528        case MBX_READ_EVENT_LOG_STATUS:
2529        case MBX_READ_EVENT_LOG:
2530        case MBX_SECURITY_MGMT:
2531        case MBX_AUTH_PORT:
2532        case MBX_ACCESS_VDATA:
2533                ret = mbxCommand;
2534                break;
2535        default:
2536                ret = MBX_SHUTDOWN;
2537                break;
2538        }
2539        return ret;
2540}
2541
2542/**
2543 * lpfc_sli_wake_mbox_wait - lpfc_sli_issue_mbox_wait mbox completion handler
2544 * @phba: Pointer to HBA context object.
2545 * @pmboxq: Pointer to mailbox command.
2546 *
2547 * This is completion handler function for mailbox commands issued from
2548 * lpfc_sli_issue_mbox_wait function. This function is called by the
2549 * mailbox event handler function with no lock held. This function
2550 * will wake up the thread waiting on the completion pointed to by
2551 * context3 of the mailbox.
2552 **/
2553void
2554lpfc_sli_wake_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
2555{
2556        unsigned long drvr_flag;
2557        struct completion *pmbox_done;
2558
2559        /*
2560         * If pmbox_done is NULL, the driver thread gave up waiting and
2561         * continued running.
2562         */
2563        pmboxq->mbox_flag |= LPFC_MBX_WAKE;
2564        spin_lock_irqsave(&phba->hbalock, drvr_flag);
2565        pmbox_done = (struct completion *)pmboxq->context3;
2566        if (pmbox_done)
2567                complete(pmbox_done);
2568        spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
2569        return;
2570}
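/*
 * Illustrative sketch (editorial, not driver code) of the waiting side
 * this handler pairs with; the issuer parks a completion in context3
 * and blocks on it, roughly:
 *
 *	DECLARE_COMPLETION_ONSTACK(mbox_done);
 *
 *	pmboxq->mbox_cmpl = lpfc_sli_wake_mbox_wait;
 *	pmboxq->context3 = &mbox_done;
 *	retval = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
 *	if (retval == MBX_BUSY || retval == MBX_SUCCESS)
 *		wait_for_completion_timeout(&mbox_done,
 *					    msecs_to_jiffies(timeout * 1000));
 *	pmboxq->context3 = NULL;
 */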
2571
2572static void
2573__lpfc_sli_rpi_release(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
2574{
2575        unsigned long iflags;
2576
2577        if (ndlp->nlp_flag & NLP_RELEASE_RPI) {
2578                lpfc_sli4_free_rpi(vport->phba, ndlp->nlp_rpi);
2579                spin_lock_irqsave(&ndlp->lock, iflags);
2580                ndlp->nlp_flag &= ~NLP_RELEASE_RPI;
2581                ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR;
2582                spin_unlock_irqrestore(&ndlp->lock, iflags);
2583        }
2584        ndlp->nlp_flag &= ~NLP_UNREG_INP;
2585}
2586
2587/**
2588 * lpfc_sli_def_mbox_cmpl - Default mailbox completion handler
2589 * @phba: Pointer to HBA context object.
2590 * @pmb: Pointer to mailbox object.
2591 *
2592 * This function is the default mailbox completion handler. It
2593 * frees the memory resources associated with the completed mailbox
2594 * command. If the completed command is a REG_LOGIN mailbox command,
2595 * this function will issue an UNREG_LOGIN to reclaim the RPI.
2596 **/
2597void
2598lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
2599{
2600        struct lpfc_vport  *vport = pmb->vport;
2601        struct lpfc_dmabuf *mp;
2602        struct lpfc_nodelist *ndlp;
2603        struct Scsi_Host *shost;
2604        uint16_t rpi, vpi;
2605        int rc;
2606
2607        mp = (struct lpfc_dmabuf *)(pmb->ctx_buf);
2608
2609        if (mp) {
2610                lpfc_mbuf_free(phba, mp->virt, mp->phys);
2611                kfree(mp);
2612        }
2613
2614        /*
2615         * If a REG_LOGIN succeeded after the node was destroyed or the
2616         * node is in re-discovery, the driver needs to clean up the RPI.
2617         */
2618        if (!(phba->pport->load_flag & FC_UNLOADING) &&
2619            pmb->u.mb.mbxCommand == MBX_REG_LOGIN64 &&
2620            !pmb->u.mb.mbxStatus) {
2621                rpi = pmb->u.mb.un.varWords[0];
2622                vpi = pmb->u.mb.un.varRegLogin.vpi;
2623                if (phba->sli_rev == LPFC_SLI_REV4)
2624                        vpi -= phba->sli4_hba.max_cfg_param.vpi_base;
2625                lpfc_unreg_login(phba, vpi, rpi, pmb);
2626                pmb->vport = vport;
2627                pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
2628                rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
2629                if (rc != MBX_NOT_FINISHED)
2630                        return;
2631        }
2632
2633        if ((pmb->u.mb.mbxCommand == MBX_REG_VPI) &&
2634                !(phba->pport->load_flag & FC_UNLOADING) &&
2635                !pmb->u.mb.mbxStatus) {
2636                shost = lpfc_shost_from_vport(vport);
2637                spin_lock_irq(shost->host_lock);
2638                vport->vpi_state |= LPFC_VPI_REGISTERED;
2639                vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI;
2640                spin_unlock_irq(shost->host_lock);
2641        }
2642
2643        if (pmb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
2644                ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
2645                lpfc_nlp_put(ndlp);
2646                pmb->ctx_buf = NULL;
2647                pmb->ctx_ndlp = NULL;
2648        }
2649
2650        if (pmb->u.mb.mbxCommand == MBX_UNREG_LOGIN) {
2651                ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
2652
2653                /* Check to see if there are any deferred events to process */
2654                if (ndlp) {
2655                        lpfc_printf_vlog(
2656                                vport,
2657                                KERN_INFO, LOG_MBOX | LOG_DISCOVERY,
2658                                "1438 UNREG cmpl deferred mbox x%x "
2659                                "on NPort x%x Data: x%x x%x x%px x%x x%x\n",
2660                                ndlp->nlp_rpi, ndlp->nlp_DID,
2661                                ndlp->nlp_flag, ndlp->nlp_defer_did,
2662                                ndlp, vport->load_flag, kref_read(&ndlp->kref));
2663
2664                        if ((ndlp->nlp_flag & NLP_UNREG_INP) &&
2665                            (ndlp->nlp_defer_did != NLP_EVT_NOTHING_PENDING)) {
2666                                ndlp->nlp_flag &= ~NLP_UNREG_INP;
2667                                ndlp->nlp_defer_did = NLP_EVT_NOTHING_PENDING;
2668                                lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
2669                        } else {
2670                                __lpfc_sli_rpi_release(vport, ndlp);
2671                        }
2672
2673                        /* The unreg_login mailbox is complete and had a
2674                         * reference that has to be released.  The PLOGI
2675                         * got its own ref.
2676                         */
2677                        lpfc_nlp_put(ndlp);
2678                        pmb->ctx_ndlp = NULL;
2679                }
2680        }
2681
2682        /* This nlp_put pairs with lpfc_sli4_resume_rpi */
2683        if (pmb->u.mb.mbxCommand == MBX_RESUME_RPI) {
2684                ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
2685                lpfc_nlp_put(ndlp);
2686        }
2687
2688        /* Check security permission status on INIT_LINK mailbox command */
2689        if ((pmb->u.mb.mbxCommand == MBX_INIT_LINK) &&
2690            (pmb->u.mb.mbxStatus == MBXERR_SEC_NO_PERMISSION))
2691                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2692                                "2860 SLI authentication is required "
2693                                "for INIT_LINK but has not done yet\n");
2694
2695        if (bf_get(lpfc_mqe_command, &pmb->u.mqe) == MBX_SLI4_CONFIG)
2696                lpfc_sli4_mbox_cmd_free(phba, pmb);
2697        else
2698                mempool_free(pmb, phba->mbox_mem_pool);
2699}
2700/**
2701 * lpfc_sli4_unreg_rpi_cmpl_clr - mailbox completion handler
2702 * @phba: Pointer to HBA context object.
2703 * @pmb: Pointer to mailbox object.
2704 *
2705 * This function is the unreg rpi mailbox completion handler. It
2706 * frees the memory resources associated with the completed mailbox
2707 * command. An additional reference is taken on the ndlp to prevent
2708 * lpfc_nlp_release from freeing the rpi bit in the bitmask before
2709 * the unreg mailbox command completes; this routine releases that
2710 * reference.
2711 *
2712 **/
2713void
2714lpfc_sli4_unreg_rpi_cmpl_clr(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
2715{
2716        struct lpfc_vport  *vport = pmb->vport;
2717        struct lpfc_nodelist *ndlp;
2718
2719        ndlp = pmb->ctx_ndlp;
2720        if (pmb->u.mb.mbxCommand == MBX_UNREG_LOGIN) {
2721                if (phba->sli_rev == LPFC_SLI_REV4 &&
2722                    (bf_get(lpfc_sli_intf_if_type,
2723                     &phba->sli4_hba.sli_intf) >=
2724                     LPFC_SLI_INTF_IF_TYPE_2)) {
2725                        if (ndlp) {
2726                                lpfc_printf_vlog(
2727                                         vport, KERN_INFO, LOG_MBOX | LOG_SLI,
2728                                         "0010 UNREG_LOGIN vpi:%x "
2729                                         "rpi:%x DID:%x defer x%x flg x%x "
2730                                         "x%px\n",
2731                                         vport->vpi, ndlp->nlp_rpi,
2732                                         ndlp->nlp_DID, ndlp->nlp_defer_did,
2733                                         ndlp->nlp_flag,
2734                                         ndlp);
2735                                ndlp->nlp_flag &= ~NLP_LOGO_ACC;
2736
2737                                /* Check to see if there are any deferred
2738                                 * events to process
2739                                 */
2740                                if ((ndlp->nlp_flag & NLP_UNREG_INP) &&
2741                                    (ndlp->nlp_defer_did !=
2742                                    NLP_EVT_NOTHING_PENDING)) {
2743                                        lpfc_printf_vlog(
2744                                                vport, KERN_INFO, LOG_DISCOVERY,
2745                                                "4111 UNREG cmpl deferred "
2746                                                "clr x%x on "
2747                                                "NPort x%x Data: x%x x%px\n",
2748                                                ndlp->nlp_rpi, ndlp->nlp_DID,
2749                                                ndlp->nlp_defer_did, ndlp);
2750                                        ndlp->nlp_flag &= ~NLP_UNREG_INP;
2751                                        ndlp->nlp_defer_did =
2752                                                NLP_EVT_NOTHING_PENDING;
2753                                        lpfc_issue_els_plogi(
2754                                                vport, ndlp->nlp_DID, 0);
2755                                } else {
2756                                        __lpfc_sli_rpi_release(vport, ndlp);
2757                                }
2758                                lpfc_nlp_put(ndlp);
2759                        }
2760                }
2761        }
2762
2763        mempool_free(pmb, phba->mbox_mem_pool);
2764}
2765
2766/**
2767 * lpfc_sli_handle_mb_event - Handle mailbox completions from firmware
2768 * @phba: Pointer to HBA context object.
2769 *
2770 * This function is called with no lock held. It processes all the
2771 * completed mailbox commands and hands them to the upper layers. The
2772 * interrupt service routine processes the mailbox completion interrupt
2773 * and adds completed mailbox commands to the mboxq_cmpl queue, then
2774 * signals the worker thread. The worker thread calls
2775 * lpfc_sli_handle_mb_event, which returns the completed mailbox
2776 * commands in the mboxq_cmpl queue to the upper layers by calling the
2777 * completion handler function of each mailbox command.
2778 **/
2779int
2780lpfc_sli_handle_mb_event(struct lpfc_hba *phba)
2781{
2782        MAILBOX_t *pmbox;
2783        LPFC_MBOXQ_t *pmb;
2784        int rc;
2785        LIST_HEAD(cmplq);
2786
2787        phba->sli.slistat.mbox_event++;
2788
2789        /* Get all completed mailbox buffers into the cmplq */
2790        spin_lock_irq(&phba->hbalock);
2791        list_splice_init(&phba->sli.mboxq_cmpl, &cmplq);
2792        spin_unlock_irq(&phba->hbalock);
2793
2794        /* Get a Mailbox buffer to setup mailbox commands for callback */
2795        do {
2796                list_remove_head(&cmplq, pmb, LPFC_MBOXQ_t, list);
2797                if (pmb == NULL)
2798                        break;
2799
2800                pmbox = &pmb->u.mb;
2801
2802                if (pmbox->mbxCommand != MBX_HEARTBEAT) {
2803                        if (pmb->vport) {
2804                                lpfc_debugfs_disc_trc(pmb->vport,
2805                                        LPFC_DISC_TRC_MBOX_VPORT,
2806                                        "MBOX cmpl vport: cmd:x%x mb:x%x x%x",
2807                                        (uint32_t)pmbox->mbxCommand,
2808                                        pmbox->un.varWords[0],
2809                                        pmbox->un.varWords[1]);
2810                        }
2811                        else {
2812                                lpfc_debugfs_disc_trc(phba->pport,
2813                                        LPFC_DISC_TRC_MBOX,
2814                                        "MBOX cmpl:       cmd:x%x mb:x%x x%x",
2815                                        (uint32_t)pmbox->mbxCommand,
2816                                        pmbox->un.varWords[0],
2817                                        pmbox->un.varWords[1]);
2818                        }
2819                }
2820
2821                /*
2822                 * It is a fatal error if an unknown mbox command completes.
2823                 */
2824                if (lpfc_sli_chk_mbx_command(pmbox->mbxCommand) ==
2825                    MBX_SHUTDOWN) {
2826                        /* Unknown mailbox command compl */
2827                        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2828                                        "(%d):0323 Unknown Mailbox command "
2829                                        "x%x (x%x/x%x) Cmpl\n",
2830                                        pmb->vport ? pmb->vport->vpi :
2831                                        LPFC_VPORT_UNKNOWN,
2832                                        pmbox->mbxCommand,
2833                                        lpfc_sli_config_mbox_subsys_get(phba,
2834                                                                        pmb),
2835                                        lpfc_sli_config_mbox_opcode_get(phba,
2836                                                                        pmb));
2837                        phba->link_state = LPFC_HBA_ERROR;
2838                        phba->work_hs = HS_FFER3;
2839                        lpfc_handle_eratt(phba);
2840                        continue;
2841                }
2842
2843                if (pmbox->mbxStatus) {
2844                        phba->sli.slistat.mbox_stat_err++;
2845                        if (pmbox->mbxStatus == MBXERR_NO_RESOURCES) {
2846                                /* Mbox cmd cmpl error - RETRYing */
2847                                lpfc_printf_log(phba, KERN_INFO,
2848                                        LOG_MBOX | LOG_SLI,
2849                                        "(%d):0305 Mbox cmd cmpl "
2850                                        "error - RETRYing Data: x%x "
2851                                        "(x%x/x%x) x%x x%x x%x\n",
2852                                        pmb->vport ? pmb->vport->vpi :
2853                                        LPFC_VPORT_UNKNOWN,
2854                                        pmbox->mbxCommand,
2855                                        lpfc_sli_config_mbox_subsys_get(phba,
2856                                                                        pmb),
2857                                        lpfc_sli_config_mbox_opcode_get(phba,
2858                                                                        pmb),
2859                                        pmbox->mbxStatus,
2860                                        pmbox->un.varWords[0],
2861                                        pmb->vport ? pmb->vport->port_state :
2862                                        LPFC_VPORT_UNKNOWN);
2863                                pmbox->mbxStatus = 0;
2864                                pmbox->mbxOwner = OWN_HOST;
2865                                rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
2866                                if (rc != MBX_NOT_FINISHED)
2867                                        continue;
2868                        }
2869                }
2870
2871                /* Mailbox cmd <cmd> Cmpl <cmpl> */
2872                lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
2873                                "(%d):0307 Mailbox cmd x%x (x%x/x%x) Cmpl %ps "
2874                                "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x "
2875                                "x%x x%x x%x\n",
2876                                pmb->vport ? pmb->vport->vpi : 0,
2877                                pmbox->mbxCommand,
2878                                lpfc_sli_config_mbox_subsys_get(phba, pmb),
2879                                lpfc_sli_config_mbox_opcode_get(phba, pmb),
2880                                pmb->mbox_cmpl,
2881                                *((uint32_t *) pmbox),
2882                                pmbox->un.varWords[0],
2883                                pmbox->un.varWords[1],
2884                                pmbox->un.varWords[2],
2885                                pmbox->un.varWords[3],
2886                                pmbox->un.varWords[4],
2887                                pmbox->un.varWords[5],
2888                                pmbox->un.varWords[6],
2889                                pmbox->un.varWords[7],
2890                                pmbox->un.varWords[8],
2891                                pmbox->un.varWords[9],
2892                                pmbox->un.varWords[10]);
2893
2894                if (pmb->mbox_cmpl)
2895                        pmb->mbox_cmpl(phba, pmb);
2896        } while (1);
2897        return 0;
2898}
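/*
 * Illustrative sketch (not driver code): the splice-then-drain pattern
 * used by lpfc_sli_handle_mb_event() keeps the hbalock hold time short.
 * The shared completion queue is emptied onto a private list in one
 * locked operation and every completion handler then runs without the
 * lock held.  Generic form of the pattern; struct foo and process() are
 * placeholders:
 *
 *	LIST_HEAD(local);
 *
 *	spin_lock_irq(&lock);
 *	list_splice_init(&shared, &local);
 *	spin_unlock_irq(&lock);
 *
 *	while (!list_empty(&local)) {
 *		entry = list_first_entry(&local, struct foo, list);
 *		list_del_init(&entry->list);
 *		process(entry);
 *	}
 */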
2899
2900/**
2901 * lpfc_sli_get_buff - Get the buffer associated with the buffer tag
2902 * @phba: Pointer to HBA context object.
2903 * @pring: Pointer to driver SLI ring object.
2904 * @tag: buffer tag.
2905 *
2906 * This function is called with no lock held. When the QUE_BUFTAG_BIT
2907 * bit is set in the tag, the buffer was posted for a particular
2908 * exchange and the function returns it without replacing the buffer.
2909 * If the buffer is for unsolicited ELS or CT traffic, this function
2910 * returns the buffer and also posts another buffer to the firmware.
2911 **/
2912static struct lpfc_dmabuf *
2913lpfc_sli_get_buff(struct lpfc_hba *phba,
2914                  struct lpfc_sli_ring *pring,
2915                  uint32_t tag)
2916{
2917        struct hbq_dmabuf *hbq_entry;
2918
2919        if (tag & QUE_BUFTAG_BIT)
2920                return lpfc_sli_ring_taggedbuf_get(phba, pring, tag);
2921        hbq_entry = lpfc_sli_hbqbuf_find(phba, tag);
2922        if (!hbq_entry)
2923                return NULL;
2924        return &hbq_entry->dbuf;
2925}
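/*
 * Illustrative sketch (not driver code): an HBQ buffer returned above is
 * the dbuf member embedded in a struct hbq_dmabuf, so a consumer that
 * needs the enclosing HBQ descriptor can recover it with container_of(),
 * which is how lpfc_in_buf_free() finds the entry to repost:
 *
 *	struct lpfc_dmabuf *d_buf = lpfc_sli_get_buff(phba, pring, tag);
 *	struct hbq_dmabuf *hbq_buf;
 *
 *	if (d_buf)
 *		hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
 */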
2926
2927/**
2928 * lpfc_nvme_unsol_ls_handler - Process an unsolicited event data buffer
2929 *                              containing an NVME LS request.
2930 * @phba: pointer to lpfc hba data structure.
2931 * @piocb: pointer to the iocbq struct representing the sequence starting
2932 *        frame.
2933 *
2934 * This routine initially validates the NVME LS, validates there is a login
2935 * with the port that sent the LS, and then calls the appropriate nvme host
2936 * or target LS request handler.
2937 **/
2938static void
2939lpfc_nvme_unsol_ls_handler(struct lpfc_hba *phba, struct lpfc_iocbq *piocb)
2940{
2941        struct lpfc_nodelist *ndlp;
2942        struct lpfc_dmabuf *d_buf;
2943        struct hbq_dmabuf *nvmebuf;
2944        struct fc_frame_header *fc_hdr;
2945        struct lpfc_async_xchg_ctx *axchg = NULL;
2946        char *failwhy = NULL;
2947        uint32_t oxid, sid, did, fctl, size;
2948        int ret = 1;
2949
2950        d_buf = piocb->context2;
2951
2952        nvmebuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
2953        fc_hdr = nvmebuf->hbuf.virt;
2954        oxid = be16_to_cpu(fc_hdr->fh_ox_id);
2955        sid = sli4_sid_from_fc_hdr(fc_hdr);
2956        did = sli4_did_from_fc_hdr(fc_hdr);
2957        fctl = (fc_hdr->fh_f_ctl[0] << 16 |
2958                fc_hdr->fh_f_ctl[1] << 8 |
2959                fc_hdr->fh_f_ctl[2]);
2960        size = bf_get(lpfc_rcqe_length, &nvmebuf->cq_event.cqe.rcqe_cmpl);
2961
2962        lpfc_nvmeio_data(phba, "NVME LS    RCV: xri x%x sz %d from %06x\n",
2963                         oxid, size, sid);
2964
2965        if (phba->pport->load_flag & FC_UNLOADING) {
2966                failwhy = "Driver Unloading";
2967        } else if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)) {
2968                failwhy = "NVME FC4 Disabled";
2969        } else if (!phba->nvmet_support && !phba->pport->localport) {
2970                failwhy = "No Localport";
2971        } else if (phba->nvmet_support && !phba->targetport) {
2972                failwhy = "No Targetport";
2973        } else if (unlikely(fc_hdr->fh_r_ctl != FC_RCTL_ELS4_REQ)) {
2974                failwhy = "Bad NVME LS R_CTL";
2975        } else if (unlikely((fctl & 0x00FF0000) !=
2976                        (FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT))) {
2977                failwhy = "Bad NVME LS F_CTL";
2978        } else {
2979                axchg = kzalloc(sizeof(*axchg), GFP_ATOMIC);
2980                if (!axchg)
2981                        failwhy = "No CTX memory";
2982        }
2983
2984        if (unlikely(failwhy)) {
2985                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2986                                "6154 Drop NVME LS: SID %06X OXID x%X: %s\n",
2987                                sid, oxid, failwhy);
2988                goto out_fail;
2989        }
2990
2991        /* validate the source of the LS is logged in */
2992        ndlp = lpfc_findnode_did(phba->pport, sid);
2993        if (!ndlp ||
2994            ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
2995             (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
2996                lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC,
2997                                "6216 NVME Unsol rcv: No ndlp: "
2998                                "NPort_ID x%x oxid x%x\n",
2999                                sid, oxid);
3000                goto out_fail;
3001        }
3002
3003        axchg->phba = phba;
3004        axchg->ndlp = ndlp;
3005        axchg->size = size;
3006        axchg->oxid = oxid;
3007        axchg->sid = sid;
3008        axchg->wqeq = NULL;
3009        axchg->state = LPFC_NVME_STE_LS_RCV;
3010        axchg->entry_cnt = 1;
3011        axchg->rqb_buffer = (void *)nvmebuf;
3012        axchg->hdwq = &phba->sli4_hba.hdwq[0];
3013        axchg->payload = nvmebuf->dbuf.virt;
3014        INIT_LIST_HEAD(&axchg->list);
3015
3016        if (phba->nvmet_support) {
3017                ret = lpfc_nvmet_handle_lsreq(phba, axchg);
3018                spin_lock_irq(&ndlp->lock);
3019                if (!ret && !(ndlp->fc4_xpt_flags & NLP_XPT_HAS_HH)) {
3020                        ndlp->fc4_xpt_flags |= NLP_XPT_HAS_HH;
3021                        spin_unlock_irq(&ndlp->lock);
3022
3023                        /* This reference is a single occurrence to hold the
3024                         * node valid until the nvmet transport calls
3025                         * host_release.
3026                         */
3027                        if (!lpfc_nlp_get(ndlp))
3028                                goto out_fail;
3029
3030                        lpfc_printf_log(phba, KERN_ERR, LOG_NODE,
3031                                        "6206 NVMET unsol ls_req ndlp x%px "
3032                                        "DID x%x xflags x%x refcnt %d\n",
3033                                        ndlp, ndlp->nlp_DID,
3034                                        ndlp->fc4_xpt_flags,
3035                                        kref_read(&ndlp->kref));
3036                } else {
3037                        spin_unlock_irq(&ndlp->lock);
3038                }
3039        } else {
3040                ret = lpfc_nvme_handle_lsreq(phba, axchg);
3041        }
3042
3043        /* if zero, LS was successfully handled. If non-zero, LS not handled */
3044        if (!ret)
3045                return;
3046
3047out_fail:
3048        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3049                        "6155 Drop NVME LS from DID %06X: SID %06X OXID x%X "
3050                        "NVMe%s handler failed %d\n",
3051                        did, sid, oxid,
3052                        (phba->nvmet_support) ? "T" : "I", ret);
3053
3054        /* recycle receive buffer */
3055        lpfc_in_buf_free(phba, &nvmebuf->dbuf);
3056
3057        /* If start of new exchange, abort it */
3058        if (axchg && (fctl & FC_FC_FIRST_SEQ && !(fctl & FC_FC_EX_CTX)))
3059                ret = lpfc_nvme_unsol_ls_issue_abort(phba, axchg, sid, oxid);
3060
3061        if (ret)
3062                kfree(axchg);
3063}
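/*
 * Illustrative note (not driver code): fh_f_ctl is a 3-byte field, so the
 * handler above packs it into the low 24 bits of a u32 before testing it.
 * The 0x00FF0000 mask selects bits 23:16, i.e. the first F_CTL byte, and
 * with the FC_FC_* definitions from <scsi/fc/fc_fs.h> the only accepted
 * value is a single-frame sequence that transfers sequence initiative:
 *
 *	fctl = (fc_hdr->fh_f_ctl[0] << 16 |
 *		fc_hdr->fh_f_ctl[1] << 8 |
 *		fc_hdr->fh_f_ctl[2]);
 *	mask = FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT;
 *	accept = (fctl & 0x00FF0000) == mask;	(mask == 0x290000)
 */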
3064
3065/**
3066 * lpfc_complete_unsol_iocb - Complete an unsolicited sequence
3067 * @phba: Pointer to HBA context object.
3068 * @pring: Pointer to driver SLI ring object.
3069 * @saveq: Pointer to the iocbq struct representing the sequence starting frame.
3070 * @fch_r_ctl: the r_ctl for the first frame of the sequence.
3071 * @fch_type: the type for the first frame of the sequence.
3072 *
3073 * This function is called with no lock held. This function uses the r_ctl and
3074 * type of the received sequence to find the correct callback function to call
3075 * to process the sequence.
3076 **/
3077static int
3078lpfc_complete_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
3079                         struct lpfc_iocbq *saveq, uint32_t fch_r_ctl,
3080                         uint32_t fch_type)
3081{
3082        int i;
3083
3084        switch (fch_type) {
3085        case FC_TYPE_NVME:
3086                lpfc_nvme_unsol_ls_handler(phba, saveq);
3087                return 1;
3088        default:
3089                break;
3090        }
3091
3092        /* Unsolicited Responses */
3093        if (pring->prt[0].profile) {
3094                if (pring->prt[0].lpfc_sli_rcv_unsol_event)
3095                        (pring->prt[0].lpfc_sli_rcv_unsol_event) (phba, pring,
3096                                                                        saveq);
3097                return 1;
3098        }
3099        /* We must search, based on rctl / type,
3100         * for the right routine */
3101        for (i = 0; i < pring->num_mask; i++) {
3102                if ((pring->prt[i].rctl == fch_r_ctl) &&
3103                    (pring->prt[i].type == fch_type)) {
3104                        if (pring->prt[i].lpfc_sli_rcv_unsol_event)
3105                                (pring->prt[i].lpfc_sli_rcv_unsol_event)
3106                                                (phba, pring, saveq);
3107                        return 1;
3108                }
3109        }
3110        return 0;
3111}
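/*
 * Illustrative sketch (not driver code): the prt[] scan above is a small
 * dispatch table.  Ring users register an {rctl, type} pair and a handler
 * at ring setup time, conceptually the way the ELS ring is populated in
 * lpfc_sli_setup():
 *
 *	pring->prt[0].rctl = FC_RCTL_ELS_REQ;
 *	pring->prt[0].type = FC_TYPE_ELS;
 *	pring->prt[0].lpfc_sli_rcv_unsol_event = lpfc_els_unsol_event;
 *	pring->num_mask = 1;
 */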
3112
3113/**
3114 * lpfc_sli_process_unsol_iocb - Unsolicited iocb handler
3115 * @phba: Pointer to HBA context object.
3116 * @pring: Pointer to driver SLI ring object.
3117 * @saveq: Pointer to the unsolicited iocb.
3118 *
3119 * This function is called with no lock held by the ring event handler
3120 * when there is an unsolicited iocb posted to the response ring by the
3121 * firmware. This function gets the buffers associated with the iocbs
3122 * and calls the event handler for the ring. This function handles both
3123 * qring buffers and hbq buffers.
3124 * When the function returns 1, the caller can free the iocb object;
3125 * otherwise, upper layer functions will free the iocb objects.
3126 **/
3127static int
3128lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
3129                            struct lpfc_iocbq *saveq)
3130{
3131        IOCB_t            *irsp;
3132        WORD5             *w5p;
3133        uint32_t           Rctl, Type;
3134        struct lpfc_iocbq *iocbq;
3135        struct lpfc_dmabuf *dmzbuf;
3136
3137        irsp = &(saveq->iocb);
3138
3139        if (irsp->ulpCommand == CMD_ASYNC_STATUS) {
3140                if (pring->lpfc_sli_rcv_async_status)
3141                        pring->lpfc_sli_rcv_async_status(phba, pring, saveq);
3142                else
3143                        lpfc_printf_log(phba,
3144                                        KERN_WARNING,
3145                                        LOG_SLI,
3146                                        "0316 Ring %d handler: unexpected "
3147                                        "ASYNC_STATUS iocb received evt_code "
3148                                        "0x%x\n",
3149                                        pring->ringno,
3150                                        irsp->un.asyncstat.evt_code);
3151                return 1;
3152        }
3153
3154        if ((irsp->ulpCommand == CMD_IOCB_RET_XRI64_CX) &&
3155                (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)) {
3156                if (irsp->ulpBdeCount > 0) {
3157                        dmzbuf = lpfc_sli_get_buff(phba, pring,
3158                                        irsp->un.ulpWord[3]);
3159                        lpfc_in_buf_free(phba, dmzbuf);
3160                }
3161
3162                if (irsp->ulpBdeCount > 1) {
3163                        dmzbuf = lpfc_sli_get_buff(phba, pring,
3164                                        irsp->unsli3.sli3Words[3]);
3165                        lpfc_in_buf_free(phba, dmzbuf);
3166                }
3167
3168                if (irsp->ulpBdeCount > 2) {
3169                        dmzbuf = lpfc_sli_get_buff(phba, pring,
3170                                irsp->unsli3.sli3Words[7]);
3171                        lpfc_in_buf_free(phba, dmzbuf);
3172                }
3173
3174                return 1;
3175        }
3176
3177        if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
3178                if (irsp->ulpBdeCount != 0) {
3179                        saveq->context2 = lpfc_sli_get_buff(phba, pring,
3180                                                irsp->un.ulpWord[3]);
3181                        if (!saveq->context2)
3182                                lpfc_printf_log(phba,
3183                                        KERN_ERR,
3184                                        LOG_SLI,
3185                                        "0341 Ring %d Cannot find buffer for "
3186                                        "an unsolicited iocb. tag 0x%x\n",
3187                                        pring->ringno,
3188                                        irsp->un.ulpWord[3]);
3189                }
3190                if (irsp->ulpBdeCount == 2) {
3191                        saveq->context3 = lpfc_sli_get_buff(phba, pring,
3192                                                irsp->unsli3.sli3Words[7]);
3193                        if (!saveq->context3)
3194                                lpfc_printf_log(phba,
3195                                        KERN_ERR,
3196                                        LOG_SLI,
3197                                        "0342 Ring %d Cannot find buffer for an"
3198                                        " unsolicited iocb. tag 0x%x\n",
3199                                        pring->ringno,
3200                                        irsp->unsli3.sli3Words[7]);
3201                }
3202                list_for_each_entry(iocbq, &saveq->list, list) {
3203                        irsp = &(iocbq->iocb);
3204                        if (irsp->ulpBdeCount != 0) {
3205                                iocbq->context2 = lpfc_sli_get_buff(phba, pring,
3206                                                        irsp->un.ulpWord[3]);
3207                                if (!iocbq->context2)
3208                                        lpfc_printf_log(phba,
3209                                                KERN_ERR,
3210                                                LOG_SLI,
3211                                                "0343 Ring %d Cannot find "
3212                                                "buffer for an unsolicited iocb"
3213                                                ". tag 0x%x\n", pring->ringno,
3214                                                irsp->un.ulpWord[3]);
3215                        }
3216                        if (irsp->ulpBdeCount == 2) {
3217                                iocbq->context3 = lpfc_sli_get_buff(phba, pring,
3218                                                irsp->unsli3.sli3Words[7]);
3219                                if (!iocbq->context3)
3220                                        lpfc_printf_log(phba,
3221                                                KERN_ERR,
3222                                                LOG_SLI,
3223                                                "0344 Ring %d Cannot find "
3224                                                "buffer for an unsolicited "
3225                                                "iocb. tag 0x%x\n",
3226                                                pring->ringno,
3227                                                irsp->unsli3.sli3Words[7]);
3228                        }
3229                }
3230        }
3231        if (irsp->ulpBdeCount != 0 &&
3232            (irsp->ulpCommand == CMD_IOCB_RCV_CONT64_CX ||
3233             irsp->ulpStatus == IOSTAT_INTERMED_RSP)) {
3234                int found = 0;
3235
3236                /* search continue save q for same XRI */
3237                list_for_each_entry(iocbq, &pring->iocb_continue_saveq, clist) {
3238                        if (iocbq->iocb.unsli3.rcvsli3.ox_id ==
3239                                saveq->iocb.unsli3.rcvsli3.ox_id) {
3240                                list_add_tail(&saveq->list, &iocbq->list);
3241                                found = 1;
3242                                break;
3243                        }
3244                }
3245                if (!found)
3246                        list_add_tail(&saveq->clist,
3247                                      &pring->iocb_continue_saveq);
3248                if (saveq->iocb.ulpStatus != IOSTAT_INTERMED_RSP) {
3249                        list_del_init(&iocbq->clist);
3250                        saveq = iocbq;
3251                        irsp = &(saveq->iocb);
3252                } else
3253                        return 0;
3254        }
3255        if ((irsp->ulpCommand == CMD_RCV_ELS_REQ64_CX) ||
3256            (irsp->ulpCommand == CMD_RCV_ELS_REQ_CX) ||
3257            (irsp->ulpCommand == CMD_IOCB_RCV_ELS64_CX)) {
3258                Rctl = FC_RCTL_ELS_REQ;
3259                Type = FC_TYPE_ELS;
3260        } else {
3261                w5p = (WORD5 *)&(saveq->iocb.un.ulpWord[5]);
3262                Rctl = w5p->hcsw.Rctl;
3263                Type = w5p->hcsw.Type;
3264
3265                /* Firmware Workaround */
3266                if ((Rctl == 0) && (pring->ringno == LPFC_ELS_RING) &&
3267                        (irsp->ulpCommand == CMD_RCV_SEQUENCE64_CX ||
3268                         irsp->ulpCommand == CMD_IOCB_RCV_SEQ64_CX)) {
3269                        Rctl = FC_RCTL_ELS_REQ;
3270                        Type = FC_TYPE_ELS;
3271                        w5p->hcsw.Rctl = Rctl;
3272                        w5p->hcsw.Type = Type;
3273                }
3274        }
3275
3276        if (!lpfc_complete_unsol_iocb(phba, pring, saveq, Rctl, Type))
3277                lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
3278                                "0313 Ring %d handler: unexpected Rctl x%x "
3279                                "Type x%x received\n",
3280                                pring->ringno, Rctl, Type);
3281
3282        return 1;
3283}
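/*
 * Illustrative sketch (not driver code): the iocb_continue_saveq handling
 * above is a find-or-append keyed on the receive exchange's OX_ID, so
 * continuation frames join the chain started by the first frame of the
 * same exchange.  Generic form, with 'key', 'pending', 'head' and 'new'
 * as placeholders:
 *
 *	list_for_each_entry(head, &pending, clist)
 *		if (head->key == new->key) {
 *			list_add_tail(&new->list, &head->list);
 *			found = 1;
 *			break;
 *		}
 *	if (!found)
 *		list_add_tail(&new->clist, &pending);
 */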
3284
3285/**
3286 * lpfc_sli_iocbq_lookup - Find command iocb for the given response iocb
3287 * @phba: Pointer to HBA context object.
3288 * @pring: Pointer to driver SLI ring object.
3289 * @prspiocb: Pointer to response iocb object.
3290 *
3291 * This function looks up the iocb_lookup table to get the command iocb
3292 * corresponding to the given response iocb using the iotag of the
3293 * response iocb. This function internally takes and releases the
3294 * hbalock for SLI3 ports or the ring lock for SLI4 ports.
3295 * This function returns the command iocb object if it finds the command
3296 * iocb; otherwise it returns NULL.
3297 **/
3298static struct lpfc_iocbq *
3299lpfc_sli_iocbq_lookup(struct lpfc_hba *phba,
3300                      struct lpfc_sli_ring *pring,
3301                      struct lpfc_iocbq *prspiocb)
3302{
3303        struct lpfc_iocbq *cmd_iocb = NULL;
3304        uint16_t iotag;
3305        spinlock_t *temp_lock = NULL;
3306        unsigned long iflag = 0;
3307
3308        if (phba->sli_rev == LPFC_SLI_REV4)
3309                temp_lock = &pring->ring_lock;
3310        else
3311                temp_lock = &phba->hbalock;
3312
3313        spin_lock_irqsave(temp_lock, iflag);
3314        iotag = prspiocb->iocb.ulpIoTag;
3315
3316        if (iotag != 0 && iotag <= phba->sli.last_iotag) {
3317                cmd_iocb = phba->sli.iocbq_lookup[iotag];
3318                if (cmd_iocb->iocb_flag & LPFC_IO_ON_TXCMPLQ) {
3319                        /* remove from txcmpl queue list */
3320                        list_del_init(&cmd_iocb->list);
3321                        cmd_iocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
3322                        pring->txcmplq_cnt--;
3323                        spin_unlock_irqrestore(temp_lock, iflag);
3324                        return cmd_iocb;
3325                }
3326        }
3327
3328        spin_unlock_irqrestore(temp_lock, iflag);
3329        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3330                        "0317 iotag x%x is out of "
3331                        "range: max iotag x%x wd0 x%x\n",
3332                        iotag, phba->sli.last_iotag,
3333                        *(((uint32_t *) &prspiocb->iocb) + 7));
3334        return NULL;
3335}
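/*
 * Illustrative sketch (not driver code): iocbq_lookup[] is a direct-indexed
 * table, so matching a response to its command is O(1).  The iotag used as
 * the index here is assigned on the submit side (lpfc_sli_next_iotag() in
 * this file), roughly:
 *
 *	iotag = lpfc_sli_next_iotag(phba, piocb);	(submit side)
 *	...
 *	cmd = phba->sli.iocbq_lookup[rsp->iocb.ulpIoTag];  (completion side)
 */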
3336
3337/**
3338 * lpfc_sli_iocbq_lookup_by_tag - Find command iocb for the iotag
3339 * @phba: Pointer to HBA context object.
3340 * @pring: Pointer to driver SLI ring object.
3341 * @iotag: IOCB tag.
3342 *
3343 * This function looks up the iocb_lookup table to get the command iocb
3344 * corresponding to the given iotag. This function internally takes and
3345 * releases the ring lock for SLI4 ports or the hbalock for SLI3 ports.
3346 * This function returns the command iocb object if it finds the command
3347 * iocb; otherwise it returns NULL.
3348 **/
3349static struct lpfc_iocbq *
3350lpfc_sli_iocbq_lookup_by_tag(struct lpfc_hba *phba,
3351                             struct lpfc_sli_ring *pring, uint16_t iotag)
3352{
3353        struct lpfc_iocbq *cmd_iocb = NULL;
3354        spinlock_t *temp_lock = NULL;
3355        unsigned long iflag = 0;
3356
3357        if (phba->sli_rev == LPFC_SLI_REV4)
3358                temp_lock = &pring->ring_lock;
3359        else
3360                temp_lock = &phba->hbalock;
3361
3362        spin_lock_irqsave(temp_lock, iflag);
3363        if (iotag != 0 && iotag <= phba->sli.last_iotag) {
3364                cmd_iocb = phba->sli.iocbq_lookup[iotag];
3365                if (cmd_iocb->iocb_flag & LPFC_IO_ON_TXCMPLQ) {
3366                        /* remove from txcmpl queue list */
3367                        list_del_init(&cmd_iocb->list);
3368                        cmd_iocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
3369                        pring->txcmplq_cnt--;
3370                        spin_unlock_irqrestore(temp_lock, iflag);
3371                        return cmd_iocb;
3372                }
3373        }
3374
3375        spin_unlock_irqrestore(temp_lock, iflag);
3376        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3377                        "0372 iotag x%x lookup error: max iotag (x%x) "
3378                        "iocb_flag x%x\n",
3379                        iotag, phba->sli.last_iotag,
3380                        cmd_iocb ? cmd_iocb->iocb_flag : 0xffff);
3381        return NULL;
3382}
3383
3384/**
3385 * lpfc_sli_process_sol_iocb - process solicited iocb completion
3386 * @phba: Pointer to HBA context object.
3387 * @pring: Pointer to driver SLI ring object.
3388 * @saveq: Pointer to the response iocb to be processed.
3389 *
3390 * This function is called by the ring event handler for non-fcp
3391 * rings when there is a new response iocb in the response ring.
3392 * The caller is not required to hold any locks. This function
3393 * gets the command iocb associated with the response iocb and
3394 * calls the completion handler for the command iocb. If there
3395 * is no completion handler, the function will free the resources
3396 * associated with command iocb. If the response iocb is for
3397 * an already aborted command iocb, the status of the completion
3398 * is changed to IOSTAT_LOCAL_REJECT/IOERR_SLI_ABORTED.
3399 * This function always returns 1.
3400 **/
3401static int
3402lpfc_sli_process_sol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
3403                          struct lpfc_iocbq *saveq)
3404{
3405        struct lpfc_iocbq *cmdiocbp;
3406        int rc = 1;
3407        unsigned long iflag;
3408
3409        cmdiocbp = lpfc_sli_iocbq_lookup(phba, pring, saveq);
3410        if (cmdiocbp) {
3411                if (cmdiocbp->iocb_cmpl) {
3412                        /*
3413                         * If an ELS command failed send an event to mgmt
3414                         * application.
3415                         */
3416                        if (saveq->iocb.ulpStatus &&
3417                             (pring->ringno == LPFC_ELS_RING) &&
3418                             (cmdiocbp->iocb.ulpCommand ==
3419                                CMD_ELS_REQUEST64_CR))
3420                                lpfc_send_els_failure_event(phba,
3421                                        cmdiocbp, saveq);
3422
3423                        /*
3424                         * Post all ELS completions to the worker thread.
3425                         * All others are passed to the completion callback.
3426                         */
3427                        if (pring->ringno == LPFC_ELS_RING) {
3428                                if ((phba->sli_rev < LPFC_SLI_REV4) &&
3429                                    (cmdiocbp->iocb_flag &
3430                                                        LPFC_DRIVER_ABORTED)) {
3431                                        spin_lock_irqsave(&phba->hbalock,
3432                                                          iflag);
3433                                        cmdiocbp->iocb_flag &=
3434                                                ~LPFC_DRIVER_ABORTED;
3435                                        spin_unlock_irqrestore(&phba->hbalock,
3436                                                               iflag);
3437                                        saveq->iocb.ulpStatus =
3438                                                IOSTAT_LOCAL_REJECT;
3439                                        saveq->iocb.un.ulpWord[4] =
3440                                                IOERR_SLI_ABORTED;
3441
3442                                        /* Firmware could still be in progress
3443                                         * of DMAing payload, so don't free data
3444                                         * buffer until after a heartbeat.
3445                                         */
3446                                        spin_lock_irqsave(&phba->hbalock,
3447                                                          iflag);
3448                                        saveq->iocb_flag |= LPFC_DELAY_MEM_FREE;
3449                                        spin_unlock_irqrestore(&phba->hbalock,
3450                                                               iflag);
3451                                }
3452                                if (phba->sli_rev == LPFC_SLI_REV4) {
3453                                        if (saveq->iocb_flag &
3454                                            LPFC_EXCHANGE_BUSY) {
3455                                                /* Set cmdiocb flag for the
3456                                                 * exchange busy so sgl (xri)
3457                                                 * will not be released until
3458                                                 * the abort xri is received
3459                                                 * from hba.
3460                                                 */
3461                                                spin_lock_irqsave(
3462                                                        &phba->hbalock, iflag);
3463                                                cmdiocbp->iocb_flag |=
3464                                                        LPFC_EXCHANGE_BUSY;
3465                                                spin_unlock_irqrestore(
3466                                                        &phba->hbalock, iflag);
3467                                        }
3468                                        if (cmdiocbp->iocb_flag &
3469                                            LPFC_DRIVER_ABORTED) {
3470                                                /*
3471                                                 * Clear LPFC_DRIVER_ABORTED
3472                                                 * bit in case it was driver
3473                                                 * initiated abort.
3474                                                 */
3475                                                spin_lock_irqsave(
3476                                                        &phba->hbalock, iflag);
3477                                                cmdiocbp->iocb_flag &=
3478                                                        ~LPFC_DRIVER_ABORTED;
3479                                                spin_unlock_irqrestore(
3480                                                        &phba->hbalock, iflag);
3481                                                cmdiocbp->iocb.ulpStatus =
3482                                                        IOSTAT_LOCAL_REJECT;
3483                                                cmdiocbp->iocb.un.ulpWord[4] =
3484                                                        IOERR_ABORT_REQUESTED;
3485                                                /*
3486                                                 * For SLI4, irsiocb contains
3487                                                 * For SLI4, irspiocb has
3488                                                 * NO_XRI in sli_xritag, so
3489                                                 * it does not affect the
3490                                                 * sgl (xri) release process.
3491                                                saveq->iocb.ulpStatus =
3492                                                        IOSTAT_LOCAL_REJECT;
3493                                                saveq->iocb.un.ulpWord[4] =
3494                                                        IOERR_SLI_ABORTED;
3495                                                spin_lock_irqsave(
3496                                                        &phba->hbalock, iflag);
3497                                                saveq->iocb_flag |=
3498                                                        LPFC_DELAY_MEM_FREE;
3499                                                spin_unlock_irqrestore(
3500                                                        &phba->hbalock, iflag);
3501                                        }
3502                                }
3503                        }
3504                        (cmdiocbp->iocb_cmpl) (phba, cmdiocbp, saveq);
3505                } else
3506                        lpfc_sli_release_iocbq(phba, cmdiocbp);
3507        } else {
3508                /*
3509                 * Unknown initiating command based on the response iotag.
3510                 * This could be the case on the ELS ring because of
3511                 * lpfc_els_abort().
3512                 */
3513                if (pring->ringno != LPFC_ELS_RING) {
3514                        /*
3515                         * Ring <ringno> handler: unexpected completion IoTag
3516                         * <IoTag>
3517                         */
3518                        lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
3519                                         "0322 Ring %d handler: "
3520                                         "unexpected completion IoTag x%x "
3521                                         "Data: x%x x%x x%x x%x\n",
3522                                         pring->ringno,
3523                                         saveq->iocb.ulpIoTag,
3524                                         saveq->iocb.ulpStatus,
3525                                         saveq->iocb.un.ulpWord[4],
3526                                         saveq->iocb.ulpCommand,
3527                                         saveq->iocb.ulpContext);
3528                }
3529        }
3530
3531        return rc;
3532}
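/*
 * Illustrative sketch (not driver code): iocb_flag is shared between the
 * submit, abort and completion paths, so the handler above brackets every
 * read-modify-write of it with the hbalock:
 *
 *	spin_lock_irqsave(&phba->hbalock, iflag);
 *	cmdiocbp->iocb_flag |= LPFC_EXCHANGE_BUSY;
 *	spin_unlock_irqrestore(&phba->hbalock, iflag);
 */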
3533
3534/**
3535 * lpfc_sli_rsp_pointers_error - Response ring pointer error handler
3536 * @phba: Pointer to HBA context object.
3537 * @pring: Pointer to driver SLI ring object.
3538 *
3539 * This function is called from the iocb ring event handlers when the
3540 * put pointer is ahead of the get pointer for a ring. This function
3541 * signals an error attention condition to the worker thread, and the
3542 * worker thread will transition the HBA to the offline state.
3543 **/
3544static void
3545lpfc_sli_rsp_pointers_error(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
3546{
3547        struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
3548        /*
3549         * Ring <ringno> handler: portRspPut <portRspPut> is bigger than
3550         * rsp ring <portRspMax>
3551         */
3552        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3553                        "0312 Ring %d handler: portRspPut %d "
3554                        "is bigger than rsp ring %d\n",
3555                        pring->ringno, le32_to_cpu(pgp->rspPutInx),
3556                        pring->sli.sli3.numRiocb);
3557
3558        phba->link_state = LPFC_HBA_ERROR;
3559
3560        /*
3561         * All error attention handlers are posted to
3562         * worker thread
3563         */
3564        phba->work_ha |= HA_ERATT;
3565        phba->work_hs = HS_FFER3;
3566
3567        lpfc_worker_wake_up(phba);
3568
3569        return;
3570}
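/*
 * Illustrative sketch (not driver code): error attention is never handled
 * in the event-handler context itself.  The pattern above posts the cause
 * bits and wakes the worker thread, whose loop (lpfc_work_done() in
 * lpfc_hbadisc.c) then reacts to them, conceptually:
 *
 *	if (phba->work_ha & HA_ERATT)
 *		lpfc_handle_eratt(phba);	(may offline the HBA)
 */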
3571
3572/**
3573 * lpfc_poll_eratt - Error attention polling timer timeout handler
3574 * @t: Context to fetch pointer to address of HBA context object from.
3575 *
3576 * This function is invoked by the Error Attention polling timer when the
3577 * timer times out. It will check the SLI Error Attention register for
3578 * possible attention events. If so, it will post an Error Attention event
3579 * and wake up worker thread to process it. Otherwise, it will set up the
3580 * Error Attention polling timer for the next poll.
3581 **/
3582void lpfc_poll_eratt(struct timer_list *t)
3583{
3584        struct lpfc_hba *phba;
3585        uint32_t eratt = 0;
3586        uint64_t sli_intr, cnt;
3587
3588        phba = from_timer(phba, t, eratt_poll);
3589
3590        /* Also keep track of the HBA's interrupts per second */
3591        sli_intr = phba->sli.slistat.sli_intr;
3592
3593        if (phba->sli.slistat.sli_prev_intr > sli_intr)
3594                cnt = (((uint64_t)(-1) - phba->sli.slistat.sli_prev_intr) +
3595                        sli_intr);
3596        else
3597                cnt = (sli_intr - phba->sli.slistat.sli_prev_intr);
3598
3599        /* 64-bit integer division not supported on 32-bit x86 - use do_div */
3600        do_div(cnt, phba->eratt_poll_interval);
3601        phba->sli.slistat.sli_ips = cnt;
3602
3603        phba->sli.slistat.sli_prev_intr = sli_intr;
3604
3605        /* Check chip HA register for error event */
3606        eratt = lpfc_sli_check_eratt(phba);
3607
3608        if (eratt)
3609                /* Tell the worker thread there is work to do */
3610                lpfc_worker_wake_up(phba);
3611        else
3612                /* Restart the timer for next eratt poll */
3613                mod_timer(&phba->eratt_poll,
3614                          jiffies +
3615                          msecs_to_jiffies(1000 * phba->eratt_poll_interval));
3616        return;
3617}
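/*
 * Illustrative sketch (not driver code): the interrupt-rate computation
 * above is wrap-safe and 32-bit safe.  do_div() divides a u64 dividend in
 * place and returns the remainder, so the quotient is read back from the
 * dividend variable.  With cur, prev and interval_secs as placeholders:
 *
 *	u64 delta = (cur >= prev) ? cur - prev
 *				  : ((uint64_t)(-1) - prev) + cur;
 *	do_div(delta, interval_secs);	(delta now holds events per second)
 */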
3618
3619
3620/**
3621 * lpfc_sli_handle_fast_ring_event - Handle ring events on FCP ring
3622 * @phba: Pointer to HBA context object.
3623 * @pring: Pointer to driver SLI ring object.
3624 * @mask: Host attention register mask for this ring.
3625 *
3626 * This function is called from the interrupt context when there is a ring
3627 * event for the fcp ring. The caller does not hold any lock.
3628 * The function processes each response iocb in the response ring until it
3629 * finds an iocb with the LE bit set, chaining all the iocbs up to that
3630 * iocb. The function will call the completion handler of the command
3631 * iocb if the response iocb indicates a completion for a command iocb or
3632 * an abort completion. The function will call the
3633 * lpfc_sli_process_unsol_iocb function if this is an unsolicited iocb.
3634 * This routine presumes LPFC_FCP_RING handling and doesn't bother
3635 * to check it explicitly.
3636 **/
3637int
3638lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba,
3639                                struct lpfc_sli_ring *pring, uint32_t mask)
3640{
3641        struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
3642        IOCB_t *irsp = NULL;
3643        IOCB_t *entry = NULL;
3644        struct lpfc_iocbq *cmdiocbq = NULL;
3645        struct lpfc_iocbq rspiocbq;
3646        uint32_t status;
3647        uint32_t portRspPut, portRspMax;
3648        int rc = 1;
3649        lpfc_iocb_type type;
3650        unsigned long iflag;
3651        uint32_t rsp_cmpl = 0;
3652
3653        spin_lock_irqsave(&phba->hbalock, iflag);
3654        pring->stats.iocb_event++;
3655
3656        /*
3657         * The next available response entry should never exceed the maximum
3658         * entries.  If it does, treat it as an adapter hardware error.
3659         */
3660        portRspMax = pring->sli.sli3.numRiocb;
3661        portRspPut = le32_to_cpu(pgp->rspPutInx);
3662        if (unlikely(portRspPut >= portRspMax)) {
3663                lpfc_sli_rsp_pointers_error(phba, pring);
3664                spin_unlock_irqrestore(&phba->hbalock, iflag);
3665                return 1;
3666        }
3667        if (phba->fcp_ring_in_use) {
3668                spin_unlock_irqrestore(&phba->hbalock, iflag);
3669                return 1;
3670        } else
3671                phba->fcp_ring_in_use = 1;
3672
3673        rmb();
3674        while (pring->sli.sli3.rspidx != portRspPut) {
3675                /*
3676                 * Fetch an entry off the ring and copy it into a local data
3677                 * structure.  The copy involves a byte-swap since the
3678                 * network byte order and pci byte orders are different.
3679                 */
3680                entry = lpfc_resp_iocb(phba, pring);
3681                phba->last_completion_time = jiffies;
3682
3683                if (++pring->sli.sli3.rspidx >= portRspMax)
3684                        pring->sli.sli3.rspidx = 0;
3685
3686                lpfc_sli_pcimem_bcopy((uint32_t *) entry,
3687                                      (uint32_t *) &rspiocbq.iocb,
3688                                      phba->iocb_rsp_size);
3689                INIT_LIST_HEAD(&(rspiocbq.list));
3690                irsp = &rspiocbq.iocb;
3691
3692                type = lpfc_sli_iocb_cmd_type(irsp->ulpCommand & CMD_IOCB_MASK);
3693                pring->stats.iocb_rsp++;
3694                rsp_cmpl++;
3695
3696                if (unlikely(irsp->ulpStatus)) {
3697                        /*
3698                         * If resource errors are reported from the HBA,
3699                         * reduce the queue depths of the SCSI devices.
3700                         */
3701                        if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
3702                            ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) ==
3703                             IOERR_NO_RESOURCES)) {
3704                                spin_unlock_irqrestore(&phba->hbalock, iflag);
3705                                phba->lpfc_rampdown_queue_depth(phba);
3706                                spin_lock_irqsave(&phba->hbalock, iflag);
3707                        }
3708
3709                        /* Rsp ring <ringno> error: IOCB */
3710                        lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
3711                                        "0336 Rsp Ring %d error: IOCB Data: "
3712                                        "x%x x%x x%x x%x x%x x%x x%x x%x\n",
3713                                        pring->ringno,
3714                                        irsp->un.ulpWord[0],
3715                                        irsp->un.ulpWord[1],
3716                                        irsp->un.ulpWord[2],
3717                                        irsp->un.ulpWord[3],
3718                                        irsp->un.ulpWord[4],
3719                                        irsp->un.ulpWord[5],
3720                                        *(uint32_t *)&irsp->un1,
3721                                        *((uint32_t *)&irsp->un1 + 1));
3722                }
3723
3724                switch (type) {
3725                case LPFC_ABORT_IOCB:
3726                case LPFC_SOL_IOCB:
3727                        /*
3728                         * Idle exchange closed via ABTS from port.  No iocb
3729                         * resources need to be recovered.
3730                         */
3731                        if (unlikely(irsp->ulpCommand == CMD_XRI_ABORTED_CX)) {
3732                                lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3733                                                "0333 IOCB cmd 0x%x"
3734                                                " processed. Skipping"
3735                                                " completion\n",
3736                                                irsp->ulpCommand);
3737                                break;
3738                        }
3739
3740                        spin_unlock_irqrestore(&phba->hbalock, iflag);
3741                        cmdiocbq = lpfc_sli_iocbq_lookup(phba, pring,
3742                                                         &rspiocbq);
3743                        spin_lock_irqsave(&phba->hbalock, iflag);
3744                        if (unlikely(!cmdiocbq))
3745                                break;
3746                        if (cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED)
3747                                cmdiocbq->iocb_flag &= ~LPFC_DRIVER_ABORTED;
3748                        if (cmdiocbq->iocb_cmpl) {
3749                                spin_unlock_irqrestore(&phba->hbalock, iflag);
3750                                (cmdiocbq->iocb_cmpl)(phba, cmdiocbq,
3751                                                      &rspiocbq);
3752                                spin_lock_irqsave(&phba->hbalock, iflag);
3753                        }
3754                        break;
3755                case LPFC_UNSOL_IOCB:
3756                        spin_unlock_irqrestore(&phba->hbalock, iflag);
3757                        lpfc_sli_process_unsol_iocb(phba, pring, &rspiocbq);
3758                        spin_lock_irqsave(&phba->hbalock, iflag);
3759                        break;
3760                default:
3761                        if (irsp->ulpCommand == CMD_ADAPTER_MSG) {
3762                                char adaptermsg[LPFC_MAX_ADPTMSG];
3763                                memset(adaptermsg, 0, LPFC_MAX_ADPTMSG);
3764                                memcpy(&adaptermsg[0], (uint8_t *) irsp,
3765                                       MAX_MSG_DATA);
3766                                dev_warn(&((phba->pcidev)->dev),
3767                                         "lpfc%d: %s\n",
3768                                         phba->brd_no, adaptermsg);
3769                        } else {
3770                                /* Unknown IOCB command */
3771                                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3772                                                "0334 Unknown IOCB command "
3773                                                "Data: x%x, x%x x%x x%x x%x\n",
3774                                                type, irsp->ulpCommand,
3775                                                irsp->ulpStatus,
3776                                                irsp->ulpIoTag,
3777                                                irsp->ulpContext);
3778                        }
3779                        break;
3780                }
3781
3782                /*
3783                 * The response IOCB has been processed.  Update the ring
3784                 * pointer in SLIM.  If the port response put pointer has not
3785                 * been updated, sync the pgp->rspPutInx and fetch the new port
3786                 * response put pointer.
3787                 */
3788                writel(pring->sli.sli3.rspidx,
3789                        &phba->host_gp[pring->ringno].rspGetInx);
3790
3791                if (pring->sli.sli3.rspidx == portRspPut)
3792                        portRspPut = le32_to_cpu(pgp->rspPutInx);
3793        }
3794
3795        if ((rsp_cmpl > 0) && (mask & HA_R0RE_REQ)) {
3796                pring->stats.iocb_rsp_full++;
3797                status = ((CA_R0ATT | CA_R0RE_RSP) << (pring->ringno * 4));
3798                writel(status, phba->CAregaddr);
3799                readl(phba->CAregaddr);
3800        }
3801        if ((mask & HA_R0CE_RSP) && (pring->flag & LPFC_CALL_RING_AVAILABLE)) {
3802                pring->flag &= ~LPFC_CALL_RING_AVAILABLE;
3803                pring->stats.iocb_cmd_empty++;
3804
3805                /* Force update of the local copy of cmdGetInx */
3806                pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx);
3807                lpfc_sli_resume_iocb(phba, pring);
3808
3809                if (pring->lpfc_sli_cmd_available)
3810                        pring->lpfc_sli_cmd_available(phba, pring);
3811
3812        }
3813
3814        phba->fcp_ring_in_use = 0;
3815        spin_unlock_irqrestore(&phba->hbalock, iflag);
3816        return rc;
3817}
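/*
 * Illustrative sketch (not driver code): the response ring is consumed
 * with the classic producer/consumer index scheme used above: advance the
 * local get index modulo the ring size, then publish it to the port so
 * the adapter can reuse the entries.  With rspidx, ring_size and ringno
 * as placeholders:
 *
 *	if (++rspidx >= ring_size)
 *		rspidx = 0;
 *	writel(rspidx, &phba->host_gp[ringno].rspGetInx);
 */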
3818
3819/**
3820 * lpfc_sli_sp_handle_rspiocb - Handle slow-path response iocb
3821 * @phba: Pointer to HBA context object.
3822 * @pring: Pointer to driver SLI ring object.
3823 * @rspiocbp: Pointer to driver response IOCB object.
3824 *
3825 * This function is called from the worker thread when there is a slow-path
3826 * response IOCB to process. This function chains all the response iocbs until
3827 * seeing the iocb with the LE bit set. The function will call
3828 * lpfc_sli_process_sol_iocb function if the response iocb indicates a
3829 * completion of a command iocb. The function will call the
3830 * lpfc_sli_process_unsol_iocb function if this is an unsolicited iocb.
3831 * The function frees the resources or calls the completion handler if this
3832 * iocb is an abort completion. The function returns NULL when the response
3833 * iocb has the LE bit set and all the chained iocbs are processed;
3834 * otherwise it chains the iocb onto the iocb_continueq and returns the
3835 * response iocb passed in.
3836 **/
3837static struct lpfc_iocbq *
3838lpfc_sli_sp_handle_rspiocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
3839                        struct lpfc_iocbq *rspiocbp)
3840{
3841        struct lpfc_iocbq *saveq;
3842        struct lpfc_iocbq *cmdiocbp;
3843        struct lpfc_iocbq *next_iocb;
3844        IOCB_t *irsp = NULL;
3845        uint32_t free_saveq;
3846        uint8_t iocb_cmd_type;
3847        lpfc_iocb_type type;
3848        unsigned long iflag;
3849        int rc;
3850
3851        spin_lock_irqsave(&phba->hbalock, iflag);
3852        /* First add the response iocb to the iocb_continueq list */
3853        list_add_tail(&rspiocbp->list, &(pring->iocb_continueq));
3854        pring->iocb_continueq_cnt++;
3855
3856        /* Now, determine whether the list is completed for processing */
3857        irsp = &rspiocbp->iocb;
3858        if (irsp->ulpLe) {
3859                /*
3860                 * By default, the driver expects to free all resources
3861                 * associated with this iocb completion.
3862                 */
3863                free_saveq = 1;
3864                saveq = list_get_first(&pring->iocb_continueq,
3865                                       struct lpfc_iocbq, list);
3866                irsp = &(saveq->iocb);
3867                list_del_init(&pring->iocb_continueq);
3868                pring->iocb_continueq_cnt = 0;
3869
3870                pring->stats.iocb_rsp++;
3871
3872                /*
3873                 * If resource errors are reported from the HBA,
3874                 * reduce the queue depths of the SCSI devices.
3875                 */
3876                if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
3877                    ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) ==
3878                     IOERR_NO_RESOURCES)) {
3879                        spin_unlock_irqrestore(&phba->hbalock, iflag);
3880                        phba->lpfc_rampdown_queue_depth(phba);
3881                        spin_lock_irqsave(&phba->hbalock, iflag);
3882                }
3883
3884                if (irsp->ulpStatus) {
3885                        /* Rsp ring <ringno> error: IOCB */
3886                        lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
3887                                        "0328 Rsp Ring %d error: "
3888                                        "IOCB Data: "
3889                                        "x%x x%x x%x x%x "
3890                                        "x%x x%x x%x x%x "
3891                                        "x%x x%x x%x x%x "
3892                                        "x%x x%x x%x x%x\n",
3893                                        pring->ringno,
3894                                        irsp->un.ulpWord[0],
3895                                        irsp->un.ulpWord[1],
3896                                        irsp->un.ulpWord[2],
3897                                        irsp->un.ulpWord[3],
3898                                        irsp->un.ulpWord[4],
3899                                        irsp->un.ulpWord[5],
3900                                        *(((uint32_t *) irsp) + 6),
3901                                        *(((uint32_t *) irsp) + 7),
3902                                        *(((uint32_t *) irsp) + 8),
3903                                        *(((uint32_t *) irsp) + 9),
3904                                        *(((uint32_t *) irsp) + 10),
3905                                        *(((uint32_t *) irsp) + 11),
3906                                        *(((uint32_t *) irsp) + 12),
3907                                        *(((uint32_t *) irsp) + 13),
3908                                        *(((uint32_t *) irsp) + 14),
3909                                        *(((uint32_t *) irsp) + 15));
3910                }
3911
3912                /*
3913                 * Fetch the IOCB command type and call the correct completion
3914                 * routine. Solicited and Unsolicited IOCBs on the ELS ring
3915                 * get freed back to the lpfc_iocb_list by the discovery
3916                 * kernel thread.
3917                 */
3918                iocb_cmd_type = irsp->ulpCommand & CMD_IOCB_MASK;
3919                type = lpfc_sli_iocb_cmd_type(iocb_cmd_type);
3920                switch (type) {
3921                case LPFC_SOL_IOCB:
3922                        spin_unlock_irqrestore(&phba->hbalock, iflag);
3923                        rc = lpfc_sli_process_sol_iocb(phba, pring, saveq);
3924                        spin_lock_irqsave(&phba->hbalock, iflag);
3925                        break;
3926
3927                case LPFC_UNSOL_IOCB:
3928                        spin_unlock_irqrestore(&phba->hbalock, iflag);
3929                        rc = lpfc_sli_process_unsol_iocb(phba, pring, saveq);
3930                        spin_lock_irqsave(&phba->hbalock, iflag);
3931                        if (!rc)
3932                                free_saveq = 0;
3933                        break;
3934
3935                case LPFC_ABORT_IOCB:
3936                        cmdiocbp = NULL;
3937                        if (irsp->ulpCommand != CMD_XRI_ABORTED_CX) {
3938                                spin_unlock_irqrestore(&phba->hbalock, iflag);
3939                                cmdiocbp = lpfc_sli_iocbq_lookup(phba, pring,
3940                                                                 saveq);
3941                                spin_lock_irqsave(&phba->hbalock, iflag);
3942                        }
3943                        if (cmdiocbp) {
3944                                /* Call the specified completion routine */
3945                                if (cmdiocbp->iocb_cmpl) {
3946                                        spin_unlock_irqrestore(&phba->hbalock,
3947                                                               iflag);
3948                                        (cmdiocbp->iocb_cmpl)(phba, cmdiocbp,
3949                                                              saveq);
3950                                        spin_lock_irqsave(&phba->hbalock,
3951                                                          iflag);
3952                                } else
3953                                        __lpfc_sli_release_iocbq(phba,
3954                                                                 cmdiocbp);
3955                        }
3956                        break;
3957
3958                case LPFC_UNKNOWN_IOCB:
3959                        if (irsp->ulpCommand == CMD_ADAPTER_MSG) {
3960                                char adaptermsg[LPFC_MAX_ADPTMSG];
3961                                memset(adaptermsg, 0, LPFC_MAX_ADPTMSG);
3962                                memcpy(&adaptermsg[0], (uint8_t *)irsp,
3963                                       MAX_MSG_DATA);
3964                                dev_warn(&((phba->pcidev)->dev),
3965                                         "lpfc%d: %s\n",
3966                                         phba->brd_no, adaptermsg);
3967                        } else {
3968                                /* Unknown IOCB command */
3969                                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3970                                                "0335 Unknown IOCB "
3971                                                "command Data: x%x "
3972                                                "x%x x%x x%x\n",
3973                                                irsp->ulpCommand,
3974                                                irsp->ulpStatus,
3975                                                irsp->ulpIoTag,
3976                                                irsp->ulpContext);
3977                        }
3978                        break;
3979                }
3980
3981                if (free_saveq) {
3982                        list_for_each_entry_safe(rspiocbp, next_iocb,
3983                                                 &saveq->list, list) {
3984                                list_del_init(&rspiocbp->list);
3985                                __lpfc_sli_release_iocbq(phba, rspiocbp);
3986                        }
3987                        __lpfc_sli_release_iocbq(phba, saveq);
3988                }
3989                rspiocbp = NULL;
3990        }
3991        spin_unlock_irqrestore(&phba->hbalock, iflag);
3992        return rspiocbp;
3993}
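
/*
 * Editorial sketch (not driver code): multi-entry responses accumulate on
 * iocb_continueq until an entry arrives with ulpLe set; only then is the
 * whole chain processed and freed, as done above. A minimal model of that
 * accumulate-then-flush pattern; process_chain() is a hypothetical helper.
 */
#if 0	/* example only */
static void accumulate_rsp(struct lpfc_sli_ring *pring,
			   struct lpfc_iocbq *rsp)
{
	list_add_tail(&rsp->list, &pring->iocb_continueq);
	pring->iocb_continueq_cnt++;
	if (!rsp->iocb.ulpLe)
		return;		/* more entries of this response follow */

	/* Last entry: detach the chain and hand it off as one unit. */
	process_chain(&pring->iocb_continueq);
	INIT_LIST_HEAD(&pring->iocb_continueq);
	pring->iocb_continueq_cnt = 0;
}
#endif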
3994
3995/**
3996 * lpfc_sli_handle_slow_ring_event - Wrapper func for handling slow-path iocbs
3997 * @phba: Pointer to HBA context object.
3998 * @pring: Pointer to driver SLI ring object.
3999 * @mask: Host attention register mask for this ring.
4000 *
4001 * This routine wraps the actual slow_ring event process routine, invoked
4002 * through the API jump table function pointer in the lpfc_hba struct.
4003 **/
4004void
4005lpfc_sli_handle_slow_ring_event(struct lpfc_hba *phba,
4006                                struct lpfc_sli_ring *pring, uint32_t mask)
4007{
4008        phba->lpfc_sli_handle_slow_ring_event(phba, pring, mask);
4009}
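
/*
 * Editorial sketch (not driver code): the wrapper above never tests the SLI
 * revision itself; the right handler is installed once, during API table
 * setup, in the lpfc_hba jump table. A minimal model of that selection,
 * assuming the usual SLI3/SLI4 split:
 */
#if 0	/* example only */
static void setup_slow_ring_handler(struct lpfc_hba *phba)
{
	if (phba->sli_rev >= LPFC_SLI_REV4)
		phba->lpfc_sli_handle_slow_ring_event =
					lpfc_sli_handle_slow_ring_event_s4;
	else
		phba->lpfc_sli_handle_slow_ring_event =
					lpfc_sli_handle_slow_ring_event_s3;
}
#endif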
4010
4011/**
4012 * lpfc_sli_handle_slow_ring_event_s3 - Handle SLI3 ring event for non-FCP rings
4013 * @phba: Pointer to HBA context object.
4014 * @pring: Pointer to driver SLI ring object.
4015 * @mask: Host attention register mask for this ring.
4016 *
4017 * This function is called from the worker thread when there is a ring event
4018 * for non-FCP rings. The caller does not hold any lock. The function
4019 * removes each response iocb from the response ring and calls the handle
4020 * response iocb routine (lpfc_sli_sp_handle_rspiocb) to process it.
4021 **/
4022static void
4023lpfc_sli_handle_slow_ring_event_s3(struct lpfc_hba *phba,
4024                                   struct lpfc_sli_ring *pring, uint32_t mask)
4025{
4026        struct lpfc_pgp *pgp;
4027        IOCB_t *entry;
4028        IOCB_t *irsp = NULL;
4029        struct lpfc_iocbq *rspiocbp = NULL;
4030        uint32_t portRspPut, portRspMax;
4031        unsigned long iflag;
4032        uint32_t status;
4033
4034        pgp = &phba->port_gp[pring->ringno];
4035        spin_lock_irqsave(&phba->hbalock, iflag);
4036        pring->stats.iocb_event++;
4037
4038        /*
4039         * The next available response entry should never exceed the maximum
4040         * entries.  If it does, treat it as an adapter hardware error.
4041         */
4042        portRspMax = pring->sli.sli3.numRiocb;
4043        portRspPut = le32_to_cpu(pgp->rspPutInx);
4044        if (portRspPut >= portRspMax) {
4045                /*
4046                 * Ring <ringno> handler: portRspPut <portRspPut> is bigger than
4047                 * rsp ring <portRspMax>
4048                 */
4049                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
4050                                "0303 Ring %d handler: portRspPut %d "
4051                                "is bigger than rsp ring %d\n",
4052                                pring->ringno, portRspPut, portRspMax);
4053
4054                phba->link_state = LPFC_HBA_ERROR;
4055                spin_unlock_irqrestore(&phba->hbalock, iflag);
4056
4057                phba->work_hs = HS_FFER3;
4058                lpfc_handle_eratt(phba);
4059
4060                return;
4061        }
4062
4063        rmb();
4064        while (pring->sli.sli3.rspidx != portRspPut) {
4065                /*
4066                 * Build a completion list and call the appropriate handler.
4067                 * The process is to get the next available response iocb, get
4068                 * a free iocb from the list, copy the response data into the
4069                 * free iocb, insert it into the continuation list, and update
4070                 * the next response index to slim.  This process makes response
4071                 * iocbs in the ring available to DMA as fast as possible but
4072                 * pays a penalty for a copy operation.  Since the iocb is
4073                 * only 32 bytes, this penalty is considered small relative to
4074                 * the PCI reads for register values and a slim write.  When
4075                 * the ulpLe field is set, the entire command has been
4076                 * received.
4077                 */
4078                entry = lpfc_resp_iocb(phba, pring);
4079
4080                phba->last_completion_time = jiffies;
4081                rspiocbp = __lpfc_sli_get_iocbq(phba);
4082                if (rspiocbp == NULL) {
4083                        printk(KERN_ERR "%s: out of buffers! Failing "
4084                               "completion.\n", __func__);
4085                        break;
4086                }
4087
4088                lpfc_sli_pcimem_bcopy(entry, &rspiocbp->iocb,
4089                                      phba->iocb_rsp_size);
4090                irsp = &rspiocbp->iocb;
4091
4092                if (++pring->sli.sli3.rspidx >= portRspMax)
4093                        pring->sli.sli3.rspidx = 0;
4094
4095                if (pring->ringno == LPFC_ELS_RING) {
4096                        lpfc_debugfs_slow_ring_trc(phba,
4097                        "IOCB rsp ring:   wd4:x%08x wd6:x%08x wd7:x%08x",
4098                                *(((uint32_t *) irsp) + 4),
4099                                *(((uint32_t *) irsp) + 6),
4100                                *(((uint32_t *) irsp) + 7));
4101                }
4102
4103                writel(pring->sli.sli3.rspidx,
4104                        &phba->host_gp[pring->ringno].rspGetInx);
4105
4106                spin_unlock_irqrestore(&phba->hbalock, iflag);
4107                /* Handle the response IOCB */
4108                rspiocbp = lpfc_sli_sp_handle_rspiocb(phba, pring, rspiocbp);
4109                spin_lock_irqsave(&phba->hbalock, iflag);
4110
4111                /*
4112                 * If the port response put pointer has not been updated, sync
4113                 * the pgp->rspPutInx in the MAILBOX_t and fetch the new port
4114                 * response put pointer.
4115                 */
4116                if (pring->sli.sli3.rspidx == portRspPut) {
4117                        portRspPut = le32_to_cpu(pgp->rspPutInx);
4118                }
4119        } /* while (pring->sli.sli3.rspidx != portRspPut) */
4120
4121        if ((rspiocbp != NULL) && (mask & HA_R0RE_REQ)) {
4122                /* At least one response entry has been freed */
4123                pring->stats.iocb_rsp_full++;
4124                /* SET RxRE_RSP in Chip Att register */
4125                status = ((CA_R0ATT | CA_R0RE_RSP) << (pring->ringno * 4));
4126                writel(status, phba->CAregaddr);
4127                readl(phba->CAregaddr); /* flush */
4128        }
4129        if ((mask & HA_R0CE_RSP) && (pring->flag & LPFC_CALL_RING_AVAILABLE)) {
4130                pring->flag &= ~LPFC_CALL_RING_AVAILABLE;
4131                pring->stats.iocb_cmd_empty++;
4132
4133                /* Force update of the local copy of cmdGetInx */
4134                pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx);
4135                lpfc_sli_resume_iocb(phba, pring);
4136
4137                if ((pring->lpfc_sli_cmd_available))
4138                        (pring->lpfc_sli_cmd_available) (phba, pring);
4139
4140        }
4141
4142        spin_unlock_irqrestore(&phba->hbalock, iflag);
4143        return;
4144}
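
/*
 * Editorial sketch (not driver code): the loop above is a single-producer /
 * single-consumer circular ring. The port advances rspPutInx; the driver
 * chases it with rspidx, wrapping at portRspMax. The index arithmetic in
 * isolation:
 */
#if 0	/* example only */
static inline uint32_t ring_next_idx(uint32_t idx, uint32_t max)
{
	return (idx + 1 >= max) ? 0 : idx + 1;	/* wrap at ring end */
}

static void drain_ring(uint32_t *getidx, uint32_t putidx, uint32_t max)
{
	while (*getidx != putidx) {
		/* ...copy and handle the entry at *getidx here... */
		*getidx = ring_next_idx(*getidx, max);
	}
}
#endif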
4145
4146/**
4147 * lpfc_sli_handle_slow_ring_event_s4 - Handle SLI4 slow-path els events
4148 * @phba: Pointer to HBA context object.
4149 * @pring: Pointer to driver SLI ring object.
4150 * @mask: Host attention register mask for this ring.
4151 *
4152 * This function is called from the worker thread when there is a pending
4153 * ELS response iocb on the driver internal slow-path response iocb worker
4154 * queue. The caller does not hold any lock. The function removes each
4155 * response iocb from the response worker queue and calls the handle
4156 * response iocb routine (lpfc_sli_sp_handle_rspiocb) to process it.
4157 **/
4158static void
4159lpfc_sli_handle_slow_ring_event_s4(struct lpfc_hba *phba,
4160                                   struct lpfc_sli_ring *pring, uint32_t mask)
4161{
4162        struct lpfc_iocbq *irspiocbq;
4163        struct hbq_dmabuf *dmabuf;
4164        struct lpfc_cq_event *cq_event;
4165        unsigned long iflag;
4166        int count = 0;
4167
4168        spin_lock_irqsave(&phba->hbalock, iflag);
4169        phba->hba_flag &= ~HBA_SP_QUEUE_EVT;
4170        spin_unlock_irqrestore(&phba->hbalock, iflag);
4171        while (!list_empty(&phba->sli4_hba.sp_queue_event)) {
4172                /* Get the response iocb from the head of work queue */
4173                spin_lock_irqsave(&phba->hbalock, iflag);
4174                list_remove_head(&phba->sli4_hba.sp_queue_event,
4175                                 cq_event, struct lpfc_cq_event, list);
4176                spin_unlock_irqrestore(&phba->hbalock, iflag);
4177
4178                switch (bf_get(lpfc_wcqe_c_code, &cq_event->cqe.wcqe_cmpl)) {
4179                case CQE_CODE_COMPL_WQE:
4180                        irspiocbq = container_of(cq_event, struct lpfc_iocbq,
4181                                                 cq_event);
4182                        /* Translate ELS WCQE to response IOCBQ */
4183                        irspiocbq = lpfc_sli4_els_wcqe_to_rspiocbq(phba,
4184                                                                   irspiocbq);
4185                        if (irspiocbq)
4186                                lpfc_sli_sp_handle_rspiocb(phba, pring,
4187                                                           irspiocbq);
4188                        count++;
4189                        break;
4190                case CQE_CODE_RECEIVE:
4191                case CQE_CODE_RECEIVE_V1:
4192                        dmabuf = container_of(cq_event, struct hbq_dmabuf,
4193                                              cq_event);
4194                        lpfc_sli4_handle_received_buffer(phba, dmabuf);
4195                        count++;
4196                        break;
4197                default:
4198                        break;
4199                }
4200
4201                /* Limit the number of events to 64 to avoid soft lockups */
4202                if (count == 64)
4203                        break;
4204        }
4205}
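
/*
 * Editorial sketch (not driver code): the count check above caps one worker
 * pass at 64 events so a busy queue cannot monopolize the CPU; leftovers
 * are handled on the next pass. The same budget idiom in miniature:
 */
#if 0	/* example only */
#define SP_EVENT_BUDGET	64	/* hypothetical name for the 64-event cap */

static void drain_with_budget(struct lpfc_hba *phba, struct list_head *q)
{
	struct lpfc_cq_event *cq_event;
	unsigned long iflag;
	int budget = SP_EVENT_BUDGET;

	while (budget--) {
		spin_lock_irqsave(&phba->hbalock, iflag);
		if (list_empty(q)) {
			spin_unlock_irqrestore(&phba->hbalock, iflag);
			break;
		}
		list_remove_head(q, cq_event, struct lpfc_cq_event, list);
		spin_unlock_irqrestore(&phba->hbalock, iflag);
		/* ...handle cq_event with the lock dropped... */
	}
}
#endif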
4206
4207/**
4208 * lpfc_sli_abort_iocb_ring - Abort all iocbs in the ring
4209 * @phba: Pointer to HBA context object.
4210 * @pring: Pointer to driver SLI ring object.
4211 *
4212 * This function aborts all iocbs in the given ring and frees all the iocb
4213 * objects in txq. This function issues an abort iocb for all the iocb commands
4214 * in txcmplq. The iocbs in the txcmplq are not guaranteed to complete before
4215 * the return of this function. The caller is not required to hold any locks.
4216 **/
4217void
4218lpfc_sli_abort_iocb_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
4219{
4220        LIST_HEAD(completions);
4221        struct lpfc_iocbq *iocb, *next_iocb;
4222
4223        if (pring->ringno == LPFC_ELS_RING) {
4224                lpfc_fabric_abort_hba(phba);
4225        }
4226
4227        /* Error everything on txq and txcmplq
4228         * First do the txq.
4229         */
4230        if (phba->sli_rev >= LPFC_SLI_REV4) {
4231                spin_lock_irq(&pring->ring_lock);
4232                list_splice_init(&pring->txq, &completions);
4233                pring->txq_cnt = 0;
4234                spin_unlock_irq(&pring->ring_lock);
4235
4236                spin_lock_irq(&phba->hbalock);
4237                /* Next issue ABTS for everything on the txcmplq */
4238                list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list)
4239                        lpfc_sli_issue_abort_iotag(phba, pring, iocb, NULL);
4240                spin_unlock_irq(&phba->hbalock);
4241        } else {
4242                spin_lock_irq(&phba->hbalock);
4243                list_splice_init(&pring->txq, &completions);
4244                pring->txq_cnt = 0;
4245
4246                /* Next issue ABTS for everything on the txcmplq */
4247                list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list)
4248                        lpfc_sli_issue_abort_iotag(phba, pring, iocb, NULL);
4249                spin_unlock_irq(&phba->hbalock);
4250        }
4251        /* Make sure HBA is alive */
4252        lpfc_issue_hb_tmo(phba);
4253
4254        /* Cancel all the IOCBs from the completions list */
4255        lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
4256                              IOERR_SLI_ABORTED);
4257}
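
/*
 * Editorial sketch (not driver code): the txq teardown above uses the
 * standard "splice under lock, complete off-lock" idiom, so completion
 * handlers never run while hbalock is held. The idiom in isolation:
 */
#if 0	/* example only */
static void fail_all_txq(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	LIST_HEAD(completions);

	spin_lock_irq(&phba->hbalock);
	list_splice_init(&pring->txq, &completions);	/* O(1) detach */
	pring->txq_cnt = 0;
	spin_unlock_irq(&phba->hbalock);

	/* Handlers now run without hbalock and may take other locks. */
	lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
			      IOERR_SLI_ABORTED);
}
#endif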
4258
4259/**
4260 * lpfc_sli_abort_fcp_rings - Abort all iocbs in all FCP rings
4261 * @phba: Pointer to HBA context object.
4262 *
4263 * This function aborts all iocbs in FCP rings and frees all the iocb
4264 * objects in txq. This function issues an abort iocb for all the iocb commands
4265 * in txcmplq. The iocbs in the txcmplq are not guaranteed to complete before
4266 * the return of this function. The caller is not required to hold any locks.
4267 **/
4268void
4269lpfc_sli_abort_fcp_rings(struct lpfc_hba *phba)
4270{
4271        struct lpfc_sli *psli = &phba->sli;
4272        struct lpfc_sli_ring  *pring;
4273        uint32_t i;
4274
4275        /* Look on all the FCP Rings for the iotag */
4276        if (phba->sli_rev >= LPFC_SLI_REV4) {
4277                for (i = 0; i < phba->cfg_hdw_queue; i++) {
4278                        pring = phba->sli4_hba.hdwq[i].io_wq->pring;
4279                        lpfc_sli_abort_iocb_ring(phba, pring);
4280                }
4281        } else {
4282                pring = &psli->sli3_ring[LPFC_FCP_RING];
4283                lpfc_sli_abort_iocb_ring(phba, pring);
4284        }
4285}
4286
4287/**
4288 * lpfc_sli_flush_io_rings - flush all iocbs in the IO ring
4289 * @phba: Pointer to HBA context object.
4290 *
4291 * This function flushes all iocbs in the IO ring and frees all the iocb
4292 * objects in txq and txcmplq. This function will not issue abort iocbs
4293 * for all the iocb commands in txcmplq; they will just be returned with
4294 * IOERR_SLI_DOWN. This function is invoked by EEH when the device's PCI
4295 * slot has been permanently disabled.
4296 **/
4297void
4298lpfc_sli_flush_io_rings(struct lpfc_hba *phba)
4299{
4300        LIST_HEAD(txq);
4301        LIST_HEAD(txcmplq);
4302        struct lpfc_sli *psli = &phba->sli;
4303        struct lpfc_sli_ring  *pring;
4304        uint32_t i;
4305        struct lpfc_iocbq *piocb, *next_iocb;
4306
4307        spin_lock_irq(&phba->hbalock);
4308        if (phba->hba_flag & HBA_IOQ_FLUSH ||
4309            !phba->sli4_hba.hdwq) {
4310                spin_unlock_irq(&phba->hbalock);
4311                return;
4312        }
4313        /* Indicate the I/O queues are flushed */
4314        phba->hba_flag |= HBA_IOQ_FLUSH;
4315        spin_unlock_irq(&phba->hbalock);
4316
4317        /* Look on all the FCP Rings for the iotag */
4318        if (phba->sli_rev >= LPFC_SLI_REV4) {
4319                for (i = 0; i < phba->cfg_hdw_queue; i++) {
4320                        pring = phba->sli4_hba.hdwq[i].io_wq->pring;
4321
4322                        spin_lock_irq(&pring->ring_lock);
4323                        /* Retrieve everything on txq */
4324                        list_splice_init(&pring->txq, &txq);
4325                        list_for_each_entry_safe(piocb, next_iocb,
4326                                                 &pring->txcmplq, list)
4327                                piocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
4328                        /* Retrieve everything on the txcmplq */
4329                        list_splice_init(&pring->txcmplq, &txcmplq);
4330                        pring->txq_cnt = 0;
4331                        pring->txcmplq_cnt = 0;
4332                        spin_unlock_irq(&pring->ring_lock);
4333
4334                        /* Flush the txq */
4335                        lpfc_sli_cancel_iocbs(phba, &txq,
4336                                              IOSTAT_LOCAL_REJECT,
4337                                              IOERR_SLI_DOWN);
4338                        /* Flush the txcmpq */
4339                        lpfc_sli_cancel_iocbs(phba, &txcmplq,
4340                                              IOSTAT_LOCAL_REJECT,
4341                                              IOERR_SLI_DOWN);
4342                }
4343        } else {
4344                pring = &psli->sli3_ring[LPFC_FCP_RING];
4345
4346                spin_lock_irq(&phba->hbalock);
4347                /* Retrieve everything on txq */
4348                list_splice_init(&pring->txq, &txq);
4349                list_for_each_entry_safe(piocb, next_iocb,
4350                                         &pring->txcmplq, list)
4351                        piocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
4352                /* Retrieve everything on the txcmplq */
4353                list_splice_init(&pring->txcmplq, &txcmplq);
4354                pring->txq_cnt = 0;
4355                pring->txcmplq_cnt = 0;
4356                spin_unlock_irq(&phba->hbalock);
4357
4358                /* Flush the txq */
4359                lpfc_sli_cancel_iocbs(phba, &txq, IOSTAT_LOCAL_REJECT,
4360                                      IOERR_SLI_DOWN);
4361                /* Flush the txcmpq */
4362                lpfc_sli_cancel_iocbs(phba, &txcmplq, IOSTAT_LOCAL_REJECT,
4363                                      IOERR_SLI_DOWN);
4364        }
4365}
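
/*
 * Editorial sketch (not driver code): HBA_IOQ_FLUSH is tested and set in
 * one hbalock-protected section above, so of two racing callers only the
 * first performs the flush. The set-once guard in isolation:
 */
#if 0	/* example only */
static bool claim_ioq_flush(struct lpfc_hba *phba)
{
	bool first;

	spin_lock_irq(&phba->hbalock);
	first = !(phba->hba_flag & HBA_IOQ_FLUSH);
	phba->hba_flag |= HBA_IOQ_FLUSH;
	spin_unlock_irq(&phba->hbalock);

	return first;	/* only the first claimant does the work */
}
#endif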
4366
4367/**
4368 * lpfc_sli_brdready_s3 - Check for sli3 host ready status
4369 * @phba: Pointer to HBA context object.
4370 * @mask: Bit mask to be checked.
4371 *
4372 * This function reads the host status register and compares
4373 * with the provided bit mask to check if HBA completed
4374 * the restart. This function will wait in a loop for the
4375 * HBA to complete restart. If the HBA does not restart within
4376 * 15 iterations, the function will reset the HBA again. The
4377 * function returns 1 when the HBA fails to restart, otherwise it
4378 * returns zero.
4379 **/
4380static int
4381lpfc_sli_brdready_s3(struct lpfc_hba *phba, uint32_t mask)
4382{
4383        uint32_t status;
4384        int i = 0;
4385        int retval = 0;
4386
4387        /* Read the HBA Host Status Register */
4388        if (lpfc_readl(phba->HSregaddr, &status))
4389                return 1;
4390
4391        phba->hba_flag |= HBA_NEEDS_CFG_PORT;
4392
4393        /*
4394         * Check the status register every 10ms for 5 retries, then every
4395         * 500ms for 5, then every 2.5 sec for 5, then reset the board and
4396         * check every 2.5 sec for 5 more.
4397         * Break out of the loop if errors occur during init.
4398         */
4399        while (((status & mask) != mask) &&
4400               !(status & HS_FFERM) &&
4401               i++ < 20) {
4402
4403                if (i <= 5)
4404                        msleep(10);
4405                else if (i <= 10)
4406                        msleep(500);
4407                else
4408                        msleep(2500);
4409
4410                if (i == 15) {
4411                        /* Do post */
4412                        phba->pport->port_state = LPFC_VPORT_UNKNOWN;
4413                        lpfc_sli_brdrestart(phba);
4414                }
4415                /* Read the HBA Host Status Register */
4416                if (lpfc_readl(phba->HSregaddr, &status)) {
4417                        retval = 1;
4418                        break;
4419                }
4420        }
4421
4422        /* Check to see if any errors occurred during init */
4423        if ((status & HS_FFERM) || (i >= 20)) {
4424                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
4425                                "2751 Adapter failed to restart, "
4426                                "status reg x%x, FW Data: A8 x%x AC x%x\n",
4427                                status,
4428                                readl(phba->MBslimaddr + 0xa8),
4429                                readl(phba->MBslimaddr + 0xac));
4430                phba->link_state = LPFC_HBA_ERROR;
4431                retval = 1;
4432        }
4433
4434        return retval;
4435}
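
/*
 * Editorial sketch (not driver code): the readiness wait above is a tiered
 * backoff poll: short sleeps while a fast init is still likely, longer
 * sleeps afterwards, with one board restart mid-schedule. The sleep
 * schedule in isolation:
 */
#if 0	/* example only */
static void brdready_backoff(int i)
{
	if (i <= 5)
		msleep(10);	/* poll quickly at first */
	else if (i <= 10)
		msleep(500);
	else
		msleep(2500);	/* slow polls for the remaining tries */
}
#endif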
4436
4437/**
4438 * lpfc_sli_brdready_s4 - Check for sli4 host ready status
4439 * @phba: Pointer to HBA context object.
4440 * @mask: Bit mask to be checked.
4441 *
4442 * This function checks the host status register to see if the HBA is
4443 * ready. This function will wait for the HBA to be ready.
4444 * If the HBA is not ready, the function will reset the HBA PCI
4445 * function again. The function returns 1 when the HBA fails to be
4446 * ready, otherwise it returns zero.
4447 **/
4448static int
4449lpfc_sli_brdready_s4(struct lpfc_hba *phba, uint32_t mask)
4450{
4451        uint32_t status;
4452        int retval = 0;
4453
4454        /* Read the HBA Host Status Register */
4455        status = lpfc_sli4_post_status_check(phba);
4456
4457        if (status) {
4458                phba->pport->port_state = LPFC_VPORT_UNKNOWN;
4459                lpfc_sli_brdrestart(phba);
4460                status = lpfc_sli4_post_status_check(phba);
4461        }
4462
4463        /* Check to see if any errors occurred during init */
4464        if (status) {
4465                phba->link_state = LPFC_HBA_ERROR;
4466                retval = 1;
4467        } else
4468                phba->sli4_hba.intr_enable = 0;
4469
4470        return retval;
4471}
4472
4473/**
4474 * lpfc_sli_brdready - Wrapper func for checking the hba readiness
4475 * @phba: Pointer to HBA context object.
4476 * @mask: Bit mask to be checked.
4477 *
4478 * This routine wraps the actual SLI3 or SLI4 hba readiness check routine,
4479 * invoked through the API jump table function pointer in the lpfc_hba struct.
4480 **/
4481int
4482lpfc_sli_brdready(struct lpfc_hba *phba, uint32_t mask)
4483{
4484        return phba->lpfc_sli_brdready(phba, mask);
4485}
4486
4487#define BARRIER_TEST_PATTERN (0xdeadbeef)
4488
4489/**
4490 * lpfc_reset_barrier - Make HBA ready for HBA reset
4491 * @phba: Pointer to HBA context object.
4492 *
4493 * This function is called before resetting an HBA. This function is called
4494 * with hbalock held and requests HBA to quiesce DMAs before a reset.
4495 **/
4496void lpfc_reset_barrier(struct lpfc_hba *phba)
4497{
4498        uint32_t __iomem *resp_buf;
4499        uint32_t __iomem *mbox_buf;
4500        volatile uint32_t mbox;
4501        uint32_t hc_copy, ha_copy, resp_data;
4502        int  i;
4503        uint8_t hdrtype;
4504
4505        lockdep_assert_held(&phba->hbalock);
4506
4507        pci_read_config_byte(phba->pcidev, PCI_HEADER_TYPE, &hdrtype);
4508        if (hdrtype != 0x80 ||
4509            (FC_JEDEC_ID(phba->vpd.rev.biuRev) != HELIOS_JEDEC_ID &&
4510             FC_JEDEC_ID(phba->vpd.rev.biuRev) != THOR_JEDEC_ID))
4511                return;
4512
4513        /*
4514         * Tell the other part of the chip to suspend temporarily all
4515         * its DMA activity.
4516         */
4517        resp_buf = phba->MBslimaddr;
4518
4519        /* Disable the error attention */
4520        if (lpfc_readl(phba->HCregaddr, &hc_copy))
4521                return;
4522        writel((hc_copy & ~HC_ERINT_ENA), phba->HCregaddr);
4523        readl(phba->HCregaddr); /* flush */
4524        phba->link_flag |= LS_IGNORE_ERATT;
4525
4526        if (lpfc_readl(phba->HAregaddr, &ha_copy))
4527                return;
4528        if (ha_copy & HA_ERATT) {
4529                /* Clear Chip error bit */
4530                writel(HA_ERATT, phba->HAregaddr);
4531                phba->pport->stopped = 1;
4532        }
4533
4534        mbox = 0;
4535        ((MAILBOX_t *)&mbox)->mbxCommand = MBX_KILL_BOARD;
4536        ((MAILBOX_t *)&mbox)->mbxOwner = OWN_CHIP;
4537
4538        writel(BARRIER_TEST_PATTERN, (resp_buf + 1));
4539        mbox_buf = phba->MBslimaddr;
4540        writel(mbox, mbox_buf);
4541
4542        for (i = 0; i < 50; i++) {
4543                if (lpfc_readl((resp_buf + 1), &resp_data))
4544                        return;
4545                if (resp_data != ~(BARRIER_TEST_PATTERN))
4546                        mdelay(1);
4547                else
4548                        break;
4549        }
4550        resp_data = 0;
4551        if (lpfc_readl((resp_buf + 1), &resp_data))
4552                return;
4553        if (resp_data  != ~(BARRIER_TEST_PATTERN)) {
4554                if (phba->sli.sli_flag & LPFC_SLI_ACTIVE ||
4555                    phba->pport->stopped)
4556                        goto restore_hc;
4557                else
4558                        goto clear_errat;
4559        }
4560
4561        ((MAILBOX_t *)&mbox)->mbxOwner = OWN_HOST;
4562        resp_data = 0;
4563        for (i = 0; i < 500; i++) {
4564                if (lpfc_readl(resp_buf, &resp_data))
4565                        return;
4566                if (resp_data != mbox)
4567                        mdelay(1);
4568                else
4569                        break;
4570        }
4571
4572clear_errat:
4573
4574        while (++i < 500) {
4575                if (lpfc_readl(phba->HAregaddr, &ha_copy))
4576                        return;
4577                if (!(ha_copy & HA_ERATT))
4578                        mdelay(1);
4579                else
4580                        break;
4581        }
4582
4583        if (readl(phba->HAregaddr) & HA_ERATT) {
4584                writel(HA_ERATT, phba->HAregaddr);
4585                phba->pport->stopped = 1;
4586        }
4587
4588restore_hc:
4589        phba->link_flag &= ~LS_IGNORE_ERATT;
4590        writel(hc_copy, phba->HCregaddr);
4591        readl(phba->HCregaddr); /* flush */
4592}
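
/*
 * Editorial sketch (not driver code): the barrier above is a write-then-poll
 * handshake: write BARRIER_TEST_PATTERN, hand the KILL_BOARD mailbox to the
 * chip, then poll until the chip echoes the pattern's complement or the
 * timeout expires. The polling helper in isolation:
 */
#if 0	/* example only */
static bool poll_for_value(uint32_t __iomem *reg, uint32_t want, int max_ms)
{
	uint32_t val;
	int i;

	for (i = 0; i < max_ms; i++) {
		if (lpfc_readl(reg, &val))
			return false;	/* register read failed */
		if (val == want)
			return true;
		mdelay(1);
	}
	return false;			/* timed out */
}
#endif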
4593
4594/**
4595 * lpfc_sli_brdkill - Issue a kill_board mailbox command
4596 * @phba: Pointer to HBA context object.
4597 *
4598 * This function issues a kill_board mailbox command and waits for
4599 * the error attention interrupt. This function is called for stopping
4600 * the firmware processing. The caller is not required to hold any
4601 * locks. This function calls lpfc_hba_down_post function to free
4602 * any pending commands after the kill. The function will return 1 when it
4603 * fails to kill the board, else it will return 0.
4604 **/
4605int
4606lpfc_sli_brdkill(struct lpfc_hba *phba)
4607{
4608        struct lpfc_sli *psli;
4609        LPFC_MBOXQ_t *pmb;
4610        uint32_t status;
4611        uint32_t ha_copy;
4612        int retval;
4613        int i = 0;
4614
4615        psli = &phba->sli;
4616
4617        /* Kill HBA */
4618        lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4619                        "0329 Kill HBA Data: x%x x%x\n",
4620                        phba->pport->port_state, psli->sli_flag);
4621
4622        pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4623        if (!pmb)
4624                return 1;
4625
4626        /* Disable the error attention */
4627        spin_lock_irq(&phba->hbalock);
4628        if (lpfc_readl(phba->HCregaddr, &status)) {
4629                spin_unlock_irq(&phba->hbalock);
4630                mempool_free(pmb, phba->mbox_mem_pool);
4631                return 1;
4632        }
4633        status &= ~HC_ERINT_ENA;
4634        writel(status, phba->HCregaddr);
4635        readl(phba->HCregaddr); /* flush */
4636        phba->link_flag |= LS_IGNORE_ERATT;
4637        spin_unlock_irq(&phba->hbalock);
4638
4639        lpfc_kill_board(phba, pmb);
4640        pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
4641        retval = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
4642
4643        if (retval != MBX_SUCCESS) {
4644                if (retval != MBX_BUSY)
4645                        mempool_free(pmb, phba->mbox_mem_pool);
4646                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
4647                                "2752 KILL_BOARD command failed retval %d\n",
4648                                retval);
4649                spin_lock_irq(&phba->hbalock);
4650                phba->link_flag &= ~LS_IGNORE_ERATT;
4651                spin_unlock_irq(&phba->hbalock);
4652                return 1;
4653        }
4654
4655        spin_lock_irq(&phba->hbalock);
4656        psli->sli_flag &= ~LPFC_SLI_ACTIVE;
4657        spin_unlock_irq(&phba->hbalock);
4658
4659        mempool_free(pmb, phba->mbox_mem_pool);
4660
4661        /* There is no completion for a KILL_BOARD mbox cmd. Check for an error
4662         * attention every 100ms for 3 seconds. If we don't get ERATT after
4663         * 3 seconds we still set HBA_ERROR state because the status of the
4664         * board is now undefined.
4665         */
4666        if (lpfc_readl(phba->HAregaddr, &ha_copy))
4667                return 1;
4668        while ((i++ < 30) && !(ha_copy & HA_ERATT)) {
4669                mdelay(100);
4670                if (lpfc_readl(phba->HAregaddr, &ha_copy))
4671                        return 1;
4672        }
4673
4674        del_timer_sync(&psli->mbox_tmo);
4675        if (ha_copy & HA_ERATT) {
4676                writel(HA_ERATT, phba->HAregaddr);
4677                phba->pport->stopped = 1;
4678        }
4679        spin_lock_irq(&phba->hbalock);
4680        psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
4681        psli->mbox_active = NULL;
4682        phba->link_flag &= ~LS_IGNORE_ERATT;
4683        spin_unlock_irq(&phba->hbalock);
4684
4685        lpfc_hba_down_post(phba);
4686        phba->link_state = LPFC_HBA_ERROR;
4687
4688        return ha_copy & HA_ERATT ? 0 : 1;
4689}
4690
4691/**
4692 * lpfc_sli_brdreset - Reset a sli-2 or sli-3 HBA
4693 * @phba: Pointer to HBA context object.
4694 *
4695 * This function resets the HBA by writing HC_INITFF to the control
4696 * register. After the HBA resets, this function resets all the iocb ring
4697 * indices. This function disables PCI layer parity checking during
4698 * the reset.
4699 * This function returns 0 always.
4700 * The caller is not required to hold any locks.
4701 **/
4702int
4703lpfc_sli_brdreset(struct lpfc_hba *phba)
4704{
4705        struct lpfc_sli *psli;
4706        struct lpfc_sli_ring *pring;
4707        uint16_t cfg_value;
4708        int i;
4709
4710        psli = &phba->sli;
4711
4712        /* Reset HBA */
4713        lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4714                        "0325 Reset HBA Data: x%x x%x\n",
4715                        (phba->pport) ? phba->pport->port_state : 0,
4716                        psli->sli_flag);
4717
4718        /* perform board reset */
4719        phba->fc_eventTag = 0;
4720        phba->link_events = 0;
4721        phba->hba_flag |= HBA_NEEDS_CFG_PORT;
4722        if (phba->pport) {
4723                phba->pport->fc_myDID = 0;
4724                phba->pport->fc_prevDID = 0;
4725        }
4726
4727        /* Turn off parity checking and serr during the physical reset */
4728        if (pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value))
4729                return -EIO;
4730
4731        pci_write_config_word(phba->pcidev, PCI_COMMAND,
4732                              (cfg_value &
4733                               ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR)));
4734
4735        psli->sli_flag &= ~(LPFC_SLI_ACTIVE | LPFC_PROCESS_LA);
4736
4737        /* Now toggle INITFF bit in the Host Control Register */
4738        writel(HC_INITFF, phba->HCregaddr);
4739        mdelay(1);
4740        readl(phba->HCregaddr); /* flush */
4741        writel(0, phba->HCregaddr);
4742        readl(phba->HCregaddr); /* flush */
4743
4744        /* Restore PCI cmd register */
4745        pci_write_config_word(phba->pcidev, PCI_COMMAND, cfg_value);
4746
4747        /* Initialize relevant SLI info */
4748        for (i = 0; i < psli->num_rings; i++) {
4749                pring = &psli->sli3_ring[i];
4750                pring->flag = 0;
4751                pring->sli.sli3.rspidx = 0;
4752                pring->sli.sli3.next_cmdidx  = 0;
4753                pring->sli.sli3.local_getidx = 0;
4754                pring->sli.sli3.cmdidx = 0;
4755                pring->missbufcnt = 0;
4756        }
4757
4758        phba->link_state = LPFC_WARM_START;
4759        return 0;
4760}
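
/*
 * Editorial sketch (not driver code): both reset paths bracket the actual
 * reset with a save/modify/restore of the PCI command register so the
 * reset glitch cannot raise parity or SERR errors on the bus. The
 * bracketing in isolation, with a hypothetical reset callback:
 */
#if 0	/* example only */
static int with_parity_masked(struct pci_dev *pdev,
			      void (*do_reset)(void *), void *arg)
{
	uint16_t cmd;

	if (pci_read_config_word(pdev, PCI_COMMAND, &cmd))
		return -EIO;
	pci_write_config_word(pdev, PCI_COMMAND,
			      cmd & ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR));

	do_reset(arg);		/* bus errors from the glitch are masked */

	pci_write_config_word(pdev, PCI_COMMAND, cmd);	/* restore */
	return 0;
}
#endif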
4761
4762/**
4763 * lpfc_sli4_brdreset - Reset a sli-4 HBA
4764 * @phba: Pointer to HBA context object.
4765 *
4766 * This function resets a SLI4 HBA. This function disables PCI layer parity
4767 * checking while it resets the device. The caller is not required to hold
4768 * any locks.
4769 *
4770 * This function returns 0 on success else returns negative error code.
4771 **/
4772int
4773lpfc_sli4_brdreset(struct lpfc_hba *phba)
4774{
4775        struct lpfc_sli *psli = &phba->sli;
4776        uint16_t cfg_value;
4777        int rc = 0;
4778
4779        /* Reset HBA */
4780        lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4781                        "0295 Reset HBA Data: x%x x%x x%x\n",
4782                        phba->pport->port_state, psli->sli_flag,
4783                        phba->hba_flag);
4784
4785        /* perform board reset */
4786        phba->fc_eventTag = 0;
4787        phba->link_events = 0;
4788        phba->pport->fc_myDID = 0;
4789        phba->pport->fc_prevDID = 0;
4790
4791        spin_lock_irq(&phba->hbalock);
4792        psli->sli_flag &= ~(LPFC_PROCESS_LA);
4793        phba->fcf.fcf_flag = 0;
4794        spin_unlock_irq(&phba->hbalock);
4795
4796        /* SLI4 INTF 2: if FW dump is being taken skip INIT_PORT */
4797        if (phba->hba_flag & HBA_FW_DUMP_OP) {
4798                phba->hba_flag &= ~HBA_FW_DUMP_OP;
4799                return rc;
4800        }
4801
4802        /* Now physically reset the device */
4803        lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4804                        "0389 Performing PCI function reset!\n");
4805
4806        /* Turn off parity checking and serr during the physical reset */
4807        if (pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value)) {
4808                lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4809                                "3205 PCI read Config failed\n");
4810                return -EIO;
4811        }
4812
4813        pci_write_config_word(phba->pcidev, PCI_COMMAND, (cfg_value &
4814                              ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR)));
4815
4816        /* Perform FCoE PCI function reset before freeing queue memory */
4817        rc = lpfc_pci_function_reset(phba);
4818
4819        /* Restore PCI cmd register */
4820        pci_write_config_word(phba->pcidev, PCI_COMMAND, cfg_value);
4821
4822        return rc;
4823}
4824
4825/**
4826 * lpfc_sli_brdrestart_s3 - Restart a sli-3 hba
4827 * @phba: Pointer to HBA context object.
4828 *
4829 * This function is called in the SLI initialization code path to
4830 * restart the HBA. The caller is not required to hold any lock.
4831 * This function writes MBX_RESTART mailbox command to the SLIM and
4832 * resets the HBA. At the end of the function, it calls lpfc_hba_down_post
4833 * function to free any pending commands. The function enables
4834 * POST only during the first initialization. The function returns zero.
4835 * The function does not guarantee completion of MBX_RESTART mailbox
4836 * command before the return of this function.
4837 **/
4838static int
4839lpfc_sli_brdrestart_s3(struct lpfc_hba *phba)
4840{
4841        MAILBOX_t *mb;
4842        struct lpfc_sli *psli;
4843        volatile uint32_t word0;
4844        void __iomem *to_slim;
4845        uint32_t hba_aer_enabled;
4846
4847        spin_lock_irq(&phba->hbalock);
4848
4849        /* Take PCIe device Advanced Error Reporting (AER) state */
4850        hba_aer_enabled = phba->hba_flag & HBA_AER_ENABLED;
4851
4852        psli = &phba->sli;
4853
4854        /* Restart HBA */
4855        lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4856                        "0337 Restart HBA Data: x%x x%x\n",
4857                        (phba->pport) ? phba->pport->port_state : 0,
4858                        psli->sli_flag);
4859
4860        word0 = 0;
4861        mb = (MAILBOX_t *) &word0;
4862        mb->mbxCommand = MBX_RESTART;
4863        mb->mbxHc = 1;
4864
4865        lpfc_reset_barrier(phba);
4866
4867        to_slim = phba->MBslimaddr;
4868        writel(*(uint32_t *) mb, to_slim);
4869        readl(to_slim); /* flush */
4870
4871        /* Only skip post after fc_ffinit is completed */
4872        if (phba->pport && phba->pport->port_state)
4873                word0 = 1;      /* This is really setting up word1 */
4874        else
4875                word0 = 0;      /* This is really setting up word1 */
4876        to_slim = phba->MBslimaddr + sizeof (uint32_t);
4877        writel(*(uint32_t *) mb, to_slim);
4878        readl(to_slim); /* flush */
4879
4880        lpfc_sli_brdreset(phba);
4881        if (phba->pport)
4882                phba->pport->stopped = 0;
4883        phba->link_state = LPFC_INIT_START;
4884        phba->hba_flag = 0;
4885        spin_unlock_irq(&phba->hbalock);
4886
4887        memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets));
4888        psli->stats_start = ktime_get_seconds();
4889
4890        /* Give the INITFF and Post time to settle. */
4891        mdelay(100);
4892
4893        /* Reset HBA AER if it was enabled, note hba_flag was reset above */
4894        if (hba_aer_enabled)
4895                pci_disable_pcie_error_reporting(phba->pcidev);
4896
4897        lpfc_hba_down_post(phba);
4898
4899        return 0;
4900}
4901
4902/**
4903 * lpfc_sli_brdrestart_s4 - Restart the sli-4 hba
4904 * @phba: Pointer to HBA context object.
4905 *
4906 * This function is called in the SLI initialization code path to restart
4907 * a SLI4 HBA. The caller is not required to hold any lock.
4908 * At the end of the function, it calls lpfc_hba_down_post function to
4909 * free any pending commands.
4910 **/
4911static int
4912lpfc_sli_brdrestart_s4(struct lpfc_hba *phba)
4913{
4914        struct lpfc_sli *psli = &phba->sli;
4915        uint32_t hba_aer_enabled;
4916        int rc;
4917
4918        /* Restart HBA */
4919        lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4920                        "0296 Restart HBA Data: x%x x%x\n",
4921                        phba->pport->port_state, psli->sli_flag);
4922
4923        /* Take PCIe device Advanced Error Reporting (AER) state */
4924        hba_aer_enabled = phba->hba_flag & HBA_AER_ENABLED;
4925
4926        rc = lpfc_sli4_brdreset(phba);
4927        if (rc) {
4928                phba->link_state = LPFC_HBA_ERROR;
4929                goto hba_down_queue;
4930        }
4931
4932        spin_lock_irq(&phba->hbalock);
4933        phba->pport->stopped = 0;
4934        phba->link_state = LPFC_INIT_START;
4935        phba->hba_flag = 0;
4936        spin_unlock_irq(&phba->hbalock);
4937
4938        memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets));
4939        psli->stats_start = ktime_get_seconds();
4940
4941        /* Reset HBA AER if it was enabled, note hba_flag was reset above */
4942        if (hba_aer_enabled)
4943                pci_disable_pcie_error_reporting(phba->pcidev);
4944
4945hba_down_queue:
4946        lpfc_hba_down_post(phba);
4947        lpfc_sli4_queue_destroy(phba);
4948
4949        return rc;
4950}
4951
4952/**
4953 * lpfc_sli_brdrestart - Wrapper func for restarting hba
4954 * @phba: Pointer to HBA context object.
4955 *
4956 * This routine wraps the actual SLI3 or SLI4 hba restart routine from the
4957 * API jump table function pointer from the lpfc_hba struct.
4958**/
4959int
4960lpfc_sli_brdrestart(struct lpfc_hba *phba)
4961{
4962        return phba->lpfc_sli_brdrestart(phba);
4963}
4964
4965/**
4966 * lpfc_sli_chipset_init - Wait for the restart of the HBA after a restart
4967 * @phba: Pointer to HBA context object.
4968 *
4969 * This function is called after a HBA restart to wait for successful
4970 * restart of the HBA. Successful restart of the HBA is indicated by
4971 * HS_FFRDY and HS_MBRDY bits. If the HBA fails to restart even after 15
4972 * iteration, the function will restart the HBA again. The function returns
4973 * zero if HBA successfully restarted else returns negative error code.
4974 **/
4975int
4976lpfc_sli_chipset_init(struct lpfc_hba *phba)
4977{
4978        uint32_t status, i = 0;
4979
4980        /* Read the HBA Host Status Register */
4981        if (lpfc_readl(phba->HSregaddr, &status))
4982                return -EIO;
4983
4984        /* Check status register to see what current state is */
4985        i = 0;
4986        while ((status & (HS_FFRDY | HS_MBRDY)) != (HS_FFRDY | HS_MBRDY)) {
4987
4988                /* Check every 10ms for 10 retries, then every 100ms for 90
4989                 * retries, then every 1 sec for 50 retries, for a total of
4990                 * ~60 seconds before resetting the board again and checking
4991                 * every 1 sec for 50 more retries. Waiting up to 60 seconds
4992                 * before the board is ready is required for the Falcon FIPS
4993                 * zeroization to complete; any board reset in between restarts
4994                 * the zeroization and further delays board readiness.
4995                 */
4996                if (i++ >= 200) {
4997                        /* Adapter failed to init, timeout, status reg
4998                           <status> */
4999                        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5000                                        "0436 Adapter failed to init, "
5001                                        "timeout, status reg x%x, "
5002                                        "FW Data: A8 x%x AC x%x\n", status,
5003                                        readl(phba->MBslimaddr + 0xa8),
5004                                        readl(phba->MBslimaddr + 0xac));
5005                        phba->link_state = LPFC_HBA_ERROR;
5006                        return -ETIMEDOUT;
5007                }
5008
5009                /* Check to see if any errors occurred during init */
5010                if (status & HS_FFERM) {
5011                        /* ERROR: During chipset initialization */
5012                        /* Adapter failed to init, chipset, status reg
5013                           <status> */
5014                        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5015                                        "0437 Adapter failed to init, "
5016                                        "chipset, status reg x%x, "
5017                                        "FW Data: A8 x%x AC x%x\n", status,
5018                                        readl(phba->MBslimaddr + 0xa8),
5019                                        readl(phba->MBslimaddr + 0xac));
5020                        phba->link_state = LPFC_HBA_ERROR;
5021                        return -EIO;
5022                }
5023
5024                if (i <= 10)
5025                        msleep(10);
5026                else if (i <= 100)
5027                        msleep(100);
5028                else
5029                        msleep(1000);
5030
5031                if (i == 150) {
5032                        /* Do post */
5033                        phba->pport->port_state = LPFC_VPORT_UNKNOWN;
5034                        lpfc_sli_brdrestart(phba);
5035                }
5036                /* Read the HBA Host Status Register */
5037                if (lpfc_readl(phba->HSregaddr, &status))
5038                        return -EIO;
5039        }
5040
5041        /* Check to see if any errors occurred during init */
5042        if (status & HS_FFERM) {
5043                /* ERROR: During chipset initialization */
5044                /* Adapter failed to init, chipset, status reg <status> */
5045                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5046                                "0438 Adapter failed to init, chipset, "
5047                                "status reg x%x, "
5048                                "FW Data: A8 x%x AC x%x\n", status,
5049                                readl(phba->MBslimaddr + 0xa8),
5050                                readl(phba->MBslimaddr + 0xac));
5051                phba->link_state = LPFC_HBA_ERROR;
5052                return -EIO;
5053        }
5054
5055        phba->hba_flag |= HBA_NEEDS_CFG_PORT;
5056
5057        /* Clear all interrupt enable conditions */
5058        writel(0, phba->HCregaddr);
5059        readl(phba->HCregaddr); /* flush */
5060
5061        /* setup host attn register */
5062        writel(0xffffffff, phba->HAregaddr);
5063        readl(phba->HAregaddr); /* flush */
5064        return 0;
5065}
5066
5067/**
5068 * lpfc_sli_hbq_count - Get the number of HBQs to be configured
5069 *
5070 * This function calculates and returns the number of HBQs required to be
5071 * configured.
5072 **/
5073int
5074lpfc_sli_hbq_count(void)
5075{
5076        return ARRAY_SIZE(lpfc_hbq_defs);
5077}
5078
5079/**
5080 * lpfc_sli_hbq_entry_count - Calculate total number of hbq entries
5081 *
5082 * This function adds the number of hbq entries in every HBQ to get
5083 * the total number of hbq entries required for the HBA and returns
5084 * the total count.
5085 **/
5086static int
5087lpfc_sli_hbq_entry_count(void)
5088{
5089        int  hbq_count = lpfc_sli_hbq_count();
5090        int  count = 0;
5091        int  i;
5092
5093        for (i = 0; i < hbq_count; ++i)
5094                count += lpfc_hbq_defs[i]->entry_count;
5095        return count;
5096}
5097
5098/**
5099 * lpfc_sli_hbq_size - Calculate memory required for all hbq entries
5100 *
5101 * This function calculates amount of memory required for all hbq entries
5102 * to be configured and returns the total memory required.
5103 **/
5104int
5105lpfc_sli_hbq_size(void)
5106{
5107        return lpfc_sli_hbq_entry_count() * sizeof(struct lpfc_hbq_entry);
5108}
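
/*
 * Editorial sketch: the three helpers above reduce to simple arithmetic
 * over lpfc_hbq_defs. For instance, assuming two HBQ definitions of 256
 * and 128 entries and a 16-byte struct lpfc_hbq_entry, lpfc_sli_hbq_size()
 * would return (256 + 128) * 16 = 6144 bytes; the entry counts and entry
 * size here are illustrative, not the driver's actual values.
 */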
5109
5110/**
5111 * lpfc_sli_hbq_setup - configure and initialize HBQs
5112 * @phba: Pointer to HBA context object.
5113 *
5114 * This function is called during the SLI initialization to configure
5115 * all the HBQs and post buffers to the HBQ. The caller is not
5116 * required to hold any locks. This function will return zero if successful
5117 * else it will return negative error code.
5118 **/
5119static int
5120lpfc_sli_hbq_setup(struct lpfc_hba *phba)
5121{
5122        int  hbq_count = lpfc_sli_hbq_count();
5123        LPFC_MBOXQ_t *pmb;
5124        MAILBOX_t *pmbox;
5125        uint32_t hbqno;
5126        uint32_t hbq_entry_index;
5127
5128        /* Get a Mailbox buffer to setup mailbox
5129         * commands for HBA initialization
5130         */
5131        pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5132
5133        if (!pmb)
5134                return -ENOMEM;
5135
5136        pmbox = &pmb->u.mb;
5137
5138        /* Initialize the struct lpfc_sli_hbq structure for each hbq */
5139        phba->link_state = LPFC_INIT_MBX_CMDS;
5140        phba->hbq_in_use = 1;
5141
5142        hbq_entry_index = 0;
5143        for (hbqno = 0; hbqno < hbq_count; ++hbqno) {
5144                phba->hbqs[hbqno].next_hbqPutIdx = 0;
5145                phba->hbqs[hbqno].hbqPutIdx      = 0;
5146                phba->hbqs[hbqno].local_hbqGetIdx   = 0;
5147                phba->hbqs[hbqno].entry_count =
5148                        lpfc_hbq_defs[hbqno]->entry_count;
5149                lpfc_config_hbq(phba, hbqno, lpfc_hbq_defs[hbqno],
5150                        hbq_entry_index, pmb);
5151                hbq_entry_index += phba->hbqs[hbqno].entry_count;
5152
5153                if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
5154                        /* Adapter failed to init, mbxCmd <cmd> CFG_RING,
5155                           mbxStatus <status>, ring <num> */
5156
5157                        lpfc_printf_log(phba, KERN_ERR,
5158                                        LOG_SLI | LOG_VPORT,
5159                                        "1805 Adapter failed to init. "
5160                                        "Data: x%x x%x x%x\n",
5161                                        pmbox->mbxCommand,
5162                                        pmbox->mbxStatus, hbqno);
5163
5164                        phba->link_state = LPFC_HBA_ERROR;
5165                        mempool_free(pmb, phba->mbox_mem_pool);
5166                        return -ENXIO;
5167                }
5168        }
5169        phba->hbq_count = hbq_count;
5170
5171        mempool_free(pmb, phba->mbox_mem_pool);
5172
5173        /* Initially populate or replenish the HBQs */
5174        for (hbqno = 0; hbqno < hbq_count; ++hbqno)
5175                lpfc_sli_hbqbuf_init_hbqs(phba, hbqno);
5176        return 0;
5177}
5178
5179/**
5180 * lpfc_sli4_rb_setup - Initialize and post RBs to HBA
5181 * @phba: Pointer to HBA context object.
5182 *
5183 * This function is called during the SLI initialization to configure
5184 * all the HBQs and post buffers to the HBQ. The caller is not
5185 * required to hold any locks. This function will return zero if successful
5186 * else it will return negative error code.
5187 **/
5188static int
5189lpfc_sli4_rb_setup(struct lpfc_hba *phba)
5190{
5191        phba->hbq_in_use = 1;
5192        /* Special case when MDS diagnostics are enabled and supported:
5193         * the receive buffer count is halved to manage the incoming
5194         * traffic.
5195         */
5197        if (phba->cfg_enable_mds_diags && phba->mds_diags_support)
5198                phba->hbqs[LPFC_ELS_HBQ].entry_count =
5199                        lpfc_hbq_defs[LPFC_ELS_HBQ]->entry_count >> 1;
5200        else
5201                phba->hbqs[LPFC_ELS_HBQ].entry_count =
5202                        lpfc_hbq_defs[LPFC_ELS_HBQ]->entry_count;
5203        phba->hbq_count = 1;
5204        /* Initially populate or replenish the ELS HBQ */
5205        lpfc_sli_hbqbuf_init_hbqs(phba, LPFC_ELS_HBQ);
5206        return 0;
5207}
5208
5209/**
5210 * lpfc_sli_config_port - Issue config port mailbox command
5211 * @phba: Pointer to HBA context object.
5212 * @sli_mode: sli mode - 2/3
5213 *
5214 * This function is called by the sli initialization code path
5215 * to issue config_port mailbox command. This function restarts the
5216 * HBA firmware and issues a config_port mailbox command to configure
5217 * the SLI interface in the sli mode specified by sli_mode
5218 * variable. The caller is not required to hold any locks.
5219 * The function returns 0 if successful, else returns negative error
5220 * code.
5221 **/
5222int
5223lpfc_sli_config_port(struct lpfc_hba *phba, int sli_mode)
5224{
5225        LPFC_MBOXQ_t *pmb;
5226        uint32_t resetcount = 0, rc = 0, done = 0;
5227
5228        pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5229        if (!pmb) {
5230                phba->link_state = LPFC_HBA_ERROR;
5231                return -ENOMEM;
5232        }
5233
5234        phba->sli_rev = sli_mode;
5235        while (resetcount < 2 && !done) {
5236                spin_lock_irq(&phba->hbalock);
5237                phba->sli.sli_flag |= LPFC_SLI_MBOX_ACTIVE;
5238                spin_unlock_irq(&phba->hbalock);
5239                phba->pport->port_state = LPFC_VPORT_UNKNOWN;
5240                lpfc_sli_brdrestart(phba);
5241                rc = lpfc_sli_chipset_init(phba);
5242                if (rc)
5243                        break;
5244
5245                spin_lock_irq(&phba->hbalock);
5246                phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
5247                spin_unlock_irq(&phba->hbalock);
5248                resetcount++;
5249
5250                /* Call pre CONFIG_PORT mailbox command initialization.  A
5251                 * value of 0 means the call was successful.  Any nonzero
5252                 * value is a failure, but if ERESTART is returned,
5253                 * the driver may reset the HBA and try again.
5254                 */
5255                rc = lpfc_config_port_prep(phba);
5256                if (rc == -ERESTART) {
5257                        phba->link_state = LPFC_LINK_UNKNOWN;
5258                        continue;
5259                } else if (rc)
5260                        break;
5261
5262                phba->link_state = LPFC_INIT_MBX_CMDS;
5263                lpfc_config_port(phba, pmb);
5264                rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
5265                phba->sli3_options &= ~(LPFC_SLI3_NPIV_ENABLED |
5266                                        LPFC_SLI3_HBQ_ENABLED |
5267                                        LPFC_SLI3_CRP_ENABLED |
5268                                        LPFC_SLI3_DSS_ENABLED);
5269                if (rc != MBX_SUCCESS) {
5270                        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5271                                "0442 Adapter failed to init, mbxCmd x%x "
5272                                "CONFIG_PORT, mbxStatus x%x Data: x%x\n",
5273                                pmb->u.mb.mbxCommand, pmb->u.mb.mbxStatus, 0);
5274                        spin_lock_irq(&phba->hbalock);
5275                        phba->sli.sli_flag &= ~LPFC_SLI_ACTIVE;
5276                        spin_unlock_irq(&phba->hbalock);
5277                        rc = -ENXIO;
5278                } else {
5279                        /* Allow asynchronous mailbox command to go through */
5280                        spin_lock_irq(&phba->hbalock);
5281                        phba->sli.sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
5282                        spin_unlock_irq(&phba->hbalock);
5283                        done = 1;
5284
5285                        if ((pmb->u.mb.un.varCfgPort.casabt == 1) &&
5286                            (pmb->u.mb.un.varCfgPort.gasabt == 0))
5287                                lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
5288                                        "3110 Port did not grant ASABT\n");
5289                }
5290        }
5291        if (!done) {
5292                rc = -EINVAL;
5293                goto do_prep_failed;
5294        }
5295        if (pmb->u.mb.un.varCfgPort.sli_mode == 3) {
5296                if (!pmb->u.mb.un.varCfgPort.cMA) {
5297                        rc = -ENXIO;
5298                        goto do_prep_failed;
5299                }
5300                if (phba->max_vpi && pmb->u.mb.un.varCfgPort.gmv) {
5301                        phba->sli3_options |= LPFC_SLI3_NPIV_ENABLED;
5302                        phba->max_vpi = pmb->u.mb.un.varCfgPort.max_vpi;
5303                        phba->max_vports = (phba->max_vpi > phba->max_vports) ?
5304                                phba->max_vpi : phba->max_vports;
5305
5306                } else
5307                        phba->max_vpi = 0;
5308                if (pmb->u.mb.un.varCfgPort.gerbm)
5309                        phba->sli3_options |= LPFC_SLI3_HBQ_ENABLED;
5310                if (pmb->u.mb.un.varCfgPort.gcrp)
5311                        phba->sli3_options |= LPFC_SLI3_CRP_ENABLED;
5312
5313                phba->hbq_get = phba->mbox->us.s3_pgp.hbq_get;
5314                phba->port_gp = phba->mbox->us.s3_pgp.port;
5315
5316                if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) {
5317                        if (pmb->u.mb.un.varCfgPort.gbg == 0) {
5318                                phba->cfg_enable_bg = 0;
5319                                phba->sli3_options &= ~LPFC_SLI3_BG_ENABLED;
5320                                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5321                                                "0443 Adapter did not grant "
5322                                                "BlockGuard\n");
5323                        }
5324                }
5325        } else {
5326                phba->hbq_get = NULL;
5327                phba->port_gp = phba->mbox->us.s2.port;
5328                phba->max_vpi = 0;
5329        }
5330do_prep_failed:
5331        mempool_free(pmb, phba->mbox_mem_pool);
5332        return rc;
5333}
5334
5335
5336/**
5337 * lpfc_sli_hba_setup - SLI initialization function
5338 * @phba: Pointer to HBA context object.
5339 *
5340 * This function is the main SLI initialization function. This function
5341 * is called by the HBA initialization code, HBA reset code and HBA
5342 * error attention handler code. Caller is not required to hold any
5343 * locks. This function issues the config_port mailbox command to
5344 * configure the SLI and sets up the iocb rings and HBQ rings. Finally,
5345 * the function calls config_port_post to issue the init_link mailbox
5346 * command and start discovery. The function returns zero
5347 * if successful, else a negative error code.
5348 **/
5349int
5350lpfc_sli_hba_setup(struct lpfc_hba *phba)
5351{
5352        int rc;
5353        int  i;
5354        int longs;
5355
5356        /* The ISR-enable path already did CONFIG_PORT (for the config_msi mbx) */
5357        if (phba->hba_flag & HBA_NEEDS_CFG_PORT) {
5358                rc = lpfc_sli_config_port(phba, LPFC_SLI_REV3);
5359                if (rc)
5360                        return -EIO;
5361                phba->hba_flag &= ~HBA_NEEDS_CFG_PORT;
5362        }
5363        phba->fcp_embed_io = 0; /* SLI4 FC support only */
5364
5365        /* Enable PCIe device Advanced Error Reporting (AER) if configured */
5366        if (phba->cfg_aer_support == 1 && !(phba->hba_flag & HBA_AER_ENABLED)) {
5367                rc = pci_enable_pcie_error_reporting(phba->pcidev);
5368                if (!rc) {
5369                        lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5370                                        "2709 This device supports "
5371                                        "Advanced Error Reporting (AER)\n");
5372                        spin_lock_irq(&phba->hbalock);
5373                        phba->hba_flag |= HBA_AER_ENABLED;
5374                        spin_unlock_irq(&phba->hbalock);
5375                } else {
5376                        lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5377                                        "2708 This device does not support "
5378                                        "Advanced Error Reporting (AER): %d\n",
5379                                        rc);
5380                        phba->cfg_aer_support = 0;
5381                }
5382        }
5383
5384        if (phba->sli_rev == LPFC_SLI_REV3) {
5385                phba->iocb_cmd_size = SLI3_IOCB_CMD_SIZE;
5386                phba->iocb_rsp_size = SLI3_IOCB_RSP_SIZE;
5387        } else {
5388                phba->iocb_cmd_size = SLI2_IOCB_CMD_SIZE;
5389                phba->iocb_rsp_size = SLI2_IOCB_RSP_SIZE;
5390                phba->sli3_options = 0;
5391        }
5392
5393        lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5394                        "0444 Firmware in SLI %x mode. Max_vpi %d\n",
5395                        phba->sli_rev, phba->max_vpi);
5396        rc = lpfc_sli_ring_map(phba);
5397
5398        if (rc)
5399                goto lpfc_sli_hba_setup_error;
5400
5401        /* Initialize VPIs. */
5402        if (phba->sli_rev == LPFC_SLI_REV3) {
5403                /*
5404                 * The VPI bitmask and physical ID array are allocated
5405                 * and initialized once only - at driver load.  A port
5406                 * reset doesn't need to reinitialize this memory.
5407                 */
5408                if ((phba->vpi_bmask == NULL) && (phba->vpi_ids == NULL)) {
5409                        longs = (phba->max_vpi + BITS_PER_LONG) / BITS_PER_LONG;
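                        /*
                         * Editor's note: e.g. with max_vpi = 255 and
                         * BITS_PER_LONG = 64 this is (255 + 64) / 64 = 4
                         * longs, i.e. 256 bits -- one per VPI 0..max_vpi.
                         */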
5410                        phba->vpi_bmask = kcalloc(longs,
5411                                                  sizeof(unsigned long),
5412                                                  GFP_KERNEL);
5413                        if (!phba->vpi_bmask) {
5414                                rc = -ENOMEM;
5415                                goto lpfc_sli_hba_setup_error;
5416                        }
5417
5418                        phba->vpi_ids = kcalloc(phba->max_vpi + 1,
5419                                                sizeof(uint16_t),
5420                                                GFP_KERNEL);
5421                        if (!phba->vpi_ids) {
5422                                kfree(phba->vpi_bmask);
5423                                rc = -ENOMEM;
5424                                goto lpfc_sli_hba_setup_error;
5425                        }
5426                        for (i = 0; i <= phba->max_vpi; i++)
5427                                phba->vpi_ids[i] = i;
5428                }
5429        }
5430
5431        /* Init HBQs */
5432        if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
5433                rc = lpfc_sli_hbq_setup(phba);
5434                if (rc)
5435                        goto lpfc_sli_hba_setup_error;
5436        }
5437        spin_lock_irq(&phba->hbalock);
5438        phba->sli.sli_flag |= LPFC_PROCESS_LA;
5439        spin_unlock_irq(&phba->hbalock);
5440
5441        rc = lpfc_config_port_post(phba);
5442        if (rc)
5443                goto lpfc_sli_hba_setup_error;
5444
5445        return rc;
5446
5447lpfc_sli_hba_setup_error:
5448        phba->link_state = LPFC_HBA_ERROR;
5449        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5450                        "0445 Firmware initialization failed\n");
5451        return rc;
5452}
5453
5454/**
5455 * lpfc_sli4_read_fcoe_params - Read fcoe params from conf region
5456 * @phba: Pointer to HBA context object.
5457 *
5458 * This function issues a dump mailbox command to read config region
5459 * 23, parses the records in the region, and populates the driver's
5460 * data structures.
5461 **/
5462static int
5463lpfc_sli4_read_fcoe_params(struct lpfc_hba *phba)
5464{
5465        LPFC_MBOXQ_t *mboxq;
5466        struct lpfc_dmabuf *mp;
5467        struct lpfc_mqe *mqe;
5468        uint32_t data_length;
5469        int rc;
5470
5471        /* Program the default value of vlan_id and fc_map */
5472        phba->valid_vlan = 0;
5473        phba->fc_map[0] = LPFC_FCOE_FCF_MAP0;
5474        phba->fc_map[1] = LPFC_FCOE_FCF_MAP1;
5475        phba->fc_map[2] = LPFC_FCOE_FCF_MAP2;
5476
5477        mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5478        if (!mboxq)
5479                return -ENOMEM;
5480
5481        mqe = &mboxq->u.mqe;
5482        if (lpfc_sli4_dump_cfg_rg23(phba, mboxq)) {
5483                rc = -ENOMEM;
5484                goto out_free_mboxq;
5485        }
5486
5487        mp = (struct lpfc_dmabuf *)mboxq->ctx_buf;
5488        rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
5489
5490        lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
5491                        "(%d):2571 Mailbox cmd x%x Status x%x "
5492                        "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x "
5493                        "x%x x%x x%x x%x x%x x%x x%x x%x x%x "
5494                        "CQ: x%x x%x x%x x%x\n",
5495                        mboxq->vport ? mboxq->vport->vpi : 0,
5496                        bf_get(lpfc_mqe_command, mqe),
5497                        bf_get(lpfc_mqe_status, mqe),
5498                        mqe->un.mb_words[0], mqe->un.mb_words[1],
5499                        mqe->un.mb_words[2], mqe->un.mb_words[3],
5500                        mqe->un.mb_words[4], mqe->un.mb_words[5],
5501                        mqe->un.mb_words[6], mqe->un.mb_words[7],
5502                        mqe->un.mb_words[8], mqe->un.mb_words[9],
5503                        mqe->un.mb_words[10], mqe->un.mb_words[11],
5504                        mqe->un.mb_words[12], mqe->un.mb_words[13],
5505                        mqe->un.mb_words[14], mqe->un.mb_words[15],
5506                        mqe->un.mb_words[16], mqe->un.mb_words[50],
5507                        mboxq->mcqe.word0,
5508                        mboxq->mcqe.mcqe_tag0,  mboxq->mcqe.mcqe_tag1,
5509                        mboxq->mcqe.trailer);
5510
5511        if (rc) {
5512                lpfc_mbuf_free(phba, mp->virt, mp->phys);
5513                kfree(mp);
5514                rc = -EIO;
5515                goto out_free_mboxq;
5516        }
5517        data_length = mqe->un.mb_words[5];
5518        if (data_length > DMP_RGN23_SIZE) {
5519                lpfc_mbuf_free(phba, mp->virt, mp->phys);
5520                kfree(mp);
5521                rc = -EIO;
5522                goto out_free_mboxq;
5523        }
5524
5525        lpfc_parse_fcoe_conf(phba, mp->virt, data_length);
5526        lpfc_mbuf_free(phba, mp->virt, mp->phys);
5527        kfree(mp);
5528        rc = 0;
5529
5530out_free_mboxq:
5531        mempool_free(mboxq, phba->mbox_mem_pool);
5532        return rc;
5533}
5534
5535/**
5536 * lpfc_sli4_read_rev - Issue READ_REV and collect vpd data
5537 * @phba: pointer to lpfc hba data structure.
5538 * @mboxq: pointer to the LPFC_MBOXQ_t structure.
5539 * @vpd: pointer to the memory to hold resulting port vpd data.
5540 * @vpd_size: On input, the number of bytes allocated to @vpd.
5541 *            On output, the number of data bytes in @vpd.
5542 *
5543 * This routine executes a READ_REV SLI4 mailbox command.  In
5544 * addition, this routine gets the port vpd data.
5545 *
5546 * Return codes
5547 *      0 - successful
5548 *      -ENOMEM - could not allocate memory.
5549 **/
5550static int
5551lpfc_sli4_read_rev(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
5552                    uint8_t *vpd, uint32_t *vpd_size)
5553{
5554        int rc = 0;
5555        uint32_t dma_size;
5556        struct lpfc_dmabuf *dmabuf;
5557        struct lpfc_mqe *mqe;
5558
5559        dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
5560        if (!dmabuf)
5561                return -ENOMEM;
5562
5563        /*
5564         * Get a DMA buffer for the vpd data resulting from the READ_REV
5565         * mailbox command.
5566         */
5567        dma_size = *vpd_size;
5568        dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, dma_size,
5569                                          &dmabuf->phys, GFP_KERNEL);
5570        if (!dmabuf->virt) {
5571                kfree(dmabuf);
5572                return -ENOMEM;
5573        }
5574
5575        /*
5576         * The SLI4 implementation of READ_REV conflicts at word1,
5577         * bits 31:16 and SLI4 adds vpd functionality not present
5578         * in SLI3.  This code corrects the conflicts.
5579         */
5580        lpfc_read_rev(phba, mboxq);
5581        mqe = &mboxq->u.mqe;
5582        mqe->un.read_rev.vpd_paddr_high = putPaddrHigh(dmabuf->phys);
5583        mqe->un.read_rev.vpd_paddr_low = putPaddrLow(dmabuf->phys);
5584        mqe->un.read_rev.word1 &= 0x0000FFFF;
5585        bf_set(lpfc_mbx_rd_rev_vpd, &mqe->un.read_rev, 1);
5586        bf_set(lpfc_mbx_rd_rev_avail_len, &mqe->un.read_rev, dma_size);
5587
5588        rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
5589        if (rc) {
5590                dma_free_coherent(&phba->pcidev->dev, dma_size,
5591                                  dmabuf->virt, dmabuf->phys);
5592                kfree(dmabuf);
5593                return -EIO;
5594        }
5595
5596        /*
5597         * The available vpd length cannot be bigger than the
5598         * DMA buffer passed to the port.  Catch the less than
5599         * case and update the caller's size.
5600         */
5601        if (mqe->un.read_rev.avail_vpd_len < *vpd_size)
5602                *vpd_size = mqe->un.read_rev.avail_vpd_len;
5603
5604        memcpy(vpd, dmabuf->virt, *vpd_size);
5605
5606        dma_free_coherent(&phba->pcidev->dev, dma_size,
5607                          dmabuf->virt, dmabuf->phys);
5608        kfree(dmabuf);
5609        return 0;
5610}
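/*
 * Editor's note: a minimal usage sketch (not part of the driver) showing
 * the in/out semantics of the vpd_size parameter above; the caller passes
 * its buffer size in and reads the valid data length back out.  The
 * buffer size below is arbitrary.
 */
#if 0   /* illustrative only, not compiled */
static int lpfc_example_read_rev(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
        uint8_t vpd[1024];              /* hypothetical VPD buffer */
        uint32_t vpd_size = sizeof(vpd);
        int rc;

        rc = lpfc_sli4_read_rev(phba, mboxq, vpd, &vpd_size);
        if (rc)
                return rc;              /* -ENOMEM or -EIO */
        /* vpd_size now holds the number of valid bytes in vpd[] */
        return 0;
}
#endif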
5611
5612/**
5613 * lpfc_sli4_get_ctl_attr - Retrieve SLI4 device controller attributes
5614 * @phba: pointer to lpfc hba data structure.
5615 *
5616 * This routine retrieves the SLI4 device controller attributes (link type,
5617 * link number and BIOS version) for the port this PCI function is attached to.
5618 *
5619 * Return codes
5620 *      0 - successful
5621 *      otherwise - failed to retrieve controller attributes
5622 **/
5623static int
5624lpfc_sli4_get_ctl_attr(struct lpfc_hba *phba)
5625{
5626        LPFC_MBOXQ_t *mboxq;
5627        struct lpfc_mbx_get_cntl_attributes *mbx_cntl_attr;
5628        struct lpfc_controller_attribute *cntl_attr;
5629        void *virtaddr = NULL;
5630        uint32_t alloclen, reqlen;
5631        uint32_t shdr_status, shdr_add_status;
5632        union lpfc_sli4_cfg_shdr *shdr;
5633        int rc;
5634
5635        mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5636        if (!mboxq)
5637                return -ENOMEM;
5638
5639        /* Send COMMON_GET_CNTL_ATTRIBUTES mbox cmd */
5640        reqlen = sizeof(struct lpfc_mbx_get_cntl_attributes);
5641        alloclen = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
5642                        LPFC_MBOX_OPCODE_GET_CNTL_ATTRIBUTES, reqlen,
5643                        LPFC_SLI4_MBX_NEMBED);
5644
5645        if (alloclen < reqlen) {
5646                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5647                                "3084 Allocated DMA memory size (%d) is "
5648                                "less than the requested DMA memory size "
5649                                "(%d)\n", alloclen, reqlen);
5650                rc = -ENOMEM;
5651                goto out_free_mboxq;
5652        }
5653        rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
5654        virtaddr = mboxq->sge_array->addr[0];
5655        mbx_cntl_attr = (struct lpfc_mbx_get_cntl_attributes *)virtaddr;
5656        shdr = &mbx_cntl_attr->cfg_shdr;
5657        shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
5658        shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
5659        if (shdr_status || shdr_add_status || rc) {
5660                lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
5661                                "3085 Mailbox x%x (x%x/x%x) failed, "
5662                                "rc:x%x, status:x%x, add_status:x%x\n",
5663                                bf_get(lpfc_mqe_command, &mboxq->u.mqe),
5664                                lpfc_sli_config_mbox_subsys_get(phba, mboxq),
5665                                lpfc_sli_config_mbox_opcode_get(phba, mboxq),
5666                                rc, shdr_status, shdr_add_status);
5667                rc = -ENXIO;
5668                goto out_free_mboxq;
5669        }
5670
5671        cntl_attr = &mbx_cntl_attr->cntl_attr;
5672        phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_VAL;
5673        phba->sli4_hba.lnk_info.lnk_tp =
5674                bf_get(lpfc_cntl_attr_lnk_type, cntl_attr);
5675        phba->sli4_hba.lnk_info.lnk_no =
5676                bf_get(lpfc_cntl_attr_lnk_numb, cntl_attr);
5677
5678        memset(phba->BIOSVersion, 0, sizeof(phba->BIOSVersion));
5679        strlcat(phba->BIOSVersion, (char *)cntl_attr->bios_ver_str,
5680                sizeof(phba->BIOSVersion));
5681
5682        lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
5683                        "3086 lnk_type:%d, lnk_numb:%d, bios_ver:%s\n",
5684                        phba->sli4_hba.lnk_info.lnk_tp,
5685                        phba->sli4_hba.lnk_info.lnk_no,
5686                        phba->BIOSVersion);
5687out_free_mboxq:
5688        if (bf_get(lpfc_mqe_command, &mboxq->u.mqe) == MBX_SLI4_CONFIG)
5689                lpfc_sli4_mbox_cmd_free(phba, mboxq);
5690        else
5691                mempool_free(mboxq, phba->mbox_mem_pool);
5692        return rc;
5693}
5694
5695/**
5696 * lpfc_sli4_retrieve_pport_name - Retrieve SLI4 device physical port name
5697 * @phba: pointer to lpfc hba data structure.
5698 *
5699 * This routine retrieves the SLI4 device physical port name that this
5700 * PCI function is attached to.
5701 *
5702 * Return codes
5703 *      0 - successful
5704 *      otherwise - failed to retrieve physical port name
5705 **/
5706static int
5707lpfc_sli4_retrieve_pport_name(struct lpfc_hba *phba)
5708{
5709        LPFC_MBOXQ_t *mboxq;
5710        struct lpfc_mbx_get_port_name *get_port_name;
5711        uint32_t shdr_status, shdr_add_status;
5712        union lpfc_sli4_cfg_shdr *shdr;
5713        char cport_name = 0;
5714        int rc;
5715
5716        /* We assume nothing at this point */
5717        phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_INVAL;
5718        phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_NON;
5719
5720        mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5721        if (!mboxq)
5722                return -ENOMEM;
5723        /* obtain link type and link number via READ_CONFIG */
5724        phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_INVAL;
5725        lpfc_sli4_read_config(phba);
5726        if (phba->sli4_hba.lnk_info.lnk_dv == LPFC_LNK_DAT_VAL)
5727                goto retrieve_ppname;
5728
5729        /* obtain link type and link number via COMMON_GET_CNTL_ATTRIBUTES */
5730        rc = lpfc_sli4_get_ctl_attr(phba);
5731        if (rc)
5732                goto out_free_mboxq;
5733
5734retrieve_ppname:
5735        lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
5736                LPFC_MBOX_OPCODE_GET_PORT_NAME,
5737                sizeof(struct lpfc_mbx_get_port_name) -
5738                sizeof(struct lpfc_sli4_cfg_mhdr),
5739                LPFC_SLI4_MBX_EMBED);
5740        get_port_name = &mboxq->u.mqe.un.get_port_name;
5741        shdr = (union lpfc_sli4_cfg_shdr *)&get_port_name->header.cfg_shdr;
5742        bf_set(lpfc_mbox_hdr_version, &shdr->request, LPFC_OPCODE_VERSION_1);
5743        bf_set(lpfc_mbx_get_port_name_lnk_type, &get_port_name->u.request,
5744                phba->sli4_hba.lnk_info.lnk_tp);
5745        rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
5746        shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
5747        shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
5748        if (shdr_status || shdr_add_status || rc) {
5749                lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
5750                                "3087 Mailbox x%x (x%x/x%x) failed: "
5751                                "rc:x%x, status:x%x, add_status:x%x\n",
5752                                bf_get(lpfc_mqe_command, &mboxq->u.mqe),
5753                                lpfc_sli_config_mbox_subsys_get(phba, mboxq),
5754                                lpfc_sli_config_mbox_opcode_get(phba, mboxq),
5755                                rc, shdr_status, shdr_add_status);
5756                rc = -ENXIO;
5757                goto out_free_mboxq;
5758        }
5759        switch (phba->sli4_hba.lnk_info.lnk_no) {
5760        case LPFC_LINK_NUMBER_0:
5761                cport_name = bf_get(lpfc_mbx_get_port_name_name0,
5762                                &get_port_name->u.response);
5763                phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
5764                break;
5765        case LPFC_LINK_NUMBER_1:
5766                cport_name = bf_get(lpfc_mbx_get_port_name_name1,
5767                                &get_port_name->u.response);
5768                phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
5769                break;
5770        case LPFC_LINK_NUMBER_2:
5771                cport_name = bf_get(lpfc_mbx_get_port_name_name2,
5772                                &get_port_name->u.response);
5773                phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
5774                break;
5775        case LPFC_LINK_NUMBER_3:
5776                cport_name = bf_get(lpfc_mbx_get_port_name_name3,
5777                                &get_port_name->u.response);
5778                phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
5779                break;
5780        default:
5781                break;
5782        }
5783
5784        if (phba->sli4_hba.pport_name_sta == LPFC_SLI4_PPNAME_GET) {
5785                phba->Port[0] = cport_name;
5786                phba->Port[1] = '\0';
5787                lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
5788                                "3091 SLI get port name: %s\n", phba->Port);
5789        }
5790
5791out_free_mboxq:
5792        if (bf_get(lpfc_mqe_command, &mboxq->u.mqe) == MBX_SLI4_CONFIG)
5793                lpfc_sli4_mbox_cmd_free(phba, mboxq);
5794        else
5795                mempool_free(mboxq, phba->mbox_mem_pool);
5796        return rc;
5797}
5798
5799/**
5800 * lpfc_sli4_arm_cqeq_intr - Arm sli-4 device completion and event queues
5801 * @phba: pointer to lpfc hba data structure.
5802 *
5803 * This routine is called to explicitly arm the SLI4 device's completion and
5804 * event queues.
5805 **/
5806static void
5807lpfc_sli4_arm_cqeq_intr(struct lpfc_hba *phba)
5808{
5809        int qidx;
5810        struct lpfc_sli4_hba *sli4_hba = &phba->sli4_hba;
5811        struct lpfc_sli4_hdw_queue *qp;
5812        struct lpfc_queue *eq;
5813
5814        sli4_hba->sli4_write_cq_db(phba, sli4_hba->mbx_cq, 0, LPFC_QUEUE_REARM);
5815        sli4_hba->sli4_write_cq_db(phba, sli4_hba->els_cq, 0, LPFC_QUEUE_REARM);
5816        if (sli4_hba->nvmels_cq)
5817                sli4_hba->sli4_write_cq_db(phba, sli4_hba->nvmels_cq, 0,
5818                                           LPFC_QUEUE_REARM);
5819
5820        if (sli4_hba->hdwq) {
5821                /* Loop thru all Hardware Queues */
5822                for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
5823                        qp = &sli4_hba->hdwq[qidx];
5824                        /* ARM the corresponding CQ */
5825                        sli4_hba->sli4_write_cq_db(phba, qp->io_cq, 0,
5826                                                LPFC_QUEUE_REARM);
5827                }
5828
5829                /* Loop thru all IRQ vectors */
5830                for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) {
5831                        eq = sli4_hba->hba_eq_hdl[qidx].eq;
5832                        /* ARM the corresponding EQ */
5833                        sli4_hba->sli4_write_eq_db(phba, eq,
5834                                                   0, LPFC_QUEUE_REARM);
5835                }
5836        }
5837
5838        if (phba->nvmet_support) {
5839                for (qidx = 0; qidx < phba->cfg_nvmet_mrq; qidx++) {
5840                        sli4_hba->sli4_write_cq_db(phba,
5841                                sli4_hba->nvmet_cqset[qidx], 0,
5842                                LPFC_QUEUE_REARM);
5843                }
5844        }
5845}
5846
5847/**
5848 * lpfc_sli4_get_avail_extnt_rsrc - Get available resource extent count.
5849 * @phba: Pointer to HBA context object.
5850 * @type: The resource extent type.
5851 * @extnt_count: buffer to hold port available extent count.
5852 * @extnt_size: buffer to hold element count per extent.
5853 *
5854 * This function queries the port and retrieves the number of available
5855 * extents and their size for a particular extent type.
5856 *
5857 * Returns: 0 if successful.  Nonzero otherwise.
5858 **/
5859int
5860lpfc_sli4_get_avail_extnt_rsrc(struct lpfc_hba *phba, uint16_t type,
5861                               uint16_t *extnt_count, uint16_t *extnt_size)
5862{
5863        int rc = 0;
5864        uint32_t length;
5865        uint32_t mbox_tmo;
5866        struct lpfc_mbx_get_rsrc_extent_info *rsrc_info;
5867        LPFC_MBOXQ_t *mbox;
5868
5869        mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5870        if (!mbox)
5871                return -ENOMEM;
5872
5873        /* Find out how many extents are available for this resource type */
5874        length = (sizeof(struct lpfc_mbx_get_rsrc_extent_info) -
5875                  sizeof(struct lpfc_sli4_cfg_mhdr));
5876        lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
5877                         LPFC_MBOX_OPCODE_GET_RSRC_EXTENT_INFO,
5878                         length, LPFC_SLI4_MBX_EMBED);
5879
5880        /* Send an extents count of 0 - the GET doesn't use it. */
5881        rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, 0, type,
5882                                        LPFC_SLI4_MBX_EMBED);
5883        if (unlikely(rc)) {
5884                rc = -EIO;
5885                goto err_exit;
5886        }
5887
5888        if (!phba->sli4_hba.intr_enable)
5889                rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
5890        else {
5891                mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
5892                rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
5893        }
5894        if (unlikely(rc)) {
5895                rc = -EIO;
5896                goto err_exit;
5897        }
5898
5899        rsrc_info = &mbox->u.mqe.un.rsrc_extent_info;
5900        if (bf_get(lpfc_mbox_hdr_status,
5901                   &rsrc_info->header.cfg_shdr.response)) {
5902                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5903                                "2930 Failed to get resource extents "
5904                                "Status 0x%x Add'l Status 0x%x\n",
5905                                bf_get(lpfc_mbox_hdr_status,
5906                                       &rsrc_info->header.cfg_shdr.response),
5907                                bf_get(lpfc_mbox_hdr_add_status,
5908                                       &rsrc_info->header.cfg_shdr.response));
5909                rc = -EIO;
5910                goto err_exit;
5911        }
5912
5913        *extnt_count = bf_get(lpfc_mbx_get_rsrc_extent_info_cnt,
5914                              &rsrc_info->u.rsp);
5915        *extnt_size = bf_get(lpfc_mbx_get_rsrc_extent_info_size,
5916                             &rsrc_info->u.rsp);
5917
5918        lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
5919                        "3162 Retrieved extents type-%d from port: count:%d, "
5920                        "size:%d\n", type, *extnt_count, *extnt_size);
5921
5922err_exit:
5923        mempool_free(mbox, phba->mbox_mem_pool);
5924        return rc;
5925}
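/*
 * Editor's note: a hedged usage sketch for the routine above.  Both values
 * are returned through the out parameters; on a nonzero return neither is
 * valid.  The XRI type is chosen arbitrarily for illustration.
 */
#if 0   /* illustrative fragment, not compiled */
        uint16_t ext_cnt, ext_size;
        uint32_t total_xri;

        if (!lpfc_sli4_get_avail_extnt_rsrc(phba, LPFC_RSC_TYPE_FCOE_XRI,
                                            &ext_cnt, &ext_size))
                /* the port exposes ext_cnt extents of ext_size XRIs each */
                total_xri = (uint32_t)ext_cnt * ext_size;
#endif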
5926
5927/**
5928 * lpfc_sli4_chk_avail_extnt_rsrc - Check for available SLI4 resource extents.
5929 * @phba: Pointer to HBA context object.
5930 * @type: The extent type to check.
5931 *
5932 * This function reads the current available extents from the port and checks
5933 * if the extent count or extent size has changed since the last access.
5934 * Callers use this routine after a port reset to determine whether the
5935 * extents need to be reprovisioned.
5936 *
5937 * Returns:
5938 *   negative: an error was encountered.
5939 *   1: Extent count or size has changed.
5940 *   0: No changes.
5941 **/
5942static int
5943lpfc_sli4_chk_avail_extnt_rsrc(struct lpfc_hba *phba, uint16_t type)
5944{
5945        uint16_t curr_ext_cnt, rsrc_ext_cnt;
5946        uint16_t size_diff, rsrc_ext_size;
5947        int rc = 0;
5948        struct lpfc_rsrc_blks *rsrc_entry;
5949        struct list_head *rsrc_blk_list = NULL;
5950
5951        size_diff = 0;
5952        curr_ext_cnt = 0;
5953        rc = lpfc_sli4_get_avail_extnt_rsrc(phba, type,
5954                                            &rsrc_ext_cnt,
5955                                            &rsrc_ext_size);
5956        if (unlikely(rc))
5957                return -EIO;
5958
5959        switch (type) {
5960        case LPFC_RSC_TYPE_FCOE_RPI:
5961                rsrc_blk_list = &phba->sli4_hba.lpfc_rpi_blk_list;
5962                break;
5963        case LPFC_RSC_TYPE_FCOE_VPI:
5964                rsrc_blk_list = &phba->lpfc_vpi_blk_list;
5965                break;
5966        case LPFC_RSC_TYPE_FCOE_XRI:
5967                rsrc_blk_list = &phba->sli4_hba.lpfc_xri_blk_list;
5968                break;
5969        case LPFC_RSC_TYPE_FCOE_VFI:
5970                rsrc_blk_list = &phba->sli4_hba.lpfc_vfi_blk_list;
5971                break;
5972        default:
5973                return -EIO;    /* unknown type: rsrc_blk_list stays NULL */
5974        }
5975
5976        list_for_each_entry(rsrc_entry, rsrc_blk_list, list) {
5977                curr_ext_cnt++;
5978                if (rsrc_entry->rsrc_size != rsrc_ext_size)
5979                        size_diff++;
5980        }
5981
5982        if (curr_ext_cnt != rsrc_ext_cnt || size_diff != 0)
5983                rc = 1;
5984
5985        return rc;
5986}
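/*
 * Editor's note: callers of the routine above must handle all three
 * outcomes; a hedged sketch of the expected post-reset pattern
 * (reprovision_extents() is a hypothetical helper):
 */
#if 0   /* illustrative fragment, not compiled */
        rc = lpfc_sli4_chk_avail_extnt_rsrc(phba, LPFC_RSC_TYPE_FCOE_RPI);
        if (rc < 0)
                return rc;      /* mailbox or port error */
        if (rc == 1)
                reprovision_extents(phba);      /* dealloc + realloc */
        /* rc == 0: the existing provisioning is still valid */
#endif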
5987
5988/**
5989 * lpfc_sli4_cfg_post_extnts - Post a resource-extent allocation request
5990 * @phba: Pointer to HBA context object.
5991 * @extnt_cnt: number of extents to allocate.
5992 * @type: the extent type (rpi, xri, vfi, vpi).
5993 * @emb: set on return to LPFC_SLI4_MBX_EMBED or LPFC_SLI4_MBX_NEMBED.
5994 * @mbox: pointer to the caller's allocated mailbox structure.
5995 *
5996 * This function executes the extents allocation request.  It also
5997 * takes care of the amount of memory needed to allocate or get the
5998 * allocated extents. It is the caller's responsibility to evaluate
5999 * the response.
6000 *
6001 * Returns:
6002 *   negative error code: describes the condition found.
6003 *   0: successful.
6004 **/
6005static int
6006lpfc_sli4_cfg_post_extnts(struct lpfc_hba *phba, uint16_t extnt_cnt,
6007                          uint16_t type, bool *emb, LPFC_MBOXQ_t *mbox)
6008{
6009        int rc = 0;
6010        uint32_t req_len;
6011        uint32_t emb_len;
6012        uint32_t alloc_len, mbox_tmo;
6013
6014        /* Calculate the total requested length of the dma memory */
6015        req_len = extnt_cnt * sizeof(uint16_t);
6016
6017        /*
6018         * Calculate the size of an embedded mailbox.  The uint32_t
6019         * accounts for the extent-specific word.
6020         */
6021        emb_len = sizeof(MAILBOX_t) - sizeof(struct mbox_header) -
6022                sizeof(uint32_t);
6023
6024        /*
6025         * Presume the allocation and response will fit into an embedded
6026         * mailbox.  If not true, reconfigure to a non-embedded mailbox.
6027         */
6028        *emb = LPFC_SLI4_MBX_EMBED;
6029        if (req_len > emb_len) {
6030                req_len = extnt_cnt * sizeof(uint16_t) +
6031                        sizeof(union lpfc_sli4_cfg_shdr) +
6032                        sizeof(uint32_t);
6033                *emb = LPFC_SLI4_MBX_NEMBED;
6034        }
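        /*
         * Editor's note: each extent id is a uint16_t, so the embedded form
         * holds at most emb_len / sizeof(uint16_t) ids.  If, say, emb_len
         * worked out to 200 bytes (hypothetical), a request for more than
         * 100 extents would fall back to the non-embedded SGE form.
         */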
6035
6036        alloc_len = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
6037                                     LPFC_MBOX_OPCODE_ALLOC_RSRC_EXTENT,
6038                                     req_len, *emb);
6039        if (alloc_len < req_len) {
6040                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6041                        "2982 Allocated DMA memory size (x%x) is "
6042                        "less than the requested DMA memory "
6043                        "size (x%x)\n", alloc_len, req_len);
6044                return -ENOMEM;
6045        }
6046        rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, extnt_cnt, type, *emb);
6047        if (unlikely(rc))
6048                return -EIO;
6049
6050        if (!phba->sli4_hba.intr_enable)
6051                rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
6052        else {
6053                mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
6054                rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
6055        }
6056
6057        if (unlikely(rc))
6058                rc = -EIO;
6059        return rc;
6060}
6061
6062/**
6063 * lpfc_sli4_alloc_extent - Allocate an SLI4 resource extent.
6064 * @phba: Pointer to HBA context object.
6065 * @type:  The resource extent type to allocate.
6066 *
6067 * This function allocates all available extents for the specified
6068 * resource type and initializes the driver's id bookkeeping for them.
6069 **/
6070static int
6071lpfc_sli4_alloc_extent(struct lpfc_hba *phba, uint16_t type)
6072{
6073        bool emb = false;
6074        uint16_t rsrc_id_cnt, rsrc_cnt, rsrc_size;
6075        uint16_t rsrc_id, rsrc_start, j, k;
6076        uint16_t *ids;
6077        int i, rc;
6078        unsigned long longs;
6079        unsigned long *bmask;
6080        struct lpfc_rsrc_blks *rsrc_blks;
6081        LPFC_MBOXQ_t *mbox;
6082        uint32_t length;
6083        struct lpfc_id_range *id_array = NULL;
6084        void *virtaddr = NULL;
6085        struct lpfc_mbx_nembed_rsrc_extent *n_rsrc;
6086        struct lpfc_mbx_alloc_rsrc_extents *rsrc_ext;
6087        struct list_head *ext_blk_list;
6088
6089        rc = lpfc_sli4_get_avail_extnt_rsrc(phba, type,
6090                                            &rsrc_cnt,
6091                                            &rsrc_size);
6092        if (unlikely(rc))
6093                return -EIO;
6094
6095        if ((rsrc_cnt == 0) || (rsrc_size == 0)) {
6096                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6097                        "3009 No available Resource Extents "
6098                        "for resource type 0x%x: Count: 0x%x, "
6099                        "Size 0x%x\n", type, rsrc_cnt,
6100                        rsrc_size);
6101                return -ENOMEM;
6102        }
6103
6104        lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_INIT | LOG_SLI,
6105                        "2903 Post resource extents type-0x%x: "
6106                        "count:%d, size %d\n", type, rsrc_cnt, rsrc_size);
6107
6108        mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6109        if (!mbox)
6110                return -ENOMEM;
6111
6112        rc = lpfc_sli4_cfg_post_extnts(phba, rsrc_cnt, type, &emb, mbox);
6113        if (unlikely(rc)) {
6114                rc = -EIO;
6115                goto err_exit;
6116        }
6117
6118        /*
6119         * Figure out where the response is located.  Then get local pointers
6120         * to the response data.  The port does not guarantee honoring
6121         * the requested extent count, so update the local variable with
6122         * the count actually allocated by the port.
6123         */
6124        if (emb == LPFC_SLI4_MBX_EMBED) {
6125                rsrc_ext = &mbox->u.mqe.un.alloc_rsrc_extents;
6126                id_array = &rsrc_ext->u.rsp.id[0];
6127                rsrc_cnt = bf_get(lpfc_mbx_rsrc_cnt, &rsrc_ext->u.rsp);
6128        } else {
6129                virtaddr = mbox->sge_array->addr[0];
6130                n_rsrc = (struct lpfc_mbx_nembed_rsrc_extent *) virtaddr;
6131                rsrc_cnt = bf_get(lpfc_mbx_rsrc_cnt, n_rsrc);
6132                id_array = &n_rsrc->id;
6133        }
6134
6135        longs = ((rsrc_cnt * rsrc_size) + BITS_PER_LONG - 1) / BITS_PER_LONG;
6136        rsrc_id_cnt = rsrc_cnt * rsrc_size;
6137
6138        /*
6139         * Based on the resource size and count, correct the base and max
6140         * resource values.
6141         */
6142        length = sizeof(struct lpfc_rsrc_blks);
6143        switch (type) {
6144        case LPFC_RSC_TYPE_FCOE_RPI:
6145                phba->sli4_hba.rpi_bmask = kcalloc(longs,
6146                                                   sizeof(unsigned long),
6147                                                   GFP_KERNEL);
6148                if (unlikely(!phba->sli4_hba.rpi_bmask)) {
6149                        rc = -ENOMEM;
6150                        goto err_exit;
6151                }
6152                phba->sli4_hba.rpi_ids = kcalloc(rsrc_id_cnt,
6153                                                 sizeof(uint16_t),
6154                                                 GFP_KERNEL);
6155                if (unlikely(!phba->sli4_hba.rpi_ids)) {
6156                        kfree(phba->sli4_hba.rpi_bmask);
6157                        rc = -ENOMEM;
6158                        goto err_exit;
6159                }
6160
6161                /*
6162                 * The next_rpi was initialized with the maximum available
6163                 * count but the port may allocate a smaller number.  Catch
6164                 * that case and update the next_rpi.
6165                 */
6166                phba->sli4_hba.next_rpi = rsrc_id_cnt;
6167
6168                /* Initialize local ptrs for common extent processing later. */
6169                bmask = phba->sli4_hba.rpi_bmask;
6170                ids = phba->sli4_hba.rpi_ids;
6171                ext_blk_list = &phba->sli4_hba.lpfc_rpi_blk_list;
6172                break;
6173        case LPFC_RSC_TYPE_FCOE_VPI:
6174                phba->vpi_bmask = kcalloc(longs, sizeof(unsigned long),
6175                                          GFP_KERNEL);
6176                if (unlikely(!phba->vpi_bmask)) {
6177                        rc = -ENOMEM;
6178                        goto err_exit;
6179                }
6180                phba->vpi_ids = kcalloc(rsrc_id_cnt, sizeof(uint16_t),
6181                                         GFP_KERNEL);
6182                if (unlikely(!phba->vpi_ids)) {
6183                        kfree(phba->vpi_bmask);
6184                        rc = -ENOMEM;
6185                        goto err_exit;
6186                }
6187
6188                /* Initialize local ptrs for common extent processing later. */
6189                bmask = phba->vpi_bmask;
6190                ids = phba->vpi_ids;
6191                ext_blk_list = &phba->lpfc_vpi_blk_list;
6192                break;
6193        case LPFC_RSC_TYPE_FCOE_XRI:
6194                phba->sli4_hba.xri_bmask = kcalloc(longs,
6195                                                   sizeof(unsigned long),
6196                                                   GFP_KERNEL);
6197                if (unlikely(!phba->sli4_hba.xri_bmask)) {
6198                        rc = -ENOMEM;
6199                        goto err_exit;
6200                }
6201                phba->sli4_hba.max_cfg_param.xri_used = 0;
6202                phba->sli4_hba.xri_ids = kcalloc(rsrc_id_cnt,
6203                                                 sizeof(uint16_t),
6204                                                 GFP_KERNEL);
6205                if (unlikely(!phba->sli4_hba.xri_ids)) {
6206                        kfree(phba->sli4_hba.xri_bmask);
6207                        rc = -ENOMEM;
6208                        goto err_exit;
6209                }
6210
6211                /* Initialize local ptrs for common extent processing later. */
6212                bmask = phba->sli4_hba.xri_bmask;
6213                ids = phba->sli4_hba.xri_ids;
6214                ext_blk_list = &phba->sli4_hba.lpfc_xri_blk_list;
6215                break;
6216        case LPFC_RSC_TYPE_FCOE_VFI:
6217                phba->sli4_hba.vfi_bmask = kcalloc(longs,
6218                                                   sizeof(unsigned long),
6219                                                   GFP_KERNEL);
6220                if (unlikely(!phba->sli4_hba.vfi_bmask)) {
6221                        rc = -ENOMEM;
6222                        goto err_exit;
6223                }
6224                phba->sli4_hba.vfi_ids = kcalloc(rsrc_id_cnt,
6225                                                 sizeof(uint16_t),
6226                                                 GFP_KERNEL);
6227                if (unlikely(!phba->sli4_hba.vfi_ids)) {
6228                        kfree(phba->sli4_hba.vfi_bmask);
6229                        rc = -ENOMEM;
6230                        goto err_exit;
6231                }
6232
6233                /* Initialize local ptrs for common extent processing later. */
6234                bmask = phba->sli4_hba.vfi_bmask;
6235                ids = phba->sli4_hba.vfi_ids;
6236                ext_blk_list = &phba->sli4_hba.lpfc_vfi_blk_list;
6237                break;
6238        default:
6239                /* Unsupported Opcode.  Fail call. */
6240                id_array = NULL;
6241                bmask = NULL;
6242                ids = NULL;
6243                ext_blk_list = NULL;
6244                rc = -EIO;      /* don't return success for an unknown type */
                goto err_exit;
6245        }
6246
6247        /*
6248         * Complete initializing the extent configuration with the
6249         * allocated ids assigned to this function.  The bitmask serves
6250         * as an index into the array and manages the available ids.  The
6251         * array just stores the ids communicated to the port via the wqes.
6252         */
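        /*
         * Editor's note (illustrative): the response packs two 16-bit
         * extent start ids per 32-bit word.  With rsrc_cnt = 3 and
         * rsrc_size = 64, for example, id_array[0] carries the starts of
         * extents 0 and 1, id_array[1] carries extent 2, and each start
         * expands to 64 consecutive ids in the ids[] array below.
         */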
6253        for (i = 0, j = 0, k = 0; i < rsrc_cnt; i++) {
6254                if ((i % 2) == 0)
6255                        rsrc_id = bf_get(lpfc_mbx_rsrc_id_word4_0,
6256                                         &id_array[k]);
6257                else
6258                        rsrc_id = bf_get(lpfc_mbx_rsrc_id_word4_1,
6259                                         &id_array[k]);
6260
6261                rsrc_blks = kzalloc(length, GFP_KERNEL);
6262                if (unlikely(!rsrc_blks)) {
6263                        rc = -ENOMEM;
6264                        kfree(bmask);
6265                        kfree(ids);
6266                        goto err_exit;
6267                }
6268                rsrc_blks->rsrc_start = rsrc_id;
6269                rsrc_blks->rsrc_size = rsrc_size;
6270                list_add_tail(&rsrc_blks->list, ext_blk_list);
6271                rsrc_start = rsrc_id;
6272                if ((type == LPFC_RSC_TYPE_FCOE_XRI) && (j == 0)) {
6273                        phba->sli4_hba.io_xri_start = rsrc_start +
6274                                lpfc_sli4_get_iocb_cnt(phba);
6275                }
6276
6277                while (rsrc_id < (rsrc_start + rsrc_size)) {
6278                        ids[j] = rsrc_id;
6279                        rsrc_id++;
6280                        j++;
6281                }
6282                /* Entire word processed.  Get next word.*/
6283                if ((i % 2) == 1)
6284                        k++;
6285        }
6286 err_exit:
6287        lpfc_sli4_mbox_cmd_free(phba, mbox);
6288        return rc;
6289}
6290
6291
6292
6293/**
6294 * lpfc_sli4_dealloc_extent - Deallocate an SLI4 resource extent.
6295 * @phba: Pointer to HBA context object.
6296 * @type: the extent's type.
6297 *
6298 * This function deallocates all extents of a particular resource type.
6299 * SLI4 does not allow deallocating a particular extent range; the port
6300 * releases every extent of the type, and the driver frees its bookkeeping.
6301 **/
6302static int
6303lpfc_sli4_dealloc_extent(struct lpfc_hba *phba, uint16_t type)
6304{
6305        int rc;
6306        uint32_t length, mbox_tmo = 0;
6307        LPFC_MBOXQ_t *mbox;
6308        struct lpfc_mbx_dealloc_rsrc_extents *dealloc_rsrc;
6309        struct lpfc_rsrc_blks *rsrc_blk, *rsrc_blk_next;
6310
6311        mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6312        if (!mbox)
6313                return -ENOMEM;
6314
6315        /*
6316         * This function sends an embedded mailbox because it only sends
6317         * the resource type.  All extents of this type are released by the
6318         * port.
6319         */
6320        length = (sizeof(struct lpfc_mbx_dealloc_rsrc_extents) -
6321                  sizeof(struct lpfc_sli4_cfg_mhdr));
6322        lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
6323                         LPFC_MBOX_OPCODE_DEALLOC_RSRC_EXTENT,
6324                         length, LPFC_SLI4_MBX_EMBED);
6325
6326        /* Send an extents count of 0 - the dealloc doesn't use it. */
6327        rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, 0, type,
6328                                        LPFC_SLI4_MBX_EMBED);
6329        if (unlikely(rc)) {
6330                rc = -EIO;
6331                goto out_free_mbox;
6332        }
6333        if (!phba->sli4_hba.intr_enable)
6334                rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
6335        else {
6336                mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
6337                rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
6338        }
6339        if (unlikely(rc)) {
6340                rc = -EIO;
6341                goto out_free_mbox;
6342        }
6343
6344        dealloc_rsrc = &mbox->u.mqe.un.dealloc_rsrc_extents;
6345        if (bf_get(lpfc_mbox_hdr_status,
6346                   &dealloc_rsrc->header.cfg_shdr.response)) {
6347                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6348                                "2919 Failed to release resource extents "
6349                                "for type %d - Status 0x%x Add'l Status 0x%x. "
6350                                "Resource memory not released.\n",
6351                                type,
6352                                bf_get(lpfc_mbox_hdr_status,
6353                                    &dealloc_rsrc->header.cfg_shdr.response),
6354                                bf_get(lpfc_mbox_hdr_add_status,
6355                                    &dealloc_rsrc->header.cfg_shdr.response));
6356                rc = -EIO;
6357                goto out_free_mbox;
6358        }
6359
6360        /* Release kernel memory resources for the specific type. */
6361        switch (type) {
6362        case LPFC_RSC_TYPE_FCOE_VPI:
6363                kfree(phba->vpi_bmask);
6364                kfree(phba->vpi_ids);
6365                bf_set(lpfc_vpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
6366                list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
6367                                    &phba->lpfc_vpi_blk_list, list) {
6368                        list_del_init(&rsrc_blk->list);
6369                        kfree(rsrc_blk);
6370                }
6371                phba->sli4_hba.max_cfg_param.vpi_used = 0;
6372                break;
6373        case LPFC_RSC_TYPE_FCOE_XRI:
6374                kfree(phba->sli4_hba.xri_bmask);
6375                kfree(phba->sli4_hba.xri_ids);
6376                list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
6377                                    &phba->sli4_hba.lpfc_xri_blk_list, list) {
6378                        list_del_init(&rsrc_blk->list);
6379                        kfree(rsrc_blk);
6380                }
6381                break;
6382        case LPFC_RSC_TYPE_FCOE_VFI:
6383                kfree(phba->sli4_hba.vfi_bmask);
6384                kfree(phba->sli4_hba.vfi_ids);
6385                bf_set(lpfc_vfi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
6386                list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
6387                                    &phba->sli4_hba.lpfc_vfi_blk_list, list) {
6388                        list_del_init(&rsrc_blk->list);
6389                        kfree(rsrc_blk);
6390                }
6391                break;
6392        case LPFC_RSC_TYPE_FCOE_RPI:
6393                /* RPI bitmask and physical id array are cleaned up earlier. */
6394                list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
6395                                    &phba->sli4_hba.lpfc_rpi_blk_list, list) {
6396                        list_del_init(&rsrc_blk->list);
6397                        kfree(rsrc_blk);
6398                }
6399                break;
6400        default:
6401                break;
6402        }
6403
6404        bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
6405
6406 out_free_mbox:
6407        mempool_free(mbox, phba->mbox_mem_pool);
6408        return rc;
6409}
6410
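/**
 * lpfc_set_features - Build a SET_FEATURES mailbox command
 * @phba: Pointer to HBA context object.
 * @mbox: pointer to the caller's allocated mailbox structure.
 * @feature: the feature to request (LPFC_SET_UE_RECOVERY,
 *           LPFC_SET_MDS_DIAGS or LPFC_SET_DUAL_DUMP).
 *
 * This routine only prepares the embedded SET_FEATURES request in @mbox;
 * issuing the command and checking its completion status are left to the
 * caller.  (Editor's note: kernel-doc added for consistency with the
 * surrounding routines.)
 **/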
6411static void
6412lpfc_set_features(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox,
6413                  uint32_t feature)
6414{
6415        uint32_t len;
6416
6417        len = sizeof(struct lpfc_mbx_set_feature) -
6418                sizeof(struct lpfc_sli4_cfg_mhdr);
6419        lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
6420                         LPFC_MBOX_OPCODE_SET_FEATURES, len,
6421                         LPFC_SLI4_MBX_EMBED);
6422
6423        switch (feature) {
6424        case LPFC_SET_UE_RECOVERY:
6425                bf_set(lpfc_mbx_set_feature_UER,
6426                       &mbox->u.mqe.un.set_feature, 1);
6427                mbox->u.mqe.un.set_feature.feature = LPFC_SET_UE_RECOVERY;
6428                mbox->u.mqe.un.set_feature.param_len = 8;
6429                break;
6430        case LPFC_SET_MDS_DIAGS:
6431                bf_set(lpfc_mbx_set_feature_mds,
6432                       &mbox->u.mqe.un.set_feature, 1);
6433                bf_set(lpfc_mbx_set_feature_mds_deep_loopbk,
6434                       &mbox->u.mqe.un.set_feature, 1);
6435                mbox->u.mqe.un.set_feature.feature = LPFC_SET_MDS_DIAGS;
6436                mbox->u.mqe.un.set_feature.param_len = 8;
6437                break;
6438        case LPFC_SET_DUAL_DUMP:
6439                bf_set(lpfc_mbx_set_feature_dd,
6440                       &mbox->u.mqe.un.set_feature, LPFC_ENABLE_DUAL_DUMP);
6441                bf_set(lpfc_mbx_set_feature_ddquery,
6442                       &mbox->u.mqe.un.set_feature, 0);
6443                mbox->u.mqe.un.set_feature.feature = LPFC_SET_DUAL_DUMP;
6444                mbox->u.mqe.un.set_feature.param_len = 4;
6445                break;
6446        }
6447
6448        return;
6449}
6450
6451/**
6452 * lpfc_ras_stop_fwlog - Disable FW logging by the adapter
6453 * @phba: Pointer to HBA context object.
6454 *
6455 * Disable FW logging into host memory on the adapter.  This must be
6456 * done before reading the logs from host memory.
6457 **/
6458void
6459lpfc_ras_stop_fwlog(struct lpfc_hba *phba)
6460{
6461        struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
6462
6463        spin_lock_irq(&phba->hbalock);
6464        ras_fwlog->state = INACTIVE;
6465        spin_unlock_irq(&phba->hbalock);
6466
6467        /* Disable FW logging to host memory */
6468        writel(LPFC_CTL_PDEV_CTL_DDL_RAS,
6469               phba->sli4_hba.conf_regs_memmap_p + LPFC_CTL_PDEV_CTL_OFFSET);
6470
6471        /* Wait 10ms for firmware to stop using DMA buffer */
6472        usleep_range(10 * 1000, 20 * 1000);
6473}
6474
6475/**
6476 * lpfc_sli4_ras_dma_free - Free memory allocated for FW logging.
6477 * @phba: Pointer to HBA context object.
6478 *
6479 * This function is called to free memory allocated for RAS FW logging
6480 * support in the driver.
6481 **/
6482void
6483lpfc_sli4_ras_dma_free(struct lpfc_hba *phba)
6484{
6485        struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
6486        struct lpfc_dmabuf *dmabuf, *next;
6487
6488        if (!list_empty(&ras_fwlog->fwlog_buff_list)) {
6489                list_for_each_entry_safe(dmabuf, next,
6490                                    &ras_fwlog->fwlog_buff_list,
6491                                    list) {
6492                        list_del(&dmabuf->list);
6493                        dma_free_coherent(&phba->pcidev->dev,
6494                                          LPFC_RAS_MAX_ENTRY_SIZE,
6495                                          dmabuf->virt, dmabuf->phys);
6496                        kfree(dmabuf);
6497                }
6498        }
6499
6500        if (ras_fwlog->lwpd.virt) {
6501                dma_free_coherent(&phba->pcidev->dev,
6502                                  sizeof(uint32_t) * 2,
6503                                  ras_fwlog->lwpd.virt,
6504                                  ras_fwlog->lwpd.phys);
6505                ras_fwlog->lwpd.virt = NULL;
6506        }
6507
6508        spin_lock_irq(&phba->hbalock);
6509        ras_fwlog->state = INACTIVE;
6510        spin_unlock_irq(&phba->hbalock);
6511}
6512
6513/**
6514 * lpfc_sli4_ras_dma_alloc - Allocate DMA memory for FW logging support
6515 * @phba: Pointer to HBA context object.
6516 * @fwlog_buff_count: Count of buffers to be created.
6517 *
6518 * This routine allocates DMA memory for the Log Write Position Data (LWPD)
6519 * and for the log buffers posted to the adapter for FW log updates.
6520 * The buffer count is calculated from the ras_fwlog_buffsize module
6521 * parameter; the size of each buffer posted to FW is 64KB.
6522 **/
6524static int
6525lpfc_sli4_ras_dma_alloc(struct lpfc_hba *phba,
6526                        uint32_t fwlog_buff_count)
6527{
6528        struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
6529        struct lpfc_dmabuf *dmabuf;
6530        int rc = 0, i = 0;
6531
6532        /* Initialize List */
6533        INIT_LIST_HEAD(&ras_fwlog->fwlog_buff_list);
6534
6535        /* Allocate memory for the LWPD */
6536        ras_fwlog->lwpd.virt = dma_alloc_coherent(&phba->pcidev->dev,
6537                                            sizeof(uint32_t) * 2,
6538                                            &ras_fwlog->lwpd.phys,
6539                                            GFP_KERNEL);
6540        if (!ras_fwlog->lwpd.virt) {
6541                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6542                                "6185 LWPD Memory Alloc Failed\n");
6543
6544                return -ENOMEM;
6545        }
6546
6547        ras_fwlog->fw_buffcount = fwlog_buff_count;
6548        for (i = 0; i < ras_fwlog->fw_buffcount; i++) {
6549                dmabuf = kzalloc(sizeof(struct lpfc_dmabuf),
6550                                 GFP_KERNEL);
6551                if (!dmabuf) {
6552                        rc = -ENOMEM;
6553                        lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6554                                        "6186 Memory Alloc failed FW logging\n");
6555                        goto free_mem;
6556                }
6557
6558                dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
6559                                                  LPFC_RAS_MAX_ENTRY_SIZE,
6560                                                  &dmabuf->phys, GFP_KERNEL);
6561                if (!dmabuf->virt) {
6562                        kfree(dmabuf);
6563                        rc = -ENOMEM;
6564                        lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6565                                        "6187 DMA Alloc Failed FW logging\n");
6566                        goto free_mem;
6567                }
6568                dmabuf->buffer_tag = i;
6569                list_add_tail(&dmabuf->list, &ras_fwlog->fwlog_buff_list);
6570        }
6571
6572free_mem:
6573        if (rc)
6574                lpfc_sli4_ras_dma_free(phba);
6575
6576        return rc;
6577}
6578
6579/**
6580 * lpfc_sli4_ras_mbox_cmpl - Completion handler for RAS MBX command
6581 * @phba: pointer to lpfc hba data structure.
6582 * @pmb: pointer to the driver internal queue element for mailbox command.
6583 *
6584 * Completion handler for driver's RAS MBX command to the device.
6585 **/
6586static void
6587lpfc_sli4_ras_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
6588{
6589        MAILBOX_t *mb;
6590        union lpfc_sli4_cfg_shdr *shdr;
6591        uint32_t shdr_status, shdr_add_status;
6592        struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
6593
6594        mb = &pmb->u.mb;
6595
6596        shdr = (union lpfc_sli4_cfg_shdr *)
6597                &pmb->u.mqe.un.ras_fwlog.header.cfg_shdr;
6598        shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
6599        shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
6600
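            /*
             * Fail the enablement if either the mailbox status or the SLI4
             * config-header status reports an error.
             */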
6601        if (mb->mbxStatus != MBX_SUCCESS || shdr_status) {
6602                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6603                                "6188 FW LOG mailbox "
6604                                "completed with status x%x add_status x%x,"
6605                                " mbx status x%x\n",
6606                                shdr_status, shdr_add_status, mb->mbxStatus);
6607
6608                ras_fwlog->ras_hwsupport = false;
6609                goto disable_ras;
6610        }
6611
6612        spin_lock_irq(&phba->hbalock);
6613        ras_fwlog->state = ACTIVE;
6614        spin_unlock_irq(&phba->hbalock);
6615        mempool_free(pmb, phba->mbox_mem_pool);
6616
6617        return;
6618
6619disable_ras:
6620        /* Free RAS DMA memory */
6621        lpfc_sli4_ras_dma_free(phba);
6622        mempool_free(pmb, phba->mbox_mem_pool);
6623}
6624
6625/**
6626 * lpfc_sli4_ras_fwlog_init - Initialize memory and post RAS MBX command
6627 * @phba: pointer to lpfc hba data structure.
6628 * @fwlog_level: Logging verbosity level.
6629 * @fwlog_enable: Enable/Disable logging.
6630 *
6631 * Initialize memory and post mailbox command to enable FW logging in host
6632 * memory.
6633 **/
6634int
6635lpfc_sli4_ras_fwlog_init(struct lpfc_hba *phba,
6636                         uint32_t fwlog_level,
6637                         uint32_t fwlog_enable)
6638{
6639        struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
6640        struct lpfc_mbx_set_ras_fwlog *mbx_fwlog = NULL;
6641        struct lpfc_dmabuf *dmabuf;
6642        LPFC_MBOXQ_t *mbox;
6643        uint32_t len = 0, fwlog_buffsize, fwlog_entry_count;
6644        int rc = 0;
6645
6646        spin_lock_irq(&phba->hbalock);
6647        ras_fwlog->state = INACTIVE;
6648        spin_unlock_irq(&phba->hbalock);
6649
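            /*
             * cfg_ras_fwlog_buffsize is expressed in units of
             * LPFC_RAS_MIN_BUFF_POST_SIZE; convert it to bytes and carve it
             * into LPFC_RAS_MAX_ENTRY_SIZE (64K) buffers for posting.
             */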
6650        fwlog_buffsize = (LPFC_RAS_MIN_BUFF_POST_SIZE *
6651                          phba->cfg_ras_fwlog_buffsize);
6652        fwlog_entry_count = (fwlog_buffsize/LPFC_RAS_MAX_ENTRY_SIZE);
6653
6654        /*
6655         * If re-enabling FW logging support, reuse the previously
6656         * allocated DMA buffers when posting the MBX command.
6657         */
6658        if (!ras_fwlog->lwpd.virt) {
6659                rc = lpfc_sli4_ras_dma_alloc(phba, fwlog_entry_count);
6660                if (rc) {
6661                        lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6662                                        "6189 FW Log Memory Allocation Failed");
6663                        return rc;
6664                }
6665        }
6666
6667        /* Setup Mailbox command */
6668        mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6669        if (!mbox) {
6670                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6671                                "6190 RAS MBX Alloc Failed");
6672                rc = -ENOMEM;
6673                goto mem_free;
6674        }
6675
6676        ras_fwlog->fw_loglevel = fwlog_level;
6677        len = (sizeof(struct lpfc_mbx_set_ras_fwlog) -
6678                sizeof(struct lpfc_sli4_cfg_mhdr));
6679
6680        lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_LOWLEVEL,
6681                         LPFC_MBOX_OPCODE_SET_DIAG_LOG_OPTION,
6682                         len, LPFC_SLI4_MBX_EMBED);
6683
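            /*
             * Point at the embedded request and program the logging options:
             * enable flag, verbosity level, buffer count, and per-buffer
             * size in SLI4 pages.
             */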
6684        mbx_fwlog = (struct lpfc_mbx_set_ras_fwlog *)&mbox->u.mqe.un.ras_fwlog;
6685        bf_set(lpfc_fwlog_enable, &mbx_fwlog->u.request,
6686               fwlog_enable);
6687        bf_set(lpfc_fwlog_loglvl, &mbx_fwlog->u.request,
6688               ras_fwlog->fw_loglevel);
6689        bf_set(lpfc_fwlog_buffcnt, &mbx_fwlog->u.request,
6690               ras_fwlog->fw_buffcount);
6691        bf_set(lpfc_fwlog_buffsz, &mbx_fwlog->u.request,
6692               LPFC_RAS_MAX_ENTRY_SIZE/SLI4_PAGE_SIZE);
6693
6694        /* Update DMA buffer address */
6695        list_for_each_entry(dmabuf, &ras_fwlog->fwlog_buff_list, list) {
6696                memset(dmabuf->virt, 0, LPFC_RAS_MAX_ENTRY_SIZE);
6697
6698                mbx_fwlog->u.request.buff_fwlog[dmabuf->buffer_tag].addr_lo =
6699                        putPaddrLow(dmabuf->phys);
6700
6701                mbx_fwlog->u.request.buff_fwlog[dmabuf->buffer_tag].addr_hi =
6702                        putPaddrHigh(dmabuf->phys);
6703        }
6704
6705        /* Update LWPD address */
6706        mbx_fwlog->u.request.lwpd.addr_lo = putPaddrLow(ras_fwlog->lwpd.phys);
6707        mbx_fwlog->u.request.lwpd.addr_hi = putPaddrHigh(ras_fwlog->lwpd.phys);
6708
6709        spin_lock_irq(&phba->hbalock);
6710        ras_fwlog->state = REG_INPROGRESS;
6711        spin_unlock_irq(&phba->hbalock);
6712        mbox->vport = phba->pport;
6713        mbox->mbox_cmpl = lpfc_sli4_ras_mbox_cmpl;
6714
6715        rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
6716
6717        if (rc == MBX_NOT_FINISHED) {
6718                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6719                                "6191 FW-Log Mailbox failed. "
6720                                "status %d mbxStatus : x%x", rc,
6721                                bf_get(lpfc_mqe_status, &mbox->u.mqe));
6722                mempool_free(mbox, phba->mbox_mem_pool);
6723                rc = -EIO;
6724                goto mem_free;
6725        } else
6726                rc = 0;
6727mem_free:
6728        if (rc)
6729                lpfc_sli4_ras_dma_free(phba);
6730
6731        return rc;
6732}
6733
6734/**
6735 * lpfc_sli4_ras_setup - Check if RAS supported on the adapter
6736 * @phba: Pointer to HBA context object.
6737 *
6738 * Check if RAS is supported on the adapter and initialize it.
6739 **/
6740void
6741lpfc_sli4_ras_setup(struct lpfc_hba *phba)
6742{
6743        /* Check whether RAS FW logging needs to be enabled */
6744        if (lpfc_check_fwlog_support(phba))
6745                return;
6746
6747        lpfc_sli4_ras_fwlog_init(phba, phba->cfg_ras_fwlog_level,
6748                                 LPFC_RAS_ENABLE_LOGGING);
6749}
6750
6751/**
6752 * lpfc_sli4_alloc_resource_identifiers - Allocate all SLI4 resource identifiers.
6753 * @phba: Pointer to HBA context object.
6754 *
6755 * This function allocates all SLI4 resource identifiers.
6756 **/
6757int
6758lpfc_sli4_alloc_resource_identifiers(struct lpfc_hba *phba)
6759{
6760        int i, rc, error = 0;
6761        uint16_t count, base;
6762        unsigned long longs;
6763
6764        if (!phba->sli4_hba.rpi_hdrs_in_use)
6765                phba->sli4_hba.next_rpi = phba->sli4_hba.max_cfg_param.max_rpi;
6766        if (phba->sli4_hba.extents_in_use) {
6767                /*
6768                 * The port supports resource extents. The XRI, VPI, VFI, RPI
6769                 * resource extent count must be read and allocated before
6770                 * provisioning the resource id arrays.
6771                 */
6772                if (bf_get(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags) ==
6773                    LPFC_IDX_RSRC_RDY) {
6774                        /*
6775                         * Extent-based resources are set - the driver could
6776                         * be in a port reset. Figure out if any corrective
6777                         * actions need to be taken.
6778                         */
6779                        rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
6780                                                 LPFC_RSC_TYPE_FCOE_VFI);
6781                        if (rc != 0)
6782                                error++;
6783                        rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
6784                                                 LPFC_RSC_TYPE_FCOE_VPI);
6785                        if (rc != 0)
6786                                error++;
6787                        rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
6788                                                 LPFC_RSC_TYPE_FCOE_XRI);
6789                        if (rc != 0)
6790                                error++;
6791                        rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
6792                                                 LPFC_RSC_TYPE_FCOE_RPI);
6793                        if (rc != 0)
6794                                error++;
6795
6796                        /*
6797                         * It's possible that the number of resources
6798                         * provided to this port instance changed between
6799                         * resets.  Detect this condition and reallocate
6800                         * resources.  Otherwise, there is no action.
6801                         */
6802                        if (error) {
6803                                lpfc_printf_log(phba, KERN_INFO,
6804                                                LOG_MBOX | LOG_INIT,
6805                                                "2931 Detected extent resource "
6806                                                "change.  Reallocating all "
6807                                                "extents.\n");
6808                                rc = lpfc_sli4_dealloc_extent(phba,
6809                                                 LPFC_RSC_TYPE_FCOE_VFI);
6810                                rc = lpfc_sli4_dealloc_extent(phba,
6811                                                 LPFC_RSC_TYPE_FCOE_VPI);
6812                                rc = lpfc_sli4_dealloc_extent(phba,
6813                                                 LPFC_RSC_TYPE_FCOE_XRI);
6814                                rc = lpfc_sli4_dealloc_extent(phba,
6815                                                 LPFC_RSC_TYPE_FCOE_RPI);
6816                        } else {
6817                                return 0;
                            }
6818                }
6819
6820                rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_VFI);
6821                if (unlikely(rc))
6822                        goto err_exit;
6823
6824                rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_VPI);
6825                if (unlikely(rc))
6826                        goto err_exit;
6827
6828                rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_RPI);
6829                if (unlikely(rc))
6830                        goto err_exit;
6831
6832                rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_XRI);
6833                if (unlikely(rc))
6834                        goto err_exit;
6835                bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags,
6836                       LPFC_IDX_RSRC_RDY);
6837                return rc;
6838        } else {
6839                /*
6840                 * The port does not support resource extents.  The XRI, VPI,
6841                 * VFI, RPI resource ids were determined from READ_CONFIG.
6842                 * Just allocate the bitmasks and provision the resource id
6843                 * arrays.  If a port reset is active, the resources don't
6844                 * need any action - just exit.
6845                 */
6846                if (bf_get(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags) ==
6847                    LPFC_IDX_RSRC_RDY) {
6848                        lpfc_sli4_dealloc_resource_identifiers(phba);
6849                        lpfc_sli4_remove_rpis(phba);
6850                }
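                    /*
                     * For each resource type below, build a bitmask that
                     * tracks free ids (one bit per id) and an id array that
                     * maps a 0-based index to the port's base-relative id.
                     */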
6851                /* RPIs. */
6852                count = phba->sli4_hba.max_cfg_param.max_rpi;
6853                if (count <= 0) {
6854                        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6855                                        "3279 Invalid provisioning of "
6856                                        "rpi:%d\n", count);
6857                        rc = -EINVAL;
6858                        goto err_exit;
6859                }
6860                base = phba->sli4_hba.max_cfg_param.rpi_base;
6861                longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
6862                phba->sli4_hba.rpi_bmask = kcalloc(longs,
6863                                                   sizeof(unsigned long),
6864                                                   GFP_KERNEL);
6865                if (unlikely(!phba->sli4_hba.rpi_bmask)) {
6866                        rc = -ENOMEM;
6867                        goto err_exit;
6868                }
6869                phba->sli4_hba.rpi_ids = kcalloc(count, sizeof(uint16_t),
6870                                                 GFP_KERNEL);
6871                if (unlikely(!phba->sli4_hba.rpi_ids)) {
6872                        rc = -ENOMEM;
6873                        goto free_rpi_bmask;
6874                }
6875
6876                for (i = 0; i < count; i++)
6877                        phba->sli4_hba.rpi_ids[i] = base + i;
6878
6879                /* VPIs. */
6880                count = phba->sli4_hba.max_cfg_param.max_vpi;
6881                if (count <= 0) {
6882                        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6883                                        "3280 Invalid provisioning of "
6884                                        "vpi:%d\n", count);
6885                        rc = -EINVAL;
6886                        goto free_rpi_ids;
6887                }
6888                base = phba->sli4_hba.max_cfg_param.vpi_base;
6889                longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
6890                phba->vpi_bmask = kcalloc(longs, sizeof(unsigned long),
6891                                          GFP_KERNEL);
6892                if (unlikely(!phba->vpi_bmask)) {
6893                        rc = -ENOMEM;
6894                        goto free_rpi_ids;
6895                }
6896                phba->vpi_ids = kcalloc(count, sizeof(uint16_t),
6897                                        GFP_KERNEL);
6898                if (unlikely(!phba->vpi_ids)) {
6899                        rc = -ENOMEM;
6900                        goto free_vpi_bmask;
6901                }
6902
6903                for (i = 0; i < count; i++)
6904                        phba->vpi_ids[i] = base + i;
6905
6906                /* XRIs. */
6907                count = phba->sli4_hba.max_cfg_param.max_xri;
6908                if (count <= 0) {
6909                        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6910                                        "3281 Invalid provisioning of "
6911                                        "xri:%d\n", count);
6912                        rc = -EINVAL;
6913                        goto free_vpi_ids;
6914                }
6915                base = phba->sli4_hba.max_cfg_param.xri_base;
6916                longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
6917                phba->sli4_hba.xri_bmask = kcalloc(longs,
6918                                                   sizeof(unsigned long),
6919                                                   GFP_KERNEL);
6920                if (unlikely(!phba->sli4_hba.xri_bmask)) {
6921                        rc = -ENOMEM;
6922                        goto free_vpi_ids;
6923                }
6924                phba->sli4_hba.max_cfg_param.xri_used = 0;
6925                phba->sli4_hba.xri_ids = kcalloc(count, sizeof(uint16_t),
6926                                                 GFP_KERNEL);
6927                if (unlikely(!phba->sli4_hba.xri_ids)) {
6928                        rc = -ENOMEM;
6929                        goto free_xri_bmask;
6930                }
6931
6932                for (i = 0; i < count; i++)
6933                        phba->sli4_hba.xri_ids[i] = base + i;
6934
6935                /* VFIs. */
6936                count = phba->sli4_hba.max_cfg_param.max_vfi;
6937                if (count <= 0) {
6938                        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6939                                        "3282 Invalid provisioning of "
6940                                        "vfi:%d\n", count);
6941                        rc = -EINVAL;
6942                        goto free_xri_ids;
6943                }
6944                base = phba->sli4_hba.max_cfg_param.vfi_base;
6945                longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
6946                phba->sli4_hba.vfi_bmask = kcalloc(longs,
6947                                                   sizeof(unsigned long),
6948                                                   GFP_KERNEL);
6949                if (unlikely(!phba->sli4_hba.vfi_bmask)) {
6950                        rc = -ENOMEM;
6951                        goto free_xri_ids;
6952                }
6953                phba->sli4_hba.vfi_ids = kcalloc(count, sizeof(uint16_t),
6954                                                 GFP_KERNEL);
6955                if (unlikely(!phba->sli4_hba.vfi_ids)) {
6956                        rc = -ENOMEM;
6957                        goto free_vfi_bmask;
6958                }
6959
6960                for (i = 0; i < count; i++)
6961                        phba->sli4_hba.vfi_ids[i] = base + i;
6962
6963                /*
6964                 * Mark all resources ready.  An HBA reset doesn't need
6965                 * to redo this initialization.
6966                 */
6967                bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags,
6968                       LPFC_IDX_RSRC_RDY);
6969                return 0;
6970        }
6971
6972 free_vfi_bmask:
6973        kfree(phba->sli4_hba.vfi_bmask);
6974        phba->sli4_hba.vfi_bmask = NULL;
6975 free_xri_ids:
6976        kfree(phba->sli4_hba.xri_ids);
6977        phba->sli4_hba.xri_ids = NULL;
6978 free_xri_bmask:
6979        kfree(phba->sli4_hba.xri_bmask);
6980        phba->sli4_hba.xri_bmask = NULL;
6981 free_vpi_ids:
6982        kfree(phba->vpi_ids);
6983        phba->vpi_ids = NULL;
6984 free_vpi_bmask:
6985        kfree(phba->vpi_bmask);
6986        phba->vpi_bmask = NULL;
6987 free_rpi_ids:
6988        kfree(phba->sli4_hba.rpi_ids);
6989        phba->sli4_hba.rpi_ids = NULL;
6990 free_rpi_bmask:
6991        kfree(phba->sli4_hba.rpi_bmask);
6992        phba->sli4_hba.rpi_bmask = NULL;
6993 err_exit:
6994        return rc;
6995}
6996
6997/**
6998 * lpfc_sli4_dealloc_resource_identifiers - Release all SLI4 resource identifiers.
6999 * @phba: Pointer to HBA context object.
7000 *
7001 * This function frees all SLI4 resource identifiers, whether they were
7002 * allocated as resource extents or provisioned from READ_CONFIG.
7003 **/
7004int
7005lpfc_sli4_dealloc_resource_identifiers(struct lpfc_hba *phba)
7006{
7007        if (phba->sli4_hba.extents_in_use) {
7008                lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_VPI);
7009                lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_RPI);
7010                lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_XRI);
7011                lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_VFI);
7012        } else {
7013                kfree(phba->vpi_bmask);
7014                phba->sli4_hba.max_cfg_param.vpi_used = 0;
7015                kfree(phba->vpi_ids);
7016                bf_set(lpfc_vpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
7017                kfree(phba->sli4_hba.xri_bmask);
7018                kfree(phba->sli4_hba.xri_ids);
7019                kfree(phba->sli4_hba.vfi_bmask);
7020                kfree(phba->sli4_hba.vfi_ids);
7021                bf_set(lpfc_vfi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
7022                bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
7023        }
7024
7025        return 0;
7026}
7027
7028/**
7029 * lpfc_sli4_get_allocated_extnts - Get the port's allocated extents.
7030 * @phba: Pointer to HBA context object.
7031 * @type: The resource extent type.
7032 * @extnt_cnt: buffer to hold port extent count response
7033 * @extnt_size: buffer to hold port extent size response.
7034 *
7035 * This function calls the port to read the host allocated extents
7036 * for a particular type.
7037 **/
7038int
7039lpfc_sli4_get_allocated_extnts(struct lpfc_hba *phba, uint16_t type,
7040                               uint16_t *extnt_cnt, uint16_t *extnt_size)
7041{
7042        bool emb;
7043        int rc = 0;
7044        uint16_t curr_blks = 0;
7045        uint32_t req_len, emb_len;
7046        uint32_t alloc_len, mbox_tmo;
7047        struct list_head *blk_list_head;
7048        struct lpfc_rsrc_blks *rsrc_blk;
7049        LPFC_MBOXQ_t *mbox;
7050        void *virtaddr = NULL;
7051        struct lpfc_mbx_nembed_rsrc_extent *n_rsrc;
7052        struct lpfc_mbx_alloc_rsrc_extents *rsrc_ext;
7053        union  lpfc_sli4_cfg_shdr *shdr;
7054
7055        switch (type) {
7056        case LPFC_RSC_TYPE_FCOE_VPI:
7057                blk_list_head = &phba->lpfc_vpi_blk_list;
7058                break;
7059        case LPFC_RSC_TYPE_FCOE_XRI:
7060                blk_list_head = &phba->sli4_hba.lpfc_xri_blk_list;
7061                break;
7062        case LPFC_RSC_TYPE_FCOE_VFI:
7063                blk_list_head = &phba->sli4_hba.lpfc_vfi_blk_list;
7064                break;
7065        case LPFC_RSC_TYPE_FCOE_RPI:
7066                blk_list_head = &phba->sli4_hba.lpfc_rpi_blk_list;
7067                break;
7068        default:
7069                return -EIO;
7070        }
7071
7072        /* Count the number of extents currently allocated for this type. */
7073        list_for_each_entry(rsrc_blk, blk_list_head, list) {
7074                if (curr_blks == 0) {
7075                        /*
7076                         * The GET_ALLOCATED mailbox does not return the size,
7077                         * just the count.  The size should be just the size
7078                         * stored in the current allocated block and all sizes
7079                         * for an extent type are the same so set the return
7080                         * value now.
7081                         */
7082                        *extnt_size = rsrc_blk->rsrc_size;
7083                }
7084                curr_blks++;
7085        }
7086
7087        /*
7088         * Calculate the size of an embedded mailbox.  The uint32_t
7089         * accounts for the extent-specific word.
7090         */
7091        emb_len = sizeof(MAILBOX_t) - sizeof(struct mbox_header) -
7092                sizeof(uint32_t);
7093
7094        /*
7095         * Presume the allocation and response will fit into an embedded
7096         * mailbox.  If not true, reconfigure to a non-embedded mailbox.
7097         */
7098        emb = LPFC_SLI4_MBX_EMBED;
7099        req_len = curr_blks * sizeof(uint16_t);
7100        if (req_len > emb_len) {
7101                req_len = curr_blks * sizeof(uint16_t) +
7102                        sizeof(union lpfc_sli4_cfg_shdr) +
7103                        sizeof(uint32_t);
7104                emb = LPFC_SLI4_MBX_NEMBED;
7105        }
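            /*
             * A non-embedded request needs one uint16_t per extent plus the
             * config header and the extent-specific word; the embedded form
             * fits entirely in the mailbox payload.
             */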
7106
7107        mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
7108        if (!mbox)
7109                return -ENOMEM;
7110        memset(mbox, 0, sizeof(LPFC_MBOXQ_t));
7111
7112        alloc_len = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
7113                                     LPFC_MBOX_OPCODE_GET_ALLOC_RSRC_EXTENT,
7114                                     req_len, emb);
7115        if (alloc_len < req_len) {
7116                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7117                        "2983 Allocated DMA memory size (x%x) is "
7118                        "less than the requested DMA memory "
7119                        "size (x%x)\n", alloc_len, req_len);
7120                rc = -ENOMEM;
7121                goto err_exit;
7122        }
7123        rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, curr_blks, type, emb);
7124        if (unlikely(rc)) {
7125                rc = -EIO;
7126                goto err_exit;
7127        }
7128
7129        if (!phba->sli4_hba.intr_enable) {
7130                rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
7131        } else {
7132                mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
7133                rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
7134        }
7135
7136        if (unlikely(rc)) {
7137                rc = -EIO;
7138                goto err_exit;
7139        }
7140
7141        /*
7142         * Figure out where the response is located.  Then get local pointers
7143         * to the response data.  The port does not guarantee a response
7144         * for every extent count requested, so update the local variable
7145         * with the allocated count reported by the port.
7146         */
7147        if (emb == LPFC_SLI4_MBX_EMBED) {
7148                rsrc_ext = &mbox->u.mqe.un.alloc_rsrc_extents;
7149                shdr = &rsrc_ext->header.cfg_shdr;
7150                *extnt_cnt = bf_get(lpfc_mbx_rsrc_cnt, &rsrc_ext->u.rsp);
7151        } else {
7152                virtaddr = mbox->sge_array->addr[0];
7153                n_rsrc = (struct lpfc_mbx_nembed_rsrc_extent *) virtaddr;
7154                shdr = &n_rsrc->cfg_shdr;
7155                *extnt_cnt = bf_get(lpfc_mbx_rsrc_cnt, n_rsrc);
7156        }
7157
7158        if (bf_get(lpfc_mbox_hdr_status, &shdr->response)) {
7159                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7160                        "2984 Failed to read allocated resources "
7161                        "for type %d - Status 0x%x Add'l Status 0x%x.\n",
7162                        type,
7163                        bf_get(lpfc_mbox_hdr_status, &shdr->response),
7164                        bf_get(lpfc_mbox_hdr_add_status, &shdr->response));
7165                rc = -EIO;
7166                goto err_exit;
7167        }
7168 err_exit:
7169        lpfc_sli4_mbox_cmd_free(phba, mbox);
7170        return rc;
7171}
7172
7173/**
7174 * lpfc_sli4_repost_sgl_list - Repost the buffer sgl pages as a block
7175 * @phba: pointer to lpfc hba data structure.
7176 * @sgl_list: linked list of sgl buffers to post
7177 * @cnt: number of buffers on the linked list
7178 *
7179 * This routine walks the list of buffers that have been allocated and
7180 * reposts them to the port by using SGL block post. This is needed after a
7181 * pci_function_reset/warm_start or start. It attempts to construct blocks
7182 * of buffer sgls which contain contiguous xris and uses the non-embedded
7183 * SGL block post mailbox command to post them to the port. For a single
7184 * buffer sgl with a non-contiguous xri, if any, it uses the embedded SGL
7185 * post mailbox command for posting.
7186 *
7187 * Returns: the number of XRIs actually posted on success, -EIO on failure.
7188 **/
7189static int
7190lpfc_sli4_repost_sgl_list(struct lpfc_hba *phba,
7191                          struct list_head *sgl_list, int cnt)
7192{
7193        struct lpfc_sglq *sglq_entry = NULL;
7194        struct lpfc_sglq *sglq_entry_next = NULL;
7195        struct lpfc_sglq *sglq_entry_first = NULL;
7196        int status, total_cnt;
7197        int post_cnt = 0, num_posted = 0, block_cnt = 0;
7198        int last_xritag = NO_XRI;
7199        LIST_HEAD(prep_sgl_list);
7200        LIST_HEAD(blck_sgl_list);
7201        LIST_HEAD(allc_sgl_list);
7202        LIST_HEAD(post_sgl_list);
7203        LIST_HEAD(free_sgl_list);
7204
7205        spin_lock_irq(&phba->hbalock);
7206        spin_lock(&phba->sli4_hba.sgl_list_lock);
7207        list_splice_init(sgl_list, &allc_sgl_list);
7208        spin_unlock(&phba->sli4_hba.sgl_list_lock);
7209        spin_unlock_irq(&phba->hbalock);
7210
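            /*
             * Walk the sgls in xritag order, batching runs of contiguous
             * xritags onto blck_sgl_list.  A batch is posted when it reaches
             * LPFC_NEMBED_MBOX_SGL_CNT entries, when a hole in the xritag
             * sequence is found, or at the end of the list.
             */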
7211        total_cnt = cnt;
7212        list_for_each_entry_safe(sglq_entry, sglq_entry_next,
7213                                 &allc_sgl_list, list) {
7214                list_del_init(&sglq_entry->list);
7215                block_cnt++;
7216                if ((last_xritag != NO_XRI) &&
7217                    (sglq_entry->sli4_xritag != last_xritag + 1)) {
7218                        /* a hole in xri block, form a sgl posting block */
7219                        list_splice_init(&prep_sgl_list, &blck_sgl_list);
7220                        post_cnt = block_cnt - 1;
7221                        /* prepare list for next posting block */
7222                        list_add_tail(&sglq_entry->list, &prep_sgl_list);
7223                        block_cnt = 1;
7224                } else {
7225                        /* prepare list for next posting block */
7226                        list_add_tail(&sglq_entry->list, &prep_sgl_list);
7227                        /* enough sgls for non-embed sgl mbox command */
7228                        if (block_cnt == LPFC_NEMBED_MBOX_SGL_CNT) {
7229                                list_splice_init(&prep_sgl_list,
7230                                                 &blck_sgl_list);
7231                                post_cnt = block_cnt;
7232                                block_cnt = 0;
7233                        }
7234                }
7235                num_posted++;
7236
7237                /* keep track of last sgl's xritag */
7238                last_xritag = sglq_entry->sli4_xritag;
7239
7240                /* end of repost sgl list condition for buffers */
7241                if (num_posted == total_cnt) {
7242                        if (post_cnt == 0) {
7243                                list_splice_init(&prep_sgl_list,
7244                                                 &blck_sgl_list);
7245                                post_cnt = block_cnt;
7246                        } else if (block_cnt == 1) {
7247                                status = lpfc_sli4_post_sgl(phba,
7248                                                sglq_entry->phys, 0,
7249                                                sglq_entry->sli4_xritag);
7250                                if (!status) {
7251                                        /* successful, put sgl to posted list */
7252                                        list_add_tail(&sglq_entry->list,
7253                                                      &post_sgl_list);
7254                                } else {
7255                                        /* Failure, put sgl to free list */
7256                                        lpfc_printf_log(phba, KERN_WARNING,
7257                                                LOG_SLI,
7258                                                "3159 Failed to post "
7259                                                "sgl, xritag:x%x\n",
7260                                                sglq_entry->sli4_xritag);
7261                                        list_add_tail(&sglq_entry->list,
7262                                                      &free_sgl_list);
7263                                        total_cnt--;
7264                                }
7265                        }
7266                }
7267
7268                /* continue until a non-embedded page worth of sgls is gathered */
7269                if (post_cnt == 0)
7270                        continue;
7271
7272                /* post the buffer list sgls as a block */
7273                status = lpfc_sli4_post_sgl_list(phba, &blck_sgl_list,
7274                                                 post_cnt);
7275
7276                if (!status) {
7277                        /* success, put sgl list to posted sgl list */
7278                        list_splice_init(&blck_sgl_list, &post_sgl_list);
7279                } else {
7280                        /* Failure, put sgl list to free sgl list */
7281                        sglq_entry_first = list_first_entry(&blck_sgl_list,
7282                                                            struct lpfc_sglq,
7283                                                            list);
7284                        lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
7285                                        "3160 Failed to post sgl-list, "
7286                                        "xritag:x%x-x%x\n",
7287                                        sglq_entry_first->sli4_xritag,
7288                                        (sglq_entry_first->sli4_xritag +
7289                                         post_cnt - 1));
7290                        list_splice_init(&blck_sgl_list, &free_sgl_list);
7291                        total_cnt -= post_cnt;
7292                }
7293
7294                /* don't reset xritag due to hole in xri block */
7295                if (block_cnt == 0)
7296                        last_xritag = NO_XRI;
7297
7298                /* reset sgl post count for next round of posting */
7299                post_cnt = 0;
7300        }
7301
7302        /* free the sgls that failed to post */
7303        lpfc_free_sgl_list(phba, &free_sgl_list);
7304
7305        /* push sgls posted to the available list */
7306        if (!list_empty(&post_sgl_list)) {
7307                spin_lock_irq(&phba->hbalock);
7308                spin_lock(&phba->sli4_hba.sgl_list_lock);
7309                list_splice_init(&post_sgl_list, sgl_list);
7310                spin_unlock(&phba->sli4_hba.sgl_list_lock);
7311                spin_unlock_irq(&phba->hbalock);
7312        } else {
7313                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7314                                "3161 Failure to post sgl to port.\n");
7315                return -EIO;
7316        }
7317
7318        /* return the number of XRIs actually posted */
7319        return total_cnt;
7320}
7321
7322/**
7323 * lpfc_sli4_repost_io_sgl_list - Repost all the allocated nvme buffer sgls
7324 * @phba: pointer to lpfc hba data structure.
7325 *
7326 * This routine walks the list of nvme buffers that have been allocated and
7327 * reposts them to the port by using SGL block post. This is needed after a
7328 * pci_function_reset/warm_start or start. The lpfc_hba_down_post_s4 routine
7329 * is responsible for moving all nvme buffers on the lpfc_abts_nvme_sgl_list
7330 * to the lpfc_io_buf_list. If the repost fails, reject all nvme buffers.
7331 *
7332 * Returns: 0 = success, non-zero failure.
7333 **/
7334static int
7335lpfc_sli4_repost_io_sgl_list(struct lpfc_hba *phba)
7336{
7337        LIST_HEAD(post_nblist);
7338        int num_posted, rc = 0;
7339
7340        /* gather all NVME buffers that need reposting onto a local list */
7341        lpfc_io_buf_flush(phba, &post_nblist);
7342
7343        /* post the list of nvme buffer sgls to port if available */
7344        if (!list_empty(&post_nblist)) {
7345                num_posted = lpfc_sli4_post_io_sgl_list(
7346                        phba, &post_nblist, phba->sli4_hba.io_xri_cnt);
7347                /* failed to post any nvme buffer, return error */
7348                if (num_posted == 0)
7349                        rc = -EIO;
7350        }
7351        return rc;
7352}
7353
7354static void
7355lpfc_set_host_data(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
7356{
7357        uint32_t len;
7358
7359        len = sizeof(struct lpfc_mbx_set_host_data) -
7360                sizeof(struct lpfc_sli4_cfg_mhdr);
7361        lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
7362                         LPFC_MBOX_OPCODE_SET_HOST_DATA, len,
7363                         LPFC_SLI4_MBX_EMBED);
7364
7365        mbox->u.mqe.un.set_host_data.param_id = LPFC_SET_HOST_OS_DRIVER_VERSION;
7366        mbox->u.mqe.un.set_host_data.param_len =
7367                                        LPFC_HOST_OS_DRIVER_VERSION_SIZE;
7368        snprintf(mbox->u.mqe.un.set_host_data.data,
7369                 LPFC_HOST_OS_DRIVER_VERSION_SIZE,
7370                 "Linux %s v"LPFC_DRIVER_VERSION,
7371                 (phba->hba_flag & HBA_FCOE_MODE) ? "FCoE" : "FC");
7372}
7373
7374int
7375lpfc_post_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *hrq,
7376                    struct lpfc_queue *drq, int count, int idx)
7377{
7378        int rc, i;
7379        struct lpfc_rqe hrqe;
7380        struct lpfc_rqe drqe;
7381        struct lpfc_rqb *rqbp;
7382        unsigned long flags;
7383        struct rqb_dmabuf *rqb_buffer;
7384        LIST_HEAD(rqb_buf_list);
7385
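            /*
             * First allocate the new buffers outside hbalock and stage them
             * on a local list; then post each header/data pair to the paired
             * HRQ/DRQ under the lock.
             */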
7386        rqbp = hrq->rqbp;
7387        for (i = 0; i < count; i++) {
7388                spin_lock_irqsave(&phba->hbalock, flags);
7389                /* If the RQ is already full, don't bother */
7390                if (rqbp->buffer_count + i >= rqbp->entry_count - 1) {
7391                        spin_unlock_irqrestore(&phba->hbalock, flags);
7392                        break;
7393                }
7394                spin_unlock_irqrestore(&phba->hbalock, flags);
7395
7396                rqb_buffer = rqbp->rqb_alloc_buffer(phba);
7397                if (!rqb_buffer)
7398                        break;
7399                rqb_buffer->hrq = hrq;
7400                rqb_buffer->drq = drq;
7401                rqb_buffer->idx = idx;
7402                list_add_tail(&rqb_buffer->hbuf.list, &rqb_buf_list);
7403        }
7404
7405        spin_lock_irqsave(&phba->hbalock, flags);
7406        while (!list_empty(&rqb_buf_list)) {
7407                list_remove_head(&rqb_buf_list, rqb_buffer, struct rqb_dmabuf,
7408                                 hbuf.list);
7409
7410                hrqe.address_lo = putPaddrLow(rqb_buffer->hbuf.phys);
7411                hrqe.address_hi = putPaddrHigh(rqb_buffer->hbuf.phys);
7412                drqe.address_lo = putPaddrLow(rqb_buffer->dbuf.phys);
7413                drqe.address_hi = putPaddrHigh(rqb_buffer->dbuf.phys);
7414                rc = lpfc_sli4_rq_put(hrq, drq, &hrqe, &drqe);
7415                if (rc < 0) {
7416                        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7417                                        "6421 Cannot post to HRQ %d: %x %x %x "
7418                                        "DRQ %x %x\n",
7419                                        hrq->queue_id,
7420                                        hrq->host_index,
7421                                        hrq->hba_index,
7422                                        hrq->entry_count,
7423                                        drq->host_index,
7424                                        drq->hba_index);
7425                        rqbp->rqb_free_buffer(phba, rqb_buffer);
7426                } else {
7427                        list_add_tail(&rqb_buffer->hbuf.list,
7428                                      &rqbp->rqb_buffer_list);
7429                        rqbp->buffer_count++;
7430                }
7431        }
7432        spin_unlock_irqrestore(&phba->hbalock, flags);
7433        return 1;
7434}
7435
7436/**
7437 * lpfc_init_idle_stat_hb - Initialize idle_stat tracking
7438 * @phba: pointer to lpfc hba data structure.
7439 *
7440 * This routine initializes the per-cq idle_stat to dynamically dictate
7441 * polling decisions.
7442 *
7443 * Return codes:
7444 *   None
7445 **/
7446static void lpfc_init_idle_stat_hb(struct lpfc_hba *phba)
7447{
7448        int i;
7449        struct lpfc_sli4_hdw_queue *hdwq;
7450        struct lpfc_queue *cq;
7451        struct lpfc_idle_stat *idle_stat;
7452        u64 wall;
7453
7454        for_each_present_cpu(i) {
7455                hdwq = &phba->sli4_hba.hdwq[phba->sli4_hba.cpu_map[i].hdwq];
7456                cq = hdwq->io_cq;
7457
7458                /* Skip if we've already handled this cq's primary CPU */
7459                if (cq->chann != i)
7460                        continue;
7461
7462                idle_stat = &phba->sli4_hba.idle_stat[i];
7463
7464                idle_stat->prev_idle = get_cpu_idle_time(i, &wall, 1);
7465                idle_stat->prev_wall = wall;
7466
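                    /*
                     * NVMET completions are serviced from the workqueue;
                     * initiator-mode CQs start in IRQ poll mode so the
                     * delayed idle_stat work below can adjust polling
                     * based on per-CPU idleness.
                     */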
7467                if (phba->nvmet_support)
7468                        cq->poll_mode = LPFC_QUEUE_WORK;
7469                else
7470                        cq->poll_mode = LPFC_IRQ_POLL;
7471        }
7472
7473        if (!phba->nvmet_support)
7474                schedule_delayed_work(&phba->idle_stat_delay_work,
7475                                      msecs_to_jiffies(LPFC_IDLE_STAT_DELAY));
7476}
7477
7478static void lpfc_sli4_dip(struct lpfc_hba *phba)
7479{
7480        uint32_t if_type;
7481
7482        if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
7483        if (if_type == LPFC_SLI_INTF_IF_TYPE_2 ||
7484            if_type == LPFC_SLI_INTF_IF_TYPE_6) {
7485                struct lpfc_register reg_data;
7486
7487                if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
7488                               &reg_data.word0))
7489                        return;
7490
7491                if (bf_get(lpfc_sliport_status_dip, &reg_data))
7492                        lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
7493                                        "2904 Firmware Dump Image Present"
7494                                        " on Adapter");
7495        }
7496}
7497
7498/**
7499 * lpfc_sli4_hba_setup - SLI4 device initialization PCI function
7500 * @phba: Pointer to HBA context object.
7501 *
7502 * This function is the main SLI4 device initialization PCI function. This
7503 * function is called by the HBA initialization code, HBA reset code and
7504 * HBA error attention handler code. Caller is not required to hold any
7505 * locks.
7506 **/
7507int
7508lpfc_sli4_hba_setup(struct lpfc_hba *phba)
7509{
7510        int rc, i, cnt, len, dd;
7511        LPFC_MBOXQ_t *mboxq;
7512        struct lpfc_mqe *mqe;
7513        uint8_t *vpd;
7514        uint32_t vpd_size;
7515        uint32_t ftr_rsp = 0;
7516        struct Scsi_Host *shost = lpfc_shost_from_vport(phba->pport);
7517        struct lpfc_vport *vport = phba->pport;
7518        struct lpfc_dmabuf *mp;
7519        struct lpfc_rqb *rqbp;
7520
7521        /* Perform a PCI function reset to start from clean */
7522        rc = lpfc_pci_function_reset(phba);
7523        if (unlikely(rc))
7524                return -ENODEV;
7525
7526        /* Check the HBA Host Status Register for readiness */
7527        rc = lpfc_sli4_post_status_check(phba);
7528        if (unlikely(rc))
7529                return -ENODEV;
7530
7531        spin_lock_irq(&phba->hbalock);
7532        phba->sli.sli_flag |= LPFC_SLI_ACTIVE;
7533        spin_unlock_irq(&phba->hbalock);
7535
7536        lpfc_sli4_dip(phba);
7537
7538        /*
7539         * Allocate a single mailbox container for initializing the
7540         * port.
7541         */
7542        mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
7543        if (!mboxq)
7544                return -ENOMEM;
7545
7546        /* Issue READ_REV to collect vpd and FW information. */
7547        vpd_size = SLI4_PAGE_SIZE;
7548        vpd = kzalloc(vpd_size, GFP_KERNEL);
7549        if (!vpd) {
7550                rc = -ENOMEM;
7551                goto out_free_mbox;
7552        }
7553
7554        rc = lpfc_sli4_read_rev(phba, mboxq, vpd, &vpd_size);
7555        if (unlikely(rc)) {
7556                kfree(vpd);
7557                goto out_free_mbox;
7558        }
7559
7560        mqe = &mboxq->u.mqe;
7561        phba->sli_rev = bf_get(lpfc_mbx_rd_rev_sli_lvl, &mqe->un.read_rev);
7562        if (bf_get(lpfc_mbx_rd_rev_fcoe, &mqe->un.read_rev)) {
7563                phba->hba_flag |= HBA_FCOE_MODE;
7564                phba->fcp_embed_io = 0; /* SLI4 FC support only */
7565        } else {
7566                phba->hba_flag &= ~HBA_FCOE_MODE;
7567        }
7568
7569        if (bf_get(lpfc_mbx_rd_rev_cee_ver, &mqe->un.read_rev) ==
7570                LPFC_DCBX_CEE_MODE)
7571                phba->hba_flag |= HBA_FIP_SUPPORT;
7572        else
7573                phba->hba_flag &= ~HBA_FIP_SUPPORT;
7574
7575        phba->hba_flag &= ~HBA_IOQ_FLUSH;
7576
7577        if (phba->sli_rev != LPFC_SLI_REV4) {
7578                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7579                        "0376 READ_REV Error. SLI Level %d "
7580                        "FCoE enabled %d\n",
7581                        phba->sli_rev, phba->hba_flag & HBA_FCOE_MODE);
7582                rc = -EIO;
7583                kfree(vpd);
7584                goto out_free_mbox;
7585        }
7586
7587        /*
7588         * Continue initialization with default values even if the driver
7589         * fails to read the FCoE param config regions; only attempt the
7590         * read if the board is FCoE.
7591         */
7592        if (phba->hba_flag & HBA_FCOE_MODE &&
7593            lpfc_sli4_read_fcoe_params(phba))
7594                lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_INIT,
7595                        "2570 Failed to read FCoE parameters\n");
7596
7597        /*
7598         * Retrieve the sli4 device physical port name; a failure to do
7599         * so is considered non-fatal.
7600         */
7601        rc = lpfc_sli4_retrieve_pport_name(phba);
7602        if (!rc)
7603                lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
7604                                "3080 Successful retrieving SLI4 device "
7605                                "physical port name: %s.\n", phba->Port);
7606
7607        rc = lpfc_sli4_get_ctl_attr(phba);
7608        if (!rc)
7609                lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
7610                                "8351 Successful retrieving SLI4 device "
7611                                "CTL ATTR\n");
7612
7613        /*
7614         * Evaluate the read rev and vpd data. Populate the driver
7615         * state with the results. If this routine fails, the failure
7616         * is not fatal as the driver will use generic values.
7617         */
7618        rc = lpfc_parse_vpd(phba, vpd, vpd_size);
7619        if (unlikely(!rc)) {
7620                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7621                                "0377 Error %d parsing vpd. "
7622                                "Using defaults.\n", rc);
7623                rc = 0;
7624        }
7625        kfree(vpd);
7626
7627        /* Save information as VPD data */
7628        phba->vpd.rev.biuRev = mqe->un.read_rev.first_hw_rev;
7629        phba->vpd.rev.smRev = mqe->un.read_rev.second_hw_rev;
7630
7631        /*
7632         * The first G7 ASIC doesn't support the standard 0x5a NVME cmd
7633         * descriptor type/subtype, so disable embedded NVME commands there.
7634         */
7635        if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
7636                        LPFC_SLI_INTF_IF_TYPE_6) &&
7637            (phba->vpd.rev.biuRev == LPFC_G7_ASIC_1) &&
7638            (phba->vpd.rev.smRev == 0) &&
7639            (phba->cfg_nvme_embed_cmd == 1))
7640                phba->cfg_nvme_embed_cmd = 0;
7641
7642        phba->vpd.rev.endecRev = mqe->un.read_rev.third_hw_rev;
7643        phba->vpd.rev.fcphHigh = bf_get(lpfc_mbx_rd_rev_fcph_high,
7644                                         &mqe->un.read_rev);
7645        phba->vpd.rev.fcphLow = bf_get(lpfc_mbx_rd_rev_fcph_low,
7646                                       &mqe->un.read_rev);
7647        phba->vpd.rev.feaLevelHigh = bf_get(lpfc_mbx_rd_rev_ftr_lvl_high,
7648                                            &mqe->un.read_rev);
7649        phba->vpd.rev.feaLevelLow = bf_get(lpfc_mbx_rd_rev_ftr_lvl_low,
7650                                           &mqe->un.read_rev);
7651        phba->vpd.rev.sli1FwRev = mqe->un.read_rev.fw_id_rev;
7652        memcpy(phba->vpd.rev.sli1FwName, mqe->un.read_rev.fw_name, 16);
7653        phba->vpd.rev.sli2FwRev = mqe->un.read_rev.ulp_fw_id_rev;
7654        memcpy(phba->vpd.rev.sli2FwName, mqe->un.read_rev.ulp_fw_name, 16);
7655        phba->vpd.rev.opFwRev = mqe->un.read_rev.fw_id_rev;
7656        memcpy(phba->vpd.rev.opFwName, mqe->un.read_rev.fw_name, 16);
7657        lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
7658                        "(%d):0380 READ_REV Status x%x "
7659                        "fw_rev:%s fcphHi:%x fcphLo:%x flHi:%x flLo:%x\n",
7660                        mboxq->vport ? mboxq->vport->vpi : 0,
7661                        bf_get(lpfc_mqe_status, mqe),
7662                        phba->vpd.rev.opFwName,
7663                        phba->vpd.rev.fcphHigh, phba->vpd.rev.fcphLow,
7664                        phba->vpd.rev.feaLevelHigh, phba->vpd.rev.feaLevelLow);
7665
7666        if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
7667            LPFC_SLI_INTF_IF_TYPE_0) {
7668                lpfc_set_features(phba, mboxq, LPFC_SET_UE_RECOVERY);
7669                rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7670                if (rc == MBX_SUCCESS) {
7671                        phba->hba_flag |= HBA_RECOVERABLE_UE;
7672                        /* Set 1Sec interval to detect UE */
7673                        phba->eratt_poll_interval = 1;
7674                        phba->sli4_hba.ue_to_sr = bf_get(
7675                                        lpfc_mbx_set_feature_UESR,
7676                                        &mboxq->u.mqe.un.set_feature);
7677                        phba->sli4_hba.ue_to_rp = bf_get(
7678                                        lpfc_mbx_set_feature_UERP,
7679                                        &mboxq->u.mqe.un.set_feature);
7680                }
7681        }
7682
7683        if (phba->cfg_enable_mds_diags && phba->mds_diags_support) {
7684                /* Enable MDS Diagnostics only if the SLI Port supports it */
7685                lpfc_set_features(phba, mboxq, LPFC_SET_MDS_DIAGS);
7686                rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7687                if (rc != MBX_SUCCESS)
7688                        phba->mds_diags_support = 0;
7689        }
7690
7691        /*
7692         * Discover the port's supported feature set and match it against the
7693         * host's requests.
7694         */
7695        lpfc_request_features(phba, mboxq);
7696        rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7697        if (unlikely(rc)) {
7698                rc = -EIO;
7699                goto out_free_mbox;
7700        }
7701
7702        /* Disable VMID if app header is not supported */
7703        if (phba->cfg_vmid_app_header && !(bf_get(lpfc_mbx_rq_ftr_rsp_ashdr,
7704                                                  &mqe->un.req_ftrs))) {
7705                bf_set(lpfc_ftr_ashdr, &phba->sli4_hba.sli4_flags, 0);
7706                phba->cfg_vmid_app_header = 0;
7707                lpfc_printf_log(phba, KERN_DEBUG, LOG_SLI,
7708                                "1242 vmid feature not supported\n");
7709        }
7710
7711        /*
7712         * The port must support FCP initiator mode as this is the
7713         * only mode running in the host.
7714         */
7715        if (!(bf_get(lpfc_mbx_rq_ftr_rsp_fcpi, &mqe->un.req_ftrs))) {
7716                lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
7717                                "0378 No support for fcpi mode.\n");
7718                ftr_rsp++;
7719        }
7720
7721        /* Performance Hints are ONLY for FCoE */
7722        if (phba->hba_flag & HBA_FCOE_MODE) {
7723                if (bf_get(lpfc_mbx_rq_ftr_rsp_perfh, &mqe->un.req_ftrs))
7724                        phba->sli3_options |= LPFC_SLI4_PERFH_ENABLED;
7725        } else {
7726                rc = 0;
            }
7727        }
7728
7729        /*
7730         * If the port cannot support the host's requested features
7731         * then turn off the global config parameters to disable the
7732         * feature in the driver.  This is not a fatal error.
7733         */
7734        if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) {
7735                if (!(bf_get(lpfc_mbx_rq_ftr_rsp_dif, &mqe->un.req_ftrs))) {
7736                        phba->cfg_enable_bg = 0;
7737                        phba->sli3_options &= ~LPFC_SLI3_BG_ENABLED;
7738                        ftr_rsp++;
7739                }
7740        }
7741
7742        if (phba->max_vpi && phba->cfg_enable_npiv &&
7743            !(bf_get(lpfc_mbx_rq_ftr_rsp_npiv, &mqe->un.req_ftrs)))
7744                ftr_rsp++;
7745
7746        if (ftr_rsp) {
7747                lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
7748                                "0379 Feature Mismatch Data: x%08x %08x "
7749                                "x%x x%x x%x\n", mqe->un.req_ftrs.word2,
7750                                mqe->un.req_ftrs.word3, phba->cfg_enable_bg,
7751                                phba->cfg_enable_npiv, phba->max_vpi);
7752                if (!(bf_get(lpfc_mbx_rq_ftr_rsp_dif, &mqe->un.req_ftrs)))
7753                        phba->cfg_enable_bg = 0;
7754                if (!(bf_get(lpfc_mbx_rq_ftr_rsp_npiv, &mqe->un.req_ftrs)))
7755                        phba->cfg_enable_npiv = 0;
7756        }
7757
7758        /* These SLI3 features are assumed in SLI4 */
7759        spin_lock_irq(&phba->hbalock);
7760        phba->sli3_options |= (LPFC_SLI3_NPIV_ENABLED | LPFC_SLI3_HBQ_ENABLED);
7761        spin_unlock_irq(&phba->hbalock);
7762
7763        /* Always try to enable dual dump feature if we can */
7764        lpfc_set_features(phba, mboxq, LPFC_SET_DUAL_DUMP);
7765        rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7766        dd = bf_get(lpfc_mbx_set_feature_dd, &mboxq->u.mqe.un.set_feature);
7767        if ((rc == MBX_SUCCESS) && (dd == LPFC_ENABLE_DUAL_DUMP))
7768                lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
7769                                "6448 Dual Dump is enabled\n");
7770        else
7771                lpfc_printf_log(phba, KERN_INFO, LOG_SLI | LOG_INIT,
7772                                "6447 Dual Dump Mailbox x%x (x%x/x%x) failed, "
7773                                "rc:x%x dd:x%x\n",
7774                                bf_get(lpfc_mqe_command, &mboxq->u.mqe),
7775                                lpfc_sli_config_mbox_subsys_get(
7776                                        phba, mboxq),
7777                                lpfc_sli_config_mbox_opcode_get(
7778                                        phba, mboxq),
7779                                rc, dd);
7780        /*
7781         * Allocate all resources (xri,rpi,vpi,vfi) now.  Subsequent
7782         * calls depend on these resources to complete port setup.
7783         */
7784        rc = lpfc_sli4_alloc_resource_identifiers(phba);
7785        if (rc) {
7786                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7787                                "2920 Failed to alloc Resource IDs "
7788                                "rc = x%x\n", rc);
7789                goto out_free_mbox;
7790        }
7791
7792        lpfc_set_host_data(phba, mboxq);
7793
7794        rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7795        if (rc) {
7796                lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
7797                                "2134 Failed to set host os driver version %x",
7798                                rc);
7799        }
7800
7801        /* Read the port's service parameters. */
7802        rc = lpfc_read_sparam(phba, mboxq, vport->vpi);
7803        if (rc) {
7804                phba->link_state = LPFC_HBA_ERROR;
7805                rc = -ENOMEM;
7806                goto out_free_mbox;
7807        }
7808
7809        mboxq->vport = vport;
7810        rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7811        mp = (struct lpfc_dmabuf *)mboxq->ctx_buf;
7812        if (rc == MBX_SUCCESS) {
7813                memcpy(&vport->fc_sparam, mp->virt, sizeof(struct serv_parm));
7814                rc = 0;
7815        }
7816
7817        /*
7818         * This memory was allocated by the lpfc_read_sparam routine. Release
7819         * it to the mbuf pool.
7820         */
7821        lpfc_mbuf_free(phba, mp->virt, mp->phys);
7822        kfree(mp);
7823        mboxq->ctx_buf = NULL;
7824        if (unlikely(rc)) {
7825                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7826                                "0382 READ_SPARAM command failed "
7827                                "status %d, mbxStatus x%x\n",
7828                                rc, bf_get(lpfc_mqe_status, mqe));
7829                phba->link_state = LPFC_HBA_ERROR;
7830                rc = -EIO;
7831                goto out_free_mbox;
7832        }
7833
7834        lpfc_update_vport_wwn(vport);
7835
7836        /* Update the fc_host data structures with new wwn. */
7837        fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
7838        fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
7839
7840        /* Create all the SLI4 queues */
7841        rc = lpfc_sli4_queue_create(phba);
7842        if (rc) {
7843                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7844                                "3089 Failed to allocate queues\n");
7845                rc = -ENODEV;
7846                goto out_free_mbox;
7847        }
7848        /* Set up all the queues to the device */
7849        rc = lpfc_sli4_queue_setup(phba);
7850        if (unlikely(rc)) {
7851                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7852                                "0381 Error %d during queue setup.\n", rc);
7853                goto out_stop_timers;
7854        }
7855        /* Initialize the driver internal SLI layer lists. */
7856        lpfc_sli4_setup(phba);
7857        lpfc_sli4_queue_init(phba);
7858
7859        /* update host els xri-sgl sizes and mappings */
7860        rc = lpfc_sli4_els_sgl_update(phba);
7861        if (unlikely(rc)) {
7862                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7863                                "1400 Failed to update xri-sgl size and "
7864                                "mapping: %d\n", rc);
7865                goto out_destroy_queue;
7866        }
7867
7868        /* register the els sgl pool to the port */
7869        rc = lpfc_sli4_repost_sgl_list(phba, &phba->sli4_hba.lpfc_els_sgl_list,
7870                                       phba->sli4_hba.els_xri_cnt);
7871        if (unlikely(rc < 0)) {
7872                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7873                                "0582 Error %d during els sgl post "
7874                                "operation\n", rc);
7875                rc = -ENODEV;
7876                goto out_destroy_queue;
7877        }
7878        phba->sli4_hba.els_xri_cnt = rc;
7879
7880        if (phba->nvmet_support) {
7881                /* update host nvmet xri-sgl sizes and mappings */
7882                rc = lpfc_sli4_nvmet_sgl_update(phba);
7883                if (unlikely(rc)) {
7884                        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7885                                        "6308 Failed to update nvmet-sgl size "
7886                                        "and mapping: %d\n", rc);
7887                        goto out_destroy_queue;
7888                }
7889
7890                /* register the nvmet sgl pool to the port */
7891                rc = lpfc_sli4_repost_sgl_list(
7892                        phba,
7893                        &phba->sli4_hba.lpfc_nvmet_sgl_list,
7894                        phba->sli4_hba.nvmet_xri_cnt);
7895                if (unlikely(rc < 0)) {
7896                        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7897                                        "3117 Error %d during nvmet "
7898                                        "sgl post\n", rc);
7899                        rc = -ENODEV;
7900                        goto out_destroy_queue;
7901                }
7902                phba->sli4_hba.nvmet_xri_cnt = rc;
7903
7904                /* We allocate an iocbq for every receive context SGL.
7905                 * The additional allocation is for abort and ls handling.
7906                 */
7907                cnt = phba->sli4_hba.nvmet_xri_cnt +
7908                        phba->sli4_hba.max_cfg_param.max_xri;
7909        } else {
7910                /* update host common xri-sgl sizes and mappings */
7911                rc = lpfc_sli4_io_sgl_update(phba);
7912                if (unlikely(rc)) {
7913                        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7914                                        "6082 Failed to update nvme-sgl size "
7915                                        "and mapping: %d\n", rc);
7916                        goto out_destroy_queue;
7917                }
7918
7919                /* register the allocated common sgl pool to the port */
7920                rc = lpfc_sli4_repost_io_sgl_list(phba);
7921                if (unlikely(rc)) {
7922                        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7923                                        "6116 Error %d during nvme sgl post "
7924                                        "operation\n", rc);
7925                        /* Some NVME buffers were moved to abort nvme list */
7926                        /* A pci function reset will repost them */
7927                        rc = -ENODEV;
7928                        goto out_destroy_queue;
7929                }
7930                /* Each lpfc_io_buf job structure has an iocbq element.
7931                 * This cnt provides for abort, els, ct and ls requests.
7932                 */
7933                cnt = phba->sli4_hba.max_cfg_param.max_xri;
7934        }
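
                /* Sizing example (illustrative): with max_xri = 2048, the
                 * initiator path above sizes the iocb list at 2048 entries,
                 * while an NVMET target adds its nvmet_xri_cnt receive
                 * context SGLs on top of max_xri.
                 */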
7935
7936        if (!phba->sli.iocbq_lookup) {
7937                /* Initialize and populate the iocb list per host */
7938                lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7939                                "2821 initialize iocb list with %d entries\n",
7940                                cnt);
7941                rc = lpfc_init_iocb_list(phba, cnt);
7942                if (rc) {
7943                        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7944                                        "1413 Failed to init iocb list.\n");
7945                        goto out_destroy_queue;
7946                }
7947        }
7948
7949        if (phba->nvmet_support)
7950                lpfc_nvmet_create_targetport(phba);
7951
7952        if (phba->nvmet_support && phba->cfg_nvmet_mrq) {
7953                /* Post initial buffers to all RQs created */
7954                for (i = 0; i < phba->cfg_nvmet_mrq; i++) {
7955                        rqbp = phba->sli4_hba.nvmet_mrq_hdr[i]->rqbp;
7956                        INIT_LIST_HEAD(&rqbp->rqb_buffer_list);
7957                        rqbp->rqb_alloc_buffer = lpfc_sli4_nvmet_alloc;
7958                        rqbp->rqb_free_buffer = lpfc_sli4_nvmet_free;
7959                        rqbp->entry_count = LPFC_NVMET_RQE_DEF_COUNT;
7960                        rqbp->buffer_count = 0;
7961
7962                        lpfc_post_rq_buffer(
7963                                phba, phba->sli4_hba.nvmet_mrq_hdr[i],
7964                                phba->sli4_hba.nvmet_mrq_data[i],
7965                                phba->cfg_nvmet_mrq_post, i);
7966                }
7967        }
7968
7969        /* Post the rpi header region to the device. */
7970        rc = lpfc_sli4_post_all_rpi_hdrs(phba);
7971        if (unlikely(rc)) {
7972                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7973                                "0393 Error %d during rpi post operation\n",
7974                                rc);
7975                rc = -ENODEV;
7976                goto out_free_iocblist;
7977        }
7978        lpfc_sli4_node_prep(phba);
7979
7980        if (!(phba->hba_flag & HBA_FCOE_MODE)) {
7981                if ((phba->nvmet_support == 0) || (phba->cfg_nvmet_mrq == 1)) {
7982                        /*
7983                         * The FC Port needs to register FCFI (index 0)
7984                         */
7985                        lpfc_reg_fcfi(phba, mboxq);
7986                        mboxq->vport = phba->pport;
7987                        rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7988                        if (rc != MBX_SUCCESS)
7989                                goto out_unset_queue;
7990                        rc = 0;
7991                        phba->fcf.fcfi = bf_get(lpfc_reg_fcfi_fcfi,
7992                                                &mboxq->u.mqe.un.reg_fcfi);
7993                } else {
7994                        /* We are in NVME Target mode with MRQ > 1 */
7995
7996                        /* First register the FCFI */
7997                        lpfc_reg_fcfi_mrq(phba, mboxq, 0);
7998                        mboxq->vport = phba->pport;
7999                        rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
8000                        if (rc != MBX_SUCCESS)
8001                                goto out_unset_queue;
8002                        rc = 0;
8003                        phba->fcf.fcfi = bf_get(lpfc_reg_fcfi_mrq_fcfi,
8004                                                &mboxq->u.mqe.un.reg_fcfi_mrq);
8005
8006                        /* Next register the MRQs */
8007                        lpfc_reg_fcfi_mrq(phba, mboxq, 1);
8008                        mboxq->vport = phba->pport;
8009                        rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
8010                        if (rc != MBX_SUCCESS)
8011                                goto out_unset_queue;
8012                        rc = 0;
8013                }
8014                /* Check if the port is configured to be disabled */
8015                lpfc_sli_read_link_ste(phba);
8016        }
8017
8018        /* Don't post more new bufs if repost already recovered
8019         * the nvme sgls.
8020         */
8021        if (phba->nvmet_support == 0) {
8022                if (phba->sli4_hba.io_xri_cnt == 0) {
8023                        len = lpfc_new_io_buf(
8024                                              phba, phba->sli4_hba.io_xri_max);
8025                        if (len == 0) {
8026                                rc = -ENOMEM;
8027                                goto out_unset_queue;
8028                        }
8029
8030                        if (phba->cfg_xri_rebalancing)
8031                                lpfc_create_multixri_pools(phba);
8032                }
8033        } else {
8034                phba->cfg_xri_rebalancing = 0;
8035        }
8036
8037        /* Allow asynchronous mailbox command to go through */
8038        spin_lock_irq(&phba->hbalock);
8039        phba->sli.sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
8040        spin_unlock_irq(&phba->hbalock);
8041
8042        /* Post receive buffers to the device */
8043        lpfc_sli4_rb_setup(phba);
8044
8045        /* Reset HBA FCF states after HBA reset */
8046        phba->fcf.fcf_flag = 0;
8047        phba->fcf.current_rec.flag = 0;
8048
8049        /* Start the ELS watchdog timer */
8050        mod_timer(&vport->els_tmofunc,
8051                  jiffies + msecs_to_jiffies(1000 * (phba->fc_ratov * 2)));
8052
8053        /* Start heart beat timer */
8054        mod_timer(&phba->hb_tmofunc,
8055                  jiffies + msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
8056        phba->hba_flag &= ~(HBA_HBEAT_INP | HBA_HBEAT_TMO);
8057        phba->last_completion_time = jiffies;
8058
8059        /* start eq_delay heartbeat */
8060        if (phba->cfg_auto_imax)
8061                queue_delayed_work(phba->wq, &phba->eq_delay_work,
8062                                   msecs_to_jiffies(LPFC_EQ_DELAY_MSECS));
8063
8064        /* start per phba idle_stat_delay heartbeat */
8065        lpfc_init_idle_stat_hb(phba);
8066
8067        /* Start error attention (ERATT) polling timer */
8068        mod_timer(&phba->eratt_poll,
8069                  jiffies + msecs_to_jiffies(1000 * phba->eratt_poll_interval));
8070
8071        /* Enable PCIe device Advanced Error Reporting (AER) if configured */
8072        if (phba->cfg_aer_support == 1 && !(phba->hba_flag & HBA_AER_ENABLED)) {
8073                rc = pci_enable_pcie_error_reporting(phba->pcidev);
8074                if (!rc) {
8075                        lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
8076                                        "2829 This device supports "
8077                                        "Advanced Error Reporting (AER)\n");
8078                        spin_lock_irq(&phba->hbalock);
8079                        phba->hba_flag |= HBA_AER_ENABLED;
8080                        spin_unlock_irq(&phba->hbalock);
8081                } else {
8082                        lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
8083                                        "2830 This device does not support "
8084                                        "Advanced Error Reporting (AER)\n");
8085                        phba->cfg_aer_support = 0;
8086                }
8087                rc = 0;
8088        }
8089
8090        /*
8091         * The port is ready, set the host's link state to LINK_DOWN
8092         * in preparation for link interrupts.
8093         */
8094        spin_lock_irq(&phba->hbalock);
8095        phba->link_state = LPFC_LINK_DOWN;
8096
8097        /* Check if physical ports are trunked */
8098        if (bf_get(lpfc_conf_trunk_port0, &phba->sli4_hba))
8099                phba->trunk_link.link0.state = LPFC_LINK_DOWN;
8100        if (bf_get(lpfc_conf_trunk_port1, &phba->sli4_hba))
8101                phba->trunk_link.link1.state = LPFC_LINK_DOWN;
8102        if (bf_get(lpfc_conf_trunk_port2, &phba->sli4_hba))
8103                phba->trunk_link.link2.state = LPFC_LINK_DOWN;
8104        if (bf_get(lpfc_conf_trunk_port3, &phba->sli4_hba))
8105                phba->trunk_link.link3.state = LPFC_LINK_DOWN;
8106        spin_unlock_irq(&phba->hbalock);
8107
8108        /* Arm the CQs and then EQs on device */
8109        lpfc_sli4_arm_cqeq_intr(phba);
8110
8111        /* Indicate device interrupt mode */
8112        phba->sli4_hba.intr_enable = 1;
8113
8114        if (!(phba->hba_flag & HBA_FCOE_MODE) &&
8115            (phba->hba_flag & LINK_DISABLED)) {
8116                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8117                                "3103 Adapter Link is disabled.\n");
8118                lpfc_down_link(phba, mboxq);
8119                rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
8120                if (rc != MBX_SUCCESS) {
8121                        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8122                                        "3104 Adapter failed to issue "
8123                                        "DOWN_LINK mbox cmd, rc:x%x\n", rc);
8124                        goto out_io_buff_free;
8125                }
8126        } else if (phba->cfg_suppress_link_up == LPFC_INITIALIZE_LINK) {
8127                /* don't perform init_link on SLI4 FC port loopback test */
8128                if (!(phba->link_flag & LS_LOOPBACK_MODE)) {
8129                        rc = phba->lpfc_hba_init_link(phba, MBX_NOWAIT);
8130                        if (rc)
8131                                goto out_io_buff_free;
8132                }
8133        }
8134        mempool_free(mboxq, phba->mbox_mem_pool);
8135        return rc;
8136out_io_buff_free:
8137        /* Free allocated IO Buffers */
8138        lpfc_io_free(phba);
8139out_unset_queue:
8140        /* Unset all the queues set up in this routine when error out */
8141        lpfc_sli4_queue_unset(phba);
8142out_free_iocblist:
8143        lpfc_free_iocb_list(phba);
8144out_destroy_queue:
8145        lpfc_sli4_queue_destroy(phba);
8146out_stop_timers:
8147        lpfc_stop_hba_timers(phba);
8148out_free_mbox:
8149        mempool_free(mboxq, phba->mbox_mem_pool);
8150        return rc;
8151}
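
/*
 * Illustrative caller sketch (hypothetical, for documentation only; the
 * "xxxx" log id is a placeholder): the setup routine above is invoked
 * during port bring-up.  On failure the error labels have already unwound
 * the queues, iocb list and timers, so a caller only acts on the return:
 *
 *	if (lpfc_sli4_hba_setup(phba)) {
 *		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
 *				"xxxx HBA setup failed\n");
 *		return -ENODEV;
 *	}
 */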
8152
8153/**
8154 * lpfc_mbox_timeout - Timeout call back function for mbox timer
8155 * @t: Context to fetch pointer to hba structure from.
8156 *
8157 * This is the callback function for the mailbox timer. The mailbox
8158 * timer is armed when a new mailbox command is issued and the timer
8159 * is deleted when the mailbox completes. The function is called by
8160 * the kernel timer code when a mailbox does not complete within the
8161 * expected time. This function wakes up the worker thread to
8162 * process the mailbox timeout and returns. All the processing is
8163 * done by the worker thread function lpfc_mbox_timeout_handler.
8164 **/
8165void
8166lpfc_mbox_timeout(struct timer_list *t)
8167{
8168        struct lpfc_hba  *phba = from_timer(phba, t, sli.mbox_tmo);
8169        unsigned long iflag;
8170        uint32_t tmo_posted;
8171
8172        spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
8173        tmo_posted = phba->pport->work_port_events & WORKER_MBOX_TMO;
8174        if (!tmo_posted)
8175                phba->pport->work_port_events |= WORKER_MBOX_TMO;
8176        spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);
8177
8178        if (!tmo_posted)
8179                lpfc_worker_wake_up(phba);
8181}
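
/*
 * For reference, this timer is armed when a mailbox command is issued
 * and a completion is awaited (see lpfc_sli_issue_mbox_s3 below):
 *
 *	timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, pmbox) * 1000);
 *	mod_timer(&psli->mbox_tmo, jiffies + timeout);
 */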
8182
8183/**
8184 * lpfc_sli4_mbox_completions_pending - check to see if any mailbox completions
8185 *                                    are pending
8186 * @phba: Pointer to HBA context object.
8187 *
8188 * This function checks if any mailbox completions are present on the mailbox
8189 * completion queue.
8190 **/
8191static bool
8192lpfc_sli4_mbox_completions_pending(struct lpfc_hba *phba)
8193{
8195        uint32_t idx;
8196        struct lpfc_queue *mcq;
8197        struct lpfc_mcqe *mcqe;
8198        bool pending_completions = false;
8199        uint8_t qe_valid;
8200
8201        if (unlikely(!phba) || (phba->sli_rev != LPFC_SLI_REV4))
8202                return false;
8203
8204        /* Check for completions on mailbox completion queue */
8205
8206        mcq = phba->sli4_hba.mbx_cq;
8207        idx = mcq->hba_index;
8208        qe_valid = mcq->qe_valid;
8209        while (bf_get_le32(lpfc_cqe_valid,
8210               (struct lpfc_cqe *)lpfc_sli4_qe(mcq, idx)) == qe_valid) {
8211                mcqe = (struct lpfc_mcqe *)(lpfc_sli4_qe(mcq, idx));
8212                if (bf_get_le32(lpfc_trailer_completed, mcqe) &&
8213                    (!bf_get_le32(lpfc_trailer_async, mcqe))) {
8214                        pending_completions = true;
8215                        break;
8216                }
8217                idx = (idx + 1) % mcq->entry_count;
8218                if (mcq->hba_index == idx)
8219                        break;
8220
8221                /* if the index wrapped around, toggle the valid bit */
8222                if (phba->sli4_hba.pc_sli4_params.cqav && !idx)
8223                        qe_valid = (qe_valid) ? 0 : 1;
8224        }
8225        return pending_completions;
8227}
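
/*
 * Phase-bit note (illustrative): when cqav is supported, hardware inverts
 * the CQE valid bit each time it wraps the ring, and the loop above
 * mirrors that by toggling qe_valid whenever idx wraps to 0.  A stale
 * entry left over from the previous pass therefore never matches and is
 * never mistaken for a new completion.
 */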
8228
8229/**
8230 * lpfc_sli4_process_missed_mbox_completions - process mbox completions
8231 *                                            that were missed.
8232 * @phba: Pointer to HBA context object.
8233 *
8234 * For SLI4, it is possible to miss an interrupt. As such, mbox completions
8235 * may be missed, causing erroneous mailbox timeouts to occur. This function
8236 * checks to see if mbox completions are on the mailbox completion queue
8237 * and will process all the completions associated with the eq for the
8238 * mailbox completion queue.
8239 **/
8240static bool
8241lpfc_sli4_process_missed_mbox_completions(struct lpfc_hba *phba)
8242{
8243        struct lpfc_sli4_hba *sli4_hba = &phba->sli4_hba;
8244        uint32_t eqidx;
8245        struct lpfc_queue *fpeq = NULL;
8246        struct lpfc_queue *eq;
8247        bool mbox_pending;
8248
8249        if (unlikely(!phba) || (phba->sli_rev != LPFC_SLI_REV4))
8250                return false;
8251
8252        /* Find the EQ associated with the mbox CQ */
8253        if (sli4_hba->hdwq) {
8254                for (eqidx = 0; eqidx < phba->cfg_irq_chann; eqidx++) {
8255                        eq = phba->sli4_hba.hba_eq_hdl[eqidx].eq;
8256                        if (eq && eq->queue_id == sli4_hba->mbx_cq->assoc_qid) {
8257                                fpeq = eq;
8258                                break;
8259                        }
8260                }
8261        }
8262        if (!fpeq)
8263                return false;
8264
8265        /* Turn off interrupts from this EQ */
8266
8267        sli4_hba->sli4_eq_clr_intr(fpeq);
8268
8269        /* Check to see if a mbox completion is pending */
8270
8271        mbox_pending = lpfc_sli4_mbox_completions_pending(phba);
8272
8273        /*
8274         * If a mbox completion is pending, process all the events on EQ
8275         * associated with the mbox completion queue (this could include
8276         * mailbox commands, async events, els commands, receive queue data
8277         * and fcp commands)
8278         */
8279
8280        if (mbox_pending)
8281                /* process and rearm the EQ */
8282                lpfc_sli4_process_eq(phba, fpeq, LPFC_QUEUE_REARM);
8283        else
8284                /* Always clear and re-arm the EQ */
8285                sli4_hba->sli4_write_eq_db(phba, fpeq, 0, LPFC_QUEUE_REARM);
8286
8287        return mbox_pending;
8289}
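
/*
 * Note: this recovery path is invoked from the mailbox timeout code
 * (lpfc_mbox_timeout_handler and lpfc_sli4_async_mbox_block below)
 * before a timeout is acted upon, so a lost interrupt alone does not
 * escalate into an HBA reset.
 */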
8290
8291/**
8292 * lpfc_mbox_timeout_handler - Worker thread function to handle mailbox timeout
8293 * @phba: Pointer to HBA context object.
8294 *
8295 * This function is called from worker thread when a mailbox command times out.
8296 * The caller is not required to hold any locks. This function will reset the
8297 * HBA and recover all the pending commands.
8298 **/
8299void
8300lpfc_mbox_timeout_handler(struct lpfc_hba *phba)
8301{
8302        LPFC_MBOXQ_t *pmbox = phba->sli.mbox_active;
8303        MAILBOX_t *mb = NULL;
8304
8305        struct lpfc_sli *psli = &phba->sli;
8306
8307        /* Process any mailbox completion the driver may have missed */
8308        lpfc_sli4_process_missed_mbox_completions(phba);
8309
8310        if (!(psli->sli_flag & LPFC_SLI_ACTIVE))
8311                return;
8312
8313        if (pmbox != NULL)
8314                mb = &pmbox->u.mb;
8315        /* Check the pmbox pointer first.  There is a race condition
8316         * between the mbox timeout handler getting executed in the
8317         * worklist and the mailbox actually completing. When this
8318         * race condition occurs, the mbox_active will be NULL.
8319         */
8320        spin_lock_irq(&phba->hbalock);
8321        if (pmbox == NULL) {
8322                lpfc_printf_log(phba, KERN_WARNING,
8323                                LOG_MBOX | LOG_SLI,
8324                                "0353 Active Mailbox cleared - mailbox timeout "
8325                                "exiting\n");
8326                spin_unlock_irq(&phba->hbalock);
8327                return;
8328        }
8329
8330        /* Mbox cmd <mbxCommand> timeout */
8331        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8332                        "0310 Mailbox command x%x timeout Data: x%x x%x x%px\n",
8333                        mb->mbxCommand,
8334                        phba->pport->port_state,
8335                        phba->sli.sli_flag,
8336                        phba->sli.mbox_active);
8337        spin_unlock_irq(&phba->hbalock);
8338
8339        /* Setting state unknown so lpfc_sli_abort_iocb_ring
8340         * would get IOCB_ERROR from lpfc_sli_issue_iocb, allowing
8341         * it to fail all outstanding SCSI IO.
8342         */
8343        spin_lock_irq(&phba->pport->work_port_lock);
8344        phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
8345        spin_unlock_irq(&phba->pport->work_port_lock);
8346        spin_lock_irq(&phba->hbalock);
8347        phba->link_state = LPFC_LINK_UNKNOWN;
8348        psli->sli_flag &= ~LPFC_SLI_ACTIVE;
8349        spin_unlock_irq(&phba->hbalock);
8350
8351        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8352                        "0345 Resetting board due to mailbox timeout\n");
8353
8354        /* Reset the HBA device */
8355        lpfc_reset_hba(phba);
8356}
8357
8358/**
8359 * lpfc_sli_issue_mbox_s3 - Issue an SLI3 mailbox command to firmware
8360 * @phba: Pointer to HBA context object.
8361 * @pmbox: Pointer to mailbox object.
8362 * @flag: Flag indicating how the mailbox needs to be processed.
8363 *
8364 * This function is called by discovery code and HBA management code
8365 * to submit a mailbox command to firmware with the SLI-3 interface spec.
8366 * This function takes the hbalock to protect the data structures.
8367 * The mailbox command can be submitted in polling mode, in which case
8368 * this function will wait in a polling loop for the completion of the
8369 * mailbox.
8370 * If the mailbox is submitted in no_wait mode (not polling) the
8371 * function will submit the command and return immediately without waiting
8372 * for the mailbox completion. The no_wait mode is supported only when the
8373 * HBA is in SLI2/SLI3 mode - interrupts are enabled.
8374 * The SLI interface allows only one mailbox pending at a time. If the
8375 * mailbox is issued in polling mode and there is already a mailbox
8376 * pending, then the function will return an error. If the mailbox is issued
8377 * in NO_WAIT mode and there is a mailbox pending already, the function
8378 * will return MBX_BUSY after queuing the mailbox into the mailbox queue.
8379 * The SLI layer owns the mailbox object until the completion of the mailbox
8380 * command if this function returns MBX_BUSY or MBX_SUCCESS. For all other
8381 * return codes the caller owns the mailbox command after the return of
8382 * the function.
8383 **/
8384static int
8385lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
8386                       uint32_t flag)
8387{
8388        MAILBOX_t *mbx;
8389        struct lpfc_sli *psli = &phba->sli;
8390        uint32_t status, evtctr;
8391        uint32_t ha_copy, hc_copy;
8392        int i;
8393        unsigned long timeout;
8394        unsigned long drvr_flag = 0;
8395        uint32_t word0, ldata;
8396        void __iomem *to_slim;
8397        int processing_queue = 0;
8398
8399        spin_lock_irqsave(&phba->hbalock, drvr_flag);
8400        if (!pmbox) {
8401                phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
8402                /* processing mbox queue from intr_handler */
8403                if (unlikely(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
8404                        spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
8405                        return MBX_SUCCESS;
8406                }
8407                processing_queue = 1;
8408                pmbox = lpfc_mbox_get(phba);
8409                if (!pmbox) {
8410                        spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
8411                        return MBX_SUCCESS;
8412                }
8413        }
8414
8415        if (pmbox->mbox_cmpl && pmbox->mbox_cmpl != lpfc_sli_def_mbox_cmpl &&
8416                pmbox->mbox_cmpl != lpfc_sli_wake_mbox_wait) {
8417                if (!pmbox->vport) {
8418                        spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
8419                        lpfc_printf_log(phba, KERN_ERR,
8420                                        LOG_MBOX | LOG_VPORT,
8421                                        "1806 Mbox x%x failed. No vport\n",
8422                                        pmbox->u.mb.mbxCommand);
8423                        dump_stack();
8424                        goto out_not_finished;
8425                }
8426        }
8427
8428        /* If the PCI channel is in offline state, do not post mbox. */
8429        if (unlikely(pci_channel_offline(phba->pcidev))) {
8430                spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
8431                goto out_not_finished;
8432        }
8433
8434        /* If HBA has a deferred error attention, fail the mbox command. */
8435        if (unlikely(phba->hba_flag & DEFER_ERATT)) {
8436                spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
8437                goto out_not_finished;
8438        }
8439
8440        psli = &phba->sli;
8441
8442        mbx = &pmbox->u.mb;
8443        status = MBX_SUCCESS;
8444
8445        if (phba->link_state == LPFC_HBA_ERROR) {
8446                spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
8447
8448                /* Mbox command <mbxCommand> cannot issue */
8449                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8450                                "(%d):0311 Mailbox command x%x cannot "
8451                                "issue Data: x%x x%x\n",
8452                                pmbox->vport ? pmbox->vport->vpi : 0,
8453                                pmbox->u.mb.mbxCommand, psli->sli_flag, flag);
8454                goto out_not_finished;
8455        }
8456
8457        if (mbx->mbxCommand != MBX_KILL_BOARD && flag & MBX_NOWAIT) {
8458                if (lpfc_readl(phba->HCregaddr, &hc_copy) ||
8459                        !(hc_copy & HC_MBINT_ENA)) {
8460                        spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
8461                        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8462                                "(%d):2528 Mailbox command x%x cannot "
8463                                "issue Data: x%x x%x\n",
8464                                pmbox->vport ? pmbox->vport->vpi : 0,
8465                                pmbox->u.mb.mbxCommand, psli->sli_flag, flag);
8466                        goto out_not_finished;
8467                }
8468        }
8469
8470        if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
8471                /* Polling for a mbox command when another one is already active
8472                 * is not allowed in SLI. Also, the driver must have established
8473                 * SLI2 mode to queue and process multiple mbox commands.
8474                 */
8475
8476                if (flag & MBX_POLL) {
8477                        spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
8478
8479                        /* Mbox command <mbxCommand> cannot issue */
8480                        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8481                                        "(%d):2529 Mailbox command x%x "
8482                                        "cannot issue Data: x%x x%x\n",
8483                                        pmbox->vport ? pmbox->vport->vpi : 0,
8484                                        pmbox->u.mb.mbxCommand,
8485                                        psli->sli_flag, flag);
8486                        goto out_not_finished;
8487                }
8488
8489                if (!(psli->sli_flag & LPFC_SLI_ACTIVE)) {
8490                        spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
8491                        /* Mbox command <mbxCommand> cannot issue */
8492                        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8493                                        "(%d):2530 Mailbox command x%x "
8494                                        "cannot issue Data: x%x x%x\n",
8495                                        pmbox->vport ? pmbox->vport->vpi : 0,
8496                                        pmbox->u.mb.mbxCommand,
8497                                        psli->sli_flag, flag);
8498                        goto out_not_finished;
8499                }
8500
8501                /* Another mailbox command is still being processed, queue this
8502                 * command to be processed later.
8503                 */
8504                lpfc_mbox_put(phba, pmbox);
8505
8506                /* Mbox cmd issue - BUSY */
8507                lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
8508                                "(%d):0308 Mbox cmd issue - BUSY Data: "
8509                                "x%x x%x x%x x%x\n",
8510                                pmbox->vport ? pmbox->vport->vpi : 0xffffff,
8511                                mbx->mbxCommand,
8512                                phba->pport ? phba->pport->port_state : 0xff,
8513                                psli->sli_flag, flag);
8514
8515                psli->slistat.mbox_busy++;
8516                spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
8517
8518                if (pmbox->vport) {
8519                        lpfc_debugfs_disc_trc(pmbox->vport,
8520                                LPFC_DISC_TRC_MBOX_VPORT,
8521                                "MBOX Bsy vport:  cmd:x%x mb:x%x x%x",
8522                                (uint32_t)mbx->mbxCommand,
8523                                mbx->un.varWords[0], mbx->un.varWords[1]);
8524                } else {
8526                        lpfc_debugfs_disc_trc(phba->pport,
8527                                LPFC_DISC_TRC_MBOX,
8528                                "MBOX Bsy:        cmd:x%x mb:x%x x%x",
8529                                (uint32_t)mbx->mbxCommand,
8530                                mbx->un.varWords[0], mbx->un.varWords[1]);
8531                }
8532
8533                return MBX_BUSY;
8534        }
8535
8536        psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;
8537
8538        /* If we are not polling, we MUST be in SLI2 mode */
8539        if (flag != MBX_POLL) {
8540                if (!(psli->sli_flag & LPFC_SLI_ACTIVE) &&
8541                    (mbx->mbxCommand != MBX_KILL_BOARD)) {
8542                        psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
8543                        spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
8544                        /* Mbox command <mbxCommand> cannot issue */
8545                        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8546                                        "(%d):2531 Mailbox command x%x "
8547                                        "cannot issue Data: x%x x%x\n",
8548                                        pmbox->vport ? pmbox->vport->vpi : 0,
8549                                        pmbox->u.mb.mbxCommand,
8550                                        psli->sli_flag, flag);
8551                        goto out_not_finished;
8552                }
8553                /* timeout active mbox command */
8554                timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, pmbox) *
8555                                           1000);
8556                mod_timer(&psli->mbox_tmo, jiffies + timeout);
8557        }
8558
8559        /* Mailbox cmd <cmd> issue */
8560        lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
8561                        "(%d):0309 Mailbox cmd x%x issue Data: x%x x%x "
8562                        "x%x\n",
8563                        pmbox->vport ? pmbox->vport->vpi : 0,
8564                        mbx->mbxCommand,
8565                        phba->pport ? phba->pport->port_state : 0xff,
8566                        psli->sli_flag, flag);
8567
8568        if (mbx->mbxCommand != MBX_HEARTBEAT) {
8569                if (pmbox->vport) {
8570                        lpfc_debugfs_disc_trc(pmbox->vport,
8571                                LPFC_DISC_TRC_MBOX_VPORT,
8572                                "MBOX Send vport: cmd:x%x mb:x%x x%x",
8573                                (uint32_t)mbx->mbxCommand,
8574                                mbx->un.varWords[0], mbx->un.varWords[1]);
8575                } else {
8577                        lpfc_debugfs_disc_trc(phba->pport,
8578                                LPFC_DISC_TRC_MBOX,
8579                                "MBOX Send:       cmd:x%x mb:x%x x%x",
8580                                (uint32_t)mbx->mbxCommand,
8581                                mbx->un.varWords[0], mbx->un.varWords[1]);
8582                }
8583        }
8584
8585        psli->slistat.mbox_cmd++;
8586        evtctr = psli->slistat.mbox_event;
8587
8588        /* next set own bit for the adapter and copy over command word */
8589        mbx->mbxOwner = OWN_CHIP;
8590
8591        if (psli->sli_flag & LPFC_SLI_ACTIVE) {
8592                /* Populate mbox extension offset word. */
8593                if (pmbox->in_ext_byte_len || pmbox->out_ext_byte_len) {
8594                        *(((uint32_t *)mbx) + pmbox->mbox_offset_word)
8595                                = (uint8_t *)phba->mbox_ext
8596                                  - (uint8_t *)phba->mbox;
8597                }
8598
8599                /* Copy the mailbox extension data */
8600                if (pmbox->in_ext_byte_len && pmbox->ctx_buf) {
8601                        lpfc_sli_pcimem_bcopy(pmbox->ctx_buf,
8602                                              (uint8_t *)phba->mbox_ext,
8603                                              pmbox->in_ext_byte_len);
8604                }
8605                /* Copy command data to host SLIM area */
8606                lpfc_sli_pcimem_bcopy(mbx, phba->mbox, MAILBOX_CMD_SIZE);
8607        } else {
8608                /* Populate mbox extension offset word. */
8609                if (pmbox->in_ext_byte_len || pmbox->out_ext_byte_len)
8610                        *(((uint32_t *)mbx) + pmbox->mbox_offset_word)
8611                                = MAILBOX_HBA_EXT_OFFSET;
8612
8613                /* Copy the mailbox extension data */
8614                if (pmbox->in_ext_byte_len && pmbox->ctx_buf)
8615                        lpfc_memcpy_to_slim(phba->MBslimaddr +
8616                                MAILBOX_HBA_EXT_OFFSET,
8617                                pmbox->ctx_buf, pmbox->in_ext_byte_len);
8618
8619                if (mbx->mbxCommand == MBX_CONFIG_PORT)
8620                        /* copy command data into host mbox for cmpl */
8621                        lpfc_sli_pcimem_bcopy(mbx, phba->mbox,
8622                                              MAILBOX_CMD_SIZE);
8623
8624                /* First copy mbox command data to HBA SLIM, skip past first
8625                   word */
8626                to_slim = phba->MBslimaddr + sizeof(uint32_t);
8627                lpfc_memcpy_to_slim(to_slim, &mbx->un.varWords[0],
8628                            MAILBOX_CMD_SIZE - sizeof(uint32_t));
8629
8630                /* Next copy over first word, with mbxOwner set */
8631                ldata = *((uint32_t *)mbx);
8632                to_slim = phba->MBslimaddr;
8633                writel(ldata, to_slim);
8634                readl(to_slim); /* flush */
8635
8636                if (mbx->mbxCommand == MBX_CONFIG_PORT)
8637                        /* switch over to host mailbox */
8638                        psli->sli_flag |= LPFC_SLI_ACTIVE;
8639        }
8640
8641        wmb();
8642
8643        switch (flag) {
8644        case MBX_NOWAIT:
8645                /* Set up reference to mailbox command */
8646                psli->mbox_active = pmbox;
8647                /* Interrupt board to do it */
8648                writel(CA_MBATT, phba->CAregaddr);
8649                readl(phba->CAregaddr); /* flush */
8650                /* Don't wait for it to finish, just return */
8651                break;
8652
8653        case MBX_POLL:
8654                /* Set up null reference to mailbox command */
8655                psli->mbox_active = NULL;
8656                /* Interrupt board to do it */
8657                writel(CA_MBATT, phba->CAregaddr);
8658                readl(phba->CAregaddr); /* flush */
8659
8660                if (psli->sli_flag & LPFC_SLI_ACTIVE) {
8661                        /* First read mbox status word */
8662                        word0 = *((uint32_t *)phba->mbox);
8663                        word0 = le32_to_cpu(word0);
8664                } else {
8665                        /* First read mbox status word */
8666                        if (lpfc_readl(phba->MBslimaddr, &word0)) {
8667                                spin_unlock_irqrestore(&phba->hbalock,
8668                                                       drvr_flag);
8669                                goto out_not_finished;
8670                        }
8671                }
8672
8673                /* Read the HBA Host Attention Register */
8674                if (lpfc_readl(phba->HAregaddr, &ha_copy)) {
8675                        spin_unlock_irqrestore(&phba->hbalock,
8676                                                       drvr_flag);
8677                        goto out_not_finished;
8678                }
8679                timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, pmbox) *
8680                                                        1000) + jiffies;
8681                i = 0;
8682                /* Wait for command to complete */
8683                while (((word0 & OWN_CHIP) == OWN_CHIP) ||
8684                       (!(ha_copy & HA_MBATT) &&
8685                        (phba->link_state > LPFC_WARM_START))) {
8686                        if (time_after(jiffies, timeout)) {
8687                                psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
8688                                spin_unlock_irqrestore(&phba->hbalock,
8689                                                       drvr_flag);
8690                                goto out_not_finished;
8691                        }
8692
8693                        /* Check if we took a mbox interrupt while we were
8694                           polling */
8695                        if (((word0 & OWN_CHIP) != OWN_CHIP)
8696                            && (evtctr != psli->slistat.mbox_event))
8697                                break;
8698
8699                        if (i++ > 10) {
8700                                spin_unlock_irqrestore(&phba->hbalock,
8701                                                       drvr_flag);
8702                                msleep(1);
8703                                spin_lock_irqsave(&phba->hbalock, drvr_flag);
8704                        }
8705
8706                        if (psli->sli_flag & LPFC_SLI_ACTIVE) {
8707                                /* First copy command data */
8708                                word0 = *((uint32_t *)phba->mbox);
8709                                word0 = le32_to_cpu(word0);
8710                                if (mbx->mbxCommand == MBX_CONFIG_PORT) {
8711                                        MAILBOX_t *slimmb;
8712                                        uint32_t slimword0;
8713                                        /* Check real SLIM for any errors */
8714                                        slimword0 = readl(phba->MBslimaddr);
8715                                        slimmb = (MAILBOX_t *)&slimword0;
8716                                        if (((slimword0 & OWN_CHIP) != OWN_CHIP)
8717                                            && slimmb->mbxStatus) {
8718                                                psli->sli_flag &=
8719                                                    ~LPFC_SLI_ACTIVE;
8720                                                word0 = slimword0;
8721                                        }
8722                                }
8723                        } else {
8724                                /* First copy command data */
8725                                word0 = readl(phba->MBslimaddr);
8726                        }
8727                        /* Read the HBA Host Attention Register */
8728                        if (lpfc_readl(phba->HAregaddr, &ha_copy)) {
8729                                spin_unlock_irqrestore(&phba->hbalock,
8730                                                       drvr_flag);
8731                                goto out_not_finished;
8732                        }
8733                }
8734
8735                if (psli->sli_flag & LPFC_SLI_ACTIVE) {
8736                        /* copy results back to user */
8737                        lpfc_sli_pcimem_bcopy(phba->mbox, mbx,
8738                                                MAILBOX_CMD_SIZE);
8739                        /* Copy the mailbox extension data */
8740                        if (pmbox->out_ext_byte_len && pmbox->ctx_buf) {
8741                                lpfc_sli_pcimem_bcopy(phba->mbox_ext,
8742                                                      pmbox->ctx_buf,
8743                                                      pmbox->out_ext_byte_len);
8744                        }
8745                } else {
8746                        /* First copy command data */
8747                        lpfc_memcpy_from_slim(mbx, phba->MBslimaddr,
8748                                                MAILBOX_CMD_SIZE);
8749                        /* Copy the mailbox extension data */
8750                        if (pmbox->out_ext_byte_len && pmbox->ctx_buf) {
8751                                lpfc_memcpy_from_slim(
8752                                        pmbox->ctx_buf,
8753                                        phba->MBslimaddr +
8754                                        MAILBOX_HBA_EXT_OFFSET,
8755                                        pmbox->out_ext_byte_len);
8756                        }
8757                }
8758
8759                writel(HA_MBATT, phba->HAregaddr);
8760                readl(phba->HAregaddr); /* flush */
8761
8762                psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
8763                status = mbx->mbxStatus;
8764        }
8765
8766        spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
8767        return status;
8768
8769out_not_finished:
8770        if (processing_queue) {
8771                pmbox->u.mb.mbxStatus = MBX_NOT_FINISHED;
8772                lpfc_mbox_cmpl_put(phba, pmbox);
8773        }
8774        return MBX_NOT_FINISHED;
8775}
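
/*
 * Hypothetical caller sketch (illustration only, not driver code): per the
 * ownership rules above, a no-wait submission leaves the mailbox with the
 * SLI layer on MBX_BUSY or MBX_SUCCESS, so a caller frees it only on the
 * other return codes.  my_mbox_cmpl is an assumed completion handler:
 *
 *	mbox->mbox_cmpl = my_mbox_cmpl;
 *	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
 *	if (rc != MBX_BUSY && rc != MBX_SUCCESS)
 *		mempool_free(mbox, phba->mbox_mem_pool);
 */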
8776
8777/**
8778 * lpfc_sli4_async_mbox_block - Block posting SLI4 asynchronous mailbox command
8779 * @phba: Pointer to HBA context object.
8780 *
8781 * The function blocks the posting of SLI4 asynchronous mailbox commands from
8782 * the driver internal pending mailbox queue. It will then try to wait out the
8783 * possible outstanding mailbox command before returning.
8784 *
8785 * Returns:
8786 *      0 - the outstanding mailbox command completed (or none was pending);
8787 *      1 - the wait for the outstanding mailbox command timed out.
8788 **/
8789static int
8790lpfc_sli4_async_mbox_block(struct lpfc_hba *phba)
8791{
8792        struct lpfc_sli *psli = &phba->sli;
8793        int rc = 0;
8794        unsigned long timeout = 0;
8795
8796        /* Mark the asynchronous mailbox command posting as blocked */
8797        spin_lock_irq(&phba->hbalock);
8798        psli->sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
8799        /* Determine how long we might wait for the active mailbox
8800         * command to be gracefully completed by firmware.
8801         */
8802        if (phba->sli.mbox_active)
8803                timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba,
8804                                                phba->sli.mbox_active) *
8805                                                1000) + jiffies;
8806        spin_unlock_irq(&phba->hbalock);
8807
8808        /* Make sure the mailbox is really active */
8809        if (timeout)
8810                lpfc_sli4_process_missed_mbox_completions(phba);
8811
8812        /* Wait for the outstanding mailbox command to complete */
8813        while (phba->sli.mbox_active) {
8814                /* Check active mailbox complete status every 2ms */
8815                msleep(2);
8816                if (time_after(jiffies, timeout)) {
8817                        /* Timeout; mark the outstanding cmd as not complete */
8818                        rc = 1;
8819                        break;
8820                }
8821        }
8822
8823        /* Could not cleanly block async mailbox commands; fail the request */
8824        if (rc) {
8825                spin_lock_irq(&phba->hbalock);
8826                psli->sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
8827                spin_unlock_irq(&phba->hbalock);
8828        }
8829        return rc;
8830}
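
/*
 * Typical pairing (see the synchronous path of lpfc_sli_issue_mbox_s4
 * below): async posting is blocked, the command is posted synchronously,
 * and posting is resumed afterwards (the unblock call is assumed to
 * follow the sync post in that path):
 *
 *	rc = lpfc_sli4_async_mbox_block(phba);
 *	if (!rc)
 *		rc = lpfc_sli4_post_sync_mbox(phba, mboxq);
 *	lpfc_sli4_async_mbox_unblock(phba);
 */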
8831
8832/**
8833 * lpfc_sli4_async_mbox_unblock - Unblock posting SLI4 async mailbox command
8834 * @phba: Pointer to HBA context object.
8835 *
8836 * The function unblocks and resumes posting of SLI4 asynchronous mailbox
8837 * commands from the driver internal pending mailbox queue. It makes sure
8838 * that there is no outstanding mailbox command before resuming posting
8839 * asynchronous mailbox commands. If, for any reason, there is an outstanding
8840 * mailbox command, it will try to wait it out before resuming asynchronous
8841 * mailbox command posting.
8842 **/
8843static void
8844lpfc_sli4_async_mbox_unblock(struct lpfc_hba *phba)
8845{
8846        struct lpfc_sli *psli = &phba->sli;
8847
8848        spin_lock_irq(&phba->hbalock);
8849        if (!(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
8850                /* Asynchronous mailbox posting is not blocked, do nothing */
8851                spin_unlock_irq(&phba->hbalock);
8852                return;
8853        }
8854
8855        /* The outstanding synchronous mailbox command is guaranteed to be
8856         * done, whether it succeeded or timed out; after a timeout the
8857         * outstanding command is always removed. So just unblock posting
8858         * of async mailbox commands and resume.
8859         */
8860        psli->sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
8861        spin_unlock_irq(&phba->hbalock);
8862
8863        /* wake up worker thread to post asynchronous mailbox command */
8864        lpfc_worker_wake_up(phba);
8865}
8866
8867/**
8868 * lpfc_sli4_wait_bmbx_ready - Wait for bootstrap mailbox register ready
8869 * @phba: Pointer to HBA context object.
8870 * @mboxq: Pointer to mailbox object.
8871 *
8872 * The function waits for the bootstrap mailbox register ready bit from
8873 * the port, up to the regular mailbox command timeout value.
8874 * Returns:
8875 *      0 - no timeout on waiting for bootstrap mailbox register ready.
8876 *      MBXERR_ERROR - wait for bootstrap mailbox register timed out.
8877 **/
8878static int
8879lpfc_sli4_wait_bmbx_ready(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
8880{
8881        uint32_t db_ready;
8882        unsigned long timeout;
8883        struct lpfc_register bmbx_reg;
8884
8885        timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, mboxq)
8886                                   * 1000) + jiffies;
8887
8888        do {
8889                bmbx_reg.word0 = readl(phba->sli4_hba.BMBXregaddr);
8890                db_ready = bf_get(lpfc_bmbx_rdy, &bmbx_reg);
8891                if (!db_ready)
8892                        mdelay(2);
8893
8894                if (time_after(jiffies, timeout))
8895                        return MBXERR_ERROR;
8896        } while (!db_ready);
8897
8898        return 0;
8899}
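
/*
 * Note: the bootstrap mailbox protocol polls this ready bit three times
 * per command: once before the mailbox is posted, and once after each
 * half of the bmbx DMA address is written to the doorbell register
 * (see lpfc_sli4_post_sync_mbox below).
 */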
8900
8901/**
8902 * lpfc_sli4_post_sync_mbox - Post an SLI4 mailbox to the bootstrap mailbox
8903 * @phba: Pointer to HBA context object.
8904 * @mboxq: Pointer to mailbox object.
8905 *
8906 * The function posts a mailbox to the port.  The mailbox is expected
8907 * to be completely filled in and ready for the port to operate on it.
8908 * This routine executes a synchronous completion operation on the
8909 * mailbox by polling for its completion.
8910 *
8911 * The caller must not be holding any locks when calling this routine.
8912 *
8913 * Returns:
8914 *      MBX_SUCCESS - mailbox posted successfully
8915 *      Any of the MBX error values.
8916 **/
8917static int
8918lpfc_sli4_post_sync_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
8919{
8920        int rc = MBX_SUCCESS;
8921        unsigned long iflag;
8922        uint32_t mcqe_status;
8923        uint32_t mbx_cmnd;
8924        struct lpfc_sli *psli = &phba->sli;
8925        struct lpfc_mqe *mb = &mboxq->u.mqe;
8926        struct lpfc_bmbx_create *mbox_rgn;
8927        struct dma_address *dma_address;
8928
8929        /*
8930         * Only one mailbox can be active to the bootstrap mailbox region
8931         * at a time and there is no queueing provided.
8932         */
8933        spin_lock_irqsave(&phba->hbalock, iflag);
8934        if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
8935                spin_unlock_irqrestore(&phba->hbalock, iflag);
8936                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8937                                "(%d):2532 Mailbox command x%x (x%x/x%x) "
8938                                "cannot issue Data: x%x x%x\n",
8939                                mboxq->vport ? mboxq->vport->vpi : 0,
8940                                mboxq->u.mb.mbxCommand,
8941                                lpfc_sli_config_mbox_subsys_get(phba, mboxq),
8942                                lpfc_sli_config_mbox_opcode_get(phba, mboxq),
8943                                psli->sli_flag, MBX_POLL);
8944                return MBXERR_ERROR;
8945        }
8946        /* The driver grabs the token and owns it until release */
8947        psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;
8948        phba->sli.mbox_active = mboxq;
8949        spin_unlock_irqrestore(&phba->hbalock, iflag);
8950
8951        /* wait for bootstrap mbox register readiness */
8952        rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq);
8953        if (rc)
8954                goto exit;
8955        /*
8956         * Initialize the bootstrap memory region to avoid stale data areas
8957         * in the mailbox post.  Then copy the caller's mailbox contents to
8958         * the bmbx mailbox region.
8959         */
8960        mbx_cmnd = bf_get(lpfc_mqe_command, mb);
8961        memset(phba->sli4_hba.bmbx.avirt, 0, sizeof(struct lpfc_bmbx_create));
8962        lpfc_sli4_pcimem_bcopy(mb, phba->sli4_hba.bmbx.avirt,
8963                               sizeof(struct lpfc_mqe));
8964
8965        /* Post the high mailbox dma address to the port and wait for ready. */
8966        dma_address = &phba->sli4_hba.bmbx.dma_address;
8967        writel(dma_address->addr_hi, phba->sli4_hba.BMBXregaddr);
8968
8969        /* wait for bootstrap mbox register ready after hi-address write */
8970        rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq);
8971        if (rc)
8972                goto exit;
8973
8974        /* Post the low mailbox dma address to the port. */
8975        writel(dma_address->addr_lo, phba->sli4_hba.BMBXregaddr);
8976
8977        /* wait for bootstrap mbox register ready after low-address write */
8978        rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq);
8979        if (rc)
8980                goto exit;
8981
8982        /*
8983         * Read the CQ to ensure the mailbox has completed.
8984         * If so, update the mailbox status so that the upper layers
8985         * can complete the request normally.
8986         */
8987        lpfc_sli4_pcimem_bcopy(phba->sli4_hba.bmbx.avirt, mb,
8988                               sizeof(struct lpfc_mqe));
8989        mbox_rgn = (struct lpfc_bmbx_create *) phba->sli4_hba.bmbx.avirt;
8990        lpfc_sli4_pcimem_bcopy(&mbox_rgn->mcqe, &mboxq->mcqe,
8991                               sizeof(struct lpfc_mcqe));
8992        mcqe_status = bf_get(lpfc_mcqe_status, &mbox_rgn->mcqe);
8993        /*
8994         * When the CQE status indicates a failure and the mailbox status
8995         * indicates success then copy the CQE status into the mailbox status
8996         * (and prefix it with x4000).
8997         */
8998        if (mcqe_status != MB_CQE_STATUS_SUCCESS) {
8999                if (bf_get(lpfc_mqe_status, mb) == MBX_SUCCESS)
9000                        bf_set(lpfc_mqe_status, mb,
9001                               (LPFC_MBX_ERROR_RANGE | mcqe_status));
9002                rc = MBXERR_ERROR;
9003        } else
9004                lpfc_sli4_swap_str(phba, mboxq);
9005
9006        lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
9007                        "(%d):0356 Mailbox cmd x%x (x%x/x%x) Status x%x "
9008                        "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x x%x x%x"
9009                        " x%x x%x CQ: x%x x%x x%x x%x\n",
9010                        mboxq->vport ? mboxq->vport->vpi : 0, mbx_cmnd,
9011                        lpfc_sli_config_mbox_subsys_get(phba, mboxq),
9012                        lpfc_sli_config_mbox_opcode_get(phba, mboxq),
9013                        bf_get(lpfc_mqe_status, mb),
9014                        mb->un.mb_words[0], mb->un.mb_words[1],
9015                        mb->un.mb_words[2], mb->un.mb_words[3],
9016                        mb->un.mb_words[4], mb->un.mb_words[5],
9017                        mb->un.mb_words[6], mb->un.mb_words[7],
9018                        mb->un.mb_words[8], mb->un.mb_words[9],
9019                        mb->un.mb_words[10], mb->un.mb_words[11],
9020                        mb->un.mb_words[12], mboxq->mcqe.word0,
9021                        mboxq->mcqe.mcqe_tag0,  mboxq->mcqe.mcqe_tag1,
9022                        mboxq->mcqe.trailer);
9023exit:
9024        /* We are holding the token; release it under the lock */
9025        spin_lock_irqsave(&phba->hbalock, iflag);
9026        psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
9027        phba->sli.mbox_active = NULL;
9028        spin_unlock_irqrestore(&phba->hbalock, iflag);
9029        return rc;
9030}
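
/*
 * Bootstrap mailbox flow summary (illustrative): the routine above is the
 * polled path used while interrupts are not enabled.  The MQE is copied
 * into the bmbx region, the region's DMA address is posted in two doorbell
 * writes (high half, then low half), and the completion MCQE is then read
 * back out of the same region to derive the final status.
 */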
9031
9032/**
9033 * lpfc_sli_issue_mbox_s4 - Issue an SLI4 mailbox command to firmware
9034 * @phba: Pointer to HBA context object.
9035 * @mboxq: Pointer to mailbox object.
9036 * @flag: Flag indicating how the mailbox needs to be processed.
9037 *
9038 * This function is called by discovery code and HBA management code to submit
9039 * a mailbox command to firmware with SLI-4 interface spec.
9040 *
9041 * Whatever the return code, the caller owns the mailbox command after the
9042 * function returns.
9043 **/
9044static int
9045lpfc_sli_issue_mbox_s4(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
9046                       uint32_t flag)
9047{
9048        struct lpfc_sli *psli = &phba->sli;
9049        unsigned long iflags;
9050        int rc;
9051
9052        /* dump the mailbox command being issued, if idiag dump is set up */
9053        lpfc_idiag_mbxacc_dump_issue_mbox(phba, &mboxq->u.mb);
9054
9055        rc = lpfc_mbox_dev_check(phba);
9056        if (unlikely(rc)) {
9057                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9058                                "(%d):2544 Mailbox command x%x (x%x/x%x) "
9059                                "cannot issue Data: x%x x%x\n",
9060                                mboxq->vport ? mboxq->vport->vpi : 0,
9061                                mboxq->u.mb.mbxCommand,
9062                                lpfc_sli_config_mbox_subsys_get(phba, mboxq),
9063                                lpfc_sli_config_mbox_opcode_get(phba, mboxq),
9064                                psli->sli_flag, flag);
9065                goto out_not_finished;
9066        }
9067
9068        /* Detect polling mode and jump to a handler */
9069        if (!phba->sli4_hba.intr_enable) {
9070                if (flag == MBX_POLL)
9071                        rc = lpfc_sli4_post_sync_mbox(phba, mboxq);
9072                else
9073                        rc = -EIO;
9074                if (rc != MBX_SUCCESS)
9075                        lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
9076                                        "(%d):2541 Mailbox command x%x "
9077                                        "(x%x/x%x) failure: "
9078                                        "mqe_sta: x%x mcqe_sta: x%x/x%x "
9079                                        "Data: x%x x%x\n",
9080                                        mboxq->vport ? mboxq->vport->vpi : 0,
9081                                        mboxq->u.mb.mbxCommand,
9082                                        lpfc_sli_config_mbox_subsys_get(phba,
9083                                                                        mboxq),
9084                                        lpfc_sli_config_mbox_opcode_get(phba,
9085                                                                        mboxq),
9086                                        bf_get(lpfc_mqe_status, &mboxq->u.mqe),
9087                                        bf_get(lpfc_mcqe_status, &mboxq->mcqe),
9088                                        bf_get(lpfc_mcqe_ext_status,
9089                                               &mboxq->mcqe),
9090                                        psli->sli_flag, flag);
9091                return rc;
9092        } else if (flag == MBX_POLL) {
9093                lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
9094                                "(%d):2542 Try to issue mailbox command "
9095                                "x%x (x%x/x%x) synchronously ahead of async "
9096                                "mailbox command queue: x%x x%x\n",
9097                                mboxq->vport ? mboxq->vport->vpi : 0,
9098                                mboxq->u.mb.mbxCommand,
9099                                lpfc_sli_config_mbox_subsys_get(phba, mboxq),
9100                                lpfc_sli_config_mbox_opcode_get(phba, mboxq),
9101                                psli->sli_flag, flag);
9102                /* Try to block the asynchronous mailbox posting */
9103                rc = lpfc_sli4_async_mbox_block(phba);
9104                if (!rc) {
9105                        /* Successfully blocked, now issue sync mbox cmd */
9106                        rc = lpfc_sli4_post_sync_mbox(phba, mboxq);
9107                        if (rc != MBX_SUCCESS)
9108                                lpfc_printf_log(phba, KERN_WARNING,
9109                                        LOG_MBOX | LOG_SLI,
9110                                        "(%d):2597 Sync Mailbox command "
9111                                        "x%x (x%x/x%x) failure: "
9112                                        "mqe_sta: x%x mcqe_sta: x%x/x%x "
9113                                        "Data: x%x x%x\n",
9114                                        mboxq->vport ? mboxq->vport->vpi : 0,
9115                                        mboxq->u.mb.mbxCommand,
9116                                        lpfc_sli_config_mbox_subsys_get(phba,
9117                                                                        mboxq),
9118                                        lpfc_sli_config_mbox_opcode_get(phba,
9119                                                                        mboxq),
9120                                        bf_get(lpfc_mqe_status, &mboxq->u.mqe),
9121                                        bf_get(lpfc_mcqe_status, &mboxq->mcqe),
9122                                        bf_get(lpfc_mcqe_ext_status,
9123                                               &mboxq->mcqe),
9124                                        psli->sli_flag, flag);
9125                        /* Unblock the async mailbox posting afterward */
9126                        lpfc_sli4_async_mbox_unblock(phba);
9127                }
9128                return rc;
9129        }
9130
9131        /* Now handle the interrupt-mode asynchronous mailbox command */
9132        rc = lpfc_mbox_cmd_check(phba, mboxq);
9133        if (rc) {
9134                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9135                                "(%d):2543 Mailbox command x%x (x%x/x%x) "
9136                                "cannot issue Data: x%x x%x\n",
9137                                mboxq->vport ? mboxq->vport->vpi : 0,
9138                                mboxq->u.mb.mbxCommand,
9139                                lpfc_sli_config_mbox_subsys_get(phba, mboxq),
9140                                lpfc_sli_config_mbox_opcode_get(phba, mboxq),
9141                                psli->sli_flag, flag);
9142                goto out_not_finished;
9143        }
9144
9145        /* Put the mailbox command into the driver's internal FIFO */
9146        psli->slistat.mbox_busy++;
9147        spin_lock_irqsave(&phba->hbalock, iflags);
9148        lpfc_mbox_put(phba, mboxq);
9149        spin_unlock_irqrestore(&phba->hbalock, iflags);
9150        lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
9151                        "(%d):0354 Mbox cmd issue - Enqueue Data: "
9152                        "x%x (x%x/x%x) x%x x%x x%x\n",
9153                        mboxq->vport ? mboxq->vport->vpi : 0xffffff,
9154                        bf_get(lpfc_mqe_command, &mboxq->u.mqe),
9155                        lpfc_sli_config_mbox_subsys_get(phba, mboxq),
9156                        lpfc_sli_config_mbox_opcode_get(phba, mboxq),
9157                        phba->pport->port_state,
9158                        psli->sli_flag, MBX_NOWAIT);
9159        /* Wake up the worker thread to post the mailbox command from the head */
9160        lpfc_worker_wake_up(phba);
9161
9162        return MBX_BUSY;
9163
9164out_not_finished:
9165        return MBX_NOT_FINISHED;
9166}
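
/*
 * Usage sketch (hypothetical caller, not part of this file): issuing a
 * mailbox command in polling mode.  Because the caller owns the mailbox
 * once lpfc_sli_issue_mbox() returns, it must free it on every path.
 * lpfc_read_rev() and phba->mbox_mem_pool are assumed here to behave as
 * they do elsewhere in the driver.
 */
static int example_poll_read_rev(struct lpfc_hba *phba)
{
        LPFC_MBOXQ_t *mboxq;
        int rc;

        mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
        if (!mboxq)
                return -ENOMEM;

        lpfc_read_rev(phba, mboxq);
        rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);

        mempool_free(mboxq, phba->mbox_mem_pool);
        return (rc == MBX_SUCCESS) ? 0 : -EIO;
}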
9167
9168/**
9169 * lpfc_sli4_post_async_mbox - Post an SLI4 mailbox command to device
9170 * @phba: Pointer to HBA context object.
9171 *
9172 * This function is called by the worker thread to send a mailbox command
9173 * to the SLI4 HBA firmware.
9174 *
9175 **/
9176int
9177lpfc_sli4_post_async_mbox(struct lpfc_hba *phba)
9178{
9179        struct lpfc_sli *psli = &phba->sli;
9180        LPFC_MBOXQ_t *mboxq;
9181        int rc = MBX_SUCCESS;
9182        unsigned long iflags;
9183        struct lpfc_mqe *mqe;
9184        uint32_t mbx_cmnd;
9185
9186        /* Check interrupt mode before posting the async mailbox command */
9187        if (unlikely(!phba->sli4_hba.intr_enable))
9188                return MBX_NOT_FINISHED;
9189
9190        /* Check for mailbox command service token */
9191        spin_lock_irqsave(&phba->hbalock, iflags);
9192        if (unlikely(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
9193                spin_unlock_irqrestore(&phba->hbalock, iflags);
9194                return MBX_NOT_FINISHED;
9195        }
9196        if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
9197                spin_unlock_irqrestore(&phba->hbalock, iflags);
9198                return MBX_NOT_FINISHED;
9199        }
9200        if (unlikely(phba->sli.mbox_active)) {
9201                spin_unlock_irqrestore(&phba->hbalock, iflags);
9202                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9203                                "0384 There is pending active mailbox cmd\n");
9204                return MBX_NOT_FINISHED;
9205        }
9206        /* Take the mailbox command service token */
9207        psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;
9208
9209        /* Get the next mailbox command from head of queue */
9210        mboxq = lpfc_mbox_get(phba);
9211
9212        /* If no more mailbox commands are waiting to be posted, we're done */
9213        if (!mboxq) {
9214                psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
9215                spin_unlock_irqrestore(&phba->hbalock, iflags);
9216                return MBX_SUCCESS;
9217        }
9218        phba->sli.mbox_active = mboxq;
9219        spin_unlock_irqrestore(&phba->hbalock, iflags);
9220
9221        /* Check device readiness for posting mailbox command */
9222        rc = lpfc_mbox_dev_check(phba);
9223        if (unlikely(rc))
9224                /* Driver clean routine will clean up pending mailbox */
9225                goto out_not_finished;
9226
9227        /* Prepare the mbox command to be posted */
9228        mqe = &mboxq->u.mqe;
9229        mbx_cmnd = bf_get(lpfc_mqe_command, mqe);
9230
9231        /* Start timer for the mbox_tmo and log some mailbox post messages */
9232        mod_timer(&psli->mbox_tmo, (jiffies +
9233                  msecs_to_jiffies(1000 * lpfc_mbox_tmo_val(phba, mboxq))));
9234
9235        lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
9236                        "(%d):0355 Mailbox cmd x%x (x%x/x%x) issue Data: "
9237                        "x%x x%x\n",
9238                        mboxq->vport ? mboxq->vport->vpi : 0, mbx_cmnd,
9239                        lpfc_sli_config_mbox_subsys_get(phba, mboxq),
9240                        lpfc_sli_config_mbox_opcode_get(phba, mboxq),
9241                        phba->pport->port_state, psli->sli_flag);
9242
9243        if (mbx_cmnd != MBX_HEARTBEAT) {
9244                if (mboxq->vport) {
9245                        lpfc_debugfs_disc_trc(mboxq->vport,
9246                                LPFC_DISC_TRC_MBOX_VPORT,
9247                                "MBOX Send vport: cmd:x%x mb:x%x x%x",
9248                                mbx_cmnd, mqe->un.mb_words[0],
9249                                mqe->un.mb_words[1]);
9250                } else {
9251                        lpfc_debugfs_disc_trc(phba->pport,
9252                                LPFC_DISC_TRC_MBOX,
9253                                "MBOX Send: cmd:x%x mb:x%x x%x",
9254                                mbx_cmnd, mqe->un.mb_words[0],
9255                                mqe->un.mb_words[1]);
9256                }
9257        }
9258        psli->slistat.mbox_cmd++;
9259
9260        /* Post the mailbox command to the port */
9261        rc = lpfc_sli4_mq_put(phba->sli4_hba.mbx_wq, mqe);
9262        if (rc != MBX_SUCCESS) {
9263                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9264                                "(%d):2533 Mailbox command x%x (x%x/x%x) "
9265                                "cannot issue Data: x%x x%x\n",
9266                                mboxq->vport ? mboxq->vport->vpi : 0,
9267                                mboxq->u.mb.mbxCommand,
9268                                lpfc_sli_config_mbox_subsys_get(phba, mboxq),
9269                                lpfc_sli_config_mbox_opcode_get(phba, mboxq),
9270                                psli->sli_flag, MBX_NOWAIT);
9271                goto out_not_finished;
9272        }
9273
9274        return rc;
9275
9276out_not_finished:
9277        spin_lock_irqsave(&phba->hbalock, iflags);
9278        if (phba->sli.mbox_active) {
9279                mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED;
9280                __lpfc_mbox_cmpl_put(phba, mboxq);
9281                /* Release the token */
9282                psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
9283                phba->sli.mbox_active = NULL;
9284        }
9285        spin_unlock_irqrestore(&phba->hbalock, iflags);
9286
9287        return MBX_NOT_FINISHED;
9288}
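
/*
 * Sketch of the single-flight token discipline used above (illustrative
 * only, not driver code).  LPFC_SLI_MBOX_ACTIVE acts as a test-and-set
 * token guarded by hbalock: whoever sets it owns the mailbox hardware
 * until completion, and every other path backs off with MBX_NOT_FINISHED.
 */
static bool example_try_take_mbox_token(struct lpfc_hba *phba)
{
        struct lpfc_sli *psli = &phba->sli;
        unsigned long iflags;
        bool taken = false;

        spin_lock_irqsave(&phba->hbalock, iflags);
        if (!(psli->sli_flag &
              (LPFC_SLI_MBOX_ACTIVE | LPFC_SLI_ASYNC_MBX_BLK))) {
                psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;
                taken = true;
        }
        spin_unlock_irqrestore(&phba->hbalock, iflags);
        return taken;
}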
9289
9290/**
9291 * lpfc_sli_issue_mbox - Wrapper func for issuing mailbox command
9292 * @phba: Pointer to HBA context object.
9293 * @pmbox: Pointer to mailbox object.
9294 * @flag: Flag indicating how the mailbox needs to be processed.
9295 *
9296 * This routine wraps the actual SLI3 or SLI4 mailbox issuing routine via
9297 * the API jump table function pointer in the lpfc_hba struct.
9298 *
9299 * Return codes: the caller owns the mailbox command after the function
9300 * returns.
9301 **/
9302int
9303lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
9304{
9305        return phba->lpfc_sli_issue_mbox(phba, pmbox, flag);
9306}
9307
9308/**
9309 * lpfc_mbox_api_table_setup - Set up mbox api function jump table
9310 * @phba: The hba struct for which this call is being executed.
9311 * @dev_grp: The HBA PCI-Device group number.
9312 *
9313 * This routine sets up the mbox interface API function jump table in @phba
9314 * struct.
9315 * Returns: 0 - success, -ENODEV - failure.
9316 **/
9317int
9318lpfc_mbox_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
9319{
9320
9321        switch (dev_grp) {
9322        case LPFC_PCI_DEV_LP:
9323                phba->lpfc_sli_issue_mbox = lpfc_sli_issue_mbox_s3;
9324                phba->lpfc_sli_handle_slow_ring_event =
9325                                lpfc_sli_handle_slow_ring_event_s3;
9326                phba->lpfc_sli_hbq_to_firmware = lpfc_sli_hbq_to_firmware_s3;
9327                phba->lpfc_sli_brdrestart = lpfc_sli_brdrestart_s3;
9328                phba->lpfc_sli_brdready = lpfc_sli_brdready_s3;
9329                break;
9330        case LPFC_PCI_DEV_OC:
9331                phba->lpfc_sli_issue_mbox = lpfc_sli_issue_mbox_s4;
9332                phba->lpfc_sli_handle_slow_ring_event =
9333                                lpfc_sli_handle_slow_ring_event_s4;
9334                phba->lpfc_sli_hbq_to_firmware = lpfc_sli_hbq_to_firmware_s4;
9335                phba->lpfc_sli_brdrestart = lpfc_sli_brdrestart_s4;
9336                phba->lpfc_sli_brdready = lpfc_sli_brdready_s4;
9337                break;
9338        default:
9339                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9340                                "1420 Invalid HBA PCI-device group: 0x%x\n",
9341                                dev_grp);
9342                return -ENODEV;
9343        }
9344        return 0;
9345}
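
/*
 * Usage sketch (illustrative, not driver code): once the jump table is
 * populated for the device group, callers dispatch through the lpfc_hba
 * function pointers without caring whether the port is SLI-3
 * (LPFC_PCI_DEV_LP) or SLI-4 (LPFC_PCI_DEV_OC).
 */
static int example_setup_and_issue(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox)
{
        int rc;

        rc = lpfc_mbox_api_table_setup(phba, LPFC_PCI_DEV_OC);
        if (rc)
                return rc;
        /* Resolves to lpfc_sli_issue_mbox_s4() on an SLI-4 port and
         * returns an MBX_* completion code.
         */
        return lpfc_sli_issue_mbox(phba, pmbox, MBX_NOWAIT);
}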
9346
9347/**
9348 * __lpfc_sli_ringtx_put - Add an iocb to the txq
9349 * @phba: Pointer to HBA context object.
9350 * @pring: Pointer to driver SLI ring object.
9351 * @piocb: Pointer to the command iocb being added to the txq.
9352 *
9353 * This function is called with hbalock held for SLI3 ports or
9354 * the ring lock held for SLI4 ports to add a command
9355 * iocb to the txq when the SLI layer cannot submit the command iocb
9356 * to the ring.
9357 **/
9358void
9359__lpfc_sli_ringtx_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
9360                    struct lpfc_iocbq *piocb)
9361{
9362        if (phba->sli_rev == LPFC_SLI_REV4)
9363                lockdep_assert_held(&pring->ring_lock);
9364        else
9365                lockdep_assert_held(&phba->hbalock);
9366        /* Insert the caller's iocb in the txq tail for later processing. */
9367        list_add_tail(&piocb->list, &pring->txq);
9368}
9369
9370/**
9371 * lpfc_sli_next_iocb - Get the next iocb in the txq
9372 * @phba: Pointer to HBA context object.
9373 * @pring: Pointer to driver SLI ring object.
9374 * @piocb: Pointer to address of newly added command iocb.
9375 *
9376 * This function is called with hbalock held before a new
9377 * iocb is submitted to the firmware. It drains the txq so
9378 * that iocbs already queued there are flushed to the
9379 * firmware ahead of any newly submitted iocb.
9380 * If there are iocbs in the txq which need to be submitted
9381 * to the firmware, lpfc_sli_next_iocb dequeues and returns
9382 * the first element of the txq.
9383 * If the txq is empty, the function returns *piocb and sets
9384 * *piocb to NULL. The caller checks *piocb to determine
9385 * whether more commands remain to be issued.
9386 **/
9387static struct lpfc_iocbq *
9388lpfc_sli_next_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
9389                   struct lpfc_iocbq **piocb)
9390{
9391        struct lpfc_iocbq * nextiocb;
9392
9393        lockdep_assert_held(&phba->hbalock);
9394
9395        nextiocb = lpfc_sli_ringtx_get(phba, pring);
9396        if (!nextiocb) {
9397                nextiocb = *piocb;
9398                *piocb = NULL;
9399        }
9400
9401        return nextiocb;
9402}
9403
9404/**
9405 * __lpfc_sli_issue_iocb_s3 - SLI3 device lockless ver of lpfc_sli_issue_iocb
9406 * @phba: Pointer to HBA context object.
9407 * @ring_number: SLI ring number to issue iocb on.
9408 * @piocb: Pointer to command iocb.
9409 * @flag: Flag indicating if this command can be put into txq.
9410 *
9411 * __lpfc_sli_issue_iocb_s3 is used by other functions in the driver to issue
9412 * an iocb command to an HBA with SLI-3 interface spec. If the PCI slot is
9413 * recovering from error state, if HBA is resetting or if LPFC_STOP_IOCB_EVENT
9414 * flag is turned on, the function returns IOCB_ERROR. When the link is down,
9415 * this function allows only iocbs for posting buffers. This function finds
9416 * next available slot in the command ring and posts the command to the
9417 * available slot and writes the port attention register to request HBA start
9418 * processing new iocb. If there is no slot available in the ring and
9419 * flag & SLI_IOCB_RET_IOCB is set, the new iocb is added to the txq, otherwise
9420 * the function returns IOCB_BUSY.
9421 *
9422 * This function is called with hbalock held. The function returns success
9423 * after it successfully submits the iocb to the firmware or after adding it
9424 * to the txq.
9425 **/
9426static int
9427__lpfc_sli_issue_iocb_s3(struct lpfc_hba *phba, uint32_t ring_number,
9428                    struct lpfc_iocbq *piocb, uint32_t flag)
9429{
9430        struct lpfc_iocbq *nextiocb;
9431        IOCB_t *iocb;
9432        struct lpfc_sli_ring *pring = &phba->sli.sli3_ring[ring_number];
9433
9434        lockdep_assert_held(&phba->hbalock);
9435
9436        if (piocb->iocb_cmpl && (!piocb->vport) &&
9437           (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) &&
9438           (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN)) {
9439                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9440                                "1807 IOCB x%x failed. No vport\n",
9441                                piocb->iocb.ulpCommand);
9442                dump_stack();
9443                return IOCB_ERROR;
9444        }
9445
9446
9447        /* If the PCI channel is in offline state, do not post iocbs. */
9448        if (unlikely(pci_channel_offline(phba->pcidev)))
9449                return IOCB_ERROR;
9450
9451        /* If HBA has a deferred error attention, fail the iocb. */
9452        if (unlikely(phba->hba_flag & DEFER_ERATT))
9453                return IOCB_ERROR;
9454
9455        /*
9456         * We should never get an IOCB if we are in a < LINK_DOWN state
9457         */
9458        if (unlikely(phba->link_state < LPFC_LINK_DOWN))
9459                return IOCB_ERROR;
9460
9461        /*
9462         * Check to see if we are blocking IOCB processing because of an
9463         * outstanding event.
9464         */
9465        if (unlikely(pring->flag & LPFC_STOP_IOCB_EVENT))
9466                goto iocb_busy;
9467
9468        if (unlikely(phba->link_state == LPFC_LINK_DOWN)) {
9469                /*
9470                 * Only CREATE_XRI, CLOSE_XRI, and QUE_RING_BUF
9471                 * can be issued if the link is not up.
9472                 */
9473                switch (piocb->iocb.ulpCommand) {
9474                case CMD_GEN_REQUEST64_CR:
9475                case CMD_GEN_REQUEST64_CX:
9476                        if (!(phba->sli.sli_flag & LPFC_MENLO_MAINT) ||
9477                                (piocb->iocb.un.genreq64.w5.hcsw.Rctl !=
9478                                        FC_RCTL_DD_UNSOL_CMD) ||
9479                                (piocb->iocb.un.genreq64.w5.hcsw.Type !=
9480                                        MENLO_TRANSPORT_TYPE))
9481
9482                                goto iocb_busy;
9483                        break;
9484                case CMD_QUE_RING_BUF_CN:
9485                case CMD_QUE_RING_BUF64_CN:
9486                        /*
9487                         * For IOCBs, like QUE_RING_BUF, that have no rsp ring
9488                         * completion, iocb_cmpl MUST be 0.
9489                         */
9490                        if (piocb->iocb_cmpl)
9491                                piocb->iocb_cmpl = NULL;
9492                        fallthrough;
9493                case CMD_CREATE_XRI_CR:
9494                case CMD_CLOSE_XRI_CN:
9495                case CMD_CLOSE_XRI_CX:
9496                        break;
9497                default:
9498                        goto iocb_busy;
9499                }
9500
9501        /*
9502         * For FCP commands, we must be in a state where we can process link
9503         * attention events.
9504         */
9505        } else if (unlikely(pring->ringno == LPFC_FCP_RING &&
9506                            !(phba->sli.sli_flag & LPFC_PROCESS_LA))) {
9507                goto iocb_busy;
9508        }
9509
9510        while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) &&
9511               (nextiocb = lpfc_sli_next_iocb(phba, pring, &piocb)))
9512                lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb);
9513
9514        if (iocb)
9515                lpfc_sli_update_ring(phba, pring);
9516        else
9517                lpfc_sli_update_full_ring(phba, pring);
9518
9519        if (!piocb)
9520                return IOCB_SUCCESS;
9521
9522        goto out_busy;
9523
9524 iocb_busy:
9525        pring->stats.iocb_cmd_delay++;
9526
9527 out_busy:
9528
9529        if (!(flag & SLI_IOCB_RET_IOCB)) {
9530                __lpfc_sli_ringtx_put(phba, pring, piocb);
9531                return IOCB_SUCCESS;
9532        }
9533
9534        return IOCB_BUSY;
9535}
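
/*
 * Illustrative caller sketch (hypothetical, not driver code):
 * SLI_IOCB_RET_IOCB decides what a full ring means to the caller.
 * Without the flag the iocb is parked on the txq and IOCB_SUCCESS is
 * returned; with it the caller gets IOCB_BUSY back and retains
 * ownership of the iocb.
 */
static int example_issue_or_requeue(struct lpfc_hba *phba, uint32_t ring,
                                    struct lpfc_iocbq *piocb)
{
        int rc;

        /* hbalock must be held for the SLI-3 lockless variant */
        lockdep_assert_held(&phba->hbalock);
        rc = __lpfc_sli_issue_iocb_s3(phba, ring, piocb, SLI_IOCB_RET_IOCB);
        if (rc == IOCB_BUSY) {
                /* Caller still owns piocb: retry later or complete it
                 * with an error, depending on policy.
                 */
        }
        return rc;
}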
9536
9537/**
9538 * lpfc_sli4_bpl2sgl - Convert the bpl/bde to a sgl.
9539 * @phba: Pointer to HBA context object.
9540 * @piocbq: Pointer to command iocb.
9541 * @sglq: Pointer to the scatter gather queue object.
9542 *
9543 * This routine converts the bpl or bde that is in the IOCB
9544 * to a sgl list for the sli4 hardware. The physical address
9545 * of the bpl/bde is converted back to a virtual address.
9546 * If the IOCB contains a BPL then the list of BDE's is
9547 * converted to sli4_sge's. If the IOCB contains a single
9548 * BDE then it is converted to a single sli4_sge.
9549 * The IOCB is still in CPU endianness so the contents of
9550 * the bpl can be used without byte swapping.
9551 *
9552 * Returns valid XRI = Success, NO_XRI = Failure.
9553**/
9554static uint16_t
9555lpfc_sli4_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq,
9556                struct lpfc_sglq *sglq)
9557{
9558        uint16_t xritag = NO_XRI;
9559        struct ulp_bde64 *bpl = NULL;
9560        struct ulp_bde64 bde;
9561        struct sli4_sge *sgl  = NULL;
9562        struct lpfc_dmabuf *dmabuf;
9563        IOCB_t *icmd;
9564        int numBdes = 0;
9565        int i = 0;
9566        uint32_t offset = 0; /* accumulated offset in the sg request list */
9567        int inbound = 0; /* number of sg reply entries inbound from firmware */
9568
9569        if (!piocbq || !sglq)
9570                return xritag;
9571
9572        sgl  = (struct sli4_sge *)sglq->sgl;
9573        icmd = &piocbq->iocb;
9574        if (icmd->ulpCommand == CMD_XMIT_BLS_RSP64_CX)
9575                return sglq->sli4_xritag;
9576        if (icmd->un.genreq64.bdl.bdeFlags == BUFF_TYPE_BLP_64) {
9577                numBdes = icmd->un.genreq64.bdl.bdeSize /
9578                                sizeof(struct ulp_bde64);
9579                /* The addrHigh and addrLow fields within the IOCB
9580                 * have not been byteswapped yet so there is no
9581                 * need to swap them back.
9582                 */
9583                if (piocbq->context3)
9584                        dmabuf = (struct lpfc_dmabuf *)piocbq->context3;
9585                else
9586                        return xritag;
9587
9588                bpl  = (struct ulp_bde64 *)dmabuf->virt;
9589                if (!bpl)
9590                        return xritag;
9591
9592                for (i = 0; i < numBdes; i++) {
9593                        /* Should already be byte swapped. */
9594                        sgl->addr_hi = bpl->addrHigh;
9595                        sgl->addr_lo = bpl->addrLow;
9596
9597                        sgl->word2 = le32_to_cpu(sgl->word2);
9598                        if ((i+1) == numBdes)
9599                                bf_set(lpfc_sli4_sge_last, sgl, 1);
9600                        else
9601                                bf_set(lpfc_sli4_sge_last, sgl, 0);
9602                        /* swap the size field back to the cpu so we
9603                         * can assign it to the sgl.
9604                         */
9605                        bde.tus.w = le32_to_cpu(bpl->tus.w);
9606                        sgl->sge_len = cpu_to_le32(bde.tus.f.bdeSize);
9607                        /* The offsets in the sgl need to be accumulated
9608                         * separately for the request and reply lists.
9609                         * The request is always first, the reply follows.
9610                         */
9611                        if (piocbq->iocb.ulpCommand == CMD_GEN_REQUEST64_CR) {
9612                                /* add up the reply sg entries */
9613                                if (bpl->tus.f.bdeFlags == BUFF_TYPE_BDE_64I)
9614                                        inbound++;
9615                                /* first inbound? reset the offset */
9616                                if (inbound == 1)
9617                                        offset = 0;
9618                                bf_set(lpfc_sli4_sge_offset, sgl, offset);
9619                                bf_set(lpfc_sli4_sge_type, sgl,
9620                                        LPFC_SGE_TYPE_DATA);
9621                                offset += bde.tus.f.bdeSize;
9622                        }
9623                        sgl->word2 = cpu_to_le32(sgl->word2);
9624                        bpl++;
9625                        sgl++;
9626                }
9627        } else if (icmd->un.genreq64.bdl.bdeFlags == BUFF_TYPE_BDE_64) {
9628                        /* The addrHigh and addrLow fields of the BDE have not
9629                         * been byteswapped yet so they need to be swapped
9630                         * before putting them in the sgl.
9631                         */
9632                        sgl->addr_hi =
9633                                cpu_to_le32(icmd->un.genreq64.bdl.addrHigh);
9634                        sgl->addr_lo =
9635                                cpu_to_le32(icmd->un.genreq64.bdl.addrLow);
9636                        sgl->word2 = le32_to_cpu(sgl->word2);
9637                        bf_set(lpfc_sli4_sge_last, sgl, 1);
9638                        sgl->word2 = cpu_to_le32(sgl->word2);
9639                        sgl->sge_len =
9640                                cpu_to_le32(icmd->un.genreq64.bdl.bdeSize);
9641        }
9642        return sglq->sli4_xritag;
9643}
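
/*
 * Illustrative sketch of the word2 endianness discipline used above
 * (not driver code): the SGE lives in little-endian wire format, so
 * bf_set() on its bitfields is only valid while word2 is temporarily
 * in CPU byte order.
 */
static void example_mark_last_sge(struct sli4_sge *sgl, bool last)
{
        sgl->word2 = le32_to_cpu(sgl->word2);   /* wire -> CPU */
        bf_set(lpfc_sli4_sge_last, sgl, last ? 1 : 0);
        sgl->word2 = cpu_to_le32(sgl->word2);   /* CPU -> wire */
}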
9644
9645/**
9646 * lpfc_sli4_iocb2wqe - Convert the IOCB to a work queue entry.
9647 * @phba: Pointer to HBA context object.
9648 * @iocbq: Pointer to command iocb.
9649 * @wqe: Pointer to the work queue entry.
9650 *
9651 * This routine converts the iocb command to its Work Queue Entry
9652 * equivalent. The wqe pointer should not have any fields set when
9653 * this routine is called because it will memcpy over them.
9654 * This routine does not set the CQ_ID or the WQEC bits in the
9655 * wqe.
9656 *
9657 * Returns: 0 = Success, IOCB_ERROR = Failure.
9658 **/
9659static int
9660lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
9661                union lpfc_wqe128 *wqe)
9662{
9663        uint32_t xmit_len = 0, total_len = 0;
9664        uint8_t ct = 0;
9665        uint32_t fip;
9666        uint32_t abort_tag;
9667        uint8_t command_type = ELS_COMMAND_NON_FIP;
9668        uint8_t cmnd;
9669        uint16_t xritag;
9670        uint16_t abrt_iotag;
9671        struct lpfc_iocbq *abrtiocbq;
9672        struct ulp_bde64 *bpl = NULL;
9673        uint32_t els_id = LPFC_ELS_ID_DEFAULT;
9674        int numBdes, i;
9675        struct ulp_bde64 bde;
9676        struct lpfc_nodelist *ndlp;
9677        uint32_t *pcmd;
9678        uint32_t if_type;
9679
9680        fip = phba->hba_flag & HBA_FIP_SUPPORT;
9681        /* The fcp commands will set command type */
9682        if (iocbq->iocb_flag &  LPFC_IO_FCP)
9683                command_type = FCP_COMMAND;
9684        else if (fip && (iocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK))
9685                command_type = ELS_COMMAND_FIP;
9686        else
9687                command_type = ELS_COMMAND_NON_FIP;
9688
9689        if (phba->fcp_embed_io)
9690                memset(wqe, 0, sizeof(union lpfc_wqe128));
9691        /* Some of the fields are in the right position already */
9692        memcpy(wqe, &iocbq->iocb, sizeof(union lpfc_wqe));
9693        /* The ct field has moved so reset */
9694        wqe->generic.wqe_com.word7 = 0;
9695        wqe->generic.wqe_com.word10 = 0;
9696
9697        abort_tag = (uint32_t) iocbq->iotag;
9698        xritag = iocbq->sli4_xritag;
9699        /* words0-2 bpl convert bde */
9700        if (iocbq->iocb.un.genreq64.bdl.bdeFlags == BUFF_TYPE_BLP_64) {
9701                numBdes = iocbq->iocb.un.genreq64.bdl.bdeSize /
9702                                sizeof(struct ulp_bde64);
9703                bpl  = (struct ulp_bde64 *)
9704                        ((struct lpfc_dmabuf *)iocbq->context3)->virt;
9705                if (!bpl)
9706                        return IOCB_ERROR;
9707
9708                /* Should already be byte swapped. */
9709                wqe->generic.bde.addrHigh =  le32_to_cpu(bpl->addrHigh);
9710                wqe->generic.bde.addrLow =  le32_to_cpu(bpl->addrLow);
9711                /* swap the size field back to the cpu so we
9712                 * can assign it to the wqe.
9713                 */
9714                wqe->generic.bde.tus.w  = le32_to_cpu(bpl->tus.w);
9715                xmit_len = wqe->generic.bde.tus.f.bdeSize;
9716                total_len = 0;
9717                for (i = 0; i < numBdes; i++) {
9718                        bde.tus.w  = le32_to_cpu(bpl[i].tus.w);
9719                        total_len += bde.tus.f.bdeSize;
9720                }
9721        } else
9722                xmit_len = iocbq->iocb.un.fcpi64.bdl.bdeSize;
9723
9724        iocbq->iocb.ulpIoTag = iocbq->iotag;
9725        cmnd = iocbq->iocb.ulpCommand;
9726
9727        switch (iocbq->iocb.ulpCommand) {
9728        case CMD_ELS_REQUEST64_CR:
9729                if (iocbq->iocb_flag & LPFC_IO_LIBDFC)
9730                        ndlp = iocbq->context_un.ndlp;
9731                else
9732                        ndlp = (struct lpfc_nodelist *)iocbq->context1;
9733                if (!iocbq->iocb.ulpLe) {
9734                        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9735                                "2007 Only Limited Edition cmd Format"
9736                                " supported 0x%x\n",
9737                                iocbq->iocb.ulpCommand);
9738                        return IOCB_ERROR;
9739                }
9740
9741                wqe->els_req.payload_len = xmit_len;
9742                /* Els_request64 has a TMO */
9743                bf_set(wqe_tmo, &wqe->els_req.wqe_com,
9744                        iocbq->iocb.ulpTimeout);
9745                /* Need a VF for word 4; set the vf bit */
9746                bf_set(els_req64_vf, &wqe->els_req, 0);
9747                /* And a VFID for word 12 */
9748                bf_set(els_req64_vfid, &wqe->els_req, 0);
9749                ct = ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l);
9750                bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
9751                       iocbq->iocb.ulpContext);
9752                bf_set(wqe_ct, &wqe->els_req.wqe_com, ct);
9753                bf_set(wqe_pu, &wqe->els_req.wqe_com, 0);
9754                /* CCP CCPE PV PRI in word10 were set in the memcpy */
9755                if (command_type == ELS_COMMAND_FIP)
9756                        els_id = ((iocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK)
9757                                        >> LPFC_FIP_ELS_ID_SHIFT);
9758                pcmd = (uint32_t *) (((struct lpfc_dmabuf *)
9759                                        iocbq->context2)->virt);
9760                if_type = bf_get(lpfc_sli_intf_if_type,
9761                                        &phba->sli4_hba.sli_intf);
9762                if (if_type >= LPFC_SLI_INTF_IF_TYPE_2) {
9763                        if (pcmd && (*pcmd == ELS_CMD_FLOGI ||
9764                                *pcmd == ELS_CMD_SCR ||
9765                                *pcmd == ELS_CMD_RDF ||
9766                                *pcmd == ELS_CMD_RSCN_XMT ||
9767                                *pcmd == ELS_CMD_FDISC ||
9768                                *pcmd == ELS_CMD_LOGO ||
9769                                *pcmd == ELS_CMD_QFPA ||
9770                                *pcmd == ELS_CMD_UVEM ||
9771                                *pcmd == ELS_CMD_PLOGI)) {
9772                                bf_set(els_req64_sp, &wqe->els_req, 1);
9773                                bf_set(els_req64_sid, &wqe->els_req,
9774                                        iocbq->vport->fc_myDID);
9775                                if ((*pcmd == ELS_CMD_FLOGI) &&
9776                                        !(phba->fc_topology ==
9777                                                LPFC_TOPOLOGY_LOOP))
9778                                        bf_set(els_req64_sid, &wqe->els_req, 0);
9779                                bf_set(wqe_ct, &wqe->els_req.wqe_com, 1);
9780                                bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
9781                                        phba->vpi_ids[iocbq->vport->vpi]);
9782                        } else if (pcmd && iocbq->context1) {
9783                                bf_set(wqe_ct, &wqe->els_req.wqe_com, 0);
9784                                bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
9785                                        phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
9786                        }
9787                }
9788                bf_set(wqe_temp_rpi, &wqe->els_req.wqe_com,
9789                       phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
9790                bf_set(wqe_els_id, &wqe->els_req.wqe_com, els_id);
9791                bf_set(wqe_dbde, &wqe->els_req.wqe_com, 1);
9792                bf_set(wqe_iod, &wqe->els_req.wqe_com, LPFC_WQE_IOD_READ);
9793                bf_set(wqe_qosd, &wqe->els_req.wqe_com, 1);
9794                bf_set(wqe_lenloc, &wqe->els_req.wqe_com, LPFC_WQE_LENLOC_NONE);
9795                bf_set(wqe_ebde_cnt, &wqe->els_req.wqe_com, 0);
9796                wqe->els_req.max_response_payload_len = total_len - xmit_len;
9797                break;
9798        case CMD_XMIT_SEQUENCE64_CX:
9799                bf_set(wqe_ctxt_tag, &wqe->xmit_sequence.wqe_com,
9800                       iocbq->iocb.un.ulpWord[3]);
9801                bf_set(wqe_rcvoxid, &wqe->xmit_sequence.wqe_com,
9802                       iocbq->iocb.unsli3.rcvsli3.ox_id);
9803                /* The entire sequence is transmitted for this IOCB */
9804                xmit_len = total_len;
9805                cmnd = CMD_XMIT_SEQUENCE64_CR;
9806                if (phba->link_flag & LS_LOOPBACK_MODE)
9807                        bf_set(wqe_xo, &wqe->xmit_sequence.wge_ctl, 1);
9808                fallthrough;
9809        case CMD_XMIT_SEQUENCE64_CR:
9810                /* word3 iocb=io_tag32 wqe=reserved */
9811                wqe->xmit_sequence.rsvd3 = 0;
9812                /* word4 relative_offset memcpy */
9813                /* word5 r_ctl/df_ctl memcpy */
9814                bf_set(wqe_pu, &wqe->xmit_sequence.wqe_com, 0);
9815                bf_set(wqe_dbde, &wqe->xmit_sequence.wqe_com, 1);
9816                bf_set(wqe_iod, &wqe->xmit_sequence.wqe_com,
9817                       LPFC_WQE_IOD_WRITE);
9818                bf_set(wqe_lenloc, &wqe->xmit_sequence.wqe_com,
9819                       LPFC_WQE_LENLOC_WORD12);
9820                bf_set(wqe_ebde_cnt, &wqe->xmit_sequence.wqe_com, 0);
9821                wqe->xmit_sequence.xmit_len = xmit_len;
9822                command_type = OTHER_COMMAND;
9823                break;
9824        case CMD_XMIT_BCAST64_CN:
9825                /* word3 iocb=iotag32 wqe=seq_payload_len */
9826                wqe->xmit_bcast64.seq_payload_len = xmit_len;
9827                /* word4 iocb=rsvd wqe=rsvd */
9828                /* word5 iocb=rctl/type/df_ctl wqe=rctl/type/df_ctl memcpy */
9829                /* word6 iocb=ctxt_tag/io_tag wqe=ctxt_tag/xri */
9830                bf_set(wqe_ct, &wqe->xmit_bcast64.wqe_com,
9831                        ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
9832                bf_set(wqe_dbde, &wqe->xmit_bcast64.wqe_com, 1);
9833                bf_set(wqe_iod, &wqe->xmit_bcast64.wqe_com, LPFC_WQE_IOD_WRITE);
9834                bf_set(wqe_lenloc, &wqe->xmit_bcast64.wqe_com,
9835                       LPFC_WQE_LENLOC_WORD3);
9836                bf_set(wqe_ebde_cnt, &wqe->xmit_bcast64.wqe_com, 0);
9837                break;
9838        case CMD_FCP_IWRITE64_CR:
9839                command_type = FCP_COMMAND_DATA_OUT;
9840                /* word3 iocb=iotag wqe=payload_offset_len */
9841                /* Add the FCP_CMD and FCP_RSP sizes to get the offset */
9842                bf_set(payload_offset_len, &wqe->fcp_iwrite,
9843                       xmit_len + sizeof(struct fcp_rsp));
9844                bf_set(cmd_buff_len, &wqe->fcp_iwrite,
9845                       0);
9846                /* word4 iocb=parameter wqe=total_xfer_length memcpy */
9847                /* word5 iocb=initial_xfer_len wqe=initial_xfer_len memcpy */
9848                bf_set(wqe_erp, &wqe->fcp_iwrite.wqe_com,
9849                       iocbq->iocb.ulpFCP2Rcvy);
9850                bf_set(wqe_lnk, &wqe->fcp_iwrite.wqe_com, iocbq->iocb.ulpXS);
9851                /* Always open the exchange */
9852                bf_set(wqe_iod, &wqe->fcp_iwrite.wqe_com, LPFC_WQE_IOD_WRITE);
9853                bf_set(wqe_lenloc, &wqe->fcp_iwrite.wqe_com,
9854                       LPFC_WQE_LENLOC_WORD4);
9855                bf_set(wqe_pu, &wqe->fcp_iwrite.wqe_com, iocbq->iocb.ulpPU);
9856                bf_set(wqe_dbde, &wqe->fcp_iwrite.wqe_com, 1);
9857                if (iocbq->iocb_flag & LPFC_IO_OAS) {
9858                        bf_set(wqe_oas, &wqe->fcp_iwrite.wqe_com, 1);
9859                        bf_set(wqe_ccpe, &wqe->fcp_iwrite.wqe_com, 1);
9860                        if (iocbq->priority) {
9861                                bf_set(wqe_ccp, &wqe->fcp_iwrite.wqe_com,
9862                                       (iocbq->priority << 1));
9863                        } else {
9864                                bf_set(wqe_ccp, &wqe->fcp_iwrite.wqe_com,
9865                                       (phba->cfg_XLanePriority << 1));
9866                        }
9867                }
9868                /* Note, word 10 is already initialized to 0 */
9869
9870                /* Don't set PBDE for Perf hints, just lpfc_enable_pbde */
9871                if (phba->cfg_enable_pbde)
9872                        bf_set(wqe_pbde, &wqe->fcp_iwrite.wqe_com, 1);
9873                else
9874                        bf_set(wqe_pbde, &wqe->fcp_iwrite.wqe_com, 0);
9875
9876                if (phba->fcp_embed_io) {
9877                        struct lpfc_io_buf *lpfc_cmd;
9878                        struct sli4_sge *sgl;
9879                        struct fcp_cmnd *fcp_cmnd;
9880                        uint32_t *ptr;
9881
9882                        /* 128 byte wqe support here */
9883
9884                        lpfc_cmd = iocbq->context1;
9885                        sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl;
9886                        fcp_cmnd = lpfc_cmd->fcp_cmnd;
9887
9888                        /* Word 0-2 - FCP_CMND */
9889                        wqe->generic.bde.tus.f.bdeFlags =
9890                                BUFF_TYPE_BDE_IMMED;
9891                        wqe->generic.bde.tus.f.bdeSize = sgl->sge_len;
9892                        wqe->generic.bde.addrHigh = 0;
9893                        wqe->generic.bde.addrLow =  88;  /* Word 22 */
9894
9895                        bf_set(wqe_wqes, &wqe->fcp_iwrite.wqe_com, 1);
9896                        bf_set(wqe_dbde, &wqe->fcp_iwrite.wqe_com, 0);
9897
9898                        /* Word 22-29  FCP CMND Payload */
9899                        ptr = &wqe->words[22];
9900                        memcpy(ptr, fcp_cmnd, sizeof(struct fcp_cmnd));
9901                }
9902                break;
9903        case CMD_FCP_IREAD64_CR:
9904                /* word3 iocb=iotag wqe=payload_offset_len */
9905                /* Add the FCP_CMD and FCP_RSP sizes to get the offset */
9906                bf_set(payload_offset_len, &wqe->fcp_iread,
9907                       xmit_len + sizeof(struct fcp_rsp));
9908                bf_set(cmd_buff_len, &wqe->fcp_iread,
9909                       0);
9910                /* word4 iocb=parameter wqe=total_xfer_length memcpy */
9911                /* word5 iocb=initial_xfer_len wqe=initial_xfer_len memcpy */
9912                bf_set(wqe_erp, &wqe->fcp_iread.wqe_com,
9913                       iocbq->iocb.ulpFCP2Rcvy);
9914                bf_set(wqe_lnk, &wqe->fcp_iread.wqe_com, iocbq->iocb.ulpXS);
9915                /* Always open the exchange */
9916                bf_set(wqe_iod, &wqe->fcp_iread.wqe_com, LPFC_WQE_IOD_READ);
9917                bf_set(wqe_lenloc, &wqe->fcp_iread.wqe_com,
9918                       LPFC_WQE_LENLOC_WORD4);
9919                bf_set(wqe_pu, &wqe->fcp_iread.wqe_com, iocbq->iocb.ulpPU);
9920                bf_set(wqe_dbde, &wqe->fcp_iread.wqe_com, 1);
9921                if (iocbq->iocb_flag & LPFC_IO_OAS) {
9922                        bf_set(wqe_oas, &wqe->fcp_iread.wqe_com, 1);
9923                        bf_set(wqe_ccpe, &wqe->fcp_iread.wqe_com, 1);
9924                        if (iocbq->priority) {
9925                                bf_set(wqe_ccp, &wqe->fcp_iread.wqe_com,
9926                                       (iocbq->priority << 1));
9927                        } else {
9928                                bf_set(wqe_ccp, &wqe->fcp_iread.wqe_com,
9929                                       (phba->cfg_XLanePriority << 1));
9930                        }
9931                }
9932                /* Note, word 10 is already initialized to 0 */
9933
9934                /* Don't set PBDE for Perf hints, just lpfc_enable_pbde */
9935                if (phba->cfg_enable_pbde)
9936                        bf_set(wqe_pbde, &wqe->fcp_iread.wqe_com, 1);
9937                else
9938                        bf_set(wqe_pbde, &wqe->fcp_iread.wqe_com, 0);
9939
9940                if (phba->fcp_embed_io) {
9941                        struct lpfc_io_buf *lpfc_cmd;
9942                        struct sli4_sge *sgl;
9943                        struct fcp_cmnd *fcp_cmnd;
9944                        uint32_t *ptr;
9945
9946                        /* 128 byte wqe support here */
9947
9948                        lpfc_cmd = iocbq->context1;
9949                        sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl;
9950                        fcp_cmnd = lpfc_cmd->fcp_cmnd;
9951
9952                        /* Word 0-2 - FCP_CMND */
9953                        wqe->generic.bde.tus.f.bdeFlags =
9954                                BUFF_TYPE_BDE_IMMED;
9955                        wqe->generic.bde.tus.f.bdeSize = sgl->sge_len;
9956                        wqe->generic.bde.addrHigh = 0;
9957                        wqe->generic.bde.addrLow =  88;  /* Word 22 */
9958
9959                        bf_set(wqe_wqes, &wqe->fcp_iread.wqe_com, 1);
9960                        bf_set(wqe_dbde, &wqe->fcp_iread.wqe_com, 0);
9961
9962                        /* Word 22-29  FCP CMND Payload */
9963                        ptr = &wqe->words[22];
9964                        memcpy(ptr, fcp_cmnd, sizeof(struct fcp_cmnd));
9965                }
9966                break;
9967        case CMD_FCP_ICMND64_CR:
9968                /* word3 iocb=iotag wqe=payload_offset_len */
9969                /* Add the FCP_CMD and FCP_RSP sizes to get the offset */
9970                bf_set(payload_offset_len, &wqe->fcp_icmd,
9971                       xmit_len + sizeof(struct fcp_rsp));
9972                bf_set(cmd_buff_len, &wqe->fcp_icmd,
9973                       0);
9974                /* word3 iocb=IO_TAG wqe=reserved */
9975                bf_set(wqe_pu, &wqe->fcp_icmd.wqe_com, 0);
9976                /* Always open the exchange */
9977                bf_set(wqe_dbde, &wqe->fcp_icmd.wqe_com, 1);
9978                bf_set(wqe_iod, &wqe->fcp_icmd.wqe_com, LPFC_WQE_IOD_WRITE);
9979                bf_set(wqe_qosd, &wqe->fcp_icmd.wqe_com, 1);
9980                bf_set(wqe_lenloc, &wqe->fcp_icmd.wqe_com,
9981                       LPFC_WQE_LENLOC_NONE);
9982                bf_set(wqe_erp, &wqe->fcp_icmd.wqe_com,
9983                       iocbq->iocb.ulpFCP2Rcvy);
9984                if (iocbq->iocb_flag & LPFC_IO_OAS) {
9985                        bf_set(wqe_oas, &wqe->fcp_icmd.wqe_com, 1);
9986                        bf_set(wqe_ccpe, &wqe->fcp_icmd.wqe_com, 1);
9987                        if (iocbq->priority) {
9988                                bf_set(wqe_ccp, &wqe->fcp_icmd.wqe_com,
9989                                       (iocbq->priority << 1));
9990                        } else {
9991                                bf_set(wqe_ccp, &wqe->fcp_icmd.wqe_com,
9992                                       (phba->cfg_XLanePriority << 1));
9993                        }
9994                }
9995                /* Note, word 10 is already initialized to 0 */
9996
9997                if (phba->fcp_embed_io) {
9998                        struct lpfc_io_buf *lpfc_cmd;
9999                        struct sli4_sge *sgl;
10000                        struct fcp_cmnd *fcp_cmnd;
10001                        uint32_t *ptr;
10002
10003                        /* 128 byte wqe support here */
10004
10005                        lpfc_cmd = iocbq->context1;
10006                        sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl;
10007                        fcp_cmnd = lpfc_cmd->fcp_cmnd;
10008
10009                        /* Word 0-2 - FCP_CMND */
10010                        wqe->generic.bde.tus.f.bdeFlags =
10011                                BUFF_TYPE_BDE_IMMED;
10012                        wqe->generic.bde.tus.f.bdeSize = sgl->sge_len;
10013                        wqe->generic.bde.addrHigh = 0;
10014                        wqe->generic.bde.addrLow =  88;  /* Word 22 */
10015
10016                        bf_set(wqe_wqes, &wqe->fcp_icmd.wqe_com, 1);
10017                        bf_set(wqe_dbde, &wqe->fcp_icmd.wqe_com, 0);
10018
10019                        /* Word 22-29  FCP CMND Payload */
10020                        ptr = &wqe->words[22];
10021                        memcpy(ptr, fcp_cmnd, sizeof(struct fcp_cmnd));
10022                }
10023                break;
10024        case CMD_GEN_REQUEST64_CR:
10025                /* For this command calculate the xmit length of the
10026                 * request bde.
10027                 */
10028                xmit_len = 0;
10029                numBdes = iocbq->iocb.un.genreq64.bdl.bdeSize /
10030                        sizeof(struct ulp_bde64);
10031                for (i = 0; i < numBdes; i++) {
10032                        bde.tus.w = le32_to_cpu(bpl[i].tus.w);
10033                        if (bde.tus.f.bdeFlags != BUFF_TYPE_BDE_64)
10034                                break;
10035                        xmit_len += bde.tus.f.bdeSize;
10036                }
10037                /* word3 iocb=IO_TAG wqe=request_payload_len */
10038                wqe->gen_req.request_payload_len = xmit_len;
10039                /* word4 iocb=parameter wqe=relative_offset memcpy */
10040                /* word5 [rctl, type, df_ctl, la] copied in memcpy */
10041                /* word6 context tag copied in memcpy */
10042                if (iocbq->iocb.ulpCt_h  || iocbq->iocb.ulpCt_l) {
10043                        ct = ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l);
10044                        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10045                                "2015 Invalid CT %x command 0x%x\n",
10046                                ct, iocbq->iocb.ulpCommand);
10047                        return IOCB_ERROR;
10048                }
10049                bf_set(wqe_ct, &wqe->gen_req.wqe_com, 0);
10050                bf_set(wqe_tmo, &wqe->gen_req.wqe_com, iocbq->iocb.ulpTimeout);
10051                bf_set(wqe_pu, &wqe->gen_req.wqe_com, iocbq->iocb.ulpPU);
10052                bf_set(wqe_dbde, &wqe->gen_req.wqe_com, 1);
10053                bf_set(wqe_iod, &wqe->gen_req.wqe_com, LPFC_WQE_IOD_READ);
10054                bf_set(wqe_qosd, &wqe->gen_req.wqe_com, 1);
10055                bf_set(wqe_lenloc, &wqe->gen_req.wqe_com, LPFC_WQE_LENLOC_NONE);
10056                bf_set(wqe_ebde_cnt, &wqe->gen_req.wqe_com, 0);
10057                wqe->gen_req.max_response_payload_len = total_len - xmit_len;
10058                command_type = OTHER_COMMAND;
10059                break;
10060        case CMD_XMIT_ELS_RSP64_CX:
10061                ndlp = (struct lpfc_nodelist *)iocbq->context1;
10062                /* words0-2 BDE memcpy */
10063                /* word3 iocb=iotag32 wqe=response_payload_len */
10064                wqe->xmit_els_rsp.response_payload_len = xmit_len;
10065                /* word4 */
10066                wqe->xmit_els_rsp.word4 = 0;
10067                /* word5 iocb=rsvd wqe=did */
10068                bf_set(wqe_els_did, &wqe->xmit_els_rsp.wqe_dest,
10069                         iocbq->iocb.un.xseq64.xmit_els_remoteID);
10070
10071                if_type = bf_get(lpfc_sli_intf_if_type,
10072                                        &phba->sli4_hba.sli_intf);
10073                if (if_type >= LPFC_SLI_INTF_IF_TYPE_2) {
10074                        if (iocbq->vport->fc_flag & FC_PT2PT) {
10075                                bf_set(els_rsp64_sp, &wqe->xmit_els_rsp, 1);
10076                                bf_set(els_rsp64_sid, &wqe->xmit_els_rsp,
10077                                        iocbq->vport->fc_myDID);
10078                                if (iocbq->vport->fc_myDID == Fabric_DID) {
10079                                        bf_set(wqe_els_did,
10080                                                &wqe->xmit_els_rsp.wqe_dest, 0);
10081                                }
10082                        }
10083                }
10084                bf_set(wqe_ct, &wqe->xmit_els_rsp.wqe_com,
10085                       ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
10086                bf_set(wqe_pu, &wqe->xmit_els_rsp.wqe_com, iocbq->iocb.ulpPU);
10087                bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com,
10088                       iocbq->iocb.unsli3.rcvsli3.ox_id);
10089                if (!iocbq->iocb.ulpCt_h && iocbq->iocb.ulpCt_l)
10090                        bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com,
10091                               phba->vpi_ids[iocbq->vport->vpi]);
10092                bf_set(wqe_dbde, &wqe->xmit_els_rsp.wqe_com, 1);
10093                bf_set(wqe_iod, &wqe->xmit_els_rsp.wqe_com, LPFC_WQE_IOD_WRITE);
10094                bf_set(wqe_qosd, &wqe->xmit_els_rsp.wqe_com, 1);
10095                bf_set(wqe_lenloc, &wqe->xmit_els_rsp.wqe_com,
10096                       LPFC_WQE_LENLOC_WORD3);
10097                bf_set(wqe_ebde_cnt, &wqe->xmit_els_rsp.wqe_com, 0);
10098                bf_set(wqe_rsp_temp_rpi, &wqe->xmit_els_rsp,
10099                       phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
10100                pcmd = (uint32_t *) (((struct lpfc_dmabuf *)
10101                                        iocbq->context2)->virt);
10102                if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
10103                                bf_set(els_rsp64_sp, &wqe->xmit_els_rsp, 1);
10104                                bf_set(els_rsp64_sid, &wqe->xmit_els_rsp,
10105                                        iocbq->vport->fc_myDID);
10106                                bf_set(wqe_ct, &wqe->xmit_els_rsp.wqe_com, 1);
10107                                bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com,
10108                                        phba->vpi_ids[phba->pport->vpi]);
10109                }
10110                command_type = OTHER_COMMAND;
10111                break;
10112        case CMD_CLOSE_XRI_CN:
10113        case CMD_ABORT_XRI_CN:
10114        case CMD_ABORT_XRI_CX:
10115                /* words 0-2 memcpy should be 0 (reserved) */
10116                /* port will send abts */
10117                abrt_iotag = iocbq->iocb.un.acxri.abortContextTag;
10118                if (abrt_iotag != 0 && abrt_iotag <= phba->sli.last_iotag) {
10119                        abrtiocbq = phba->sli.iocbq_lookup[abrt_iotag];
10120                        fip = abrtiocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK;
10121                } else
10122                        fip = 0;
10123
10124                if ((iocbq->iocb.ulpCommand == CMD_CLOSE_XRI_CN) || fip)
10125                        /*
10126                         * The link is down, or the command was ELS_FIP
10127                         * so the fw does not need to send abts
10128                         * on the wire.
10129                         */
10130                        bf_set(abort_cmd_ia, &wqe->abort_cmd, 1);
10131                else
10132                        bf_set(abort_cmd_ia, &wqe->abort_cmd, 0);
10133                bf_set(abort_cmd_criteria, &wqe->abort_cmd, T_XRI_TAG);
10134                /* word5 iocb=CONTEXT_TAG|IO_TAG wqe=reserved */
10135                wqe->abort_cmd.rsrvd5 = 0;
10136                bf_set(wqe_ct, &wqe->abort_cmd.wqe_com,
10137                        ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
10138                abort_tag = iocbq->iocb.un.acxri.abortIoTag;
10139                /*
10140                 * The abort handler will send us CMD_ABORT_XRI_CN or
10141                 * CMD_CLOSE_XRI_CN and the fw only accepts CMD_ABORT_XRI_CX
10142                 */
10143                bf_set(wqe_cmnd, &wqe->abort_cmd.wqe_com, CMD_ABORT_XRI_CX);
10144                bf_set(wqe_qosd, &wqe->abort_cmd.wqe_com, 1);
10145                bf_set(wqe_lenloc, &wqe->abort_cmd.wqe_com,
10146                       LPFC_WQE_LENLOC_NONE);
10147                cmnd = CMD_ABORT_XRI_CX;
10148                command_type = OTHER_COMMAND;
10149                xritag = 0;
10150                break;
10151        case CMD_XMIT_BLS_RSP64_CX:
10152                ndlp = (struct lpfc_nodelist *)iocbq->context1;
10153                /* As BLS ABTS RSP WQE is very different from other WQEs,
10154                 * we re-construct this WQE here based on information in
10155                 * iocbq from scratch.
10156                 */
10157                memset(wqe, 0, sizeof(*wqe));
10158                /* OX_ID is invariant regardless of who sent the ABTS */
10159                bf_set(xmit_bls_rsp64_oxid, &wqe->xmit_bls_rsp,
10160                       bf_get(lpfc_abts_oxid, &iocbq->iocb.un.bls_rsp));
10161                if (bf_get(lpfc_abts_orig, &iocbq->iocb.un.bls_rsp) ==
10162                    LPFC_ABTS_UNSOL_INT) {
10163                        /* ABTS sent by initiator to CT exchange, the
10164                         * RX_ID field will be filled with the newly
10165                         * allocated responder XRI.
10166                         */
10167                        bf_set(xmit_bls_rsp64_rxid, &wqe->xmit_bls_rsp,
10168                               iocbq->sli4_xritag);
10169                } else {
10170                        /* ABTS sent by responder to CT exchange, the
10171                         * RX_ID field will be filled with the responder
10172                         * RX_ID from ABTS.
10173                         */
10174                        bf_set(xmit_bls_rsp64_rxid, &wqe->xmit_bls_rsp,
10175                               bf_get(lpfc_abts_rxid, &iocbq->iocb.un.bls_rsp));
10176                }
10177                bf_set(xmit_bls_rsp64_seqcnthi, &wqe->xmit_bls_rsp, 0xffff);
10178                bf_set(wqe_xmit_bls_pt, &wqe->xmit_bls_rsp.wqe_dest, 0x1);
10179
10180                /* Use CT=VPI */
10181                bf_set(wqe_els_did, &wqe->xmit_bls_rsp.wqe_dest,
10182                        ndlp->nlp_DID);
10183                bf_set(xmit_bls_rsp64_temprpi, &wqe->xmit_bls_rsp,
10184                        iocbq->iocb.ulpContext);
10185                bf_set(wqe_ct, &wqe->xmit_bls_rsp.wqe_com, 1);
10186                bf_set(wqe_ctxt_tag, &wqe->xmit_bls_rsp.wqe_com,
10187                        phba->vpi_ids[phba->pport->vpi]);
10188                bf_set(wqe_qosd, &wqe->xmit_bls_rsp.wqe_com, 1);
10189                bf_set(wqe_lenloc, &wqe->xmit_bls_rsp.wqe_com,
10190                       LPFC_WQE_LENLOC_NONE);
10191                /* Overwrite the pre-set command type with OTHER_COMMAND */
10192                command_type = OTHER_COMMAND;
10193                if (iocbq->iocb.un.xseq64.w5.hcsw.Rctl == FC_RCTL_BA_RJT) {
10194                        bf_set(xmit_bls_rsp64_rjt_vspec, &wqe->xmit_bls_rsp,
10195                               bf_get(lpfc_vndr_code, &iocbq->iocb.un.bls_rsp));
10196                        bf_set(xmit_bls_rsp64_rjt_expc, &wqe->xmit_bls_rsp,
10197                               bf_get(lpfc_rsn_expln, &iocbq->iocb.un.bls_rsp));
10198                        bf_set(xmit_bls_rsp64_rjt_rsnc, &wqe->xmit_bls_rsp,
10199                               bf_get(lpfc_rsn_code, &iocbq->iocb.un.bls_rsp));
10200                }
10201
10202                break;
10203        case CMD_SEND_FRAME:
10204                bf_set(wqe_cmnd, &wqe->generic.wqe_com, CMD_SEND_FRAME);
10205                bf_set(wqe_sof, &wqe->generic.wqe_com, 0x2E); /* SOF byte */
10206                bf_set(wqe_eof, &wqe->generic.wqe_com, 0x41); /* EOF byte */
10207                bf_set(wqe_lenloc, &wqe->generic.wqe_com, 1);
10208                bf_set(wqe_xbl, &wqe->generic.wqe_com, 1);
10209                bf_set(wqe_dbde, &wqe->generic.wqe_com, 1);
10210                bf_set(wqe_xc, &wqe->generic.wqe_com, 1);
10211                bf_set(wqe_cmd_type, &wqe->generic.wqe_com, 0xA);
10212                bf_set(wqe_cqid, &wqe->generic.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
10213                bf_set(wqe_xri_tag, &wqe->generic.wqe_com, xritag);
10214                bf_set(wqe_reqtag, &wqe->generic.wqe_com, iocbq->iotag);
10215                return 0;
10216        case CMD_XRI_ABORTED_CX:
10217        case CMD_CREATE_XRI_CR: /* Do we expect to use this? */
10218        case CMD_IOCB_FCP_IBIDIR64_CR: /* bidirectional xfer */
10219        case CMD_FCP_TSEND64_CX: /* Target mode send xfer-ready */
10220        case CMD_FCP_TRSP64_CX: /* Target mode rcv */
10221        case CMD_FCP_AUTO_TRSP_CX: /* Auto target rsp */
10222        default:
10223                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10224                                "2014 Invalid command 0x%x\n",
10225                                iocbq->iocb.ulpCommand);
10226                return IOCB_ERROR;
10227        }
10228
10229        if (iocbq->iocb_flag & LPFC_IO_DIF_PASS)
10230                bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_PASSTHRU);
10231        else if (iocbq->iocb_flag & LPFC_IO_DIF_STRIP)
10232                bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_STRIP);
10233        else if (iocbq->iocb_flag & LPFC_IO_DIF_INSERT)
10234                bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_INSERT);
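             /* The DIF setting has been encoded into the WQE; clear the
              * flags now that they have been consumed.
              */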
10235        iocbq->iocb_flag &= ~(LPFC_IO_DIF_PASS | LPFC_IO_DIF_STRIP |
10236                              LPFC_IO_DIF_INSERT);
10237        bf_set(wqe_xri_tag, &wqe->generic.wqe_com, xritag);
10238        bf_set(wqe_reqtag, &wqe->generic.wqe_com, iocbq->iotag);
10239        wqe->generic.wqe_com.abort_tag = abort_tag;
10240        bf_set(wqe_cmd_type, &wqe->generic.wqe_com, command_type);
10241        bf_set(wqe_cmnd, &wqe->generic.wqe_com, cmnd);
10242        bf_set(wqe_class, &wqe->generic.wqe_com, iocbq->iocb.ulpClass);
10243        bf_set(wqe_cqid, &wqe->generic.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
10244        return 0;
10245}
10246
10247/**
10248 * __lpfc_sli_issue_fcp_io_s3 - SLI3 device for sending fcp io iocb
10249 * @phba: Pointer to HBA context object.
10250 * @ring_number: SLI ring number to issue the iocb on.
10251 * @piocb: Pointer to command iocb.
10252 * @flag: Flag indicating if this command can be put into txq.
10253 *
10254 * __lpfc_sli_issue_fcp_io_s3 is a wrapper function that invokes the lockless
10255 * function to send an iocb command to an HBA with SLI-3 interface spec.
10256 *
10257 * This function takes the hbalock before invoking the lockless version.
10258 * The function returns success after it successfully submits the iocb to
10259 * the firmware or after adding it to the txq.
10260 **/
10261static int
10262__lpfc_sli_issue_fcp_io_s3(struct lpfc_hba *phba, uint32_t ring_number,
10263                           struct lpfc_iocbq *piocb, uint32_t flag)
10264{
10265        unsigned long iflags;
10266        int rc;
10267
10268        spin_lock_irqsave(&phba->hbalock, iflags);
10269        rc = __lpfc_sli_issue_iocb_s3(phba, ring_number, piocb, flag);
10270        spin_unlock_irqrestore(&phba->hbalock, iflags);
10271
10272        return rc;
10273}
10274
10275/**
10276 * __lpfc_sli_issue_fcp_io_s4 - SLI4 device for sending fcp io wqe
10277 * @phba: Pointer to HBA context object.
10278 * @ring_number: SLI ring number to issue wqe on.
10279 * @piocb: Pointer to command iocb.
10280 * @flag: Flag indicating if this command can be put into txq.
10281 *
10282 * __lpfc_sli_issue_fcp_io_s4 is used by other functions in the driver to issue
10283 * a WQE command to an HBA with SLI-4 interface spec.
10284 *
10285 * This function is a lockless version. It returns success after it
10286 * successfully submits the WQE to the firmware or after adding it to the
10287 * txq.
10288 **/
10289static int
10290__lpfc_sli_issue_fcp_io_s4(struct lpfc_hba *phba, uint32_t ring_number,
10291                           struct lpfc_iocbq *piocb, uint32_t flag)
10292{
10293        int rc;
10294        struct lpfc_io_buf *lpfc_cmd =
10295                (struct lpfc_io_buf *)piocb->context1;
10296        union lpfc_wqe128 *wqe = &piocb->wqe;
10297        struct sli4_sge *sgl;
10298
10299        /* 128 byte wqe support here */
10300        sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl;
10301
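             /* The FCP_CMND can be passed to the port in one of two ways:
              * embedded in the WQE as immediate data (fcp_embed_io), or
              * referenced through a 64-bit BDE pointing at the first SGL
              * entry.
              */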
10302        if (phba->fcp_embed_io) {
10303                struct fcp_cmnd *fcp_cmnd;
10304                u32 *ptr;
10305
10306                fcp_cmnd = lpfc_cmd->fcp_cmnd;
10307
10308                /* Word 0-2 - FCP_CMND */
10309                wqe->generic.bde.tus.f.bdeFlags =
10310                        BUFF_TYPE_BDE_IMMED;
10311                wqe->generic.bde.tus.f.bdeSize = sgl->sge_len;
10312                wqe->generic.bde.addrHigh = 0;
10313                wqe->generic.bde.addrLow =  88;  /* Word 22 */
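                     /* 88 is the byte offset of word 22 (22 * 4), where the
                      * immediate FCP_CMND payload is copied below.
                      */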
10314
10315                bf_set(wqe_wqes, &wqe->fcp_iwrite.wqe_com, 1);
10316                bf_set(wqe_dbde, &wqe->fcp_iwrite.wqe_com, 0);
10317
10318                /* Word 22-29  FCP CMND Payload */
10319                ptr = &wqe->words[22];
10320                memcpy(ptr, fcp_cmnd, sizeof(struct fcp_cmnd));
10321        } else {
10322                /* Word 0-2 - Inline BDE */
10323                wqe->generic.bde.tus.f.bdeFlags =  BUFF_TYPE_BDE_64;
10324                wqe->generic.bde.tus.f.bdeSize = sizeof(struct fcp_cmnd);
10325                wqe->generic.bde.addrHigh = sgl->addr_hi;
10326                wqe->generic.bde.addrLow =  sgl->addr_lo;
10327
10328                /* Word 10 */
10329                bf_set(wqe_dbde, &wqe->generic.wqe_com, 1);
10330                bf_set(wqe_wqes, &wqe->generic.wqe_com, 0);
10331        }
10332
10333        /* add the VMID tags as per switch response */
10334        if (unlikely(piocb->iocb_flag & LPFC_IO_VMID)) {
10335                if (phba->pport->vmid_priority_tagging) {
10336                        bf_set(wqe_ccpe, &wqe->fcp_iwrite.wqe_com, 1);
10337                        bf_set(wqe_ccp, &wqe->fcp_iwrite.wqe_com,
10338                                        (piocb->vmid_tag.cs_ctl_vmid));
10339                } else {
10340                        bf_set(wqe_appid, &wqe->fcp_iwrite.wqe_com, 1);
10341                        bf_set(wqe_wqes, &wqe->fcp_iwrite.wqe_com, 1);
10342                        wqe->words[31] = piocb->vmid_tag.app_id;
10343                }
10344        }
10345        rc = lpfc_sli4_issue_wqe(phba, lpfc_cmd->hdwq, piocb);
10346        return rc;
10347}
10348
10349/**
10350 * __lpfc_sli_issue_iocb_s4 - SLI4 device lockless ver of lpfc_sli_issue_iocb
10351 * @phba: Pointer to HBA context object.
10352 * @ring_number: SLI ring number to issue iocb on.
10353 * @piocb: Pointer to command iocb.
10354 * @flag: Flag indicating if this command can be put into txq.
10355 *
10356 * __lpfc_sli_issue_iocb_s4 is used by other functions in the driver to issue
10357 * an iocb command to an HBA with SLI-4 interface spec.
10358 *
10359 * This function is called with the ring_lock held. It returns success
10360 * after it successfully submits the iocb to the firmware or after adding
10361 * it to the txq.
10362 **/
10363static int
10364__lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number,
10365                         struct lpfc_iocbq *piocb, uint32_t flag)
10366{
10367        struct lpfc_sglq *sglq;
10368        union lpfc_wqe128 wqe;
10369        struct lpfc_queue *wq;
10370        struct lpfc_sli_ring *pring;
10371
10372        /* Get the WQ */
10373        if ((piocb->iocb_flag & LPFC_IO_FCP) ||
10374            (piocb->iocb_flag & LPFC_USE_FCPWQIDX)) {
10375                wq = phba->sli4_hba.hdwq[piocb->hba_wqidx].io_wq;
10376        } else {
10377                wq = phba->sli4_hba.els_wq;
10378        }
10379
10380        /* Get corresponding ring */
10381        pring = wq->pring;
10382
10383        /* The WQE can be either 64 or 128 bytes. */
10386
10387        lockdep_assert_held(&pring->ring_lock);
10388
10389        if (piocb->sli4_xritag == NO_XRI) {
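                     /* Abort and close requests operate on the XRI of the
                      * command being aborted, so no new sglq is needed.
                      */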
10390                if (piocb->iocb.ulpCommand == CMD_ABORT_XRI_CN ||
10391                    piocb->iocb.ulpCommand == CMD_CLOSE_XRI_CN) {
10392                        sglq = NULL;
10393                } else {
10394                        if (!list_empty(&pring->txq)) {
10395                                if (!(flag & SLI_IOCB_RET_IOCB)) {
10396                                        __lpfc_sli_ringtx_put(phba,
10397                                                pring, piocb);
10398                                        return IOCB_SUCCESS;
10399                                } else {
10400                                        return IOCB_BUSY;
10401                                }
10402                        } else {
10403                                sglq = __lpfc_sli_get_els_sglq(phba, piocb);
10404                                if (!sglq) {
10405                                        if (!(flag & SLI_IOCB_RET_IOCB)) {
10406                                                __lpfc_sli_ringtx_put(phba,
10407                                                                pring,
10408                                                                piocb);
10409                                                return IOCB_SUCCESS;
10410                                        } else
10411                                                return IOCB_BUSY;
10412                                }
10413                        }
10414                }
10415        } else if (piocb->iocb_flag & LPFC_IO_FCP) {
10416                /* These IOs already have an XRI and a mapped sgl. */
10417                sglq = NULL;
10418        } else {
10420                /*
10421                 * This is a continuation of a command (CX), so this
10422                 * sglq is on the active list.
10423                 */
10424                sglq = __lpfc_get_active_sglq(phba, piocb->sli4_lxritag);
10425                if (!sglq)
10426                        return IOCB_ERROR;
10427        }
10428
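             /* Bind the sglq to the iocb and convert its BPL into an SGL. */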
10429        if (sglq) {
10430                piocb->sli4_lxritag = sglq->sli4_lxritag;
10431                piocb->sli4_xritag = sglq->sli4_xritag;
10432                if (lpfc_sli4_bpl2sgl(phba, piocb, sglq) == NO_XRI)
10433                        return IOCB_ERROR;
10434        }
10435
10436        if (lpfc_sli4_iocb2wqe(phba, piocb, &wqe))
10437                return IOCB_ERROR;
10438
10439        if (lpfc_sli4_wq_put(wq, &wqe))
10440                return IOCB_ERROR;
10441        lpfc_sli_ringtxcmpl_put(phba, pring, piocb);
10442
10443        return 0;
10444}
10445
10446/*
10447 * lpfc_sli_issue_fcp_io - Wrapper func for issuing fcp i/o
10448 *
10449 * This routine wraps the actual FCP I/O function pointer from the
10450 * lpfc_hba struct, issuing a WQE for SLI-4 or an IOCB for SLI-3.
10452 *
10453 * Return codes:
10454 * IOCB_ERROR - Error
10455 * IOCB_SUCCESS - Success
10456 * IOCB_BUSY - Busy
10457 **/
10458int
10459lpfc_sli_issue_fcp_io(struct lpfc_hba *phba, uint32_t ring_number,
10460                      struct lpfc_iocbq *piocb, uint32_t flag)
10461{
10462        return phba->__lpfc_sli_issue_fcp_io(phba, ring_number, piocb, flag);
10463}
10464
10465/*
10466 * __lpfc_sli_issue_iocb - Wrapper func of lockless version for issuing iocb
10467 *
10468 * This routine wraps the lockless IOCB-issue function pointer from the
10469 * lpfc_hba struct.
10470 *
10471 * Return codes:
10472 * IOCB_ERROR - Error
10473 * IOCB_SUCCESS - Success
10474 * IOCB_BUSY - Busy
10475 **/
10476int
10477__lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number,
10478                struct lpfc_iocbq *piocb, uint32_t flag)
10479{
10480        return phba->__lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
10481}
10482
10483/**
10484 * lpfc_sli_api_table_setup - Set up sli api function jump table
10485 * @phba: The hba struct for which this call is being executed.
10486 * @dev_grp: The HBA PCI-Device group number.
10487 *
10488 * This routine sets up the SLI interface API function jump table in @phba
10489 * struct.
10490 * Returns: 0 - success, -ENODEV - failure.
10491 **/
10492int
10493lpfc_sli_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
10494{
10496        switch (dev_grp) {
10497        case LPFC_PCI_DEV_LP:
10498                phba->__lpfc_sli_issue_iocb = __lpfc_sli_issue_iocb_s3;
10499                phba->__lpfc_sli_release_iocbq = __lpfc_sli_release_iocbq_s3;
10500                phba->__lpfc_sli_issue_fcp_io = __lpfc_sli_issue_fcp_io_s3;
10501                break;
10502        case LPFC_PCI_DEV_OC:
10503                phba->__lpfc_sli_issue_iocb = __lpfc_sli_issue_iocb_s4;
10504                phba->__lpfc_sli_release_iocbq = __lpfc_sli_release_iocbq_s4;
10505                phba->__lpfc_sli_issue_fcp_io = __lpfc_sli_issue_fcp_io_s4;
10506                break;
10507        default:
10508                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10509                                "1419 Invalid HBA PCI-device group: 0x%x\n",
10510                                dev_grp);
10511                return -ENODEV;
10512        }
10513        phba->lpfc_get_iocb_from_iocbq = lpfc_get_iocb_from_iocbq;
10514        return 0;
10515}
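
     /*
      * Callers dispatch through this jump table via the wrappers above, e.g.:
      *
      *        rc = phba->__lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
      *
      * which resolves to the _s3 or _s4 variant selected here.
      */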
10516
10517/**
10518 * lpfc_sli4_calc_ring - Calculates which ring to use
10519 * @phba: Pointer to HBA context object.
10520 * @piocb: Pointer to command iocb.
10521 *
10522 * For SLI4 only, FCP IO can be deferred to one of many WQs, based on
10523 * hba_wqidx, so we need to calculate the corresponding ring.
10524 * Since ABORTS must go on the same WQ as the command they are
10525 * aborting, we use the command's hba_wqidx.
10526 */
10527struct lpfc_sli_ring *
10528lpfc_sli4_calc_ring(struct lpfc_hba *phba, struct lpfc_iocbq *piocb)
10529{
10530        struct lpfc_io_buf *lpfc_cmd;
10531
10532        if (piocb->iocb_flag & (LPFC_IO_FCP | LPFC_USE_FCPWQIDX)) {
10533                if (unlikely(!phba->sli4_hba.hdwq))
10534                        return NULL;
10535                /*
10536                 * For an abort iocb, hba_wqidx should already be
10537                 * set up based on the work queue that was used.
10538                 */
10539                if (!(piocb->iocb_flag & LPFC_USE_FCPWQIDX)) {
10540                        lpfc_cmd = (struct lpfc_io_buf *)piocb->context1;
10541                        piocb->hba_wqidx = lpfc_cmd->hdwq_no;
10542                }
10543                return phba->sli4_hba.hdwq[piocb->hba_wqidx].io_wq->pring;
10544        } else {
10545                if (unlikely(!phba->sli4_hba.els_wq))
10546                        return NULL;
10547                piocb->hba_wqidx = 0;
10548                return phba->sli4_hba.els_wq->pring;
10549        }
10550}
10551
10552/**
10553 * lpfc_sli_issue_iocb - Wrapper function for __lpfc_sli_issue_iocb
10554 * @phba: Pointer to HBA context object.
10555 * @ring_number: Ring number
10556 * @piocb: Pointer to command iocb.
10557 * @flag: Flag indicating if this command can be put into txq.
10558 *
10559 * lpfc_sli_issue_iocb is a wrapper around __lpfc_sli_issue_iocb
10560 * function. This function gets the hbalock and calls
10561 * __lpfc_sli_issue_iocb function and will return the error returned
10562 * by __lpfc_sli_issue_iocb function. This wrapper is used by
10563 * functions which do not hold hbalock.
10564 **/
10565int
10566lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number,
10567                    struct lpfc_iocbq *piocb, uint32_t flag)
10568{
10569        struct lpfc_sli_ring *pring;
10570        struct lpfc_queue *eq;
10571        unsigned long iflags;
10572        int rc;
10573
10574        if (phba->sli_rev == LPFC_SLI_REV4) {
10575                eq = phba->sli4_hba.hdwq[piocb->hba_wqidx].hba_eq;
10576
10577                pring = lpfc_sli4_calc_ring(phba, piocb);
10578                if (unlikely(pring == NULL))
10579                        return IOCB_ERROR;
10580
10581                spin_lock_irqsave(&pring->ring_lock, iflags);
10582                rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
10583                spin_unlock_irqrestore(&pring->ring_lock, iflags);
10584
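                     /* Opportunistically reap any completions already posted
                      * to this queue's EQ (fastpath polling).
                      */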
10585                lpfc_sli4_poll_eq(eq, LPFC_POLL_FASTPATH);
10586        } else {
10587                /* For now, SLI2/3 will still use hbalock */
10588                spin_lock_irqsave(&phba->hbalock, iflags);
10589                rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
10590                spin_unlock_irqrestore(&phba->hbalock, iflags);
10591        }
10592        return rc;
10593}
10594
10595/**
10596 * lpfc_extra_ring_setup - Extra ring setup function
10597 * @phba: Pointer to HBA context object.
10598 *
10599 * This function is called while the driver attaches to the
10600 * HBA to set up the extra ring. The extra ring is used
10601 * only when driver needs to support target mode functionality
10602 * or IP over FC functionalities.
10603 *
10604 * This function is called with no lock held. SLI3 only.
10605 **/
10606static int
10607lpfc_extra_ring_setup(struct lpfc_hba *phba)
10608{
10609        struct lpfc_sli *psli;
10610        struct lpfc_sli_ring *pring;
10611
10612        psli = &phba->sli;
10613
10614        /* Adjust cmd/rsp ring iocb entries more evenly */
10615
10616        /* Take some away from the FCP ring */
10617        pring = &psli->sli3_ring[LPFC_FCP_RING];
10618        pring->sli.sli3.numCiocb -= SLI2_IOCB_CMD_R1XTRA_ENTRIES;
10619        pring->sli.sli3.numRiocb -= SLI2_IOCB_RSP_R1XTRA_ENTRIES;
10620        pring->sli.sli3.numCiocb -= SLI2_IOCB_CMD_R3XTRA_ENTRIES;
10621        pring->sli.sli3.numRiocb -= SLI2_IOCB_RSP_R3XTRA_ENTRIES;
10622
10623        /* and give them to the extra ring */
10624        pring = &psli->sli3_ring[LPFC_EXTRA_RING];
10625
10626        pring->sli.sli3.numCiocb += SLI2_IOCB_CMD_R1XTRA_ENTRIES;
10627        pring->sli.sli3.numRiocb += SLI2_IOCB_RSP_R1XTRA_ENTRIES;
10628        pring->sli.sli3.numCiocb += SLI2_IOCB_CMD_R3XTRA_ENTRIES;
10629        pring->sli.sli3.numRiocb += SLI2_IOCB_RSP_R3XTRA_ENTRIES;
10630
10631        /* Setup default profile for this ring */
10632        pring->iotag_max = 4096;
10633        pring->num_mask = 1;
10634        pring->prt[0].profile = 0;      /* Mask 0 */
10635        pring->prt[0].rctl = phba->cfg_multi_ring_rctl;
10636        pring->prt[0].type = phba->cfg_multi_ring_type;
10637        pring->prt[0].lpfc_sli_rcv_unsol_event = NULL;
10638        return 0;
10639}
10640
10641static void
10642lpfc_sli_post_recovery_event(struct lpfc_hba *phba,
10643                             struct lpfc_nodelist *ndlp)
10644{
10645        unsigned long iflags;
10646        struct lpfc_work_evt  *evtp = &ndlp->recovery_evt;
10647
10648        spin_lock_irqsave(&phba->hbalock, iflags);
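             /* The recovery event is already queued; nothing more to do. */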
10649        if (!list_empty(&evtp->evt_listp)) {
10650                spin_unlock_irqrestore(&phba->hbalock, iflags);
10651                return;
10652        }
10653
10654        /* Incrementing the reference count until the queued work is done. */
10655        evtp->evt_arg1  = lpfc_nlp_get(ndlp);
10656        if (!evtp->evt_arg1) {
10657                spin_unlock_irqrestore(&phba->hbalock, iflags);
10658                return;
10659        }
10660        evtp->evt = LPFC_EVT_RECOVER_PORT;
10661        list_add_tail(&evtp->evt_listp, &phba->work_list);
10662        spin_unlock_irqrestore(&phba->hbalock, iflags);
10663
10664        lpfc_worker_wake_up(phba);
10665}
10666
10667/* lpfc_sli_abts_err_handler - handle a failed ABTS request from an SLI3 port.
10668 * @phba: Pointer to HBA context object.
10669 * @iocbq: Pointer to iocb object.
10670 *
10671 * The async_event handler calls this routine when it receives
10672 * an ASYNC_STATUS_CN event from the port.  The port generates
10673 * this event when an Abort Sequence request to an rport fails
10674 * twice in succession.  The abort could be originated by the
10675 * driver or by the port.  The ABTS could have been for an ELS
10676 * or FCP IO.  The port only generates this event when an ABTS
10677 * fails to complete after one retry.
10678 */
10679static void
10680lpfc_sli_abts_err_handler(struct lpfc_hba *phba,
10681                          struct lpfc_iocbq *iocbq)
10682{
10683        struct lpfc_nodelist *ndlp = NULL;
10684        uint16_t rpi = 0, vpi = 0;
10685        struct lpfc_vport *vport = NULL;
10686
10687        /* The rpi in the ulpContext is vport-sensitive. */
10688        vpi = iocbq->iocb.un.asyncstat.sub_ctxt_tag;
10689        rpi = iocbq->iocb.ulpContext;
10690
10691        lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
10692                        "3092 Port generated ABTS async event "
10693                        "on vpi %d rpi %d status 0x%x\n",
10694                        vpi, rpi, iocbq->iocb.ulpStatus);
10695
10696        vport = lpfc_find_vport_by_vpid(phba, vpi);
10697        if (!vport)
10698                goto err_exit;
10699        ndlp = lpfc_findnode_rpi(vport, rpi);
10700        if (!ndlp)
10701                goto err_exit;
10702
10703        if (iocbq->iocb.ulpStatus == IOSTAT_LOCAL_REJECT)
10704                lpfc_sli_abts_recover_port(vport, ndlp);
10705        return;
10706
10707 err_exit:
10708        lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
10709                        "3095 Event Context not found, no "
10710                        "action on vpi %d rpi %d status 0x%x, reason 0x%x\n",
10711                        vpi, rpi, iocbq->iocb.ulpStatus,
10712                        iocbq->iocb.ulpContext);
10713}
10714
10715/* lpfc_sli4_abts_err_handler - handle a failed ABTS request from an SLI4 port.
10716 * @phba: pointer to HBA context object.
10717 * @ndlp: nodelist pointer for the impacted rport.
10718 * @axri: pointer to the wcqe containing the failed exchange.
10719 *
10720 * The driver calls this routine when it receives an ABORT_XRI_FCP CQE from the
10721 * port.  The port generates this event when an abort exchange request to an
10722 * rport fails twice in succession with no reply.  The abort could be originated
10723 * by the driver or by the port.  The ABTS could have been for an ELS or FCP IO.
10724 */
10725void
10726lpfc_sli4_abts_err_handler(struct lpfc_hba *phba,
10727                           struct lpfc_nodelist *ndlp,
10728                           struct sli4_wcqe_xri_aborted *axri)
10729{
10730        uint32_t ext_status = 0;
10731
10732        if (!ndlp) {
10733                lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
10734                                "3115 Node Context not found, driver "
10735                                "ignoring abts err event\n");
10736                return;
10737        }
10738
10739        lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
10740                        "3116 Port generated FCP XRI ABORT event on "
10741                        "vpi %d rpi %d xri x%x status 0x%x parameter x%x\n",
10742                        ndlp->vport->vpi, phba->sli4_hba.rpi_ids[ndlp->nlp_rpi],
10743                        bf_get(lpfc_wcqe_xa_xri, axri),
10744                        bf_get(lpfc_wcqe_xa_status, axri),
10745                        axri->parameter);
10746
10747        /*
10748         * Catch the ABTS protocol failure case.  Older OCe FW releases returned
10749         * LOCAL_REJECT and 0 for a failed ABTS exchange and later OCe and
10750         * LPe FW releases returned LOCAL_REJECT and SEQUENCE_TIMEOUT.
10751         */
10752        ext_status = axri->parameter & IOERR_PARAM_MASK;
10753        if ((bf_get(lpfc_wcqe_xa_status, axri) == IOSTAT_LOCAL_REJECT) &&
10754            ((ext_status == IOERR_SEQUENCE_TIMEOUT) || (ext_status == 0)))
10755                lpfc_sli_post_recovery_event(phba, ndlp);
10756}
10757
10758/**
10759 * lpfc_sli_async_event_handler - ASYNC iocb handler function
10760 * @phba: Pointer to HBA context object.
10761 * @pring: Pointer to driver SLI ring object.
10762 * @iocbq: Pointer to iocb object.
10763 *
10764 * This function is called by the slow ring event handler
10765 * function when there is an ASYNC event iocb in the ring.
10766 * This function is called with no lock held.
10767 * Currently this function handles only temperature related
10768 * ASYNC events. The function decodes the temperature sensor
10769 * event message and posts events for the management applications.
10770 **/
10771static void
10772lpfc_sli_async_event_handler(struct lpfc_hba *phba,
10773        struct lpfc_sli_ring *pring, struct lpfc_iocbq *iocbq)
10774{
10775        IOCB_t *icmd;
10776        uint16_t evt_code;
10777        struct temp_event temp_event_data;
10778        struct Scsi_Host *shost;
10779        uint32_t *iocb_w;
10780
10781        icmd = &iocbq->iocb;
10782        evt_code = icmd->un.asyncstat.evt_code;
10783
10784        switch (evt_code) {
10785        case ASYNC_TEMP_WARN:
10786        case ASYNC_TEMP_SAFE:
10787                temp_event_data.data = (uint32_t) icmd->ulpContext;
10788                temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
10789                if (evt_code == ASYNC_TEMP_WARN) {
10790                        temp_event_data.event_code = LPFC_THRESHOLD_TEMP;
10791                        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10792                                "0347 Adapter is very hot, please take "
10793                                "corrective action. temperature : %d Celsius\n",
10794                                (uint32_t) icmd->ulpContext);
10795                } else {
10796                        temp_event_data.event_code = LPFC_NORMAL_TEMP;
10797                        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10798                                "0340 Adapter temperature is OK now. "
10799                                "temperature : %d Celsius\n",
10800                                (uint32_t) icmd->ulpContext);
10801                }
10802
10803                /* Send temperature change event to applications */
10804                shost = lpfc_shost_from_vport(phba->pport);
10805                fc_host_post_vendor_event(shost, fc_get_event_number(),
10806                        sizeof(temp_event_data), (char *) &temp_event_data,
10807                        LPFC_NL_VENDOR_ID);
10808                break;
10809        case ASYNC_STATUS_CN:
10810                lpfc_sli_abts_err_handler(phba, iocbq);
10811                break;
10812        default:
10813                iocb_w = (uint32_t *) icmd;
10814                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10815                        "0346 Ring %d handler: unexpected ASYNC_STATUS"
10816                        " evt_code 0x%x\n"
10817                        "W0  0x%08x W1  0x%08x W2  0x%08x W3  0x%08x\n"
10818                        "W4  0x%08x W5  0x%08x W6  0x%08x W7  0x%08x\n"
10819                        "W8  0x%08x W9  0x%08x W10 0x%08x W11 0x%08x\n"
10820                        "W12 0x%08x W13 0x%08x W14 0x%08x W15 0x%08x\n",
10821                        pring->ringno, icmd->un.asyncstat.evt_code,
10822                        iocb_w[0], iocb_w[1], iocb_w[2], iocb_w[3],
10823                        iocb_w[4], iocb_w[5], iocb_w[6], iocb_w[7],
10824                        iocb_w[8], iocb_w[9], iocb_w[10], iocb_w[11],
10825                        iocb_w[12], iocb_w[13], iocb_w[14], iocb_w[15]);
10826
10827                break;
10828        }
10829}
10830
10831
10832/**
10833 * lpfc_sli4_setup - SLI ring setup function
10834 * @phba: Pointer to HBA context object.
10835 *
10836 * lpfc_sli4_setup sets up the ELS ring of the SLI4 interface,
10837 * including its unsolicited-event receive masks. This function is
10838 * called while the driver attaches to the HBA and before the
10839 * interrupts are enabled, so there is no need for locking.
10840 *
10841 * This function always returns 0.
10842 **/
10843int
10844lpfc_sli4_setup(struct lpfc_hba *phba)
10845{
10846        struct lpfc_sli_ring *pring;
10847
10848        pring = phba->sli4_hba.els_wq->pring;
10849        pring->num_mask = LPFC_MAX_RING_MASK;
10850        pring->prt[0].profile = 0;      /* Mask 0 */
10851        pring->prt[0].rctl = FC_RCTL_ELS_REQ;
10852        pring->prt[0].type = FC_TYPE_ELS;
10853        pring->prt[0].lpfc_sli_rcv_unsol_event =
10854            lpfc_els_unsol_event;
10855        pring->prt[1].profile = 0;      /* Mask 1 */
10856        pring->prt[1].rctl = FC_RCTL_ELS_REP;
10857        pring->prt[1].type = FC_TYPE_ELS;
10858        pring->prt[1].lpfc_sli_rcv_unsol_event =
10859            lpfc_els_unsol_event;
10860        pring->prt[2].profile = 0;      /* Mask 2 */
10861        /* NameServer Inquiry */
10862        pring->prt[2].rctl = FC_RCTL_DD_UNSOL_CTL;
10863        /* NameServer */
10864        pring->prt[2].type = FC_TYPE_CT;
10865        pring->prt[2].lpfc_sli_rcv_unsol_event =
10866            lpfc_ct_unsol_event;
10867        pring->prt[3].profile = 0;      /* Mask 3 */
10868        /* NameServer response */
10869        pring->prt[3].rctl = FC_RCTL_DD_SOL_CTL;
10870        /* NameServer */
10871        pring->prt[3].type = FC_TYPE_CT;
10872        pring->prt[3].lpfc_sli_rcv_unsol_event =
10873            lpfc_ct_unsol_event;
10874        return 0;
10875}
10876
10877/**
10878 * lpfc_sli_setup - SLI ring setup function
10879 * @phba: Pointer to HBA context object.
10880 *
10881 * lpfc_sli_setup sets up the rings of the SLI interface with
10882 * the number of iocbs per ring and the iotags. This function is
10883 * called while the driver attaches to the HBA and before the
10884 * interrupts are enabled, so there is no need for locking.
10885 *
10886 * This function always returns 0. SLI3 only.
10887 **/
10888int
10889lpfc_sli_setup(struct lpfc_hba *phba)
10890{
10891        int i, totiocbsize = 0;
10892        struct lpfc_sli *psli = &phba->sli;
10893        struct lpfc_sli_ring *pring;
10894
10895        psli->num_rings = MAX_SLI3_CONFIGURED_RINGS;
10896        psli->sli_flag = 0;
10897
10898        psli->iocbq_lookup = NULL;
10899        psli->iocbq_lookup_len = 0;
10900        psli->last_iotag = 0;
10901
10902        for (i = 0; i < psli->num_rings; i++) {
10903                pring = &psli->sli3_ring[i];
10904                switch (i) {
10905                case LPFC_FCP_RING:     /* ring 0 - FCP */
10906                        /* numCiocb and numRiocb are used in config_port */
10907                        pring->sli.sli3.numCiocb = SLI2_IOCB_CMD_R0_ENTRIES;
10908                        pring->sli.sli3.numRiocb = SLI2_IOCB_RSP_R0_ENTRIES;
10909                        pring->sli.sli3.numCiocb +=
10910                                SLI2_IOCB_CMD_R1XTRA_ENTRIES;
10911                        pring->sli.sli3.numRiocb +=
10912                                SLI2_IOCB_RSP_R1XTRA_ENTRIES;
10913                        pring->sli.sli3.numCiocb +=
10914                                SLI2_IOCB_CMD_R3XTRA_ENTRIES;
10915                        pring->sli.sli3.numRiocb +=
10916                                SLI2_IOCB_RSP_R3XTRA_ENTRIES;
10917                        pring->sli.sli3.sizeCiocb = (phba->sli_rev == 3) ?
10918                                                        SLI3_IOCB_CMD_SIZE :
10919                                                        SLI2_IOCB_CMD_SIZE;
10920                        pring->sli.sli3.sizeRiocb = (phba->sli_rev == 3) ?
10921                                                        SLI3_IOCB_RSP_SIZE :
10922                                                        SLI2_IOCB_RSP_SIZE;
10923                        pring->iotag_ctr = 0;
10924                        pring->iotag_max =
10925                            (phba->cfg_hba_queue_depth * 2);
10926                        pring->fast_iotag = pring->iotag_max;
10927                        pring->num_mask = 0;
10928                        break;
10929                case LPFC_EXTRA_RING:   /* ring 1 - EXTRA */
10930                        /* numCiocb and numRiocb are used in config_port */
10931                        pring->sli.sli3.numCiocb = SLI2_IOCB_CMD_R1_ENTRIES;
10932                        pring->sli.sli3.numRiocb = SLI2_IOCB_RSP_R1_ENTRIES;
10933                        pring->sli.sli3.sizeCiocb = (phba->sli_rev == 3) ?
10934                                                        SLI3_IOCB_CMD_SIZE :
10935                                                        SLI2_IOCB_CMD_SIZE;
10936                        pring->sli.sli3.sizeRiocb = (phba->sli_rev == 3) ?
10937                                                        SLI3_IOCB_RSP_SIZE :
10938                                                        SLI2_IOCB_RSP_SIZE;
10939                        pring->iotag_max = phba->cfg_hba_queue_depth;
10940                        pring->num_mask = 0;
10941                        break;
10942                case LPFC_ELS_RING:     /* ring 2 - ELS / CT */
10943                        /* numCiocb and numRiocb are used in config_port */
10944                        pring->sli.sli3.numCiocb = SLI2_IOCB_CMD_R2_ENTRIES;
10945                        pring->sli.sli3.numRiocb = SLI2_IOCB_RSP_R2_ENTRIES;
10946                        pring->sli.sli3.sizeCiocb = (phba->sli_rev == 3) ?
10947                                                        SLI3_IOCB_CMD_SIZE :
10948                                                        SLI2_IOCB_CMD_SIZE;
10949                        pring->sli.sli3.sizeRiocb = (phba->sli_rev == 3) ?
10950                                                        SLI3_IOCB_RSP_SIZE :
10951                                                        SLI2_IOCB_RSP_SIZE;
10952                        pring->fast_iotag = 0;
10953                        pring->iotag_ctr = 0;
10954                        pring->iotag_max = 4096;
10955                        pring->lpfc_sli_rcv_async_status =
10956                                lpfc_sli_async_event_handler;
10957                        pring->num_mask = LPFC_MAX_RING_MASK;
10958                        pring->prt[0].profile = 0;      /* Mask 0 */
10959                        pring->prt[0].rctl = FC_RCTL_ELS_REQ;
10960                        pring->prt[0].type = FC_TYPE_ELS;
10961                        pring->prt[0].lpfc_sli_rcv_unsol_event =
10962                            lpfc_els_unsol_event;
10963                        pring->prt[1].profile = 0;      /* Mask 1 */
10964                        pring->prt[1].rctl = FC_RCTL_ELS_REP;
10965                        pring->prt[1].type = FC_TYPE_ELS;
10966                        pring->prt[1].lpfc_sli_rcv_unsol_event =
10967                            lpfc_els_unsol_event;
10968                        pring->prt[2].profile = 0;      /* Mask 2 */
10969                        /* NameServer Inquiry */
10970                        pring->prt[2].rctl = FC_RCTL_DD_UNSOL_CTL;
10971                        /* NameServer */
10972                        pring->prt[2].type = FC_TYPE_CT;
10973                        pring->prt[2].lpfc_sli_rcv_unsol_event =
10974                            lpfc_ct_unsol_event;
10975                        pring->prt[3].profile = 0;      /* Mask 3 */
10976                        /* NameServer response */
10977                        pring->prt[3].rctl = FC_RCTL_DD_SOL_CTL;
10978                        /* NameServer */
10979                        pring->prt[3].type = FC_TYPE_CT;
10980                        pring->prt[3].lpfc_sli_rcv_unsol_event =
10981                            lpfc_ct_unsol_event;
10982                        break;
10983                }
10984                totiocbsize += (pring->sli.sli3.numCiocb *
10985                        pring->sli.sli3.sizeCiocb) +
10986                        (pring->sli.sli3.numRiocb * pring->sli.sli3.sizeRiocb);
10987        }
10988        if (totiocbsize > MAX_SLIM_IOCB_SIZE) {
10989                /* Too many cmd / rsp ring entries in SLI2 SLIM */
10990                printk(KERN_ERR "%d:0462 Too many cmd / rsp ring entries in "
10991                       "SLI2 SLIM Data: x%x x%lx\n",
10992                       phba->brd_no, totiocbsize,
10993                       (unsigned long) MAX_SLIM_IOCB_SIZE);
10994        }
10995        if (phba->cfg_multi_ring_support == 2)
10996                lpfc_extra_ring_setup(phba);
10997
10998        return 0;
10999}
11000
11001/**
11002 * lpfc_sli4_queue_init - Queue initialization function
11003 * @phba: Pointer to HBA context object.
11004 *
11005 * lpfc_sli4_queue_init sets up mailbox queues and iocb queues for each
11006 * ring. This function also initializes ring indices of each ring.
11007 * This function is called during the initialization of the SLI
11008 * interface of an HBA.
11009 * This function is called with no lock held.
11011 **/
11012void
11013lpfc_sli4_queue_init(struct lpfc_hba *phba)
11014{
11015        struct lpfc_sli *psli;
11016        struct lpfc_sli_ring *pring;
11017        int i;
11018
11019        psli = &phba->sli;
11020        spin_lock_irq(&phba->hbalock);
11021        INIT_LIST_HEAD(&psli->mboxq);
11022        INIT_LIST_HEAD(&psli->mboxq_cmpl);
11023        /* Initialize list headers for txq and txcmplq as doubly linked lists */
11024        for (i = 0; i < phba->cfg_hdw_queue; i++) {
11025                pring = phba->sli4_hba.hdwq[i].io_wq->pring;
11026                pring->flag = 0;
11027                pring->ringno = LPFC_FCP_RING;
11028                pring->txcmplq_cnt = 0;
11029                INIT_LIST_HEAD(&pring->txq);
11030                INIT_LIST_HEAD(&pring->txcmplq);
11031                INIT_LIST_HEAD(&pring->iocb_continueq);
11032                spin_lock_init(&pring->ring_lock);
11033        }
11034        pring = phba->sli4_hba.els_wq->pring;
11035        pring->flag = 0;
11036        pring->ringno = LPFC_ELS_RING;
11037        pring->txcmplq_cnt = 0;
11038        INIT_LIST_HEAD(&pring->txq);
11039        INIT_LIST_HEAD(&pring->txcmplq);
11040        INIT_LIST_HEAD(&pring->iocb_continueq);
11041        spin_lock_init(&pring->ring_lock);
11042
11043        if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
11044                pring = phba->sli4_hba.nvmels_wq->pring;
11045                pring->flag = 0;
11046                pring->ringno = LPFC_ELS_RING;
11047                pring->txcmplq_cnt = 0;
11048                INIT_LIST_HEAD(&pring->txq);
11049                INIT_LIST_HEAD(&pring->txcmplq);
11050                INIT_LIST_HEAD(&pring->iocb_continueq);
11051                spin_lock_init(&pring->ring_lock);
11052        }
11053
11054        spin_unlock_irq(&phba->hbalock);
11055}
11056
11057/**
11058 * lpfc_sli_queue_init - Queue initialization function
11059 * @phba: Pointer to HBA context object.
11060 *
11061 * lpfc_sli_queue_init sets up mailbox queues and iocb queues for each
11062 * ring. This function also initializes ring indices of each ring.
11063 * This function is called during the initialization of the SLI
11064 * interface of an HBA.
11065 * This function is called with no lock held.
11067 **/
11068void
11069lpfc_sli_queue_init(struct lpfc_hba *phba)
11070{
11071        struct lpfc_sli *psli;
11072        struct lpfc_sli_ring *pring;
11073        int i;
11074
11075        psli = &phba->sli;
11076        spin_lock_irq(&phba->hbalock);
11077        INIT_LIST_HEAD(&psli->mboxq);
11078        INIT_LIST_HEAD(&psli->mboxq_cmpl);
11079        /* Initialize list headers for txq and txcmplq as doubly linked lists */
11080        for (i = 0; i < psli->num_rings; i++) {
11081                pring = &psli->sli3_ring[i];
11082                pring->ringno = i;
11083                pring->sli.sli3.next_cmdidx  = 0;
11084                pring->sli.sli3.local_getidx = 0;
11085                pring->sli.sli3.cmdidx = 0;
11086                INIT_LIST_HEAD(&pring->iocb_continueq);
11087                INIT_LIST_HEAD(&pring->iocb_continue_saveq);
11088                INIT_LIST_HEAD(&pring->postbufq);
11089                pring->flag = 0;
11090                INIT_LIST_HEAD(&pring->txq);
11091                INIT_LIST_HEAD(&pring->txcmplq);
11092                spin_lock_init(&pring->ring_lock);
11093        }
11094        spin_unlock_irq(&phba->hbalock);
11095}
11096
11097/**
11098 * lpfc_sli_mbox_sys_flush - Flush mailbox command sub-system
11099 * @phba: Pointer to HBA context object.
11100 *
11101 * This routine flushes the mailbox command subsystem. It will unconditionally
11102 * flush all the mailbox commands in the three possible stages in the mailbox
11103 * command sub-system: pending mailbox command queue; the outstanding mailbox
11104 * command; and the completed mailbox command queue. It is the caller's
11105 * responsibility to make sure that the driver is in the proper state to
11106 * flush the mailbox command sub-system. Namely, the posting of mailbox
11107 * commands into the pending mailbox command queue from the various
11108 * clients must be stopped; either the HBA is in a state in which it will
11109 * never work on the outstanding mailbox command (such as in EEH or ERATT
11110 * conditions) or the outstanding mailbox command has been completed.
11111 **/
11112static void
11113lpfc_sli_mbox_sys_flush(struct lpfc_hba *phba)
11114{
11115        LIST_HEAD(completions);
11116        struct lpfc_sli *psli = &phba->sli;
11117        LPFC_MBOXQ_t *pmb;
11118        unsigned long iflag;
11119
11120        /* Disable softirqs, including timers from obtaining phba->hbalock */
11121        local_bh_disable();
11122
11123        /* Flush all the mailbox commands in the mbox system */
11124        spin_lock_irqsave(&phba->hbalock, iflag);
11125
11126        /* The pending mailbox command queue */
11127        list_splice_init(&phba->sli.mboxq, &completions);
11128        /* The outstanding active mailbox command */
11129        if (psli->mbox_active) {
11130                list_add_tail(&psli->mbox_active->list, &completions);
11131                psli->mbox_active = NULL;
11132                psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
11133        }
11134        /* The completed mailbox command queue */
11135        list_splice_init(&phba->sli.mboxq_cmpl, &completions);
11136        spin_unlock_irqrestore(&phba->hbalock, iflag);
11137
11138        /* Enable softirqs again, done with phba->hbalock */
11139        local_bh_enable();
11140
11141        /* Return all flushed mailbox commands with MBX_NOT_FINISHED status */
11142        while (!list_empty(&completions)) {
11143                list_remove_head(&completions, pmb, LPFC_MBOXQ_t, list);
11144                pmb->u.mb.mbxStatus = MBX_NOT_FINISHED;
11145                if (pmb->mbox_cmpl)
11146                        pmb->mbox_cmpl(phba, pmb);
11147        }
11148}
11149
11150/**
11151 * lpfc_sli_host_down - Vport cleanup function
11152 * @vport: Pointer to virtual port object.
11153 *
11154 * lpfc_sli_host_down is called to clean up the resources
11155 * associated with a vport before destroying virtual
11156 * port data structures.
11157 * This function does the following operations:
11158 * - Free discovery resources associated with this virtual
11159 *   port.
11160 * - Free iocbs associated with this virtual port in
11161 *   the txq.
11162 * - Send abort for all iocb commands associated with this
11163 *   vport in txcmplq.
11164 *
11165 * This function is called with no lock held and always returns 1.
11166 **/
11167int
11168lpfc_sli_host_down(struct lpfc_vport *vport)
11169{
11170        LIST_HEAD(completions);
11171        struct lpfc_hba *phba = vport->phba;
11172        struct lpfc_sli *psli = &phba->sli;
11173        struct lpfc_queue *qp = NULL;
11174        struct lpfc_sli_ring *pring;
11175        struct lpfc_iocbq *iocb, *next_iocb;
11176        int i;
11177        unsigned long flags = 0;
11178        uint16_t prev_pring_flag;
11179
11180        lpfc_cleanup_discovery_resources(vport);
11181
11182        spin_lock_irqsave(&phba->hbalock, flags);
11183
11184        /*
11185         * Error everything on the txq since these iocbs
11186         * have not been given to the FW yet.
11187         * Also issue ABTS for everything on the txcmplq
11188         */
11189        if (phba->sli_rev != LPFC_SLI_REV4) {
11190                for (i = 0; i < psli->num_rings; i++) {
11191                        pring = &psli->sli3_ring[i];
11192                        prev_pring_flag = pring->flag;
11193                        /* Only slow rings */
11194                        if (pring->ringno == LPFC_ELS_RING) {
11195                                pring->flag |= LPFC_DEFERRED_RING_EVENT;
11196                                /* Set the lpfc data pending flag */
11197                                set_bit(LPFC_DATA_READY, &phba->data_flags);
11198                        }
11199                        list_for_each_entry_safe(iocb, next_iocb,
11200                                                 &pring->txq, list) {
11201                                if (iocb->vport != vport)
11202                                        continue;
11203                                list_move_tail(&iocb->list, &completions);
11204                        }
11205                        list_for_each_entry_safe(iocb, next_iocb,
11206                                                 &pring->txcmplq, list) {
11207                                if (iocb->vport != vport)
11208                                        continue;
11209                                lpfc_sli_issue_abort_iotag(phba, pring, iocb,
11210                                                           NULL);
11211                        }
11212                        pring->flag = prev_pring_flag;
11213                }
11214        } else {
11215                list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
11216                        pring = qp->pring;
11217                        if (!pring)
11218                                continue;
11219                        if (pring == phba->sli4_hba.els_wq->pring) {
11220                                pring->flag |= LPFC_DEFERRED_RING_EVENT;
11221                                /* Set the lpfc data pending flag */
11222                                set_bit(LPFC_DATA_READY, &phba->data_flags);
11223                        }
11224                        prev_pring_flag = pring->flag;
11225                        spin_lock(&pring->ring_lock);
11226                        list_for_each_entry_safe(iocb, next_iocb,
11227                                                 &pring->txq, list) {
11228                                if (iocb->vport != vport)
11229                                        continue;
11230                                list_move_tail(&iocb->list, &completions);
11231                        }
11232                        spin_unlock(&pring->ring_lock);
11233                        list_for_each_entry_safe(iocb, next_iocb,
11234                                                 &pring->txcmplq, list) {
11235                                if (iocb->vport != vport)
11236                                        continue;
11237                                lpfc_sli_issue_abort_iotag(phba, pring, iocb,
11238                                                           NULL);
11239                        }
11240                        pring->flag = prev_pring_flag;
11241                }
11242        }
11243        spin_unlock_irqrestore(&phba->hbalock, flags);
11244
11245        /* Make sure HBA is alive */
11246        lpfc_issue_hb_tmo(phba);
11247
11248        /* Cancel all the IOCBs from the completions list */
11249        lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
11250                              IOERR_SLI_DOWN);
11251        return 1;
11252}
11253
11254/**
11255 * lpfc_sli_hba_down - Resource cleanup function for the HBA
11256 * @phba: Pointer to HBA context object.
11257 *
11258 * This function cleans up all iocb, buffers, mailbox commands
11259 * while shutting down the HBA. This function is called with no
11260 * lock held and always returns 1.
11261 * This function does the following to clean up driver resources:
11262 * - Free discovery resources for each virtual port
11263 * - Cleanup any pending fabric iocbs
11264 * - Iterate through the iocb txq and free each entry
11265 *   in the list.
11266 * - Free up any buffer posted to the HBA
11267 * - Free mailbox commands in the mailbox queue.
11268 **/
11269int
11270lpfc_sli_hba_down(struct lpfc_hba *phba)
11271{
11272        LIST_HEAD(completions);
11273        struct lpfc_sli *psli = &phba->sli;
11274        struct lpfc_queue *qp = NULL;
11275        struct lpfc_sli_ring *pring;
11276        struct lpfc_dmabuf *buf_ptr;
11277        unsigned long flags = 0;
11278        int i;
11279
11280        /* Shutdown the mailbox command sub-system */
11281        lpfc_sli_mbox_sys_shutdown(phba, LPFC_MBX_WAIT);
11282
11283        lpfc_hba_down_prep(phba);
11284
11285        /* Disable softirqs, including timers from obtaining phba->hbalock */
11286        local_bh_disable();
11287
11288        lpfc_fabric_abort_hba(phba);
11289
11290        spin_lock_irqsave(&phba->hbalock, flags);
11291
11292        /*
11293         * Error everything on the txq since these iocbs
11294         * have not been given to the FW yet.
11295         */
11296        if (phba->sli_rev != LPFC_SLI_REV4) {
11297                for (i = 0; i < psli->num_rings; i++) {
11298                        pring = &psli->sli3_ring[i];
11299                        /* Only slow rings */
11300                        if (pring->ringno == LPFC_ELS_RING) {
11301                                pring->flag |= LPFC_DEFERRED_RING_EVENT;
11302                                /* Set the lpfc data pending flag */
11303                                set_bit(LPFC_DATA_READY, &phba->data_flags);
11304                        }
11305                        list_splice_init(&pring->txq, &completions);
11306                }
11307        } else {
11308                list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
11309                        pring = qp->pring;
11310                        if (!pring)
11311                                continue;
11312                        spin_lock(&pring->ring_lock);
11313                        list_splice_init(&pring->txq, &completions);
11314                        spin_unlock(&pring->ring_lock);
11315                        if (pring == phba->sli4_hba.els_wq->pring) {
11316                                pring->flag |= LPFC_DEFERRED_RING_EVENT;
11317                                /* Set the lpfc data pending flag */
11318                                set_bit(LPFC_DATA_READY, &phba->data_flags);
11319                        }
11320                }
11321        }
11322        spin_unlock_irqrestore(&phba->hbalock, flags);
11323
11324        /* Cancel all the IOCBs from the completions list */
11325        lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
11326                              IOERR_SLI_DOWN);
11327
11328        spin_lock_irqsave(&phba->hbalock, flags);
11329        list_splice_init(&phba->elsbuf, &completions);
11330        phba->elsbuf_cnt = 0;
11331        phba->elsbuf_prev_cnt = 0;
11332        spin_unlock_irqrestore(&phba->hbalock, flags);
11333
11334        while (!list_empty(&completions)) {
11335                list_remove_head(&completions, buf_ptr,
11336                        struct lpfc_dmabuf, list);
11337                lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
11338                kfree(buf_ptr);
11339        }
11340
11341        /* Enable softirqs again, done with phba->hbalock */
11342        local_bh_enable();
11343
11344        /* Return any active mbox cmds */
11345        del_timer_sync(&psli->mbox_tmo);
11346
11347        spin_lock_irqsave(&phba->pport->work_port_lock, flags);
11348        phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
11349        spin_unlock_irqrestore(&phba->pport->work_port_lock, flags);
11350
11351        return 1;
11352}
11353
11354/**
11355 * lpfc_sli_pcimem_bcopy - SLI memory copy function
11356 * @srcp: Source memory pointer.
11357 * @destp: Destination memory pointer.
11358 * @cnt: Number of bytes to copy (must be a multiple of 4).
11359 *
11360 * This function is used for copying data between driver memory
11361 * and the SLI memory. This function also changes the endianness
11362 * of each word if native endianness is different from SLI
11363 * endianness. This function can be called with or without
11364 * lock.
11365 **/
11366void
11367lpfc_sli_pcimem_bcopy(void *srcp, void *destp, uint32_t cnt)
11368{
11369        uint32_t *src = srcp;
11370        uint32_t *dest = destp;
11371        uint32_t ldata;
11372        int i;
11373
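             /* cnt is a byte count; copy one 32-bit word per iteration,
              * converting from little endian to host byte order.
              */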
11374        for (i = 0; i < (int)cnt; i += sizeof (uint32_t)) {
11375                ldata = *src;
11376                ldata = le32_to_cpu(ldata);
11377                *dest = ldata;
11378                src++;
11379                dest++;
11380        }
11381}
11382
11383
11384/**
11385 * lpfc_sli_bemem_bcopy - SLI memory copy function
11386 * @srcp: Source memory pointer.
11387 * @destp: Destination memory pointer.
11388 * @cnt: Number of bytes to copy (must be a multiple of 4).
11389 *
11390 * This function is used for copying data from a structure with big
11391 * endian representation to local endianness.
11392 * This function can be called with or without lock.
11393 **/
11394void
11395lpfc_sli_bemem_bcopy(void *srcp, void *destp, uint32_t cnt)
11396{
11397        uint32_t *src = srcp;
11398        uint32_t *dest = destp;
11399        uint32_t ldata;
11400        int i;
11401
11402        for (i = 0; i < (int)cnt; i += sizeof(uint32_t)) {
11403                ldata = *src;
11404                ldata = be32_to_cpu(ldata);
11405                *dest = ldata;
11406                src++;
11407                dest++;
11408        }
11409}
11410
11411/**
11412 * lpfc_sli_ringpostbuf_put - Function to add a buffer to postbufq
11413 * @phba: Pointer to HBA context object.
11414 * @pring: Pointer to driver SLI ring object.
11415 * @mp: Pointer to driver buffer object.
11416 *
11417 * This function is called with no lock held.
11418 * It always returns zero after adding the buffer to the postbufq
11419 * buffer list.
11420 **/
11421int
11422lpfc_sli_ringpostbuf_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
11423                         struct lpfc_dmabuf *mp)
11424{
11425        /* Stick struct lpfc_dmabuf at end of postbufq so driver can look it up
11426           later */
11427        spin_lock_irq(&phba->hbalock);
11428        list_add_tail(&mp->list, &pring->postbufq);
11429        pring->postbufq_cnt++;
11430        spin_unlock_irq(&phba->hbalock);
11431        return 0;
11432}
11433
11434/**
11435 * lpfc_sli_get_buffer_tag - allocates a tag for a CMD_QUE_XRI64_CX buffer
11436 * @phba: Pointer to HBA context object.
11437 *
11438 * When HBQ is enabled, buffers are searched based on tags. This function
11439 * allocates a tag for buffer posted using CMD_QUE_XRI64_CX iocb. The
11440 * tag is bitwise OR-ed with QUE_BUFTAG_BIT to make sure that the tag
11441 * does not conflict with tags of buffer posted for unsolicited events.
11442 * The function returns the allocated tag. The function is called with
11443 * no locks held.
11444 **/
11445uint32_t
11446lpfc_sli_get_buffer_tag(struct lpfc_hba *phba)
11447{
11448        spin_lock_irq(&phba->hbalock);
11449        phba->buffer_tag_count++;
11450        /*
11451         * Always set the QUE_BUFTAG_BIT to distinguish this tag
11452         * from a tag assigned by HBQ.
11453         */
11454        phba->buffer_tag_count |= QUE_BUFTAG_BIT;
11455        spin_unlock_irq(&phba->hbalock);
11456        return phba->buffer_tag_count;
11457}
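
/*
 * Illustrative sketch (editor's note, not part of the driver): a caller
 * tags a buffer before posting it with a CMD_QUE_XRI64_CX iocb so the
 * buffer can later be found by lpfc_sli_ring_taggedbuf_get(). "mp" is a
 * hypothetical struct lpfc_dmabuf.
 *
 *	mp->buffer_tag = lpfc_sli_get_buffer_tag(phba);
 *	// ... build and issue a CMD_QUE_XRI64_CX iocb carrying the tag
 */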
11458
11459/**
11460 * lpfc_sli_ring_taggedbuf_get - find HBQ buffer associated with given tag
11461 * @phba: Pointer to HBA context object.
11462 * @pring: Pointer to driver SLI ring object.
11463 * @tag: Buffer tag.
11464 *
11465 * Buffers posted using CMD_QUE_XRI64_CX iocb are in pring->postbufq
11466 * list. After HBA DMA data to these buffers, CMD_IOCB_RET_XRI64_CX
11467 * iocb is posted to the response ring with the tag of the buffer.
11468 * This function searches the pring->postbufq list using the tag
11469 * to find buffer associated with CMD_IOCB_RET_XRI64_CX
11470 * iocb. If the buffer is found, the lpfc_dmabuf object of the
11471 * buffer is returned to the caller; otherwise NULL is returned.
11472 * This function is called with no lock held.
11473 **/
11474struct lpfc_dmabuf *
11475lpfc_sli_ring_taggedbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
11476                        uint32_t tag)
11477{
11478        struct lpfc_dmabuf *mp, *next_mp;
11479        struct list_head *slp = &pring->postbufq;
11480
11481        /* Search postbufq, from the beginning, looking for a match on tag */
11482        spin_lock_irq(&phba->hbalock);
11483        list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
11484                if (mp->buffer_tag == tag) {
11485                        list_del_init(&mp->list);
11486                        pring->postbufq_cnt--;
11487                        spin_unlock_irq(&phba->hbalock);
11488                        return mp;
11489                }
11490        }
11491
11492        spin_unlock_irq(&phba->hbalock);
11493        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11494                        "0402 Cannot find virtual addr for buffer tag on "
11495                        "ring %d Data x%lx x%px x%px x%x\n",
11496                        pring->ringno, (unsigned long) tag,
11497                        slp->next, slp->prev, pring->postbufq_cnt);
11498
11499        return NULL;
11500}
11501
11502/**
11503 * lpfc_sli_ringpostbuf_get - search buffers for unsolicited CT and ELS events
11504 * @phba: Pointer to HBA context object.
11505 * @pring: Pointer to driver SLI ring object.
11506 * @phys: DMA address of the buffer.
11507 *
11508 * This function searches the buffer list using the dma_address
11509 * of unsolicited event to find the driver's lpfc_dmabuf object
11510 * corresponding to the dma_address. The function returns the
11511 * lpfc_dmabuf object if a buffer is found; otherwise it returns NULL.
11512 * This function is called by the ct and els unsolicited event
11513 * handlers to get the buffer associated with the unsolicited
11514 * event.
11515 *
11516 * This function is called with no lock held.
11517 **/
11518struct lpfc_dmabuf *
11519lpfc_sli_ringpostbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
11520                         dma_addr_t phys)
11521{
11522        struct lpfc_dmabuf *mp, *next_mp;
11523        struct list_head *slp = &pring->postbufq;
11524
11525        /* Search postbufq, from the beginning, looking for a match on phys */
11526        spin_lock_irq(&phba->hbalock);
11527        list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
11528                if (mp->phys == phys) {
11529                        list_del_init(&mp->list);
11530                        pring->postbufq_cnt--;
11531                        spin_unlock_irq(&phba->hbalock);
11532                        return mp;
11533                }
11534        }
11535
11536        spin_unlock_irq(&phba->hbalock);
11537        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11538                        "0410 Cannot find virtual addr for mapped buf on "
11539                        "ring %d Data x%llx x%px x%px x%x\n",
11540                        pring->ringno, (unsigned long long)phys,
11541                        slp->next, slp->prev, pring->postbufq_cnt);
11542        return NULL;
11543}
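
/*
 * Illustrative sketch (editor's note, not part of the driver): the
 * put/get pair above brackets an unsolicited event. A buffer is parked
 * on postbufq and later reclaimed by the DMA address the HBA reported.
 * "dma_addr" is a hypothetical dma_addr_t taken from the event.
 *
 *	lpfc_sli_ringpostbuf_put(phba, pring, mp);
 *	// ... HBA DMAs the unsolicited payload into the buffer ...
 *	mp = lpfc_sli_ringpostbuf_get(phba, pring, dma_addr);
 *	if (!mp)
 *		return;	// no posted buffer matched that address
 */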
11544
11545/**
11546 * lpfc_sli_abort_els_cmpl - Completion handler for the els abort iocbs
11547 * @phba: Pointer to HBA context object.
11548 * @cmdiocb: Pointer to driver command iocb object.
11549 * @rspiocb: Pointer to driver response iocb object.
11550 *
11551 * This function is the completion handler for the abort iocbs for
11552 * ELS commands. This function is called from the ELS ring event
11553 * handler with no lock held. This function frees memory resources
11554 * associated with the abort iocb.
11555 **/
11556static void
11557lpfc_sli_abort_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
11558                        struct lpfc_iocbq *rspiocb)
11559{
11560        IOCB_t *irsp = &rspiocb->iocb;
11561        uint16_t abort_iotag, abort_context;
11562        struct lpfc_iocbq *abort_iocb = NULL;
11563
11564        if (irsp->ulpStatus) {
11565
11566                /*
11567                 * Assume that the port already completed and returned, or
11568                 * will return the iocb. Just log the message.
11569                 */
11570                abort_context = cmdiocb->iocb.un.acxri.abortContextTag;
11571                abort_iotag = cmdiocb->iocb.un.acxri.abortIoTag;
11572
11573                spin_lock_irq(&phba->hbalock);
11574                if (phba->sli_rev < LPFC_SLI_REV4) {
11575                        if (irsp->ulpCommand == CMD_ABORT_XRI_CX &&
11576                            irsp->ulpStatus == IOSTAT_LOCAL_REJECT &&
11577                            irsp->un.ulpWord[4] == IOERR_ABORT_REQUESTED) {
11578                                spin_unlock_irq(&phba->hbalock);
11579                                goto release_iocb;
11580                        }
11581                        if (abort_iotag != 0 &&
11582                                abort_iotag <= phba->sli.last_iotag)
11583                                abort_iocb =
11584                                        phba->sli.iocbq_lookup[abort_iotag];
11585                } else
11586                        /* For sli4 the abort_tag is the XRI,
11587                         * so the abort routine puts the iotag of the iocb
11588                         * being aborted in the context field of the abort
11589                         * IOCB.
11590                         */
11591                        abort_iocb = phba->sli.iocbq_lookup[abort_context];
11592
11593                lpfc_printf_log(phba, KERN_WARNING, LOG_ELS | LOG_SLI,
11594                                "0327 Cannot abort els iocb x%px "
11595                                "with tag %x context %x, abort status %x, "
11596                                "abort code %x\n",
11597                                abort_iocb, abort_iotag, abort_context,
11598                                irsp->ulpStatus, irsp->un.ulpWord[4]);
11599
11600                spin_unlock_irq(&phba->hbalock);
11601        }
11602release_iocb:
11603        lpfc_sli_release_iocbq(phba, cmdiocb);
11604        return;
11605}
11606
11607/**
11608 * lpfc_ignore_els_cmpl - Completion handler for aborted ELS command
11609 * @phba: Pointer to HBA context object.
11610 * @cmdiocb: Pointer to driver command iocb object.
11611 * @rspiocb: Pointer to driver response iocb object.
11612 *
11613 * The function is called from SLI ring event handler with no
11614 * lock held. This function is the completion handler for ELS commands
11615 * which are aborted. The function frees memory resources used for
11616 * the aborted ELS commands.
11617 **/
11618void
11619lpfc_ignore_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
11620                     struct lpfc_iocbq *rspiocb)
11621{
11622        IOCB_t *irsp = &rspiocb->iocb;
11623
11624        /* ELS cmd tag <ulpIoTag> completes */
11625        lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
11626                        "0139 Ignoring ELS cmd tag x%x completion Data: "
11627                        "x%x x%x x%x\n",
11628                        irsp->ulpIoTag, irsp->ulpStatus,
11629                        irsp->un.ulpWord[4], irsp->ulpTimeout);
11630        lpfc_nlp_put((struct lpfc_nodelist *)cmdiocb->context1);
11631        if (cmdiocb->iocb.ulpCommand == CMD_GEN_REQUEST64_CR)
11632                lpfc_ct_free_iocb(phba, cmdiocb);
11633        else
11634                lpfc_els_free_iocb(phba, cmdiocb);
11635}
11636
11637/**
11638 * lpfc_sli_issue_abort_iotag - Abort function for a command iocb
11639 * @phba: Pointer to HBA context object.
11640 * @pring: Pointer to driver SLI ring object.
11641 * @cmdiocb: Pointer to driver command iocb object.
11642 * @cmpl: completion function.
11643 *
11644 * This function issues an abort iocb for the provided command iocb. In case
11645 * of unloading, the abort iocb will not be issued to commands on the ELS
11646 * ring; instead, the callback function is changed for those commands
11647 * so that nothing happens when they finish. This function is called with
11648 * hbalock held and no ring_lock held (SLI4). The function returns
11649 * IOCB_ABORTING when the command iocb is itself an abort request or is
11650 * already being aborted.
11651 **/
11652int
11653lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
11654                           struct lpfc_iocbq *cmdiocb, void *cmpl)
11655{
11656        struct lpfc_vport *vport = cmdiocb->vport;
11657        struct lpfc_iocbq *abtsiocbp;
11658        IOCB_t *icmd = NULL;
11659        IOCB_t *iabt = NULL;
11660        int retval = IOCB_ERROR;
11661        unsigned long iflags;
11662        struct lpfc_nodelist *ndlp;
11663
11664        /*
11665         * There are certain command types we don't want to abort.  And we
11666         * don't want to abort commands that are already in the process of
11667         * being aborted.
11668         */
11669        icmd = &cmdiocb->iocb;
11670        if (icmd->ulpCommand == CMD_ABORT_XRI_CN ||
11671            icmd->ulpCommand == CMD_CLOSE_XRI_CN ||
11672            cmdiocb->iocb_flag & LPFC_DRIVER_ABORTED)
11673                return IOCB_ABORTING;
11674
11675        if (!pring) {
11676                if (cmdiocb->iocb_flag & LPFC_IO_FABRIC)
11677                        cmdiocb->fabric_iocb_cmpl = lpfc_ignore_els_cmpl;
11678                else
11679                        cmdiocb->iocb_cmpl = lpfc_ignore_els_cmpl;
11680                return retval;
11681        }
11682
11683        /*
11684         * If we're unloading, don't abort iocb on the ELS ring, but change
11685         * the callback so that nothing happens when it finishes.
11686         */
11687        if ((vport->load_flag & FC_UNLOADING) &&
11688            pring->ringno == LPFC_ELS_RING) {
11689                if (cmdiocb->iocb_flag & LPFC_IO_FABRIC)
11690                        cmdiocb->fabric_iocb_cmpl = lpfc_ignore_els_cmpl;
11691                else
11692                        cmdiocb->iocb_cmpl = lpfc_ignore_els_cmpl;
11693                return retval;
11694        }
11695
11696        /* issue ABTS for this IOCB based on iotag */
11697        abtsiocbp = __lpfc_sli_get_iocbq(phba);
11698        if (abtsiocbp == NULL)
11699                return IOCB_NORESOURCE;
11700
11701        /* This signals the response to set the correct status
11702         * before calling the completion handler
11703         */
11704        cmdiocb->iocb_flag |= LPFC_DRIVER_ABORTED;
11705
11706        iabt = &abtsiocbp->iocb;
11707        iabt->un.acxri.abortType = ABORT_TYPE_ABTS;
11708        iabt->un.acxri.abortContextTag = icmd->ulpContext;
11709        if (phba->sli_rev == LPFC_SLI_REV4) {
11710                iabt->un.acxri.abortIoTag = cmdiocb->sli4_xritag;
11711                if (pring->ringno == LPFC_ELS_RING)
11712                        iabt->un.acxri.abortContextTag = cmdiocb->iotag;
11713        } else {
11714                iabt->un.acxri.abortIoTag = icmd->ulpIoTag;
11715                if (pring->ringno == LPFC_ELS_RING) {
11716                        ndlp = (struct lpfc_nodelist *)(cmdiocb->context1);
11717                        iabt->un.acxri.abortContextTag = ndlp->nlp_rpi;
11718                }
11719        }
11720        iabt->ulpLe = 1;
11721        iabt->ulpClass = icmd->ulpClass;
11722
11723        /* ABTS WQE must go to the same WQ as the WQE to be aborted */
11724        abtsiocbp->hba_wqidx = cmdiocb->hba_wqidx;
11725        if (cmdiocb->iocb_flag & LPFC_IO_FCP) {
11726                abtsiocbp->iocb_flag |= LPFC_IO_FCP;
11727                abtsiocbp->iocb_flag |= LPFC_USE_FCPWQIDX;
11728        }
11729        if (cmdiocb->iocb_flag & LPFC_IO_FOF)
11730                abtsiocbp->iocb_flag |= LPFC_IO_FOF;
11731
11732        if (phba->link_state >= LPFC_LINK_UP)
11733                iabt->ulpCommand = CMD_ABORT_XRI_CN;
11734        else
11735                iabt->ulpCommand = CMD_CLOSE_XRI_CN;
11736
11737        if (cmpl)
11738                abtsiocbp->iocb_cmpl = cmpl;
11739        else
11740                abtsiocbp->iocb_cmpl = lpfc_sli_abort_els_cmpl;
11741        abtsiocbp->vport = vport;
11742
11743        if (phba->sli_rev == LPFC_SLI_REV4) {
11744                pring = lpfc_sli4_calc_ring(phba, abtsiocbp);
11745                if (unlikely(pring == NULL))
11746                        goto abort_iotag_exit;
11747                /* Note: both hbalock and ring_lock need to be held here */
11748                spin_lock_irqsave(&pring->ring_lock, iflags);
11749                retval = __lpfc_sli_issue_iocb(phba, pring->ringno,
11750                        abtsiocbp, 0);
11751                spin_unlock_irqrestore(&pring->ring_lock, iflags);
11752        } else {
11753                retval = __lpfc_sli_issue_iocb(phba, pring->ringno,
11754                        abtsiocbp, 0);
11755        }
11756
11757abort_iotag_exit:
11758
11759        lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
11760                         "0339 Abort xri x%x, original iotag x%x, "
11761                         "abort cmd iotag x%x retval x%x\n",
11762                         iabt->un.acxri.abortIoTag,
11763                         iabt->un.acxri.abortContextTag,
11764                         abtsiocbp->iotag, retval);
11765
11766        if (retval) {
11767                cmdiocb->iocb_flag &= ~LPFC_DRIVER_ABORTED;
11768                __lpfc_sli_release_iocbq(phba, abtsiocbp);
11769        }
11770
11771        /*
11772         * Caller to this routine should check for IOCB_ERROR
11773         * and handle it properly.  This routine no longer removes
11774         * iocb off txcmplq and call compl in case of IOCB_ERROR.
11775         */
11776        return retval;
11777}
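
/*
 * Illustrative sketch (editor's note, not part of the driver): issuing
 * an abort for an outstanding iocb. Per the locking note above, hbalock
 * must be held by the caller; passing a NULL @cmpl makes the abort
 * complete through lpfc_sli_abort_els_cmpl().
 *
 *	spin_lock_irqsave(&phba->hbalock, iflags);
 *	ret = lpfc_sli_issue_abort_iotag(phba, pring, iocb, NULL);
 *	spin_unlock_irqrestore(&phba->hbalock, iflags);
 *	// ret is IOCB_SUCCESS, IOCB_ABORTING, IOCB_NORESOURCE or an error
 */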
11778
11779/**
11780 * lpfc_sli_hba_iocb_abort - Abort all iocbs to an hba.
11781 * @phba: pointer to lpfc HBA data structure.
11782 *
11783 * This routine will abort all pending and outstanding iocbs to an HBA.
11784 **/
11785void
11786lpfc_sli_hba_iocb_abort(struct lpfc_hba *phba)
11787{
11788        struct lpfc_sli *psli = &phba->sli;
11789        struct lpfc_sli_ring *pring;
11790        struct lpfc_queue *qp = NULL;
11791        int i;
11792
11793        if (phba->sli_rev != LPFC_SLI_REV4) {
11794                for (i = 0; i < psli->num_rings; i++) {
11795                        pring = &psli->sli3_ring[i];
11796                        lpfc_sli_abort_iocb_ring(phba, pring);
11797                }
11798                return;
11799        }
11800        list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
11801                pring = qp->pring;
11802                if (!pring)
11803                        continue;
11804                lpfc_sli_abort_iocb_ring(phba, pring);
11805        }
11806}
11807
11808/**
11809 * lpfc_sli_validate_fcp_iocb - find commands associated with a vport or LUN
11810 * @iocbq: Pointer to driver iocb object.
11811 * @vport: Pointer to driver virtual port object.
11812 * @tgt_id: SCSI ID of the target.
11813 * @lun_id: LUN ID of the scsi device.
11814 * @ctx_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST
11815 *
11816 * This function acts as an iocb filter for functions which abort or count
11817 * all FCP iocbs pending on a lun/SCSI target/SCSI host. It will return
11818 * 0 if the filtering criteria are met for the given iocb and will return
11819 * 1 if the filtering criteria are not met.
11820 * If ctx_cmd == LPFC_CTX_LUN, the function returns 0 only if the
11821 * given iocb is for the SCSI device specified by vport, tgt_id and
11822 * lun_id parameter.
11823 * If ctx_cmd == LPFC_CTX_TGT,  the function returns 0 only if the
11824 * given iocb is for the SCSI target specified by vport and tgt_id
11825 * parameters.
11826 * If ctx_cmd == LPFC_CTX_HOST, the function returns 0 only if the
11827 * given iocb is for the SCSI host associated with the given vport.
11828 * This function is called with no locks held.
11829 **/
11830static int
11831lpfc_sli_validate_fcp_iocb(struct lpfc_iocbq *iocbq, struct lpfc_vport *vport,
11832                           uint16_t tgt_id, uint64_t lun_id,
11833                           lpfc_ctx_cmd ctx_cmd)
11834{
11835        struct lpfc_io_buf *lpfc_cmd;
11836        IOCB_t *icmd = NULL;
11837        int rc = 1;
11838
11839        if (!iocbq || iocbq->vport != vport)
11840                return rc;
11841
11842        if (!(iocbq->iocb_flag & LPFC_IO_FCP) ||
11843            !(iocbq->iocb_flag & LPFC_IO_ON_TXCMPLQ) ||
11844              iocbq->iocb_flag & LPFC_DRIVER_ABORTED)
11845                return rc;
11846
11847        icmd = &iocbq->iocb;
11848        if (icmd->ulpCommand == CMD_ABORT_XRI_CN ||
11849            icmd->ulpCommand == CMD_CLOSE_XRI_CN)
11850                return rc;
11851
11852        lpfc_cmd = container_of(iocbq, struct lpfc_io_buf, cur_iocbq);
11853
11854        if (lpfc_cmd->pCmd == NULL)
11855                return rc;
11856
11857        switch (ctx_cmd) {
11858        case LPFC_CTX_LUN:
11859                if ((lpfc_cmd->rdata) && (lpfc_cmd->rdata->pnode) &&
11860                    (lpfc_cmd->rdata->pnode->nlp_sid == tgt_id) &&
11861                    (scsilun_to_int(&lpfc_cmd->fcp_cmnd->fcp_lun) == lun_id))
11862                        rc = 0;
11863                break;
11864        case LPFC_CTX_TGT:
11865                if ((lpfc_cmd->rdata) && (lpfc_cmd->rdata->pnode) &&
11866                    (lpfc_cmd->rdata->pnode->nlp_sid == tgt_id))
11867                        rc = 0;
11868                break;
11869        case LPFC_CTX_HOST:
11870                rc = 0;
11871                break;
11872        default:
11873                printk(KERN_ERR "%s: Unknown context cmd type, value %d\n",
11874                        __func__, ctx_cmd);
11875                break;
11876        }
11877
11878        return rc;
11879}
11880
11881/**
11882 * lpfc_sli_sum_iocb - Function to count the number of FCP iocbs pending
11883 * @vport: Pointer to virtual port.
11884 * @tgt_id: SCSI ID of the target.
11885 * @lun_id: LUN ID of the scsi device.
11886 * @ctx_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST.
11887 *
11888 * This function returns the number of FCP commands pending for the vport.
11889 * When ctx_cmd == LPFC_CTX_LUN, the function returns the number of FCP
11890 * commands pending on the vport associated with the SCSI device specified
11891 * by the tgt_id and lun_id parameters.
11892 * When ctx_cmd == LPFC_CTX_TGT, the function returns the number of FCP
11893 * commands pending on the vport associated with the SCSI target specified
11894 * by the tgt_id parameter.
11895 * When ctx_cmd == LPFC_CTX_HOST, the function returns the number of FCP
11896 * commands pending on the vport.
11897 * This function returns the number of iocbs which satisfy the filter.
11898 * This function is called without any lock held.
11899 **/
11900int
11901lpfc_sli_sum_iocb(struct lpfc_vport *vport, uint16_t tgt_id, uint64_t lun_id,
11902                  lpfc_ctx_cmd ctx_cmd)
11903{
11904        struct lpfc_hba *phba = vport->phba;
11905        struct lpfc_iocbq *iocbq;
11906        int sum, i;
11907
11908        spin_lock_irq(&phba->hbalock);
11909        for (i = 1, sum = 0; i <= phba->sli.last_iotag; i++) {
11910                iocbq = phba->sli.iocbq_lookup[i];
11911
11912                if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id,
11913                                               ctx_cmd) == 0)
11914                        sum++;
11915        }
11916        spin_unlock_irq(&phba->hbalock);
11917
11918        return sum;
11919}
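
/*
 * Illustrative sketch (editor's note, not part of the driver): counting
 * the FCP commands still pending on one LUN, e.g. when polling for a
 * LUN reset to drain. "tgt" and "lun" are hypothetical IDs.
 *
 *	int pending;
 *
 *	pending = lpfc_sli_sum_iocb(vport, tgt, lun, LPFC_CTX_LUN);
 *	if (pending)
 *		// outstanding I/O remains on this LUN
 */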
11920
11921/**
11922 * lpfc_sli4_abort_fcp_cmpl - Completion handler function for aborted FCP IOCBs
11923 * @phba: Pointer to HBA context object
11924 * @cmdiocb: Pointer to command iocb object.
11925 * @wcqe: pointer to the complete wcqe
11926 *
11927 * This function is called when an aborted FCP iocb completes. This
11928 * function is called by the ring event handler with no lock held.
11929 * This function frees the iocb. It is called for sli-4 adapters.
11930 **/
11931void
11932lpfc_sli4_abort_fcp_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
11933                         struct lpfc_wcqe_complete *wcqe)
11934{
11935        lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
11936                        "3017 ABORT_XRI_CN completing on rpi x%x "
11937                        "original iotag x%x, abort cmd iotag x%x "
11938                        "status 0x%x, reason 0x%x\n",
11939                        cmdiocb->iocb.un.acxri.abortContextTag,
11940                        cmdiocb->iocb.un.acxri.abortIoTag,
11941                        cmdiocb->iotag,
11942                        (bf_get(lpfc_wcqe_c_status, wcqe)
11943                        & LPFC_IOCB_STATUS_MASK),
11944                        wcqe->parameter);
11945        lpfc_sli_release_iocbq(phba, cmdiocb);
11946}
11947
11948/**
11949 * lpfc_sli_abort_fcp_cmpl - Completion handler function for aborted FCP IOCBs
11950 * @phba: Pointer to HBA context object
11951 * @cmdiocb: Pointer to command iocb object.
11952 * @rspiocb: Pointer to response iocb object.
11953 *
11954 * This function is called when an aborted FCP iocb completes. This
11955 * function is called by the ring event handler with no lock held.
11956 * This function frees the iocb.
11957 **/
11958void
11959lpfc_sli_abort_fcp_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
11960                        struct lpfc_iocbq *rspiocb)
11961{
11962        lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
11963                        "3096 ABORT_XRI_CN completing on rpi x%x "
11964                        "original iotag x%x, abort cmd iotag x%x "
11965                        "status 0x%x, reason 0x%x\n",
11966                        cmdiocb->iocb.un.acxri.abortContextTag,
11967                        cmdiocb->iocb.un.acxri.abortIoTag,
11968                        cmdiocb->iotag, rspiocb->iocb.ulpStatus,
11969                        rspiocb->iocb.un.ulpWord[4]);
11970        lpfc_sli_release_iocbq(phba, cmdiocb);
11971        return;
11972}
11973
11974/**
11975 * lpfc_sli_abort_iocb - issue abort for all commands on a host/target/LUN
11976 * @vport: Pointer to virtual port.
11977 * @tgt_id: SCSI ID of the target.
11978 * @lun_id: LUN ID of the scsi device.
11979 * @abort_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST.
11980 *
11981 * This function sends an abort command for every SCSI command
11982 * associated with the given virtual port pending on the ring
11983 * filtered by lpfc_sli_validate_fcp_iocb function.
11984 * When abort_cmd == LPFC_CTX_LUN, the function sends abort only to the
11985 * FCP iocbs associated with the lun specified by the tgt_id and lun_id
11986 * parameters.
11987 * When abort_cmd == LPFC_CTX_TGT, the function sends abort only to the
11988 * FCP iocbs associated with the SCSI target specified by the tgt_id parameter.
11989 * When abort_cmd == LPFC_CTX_HOST, the function sends abort to all
11990 * FCP iocbs associated with the virtual port.
11991 * The pring used for SLI3 is sli3_ring[LPFC_FCP_RING]; for SLI4,
11992 * lpfc_sli4_calc_ring is used.
11993 * This function returns the number of iocbs it failed to abort.
11994 * This function is called with no locks held.
11995 **/
11996int
11997lpfc_sli_abort_iocb(struct lpfc_vport *vport, u16 tgt_id, u64 lun_id,
11998                    lpfc_ctx_cmd abort_cmd)
11999{
12000        struct lpfc_hba *phba = vport->phba;
12001        struct lpfc_sli_ring *pring = NULL;
12002        struct lpfc_iocbq *iocbq;
12003        int errcnt = 0, ret_val = 0;
12004        unsigned long iflags;
12005        int i;
12006        void *fcp_cmpl = NULL;
12007
12008        /* all I/Os are in the process of being flushed */
12009        if (phba->hba_flag & HBA_IOQ_FLUSH)
12010                return errcnt;
12011
12012        for (i = 1; i <= phba->sli.last_iotag; i++) {
12013                iocbq = phba->sli.iocbq_lookup[i];
12014
12015                if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id,
12016                                               abort_cmd) != 0)
12017                        continue;
12018
12019                spin_lock_irqsave(&phba->hbalock, iflags);
12020                if (phba->sli_rev == LPFC_SLI_REV3) {
12021                        pring = &phba->sli.sli3_ring[LPFC_FCP_RING];
12022                        fcp_cmpl = lpfc_sli_abort_fcp_cmpl;
12023                } else if (phba->sli_rev == LPFC_SLI_REV4) {
12024                        pring = lpfc_sli4_calc_ring(phba, iocbq);
12025                        fcp_cmpl = lpfc_sli4_abort_fcp_cmpl;
12026                }
12027                ret_val = lpfc_sli_issue_abort_iotag(phba, pring, iocbq,
12028                                                     fcp_cmpl);
12029                spin_unlock_irqrestore(&phba->hbalock, iflags);
12030                if (ret_val != IOCB_SUCCESS)
12031                        errcnt++;
12032        }
12033
12034        return errcnt;
12035}
12036
12037/**
12038 * lpfc_sli_abort_taskmgmt - issue abort for all commands on a host/target/LUN
12039 * @vport: Pointer to virtual port.
12040 * @pring: Pointer to driver SLI ring object.
12041 * @tgt_id: SCSI ID of the target.
12042 * @lun_id: LUN ID of the scsi device.
12043 * @cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST.
12044 *
12045 * This function sends an abort command for every SCSI command
12046 * associated with the given virtual port pending on the ring
12047 * filtered by lpfc_sli_validate_fcp_iocb function.
12048 * When cmd == LPFC_CTX_LUN, the function sends abort only to the
12049 * FCP iocbs associated with the lun specified by the tgt_id and lun_id
12050 * parameters.
12051 * When cmd == LPFC_CTX_TGT, the function sends abort only to the
12052 * FCP iocbs associated with the SCSI target specified by the tgt_id parameter.
12053 * When cmd == LPFC_CTX_HOST, the function sends abort to all
12054 * FCP iocbs associated with the virtual port.
12055 * This function returns the number of iocbs it aborted.
12056 * This function is called with no locks held right after a taskmgmt
12057 * command is sent.
12058 **/
12059int
12060lpfc_sli_abort_taskmgmt(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
12061                        uint16_t tgt_id, uint64_t lun_id, lpfc_ctx_cmd cmd)
12062{
12063        struct lpfc_hba *phba = vport->phba;
12064        struct lpfc_io_buf *lpfc_cmd;
12065        struct lpfc_iocbq *abtsiocbq;
12066        struct lpfc_nodelist *ndlp;
12067        struct lpfc_iocbq *iocbq;
12068        IOCB_t *icmd;
12069        int sum, i, ret_val;
12070        unsigned long iflags;
12071        struct lpfc_sli_ring *pring_s4 = NULL;
12072
12073        spin_lock_irqsave(&phba->hbalock, iflags);
12074
12075        /* all I/Os are in the process of being flushed */
12076        if (phba->hba_flag & HBA_IOQ_FLUSH) {
12077                spin_unlock_irqrestore(&phba->hbalock, iflags);
12078                return 0;
12079        }
12080        sum = 0;
12081
12082        for (i = 1; i <= phba->sli.last_iotag; i++) {
12083                iocbq = phba->sli.iocbq_lookup[i];
12084
12085                if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id,
12086                                               cmd) != 0)
12087                        continue;
12088
12089                /* Guard against IO completion being called at the same time */
12090                lpfc_cmd = container_of(iocbq, struct lpfc_io_buf, cur_iocbq);
12091                spin_lock(&lpfc_cmd->buf_lock);
12092
12093                if (!lpfc_cmd->pCmd) {
12094                        spin_unlock(&lpfc_cmd->buf_lock);
12095                        continue;
12096                }
12097
12098                if (phba->sli_rev == LPFC_SLI_REV4) {
12099                        pring_s4 =
12100                            phba->sli4_hba.hdwq[iocbq->hba_wqidx].io_wq->pring;
12101                        if (!pring_s4) {
12102                                spin_unlock(&lpfc_cmd->buf_lock);
12103                                continue;
12104                        }
12105                        /* Note: both hbalock and ring_lock must be held here */
12106                        spin_lock(&pring_s4->ring_lock);
12107                }
12108
12109                /*
12110                 * If the iocbq is already being aborted, don't take a second
12111                 * action; just skip it (it is not counted in the return value).
12112                 */
12113                if ((iocbq->iocb_flag & LPFC_DRIVER_ABORTED) ||
12114                    !(iocbq->iocb_flag & LPFC_IO_ON_TXCMPLQ)) {
12115                        if (phba->sli_rev == LPFC_SLI_REV4)
12116                                spin_unlock(&pring_s4->ring_lock);
12117                        spin_unlock(&lpfc_cmd->buf_lock);
12118                        continue;
12119                }
12120
12121                /* issue ABTS for this IOCB based on iotag */
12122                abtsiocbq = __lpfc_sli_get_iocbq(phba);
12123                if (!abtsiocbq) {
12124                        if (phba->sli_rev == LPFC_SLI_REV4)
12125                                spin_unlock(&pring_s4->ring_lock);
12126                        spin_unlock(&lpfc_cmd->buf_lock);
12127                        continue;
12128                }
12129
12130                icmd = &iocbq->iocb;
12131                abtsiocbq->iocb.un.acxri.abortType = ABORT_TYPE_ABTS;
12132                abtsiocbq->iocb.un.acxri.abortContextTag = icmd->ulpContext;
12133                if (phba->sli_rev == LPFC_SLI_REV4)
12134                        abtsiocbq->iocb.un.acxri.abortIoTag =
12135                                                         iocbq->sli4_xritag;
12136                else
12137                        abtsiocbq->iocb.un.acxri.abortIoTag = icmd->ulpIoTag;
12138                abtsiocbq->iocb.ulpLe = 1;
12139                abtsiocbq->iocb.ulpClass = icmd->ulpClass;
12140                abtsiocbq->vport = vport;
12141
12142                /* ABTS WQE must go to the same WQ as the WQE to be aborted */
12143                abtsiocbq->hba_wqidx = iocbq->hba_wqidx;
12144                if (iocbq->iocb_flag & LPFC_IO_FCP)
12145                        abtsiocbq->iocb_flag |= LPFC_USE_FCPWQIDX;
12146                if (iocbq->iocb_flag & LPFC_IO_FOF)
12147                        abtsiocbq->iocb_flag |= LPFC_IO_FOF;
12148
12149                ndlp = lpfc_cmd->rdata->pnode;
12150
12151                if (lpfc_is_link_up(phba) &&
12152                    (ndlp && ndlp->nlp_state == NLP_STE_MAPPED_NODE))
12153                        abtsiocbq->iocb.ulpCommand = CMD_ABORT_XRI_CN;
12154                else
12155                        abtsiocbq->iocb.ulpCommand = CMD_CLOSE_XRI_CN;
12156
12157                /* Setup callback routine and issue the command. */
12158                abtsiocbq->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;
12159
12160                /*
12161                 * Indicate the IO is being aborted by the driver and set
12162                 * the caller's flag into the aborted IO.
12163                 */
12164                iocbq->iocb_flag |= LPFC_DRIVER_ABORTED;
12165
12166                if (phba->sli_rev == LPFC_SLI_REV4) {
12167                        ret_val = __lpfc_sli_issue_iocb(phba, pring_s4->ringno,
12168                                                        abtsiocbq, 0);
12169                        spin_unlock(&pring_s4->ring_lock);
12170                } else {
12171                        ret_val = __lpfc_sli_issue_iocb(phba, pring->ringno,
12172                                                        abtsiocbq, 0);
12173                }
12174
12175                spin_unlock(&lpfc_cmd->buf_lock);
12176
12177                if (ret_val == IOCB_ERROR)
12178                        __lpfc_sli_release_iocbq(phba, abtsiocbq);
12179                else
12180                        sum++;
12181        }
12182        spin_unlock_irqrestore(&phba->hbalock, iflags);
12183        return sum;
12184}
12185
12186/**
12187 * lpfc_sli_wake_iocb_wait - lpfc_sli_issue_iocb_wait's completion handler
12188 * @phba: Pointer to HBA context object.
12189 * @cmdiocbq: Pointer to command iocb.
12190 * @rspiocbq: Pointer to response iocb.
12191 *
12192 * This function is the completion handler for iocbs issued using
12193 * lpfc_sli_issue_iocb_wait function. This function is called by the
12194 * ring event handler function without any lock held. This function
12195 * can be called from both worker thread context and interrupt
12196 * context. This function can also be called from another thread which
12197 * cleans up the SLI layer objects.
12198 * This function copies the contents of the response iocb to the
12199 * response iocb memory object provided by the caller of
12200 * lpfc_sli_issue_iocb_wait and then wakes up the thread which
12201 * sleeps for the iocb completion.
12202 **/
12203static void
12204lpfc_sli_wake_iocb_wait(struct lpfc_hba *phba,
12205                        struct lpfc_iocbq *cmdiocbq,
12206                        struct lpfc_iocbq *rspiocbq)
12207{
12208        wait_queue_head_t *pdone_q;
12209        unsigned long iflags;
12210        struct lpfc_io_buf *lpfc_cmd;
12211
12212        spin_lock_irqsave(&phba->hbalock, iflags);
12213        if (cmdiocbq->iocb_flag & LPFC_IO_WAKE_TMO) {
12214
12215                /*
12216                 * A time out has occurred for the iocb.  If a time out
12217                 * completion handler has been supplied, call it.  Otherwise,
12218                 * just free the iocbq.
12219                 */
12220
12221                spin_unlock_irqrestore(&phba->hbalock, iflags);
12222                cmdiocbq->iocb_cmpl = cmdiocbq->wait_iocb_cmpl;
12223                cmdiocbq->wait_iocb_cmpl = NULL;
12224                if (cmdiocbq->iocb_cmpl)
12225                        (cmdiocbq->iocb_cmpl)(phba, cmdiocbq, NULL);
12226                else
12227                        lpfc_sli_release_iocbq(phba, cmdiocbq);
12228                return;
12229        }
12230
12231        cmdiocbq->iocb_flag |= LPFC_IO_WAKE;
12232        if (cmdiocbq->context2 && rspiocbq)
12233                memcpy(&((struct lpfc_iocbq *)cmdiocbq->context2)->iocb,
12234                       &rspiocbq->iocb, sizeof(IOCB_t));
12235
12236        /* Set the exchange busy flag for task management commands */
12237        if ((cmdiocbq->iocb_flag & LPFC_IO_FCP) &&
12238                !(cmdiocbq->iocb_flag & LPFC_IO_LIBDFC)) {
12239                lpfc_cmd = container_of(cmdiocbq, struct lpfc_io_buf,
12240                        cur_iocbq);
12241                if (rspiocbq && (rspiocbq->iocb_flag & LPFC_EXCHANGE_BUSY))
12242                        lpfc_cmd->flags |= LPFC_SBUF_XBUSY;
12243                else
12244                        lpfc_cmd->flags &= ~LPFC_SBUF_XBUSY;
12245        }
12246
12247        pdone_q = cmdiocbq->context_un.wait_queue;
12248        if (pdone_q)
12249                wake_up(pdone_q);
12250        spin_unlock_irqrestore(&phba->hbalock, iflags);
12251        return;
12252}
12253
12254/**
12255 * lpfc_chk_iocb_flg - Test IOCB flag with lock held.
12256 * @phba: Pointer to HBA context object.
12257 * @piocbq: Pointer to command iocb.
12258 * @flag: Flag to test.
12259 *
12260 * This routine grabs the hbalock and then tests the iocb_flag to
12261 * see if the passed in flag is set.
12262 * Returns:
12263 * 1 if flag is set.
12264 * 0 if flag is not set.
12265 **/
12266static int
12267lpfc_chk_iocb_flg(struct lpfc_hba *phba,
12268                 struct lpfc_iocbq *piocbq, uint32_t flag)
12269{
12270        unsigned long iflags;
12271        int ret;
12272
12273        spin_lock_irqsave(&phba->hbalock, iflags);
12274        ret = piocbq->iocb_flag & flag;
12275        spin_unlock_irqrestore(&phba->hbalock, iflags);
12276        return ret;
12277
12278}
12279
12280/**
12281 * lpfc_sli_issue_iocb_wait - Synchronous function to issue iocb commands
12282 * @phba: Pointer to HBA context object.
12283 * @ring_number: Ring number
12284 * @piocb: Pointer to command iocb.
12285 * @prspiocbq: Pointer to response iocb.
12286 * @timeout: Timeout in number of seconds.
12287 *
12288 * This function issues the iocb to firmware and waits for the
12289 * iocb to complete. The iocb_cmpl field of the iocb shall be used
12290 * to handle iocbs which time out. If the field is NULL, the
12291 * function shall free the iocbq structure.  If more clean up is
12292 * needed, the caller is expected to provide a completion function
12293 * that will provide the needed clean up.  If the iocb command is
12294 * not completed within timeout seconds, the function will either
12295 * free the iocbq structure (if iocb_cmpl == NULL) or execute the
12296 * completion function set in the iocb_cmpl field and then return
12297 * a status of IOCB_TIMEDOUT.  The caller should not free the iocb
12298 * resources if this function returns IOCB_TIMEDOUT.
12299 * The function waits for the iocb completion using a
12300 * non-interruptible wait.
12301 * This function will sleep while waiting for iocb completion.
12302 * So, this function should not be called from any context which
12303 * does not allow sleeping. For the same reason, this function
12304 * cannot be called with interrupts disabled.
12305 * This function assumes that the iocb completions occur while
12306 * this function sleeps. So, this function cannot be called from
12307 * the thread which processes iocb completion for this ring.
12308 * This function clears the iocb_flag of the iocb object before
12309 * issuing the iocb and the iocb completion handler sets this
12310 * flag and wakes this thread when the iocb completes.
12311 * The contents of the response iocb will be copied to prspiocbq
12312 * by the completion handler when the command completes.
12313 * This function returns IOCB_SUCCESS on success.
12314 * This function is called with no lock held.
12315 **/
12316int
12317lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba,
12318                         uint32_t ring_number,
12319                         struct lpfc_iocbq *piocb,
12320                         struct lpfc_iocbq *prspiocbq,
12321                         uint32_t timeout)
12322{
12323        DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_q);
12324        long timeleft, timeout_req = 0;
12325        int retval = IOCB_SUCCESS;
12326        uint32_t creg_val;
12327        struct lpfc_iocbq *iocb;
12328        int txq_cnt = 0;
12329        int txcmplq_cnt = 0;
12330        struct lpfc_sli_ring *pring;
12331        unsigned long iflags;
12332        bool iocb_completed = true;
12333
12334        if (phba->sli_rev >= LPFC_SLI_REV4)
12335                pring = lpfc_sli4_calc_ring(phba, piocb);
12336        else
12337                pring = &phba->sli.sli3_ring[ring_number];
12338        /*
12339         * If the caller has provided a response iocbq buffer, then context2
12340         * must be NULL; otherwise it is an error.
12341         */
12342        if (prspiocbq) {
12343                if (piocb->context2)
12344                        return IOCB_ERROR;
12345                piocb->context2 = prspiocbq;
12346        }
12347
12348        piocb->wait_iocb_cmpl = piocb->iocb_cmpl;
12349        piocb->iocb_cmpl = lpfc_sli_wake_iocb_wait;
12350        piocb->context_un.wait_queue = &done_q;
12351        piocb->iocb_flag &= ~(LPFC_IO_WAKE | LPFC_IO_WAKE_TMO);
12352
12353        if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
12354                if (lpfc_readl(phba->HCregaddr, &creg_val))
12355                        return IOCB_ERROR;
12356                creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
12357                writel(creg_val, phba->HCregaddr);
12358                readl(phba->HCregaddr); /* flush */
12359        }
12360
12361        retval = lpfc_sli_issue_iocb(phba, ring_number, piocb,
12362                                     SLI_IOCB_RET_IOCB);
12363        if (retval == IOCB_SUCCESS) {
12364                timeout_req = msecs_to_jiffies(timeout * 1000);
12365                timeleft = wait_event_timeout(done_q,
12366                                lpfc_chk_iocb_flg(phba, piocb, LPFC_IO_WAKE),
12367                                timeout_req);
12368                spin_lock_irqsave(&phba->hbalock, iflags);
12369                if (!(piocb->iocb_flag & LPFC_IO_WAKE)) {
12370
12371                        /*
12372                         * IOCB timed out.  Inform the wake iocb wait
12373                         * completion function and set local status
12374                         */
12375
12376                        iocb_completed = false;
12377                        piocb->iocb_flag |= LPFC_IO_WAKE_TMO;
12378                }
12379                spin_unlock_irqrestore(&phba->hbalock, iflags);
12380                if (iocb_completed) {
12381                        lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
12382                                        "0331 IOCB wake signaled\n");
12383                        /* Note: we are not indicating if the IOCB has a success
12384                         * status or not - that's for the caller to check.
12385                         * IOCB_SUCCESS means just that the command was sent and
12386                         * completed. Not that it completed successfully.
12387                         */
12388                } else if (timeleft == 0) {
12389                        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
12390                                        "0338 IOCB wait timeout error - no "
12391                                        "wake response Data x%x\n", timeout);
12392                        retval = IOCB_TIMEDOUT;
12393                } else {
12394                        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
12395                                        "0330 IOCB wake NOT set, "
12396                                        "Data x%x x%lx\n",
12397                                        timeout, (timeleft / jiffies));
12398                        retval = IOCB_TIMEDOUT;
12399                }
12400        } else if (retval == IOCB_BUSY) {
12401                if (phba->cfg_log_verbose & LOG_SLI) {
12402                        list_for_each_entry(iocb, &pring->txq, list) {
12403                                txq_cnt++;
12404                        }
12405                        list_for_each_entry(iocb, &pring->txcmplq, list) {
12406                                txcmplq_cnt++;
12407                        }
12408                        lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
12409                                "2818 Max IOCBs %d txq cnt %d txcmplq cnt %d\n",
12410                                phba->iocb_cnt, txq_cnt, txcmplq_cnt);
12411                }
12412                return retval;
12413        } else {
12414                lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
12415                                "0332 IOCB wait issue failed, Data x%x\n",
12416                                retval);
12417                retval = IOCB_ERROR;
12418        }
12419
12420        if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
12421                if (lpfc_readl(phba->HCregaddr, &creg_val))
12422                        return IOCB_ERROR;
12423                creg_val &= ~(HC_R0INT_ENA << LPFC_FCP_RING);
12424                writel(creg_val, phba->HCregaddr);
12425                readl(phba->HCregaddr); /* flush */
12426        }
12427
12428        if (prspiocbq)
12429                piocb->context2 = NULL;
12430
12431        piocb->context_un.wait_queue = NULL;
12432        piocb->iocb_cmpl = NULL;
12433        return retval;
12434}
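
/*
 * Illustrative sketch (editor's note, not part of the driver):
 * synchronous issue of an ELS-ring iocb with a hypothetical 30 second
 * timeout. Per the notes above, on IOCB_TIMEDOUT the iocb still belongs
 * to the completion path and must not be freed by the caller.
 *
 *	ret = lpfc_sli_issue_iocb_wait(phba, LPFC_ELS_RING, piocb,
 *				       prspiocbq, 30);
 *	if (ret == IOCB_TIMEDOUT)
 *		return;	// iocb is released later by the wake handler
 *	// on IOCB_SUCCESS, inspect prspiocbq->iocb.ulpStatus
 */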
12435
12436/**
12437 * lpfc_sli_issue_mbox_wait - Synchronous function to issue mailbox
12438 * @phba: Pointer to HBA context object.
12439 * @pmboxq: Pointer to driver mailbox object.
12440 * @timeout: Timeout in number of seconds.
12441 *
12442 * This function issues the mailbox to firmware and waits for the
12443 * mailbox command to complete. If the mailbox command is not
12444 * completed within timeout seconds, it returns MBX_TIMEOUT.
12445 * The function waits for the mailbox completion using a
12446 * non-interruptible wait (wait_for_completion_timeout), so the
12447 * wait is not broken early by signals. The caller
12448 * should not free the mailbox resources if this function returns
12449 * MBX_TIMEOUT.
12450 * This function will sleep while waiting for mailbox completion.
12451 * So, this function should not be called from any context which
12452 * does not allow sleeping. For the same reason, this function
12453 * cannot be called with interrupts disabled.
12454 * This function assumes that the mailbox completion occurs while
12455 * this function sleeps. So, this function cannot be called from
12456 * the worker thread which processes mailbox completion.
12457 * This function is called in the context of HBA management
12458 * applications.
12459 * This function returns MBX_SUCCESS when successful.
12460 * This function is called with no lock held.
12461 **/
12462int
12463lpfc_sli_issue_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq,
12464                         uint32_t timeout)
12465{
12466        struct completion mbox_done;
12467        int retval;
12468        unsigned long flag;
12469
12470        pmboxq->mbox_flag &= ~LPFC_MBX_WAKE;
12471        /* setup wake call as mailbox callback */
12472        pmboxq->mbox_cmpl = lpfc_sli_wake_mbox_wait;
12473
12474        /* setup context3 field to pass the completion pointer to the wake function */
12475        init_completion(&mbox_done);
12476        pmboxq->context3 = &mbox_done;
12477        /* now issue the command */
12478        retval = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
12479        if (retval == MBX_BUSY || retval == MBX_SUCCESS) {
12480                wait_for_completion_timeout(&mbox_done,
12481                                            msecs_to_jiffies(timeout * 1000));
12482
12483                spin_lock_irqsave(&phba->hbalock, flag);
12484                pmboxq->context3 = NULL;
12485                /*
12486         * if the LPFC_MBX_WAKE flag is set, the mailbox completed;
12487         * otherwise do not free the resources.
12488                 */
12489                if (pmboxq->mbox_flag & LPFC_MBX_WAKE) {
12490                        retval = MBX_SUCCESS;
12491                } else {
12492                        retval = MBX_TIMEOUT;
12493                        pmboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
12494                }
12495                spin_unlock_irqrestore(&phba->hbalock, flag);
12496        }
12497        return retval;
12498}
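
/*
 * Illustrative sketch (editor's note, not part of the driver): a
 * synchronous mailbox command. On MBX_TIMEOUT ownership of the mailbox
 * stays with the completion path, so it must not be freed here.
 *
 *	rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, LPFC_MBOX_TMO);
 *	if (rc != MBX_TIMEOUT)
 *		mempool_free(pmboxq, phba->mbox_mem_pool);
 */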
12499
12500/**
12501 * lpfc_sli_mbox_sys_shutdown - shutdown mailbox command sub-system
12502 * @phba: Pointer to HBA context.
12503 * @mbx_action: Mailbox shutdown options.
12504 *
12505 * This function is called to shutdown the driver's mailbox sub-system.
12506 * It first marks the mailbox sub-system as blocked to prevent any
12507 * asynchronous mailbox command from being issued off the pending mailbox
12508 * command queue. If the mailbox command sub-system shutdown is due to
12509 * HBA error conditions such as EEH or ERATT, this routine shall invoke
12510 * the mailbox sub-system flush routine to forcefully bring down the
12511 * mailbox sub-system. Otherwise, if it is due to a normal condition (such
12512 * as with offline or HBA function reset), this routine will wait for the
12513 * outstanding mailbox command to complete before invoking the mailbox
12514 * sub-system flush routine to gracefully bring down the mailbox sub-system.
12515 **/
12516void
12517lpfc_sli_mbox_sys_shutdown(struct lpfc_hba *phba, int mbx_action)
12518{
12519        struct lpfc_sli *psli = &phba->sli;
12520        unsigned long timeout;
12521
12522        if (mbx_action == LPFC_MBX_NO_WAIT) {
12523                /* delay 100ms for port state */
12524                msleep(100);
12525                lpfc_sli_mbox_sys_flush(phba);
12526                return;
12527        }
12528        timeout = msecs_to_jiffies(LPFC_MBOX_TMO * 1000) + jiffies;
12529
12530        /* Disable softirqs, including timers from obtaining phba->hbalock */
12531        local_bh_disable();
12532
12533        spin_lock_irq(&phba->hbalock);
12534        psli->sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
12535
12536        if (psli->sli_flag & LPFC_SLI_ACTIVE) {
12537                /* Determine how long we might wait for the active mailbox
12538                 * command to be gracefully completed by firmware.
12539                 */
12540                if (phba->sli.mbox_active)
12541                        timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba,
12542                                                phba->sli.mbox_active) *
12543                                                1000) + jiffies;
12544                spin_unlock_irq(&phba->hbalock);
12545
12546                /* Enable softirqs again, done with phba->hbalock */
12547                local_bh_enable();
12548
12549                while (phba->sli.mbox_active) {
12550                        /* Check active mailbox complete status every 2ms */
12551                        msleep(2);
12552                        if (time_after(jiffies, timeout))
12553                        /* Timeout; let the mailbox flush routine
12554                         * forcefully release the active mailbox command
12555                         */
12556                                break;
12557                }
12558        } else {
12559                spin_unlock_irq(&phba->hbalock);
12560
12561                /* Enable softirqs again, done with phba->hbalock */
12562                local_bh_enable();
12563        }
12564
12565        lpfc_sli_mbox_sys_flush(phba);
12566}
12567
12568/**
12569 * lpfc_sli_eratt_read - read sli-3 error attention events
12570 * @phba: Pointer to HBA context.
12571 *
12572 * This function is called to read the SLI3 device error attention registers
12573 * for possible error attention events. The caller must hold the hostlock
12574 * with spin_lock_irq().
12575 *
12576 * This function returns 1 when there is Error Attention in the Host Attention
12577 * Register and returns 0 otherwise.
12578 **/
12579static int
12580lpfc_sli_eratt_read(struct lpfc_hba *phba)
12581{
12582        uint32_t ha_copy;
12583
12584        /* Read chip Host Attention (HA) register */
12585        if (lpfc_readl(phba->HAregaddr, &ha_copy))
12586                goto unplug_err;
12587
12588        if (ha_copy & HA_ERATT) {
12589                /* Read host status register to retrieve error event */
12590                if (lpfc_sli_read_hs(phba))
12591                        goto unplug_err;
12592
12593                /* Check if a deferred error condition is active */
12594                if ((HS_FFER1 & phba->work_hs) &&
12595                    ((HS_FFER2 | HS_FFER3 | HS_FFER4 | HS_FFER5 |
12596                      HS_FFER6 | HS_FFER7 | HS_FFER8) & phba->work_hs)) {
12597                        phba->hba_flag |= DEFER_ERATT;
12598                        /* Clear all interrupt enable conditions */
12599                        writel(0, phba->HCregaddr);
12600                        readl(phba->HCregaddr);
12601                }
12602
12603                /* Set the driver HA work bitmap */
12604                phba->work_ha |= HA_ERATT;
12605                /* Indicate polling handles this ERATT */
12606                phba->hba_flag |= HBA_ERATT_HANDLED;
12607                return 1;
12608        }
12609        return 0;
12610
12611unplug_err:
12612        /* Set the driver HS work bitmap */
12613        phba->work_hs |= UNPLUG_ERR;
12614        /* Set the driver HA work bitmap */
12615        phba->work_ha |= HA_ERATT;
12616        /* Indicate polling handles this ERATT */
12617        phba->hba_flag |= HBA_ERATT_HANDLED;
12618        return 1;
12619}
12620
12621/**
12622 * lpfc_sli4_eratt_read - read sli-4 error attention events
12623 * @phba: Pointer to HBA context.
12624 *
12625 * This function is called to read the SLI4 device error attention registers
12626 * for possible error attention events. The caller must hold the hostlock
12627 * with spin_lock_irq().
12628 *
12629 * This function returns 1 when there is Error Attention in the Host Attention
12630 * Register and returns 0 otherwise.
12631 **/
12632static int
12633lpfc_sli4_eratt_read(struct lpfc_hba *phba)
12634{
12635        uint32_t uerr_sta_hi, uerr_sta_lo;
12636        uint32_t if_type, portsmphr;
12637        struct lpfc_register portstat_reg;
12638
12639        /*
12640         * For now, use the SLI4 device internal unrecoverable error
12641         * registers for error attention. This can be changed later.
12642         */
12643        if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
12644        switch (if_type) {
12645        case LPFC_SLI_INTF_IF_TYPE_0:
12646                if (lpfc_readl(phba->sli4_hba.u.if_type0.UERRLOregaddr,
12647                        &uerr_sta_lo) ||
12648                        lpfc_readl(phba->sli4_hba.u.if_type0.UERRHIregaddr,
12649                        &uerr_sta_hi)) {
12650                        phba->work_hs |= UNPLUG_ERR;
12651                        phba->work_ha |= HA_ERATT;
12652                        phba->hba_flag |= HBA_ERATT_HANDLED;
12653                        return 1;
12654                }
12655                if ((~phba->sli4_hba.ue_mask_lo & uerr_sta_lo) ||
12656                    (~phba->sli4_hba.ue_mask_hi & uerr_sta_hi)) {
12657                        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
12658                                        "1423 HBA Unrecoverable error: "
12659                                        "uerr_lo_reg=0x%x, uerr_hi_reg=0x%x, "
12660                                        "ue_mask_lo_reg=0x%x, "
12661                                        "ue_mask_hi_reg=0x%x\n",
12662                                        uerr_sta_lo, uerr_sta_hi,
12663                                        phba->sli4_hba.ue_mask_lo,
12664                                        phba->sli4_hba.ue_mask_hi);
12665                        phba->work_status[0] = uerr_sta_lo;
12666                        phba->work_status[1] = uerr_sta_hi;
12667                        phba->work_ha |= HA_ERATT;
12668                        phba->hba_flag |= HBA_ERATT_HANDLED;
12669                        return 1;
12670                }
12671                break;
12672        case LPFC_SLI_INTF_IF_TYPE_2:
12673        case LPFC_SLI_INTF_IF_TYPE_6:
12674                if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
12675                        &portstat_reg.word0) ||
12676                        lpfc_readl(phba->sli4_hba.PSMPHRregaddr,
12677                        &portsmphr)){
12678                        phba->work_hs |= UNPLUG_ERR;
12679                        phba->work_ha |= HA_ERATT;
12680                        phba->hba_flag |= HBA_ERATT_HANDLED;
12681                        return 1;
12682                }
12683                if (bf_get(lpfc_sliport_status_err, &portstat_reg)) {
12684                        phba->work_status[0] =
12685                                readl(phba->sli4_hba.u.if_type2.ERR1regaddr);
12686                        phba->work_status[1] =
12687                                readl(phba->sli4_hba.u.if_type2.ERR2regaddr);
12688                        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
12689                                        "2885 Port Status Event: "
12690                                        "port status reg 0x%x, "
12691                                        "port smphr reg 0x%x, "
12692                                        "error 1=0x%x, error 2=0x%x\n",
12693                                        portstat_reg.word0,
12694                                        portsmphr,
12695                                        phba->work_status[0],
12696                                        phba->work_status[1]);
12697                        phba->work_ha |= HA_ERATT;
12698                        phba->hba_flag |= HBA_ERATT_HANDLED;
12699                        return 1;
12700                }
12701                break;
12702        case LPFC_SLI_INTF_IF_TYPE_1:
12703        default:
12704                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
12705                                "2886 HBA Error Attention on unsupported "
12706                                "if type %d.\n", if_type);
12707                return 1;
12708        }
12709
12710        return 0;
12711}
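
/*
 * Usage sketch (illustrative only, not a new call site): this routine
 * expects the caller to serialize against the other ERATT paths. The
 * actual caller, lpfc_sli_check_eratt() below, does so under the hbalock:
 *
 *	spin_lock_irq(&phba->hbalock);
 *	if (lpfc_sli4_eratt_read(phba))
 *		;	/* ERATT latched into phba->work_ha/work_status */
 *	spin_unlock_irq(&phba->hbalock);
 */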
12712
12713/**
12714 * lpfc_sli_check_eratt - check error attention events
12715 * @phba: Pointer to HBA context.
12716 *
12717 * This function is called from timer soft interrupt context to check HBA's
12718 * error attention register bit for error attention events.
12719 *
12720 * This function returns 1 when there is Error Attention in the Host Attention
12721 * Register and returns 0 otherwise.
12722 **/
12723int
12724lpfc_sli_check_eratt(struct lpfc_hba *phba)
12725{
12726        uint32_t ha_copy;
12727
12728        /* If somebody is waiting to handle an eratt, don't process it
12729         * here. The brdkill function will do this.
12730         */
12731        if (phba->link_flag & LS_IGNORE_ERATT)
12732                return 0;
12733
12734        /* Check if interrupt handler handles this ERATT */
12735        spin_lock_irq(&phba->hbalock);
12736        if (phba->hba_flag & HBA_ERATT_HANDLED) {
12737                /* Interrupt handler has handled ERATT */
12738                spin_unlock_irq(&phba->hbalock);
12739                return 0;
12740        }
12741
12742        /*
12743         * If there is deferred error attention, do not check for error
12744         * attention
12745         */
12746        if (unlikely(phba->hba_flag & DEFER_ERATT)) {
12747                spin_unlock_irq(&phba->hbalock);
12748                return 0;
12749        }
12750
12751        /* If PCI channel is offline, don't process it */
12752        if (unlikely(pci_channel_offline(phba->pcidev))) {
12753                spin_unlock_irq(&phba->hbalock);
12754                return 0;
12755        }
12756
12757        switch (phba->sli_rev) {
12758        case LPFC_SLI_REV2:
12759        case LPFC_SLI_REV3:
12760                /* Read chip Host Attention (HA) register */
12761                ha_copy = lpfc_sli_eratt_read(phba);
12762                break;
12763        case LPFC_SLI_REV4:
12764                /* Read device Unrecoverable Error (UERR) registers */
12765                ha_copy = lpfc_sli4_eratt_read(phba);
12766                break;
12767        default:
12768                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
12769                                "0299 Invalid SLI revision (%d)\n",
12770                                phba->sli_rev);
12771                ha_copy = 0;
12772                break;
12773        }
12774        spin_unlock_irq(&phba->hbalock);
12775
12776        return ha_copy;
12777}
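
/*
 * Consumption sketch (assumption: simplified timer callback; the real
 * poller is the driver's ERATT poll timer): a non-zero return means an
 * error attention was latched into the work bitmaps and the worker
 * thread should be kicked to process it.
 *
 *	if (lpfc_sli_check_eratt(phba))
 *		lpfc_worker_wake_up(phba);
 */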
12778
12779/**
12780 * lpfc_intr_state_check - Check device state for interrupt handling
12781 * @phba: Pointer to HBA context.
12782 *
12783 * This inline routine checks whether a device or its PCI slot is in a state
12784 * in which the interrupt should be handled.
12785 *
12786 * This function returns 0 if the device or the PCI slot is in a state in
12787 * which the interrupt should be handled, otherwise -EIO.
12788 */
12789static inline int
12790lpfc_intr_state_check(struct lpfc_hba *phba)
12791{
12792        /* If the pci channel is offline, ignore all the interrupts */
12793        if (unlikely(pci_channel_offline(phba->pcidev)))
12794                return -EIO;
12795
12796        /* Update device level interrupt statistics */
12797        phba->sli.slistat.sli_intr++;
12798
12799        /* Ignore all interrupts during initialization. */
12800        if (unlikely(phba->link_state < LPFC_LINK_DOWN))
12801                return -EIO;
12802
12803        return 0;
12804}
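
/*
 * Canonical use (mirrors the handlers below): every ISR entry point
 * gates its work on this check before touching hardware state.
 *
 *	if (lpfc_intr_state_check(phba))
 *		return IRQ_NONE;
 */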
12805
12806/**
12807 * lpfc_sli_sp_intr_handler - Slow-path interrupt handler to SLI-3 device
12808 * @irq: Interrupt number.
12809 * @dev_id: The device context pointer.
12810 *
12811 * This function is directly called from the PCI layer as an interrupt
12812 * service routine when a device with the SLI-3 interface spec is enabled with
12813 * MSI-X multi-message interrupt mode and there are slow-path events in
12814 * the HBA. However, when the device is enabled with either MSI or Pin-IRQ
12815 * interrupt mode, this function is called as part of the device-level
12816 * interrupt handler. When the PCI slot is in error recovery or the HBA
12817 * is undergoing initialization, the interrupt handler will not process
12818 * the interrupt. The link attention and ELS ring attention events are
12819 * handled by the worker thread. The interrupt handler signals the worker
12820 * thread and returns for these events. This function is called without
12821 * any lock held. It gets the hbalock to access and update SLI data
12822 * structures.
12823 *
12824 * This function returns IRQ_HANDLED when interrupt is handled else it
12825 * returns IRQ_NONE.
12826 **/
12827irqreturn_t
12828lpfc_sli_sp_intr_handler(int irq, void *dev_id)
12829{
12830        struct lpfc_hba  *phba;
12831        uint32_t ha_copy, hc_copy;
12832        uint32_t work_ha_copy;
12833        unsigned long status;
12834        unsigned long iflag;
12835        uint32_t control;
12836
12837        MAILBOX_t *mbox, *pmbox;
12838        struct lpfc_vport *vport;
12839        struct lpfc_nodelist *ndlp;
12840        struct lpfc_dmabuf *mp;
12841        LPFC_MBOXQ_t *pmb;
12842        int rc;
12843
12844        /*
12845         * Get the driver's phba structure from the dev_id and
12846         * assume the HBA is not interrupting.
12847         */
12848        phba = (struct lpfc_hba *)dev_id;
12849
12850        if (unlikely(!phba))
12851                return IRQ_NONE;
12852
12853        /*
12854         * Stuff needs to be attended to when this function is invoked as an
12855         * individual interrupt handler in MSI-X multi-message interrupt mode
12856         */
12857        if (phba->intr_type == MSIX) {
12858                /* Check device state for handling interrupt */
12859                if (lpfc_intr_state_check(phba))
12860                        return IRQ_NONE;
12861                /* Need to read HA REG for slow-path events */
12862                spin_lock_irqsave(&phba->hbalock, iflag);
12863                if (lpfc_readl(phba->HAregaddr, &ha_copy))
12864                        goto unplug_error;
12865                /* If somebody is waiting to handle an eratt don't process it
12866                 * here. The brdkill function will do this.
12867                 */
12868                if (phba->link_flag & LS_IGNORE_ERATT)
12869                        ha_copy &= ~HA_ERATT;
12870                /* Check the need for handling ERATT in interrupt handler */
12871                if (ha_copy & HA_ERATT) {
12872                        if (phba->hba_flag & HBA_ERATT_HANDLED)
12873                                /* ERATT polling has handled ERATT */
12874                                ha_copy &= ~HA_ERATT;
12875                        else
12876                                /* Indicate interrupt handler handles ERATT */
12877                                phba->hba_flag |= HBA_ERATT_HANDLED;
12878                }
12879
12880                /*
12881                 * If there is deferred error attention, do not check for any
12882                 * interrupt.
12883                 */
12884                if (unlikely(phba->hba_flag & DEFER_ERATT)) {
12885                        spin_unlock_irqrestore(&phba->hbalock, iflag);
12886                        return IRQ_NONE;
12887                }
12888
12889                /* Clear up only attention source related to slow-path */
12890                if (lpfc_readl(phba->HCregaddr, &hc_copy))
12891                        goto unplug_error;
12892
12893                writel(hc_copy & ~(HC_MBINT_ENA | HC_R2INT_ENA |
12894                        HC_LAINT_ENA | HC_ERINT_ENA),
12895                        phba->HCregaddr);
12896                writel((ha_copy & (HA_MBATT | HA_R2_CLR_MSK)),
12897                        phba->HAregaddr);
12898                writel(hc_copy, phba->HCregaddr);
12899                readl(phba->HAregaddr); /* flush */
12900                spin_unlock_irqrestore(&phba->hbalock, iflag);
12901        } else
12902                ha_copy = phba->ha_copy;
12903
12904        work_ha_copy = ha_copy & phba->work_ha_mask;
12905
12906        if (work_ha_copy) {
12907                if (work_ha_copy & HA_LATT) {
12908                        if (phba->sli.sli_flag & LPFC_PROCESS_LA) {
12909                                /*
12910                                 * Turn off Link Attention interrupts
12911                                 * until CLEAR_LA done
12912                                 */
12913                                spin_lock_irqsave(&phba->hbalock, iflag);
12914                                phba->sli.sli_flag &= ~LPFC_PROCESS_LA;
12915                                if (lpfc_readl(phba->HCregaddr, &control))
12916                                        goto unplug_error;
12917                                control &= ~HC_LAINT_ENA;
12918                                writel(control, phba->HCregaddr);
12919                                readl(phba->HCregaddr); /* flush */
12920                                spin_unlock_irqrestore(&phba->hbalock, iflag);
12921                        }
12922                        else
12923                                work_ha_copy &= ~HA_LATT;
12924                }
12925
12926                if (work_ha_copy & ~(HA_ERATT | HA_MBATT | HA_LATT)) {
12927                        /*
12928                         * Turn off slow-ring interrupts; LPFC_ELS_RING is
12929                         * the only slow ring.
12930                         */
12931                        status = (work_ha_copy &
12932                                (HA_RXMASK  << (4*LPFC_ELS_RING)));
12933                        status >>= (4*LPFC_ELS_RING);
12934                        if (status & HA_RXMASK) {
12935                                spin_lock_irqsave(&phba->hbalock, iflag);
12936                                if (lpfc_readl(phba->HCregaddr, &control))
12937                                        goto unplug_error;
12938
12939                                lpfc_debugfs_slow_ring_trc(phba,
12940                                "ISR slow ring:   ctl:x%x stat:x%x isrcnt:x%x",
12941                                control, status,
12942                                (uint32_t)phba->sli.slistat.sli_intr);
12943
12944                                if (control & (HC_R0INT_ENA << LPFC_ELS_RING)) {
12945                                        lpfc_debugfs_slow_ring_trc(phba,
12946                                                "ISR Disable ring:"
12947                                                "pwork:x%x hawork:x%x wait:x%x",
12948                                                phba->work_ha, work_ha_copy,
12949                                                (uint32_t)((unsigned long)
12950                                                &phba->work_waitq));
12951
12952                                        control &=
12953                                            ~(HC_R0INT_ENA << LPFC_ELS_RING);
12954                                        writel(control, phba->HCregaddr);
12955                                        readl(phba->HCregaddr); /* flush */
12956                                }
12957                                else {
12958                                        lpfc_debugfs_slow_ring_trc(phba,
12959                                                "ISR slow ring:   pwork:"
12960                                                "x%x hawork:x%x wait:x%x",
12961                                                phba->work_ha, work_ha_copy,
12962                                                (uint32_t)((unsigned long)
12963                                                &phba->work_waitq));
12964                                }
12965                                spin_unlock_irqrestore(&phba->hbalock, iflag);
12966                        }
12967                }
12968                spin_lock_irqsave(&phba->hbalock, iflag);
12969                if (work_ha_copy & HA_ERATT) {
12970                        if (lpfc_sli_read_hs(phba))
12971                                goto unplug_error;
12972                        /*
12973                         * Check if a deferred error condition
12974                         * is active
12975                         */
12976                        if ((HS_FFER1 & phba->work_hs) &&
12977                                ((HS_FFER2 | HS_FFER3 | HS_FFER4 | HS_FFER5 |
12978                                  HS_FFER6 | HS_FFER7 | HS_FFER8) &
12979                                  phba->work_hs)) {
12980                                phba->hba_flag |= DEFER_ERATT;
12981                                /* Clear all interrupt enable conditions */
12982                                writel(0, phba->HCregaddr);
12983                                readl(phba->HCregaddr);
12984                        }
12985                }
12986
12987                if ((work_ha_copy & HA_MBATT) && (phba->sli.mbox_active)) {
12988                        pmb = phba->sli.mbox_active;
12989                        pmbox = &pmb->u.mb;
12990                        mbox = phba->mbox;
12991                        vport = pmb->vport;
12992
12993                        /* First check out the status word */
12994                        lpfc_sli_pcimem_bcopy(mbox, pmbox, sizeof(uint32_t));
12995                        if (pmbox->mbxOwner != OWN_HOST) {
12996                                spin_unlock_irqrestore(&phba->hbalock, iflag);
12997                                /*
12998                                 * Stray Mailbox Interrupt, mbxCommand <cmd>
12999                                 * mbxStatus <status>
13000                                 */
13001                                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13002                                                "(%d):0304 Stray Mailbox "
13003                                                "Interrupt mbxCommand x%x "
13004                                                "mbxStatus x%x\n",
13005                                                (vport ? vport->vpi : 0),
13006                                                pmbox->mbxCommand,
13007                                                pmbox->mbxStatus);
13008                                /* clear mailbox attention bit */
13009                                work_ha_copy &= ~HA_MBATT;
13010                        } else {
13011                                phba->sli.mbox_active = NULL;
13012                                spin_unlock_irqrestore(&phba->hbalock, iflag);
13013                                phba->last_completion_time = jiffies;
13014                                del_timer(&phba->sli.mbox_tmo);
13015                                if (pmb->mbox_cmpl) {
13016                                        lpfc_sli_pcimem_bcopy(mbox, pmbox,
13017                                                        MAILBOX_CMD_SIZE);
13018                                        if (pmb->out_ext_byte_len &&
13019                                                pmb->ctx_buf)
13020                                                lpfc_sli_pcimem_bcopy(
13021                                                phba->mbox_ext,
13022                                                pmb->ctx_buf,
13023                                                pmb->out_ext_byte_len);
13024                                }
13025                                if (pmb->mbox_flag & LPFC_MBX_IMED_UNREG) {
13026                                        pmb->mbox_flag &= ~LPFC_MBX_IMED_UNREG;
13027
13028                                        lpfc_debugfs_disc_trc(vport,
13029                                                LPFC_DISC_TRC_MBOX_VPORT,
13030                                                "MBOX dflt rpi: "
13031                                                "status:x%x rpi:x%x",
13032                                                (uint32_t)pmbox->mbxStatus,
13033                                                pmbox->un.varWords[0], 0);
13034
13035                                        if (!pmbox->mbxStatus) {
13036                                                mp = (struct lpfc_dmabuf *)
13037                                                        (pmb->ctx_buf);
13038                                                ndlp = (struct lpfc_nodelist *)
13039                                                        pmb->ctx_ndlp;
13040
13041                                                /* Reg_LOGIN of dflt RPI was
13042                                                 * successful. Now let's get
13043                                                 * rid of the RPI using the
13044                                                 * same mbox buffer.
13045                                                 */
13046                                                lpfc_unreg_login(phba,
13047                                                        vport->vpi,
13048                                                        pmbox->un.varWords[0],
13049                                                        pmb);
13050                                                pmb->mbox_cmpl =
13051                                                        lpfc_mbx_cmpl_dflt_rpi;
13052                                                pmb->ctx_buf = mp;
13053                                                pmb->ctx_ndlp = ndlp;
13054                                                pmb->vport = vport;
13055                                                rc = lpfc_sli_issue_mbox(phba,
13056                                                                pmb,
13057                                                                MBX_NOWAIT);
13058                                                if (rc != MBX_BUSY)
13059                                                        lpfc_printf_log(phba,
13060                                                        KERN_ERR,
13061                                                        LOG_TRACE_EVENT,
13062                                                        "0350 rc should have "
13063                                                        "been MBX_BUSY\n");
13064                                                if (rc != MBX_NOT_FINISHED)
13065                                                        goto send_current_mbox;
13066                                        }
13067                                }
13068                                spin_lock_irqsave(
13069                                                &phba->pport->work_port_lock,
13070                                                iflag);
13071                                phba->pport->work_port_events &=
13072                                        ~WORKER_MBOX_TMO;
13073                                spin_unlock_irqrestore(
13074                                                &phba->pport->work_port_lock,
13075                                                iflag);
13076
13077                                /* Do NOT queue MBX_HEARTBEAT to the worker
13078                                 * thread for processing.
13079                                 */
13080                                if (pmbox->mbxCommand == MBX_HEARTBEAT) {
13081                                        /* Process mbox now */
13082                                        phba->sli.mbox_active = NULL;
13083                                        phba->sli.sli_flag &=
13084                                                ~LPFC_SLI_MBOX_ACTIVE;
13085                                        if (pmb->mbox_cmpl)
13086                                                pmb->mbox_cmpl(phba, pmb);
13087                                } else {
13088                                        /* Queue to worker thread to process */
13089                                        lpfc_mbox_cmpl_put(phba, pmb);
13090                                }
13091                        }
13092                } else
13093                        spin_unlock_irqrestore(&phba->hbalock, iflag);
13094
13095                if ((work_ha_copy & HA_MBATT) &&
13096                    (phba->sli.mbox_active == NULL)) {
13097send_current_mbox:
13098                        /* Process next mailbox command if there is one */
13099                        do {
13100                                rc = lpfc_sli_issue_mbox(phba, NULL,
13101                                                         MBX_NOWAIT);
13102                        } while (rc == MBX_NOT_FINISHED);
13103                        if (rc != MBX_SUCCESS)
13104                                lpfc_printf_log(phba, KERN_ERR,
13105                                                LOG_TRACE_EVENT,
13106                                                "0349 rc should be "
13107                                                "MBX_SUCCESS\n");
13108                }
13109
13110                spin_lock_irqsave(&phba->hbalock, iflag);
13111                phba->work_ha |= work_ha_copy;
13112                spin_unlock_irqrestore(&phba->hbalock, iflag);
13113                lpfc_worker_wake_up(phba);
13114        }
13115        return IRQ_HANDLED;
13116unplug_error:
13117        spin_unlock_irqrestore(&phba->hbalock, iflag);
13118        return IRQ_HANDLED;
13119
13120} /* lpfc_sli_sp_intr_handler */
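
/*
 * Note on the ERATT handshake above (descriptive sketch of the existing
 * protocol, not a new mechanism): HBA_ERATT_HANDLED is the claim flag
 * shared between this ISR and the lpfc_sli_check_eratt() polling path.
 * Whichever side sets it first owns the event; the other side clears
 * HA_ERATT from its local copy and backs off, so a single error
 * attention is processed exactly once.
 */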
13121
13122/**
13123 * lpfc_sli_fp_intr_handler - Fast-path interrupt handler to SLI-3 device.
13124 * @irq: Interrupt number.
13125 * @dev_id: The device context pointer.
13126 *
13127 * This function is directly called from the PCI layer as an interrupt
13128 * service routine when a device with the SLI-3 interface spec is enabled with
13129 * MSI-X multi-message interrupt mode and there is a fast-path FCP IOCB
13130 * ring event in the HBA. However, when the device is enabled with either
13131 * MSI or Pin-IRQ interrupt mode, this function is called as part of the
13132 * device-level interrupt handler. When the PCI slot is in error recovery
13133 * or the HBA is undergoing initialization, the interrupt handler will not
13134 * process the interrupt. The SCSI FCP fast-path ring events are handled in
13135 * the interrupt context. This function is called without any lock held.
13136 * It gets the hbalock to access and update SLI data structures.
13137 *
13138 * This function returns IRQ_HANDLED when interrupt is handled else it
13139 * returns IRQ_NONE.
13140 **/
13141irqreturn_t
13142lpfc_sli_fp_intr_handler(int irq, void *dev_id)
13143{
13144        struct lpfc_hba  *phba;
13145        uint32_t ha_copy;
13146        unsigned long status;
13147        unsigned long iflag;
13148        struct lpfc_sli_ring *pring;
13149
13150        /* Get the driver's phba structure from the dev_id and
13151         * assume the HBA is not interrupting.
13152         */
13153        phba = (struct lpfc_hba *) dev_id;
13154
13155        if (unlikely(!phba))
13156                return IRQ_NONE;
13157
13158        /*
13159         * Stuff needs to be attended to when this function is invoked as an
13160         * individual interrupt handler in MSI-X multi-message interrupt mode
13161         */
13162        if (phba->intr_type == MSIX) {
13163                /* Check device state for handling interrupt */
13164                if (lpfc_intr_state_check(phba))
13165                        return IRQ_NONE;
13166                /* Need to read HA REG for FCP ring and other ring events */
13167                if (lpfc_readl(phba->HAregaddr, &ha_copy))
13168                        return IRQ_HANDLED;
13169                /* Clear up only attention source related to fast-path */
13170                spin_lock_irqsave(&phba->hbalock, iflag);
13171                /*
13172                 * If there is deferred error attention, do not check for
13173                 * any interrupt.
13174                 */
13175                if (unlikely(phba->hba_flag & DEFER_ERATT)) {
13176                        spin_unlock_irqrestore(&phba->hbalock, iflag);
13177                        return IRQ_NONE;
13178                }
13179                writel((ha_copy & (HA_R0_CLR_MSK | HA_R1_CLR_MSK)),
13180                        phba->HAregaddr);
13181                readl(phba->HAregaddr); /* flush */
13182                spin_unlock_irqrestore(&phba->hbalock, iflag);
13183        } else
13184                ha_copy = phba->ha_copy;
13185
13186        /*
13187         * Process all events on FCP ring. Take the optimized path for FCP IO.
13188         */
13189        ha_copy &= ~(phba->work_ha_mask);
13190
13191        status = (ha_copy & (HA_RXMASK << (4*LPFC_FCP_RING)));
13192        status >>= (4*LPFC_FCP_RING);
13193        pring = &phba->sli.sli3_ring[LPFC_FCP_RING];
13194        if (status & HA_RXMASK)
13195                lpfc_sli_handle_fast_ring_event(phba, pring, status);
13196
13197        if (phba->cfg_multi_ring_support == 2) {
13198                /*
13199                 * Process all events on extra ring. Take the optimized path
13200                 * for extra ring IO.
13201                 */
13202                status = (ha_copy & (HA_RXMASK << (4*LPFC_EXTRA_RING)));
13203                status >>= (4*LPFC_EXTRA_RING);
13204                if (status & HA_RXMASK) {
13205                        lpfc_sli_handle_fast_ring_event(phba,
13206                                        &phba->sli.sli3_ring[LPFC_EXTRA_RING],
13207                                        status);
13208                }
13209        }
13210        return IRQ_HANDLED;
13211}  /* lpfc_sli_fp_intr_handler */
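
/*
 * Ring-attention bit math used above (illustrative): the Host Attention
 * register carries a 4-bit attention nibble per ring, so ring N's bits
 * are isolated with:
 *
 *	status = (ha_copy & (HA_RXMASK << (4 * N))) >> (4 * N);
 *	if (status & HA_RXMASK)
 *		;	/* ring N has receive/response work pending */
 */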
13212
13213/**
13214 * lpfc_sli_intr_handler - Device-level interrupt handler to SLI-3 device
13215 * @irq: Interrupt number.
13216 * @dev_id: The device context pointer.
13217 *
13218 * This function is the HBA device-level interrupt handler for a device with
13219 * the SLI-3 interface spec, called from the PCI layer when either MSI or
13220 * Pin-IRQ interrupt mode is enabled and there is an event in the HBA which
13221 * requires driver attention. This function invokes the slow-path interrupt
13222 * attention handling function and fast-path interrupt attention handling
13223 * function in turn to process the relevant HBA attention events. This
13224 * function is called without any lock held. It gets the hbalock to access
13225 * and update SLI data structures.
13226 *
13227 * This function returns IRQ_HANDLED when interrupt is handled, else it
13228 * returns IRQ_NONE.
13229 **/
13230irqreturn_t
13231lpfc_sli_intr_handler(int irq, void *dev_id)
13232{
13233        struct lpfc_hba  *phba;
13234        irqreturn_t sp_irq_rc, fp_irq_rc;
13235        unsigned long status1, status2;
13236        uint32_t hc_copy;
13237
13238        /*
13239         * Get the driver's phba structure from the dev_id and
13240         * assume the HBA is not interrupting.
13241         */
13242        phba = (struct lpfc_hba *) dev_id;
13243
13244        if (unlikely(!phba))
13245                return IRQ_NONE;
13246
13247        /* Check device state for handling interrupt */
13248        if (lpfc_intr_state_check(phba))
13249                return IRQ_NONE;
13250
13251        spin_lock(&phba->hbalock);
13252        if (lpfc_readl(phba->HAregaddr, &phba->ha_copy)) {
13253                spin_unlock(&phba->hbalock);
13254                return IRQ_HANDLED;
13255        }
13256
13257        if (unlikely(!phba->ha_copy)) {
13258                spin_unlock(&phba->hbalock);
13259                return IRQ_NONE;
13260        } else if (phba->ha_copy & HA_ERATT) {
13261                if (phba->hba_flag & HBA_ERATT_HANDLED)
13262                        /* ERATT polling has handled ERATT */
13263                        phba->ha_copy &= ~HA_ERATT;
13264                else
13265                        /* Indicate interrupt handler handles ERATT */
13266                        phba->hba_flag |= HBA_ERATT_HANDLED;
13267        }
13268
13269        /*
13270         * If there is deferred error attention, do not check for any interrupt.
13271         */
13272        if (unlikely(phba->hba_flag & DEFER_ERATT)) {
13273                spin_unlock(&phba->hbalock);
13274                return IRQ_NONE;
13275        }
13276
13277        /* Clear attention sources except link and error attentions */
13278        if (lpfc_readl(phba->HCregaddr, &hc_copy)) {
13279                spin_unlock(&phba->hbalock);
13280                return IRQ_HANDLED;
13281        }
13282        writel(hc_copy & ~(HC_MBINT_ENA | HC_R0INT_ENA | HC_R1INT_ENA
13283                | HC_R2INT_ENA | HC_LAINT_ENA | HC_ERINT_ENA),
13284                phba->HCregaddr);
13285        writel((phba->ha_copy & ~(HA_LATT | HA_ERATT)), phba->HAregaddr);
13286        writel(hc_copy, phba->HCregaddr);
13287        readl(phba->HAregaddr); /* flush */
13288        spin_unlock(&phba->hbalock);
13289
13290        /*
13291         * Invokes slow-path host attention interrupt handling as appropriate.
13292         */
13293
13294        /* status of events with mailbox and link attention */
13295        status1 = phba->ha_copy & (HA_MBATT | HA_LATT | HA_ERATT);
13296
13297        /* status of events with ELS ring */
13298        status2 = (phba->ha_copy & (HA_RXMASK  << (4*LPFC_ELS_RING)));
13299        status2 >>= (4*LPFC_ELS_RING);
13300
13301        if (status1 || (status2 & HA_RXMASK))
13302                sp_irq_rc = lpfc_sli_sp_intr_handler(irq, dev_id);
13303        else
13304                sp_irq_rc = IRQ_NONE;
13305
13306        /*
13307         * Invoke fast-path host attention interrupt handling as appropriate.
13308         */
13309
13310        /* status of events with FCP ring */
13311        status1 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_FCP_RING)));
13312        status1 >>= (4*LPFC_FCP_RING);
13313
13314        /* status of events with extra ring */
13315        if (phba->cfg_multi_ring_support == 2) {
13316                status2 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_EXTRA_RING)));
13317                status2 >>= (4*LPFC_EXTRA_RING);
13318        } else
13319                status2 = 0;
13320
13321        if ((status1 & HA_RXMASK) || (status2 & HA_RXMASK))
13322                fp_irq_rc = lpfc_sli_fp_intr_handler(irq, dev_id);
13323        else
13324                fp_irq_rc = IRQ_NONE;
13325
13326        /* Return device-level interrupt handling status */
13327        return (sp_irq_rc == IRQ_HANDLED) ? sp_irq_rc : fp_irq_rc;
13328}  /* lpfc_sli_intr_handler */
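
/*
 * Registration sketch (assumption: simplified from the driver's
 * interrupt setup path; the flags and name used here are illustrative):
 * in MSI or pin-IRQ mode this device-level handler is the one handed
 * to the kernel.
 *
 *	rc = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler,
 *			 IRQF_SHARED, LPFC_DRIVER_NAME, phba);
 */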
13329
13330/**
13331 * lpfc_sli4_els_xri_abort_event_proc - Process els xri abort event
13332 * @phba: pointer to lpfc hba data structure.
13333 *
13334 * This routine is invoked by the worker thread to process all the pending
13335 * SLI4 els abort xri events.
13336 **/
13337void lpfc_sli4_els_xri_abort_event_proc(struct lpfc_hba *phba)
13338{
13339        struct lpfc_cq_event *cq_event;
13340        unsigned long iflags;
13341
13342        /* First, declare the els xri abort event has been handled */
13343        spin_lock_irqsave(&phba->hbalock, iflags);
13344        phba->hba_flag &= ~ELS_XRI_ABORT_EVENT;
13345        spin_unlock_irqrestore(&phba->hbalock, iflags);
13346
13347        /* Now, handle all the els xri abort events */
13348        spin_lock_irqsave(&phba->sli4_hba.els_xri_abrt_list_lock, iflags);
13349        while (!list_empty(&phba->sli4_hba.sp_els_xri_aborted_work_queue)) {
13350                /* Get the first event from the head of the event queue */
13351                list_remove_head(&phba->sli4_hba.sp_els_xri_aborted_work_queue,
13352                                 cq_event, struct lpfc_cq_event, list);
13353                spin_unlock_irqrestore(&phba->sli4_hba.els_xri_abrt_list_lock,
13354                                       iflags);
13355                /* Notify aborted XRI for ELS work queue */
13356                lpfc_sli4_els_xri_aborted(phba, &cq_event->cqe.wcqe_axri);
13357
13358                /* Free the event processed back to the free pool */
13359                lpfc_sli4_cq_event_release(phba, cq_event);
13360                spin_lock_irqsave(&phba->sli4_hba.els_xri_abrt_list_lock,
13361                                  iflags);
13362        }
13363        spin_unlock_irqrestore(&phba->sli4_hba.els_xri_abrt_list_lock, iflags);
13364}
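
/*
 * Locking pattern above (descriptive): the abort list is drained one
 * event at a time, dropping els_xri_abrt_list_lock around each
 * lpfc_sli4_els_xri_aborted() call so the handler may take other
 * locks, then re-acquiring it before the next list_empty() test.
 */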
13365
13366/**
13367 * lpfc_sli4_iocb_param_transfer - Transfer pIocbOut and cmpl status to pIocbIn
13368 * @phba: pointer to lpfc hba data structure
13369 * @pIocbIn: pointer to the rspiocbq
13370 * @pIocbOut: pointer to the cmdiocbq
13371 * @wcqe: pointer to the complete wcqe
13372 *
13373 * This routine transfers the fields of a command iocbq to a response iocbq
13374 * by copying all the IOCB fields from command iocbq and transferring the
13375 * completion status information from the complete wcqe.
13376 **/
13377static void
13378lpfc_sli4_iocb_param_transfer(struct lpfc_hba *phba,
13379                              struct lpfc_iocbq *pIocbIn,
13380                              struct lpfc_iocbq *pIocbOut,
13381                              struct lpfc_wcqe_complete *wcqe)
13382{
13383        int numBdes, i;
13384        unsigned long iflags;
13385        uint32_t status, max_response;
13386        struct lpfc_dmabuf *dmabuf;
13387        struct ulp_bde64 *bpl, bde;
13388        size_t offset = offsetof(struct lpfc_iocbq, iocb);
13389
13390        memcpy((char *)pIocbIn + offset, (char *)pIocbOut + offset,
13391               sizeof(struct lpfc_iocbq) - offset);
13392        /* Map WCQE parameters into irspiocb parameters */
13393        status = bf_get(lpfc_wcqe_c_status, wcqe);
13394        pIocbIn->iocb.ulpStatus = (status & LPFC_IOCB_STATUS_MASK);
13395        if (pIocbOut->iocb_flag & LPFC_IO_FCP)
13396                if (pIocbIn->iocb.ulpStatus == IOSTAT_FCP_RSP_ERROR)
13397                        pIocbIn->iocb.un.fcpi.fcpi_parm =
13398                                        pIocbOut->iocb.un.fcpi.fcpi_parm -
13399                                        wcqe->total_data_placed;
13400                else
13401                        pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter;
13402        else {
13403                pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter;
13404                switch (pIocbOut->iocb.ulpCommand) {
13405                case CMD_ELS_REQUEST64_CR:
13406                        dmabuf = (struct lpfc_dmabuf *)pIocbOut->context3;
13407                        bpl  = (struct ulp_bde64 *)dmabuf->virt;
13408                        bde.tus.w = le32_to_cpu(bpl[1].tus.w);
13409                        max_response = bde.tus.f.bdeSize;
13410                        break;
13411                case CMD_GEN_REQUEST64_CR:
13412                        max_response = 0;
13413                        if (!pIocbOut->context3)
13414                                break;
13415                        numBdes = pIocbOut->iocb.un.genreq64.bdl.bdeSize/
13416                                        sizeof(struct ulp_bde64);
13417                        dmabuf = (struct lpfc_dmabuf *)pIocbOut->context3;
13418                        bpl = (struct ulp_bde64 *)dmabuf->virt;
13419                        for (i = 0; i < numBdes; i++) {
13420                                bde.tus.w = le32_to_cpu(bpl[i].tus.w);
13421                                if (bde.tus.f.bdeFlags != BUFF_TYPE_BDE_64)
13422                                        max_response += bde.tus.f.bdeSize;
13423                        }
13424                        break;
13425                default:
13426                        max_response = wcqe->total_data_placed;
13427                        break;
13428                }
13429                if (max_response < wcqe->total_data_placed)
13430                        pIocbIn->iocb.un.genreq64.bdl.bdeSize = max_response;
13431                else
13432                        pIocbIn->iocb.un.genreq64.bdl.bdeSize =
13433                                wcqe->total_data_placed;
13434        }
13435
13436        /* Convert BG errors for completion status */
13437        if (status == CQE_STATUS_DI_ERROR) {
13438                pIocbIn->iocb.ulpStatus = IOSTAT_LOCAL_REJECT;
13439
13440                if (bf_get(lpfc_wcqe_c_bg_edir, wcqe))
13441                        pIocbIn->iocb.un.ulpWord[4] = IOERR_RX_DMA_FAILED;
13442                else
13443                        pIocbIn->iocb.un.ulpWord[4] = IOERR_TX_DMA_FAILED;
13444
13445                pIocbIn->iocb.unsli3.sli3_bg.bgstat = 0;
13446                if (bf_get(lpfc_wcqe_c_bg_ge, wcqe)) /* Guard Check failed */
13447                        pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
13448                                BGS_GUARD_ERR_MASK;
13449                if (bf_get(lpfc_wcqe_c_bg_ae, wcqe)) /* App Tag Check failed */
13450                        pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
13451                                BGS_APPTAG_ERR_MASK;
13452                if (bf_get(lpfc_wcqe_c_bg_re, wcqe)) /* Ref Tag Check failed */
13453                        pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
13454                                BGS_REFTAG_ERR_MASK;
13455
13456                /* Check to see if there was any good data before the error */
13457                if (bf_get(lpfc_wcqe_c_bg_tdpv, wcqe)) {
13458                        pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
13459                                BGS_HI_WATER_MARK_PRESENT_MASK;
13460                        pIocbIn->iocb.unsli3.sli3_bg.bghm =
13461                                wcqe->total_data_placed;
13462                }
13463
13464                /*
13465                * Set ALL the error bits to indicate we don't know what
13466                * type of error it is.
13467                */
13468                if (!pIocbIn->iocb.unsli3.sli3_bg.bgstat)
13469                        pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
13470                                (BGS_REFTAG_ERR_MASK | BGS_APPTAG_ERR_MASK |
13471                                BGS_GUARD_ERR_MASK);
13472        }
13473
13474        /* Pick up HBA exchange busy condition */
13475        if (bf_get(lpfc_wcqe_c_xb, wcqe)) {
13476                spin_lock_irqsave(&phba->hbalock, iflags);
13477                pIocbIn->iocb_flag |= LPFC_EXCHANGE_BUSY;
13478                spin_unlock_irqrestore(&phba->hbalock, iflags);
13479        }
13480}
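
/*
 * Note on the copy above (descriptive): starting the memcpy at
 * offsetof(struct lpfc_iocbq, iocb) preserves the response iocbq's own
 * bookkeeping fields that precede the embedded IOCB (e.g. its list
 * linkage) while inheriting the IOCB payload and the driver fields
 * that follow it from the command iocbq.
 */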
13481
13482/**
13483 * lpfc_sli4_els_wcqe_to_rspiocbq - Get response iocbq from els wcqe
13484 * @phba: Pointer to HBA context object.
13485 * @irspiocbq: Pointer to work-queue completion queue entry.
13486 *
13487 * This routine handles an ELS work-queue completion event and constructs
13488 * a pseudo response ELS IOCBQ from the SLI4 ELS WCQE for the common
13489 * discovery engine to handle.
13490 *
13491 * Return: Pointer to the receive IOCBQ, NULL otherwise.
13492 **/
13493static struct lpfc_iocbq *
13494lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *phba,
13495                               struct lpfc_iocbq *irspiocbq)
13496{
13497        struct lpfc_sli_ring *pring;
13498        struct lpfc_iocbq *cmdiocbq;
13499        struct lpfc_wcqe_complete *wcqe;
13500        unsigned long iflags;
13501
13502        pring = lpfc_phba_elsring(phba);
13503        if (unlikely(!pring))
13504                return NULL;
13505
13506        wcqe = &irspiocbq->cq_event.cqe.wcqe_cmpl;
13507        pring->stats.iocb_event++;
13508        /* Look up the ELS command IOCB and create pseudo response IOCB */
13509        cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring,
13510                                bf_get(lpfc_wcqe_c_request_tag, wcqe));
13511        if (unlikely(!cmdiocbq)) {
13512                lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
13513                                "0386 ELS complete with no corresponding "
13514                                "cmdiocb: 0x%x 0x%x 0x%x 0x%x\n",
13515                                wcqe->word0, wcqe->total_data_placed,
13516                                wcqe->parameter, wcqe->word3);
13517                lpfc_sli_release_iocbq(phba, irspiocbq);
13518                return NULL;
13519        }
13520
13521        spin_lock_irqsave(&pring->ring_lock, iflags);
13522        /* Put the iocb back on the txcmplq */
13523        lpfc_sli_ringtxcmpl_put(phba, pring, cmdiocbq);
13524        spin_unlock_irqrestore(&pring->ring_lock, iflags);
13525
13526        /* Fake the irspiocbq and copy necessary response information */
13527        lpfc_sli4_iocb_param_transfer(phba, irspiocbq, cmdiocbq, wcqe);
13528
13529        return irspiocbq;
13530}
13531
13532inline struct lpfc_cq_event *
13533lpfc_cq_event_setup(struct lpfc_hba *phba, void *entry, int size)
13534{
13535        struct lpfc_cq_event *cq_event;
13536
13537        /* Allocate a new internal CQ_EVENT entry */
13538        cq_event = lpfc_sli4_cq_event_alloc(phba);
13539        if (!cq_event) {
13540                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13541                                "0602 Failed to alloc CQ_EVENT entry\n");
13542                return NULL;
13543        }
13544
13545        /* Move the CQE into the event */
13546        memcpy(&cq_event->cqe, entry, size);
13547        return cq_event;
13548}
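
/*
 * Usage sketch (mirrors the callers below): copy a CQE into a
 * driver-private event and let the caller queue it for the worker.
 *
 *	cq_event = lpfc_cq_event_setup(phba, mcqe, sizeof(struct lpfc_mcqe));
 *	if (!cq_event)
 *		return false;
 */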
13549
13550/**
13551 * lpfc_sli4_sp_handle_async_event - Handle an asynchronous event
13552 * @phba: Pointer to HBA context object.
13553 * @mcqe: Pointer to mailbox completion queue entry.
13554 *
13555 * This routine processes a mailbox completion queue entry carrying an
13556 * asynchronous event.
13557 *
13558 * Return: true if work posted to worker thread, otherwise false.
13559 **/
13560static bool
13561lpfc_sli4_sp_handle_async_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe)
13562{
13563        struct lpfc_cq_event *cq_event;
13564        unsigned long iflags;
13565
13566        lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
13567                        "0392 Async Event: word0:x%x, word1:x%x, "
13568                        "word2:x%x, word3:x%x\n", mcqe->word0,
13569                        mcqe->mcqe_tag0, mcqe->mcqe_tag1, mcqe->trailer);
13570
13571        cq_event = lpfc_cq_event_setup(phba, mcqe, sizeof(struct lpfc_mcqe));
13572        if (!cq_event)
13573                return false;
13574
13575        spin_lock_irqsave(&phba->sli4_hba.asynce_list_lock, iflags);
13576        list_add_tail(&cq_event->list, &phba->sli4_hba.sp_asynce_work_queue);
13577        spin_unlock_irqrestore(&phba->sli4_hba.asynce_list_lock, iflags);
13578
13579        /* Set the async event flag */
13580        spin_lock_irqsave(&phba->hbalock, iflags);
13581        phba->hba_flag |= ASYNC_EVENT;
13582        spin_unlock_irqrestore(&phba->hbalock, iflags);
13583
13584        return true;
13585}
13586
13587/**
13588 * lpfc_sli4_sp_handle_mbox_event - Handle a mailbox completion event
13589 * @phba: Pointer to HBA context object.
13590 * @mcqe: Pointer to mailbox completion queue entry.
13591 *
13592 * This routine processes a mailbox completion queue entry carrying a
13593 * mailbox completion event.
13594 *
13595 * Return: true if work posted to worker thread, otherwise false.
13596 **/
13597static bool
13598lpfc_sli4_sp_handle_mbox_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe)
13599{
13600        uint32_t mcqe_status;
13601        MAILBOX_t *mbox, *pmbox;
13602        struct lpfc_mqe *mqe;
13603        struct lpfc_vport *vport;
13604        struct lpfc_nodelist *ndlp;
13605        struct lpfc_dmabuf *mp;
13606        unsigned long iflags;
13607        LPFC_MBOXQ_t *pmb;
13608        bool workposted = false;
13609        int rc;
13610
13611        /* If not a mailbox complete MCQE, bail out after checking mailbox consume */
13612        if (!bf_get(lpfc_trailer_completed, mcqe))
13613                goto out_no_mqe_complete;
13614
13615        /* Get the reference to the active mbox command */
13616        spin_lock_irqsave(&phba->hbalock, iflags);
13617        pmb = phba->sli.mbox_active;
13618        if (unlikely(!pmb)) {
13619                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13620                                "1832 No pending MBOX command to handle\n");
13621                spin_unlock_irqrestore(&phba->hbalock, iflags);
13622                goto out_no_mqe_complete;
13623        }
13624        spin_unlock_irqrestore(&phba->hbalock, iflags);
13625        mqe = &pmb->u.mqe;
13626        pmbox = (MAILBOX_t *)&pmb->u.mqe;
13627        mbox = phba->mbox;
13628        vport = pmb->vport;
13629
13630        /* Reset heartbeat timer */
13631        phba->last_completion_time = jiffies;
13632        del_timer(&phba->sli.mbox_tmo);
13633
13634        /* Move mbox data to caller's mailbox region, do endian swapping */
13635        if (pmb->mbox_cmpl && mbox)
13636                lpfc_sli4_pcimem_bcopy(mbox, mqe, sizeof(struct lpfc_mqe));
13637
13638        /*
13639         * For mcqe errors, conditionally move a modified error code to
13640         * the mbox so that the error will not be missed.
13641         */
13642        mcqe_status = bf_get(lpfc_mcqe_status, mcqe);
13643        if (mcqe_status != MB_CQE_STATUS_SUCCESS) {
13644                if (bf_get(lpfc_mqe_status, mqe) == MBX_SUCCESS)
13645                        bf_set(lpfc_mqe_status, mqe,
13646                               (LPFC_MBX_ERROR_RANGE | mcqe_status));
13647        }
13648        if (pmb->mbox_flag & LPFC_MBX_IMED_UNREG) {
13649                pmb->mbox_flag &= ~LPFC_MBX_IMED_UNREG;
13650                lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_MBOX_VPORT,
13651                                      "MBOX dflt rpi: status:x%x rpi:x%x",
13652                                      mcqe_status,
13653                                      pmbox->un.varWords[0], 0);
13654                if (mcqe_status == MB_CQE_STATUS_SUCCESS) {
13655                        mp = (struct lpfc_dmabuf *)(pmb->ctx_buf);
13656                        ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
13657
13658                        /* Reg_LOGIN of dflt RPI was successful. Mark the
13659                         * node as having an UNREG_LOGIN in progress to stop
13660                         * an unsolicited PLOGI from the same NPortId from
13661                         * starting another mailbox transaction.
13662                         */
13663                        spin_lock_irqsave(&ndlp->lock, iflags);
13664                        ndlp->nlp_flag |= NLP_UNREG_INP;
13665                        spin_unlock_irqrestore(&ndlp->lock, iflags);
13666                        lpfc_unreg_login(phba, vport->vpi,
13667                                         pmbox->un.varWords[0], pmb);
13668                        pmb->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi;
13669                        pmb->ctx_buf = mp;
13670
13671                        /* No reference taken here.  This is a default
13672                         * RPI reg/immediate unreg cycle. The reference was
13673                         * taken in the reg rpi path and is released when
13674                         * this mailbox completes.
13675                         */
13676                        pmb->ctx_ndlp = ndlp;
13677                        pmb->vport = vport;
13678                        rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
13679                        if (rc != MBX_BUSY)
13680                                lpfc_printf_log(phba, KERN_ERR,
13681                                                LOG_TRACE_EVENT,
13682                                                "0385 rc should "
13683                                                "have been MBX_BUSY\n");
13684                        if (rc != MBX_NOT_FINISHED)
13685                                goto send_current_mbox;
13686                }
13687        }
13688        spin_lock_irqsave(&phba->pport->work_port_lock, iflags);
13689        phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
13690        spin_unlock_irqrestore(&phba->pport->work_port_lock, iflags);
13691
13692        /* Do NOT queue MBX_HEARTBEAT to the worker thread for processing. */
13693        if (pmbox->mbxCommand == MBX_HEARTBEAT) {
13694                spin_lock_irqsave(&phba->hbalock, iflags);
13695                /* Release the mailbox command posting token */
13696                phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
13697                phba->sli.mbox_active = NULL;
13698                if (bf_get(lpfc_trailer_consumed, mcqe))
13699                        lpfc_sli4_mq_release(phba->sli4_hba.mbx_wq);
13700                spin_unlock_irqrestore(&phba->hbalock, iflags);
13701
13702                /* Post the next mbox command, if there is one */
13703                lpfc_sli4_post_async_mbox(phba);
13704
13705                /* Process cmpl now */
13706                if (pmb->mbox_cmpl)
13707                        pmb->mbox_cmpl(phba, pmb);
13708                return false;
13709        }
13710
13711        /* There is mailbox completion work to queue to the worker thread */
13712        spin_lock_irqsave(&phba->hbalock, iflags);
13713        __lpfc_mbox_cmpl_put(phba, pmb);
13714        phba->work_ha |= HA_MBATT;
13715        spin_unlock_irqrestore(&phba->hbalock, iflags);
13716        workposted = true;
13717
13718send_current_mbox:
13719        spin_lock_irqsave(&phba->hbalock, iflags);
13720        /* Release the mailbox command posting token */
13721        phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
13722        /* Setting the active mailbox pointer must be in sync with the flag clear */
13723        phba->sli.mbox_active = NULL;
13724        if (bf_get(lpfc_trailer_consumed, mcqe))
13725                lpfc_sli4_mq_release(phba->sli4_hba.mbx_wq);
13726        spin_unlock_irqrestore(&phba->hbalock, iflags);
13727        /* Wake up worker thread to post the next pending mailbox command */
13728        lpfc_worker_wake_up(phba);
13729        return workposted;
13730
13731out_no_mqe_complete:
13732        spin_lock_irqsave(&phba->hbalock, iflags);
13733        if (bf_get(lpfc_trailer_consumed, mcqe))
13734                lpfc_sli4_mq_release(phba->sli4_hba.mbx_wq);
13735        spin_unlock_irqrestore(&phba->hbalock, iflags);
13736        return false;
13737}
13738
13739/**
13740 * lpfc_sli4_sp_handle_mcqe - Process a mailbox completion queue entry
13741 * @phba: Pointer to HBA context object.
13742 * @cq: Pointer to associated CQ
13743 * @cqe: Pointer to mailbox completion queue entry.
13744 *
13745 * This routine processes a mailbox completion queue entry; it invokes the
13746 * proper mailbox completion handling or asynchronous event handling routine
13747 * according to the MCQE's async bit.
13748 *
13749 * Return: true if work posted to worker thread, otherwise false.
13750 **/
13751static bool
13752lpfc_sli4_sp_handle_mcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
13753                         struct lpfc_cqe *cqe)
13754{
13755        struct lpfc_mcqe mcqe;
13756        bool workposted;
13757
13758        cq->CQ_mbox++;
13759
13760        /* Copy the mailbox MCQE and convert endian order as needed */
13761        lpfc_sli4_pcimem_bcopy(cqe, &mcqe, sizeof(struct lpfc_mcqe));
13762
13763        /* Invoke the proper event handling routine */
13764        if (!bf_get(lpfc_trailer_async, &mcqe))
13765                workposted = lpfc_sli4_sp_handle_mbox_event(phba, &mcqe);
13766        else
13767                workposted = lpfc_sli4_sp_handle_async_event(phba, &mcqe);
13768        return workposted;
13769}
13770
13771/**
13772 * lpfc_sli4_sp_handle_els_wcqe - Handle els work-queue completion event
13773 * @phba: Pointer to HBA context object.
13774 * @cq: Pointer to associated CQ
13775 * @wcqe: Pointer to work-queue completion queue entry.
13776 *
13777 * This routine handles an ELS work-queue completion event.
13778 *
13779 * Return: true if work posted to worker thread, otherwise false.
13780 **/
13781static bool
13782lpfc_sli4_sp_handle_els_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
13783                             struct lpfc_wcqe_complete *wcqe)
13784{
13785        struct lpfc_iocbq *irspiocbq;
13786        unsigned long iflags;
13787        struct lpfc_sli_ring *pring = cq->pring;
13788        int txq_cnt = 0;
13789        int txcmplq_cnt = 0;
13790
13791        /* Check for response status */
13792        if (unlikely(bf_get(lpfc_wcqe_c_status, wcqe))) {
13793                /* Log the error status */
13794                lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
13795                                "0357 ELS CQE error: status=x%x: "
13796                                "CQE: %08x %08x %08x %08x\n",
13797                                bf_get(lpfc_wcqe_c_status, wcqe),
13798                                wcqe->word0, wcqe->total_data_placed,
13799                                wcqe->parameter, wcqe->word3);
13800        }
13801
13802        /* Get an irspiocbq for later ELS response processing use */
13803        irspiocbq = lpfc_sli_get_iocbq(phba);
13804        if (!irspiocbq) {
13805                if (!list_empty(&pring->txq))
13806                        txq_cnt++;
13807                if (!list_empty(&pring->txcmplq))
13808                        txcmplq_cnt++;
13809                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13810                        "0387 NO IOCBQ data: txq_cnt=%d iocb_cnt=%d "
13811                        "els_txcmplq_cnt=%d\n",
13812                        txq_cnt, phba->iocb_cnt,
13813                        txcmplq_cnt);
13814                return false;
13815        }
13816
13817        /* Save off the slow-path queue event for work thread to process */
13818        memcpy(&irspiocbq->cq_event.cqe.wcqe_cmpl, wcqe, sizeof(*wcqe));
13819        spin_lock_irqsave(&phba->hbalock, iflags);
13820        list_add_tail(&irspiocbq->cq_event.list,
13821                      &phba->sli4_hba.sp_queue_event);
13822        phba->hba_flag |= HBA_SP_QUEUE_EVT;
13823        spin_unlock_irqrestore(&phba->hbalock, iflags);
13824
13825        return true;
13826}
13827
13828/**
13829 * lpfc_sli4_sp_handle_rel_wcqe - Handle slow-path WQ entry consumed event
13830 * @phba: Pointer to HBA context object.
13831 * @wcqe: Pointer to work-queue completion queue entry.
13832 *
13833 * This routine handles slow-path WQ entry consumed event by invoking the
13834 * proper WQ release routine to the slow-path WQ.
13835 **/
13836static void
13837lpfc_sli4_sp_handle_rel_wcqe(struct lpfc_hba *phba,
13838                             struct lpfc_wcqe_release *wcqe)
13839{
13840        /* sanity check on queue memory */
13841        if (unlikely(!phba->sli4_hba.els_wq))
13842                return;
13843        /* Check for the slow-path ELS work queue */
13844        if (bf_get(lpfc_wcqe_r_wq_id, wcqe) == phba->sli4_hba.els_wq->queue_id)
13845                lpfc_sli4_wq_release(phba->sli4_hba.els_wq,
13846                                     bf_get(lpfc_wcqe_r_wqe_index, wcqe));
13847        else
13848                lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
13849                                "2579 Slow-path wqe consume event carries "
13850                                "mismatched qid: wcqe-qid=x%x, sp-qid=x%x\n",
13851                                bf_get(lpfc_wcqe_r_wq_id, wcqe),
13852                                phba->sli4_hba.els_wq->queue_id);
13853}
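
/*
 * A hypothetical model (not lpfc_sli4_wq_release() itself) of what a WQ
 * "entry consumed" release amounts to: the release CQE carries the index
 * of the last WQE the hardware consumed, and the host-side ring
 * bookkeeping advances, with wrap, to match it.
 */
static void demo_wq_release_model(struct lpfc_queue *q, uint32_t index)
{
        /* advance the hardware-owned index, wrapping at entry_count */
        while (q->hba_index != index)
                q->hba_index = (q->hba_index + 1) % q->entry_count;
}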
13854
13855/**
13856 * lpfc_sli4_sp_handle_abort_xri_wcqe - Handle an XRI abort event
13857 * @phba: Pointer to HBA context object.
13858 * @cq: Pointer to a WQ completion queue.
13859 * @wcqe: Pointer to work-queue completion queue entry.
13860 *
13861 * This routine handles an XRI abort event.
13862 *
13863 * Return: true if work posted to worker thread, otherwise false.
13864 **/
13865static bool
13866lpfc_sli4_sp_handle_abort_xri_wcqe(struct lpfc_hba *phba,
13867                                   struct lpfc_queue *cq,
13868                                   struct sli4_wcqe_xri_aborted *wcqe)
13869{
13870        bool workposted = false;
13871        struct lpfc_cq_event *cq_event;
13872        unsigned long iflags;
13873
13874        switch (cq->subtype) {
13875        case LPFC_IO:
13876                lpfc_sli4_io_xri_aborted(phba, wcqe, cq->hdwq);
13877                if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
13878                        /* Notify aborted XRI for NVME work queue */
13879                        if (phba->nvmet_support)
13880                                lpfc_sli4_nvmet_xri_aborted(phba, wcqe);
13881                }
13882                workposted = false;
13883                break;
13884        case LPFC_NVME_LS: /* NVME LS uses ELS resources */
13885        case LPFC_ELS:
13886                cq_event = lpfc_cq_event_setup(phba, wcqe, sizeof(*wcqe));
13887                if (!cq_event) {
13888                        workposted = false;
13889                        break;
13890                }
13891                cq_event->hdwq = cq->hdwq;
13892                spin_lock_irqsave(&phba->sli4_hba.els_xri_abrt_list_lock,
13893                                  iflags);
13894                list_add_tail(&cq_event->list,
13895                              &phba->sli4_hba.sp_els_xri_aborted_work_queue);
13896                /* Set the els xri abort event flag */
13897                phba->hba_flag |= ELS_XRI_ABORT_EVENT;
13898                spin_unlock_irqrestore(&phba->sli4_hba.els_xri_abrt_list_lock,
13899                                       iflags);
13900                workposted = true;
13901                break;
13902        default:
13903                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13904                                "0603 Invalid CQ subtype %d: "
13905                                "%08x %08x %08x %08x\n",
13906                                cq->subtype, wcqe->word0, wcqe->parameter,
13907                                wcqe->word2, wcqe->word3);
13908                workposted = false;
13909                break;
13910        }
13911        return workposted;
13912}
13913
13914#define FC_RCTL_MDS_DIAGS       0xF4
13915
13916/**
13917 * lpfc_sli4_sp_handle_rcqe - Process a receive-queue completion queue entry
13918 * @phba: Pointer to HBA context object.
13919 * @rcqe: Pointer to receive-queue completion queue entry.
13920 *
13921 * This routine processes a receive-queue completion queue entry.
13922 *
13923 * Return: true if work posted to worker thread, otherwise false.
13924 **/
13925static bool
13926lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_rcqe *rcqe)
13927{
13928        bool workposted = false;
13929        struct fc_frame_header *fc_hdr;
13930        struct lpfc_queue *hrq = phba->sli4_hba.hdr_rq;
13931        struct lpfc_queue *drq = phba->sli4_hba.dat_rq;
13932        struct lpfc_nvmet_tgtport *tgtp;
13933        struct hbq_dmabuf *dma_buf;
13934        uint32_t status, rq_id;
13935        unsigned long iflags;
13936
13937        /* sanity check on queue memory */
13938        if (unlikely(!hrq) || unlikely(!drq))
13939                return workposted;
13940
13941        if (bf_get(lpfc_cqe_code, rcqe) == CQE_CODE_RECEIVE_V1)
13942                rq_id = bf_get(lpfc_rcqe_rq_id_v1, rcqe);
13943        else
13944                rq_id = bf_get(lpfc_rcqe_rq_id, rcqe);
13945        if (rq_id != hrq->queue_id)
13946                goto out;
13947
13948        status = bf_get(lpfc_rcqe_status, rcqe);
13949        switch (status) {
13950        case FC_STATUS_RQ_BUF_LEN_EXCEEDED:
13951                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13952                                "2537 Receive Frame Truncated!!\n");
13953                fallthrough;
13954        case FC_STATUS_RQ_SUCCESS:
13955                spin_lock_irqsave(&phba->hbalock, iflags);
13956                lpfc_sli4_rq_release(hrq, drq);
13957                dma_buf = lpfc_sli_hbqbuf_get(&phba->hbqs[0].hbq_buffer_list);
13958                if (!dma_buf) {
13959                        hrq->RQ_no_buf_found++;
13960                        spin_unlock_irqrestore(&phba->hbalock, iflags);
13961                        goto out;
13962                }
13963                hrq->RQ_rcv_buf++;
13964                hrq->RQ_buf_posted--;
13965                memcpy(&dma_buf->cq_event.cqe.rcqe_cmpl, rcqe, sizeof(*rcqe));
13966
13967                fc_hdr = (struct fc_frame_header *)dma_buf->hbuf.virt;
13968
13969                if (fc_hdr->fh_r_ctl == FC_RCTL_MDS_DIAGS ||
13970                    fc_hdr->fh_r_ctl == FC_RCTL_DD_UNSOL_DATA) {
13971                        spin_unlock_irqrestore(&phba->hbalock, iflags);
13972                        /* Handle MDS Loopback frames */
13973                        if  (!(phba->pport->load_flag & FC_UNLOADING))
13974                                lpfc_sli4_handle_mds_loopback(phba->pport,
13975                                                              dma_buf);
13976                        else
13977                                lpfc_in_buf_free(phba, &dma_buf->dbuf);
13978                        break;
13979                }
13980
13981                /* save off the frame for the work thread to process */
13982                list_add_tail(&dma_buf->cq_event.list,
13983                              &phba->sli4_hba.sp_queue_event);
13984                /* Frame received */
13985                phba->hba_flag |= HBA_SP_QUEUE_EVT;
13986                spin_unlock_irqrestore(&phba->hbalock, iflags);
13987                workposted = true;
13988                break;
13989        case FC_STATUS_INSUFF_BUF_FRM_DISC:
13990                if (phba->nvmet_support) {
13991                        tgtp = phba->targetport->private;
13992                        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13993                                        "6402 RQE Error x%x, posted %d err_cnt "
13994                                        "%d: %x %x %x\n",
13995                                        status, hrq->RQ_buf_posted,
13996                                        hrq->RQ_no_posted_buf,
13997                                        atomic_read(&tgtp->rcv_fcp_cmd_in),
13998                                        atomic_read(&tgtp->rcv_fcp_cmd_out),
13999                                        atomic_read(&tgtp->xmt_fcp_release));
14000                }
14001                fallthrough;
14002
14003        case FC_STATUS_INSUFF_BUF_NEED_BUF:
14004                hrq->RQ_no_posted_buf++;
14005                /* Post more buffers if possible */
14006                spin_lock_irqsave(&phba->hbalock, iflags);
14007                phba->hba_flag |= HBA_POST_RECEIVE_BUFFER;
14008                spin_unlock_irqrestore(&phba->hbalock, iflags);
14009                workposted = true;
14010                break;
14011        }
14012out:
14013        return workposted;
14014}
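
/*
 * A small sketch of the header decode used above: R_CTL routes the frame
 * (0xF4 marks MDS diagnostic frames), and the 24-bit F_CTL field arrives
 * as three separate bytes. The helper name is illustrative; libfc offers
 * similar accessors.
 */
static inline uint32_t demo_fc_fctl(const struct fc_frame_header *fh)
{
        return (fh->fh_f_ctl[0] << 16) |
               (fh->fh_f_ctl[1] << 8) |
                fh->fh_f_ctl[2];
}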
14015
14016/**
14017 * lpfc_sli4_sp_handle_cqe - Process a slow path completion queue entry
14018 * @phba: Pointer to HBA context object.
14019 * @cq: Pointer to the completion queue.
14020 * @cqe: Pointer to a completion queue entry.
14021 *
14022 * This routine processes a slow-path work-queue or receive-queue completion
14023 * queue entry.
14024 *
14025 * Return: true if work posted to worker thread, otherwise false.
14026 **/
14027static bool
14028lpfc_sli4_sp_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
14029                         struct lpfc_cqe *cqe)
14030{
14031        struct lpfc_cqe cqevt;
14032        bool workposted = false;
14033
14034        /* Copy the work queue CQE and convert endian order if needed */
14035        lpfc_sli4_pcimem_bcopy(cqe, &cqevt, sizeof(struct lpfc_cqe));
14036
14037        /* Check and process for different type of WCQE and dispatch */
14038        switch (bf_get(lpfc_cqe_code, &cqevt)) {
14039        case CQE_CODE_COMPL_WQE:
14040                /* Process the WQ/RQ complete event */
14041                phba->last_completion_time = jiffies;
14042                workposted = lpfc_sli4_sp_handle_els_wcqe(phba, cq,
14043                                (struct lpfc_wcqe_complete *)&cqevt);
14044                break;
14045        case CQE_CODE_RELEASE_WQE:
14046                /* Process the WQ release event */
14047                lpfc_sli4_sp_handle_rel_wcqe(phba,
14048                                (struct lpfc_wcqe_release *)&cqevt);
14049                break;
14050        case CQE_CODE_XRI_ABORTED:
14051                /* Process the WQ XRI abort event */
14052                phba->last_completion_time = jiffies;
14053                workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq,
14054                                (struct sli4_wcqe_xri_aborted *)&cqevt);
14055                break;
14056        case CQE_CODE_RECEIVE:
14057        case CQE_CODE_RECEIVE_V1:
14058                /* Process the RQ event */
14059                phba->last_completion_time = jiffies;
14060                workposted = lpfc_sli4_sp_handle_rcqe(phba,
14061                                (struct lpfc_rcqe *)&cqevt);
14062                break;
14063        default:
14064                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14065                                "0388 Not a valid WCQE code: x%x\n",
14066                                bf_get(lpfc_cqe_code, &cqevt));
14067                break;
14068        }
14069        return workposted;
14070}
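
/*
 * A hedged model of the copy-then-decode step above: CQE words are
 * little-endian, so on big-endian hosts each 32-bit word must be swapped
 * while copying into the local snapshot before any bf_get() decode. This
 * sketches the idea behind lpfc_sli4_pcimem_bcopy(), not its actual
 * implementation.
 */
static void demo_cqe_snapshot(const __le32 *src, uint32_t *dst, size_t bytes)
{
        size_t i;

        for (i = 0; i < bytes / sizeof(uint32_t); i++)
                dst[i] = le32_to_cpu(src[i]);
}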
14071
14072/**
14073 * lpfc_sli4_sp_handle_eqe - Process a slow-path event queue entry
14074 * @phba: Pointer to HBA context object.
14075 * @eqe: Pointer to fast-path event queue entry.
14076 * @speq: Pointer to slow-path event queue.
14077 *
14078 * This routine processes an event queue entry from the slow-path event queue.
14079 * It checks the MajorCode and MinorCode to determine whether this is a
14080 * completion event on a completion queue; if not, an error is logged and the
14081 * routine simply returns. Otherwise, it looks up the corresponding completion
14082 * queue, processes all the entries on that completion queue, re-arms the
14083 * completion queue, and then returns.
14084 *
14085 **/
14086static void
14087lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe,
14088        struct lpfc_queue *speq)
14089{
14090        struct lpfc_queue *cq = NULL, *childq;
14091        uint16_t cqid;
14092        int ret = 0;
14093
14094        /* Get the reference to the corresponding CQ */
14095        cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);
14096
14097        list_for_each_entry(childq, &speq->child_list, list) {
14098                if (childq->queue_id == cqid) {
14099                        cq = childq;
14100                        break;
14101                }
14102        }
14103        if (unlikely(!cq)) {
14104                if (phba->sli.sli_flag & LPFC_SLI_ACTIVE)
14105                        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14106                                        "0365 Slow-path CQ identifier "
14107                                        "(%d) does not exist\n", cqid);
14108                return;
14109        }
14110
14111        /* Save EQ associated with this CQ */
14112        cq->assoc_qp = speq;
14113
14114        if (is_kdump_kernel())
14115                ret = queue_work(phba->wq, &cq->spwork);
14116        else
14117                ret = queue_work_on(cq->chann, phba->wq, &cq->spwork);
14118
14119        if (!ret)
14120                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14121                                "0390 Cannot schedule queue work "
14122                                "for CQ eqcqid=%d, cqid=%d on CPU %d\n",
14123                                cqid, cq->queue_id, raw_smp_processor_id());
14124}
14125
14126/**
14127 * __lpfc_sli4_process_cq - Process elements of a CQ
14128 * @phba: Pointer to HBA context object.
14129 * @cq: Pointer to CQ to be processed
14130 * @handler: Routine to process each cqe
14131 * @delay: Pointer to usdelay to set in case of rescheduling of the handler
14132 * @poll_mode: Polling mode we were called from
14133 *
14134 * This routine processes completion queue entries in a CQ. While a valid
14135 * queue element is found, the handler is called. During processing checks
14136 * are made for periodic doorbell writes to let the hardware know of
14137 * element consumption.
14138 *
14139 * If the max limit on cqes to process is hit, or there are no more valid
14140 * entries, the loop stops. If we processed a sufficient number of elements,
14141 * meaning there is sufficient load, rather than rearming and generating
14142 * another interrupt, a cq rescheduling delay will be set. A delay of 0
14143 * indicates no rescheduling.
14144 *
14145 * Returns true if work was posted to the worker thread, otherwise false.
14146 **/
14147static bool
14148__lpfc_sli4_process_cq(struct lpfc_hba *phba, struct lpfc_queue *cq,
14149        bool (*handler)(struct lpfc_hba *, struct lpfc_queue *,
14150                        struct lpfc_cqe *), unsigned long *delay,
14151                        enum lpfc_poll_mode poll_mode)
14152{
14153        struct lpfc_cqe *cqe;
14154        bool workposted = false;
14155        int count = 0, consumed = 0;
14156        bool arm = true;
14157
14158        /* default - no reschedule */
14159        *delay = 0;
14160
14161        if (cmpxchg(&cq->queue_claimed, 0, 1) != 0)
14162                goto rearm_and_exit;
14163
14164        /* Process all the entries to the CQ */
14165        cq->q_flag = 0;
14166        cqe = lpfc_sli4_cq_get(cq);
14167        while (cqe) {
14168                workposted |= handler(phba, cq, cqe);
14169                __lpfc_sli4_consume_cqe(phba, cq, cqe);
14170
14171                consumed++;
14172                if (!(++count % cq->max_proc_limit))
14173                        break;
14174
14175                if (!(count % cq->notify_interval)) {
14176                        phba->sli4_hba.sli4_write_cq_db(phba, cq, consumed,
14177                                                LPFC_QUEUE_NOARM);
14178                        consumed = 0;
14179                        cq->assoc_qp->q_flag |= HBA_EQ_DELAY_CHK;
14180                }
14181
14182                if (count == LPFC_NVMET_CQ_NOTIFY)
14183                        cq->q_flag |= HBA_NVMET_CQ_NOTIFY;
14184
14185                cqe = lpfc_sli4_cq_get(cq);
14186        }
14187        if (count >= phba->cfg_cq_poll_threshold) {
14188                *delay = 1;
14189                arm = false;
14190        }
14191
14192        /* Note: complete the irq_poll softirq before rearming CQ */
14193        if (poll_mode == LPFC_IRQ_POLL)
14194                irq_poll_complete(&cq->iop);
14195
14196        /* Track the max number of CQEs processed in 1 EQ */
14197        if (count > cq->CQ_max_cqe)
14198                cq->CQ_max_cqe = count;
14199
14200        cq->assoc_qp->EQ_cqe_cnt += count;
14201
14202        /* Catch the no cq entry condition */
14203        if (unlikely(count == 0))
14204                lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
14205                                "0369 No entry from completion queue "
14206                                "qid=%d\n", cq->queue_id);
14207
14208        xchg(&cq->queue_claimed, 0);
14209
14210rearm_and_exit:
14211        phba->sli4_hba.sli4_write_cq_db(phba, cq, consumed,
14212                        arm ?  LPFC_QUEUE_REARM : LPFC_QUEUE_NOARM);
14213
14214        return workposted;
14215}
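
/*
 * Usage sketch for __lpfc_sli4_process_cq(): a caller supplies a per-CQE
 * handler with this signature and acts on the two outputs, the
 * work-posted status and the reschedule delay. The demo_* names are
 * hypothetical.
 */
static bool demo_cqe_handler(struct lpfc_hba *phba, struct lpfc_queue *cq,
                             struct lpfc_cqe *cqe)
{
        return false;   /* no work posted to the worker thread */
}

static void demo_drain_cq(struct lpfc_hba *phba, struct lpfc_queue *cq)
{
        unsigned long delay;
        bool workposted;

        workposted = __lpfc_sli4_process_cq(phba, cq, demo_cqe_handler,
                                            &delay, LPFC_QUEUE_WORK);
        if (delay)      /* heavy load: poll again later instead of re-arming */
                queue_delayed_work(phba->wq, &cq->sched_spwork, delay);
        if (workposted)
                lpfc_worker_wake_up(phba);
}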
14216
14217/**
14218 * __lpfc_sli4_sp_process_cq - Process a slow-path event queue entry
14219 * @cq: pointer to CQ to process
14220 *
14221 * This routine calls the cq processing routine with a handler specific
14222 * to the type of queue bound to it.
14223 *
14224 * The CQ routine returns two values: the first is the calling status,
14225 * which indicates whether work was queued to the background discovery
14226 * thread. If true, the routine should wake up the discovery thread;
14227 * the second is the delay parameter. If non-zero, rather than rearming
14228 * the CQ and taking yet another interrupt, the CQ handler should be queued so
14229 * that it is processed in a subsequent polling action. The value of
14230 * the delay indicates when to reschedule it.
14231 **/
14232static void
14233__lpfc_sli4_sp_process_cq(struct lpfc_queue *cq)
14234{
14235        struct lpfc_hba *phba = cq->phba;
14236        unsigned long delay;
14237        bool workposted = false;
14238        int ret = 0;
14239
14240        /* Process and rearm the CQ */
14241        switch (cq->type) {
14242        case LPFC_MCQ:
14243                workposted |= __lpfc_sli4_process_cq(phba, cq,
14244                                                lpfc_sli4_sp_handle_mcqe,
14245                                                &delay, LPFC_QUEUE_WORK);
14246                break;
14247        case LPFC_WCQ:
14248                if (cq->subtype == LPFC_IO)
14249                        workposted |= __lpfc_sli4_process_cq(phba, cq,
14250                                                lpfc_sli4_fp_handle_cqe,
14251                                                &delay, LPFC_QUEUE_WORK);
14252                else
14253                        workposted |= __lpfc_sli4_process_cq(phba, cq,
14254                                                lpfc_sli4_sp_handle_cqe,
14255                                                &delay, LPFC_QUEUE_WORK);
14256                break;
14257        default:
14258                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14259                                "0370 Invalid completion queue type (%d)\n",
14260                                cq->type);
14261                return;
14262        }
14263
14264        if (delay) {
14265                if (is_kdump_kernel())
14266                        ret = queue_delayed_work(phba->wq, &cq->sched_spwork,
14267                                                delay);
14268                else
14269                        ret = queue_delayed_work_on(cq->chann, phba->wq,
14270                                                &cq->sched_spwork, delay);
14271                if (!ret)
14272                        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14273                                "0394 Cannot schedule queue work "
14274                                "for cqid=%d on CPU %d\n",
14275                                cq->queue_id, cq->chann);
14276        }
14277
14278        /* wake up worker thread if there are works to be done */
14279        if (workposted)
14280                lpfc_worker_wake_up(phba);
14281}
14282
14283/**
14284 * lpfc_sli4_sp_process_cq - slow-path work handler when started by
14285 *   interrupt
14286 * @work: pointer to work element
14287 *
14288 * Translates from the work element and calls the slow-path handler.
14289 **/
14290static void
14291lpfc_sli4_sp_process_cq(struct work_struct *work)
14292{
14293        struct lpfc_queue *cq = container_of(work, struct lpfc_queue, spwork);
14294
14295        __lpfc_sli4_sp_process_cq(cq);
14296}
14297
14298/**
14299 * lpfc_sli4_dly_sp_process_cq - slow-path work handler when started by timer
14300 * @work: pointer to work element
14301 *
14302 * Translates from the work element and calls the slow-path handler.
14303 **/
14304static void
14305lpfc_sli4_dly_sp_process_cq(struct work_struct *work)
14306{
14307        struct lpfc_queue *cq = container_of(to_delayed_work(work),
14308                                        struct lpfc_queue, sched_spwork);
14309
14310        __lpfc_sli4_sp_process_cq(cq);
14311}
14312
14313/**
14314 * lpfc_sli4_fp_handle_fcp_wcqe - Process fast-path work queue completion entry
14315 * @phba: Pointer to HBA context object.
14316 * @cq: Pointer to associated CQ
14317 * @wcqe: Pointer to work-queue completion queue entry.
14318 *
14319 * This routine processes a fast-path work-queue completion entry from the
14320 * fast-path event queue for FCP command response completion.
14321 **/
14322static void
14323lpfc_sli4_fp_handle_fcp_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
14324                             struct lpfc_wcqe_complete *wcqe)
14325{
14326        struct lpfc_sli_ring *pring = cq->pring;
14327        struct lpfc_iocbq *cmdiocbq;
14328        struct lpfc_iocbq irspiocbq;
14329        unsigned long iflags;
14330
14331        /* Check for response status */
14332        if (unlikely(bf_get(lpfc_wcqe_c_status, wcqe))) {
14333                /* If resource errors reported from HBA, reduce queue
14334                 * depth of the SCSI device.
14335                 */
14336                if (((bf_get(lpfc_wcqe_c_status, wcqe) ==
14337                     IOSTAT_LOCAL_REJECT)) &&
14338                    ((wcqe->parameter & IOERR_PARAM_MASK) ==
14339                     IOERR_NO_RESOURCES))
14340                        phba->lpfc_rampdown_queue_depth(phba);
14341
14342                /* Log the cmpl status */
14343                lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
14344                                "0373 FCP CQE cmpl: status=x%x: "
14345                                "CQE: %08x %08x %08x %08x\n",
14346                                bf_get(lpfc_wcqe_c_status, wcqe),
14347                                wcqe->word0, wcqe->total_data_placed,
14348                                wcqe->parameter, wcqe->word3);
14349        }
14350
14351        /* Look up the FCP command IOCB and create pseudo response IOCB */
14352        spin_lock_irqsave(&pring->ring_lock, iflags);
14353        pring->stats.iocb_event++;
14354        spin_unlock_irqrestore(&pring->ring_lock, iflags);
14355        cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring,
14356                                bf_get(lpfc_wcqe_c_request_tag, wcqe));
14357        if (unlikely(!cmdiocbq)) {
14358                lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
14359                                "0374 FCP complete with no corresponding "
14360                                "cmdiocb: iotag (%d)\n",
14361                                bf_get(lpfc_wcqe_c_request_tag, wcqe));
14362                return;
14363        }
14364#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
14365        cmdiocbq->isr_timestamp = cq->isr_timestamp;
14366#endif
14367        if (cmdiocbq->iocb_cmpl == NULL) {
14368                if (cmdiocbq->wqe_cmpl) {
14369                        /* For FCP the flag is cleared in wqe_cmpl */
14370                        if (!(cmdiocbq->iocb_flag & LPFC_IO_FCP) &&
14371                            cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED) {
14372                                spin_lock_irqsave(&phba->hbalock, iflags);
14373                                cmdiocbq->iocb_flag &= ~LPFC_DRIVER_ABORTED;
14374                                spin_unlock_irqrestore(&phba->hbalock, iflags);
14375                        }
14376
14377                        /* Pass the cmd_iocb and the wcqe to the upper layer */
14378                        (cmdiocbq->wqe_cmpl)(phba, cmdiocbq, wcqe);
14379                        return;
14380                }
14381                lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
14382                                "0375 FCP cmdiocb has no callback function, "
14383                                "iotag: (%d)\n",
14384                                bf_get(lpfc_wcqe_c_request_tag, wcqe));
14385                return;
14386        }
14387
14388        /* Only SLI4 non-IO commands still use IOCB */
14389        /* Fake the irspiocb and copy necessary response information */
14390        lpfc_sli4_iocb_param_transfer(phba, &irspiocbq, cmdiocbq, wcqe);
14391
14392        if (cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED) {
14393                spin_lock_irqsave(&phba->hbalock, iflags);
14394                cmdiocbq->iocb_flag &= ~LPFC_DRIVER_ABORTED;
14395                spin_unlock_irqrestore(&phba->hbalock, iflags);
14396        }
14397
14398        /* Pass the cmd_iocb and the rsp state to the upper layer */
14399        (cmdiocbq->iocb_cmpl)(phba, cmdiocbq, &irspiocbq);
14400}
14401
14402/**
14403 * lpfc_sli4_fp_handle_rel_wcqe - Handle fast-path WQ entry consumed event
14404 * @phba: Pointer to HBA context object.
14405 * @cq: Pointer to completion queue.
14406 * @wcqe: Pointer to work-queue completion queue entry.
14407 *
14408 * This routine handles a fast-path WQ entry consumed event by invoking the
14409 * proper WQ release routine for the fast-path WQ.
14410 **/
14411static void
14412lpfc_sli4_fp_handle_rel_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
14413                             struct lpfc_wcqe_release *wcqe)
14414{
14415        struct lpfc_queue *childwq;
14416        bool wqid_matched = false;
14417        uint16_t hba_wqid;
14418
14419        /* Check for fast-path FCP work queue release */
14420        hba_wqid = bf_get(lpfc_wcqe_r_wq_id, wcqe);
14421        list_for_each_entry(childwq, &cq->child_list, list) {
14422                if (childwq->queue_id == hba_wqid) {
14423                        lpfc_sli4_wq_release(childwq,
14424                                        bf_get(lpfc_wcqe_r_wqe_index, wcqe));
14425                        if (childwq->q_flag & HBA_NVMET_WQFULL)
14426                                lpfc_nvmet_wqfull_process(phba, childwq);
14427                        wqid_matched = true;
14428                        break;
14429                }
14430        }
14431        /* Report warning log message if no match found */
14432        if (!wqid_matched)
14433                lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
14434                                "2580 Fast-path wqe consume event carries "
14435                                "mismatched qid: wcqe-qid=x%x\n", hba_wqid);
14436}
14437
14438/**
14439 * lpfc_sli4_nvmet_handle_rcqe - Process a receive-queue completion queue entry
14440 * @phba: Pointer to HBA context object.
14441 * @cq: Pointer to completion queue.
14442 * @rcqe: Pointer to receive-queue completion queue entry.
14443 *
14444 * This routine processes a receive-queue completion queue entry.
14445 *
14446 * Return: true if work posted to worker thread, otherwise false.
14447 **/
14448static bool
14449lpfc_sli4_nvmet_handle_rcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
14450                            struct lpfc_rcqe *rcqe)
14451{
14452        bool workposted = false;
14453        struct lpfc_queue *hrq;
14454        struct lpfc_queue *drq;
14455        struct rqb_dmabuf *dma_buf;
14456        struct fc_frame_header *fc_hdr;
14457        struct lpfc_nvmet_tgtport *tgtp;
14458        uint32_t status, rq_id;
14459        unsigned long iflags;
14460        uint32_t fctl, idx;
14461
14462        if ((phba->nvmet_support == 0) ||
14463            (phba->sli4_hba.nvmet_cqset == NULL))
14464                return workposted;
14465
14466        idx = cq->queue_id - phba->sli4_hba.nvmet_cqset[0]->queue_id;
14467        hrq = phba->sli4_hba.nvmet_mrq_hdr[idx];
14468        drq = phba->sli4_hba.nvmet_mrq_data[idx];
14469
14470        /* sanity check on queue memory */
14471        if (unlikely(!hrq) || unlikely(!drq))
14472                return workposted;
14473
14474        if (bf_get(lpfc_cqe_code, rcqe) == CQE_CODE_RECEIVE_V1)
14475                rq_id = bf_get(lpfc_rcqe_rq_id_v1, rcqe);
14476        else
14477                rq_id = bf_get(lpfc_rcqe_rq_id, rcqe);
14478
14479        if ((phba->nvmet_support == 0) ||
14480            (rq_id != hrq->queue_id))
14481                return workposted;
14482
14483        status = bf_get(lpfc_rcqe_status, rcqe);
14484        switch (status) {
14485        case FC_STATUS_RQ_BUF_LEN_EXCEEDED:
14486                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14487                                "6126 Receive Frame Truncated!!\n");
14488                fallthrough;
14489        case FC_STATUS_RQ_SUCCESS:
14490                spin_lock_irqsave(&phba->hbalock, iflags);
14491                lpfc_sli4_rq_release(hrq, drq);
14492                dma_buf = lpfc_sli_rqbuf_get(phba, hrq);
14493                if (!dma_buf) {
14494                        hrq->RQ_no_buf_found++;
14495                        spin_unlock_irqrestore(&phba->hbalock, iflags);
14496                        goto out;
14497                }
14498                spin_unlock_irqrestore(&phba->hbalock, iflags);
14499                hrq->RQ_rcv_buf++;
14500                hrq->RQ_buf_posted--;
14501                fc_hdr = (struct fc_frame_header *)dma_buf->hbuf.virt;
14502
14503                /* Just some basic sanity checks on FCP Command frame */
14504                fctl = (fc_hdr->fh_f_ctl[0] << 16 |
14505                        fc_hdr->fh_f_ctl[1] << 8 |
14506                        fc_hdr->fh_f_ctl[2]);
14507                if (((fctl &
14508                    (FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT)) !=
14509                    (FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT)) ||
14510                    (fc_hdr->fh_seq_cnt != 0)) /* 0 byte swapped is still 0 */
14511                        goto drop;
14512
14513                if (fc_hdr->fh_type == FC_TYPE_FCP) {
14514                        dma_buf->bytes_recv = bf_get(lpfc_rcqe_length, rcqe);
14515                        lpfc_nvmet_unsol_fcp_event(
14516                                phba, idx, dma_buf, cq->isr_timestamp,
14517                                cq->q_flag & HBA_NVMET_CQ_NOTIFY);
14518                        return false;
14519                }
14520drop:
14521                lpfc_rq_buf_free(phba, &dma_buf->hbuf);
14522                break;
14523        case FC_STATUS_INSUFF_BUF_FRM_DISC:
14524                if (phba->nvmet_support) {
14525                        tgtp = phba->targetport->private;
14526                        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14527                                        "6401 RQE Error x%x, posted %d err_cnt "
14528                                        "%d: %x %x %x\n",
14529                                        status, hrq->RQ_buf_posted,
14530                                        hrq->RQ_no_posted_buf,
14531                                        atomic_read(&tgtp->rcv_fcp_cmd_in),
14532                                        atomic_read(&tgtp->rcv_fcp_cmd_out),
14533                                        atomic_read(&tgtp->xmt_fcp_release));
14534                }
14535                fallthrough;
14536
14537        case FC_STATUS_INSUFF_BUF_NEED_BUF:
14538                hrq->RQ_no_posted_buf++;
14539                /* Post more buffers if possible */
14540                break;
14541        }
14542out:
14543        return workposted;
14544}
14545
14546/**
14547 * lpfc_sli4_fp_handle_cqe - Process fast-path work queue completion entry
14548 * @phba: adapter with cq
14549 * @cq: Pointer to the completion queue.
14550 * @cqe: Pointer to fast-path completion queue entry.
14551 *
14552 * This routine processes a fast-path work-queue completion entry from the
14553 * fast-path event queue for FCP command response completion.
14554 *
14555 * Return: true if work posted to worker thread, otherwise false.
14556 **/
14557static bool
14558lpfc_sli4_fp_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
14559                         struct lpfc_cqe *cqe)
14560{
14561        struct lpfc_wcqe_release wcqe;
14562        bool workposted = false;
14563
14564        /* Copy the work queue CQE and convert endian order if needed */
14565        lpfc_sli4_pcimem_bcopy(cqe, &wcqe, sizeof(struct lpfc_cqe));
14566
14567        /* Check and process for different type of WCQE and dispatch */
14568        switch (bf_get(lpfc_wcqe_c_code, &wcqe)) {
14569        case CQE_CODE_COMPL_WQE:
14570        case CQE_CODE_NVME_ERSP:
14571                cq->CQ_wq++;
14572                /* Process the WQ complete event */
14573                phba->last_completion_time = jiffies;
14574                if (cq->subtype == LPFC_IO || cq->subtype == LPFC_NVME_LS)
14575                        lpfc_sli4_fp_handle_fcp_wcqe(phba, cq,
14576                                (struct lpfc_wcqe_complete *)&wcqe);
14577                break;
14578        case CQE_CODE_RELEASE_WQE:
14579                cq->CQ_release_wqe++;
14580                /* Process the WQ release event */
14581                lpfc_sli4_fp_handle_rel_wcqe(phba, cq,
14582                                (struct lpfc_wcqe_release *)&wcqe);
14583                break;
14584        case CQE_CODE_XRI_ABORTED:
14585                cq->CQ_xri_aborted++;
14586                /* Process the WQ XRI abort event */
14587                phba->last_completion_time = jiffies;
14588                workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq,
14589                                (struct sli4_wcqe_xri_aborted *)&wcqe);
14590                break;
14591        case CQE_CODE_RECEIVE_V1:
14592        case CQE_CODE_RECEIVE:
14593                phba->last_completion_time = jiffies;
14594                if (cq->subtype == LPFC_NVMET) {
14595                        workposted = lpfc_sli4_nvmet_handle_rcqe(
14596                                phba, cq, (struct lpfc_rcqe *)&wcqe);
14597                }
14598                break;
14599        default:
14600                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14601                                "0144 Not a valid CQE code: x%x\n",
14602                                bf_get(lpfc_wcqe_c_code, &wcqe));
14603                break;
14604        }
14605        return workposted;
14606}
14607
14608/**
14609 * lpfc_sli4_sched_cq_work - Schedules cq work
14610 * @phba: Pointer to HBA context object.
14611 * @cq: Pointer to CQ
14612 * @cqid: CQ ID
14613 *
14614 * This routine checks the poll mode of the CQ corresponding to
14615 * cq->chann, then either schedules an irq_poll softirq or queues work
14616 * to complete the CQ processing.
14617 *
14618 * The queue_work path is taken in NVMET mode or when poll_mode is
14619 * LPFC_QUEUE_WORK; otherwise the softirq path is taken (irq_poll sketch below).
14620 *
14621 **/
14622static void lpfc_sli4_sched_cq_work(struct lpfc_hba *phba,
14623                                    struct lpfc_queue *cq, uint16_t cqid)
14624{
14625        int ret = 0;
14626
14627        switch (cq->poll_mode) {
14628        case LPFC_IRQ_POLL:
14629                irq_poll_sched(&cq->iop);
14630                break;
14631        case LPFC_QUEUE_WORK:
14632        default:
14633                if (is_kdump_kernel())
14634                        ret = queue_work(phba->wq, &cq->irqwork);
14635                else
14636                        ret = queue_work_on(cq->chann, phba->wq, &cq->irqwork);
14637                if (!ret)
14638                        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14639                                        "0383 Cannot schedule queue work "
14640                                        "for CQ eqcqid=%d, cqid=%d on CPU %d\n",
14641                                        cqid, cq->queue_id,
14642                                        raw_smp_processor_id());
14643        }
14644}
14645
14646/**
14647 * lpfc_sli4_hba_handle_eqe - Process a fast-path event queue entry
14648 * @phba: Pointer to HBA context object.
14649 * @eq: Pointer to the queue structure.
14650 * @eqe: Pointer to fast-path event queue entry.
14651 *
14652 * This routine processes an event queue entry from the fast-path event queue.
14653 * It checks the MajorCode and MinorCode to determine whether this is a
14654 * completion event on a completion queue; if not, an error is logged and the
14655 * routine simply returns. Otherwise, it looks up the corresponding completion
14656 * queue, processes all the entries on that completion queue, re-arms the
14657 * completion queue, and then returns.
14658 **/
14659static void
14660lpfc_sli4_hba_handle_eqe(struct lpfc_hba *phba, struct lpfc_queue *eq,
14661                         struct lpfc_eqe *eqe)
14662{
14663        struct lpfc_queue *cq = NULL;
14664        uint32_t qidx = eq->hdwq;
14665        uint16_t cqid, id;
14666
14667        if (unlikely(bf_get_le32(lpfc_eqe_major_code, eqe) != 0)) {
14668                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14669                                "0366 Not a valid completion "
14670                                "event: majorcode=x%x, minorcode=x%x\n",
14671                                bf_get_le32(lpfc_eqe_major_code, eqe),
14672                                bf_get_le32(lpfc_eqe_minor_code, eqe));
14673                return;
14674        }
14675
14676        /* Get the reference to the corresponding CQ */
14677        cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);
14678
14679        /* Use the fast lookup method first */
14680        if (cqid <= phba->sli4_hba.cq_max) {
14681                cq = phba->sli4_hba.cq_lookup[cqid];
14682                if (cq)
14683                        goto  work_cq;
14684        }
14685
14686        /* Next check for NVMET completion */
14687        if (phba->cfg_nvmet_mrq && phba->sli4_hba.nvmet_cqset) {
14688                id = phba->sli4_hba.nvmet_cqset[0]->queue_id;
14689                if ((cqid >= id) && (cqid < (id + phba->cfg_nvmet_mrq))) {
14690                        /* Process NVMET unsol rcv */
14691                        cq = phba->sli4_hba.nvmet_cqset[cqid - id];
14692                        goto  process_cq;
14693                }
14694        }
14695
14696        if (phba->sli4_hba.nvmels_cq &&
14697            (cqid == phba->sli4_hba.nvmels_cq->queue_id)) {
14698                /* Process NVME unsol rcv */
14699                cq = phba->sli4_hba.nvmels_cq;
14700        }
14701
14702        /* Otherwise this is a Slow path event */
14703        if (cq == NULL) {
14704                lpfc_sli4_sp_handle_eqe(phba, eqe,
14705                                        phba->sli4_hba.hdwq[qidx].hba_eq);
14706                return;
14707        }
14708
14709process_cq:
14710        if (unlikely(cqid != cq->queue_id)) {
14711                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14712                                "0368 Mismatched fast-path completion "
14713                                "queue identifier: eqcqid=%d, fcpcqid=%d\n",
14714                                cqid, cq->queue_id);
14715                return;
14716        }
14717
14718work_cq:
14719#if defined(CONFIG_SCSI_LPFC_DEBUG_FS)
14720        if (phba->ktime_on)
14721                cq->isr_timestamp = ktime_get_ns();
14722        else
14723                cq->isr_timestamp = 0;
14724#endif
14725        lpfc_sli4_sched_cq_work(phba, cq, cqid);
14726}
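
/*
 * A condensed sketch of the CQID fast lookup used above, assuming the
 * driver populated a flat cq_lookup[] table indexed by queue id up to
 * cq_max during queue setup; anything outside the table falls back to the
 * slower NVMET/slow-path searches. The helper name is hypothetical.
 */
static struct lpfc_queue *demo_cq_from_cqid(struct lpfc_hba *phba,
                                            uint16_t cqid)
{
        if (cqid <= phba->sli4_hba.cq_max)
                return phba->sli4_hba.cq_lookup[cqid];  /* O(1) hit or NULL */
        return NULL;    /* caller falls back to the slow-path search */
}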
14727
14728/**
14729 * __lpfc_sli4_hba_process_cq - Process a fast-path event queue entry
14730 * @cq: Pointer to CQ to be processed
14731 * @poll_mode: Enum lpfc_poll_mode to determine the poll mode
14732 *
14733 * This routine calls the cq processing routine with the handler for
14734 * fast path CQEs.
14735 *
14736 * The CQ routine returns two values: the first is the calling status,
14737 * which indicates whether work was queued to the background discovery
14738 * thread. If true, the routine should wake up the discovery thread;
14739 * the second is the delay parameter. If non-zero, rather than rearming
14740 * the CQ and taking yet another interrupt, the CQ handler should be queued so
14741 * that it is processed in a subsequent polling action. The value of
14742 * the delay indicates when to reschedule it.
14743 **/
14744static void
14745__lpfc_sli4_hba_process_cq(struct lpfc_queue *cq,
14746                           enum lpfc_poll_mode poll_mode)
14747{
14748        struct lpfc_hba *phba = cq->phba;
14749        unsigned long delay;
14750        bool workposted = false;
14751        int ret = 0;
14752
14753        /* process and rearm the CQ */
14754        workposted |= __lpfc_sli4_process_cq(phba, cq, lpfc_sli4_fp_handle_cqe,
14755                                             &delay, poll_mode);
14756
14757        if (delay) {
14758                if (is_kdump_kernel())
14759                        ret = queue_delayed_work(phba->wq, &cq->sched_irqwork,
14760                                                delay);
14761                else
14762                        ret = queue_delayed_work_on(cq->chann, phba->wq,
14763                                                &cq->sched_irqwork, delay);
14764                if (!ret)
14765                        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14766                                        "0367 Cannot schedule queue work "
14767                                        "for cqid=%d on CPU %d\n",
14768                                        cq->queue_id, cq->chann);
14769        }
14770
14771        /* wake up worker thread if there are works to be done */
14772        if (workposted)
14773                lpfc_worker_wake_up(phba);
14774}
14775
14776/**
14777 * lpfc_sli4_hba_process_cq - fast-path work handler when started by
14778 *   interrupt
14779 * @work: pointer to work element
14780 *
14781 * Translates from the work element and calls the fast-path handler.
14782 **/
14783static void
14784lpfc_sli4_hba_process_cq(struct work_struct *work)
14785{
14786        struct lpfc_queue *cq = container_of(work, struct lpfc_queue, irqwork);
14787
14788        __lpfc_sli4_hba_process_cq(cq, LPFC_QUEUE_WORK);
14789}
14790
14791/**
14792 * lpfc_sli4_dly_hba_process_cq - fast-path work handler when started by timer
14793 * @work: pointer to work element
14794 *
14795 * Translates from the work element and calls the fast-path handler.
14796 **/
14797static void
14798lpfc_sli4_dly_hba_process_cq(struct work_struct *work)
14799{
14800        struct lpfc_queue *cq = container_of(to_delayed_work(work),
14801                                        struct lpfc_queue, sched_irqwork);
14802
14803        __lpfc_sli4_hba_process_cq(cq, LPFC_QUEUE_WORK);
14804}
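
/*
 * The LPFC_IRQ_POLL branch of lpfc_sli4_sched_cq_work() above leans on the
 * kernel's irq_poll facility. A hedged sketch of the setup side (the
 * weight value is illustrative and the demo_* names hypothetical): the
 * poll callback drains the CQ, and irq_poll_complete() is issued before
 * the CQ is re-armed, as __lpfc_sli4_process_cq() already does for this
 * mode.
 */
static int demo_cq_irqpoll_cb(struct irq_poll *iop, int budget)
{
        struct lpfc_queue *cq = container_of(iop, struct lpfc_queue, iop);

        __lpfc_sli4_hba_process_cq(cq, LPFC_IRQ_POLL);
        return 1;       /* a real callback reports entries consumed */
}

static void demo_cq_irqpoll_init(struct lpfc_queue *cq)
{
        irq_poll_init(&cq->iop, 64 /* illustrative weight */,
                      demo_cq_irqpoll_cb);
}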
14805
14806/**
14807 * lpfc_sli4_hba_intr_handler - HBA interrupt handler to SLI-4 device
14808 * @irq: Interrupt number.
14809 * @dev_id: The device context pointer.
14810 *
14811 * This function is directly called from the PCI layer as an interrupt
14812 * service routine when device with SLI-4 interface spec is enabled with
14813 * MSI-X multi-message interrupt mode and there is a fast-path FCP IOCB
14814 * ring event in the HBA. However, when the device is enabled with either
14815 * MSI or Pin-IRQ interrupt mode, this function is called as part of the
14816 * device-level interrupt handler. When the PCI slot is in error recovery
14817 * or the HBA is undergoing initialization, the interrupt handler will not
14818 * process the interrupt. The SCSI FCP fast-path ring events are handled in
14819 * the interrupt context. This function is called without any lock held.
14820 * It gets the hbalock to access and update SLI data structures. Note that
14821 * the FCP EQs map one-to-one to the FCP CQs, such that the FCP EQ index is
14822 * equal to the corresponding FCP CQ index.
14823 *
14824 * The link attention and ELS ring attention events are handled
14825 * by the worker thread. The interrupt handler signals the worker thread
14826 * and returns for these events. This function is called without any lock
14827 * held. It gets the hbalock to access and update SLI data structures.
14828 *
14829 * This function returns IRQ_HANDLED when the interrupt is handled, else it
14830 * returns IRQ_NONE.
14831 **/
14832irqreturn_t
14833lpfc_sli4_hba_intr_handler(int irq, void *dev_id)
14834{
14835        struct lpfc_hba *phba;
14836        struct lpfc_hba_eq_hdl *hba_eq_hdl;
14837        struct lpfc_queue *fpeq;
14838        unsigned long iflag;
14839        int ecount = 0;
14840        int hba_eqidx;
14841        struct lpfc_eq_intr_info *eqi;
14842
14843        /* Get the driver's phba structure from the dev_id */
14844        hba_eq_hdl = (struct lpfc_hba_eq_hdl *)dev_id;
14845        phba = hba_eq_hdl->phba;
14846        hba_eqidx = hba_eq_hdl->idx;
14847
14848        if (unlikely(!phba))
14849                return IRQ_NONE;
14850        if (unlikely(!phba->sli4_hba.hdwq))
14851                return IRQ_NONE;
14852
14853        /* Get to the EQ struct associated with this vector */
14854        fpeq = phba->sli4_hba.hba_eq_hdl[hba_eqidx].eq;
14855        if (unlikely(!fpeq))
14856                return IRQ_NONE;
14857
14858        /* Check device state for handling interrupt */
14859        if (unlikely(lpfc_intr_state_check(phba))) {
14860                /* Check again for link_state with lock held */
14861                spin_lock_irqsave(&phba->hbalock, iflag);
14862                if (phba->link_state < LPFC_LINK_DOWN)
14863                        /* Flush, clear interrupt, and rearm the EQ */
14864                        lpfc_sli4_eqcq_flush(phba, fpeq);
14865                spin_unlock_irqrestore(&phba->hbalock, iflag);
14866                return IRQ_NONE;
14867        }
14868
14869        eqi = this_cpu_ptr(phba->sli4_hba.eq_info);
14870        eqi->icnt++;
14871
14872        fpeq->last_cpu = raw_smp_processor_id();
14873
14874        if (eqi->icnt > LPFC_EQD_ISR_TRIGGER &&
14875            fpeq->q_flag & HBA_EQ_DELAY_CHK &&
14876            phba->cfg_auto_imax &&
14877            fpeq->q_mode != LPFC_MAX_AUTO_EQ_DELAY &&
14878            phba->sli.sli_flag & LPFC_SLI_USE_EQDR)
14879                lpfc_sli4_mod_hba_eq_delay(phba, fpeq, LPFC_MAX_AUTO_EQ_DELAY);
14880
14881        /* process and rearm the EQ */
14882        ecount = lpfc_sli4_process_eq(phba, fpeq, LPFC_QUEUE_REARM);
14883
14884        if (unlikely(ecount == 0)) {
14885                fpeq->EQ_no_entry++;
14886                if (phba->intr_type == MSIX)
14887                        /* MSI-X treated interrupt served as no EQ share INT */
14888                        lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
14889                                        "0358 MSI-X interrupt with no EQE\n");
14890                else
14891                        /* Non MSI-X treated on interrupt as EQ share INT */
14892                        return IRQ_NONE;
14893        }
14894
14895        return IRQ_HANDLED;
14896} /* lpfc_sli4_hba_intr_handler */
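
/*
 * A hedged sketch of how a per-vector handler like the above is typically
 * registered for MSI-X: one request_irq() per EQ, with the per-vector
 * lpfc_hba_eq_hdl passed as dev_id so the handler can recover phba and the
 * EQ index. The name string, flags, affinity, and error unwinding are
 * omitted or illustrative.
 */
static int demo_register_eq_vectors(struct lpfc_hba *phba)
{
        int idx, rc;

        for (idx = 0; idx < phba->cfg_irq_chann; idx++) {
                rc = request_irq(pci_irq_vector(phba->pcidev, idx),
                                 lpfc_sli4_hba_intr_handler, 0, "lpfc-eq",
                                 &phba->sli4_hba.hba_eq_hdl[idx]);
                if (rc)
                        return rc;      /* caller frees earlier vectors */
        }
        return 0;
}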
14897
14898/**
14899 * lpfc_sli4_intr_handler - Device-level interrupt handler for SLI-4 device
14900 * @irq: Interrupt number.
14901 * @dev_id: The device context pointer.
14902 *
14903 * This function is the device-level interrupt handler to device with SLI-4
14904 * interface spec, called from the PCI layer when either MSI or Pin-IRQ
14905 * interrupt mode is enabled and there is an event in the HBA which requires
14906 * driver attention. This function invokes the slow-path interrupt attention
14907 * handling function and fast-path interrupt attention handling function in
14908 * turn to process the relevant HBA attention events. This function is called
14909 * without any lock held. It gets the hbalock to access and update SLI data
14910 * structures.
14911 *
14912 * This function returns IRQ_HANDLED when the interrupt is handled, else it
14913 * returns IRQ_NONE.
14914 **/
14915irqreturn_t
14916lpfc_sli4_intr_handler(int irq, void *dev_id)
14917{
14918        struct lpfc_hba  *phba;
14919        irqreturn_t hba_irq_rc;
14920        bool hba_handled = false;
14921        int qidx;
14922
14923        /* Get the driver's phba structure from the dev_id */
14924        phba = (struct lpfc_hba *)dev_id;
14925
14926        if (unlikely(!phba))
14927                return IRQ_NONE;
14928
14929        /*
14930         * Invoke fast-path host attention interrupt handling as appropriate.
14931         */
14932        for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) {
14933                hba_irq_rc = lpfc_sli4_hba_intr_handler(irq,
14934                                        &phba->sli4_hba.hba_eq_hdl[qidx]);
14935                if (hba_irq_rc == IRQ_HANDLED)
14936                        hba_handled = true;
14937        }
14938
14939        return hba_handled ? IRQ_HANDLED : IRQ_NONE;
14940} /* lpfc_sli4_intr_handler */
14941
14942void lpfc_sli4_poll_hbtimer(struct timer_list *t)
14943{
14944        struct lpfc_hba *phba = from_timer(phba, t, cpuhp_poll_timer);
14945        struct lpfc_queue *eq;
14946        int i = 0;
14947
14948        rcu_read_lock();
14949
14950        list_for_each_entry_rcu(eq, &phba->poll_list, _poll_list)
14951                i += lpfc_sli4_poll_eq(eq, LPFC_POLL_SLOWPATH);
14952        if (!list_empty(&phba->poll_list))
14953                mod_timer(&phba->cpuhp_poll_timer,
14954                          jiffies + msecs_to_jiffies(LPFC_POLL_HB));
14955
14956        rcu_read_unlock();
14957}
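
/*
 * Lifecycle sketch for the heartbeat timer above, assuming the standard
 * timer_list API: set up once with lpfc_sli4_poll_hbtimer as the callback,
 * first armed when an EQ joins phba->poll_list, then self-rearming while
 * the list stays non-empty. The helper name is hypothetical.
 */
static void demo_poll_timer_setup(struct lpfc_hba *phba)
{
        timer_setup(&phba->cpuhp_poll_timer, lpfc_sli4_poll_hbtimer, 0);
        /* first mod_timer() happens in lpfc_sli4_add_to_poll_list() below */
}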
14958
14959inline int lpfc_sli4_poll_eq(struct lpfc_queue *eq, uint8_t path)
14960{
14961        struct lpfc_hba *phba = eq->phba;
14962        int i = 0;
14963
14964        /*
14965         * Unlocking an irq is one of the entry points to check
14966         * for a reschedule, but we are fine on the io submission
14967         * path as the midlayer does a get_cpu to glue us in. Flush
14968         * out the invalidation queue so we can see the updated
14969         * value for the mode flag.
14970         */
14971        smp_rmb();
14972
14973        if (READ_ONCE(eq->mode) == LPFC_EQ_POLL)
14974                /* We will likely not get the completion for the caller
14975                 * during this iteration, but that's fine.
14976                 * Future io's coming on this eq should be able to
14977                 * pick it up.  As for the case of single io's, they
14978                 * will be handled through a sched from the polling timer
14979                 * function, which is currently triggered every 1 msec.
14980                 */
14981                i = lpfc_sli4_process_eq(phba, eq, LPFC_QUEUE_NOARM);
14982
14983        return i;
14984}
14985
14986static inline void lpfc_sli4_add_to_poll_list(struct lpfc_queue *eq)
14987{
14988        struct lpfc_hba *phba = eq->phba;
14989
14990        /* kickstart slowpath processing if needed */
14991        if (list_empty(&phba->poll_list))
14992                mod_timer(&phba->cpuhp_poll_timer,
14993                          jiffies + msecs_to_jiffies(LPFC_POLL_HB));
14994
14995        list_add_rcu(&eq->_poll_list, &phba->poll_list);
14996        synchronize_rcu();
14997}
14998
14999static inline void lpfc_sli4_remove_from_poll_list(struct lpfc_queue *eq)
15000{
15001        struct lpfc_hba *phba = eq->phba;
15002
15003        /* Disable slowpath processing for this eq.  Kick-start the eq
15004         * by re-arming it ASAP.
15005         */
15006        list_del_rcu(&eq->_poll_list);
15007        synchronize_rcu();
15008
15009        if (list_empty(&phba->poll_list))
15010                del_timer_sync(&phba->cpuhp_poll_timer);
15011}
15012
15013void lpfc_sli4_cleanup_poll_list(struct lpfc_hba *phba)
15014{
15015        struct lpfc_queue *eq, *next;
15016
15017        list_for_each_entry_safe(eq, next, &phba->poll_list, _poll_list)
15018                list_del(&eq->_poll_list);
15019
15020        INIT_LIST_HEAD(&phba->poll_list);
15021        synchronize_rcu();
15022}
15023
15024static inline void
15025__lpfc_sli4_switch_eqmode(struct lpfc_queue *eq, uint8_t mode)
15026{
15027        if (mode == eq->mode)
15028                return;
15029        /*
15030         * Currently this function is only called during a hotplug
15031         * event and the cpu on which this function is executing
15032         * is going offline.  By now the hotplug has instructed
15033         * the scheduler to remove this cpu from the cpu active mask.
15034         * So we don't need to worry about being put aside by the
15035         * scheduler for a high-priority process.  Yes, interrupts
15036         * could still arrive, but they are known to retire ASAP.
15037         */
15038
15039        /* Disable polling in the fastpath */
15040        WRITE_ONCE(eq->mode, mode);
15041        /* flush out the store buffer */
15042        smp_wmb();
15043
15044        /*
15045         * Add this eq to the polling list and start polling. For
15046         * a grace period both the interrupt handler and the poller
15047         * will try to process the eq _but_ that's fine.  We have a
15048         * synchronization mechanism in place (queue_claimed) to
15049         * deal with it.  This is just a draining phase for the
15050         * interrupt handler (not the eq's), as we have guaranteed
15051         * through the barrier that all the CPUs have seen the new
15052         * polled state, which effectively disables the re-arming of
15053         * the EQ.  The whole idea is that eq activity dies off
15054         * eventually as we are no longer re-arming the EQ's.
15055         */
15056        mode ? lpfc_sli4_add_to_poll_list(eq) :
15057               lpfc_sli4_remove_from_poll_list(eq);
15058}
15059
15060void lpfc_sli4_start_polling(struct lpfc_queue *eq)
15061{
15062        __lpfc_sli4_switch_eqmode(eq, LPFC_EQ_POLL);
15063}
15064
15065void lpfc_sli4_stop_polling(struct lpfc_queue *eq)
15066{
15067        struct lpfc_hba *phba = eq->phba;
15068
15069        __lpfc_sli4_switch_eqmode(eq, LPFC_EQ_INTERRUPT);
15070
15071        /* Kick-start the pending io's in h/w.
15072         * Once we switch back to interrupt processing on an eq,
15073         * the io completion path will only arm the eq when it
15074         * receives a completion.  But since the eq is in the
15075         * disarmed state it never receives a completion.  This
15076         * creates a deadlock scenario.
15077         */
15078        phba->sli4_hba.sli4_write_eq_db(phba, eq, 0, LPFC_QUEUE_REARM);
15079}
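
/*
 * Usage sketch, assuming these entry points are driven from CPU hotplug
 * callbacks (the demo_ names are hypothetical): when the CPU backing an
 * EQ's vector goes down, fall back to timer-driven polling; when it comes
 * back, return to interrupts, which also re-arms the EQ as described
 * above.
 */
static void demo_cpu_going_offline(struct lpfc_queue *eq)
{
        lpfc_sli4_start_polling(eq);    /* the vector may stop firing */
}

static void demo_cpu_back_online(struct lpfc_queue *eq)
{
        lpfc_sli4_stop_polling(eq);     /* re-arm and resume interrupts */
}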
15080
15081/**
15082 * lpfc_sli4_queue_free - free a queue structure and associated memory
15083 * @queue: The queue structure to free.
15084 *
15085 * This function frees a queue structure and the DMAable memory used for
15086 * the host resident queue. This function must be called after destroying the
15087 * queue on the HBA.
15088 **/
15089void
15090lpfc_sli4_queue_free(struct lpfc_queue *queue)
15091{
15092        struct lpfc_dmabuf *dmabuf;
15093
15094        if (!queue)
15095                return;
15096
15097        if (!list_empty(&queue->wq_list))
15098                list_del(&queue->wq_list);
15099
15100        while (!list_empty(&queue->page_list)) {
15101                list_remove_head(&queue->page_list, dmabuf, struct lpfc_dmabuf,
15102                                 list);
15103                dma_free_coherent(&queue->phba->pcidev->dev, queue->page_size,
15104                                  dmabuf->virt, dmabuf->phys);
15105                kfree(dmabuf);
15106        }
15107        if (queue->rqbp) {
15108                lpfc_free_rq_buffer(queue->phba, queue);
15109                kfree(queue->rqbp);
15110        }
15111
15112        if (!list_empty(&queue->cpu_list))
15113                list_del(&queue->cpu_list);
15114
15115        kfree(queue);
15117}
15118
15119/**
15120 * lpfc_sli4_queue_alloc - Allocate and initialize a queue structure
15121 * @phba: The HBA that this queue is being created on.
15122 * @page_size: The size of a queue page
15123 * @entry_size: The size of each queue entry for this queue.
15124 * @entry_count: The number of entries that this queue will handle.
15125 * @cpu: The cpu that will primarily utilize this queue.
15126 *
15127 * This function allocates a queue structure and the DMAable memory used for
15128 * the host resident queue. This function must be called before creating the
15129 * queue on the HBA.
15130 **/
15131struct lpfc_queue *
15132lpfc_sli4_queue_alloc(struct lpfc_hba *phba, uint32_t page_size,
15133                      uint32_t entry_size, uint32_t entry_count, int cpu)
15134{
15135        struct lpfc_queue *queue;
15136        struct lpfc_dmabuf *dmabuf;
15137        uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
15138        uint16_t x, pgcnt;
15139
15140        if (!phba->sli4_hba.pc_sli4_params.supported)
15141                hw_page_size = page_size;
15142
15143        pgcnt = ALIGN(entry_size * entry_count, hw_page_size) / hw_page_size;
15144
15145        /* If needed, adjust the page count to match the max the adapter supports */
15146        if (pgcnt > phba->sli4_hba.pc_sli4_params.wqpcnt)
15147                pgcnt = phba->sli4_hba.pc_sli4_params.wqpcnt;
15148
15149        queue = kzalloc_node(sizeof(*queue) + (sizeof(void *) * pgcnt),
15150                             GFP_KERNEL, cpu_to_node(cpu));
15151        if (!queue)
15152                return NULL;
15153
15154        INIT_LIST_HEAD(&queue->list);
15155        INIT_LIST_HEAD(&queue->_poll_list);
15156        INIT_LIST_HEAD(&queue->wq_list);
15157        INIT_LIST_HEAD(&queue->wqfull_list);
15158        INIT_LIST_HEAD(&queue->page_list);
15159        INIT_LIST_HEAD(&queue->child_list);
15160        INIT_LIST_HEAD(&queue->cpu_list);
15161
15162        /* Set queue parameters now.  If the system cannot provide memory
15163         * resources, the free routine needs to know what was allocated.
15164         */
15165        queue->page_count = pgcnt;
15166        queue->q_pgs = (void **)&queue[1];
15167        queue->entry_cnt_per_pg = hw_page_size / entry_size;
15168        queue->entry_size = entry_size;
15169        queue->entry_count = entry_count;
15170        queue->page_size = hw_page_size;
15171        queue->phba = phba;
15172
15173        for (x = 0; x < queue->page_count; x++) {
15174                dmabuf = kzalloc_node(sizeof(*dmabuf), GFP_KERNEL,
15175                                      dev_to_node(&phba->pcidev->dev));
15176                if (!dmabuf)
15177                        goto out_fail;
15178                dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
15179                                                  hw_page_size, &dmabuf->phys,
15180                                                  GFP_KERNEL);
15181                if (!dmabuf->virt) {
15182                        kfree(dmabuf);
15183                        goto out_fail;
15184                }
15185                dmabuf->buffer_tag = x;
15186                list_add_tail(&dmabuf->list, &queue->page_list);
15187                /* use lpfc_sli4_qe to index a particular entry in this page */
15188                queue->q_pgs[x] = dmabuf->virt;
15189        }
15190        INIT_WORK(&queue->irqwork, lpfc_sli4_hba_process_cq);
15191        INIT_WORK(&queue->spwork, lpfc_sli4_sp_process_cq);
15192        INIT_DELAYED_WORK(&queue->sched_irqwork, lpfc_sli4_dly_hba_process_cq);
15193        INIT_DELAYED_WORK(&queue->sched_spwork, lpfc_sli4_dly_sp_process_cq);
15194
15195        /* notify_interval will be set during q creation */
15196
15197        return queue;
15198out_fail:
15199        lpfc_sli4_queue_free(queue);
15200        return NULL;
15201}
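     /* Allocation sketch (illustrative only; the eq_esize/eq_ecount field
      * names are assumed from the driver's init path):
      *
      *         struct lpfc_queue *qdesc;
      *
      *         qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
      *                                       phba->sli4_hba.eq_esize,
      *                                       phba->sli4_hba.eq_ecount, cpu);
      *         if (!qdesc)
      *                 return -ENOMEM;
      */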
15202
15203/**
15204 * lpfc_dual_chute_pci_bar_map - Map pci base address register to host memory
15205 * @phba: HBA structure that indicates port to create a queue on.
15206 * @pci_barset: PCI BAR set flag.
15207 *
15208 * This function returns the host memory address to which the specified
15209 * PCI BAR set has already been iomapped by the driver. The returned host
15210 * memory address can be NULL.
15211 */
15212static void __iomem *
15213lpfc_dual_chute_pci_bar_map(struct lpfc_hba *phba, uint16_t pci_barset)
15214{
15215        if (!phba->pcidev)
15216                return NULL;
15217
15218        switch (pci_barset) {
15219        case WQ_PCI_BAR_0_AND_1:
15220                return phba->pci_bar0_memmap_p;
15221        case WQ_PCI_BAR_2_AND_3:
15222                return phba->pci_bar2_memmap_p;
15223        case WQ_PCI_BAR_4_AND_5:
15224                return phba->pci_bar4_memmap_p;
15225        default:
15226                break;
15227        }
15228        return NULL;
15229}
15230
15231/**
15232 * lpfc_modify_hba_eq_delay - Modify Delay Multiplier on EQs
15233 * @phba: HBA structure that EQs are on.
15234 * @startq: The starting EQ index to modify
15235 * @numq: The number of EQs (consecutive indexes) to modify
15236 * @usdelay: amount of delay
15237 *
15238 * This function revises the EQ delay on 1 or more EQs. The EQ delay
15239 * is set either by writing to a register (if supported by the SLI Port)
15240 * or by mailbox command. The mailbox command allows several EQs to be
15241 * updated at once.
15242 *
15243 * The @phba struct is used to send a mailbox command to HBA. The @startq
15244 * is used to get the starting EQ index to change. The @numq value is
15245 * used to specify how many consecutive EQ indexes, starting at EQ index,
15246 * are to be changed. This function is synchronous and waits for any
15247 * mailbox command to finish before returning.
15248 *
15249 * This function returns no status to the caller. If the delay cannot
15250 * be set, an error is logged instead. Note: if the mailbox command
15251 * fails, some EQs may still have had their delay multiplier changed
15252 * before the failure.
15253 **/
15254void
15255lpfc_modify_hba_eq_delay(struct lpfc_hba *phba, uint32_t startq,
15256                         uint32_t numq, uint32_t usdelay)
15257{
15258        struct lpfc_mbx_modify_eq_delay *eq_delay;
15259        LPFC_MBOXQ_t *mbox;
15260        struct lpfc_queue *eq;
15261        int cnt = 0, rc, length;
15262        uint32_t shdr_status, shdr_add_status;
15263        uint32_t dmult;
15264        int qidx;
15265        union lpfc_sli4_cfg_shdr *shdr;
15266
15267        if (startq >= phba->cfg_irq_chann)
15268                return;
15269
15270        if (usdelay > 0xFFFF) {
15271                lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP | LOG_NVME,
15272                                "6429 usdelay %d too large. Scaled down to "
15273                                "0xFFFF.\n", usdelay);
15274                usdelay = 0xFFFF;
15275        }
15276
15277        /* set values by EQ_DELAY register if supported */
15278        if (phba->sli.sli_flag & LPFC_SLI_USE_EQDR) {
15279                for (qidx = startq; qidx < phba->cfg_irq_chann; qidx++) {
15280                        eq = phba->sli4_hba.hba_eq_hdl[qidx].eq;
15281                        if (!eq)
15282                                continue;
15283
15284                        lpfc_sli4_mod_hba_eq_delay(phba, eq, usdelay);
15285
15286                        if (++cnt >= numq)
15287                                break;
15288                }
15289                return;
15290        }
15291
15292        /* Otherwise, set values by mailbox cmd */
15293
15294        mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
15295        if (!mbox) {
15296                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15297                                "6428 Failed allocating mailbox cmd buffer."
15298                                " EQ delay was not set.\n");
15299                return;
15300        }
15301        length = (sizeof(struct lpfc_mbx_modify_eq_delay) -
15302                  sizeof(struct lpfc_sli4_cfg_mhdr));
15303        lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
15304                         LPFC_MBOX_OPCODE_MODIFY_EQ_DELAY,
15305                         length, LPFC_SLI4_MBX_EMBED);
15306        eq_delay = &mbox->u.mqe.un.eq_delay;
15307
15308        /* Calculate delay multiplier from maximum interrupts per second */
15309        dmult = (usdelay * LPFC_DMULT_CONST) / LPFC_SEC_TO_USEC;
15310        if (dmult)
15311                dmult--;
15312        if (dmult > LPFC_DMULT_MAX)
15313                dmult = LPFC_DMULT_MAX;
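             /* A dmult of 0 adds no coalescing delay; the clamp keeps the
              * multiplier within the context field's LPFC_DMULT_MAX encoding.
              */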
15314
15315        for (qidx = startq; qidx < phba->cfg_irq_chann; qidx++) {
15316                eq = phba->sli4_hba.hba_eq_hdl[qidx].eq;
15317                if (!eq)
15318                        continue;
15319                eq->q_mode = usdelay;
15320                eq_delay->u.request.eq[cnt].eq_id = eq->queue_id;
15321                eq_delay->u.request.eq[cnt].phase = 0;
15322                eq_delay->u.request.eq[cnt].delay_multi = dmult;
15323
15324                if (++cnt >= numq)
15325                        break;
15326        }
15327        eq_delay->u.request.num_eq = cnt;
15328
15329        mbox->vport = phba->pport;
15330        mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
15331        mbox->ctx_buf = NULL;
15332        mbox->ctx_ndlp = NULL;
15333        rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
15334        shdr = (union lpfc_sli4_cfg_shdr *) &eq_delay->header.cfg_shdr;
15335        shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
15336        shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
15337        if (shdr_status || shdr_add_status || rc) {
15338                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15339                                "2512 MODIFY_EQ_DELAY mailbox failed with "
15340                                "status x%x add_status x%x, mbx status x%x\n",
15341                                shdr_status, shdr_add_status, rc);
15342        }
15343        mempool_free(mbox, phba->mbox_mem_pool);
15344        return;
15345}
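     /* Illustrative call (the delay value is an example only): request a
      * 16 usec coalescing delay on every EQ of the port:
      *
      *         lpfc_modify_hba_eq_delay(phba, 0, phba->cfg_irq_chann, 16);
      */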
15346
15347/**
15348 * lpfc_eq_create - Create an Event Queue on the HBA
15349 * @phba: HBA structure that indicates port to create a queue on.
15350 * @eq: The queue structure to use to create the event queue.
15351 * @imax: The maximum interrupt per second limit.
15352 *
15353 * This function creates an event queue, as detailed in @eq, on a port,
15354 * described by @phba by sending an EQ_CREATE mailbox command to the HBA.
15355 *
15356 * The @phba struct is used to send mailbox command to HBA. The @eq struct
15357 * is used to get the entry count and entry size that are necessary to
15358 * determine the number of pages to allocate and use for this queue. This
15359 * function will send the EQ_CREATE mailbox command to the HBA to setup the
15360 * event queue. This function is synchronous and waits for the mailbox
15361 * command to finish before continuing.
15362 *
15363 * On success this function will return a zero. If unable to allocate enough
15364 * memory this function will return -ENOMEM. If the queue create mailbox command
15365 * fails this function will return -ENXIO.
15366 **/
15367int
15368lpfc_eq_create(struct lpfc_hba *phba, struct lpfc_queue *eq, uint32_t imax)
15369{
15370        struct lpfc_mbx_eq_create *eq_create;
15371        LPFC_MBOXQ_t *mbox;
15372        int rc, length, status = 0;
15373        struct lpfc_dmabuf *dmabuf;
15374        uint32_t shdr_status, shdr_add_status;
15375        union lpfc_sli4_cfg_shdr *shdr;
15376        uint16_t dmult;
15377        uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
15378
15379        /* sanity check on queue memory */
15380        if (!eq)
15381                return -ENODEV;
15382        if (!phba->sli4_hba.pc_sli4_params.supported)
15383                hw_page_size = SLI4_PAGE_SIZE;
15384
15385        mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
15386        if (!mbox)
15387                return -ENOMEM;
15388        length = (sizeof(struct lpfc_mbx_eq_create) -
15389                  sizeof(struct lpfc_sli4_cfg_mhdr));
15390        lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
15391                         LPFC_MBOX_OPCODE_EQ_CREATE,
15392                         length, LPFC_SLI4_MBX_EMBED);
15393        eq_create = &mbox->u.mqe.un.eq_create;
15394        shdr = (union lpfc_sli4_cfg_shdr *) &eq_create->header.cfg_shdr;
15395        bf_set(lpfc_mbx_eq_create_num_pages, &eq_create->u.request,
15396               eq->page_count);
15397        bf_set(lpfc_eq_context_size, &eq_create->u.request.context,
15398               LPFC_EQE_SIZE);
15399        bf_set(lpfc_eq_context_valid, &eq_create->u.request.context, 1);
15400
15401        /* Use version 2 of CREATE_EQ if eqav is set */
15402        if (phba->sli4_hba.pc_sli4_params.eqav) {
15403                bf_set(lpfc_mbox_hdr_version, &shdr->request,
15404                       LPFC_Q_CREATE_VERSION_2);
15405                bf_set(lpfc_eq_context_autovalid, &eq_create->u.request.context,
15406                       phba->sli4_hba.pc_sli4_params.eqav);
15407        }
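             /* With autovalid, the EQE valid-bit polarity flips on each pass
              * through the ring, so consumed entries need not be cleared by
              * the host (note assumed from SLI-4 autovalid semantics).
              */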
15408
15409        /* don't setup delay multiplier using EQ_CREATE */
15410        dmult = 0;
15411        bf_set(lpfc_eq_context_delay_multi, &eq_create->u.request.context,
15412               dmult);
15413        switch (eq->entry_count) {
15414        default:
15415                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15416                                "0360 Unsupported EQ count. (%d)\n",
15417                                eq->entry_count);
15418                if (eq->entry_count < 256) {
15419                        status = -EINVAL;
15420                        goto out;
15421                }
15422                fallthrough;    /* otherwise default to smallest count */
15423        case 256:
15424                bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
15425                       LPFC_EQ_CNT_256);
15426                break;
15427        case 512:
15428                bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
15429                       LPFC_EQ_CNT_512);
15430                break;
15431        case 1024:
15432                bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
15433                       LPFC_EQ_CNT_1024);
15434                break;
15435        case 2048:
15436                bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
15437                       LPFC_EQ_CNT_2048);
15438                break;
15439        case 4096:
15440                bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
15441                       LPFC_EQ_CNT_4096);
15442                break;
15443        }
15444        list_for_each_entry(dmabuf, &eq->page_list, list) {
15445                memset(dmabuf->virt, 0, hw_page_size);
15446                eq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
15447                                        putPaddrLow(dmabuf->phys);
15448                eq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
15449                                        putPaddrHigh(dmabuf->phys);
15450        }
15451        mbox->vport = phba->pport;
15452        mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
15453        mbox->ctx_buf = NULL;
15454        mbox->ctx_ndlp = NULL;
15455        rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
15456        shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
15457        shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
15458        if (shdr_status || shdr_add_status || rc) {
15459                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15460                                "2500 EQ_CREATE mailbox failed with "
15461                                "status x%x add_status x%x, mbx status x%x\n",
15462                                shdr_status, shdr_add_status, rc);
15463                status = -ENXIO;
15464        }
15465        eq->type = LPFC_EQ;
15466        eq->subtype = LPFC_NONE;
15467        eq->queue_id = bf_get(lpfc_mbx_eq_create_q_id, &eq_create->u.response);
15468        if (eq->queue_id == 0xFFFF)
15469                status = -ENXIO;
15470        eq->host_index = 0;
15471        eq->notify_interval = LPFC_EQ_NOTIFY_INTRVL;
15472        eq->max_proc_limit = LPFC_EQ_MAX_PROC_LIMIT;
15473out:
15474        mempool_free(mbox, phba->mbox_mem_pool);
15475        return status;
15476}
15477
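     /* irq_poll callback: drains the CQ from softirq context and reports one
      * unit of work consumed against the polling budget.
      */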
15478static int lpfc_cq_poll_hdler(struct irq_poll *iop, int budget)
15479{
15480        struct lpfc_queue *cq = container_of(iop, struct lpfc_queue, iop);
15481
15482        __lpfc_sli4_hba_process_cq(cq, LPFC_IRQ_POLL);
15483
15484        return 1;
15485}
15486
15487/**
15488 * lpfc_cq_create - Create a Completion Queue on the HBA
15489 * @phba: HBA structure that indicates port to create a queue on.
15490 * @cq: The queue structure to use to create the completion queue.
15491 * @eq: The event queue to bind this completion queue to.
15492 * @type: Type of queue (EQ, GCQ, MCQ, WCQ, etc).
15493 * @subtype: Functional purpose of the queue (MBOX, IO, ELS, NVMET, etc).
15494 *
15495 * This function creates a completion queue, as detailed in @cq, on a port,
15496 * described by @phba by sending a CQ_CREATE mailbox command to the HBA.
15497 *
15498 * The @phba struct is used to send mailbox command to HBA. The @cq struct
15499 * is used to get the entry count and entry size that are necessary to
15500 * determine the number of pages to allocate and use for this queue. The @eq
15501 * is used to indicate which event queue to bind this completion queue to. This
15502 * function will send the CQ_CREATE mailbox command to the HBA to setup the
15503 * completion queue. This function is asynchronous and will wait for the mailbox
15504 * completion queue. This function is synchronous and waits for the mailbox
15505 *
15506 * On success this function will return a zero. If unable to allocate enough
15507 * memory this function will return -ENOMEM. If the queue create mailbox command
15508 * fails this function will return -ENXIO.
15509 **/
15510int
15511lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq,
15512               struct lpfc_queue *eq, uint32_t type, uint32_t subtype)
15513{
15514        struct lpfc_mbx_cq_create *cq_create;
15515        struct lpfc_dmabuf *dmabuf;
15516        LPFC_MBOXQ_t *mbox;
15517        int rc, length, status = 0;
15518        uint32_t shdr_status, shdr_add_status;
15519        union lpfc_sli4_cfg_shdr *shdr;
15520
15521        /* sanity check on queue memory */
15522        if (!cq || !eq)
15523                return -ENODEV;
15524
15525        mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
15526        if (!mbox)
15527                return -ENOMEM;
15528        length = (sizeof(struct lpfc_mbx_cq_create) -
15529                  sizeof(struct lpfc_sli4_cfg_mhdr));
15530        lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
15531                         LPFC_MBOX_OPCODE_CQ_CREATE,
15532                         length, LPFC_SLI4_MBX_EMBED);
15533        cq_create = &mbox->u.mqe.un.cq_create;
15534        shdr = (union lpfc_sli4_cfg_shdr *) &cq_create->header.cfg_shdr;
15535        bf_set(lpfc_mbx_cq_create_num_pages, &cq_create->u.request,
15536                    cq->page_count);
15537        bf_set(lpfc_cq_context_event, &cq_create->u.request.context, 1);
15538        bf_set(lpfc_cq_context_valid, &cq_create->u.request.context, 1);
15539        bf_set(lpfc_mbox_hdr_version, &shdr->request,
15540               phba->sli4_hba.pc_sli4_params.cqv);
15541        if (phba->sli4_hba.pc_sli4_params.cqv == LPFC_Q_CREATE_VERSION_2) {
15542                bf_set(lpfc_mbx_cq_create_page_size, &cq_create->u.request,
15543                       (cq->page_size / SLI4_PAGE_SIZE));
15544                bf_set(lpfc_cq_eq_id_2, &cq_create->u.request.context,
15545                       eq->queue_id);
15546                bf_set(lpfc_cq_context_autovalid, &cq_create->u.request.context,
15547                       phba->sli4_hba.pc_sli4_params.cqav);
15548        } else {
15549                bf_set(lpfc_cq_eq_id, &cq_create->u.request.context,
15550                       eq->queue_id);
15551        }
15552        switch (cq->entry_count) {
15553        case 2048:
15554        case 4096:
15555                if (phba->sli4_hba.pc_sli4_params.cqv ==
15556                    LPFC_Q_CREATE_VERSION_2) {
15557                        cq_create->u.request.context.lpfc_cq_context_count =
15558                                cq->entry_count;
15559                        bf_set(lpfc_cq_context_count,
15560                               &cq_create->u.request.context,
15561                               LPFC_CQ_CNT_WORD7);
15562                        break;
15563                }
15564                fallthrough;
15565        default:
15566                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15567                                "0361 Unsupported CQ count: "
15568                                "entry cnt %d sz %d pg cnt %d\n",
15569                                cq->entry_count, cq->entry_size,
15570                                cq->page_count);
15571                if (cq->entry_count < 256) {
15572                        status = -EINVAL;
15573                        goto out;
15574                }
15575                fallthrough;    /* otherwise default to smallest count */
15576        case 256:
15577                bf_set(lpfc_cq_context_count, &cq_create->u.request.context,
15578                       LPFC_CQ_CNT_256);
15579                break;
15580        case 512:
15581                bf_set(lpfc_cq_context_count, &cq_create->u.request.context,
15582                       LPFC_CQ_CNT_512);
15583                break;
15584        case 1024:
15585                bf_set(lpfc_cq_context_count, &cq_create->u.request.context,
15586                       LPFC_CQ_CNT_1024);
15587                break;
15588        }
15589        list_for_each_entry(dmabuf, &cq->page_list, list) {
15590                memset(dmabuf->virt, 0, cq->page_size);
15591                cq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
15592                                        putPaddrLow(dmabuf->phys);
15593                cq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
15594                                        putPaddrHigh(dmabuf->phys);
15595        }
15596        rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
15597
15598        /* The IOCTL status is embedded in the mailbox subheader. */
15599        shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
15600        shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
15601        if (shdr_status || shdr_add_status || rc) {
15602                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15603                                "2501 CQ_CREATE mailbox failed with "
15604                                "status x%x add_status x%x, mbx status x%x\n",
15605                                shdr_status, shdr_add_status, rc);
15606                status = -ENXIO;
15607                goto out;
15608        }
15609        cq->queue_id = bf_get(lpfc_mbx_cq_create_q_id, &cq_create->u.response);
15610        if (cq->queue_id == 0xFFFF) {
15611                status = -ENXIO;
15612                goto out;
15613        }
15614        /* link the cq onto the parent eq child list */
15615        list_add_tail(&cq->list, &eq->child_list);
15616        /* Set up completion queue's type and subtype */
15617        cq->type = type;
15618        cq->subtype = subtype;
15620        cq->assoc_qid = eq->queue_id;
15621        cq->assoc_qp = eq;
15622        cq->host_index = 0;
15623        cq->notify_interval = LPFC_CQ_NOTIFY_INTRVL;
15624        cq->max_proc_limit = min(phba->cfg_cq_max_proc_limit, cq->entry_count);
15625
15626        if (cq->queue_id > phba->sli4_hba.cq_max)
15627                phba->sli4_hba.cq_max = cq->queue_id;
15628
15629        irq_poll_init(&cq->iop, LPFC_IRQ_POLL_WEIGHT, lpfc_cq_poll_hdler);
15630out:
15631        mempool_free(mbox, phba->mbox_mem_pool);
15632        return status;
15633}
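     /* Creation-order sketch (illustrative only; the subtype shown is an
      * example): the EQ must exist before a CQ can bind to it:
      *
      *         if (!lpfc_eq_create(phba, eq, phba->cfg_fcp_imax))
      *                 rc = lpfc_cq_create(phba, cq, eq, LPFC_WCQ, LPFC_IO);
      */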
15634
15635/**
15636 * lpfc_cq_create_set - Create a set of Completion Queues on the HBA for MRQ
15637 * @phba: HBA structure that indicates port to create a queue on.
15638 * @cqp: The queue structure array to use to create the completion queues.
15639 * @hdwq: The hardware queue array with the EQs to bind completion queues to.
15640 * @type: Type of queue (EQ, GCQ, MCQ, WCQ, etc).
15641 * @subtype: Functional purpose of the queue (MBOX, IO, ELS, NVMET, etc).
15642 *
15643 * This function creates a set of completion queues to support MRQ,
15644 * as detailed in @cqp, on a port,
15645 * described by @phba by sending a CREATE_CQ_SET mailbox command to the HBA.
15646 *
15647 * The @phba struct is used to send mailbox command to HBA. Each @cqp entry
15648 * is used to get the entry count and entry size that are necessary to
15649 * determine the number of pages to allocate and use for that queue. The
15650 * EQs in @hdwq indicate which event queue to bind each completion queue
15651 * to. This function will send the CREATE_CQ_SET mailbox command to the
15652 * HBA to setup the completion queues. This function is synchronous and
15653 * waits for the mailbox command to finish before continuing.
15654 *
15655 * On success this function will return a zero. If unable to allocate enough
15656 * memory this function will return -ENOMEM. If the queue create mailbox command
15657 * fails this function will return -ENXIO.
15658 **/
15659int
15660lpfc_cq_create_set(struct lpfc_hba *phba, struct lpfc_queue **cqp,
15661                   struct lpfc_sli4_hdw_queue *hdwq, uint32_t type,
15662                   uint32_t subtype)
15663{
15664        struct lpfc_queue *cq;
15665        struct lpfc_queue *eq;
15666        struct lpfc_mbx_cq_create_set *cq_set;
15667        struct lpfc_dmabuf *dmabuf;
15668        LPFC_MBOXQ_t *mbox;
15669        int rc, length, alloclen, status = 0;
15670        int cnt, idx, numcq, page_idx = 0;
15671        uint32_t shdr_status, shdr_add_status;
15672        union lpfc_sli4_cfg_shdr *shdr;
15673        uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
15674
15675        /* sanity check on queue memory */
15676        numcq = phba->cfg_nvmet_mrq;
15677        if (!cqp || !hdwq || !numcq)
15678                return -ENODEV;
15679
15680        mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
15681        if (!mbox)
15682                return -ENOMEM;
15683
15684        length = sizeof(struct lpfc_mbx_cq_create_set);
15685        length += ((numcq * cqp[0]->page_count) *
15686                   sizeof(struct dma_address));
15687        alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
15688                        LPFC_MBOX_OPCODE_FCOE_CQ_CREATE_SET, length,
15689                        LPFC_SLI4_MBX_NEMBED);
15690        if (alloclen < length) {
15691                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15692                                "3098 Allocated DMA memory size (%d) is "
15693                                "less than the requested DMA memory size "
15694                                "(%d)\n", alloclen, length);
15695                status = -ENOMEM;
15696                goto out;
15697        }
15698        cq_set = mbox->sge_array->addr[0];
15699        shdr = (union lpfc_sli4_cfg_shdr *)&cq_set->cfg_shdr;
15700        bf_set(lpfc_mbox_hdr_version, &shdr->request, 0);
15701
15702        for (idx = 0; idx < numcq; idx++) {
15703                cq = cqp[idx];
15704                eq = hdwq[idx].hba_eq;
15705                if (!cq || !eq) {
15706                        status = -ENOMEM;
15707                        goto out;
15708                }
15709                if (!phba->sli4_hba.pc_sli4_params.supported)
15710                        hw_page_size = cq->page_size;
15711
15712                switch (idx) {
15713                case 0:
15714                        bf_set(lpfc_mbx_cq_create_set_page_size,
15715                               &cq_set->u.request,
15716                               (hw_page_size / SLI4_PAGE_SIZE));
15717                        bf_set(lpfc_mbx_cq_create_set_num_pages,
15718                               &cq_set->u.request, cq->page_count);
15719                        bf_set(lpfc_mbx_cq_create_set_evt,
15720                               &cq_set->u.request, 1);
15721                        bf_set(lpfc_mbx_cq_create_set_valid,
15722                               &cq_set->u.request, 1);
15723                        bf_set(lpfc_mbx_cq_create_set_cqe_size,
15724                               &cq_set->u.request, 0);
15725                        bf_set(lpfc_mbx_cq_create_set_num_cq,
15726                               &cq_set->u.request, numcq);
15727                        bf_set(lpfc_mbx_cq_create_set_autovalid,
15728                               &cq_set->u.request,
15729                               phba->sli4_hba.pc_sli4_params.cqav);
15730                        switch (cq->entry_count) {
15731                        case 2048:
15732                        case 4096:
15733                                if (phba->sli4_hba.pc_sli4_params.cqv ==
15734                                    LPFC_Q_CREATE_VERSION_2) {
15735                                        bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
15736                                               &cq_set->u.request,
15737                                                cq->entry_count);
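                                             /* NOTE: this second bf_set targets
                                              * the same cqe_cnt field and
                                              * overwrites the raw count written
                                              * just above; only the WORD7
                                              * encoding reaches the firmware.
                                              */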
15738                                        bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
15739                                               &cq_set->u.request,
15740                                               LPFC_CQ_CNT_WORD7);
15741                                        break;
15742                                }
15743                                fallthrough;
15744                        default:
15745                                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15746                                                "3118 Bad CQ count. (%d)\n",
15747                                                cq->entry_count);
15748                                if (cq->entry_count < 256) {
15749                                        status = -EINVAL;
15750                                        goto out;
15751                                }
15752                                fallthrough;    /* otherwise default to smallest */
15753                        case 256:
15754                                bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
15755                                       &cq_set->u.request, LPFC_CQ_CNT_256);
15756                                break;
15757                        case 512:
15758                                bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
15759                                       &cq_set->u.request, LPFC_CQ_CNT_512);
15760                                break;
15761                        case 1024:
15762                                bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
15763                                       &cq_set->u.request, LPFC_CQ_CNT_1024);
15764                                break;
15765                        }
15766                        bf_set(lpfc_mbx_cq_create_set_eq_id0,
15767                               &cq_set->u.request, eq->queue_id);
15768                        break;
15769                case 1:
15770                        bf_set(lpfc_mbx_cq_create_set_eq_id1,
15771                               &cq_set->u.request, eq->queue_id);
15772                        break;
15773                case 2:
15774                        bf_set(lpfc_mbx_cq_create_set_eq_id2,
15775                               &cq_set->u.request, eq->queue_id);
15776                        break;
15777                case 3:
15778                        bf_set(lpfc_mbx_cq_create_set_eq_id3,
15779                               &cq_set->u.request, eq->queue_id);
15780                        break;
15781                case 4:
15782                        bf_set(lpfc_mbx_cq_create_set_eq_id4,
15783                               &cq_set->u.request, eq->queue_id);
15784                        break;
15785                case 5:
15786                        bf_set(lpfc_mbx_cq_create_set_eq_id5,
15787                               &cq_set->u.request, eq->queue_id);
15788                        break;
15789                case 6:
15790                        bf_set(lpfc_mbx_cq_create_set_eq_id6,
15791                               &cq_set->u.request, eq->queue_id);
15792                        break;
15793                case 7:
15794                        bf_set(lpfc_mbx_cq_create_set_eq_id7,
15795                               &cq_set->u.request, eq->queue_id);
15796                        break;
15797                case 8:
15798                        bf_set(lpfc_mbx_cq_create_set_eq_id8,
15799                               &cq_set->u.request, eq->queue_id);
15800                        break;
15801                case 9:
15802                        bf_set(lpfc_mbx_cq_create_set_eq_id9,
15803                               &cq_set->u.request, eq->queue_id);
15804                        break;
15805                case 10:
15806                        bf_set(lpfc_mbx_cq_create_set_eq_id10,
15807                               &cq_set->u.request, eq->queue_id);
15808                        break;
15809                case 11:
15810                        bf_set(lpfc_mbx_cq_create_set_eq_id11,
15811                               &cq_set->u.request, eq->queue_id);
15812                        break;
15813                case 12:
15814                        bf_set(lpfc_mbx_cq_create_set_eq_id12,
15815                               &cq_set->u.request, eq->queue_id);
15816                        break;
15817                case 13:
15818                        bf_set(lpfc_mbx_cq_create_set_eq_id13,
15819                               &cq_set->u.request, eq->queue_id);
15820                        break;
15821                case 14:
15822                        bf_set(lpfc_mbx_cq_create_set_eq_id14,
15823                               &cq_set->u.request, eq->queue_id);
15824                        break;
15825                case 15:
15826                        bf_set(lpfc_mbx_cq_create_set_eq_id15,
15827                               &cq_set->u.request, eq->queue_id);
15828                        break;
15829                }
15830
15831                /* link the cq onto the parent eq child list */
15832                list_add_tail(&cq->list, &eq->child_list);
15833                /* Set up completion queue's type and subtype */
15834                cq->type = type;
15835                cq->subtype = subtype;
15836                cq->assoc_qid = eq->queue_id;
15837                cq->assoc_qp = eq;
15838                cq->host_index = 0;
15839                cq->notify_interval = LPFC_CQ_NOTIFY_INTRVL;
15840                cq->max_proc_limit = min(phba->cfg_cq_max_proc_limit,
15841                                         cq->entry_count);
15842                cq->chann = idx;
15843
15844                rc = 0;
15845                list_for_each_entry(dmabuf, &cq->page_list, list) {
15846                        memset(dmabuf->virt, 0, hw_page_size);
15847                        cnt = page_idx + dmabuf->buffer_tag;
15848                        cq_set->u.request.page[cnt].addr_lo =
15849                                        putPaddrLow(dmabuf->phys);
15850                        cq_set->u.request.page[cnt].addr_hi =
15851                                        putPaddrHigh(dmabuf->phys);
15852                        rc++;
15853                }
15854                page_idx += rc;
15855        }
15856
15857        rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
15858
15859        /* The IOCTL status is embedded in the mailbox subheader. */
15860        shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
15861        shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
15862        if (shdr_status || shdr_add_status || rc) {
15863                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15864                                "3119 CQ_CREATE_SET mailbox failed with "
15865                                "status x%x add_status x%x, mbx status x%x\n",
15866                                shdr_status, shdr_add_status, rc);
15867                status = -ENXIO;
15868                goto out;
15869        }
15870        rc = bf_get(lpfc_mbx_cq_create_set_base_id, &cq_set->u.response);
15871        if (rc == 0xFFFF) {
15872                status = -ENXIO;
15873                goto out;
15874        }
15875
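             /* The response carries only the base CQ id; the created set is
              * numbered consecutively from it (e.g. base id 100 with numcq 4
              * yields ids 100-103).
              */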
15876        for (idx = 0; idx < numcq; idx++) {
15877                cq = cqp[idx];
15878                cq->queue_id = rc + idx;
15879                if (cq->queue_id > phba->sli4_hba.cq_max)
15880                        phba->sli4_hba.cq_max = cq->queue_id;
15881        }
15882
15883out:
15884        lpfc_sli4_mbox_cmd_free(phba, mbox);
15885        return status;
15886}
15887
15888/**
15889 * lpfc_mq_create_fb_init - Send MCC_CREATE without async events registration
15890 * @phba: HBA structure that indicates port to create a queue on.
15891 * @mq: The queue structure to use to create the mailbox queue.
15892 * @mbox: An allocated pointer to type LPFC_MBOXQ_t
15893 * @cq: The completion queue to associate with this mailbox queue.
15894 *
15895 * This function provides failback (fb) functionality when the
15896 * mq_create_ext fails on older FW generations.  Its purpose is otherwise
15897 * identical to mq_create_ext.
15898 *
15899 * This routine cannot fail as all attributes were previously accessed and
15900 * initialized in mq_create_ext.
15901 **/
15902static void
15903lpfc_mq_create_fb_init(struct lpfc_hba *phba, struct lpfc_queue *mq,
15904                       LPFC_MBOXQ_t *mbox, struct lpfc_queue *cq)
15905{
15906        struct lpfc_mbx_mq_create *mq_create;
15907        struct lpfc_dmabuf *dmabuf;
15908        int length;
15909
15910        length = (sizeof(struct lpfc_mbx_mq_create) -
15911                  sizeof(struct lpfc_sli4_cfg_mhdr));
15912        lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
15913                         LPFC_MBOX_OPCODE_MQ_CREATE,
15914                         length, LPFC_SLI4_MBX_EMBED);
15915        mq_create = &mbox->u.mqe.un.mq_create;
15916        bf_set(lpfc_mbx_mq_create_num_pages, &mq_create->u.request,
15917               mq->page_count);
15918        bf_set(lpfc_mq_context_cq_id, &mq_create->u.request.context,
15919               cq->queue_id);
15920        bf_set(lpfc_mq_context_valid, &mq_create->u.request.context, 1);
15921        switch (mq->entry_count) {
15922        case 16:
15923                bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
15924                       LPFC_MQ_RING_SIZE_16);
15925                break;
15926        case 32:
15927                bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
15928                       LPFC_MQ_RING_SIZE_32);
15929                break;
15930        case 64:
15931                bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
15932                       LPFC_MQ_RING_SIZE_64);
15933                break;
15934        case 128:
15935                bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
15936                       LPFC_MQ_RING_SIZE_128);
15937                break;
15938        }
15939        list_for_each_entry(dmabuf, &mq->page_list, list) {
15940                mq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
15941                        putPaddrLow(dmabuf->phys);
15942                mq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
15943                        putPaddrHigh(dmabuf->phys);
15944        }
15945}
15946
15947/**
15948 * lpfc_mq_create - Create a mailbox Queue on the HBA
15949 * @phba: HBA structure that indicates port to create a queue on.
15950 * @mq: The queue structure to use to create the mailbox queue.
15951 * @cq: The completion queue to associate with this mailbox queue.
15952 * @subtype: The queue's subtype.
15953 *
15954 * This function creates a mailbox queue, as detailed in @mq, on a port,
15955 * described by @phba by sending a MQ_CREATE mailbox command to the HBA.
15956 *
15957 * The @phba struct is used to send mailbox command to HBA. The @mq struct
15958 * is used to get the entry count and entry size that are necessary to
15959 * determine the number of pages to allocate and use for this queue. This
15960 * function will send the MQ_CREATE mailbox command to the HBA to setup the
15961 * mailbox queue. This function is synchronous and waits for the mailbox
15962 * command to finish before continuing.
15963 *
15964 * On success this function will return a zero. If unable to allocate enough
15965 * memory this function will return -ENOMEM. If the queue create mailbox command
15966 * fails this function will return -ENXIO.
15967 **/
15968int32_t
15969lpfc_mq_create(struct lpfc_hba *phba, struct lpfc_queue *mq,
15970               struct lpfc_queue *cq, uint32_t subtype)
15971{
15972        struct lpfc_mbx_mq_create *mq_create;
15973        struct lpfc_mbx_mq_create_ext *mq_create_ext;
15974        struct lpfc_dmabuf *dmabuf;
15975        LPFC_MBOXQ_t *mbox;
15976        int rc, length, status = 0;
15977        uint32_t shdr_status, shdr_add_status;
15978        union lpfc_sli4_cfg_shdr *shdr;
15979        uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
15980
15981        /* sanity check on queue memory */
15982        if (!mq || !cq)
15983                return -ENODEV;
15984        if (!phba->sli4_hba.pc_sli4_params.supported)
15985                hw_page_size = SLI4_PAGE_SIZE;
15986
15987        mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
15988        if (!mbox)
15989                return -ENOMEM;
15990        length = (sizeof(struct lpfc_mbx_mq_create_ext) -
15991                  sizeof(struct lpfc_sli4_cfg_mhdr));
15992        lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
15993                         LPFC_MBOX_OPCODE_MQ_CREATE_EXT,
15994                         length, LPFC_SLI4_MBX_EMBED);
15995
15996        mq_create_ext = &mbox->u.mqe.un.mq_create_ext;
15997        shdr = (union lpfc_sli4_cfg_shdr *) &mq_create_ext->header.cfg_shdr;
15998        bf_set(lpfc_mbx_mq_create_ext_num_pages,
15999               &mq_create_ext->u.request, mq->page_count);
16000        bf_set(lpfc_mbx_mq_create_ext_async_evt_link,
16001               &mq_create_ext->u.request, 1);
16002        bf_set(lpfc_mbx_mq_create_ext_async_evt_fip,
16003               &mq_create_ext->u.request, 1);
16004        bf_set(lpfc_mbx_mq_create_ext_async_evt_group5,
16005               &mq_create_ext->u.request, 1);
16006        bf_set(lpfc_mbx_mq_create_ext_async_evt_fc,
16007               &mq_create_ext->u.request, 1);
16008        bf_set(lpfc_mbx_mq_create_ext_async_evt_sli,
16009               &mq_create_ext->u.request, 1);
16010        bf_set(lpfc_mq_context_valid, &mq_create_ext->u.request.context, 1);
16011        bf_set(lpfc_mbox_hdr_version, &shdr->request,
16012               phba->sli4_hba.pc_sli4_params.mqv);
16013        if (phba->sli4_hba.pc_sli4_params.mqv == LPFC_Q_CREATE_VERSION_1)
16014                bf_set(lpfc_mbx_mq_create_ext_cq_id, &mq_create_ext->u.request,
16015                       cq->queue_id);
16016        else
16017                bf_set(lpfc_mq_context_cq_id, &mq_create_ext->u.request.context,
16018                       cq->queue_id);
16019        switch (mq->entry_count) {
16020        default:
16021                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16022                                "0362 Unsupported MQ count. (%d)\n",
16023                                mq->entry_count);
16024                if (mq->entry_count < 16) {
16025                        status = -EINVAL;
16026                        goto out;
16027                }
16028                fallthrough;    /* otherwise default to smallest count */
16029        case 16:
16030                bf_set(lpfc_mq_context_ring_size,
16031                       &mq_create_ext->u.request.context,
16032                       LPFC_MQ_RING_SIZE_16);
16033                break;
16034        case 32:
16035                bf_set(lpfc_mq_context_ring_size,
16036                       &mq_create_ext->u.request.context,
16037                       LPFC_MQ_RING_SIZE_32);
16038                break;
16039        case 64:
16040                bf_set(lpfc_mq_context_ring_size,
16041                       &mq_create_ext->u.request.context,
16042                       LPFC_MQ_RING_SIZE_64);
16043                break;
16044        case 128:
16045                bf_set(lpfc_mq_context_ring_size,
16046                       &mq_create_ext->u.request.context,
16047                       LPFC_MQ_RING_SIZE_128);
16048                break;
16049        }
16050        list_for_each_entry(dmabuf, &mq->page_list, list) {
16051                memset(dmabuf->virt, 0, hw_page_size);
16052                mq_create_ext->u.request.page[dmabuf->buffer_tag].addr_lo =
16053                                        putPaddrLow(dmabuf->phys);
16054                mq_create_ext->u.request.page[dmabuf->buffer_tag].addr_hi =
16055                                        putPaddrHigh(dmabuf->phys);
16056        }
16057        rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
16058        mq->queue_id = bf_get(lpfc_mbx_mq_create_q_id,
16059                              &mq_create_ext->u.response);
16060        if (rc != MBX_SUCCESS) {
16061                lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
16062                                "2795 MQ_CREATE_EXT failed with "
16063                                "status x%x. Failback to MQ_CREATE.\n",
16064                                rc);
16065                lpfc_mq_create_fb_init(phba, mq, mbox, cq);
16066                mq_create = &mbox->u.mqe.un.mq_create;
16067                rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
16068                shdr = (union lpfc_sli4_cfg_shdr *) &mq_create->header.cfg_shdr;
16069                mq->queue_id = bf_get(lpfc_mbx_mq_create_q_id,
16070                                      &mq_create->u.response);
16071        }
16072
16073        /* The IOCTL status is embedded in the mailbox subheader. */
16074        shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16075        shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16076        if (shdr_status || shdr_add_status || rc) {
16077                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16078                                "2502 MQ_CREATE mailbox failed with "
16079                                "status x%x add_status x%x, mbx status x%x\n",
16080                                shdr_status, shdr_add_status, rc);
16081                status = -ENXIO;
16082                goto out;
16083        }
16084        if (mq->queue_id == 0xFFFF) {
16085                status = -ENXIO;
16086                goto out;
16087        }
16088        mq->type = LPFC_MQ;
16089        mq->assoc_qid = cq->queue_id;
16090        mq->subtype = subtype;
16091        mq->host_index = 0;
16092        mq->hba_index = 0;
16093
16094        /* link the mq onto the parent cq child list */
16095        list_add_tail(&mq->list, &cq->child_list);
16096out:
16097        mempool_free(mbox, phba->mbox_mem_pool);
16098        return status;
16099}
16100
16101/**
16102 * lpfc_wq_create - Create a Work Queue on the HBA
16103 * @phba: HBA structure that indicates port to create a queue on.
16104 * @wq: The queue structure to use to create the work queue.
16105 * @cq: The completion queue to bind this work queue to.
16106 * @subtype: The subtype of the work queue indicating its functionality.
16107 *
16108 * This function creates a work queue, as detailed in @wq, on a port, described
16109 * by @phba by sending a WQ_CREATE mailbox command to the HBA.
16110 *
16111 * The @phba struct is used to send mailbox command to HBA. The @wq struct
16112 * is used to get the entry count and entry size that are necessary to
16113 * determine the number of pages to allocate and use for this queue. The @cq
16114 * is used to indicate which completion queue to bind this work queue to. This
16115 * function will send the WQ_CREATE mailbox command to the HBA to setup the
16116 * work queue. This function is synchronous and waits for the mailbox
16117 * command to finish before continuing.
16118 *
16119 * On success this function will return a zero. If unable to allocate enough
16120 * memory this function will return -ENOMEM. If the queue create mailbox command
16121 * fails this function will return -ENXIO.
16122 **/
16123int
16124lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq,
16125               struct lpfc_queue *cq, uint32_t subtype)
16126{
16127        struct lpfc_mbx_wq_create *wq_create;
16128        struct lpfc_dmabuf *dmabuf;
16129        LPFC_MBOXQ_t *mbox;
16130        int rc, length, status = 0;
16131        uint32_t shdr_status, shdr_add_status;
16132        union lpfc_sli4_cfg_shdr *shdr;
16133        uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
16134        struct dma_address *page;
16135        void __iomem *bar_memmap_p;
16136        uint32_t db_offset;
16137        uint16_t pci_barset;
16138        uint8_t dpp_barset;
16139        uint32_t dpp_offset;
16140        uint8_t wq_create_version;
16141#ifdef CONFIG_X86
16142        unsigned long pg_addr;
16143#endif
16144
16145        /* sanity check on queue memory */
16146        if (!wq || !cq)
16147                return -ENODEV;
16148        if (!phba->sli4_hba.pc_sli4_params.supported)
16149                hw_page_size = wq->page_size;
16150
16151        mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
16152        if (!mbox)
16153                return -ENOMEM;
16154        length = (sizeof(struct lpfc_mbx_wq_create) -
16155                  sizeof(struct lpfc_sli4_cfg_mhdr));
16156        lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
16157                         LPFC_MBOX_OPCODE_FCOE_WQ_CREATE,
16158                         length, LPFC_SLI4_MBX_EMBED);
16159        wq_create = &mbox->u.mqe.un.wq_create;
16160        shdr = (union lpfc_sli4_cfg_shdr *) &wq_create->header.cfg_shdr;
16161        bf_set(lpfc_mbx_wq_create_num_pages, &wq_create->u.request,
16162                    wq->page_count);
16163        bf_set(lpfc_mbx_wq_create_cq_id, &wq_create->u.request,
16164                    cq->queue_id);
16165
16166        /* wqv is the earliest version supported, NOT the latest */
16167        bf_set(lpfc_mbox_hdr_version, &shdr->request,
16168               phba->sli4_hba.pc_sli4_params.wqv);
16169
16170        if ((phba->sli4_hba.pc_sli4_params.wqsize & LPFC_WQ_SZ128_SUPPORT) ||
16171            (wq->page_size > SLI4_PAGE_SIZE))
16172                wq_create_version = LPFC_Q_CREATE_VERSION_1;
16173        else
16174                wq_create_version = LPFC_Q_CREATE_VERSION_0;
16175
16176        switch (wq_create_version) {
16177        case LPFC_Q_CREATE_VERSION_1:
16178                bf_set(lpfc_mbx_wq_create_wqe_count, &wq_create->u.request_1,
16179                       wq->entry_count);
16180                bf_set(lpfc_mbox_hdr_version, &shdr->request,
16181                       LPFC_Q_CREATE_VERSION_1);
16182
16183                switch (wq->entry_size) {
16184                default:
16185                case 64:
16186                        bf_set(lpfc_mbx_wq_create_wqe_size,
16187                               &wq_create->u.request_1,
16188                               LPFC_WQ_WQE_SIZE_64);
16189                        break;
16190                case 128:
16191                        bf_set(lpfc_mbx_wq_create_wqe_size,
16192                               &wq_create->u.request_1,
16193                               LPFC_WQ_WQE_SIZE_128);
16194                        break;
16195                }
16196                /* Request DPP by default */
16197                bf_set(lpfc_mbx_wq_create_dpp_req, &wq_create->u.request_1, 1);
16198                bf_set(lpfc_mbx_wq_create_page_size,
16199                       &wq_create->u.request_1,
16200                       (wq->page_size / SLI4_PAGE_SIZE));
16201                page = wq_create->u.request_1.page;
16202                break;
16203        default:
16204                page = wq_create->u.request.page;
16205                break;
16206        }
16207
16208        list_for_each_entry(dmabuf, &wq->page_list, list) {
16209                memset(dmabuf->virt, 0, hw_page_size);
16210                page[dmabuf->buffer_tag].addr_lo = putPaddrLow(dmabuf->phys);
16211                page[dmabuf->buffer_tag].addr_hi = putPaddrHigh(dmabuf->phys);
16212        }
16213
16214        if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE)
16215                bf_set(lpfc_mbx_wq_create_dua, &wq_create->u.request, 1);
16216
16217        rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
16218        /* The IOCTL status is embedded in the mailbox subheader. */
16219        shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16220        shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16221        if (shdr_status || shdr_add_status || rc) {
16222                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16223                                "2503 WQ_CREATE mailbox failed with "
16224                                "status x%x add_status x%x, mbx status x%x\n",
16225                                shdr_status, shdr_add_status, rc);
16226                status = -ENXIO;
16227                goto out;
16228        }
16229
16230        if (wq_create_version == LPFC_Q_CREATE_VERSION_0)
16231                wq->queue_id = bf_get(lpfc_mbx_wq_create_q_id,
16232                                        &wq_create->u.response);
16233        else
16234                wq->queue_id = bf_get(lpfc_mbx_wq_create_v1_q_id,
16235                                        &wq_create->u.response_1);
16236
16237        if (wq->queue_id == 0xFFFF) {
16238                status = -ENXIO;
16239                goto out;
16240        }
16241
16242        wq->db_format = LPFC_DB_LIST_FORMAT;
16243        if (wq_create_version == LPFC_Q_CREATE_VERSION_0) {
16244                if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE) {
16245                        wq->db_format = bf_get(lpfc_mbx_wq_create_db_format,
16246                                               &wq_create->u.response);
16247                        if ((wq->db_format != LPFC_DB_LIST_FORMAT) &&
16248                            (wq->db_format != LPFC_DB_RING_FORMAT)) {
16249                                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16250                                                "3265 WQ[%d] doorbell format "
16251                                                "not supported: x%x\n",
16252                                                wq->queue_id, wq->db_format);
16253                                status = -EINVAL;
16254                                goto out;
16255                        }
16256                        pci_barset = bf_get(lpfc_mbx_wq_create_bar_set,
16257                                            &wq_create->u.response);
16258                        bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba,
16259                                                                   pci_barset);
16260                        if (!bar_memmap_p) {
16261                                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16262                                                "3263 WQ[%d] failed to memmap "
16263                                                "pci barset:x%x\n",
16264                                                wq->queue_id, pci_barset);
16265                                status = -ENOMEM;
16266                                goto out;
16267                        }
16268                        db_offset = wq_create->u.response.doorbell_offset;
16269                        if ((db_offset != LPFC_ULP0_WQ_DOORBELL) &&
16270                            (db_offset != LPFC_ULP1_WQ_DOORBELL)) {
16271                                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16272                                                "3252 WQ[%d] doorbell offset "
16273                                                "not supported: x%x\n",
16274                                                wq->queue_id, db_offset);
16275                                status = -EINVAL;
16276                                goto out;
16277                        }
16278                        wq->db_regaddr = bar_memmap_p + db_offset;
16279                        lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
16280                                        "3264 WQ[%d]: barset:x%x, offset:x%x, "
16281                                        "format:x%x\n", wq->queue_id,
16282                                        pci_barset, db_offset, wq->db_format);
16283                } else
16284                        wq->db_regaddr = phba->sli4_hba.WQDBregaddr;
16285        } else {
16286                /* Check if DPP was honored by the firmware */
16287                wq->dpp_enable = bf_get(lpfc_mbx_wq_create_dpp_rsp,
16288                                    &wq_create->u.response_1);
16289                if (wq->dpp_enable) {
16290                        pci_barset = bf_get(lpfc_mbx_wq_create_v1_bar_set,
16291                                            &wq_create->u.response_1);
16292                        bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba,
16293                                                                   pci_barset);
16294                        if (!bar_memmap_p) {
16295                                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16296                                                "3267 WQ[%d] failed to memmap "
16297                                                "pci barset:x%x\n",
16298                                                wq->queue_id, pci_barset);
16299                                status = -ENOMEM;
16300                                goto out;
16301                        }
16302                        db_offset = wq_create->u.response_1.doorbell_offset;
16303                        wq->db_regaddr = bar_memmap_p + db_offset;
16304                        wq->dpp_id = bf_get(lpfc_mbx_wq_create_dpp_id,
16305                                            &wq_create->u.response_1);
16306                        dpp_barset = bf_get(lpfc_mbx_wq_create_dpp_bar,
16307                                            &wq_create->u.response_1);
16308                        bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba,
16309                                                                   dpp_barset);
16310                        if (!bar_memmap_p) {
16311                                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16312                                                "3268 WQ[%d] failed to memmap "
16313                                                "pci barset:x%x\n",
16314                                                wq->queue_id, dpp_barset);
16315                                status = -ENOMEM;
16316                                goto out;
16317                        }
16318                        dpp_offset = wq_create->u.response_1.dpp_offset;
16319                        wq->dpp_regaddr = bar_memmap_p + dpp_offset;
16320                        lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
16321                                        "3271 WQ[%d]: barset:x%x, offset:x%x, "
16322                                        "dpp_id:x%x dpp_barset:x%x "
16323                                        "dpp_offset:x%x\n",
16324                                        wq->queue_id, pci_barset, db_offset,
16325                                        wq->dpp_id, dpp_barset, dpp_offset);
16326
16327#ifdef CONFIG_X86
16328                        /* Enable combined writes for DPP aperture */
16329                        pg_addr = (unsigned long)(wq->dpp_regaddr) & PAGE_MASK;
16330                        rc = set_memory_wc(pg_addr, 1);
16331                        if (rc) {
16332                                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16333                                        "3272 Cannot setup Combined "
16334                                        "Write on WQ[%d] - disable DPP\n",
16335                                        wq->queue_id);
16336                                phba->cfg_enable_dpp = 0;
16337                        }
16338#else
16339                        phba->cfg_enable_dpp = 0;
16340#endif
16341                } else
16342                        wq->db_regaddr = phba->sli4_hba.WQDBregaddr;
16343        }
16344        wq->pring = kzalloc(sizeof(struct lpfc_sli_ring), GFP_KERNEL);
16345        if (wq->pring == NULL) {
16346                status = -ENOMEM;
16347                goto out;
16348        }
16349        wq->type = LPFC_WQ;
16350        wq->assoc_qid = cq->queue_id;
16351        wq->subtype = subtype;
16352        wq->host_index = 0;
16353        wq->hba_index = 0;
16354        wq->notify_interval = LPFC_WQ_NOTIFY_INTRVL;
16355
16356        /* link the wq onto the parent cq child list */
16357        list_add_tail(&wq->list, &cq->child_list);
16358out:
16359        mempool_free(mbox, phba->mbox_mem_pool);
16360        return status;
16361}
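
/*
 * Illustrative usage sketch (not compiled into the driver) for the WQ
 * create path above; LPFC_ELS is assumed here as the queue subtype and
 * error handling is elided:
 *
 *      rc = lpfc_wq_create(phba, wq, cq, LPFC_ELS);
 *      if (rc)
 *              return rc;      // -ENOMEM, or -ENXIO/-EINVAL as logged above
 *
 * On success wq->db_regaddr is ready for doorbell writes and, if the
 * firmware granted DPP, wq->dpp_enable is set so WQEs may be pushed
 * through the write-combined wq->dpp_regaddr aperture.
 */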
16362
16363/**
16364 * lpfc_rq_create - Create a Receive Queue on the HBA
16365 * @phba: HBA structure that indicates port to create a queue on.
16366 * @hrq: The queue structure to use to create the header receive queue.
16367 * @drq: The queue structure to use to create the data receive queue.
16368 * @cq: The completion queue to bind these receive queues to.
16369 * @subtype: The subtype of the receive queues indicating their functionality.
16370 *
16371 * This function creates a receive buffer queue pair, as detailed in @hrq and
16372 * @drq, on a port described by @phba, by sending an RQ_CREATE mailbox command
16373 * to the HBA.
16374 *
16375 * The @phba struct is used to send the mailbox command to the HBA. The @hrq
16376 * and @drq structs are used to get the entry counts that determine the number
16377 * of pages to use for each queue. The @cq is used to indicate which completion
16378 * queue the buffers received on these queues will be bound to. This function
16379 * sends the RQ_CREATE mailbox command to the HBA to set up the receive queue
16380 * pair. This function is synchronous and will wait for the mailbox command to
16381 * finish before returning.
16382 *
16383 * On success this function returns zero. If unable to allocate enough memory
16384 * this function will return -ENOMEM. If the queue create mailbox command
16385 * fails this function will return -ENXIO.
16386 **/
16387int
16388lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq,
16389               struct lpfc_queue *drq, struct lpfc_queue *cq, uint32_t subtype)
16390{
16391        struct lpfc_mbx_rq_create *rq_create;
16392        struct lpfc_dmabuf *dmabuf;
16393        LPFC_MBOXQ_t *mbox;
16394        int rc, length, status = 0;
16395        uint32_t shdr_status, shdr_add_status;
16396        union lpfc_sli4_cfg_shdr *shdr;
16397        uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
16398        void __iomem *bar_memmap_p;
16399        uint32_t db_offset;
16400        uint16_t pci_barset;
16401
16402        /* sanity check on queue memory */
16403        if (!hrq || !drq || !cq)
16404                return -ENODEV;
16405        if (!phba->sli4_hba.pc_sli4_params.supported)
16406                hw_page_size = SLI4_PAGE_SIZE;
16407
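        /* The header and data RQs are consumed in lockstep, one header RQE
         * per data RQE, so the pair must be created with identical depths.
         */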
16408        if (hrq->entry_count != drq->entry_count)
16409                return -EINVAL;
16410        mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
16411        if (!mbox)
16412                return -ENOMEM;
16413        length = (sizeof(struct lpfc_mbx_rq_create) -
16414                  sizeof(struct lpfc_sli4_cfg_mhdr));
16415        lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
16416                         LPFC_MBOX_OPCODE_FCOE_RQ_CREATE,
16417                         length, LPFC_SLI4_MBX_EMBED);
16418        rq_create = &mbox->u.mqe.un.rq_create;
16419        shdr = (union lpfc_sli4_cfg_shdr *) &rq_create->header.cfg_shdr;
16420        bf_set(lpfc_mbox_hdr_version, &shdr->request,
16421               phba->sli4_hba.pc_sli4_params.rqv);
16422        if (phba->sli4_hba.pc_sli4_params.rqv == LPFC_Q_CREATE_VERSION_1) {
16423                bf_set(lpfc_rq_context_rqe_count_1,
16424                       &rq_create->u.request.context,
16425                       hrq->entry_count);
16426                rq_create->u.request.context.buffer_size = LPFC_HDR_BUF_SIZE;
16427                bf_set(lpfc_rq_context_rqe_size,
16428                       &rq_create->u.request.context,
16429                       LPFC_RQE_SIZE_8);
16430                bf_set(lpfc_rq_context_page_size,
16431                       &rq_create->u.request.context,
16432                       LPFC_RQ_PAGE_SIZE_4096);
16433        } else {
16434                switch (hrq->entry_count) {
16435                default:
16436                        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16437                                        "2535 Unsupported RQ count. (%d)\n",
16438                                        hrq->entry_count);
16439                        if (hrq->entry_count < 512) {
16440                                status = -EINVAL;
16441                                goto out;
16442                        }
16443                        fallthrough;    /* otherwise default to smallest count */
16444                case 512:
16445                        bf_set(lpfc_rq_context_rqe_count,
16446                               &rq_create->u.request.context,
16447                               LPFC_RQ_RING_SIZE_512);
16448                        break;
16449                case 1024:
16450                        bf_set(lpfc_rq_context_rqe_count,
16451                               &rq_create->u.request.context,
16452                               LPFC_RQ_RING_SIZE_1024);
16453                        break;
16454                case 2048:
16455                        bf_set(lpfc_rq_context_rqe_count,
16456                               &rq_create->u.request.context,
16457                               LPFC_RQ_RING_SIZE_2048);
16458                        break;
16459                case 4096:
16460                        bf_set(lpfc_rq_context_rqe_count,
16461                               &rq_create->u.request.context,
16462                               LPFC_RQ_RING_SIZE_4096);
16463                        break;
16464                }
16465                bf_set(lpfc_rq_context_buf_size, &rq_create->u.request.context,
16466                       LPFC_HDR_BUF_SIZE);
16467        }
16468        bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context,
16469               cq->queue_id);
16470        bf_set(lpfc_mbx_rq_create_num_pages, &rq_create->u.request,
16471               hrq->page_count);
16472        list_for_each_entry(dmabuf, &hrq->page_list, list) {
16473                memset(dmabuf->virt, 0, hw_page_size);
16474                rq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
16475                                        putPaddrLow(dmabuf->phys);
16476                rq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
16477                                        putPaddrHigh(dmabuf->phys);
16478        }
16479        if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE)
16480                bf_set(lpfc_mbx_rq_create_dua, &rq_create->u.request, 1);
16481
16482        rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
16483        /* The IOCTL status is embedded in the mailbox subheader. */
16484        shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16485        shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16486        if (shdr_status || shdr_add_status || rc) {
16487                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16488                                "2504 RQ_CREATE mailbox failed with "
16489                                "status x%x add_status x%x, mbx status x%x\n",
16490                                shdr_status, shdr_add_status, rc);
16491                status = -ENXIO;
16492                goto out;
16493        }
16494        hrq->queue_id = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response);
16495        if (hrq->queue_id == 0xFFFF) {
16496                status = -ENXIO;
16497                goto out;
16498        }
16499
16500        if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE) {
16501                hrq->db_format = bf_get(lpfc_mbx_rq_create_db_format,
16502                                        &rq_create->u.response);
16503                if ((hrq->db_format != LPFC_DB_LIST_FORMAT) &&
16504                    (hrq->db_format != LPFC_DB_RING_FORMAT)) {
16505                        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16506                                        "3262 RQ [%d] doorbell format not "
16507                                        "supported: x%x\n", hrq->queue_id,
16508                                        hrq->db_format);
16509                        status = -EINVAL;
16510                        goto out;
16511                }
16512
16513                pci_barset = bf_get(lpfc_mbx_rq_create_bar_set,
16514                                    &rq_create->u.response);
16515                bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba, pci_barset);
16516                if (!bar_memmap_p) {
16517                        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16518                                        "3269 RQ[%d] failed to memmap pci "
16519                                        "barset:x%x\n", hrq->queue_id,
16520                                        pci_barset);
16521                        status = -ENOMEM;
16522                        goto out;
16523                }
16524
16525                db_offset = rq_create->u.response.doorbell_offset;
16526                if ((db_offset != LPFC_ULP0_RQ_DOORBELL) &&
16527                    (db_offset != LPFC_ULP1_RQ_DOORBELL)) {
16528                        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16529                                        "3270 RQ[%d] doorbell offset not "
16530                                        "supported: x%x\n", hrq->queue_id,
16531                                        db_offset);
16532                        status = -EINVAL;
16533                        goto out;
16534                }
16535                hrq->db_regaddr = bar_memmap_p + db_offset;
16536                lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
16537                                "3266 RQ[qid:%d]: barset:x%x, offset:x%x, "
16538                                "format:x%x\n", hrq->queue_id, pci_barset,
16539                                db_offset, hrq->db_format);
16540        } else {
16541                hrq->db_format = LPFC_DB_RING_FORMAT;
16542                hrq->db_regaddr = phba->sli4_hba.RQDBregaddr;
16543        }
16544        hrq->type = LPFC_HRQ;
16545        hrq->assoc_qid = cq->queue_id;
16546        hrq->subtype = subtype;
16547        hrq->host_index = 0;
16548        hrq->hba_index = 0;
16549        hrq->notify_interval = LPFC_RQ_NOTIFY_INTRVL;
16550
16551        /* now create the data queue */
16552        lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
16553                         LPFC_MBOX_OPCODE_FCOE_RQ_CREATE,
16554                         length, LPFC_SLI4_MBX_EMBED);
16555        bf_set(lpfc_mbox_hdr_version, &shdr->request,
16556               phba->sli4_hba.pc_sli4_params.rqv);
16557        if (phba->sli4_hba.pc_sli4_params.rqv == LPFC_Q_CREATE_VERSION_1) {
16558                bf_set(lpfc_rq_context_rqe_count_1,
16559                       &rq_create->u.request.context, hrq->entry_count);
16560                if (subtype == LPFC_NVMET)
16561                        rq_create->u.request.context.buffer_size =
16562                                LPFC_NVMET_DATA_BUF_SIZE;
16563                else
16564                        rq_create->u.request.context.buffer_size =
16565                                LPFC_DATA_BUF_SIZE;
16566                bf_set(lpfc_rq_context_rqe_size, &rq_create->u.request.context,
16567                       LPFC_RQE_SIZE_8);
16568                bf_set(lpfc_rq_context_page_size, &rq_create->u.request.context,
16569                       (PAGE_SIZE/SLI4_PAGE_SIZE));
16570        } else {
16571                switch (drq->entry_count) {
16572                default:
16573                        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16574                                        "2536 Unsupported RQ count. (%d)\n",
16575                                        drq->entry_count);
16576                        if (drq->entry_count < 512) {
16577                                status = -EINVAL;
16578                                goto out;
16579                        }
16580                        fallthrough;    /* otherwise default to smallest count */
16581                case 512:
16582                        bf_set(lpfc_rq_context_rqe_count,
16583                               &rq_create->u.request.context,
16584                               LPFC_RQ_RING_SIZE_512);
16585                        break;
16586                case 1024:
16587                        bf_set(lpfc_rq_context_rqe_count,
16588                               &rq_create->u.request.context,
16589                               LPFC_RQ_RING_SIZE_1024);
16590                        break;
16591                case 2048:
16592                        bf_set(lpfc_rq_context_rqe_count,
16593                               &rq_create->u.request.context,
16594                               LPFC_RQ_RING_SIZE_2048);
16595                        break;
16596                case 4096:
16597                        bf_set(lpfc_rq_context_rqe_count,
16598                               &rq_create->u.request.context,
16599                               LPFC_RQ_RING_SIZE_4096);
16600                        break;
16601                }
16602                if (subtype == LPFC_NVMET)
16603                        bf_set(lpfc_rq_context_buf_size,
16604                               &rq_create->u.request.context,
16605                               LPFC_NVMET_DATA_BUF_SIZE);
16606                else
16607                        bf_set(lpfc_rq_context_buf_size,
16608                               &rq_create->u.request.context,
16609                               LPFC_DATA_BUF_SIZE);
16610        }
16611        bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context,
16612               cq->queue_id);
16613        bf_set(lpfc_mbx_rq_create_num_pages, &rq_create->u.request,
16614               drq->page_count);
16615        list_for_each_entry(dmabuf, &drq->page_list, list) {
16616                rq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
16617                                        putPaddrLow(dmabuf->phys);
16618                rq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
16619                                        putPaddrHigh(dmabuf->phys);
16620        }
16621        if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE)
16622                bf_set(lpfc_mbx_rq_create_dua, &rq_create->u.request, 1);
16623        rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
16624        /* The IOCTL status is embedded in the mailbox subheader. */
16625        shdr = (union lpfc_sli4_cfg_shdr *) &rq_create->header.cfg_shdr;
16626        shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16627        shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16628        if (shdr_status || shdr_add_status || rc) {
16629                status = -ENXIO;
16630                goto out;
16631        }
16632        drq->queue_id = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response);
16633        if (drq->queue_id == 0xFFFF) {
16634                status = -ENXIO;
16635                goto out;
16636        }
16637        drq->type = LPFC_DRQ;
16638        drq->assoc_qid = cq->queue_id;
16639        drq->subtype = subtype;
16640        drq->host_index = 0;
16641        drq->hba_index = 0;
16642        drq->notify_interval = LPFC_RQ_NOTIFY_INTRVL;
16643
16644        /* link the header and data RQs onto the parent cq child list */
16645        list_add_tail(&hrq->list, &cq->child_list);
16646        list_add_tail(&drq->list, &cq->child_list);
16647
16648out:
16649        mempool_free(mbox, phba->mbox_mem_pool);
16650        return status;
16651}
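
/*
 * Illustrative sketch (not compiled into the driver): pairing the create
 * above with the lpfc_rq_destroy() teardown defined later in this file.
 * LPFC_USOL is assumed as the subtype; hrq and drq must already be
 * allocated with identical entry counts:
 *
 *      rc = lpfc_rq_create(phba, hrq, drq, cq, LPFC_USOL);
 *      if (rc)
 *              return rc;      // -ENODEV, -EINVAL, -ENOMEM or -ENXIO
 *      ...
 *      lpfc_rq_destroy(phba, hrq, drq);
 */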
16652
16653/**
16654 * lpfc_mrq_create - Create MRQ Receive Queues on the HBA
16655 * @phba: HBA structure that indicates port to create a queue on.
16656 * @hrqp: The queue structure array to use to create the header receive queues.
16657 * @drqp: The queue structure array to use to create the data receive queues.
16658 * @cqp: The completion queue array to bind these receive queues to.
16659 * @subtype: Functional purpose of the queue (MBOX, IO, ELS, NVMET, etc).
16660 *
16661 * This function creates @phba->cfg_nvmet_mrq receive buffer queue pairs, as
16662 * detailed in the @hrqp, @drqp and @cqp arrays, on a port described by @phba,
16663 * by sending an RQ_CREATE mailbox command to the HBA.
16664 *
16665 * The @phba struct is used to send the mailbox command to the HBA. The @hrqp
16666 * and @drqp entries are used to get the entry counts that determine the
16667 * number of pages to use for each queue. Each @cqp entry indicates which
16668 * completion queue the buffers received on the corresponding queue pair will
16669 * be bound to. This function sends a single RQ_CREATE mailbox command for
16670 * all of the queue pairs. This function is synchronous and will wait for the
16671 * mailbox command to finish before returning.
16672 *
16673 * On success this function returns zero. If unable to allocate enough memory
16674 * this function will return -ENOMEM. If the queue create mailbox command
16675 * fails this function will return -ENXIO.
16676 **/
16677int
16678lpfc_mrq_create(struct lpfc_hba *phba, struct lpfc_queue **hrqp,
16679                struct lpfc_queue **drqp, struct lpfc_queue **cqp,
16680                uint32_t subtype)
16681{
16682        struct lpfc_queue *hrq, *drq, *cq;
16683        struct lpfc_mbx_rq_create_v2 *rq_create;
16684        struct lpfc_dmabuf *dmabuf;
16685        LPFC_MBOXQ_t *mbox;
16686        int rc, length, alloclen, status = 0;
16687        int cnt, idx, numrq, page_idx = 0;
16688        uint32_t shdr_status, shdr_add_status;
16689        union lpfc_sli4_cfg_shdr *shdr;
16690        uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
16691
16692        numrq = phba->cfg_nvmet_mrq;
16693        /* sanity check on array memory */
16694        if (!hrqp || !drqp || !cqp || !numrq)
16695                return -ENODEV;
16696        if (!phba->sli4_hba.pc_sli4_params.supported)
16697                hw_page_size = SLI4_PAGE_SIZE;
16698
16699        mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
16700        if (!mbox)
16701                return -ENOMEM;
16702
16703        length = sizeof(struct lpfc_mbx_rq_create_v2);
16704        length += ((2 * numrq * hrqp[0]->page_count) *
16705                   sizeof(struct dma_address));
16706
16707        alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
16708                                    LPFC_MBOX_OPCODE_FCOE_RQ_CREATE, length,
16709                                    LPFC_SLI4_MBX_NEMBED);
16710        if (alloclen < length) {
16711                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16712                                "3099 Allocated DMA memory size (%d) is "
16713                                "less than the requested DMA memory size "
16714                                "(%d)\n", alloclen, length);
16715                status = -ENOMEM;
16716                goto out;
16717        }
16718
16721        rq_create = mbox->sge_array->addr[0];
16722        shdr = (union lpfc_sli4_cfg_shdr *)&rq_create->cfg_shdr;
16723
16724        bf_set(lpfc_mbox_hdr_version, &shdr->request, LPFC_Q_CREATE_VERSION_2);
16725        cnt = 0;
16726
16727        for (idx = 0; idx < numrq; idx++) {
16728                hrq = hrqp[idx];
16729                drq = drqp[idx];
16730                cq  = cqp[idx];
16731
16732                /* sanity check on queue memory */
16733                if (!hrq || !drq || !cq) {
16734                        status = -ENODEV;
16735                        goto out;
16736                }
16737
16738                if (hrq->entry_count != drq->entry_count) {
16739                        status = -EINVAL;
16740                        goto out;
16741                }
16742
16743                if (idx == 0) {
16744                        bf_set(lpfc_mbx_rq_create_num_pages,
16745                               &rq_create->u.request,
16746                               hrq->page_count);
16747                        bf_set(lpfc_mbx_rq_create_rq_cnt,
16748                               &rq_create->u.request, (numrq * 2));
16749                        bf_set(lpfc_mbx_rq_create_dnb, &rq_create->u.request,
16750                               1);
16751                        bf_set(lpfc_rq_context_base_cq,
16752                               &rq_create->u.request.context,
16753                               cq->queue_id);
16754                        bf_set(lpfc_rq_context_data_size,
16755                               &rq_create->u.request.context,
16756                               LPFC_NVMET_DATA_BUF_SIZE);
16757                        bf_set(lpfc_rq_context_hdr_size,
16758                               &rq_create->u.request.context,
16759                               LPFC_HDR_BUF_SIZE);
16760                        bf_set(lpfc_rq_context_rqe_count_1,
16761                               &rq_create->u.request.context,
16762                               hrq->entry_count);
16763                        bf_set(lpfc_rq_context_rqe_size,
16764                               &rq_create->u.request.context,
16765                               LPFC_RQE_SIZE_8);
16766                        bf_set(lpfc_rq_context_page_size,
16767                               &rq_create->u.request.context,
16768                               (PAGE_SIZE/SLI4_PAGE_SIZE));
16769                }
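                /*
                 * All HRQ and DRQ pages land in the mailbox's single flat
                 * page array; rc counts the pages added for this queue so
                 * that page_idx stays the running offset into that array.
                 */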
16770                rc = 0;
16771                list_for_each_entry(dmabuf, &hrq->page_list, list) {
16772                        memset(dmabuf->virt, 0, hw_page_size);
16773                        cnt = page_idx + dmabuf->buffer_tag;
16774                        rq_create->u.request.page[cnt].addr_lo =
16775                                        putPaddrLow(dmabuf->phys);
16776                        rq_create->u.request.page[cnt].addr_hi =
16777                                        putPaddrHigh(dmabuf->phys);
16778                        rc++;
16779                }
16780                page_idx += rc;
16781
16782                rc = 0;
16783                list_for_each_entry(dmabuf, &drq->page_list, list) {
16784                        memset(dmabuf->virt, 0, hw_page_size);
16785                        cnt = page_idx + dmabuf->buffer_tag;
16786                        rq_create->u.request.page[cnt].addr_lo =
16787                                        putPaddrLow(dmabuf->phys);
16788                        rq_create->u.request.page[cnt].addr_hi =
16789                                        putPaddrHigh(dmabuf->phys);
16790                        rc++;
16791                }
16792                page_idx += rc;
16793
16794                hrq->db_format = LPFC_DB_RING_FORMAT;
16795                hrq->db_regaddr = phba->sli4_hba.RQDBregaddr;
16796                hrq->type = LPFC_HRQ;
16797                hrq->assoc_qid = cq->queue_id;
16798                hrq->subtype = subtype;
16799                hrq->host_index = 0;
16800                hrq->hba_index = 0;
16801                hrq->notify_interval = LPFC_RQ_NOTIFY_INTRVL;
16802
16803                drq->db_format = LPFC_DB_RING_FORMAT;
16804                drq->db_regaddr = phba->sli4_hba.RQDBregaddr;
16805                drq->type = LPFC_DRQ;
16806                drq->assoc_qid = cq->queue_id;
16807                drq->subtype = subtype;
16808                drq->host_index = 0;
16809                drq->hba_index = 0;
16810                drq->notify_interval = LPFC_RQ_NOTIFY_INTRVL;
16811
16812                list_add_tail(&hrq->list, &cq->child_list);
16813                list_add_tail(&drq->list, &cq->child_list);
16814        }
16815
16816        rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
16817        /* The IOCTL status is embedded in the mailbox subheader. */
16818        shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16819        shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16820        if (shdr_status || shdr_add_status || rc) {
16821                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16822                                "3120 RQ_CREATE mailbox failed with "
16823                                "status x%x add_status x%x, mbx status x%x\n",
16824                                shdr_status, shdr_add_status, rc);
16825                status = -ENXIO;
16826                goto out;
16827        }
16828        rc = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response);
16829        if (rc == 0xFFFF) {
16830                status = -ENXIO;
16831                goto out;
16832        }
16833
16834        /* Initialize all RQs with associated queue id */
16835        for (idx = 0; idx < numrq; idx++) {
16836                hrq = hrqp[idx];
16837                hrq->queue_id = rc + (2 * idx);
16838                drq = drqp[idx];
16839                drq->queue_id = rc + (2 * idx) + 1;
16840        }
16841
16842out:
16843        lpfc_sli4_mbox_cmd_free(phba, mbox);
16844        return status;
16845}
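
/*
 * Worked example of the MRQ id layout assigned above: the port returns a
 * base queue id and numbers the created queues consecutively, so with
 * numrq = 2 and a base id of 100 the loop yields
 *
 *      hrqp[0]->queue_id = 100,  drqp[0]->queue_id = 101,
 *      hrqp[1]->queue_id = 102,  drqp[1]->queue_id = 103.
 */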
16846
16847/**
16848 * lpfc_eq_destroy - Destroy an Event Queue on the HBA
16849 * @phba: HBA structure that indicates port to destroy a queue on.
16850 * @eq: The queue structure associated with the queue to destroy.
16851 *
16852 * This function destroys a queue, as detailed in @eq, by sending a mailbox
16853 * command, specific to the type of queue, to the HBA.
16854 *
16855 * The @eq struct is used to get the queue ID of the queue to destroy.
16856 *
16857 * On success this function will return a zero. If the queue destroy mailbox
16858 * command fails this function will return -ENXIO.
16859 **/
16860int
16861lpfc_eq_destroy(struct lpfc_hba *phba, struct lpfc_queue *eq)
16862{
16863        LPFC_MBOXQ_t *mbox;
16864        int rc, length, status = 0;
16865        uint32_t shdr_status, shdr_add_status;
16866        union lpfc_sli4_cfg_shdr *shdr;
16867
16868        /* sanity check on queue memory */
16869        if (!eq)
16870                return -ENODEV;
16871
16872        mbox = mempool_alloc(eq->phba->mbox_mem_pool, GFP_KERNEL);
16873        if (!mbox)
16874                return -ENOMEM;
16875        length = (sizeof(struct lpfc_mbx_eq_destroy) -
16876                  sizeof(struct lpfc_sli4_cfg_mhdr));
16877        lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
16878                         LPFC_MBOX_OPCODE_EQ_DESTROY,
16879                         length, LPFC_SLI4_MBX_EMBED);
16880        bf_set(lpfc_mbx_eq_destroy_q_id, &mbox->u.mqe.un.eq_destroy.u.request,
16881               eq->queue_id);
16882        mbox->vport = eq->phba->pport;
16883        mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
16884
16885        rc = lpfc_sli_issue_mbox(eq->phba, mbox, MBX_POLL);
16886        /* The IOCTL status is embedded in the mailbox subheader. */
16887        shdr = (union lpfc_sli4_cfg_shdr *)
16888                &mbox->u.mqe.un.eq_destroy.header.cfg_shdr;
16889        shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16890        shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16891        if (shdr_status || shdr_add_status || rc) {
16892                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16893                                "2505 EQ_DESTROY mailbox failed with "
16894                                "status x%x add_status x%x, mbx status x%x\n",
16895                                shdr_status, shdr_add_status, rc);
16896                status = -ENXIO;
16897        }
16898
16899        /* Remove eq from any list */
16900        list_del_init(&eq->list);
16901        mempool_free(mbox, eq->phba->mbox_mem_pool);
16902        return status;
16903}
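
/*
 * Illustrative teardown sketch (not compiled into the driver): the destroy
 * helpers in this group all follow the same mailbox pattern, so a caller
 * unwinding a queue set walks it child-first:
 *
 *      lpfc_wq_destroy(phba, wq);      // WQs and RQs are children of a CQ
 *      lpfc_cq_destroy(phba, cq);      // CQs are children of an EQ
 *      lpfc_eq_destroy(phba, eq);
 *
 * Each helper returns 0 or a negative errno and always unlinks the queue
 * from its parent's child_list via list_del_init().
 */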
16904
16905/**
16906 * lpfc_cq_destroy - Destroy a Completion Queue on the HBA
16907 * @phba: HBA structure that indicates port to destroy a queue on.
16908 * @cq: The queue structure associated with the queue to destroy.
16909 *
16910 * This function destroys a queue, as detailed in @cq, by sending a mailbox
16911 * command, specific to the type of queue, to the HBA.
16912 *
16913 * The @cq struct is used to get the queue ID of the queue to destroy.
16914 *
16915 * On success this function will return a zero. If the queue destroy mailbox
16916 * command fails this function will return -ENXIO.
16917 **/
16918int
16919lpfc_cq_destroy(struct lpfc_hba *phba, struct lpfc_queue *cq)
16920{
16921        LPFC_MBOXQ_t *mbox;
16922        int rc, length, status = 0;
16923        uint32_t shdr_status, shdr_add_status;
16924        union lpfc_sli4_cfg_shdr *shdr;
16925
16926        /* sanity check on queue memory */
16927        if (!cq)
16928                return -ENODEV;
16929        mbox = mempool_alloc(cq->phba->mbox_mem_pool, GFP_KERNEL);
16930        if (!mbox)
16931                return -ENOMEM;
16932        length = (sizeof(struct lpfc_mbx_cq_destroy) -
16933                  sizeof(struct lpfc_sli4_cfg_mhdr));
16934        lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
16935                         LPFC_MBOX_OPCODE_CQ_DESTROY,
16936                         length, LPFC_SLI4_MBX_EMBED);
16937        bf_set(lpfc_mbx_cq_destroy_q_id, &mbox->u.mqe.un.cq_destroy.u.request,
16938               cq->queue_id);
16939        mbox->vport = cq->phba->pport;
16940        mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
16941        rc = lpfc_sli_issue_mbox(cq->phba, mbox, MBX_POLL);
16942        /* The IOCTL status is embedded in the mailbox subheader. */
16943        shdr = (union lpfc_sli4_cfg_shdr *)
16944                &mbox->u.mqe.un.cq_destroy.header.cfg_shdr;
16945        shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16946        shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16947        if (shdr_status || shdr_add_status || rc) {
16948                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16949                                "2506 CQ_DESTROY mailbox failed with "
16950                                "status x%x add_status x%x, mbx status x%x\n",
16951                                shdr_status, shdr_add_status, rc);
16952                status = -ENXIO;
16953        }
16954        /* Remove cq from any list */
16955        list_del_init(&cq->list);
16956        mempool_free(mbox, cq->phba->mbox_mem_pool);
16957        return status;
16958}
16959
16960/**
16961 * lpfc_mq_destroy - Destroy a Mailbox Queue on the HBA
16962 * @phba: HBA structure that indicates port to destroy a queue on.
16963 * @mq: The queue structure associated with the queue to destroy.
16964 *
16965 * This function destroys a queue, as detailed in @mq, by sending a mailbox
16966 * command, specific to the type of queue, to the HBA.
16967 *
16968 * The @mq struct is used to get the queue ID of the queue to destroy.
16969 *
16970 * On success this function will return a zero. If the queue destroy mailbox
16971 * command fails this function will return -ENXIO.
16972 **/
16973int
16974lpfc_mq_destroy(struct lpfc_hba *phba, struct lpfc_queue *mq)
16975{
16976        LPFC_MBOXQ_t *mbox;
16977        int rc, length, status = 0;
16978        uint32_t shdr_status, shdr_add_status;
16979        union lpfc_sli4_cfg_shdr *shdr;
16980
16981        /* sanity check on queue memory */
16982        if (!mq)
16983                return -ENODEV;
16984        mbox = mempool_alloc(mq->phba->mbox_mem_pool, GFP_KERNEL);
16985        if (!mbox)
16986                return -ENOMEM;
16987        length = (sizeof(struct lpfc_mbx_mq_destroy) -
16988                  sizeof(struct lpfc_sli4_cfg_mhdr));
16989        lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
16990                         LPFC_MBOX_OPCODE_MQ_DESTROY,
16991                         length, LPFC_SLI4_MBX_EMBED);
16992        bf_set(lpfc_mbx_mq_destroy_q_id, &mbox->u.mqe.un.mq_destroy.u.request,
16993               mq->queue_id);
16994        mbox->vport = mq->phba->pport;
16995        mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
16996        rc = lpfc_sli_issue_mbox(mq->phba, mbox, MBX_POLL);
16997        /* The IOCTL status is embedded in the mailbox subheader. */
16998        shdr = (union lpfc_sli4_cfg_shdr *)
16999                &mbox->u.mqe.un.mq_destroy.header.cfg_shdr;
17000        shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
17001        shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
17002        if (shdr_status || shdr_add_status || rc) {
17003                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17004                                "2507 MQ_DESTROY mailbox failed with "
17005                                "status x%x add_status x%x, mbx status x%x\n",
17006                                shdr_status, shdr_add_status, rc);
17007                status = -ENXIO;
17008        }
17009        /* Remove mq from any list */
17010        list_del_init(&mq->list);
17011        mempool_free(mbox, mq->phba->mbox_mem_pool);
17012        return status;
17013}
17014
17015/**
17016 * lpfc_wq_destroy - Destroy a Work Queue on the HBA
17017 * @phba: HBA structure that indicates port to destroy a queue on.
17018 * @wq: The queue structure associated with the queue to destroy.
17019 *
17020 * This function destroys a queue, as detailed in @wq, by sending a mailbox
17021 * command, specific to the type of queue, to the HBA.
17022 *
17023 * The @wq struct is used to get the queue ID of the queue to destroy.
17024 *
17025 * On success this function will return a zero. If the queue destroy mailbox
17026 * command fails this function will return -ENXIO.
17027 **/
17028int
17029lpfc_wq_destroy(struct lpfc_hba *phba, struct lpfc_queue *wq)
17030{
17031        LPFC_MBOXQ_t *mbox;
17032        int rc, length, status = 0;
17033        uint32_t shdr_status, shdr_add_status;
17034        union lpfc_sli4_cfg_shdr *shdr;
17035
17036        /* sanity check on queue memory */
17037        if (!wq)
17038                return -ENODEV;
17039        mbox = mempool_alloc(wq->phba->mbox_mem_pool, GFP_KERNEL);
17040        if (!mbox)
17041                return -ENOMEM;
17042        length = (sizeof(struct lpfc_mbx_wq_destroy) -
17043                  sizeof(struct lpfc_sli4_cfg_mhdr));
17044        lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
17045                         LPFC_MBOX_OPCODE_FCOE_WQ_DESTROY,
17046                         length, LPFC_SLI4_MBX_EMBED);
17047        bf_set(lpfc_mbx_wq_destroy_q_id, &mbox->u.mqe.un.wq_destroy.u.request,
17048               wq->queue_id);
17049        mbox->vport = wq->phba->pport;
17050        mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
17051        rc = lpfc_sli_issue_mbox(wq->phba, mbox, MBX_POLL);
17052        shdr = (union lpfc_sli4_cfg_shdr *)
17053                &mbox->u.mqe.un.wq_destroy.header.cfg_shdr;
17054        shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
17055        shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
17056        if (shdr_status || shdr_add_status || rc) {
17057                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17058                                "2508 WQ_DESTROY mailbox failed with "
17059                                "status x%x add_status x%x, mbx status x%x\n",
17060                                shdr_status, shdr_add_status, rc);
17061                status = -ENXIO;
17062        }
17063        /* Remove wq from any list */
17064        list_del_init(&wq->list);
17065        kfree(wq->pring);
17066        wq->pring = NULL;
17067        mempool_free(mbox, wq->phba->mbox_mem_pool);
17068        return status;
17069}
17070
17071/**
17072 * lpfc_rq_destroy - Destroy a Receive Queue on the HBA
17073 * @phba: HBA structure that indicates port to destroy a queue on.
17074 * @hrq: The queue structure associated with the header receive queue to destroy.
17075 * @drq: The queue structure associated with the data receive queue to destroy.
17076 *
17077 * This function destroys the receive queue pair, as detailed in @hrq and
17078 * @drq, by sending an RQ_DESTROY mailbox command for each queue to the HBA.
17079 *
17080 * The @hrq and @drq structs are used to get the queue IDs to destroy.
17081 *
17082 * On success this function will return a zero. If the queue destroy mailbox
17083 * command fails this function will return -ENXIO.
17084 **/
17085int
17086lpfc_rq_destroy(struct lpfc_hba *phba, struct lpfc_queue *hrq,
17087                struct lpfc_queue *drq)
17088{
17089        LPFC_MBOXQ_t *mbox;
17090        int rc, length, status = 0;
17091        uint32_t shdr_status, shdr_add_status;
17092        union lpfc_sli4_cfg_shdr *shdr;
17093
17094        /* sanity check on queue memory */
17095        if (!hrq || !drq)
17096                return -ENODEV;
17097        mbox = mempool_alloc(hrq->phba->mbox_mem_pool, GFP_KERNEL);
17098        if (!mbox)
17099                return -ENOMEM;
17100        length = (sizeof(struct lpfc_mbx_rq_destroy) -
17101                  sizeof(struct lpfc_sli4_cfg_mhdr));
17102        lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
17103                         LPFC_MBOX_OPCODE_FCOE_RQ_DESTROY,
17104                         length, LPFC_SLI4_MBX_EMBED);
17105        bf_set(lpfc_mbx_rq_destroy_q_id, &mbox->u.mqe.un.rq_destroy.u.request,
17106               hrq->queue_id);
17107        mbox->vport = hrq->phba->pport;
17108        mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
17109        rc = lpfc_sli_issue_mbox(hrq->phba, mbox, MBX_POLL);
17110        /* The IOCTL status is embedded in the mailbox subheader. */
17111        shdr = (union lpfc_sli4_cfg_shdr *)
17112                &mbox->u.mqe.un.rq_destroy.header.cfg_shdr;
17113        shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
17114        shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
17115        if (shdr_status || shdr_add_status || rc) {
17116                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17117                                "2509 RQ_DESTROY mailbox failed with "
17118                                "status x%x add_status x%x, mbx status x%x\n",
17119                                shdr_status, shdr_add_status, rc);
17120                mempool_free(mbox, hrq->phba->mbox_mem_pool);
17121                return -ENXIO;
17122        }
17123        bf_set(lpfc_mbx_rq_destroy_q_id, &mbox->u.mqe.un.rq_destroy.u.request,
17124               drq->queue_id);
17125        rc = lpfc_sli_issue_mbox(drq->phba, mbox, MBX_POLL);
17126        shdr = (union lpfc_sli4_cfg_shdr *)
17127                &mbox->u.mqe.un.rq_destroy.header.cfg_shdr;
17128        shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
17129        shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
17130        if (shdr_status || shdr_add_status || rc) {
17131                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17132                                "2510 RQ_DESTROY mailbox failed with "
17133                                "status x%x add_status x%x, mbx status x%x\n",
17134                                shdr_status, shdr_add_status, rc);
17135                status = -ENXIO;
17136        }
17137        list_del_init(&hrq->list);
17138        list_del_init(&drq->list);
17139        mempool_free(mbox, hrq->phba->mbox_mem_pool);
17140        return status;
17141}
17142
17143/**
17144 * lpfc_sli4_post_sgl - Post scatter gather list for an XRI to HBA
17145 * @phba: pointer to lpfc hba data structure.
17146 * @pdma_phys_addr0: Physical address of the 1st SGL page.
17147 * @pdma_phys_addr1: Physical address of the 2nd SGL page.
17148 * @xritag: the xritag that ties this io to the SGL pages.
17149 *
17150 * This routine will post the sgl pages for the IO that has the xritag
17151 * that is in the iocbq structure. The xritag is assigned during iocbq
17152 * creation and persists for as long as the driver is loaded.
17153 * If the caller has fewer than 256 scatter gather segments to map then
17154 * pdma_phys_addr1 should be 0.
17155 * If the caller needs to map more than 256 scatter gather segments then
17156 * pdma_phys_addr1 should be a valid physical address.
17157 * Physical addresses for SGLs must be 64 byte aligned.
17158 * If two SGL pages are mapped then the first one must have 256 entries
17159 * and the second can have between 1 and 256 entries.
17160 *
17161 * Return codes:
17162 *      0 - Success
17163 *      -ENXIO, -ENOMEM - Failure
17164 **/
17165int
17166lpfc_sli4_post_sgl(struct lpfc_hba *phba,
17167                dma_addr_t pdma_phys_addr0,
17168                dma_addr_t pdma_phys_addr1,
17169                uint16_t xritag)
17170{
17171        struct lpfc_mbx_post_sgl_pages *post_sgl_pages;
17172        LPFC_MBOXQ_t *mbox;
17173        int rc;
17174        uint32_t shdr_status, shdr_add_status;
17175        uint32_t mbox_tmo;
17176        union lpfc_sli4_cfg_shdr *shdr;
17177
17178        if (xritag == NO_XRI) {
17179                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17180                                "0364 Invalid param:\n");
17181                return -EINVAL;
17182        }
17183
17184        mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
17185        if (!mbox)
17186                return -ENOMEM;
17187
17188        lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
17189                        LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES,
17190                        sizeof(struct lpfc_mbx_post_sgl_pages) -
17191                        sizeof(struct lpfc_sli4_cfg_mhdr), LPFC_SLI4_MBX_EMBED);
17192
17193        post_sgl_pages = (struct lpfc_mbx_post_sgl_pages *)
17194                                &mbox->u.mqe.un.post_sgl_pages;
17195        bf_set(lpfc_post_sgl_pages_xri, post_sgl_pages, xritag);
17196        bf_set(lpfc_post_sgl_pages_xricnt, post_sgl_pages, 1);
17197
17198        post_sgl_pages->sgl_pg_pairs[0].sgl_pg0_addr_lo =
17199                                cpu_to_le32(putPaddrLow(pdma_phys_addr0));
17200        post_sgl_pages->sgl_pg_pairs[0].sgl_pg0_addr_hi =
17201                                cpu_to_le32(putPaddrHigh(pdma_phys_addr0));
17202
17203        post_sgl_pages->sgl_pg_pairs[0].sgl_pg1_addr_lo =
17204                                cpu_to_le32(putPaddrLow(pdma_phys_addr1));
17205        post_sgl_pages->sgl_pg_pairs[0].sgl_pg1_addr_hi =
17206                                cpu_to_le32(putPaddrHigh(pdma_phys_addr1));
17207        if (!phba->sli4_hba.intr_enable)
17208                rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
17209        else {
17210                mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
17211                rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
17212        }
17213        /* The IOCTL status is embedded in the mailbox subheader. */
17214        shdr = (union lpfc_sli4_cfg_shdr *) &post_sgl_pages->header.cfg_shdr;
17215        shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
17216        shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
17217        if (!phba->sli4_hba.intr_enable)
17218                mempool_free(mbox, phba->mbox_mem_pool);
17219        else if (rc != MBX_TIMEOUT)
17220                mempool_free(mbox, phba->mbox_mem_pool);
17221        if (shdr_status || shdr_add_status || rc) {
17222                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17223                                "2511 POST_SGL mailbox failed with "
17224                                "status x%x add_status x%x, mbx status x%x\n",
17225                                shdr_status, shdr_add_status, rc);
17226                return -ENXIO;
17227        }
17228        return 0;
17228}
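
/*
 * Illustrative sketch (not compiled into the driver): posting a single
 * SGL page for an xri per the rules documented above.  With fewer than
 * 256 scatter gather entries only the first page is used, so the second
 * physical address is passed as 0:
 *
 *      // sglq->phys must be 64 byte aligned
 *      rc = lpfc_sli4_post_sgl(phba, sglq->phys, 0, sglq->sli4_xritag);
 */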
17229
17230/**
17231 * lpfc_sli4_alloc_xri - Get an available xri in the device's range
17232 * @phba: pointer to lpfc hba data structure.
17233 *
17234 * This routine is invoked to allocate the next available logical xri
17235 * from the driver's xri bitmask, consistent with the SLI-4 interface
17236 * spec.  On success the xri is marked in-use and the used count is
17237 * incremented before returning.
17238 *
17239 * Returns
17240 *      A logical xri in the range 0 <= xri < max_xri if successful.
17241 *      NO_XRI if no xris are available.
17242 **/
17243static uint16_t
17244lpfc_sli4_alloc_xri(struct lpfc_hba *phba)
17245{
17246        unsigned long xri;
17247
17248        /*
17249         * Fetch the next logical xri.  Because this index is logical,
17250         * the driver starts at 0 each time.
17251         */
17252        spin_lock_irq(&phba->hbalock);
17253        xri = find_next_zero_bit(phba->sli4_hba.xri_bmask,
17254                                 phba->sli4_hba.max_cfg_param.max_xri, 0);
17255        if (xri >= phba->sli4_hba.max_cfg_param.max_xri) {
17256                spin_unlock_irq(&phba->hbalock);
17257                return NO_XRI;
17258        } else {
17259                set_bit(xri, phba->sli4_hba.xri_bmask);
17260                phba->sli4_hba.max_cfg_param.xri_used++;
17261        }
17262        spin_unlock_irq(&phba->hbalock);
17263        return xri;
17264}
17265
17266/**
17267 * __lpfc_sli4_free_xri - Release an xri for reuse.
17268 * @phba: pointer to lpfc hba data structure.
17269 * @xri: xri to release.
17270 *
17271 * This routine is invoked to release an xri back to the pool of
17272 * available xris maintained by the driver. The hbalock must be held.
17273 **/
17274static void
17275__lpfc_sli4_free_xri(struct lpfc_hba *phba, int xri)
17276{
17277        if (test_and_clear_bit(xri, phba->sli4_hba.xri_bmask)) {
17278                phba->sli4_hba.max_cfg_param.xri_used--;
17279        }
17280}
17281
17282/**
17283 * lpfc_sli4_free_xri - Release an xri for reuse.
17284 * @phba: pointer to lpfc hba data structure.
17285 * @xri: xri to release.
17286 *
17287 * This routine is invoked to release an xri back to the pool of
17288 * available xris maintained by the driver.
17289 **/
17290void
17291lpfc_sli4_free_xri(struct lpfc_hba *phba, int xri)
17292{
17293        spin_lock_irq(&phba->hbalock);
17294        __lpfc_sli4_free_xri(phba, xri);
17295        spin_unlock_irq(&phba->hbalock);
17296}
17297
17298/**
17299 * lpfc_sli4_next_xritag - Get an xritag for the io
17300 * @phba: Pointer to HBA context object.
17301 *
17302 * This function gets an xritag for the iocb. The function returns the
17303 * allocated xritag if successful, else it logs a warning and returns
17304 * NO_XRI (0xffff), which is not a valid xritag.
17305 * The caller is not required to hold any lock.
17307 **/
17308uint16_t
17309lpfc_sli4_next_xritag(struct lpfc_hba *phba)
17310{
17311        uint16_t xri_index;
17312
17313        xri_index = lpfc_sli4_alloc_xri(phba);
17314        if (xri_index == NO_XRI)
17315                lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
17316                                "2004 Failed to allocate XRI. Last XRITAG is %d"
17317                                " Max XRI is %d, Used XRI is %d\n",
17318                                xri_index,
17319                                phba->sli4_hba.max_cfg_param.max_xri,
17320                                phba->sli4_hba.max_cfg_param.xri_used);
17321        return xri_index;
17322}
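
/*
 * Illustrative sketch (not compiled into the driver): the xri tag
 * lifecycle using the helpers above.
 *
 *      uint16_t xri = lpfc_sli4_next_xritag(phba);
 *
 *      if (xri == NO_XRI)
 *              return -ENOMEM; // pool exhausted, warning already logged
 *      ...
 *      lpfc_sli4_free_xri(phba, xri);  // return the tag for reuse
 */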
17323
17324/**
17325 * lpfc_sli4_post_sgl_list - post a block of ELS sgls to the port.
17326 * @phba: pointer to lpfc hba data structure.
17327 * @post_sgl_list: pointer to els sgl entry list.
17328 * @post_cnt: number of els sgl entries on the list.
17329 *
17330 * This routine is invoked to post a block of the driver's sgl pages to the
17331 * HBA using a non-embedded mailbox command. No Lock is held. This routine
17332 * is only called when the driver is loading and after all IO has been
17333 * stopped.
17334 **/
17335static int
17336lpfc_sli4_post_sgl_list(struct lpfc_hba *phba,
17337                            struct list_head *post_sgl_list,
17338                            int post_cnt)
17339{
17340        struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
17341        struct lpfc_mbx_post_uembed_sgl_page1 *sgl;
17342        struct sgl_page_pairs *sgl_pg_pairs;
17343        void *viraddr;
17344        LPFC_MBOXQ_t *mbox;
17345        uint32_t reqlen, alloclen, pg_pairs;
17346        uint32_t mbox_tmo;
17347        uint16_t xritag_start = 0;
17348        int rc = 0;
17349        uint32_t shdr_status, shdr_add_status;
17350        union lpfc_sli4_cfg_shdr *shdr;
17351
17352        reqlen = post_cnt * sizeof(struct sgl_page_pairs) +
17353                 sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
17354        if (reqlen > SLI4_PAGE_SIZE) {
17355                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17356                                "2559 Block sgl registration required DMA "
17357                                "size (%d) greater than a page\n", reqlen);
17358                return -ENOMEM;
17359        }
17360
17361        mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
17362        if (!mbox)
17363                return -ENOMEM;
17364
17365        /* Allocate DMA memory and set up the non-embedded mailbox command */
17366        alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
17367                         LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES, reqlen,
17368                         LPFC_SLI4_MBX_NEMBED);
17369
17370        if (alloclen < reqlen) {
17371                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17372                                "0285 Allocated DMA memory size (%d) is "
17373                                "less than the requested DMA memory "
17374                                "size (%d)\n", alloclen, reqlen);
17375                lpfc_sli4_mbox_cmd_free(phba, mbox);
17376                return -ENOMEM;
17377        }
17378        /* Set up the SGL pages in the non-embedded DMA pages */
17379        viraddr = mbox->sge_array->addr[0];
17380        sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr;
17381        sgl_pg_pairs = &sgl->sgl_pg_pairs;
17382
17383        pg_pairs = 0;
17384        list_for_each_entry_safe(sglq_entry, sglq_next, post_sgl_list, list) {
17385                /* Set up the sge entry */
17386                sgl_pg_pairs->sgl_pg0_addr_lo =
17387                                cpu_to_le32(putPaddrLow(sglq_entry->phys));
17388                sgl_pg_pairs->sgl_pg0_addr_hi =
17389                                cpu_to_le32(putPaddrHigh(sglq_entry->phys));
17390                sgl_pg_pairs->sgl_pg1_addr_lo =
17391                                cpu_to_le32(putPaddrLow(0));
17392                sgl_pg_pairs->sgl_pg1_addr_hi =
17393                                cpu_to_le32(putPaddrHigh(0));
17394
17395                /* Keep the first xritag on the list */
17396                if (pg_pairs == 0)
17397                        xritag_start = sglq_entry->sli4_xritag;
17398                sgl_pg_pairs++;
17399                pg_pairs++;
17400        }
17401
17402        /* Complete initialization and perform endian conversion. */
17403        bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start);
17404        bf_set(lpfc_post_sgl_pages_xricnt, sgl, post_cnt);
17405        sgl->word0 = cpu_to_le32(sgl->word0);
17406
17407        if (!phba->sli4_hba.intr_enable)
17408                rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
17409        else {
17410                mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
17411                rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
17412        }
17413        shdr = (union lpfc_sli4_cfg_shdr *) &sgl->cfg_shdr;
17414        shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
17415        shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
17416        if (!phba->sli4_hba.intr_enable)
17417                lpfc_sli4_mbox_cmd_free(phba, mbox);
17418        else if (rc != MBX_TIMEOUT)
17419                lpfc_sli4_mbox_cmd_free(phba, mbox);
17420        if (shdr_status || shdr_add_status || rc) {
17421                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17422                                "2513 POST_SGL_BLOCK mailbox command failed "
17423                                "status x%x add_status x%x mbx status x%x\n",
17424                                shdr_status, shdr_add_status, rc);
17425                rc = -ENXIO;
17426        }
17427        return rc;
17428}
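
/*
 * Sizing note for the routine above: reqlen must fit within one SLI4 page,
 * so the largest post_cnt a single non-embedded POST_SGL_PAGES command can
 * carry is bounded by
 *
 *      post_cnt <= (SLI4_PAGE_SIZE - sizeof(union lpfc_sli4_cfg_shdr)
 *                   - sizeof(uint32_t)) / sizeof(struct sgl_page_pairs)
 *
 * Callers are expected to split larger sgl lists into blocks no bigger
 * than this before posting.
 */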
17429
17430/**
17431 * lpfc_sli4_post_io_sgl_block - post a block of nvme sgl list to firmware
17432 * @phba: pointer to lpfc hba data structure.
17433 * @nblist: pointer to an io buffer list.
17434 * @count: number of io buffers on the list.
17435 *
17436 * This routine is invoked to post a block of @count io buffer sgl pages from
17437 * the buffer list @nblist to the HBA using a non-embedded mailbox command.
17438 * No Lock is held.
17439 *
17440 **/
17441static int
17442lpfc_sli4_post_io_sgl_block(struct lpfc_hba *phba, struct list_head *nblist,
17443                            int count)
17444{
17445        struct lpfc_io_buf *lpfc_ncmd;
17446        struct lpfc_mbx_post_uembed_sgl_page1 *sgl;
17447        struct sgl_page_pairs *sgl_pg_pairs;
17448        void *viraddr;
17449        LPFC_MBOXQ_t *mbox;
17450        uint32_t reqlen, alloclen, pg_pairs;
17451        uint32_t mbox_tmo;
17452        uint16_t xritag_start = 0;
17453        int rc = 0;
17454        uint32_t shdr_status, shdr_add_status;
17455        dma_addr_t pdma_phys_bpl1;
17456        union lpfc_sli4_cfg_shdr *shdr;
17457
17458        /* Calculate the requested length of the dma memory */
17459        reqlen = count * sizeof(struct sgl_page_pairs) +
17460                 sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
17461        if (reqlen > SLI4_PAGE_SIZE) {
17462                lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
17463                                "6118 Block sgl registration required DMA "
17464                                "size (%d) greater than a page\n", reqlen);
17465                return -ENOMEM;
17466        }
17467        mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
17468        if (!mbox) {
17469                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17470                                "6119 Failed to allocate mbox cmd memory\n");
17471                return -ENOMEM;
17472        }
17473
17474        /* Allocate DMA memory and set up the non-embedded mailbox command */
17475        alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
17476                                    LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES,
17477                                    reqlen, LPFC_SLI4_MBX_NEMBED);
17478
17479        if (alloclen < reqlen) {
17480                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17481                                "6120 Allocated DMA memory size (%d) is "
17482                                "less than the requested DMA memory "
17483                                "size (%d)\n", alloclen, reqlen);
17484                lpfc_sli4_mbox_cmd_free(phba, mbox);
17485                return -ENOMEM;
17486        }
17487
17488        /* Get the first SGE entry from the non-embedded DMA memory */
17489        viraddr = mbox->sge_array->addr[0];
17490
17491        /* Set up the SGL pages in the non-embedded DMA pages */
17492        sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr;
17493        sgl_pg_pairs = &sgl->sgl_pg_pairs;
17494
17495        pg_pairs = 0;
17496        list_for_each_entry(lpfc_ncmd, nblist, list) {
17497                /* Set up the sge entry */
17498                sgl_pg_pairs->sgl_pg0_addr_lo =
17499                        cpu_to_le32(putPaddrLow(lpfc_ncmd->dma_phys_sgl));
17500                sgl_pg_pairs->sgl_pg0_addr_hi =
17501                        cpu_to_le32(putPaddrHigh(lpfc_ncmd->dma_phys_sgl));
17502                if (phba->cfg_sg_dma_buf_size > SGL_PAGE_SIZE)
17503                        pdma_phys_bpl1 = lpfc_ncmd->dma_phys_sgl +
17504                                                SGL_PAGE_SIZE;
17505                else
17506                        pdma_phys_bpl1 = 0;
17507                sgl_pg_pairs->sgl_pg1_addr_lo =
17508                        cpu_to_le32(putPaddrLow(pdma_phys_bpl1));
17509                sgl_pg_pairs->sgl_pg1_addr_hi =
17510                        cpu_to_le32(putPaddrHigh(pdma_phys_bpl1));
17511                /* Keep the first xritag on the list */
17512                if (pg_pairs == 0)
17513                        xritag_start = lpfc_ncmd->cur_iocbq.sli4_xritag;
17514                sgl_pg_pairs++;
17515                pg_pairs++;
17516        }
17517        bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start);
17518        bf_set(lpfc_post_sgl_pages_xricnt, sgl, pg_pairs);
17519        /* Perform endian conversion if necessary */
17520        sgl->word0 = cpu_to_le32(sgl->word0);
17521
17522        if (!phba->sli4_hba.intr_enable) {
17523                rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
17524        } else {
17525                mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
17526                rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
17527        }
17528        shdr = (union lpfc_sli4_cfg_shdr *)&sgl->cfg_shdr;
17529        shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
17530        shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
17531        if (!phba->sli4_hba.intr_enable)
17532                lpfc_sli4_mbox_cmd_free(phba, mbox);
17533        else if (rc != MBX_TIMEOUT)
17534                lpfc_sli4_mbox_cmd_free(phba, mbox);
17535        if (shdr_status || shdr_add_status || rc) {
17536                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17537                                "6125 POST_SGL_BLOCK mailbox command failed "
17538                                "status x%x add_status x%x mbx status x%x\n",
17539                                shdr_status, shdr_add_status, rc);
17540                rc = -ENXIO;
17541        }
17542        return rc;
17543}
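
/*
 * Illustrative sketch (hypothetical helper): when an IO buffer's SGL spans
 * more than one SGL_PAGE_SIZE page, the second page-pair address is simply
 * the first physical address plus SGL_PAGE_SIZE, as in the posting loop
 * above; otherwise page 1 is posted as zero.
 */
static inline dma_addr_t lpfc_sketch_sgl_pg1(struct lpfc_hba *phba,
					     dma_addr_t pg0)
{
	return (phba->cfg_sg_dma_buf_size > SGL_PAGE_SIZE) ?
	       pg0 + SGL_PAGE_SIZE : 0;
}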
17544
17545/**
17546 * lpfc_sli4_post_io_sgl_list - Post blocks of nvme buffer sgls from a list
17547 * @phba: pointer to lpfc hba data structure.
17548 * @post_nblist: pointer to the nvme buffer list.
17549 * @sb_count: number of nvme buffers.
17550 *
17551 * This routine walks the list of nvme buffers that was passed in. It attempts
17552 * to construct blocks of nvme buffer sgls which contain contiguous xris and
17553 * uses the non-embedded SGL block post mailbox commands to post to the port.
17554 * For a single NVME buffer sgl with a non-contiguous xri, if any, it shall use
17555 * the embedded SGL post mailbox command for posting. The @post_nblist passed
17556 * in must be a local list, thus no lock is needed when manipulating the list.
17557 *
17558 * Returns: 0 on failure, otherwise the number of successfully posted buffers.
17559 **/
17560int
17561lpfc_sli4_post_io_sgl_list(struct lpfc_hba *phba,
17562                           struct list_head *post_nblist, int sb_count)
17563{
17564        struct lpfc_io_buf *lpfc_ncmd, *lpfc_ncmd_next;
17565        int status, sgl_size;
17566        int post_cnt = 0, block_cnt = 0, num_posting = 0, num_posted = 0;
17567        dma_addr_t pdma_phys_sgl1;
17568        int last_xritag = NO_XRI;
17569        int cur_xritag;
17570        LIST_HEAD(prep_nblist);
17571        LIST_HEAD(blck_nblist);
17572        LIST_HEAD(nvme_nblist);
17573
17574        /* sanity check */
17575        if (sb_count <= 0)
17576                return -EINVAL;
17577
17578        sgl_size = phba->cfg_sg_dma_buf_size;
17579        list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next, post_nblist, list) {
17580                list_del_init(&lpfc_ncmd->list);
17581                block_cnt++;
17582                if ((last_xritag != NO_XRI) &&
17583                    (lpfc_ncmd->cur_iocbq.sli4_xritag != last_xritag + 1)) {
17584                        /* a hole in xri block, form a sgl posting block */
17585                        list_splice_init(&prep_nblist, &blck_nblist);
17586                        post_cnt = block_cnt - 1;
17587                        /* prepare list for next posting block */
17588                        list_add_tail(&lpfc_ncmd->list, &prep_nblist);
17589                        block_cnt = 1;
17590                } else {
17591                        /* prepare list for next posting block */
17592                        list_add_tail(&lpfc_ncmd->list, &prep_nblist);
17593                        /* enough sgls for non-embed sgl mbox command */
17594                        if (block_cnt == LPFC_NEMBED_MBOX_SGL_CNT) {
17595                                list_splice_init(&prep_nblist, &blck_nblist);
17596                                post_cnt = block_cnt;
17597                                block_cnt = 0;
17598                        }
17599                }
17600                num_posting++;
17601                last_xritag = lpfc_ncmd->cur_iocbq.sli4_xritag;
17602
17603                /* end of repost sgl list condition for NVME buffers */
17604                if (num_posting == sb_count) {
17605                        if (post_cnt == 0) {
17606                                /* last sgl posting block */
17607                                list_splice_init(&prep_nblist, &blck_nblist);
17608                                post_cnt = block_cnt;
17609                        } else if (block_cnt == 1) {
17610                                /* last single sgl with non-contiguous xri */
17611                                if (sgl_size > SGL_PAGE_SIZE)
17612                                        pdma_phys_sgl1 =
17613                                                lpfc_ncmd->dma_phys_sgl +
17614                                                SGL_PAGE_SIZE;
17615                                else
17616                                        pdma_phys_sgl1 = 0;
17617                                cur_xritag = lpfc_ncmd->cur_iocbq.sli4_xritag;
17618                                status = lpfc_sli4_post_sgl(
17619                                                phba, lpfc_ncmd->dma_phys_sgl,
17620                                                pdma_phys_sgl1, cur_xritag);
17621                                if (status) {
17622                                        /* Post error.  Buffer unavailable. */
17623                                        lpfc_ncmd->flags |=
17624                                                LPFC_SBUF_NOT_POSTED;
17625                                } else {
17626                                        /* Post success. Buffer available. */
17627                                        lpfc_ncmd->flags &=
17628                                                ~LPFC_SBUF_NOT_POSTED;
17629                                        lpfc_ncmd->status = IOSTAT_SUCCESS;
17630                                        num_posted++;
17631                                }
17632                                /* posted or not, put on NVME buffer sgl list */
17633                                list_add_tail(&lpfc_ncmd->list, &nvme_nblist);
17634                        }
17635                }
17636
17637                /* continue until a nembed page worth of sgls */
17638                if (post_cnt == 0)
17639                        continue;
17640
17641                /* post block of NVME buffer list sgls */
17642                status = lpfc_sli4_post_io_sgl_block(phba, &blck_nblist,
17643                                                     post_cnt);
17644
17645                /* don't reset xritag due to hole in xri block */
17646                if (block_cnt == 0)
17647                        last_xritag = NO_XRI;
17648
17649                /* reset NVME buffer post count for next round of posting */
17650                post_cnt = 0;
17651
17652                /* put posted NVME buffers on the NVME buffer sgl list */
17653                while (!list_empty(&blck_nblist)) {
17654                        list_remove_head(&blck_nblist, lpfc_ncmd,
17655                                         struct lpfc_io_buf, list);
17656                        if (status) {
17657                                /* Post error.  Mark buffer unavailable. */
17658                                lpfc_ncmd->flags |= LPFC_SBUF_NOT_POSTED;
17659                        } else {
17660                                /* Post success, Mark buffer available. */
17661                                lpfc_ncmd->flags &= ~LPFC_SBUF_NOT_POSTED;
17662                                lpfc_ncmd->status = IOSTAT_SUCCESS;
17663                                num_posted++;
17664                        }
17665                        list_add_tail(&lpfc_ncmd->list, &nvme_nblist);
17666                }
17667        }
17668        /* Push NVME buffers with sgl posted to the available list */
17669        lpfc_io_buf_replenish(phba, &nvme_nblist);
17670
17671        return num_posted;
17672}
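
/*
 * Illustrative usage sketch (hypothetical caller, not part of the driver):
 * @bufs must be a local list of lpfc_io_buf entries, and the return value
 * of lpfc_sli4_post_io_sgl_list() is the number of buffers whose SGLs the
 * port accepted; zero therefore means total failure.
 */
static inline int lpfc_sketch_repost_bufs(struct lpfc_hba *phba,
					  struct list_head *bufs, int cnt)
{
	int posted = lpfc_sli4_post_io_sgl_list(phba, bufs, cnt);

	return posted ? 0 : -EIO;
}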
17673
17674/**
17675 * lpfc_fc_frame_check - Check that this frame is a valid frame to handle
17676 * @phba: pointer to lpfc_hba struct that the frame was received on
17677 * @fc_hdr: A pointer to the FC Header data (In Big Endian Format)
17678 *
17679 * This function checks the fields in the @fc_hdr to see if the FC frame is a
17680 * valid type of frame that the LPFC driver will handle. This function will
17681 * return a zero if the frame is a valid frame or a non zero value when the
17682 * frame does not pass the check.
17683 **/
17684static int
17685lpfc_fc_frame_check(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr)
17686{
17688        struct fc_vft_header *fc_vft_hdr;
17689        uint32_t *header = (uint32_t *) fc_hdr;
17690
17691#define FC_RCTL_MDS_DIAGS       0xF4
17692
17693        switch (fc_hdr->fh_r_ctl) {
17694        case FC_RCTL_DD_UNCAT:          /* uncategorized information */
17695        case FC_RCTL_DD_SOL_DATA:       /* solicited data */
17696        case FC_RCTL_DD_UNSOL_CTL:      /* unsolicited control */
17697        case FC_RCTL_DD_SOL_CTL:        /* solicited control or reply */
17698        case FC_RCTL_DD_UNSOL_DATA:     /* unsolicited data */
17699        case FC_RCTL_DD_DATA_DESC:      /* data descriptor */
17700        case FC_RCTL_DD_UNSOL_CMD:      /* unsolicited command */
17701        case FC_RCTL_DD_CMD_STATUS:     /* command status */
17702        case FC_RCTL_ELS_REQ:   /* extended link services request */
17703        case FC_RCTL_ELS_REP:   /* extended link services reply */
17704        case FC_RCTL_ELS4_REQ:  /* FC-4 ELS request */
17705        case FC_RCTL_ELS4_REP:  /* FC-4 ELS reply */
17706        case FC_RCTL_BA_NOP:    /* basic link service NOP */
17707        case FC_RCTL_BA_ABTS:   /* basic link service abort */
17708        case FC_RCTL_BA_RMC:    /* remove connection */
17709        case FC_RCTL_BA_ACC:    /* basic accept */
17710        case FC_RCTL_BA_RJT:    /* basic reject */
17711        case FC_RCTL_BA_PRMT:
17712        case FC_RCTL_ACK_1:     /* acknowledge_1 */
17713        case FC_RCTL_ACK_0:     /* acknowledge_0 */
17714        case FC_RCTL_P_RJT:     /* port reject */
17715        case FC_RCTL_F_RJT:     /* fabric reject */
17716        case FC_RCTL_P_BSY:     /* port busy */
17717        case FC_RCTL_F_BSY:     /* fabric busy to data frame */
17718        case FC_RCTL_F_BSYL:    /* fabric busy to link control frame */
17719        case FC_RCTL_LCR:       /* link credit reset */
17720        case FC_RCTL_MDS_DIAGS: /* MDS Diagnostics */
17721        case FC_RCTL_END:       /* end */
17722                break;
17723        case FC_RCTL_VFTH:      /* Virtual Fabric tagging Header */
17724                fc_vft_hdr = (struct fc_vft_header *)fc_hdr;
17725                fc_hdr = &((struct fc_frame_header *)fc_vft_hdr)[1];
17726                return lpfc_fc_frame_check(phba, fc_hdr);
17727        default:
17728                goto drop;
17729        }
17730
17731        switch (fc_hdr->fh_type) {
17732        case FC_TYPE_BLS:
17733        case FC_TYPE_ELS:
17734        case FC_TYPE_FCP:
17735        case FC_TYPE_CT:
17736        case FC_TYPE_NVME:
17737                break;
17738        case FC_TYPE_IP:
17739        case FC_TYPE_ILS:
17740        default:
17741                goto drop;
17742        }
17743
17744        lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
17745                        "2538 Received frame rctl:x%x, type:x%x, "
17746                        "frame Data:%08x %08x %08x %08x %08x %08x %08x\n",
17747                        fc_hdr->fh_r_ctl, fc_hdr->fh_type,
17748                        be32_to_cpu(header[0]), be32_to_cpu(header[1]),
17749                        be32_to_cpu(header[2]), be32_to_cpu(header[3]),
17750                        be32_to_cpu(header[4]), be32_to_cpu(header[5]),
17751                        be32_to_cpu(header[6]));
17752        return 0;
17753drop:
17754        lpfc_printf_log(phba, KERN_WARNING, LOG_ELS,
17755                        "2539 Dropped frame rctl:x%x type:x%x\n",
17756                        fc_hdr->fh_r_ctl, fc_hdr->fh_type);
17757        return 1;
17758}
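
/*
 * Illustrative sketch (hypothetical helper): for a VFT-tagged frame the
 * encapsulated FC header begins immediately after the fc_vft_header, which
 * is what the recursive check in lpfc_fc_frame_check() relies on.
 */
static inline struct fc_frame_header *
lpfc_sketch_inner_fc_hdr(struct fc_vft_header *fc_vft_hdr)
{
	return &((struct fc_frame_header *)fc_vft_hdr)[1];
}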
17759
17760/**
17761 * lpfc_fc_hdr_get_vfi - Get the VFI from an FC frame
17762 * @fc_hdr: A pointer to the FC Header data (In Big Endian Format)
17763 *
17764 * This function processes the FC header to retrieve the VFI from the VF
17765 * header, if one exists. This function will return the VFI if one exists
17766 * or 0 if no VSAN Header exists.
17767 **/
17768static uint32_t
17769lpfc_fc_hdr_get_vfi(struct fc_frame_header *fc_hdr)
17770{
17771        struct fc_vft_header *fc_vft_hdr = (struct fc_vft_header *)fc_hdr;
17772
17773        if (fc_hdr->fh_r_ctl != FC_RCTL_VFTH)
17774                return 0;
17775        return bf_get(fc_vft_hdr_vf_id, fc_vft_hdr);
17776}
17777
17778/**
17779 * lpfc_fc_frame_to_vport - Finds the vport that a frame is destined to
17780 * @phba: Pointer to the HBA structure to search for the vport on
17781 * @fc_hdr: A pointer to the FC Header data (In Big Endian Format)
17782 * @fcfi: The FC Fabric ID that the frame came from
17783 * @did: Destination ID to match against
17784 *
17785 * This function searches the @phba for a vport that matches the content of the
17786 * @fc_hdr passed in and the @fcfi. This function uses the @fc_hdr to fetch the
17787 * VFI, if the Virtual Fabric Tagging Header exists, and the DID. This function
17788 * returns the matching vport pointer or NULL if unable to match frame to a
17789 * vport.
17790 **/
17791static struct lpfc_vport *
17792lpfc_fc_frame_to_vport(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr,
17793                       uint16_t fcfi, uint32_t did)
17794{
17795        struct lpfc_vport **vports;
17796        struct lpfc_vport *vport = NULL;
17797        int i;
17798
17799        if (did == Fabric_DID)
17800                return phba->pport;
17801        if ((phba->pport->fc_flag & FC_PT2PT) &&
17802                !(phba->link_state == LPFC_HBA_READY))
17803                return phba->pport;
17804
17805        vports = lpfc_create_vport_work_array(phba);
17806        if (vports != NULL) {
17807                for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
17808                        if (phba->fcf.fcfi == fcfi &&
17809                            vports[i]->vfi == lpfc_fc_hdr_get_vfi(fc_hdr) &&
17810                            vports[i]->fc_myDID == did) {
17811                                vport = vports[i];
17812                                break;
17813                        }
17814                }
17815        }
17816        lpfc_destroy_vport_work_array(phba, vports);
17817        return vport;
17818}
17819
17820/**
17821 * lpfc_update_rcv_time_stamp - Update vport's rcv seq time stamp
17822 * @vport: The vport to work on.
17823 *
17824 * This function updates the receive sequence time stamp for this vport. The
17825 * receive sequence time stamp indicates the time that the last frame of the
17826 * sequence that has been idle for the longest amount of time was received.
17827 * The driver uses this time stamp to determine if any received sequences have
17828 * timed out.
17829 **/
17830static void
17831lpfc_update_rcv_time_stamp(struct lpfc_vport *vport)
17832{
17833        struct lpfc_dmabuf *h_buf;
17834        struct hbq_dmabuf *dmabuf = NULL;
17835
17836        /* get the oldest sequence on the rcv list */
17837        h_buf = list_get_first(&vport->rcv_buffer_list,
17838                               struct lpfc_dmabuf, list);
17839        if (!h_buf)
17840                return;
17841        dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
17842        vport->rcv_buffer_time_stamp = dmabuf->time_stamp;
17843}
17844
17845/**
17846 * lpfc_cleanup_rcv_buffers - Cleans up all outstanding receive sequences.
17847 * @vport: The vport that the received sequences were sent to.
17848 *
17849 * This function cleans up all outstanding received sequences. This is called
17850 * by the driver when a link event or user action invalidates all the received
17851 * sequences.
17852 **/
17853void
17854lpfc_cleanup_rcv_buffers(struct lpfc_vport *vport)
17855{
17856        struct lpfc_dmabuf *h_buf, *hnext;
17857        struct lpfc_dmabuf *d_buf, *dnext;
17858        struct hbq_dmabuf *dmabuf = NULL;
17859
17860        /* start with the oldest sequence on the rcv list */
17861        list_for_each_entry_safe(h_buf, hnext, &vport->rcv_buffer_list, list) {
17862                dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
17863                list_del_init(&dmabuf->hbuf.list);
17864                list_for_each_entry_safe(d_buf, dnext,
17865                                         &dmabuf->dbuf.list, list) {
17866                        list_del_init(&d_buf->list);
17867                        lpfc_in_buf_free(vport->phba, d_buf);
17868                }
17869                lpfc_in_buf_free(vport->phba, &dmabuf->dbuf);
17870        }
17871}
17872
17873/**
17874 * lpfc_rcv_seq_check_edtov - Cleans up timed out receive sequences.
17875 * @vport: The vport that the received sequences were sent to.
17876 *
17877 * This function determines whether any received sequences have timed out by
17878 * first checking the vport's rcv_buffer_time_stamp. If this time_stamp
17879 * indicates that there is at least one timed out sequence this routine will
17880 * go through the received sequences one at a time from most inactive to most
17881 * active to determine which ones need to be cleaned up. Once it has determined
17882 * that a sequence needs to be cleaned up it will simply free up the resources
17883 * without sending an abort.
17884 **/
17885void
17886lpfc_rcv_seq_check_edtov(struct lpfc_vport *vport)
17887{
17888        struct lpfc_dmabuf *h_buf, *hnext;
17889        struct lpfc_dmabuf *d_buf, *dnext;
17890        struct hbq_dmabuf *dmabuf = NULL;
17891        unsigned long timeout;
17892        int abort_count = 0;
17893
17894        timeout = (msecs_to_jiffies(vport->phba->fc_edtov) +
17895                   vport->rcv_buffer_time_stamp);
17896        if (list_empty(&vport->rcv_buffer_list) ||
17897            time_before(jiffies, timeout))
17898                return;
17899        /* start with the oldest sequence on the rcv list */
17900        list_for_each_entry_safe(h_buf, hnext, &vport->rcv_buffer_list, list) {
17901                dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
17902                timeout = (msecs_to_jiffies(vport->phba->fc_edtov) +
17903                           dmabuf->time_stamp);
17904                if (time_before(jiffies, timeout))
17905                        break;
17906                abort_count++;
17907                list_del_init(&dmabuf->hbuf.list);
17908                list_for_each_entry_safe(d_buf, dnext,
17909                                         &dmabuf->dbuf.list, list) {
17910                        list_del_init(&d_buf->list);
17911                        lpfc_in_buf_free(vport->phba, d_buf);
17912                }
17913                lpfc_in_buf_free(vport->phba, &dmabuf->dbuf);
17914        }
17915        if (abort_count)
17916                lpfc_update_rcv_time_stamp(vport);
17917}
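
/*
 * Illustrative sketch (hypothetical helper): a receive sequence is treated
 * as timed out once E_D_TOV milliseconds, converted to jiffies, have
 * elapsed since its time stamp, matching the time_before() tests above.
 */
static inline bool lpfc_sketch_seq_expired(struct lpfc_vport *vport,
					   unsigned long time_stamp)
{
	return time_after_eq(jiffies, time_stamp +
			     msecs_to_jiffies(vport->phba->fc_edtov));
}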
17918
17919/**
17920 * lpfc_fc_frame_add - Adds a frame to the vport's list of received sequences
17921 * @vport: pointer to a virtual port
17922 * @dmabuf: pointer to a dmabuf that describes the hdr and data of the FC frame
17923 *
17924 * This function searches through the existing incomplete sequences that have
17925 * been sent to this @vport. If the frame matches one of the incomplete
17926 * sequences then the dbuf in the @dmabuf is added to the list of frames that
17927 * make up that sequence. If no sequence is found that matches this frame then
17928 * the function will add the hbuf in the @dmabuf to the @vport's rcv_buffer_list.
17929 * This function returns a pointer to the first dmabuf in the sequence list that
17930 * the frame was linked to.
17931 **/
17932static struct hbq_dmabuf *
17933lpfc_fc_frame_add(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf)
17934{
17935        struct fc_frame_header *new_hdr;
17936        struct fc_frame_header *temp_hdr;
17937        struct lpfc_dmabuf *d_buf;
17938        struct lpfc_dmabuf *h_buf;
17939        struct hbq_dmabuf *seq_dmabuf = NULL;
17940        struct hbq_dmabuf *temp_dmabuf = NULL;
17941        uint8_t found = 0;
17942
17943        INIT_LIST_HEAD(&dmabuf->dbuf.list);
17944        dmabuf->time_stamp = jiffies;
17945        new_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
17946
17947        /* Use the hdr_buf to find the sequence that this frame belongs to */
17948        list_for_each_entry(h_buf, &vport->rcv_buffer_list, list) {
17949                temp_hdr = (struct fc_frame_header *)h_buf->virt;
17950                if ((temp_hdr->fh_seq_id != new_hdr->fh_seq_id) ||
17951                    (temp_hdr->fh_ox_id != new_hdr->fh_ox_id) ||
17952                    (memcmp(&temp_hdr->fh_s_id, &new_hdr->fh_s_id, 3)))
17953                        continue;
17954                /* found a pending sequence that matches this frame */
17955                seq_dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
17956                break;
17957        }
17958        if (!seq_dmabuf) {
17959                /*
17960                 * This indicates first frame received for this sequence.
17961                 * Queue the buffer on the vport's rcv_buffer_list.
17962                 */
17963                list_add_tail(&dmabuf->hbuf.list, &vport->rcv_buffer_list);
17964                lpfc_update_rcv_time_stamp(vport);
17965                return dmabuf;
17966        }
17967        temp_hdr = seq_dmabuf->hbuf.virt;
17968        if (be16_to_cpu(new_hdr->fh_seq_cnt) <
17969                be16_to_cpu(temp_hdr->fh_seq_cnt)) {
17970                list_del_init(&seq_dmabuf->hbuf.list);
17971                list_add_tail(&dmabuf->hbuf.list, &vport->rcv_buffer_list);
17972                list_add_tail(&dmabuf->dbuf.list, &seq_dmabuf->dbuf.list);
17973                lpfc_update_rcv_time_stamp(vport);
17974                return dmabuf;
17975        }
17976        /* move this sequence to the tail to indicate a young sequence */
17977        list_move_tail(&seq_dmabuf->hbuf.list, &vport->rcv_buffer_list);
17978        seq_dmabuf->time_stamp = jiffies;
17979        lpfc_update_rcv_time_stamp(vport);
17980        if (list_empty(&seq_dmabuf->dbuf.list)) {
17981                list_add_tail(&dmabuf->dbuf.list, &seq_dmabuf->dbuf.list);
17982                return seq_dmabuf;
17983        }
17984        /* find the correct place in the sequence to insert this frame */
17985        d_buf = list_entry(seq_dmabuf->dbuf.list.prev, typeof(*d_buf), list);
17986        while (!found) {
17987                temp_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
17988                temp_hdr = (struct fc_frame_header *)temp_dmabuf->hbuf.virt;
17989                /*
17990                 * If the frame's sequence count is greater than the frame on
17991                 * the list then insert the frame right after this frame
17992                 */
17993                if (be16_to_cpu(new_hdr->fh_seq_cnt) >
17994                        be16_to_cpu(temp_hdr->fh_seq_cnt)) {
17995                        list_add(&dmabuf->dbuf.list, &temp_dmabuf->dbuf.list);
17996                        found = 1;
17997                        break;
17998                }
17999
18000                if (&d_buf->list == &seq_dmabuf->dbuf.list)
18001                        break;
18002                d_buf = list_entry(d_buf->list.prev, typeof(*d_buf), list);
18003        }
18004
18005        if (found)
18006                return seq_dmabuf;
18007        return NULL;
18008}
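
/*
 * Illustrative sketch (hypothetical helper): two frames belong to the same
 * sequence when their SEQ_ID, OX_ID and 3-byte S_ID all match; this is the
 * key comparison used by the lookup loops in lpfc_fc_frame_add() and
 * lpfc_sli4_abort_partial_seq().
 */
static inline bool lpfc_sketch_same_seq(struct fc_frame_header *a,
					struct fc_frame_header *b)
{
	return a->fh_seq_id == b->fh_seq_id &&
	       a->fh_ox_id == b->fh_ox_id &&
	       !memcmp(&a->fh_s_id, &b->fh_s_id, 3);
}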
18009
18010/**
18011 * lpfc_sli4_abort_partial_seq - Abort partially assembled unsol sequence
18012 * @vport: pointer to a virtual port
18013 * @dmabuf: pointer to a dmabuf that describes the FC sequence
18014 *
18015 * This function tries to abort the partially assembled sequence described
18016 * by the information from the basic abort @dmabuf. It checks whether such a
18017 * partially assembled sequence is held by the driver. If so, it shall free up
18018 * all the frames from the partially assembled sequence.
18019 *
18020 * Return
18021 * true  -- if there is a matching partially assembled sequence present and
18022 *          all the frames were freed with the sequence;
18023 * false -- if there is no matching partially assembled sequence present, so
18024 *          nothing got aborted in the lower layer driver
18025 **/
18026static bool
18027lpfc_sli4_abort_partial_seq(struct lpfc_vport *vport,
18028                            struct hbq_dmabuf *dmabuf)
18029{
18030        struct fc_frame_header *new_hdr;
18031        struct fc_frame_header *temp_hdr;
18032        struct lpfc_dmabuf *d_buf, *n_buf, *h_buf;
18033        struct hbq_dmabuf *seq_dmabuf = NULL;
18034
18035        /* Use the hdr_buf to find the sequence that matches this frame */
18036        INIT_LIST_HEAD(&dmabuf->dbuf.list);
18037        INIT_LIST_HEAD(&dmabuf->hbuf.list);
18038        new_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
18039        list_for_each_entry(h_buf, &vport->rcv_buffer_list, list) {
18040                temp_hdr = (struct fc_frame_header *)h_buf->virt;
18041                if ((temp_hdr->fh_seq_id != new_hdr->fh_seq_id) ||
18042                    (temp_hdr->fh_ox_id != new_hdr->fh_ox_id) ||
18043                    (memcmp(&temp_hdr->fh_s_id, &new_hdr->fh_s_id, 3)))
18044                        continue;
18045                /* found a pending sequence that matches this frame */
18046                seq_dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
18047                break;
18048        }
18049
18050        /* Free up all the frames from the partially assembled sequence */
18051        if (seq_dmabuf) {
18052                list_for_each_entry_safe(d_buf, n_buf,
18053                                         &seq_dmabuf->dbuf.list, list) {
18054                        list_del_init(&d_buf->list);
18055                        lpfc_in_buf_free(vport->phba, d_buf);
18056                }
18057                return true;
18058        }
18059        return false;
18060}
18061
18062/**
18063 * lpfc_sli4_abort_ulp_seq - Abort assembled unsol sequence from ulp
18064 * @vport: pointer to a virtual port
18065 * @dmabuf: pointer to a dmabuf that describes the FC sequence
18066 *
18067 * This function tries to abort the assembled sequence at the upper level
18068 * protocol, described by the information from the basic abort @dmabuf. It
18069 * checks whether such a pending context exists at the upper level protocol.
18070 * If so, it shall clean up the pending context.
18071 *
18072 * Return
18073 * true  -- if there is a matching pending context of the sequence that was
18074 *          cleaned up at ulp;
18075 * false -- if there is no matching pending context of the sequence present
18076 *          at ulp.
18077 **/
18078static bool
18079lpfc_sli4_abort_ulp_seq(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf)
18080{
18081        struct lpfc_hba *phba = vport->phba;
18082        int handled;
18083
18084        /* Accepting abort at ulp with SLI4 only */
18085        if (phba->sli_rev < LPFC_SLI_REV4)
18086                return false;
18087
18088        /* Allow interested upper level protocols to handle the abort */
18089        handled = lpfc_ct_handle_unsol_abort(phba, dmabuf);
18090        if (handled)
18091                return true;
18092
18093        return false;
18094}
18095
18096/**
18097 * lpfc_sli4_seq_abort_rsp_cmpl - BLS ABORT RSP seq abort iocb complete handler
18098 * @phba: Pointer to HBA context object.
18099 * @cmd_iocbq: pointer to the command iocbq structure.
18100 * @rsp_iocbq: pointer to the response iocbq structure.
18101 *
18102 * This function handles the sequence abort response iocb command complete
18103 * event. It properly releases the memory allocated to the sequence abort
18104 * accept iocb.
18105 **/
18106static void
18107lpfc_sli4_seq_abort_rsp_cmpl(struct lpfc_hba *phba,
18108                             struct lpfc_iocbq *cmd_iocbq,
18109                             struct lpfc_iocbq *rsp_iocbq)
18110{
18111        struct lpfc_nodelist *ndlp;
18112
18113        if (cmd_iocbq) {
18114                ndlp = (struct lpfc_nodelist *)cmd_iocbq->context1;
18115                lpfc_nlp_put(ndlp);
18116                lpfc_sli_release_iocbq(phba, cmd_iocbq);
18117        }
18118
18119        /* Failure means BLS ABORT RSP did not get delivered to remote node */
18120        if (rsp_iocbq && rsp_iocbq->iocb.ulpStatus)
18121                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
18122                        "3154 BLS ABORT RSP failed, data:  x%x/x%x\n",
18123                        rsp_iocbq->iocb.ulpStatus,
18124                        rsp_iocbq->iocb.un.ulpWord[4]);
18125}
18126
18127/**
18128 * lpfc_sli4_xri_inrange - check xri is in range of xris owned by driver.
18129 * @phba: Pointer to HBA context object.
18130 * @xri: xri id in transaction.
18131 *
18132 * This function validates that the xri maps to the known range of XRIs
18133 * allocated and used by the driver.
18134 **/
18135uint16_t
18136lpfc_sli4_xri_inrange(struct lpfc_hba *phba,
18137                      uint16_t xri)
18138{
18139        uint16_t i;
18140
18141        for (i = 0; i < phba->sli4_hba.max_cfg_param.max_xri; i++) {
18142                if (xri == phba->sli4_hba.xri_ids[i])
18143                        return i;
18144        }
18145        return NO_XRI;
18146}
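
/*
 * Illustrative usage sketch (hypothetical helper): lpfc_sli4_xri_inrange()
 * returns the driver's logical index for a port xri, or NO_XRI when the
 * value is outside the range the driver owns.
 */
static inline bool lpfc_sketch_xri_is_ours(struct lpfc_hba *phba,
					   uint16_t xri)
{
	return lpfc_sli4_xri_inrange(phba, xri) != NO_XRI;
}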
18147
18148/**
18149 * lpfc_sli4_seq_abort_rsp - bls rsp to sequence abort
18150 * @vport: pointer to a virtual port.
18151 * @fc_hdr: pointer to a FC frame header.
18152 * @aborted: was the partially assembled receive sequence successfully aborted
18153 *
18154 * This function sends a basic response to a previous unsol sequence abort
18155 * event after aborting the sequence handling.
18156 **/
18157void
18158lpfc_sli4_seq_abort_rsp(struct lpfc_vport *vport,
18159                        struct fc_frame_header *fc_hdr, bool aborted)
18160{
18161        struct lpfc_hba *phba = vport->phba;
18162        struct lpfc_iocbq *ctiocb = NULL;
18163        struct lpfc_nodelist *ndlp;
18164        uint16_t oxid, rxid, xri, lxri;
18165        uint32_t sid, fctl;
18166        IOCB_t *icmd;
18167        int rc;
18168
18169        if (!lpfc_is_link_up(phba))
18170                return;
18171
18172        sid = sli4_sid_from_fc_hdr(fc_hdr);
18173        oxid = be16_to_cpu(fc_hdr->fh_ox_id);
18174        rxid = be16_to_cpu(fc_hdr->fh_rx_id);
18175
18176        ndlp = lpfc_findnode_did(vport, sid);
18177        if (!ndlp) {
18178                ndlp = lpfc_nlp_init(vport, sid);
18179                if (!ndlp) {
18180                        lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS,
18181                                         "1268 Failed to allocate ndlp for "
18182                                         "oxid:x%x SID:x%x\n", oxid, sid);
18183                        return;
18184                }
18185                /* Put ndlp onto pport node list */
18186                lpfc_enqueue_node(vport, ndlp);
18187        }
18188
18189        /* Allocate buffer for rsp iocb */
18190        ctiocb = lpfc_sli_get_iocbq(phba);
18191        if (!ctiocb)
18192                return;
18193
18194        /* Extract the F_CTL field from FC_HDR */
18195        fctl = sli4_fctl_from_fc_hdr(fc_hdr);
18196
18197        icmd = &ctiocb->iocb;
18198        icmd->un.xseq64.bdl.bdeSize = 0;
18199        icmd->un.xseq64.bdl.ulpIoTag32 = 0;
18200        icmd->un.xseq64.w5.hcsw.Dfctl = 0;
18201        icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_ACC;
18202        icmd->un.xseq64.w5.hcsw.Type = FC_TYPE_BLS;
18203
18204        /* Fill in the rest of iocb fields */
18205        icmd->ulpCommand = CMD_XMIT_BLS_RSP64_CX;
18206        icmd->ulpBdeCount = 0;
18207        icmd->ulpLe = 1;
18208        icmd->ulpClass = CLASS3;
18209        icmd->ulpContext = phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
18210        ctiocb->context1 = lpfc_nlp_get(ndlp);
18211        if (!ctiocb->context1) {
18212                lpfc_sli_release_iocbq(phba, ctiocb);
18213                return;
18214        }
18215
18216        ctiocb->vport = phba->pport;
18217        ctiocb->iocb_cmpl = lpfc_sli4_seq_abort_rsp_cmpl;
18218        ctiocb->sli4_lxritag = NO_XRI;
18219        ctiocb->sli4_xritag = NO_XRI;
18220
18221        if (fctl & FC_FC_EX_CTX)
18222                /* Exchange responder sent the abort so we
18223                 * own the oxid.
18224                 */
18225                xri = oxid;
18226        else
18227                xri = rxid;
18228        lxri = lpfc_sli4_xri_inrange(phba, xri);
18229        if (lxri != NO_XRI)
18230                lpfc_set_rrq_active(phba, ndlp, lxri,
18231                        (xri == oxid) ? rxid : oxid, 0);
18232        /* For BA_ABTS from exchange responder, if the logical xri with
18233         * the oxid maps to the FCP XRI range, the port no longer has
18234         * that exchange context, send a BLS_RJT. Override the IOCB for
18235         * a BA_RJT.
18236         */
18237        if ((fctl & FC_FC_EX_CTX) &&
18238            (lxri > lpfc_sli4_get_iocb_cnt(phba))) {
18239                icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_RJT;
18240                bf_set(lpfc_vndr_code, &icmd->un.bls_rsp, 0);
18241                bf_set(lpfc_rsn_expln, &icmd->un.bls_rsp, FC_BA_RJT_INV_XID);
18242                bf_set(lpfc_rsn_code, &icmd->un.bls_rsp, FC_BA_RJT_UNABLE);
18243        }
18244
18245        /* If BA_ABTS failed to abort a partially assembled receive sequence,
18246         * the driver no longer has that exchange, send a BLS_RJT. Override
18247         * the IOCB for a BA_RJT.
18248         */
18249        if (aborted == false) {
18250                icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_RJT;
18251                bf_set(lpfc_vndr_code, &icmd->un.bls_rsp, 0);
18252                bf_set(lpfc_rsn_expln, &icmd->un.bls_rsp, FC_BA_RJT_INV_XID);
18253                bf_set(lpfc_rsn_code, &icmd->un.bls_rsp, FC_BA_RJT_UNABLE);
18254        }
18255
18256        if (fctl & FC_FC_EX_CTX) {
18257                /* ABTS sent by responder to CT exchange, construction
18258                 * of BA_ACC will use OX_ID from ABTS for the XRI_TAG
18259                 * field and RX_ID from ABTS for RX_ID field.
18260                 */
18261                bf_set(lpfc_abts_orig, &icmd->un.bls_rsp, LPFC_ABTS_UNSOL_RSP);
18262        } else {
18263                /* ABTS sent by initiator to CT exchange, construction
18264                 * of BA_ACC will need to allocate a new XRI as for the
18265                 * XRI_TAG field.
18266                 */
18267                bf_set(lpfc_abts_orig, &icmd->un.bls_rsp, LPFC_ABTS_UNSOL_INT);
18268        }
18269        bf_set(lpfc_abts_rxid, &icmd->un.bls_rsp, rxid);
18270        bf_set(lpfc_abts_oxid, &icmd->un.bls_rsp, oxid);
18271
18272        /* Xmit CT abts response on exchange <xid> */
18273        lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
18274                         "1200 Send BLS cmd x%x on oxid x%x Data: x%x\n",
18275                         icmd->un.xseq64.w5.hcsw.Rctl, oxid, phba->link_state);
18276
18277        rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, ctiocb, 0);
18278        if (rc == IOCB_ERROR) {
18279                lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
18280                                 "2925 Failed to issue CT ABTS RSP x%x on "
18281                                 "xri x%x, Data x%x\n",
18282                                 icmd->un.xseq64.w5.hcsw.Rctl, oxid,
18283                                 phba->link_state);
18284                lpfc_nlp_put(ndlp);
18285                ctiocb->context1 = NULL;
18286                lpfc_sli_release_iocbq(phba, ctiocb);
18287        }
18288}
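
/*
 * Illustrative sketch (hypothetical helper): for an ABTS, F_CTL tells us
 * which exchange id the local port owns; an abort sent by the exchange
 * responder (FC_FC_EX_CTX set) means we own the OX_ID, otherwise the
 * RX_ID, matching the xri selection in lpfc_sli4_seq_abort_rsp().
 */
static inline uint16_t lpfc_sketch_abts_xri(uint32_t fctl, uint16_t oxid,
					    uint16_t rxid)
{
	return (fctl & FC_FC_EX_CTX) ? oxid : rxid;
}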
18289
18290/**
18291 * lpfc_sli4_handle_unsol_abort - Handle sli-4 unsolicited abort event
18292 * @vport: Pointer to the vport on which this sequence was received
18293 * @dmabuf: pointer to a dmabuf that describes the FC sequence
18294 *
18295 * This function handles an SLI-4 unsolicited abort event. If the unsolicited
18296 * receive sequence is only partially assembled by the driver, it shall abort
18297 * the partially assembled frames for the sequence. Otherwise, if the
18298 * unsolicited receive sequence has been completely assembled and passed to
18299 * the Upper Layer Protocol (ULP), it then marks the per-oxid status of the
18300 * unsolicited sequence as aborted. After that, it will issue a basic
18301 * accept to accept the abort.
18302 **/
18303static void
18304lpfc_sli4_handle_unsol_abort(struct lpfc_vport *vport,
18305                             struct hbq_dmabuf *dmabuf)
18306{
18307        struct lpfc_hba *phba = vport->phba;
18308        struct fc_frame_header fc_hdr;
18309        uint32_t fctl;
18310        bool aborted;
18311
18312        /* Make a copy of fc_hdr before the dmabuf being released */
18313        memcpy(&fc_hdr, dmabuf->hbuf.virt, sizeof(struct fc_frame_header));
18314        fctl = sli4_fctl_from_fc_hdr(&fc_hdr);
18315
18316        if (fctl & FC_FC_EX_CTX) {
18317                /* ABTS by responder to exchange, no cleanup needed */
18318                aborted = true;
18319        } else {
18320                /* ABTS by initiator to exchange, need to do cleanup */
18321                aborted = lpfc_sli4_abort_partial_seq(vport, dmabuf);
18322                if (aborted == false)
18323                        aborted = lpfc_sli4_abort_ulp_seq(vport, dmabuf);
18324        }
18325        lpfc_in_buf_free(phba, &dmabuf->dbuf);
18326
18327        if (phba->nvmet_support) {
18328                lpfc_nvmet_rcv_unsol_abort(vport, &fc_hdr);
18329                return;
18330        }
18331
18332        /* Respond with BA_ACC or BA_RJT accordingly */
18333        lpfc_sli4_seq_abort_rsp(vport, &fc_hdr, aborted);
18334}
18335
18336/**
18337 * lpfc_seq_complete - Indicates if a sequence is complete
18338 * @dmabuf: pointer to a dmabuf that describes the FC sequence
18339 *
18340 * This function checks the sequence, starting with the frame described by
18341 * @dmabuf, to see if all the frames associated with this sequence are present.
18342 * The frames associated with this sequence are linked to the @dmabuf using the
18343 * dbuf list. This function looks for three major things: 1) that the first
18344 * frame has a sequence count of zero; 2) that there is a frame with the
18345 * last-frame-of-sequence bit set; 3) that there are no holes in the sequence
18346 * count. The function will return 1 when the sequence is complete, otherwise 0.
18347 **/
18348static int
18349lpfc_seq_complete(struct hbq_dmabuf *dmabuf)
18350{
18351        struct fc_frame_header *hdr;
18352        struct lpfc_dmabuf *d_buf;
18353        struct hbq_dmabuf *seq_dmabuf;
18354        uint32_t fctl;
18355        int seq_count = 0;
18356
18357        hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
18358        /* make sure first frame of sequence has a sequence count of zero */
18359        if (hdr->fh_seq_cnt != seq_count)
18360                return 0;
18361        fctl = (hdr->fh_f_ctl[0] << 16 |
18362                hdr->fh_f_ctl[1] << 8 |
18363                hdr->fh_f_ctl[2]);
18364        /* If last frame of sequence we can return success. */
18365        if (fctl & FC_FC_END_SEQ)
18366                return 1;
18367        list_for_each_entry(d_buf, &dmabuf->dbuf.list, list) {
18368                seq_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
18369                hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
18370                /* If there is a hole in the sequence count then fail. */
18371                if (++seq_count != be16_to_cpu(hdr->fh_seq_cnt))
18372                        return 0;
18373                fctl = (hdr->fh_f_ctl[0] << 16 |
18374                        hdr->fh_f_ctl[1] << 8 |
18375                        hdr->fh_f_ctl[2]);
18376                /* If last frame of sequence we can return success. */
18377                if (fctl & FC_FC_END_SEQ)
18378                        return 1;
18379        }
18380        return 0;
18381}
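
/*
 * Illustrative sketch (hypothetical helper): F_CTL is a 24-bit field
 * carried in three bytes of the FC header; assembling it with the shifts
 * below matches lpfc_seq_complete() above, after which bits such as
 * FC_FC_END_SEQ can be tested directly.
 */
static inline uint32_t lpfc_sketch_fctl(struct fc_frame_header *hdr)
{
	return hdr->fh_f_ctl[0] << 16 |
	       hdr->fh_f_ctl[1] << 8 |
	       hdr->fh_f_ctl[2];
}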
18382
18383/**
18384 * lpfc_prep_seq - Prep sequence for ULP processing
18385 * @vport: Pointer to the vport on which this sequence was received
18386 * @seq_dmabuf: pointer to a dmabuf that describes the FC sequence
18387 *
18388 * This function takes a sequence, described by a list of frames, and creates
18389 * a list of iocbq structures to describe the sequence. This iocbq list will be
18390 * used to issue to the generic unsolicited sequence handler. This routine
18391 * returns a pointer to the first iocbq in the list. If the function is unable
18392 * to allocate an iocbq then it throws out the received frames that were not
18393 * able to be described and returns a pointer to the first iocbq. If unable to
18394 * allocate any iocbqs (including the first) this function will return NULL.
18395 **/
18396static struct lpfc_iocbq *
18397lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf)
18398{
18399        struct hbq_dmabuf *hbq_buf;
18400        struct lpfc_dmabuf *d_buf, *n_buf;
18401        struct lpfc_iocbq *first_iocbq, *iocbq;
18402        struct fc_frame_header *fc_hdr;
18403        uint32_t sid;
18404        uint32_t len, tot_len;
18405        struct ulp_bde64 *pbde;
18406
18407        fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
18408        /* remove from receive buffer list */
18409        list_del_init(&seq_dmabuf->hbuf.list);
18410        lpfc_update_rcv_time_stamp(vport);
18411        /* get the Remote Port's SID */
18412        sid = sli4_sid_from_fc_hdr(fc_hdr);
18413        tot_len = 0;
18414        /* Get an iocbq struct to fill in. */
18415        first_iocbq = lpfc_sli_get_iocbq(vport->phba);
18416        if (first_iocbq) {
18417                /* Initialize the first IOCB. */
18418                first_iocbq->iocb.unsli3.rcvsli3.acc_len = 0;
18419                first_iocbq->iocb.ulpStatus = IOSTAT_SUCCESS;
18420                first_iocbq->vport = vport;
18421
18422                /* Check FC Header to see what TYPE of frame we are rcv'ing */
18423                if (sli4_type_from_fc_hdr(fc_hdr) == FC_TYPE_ELS) {
18424                        first_iocbq->iocb.ulpCommand = CMD_IOCB_RCV_ELS64_CX;
18425                        first_iocbq->iocb.un.rcvels.parmRo =
18426                                sli4_did_from_fc_hdr(fc_hdr);
18427                        first_iocbq->iocb.ulpPU = PARM_NPIV_DID;
18428                } else
18429                        first_iocbq->iocb.ulpCommand = CMD_IOCB_RCV_SEQ64_CX;
18430                first_iocbq->iocb.ulpContext = NO_XRI;
18431                first_iocbq->iocb.unsli3.rcvsli3.ox_id =
18432                        be16_to_cpu(fc_hdr->fh_ox_id);
18433                /* iocbq is prepped for internal consumption.  Physical vpi. */
18434                first_iocbq->iocb.unsli3.rcvsli3.vpi =
18435                        vport->phba->vpi_ids[vport->vpi];
18436                /* put the first buffer into the first IOCBq */
18437                tot_len = bf_get(lpfc_rcqe_length,
18438                                       &seq_dmabuf->cq_event.cqe.rcqe_cmpl);
18439
18440                first_iocbq->context2 = &seq_dmabuf->dbuf;
18441                first_iocbq->context3 = NULL;
18442                first_iocbq->iocb.ulpBdeCount = 1;
18443                if (tot_len > LPFC_DATA_BUF_SIZE)
18444                        first_iocbq->iocb.un.cont64[0].tus.f.bdeSize =
18445                                                        LPFC_DATA_BUF_SIZE;
18446                else
18447                        first_iocbq->iocb.un.cont64[0].tus.f.bdeSize = tot_len;
18448
18449                first_iocbq->iocb.un.rcvels.remoteID = sid;
18450
18451                first_iocbq->iocb.unsli3.rcvsli3.acc_len = tot_len;
18452        }
18453        iocbq = first_iocbq;
18454        /*
18455         * Each IOCBq can have two Buffers assigned, so go through the list
18456         * of buffers for this sequence and save two buffers in each IOCBq
18457         */
18458        list_for_each_entry_safe(d_buf, n_buf, &seq_dmabuf->dbuf.list, list) {
18459                if (!iocbq) {
18460                        lpfc_in_buf_free(vport->phba, d_buf);
18461                        continue;
18462                }
18463                if (!iocbq->context3) {
18464                        iocbq->context3 = d_buf;
18465                        iocbq->iocb.ulpBdeCount++;
18466                        /* We need to get the size out of the right CQE */
18467                        hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
18468                        len = bf_get(lpfc_rcqe_length,
18469                                       &hbq_buf->cq_event.cqe.rcqe_cmpl);
18470                        pbde = (struct ulp_bde64 *)
18471                                        &iocbq->iocb.unsli3.sli3Words[4];
18472                        if (len > LPFC_DATA_BUF_SIZE)
18473                                pbde->tus.f.bdeSize = LPFC_DATA_BUF_SIZE;
18474                        else
18475                                pbde->tus.f.bdeSize = len;
18476
18477                        iocbq->iocb.unsli3.rcvsli3.acc_len += len;
18478                        tot_len += len;
18479                } else {
18480                        iocbq = lpfc_sli_get_iocbq(vport->phba);
18481                        if (!iocbq) {
18482                                if (first_iocbq) {
18483                                        first_iocbq->iocb.ulpStatus =
18484                                                        IOSTAT_FCP_RSP_ERROR;
18485                                        first_iocbq->iocb.un.ulpWord[4] =
18486                                                        IOERR_NO_RESOURCES;
18487                                }
18488                                lpfc_in_buf_free(vport->phba, d_buf);
18489                                continue;
18490                        }
18491                        /* We need to get the size out of the right CQE */
18492                        hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
18493                        len = bf_get(lpfc_rcqe_length,
18494                                       &hbq_buf->cq_event.cqe.rcqe_cmpl);
18495                        iocbq->context2 = d_buf;
18496                        iocbq->context3 = NULL;
18497                        iocbq->iocb.ulpBdeCount = 1;
18498                        if (len > LPFC_DATA_BUF_SIZE)
18499                                iocbq->iocb.un.cont64[0].tus.f.bdeSize =
18500                                                        LPFC_DATA_BUF_SIZE;
18501                        else
18502                                iocbq->iocb.un.cont64[0].tus.f.bdeSize = len;
18503
18504                        tot_len += len;
18505                        iocbq->iocb.unsli3.rcvsli3.acc_len = tot_len;
18506
18507                        iocbq->iocb.un.rcvels.remoteID = sid;
18508                        list_add_tail(&iocbq->list, &first_iocbq->list);
18509                }
18510        }
18511        /* Free the sequence's header buffer */
18512        if (!first_iocbq)
18513                lpfc_in_buf_free(vport->phba, &seq_dmabuf->dbuf);
18514
18515        return first_iocbq;
18516}
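
/*
 * Illustrative sketch (hypothetical helper): each iocbq built by
 * lpfc_prep_seq() describes at most two receive buffers (context2 and
 * context3), so a sequence of n data buffers needs roughly this many
 * iocbq entries.
 */
static inline int lpfc_sketch_iocbq_needed(int nbufs)
{
	return DIV_ROUND_UP(nbufs, 2);
}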
18517
18518static void
18519lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *vport,
18520                          struct hbq_dmabuf *seq_dmabuf)
18521{
18522        struct fc_frame_header *fc_hdr;
18523        struct lpfc_iocbq *iocbq, *curr_iocb, *next_iocb;
18524        struct lpfc_hba *phba = vport->phba;
18525
18526        fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
18527        iocbq = lpfc_prep_seq(vport, seq_dmabuf);
18528        if (!iocbq) {
18529                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
18530                                "2707 Ring %d handler: Failed to allocate "
18531                                "iocb Rctl x%x Type x%x received\n",
18532                                LPFC_ELS_RING,
18533                                fc_hdr->fh_r_ctl, fc_hdr->fh_type);
18534                return;
18535        }
18536        if (!lpfc_complete_unsol_iocb(phba,
18537                                      phba->sli4_hba.els_wq->pring,
18538                                      iocbq, fc_hdr->fh_r_ctl,
18539                                      fc_hdr->fh_type))
18540                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
18541                                "2540 Ring %d handler: unexpected Rctl "
18542                                "x%x Type x%x received\n",
18543                                LPFC_ELS_RING,
18544                                fc_hdr->fh_r_ctl, fc_hdr->fh_type);
18545
18546        /* Free iocb created in lpfc_prep_seq */
18547        list_for_each_entry_safe(curr_iocb, next_iocb,
18548                &iocbq->list, list) {
18549                list_del_init(&curr_iocb->list);
18550                lpfc_sli_release_iocbq(phba, curr_iocb);
18551        }
18552        lpfc_sli_release_iocbq(phba, iocbq);
18553}
18554
18555static void
18556lpfc_sli4_mds_loopback_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
18557                            struct lpfc_iocbq *rspiocb)
18558{
18559        struct lpfc_dmabuf *pcmd = cmdiocb->context2;
18560
18561        if (pcmd && pcmd->virt)
18562                dma_pool_free(phba->lpfc_drb_pool, pcmd->virt, pcmd->phys);
18563        kfree(pcmd);
18564        lpfc_sli_release_iocbq(phba, cmdiocb);
18565        lpfc_drain_txq(phba);
18566}
18567
18568static void
18569lpfc_sli4_handle_mds_loopback(struct lpfc_vport *vport,
18570                              struct hbq_dmabuf *dmabuf)
18571{
18572        struct fc_frame_header *fc_hdr;
18573        struct lpfc_hba *phba = vport->phba;
18574        struct lpfc_iocbq *iocbq = NULL;
18575        union  lpfc_wqe *wqe;
18576        struct lpfc_dmabuf *pcmd = NULL;
18577        uint32_t frame_len;
18578        int rc;
18579        unsigned long iflags;
18580
18581        fc_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
18582        frame_len = bf_get(lpfc_rcqe_length, &dmabuf->cq_event.cqe.rcqe_cmpl);
18583
18584        /* Send the received frame back */
18585        iocbq = lpfc_sli_get_iocbq(phba);
18586        if (!iocbq) {
18587                /* Queue cq event and wakeup worker thread to process it */
18588                spin_lock_irqsave(&phba->hbalock, iflags);
18589                list_add_tail(&dmabuf->cq_event.list,
18590                              &phba->sli4_hba.sp_queue_event);
18591                phba->hba_flag |= HBA_SP_QUEUE_EVT;
18592                spin_unlock_irqrestore(&phba->hbalock, iflags);
18593                lpfc_worker_wake_up(phba);
18594                return;
18595        }
18596
18597        /* Allocate buffer for command payload */
18598        pcmd = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
18599        if (pcmd)
18600                pcmd->virt = dma_pool_alloc(phba->lpfc_drb_pool, GFP_KERNEL,
18601                                            &pcmd->phys);
18602        if (!pcmd || !pcmd->virt)
18603                goto exit;
18604
18605        INIT_LIST_HEAD(&pcmd->list);
18606
18607        /* copyin the payload */
18608        memcpy(pcmd->virt, dmabuf->dbuf.virt, frame_len);
18609
18610        /* fill in BDE's for command */
18611        iocbq->iocb.un.xseq64.bdl.addrHigh = putPaddrHigh(pcmd->phys);
18612        iocbq->iocb.un.xseq64.bdl.addrLow = putPaddrLow(pcmd->phys);
18613        iocbq->iocb.un.xseq64.bdl.bdeFlags = BUFF_TYPE_BDE_64;
18614        iocbq->iocb.un.xseq64.bdl.bdeSize = frame_len;
18615
18616        iocbq->context2 = pcmd;
18617        iocbq->vport = vport;
18618        iocbq->iocb_flag &= ~LPFC_FIP_ELS_ID_MASK;
18619        iocbq->iocb_flag |= LPFC_USE_FCPWQIDX;
18620
18621        /*
18622         * Setup rest of the iocb as though it were a WQE
18623         * Build the SEND_FRAME WQE
18624         */
18625        wqe = (union lpfc_wqe *)&iocbq->iocb;
18626
18627        wqe->send_frame.frame_len = frame_len;
18628        wqe->send_frame.fc_hdr_wd0 = be32_to_cpu(*((uint32_t *)fc_hdr));
18629        wqe->send_frame.fc_hdr_wd1 = be32_to_cpu(*((uint32_t *)fc_hdr + 1));
18630        wqe->send_frame.fc_hdr_wd2 = be32_to_cpu(*((uint32_t *)fc_hdr + 2));
18631        wqe->send_frame.fc_hdr_wd3 = be32_to_cpu(*((uint32_t *)fc_hdr + 3));
18632        wqe->send_frame.fc_hdr_wd4 = be32_to_cpu(*((uint32_t *)fc_hdr + 4));
18633        wqe->send_frame.fc_hdr_wd5 = be32_to_cpu(*((uint32_t *)fc_hdr + 5));
18634
18635        iocbq->iocb.ulpCommand = CMD_SEND_FRAME;
18636        iocbq->iocb.ulpLe = 1;
18637        iocbq->iocb_cmpl = lpfc_sli4_mds_loopback_cmpl;
18638        rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, iocbq, 0);
18639        if (rc == IOCB_ERROR)
18640                goto exit;
18641
18642        lpfc_in_buf_free(phba, &dmabuf->dbuf);
18643        return;
18644
18645exit:
18646        lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
18647                        "2023 Unable to process MDS loopback frame\n");
18648        if (pcmd && pcmd->virt)
18649                dma_pool_free(phba->lpfc_drb_pool, pcmd->virt, pcmd->phys);
18650        kfree(pcmd);
18651        if (iocbq)
18652                lpfc_sli_release_iocbq(phba, iocbq);
18653        lpfc_in_buf_free(phba, &dmabuf->dbuf);
18654}
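
/*
 * Illustrative aside (not upstream driver code): the six fc_hdr_wd*
 * stores above walk the 24-byte FC frame header as six big-endian
 * words.  The hypothetical helper below shows the equivalent loop,
 * assuming the fc_hdr_wd0..fc_hdr_wd5 fields are laid out contiguously.
 */
static inline void lpfc_example_pack_fc_hdr(union lpfc_wqe *wqe,
                                            struct fc_frame_header *fc_hdr)
{
        uint32_t *src = (uint32_t *)fc_hdr;
        uint32_t *dst = &wqe->send_frame.fc_hdr_wd0;
        int i;

        /* 24 bytes of FC header == 6 words, carried big-endian on the wire */
        for (i = 0; i < 6; i++)
                dst[i] = be32_to_cpu(src[i]);
}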
18655
18656/**
18657 * lpfc_sli4_handle_received_buffer - Handle received buffers from firmware
18658 * @phba: Pointer to HBA context object.
18659 * @dmabuf: Pointer to a dmabuf that describes the FC sequence.
18660 *
18661 * This function is called with no lock held. It processes each received
18662 * buffer and hands the assembled sequence to the upper layer when a
18663 * received buffer indicates that it is the final frame in the sequence.
18664 * The interrupt service routine processes received buffers in interrupt
18665 * context; the worker thread then calls lpfc_sli4_handle_received_buffer,
18666 * which invokes the appropriate receive function for the completed sequence.
18667 **/
18668void
18669lpfc_sli4_handle_received_buffer(struct lpfc_hba *phba,
18670                                 struct hbq_dmabuf *dmabuf)
18671{
18672        struct hbq_dmabuf *seq_dmabuf;
18673        struct fc_frame_header *fc_hdr;
18674        struct lpfc_vport *vport;
18675        uint32_t fcfi;
18676        uint32_t did;
18677
18678        /* Process each received buffer */
18679        fc_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
18680
18681        if (fc_hdr->fh_r_ctl == FC_RCTL_MDS_DIAGS ||
18682            fc_hdr->fh_r_ctl == FC_RCTL_DD_UNSOL_DATA) {
18683                vport = phba->pport;
18684                /* Handle MDS Loopback frames */
18685                if (!(phba->pport->load_flag & FC_UNLOADING))
18686                        lpfc_sli4_handle_mds_loopback(vport, dmabuf);
18687                else
18688                        lpfc_in_buf_free(phba, &dmabuf->dbuf);
18689                return;
18690        }
18691
18692        /* check to see if this a valid type of frame */
18693        if (lpfc_fc_frame_check(phba, fc_hdr)) {
18694                lpfc_in_buf_free(phba, &dmabuf->dbuf);
18695                return;
18696        }
18697
18698        if ((bf_get(lpfc_cqe_code,
18699                    &dmabuf->cq_event.cqe.rcqe_cmpl) == CQE_CODE_RECEIVE_V1))
18700                fcfi = bf_get(lpfc_rcqe_fcf_id_v1,
18701                              &dmabuf->cq_event.cqe.rcqe_cmpl);
18702        else
18703                fcfi = bf_get(lpfc_rcqe_fcf_id,
18704                              &dmabuf->cq_event.cqe.rcqe_cmpl);
18705
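        /*
         * R_CTL 0xF4 (FC_RCTL_MDS_DIAGS) with a vendor-unique type field
         * (0xFF) identifies an MDS diagnostic frame; it is simply echoed
         * back through lpfc_sli4_handle_mds_loopback().
         */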
18706        if (fc_hdr->fh_r_ctl == 0xF4 && fc_hdr->fh_type == 0xFF) {
18707                vport = phba->pport;
18708                lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
18709                                "2023 MDS Loopback %d bytes\n",
18710                                bf_get(lpfc_rcqe_length,
18711                                       &dmabuf->cq_event.cqe.rcqe_cmpl));
18712                /* Handle MDS Loopback frames */
18713                lpfc_sli4_handle_mds_loopback(vport, dmabuf);
18714                return;
18715        }
18716
18717        /* d_id this frame is directed to */
18718        did = sli4_did_from_fc_hdr(fc_hdr);
18719
18720        vport = lpfc_fc_frame_to_vport(phba, fc_hdr, fcfi, did);
18721        if (!vport) {
18722                /* throw out the frame */
18723                lpfc_in_buf_free(phba, &dmabuf->dbuf);
18724                return;
18725        }
18726
18727        /* vport is registered unless we rcv a FLOGI directed to Fabric_DID */
18728        if (!(vport->vpi_state & LPFC_VPI_REGISTERED) &&
18729                (did != Fabric_DID)) {
18730                /*
18731                 * Throw out the frame if we are not pt2pt.
18732                 * The pt2pt protocol allows for discovery frames
18733                 * to be received without a registered VPI.
18734                 */
18735                if (!(vport->fc_flag & FC_PT2PT) ||
18736                        (phba->link_state == LPFC_HBA_READY)) {
18737                        lpfc_in_buf_free(phba, &dmabuf->dbuf);
18738                        return;
18739                }
18740        }
18741
18742        /* Handle the basic abort sequence (BA_ABTS) event */
18743        if (fc_hdr->fh_r_ctl == FC_RCTL_BA_ABTS) {
18744                lpfc_sli4_handle_unsol_abort(vport, dmabuf);
18745                return;
18746        }
18747
18748        /* Link this frame */
18749        seq_dmabuf = lpfc_fc_frame_add(vport, dmabuf);
18750        if (!seq_dmabuf) {
18751                /* unable to add frame to vport - throw it out */
18752                lpfc_in_buf_free(phba, &dmabuf->dbuf);
18753                return;
18754        }
18755        /* If not last frame in sequence continue processing frames. */
18756        if (!lpfc_seq_complete(seq_dmabuf))
18757                return;
18758
18759        /* Send the complete sequence to the upper layer protocol */
18760        lpfc_sli4_send_seq_to_ulp(vport, seq_dmabuf);
18761}
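
/*
 * For reference, the dispatch order implemented above:
 *   1. MDS diagnostic frames are looped back (or freed when unloading)
 *      before any other checks.
 *   2. Malformed frames are dropped by lpfc_fc_frame_check().
 *   3. Frames with no matching vport, or for an unregistered VPI that is
 *      not pt2pt discovery, are dropped.
 *   4. BA_ABTS frames are handled as unsolicited aborts.
 *   5. Everything else is linked with lpfc_fc_frame_add() and handed to
 *      the ULP once lpfc_seq_complete() sees the final frame.
 */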
18762
18763/**
18764 * lpfc_sli4_post_all_rpi_hdrs - Post the rpi header memory region to the port
18765 * @phba: pointer to lpfc hba data structure.
18766 *
18767 * This routine is invoked to post rpi header templates to the
18768 * HBA consistent with the SLI-4 interface spec.  This routine
18769 * posts a SLI4_PAGE_SIZE memory region to the port; each such region
18770 * holds up to 64 rpi context headers.
18771 *
18772 * This routine does not require any locks.  Its usage is expected
18773 * to be driver load or reset recovery, when driver execution is
18774 * sequential.
18775 *
18776 * Return codes
18777 *      0 - successful
18778 *      -EIO - The mailbox failed to complete successfully.
18779 *      When this error occurs, the driver is not guaranteed
18780 *      to have any rpi regions posted to the device and
18781 *      must either attempt to repost the regions or take a
18782 *      fatal error.
18783 **/
18784int
18785lpfc_sli4_post_all_rpi_hdrs(struct lpfc_hba *phba)
18786{
18787        struct lpfc_rpi_hdr *rpi_page;
18788        uint32_t rc = 0;
18789        uint16_t lrpi = 0;
18790
18791        /* SLI4 ports that support extents do not require RPI headers. */
18792        if (!phba->sli4_hba.rpi_hdrs_in_use)
18793                goto exit;
18794        if (phba->sli4_hba.extents_in_use)
18795                return -EIO;
18796
18797        list_for_each_entry(rpi_page, &phba->sli4_hba.lpfc_rpi_hdr_list, list) {
18798                /*
18799                 * Assign the rpi headers a physical rpi only if the driver
18800                 * has not initialized those resources.  A port reset only
18801                 * needs the headers posted.
18802                 */
18803                if (bf_get(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags) !=
18804                    LPFC_RPI_RSRC_RDY)
18805                        rpi_page->start_rpi = phba->sli4_hba.rpi_ids[lrpi];
18806
18807                rc = lpfc_sli4_post_rpi_hdr(phba, rpi_page);
18808                if (rc != MBX_SUCCESS) {
18809                        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
18810                                        "2008 Error %d posting all rpi "
18811                                        "headers\n", rc);
18812                        rc = -EIO;
18813                        break;
18814                }
18815        }
18816
18817 exit:
18818        bf_set(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags,
18819               LPFC_RPI_RSRC_RDY);
18820        return rc;
18821}
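
/*
 * Sizing sketch (illustrative, not driver code): each posted header page
 * covers 64 rpi context headers, so a port exposing max_rpi rpis needs
 * roughly the page count computed below.  The helper name is hypothetical.
 */
static inline uint32_t lpfc_example_rpi_hdr_pages(uint32_t max_rpi)
{
        /* e.g. max_rpi == 1000 -> DIV_ROUND_UP(1000, 64) == 16 pages */
        return DIV_ROUND_UP(max_rpi, 64);
}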
18822
18823/**
18824 * lpfc_sli4_post_rpi_hdr - Post an rpi header memory region to the port
18825 * @phba: pointer to lpfc hba data structure.
18826 * @rpi_page:  pointer to the rpi memory region.
18827 *
18828 * This routine is invoked to post a single rpi header to the
18829 * HBA consistent with the SLI-4 interface spec.  This memory region
18830 * maps up to 64 rpi context regions.
18831 *
18832 * Return codes
18833 *      0 - successful
18834 *      -ENOMEM - No available memory
18835 *      -EIO - The mailbox failed to complete successfully.
18836 **/
18837int
18838lpfc_sli4_post_rpi_hdr(struct lpfc_hba *phba, struct lpfc_rpi_hdr *rpi_page)
18839{
18840        LPFC_MBOXQ_t *mboxq;
18841        struct lpfc_mbx_post_hdr_tmpl *hdr_tmpl;
18842        uint32_t rc = 0;
18843        uint32_t shdr_status, shdr_add_status;
18844        union lpfc_sli4_cfg_shdr *shdr;
18845
18846        /* SLI4 ports that support extents do not require RPI headers. */
18847        if (!phba->sli4_hba.rpi_hdrs_in_use)
18848                return rc;
18849        if (phba->sli4_hba.extents_in_use)
18850                return -EIO;
18851
18852        /* The port is notified of the header region via a mailbox command. */
18853        mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
18854        if (!mboxq) {
18855                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
18856                                "2001 Unable to allocate memory for issuing "
18857                                "SLI_CONFIG_SPECIAL mailbox command\n");
18858                return -ENOMEM;
18859        }
18860
18861        /* Set up the POST_HDR_TEMPLATE mailbox command for this region. */
18862        hdr_tmpl = &mboxq->u.mqe.un.hdr_tmpl;
18863        lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
18864                         LPFC_MBOX_OPCODE_FCOE_POST_HDR_TEMPLATE,
18865                         sizeof(struct lpfc_mbx_post_hdr_tmpl) -
18866                         sizeof(struct lpfc_sli4_cfg_mhdr),
18867                         LPFC_SLI4_MBX_EMBED);
18868
18869
18870        /* Post the physical rpi to the port for this rpi header. */
18871        bf_set(lpfc_mbx_post_hdr_tmpl_rpi_offset, hdr_tmpl,
18872               rpi_page->start_rpi);
18873        bf_set(lpfc_mbx_post_hdr_tmpl_page_cnt,
18874               hdr_tmpl, rpi_page->page_count);
18875
18876        hdr_tmpl->rpi_paddr_lo = putPaddrLow(rpi_page->dmabuf->phys);
18877        hdr_tmpl->rpi_paddr_hi = putPaddrHigh(rpi_page->dmabuf->phys);
18878        rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
18879        shdr = (union lpfc_sli4_cfg_shdr *) &hdr_tmpl->header.cfg_shdr;
18880        shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
18881        shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
18882        mempool_free(mboxq, phba->mbox_mem_pool);
18883        if (shdr_status || shdr_add_status || rc) {
18884                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
18885                                "2514 POST_RPI_HDR mailbox failed with "
18886                                "status x%x add_status x%x, mbx status x%x\n",
18887                                shdr_status, shdr_add_status, rc);
18888                rc = -ENXIO;
18889        } else {
18890                /*
18891                 * The next_rpi stores the next logical modulo-64 rpi value used
18892                 * to post physical rpis in subsequent rpi postings.
18893                 */
18894                spin_lock_irq(&phba->hbalock);
18895                phba->sli4_hba.next_rpi = rpi_page->next_rpi;
18896                spin_unlock_irq(&phba->hbalock);
18897        }
18898        return rc;
18899}
18900
18901/**
18902 * lpfc_sli4_alloc_rpi - Get an available rpi in the device's range
18903 * @phba: pointer to lpfc hba data structure.
18904 *
18905 * This routine is invoked to allocate the next available rpi from the
18906 * driver's rpi bitmask.  If rpi resources run low after the allocation,
18907 * another rpi header page is created and posted to the port to grow
18908 * the pool.
18909 *
18910 * Returns
18911 *      A nonzero rpi defined as rpi_base <= rpi < max_rpi if successful
18912 *      LPFC_RPI_ALLOC_ERROR if no rpis are available.
18913 **/
18914int
18915lpfc_sli4_alloc_rpi(struct lpfc_hba *phba)
18916{
18917        unsigned long rpi;
18918        uint16_t max_rpi, rpi_limit;
18919        uint16_t rpi_remaining, lrpi = 0;
18920        struct lpfc_rpi_hdr *rpi_hdr;
18921        unsigned long iflag;
18922
18923        /*
18924         * Fetch the next logical rpi.  Because this index is logical,
18925         * the  driver starts at 0 each time.
18926         */
18927        spin_lock_irqsave(&phba->hbalock, iflag);
18928        max_rpi = phba->sli4_hba.max_cfg_param.max_rpi;
18929        rpi_limit = phba->sli4_hba.next_rpi;
18930
18931        rpi = find_next_zero_bit(phba->sli4_hba.rpi_bmask, rpi_limit, 0);
18932        if (rpi >= rpi_limit)
18933                rpi = LPFC_RPI_ALLOC_ERROR;
18934        else {
18935                set_bit(rpi, phba->sli4_hba.rpi_bmask);
18936                phba->sli4_hba.max_cfg_param.rpi_used++;
18937                phba->sli4_hba.rpi_count++;
18938        }
18939        lpfc_printf_log(phba, KERN_INFO,
18940                        LOG_NODE | LOG_DISCOVERY,
18941                        "0001 Allocated rpi:x%x max:x%x lim:x%x\n",
18942                        (int) rpi, max_rpi, rpi_limit);
18943
18944        /*
18945         * Don't try to allocate more rpi header regions if the device limit
18946         * has been exhausted.
18947         */
18948        if ((rpi == LPFC_RPI_ALLOC_ERROR) &&
18949            (phba->sli4_hba.rpi_count >= max_rpi)) {
18950                spin_unlock_irqrestore(&phba->hbalock, iflag);
18951                return rpi;
18952        }
18953
18954        /*
18955         * RPI header postings are not required for SLI4 ports capable of
18956         * extents.
18957         */
18958        if (!phba->sli4_hba.rpi_hdrs_in_use) {
18959                spin_unlock_irqrestore(&phba->hbalock, iflag);
18960                return rpi;
18961        }
18962
18963        /*
18964         * If the driver is running low on rpi resources, allocate another
18965         * page now.  Note that the next_rpi value is used because
18966         * it represents how many are actually in use whereas max_rpi notes
18967         * the maximum number supported by the device.
18968         */
18969        rpi_remaining = phba->sli4_hba.next_rpi - phba->sli4_hba.rpi_count;
18970        spin_unlock_irqrestore(&phba->hbalock, iflag);
18971        if (rpi_remaining < LPFC_RPI_LOW_WATER_MARK) {
18972                rpi_hdr = lpfc_sli4_create_rpi_hdr(phba);
18973                if (!rpi_hdr) {
18974                        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
18975                                        "2002 Error Could not grow rpi "
18976                                        "count\n");
18977                } else {
18978                        lrpi = rpi_hdr->start_rpi;
18979                        rpi_hdr->start_rpi = phba->sli4_hba.rpi_ids[lrpi];
18980                        lpfc_sli4_post_rpi_hdr(phba, rpi_hdr);
18981                }
18982        }
18983
18984        return rpi;
18985}
18986
18987/**
18988 * __lpfc_sli4_free_rpi - Release an rpi for reuse.
18989 * @phba: pointer to lpfc hba data structure.
18990 * @rpi: rpi to free
18991 *
18992 * This routine is invoked to release an rpi to the pool of
18993 * available rpis maintained by the driver.
18994 **/
18995static void
18996__lpfc_sli4_free_rpi(struct lpfc_hba *phba, int rpi)
18997{
18998        /*
18999         * if the rpi value indicates a prior unreg has already
19000         * been done, skip the unreg.
19001         */
19002        if (rpi == LPFC_RPI_ALLOC_ERROR)
19003                return;
19004
19005        if (test_and_clear_bit(rpi, phba->sli4_hba.rpi_bmask)) {
19006                phba->sli4_hba.rpi_count--;
19007                phba->sli4_hba.max_cfg_param.rpi_used--;
19008        } else {
19009                lpfc_printf_log(phba, KERN_INFO,
19010                                LOG_NODE | LOG_DISCOVERY,
19011                                "2016 rpi %x not inuse\n",
19012                                rpi);
19013        }
19014}
19015
19016/**
19017 * lpfc_sli4_free_rpi - Release an rpi for reuse.
19018 * @phba: pointer to lpfc hba data structure.
19019 * @rpi: rpi to free
19020 *
19021 * This routine is invoked to release an rpi to the pool of
19022 * available rpis maintained by the driver.
19023 **/
19024void
19025lpfc_sli4_free_rpi(struct lpfc_hba *phba, int rpi)
19026{
19027        spin_lock_irq(&phba->hbalock);
19028        __lpfc_sli4_free_rpi(phba, rpi);
19029        spin_unlock_irq(&phba->hbalock);
19030}
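
/*
 * Usage sketch (illustrative only): callers treat LPFC_RPI_ALLOC_ERROR as
 * the failure sentinel and release the rpi through the locked wrapper.
 * The function below is hypothetical.
 */
static int lpfc_example_rpi_user(struct lpfc_hba *phba)
{
        int rpi = lpfc_sli4_alloc_rpi(phba);

        if (rpi == LPFC_RPI_ALLOC_ERROR)
                return -ENOSPC; /* no rpi available */

        /* ... register the rpi with the port and use it ... */

        lpfc_sli4_free_rpi(phba, rpi);  /* takes hbalock internally */
        return 0;
}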
19031
19032/**
19033 * lpfc_sli4_remove_rpis - Remove the rpi bitmask region
19034 * @phba: pointer to lpfc hba data structure.
19035 *
19036 * This routine is invoked to free the rpi bitmask and rpi ID array
19037 * that track rpi allocations for the port.
19038 **/
19039void
19040lpfc_sli4_remove_rpis(struct lpfc_hba *phba)
19041{
19042        kfree(phba->sli4_hba.rpi_bmask);
19043        kfree(phba->sli4_hba.rpi_ids);
19044        bf_set(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
19045}
19046
19047/**
19048 * lpfc_sli4_resume_rpi - Resume an rpi with the port
19049 * @ndlp: pointer to lpfc nodelist data structure.
19050 * @cmpl: completion call-back.
19051 * @arg: data to load as MBox 'caller buffer information'
19052 *
19053 * This routine is invoked to issue a RESUME_RPI mailbox command to the
19054 * port to resume traffic for the rpi associated with @ndlp.
19055 **/
19056int
19057lpfc_sli4_resume_rpi(struct lpfc_nodelist *ndlp,
19058        void (*cmpl)(struct lpfc_hba *, LPFC_MBOXQ_t *), void *arg)
19059{
19060        LPFC_MBOXQ_t *mboxq;
19061        struct lpfc_hba *phba = ndlp->phba;
19062        int rc;
19063
19064        /* The port is notified to resume the rpi via a mailbox command. */
19065        mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
19066        if (!mboxq)
19067                return -ENOMEM;
19068
19069        /* If cmpl assigned, then this nlp_get pairs with
19070         * lpfc_mbx_cmpl_resume_rpi.
19071         *
19072         * Else cmpl is NULL, then this nlp_get pairs with
19073         * lpfc_sli_def_mbox_cmpl.
19074         */
19075        if (!lpfc_nlp_get(ndlp)) {
19076                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
19077                                "2122 %s: Failed to get nlp ref\n",
19078                                __func__);
19079                mempool_free(mboxq, phba->mbox_mem_pool);
19080                return -EIO;
19081        }
19082
19083        /* Construct the RESUME_RPI mailbox command. */
19084        lpfc_resume_rpi(mboxq, ndlp);
19085        if (cmpl) {
19086                mboxq->mbox_cmpl = cmpl;
19087                mboxq->ctx_buf = arg;
19088        } else
19089                mboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
19090        mboxq->ctx_ndlp = ndlp;
19091        mboxq->vport = ndlp->vport;
19092        rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
19093        if (rc == MBX_NOT_FINISHED) {
19094                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
19095                                "2010 Resume RPI Mailbox failed "
19096                                "status %d, mbxStatus x%x\n", rc,
19097                                bf_get(lpfc_mqe_status, &mboxq->u.mqe));
19098                lpfc_nlp_put(ndlp);
19099                mempool_free(mboxq, phba->mbox_mem_pool);
19100                return -EIO;
19101        }
19102        return 0;
19103}
19104
19105/**
19106 * lpfc_sli4_init_vpi - Initialize a vpi with the port
19107 * @vport: Pointer to the vport for which the vpi is being initialized
19108 *
19109 * This routine is invoked to activate a vpi with the port.
19110 *
19111 * Returns:
19112 *    0 success
19113 *    negative errno value otherwise
19114 **/
19115int
19116lpfc_sli4_init_vpi(struct lpfc_vport *vport)
19117{
19118        LPFC_MBOXQ_t *mboxq;
19119        int rc = 0;
19120        int retval = MBX_SUCCESS;
19121        uint32_t mbox_tmo;
19122        struct lpfc_hba *phba = vport->phba;
19123        mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
19124        if (!mboxq)
19125                return -ENOMEM;
19126        lpfc_init_vpi(phba, mboxq, vport->vpi);
19127        mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq);
19128        rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
19129        if (rc != MBX_SUCCESS) {
19130                lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
19131                                "2022 INIT VPI Mailbox failed "
19132                                "status %d, mbxStatus x%x\n", rc,
19133                                bf_get(lpfc_mqe_status, &mboxq->u.mqe));
19134                retval = -EIO;
19135        }
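        /*
         * On MBX_TIMEOUT the mailbox is presumed still in flight and will
         * be reclaimed by the completion path, so it must not be freed
         * here; any other status means the command is done.
         */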
19136        if (rc != MBX_TIMEOUT)
19137                mempool_free(mboxq, vport->phba->mbox_mem_pool);
19138
19139        return retval;
19140}
19141
19142/**
19143 * lpfc_mbx_cmpl_add_fcf_record - add fcf mbox completion handler.
19144 * @phba: pointer to lpfc hba data structure.
19145 * @mboxq: Pointer to mailbox object.
19146 *
19147 * This routine is the completion handler for the ADD_FCF_RECORD mailbox
19148 * command.  It checks the status in the mailbox subheader and frees the
19149 * nonembedded mailbox resources.
19150 **/
19151static void
19152lpfc_mbx_cmpl_add_fcf_record(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
19153{
19154        void *virt_addr;
19155        union lpfc_sli4_cfg_shdr *shdr;
19156        uint32_t shdr_status, shdr_add_status;
19157
19158        virt_addr = mboxq->sge_array->addr[0];
19159        /* The IOCTL status is embedded in the mailbox subheader. */
19160        shdr = (union lpfc_sli4_cfg_shdr *) virt_addr;
19161        shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
19162        shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
19163
19164        if ((shdr_status || shdr_add_status) &&
19165                (shdr_status != STATUS_FCF_IN_USE))
19166                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
19167                        "2558 ADD_FCF_RECORD mailbox failed with "
19168                        "status x%x add_status x%x\n",
19169                        shdr_status, shdr_add_status);
19170
19171        lpfc_sli4_mbox_cmd_free(phba, mboxq);
19172}
19173
19174/**
19175 * lpfc_sli4_add_fcf_record - Manually add an FCF Record.
19176 * @phba: pointer to lpfc hba data structure.
19177 * @fcf_record:  pointer to the initialized fcf record to add.
19178 *
19179 * This routine is invoked to manually add a single FCF record. The caller
19180 * must pass a completely initialized FCF_Record.  This routine takes
19181 * care of the nonembedded mailbox operations.
19182 **/
19183int
19184lpfc_sli4_add_fcf_record(struct lpfc_hba *phba, struct fcf_record *fcf_record)
19185{
19186        int rc = 0;
19187        LPFC_MBOXQ_t *mboxq;
19188        uint8_t *bytep;
19189        void *virt_addr;
19190        struct lpfc_mbx_sge sge;
19191        uint32_t alloc_len, req_len;
19192        uint32_t fcfindex;
19193
19194        mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
19195        if (!mboxq) {
19196                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
19197                        "2009 Failed to allocate mbox for ADD_FCF cmd\n");
19198                return -ENOMEM;
19199        }
19200
19201        req_len = sizeof(struct fcf_record) + sizeof(union lpfc_sli4_cfg_shdr) +
19202                  sizeof(uint32_t);
19203
19204        /* Allocate DMA memory and set up the non-embedded mailbox command */
19205        alloc_len = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
19206                                     LPFC_MBOX_OPCODE_FCOE_ADD_FCF,
19207                                     req_len, LPFC_SLI4_MBX_NEMBED);
19208        if (alloc_len < req_len) {
19209                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
19210                        "2523 Allocated DMA memory size (x%x) is "
19211                        "less than the requested DMA memory "
19212                        "size (x%x)\n", alloc_len, req_len);
19213                lpfc_sli4_mbox_cmd_free(phba, mboxq);
19214                return -ENOMEM;
19215        }
19216
19217        /*
19218         * Get the first SGE entry from the non-embedded DMA memory.  This
19219         * routine only uses a single SGE.
19220         */
19221        lpfc_sli4_mbx_sge_get(mboxq, 0, &sge);
19222        virt_addr = mboxq->sge_array->addr[0];
19223        /*
19224         * Configure the FCF record for FCFI 0.  This is the driver's
19225         * hardcoded default and is used in non-FIP mode.
19226         */
19227        fcfindex = bf_get(lpfc_fcf_record_fcf_index, fcf_record);
19228        bytep = virt_addr + sizeof(union lpfc_sli4_cfg_shdr);
19229        lpfc_sli_pcimem_bcopy(&fcfindex, bytep, sizeof(uint32_t));
19230
19231        /*
19232         * Copy the fcf_index and the FCF Record Data. The data starts after
19233         * the FCoE header plus word10. The data copy needs to be endian
19234         * correct.
19235         */
19236        bytep += sizeof(uint32_t);
19237        lpfc_sli_pcimem_bcopy(fcf_record, bytep, sizeof(struct fcf_record));
19238        mboxq->vport = phba->pport;
19239        mboxq->mbox_cmpl = lpfc_mbx_cmpl_add_fcf_record;
19240        rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
19241        if (rc == MBX_NOT_FINISHED) {
19242                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
19243                        "2515 ADD_FCF_RECORD mailbox failed with "
19244                        "status 0x%x\n", rc);
19245                lpfc_sli4_mbox_cmd_free(phba, mboxq);
19246                rc = -EIO;
19247        } else
19248                rc = 0;
19249
19250        return rc;
19251}
19252
19253/**
19254 * lpfc_sli4_build_dflt_fcf_record - Build the driver's default FCF Record.
19255 * @phba: pointer to lpfc hba data structure.
19256 * @fcf_record:  pointer to the fcf record to write the default data.
19257 * @fcf_index: FCF table entry index.
19258 *
19259 * This routine is invoked to build the driver's default FCF record.  The
19260 * values used are hardcoded.  This routine handles memory initialization.
19261 *
19262 **/
19263void
19264lpfc_sli4_build_dflt_fcf_record(struct lpfc_hba *phba,
19265                                struct fcf_record *fcf_record,
19266                                uint16_t fcf_index)
19267{
19268        memset(fcf_record, 0, sizeof(struct fcf_record));
19269        fcf_record->max_rcv_size = LPFC_FCOE_MAX_RCV_SIZE;
19270        fcf_record->fka_adv_period = LPFC_FCOE_FKA_ADV_PER;
19271        fcf_record->fip_priority = LPFC_FCOE_FIP_PRIORITY;
19272        bf_set(lpfc_fcf_record_mac_0, fcf_record, phba->fc_map[0]);
19273        bf_set(lpfc_fcf_record_mac_1, fcf_record, phba->fc_map[1]);
19274        bf_set(lpfc_fcf_record_mac_2, fcf_record, phba->fc_map[2]);
19275        bf_set(lpfc_fcf_record_mac_3, fcf_record, LPFC_FCOE_FCF_MAC3);
19276        bf_set(lpfc_fcf_record_mac_4, fcf_record, LPFC_FCOE_FCF_MAC4);
19277        bf_set(lpfc_fcf_record_mac_5, fcf_record, LPFC_FCOE_FCF_MAC5);
19278        bf_set(lpfc_fcf_record_fc_map_0, fcf_record, phba->fc_map[0]);
19279        bf_set(lpfc_fcf_record_fc_map_1, fcf_record, phba->fc_map[1]);
19280        bf_set(lpfc_fcf_record_fc_map_2, fcf_record, phba->fc_map[2]);
19281        bf_set(lpfc_fcf_record_fcf_valid, fcf_record, 1);
19282        bf_set(lpfc_fcf_record_fcf_avail, fcf_record, 1);
19283        bf_set(lpfc_fcf_record_fcf_index, fcf_record, fcf_index);
19284        bf_set(lpfc_fcf_record_mac_addr_prov, fcf_record,
19285                LPFC_FCF_FPMA | LPFC_FCF_SPMA);
19286        /* Set the VLAN bit map */
19287        if (phba->valid_vlan) {
19288                fcf_record->vlan_bitmap[phba->vlan_id / 8]
19289                        = 1 << (phba->vlan_id % 8);
19290        }
19291}
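
/*
 * Worked example for the VLAN bit map above (illustrative): with
 * vlan_id == 100 the bit lands in byte 100 / 8 == 12 at bit position
 * 100 % 8 == 4, i.e. vlan_bitmap[12] == 0x10.
 */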
19292
19293/**
19294 * lpfc_sli4_fcf_scan_read_fcf_rec - Read hba fcf record for fcf scan.
19295 * @phba: pointer to lpfc hba data structure.
19296 * @fcf_index: FCF table entry offset.
19297 *
19298 * This routine is invoked to scan the entire FCF table by reading FCF
19299 * records and processing them one at a time, starting from @fcf_index,
19300 * for initial FCF discovery or fast FCF failover rediscovery.
19301 *
19302 * Return 0 if the mailbox command is submitted successfully, non-zero
19303 * otherwise.
19304 **/
19305int
19306lpfc_sli4_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
19307{
19308        int rc = 0, error;
19309        LPFC_MBOXQ_t *mboxq;
19310
19311        phba->fcoe_eventtag_at_fcf_scan = phba->fcoe_eventtag;
19312        phba->fcoe_cvl_eventtag_attn = phba->fcoe_cvl_eventtag;
19313        mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
19314        if (!mboxq) {
19315                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
19316                                "2000 Failed to allocate mbox for "
19317                                "READ_FCF cmd\n");
19318                error = -ENOMEM;
19319                goto fail_fcf_scan;
19320        }
19321        /* Construct the read FCF record mailbox command */
19322        rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index);
19323        if (rc) {
19324                error = -EINVAL;
19325                goto fail_fcf_scan;
19326        }
19327        /* Issue the mailbox command asynchronously */
19328        mboxq->vport = phba->pport;
19329        mboxq->mbox_cmpl = lpfc_mbx_cmpl_fcf_scan_read_fcf_rec;
19330
19331        spin_lock_irq(&phba->hbalock);
19332        phba->hba_flag |= FCF_TS_INPROG;
19333        spin_unlock_irq(&phba->hbalock);
19334
19335        rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
19336        if (rc == MBX_NOT_FINISHED)
19337                error = -EIO;
19338        else {
19339                /* Reset eligible FCF count for new scan */
19340                if (fcf_index == LPFC_FCOE_FCF_GET_FIRST)
19341                        phba->fcf.eligible_fcf_cnt = 0;
19342                error = 0;
19343        }
19344fail_fcf_scan:
19345        if (error) {
19346                if (mboxq)
19347                        lpfc_sli4_mbox_cmd_free(phba, mboxq);
19348                /* FCF scan failed, clear FCF_TS_INPROG flag */
19349                spin_lock_irq(&phba->hbalock);
19350                phba->hba_flag &= ~FCF_TS_INPROG;
19351                spin_unlock_irq(&phba->hbalock);
19352        }
19353        return error;
19354}
19355
19356/**
19357 * lpfc_sli4_fcf_rr_read_fcf_rec - Read hba fcf record for roundrobin fcf.
19358 * @phba: pointer to lpfc hba data structure.
19359 * @fcf_index: FCF table entry offset.
19360 *
19361 * This routine is invoked to read an FCF record indicated by @fcf_index
19362 * and to use it for FLOGI roundrobin FCF failover.
19363 *
19364 * Return 0 if the mailbox command is submitted successfully, non-zero
19365 * otherwise.
19366 **/
19367int
19368lpfc_sli4_fcf_rr_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
19369{
19370        int rc = 0, error;
19371        LPFC_MBOXQ_t *mboxq;
19372
19373        mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
19374        if (!mboxq) {
19375                lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_INIT,
19376                                "2763 Failed to allocate mbox for "
19377                                "READ_FCF cmd\n");
19378                error = -ENOMEM;
19379                goto fail_fcf_read;
19380        }
19381        /* Construct the read FCF record mailbox command */
19382        rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index);
19383        if (rc) {
19384                error = -EINVAL;
19385                goto fail_fcf_read;
19386        }
19387        /* Issue the mailbox command asynchronously */
19388        mboxq->vport = phba->pport;
19389        mboxq->mbox_cmpl = lpfc_mbx_cmpl_fcf_rr_read_fcf_rec;
19390        rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
19391        if (rc == MBX_NOT_FINISHED)
19392                error = -EIO;
19393        else
19394                error = 0;
19395
19396fail_fcf_read:
19397        if (error && mboxq)
19398                lpfc_sli4_mbox_cmd_free(phba, mboxq);
19399        return error;
19400}
19401
19402/**
19403 * lpfc_sli4_read_fcf_rec - Read hba fcf record for update eligible fcf bmask.
19404 * @phba: pointer to lpfc hba data structure.
19405 * @fcf_index: FCF table entry offset.
19406 *
19407 * This routine is invoked to read an FCF record indicated by @fcf_index to
19408 * determine whether it's eligible for the FLOGI roundrobin failover list.
19409 *
19410 * Return 0 if the mailbox command is submitted successfully, non-zero
19411 * otherwise.
19412 **/
19413int
19414lpfc_sli4_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
19415{
19416        int rc = 0, error;
19417        LPFC_MBOXQ_t *mboxq;
19418
19419        mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
19420        if (!mboxq) {
19421                lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_INIT,
19422                                "2758 Failed to allocate mbox for "
19423                                "READ_FCF cmd\n");
19424                error = -ENOMEM;
19425                goto fail_fcf_read;
19426        }
19427        /* Construct the read FCF record mailbox command */
19428        rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index);
19429        if (rc) {
19430                error = -EINVAL;
19431                goto fail_fcf_read;
19432        }
19433        /* Issue the mailbox command asynchronously */
19434        mboxq->vport = phba->pport;
19435        mboxq->mbox_cmpl = lpfc_mbx_cmpl_read_fcf_rec;
19436        rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
19437        if (rc == MBX_NOT_FINISHED)
19438                error = -EIO;
19439        else
19440                error = 0;
19441
19442fail_fcf_read:
19443        if (error && mboxq)
19444                lpfc_sli4_mbox_cmd_free(phba, mboxq);
19445        return error;
19446}
19447
19448/**
19449 * lpfc_check_next_fcf_pri_level
19450 * @phba: pointer to the lpfc_hba struct for this port.
19451 * This routine is called from the lpfc_sli4_fcf_rr_next_index_get
19452 * routine when the rr_bmask is empty. The FCF indices are put into the
19453 * rr_bmask based on their priority level, starting from the highest
19454 * priority and working down to the lowest; the most likely FCF candidate
19455 * will be in the highest priority group. When this routine is called it
19456 * searches the fcf_pri list for the next lowest priority group and
19457 * repopulates the rr_bmask with only those fcf_indexes.
19458 * returns:
19459 * 1=success 0=failure
19460 **/
19461static int
19462lpfc_check_next_fcf_pri_level(struct lpfc_hba *phba)
19463{
19464        uint16_t next_fcf_pri;
19465        uint16_t last_index;
19466        struct lpfc_fcf_pri *fcf_pri;
19467        int rc;
19468        int ret = 0;
19469
19470        last_index = find_first_bit(phba->fcf.fcf_rr_bmask,
19471                        LPFC_SLI4_FCF_TBL_INDX_MAX);
19472        lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
19473                        "3060 Last IDX %d\n", last_index);
19474
19475        /* Verify the priority list has 2 or more entries */
19476        spin_lock_irq(&phba->hbalock);
19477        if (list_empty(&phba->fcf.fcf_pri_list) ||
19478            list_is_singular(&phba->fcf.fcf_pri_list)) {
19479                spin_unlock_irq(&phba->hbalock);
19480                lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
19481                        "3061 Last IDX %d\n", last_index);
19482                return 0; /* Empty rr list */
19483        }
19484        spin_unlock_irq(&phba->hbalock);
19485
19486        next_fcf_pri = 0;
19487        /*
19488         * Clear the rr_bmask and set all of the bits that are at this
19489         * priority.
19490         */
19491        memset(phba->fcf.fcf_rr_bmask, 0,
19492                        sizeof(*phba->fcf.fcf_rr_bmask));
19493        spin_lock_irq(&phba->hbalock);
19494        list_for_each_entry(fcf_pri, &phba->fcf.fcf_pri_list, list) {
19495                if (fcf_pri->fcf_rec.flag & LPFC_FCF_FLOGI_FAILED)
19496                        continue;
19497                /*
19498                 * The first entry that has not failed FLOGI carries
19499                 * the highest priority; record its level.
19500                 */
19501                if (!next_fcf_pri)
19502                        next_fcf_pri = fcf_pri->fcf_rec.priority;
19503                spin_unlock_irq(&phba->hbalock);
19504                if (fcf_pri->fcf_rec.priority == next_fcf_pri) {
19505                        rc = lpfc_sli4_fcf_rr_index_set(phba,
19506                                                fcf_pri->fcf_rec.fcf_index);
19507                        if (rc)
19508                                return 0;
19509                }
19510                spin_lock_irq(&phba->hbalock);
19511        }
19512        /*
19513         * If next_fcf_pri was not set above and the list is not empty, then
19514         * FLOGI has failed on all of the entries. So clear the FLOGI-failed
19515         * flags and start again from the beginning.
19516         */
19517        if (!next_fcf_pri && !list_empty(&phba->fcf.fcf_pri_list)) {
19518                list_for_each_entry(fcf_pri, &phba->fcf.fcf_pri_list, list) {
19519                        fcf_pri->fcf_rec.flag &= ~LPFC_FCF_FLOGI_FAILED;
19520                        /*
19521                         * The first entry that has not failed FLOGI
19522                         * carries the highest priority; record its level.
19523                         */
19524                        if (!next_fcf_pri)
19525                                next_fcf_pri = fcf_pri->fcf_rec.priority;
19526                        spin_unlock_irq(&phba->hbalock);
19527                        if (fcf_pri->fcf_rec.priority == next_fcf_pri) {
19528                                rc = lpfc_sli4_fcf_rr_index_set(phba,
19529                                                fcf_pri->fcf_rec.fcf_index);
19530                                if (rc)
19531                                        return 0;
19532                        }
19533                        spin_lock_irq(&phba->hbalock);
19534                }
19535        } else
19536                ret = 1;
19537        spin_unlock_irq(&phba->hbalock);
19538
19539        return ret;
19540}
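
/*
 * Example of the regrouping above (illustrative, assuming the fcf_pri
 * list is kept sorted by priority): with entries at priorities {2, 2, 5}
 * and no FLOGI failures, the first pass sets only the two priority-2
 * indexes in rr_bmask; once both have failed FLOGI, the next call falls
 * through to the priority-5 entry.
 */
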
19541/**
19542 * lpfc_sli4_fcf_rr_next_index_get - Get next eligible fcf record index
19543 * @phba: pointer to lpfc hba data structure.
19544 *
19545 * This routine is to get the next eligible FCF record index in a round
19546 * robin fashion. If the next eligible FCF record index equals the
19547 * initial roundrobin FCF record index, LPFC_FCOE_FCF_NEXT_NONE (0xFFFF)
19548 * shall be returned, otherwise, the next eligible FCF record's index
19549 * shall be returned.
19550 **/
19551uint16_t
19552lpfc_sli4_fcf_rr_next_index_get(struct lpfc_hba *phba)
19553{
19554        uint16_t next_fcf_index;
19555
19556initial_priority:
19557        /* Search start from next bit of currently registered FCF index */
19558        next_fcf_index = phba->fcf.current_rec.fcf_indx;
19559
19560next_priority:
19561        /* Determine the next fcf index to check */
19562        next_fcf_index = (next_fcf_index + 1) % LPFC_SLI4_FCF_TBL_INDX_MAX;
19563        next_fcf_index = find_next_bit(phba->fcf.fcf_rr_bmask,
19564                                       LPFC_SLI4_FCF_TBL_INDX_MAX,
19565                                       next_fcf_index);
19566
19567        /* Wrap around condition on phba->fcf.fcf_rr_bmask */
19568        if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
19569                /*
19570                 * If the search wrapped past the end of the bmask,
19571                 * restart it from bit 0 so indexes below the currently
19572                 * registered FCF are also considered.
19573                 */
19574                next_fcf_index = find_next_bit(phba->fcf.fcf_rr_bmask,
19575                                               LPFC_SLI4_FCF_TBL_INDX_MAX, 0);
19576        }
19577
19578
19579        /* Check roundrobin failover list empty condition */
19580        if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX ||
19581                next_fcf_index == phba->fcf.current_rec.fcf_indx) {
19582                /*
19583                 * If the next fcf index is not found, check whether there
19584                 * are lower priority level fcfs in the fcf_priority list.
19585                 * Set up the rr_bmask with all of the available fcf bits
19586                 * at that level and continue the selection process.
19587                 */
19588                if (lpfc_check_next_fcf_pri_level(phba))
19589                        goto initial_priority;
19590                lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
19591                                "2844 No roundrobin failover FCF available\n");
19592
19593                return LPFC_FCOE_FCF_NEXT_NONE;
19594        }
19595
19596        if (next_fcf_index < LPFC_SLI4_FCF_TBL_INDX_MAX &&
19597                phba->fcf.fcf_pri[next_fcf_index].fcf_rec.flag &
19598                LPFC_FCF_FLOGI_FAILED) {
19599                if (list_is_singular(&phba->fcf.fcf_pri_list))
19600                        return LPFC_FCOE_FCF_NEXT_NONE;
19601
19602                goto next_priority;
19603        }
19604
19605        lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
19606                        "2845 Get next roundrobin failover FCF (x%x)\n",
19607                        next_fcf_index);
19608
19609        return next_fcf_index;
19610}
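
/*
 * Usage sketch (illustrative only): FLOGI failover keeps asking for the
 * next index until LPFC_FCOE_FCF_NEXT_NONE reports that the roundrobin
 * list is exhausted.  The function below is hypothetical.
 */
static int lpfc_example_pick_next_fcf(struct lpfc_hba *phba)
{
        uint16_t fcf_index = lpfc_sli4_fcf_rr_next_index_get(phba);

        if (fcf_index == LPFC_FCOE_FCF_NEXT_NONE)
                return -ENOENT; /* no eligible FCF left to try */

        /* ... read the FCF record at fcf_index and retry FLOGI ... */
        return 0;
}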
19611
19612/**
19613 * lpfc_sli4_fcf_rr_index_set - Set bmask with eligible fcf record index
19614 * @phba: pointer to lpfc hba data structure.
19615 * @fcf_index: index into the FCF table to 'set'
19616 *
19617 * This routine sets the FCF record index into the eligible bmask for
19618 * roundrobin failover search. It checks to make sure that the index
19619 * does not go beyond the range of the driver allocated bmask dimension
19620 * before setting the bit.
19621 *
19622 * Returns 0 if the index bit is successfully set; otherwise, it returns
19623 * -EINVAL.
19624 **/
19625int
19626lpfc_sli4_fcf_rr_index_set(struct lpfc_hba *phba, uint16_t fcf_index)
19627{
19628        if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
19629                lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
19630                                "2610 FCF (x%x) reached driver's "
19631                                "bookkeeping dimension:x%x\n",
19632                                fcf_index, LPFC_SLI4_FCF_TBL_INDX_MAX);
19633                return -EINVAL;
19634        }
19635        /* Set the eligible FCF record index bmask */
19636        set_bit(fcf_index, phba->fcf.fcf_rr_bmask);
19637
19638        lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
19639                        "2790 Set FCF (x%x) to roundrobin FCF failover "
19640                        "bmask\n", fcf_index);
19641
19642        return 0;
19643}
19644
19645/**
19646 * lpfc_sli4_fcf_rr_index_clear - Clear bmask from eligible fcf record index
19647 * @phba: pointer to lpfc hba data structure.
19648 * @fcf_index: index into the FCF table to 'clear'
19649 *
19650 * This routine clears the FCF record index from the eligible bmask for
19651 * roundrobin failover search. It checks to make sure that the index
19652 * does not go beyond the range of the driver allocated bmask dimension
19653 * before clearing the bit.
19654 **/
19655void
19656lpfc_sli4_fcf_rr_index_clear(struct lpfc_hba *phba, uint16_t fcf_index)
19657{
19658        struct lpfc_fcf_pri *fcf_pri, *fcf_pri_next;
19659        if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
19660                lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
19661                                "2762 FCF (x%x) reached driver's "
19662                                "bookkeeping dimension:x%x\n",
19663                                fcf_index, LPFC_SLI4_FCF_TBL_INDX_MAX);
19664                return;
19665        }
19666        /* Clear the eligible FCF record index bmask */
19667        spin_lock_irq(&phba->hbalock);
19668        list_for_each_entry_safe(fcf_pri, fcf_pri_next, &phba->fcf.fcf_pri_list,
19669                                 list) {
19670                if (fcf_pri->fcf_rec.fcf_index == fcf_index) {
19671                        list_del_init(&fcf_pri->list);
19672                        break;
19673                }
19674        }
19675        spin_unlock_irq(&phba->hbalock);
19676        clear_bit(fcf_index, phba->fcf.fcf_rr_bmask);
19677
19678        lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
19679                        "2791 Clear FCF (x%x) from roundrobin failover "
19680                        "bmask\n", fcf_index);
19681}
19682
19683/**
19684 * lpfc_mbx_cmpl_redisc_fcf_table - completion routine for rediscover FCF table
19685 * @phba: pointer to lpfc hba data structure.
19686 * @mbox: An allocated pointer to type LPFC_MBOXQ_t
19687 *
19688 * This routine is the completion routine for the rediscover FCF table mailbox
19689 * command. If the mailbox command returned failure, it will try to stop the
19690 * FCF rediscover wait timer.
19691 **/
19692static void
19693lpfc_mbx_cmpl_redisc_fcf_table(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
19694{
19695        struct lpfc_mbx_redisc_fcf_tbl *redisc_fcf;
19696        uint32_t shdr_status, shdr_add_status;
19697
19698        redisc_fcf = &mbox->u.mqe.un.redisc_fcf_tbl;
19699
19700        shdr_status = bf_get(lpfc_mbox_hdr_status,
19701                             &redisc_fcf->header.cfg_shdr.response);
19702        shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
19703                             &redisc_fcf->header.cfg_shdr.response);
19704        if (shdr_status || shdr_add_status) {
19705                lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
19706                                "2746 Request for FCF rediscovery failed "
19707                                "status x%x add_status x%x\n",
19708                                shdr_status, shdr_add_status);
19709                if (phba->fcf.fcf_flag & FCF_ACVL_DISC) {
19710                        spin_lock_irq(&phba->hbalock);
19711                        phba->fcf.fcf_flag &= ~FCF_ACVL_DISC;
19712                        spin_unlock_irq(&phba->hbalock);
19713                        /*
19714                         * The CVL-triggered FCF rediscover request failed;
19715                         * as a last resort, retry the registered FCF entry.
19716                         */
19717                        lpfc_retry_pport_discovery(phba);
19718                } else {
19719                        spin_lock_irq(&phba->hbalock);
19720                        phba->fcf.fcf_flag &= ~FCF_DEAD_DISC;
19721                        spin_unlock_irq(&phba->hbalock);
19722                        /*
19723                         * The DEAD-FCF-triggered rediscover request failed;
19724                         * as a last resort, fail over by treating this as a
19725                         * link down for FCF registration.
19726                         */
19727                        lpfc_sli4_fcf_dead_failthrough(phba);
19728                }
19729        } else {
19730                lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
19731                                "2775 Start FCF rediscover quiescent timer\n");
19732                /*
19733                 * Start the FCF rediscovery wait timer for the pending
19734                 * FCF before rescanning the FCF record table.
19735                 */
19736                lpfc_fcf_redisc_wait_start_timer(phba);
19737        }
19738
19739        mempool_free(mbox, phba->mbox_mem_pool);
19740}
19741
19742/**
19743 * lpfc_sli4_redisc_fcf_table - Request to rediscover entire FCF table by port.
19744 * @phba: pointer to lpfc hba data structure.
19745 *
19746 * This routine is invoked to request for rediscovery of the entire FCF table
19747 * by the port.
19748 **/
19749int
19750lpfc_sli4_redisc_fcf_table(struct lpfc_hba *phba)
19751{
19752        LPFC_MBOXQ_t *mbox;
19753        struct lpfc_mbx_redisc_fcf_tbl *redisc_fcf;
19754        int rc, length;
19755
19756        /* Cancel retry delay timers to all vports before FCF rediscover */
19757        lpfc_cancel_all_vport_retry_delay_timer(phba);
19758
19759        mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
19760        if (!mbox) {
19761                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
19762                                "2745 Failed to allocate mbox for "
19763                                "requesting FCF rediscover.\n");
19764                return -ENOMEM;
19765        }
19766
19767        length = (sizeof(struct lpfc_mbx_redisc_fcf_tbl) -
19768                  sizeof(struct lpfc_sli4_cfg_mhdr));
19769        lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
19770                         LPFC_MBOX_OPCODE_FCOE_REDISCOVER_FCF,
19771                         length, LPFC_SLI4_MBX_EMBED);
19772
19773        redisc_fcf = &mbox->u.mqe.un.redisc_fcf_tbl;
19774        /* Set count to 0 for invalidating the entire FCF database */
19775        bf_set(lpfc_mbx_redisc_fcf_count, redisc_fcf, 0);
19776
19777        /* Issue the mailbox command asynchronously */
19778        mbox->vport = phba->pport;
19779        mbox->mbox_cmpl = lpfc_mbx_cmpl_redisc_fcf_table;
19780        rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
19781
19782        if (rc == MBX_NOT_FINISHED) {
19783                mempool_free(mbox, phba->mbox_mem_pool);
19784                return -EIO;
19785        }
19786        return 0;
19787}
19788
19789/**
19790 * lpfc_sli4_fcf_dead_failthrough - Failthrough routine to fcf dead event
19791 * @phba: pointer to lpfc hba data structure.
19792 *
19793 * This function is the failover routine as a last resort to the FCF DEAD
19794 * event when driver failed to perform fast FCF failover.
19795 **/
19796void
19797lpfc_sli4_fcf_dead_failthrough(struct lpfc_hba *phba)
19798{
19799        uint32_t link_state;
19800
19801        /*
19802         * Last resort as FCF DEAD event failover will treat this as
19803         * a link down, but save the link state because we don't want
19804         * it to be changed to Link Down unless it is already down.
19805         */
19806        link_state = phba->link_state;
19807        lpfc_linkdown(phba);
19808        phba->link_state = link_state;
19809
19810        /* Unregister FCF if no devices connected to it */
19811        lpfc_unregister_unused_fcf(phba);
19812}
19813
19814/**
19815 * lpfc_sli_get_config_region23 - Get sli3 port region 23 data.
19816 * @phba: pointer to lpfc hba data structure.
19817 * @rgn23_data: pointer to configure region 23 data.
19818 *
19819 * This function gets SLI3 port config region 23 data through the memory
19820 * dump mailbox command. When it successfully retrieves data, the size of
19821 * the data is returned; otherwise, 0 is returned.
19822 **/
19823static uint32_t
19824lpfc_sli_get_config_region23(struct lpfc_hba *phba, char *rgn23_data)
19825{
19826        LPFC_MBOXQ_t *pmb = NULL;
19827        MAILBOX_t *mb;
19828        uint32_t offset = 0;
19829        int rc;
19830
19831        if (!rgn23_data)
19832                return 0;
19833
19834        pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
19835        if (!pmb) {
19836                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
19837                                "2600 failed to allocate mailbox memory\n");
19838                return 0;
19839        }
19840        mb = &pmb->u.mb;
19841
19842        do {
19843                lpfc_dump_mem(phba, pmb, offset, DMP_REGION_23);
19844                rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
19845
19846                if (rc != MBX_SUCCESS) {
19847                        lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
19848                                        "2601 failed to read config "
19849                                        "region 23, rc 0x%x Status 0x%x\n",
19850                                        rc, mb->mbxStatus);
19851                        mb->un.varDmp.word_cnt = 0;
19852                }
19853                /*
19854                 * dump mem may return a zero word count when finished, or
19855                 * we got a mailbox error; either way we are done.
19856                 */
19857                if (mb->un.varDmp.word_cnt == 0)
19858                        break;
19859
19860                if (mb->un.varDmp.word_cnt > DMP_RGN23_SIZE - offset)
19861                        mb->un.varDmp.word_cnt = DMP_RGN23_SIZE - offset;
19862
19863                lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
19864                                       rgn23_data + offset,
19865                                       mb->un.varDmp.word_cnt);
19866                offset += mb->un.varDmp.word_cnt;
19867        } while (mb->un.varDmp.word_cnt && offset < DMP_RGN23_SIZE);
19868
19869        mempool_free(pmb, phba->mbox_mem_pool);
19870        return offset;
19871}
19872
19873/**
19874 * lpfc_sli4_get_config_region23 - Get sli4 port region 23 data.
19875 * @phba: pointer to lpfc hba data structure.
19876 * @rgn23_data: pointer to configure region 23 data.
19877 *
19878 * This function gets SLI4 port config region 23 data through the memory
19879 * dump mailbox command. When it successfully retrieves data, the size of
19880 * the data is returned; otherwise, 0 is returned.
19881 **/
19882static uint32_t
19883lpfc_sli4_get_config_region23(struct lpfc_hba *phba, char *rgn23_data)
19884{
19885        LPFC_MBOXQ_t *mboxq = NULL;
19886        struct lpfc_dmabuf *mp = NULL;
19887        struct lpfc_mqe *mqe;
19888        uint32_t data_length = 0;
19889        int rc;
19890
19891        if (!rgn23_data)
19892                return 0;
19893
19894        mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
19895        if (!mboxq) {
19896                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
19897                                "3105 failed to allocate mailbox memory\n");
19898                return 0;
19899        }
19900
19901        if (lpfc_sli4_dump_cfg_rg23(phba, mboxq))
19902                goto out;
19903        mqe = &mboxq->u.mqe;
19904        mp = (struct lpfc_dmabuf *)mboxq->ctx_buf;
19905        rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
19906        if (rc)
19907                goto out;
19908        data_length = mqe->un.mb_words[5];
19909        if (data_length == 0)
19910                goto out;
19911        if (data_length > DMP_RGN23_SIZE) {
19912                data_length = 0;
19913                goto out;
19914        }
19915        lpfc_sli_pcimem_bcopy((char *)mp->virt, rgn23_data, data_length);
19916out:
19917        mempool_free(mboxq, phba->mbox_mem_pool);
19918        if (mp) {
19919                lpfc_mbuf_free(phba, mp->virt, mp->phys);
19920                kfree(mp);
19921        }
19922        return data_length;
19923}
19924
19925/**
19926 * lpfc_sli_read_link_ste - Read region 23 to decide if link is disabled.
19927 * @phba: pointer to lpfc hba data structure.
19928 *
19929 * This function reads region 23 and parses the TLVs for port status to
19930 * decide if the user disabled the port. If the TLV indicates the
19931 * port is disabled, the hba_flag is set accordingly.
19932 **/
19933void
19934lpfc_sli_read_link_ste(struct lpfc_hba *phba)
19935{
19936        uint8_t *rgn23_data = NULL;
19937        uint32_t if_type, data_size, sub_tlv_len, tlv_offset;
19938        uint32_t offset = 0;
19939
19940        /* Get adapter Region 23 data */
19941        rgn23_data = kzalloc(DMP_RGN23_SIZE, GFP_KERNEL);
19942        if (!rgn23_data)
19943                goto out;
19944
19945        if (phba->sli_rev < LPFC_SLI_REV4)
19946                data_size = lpfc_sli_get_config_region23(phba, rgn23_data);
19947        else {
19948                if_type = bf_get(lpfc_sli_intf_if_type,
19949                                 &phba->sli4_hba.sli_intf);
19950                if (if_type == LPFC_SLI_INTF_IF_TYPE_0)
19951                        goto out;
19952                data_size = lpfc_sli4_get_config_region23(phba, rgn23_data);
19953        }
19954
19955        if (!data_size)
19956                goto out;
19957
19958        /* Check the region signature first */
19959        if (memcmp(&rgn23_data[offset], LPFC_REGION23_SIGNATURE, 4)) {
19960                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
19961                        "2619 Config region 23 has bad signature\n");
19962                goto out;
19963        }
19964        offset += 4;
19965
19966        /* Check the data structure version */
19967        if (rgn23_data[offset] != LPFC_REGION23_VERSION) {
19968                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
19969                        "2620 Config region 23 has bad version\n");
19970                goto out;
19971        }
19972        offset += 4;
19973
19974        /* Parse TLV entries in the region */
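             /* Record layout, as inferred from the parsing below: byte 0 is
              * the TLV type, byte 1 is the payload length in 4-byte words,
              * and bytes 2-3 carry type-specific data, so each record spans
              * rgn23_data[offset + 1] * 4 + 4 bytes.
              */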
19975        while (offset < data_size) {
19976                if (rgn23_data[offset] == LPFC_REGION23_LAST_REC)
19977                        break;
19978                /*
19979                 * If the TLV is not driver specific TLV or driver id is
19980                 * not linux driver id, skip the record.
19981                 */
19982                if ((rgn23_data[offset] != DRIVER_SPECIFIC_TYPE) ||
19983                    (rgn23_data[offset + 2] != LINUX_DRIVER_ID) ||
19984                    (rgn23_data[offset + 3] != 0)) {
19985                        offset += rgn23_data[offset + 1] * 4 + 4;
19986                        continue;
19987                }
19988
19989                /* Driver found a driver specific TLV in the config region */
19990                sub_tlv_len = rgn23_data[offset + 1] * 4;
19991                offset += 4;
19992                tlv_offset = 0;
19993
19994                /*
19995                 * Search for configured port state sub-TLV.
19996                 */
19997                while ((offset < data_size) &&
19998                        (tlv_offset < sub_tlv_len)) {
19999                        if (rgn23_data[offset] == LPFC_REGION23_LAST_REC) {
20000                                offset += 4;
20001                                tlv_offset += 4;
20002                                break;
20003                        }
20004                        if (rgn23_data[offset] != PORT_STE_TYPE) {
20005                        tlv_offset += rgn23_data[offset + 1] * 4 + 4;
20006                        offset += rgn23_data[offset + 1] * 4 + 4;
20007                                continue;
20008                        }
20009
20010                        /* This HBA contains PORT_STE configured */
20011                        if (!rgn23_data[offset + 2])
20012                                phba->hba_flag |= LINK_DISABLED;
20013
20014                        goto out;
20015                }
20016        }
20017
20018out:
20019        kfree(rgn23_data);
20020        return;
20021}
20022
20023/**
20024 * lpfc_wr_object - write an object to the firmware
20025 * @phba: HBA structure that indicates port to create a queue on.
20026 * @dmabuf_list: list of dmabufs to write to the port.
20027 * @size: the total byte value of the objects to write to the port.
20028 * @offset: the current offset to be used to start the transfer.
20029 *
20030 * This routine will create a wr_object mailbox command to send to the port.
20031 * The mailbox command will be constructed using the dma buffers described in
20032 * @dmabuf_list to create a list of BDEs. This routine will fill in as many
20033 * BDEs as the embedded mailbox can support. The @offset variable will be
20034 * used to indicate the starting offset of the transfer and will also return
20035 * the offset after the write object mailbox has completed. @size is used to
20036 * determine the end of the object and whether the eof bit should be set.
20037 *
20038 * Return 0 if successful; @offset will contain the new offset to use
20039 * for the next write.
20040 * Return a negative value for error cases.
20041 **/
20042int
20043lpfc_wr_object(struct lpfc_hba *phba, struct list_head *dmabuf_list,
20044               uint32_t size, uint32_t *offset)
20045{
20046        struct lpfc_mbx_wr_object *wr_object;
20047        LPFC_MBOXQ_t *mbox;
20048        int rc = 0, i = 0;
20049        uint32_t shdr_status, shdr_add_status, shdr_change_status, shdr_csf;
20050        uint32_t mbox_tmo;
20051        struct lpfc_dmabuf *dmabuf;
20052        uint32_t written = 0;
20053        bool check_change_status = false;
20054
20055        mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
20056        if (!mbox)
20057                return -ENOMEM;
20058
20059        lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
20060                        LPFC_MBOX_OPCODE_WRITE_OBJECT,
20061                        sizeof(struct lpfc_mbx_wr_object) -
20062                        sizeof(struct lpfc_sli4_cfg_mhdr), LPFC_SLI4_MBX_EMBED);
20063
20064        wr_object = (struct lpfc_mbx_wr_object *)&mbox->u.mqe.un.wr_object;
20065        wr_object->u.request.write_offset = *offset;
20066        sprintf((uint8_t *)wr_object->u.request.object_name, "/");
20067        wr_object->u.request.object_name[0] =
20068                cpu_to_le32(wr_object->u.request.object_name[0]);
20069        bf_set(lpfc_wr_object_eof, &wr_object->u.request, 0);
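             /* Describe each dmabuf with one BDE. Every BDE covers a full
              * SLI4_PAGE_SIZE chunk except the last, which is trimmed to the
              * remaining bytes and has the eof/eas bits set.
              */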
20070        list_for_each_entry(dmabuf, dmabuf_list, list) {
20071                if (i >= LPFC_MBX_WR_CONFIG_MAX_BDE || written >= size)
20072                        break;
20073                wr_object->u.request.bde[i].addrLow = putPaddrLow(dmabuf->phys);
20074                wr_object->u.request.bde[i].addrHigh =
20075                        putPaddrHigh(dmabuf->phys);
20076                if (written + SLI4_PAGE_SIZE >= size) {
20077                        wr_object->u.request.bde[i].tus.f.bdeSize =
20078                                (size - written);
20079                        written += (size - written);
20080                        bf_set(lpfc_wr_object_eof, &wr_object->u.request, 1);
20081                        bf_set(lpfc_wr_object_eas, &wr_object->u.request, 1);
20082                        check_change_status = true;
20083                } else {
20084                        wr_object->u.request.bde[i].tus.f.bdeSize =
20085                                SLI4_PAGE_SIZE;
20086                        written += SLI4_PAGE_SIZE;
20087                }
20088                i++;
20089        }
20090        wr_object->u.request.bde_count = i;
20091        bf_set(lpfc_wr_object_write_length, &wr_object->u.request, written);
20092        if (!phba->sli4_hba.intr_enable)
20093                rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
20094        else {
20095                mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
20096                rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
20097        }
20098        /* The IOCTL status is embedded in the mailbox subheader. */
20099        shdr_status = bf_get(lpfc_mbox_hdr_status,
20100                             &wr_object->header.cfg_shdr.response);
20101        shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
20102                                 &wr_object->header.cfg_shdr.response);
20103        if (check_change_status) {
20104                shdr_change_status = bf_get(lpfc_wr_object_change_status,
20105                                            &wr_object->u.response);
20106
20107                if (shdr_change_status == LPFC_CHANGE_STATUS_FW_RESET ||
20108                    shdr_change_status == LPFC_CHANGE_STATUS_PORT_MIGRATION) {
20109                        shdr_csf = bf_get(lpfc_wr_object_csf,
20110                                          &wr_object->u.response);
20111                        if (shdr_csf)
20112                                shdr_change_status =
20113                                                   LPFC_CHANGE_STATUS_PCI_RESET;
20114                }
20115
20116                switch (shdr_change_status) {
20117                case (LPFC_CHANGE_STATUS_PHYS_DEV_RESET):
20118                        lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
20119                                        "3198 Firmware write complete: System "
20120                                        "reboot required to instantiate\n");
20121                        break;
20122                case (LPFC_CHANGE_STATUS_FW_RESET):
20123                        lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
20124                                        "3199 Firmware write complete: Firmware"
20125                                        " reset required to instantiate\n");
20126                        break;
20127                case (LPFC_CHANGE_STATUS_PORT_MIGRATION):
20128                        lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
20129                                        "3200 Firmware write complete: Port "
20130                                        "Migration or PCI Reset required to "
20131                                        "instantiate\n");
20132                        break;
20133                case (LPFC_CHANGE_STATUS_PCI_RESET):
20134                        lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
20135                                        "3201 Firmware write complete: PCI "
20136                                        "Reset required to instantiate\n");
20137                        break;
20138                default:
20139                        break;
20140                }
20141        }
20142        if (!phba->sli4_hba.intr_enable)
20143                mempool_free(mbox, phba->mbox_mem_pool);
20144        else if (rc != MBX_TIMEOUT)
20145                mempool_free(mbox, phba->mbox_mem_pool);
20146        if (shdr_status || shdr_add_status || rc) {
20147                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
20148                                "3025 Write Object mailbox failed with "
20149                                "status x%x add_status x%x, mbx status x%x\n",
20150                                shdr_status, shdr_add_status, rc);
20151                rc = -ENXIO;
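                     /* On failure, hand the additional status back to the
                      * caller through @offset.
                      */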
20152                *offset = shdr_add_status;
20153        } else
20154                *offset += wr_object->u.response.actual_write_length;
20155        return rc;
20156}
20157
20158/**
20159 * lpfc_cleanup_pending_mbox - Free up vport discovery mailbox commands.
20160 * @vport: pointer to vport data structure.
20161 *
20162 * This function iterates through the mailboxq and cleans up all REG_LOGIN
20163 * and REG_VPI mailbox commands associated with the vport. This function
20164 * is called when the driver wants to restart discovery of the vport due to
20165 * a Clear Virtual Link event.
20166 **/
20167void
20168lpfc_cleanup_pending_mbox(struct lpfc_vport *vport)
20169{
20170        struct lpfc_hba *phba = vport->phba;
20171        LPFC_MBOXQ_t *mb, *nextmb;
20172        struct lpfc_dmabuf *mp;
20173        struct lpfc_nodelist *ndlp;
20174        struct lpfc_nodelist *act_mbx_ndlp = NULL;
20175        LIST_HEAD(mbox_cmd_list);
20176        uint8_t restart_loop;
20177
20178        /* Clean up internally queued mailbox commands with the vport */
20179        spin_lock_irq(&phba->hbalock);
20180        list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) {
20181                if (mb->vport != vport)
20182                        continue;
20183
20184                if ((mb->u.mb.mbxCommand != MBX_REG_LOGIN64) &&
20185                        (mb->u.mb.mbxCommand != MBX_REG_VPI))
20186                        continue;
20187
20188                list_move_tail(&mb->list, &mbox_cmd_list);
20189        }
20190        /* Clean up active mailbox command with the vport */
20191        mb = phba->sli.mbox_active;
20192        if (mb && (mb->vport == vport)) {
20193                if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) ||
20194                        (mb->u.mb.mbxCommand == MBX_REG_VPI))
20195                        mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
20196                if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
20197                        act_mbx_ndlp = (struct lpfc_nodelist *)mb->ctx_ndlp;
20198                        /* Put reference count for delayed processing */
20199                        act_mbx_ndlp = lpfc_nlp_get(act_mbx_ndlp);
20200                        /* Unregister the RPI when mailbox complete */
20201                        mb->mbox_flag |= LPFC_MBX_IMED_UNREG;
20202                }
20203        }
20204        /* Cleanup any mailbox completions which are not yet processed */
20205        do {
20206                restart_loop = 0;
20207                list_for_each_entry(mb, &phba->sli.mboxq_cmpl, list) {
20208                        /*
20209                         * If this mailbox is already processed or it is
20210                         * for another vport, ignore it.
20211                         */
20212                        if ((mb->vport != vport) ||
20213                                (mb->mbox_flag & LPFC_MBX_IMED_UNREG))
20214                                continue;
20215
20216                        if ((mb->u.mb.mbxCommand != MBX_REG_LOGIN64) &&
20217                                (mb->u.mb.mbxCommand != MBX_REG_VPI))
20218                                continue;
20219
20220                        mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
20221                        if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
20222                                ndlp = (struct lpfc_nodelist *)mb->ctx_ndlp;
20223                                /* Unregister the RPI when mailbox complete */
20224                                mb->mbox_flag |= LPFC_MBX_IMED_UNREG;
20225                                restart_loop = 1;
20226                                spin_unlock_irq(&phba->hbalock);
20227                                spin_lock(&ndlp->lock);
20228                                ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
20229                                spin_unlock(&ndlp->lock);
20230                                spin_lock_irq(&phba->hbalock);
20231                                break;
20232                        }
20233                }
20234        } while (restart_loop);
20235
20236        spin_unlock_irq(&phba->hbalock);
20237
20238        /* Release the cleaned-up mailbox commands */
20239        while (!list_empty(&mbox_cmd_list)) {
20240                list_remove_head(&mbox_cmd_list, mb, LPFC_MBOXQ_t, list);
20241                if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
20242                        mp = (struct lpfc_dmabuf *)(mb->ctx_buf);
20243                        if (mp) {
20244                                __lpfc_mbuf_free(phba, mp->virt, mp->phys);
20245                                kfree(mp);
20246                        }
20247                        mb->ctx_buf = NULL;
20248                        ndlp = (struct lpfc_nodelist *)mb->ctx_ndlp;
20249                        mb->ctx_ndlp = NULL;
20250                        if (ndlp) {
20251                                spin_lock(&ndlp->lock);
20252                                ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
20253                                spin_unlock(&ndlp->lock);
20254                                lpfc_nlp_put(ndlp);
20255                        }
20256                }
20257                mempool_free(mb, phba->mbox_mem_pool);
20258        }
20259
20260        /* Release the ndlp with the cleaned-up active mailbox command */
20261        if (act_mbx_ndlp) {
20262                spin_lock(&act_mbx_ndlp->lock);
20263                act_mbx_ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
20264                spin_unlock(&act_mbx_ndlp->lock);
20265                lpfc_nlp_put(act_mbx_ndlp);
20266        }
20267}
20268
20269/**
20270 * lpfc_drain_txq - Drain the txq
20271 * @phba: Pointer to HBA context object.
20272 *
20273 * This function attempts to submit IOCBs on the txq
20274 * to the adapter.  For SLI4 adapters, the txq contains
20275 * ELS IOCBs that have been deferred because there
20276 * are no SGLs.  This congestion can occur with large
20277 * vport counts during node discovery.
20278 **/
20279
20280uint32_t
20281lpfc_drain_txq(struct lpfc_hba *phba)
20282{
20283        LIST_HEAD(completions);
20284        struct lpfc_sli_ring *pring;
20285        struct lpfc_iocbq *piocbq = NULL;
20286        unsigned long iflags = 0;
20287        char *fail_msg = NULL;
20288        struct lpfc_sglq *sglq;
20289        union lpfc_wqe128 wqe;
20290        uint32_t txq_cnt = 0;
20291        struct lpfc_queue *wq;
20292
20293        if (phba->link_flag & LS_MDS_LOOPBACK) {
20294                /* MDS WQEs are posted only to the first WQ */
20295                wq = phba->sli4_hba.hdwq[0].io_wq;
20296                if (unlikely(!wq))
20297                        return 0;
20298                pring = wq->pring;
20299        } else {
20300                wq = phba->sli4_hba.els_wq;
20301                if (unlikely(!wq))
20302                        return 0;
20303                pring = lpfc_phba_elsring(phba);
20304        }
20305
20306        if (unlikely(!pring) || list_empty(&pring->txq))
20307                return 0;
20308
20309        spin_lock_irqsave(&pring->ring_lock, iflags);
20310        list_for_each_entry(piocbq, &pring->txq, list) {
20311                txq_cnt++;
20312        }
20313
20314        if (txq_cnt > pring->txq_max)
20315                pring->txq_max = txq_cnt;
20316
20317        spin_unlock_irqrestore(&pring->ring_lock, iflags);
20318
20319        while (!list_empty(&pring->txq)) {
20320                spin_lock_irqsave(&pring->ring_lock, iflags);
20321
20322                piocbq = lpfc_sli_ringtx_get(phba, pring);
20323                if (!piocbq) {
20324                        spin_unlock_irqrestore(&pring->ring_lock, iflags);
20325                        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
20326                                "2823 txq empty and txq_cnt is %d\n",
20327                                txq_cnt);
20328                        break;
20329                }
20330                sglq = __lpfc_sli_get_els_sglq(phba, piocbq);
20331                if (!sglq) {
20332                        __lpfc_sli_ringtx_put(phba, pring, piocbq);
20333                        spin_unlock_irqrestore(&pring->ring_lock, iflags);
20334                        break;
20335                }
20336                txq_cnt--;
20337
20338                /* The xri and iocb resources are secured,
20339                 * attempt to issue the request
20340                 */
20341                piocbq->sli4_lxritag = sglq->sli4_lxritag;
20342                piocbq->sli4_xritag = sglq->sli4_xritag;
20343                if (lpfc_sli4_bpl2sgl(phba, piocbq, sglq) == NO_XRI)
20344                        fail_msg = "to convert bpl to sgl";
20345                else if (lpfc_sli4_iocb2wqe(phba, piocbq, &wqe))
20346                        fail_msg = "to convert iocb to wqe";
20347                else if (lpfc_sli4_wq_put(wq, &wqe))
20348                        fail_msg = " - Wq is full";
20349                else
20350                        lpfc_sli_ringtxcmpl_put(phba, pring, piocbq);
20351
20352                if (fail_msg) {
20353                        /* Failed means we can't issue and need to cancel */
20354                        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
20355                                        "2822 IOCB failed %s iotag 0x%x "
20356                                        "xri 0x%x\n",
20357                                        fail_msg,
20358                                        piocbq->iotag, piocbq->sli4_xritag);
20359                        list_add_tail(&piocbq->list, &completions);
20360                }
20361                spin_unlock_irqrestore(&pring->ring_lock, iflags);
20362        }
20363
20364        /* Cancel all the IOCBs that cannot be issued */
20365        lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
20366                                IOERR_SLI_ABORTED);
20367
20368        return txq_cnt;
20369}
20370
20371/**
20372 * lpfc_wqe_bpl2sgl - Convert the bpl/bde to a sgl.
20373 * @phba: Pointer to HBA context object.
20374 * @pwqeq: Pointer to command WQE.
20375 * @sglq: Pointer to the scatter gather queue object.
20376 *
20377 * This routine converts the bpl or bde that is in the WQE
20378 * to a sgl list for the sli4 hardware. The physical address
20379 * of the bpl/bde is converted back to a virtual address.
20380 * If the WQE contains a BPL then the list of BDEs is
20381 * converted to sli4_sges. If the WQE contains a single
20382 * BDE then it is converted to a single sli4_sge.
20383 * The WQE is still in cpu endianness so the contents of
20384 * the bpl can be used without byte swapping.
20385 *
20386 * Returns valid XRI = Success, NO_XRI = Failure.
20387 */
20388static uint16_t
20389lpfc_wqe_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeq,
20390                 struct lpfc_sglq *sglq)
20391{
20392        uint16_t xritag = NO_XRI;
20393        struct ulp_bde64 *bpl = NULL;
20394        struct ulp_bde64 bde;
20395        struct sli4_sge *sgl  = NULL;
20396        struct lpfc_dmabuf *dmabuf;
20397        union lpfc_wqe128 *wqe;
20398        int numBdes = 0;
20399        int i = 0;
20400        uint32_t offset = 0; /* accumulated offset in the sg request list */
20401        int inbound = 0; /* number of sg reply entries inbound from firmware */
20402        uint32_t cmd;
20403
20404        if (!pwqeq || !sglq)
20405                return xritag;
20406
20407        sgl  = (struct sli4_sge *)sglq->sgl;
20408        wqe = &pwqeq->wqe;
20409        pwqeq->iocb.ulpIoTag = pwqeq->iotag;
20410
20411        cmd = bf_get(wqe_cmnd, &wqe->generic.wqe_com);
20412        if (cmd == CMD_XMIT_BLS_RSP64_WQE)
20413                return sglq->sli4_xritag;
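             /* rsvd2 holds the number of BDEs in this request's BPL */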
20414        numBdes = pwqeq->rsvd2;
20415        if (numBdes) {
20416                /* The addrHigh and addrLow fields within the WQE
20417                 * have not been byteswapped yet so there is no
20418                 * need to swap them back.
20419                 */
20420                if (pwqeq->context3)
20421                        dmabuf = (struct lpfc_dmabuf *)pwqeq->context3;
20422                else
20423                        return xritag;
20424
20425                bpl  = (struct ulp_bde64 *)dmabuf->virt;
20426                if (!bpl)
20427                        return xritag;
20428
20429                for (i = 0; i < numBdes; i++) {
20430                        /* Should already be byte swapped. */
20431                        sgl->addr_hi = bpl->addrHigh;
20432                        sgl->addr_lo = bpl->addrLow;
20433
20434                        sgl->word2 = le32_to_cpu(sgl->word2);
20435                        if ((i+1) == numBdes)
20436                                bf_set(lpfc_sli4_sge_last, sgl, 1);
20437                        else
20438                                bf_set(lpfc_sli4_sge_last, sgl, 0);
20439                        /* swap the size field back to the cpu so we
20440                         * can assign it to the sgl.
20441                         */
20442                        bde.tus.w = le32_to_cpu(bpl->tus.w);
20443                        sgl->sge_len = cpu_to_le32(bde.tus.f.bdeSize);
20444                        /* The offsets in the sgl need to be accumulated
20445                         * separately for the request and reply lists.
20446                         * The request is always first, the reply follows.
20447                         */
20448                        switch (cmd) {
20449                        case CMD_GEN_REQUEST64_WQE:
20450                                /* add up the reply sg entries */
20451                                if (bpl->tus.f.bdeFlags == BUFF_TYPE_BDE_64I)
20452                                        inbound++;
20453                                /* first inbound? reset the offset */
20454                                if (inbound == 1)
20455                                        offset = 0;
20456                                bf_set(lpfc_sli4_sge_offset, sgl, offset);
20457                                bf_set(lpfc_sli4_sge_type, sgl,
20458                                        LPFC_SGE_TYPE_DATA);
20459                                offset += bde.tus.f.bdeSize;
20460                                break;
20461                        case CMD_FCP_TRSP64_WQE:
20462                                bf_set(lpfc_sli4_sge_offset, sgl, 0);
20463                                bf_set(lpfc_sli4_sge_type, sgl,
20464                                        LPFC_SGE_TYPE_DATA);
20465                                break;
20466                        case CMD_FCP_TSEND64_WQE:
20467                        case CMD_FCP_TRECEIVE64_WQE:
20468                                bf_set(lpfc_sli4_sge_type, sgl,
20469                                        bpl->tus.f.bdeFlags);
20470                                if (i < 3)
20471                                        offset = 0;
20472                                else
20473                                        offset += bde.tus.f.bdeSize;
20474                                bf_set(lpfc_sli4_sge_offset, sgl, offset);
20475                                break;
20476                        }
20477                        sgl->word2 = cpu_to_le32(sgl->word2);
20478                        bpl++;
20479                        sgl++;
20480                }
20481        } else if (wqe->gen_req.bde.tus.f.bdeFlags == BUFF_TYPE_BDE_64) {
20482                /* The addrHigh and addrLow fields of the BDE have not
20483                 * been byteswapped yet so they need to be swapped
20484                 * before putting them in the sgl.
20485                 */
20486                sgl->addr_hi = cpu_to_le32(wqe->gen_req.bde.addrHigh);
20487                sgl->addr_lo = cpu_to_le32(wqe->gen_req.bde.addrLow);
20488                sgl->word2 = le32_to_cpu(sgl->word2);
20489                bf_set(lpfc_sli4_sge_last, sgl, 1);
20490                sgl->word2 = cpu_to_le32(sgl->word2);
20491                sgl->sge_len = cpu_to_le32(wqe->gen_req.bde.tus.f.bdeSize);
20492        }
20493        return sglq->sli4_xritag;
20494}
20495
20496/**
20497 * lpfc_sli4_issue_wqe - Issue an SLI4 Work Queue Entry (WQE)
20498 * @phba: Pointer to HBA context object.
20499 * @qp: Pointer to HDW queue.
20500 * @pwqe: Pointer to command WQE.
20501 **/
20502int
20503lpfc_sli4_issue_wqe(struct lpfc_hba *phba, struct lpfc_sli4_hdw_queue *qp,
20504                    struct lpfc_iocbq *pwqe)
20505{
20506        union lpfc_wqe128 *wqe = &pwqe->wqe;
20507        struct lpfc_async_xchg_ctx *ctxp;
20508        struct lpfc_queue *wq;
20509        struct lpfc_sglq *sglq;
20510        struct lpfc_sli_ring *pring;
20511        unsigned long iflags;
20512        uint32_t ret = 0;
20513
20514        /* NVME_LS and NVME_LS ABTS requests. */
20515        if (pwqe->iocb_flag & LPFC_IO_NVME_LS) {
20516                pring =  phba->sli4_hba.nvmels_wq->pring;
20517                lpfc_qp_spin_lock_irqsave(&pring->ring_lock, iflags,
20518                                          qp, wq_access);
20519                sglq = __lpfc_sli_get_els_sglq(phba, pwqe);
20520                if (!sglq) {
20521                        spin_unlock_irqrestore(&pring->ring_lock, iflags);
20522                        return WQE_BUSY;
20523                }
20524                pwqe->sli4_lxritag = sglq->sli4_lxritag;
20525                pwqe->sli4_xritag = sglq->sli4_xritag;
20526                if (lpfc_wqe_bpl2sgl(phba, pwqe, sglq) == NO_XRI) {
20527                        spin_unlock_irqrestore(&pring->ring_lock, iflags);
20528                        return WQE_ERROR;
20529                }
20530                bf_set(wqe_xri_tag, &pwqe->wqe.xmit_bls_rsp.wqe_com,
20531                       pwqe->sli4_xritag);
20532                ret = lpfc_sli4_wq_put(phba->sli4_hba.nvmels_wq, wqe);
20533                if (ret) {
20534                        spin_unlock_irqrestore(&pring->ring_lock, iflags);
20535                        return ret;
20536                }
20537
20538                lpfc_sli_ringtxcmpl_put(phba, pring, pwqe);
20539                spin_unlock_irqrestore(&pring->ring_lock, iflags);
20540
20541                lpfc_sli4_poll_eq(qp->hba_eq, LPFC_POLL_FASTPATH);
20542                return 0;
20543        }
20544
20545        /* NVME_FCREQ and NVME_ABTS requests */
20546        if (pwqe->iocb_flag & LPFC_IO_NVME ||
20547            pwqe->iocb_flag & LPFC_IO_FCP) {
20548                /* Get the IO distribution (hba_wqidx) for WQ assignment. */
20549                wq = qp->io_wq;
20550                pring = wq->pring;
20551
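                     /* Route this WQE's completion to the CQ mapped to
                      * this hardware queue.
                      */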
20552                bf_set(wqe_cqid, &wqe->generic.wqe_com, qp->io_cq_map);
20553
20554                lpfc_qp_spin_lock_irqsave(&pring->ring_lock, iflags,
20555                                          qp, wq_access);
20556                ret = lpfc_sli4_wq_put(wq, wqe);
20557                if (ret) {
20558                        spin_unlock_irqrestore(&pring->ring_lock, iflags);
20559                        return ret;
20560                }
20561                lpfc_sli_ringtxcmpl_put(phba, pring, pwqe);
20562                spin_unlock_irqrestore(&pring->ring_lock, iflags);
20563
20564                lpfc_sli4_poll_eq(qp->hba_eq, LPFC_POLL_FASTPATH);
20565                return 0;
20566        }
20567
20568        /* NVMET requests */
20569        if (pwqe->iocb_flag & LPFC_IO_NVMET) {
20570                /* Get the IO distribution (hba_wqidx) for WQ assignment. */
20571                wq = qp->io_wq;
20572                pring = wq->pring;
20573
20574                ctxp = pwqe->context2;
20575                sglq = ctxp->ctxbuf->sglq;
20576                if (pwqe->sli4_xritag == NO_XRI) {
20577                        pwqe->sli4_lxritag = sglq->sli4_lxritag;
20578                        pwqe->sli4_xritag = sglq->sli4_xritag;
20579                }
20580                bf_set(wqe_xri_tag, &pwqe->wqe.xmit_bls_rsp.wqe_com,
20581                       pwqe->sli4_xritag);
20582                bf_set(wqe_cqid, &wqe->generic.wqe_com, qp->io_cq_map);
20583
20584                lpfc_qp_spin_lock_irqsave(&pring->ring_lock, iflags,
20585                                          qp, wq_access);
20586                ret = lpfc_sli4_wq_put(wq, wqe);
20587                if (ret) {
20588                        spin_unlock_irqrestore(&pring->ring_lock, iflags);
20589                        return ret;
20590                }
20591                lpfc_sli_ringtxcmpl_put(phba, pring, pwqe);
20592                spin_unlock_irqrestore(&pring->ring_lock, iflags);
20593
20594                lpfc_sli4_poll_eq(qp->hba_eq, LPFC_POLL_FASTPATH);
20595                return 0;
20596        }
20597        return WQE_ERROR;
20598}
20599
20600/**
20601 * lpfc_sli4_issue_abort_iotag - SLI-4 WQE init & issue for the Abort
20602 * @phba: Pointer to HBA context object.
20603 * @cmdiocb: Pointer to driver command iocb object.
20604 * @cmpl: completion function.
20605 *
20606 * Fill the appropriate fields for the abort WQE and call
20607 * the internal routine lpfc_sli4_issue_wqe to send the WQE.
20608 * This function is called with the hbalock held and no ring_lock held.
20609 *
20610 * RETURNS 0 - SUCCESS
20611 **/
20612
20613int
20614lpfc_sli4_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
20615                            void *cmpl)
20616{
20617        struct lpfc_vport *vport = cmdiocb->vport;
20618        struct lpfc_iocbq *abtsiocb = NULL;
20619        union lpfc_wqe128 *abtswqe;
20620        struct lpfc_io_buf *lpfc_cmd;
20621        int retval = IOCB_ERROR;
20622        u16 xritag = cmdiocb->sli4_xritag;
20623
20624        /*
20625         * The scsi command cannot be in the txq and it is in flight because
20626         * pCmd is still pointing at the SCSI command we have to abort. There
20627         * is no need to search the txcmplq. Just send an abort to the FW.
20628         */
20629
20630        abtsiocb = __lpfc_sli_get_iocbq(phba);
20631        if (!abtsiocb)
20632                return WQE_NORESOURCE;
20633
20634        /* Indicate the IO is being aborted by the driver. */
20635        cmdiocb->iocb_flag |= LPFC_DRIVER_ABORTED;
20636
20637        abtswqe = &abtsiocb->wqe;
20638        memset(abtswqe, 0, sizeof(*abtswqe));
20639
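             /* With the link down, set the IA bit so the firmware aborts the
              * exchange locally rather than putting an ABTS on the wire
              * (assumed from the bit name and its usage here).
              */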
20640        if (!lpfc_is_link_up(phba))
20641                bf_set(abort_cmd_ia, &abtswqe->abort_cmd, 1);
20642        bf_set(abort_cmd_criteria, &abtswqe->abort_cmd, T_XRI_TAG);
20643        abtswqe->abort_cmd.rsrvd5 = 0;
20644        abtswqe->abort_cmd.wqe_com.abort_tag = xritag;
20645        bf_set(wqe_reqtag, &abtswqe->abort_cmd.wqe_com, abtsiocb->iotag);
20646        bf_set(wqe_cmnd, &abtswqe->abort_cmd.wqe_com, CMD_ABORT_XRI_CX);
20647        bf_set(wqe_xri_tag, &abtswqe->generic.wqe_com, 0);
20648        bf_set(wqe_qosd, &abtswqe->abort_cmd.wqe_com, 1);
20649        bf_set(wqe_lenloc, &abtswqe->abort_cmd.wqe_com, LPFC_WQE_LENLOC_NONE);
20650        bf_set(wqe_cmd_type, &abtswqe->abort_cmd.wqe_com, OTHER_COMMAND);
20651
20652        /* ABTS WQE must go to the same WQ as the WQE to be aborted */
20653        abtsiocb->hba_wqidx = cmdiocb->hba_wqidx;
20654        abtsiocb->iocb_flag |= LPFC_USE_FCPWQIDX;
20655        if (cmdiocb->iocb_flag & LPFC_IO_FCP)
20656                abtsiocb->iocb_flag |= LPFC_IO_FCP;
20657        if (cmdiocb->iocb_flag & LPFC_IO_NVME)
20658                abtsiocb->iocb_flag |= LPFC_IO_NVME;
20659        if (cmdiocb->iocb_flag & LPFC_IO_FOF)
20660                abtsiocb->iocb_flag |= LPFC_IO_FOF;
20661        abtsiocb->vport = vport;
20662        abtsiocb->wqe_cmpl = cmpl;
20663
20664        lpfc_cmd = container_of(cmdiocb, struct lpfc_io_buf, cur_iocbq);
20665        retval = lpfc_sli4_issue_wqe(phba, lpfc_cmd->hdwq, abtsiocb);
20666
20667        lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI | LOG_NVME_ABTS | LOG_FCP,
20668                         "0359 Abort xri x%x, original iotag x%x, "
20669                         "abort cmd iotag x%x retval x%x\n",
20670                         xritag, cmdiocb->iotag, abtsiocb->iotag, retval);
20671
20672        if (retval) {
20673                cmdiocb->iocb_flag &= ~LPFC_DRIVER_ABORTED;
20674                __lpfc_sli_release_iocbq(phba, abtsiocb);
20675        }
20676
20677        return retval;
20678}
20679
20680#ifdef LPFC_MXP_STAT
20681/**
20682 * lpfc_snapshot_mxp - Snapshot pbl, pvt and busy count
20683 * @phba: pointer to lpfc hba data structure.
20684 * @hwqid: index of the HWQ to snapshot.
20685 *
20686 * The purpose of this routine is to take a snapshot of pbl, pvt and busy count
20687 * 15 seconds after a test case starts running.
20688 *
20689 * The user should call lpfc_debugfs_multixripools_write before running a test
20690 * case to clear stat_snapshot_taken. Then the user starts the test case. While
20691 * the test case is running, stat_snapshot_taken is incremented by 1 every time
20692 * this routine is called from the heartbeat timer. When stat_snapshot_taken is
20693 * equal to LPFC_MXP_SNAPSHOT_TAKEN, a snapshot is taken.
20694 **/
20695void lpfc_snapshot_mxp(struct lpfc_hba *phba, u32 hwqid)
20696{
20697        struct lpfc_sli4_hdw_queue *qp;
20698        struct lpfc_multixri_pool *multixri_pool;
20699        struct lpfc_pvt_pool *pvt_pool;
20700        struct lpfc_pbl_pool *pbl_pool;
20701        u32 txcmplq_cnt;
20702
20703        qp = &phba->sli4_hba.hdwq[hwqid];
20704        multixri_pool = qp->p_multixri_pool;
20705        if (!multixri_pool)
20706                return;
20707
20708        if (multixri_pool->stat_snapshot_taken == LPFC_MXP_SNAPSHOT_TAKEN) {
20709                pvt_pool = &qp->p_multixri_pool->pvt_pool;
20710                pbl_pool = &qp->p_multixri_pool->pbl_pool;
20711                txcmplq_cnt = qp->io_wq->pring->txcmplq_cnt;
20712
20713                multixri_pool->stat_pbl_count = pbl_pool->count;
20714                multixri_pool->stat_pvt_count = pvt_pool->count;
20715                multixri_pool->stat_busy_count = txcmplq_cnt;
20716        }
20717
20718        multixri_pool->stat_snapshot_taken++;
20719}
20720#endif
20721
20722/**
20723 * lpfc_adjust_pvt_pool_count - Adjust private pool count
20724 * @phba: pointer to lpfc hba data structure.
20725 * @hwqid: index of the HWQ to adjust.
20726 *
20727 * This routine moves some XRIs from private to public pool when private pool
20728 * is not busy.
20729 **/
20730void lpfc_adjust_pvt_pool_count(struct lpfc_hba *phba, u32 hwqid)
20731{
20732        struct lpfc_multixri_pool *multixri_pool;
20733        u32 io_req_count;
20734        u32 prev_io_req_count;
20735
20736        multixri_pool = phba->sli4_hba.hdwq[hwqid].p_multixri_pool;
20737        if (!multixri_pool)
20738                return;
20739        io_req_count = multixri_pool->io_req_count;
20740        prev_io_req_count = multixri_pool->prev_io_req_count;
20741
20742        if (prev_io_req_count != io_req_count) {
20743                /* Private pool is busy */
20744                multixri_pool->prev_io_req_count = io_req_count;
20745        } else {
20746                /* Private pool is not busy.
20747                 * Move XRIs from private to public pool.
20748                 */
20749                lpfc_move_xri_pvt_to_pbl(phba, hwqid);
20750        }
20751}
20752
20753/**
20754 * lpfc_adjust_high_watermark - Adjust high watermark
20755 * @phba: pointer to lpfc hba data structure.
20756 * @hwqid: index of the HWQ to adjust.
20757 *
20758 * This routine sets the high watermark to the number of outstanding XRIs,
20759 * but makes sure the new value stays between xri_limit/2 and xri_limit.
20760 **/
20761void lpfc_adjust_high_watermark(struct lpfc_hba *phba, u32 hwqid)
20762{
20763        u32 new_watermark;
20764        u32 watermark_max;
20765        u32 watermark_min;
20766        u32 xri_limit;
20767        u32 txcmplq_cnt;
20768        u32 abts_io_bufs;
20769        struct lpfc_multixri_pool *multixri_pool;
20770        struct lpfc_sli4_hdw_queue *qp;
20771
20772        qp = &phba->sli4_hba.hdwq[hwqid];
20773        multixri_pool = qp->p_multixri_pool;
20774        if (!multixri_pool)
20775                return;
20776        xri_limit = multixri_pool->xri_limit;
20777
20778        watermark_max = xri_limit;
20779        watermark_min = xri_limit / 2;
20780
20781        txcmplq_cnt = qp->io_wq->pring->txcmplq_cnt;
20782        abts_io_bufs = qp->abts_scsi_io_bufs;
20783        abts_io_bufs += qp->abts_nvme_io_bufs;
20784
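             /* Clamp the outstanding I/O count (txcmplq plus aborted bufs)
              * into [xri_limit / 2, xri_limit]; e.g. with xri_limit of 512,
              * 100 outstanding I/Os yield a watermark of 256.
              */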
20785        new_watermark = txcmplq_cnt + abts_io_bufs;
20786        new_watermark = min(watermark_max, new_watermark);
20787        new_watermark = max(watermark_min, new_watermark);
20788        multixri_pool->pvt_pool.high_watermark = new_watermark;
20789
20790#ifdef LPFC_MXP_STAT
20791        multixri_pool->stat_max_hwm = max(multixri_pool->stat_max_hwm,
20792                                          new_watermark);
20793#endif
20794}
20795
20796/**
20797 * lpfc_move_xri_pvt_to_pbl - Move some XRIs from private to public pool
20798 * @phba: pointer to lpfc hba data structure.
20799 * @hwqid: index of the HWQ whose pools are adjusted.
20800 *
20801 * This routine is called from the heartbeat timer when pvt_pool is idle.
20802 * All free XRIs are moved from the private to the public pool on hwqid in 2 steps.
20803 * The first step moves (all - low_watermark) XRIs.
20804 * The second step moves the rest of the XRIs.
20805 **/
20806void lpfc_move_xri_pvt_to_pbl(struct lpfc_hba *phba, u32 hwqid)
20807{
20808        struct lpfc_pbl_pool *pbl_pool;
20809        struct lpfc_pvt_pool *pvt_pool;
20810        struct lpfc_sli4_hdw_queue *qp;
20811        struct lpfc_io_buf *lpfc_ncmd;
20812        struct lpfc_io_buf *lpfc_ncmd_next;
20813        unsigned long iflag;
20814        struct list_head tmp_list;
20815        u32 tmp_count;
20816
20817        qp = &phba->sli4_hba.hdwq[hwqid];
20818        pbl_pool = &qp->p_multixri_pool->pbl_pool;
20819        pvt_pool = &qp->p_multixri_pool->pvt_pool;
20820        tmp_count = 0;
20821
20822        lpfc_qp_spin_lock_irqsave(&pbl_pool->lock, iflag, qp, mv_to_pub_pool);
20823        lpfc_qp_spin_lock(&pvt_pool->lock, qp, mv_from_pvt_pool);
20824
20825        if (pvt_pool->count > pvt_pool->low_watermark) {
20826                /* Step 1: move (all - low_watermark) from pvt_pool
20827                 * to pbl_pool
20828                 */
20829
20830                /* Move low watermark of bufs from pvt_pool to tmp_list */
20831                INIT_LIST_HEAD(&tmp_list);
20832                list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
20833                                         &pvt_pool->list, list) {
20834                        list_move_tail(&lpfc_ncmd->list, &tmp_list);
20835                        tmp_count++;
20836                        if (tmp_count >= pvt_pool->low_watermark)
20837                                break;
20838                }
20839
20840                /* Move all bufs from pvt_pool to pbl_pool */
20841                list_splice_init(&pvt_pool->list, &pbl_pool->list);
20842
20843                /* Move all bufs from tmp_list to pvt_pool */
20844                list_splice(&tmp_list, &pvt_pool->list);
20845
20846                pbl_pool->count += (pvt_pool->count - tmp_count);
20847                pvt_pool->count = tmp_count;
20848        } else {
20849                /* Step 2: move the rest from pvt_pool to pbl_pool */
20850                list_splice_init(&pvt_pool->list, &pbl_pool->list);
20851                pbl_pool->count += pvt_pool->count;
20852                pvt_pool->count = 0;
20853        }
20854
20855        spin_unlock(&pvt_pool->lock);
20856        spin_unlock_irqrestore(&pbl_pool->lock, iflag);
20857}
20858
20859/**
20860 * _lpfc_move_xri_pbl_to_pvt - Move some XRIs from public to private pool
20861 * @phba: pointer to lpfc hba data structure
20862 * @qp: pointer to HDW queue
20863 * @pbl_pool: specified public free XRI pool
20864 * @pvt_pool: specified private free XRI pool
20865 * @count: number of XRIs to move
20866 *
20867 * This routine tries to move some free common bufs from the specified pbl_pool
20868 * to the specified pvt_pool. It might move fewer than count XRIs if there are
20869 * not enough in the public pool.
20870 *
20871 * Return:
20872 *   true - if XRIs are successfully moved from the specified pbl_pool to the
20873 *          specified pvt_pool
20874 *   false - if the specified pbl_pool is empty or locked by someone else
20875 **/
20876static bool
20877_lpfc_move_xri_pbl_to_pvt(struct lpfc_hba *phba, struct lpfc_sli4_hdw_queue *qp,
20878                          struct lpfc_pbl_pool *pbl_pool,
20879                          struct lpfc_pvt_pool *pvt_pool, u32 count)
20880{
20881        struct lpfc_io_buf *lpfc_ncmd;
20882        struct lpfc_io_buf *lpfc_ncmd_next;
20883        unsigned long iflag;
20884        int ret;
20885
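             /* Trylock only: if someone else holds this pbl_pool, give up so
              * the caller can try the next HWQ's pool instead of spinning.
              */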
20886        ret = spin_trylock_irqsave(&pbl_pool->lock, iflag);
20887        if (ret) {
20888                if (pbl_pool->count) {
20889                        /* Move a batch of XRIs from public to private pool */
20890                        lpfc_qp_spin_lock(&pvt_pool->lock, qp, mv_to_pvt_pool);
20891                        list_for_each_entry_safe(lpfc_ncmd,
20892                                                 lpfc_ncmd_next,
20893                                                 &pbl_pool->list,
20894                                                 list) {
20895                                list_move_tail(&lpfc_ncmd->list,
20896                                               &pvt_pool->list);
20897                                pvt_pool->count++;
20898                                pbl_pool->count--;
20899                                count--;
20900                                if (count == 0)
20901                                        break;
20902                        }
20903
20904                        spin_unlock(&pvt_pool->lock);
20905                        spin_unlock_irqrestore(&pbl_pool->lock, iflag);
20906                        return true;
20907                }
20908                spin_unlock_irqrestore(&pbl_pool->lock, iflag);
20909        }
20910
20911        return false;
20912}
20913
20914/**
20915 * lpfc_move_xri_pbl_to_pvt - Move some XRIs from public to private pool
20916 * @phba: pointer to lpfc hba data structure.
20917 * @hwqid: index of the HWQ whose private pool is refilled.
20918 * @count: number of XRIs to move
20919 *
20920 * This routine tries to find some free common bufs in one of the public pools
20921 * using a round-robin method. The search starts from the local hwqid, then from
20922 * the HWQ found last time (rrb_next_hwqid). Once a non-empty public pool is
20923 * found, a batch of free common bufs is moved to the private pool on hwqid.
20924 * It might move fewer than count XRIs if there are not enough in the public pools.
20925 **/
20926void lpfc_move_xri_pbl_to_pvt(struct lpfc_hba *phba, u32 hwqid, u32 count)
20927{
20928        struct lpfc_multixri_pool *multixri_pool;
20929        struct lpfc_multixri_pool *next_multixri_pool;
20930        struct lpfc_pvt_pool *pvt_pool;
20931        struct lpfc_pbl_pool *pbl_pool;
20932        struct lpfc_sli4_hdw_queue *qp;
20933        u32 next_hwqid;
20934        u32 hwq_count;
20935        int ret;
20936
20937        qp = &phba->sli4_hba.hdwq[hwqid];
20938        multixri_pool = qp->p_multixri_pool;
20939        pvt_pool = &multixri_pool->pvt_pool;
20940        pbl_pool = &multixri_pool->pbl_pool;
20941
20942        /* Check if local pbl_pool is available */
20943        ret = _lpfc_move_xri_pbl_to_pvt(phba, qp, pbl_pool, pvt_pool, count);
20944        if (ret) {
20945#ifdef LPFC_MXP_STAT
20946                multixri_pool->local_pbl_hit_count++;
20947#endif
20948                return;
20949        }
20950
20951        hwq_count = phba->cfg_hdw_queue;
20952
20953        /* Get the next hwqid which was found last time */
20954        next_hwqid = multixri_pool->rrb_next_hwqid;
20955
20956        do {
20957                /* Go to next hwq */
20958                next_hwqid = (next_hwqid + 1) % hwq_count;
20959
20960                next_multixri_pool =
20961                        phba->sli4_hba.hdwq[next_hwqid].p_multixri_pool;
20962                pbl_pool = &next_multixri_pool->pbl_pool;
20963
20964                /* Check if the public free xri pool is available */
20965                ret = _lpfc_move_xri_pbl_to_pvt(
20966                        phba, qp, pbl_pool, pvt_pool, count);
20967
20968                /* Exit the while-loop on success or once all hwqids are checked */
20969        } while (!ret && next_hwqid != multixri_pool->rrb_next_hwqid);
20970
20971        /* Starting point for the next time */
20972        multixri_pool->rrb_next_hwqid = next_hwqid;
20973
20974        if (!ret) {
20975                /* stats: all public pools are empty */
20976                multixri_pool->pbl_empty_count++;
20977        }
20978
20979#ifdef LPFC_MXP_STAT
20980        if (ret) {
20981                if (next_hwqid == hwqid)
20982                        multixri_pool->local_pbl_hit_count++;
20983                else
20984                        multixri_pool->other_pbl_hit_count++;
20985        }
20986#endif
20987}
20988
20989/**
20990 * lpfc_keep_pvt_pool_above_lowwm - Keep pvt_pool above low watermark
20991 * @phba: pointer to lpfc hba data structure.
20992 * @hwqid: index of the HWQ to replenish.
20993 *
20994 * This routine gets a batch of XRIs from pbl_pool if pvt_pool's count is less
20995 * than the low watermark.
20996 **/
20997void lpfc_keep_pvt_pool_above_lowwm(struct lpfc_hba *phba, u32 hwqid)
20998{
20999        struct lpfc_multixri_pool *multixri_pool;
21000        struct lpfc_pvt_pool *pvt_pool;
21001
21002        multixri_pool = phba->sli4_hba.hdwq[hwqid].p_multixri_pool;
21003        pvt_pool = &multixri_pool->pvt_pool;
21004
21005        if (pvt_pool->count < pvt_pool->low_watermark)
21006                lpfc_move_xri_pbl_to_pvt(phba, hwqid, XRI_BATCH);
21007}
21008
21009/**
21010 * lpfc_release_io_buf - Return one IO buf back to free pool
21011 * @phba: pointer to lpfc hba data structure.
21012 * @lpfc_ncmd: IO buf to be returned.
21013 * @qp: the HWQ the buffer belongs to.
21014 *
21015 * This routine returns one IO buf back to the free pool. If this is an urgent
21016 * IO, the IO buf is returned to the expedite pool. If cfg_xri_rebalancing==1,
21017 * the IO buf is returned to pbl_pool or pvt_pool based on watermark and
21018 * xri_limit.  If cfg_xri_rebalancing==0, the IO buf is returned to
21019 * lpfc_io_buf_list_put.
21020 **/
21021void lpfc_release_io_buf(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_ncmd,
21022                         struct lpfc_sli4_hdw_queue *qp)
21023{
21024        unsigned long iflag;
21025        struct lpfc_pbl_pool *pbl_pool;
21026        struct lpfc_pvt_pool *pvt_pool;
21027        struct lpfc_epd_pool *epd_pool;
21028        u32 txcmplq_cnt;
21029        u32 xri_owned;
21030        u32 xri_limit;
21031        u32 abts_io_bufs;
21032
21033        /* MUST zero fields if buffer is reused by another protocol */
21034        lpfc_ncmd->nvmeCmd = NULL;
21035        lpfc_ncmd->cur_iocbq.wqe_cmpl = NULL;
21036        lpfc_ncmd->cur_iocbq.iocb_cmpl = NULL;
21037
21038        if (phba->cfg_xpsgl && !phba->nvmet_support &&
21039            !list_empty(&lpfc_ncmd->dma_sgl_xtra_list))
21040                lpfc_put_sgl_per_hdwq(phba, lpfc_ncmd);
21041
21042        if (!list_empty(&lpfc_ncmd->dma_cmd_rsp_list))
21043                lpfc_put_cmd_rsp_buf_per_hdwq(phba, lpfc_ncmd);
21044
21045        if (phba->cfg_xri_rebalancing) {
21046                if (lpfc_ncmd->expedite) {
21047                        /* Return to expedite pool */
21048                        epd_pool = &phba->epd_pool;
21049                        spin_lock_irqsave(&epd_pool->lock, iflag);
21050                        list_add_tail(&lpfc_ncmd->list, &epd_pool->list);
21051                        epd_pool->count++;
21052                        spin_unlock_irqrestore(&epd_pool->lock, iflag);
21053                        return;
21054                }
21055
21056                /* Avoid invalid access if an IO sneaks in and is being rejected
21057                 * just _after_ xri pools are destroyed in lpfc_offline.
21058                 * Nothing much can be done at this point.
21059                 */
21060                if (!qp->p_multixri_pool)
21061                        return;
21062
21063                pbl_pool = &qp->p_multixri_pool->pbl_pool;
21064                pvt_pool = &qp->p_multixri_pool->pvt_pool;
21065
21066                txcmplq_cnt = qp->io_wq->pring->txcmplq_cnt;
21067                abts_io_bufs = qp->abts_scsi_io_bufs;
21068                abts_io_bufs += qp->abts_nvme_io_bufs;
21069
21070                xri_owned = pvt_pool->count + txcmplq_cnt + abts_io_bufs;
21071                xri_limit = qp->p_multixri_pool->xri_limit;
21072
21073#ifdef LPFC_MXP_STAT
21074                if (xri_owned <= xri_limit)
21075                        qp->p_multixri_pool->below_limit_count++;
21076                else
21077                        qp->p_multixri_pool->above_limit_count++;
21078#endif
21079
21080                /* XRI goes to either public or private free xri pool
21081                 *     based on watermark and xri_limit
21082                 */
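                     /* Example: with xri_limit of 512 and high_watermark of
                      * 256, a freed buffer goes back to pvt_pool while fewer
                      * than 512 XRIs are owned and pvt_pool holds fewer than
                      * 256, or whenever pvt_pool is below its low watermark;
                      * otherwise it goes back to pbl_pool.
                      */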
21083                if ((pvt_pool->count < pvt_pool->low_watermark) ||
21084                    (xri_owned < xri_limit &&
21085                     pvt_pool->count < pvt_pool->high_watermark)) {
21086                        lpfc_qp_spin_lock_irqsave(&pvt_pool->lock, iflag,
21087                                                  qp, free_pvt_pool);
21088                        list_add_tail(&lpfc_ncmd->list,
21089                                      &pvt_pool->list);
21090                        pvt_pool->count++;
21091                        spin_unlock_irqrestore(&pvt_pool->lock, iflag);
21092                } else {
21093                        lpfc_qp_spin_lock_irqsave(&pbl_pool->lock, iflag,
21094                                                  qp, free_pub_pool);
21095                        list_add_tail(&lpfc_ncmd->list,
21096                                      &pbl_pool->list);
21097                        pbl_pool->count++;
21098                        spin_unlock_irqrestore(&pbl_pool->lock, iflag);
21099                }
21100        } else {
21101                lpfc_qp_spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag,
21102                                          qp, free_xri);
21103                list_add_tail(&lpfc_ncmd->list,
21104                              &qp->lpfc_io_buf_list_put);
21105                qp->put_io_bufs++;
21106                spin_unlock_irqrestore(&qp->io_buf_list_put_lock,
21107                                       iflag);
21108        }
21109}
21110
21111/**
21112 * lpfc_get_io_buf_from_private_pool - Get one free IO buf from private pool
21113 * @phba: pointer to lpfc hba data structure.
21114 * @qp: pointer to HDW queue
21115 * @pvt_pool: pointer to private pool data structure.
21116 * @ndlp: pointer to lpfc nodelist data structure.
21117 *
21118 * This routine tries to get one free IO buf from private pool.
21119 *
21120 * Return:
21121 *   pointer to one free IO buf - if private pool is not empty
21122 *   NULL - if private pool is empty
21123 **/
21124static struct lpfc_io_buf *
21125lpfc_get_io_buf_from_private_pool(struct lpfc_hba *phba,
21126                                  struct lpfc_sli4_hdw_queue *qp,
21127                                  struct lpfc_pvt_pool *pvt_pool,
21128                                  struct lpfc_nodelist *ndlp)
21129{
21130        struct lpfc_io_buf *lpfc_ncmd;
21131        struct lpfc_io_buf *lpfc_ncmd_next;
21132        unsigned long iflag;
21133
21134        lpfc_qp_spin_lock_irqsave(&pvt_pool->lock, iflag, qp, alloc_pvt_pool);
21135        list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
21136                                 &pvt_pool->list, list) {
21137                if (lpfc_test_rrq_active(
21138                        phba, ndlp, lpfc_ncmd->cur_iocbq.sli4_lxritag))
21139                        continue;
21140                list_del(&lpfc_ncmd->list);
21141                pvt_pool->count--;
21142                spin_unlock_irqrestore(&pvt_pool->lock, iflag);
21143                return lpfc_ncmd;
21144        }
21145        spin_unlock_irqrestore(&pvt_pool->lock, iflag);
21146
21147        return NULL;
21148}
21149
21150/**
21151 * lpfc_get_io_buf_from_expedite_pool - Get one free IO buf from expedite pool
21152 * @phba: pointer to lpfc hba data structure.
21153 *
21154 * This routine tries to get one free IO buf from expedite pool.
21155 *
21156 * Return:
21157 *   pointer to one free IO buf - if expedite pool is not empty
21158 *   NULL - if expedite pool is empty
21159 **/
21160static struct lpfc_io_buf *
21161lpfc_get_io_buf_from_expedite_pool(struct lpfc_hba *phba)
21162{
21163        struct lpfc_io_buf *lpfc_ncmd;
21164        struct lpfc_io_buf *lpfc_ncmd_next;
21165        unsigned long iflag;
21166        struct lpfc_epd_pool *epd_pool;
21167
21168        epd_pool = &phba->epd_pool;
21169        lpfc_ncmd = NULL;
21170
21171        spin_lock_irqsave(&epd_pool->lock, iflag);
21172        if (epd_pool->count > 0) {
21173                list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
21174                                         &epd_pool->list, list) {
21175                        list_del(&lpfc_ncmd->list);
21176                        epd_pool->count--;
21177                        break;
21178                }
21179        }
21180        spin_unlock_irqrestore(&epd_pool->lock, iflag);
21181
21182        return lpfc_ncmd;
21183}
21184
21185/**
21186 * lpfc_get_io_buf_from_multixri_pools - Get one free IO buf
21187 * @phba: pointer to lpfc hba data structure.
21188 * @ndlp: pointer to lpfc nodelist data structure.
21189 * @hwqid: index of the HWQ to allocate from
21190 * @expedite: 1 means this request is urgent.
21191 *
21192 * This routine will do the following actions and then return a pointer to
21193 * one free IO buf.
21194 *
21195 * 1. If private free xri count is empty, move some XRIs from public to
21196 *    private pool.
21197 * 2. Get one XRI from private free xri pool.
21198 * 3. If we fail to get one from pvt_pool and this is an expedite request,
21199 *    get one free xri from expedite pool.
21200 *
21201 * Note: ndlp is only used on SCSI side for RRQ testing.
21202 *       The caller should pass NULL for ndlp on NVME side.
21203 *
21204 * Return:
21205 *   pointer to one free IO buf - if private pool is not empty
21206 *   NULL - if private pool is empty
21207 **/
21208static struct lpfc_io_buf *
21209lpfc_get_io_buf_from_multixri_pools(struct lpfc_hba *phba,
21210                                    struct lpfc_nodelist *ndlp,
21211                                    int hwqid, int expedite)
21212{
21213        struct lpfc_sli4_hdw_queue *qp;
21214        struct lpfc_multixri_pool *multixri_pool;
21215        struct lpfc_pvt_pool *pvt_pool;
21216        struct lpfc_io_buf *lpfc_ncmd;
21217
21218        qp = &phba->sli4_hba.hdwq[hwqid];
21219        lpfc_ncmd = NULL;
21220        multixri_pool = qp->p_multixri_pool;
21221        pvt_pool = &multixri_pool->pvt_pool;
21222        multixri_pool->io_req_count++;
21223
21224        /* If pvt_pool is empty, move some XRIs from public to private pool */
21225        if (pvt_pool->count == 0)
21226                lpfc_move_xri_pbl_to_pvt(phba, hwqid, XRI_BATCH);
21227
21228        /* Get one XRI from private free xri pool */
21229        lpfc_ncmd = lpfc_get_io_buf_from_private_pool(phba, qp, pvt_pool, ndlp);
21230
21231        if (lpfc_ncmd) {
21232                lpfc_ncmd->hdwq = qp;
21233                lpfc_ncmd->hdwq_no = hwqid;
21234        } else if (expedite) {
21235                /* If we fail to get one from pvt_pool and this is an expedite
21236                 * request, get one free xri from expedite pool.
21237                 */
21238                lpfc_ncmd = lpfc_get_io_buf_from_expedite_pool(phba);
21239        }
21240
21241        return lpfc_ncmd;
21242}
21243
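/*
 * Flow sketch (illustrative): with cfg_xri_rebalancing enabled, an
 * allocation on HWQ n proceeds roughly as
 *
 *	if pvt_pool is empty   ->  lpfc_move_xri_pbl_to_pvt(phba, n, XRI_BATCH)
 *	lpfc_get_io_buf_from_private_pool(phba, qp, pvt_pool, ndlp)
 *	if still NULL and expedite  ->  lpfc_get_io_buf_from_expedite_pool(phba)
 */

/**
 * lpfc_io_buf - Remove one IO buffer from a HWQ's get-list
 * @phba: pointer to lpfc hba data structure.
 * @ndlp: pointer to lpfc nodelist data structure.
 * @idx: index of the HWQ whose get-list is searched.
 *
 * Walks the HWQ's lpfc_io_buf_list_get and removes the first buffer whose
 * XRI is not on an active RRQ and is not flagged LPFC_SBUF_NOT_POSTED.
 * The caller must hold the HWQ's io_buf_list_get_lock.
 *
 * Return:
 *   Pointer to lpfc_io_buf - if a usable buffer was found
 *   NULL - otherwise
 **/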
21244static inline struct lpfc_io_buf *
21245lpfc_io_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp, int idx)
21246{
21247        struct lpfc_sli4_hdw_queue *qp;
21248        struct lpfc_io_buf *lpfc_cmd, *lpfc_cmd_next;
21249
21250        qp = &phba->sli4_hba.hdwq[idx];
21251        list_for_each_entry_safe(lpfc_cmd, lpfc_cmd_next,
21252                                 &qp->lpfc_io_buf_list_get, list) {
21253                if (lpfc_test_rrq_active(phba, ndlp,
21254                                         lpfc_cmd->cur_iocbq.sli4_lxritag))
21255                        continue;
21256
21257                if (lpfc_cmd->flags & LPFC_SBUF_NOT_POSTED)
21258                        continue;
21259
21260                list_del_init(&lpfc_cmd->list);
21261                qp->get_io_bufs--;
21262                lpfc_cmd->hdwq = qp;
21263                lpfc_cmd->hdwq_no = idx;
21264                return lpfc_cmd;
21265        }
21266        return NULL;
21267}
21268
21269/**
21270 * lpfc_get_io_buf - Get one IO buffer from free pool
21271 * @phba: The HBA for which this call is being executed.
21272 * @ndlp: pointer to lpfc nodelist data structure.
21273 * @hwqid: index of the HWQ to allocate from
21274 * @expedite: 1 means this request is urgent.
21275 *
21276 * This routine gets one IO buffer from the free pool. If cfg_xri_rebalancing==1,
21277 * it removes an IO buffer from the multiXRI pools. If cfg_xri_rebalancing==0, it
21278 * removes an IO buffer from the head of the HWQ's io_buf_list and returns it to the caller.
21279 *
21280 * Note: ndlp is only used on SCSI side for RRQ testing.
21281 *       The caller should pass NULL for ndlp on NVME side.
21282 *
21283 * Return codes:
21284 *   NULL - Error
21285 *   Pointer to lpfc_io_buf - Success
21286 **/
21287struct lpfc_io_buf *lpfc_get_io_buf(struct lpfc_hba *phba,
21288                                    struct lpfc_nodelist *ndlp,
21289                                    u32 hwqid, int expedite)
21290{
21291        struct lpfc_sli4_hdw_queue *qp;
21292        unsigned long iflag;
21293        struct lpfc_io_buf *lpfc_cmd;
21294
21295        qp = &phba->sli4_hba.hdwq[hwqid];
21296        lpfc_cmd = NULL;
21297
21298        if (phba->cfg_xri_rebalancing)
21299                lpfc_cmd = lpfc_get_io_buf_from_multixri_pools(
21300                        phba, ndlp, hwqid, expedite);
21301        else {
21302                lpfc_qp_spin_lock_irqsave(&qp->io_buf_list_get_lock, iflag,
21303                                          qp, alloc_xri_get);
21304                if (qp->get_io_bufs > LPFC_NVME_EXPEDITE_XRICNT || expedite)
21305                        lpfc_cmd = lpfc_io_buf(phba, ndlp, hwqid);
21306                if (!lpfc_cmd) {
21307                        lpfc_qp_spin_lock(&qp->io_buf_list_put_lock,
21308                                          qp, alloc_xri_put);
21309                        list_splice(&qp->lpfc_io_buf_list_put,
21310                                    &qp->lpfc_io_buf_list_get);
21311                        qp->get_io_bufs += qp->put_io_bufs;
21312                        INIT_LIST_HEAD(&qp->lpfc_io_buf_list_put);
21313                        qp->put_io_bufs = 0;
21314                        spin_unlock(&qp->io_buf_list_put_lock);
21315                        if (qp->get_io_bufs > LPFC_NVME_EXPEDITE_XRICNT ||
21316                            expedite)
21317                                lpfc_cmd = lpfc_io_buf(phba, ndlp, hwqid);
21318                }
21319                spin_unlock_irqrestore(&qp->io_buf_list_get_lock, iflag);
21320        }
21321
21322        return lpfc_cmd;
21323}
21324
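/*
 * Usage sketch (illustrative only; the surrounding variables are
 * hypothetical). A submission path typically pairs lpfc_get_io_buf()
 * with lpfc_release_io_buf() once the exchange completes:
 *
 *	struct lpfc_io_buf *iobuf;
 *
 *	iobuf = lpfc_get_io_buf(phba, ndlp, hwqid, 0);
 *	if (!iobuf)
 *		return SCSI_MLQUEUE_HOST_BUSY;	(pool exhausted; retry later)
 *	... build the WQE in iobuf->cur_iocbq and submit ...
 */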
21325/**
21326 * lpfc_get_sgl_per_hdwq - Get one SGL chunk from hdwq's pool
21327 * @phba: The HBA for which this call is being executed.
21328 * @lpfc_buf: IO buf structure to append the SGL chunk
21329 *
21330 * This routine gets one SGL chunk buffer from hdwq's SGL chunk pool,
21331 * and will allocate an SGL chunk if the pool is empty.
21332 *
21333 * Return codes:
21334 *   NULL - Error
21335 *   Pointer to sli4_hybrid_sgl - Success
21336 **/
21337struct sli4_hybrid_sgl *
21338lpfc_get_sgl_per_hdwq(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_buf)
21339{
21340        struct sli4_hybrid_sgl *list_entry = NULL;
21341        struct sli4_hybrid_sgl *tmp = NULL;
21342        struct sli4_hybrid_sgl *allocated_sgl = NULL;
21343        struct lpfc_sli4_hdw_queue *hdwq = lpfc_buf->hdwq;
21344        struct list_head *buf_list = &hdwq->sgl_list;
21345        unsigned long iflags;
21346
21347        spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
21348
21349        if (likely(!list_empty(buf_list))) {
21350                /* break off 1 chunk from the sgl_list */
21351                list_for_each_entry_safe(list_entry, tmp,
21352                                         buf_list, list_node) {
21353                        list_move_tail(&list_entry->list_node,
21354                                       &lpfc_buf->dma_sgl_xtra_list);
21355                        break;
21356                }
21357        } else {
21358                /* allocate more */
21359                spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
21360                tmp = kmalloc_node(sizeof(*tmp), GFP_ATOMIC,
21361                                   cpu_to_node(hdwq->io_wq->chann));
21362                if (!tmp) {
21363                        lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
21364                                        "8353 error kmalloc memory for HDWQ "
21365                                        "%d %s\n",
21366                                        lpfc_buf->hdwq_no, __func__);
21367                        return NULL;
21368                }
21369
21370                tmp->dma_sgl = dma_pool_alloc(phba->lpfc_sg_dma_buf_pool,
21371                                              GFP_ATOMIC, &tmp->dma_phys_sgl);
21372                if (!tmp->dma_sgl) {
21373                        lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
21374                                        "8354 error pool_alloc memory for HDWQ "
21375                                        "%d %s\n",
21376                                        lpfc_buf->hdwq_no, __func__);
21377                        kfree(tmp);
21378                        return NULL;
21379                }
21380
21381                spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
21382                list_add_tail(&tmp->list_node, &lpfc_buf->dma_sgl_xtra_list);
21383        }
21384
21385        allocated_sgl = list_last_entry(&lpfc_buf->dma_sgl_xtra_list,
21386                                        struct sli4_hybrid_sgl,
21387                                        list_node);
21388
21389        spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
21390
21391        return allocated_sgl;
21392}
21393
21394/**
21395 * lpfc_put_sgl_per_hdwq - Put one SGL chunk into hdwq pool
21396 * @phba: The HBA for which this call is being executed.
21397 * @lpfc_buf: IO buf structure with the SGL chunk
21398 *
21399 * This routine puts one SGL chunk buffer into hdwq's SGL chunk pool.
21400 *
21401 * Return codes:
21402 *   0 - Success
21403 *   -EINVAL - Error
21404 **/
21405int
21406lpfc_put_sgl_per_hdwq(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_buf)
21407{
21408        int rc = 0;
21409        struct sli4_hybrid_sgl *list_entry = NULL;
21410        struct sli4_hybrid_sgl *tmp = NULL;
21411        struct lpfc_sli4_hdw_queue *hdwq = lpfc_buf->hdwq;
21412        struct list_head *buf_list = &hdwq->sgl_list;
21413        unsigned long iflags;
21414
21415        spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
21416
21417        if (likely(!list_empty(&lpfc_buf->dma_sgl_xtra_list))) {
21418                list_for_each_entry_safe(list_entry, tmp,
21419                                         &lpfc_buf->dma_sgl_xtra_list,
21420                                         list_node) {
21421                        list_move_tail(&list_entry->list_node,
21422                                       buf_list);
21423                }
21424        } else {
21425                rc = -EINVAL;
21426        }
21427
21428        spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
21429        return rc;
21430}
21431
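/*
 * Pairing sketch (illustrative, not driver code): an IO whose scatter
 * list outgrows the embedded SGL borrows one chunk per exchange and
 * returns it on completion:
 *
 *	struct sli4_hybrid_sgl *sgl;
 *
 *	sgl = lpfc_get_sgl_per_hdwq(phba, lpfc_cmd);
 *	if (!sgl)
 *		return -ENOMEM;
 *	... use sgl->dma_sgl (CPU address) and sgl->dma_phys_sgl (DMA address) ...
 *	lpfc_put_sgl_per_hdwq(phba, lpfc_cmd);	(on IO completion)
 */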
21432/**
21433 * lpfc_free_sgl_per_hdwq - Free all SGL chunks of hdwq pool
21434 * @phba: phba object
21435 * @hdwq: hdwq to cleanup sgl buff resources on
21436 *
21437 * This routine frees all SGL chunks of hdwq SGL chunk pool.
21438 *
21439 * Return codes:
21440 *   None
21441 **/
21442void
21443lpfc_free_sgl_per_hdwq(struct lpfc_hba *phba,
21444                       struct lpfc_sli4_hdw_queue *hdwq)
21445{
21446        struct list_head *buf_list = &hdwq->sgl_list;
21447        struct sli4_hybrid_sgl *list_entry = NULL;
21448        struct sli4_hybrid_sgl *tmp = NULL;
21449        unsigned long iflags;
21450
21451        spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
21452
21453        /* Free sgl pool */
21454        list_for_each_entry_safe(list_entry, tmp,
21455                                 buf_list, list_node) {
21456                dma_pool_free(phba->lpfc_sg_dma_buf_pool,
21457                              list_entry->dma_sgl,
21458                              list_entry->dma_phys_sgl);
21459                list_del(&list_entry->list_node);
21460                kfree(list_entry);
21461        }
21462
21463        spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
21464}
21465
21466/**
21467 * lpfc_get_cmd_rsp_buf_per_hdwq - Get one CMD/RSP buffer from hdwq
21468 * @phba: The HBA for which this call is being executed.
21469 * @lpfc_buf: IO buf structure to attach the CMD/RSP buffer
21470 *
21471 * This routine gets one CMD/RSP buffer from hdwq's CMD/RSP pool,
21472 * and will allocate a CMD/RSP buffer if the pool is empty.
21473 *
21474 * Return codes:
21475 *   NULL - Error
21476 *   Pointer to fcp_cmd_rsp_buf - Success
21477 **/
21478struct fcp_cmd_rsp_buf *
21479lpfc_get_cmd_rsp_buf_per_hdwq(struct lpfc_hba *phba,
21480                              struct lpfc_io_buf *lpfc_buf)
21481{
21482        struct fcp_cmd_rsp_buf *list_entry = NULL;
21483        struct fcp_cmd_rsp_buf *tmp = NULL;
21484        struct fcp_cmd_rsp_buf *allocated_buf = NULL;
21485        struct lpfc_sli4_hdw_queue *hdwq = lpfc_buf->hdwq;
21486        struct list_head *buf_list = &hdwq->cmd_rsp_buf_list;
21487        unsigned long iflags;
21488
21489        spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
21490
21491        if (likely(!list_empty(buf_list))) {
21492                /* break off 1 chunk from the list */
21493                list_for_each_entry_safe(list_entry, tmp,
21494                                         buf_list,
21495                                         list_node) {
21496                        list_move_tail(&list_entry->list_node,
21497                                       &lpfc_buf->dma_cmd_rsp_list);
21498                        break;
21499                }
21500        } else {
21501                /* allocate more */
21502                spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
21503                tmp = kmalloc_node(sizeof(*tmp), GFP_ATOMIC,
21504                                   cpu_to_node(hdwq->io_wq->chann));
21505                if (!tmp) {
21506                        lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
21507                                        "8355 error kmalloc memory for HDWQ "
21508                                        "%d %s\n",
21509                                        lpfc_buf->hdwq_no, __func__);
21510                        return NULL;
21511                }
21512
21513                tmp->fcp_cmnd = dma_pool_alloc(phba->lpfc_cmd_rsp_buf_pool,
21514                                                GFP_ATOMIC,
21515                                                &tmp->fcp_cmd_rsp_dma_handle);
21516
21517                if (!tmp->fcp_cmnd) {
21518                        lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
21519                                        "8356 error pool_alloc memory for HDWQ "
21520                                        "%d %s\n",
21521                                        lpfc_buf->hdwq_no, __func__);
21522                        kfree(tmp);
21523                        return NULL;
21524                }
21525
21526                tmp->fcp_rsp = (struct fcp_rsp *)((uint8_t *)tmp->fcp_cmnd +
21527                                sizeof(struct fcp_cmnd));
21528
21529                spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
21530                list_add_tail(&tmp->list_node, &lpfc_buf->dma_cmd_rsp_list);
21531        }
21532
21533        allocated_buf = list_last_entry(&lpfc_buf->dma_cmd_rsp_list,
21534                                        struct fcp_cmd_rsp_buf,
21535                                        list_node);
21536
21537        spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
21538
21539        return allocated_buf;
21540}
21541
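/*
 * Layout sketch (illustrative): the CMD and RSP buffers share a single
 * DMA allocation, so fcp_rsp is derived from fcp_cmnd rather than being
 * allocated separately:
 *
 *	buf = lpfc_get_cmd_rsp_buf_per_hdwq(phba, lpfc_cmd);
 *	buf->fcp_cmnd                    maps the outgoing FCP_CMND IU
 *	buf->fcp_rsp                     = fcp_cmnd + sizeof(struct fcp_cmnd)
 *	buf->fcp_cmd_rsp_dma_handle      DMA address of fcp_cmnd
 */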
21542/**
21543 * lpfc_put_cmd_rsp_buf_per_hdwq - Put one CMD/RSP buffer into hdwq pool
21544 * @phba: The HBA for which this call is being executed.
21545 * @lpfc_buf: IO buf structure with the CMD/RSP buf
21546 *
21547 * This routine puts one CMD/RSP buffer back into the hdwq's CMD/RSP buf pool.
21548 *
21549 * Return codes:
21550 *   0 - Success
21551 *   -EINVAL - Error
21552 **/
21553int
21554lpfc_put_cmd_rsp_buf_per_hdwq(struct lpfc_hba *phba,
21555                              struct lpfc_io_buf *lpfc_buf)
21556{
21557        int rc = 0;
21558        struct fcp_cmd_rsp_buf *list_entry = NULL;
21559        struct fcp_cmd_rsp_buf *tmp = NULL;
21560        struct lpfc_sli4_hdw_queue *hdwq = lpfc_buf->hdwq;
21561        struct list_head *buf_list = &hdwq->cmd_rsp_buf_list;
21562        unsigned long iflags;
21563
21564        spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
21565
21566        if (likely(!list_empty(&lpfc_buf->dma_cmd_rsp_list))) {
21567                list_for_each_entry_safe(list_entry, tmp,
21568                                         &lpfc_buf->dma_cmd_rsp_list,
21569                                         list_node) {
21570                        list_move_tail(&list_entry->list_node,
21571                                       buf_list);
21572                }
21573        } else {
21574                rc = -EINVAL;
21575        }
21576
21577        spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
21578        return rc;
21579}
21580
21581/**
21582 * lpfc_free_cmd_rsp_buf_per_hdwq - Free all CMD/RSP chunks of hdwq pool
21583 * @phba: phba object
21584 * @hdwq: hdwq to cleanup cmd rsp buff resources on
21585 *
21586 * This routine frees all CMD/RSP buffers of hdwq's CMD/RSP buf pool.
21587 *
21588 * Return codes:
21589 *   None
21590 **/
21591void
21592lpfc_free_cmd_rsp_buf_per_hdwq(struct lpfc_hba *phba,
21593                               struct lpfc_sli4_hdw_queue *hdwq)
21594{
21595        struct list_head *buf_list = &hdwq->cmd_rsp_buf_list;
21596        struct fcp_cmd_rsp_buf *list_entry = NULL;
21597        struct fcp_cmd_rsp_buf *tmp = NULL;
21598        unsigned long iflags;
21599
21600        spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
21601
21602        /* Free cmd_rsp buf pool */
21603        list_for_each_entry_safe(list_entry, tmp,
21604                                 buf_list,
21605                                 list_node) {
21606                dma_pool_free(phba->lpfc_cmd_rsp_buf_pool,
21607                              list_entry->fcp_cmnd,
21608                              list_entry->fcp_cmd_rsp_dma_handle);
21609                list_del(&list_entry->list_node);
21610                kfree(list_entry);
21611        }
21612
21613        spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
21614}
21615