linux/drivers/scsi/lpfc/lpfc_sli4.h
   1/*******************************************************************
   2 * This file is part of the Emulex Linux Device Driver for         *
   3 * Fibre Channel Host Bus Adapters.                                *
   4 * Copyright (C) 2017-2019 Broadcom. All Rights Reserved. The term *
   5 * "Broadcom" refers to Broadcom Inc. and/or its subsidiaries.     *
   6 * Copyright (C) 2009-2016 Emulex.  All rights reserved.           *
   7 * EMULEX and SLI are trademarks of Emulex.                        *
   8 * www.broadcom.com                                                *
   9 *                                                                 *
  10 * This program is free software; you can redistribute it and/or   *
  11 * modify it under the terms of version 2 of the GNU General       *
  12 * Public License as published by the Free Software Foundation.    *
  13 * This program is distributed in the hope that it will be useful. *
  14 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
  15 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
  16 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
  17 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
  18 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
  19 * more details, a copy of which can be found in the file COPYING  *
  20 * included with this package.                                     *
  21 *******************************************************************/
  22
  23#include <linux/irq_poll.h>
  24#include <linux/cpufreq.h>
  25
  26#if defined(CONFIG_DEBUG_FS) && !defined(CONFIG_SCSI_LPFC_DEBUG_FS)
  27#define CONFIG_SCSI_LPFC_DEBUG_FS
  28#endif
  29
  30#define LPFC_ACTIVE_MBOX_WAIT_CNT               100
  31#define LPFC_XRI_EXCH_BUSY_WAIT_TMO             10000
  32#define LPFC_XRI_EXCH_BUSY_WAIT_T1              10
  33#define LPFC_XRI_EXCH_BUSY_WAIT_T2              30000
  34#define LPFC_RPI_LOW_WATER_MARK                 10
  35
  36#define LPFC_UNREG_FCF                          1
  37#define LPFC_SKIP_UNREG_FCF                     0
  38
   39/* Amount of time in milliseconds to wait for FCF rediscovery to complete */
  40#define LPFC_FCF_REDISCOVER_WAIT_TMO            2000 /* msec */
  41
   42/* Number of SGL entries that can be posted in a 4KB non-embedded mbox command */
  43#define LPFC_NEMBED_MBOX_SGL_CNT                254
  44
  45/* Multi-queue arrangement for FCP EQ/CQ/WQ tuples */
  46#define LPFC_HBA_HDWQ_MIN       0
  47#define LPFC_HBA_HDWQ_MAX       256
  48#define LPFC_HBA_HDWQ_DEF       LPFC_HBA_HDWQ_MIN
  49
  50/* irq_chann range, values */
  51#define LPFC_IRQ_CHANN_MIN      0
  52#define LPFC_IRQ_CHANN_MAX      256
  53#define LPFC_IRQ_CHANN_DEF      LPFC_IRQ_CHANN_MIN
  54
  55/* FCP MQ queue count limiting */
  56#define LPFC_FCP_MQ_THRESHOLD_MIN       0
  57#define LPFC_FCP_MQ_THRESHOLD_MAX       256
  58#define LPFC_FCP_MQ_THRESHOLD_DEF       8
  59
  60/*
   61 * Default FCF Record attributes used by the driver when non-FIP
   62 * mode is configured and no other default FCF Record attributes
   63 * are available.
  64 */
  65#define LPFC_FCOE_FCF_DEF_INDEX 0
  66#define LPFC_FCOE_FCF_GET_FIRST 0xFFFF
  67#define LPFC_FCOE_FCF_NEXT_NONE 0xFFFF
  68
  69#define LPFC_FCOE_NULL_VID      0xFFF
  70#define LPFC_FCOE_IGNORE_VID    0xFFFF
  71
   72/* First 3 bytes of the default FCF MAC are specified by FC_MAP */
  73#define LPFC_FCOE_FCF_MAC3      0xFF
  74#define LPFC_FCOE_FCF_MAC4      0xFF
  75#define LPFC_FCOE_FCF_MAC5      0xFE
  76#define LPFC_FCOE_FCF_MAP0      0x0E
  77#define LPFC_FCOE_FCF_MAP1      0xFC
  78#define LPFC_FCOE_FCF_MAP2      0x00
  79#define LPFC_FCOE_MAX_RCV_SIZE  0x800
  80#define LPFC_FCOE_FKA_ADV_PER   0
  81#define LPFC_FCOE_FIP_PRIORITY  0x80
  82
  83#define sli4_sid_from_fc_hdr(fc_hdr)  \
  84        ((fc_hdr)->fh_s_id[0] << 16 | \
  85         (fc_hdr)->fh_s_id[1] <<  8 | \
  86         (fc_hdr)->fh_s_id[2])
  87
  88#define sli4_did_from_fc_hdr(fc_hdr)  \
  89        ((fc_hdr)->fh_d_id[0] << 16 | \
  90         (fc_hdr)->fh_d_id[1] <<  8 | \
  91         (fc_hdr)->fh_d_id[2])
  92
  93#define sli4_fctl_from_fc_hdr(fc_hdr)  \
  94        ((fc_hdr)->fh_f_ctl[0] << 16 | \
  95         (fc_hdr)->fh_f_ctl[1] <<  8 | \
  96         (fc_hdr)->fh_f_ctl[2])
  97
  98#define sli4_type_from_fc_hdr(fc_hdr)  \
  99        ((fc_hdr)->fh_type)
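/*
 * Worked example (illustrative only): for a frame header whose fh_s_id
 * bytes are {0x01, 0x02, 0x03}, sli4_sid_from_fc_hdr() assembles the
 * 24-bit S_ID as (0x01 << 16) | (0x02 << 8) | 0x03 = 0x010203. The D_ID
 * and F_CTL helpers pack their three bytes the same way.
 */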
 100
 101#define LPFC_FW_RESET_MAXIMUM_WAIT_10MS_CNT 12000
 102
 103#define INT_FW_UPGRADE  0
 104#define RUN_FW_UPGRADE  1
 105
 106enum lpfc_sli4_queue_type {
 107        LPFC_EQ,
 108        LPFC_GCQ,
 109        LPFC_MCQ,
 110        LPFC_WCQ,
 111        LPFC_RCQ,
 112        LPFC_MQ,
 113        LPFC_WQ,
 114        LPFC_HRQ,
 115        LPFC_DRQ
 116};
 117
 118/* The queue sub-type defines the functional purpose of the queue */
 119enum lpfc_sli4_queue_subtype {
 120        LPFC_NONE,
 121        LPFC_MBOX,
 122        LPFC_IO,
 123        LPFC_ELS,
 124        LPFC_NVMET,
 125        LPFC_NVME_LS,
 126        LPFC_USOL
 127};
 128
 129/* RQ buffer list */
 130struct lpfc_rqb {
 131        uint16_t entry_count;     /* Current number of RQ slots */
 132        uint16_t buffer_count;    /* Current number of buffers posted */
 133        struct list_head rqb_buffer_list;  /* buffers assigned to this HBQ */
 134                                  /* Callback for HBQ buffer allocation */
 135        struct rqb_dmabuf *(*rqb_alloc_buffer)(struct lpfc_hba *);
 136                                  /* Callback for HBQ buffer free */
 137        void               (*rqb_free_buffer)(struct lpfc_hba *,
 138                                               struct rqb_dmabuf *);
 139};
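/*
 * Usage sketch (illustrative, not driver code): the owner of an RQ fills in
 * both callbacks before buffers are posted. The callback names below are
 * hypothetical placeholders, not real driver symbols.
 *
 *	struct lpfc_rqb *rqbp = hdr_rq->rqbp;
 *
 *	rqbp->rqb_alloc_buffer = my_rqb_alloc;	// returns a struct rqb_dmabuf *
 *	rqbp->rqb_free_buffer  = my_rqb_free;	// releases one rqb_dmabuf
 */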
 140
 141enum lpfc_poll_mode {
 142        LPFC_QUEUE_WORK,
 143        LPFC_IRQ_POLL
 144};
 145
 146struct lpfc_idle_stat {
 147        u64 prev_idle;
 148        u64 prev_wall;
 149};
 150
 151struct lpfc_queue {
 152        struct list_head list;
 153        struct list_head wq_list;
 154
 155        /*
  156         * If interrupts are in effect on _all_ the EQs, the footprint
  157         * of the polling code is zero (except for mode). This memory is
  158         * checked on every I/O to see if the I/O needs to be polled, and
  159         * at completion to check if the EQs need to be rearmed.
  160         * Keep it in the same cacheline as the queue ptr to avoid CPU
  161         * fetch stalls. Using 1 byte of memory leaves a 7-byte hole, so
  162         * fill it with other frequently used members.
 163         */
 164        uint16_t last_cpu;      /* most recent cpu */
 165        uint16_t hdwq;
 166        uint8_t  qe_valid;
 167        uint8_t  mode;  /* interrupt or polling */
 168#define LPFC_EQ_INTERRUPT       0
 169#define LPFC_EQ_POLL            1
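        /*
         * Minimal sketch of the per-I/O check described above (assumption:
         * not the driver's literal code); with interrupts enabled on every
         * EQ it reduces to a single byte compare on a hot cacheline:
         *
         *	if (eq->mode == LPFC_EQ_POLL)
         *		poll and rearm the EQ from the I/O path;
         */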
 170
 171        struct list_head wqfull_list;
 172        enum lpfc_sli4_queue_type type;
 173        enum lpfc_sli4_queue_subtype subtype;
 174        struct lpfc_hba *phba;
 175        struct list_head child_list;
 176        struct list_head page_list;
 177        struct list_head sgl_list;
 178        struct list_head cpu_list;
 179        uint32_t entry_count;   /* Number of entries to support on the queue */
 180        uint32_t entry_size;    /* Size of each queue entry. */
 181        uint32_t entry_cnt_per_pg;
 182        uint32_t notify_interval; /* Queue Notification Interval
 183                                   * For chip->host queues (EQ, CQ, RQ):
 184                                   *  specifies the interval (number of
 185                                   *  entries) where the doorbell is rung to
 186                                   *  notify the chip of entry consumption.
 187                                   * For host->chip queues (WQ):
 188                                   *  specifies the interval (number of
 189                                   *  entries) where consumption CQE is
 190                                   *  requested to indicate WQ entries
 191                                   *  consumed by the chip.
 192                                   * Not used on an MQ.
 193                                   */
 194#define LPFC_EQ_NOTIFY_INTRVL   16
 195#define LPFC_CQ_NOTIFY_INTRVL   16
 196#define LPFC_WQ_NOTIFY_INTRVL   16
 197#define LPFC_RQ_NOTIFY_INTRVL   16
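        /*
         * Worked example (illustrative): with LPFC_CQ_NOTIFY_INTRVL of 16,
         * the CQ doorbell is rung after every 16 consumed CQEs rather than
         * once per CQE; for a WQ the same interval requests a consumption
         * CQE from the chip every 16 posted entries.
         */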
 198        uint32_t max_proc_limit; /* Queue Processing Limit
 199                                  * For chip->host queues (EQ, CQ):
 200                                  *  specifies the maximum number of
 201                                  *  entries to be consumed in one
 202                                  *  processing iteration sequence. Queue
 203                                  *  will be rearmed after each iteration.
 204                                  * Not used on an MQ, RQ or WQ.
 205                                  */
 206#define LPFC_EQ_MAX_PROC_LIMIT          256
 207#define LPFC_CQ_MIN_PROC_LIMIT          64
 208#define LPFC_CQ_MAX_PROC_LIMIT          LPFC_CQE_EXP_COUNT      // 4096
 209#define LPFC_CQ_DEF_MAX_PROC_LIMIT      LPFC_CQE_DEF_COUNT      // 1024
 210#define LPFC_CQ_MIN_THRESHOLD_TO_POLL   64
 211#define LPFC_CQ_MAX_THRESHOLD_TO_POLL   LPFC_CQ_DEF_MAX_PROC_LIMIT
 212#define LPFC_CQ_DEF_THRESHOLD_TO_POLL   LPFC_CQ_DEF_MAX_PROC_LIMIT
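        /*
         * Worked example (illustrative): a CQ using the default limit of
         * LPFC_CQ_DEF_MAX_PROC_LIMIT (1024) is processed for at most 1024
         * CQEs in one iteration and is then rearmed; any remaining CQEs are
         * picked up on the next iteration.
         */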
 213        uint32_t queue_claimed; /* indicates queue is being processed */
 214        uint32_t queue_id;      /* Queue ID assigned by the hardware */
 215        uint32_t assoc_qid;     /* Queue ID associated with, for CQ/WQ/MQ */
 216        uint32_t host_index;    /* The host's index for putting or getting */
 217        uint32_t hba_index;     /* The last known hba index for get or put */
 218        uint32_t q_mode;
 219
 220        struct lpfc_sli_ring *pring; /* ptr to io ring associated with q */
 221        struct lpfc_rqb *rqbp;  /* ptr to RQ buffers */
 222
 223        uint16_t page_count;    /* Number of pages allocated for this queue */
 224        uint16_t page_size;     /* size of page allocated for this queue */
 225#define LPFC_EXPANDED_PAGE_SIZE 16384
 226#define LPFC_DEFAULT_PAGE_SIZE  4096
 227        uint16_t chann;         /* Hardware Queue association WQ/CQ */
 228                                /* CPU affinity for EQ */
 229#define LPFC_FIND_BY_EQ         0
 230#define LPFC_FIND_BY_HDWQ       1
 231        uint8_t db_format;
 232#define LPFC_DB_RING_FORMAT     0x01
 233#define LPFC_DB_LIST_FORMAT     0x02
 234        uint8_t q_flag;
 235#define HBA_NVMET_WQFULL        0x1 /* We hit WQ Full condition for NVMET */
 236#define HBA_NVMET_CQ_NOTIFY     0x1 /* LPFC_NVMET_CQ_NOTIFY CQEs this EQE */
 237#define HBA_EQ_DELAY_CHK        0x2 /* EQ is a candidate for coalescing */
 238#define LPFC_NVMET_CQ_NOTIFY    4
 239        void __iomem *db_regaddr;
 240        uint16_t dpp_enable;
 241        uint16_t dpp_id;
 242        void __iomem *dpp_regaddr;
 243
 244        /* For q stats */
 245        uint32_t q_cnt_1;
 246        uint32_t q_cnt_2;
 247        uint32_t q_cnt_3;
 248        uint64_t q_cnt_4;
 249/* defines for EQ stats */
 250#define EQ_max_eqe              q_cnt_1
 251#define EQ_no_entry             q_cnt_2
 252#define EQ_cqe_cnt              q_cnt_3
 253#define EQ_processed            q_cnt_4
 254
 255/* defines for CQ stats */
 256#define CQ_mbox                 q_cnt_1
 257#define CQ_max_cqe              q_cnt_1
 258#define CQ_release_wqe          q_cnt_2
 259#define CQ_xri_aborted          q_cnt_3
 260#define CQ_wq                   q_cnt_4
 261
 262/* defines for WQ stats */
 263#define WQ_overflow             q_cnt_1
 264#define WQ_posted               q_cnt_4
 265
 266/* defines for RQ stats */
 267#define RQ_no_posted_buf        q_cnt_1
 268#define RQ_no_buf_found         q_cnt_2
 269#define RQ_buf_posted           q_cnt_3
 270#define RQ_rcv_buf              q_cnt_4
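/*
 * The aliases above let the stats code use descriptive names while the
 * storage stays in q_cnt_1..q_cnt_4; e.g. q->RQ_rcv_buf++ on an RQ is
 * simply q->q_cnt_4++, and q->EQ_processed on an EQ accumulates in the
 * 64-bit q_cnt_4.
 */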
 271
 272        struct work_struct      irqwork;
 273        struct work_struct      spwork;
 274        struct delayed_work     sched_irqwork;
 275        struct delayed_work     sched_spwork;
 276
 277        uint64_t isr_timestamp;
 278        struct lpfc_queue *assoc_qp;
 279        struct list_head _poll_list;
 280        void **q_pgs;   /* array to index entries per page */
 281
 282#define LPFC_IRQ_POLL_WEIGHT 256
 283        struct irq_poll iop;
 284        enum lpfc_poll_mode poll_mode;
 285};
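/*
 * Illustrative only: the iop member is driven through the kernel's irq_poll
 * API, roughly
 *
 *	irq_poll_init(&q->iop, LPFC_IRQ_POLL_WEIGHT, handler);
 *	irq_poll_sched(&q->iop);
 *
 * where 'handler' stands in for the driver's CQ poll callback (name omitted
 * here).
 */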
 286
 287struct lpfc_sli4_link {
 288        uint32_t speed;
 289        uint8_t duplex;
 290        uint8_t status;
 291        uint8_t type;
 292        uint8_t number;
 293        uint8_t fault;
 294        uint32_t logical_speed;
 295        uint16_t topology;
 296};
 297
 298struct lpfc_fcf_rec {
 299        uint8_t  fabric_name[8];
 300        uint8_t  switch_name[8];
 301        uint8_t  mac_addr[6];
 302        uint16_t fcf_indx;
 303        uint32_t priority;
 304        uint16_t vlan_id;
 305        uint32_t addr_mode;
 306        uint32_t flag;
 307#define BOOT_ENABLE     0x01
 308#define RECORD_VALID    0x02
 309};
 310
 311struct lpfc_fcf_pri_rec {
 312        uint16_t fcf_index;
 313#define LPFC_FCF_ON_PRI_LIST 0x0001
 314#define LPFC_FCF_FLOGI_FAILED 0x0002
 315        uint16_t flag;
 316        uint32_t priority;
 317};
 318
 319struct lpfc_fcf_pri {
 320        struct list_head list;
 321        struct lpfc_fcf_pri_rec fcf_rec;
 322};
 323
 324/*
  325 * Maximum FCF table index. It is used for driver-internal bookkeeping
  326 * and only needs to be no smaller than the supported HBA's FCF table size.
 327 */
 328#define LPFC_SLI4_FCF_TBL_INDX_MAX      32
 329
 330struct lpfc_fcf {
 331        uint16_t fcfi;
 332        uint32_t fcf_flag;
 333#define FCF_AVAILABLE   0x01 /* FCF available for discovery */
 334#define FCF_REGISTERED  0x02 /* FCF registered with FW */
 335#define FCF_SCAN_DONE   0x04 /* FCF table scan done */
  336#define FCF_IN_USE      0x08 /* At least one discovery completed */
 337#define FCF_INIT_DISC   0x10 /* Initial FCF discovery */
 338#define FCF_DEAD_DISC   0x20 /* FCF DEAD fast FCF failover discovery */
 339#define FCF_ACVL_DISC   0x40 /* All CVL fast FCF failover discovery */
 340#define FCF_DISCOVERY   (FCF_INIT_DISC | FCF_DEAD_DISC | FCF_ACVL_DISC)
 341#define FCF_REDISC_PEND 0x80 /* FCF rediscovery pending */
 342#define FCF_REDISC_EVT  0x100 /* FCF rediscovery event to worker thread */
 343#define FCF_REDISC_FOV  0x200 /* Post FCF rediscovery fast failover */
 344#define FCF_REDISC_PROG (FCF_REDISC_PEND | FCF_REDISC_EVT)
 345        uint16_t fcf_redisc_attempted;
 346        uint32_t addr_mode;
 347        uint32_t eligible_fcf_cnt;
 348        struct lpfc_fcf_rec current_rec;
 349        struct lpfc_fcf_rec failover_rec;
 350        struct list_head fcf_pri_list;
 351        struct lpfc_fcf_pri fcf_pri[LPFC_SLI4_FCF_TBL_INDX_MAX];
 352        uint32_t current_fcf_scan_pri;
 353        struct timer_list redisc_wait;
 354        unsigned long *fcf_rr_bmask; /* Eligible FCF indexes for RR failover */
 355};
 356
 357
 358#define LPFC_REGION23_SIGNATURE "RG23"
 359#define LPFC_REGION23_VERSION   1
 360#define LPFC_REGION23_LAST_REC  0xff
 361#define DRIVER_SPECIFIC_TYPE    0xA2
 362#define LINUX_DRIVER_ID         0x20
 363#define PORT_STE_TYPE           0x1
 364
 365struct lpfc_fip_param_hdr {
 366        uint8_t type;
 367#define FCOE_PARAM_TYPE         0xA0
 368        uint8_t length;
 369#define FCOE_PARAM_LENGTH       2
 370        uint8_t parm_version;
 371#define FIPP_VERSION            0x01
 372        uint8_t parm_flags;
 373#define lpfc_fip_param_hdr_fipp_mode_SHIFT      6
 374#define lpfc_fip_param_hdr_fipp_mode_MASK       0x3
 375#define lpfc_fip_param_hdr_fipp_mode_WORD       parm_flags
 376#define FIPP_MODE_ON                            0x1
 377#define FIPP_MODE_OFF                           0x0
 378#define FIPP_VLAN_VALID                         0x1
 379};
 380
 381struct lpfc_fcoe_params {
 382        uint8_t fc_map[3];
 383        uint8_t reserved1;
 384        uint16_t vlan_tag;
 385        uint8_t reserved[2];
 386};
 387
 388struct lpfc_fcf_conn_hdr {
 389        uint8_t type;
 390#define FCOE_CONN_TBL_TYPE              0xA1
 391        uint8_t length;   /* words */
 392        uint8_t reserved[2];
 393};
 394
 395struct lpfc_fcf_conn_rec {
 396        uint16_t flags;
 397#define FCFCNCT_VALID           0x0001
 398#define FCFCNCT_BOOT            0x0002
 399#define FCFCNCT_PRIMARY         0x0004   /* if not set, Secondary */
 400#define FCFCNCT_FBNM_VALID      0x0008
 401#define FCFCNCT_SWNM_VALID      0x0010
 402#define FCFCNCT_VLAN_VALID      0x0020
 403#define FCFCNCT_AM_VALID        0x0040
 404#define FCFCNCT_AM_PREFERRED    0x0080   /* if not set, AM Required */
 405#define FCFCNCT_AM_SPMA         0x0100   /* if not set, FPMA */
 406
 407        uint16_t vlan_tag;
 408        uint8_t fabric_name[8];
 409        uint8_t switch_name[8];
 410};
 411
 412struct lpfc_fcf_conn_entry {
 413        struct list_head list;
 414        struct lpfc_fcf_conn_rec conn_rec;
 415};
 416
 417/*
 418 * Define the host's bootstrap mailbox.  This structure contains
 419 * the member attributes needed to create, use, and destroy the
 420 * bootstrap mailbox region.
 421 *
 422 * The macro definitions for the bmbx data structure are defined
 423 * in lpfc_hw4.h with the register definition.
 424 */
 425struct lpfc_bmbx {
 426        struct lpfc_dmabuf *dmabuf;
 427        struct dma_address dma_address;
 428        void *avirt;
 429        dma_addr_t aphys;
 430        uint32_t bmbx_size;
 431};
 432
 433#define LPFC_EQE_SIZE LPFC_EQE_SIZE_4
 434
 435#define LPFC_EQE_SIZE_4B        4
 436#define LPFC_EQE_SIZE_16B       16
 437#define LPFC_CQE_SIZE           16
 438#define LPFC_WQE_SIZE           64
 439#define LPFC_WQE128_SIZE        128
 440#define LPFC_MQE_SIZE           256
 441#define LPFC_RQE_SIZE           8
 442
 443#define LPFC_EQE_DEF_COUNT      1024
 444#define LPFC_CQE_DEF_COUNT      1024
 445#define LPFC_CQE_EXP_COUNT      4096
 446#define LPFC_WQE_DEF_COUNT      256
 447#define LPFC_WQE_EXP_COUNT      1024
 448#define LPFC_MQE_DEF_COUNT      16
 449#define LPFC_RQE_DEF_COUNT      512
 450
 451#define LPFC_QUEUE_NOARM        false
 452#define LPFC_QUEUE_REARM        true
 453
 454
 455/*
 456 * SLI4 CT field defines
 457 */
 458#define SLI4_CT_RPI 0
 459#define SLI4_CT_VPI 1
 460#define SLI4_CT_VFI 2
 461#define SLI4_CT_FCFI 3
 462
 463/*
 464 * SLI4 specific data structures
 465 */
 466struct lpfc_max_cfg_param {
 467        uint16_t max_xri;
 468        uint16_t xri_base;
 469        uint16_t xri_used;
 470        uint16_t max_rpi;
 471        uint16_t rpi_base;
 472        uint16_t rpi_used;
 473        uint16_t max_vpi;
 474        uint16_t vpi_base;
 475        uint16_t vpi_used;
 476        uint16_t max_vfi;
 477        uint16_t vfi_base;
 478        uint16_t vfi_used;
 479        uint16_t max_fcfi;
 480        uint16_t fcfi_used;
 481        uint16_t max_eq;
 482        uint16_t max_rq;
 483        uint16_t max_cq;
 484        uint16_t max_wq;
 485};
 486
 487struct lpfc_hba;
 488/* SLI4 HBA multi-fcp queue handler struct */
 489#define LPFC_SLI4_HANDLER_NAME_SZ       16
 490struct lpfc_hba_eq_hdl {
 491        uint32_t idx;
 492        uint16_t irq;
 493        char handler_name[LPFC_SLI4_HANDLER_NAME_SZ];
 494        struct lpfc_hba *phba;
 495        struct lpfc_queue *eq;
 496        struct cpumask aff_mask;
 497};
 498
 499#define lpfc_get_eq_hdl(eqidx) (&phba->sli4_hba.hba_eq_hdl[eqidx])
 500#define lpfc_get_aff_mask(eqidx) (&phba->sli4_hba.hba_eq_hdl[eqidx].aff_mask)
 501#define lpfc_get_irq(eqidx) (phba->sli4_hba.hba_eq_hdl[eqidx].irq)
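/*
 * Usage note (illustrative): these helpers expand against a local 'phba'
 * pointer in the calling function, e.g.
 *
 *	struct lpfc_hba_eq_hdl *eqhdl = lpfc_get_eq_hdl(eqidx);
 *	int irq = lpfc_get_irq(eqidx);
 *
 * so they can only be used where 'phba' is in scope.
 */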
 502
  503/* BB Credit recovery value */
 504struct lpfc_bbscn_params {
 505        uint32_t word0;
 506#define lpfc_bbscn_min_SHIFT            0
 507#define lpfc_bbscn_min_MASK             0x0000000F
 508#define lpfc_bbscn_min_WORD             word0
 509#define lpfc_bbscn_max_SHIFT            4
 510#define lpfc_bbscn_max_MASK             0x0000000F
 511#define lpfc_bbscn_max_WORD             word0
 512#define lpfc_bbscn_def_SHIFT            8
 513#define lpfc_bbscn_def_MASK             0x0000000F
 514#define lpfc_bbscn_def_WORD             word0
 515};
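/*
 * The _SHIFT/_MASK/_WORD triplets above follow the bit-field accessor
 * convention from lpfc_hw4.h, e.g. (illustrative)
 *
 *	min = bf_get(lpfc_bbscn_min, &phba->sli4_hba.bbscn_params);
 *
 * extracts bits 0-3 of word0.
 */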
 516
 517/* Port Capabilities for SLI4 Parameters */
 518struct lpfc_pc_sli4_params {
 519        uint32_t supported;
 520        uint32_t if_type;
 521        uint32_t sli_rev;
 522        uint32_t sli_family;
 523        uint32_t featurelevel_1;
 524        uint32_t featurelevel_2;
 525        uint32_t proto_types;
 526#define LPFC_SLI4_PROTO_FCOE    0x0000001
 527#define LPFC_SLI4_PROTO_FC      0x0000002
 528#define LPFC_SLI4_PROTO_NIC     0x0000004
 529#define LPFC_SLI4_PROTO_ISCSI   0x0000008
 530#define LPFC_SLI4_PROTO_RDMA    0x0000010
 531        uint32_t sge_supp_len;
 532        uint32_t if_page_sz;
 533        uint32_t rq_db_window;
 534        uint32_t loopbk_scope;
 535        uint32_t oas_supported;
 536        uint32_t eq_pages_max;
 537        uint32_t eqe_size;
 538        uint32_t cq_pages_max;
 539        uint32_t cqe_size;
 540        uint32_t mq_pages_max;
 541        uint32_t mqe_size;
 542        uint32_t mq_elem_cnt;
 543        uint32_t wq_pages_max;
 544        uint32_t wqe_size;
 545        uint32_t rq_pages_max;
 546        uint32_t rqe_size;
 547        uint32_t hdr_pages_max;
 548        uint32_t hdr_size;
 549        uint32_t hdr_pp_align;
 550        uint32_t sgl_pages_max;
 551        uint32_t sgl_pp_align;
 552        uint32_t mib_size;
 553        uint16_t mi_ver;
 554#define LPFC_MIB1_SUPPORT       1
 555#define LPFC_MIB2_SUPPORT       2
 556#define LPFC_MIB3_SUPPORT       3
 557        uint16_t mi_value;
 558#define LPFC_DFLT_MIB_VAL       2
 559        uint8_t mib_bde_cnt;
 560        uint8_t cqv;
 561        uint8_t mqv;
 562        uint8_t wqv;
 563        uint8_t rqv;
 564        uint8_t eqav;
 565        uint8_t cqav;
 566        uint8_t wqsize;
 567        uint8_t bv1s;
 568        uint8_t pls;
 569#define LPFC_WQ_SZ64_SUPPORT    1
 570#define LPFC_WQ_SZ128_SUPPORT   2
 571        uint8_t wqpcnt;
 572        uint8_t nvme;
 573};
 574
 575#define LPFC_CQ_4K_PAGE_SZ      0x1
 576#define LPFC_CQ_16K_PAGE_SZ     0x4
 577#define LPFC_WQ_4K_PAGE_SZ      0x1
 578#define LPFC_WQ_16K_PAGE_SZ     0x4
 579
 580struct lpfc_iov {
 581        uint32_t pf_number;
 582        uint32_t vf_number;
 583};
 584
 585struct lpfc_sli4_lnk_info {
 586        uint8_t lnk_dv;
 587#define LPFC_LNK_DAT_INVAL      0
 588#define LPFC_LNK_DAT_VAL        1
 589        uint8_t lnk_tp;
 590#define LPFC_LNK_GE             0x0 /* FCoE */
 591#define LPFC_LNK_FC             0x1 /* FC */
 592#define LPFC_LNK_FC_TRUNKED     0x2 /* FC_Trunked */
 593        uint8_t lnk_no;
 594        uint8_t optic_state;
 595};
 596
 597#define LPFC_SLI4_HANDLER_CNT           (LPFC_HBA_IO_CHAN_MAX+ \
 598                                         LPFC_FOF_IO_CHAN_NUM)
 599
 600/* Used for tracking CPU mapping attributes */
 601struct lpfc_vector_map_info {
 602        uint16_t        phys_id;
 603        uint16_t        core_id;
 604        uint16_t        eq;
 605        uint16_t        hdwq;
 606        uint16_t        flag;
 607#define LPFC_CPU_MAP_HYPER      0x1
 608#define LPFC_CPU_MAP_UNASSIGN   0x2
 609#define LPFC_CPU_FIRST_IRQ      0x4
 610};
 611#define LPFC_VECTOR_MAP_EMPTY   0xffff
 612
 613/* Multi-XRI pool */
 614#define XRI_BATCH               8
 615
 616struct lpfc_pbl_pool {
 617        struct list_head list;
 618        u32 count;
 619        spinlock_t lock;        /* lock for pbl_pool*/
 620};
 621
 622struct lpfc_pvt_pool {
 623        u32 low_watermark;
 624        u32 high_watermark;
 625
 626        struct list_head list;
 627        u32 count;
 628        spinlock_t lock;        /* lock for pvt_pool */
 629};
 630
 631struct lpfc_multixri_pool {
 632        u32 xri_limit;
 633
 634        /* Starting point when searching a pbl_pool with round-robin method */
 635        u32 rrb_next_hwqid;
 636
 637        /* Used by lpfc_adjust_pvt_pool_count.
 638         * io_req_count is incremented by 1 during IO submission. The heartbeat
 639         * handler uses these two variables to determine if pvt_pool is idle or
 640         * busy.
 641         */
 642        u32 prev_io_req_count;
 643        u32 io_req_count;
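        /*
         * Sketch of that idle test (assumption: not the driver's literal
         * code), run once per heartbeat:
         *
         *	busy = (io_req_count != prev_io_req_count);
         *	prev_io_req_count = io_req_count;
         *
         * equal counts mean no I/O was submitted in the interval, so the
         * pvt_pool is considered idle.
         */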
 644
 645        /* statistics */
 646        u32 pbl_empty_count;
 647#ifdef LPFC_MXP_STAT
 648        u32 above_limit_count;
 649        u32 below_limit_count;
 650        u32 local_pbl_hit_count;
 651        u32 other_pbl_hit_count;
 652        u32 stat_max_hwm;
 653
  654#define LPFC_MXP_SNAPSHOT_TAKEN 3 /* snapshot is taken at the 3rd heartbeat */
 655        u32 stat_pbl_count;
 656        u32 stat_pvt_count;
 657        u32 stat_busy_count;
 658        u32 stat_snapshot_taken;
 659#endif
 660
 661        /* TODO: Separate pvt_pool into get and put list */
 662        struct lpfc_pbl_pool pbl_pool;   /* Public free XRI pool */
 663        struct lpfc_pvt_pool pvt_pool;   /* Private free XRI pool */
 664};
 665
 666struct lpfc_fc4_ctrl_stat {
 667        u32 input_requests;
 668        u32 output_requests;
 669        u32 control_requests;
 670        u32 io_cmpls;
 671};
 672
 673#ifdef LPFC_HDWQ_LOCK_STAT
 674struct lpfc_lock_stat {
 675        uint32_t alloc_xri_get;
 676        uint32_t alloc_xri_put;
 677        uint32_t free_xri;
 678        uint32_t wq_access;
 679        uint32_t alloc_pvt_pool;
 680        uint32_t mv_from_pvt_pool;
 681        uint32_t mv_to_pub_pool;
 682        uint32_t mv_to_pvt_pool;
 683        uint32_t free_pub_pool;
 684        uint32_t free_pvt_pool;
 685};
 686#endif
 687
 688struct lpfc_eq_intr_info {
 689        struct list_head list;
 690        uint32_t icnt;
 691};
 692
 693/* SLI4 HBA data structure entries */
 694struct lpfc_sli4_hdw_queue {
 695        /* Pointers to the constructed SLI4 queues */
 696        struct lpfc_queue *hba_eq;  /* Event queues for HBA */
 697        struct lpfc_queue *io_cq;   /* Fast-path FCP & NVME compl queue */
 698        struct lpfc_queue *io_wq;   /* Fast-path FCP & NVME work queue */
 699        uint16_t io_cq_map;
 700
 701        /* Keep track of IO buffers for this hardware queue */
 702        spinlock_t io_buf_list_get_lock;  /* Common buf alloc list lock */
 703        struct list_head lpfc_io_buf_list_get;
 704        spinlock_t io_buf_list_put_lock;  /* Common buf free list lock */
 705        struct list_head lpfc_io_buf_list_put;
 706        spinlock_t abts_io_buf_list_lock; /* list of aborted IOs */
 707        struct list_head lpfc_abts_io_buf_list;
 708        uint32_t total_io_bufs;
 709        uint32_t get_io_bufs;
 710        uint32_t put_io_bufs;
 711        uint32_t empty_io_bufs;
 712        uint32_t abts_scsi_io_bufs;
 713        uint32_t abts_nvme_io_bufs;
 714
 715        /* Multi-XRI pool per HWQ */
 716        struct lpfc_multixri_pool *p_multixri_pool;
 717
 718        /* FC-4 Stats counters */
 719        struct lpfc_fc4_ctrl_stat nvme_cstat;
 720        struct lpfc_fc4_ctrl_stat scsi_cstat;
 721#ifdef LPFC_HDWQ_LOCK_STAT
 722        struct lpfc_lock_stat lock_conflict;
 723#endif
 724
 725        /* Per HDWQ pool resources */
 726        struct list_head sgl_list;
 727        struct list_head cmd_rsp_buf_list;
 728
 729        /* Lock for syncing Per HDWQ pool resources */
 730        spinlock_t hdwq_lock;
 731};
 732
 733#ifdef LPFC_HDWQ_LOCK_STAT
 734/* compile time trylock stats */
 735#define lpfc_qp_spin_lock_irqsave(lock, flag, qp, lstat) \
 736        { \
 737        int only_once = 1; \
 738        while (spin_trylock_irqsave(lock, flag) == 0) { \
 739                if (only_once) { \
 740                        only_once = 0; \
 741                        qp->lock_conflict.lstat++; \
 742                } \
 743        } \
 744        }
 745#define lpfc_qp_spin_lock(lock, qp, lstat) \
 746        { \
 747        int only_once = 1; \
 748        while (spin_trylock(lock) == 0) { \
 749                if (only_once) { \
 750                        only_once = 0; \
 751                        qp->lock_conflict.lstat++; \
 752                } \
 753        } \
 754        }
 755#else
 756#define lpfc_qp_spin_lock_irqsave(lock, flag, qp, lstat) \
 757        spin_lock_irqsave(lock, flag)
 758#define lpfc_qp_spin_lock(lock, qp, lstat) spin_lock(lock)
 759#endif
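/*
 * Usage sketch (illustrative): callers name the per-hardware-queue struct and
 * the lpfc_lock_stat counter to bump on first contention, e.g.
 *
 *	lpfc_qp_spin_lock_irqsave(&qp->io_buf_list_get_lock, iflag, qp,
 *				  alloc_xri_get);
 *	...
 *	spin_unlock_irqrestore(&qp->io_buf_list_get_lock, iflag);
 *
 * With LPFC_HDWQ_LOCK_STAT unset this compiles to a plain
 * spin_lock_irqsave().
 */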
 760
 761#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
 762struct lpfc_hdwq_stat {
 763        u32 hdwq_no;
 764        u32 rcv_io;
 765        u32 xmt_io;
 766        u32 cmpl_io;
 767};
 768#endif
 769
 770struct lpfc_sli4_hba {
 771        void __iomem *conf_regs_memmap_p; /* Kernel memory mapped address for
 772                                           * config space registers
 773                                           */
 774        void __iomem *ctrl_regs_memmap_p; /* Kernel memory mapped address for
 775                                           * control registers
 776                                           */
 777        void __iomem *drbl_regs_memmap_p; /* Kernel memory mapped address for
 778                                           * doorbell registers
 779                                           */
 780        void __iomem *dpp_regs_memmap_p;  /* Kernel memory mapped address for
 781                                           * dpp registers
 782                                           */
 783        union {
 784                struct {
 785                        /* IF Type 0, BAR 0 PCI cfg space reg mem map */
 786                        void __iomem *UERRLOregaddr;
 787                        void __iomem *UERRHIregaddr;
 788                        void __iomem *UEMASKLOregaddr;
 789                        void __iomem *UEMASKHIregaddr;
 790                } if_type0;
 791                struct {
 792                        /* IF Type 2, BAR 0 PCI cfg space reg mem map. */
 793                        void __iomem *STATUSregaddr;
 794                        void __iomem *CTRLregaddr;
 795                        void __iomem *ERR1regaddr;
 796#define SLIPORT_ERR1_REG_ERR_CODE_1             0x1
 797#define SLIPORT_ERR1_REG_ERR_CODE_2             0x2
 798                        void __iomem *ERR2regaddr;
 799#define SLIPORT_ERR2_REG_FW_RESTART             0x0
 800#define SLIPORT_ERR2_REG_FUNC_PROVISON          0x1
 801#define SLIPORT_ERR2_REG_FORCED_DUMP            0x2
 802#define SLIPORT_ERR2_REG_FAILURE_EQ             0x3
 803#define SLIPORT_ERR2_REG_FAILURE_CQ             0x4
 804#define SLIPORT_ERR2_REG_FAILURE_BUS            0x5
 805#define SLIPORT_ERR2_REG_FAILURE_RQ             0x6
 806                        void __iomem *EQDregaddr;
 807                } if_type2;
 808        } u;
 809
  810        /* IF type 0, BAR 1 and IF type 2, BAR 0 CSR register memory map */
 811        void __iomem *PSMPHRregaddr;
 812
 813        /* Well-known SLI INTF register memory map. */
 814        void __iomem *SLIINTFregaddr;
 815
 816        /* IF type 0, BAR 1 function CSR register memory map */
 817        void __iomem *ISRregaddr;       /* HST_ISR register */
 818        void __iomem *IMRregaddr;       /* HST_IMR register */
 819        void __iomem *ISCRregaddr;      /* HST_ISCR register */
 820        /* IF type 0, BAR 0 and if type 2, BAR 0 doorbell register memory map */
 821        void __iomem *RQDBregaddr;      /* RQ_DOORBELL register */
 822        void __iomem *WQDBregaddr;      /* WQ_DOORBELL register */
 823        void __iomem *CQDBregaddr;      /* CQ_DOORBELL register */
 824        void __iomem *EQDBregaddr;      /* EQ_DOORBELL register */
 825        void __iomem *MQDBregaddr;      /* MQ_DOORBELL register */
 826        void __iomem *BMBXregaddr;      /* BootStrap MBX register */
 827
 828        uint32_t ue_mask_lo;
 829        uint32_t ue_mask_hi;
 830        uint32_t ue_to_sr;
 831        uint32_t ue_to_rp;
 832        struct lpfc_register sli_intf;
 833        struct lpfc_pc_sli4_params pc_sli4_params;
 834        struct lpfc_bbscn_params bbscn_params;
 835        struct lpfc_hba_eq_hdl *hba_eq_hdl; /* HBA per-WQ handle */
 836
 837        void (*sli4_eq_clr_intr)(struct lpfc_queue *q);
 838        void (*sli4_write_eq_db)(struct lpfc_hba *phba, struct lpfc_queue *eq,
 839                                uint32_t count, bool arm);
 840        void (*sli4_write_cq_db)(struct lpfc_hba *phba, struct lpfc_queue *cq,
 841                                uint32_t count, bool arm);
 842
 843        /* Pointers to the constructed SLI4 queues */
 844        struct lpfc_sli4_hdw_queue *hdwq;
 845        struct list_head lpfc_wq_list;
 846
 847        /* Pointers to the constructed SLI4 queues for NVMET */
 848        struct lpfc_queue **nvmet_cqset; /* Fast-path NVMET CQ Set queues */
 849        struct lpfc_queue **nvmet_mrq_hdr; /* Fast-path NVMET hdr MRQs */
 850        struct lpfc_queue **nvmet_mrq_data; /* Fast-path NVMET data MRQs */
 851
 852        struct lpfc_queue *mbx_cq; /* Slow-path mailbox complete queue */
 853        struct lpfc_queue *els_cq; /* Slow-path ELS response complete queue */
 854        struct lpfc_queue *nvmels_cq; /* NVME LS complete queue */
 855        struct lpfc_queue *mbx_wq; /* Slow-path MBOX work queue */
 856        struct lpfc_queue *els_wq; /* Slow-path ELS work queue */
 857        struct lpfc_queue *nvmels_wq; /* NVME LS work queue */
 858        struct lpfc_queue *hdr_rq; /* Slow-path Header Receive queue */
 859        struct lpfc_queue *dat_rq; /* Slow-path Data Receive queue */
 860
 861        struct lpfc_name wwnn;
 862        struct lpfc_name wwpn;
 863
 864        uint32_t fw_func_mode;  /* FW function protocol mode */
 865        uint32_t ulp0_mode;     /* ULP0 protocol mode */
 866        uint32_t ulp1_mode;     /* ULP1 protocol mode */
 867
 868        /* Optimized Access Storage specific queues/structures */
 869        uint64_t oas_next_lun;
 870        uint8_t oas_next_tgt_wwpn[8];
 871        uint8_t oas_next_vpt_wwpn[8];
 872
 873        /* Setup information for various queue parameters */
 874        int eq_esize;
 875        int eq_ecount;
 876        int cq_esize;
 877        int cq_ecount;
 878        int wq_esize;
 879        int wq_ecount;
 880        int mq_esize;
 881        int mq_ecount;
 882        int rq_esize;
 883        int rq_ecount;
 884#define LPFC_SP_EQ_MAX_INTR_SEC         10000
 885#define LPFC_FP_EQ_MAX_INTR_SEC         10000
 886
 887        uint32_t intr_enable;
 888        struct lpfc_bmbx bmbx;
 889        struct lpfc_max_cfg_param max_cfg_param;
 890        uint16_t extents_in_use; /* must allocate resource extents. */
 891        uint16_t rpi_hdrs_in_use; /* must post rpi hdrs if set. */
 892        uint16_t next_xri; /* last_xri - max_cfg_param.xri_base = used */
 893        uint16_t next_rpi;
 894        uint16_t io_xri_max;
 895        uint16_t io_xri_cnt;
 896        uint16_t io_xri_start;
 897        uint16_t els_xri_cnt;
 898        uint16_t nvmet_xri_cnt;
 899        uint16_t nvmet_io_wait_cnt;
 900        uint16_t nvmet_io_wait_total;
 901        uint16_t cq_max;
 902        struct lpfc_queue **cq_lookup;
 903        struct list_head lpfc_els_sgl_list;
 904        struct list_head lpfc_abts_els_sgl_list;
 905        spinlock_t abts_io_buf_list_lock; /* list of aborted SCSI IOs */
 906        struct list_head lpfc_abts_io_buf_list;
 907        struct list_head lpfc_nvmet_sgl_list;
 908        spinlock_t abts_nvmet_buf_list_lock; /* list of aborted NVMET IOs */
 909        struct list_head lpfc_abts_nvmet_ctx_list;
 910        spinlock_t t_active_list_lock; /* list of active NVMET IOs */
 911        struct list_head t_active_ctx_list;
 912        struct list_head lpfc_nvmet_io_wait_list;
 913        struct lpfc_nvmet_ctx_info *nvmet_ctx_info;
 914        struct lpfc_sglq **lpfc_sglq_active_list;
 915        struct list_head lpfc_rpi_hdr_list;
 916        unsigned long *rpi_bmask;
 917        uint16_t *rpi_ids;
 918        uint16_t rpi_count;
 919        struct list_head lpfc_rpi_blk_list;
 920        unsigned long *xri_bmask;
 921        uint16_t *xri_ids;
 922        struct list_head lpfc_xri_blk_list;
 923        unsigned long *vfi_bmask;
 924        uint16_t *vfi_ids;
 925        uint16_t vfi_count;
 926        struct list_head lpfc_vfi_blk_list;
 927        struct lpfc_sli4_flags sli4_flags;
 928        struct list_head sp_queue_event;
 929        struct list_head sp_cqe_event_pool;
 930        struct list_head sp_asynce_work_queue;
 931        spinlock_t asynce_list_lock; /* protect sp_asynce_work_queue list */
 932        struct list_head sp_els_xri_aborted_work_queue;
 933        spinlock_t els_xri_abrt_list_lock; /* protect els_xri_aborted list */
 934        struct list_head sp_unsol_work_queue;
 935        struct lpfc_sli4_link link_state;
 936        struct lpfc_sli4_lnk_info lnk_info;
 937        uint32_t pport_name_sta;
 938#define LPFC_SLI4_PPNAME_NON    0
 939#define LPFC_SLI4_PPNAME_GET    1
 940        struct lpfc_iov iov;
 941        spinlock_t sgl_list_lock; /* list of aborted els IOs */
 942        spinlock_t nvmet_io_wait_lock; /* IOs waiting for ctx resources */
 943        uint32_t physical_port;
 944
 945        /* CPU to vector mapping information */
 946        struct lpfc_vector_map_info *cpu_map;
 947        uint16_t num_possible_cpu;
 948        uint16_t num_present_cpu;
 949        struct cpumask irq_aff_mask;
 950        uint16_t curr_disp_cpu;
 951        struct lpfc_eq_intr_info __percpu *eq_info;
 952#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
 953        struct lpfc_hdwq_stat __percpu *c_stat;
 954#endif
 955        struct lpfc_idle_stat *idle_stat;
 956        uint32_t conf_trunk;
 957#define lpfc_conf_trunk_port0_WORD      conf_trunk
 958#define lpfc_conf_trunk_port0_SHIFT     0
 959#define lpfc_conf_trunk_port0_MASK      0x1
 960#define lpfc_conf_trunk_port1_WORD      conf_trunk
 961#define lpfc_conf_trunk_port1_SHIFT     1
 962#define lpfc_conf_trunk_port1_MASK      0x1
 963#define lpfc_conf_trunk_port2_WORD      conf_trunk
 964#define lpfc_conf_trunk_port2_SHIFT     2
 965#define lpfc_conf_trunk_port2_MASK      0x1
 966#define lpfc_conf_trunk_port3_WORD      conf_trunk
 967#define lpfc_conf_trunk_port3_SHIFT     3
 968#define lpfc_conf_trunk_port3_MASK      0x1
 969#define lpfc_conf_trunk_port0_nd_WORD   conf_trunk
 970#define lpfc_conf_trunk_port0_nd_SHIFT  4
 971#define lpfc_conf_trunk_port0_nd_MASK   0x1
 972#define lpfc_conf_trunk_port1_nd_WORD   conf_trunk
 973#define lpfc_conf_trunk_port1_nd_SHIFT  5
 974#define lpfc_conf_trunk_port1_nd_MASK   0x1
 975#define lpfc_conf_trunk_port2_nd_WORD   conf_trunk
 976#define lpfc_conf_trunk_port2_nd_SHIFT  6
 977#define lpfc_conf_trunk_port2_nd_MASK   0x1
 978#define lpfc_conf_trunk_port3_nd_WORD   conf_trunk
 979#define lpfc_conf_trunk_port3_nd_SHIFT  7
 980#define lpfc_conf_trunk_port3_nd_MASK   0x1
 981};
 982
 983enum lpfc_sge_type {
 984        GEN_BUFF_TYPE,
 985        SCSI_BUFF_TYPE,
 986        NVMET_BUFF_TYPE
 987};
 988
 989enum lpfc_sgl_state {
 990        SGL_FREED,
 991        SGL_ALLOCATED,
 992        SGL_XRI_ABORTED
 993};
 994
 995struct lpfc_sglq {
 996        /* lpfc_sglqs are used in double linked lists */
 997        struct list_head list;
 998        struct list_head clist;
 999        enum lpfc_sge_type buff_type; /* is this a scsi sgl */
1000        enum lpfc_sgl_state state;
1001        struct lpfc_nodelist *ndlp; /* ndlp associated with IO */
1002        uint16_t iotag;         /* pre-assigned IO tag */
1003        uint16_t sli4_lxritag;  /* logical pre-assigned xri. */
1004        uint16_t sli4_xritag;   /* pre-assigned XRI, (OXID) tag. */
1005        struct sli4_sge *sgl;   /* pre-assigned SGL */
1006        void *virt;             /* virtual address. */
1007        dma_addr_t phys;        /* physical address */
1008};
1009
1010struct lpfc_rpi_hdr {
1011        struct list_head list;
1012        uint32_t len;
1013        struct lpfc_dmabuf *dmabuf;
1014        uint32_t page_count;
1015        uint32_t start_rpi;
1016        uint16_t next_rpi;
1017};
1018
1019struct lpfc_rsrc_blks {
1020        struct list_head list;
1021        uint16_t rsrc_start;
1022        uint16_t rsrc_size;
1023        uint16_t rsrc_used;
1024};
1025
1026struct lpfc_rdp_context {
1027        struct lpfc_nodelist *ndlp;
1028        uint16_t ox_id;
1029        uint16_t rx_id;
1030        READ_LNK_VAR link_stat;
1031        uint8_t page_a0[DMP_SFF_PAGE_A0_SIZE];
1032        uint8_t page_a2[DMP_SFF_PAGE_A2_SIZE];
1033        void (*cmpl)(struct lpfc_hba *, struct lpfc_rdp_context*, int);
1034};
1035
1036struct lpfc_lcb_context {
1037        uint8_t  sub_command;
1038        uint8_t  type;
1039        uint8_t  capability;
1040        uint8_t  frequency;
1041        uint16_t  duration;
1042        uint16_t ox_id;
1043        uint16_t rx_id;
1044        struct lpfc_nodelist *ndlp;
1045};
1046
1047
1048/*
1049 * SLI4 specific function prototypes
1050 */
1051int lpfc_pci_function_reset(struct lpfc_hba *);
1052int lpfc_sli4_pdev_status_reg_wait(struct lpfc_hba *);
1053int lpfc_sli4_hba_setup(struct lpfc_hba *);
1054int lpfc_sli4_config(struct lpfc_hba *, struct lpfcMboxq *, uint8_t,
1055                     uint8_t, uint32_t, bool);
1056void lpfc_sli4_mbox_cmd_free(struct lpfc_hba *, struct lpfcMboxq *);
1057void lpfc_sli4_mbx_sge_set(struct lpfcMboxq *, uint32_t, dma_addr_t, uint32_t);
1058void lpfc_sli4_mbx_sge_get(struct lpfcMboxq *, uint32_t,
1059                           struct lpfc_mbx_sge *);
1060int lpfc_sli4_mbx_read_fcf_rec(struct lpfc_hba *, struct lpfcMboxq *,
1061                               uint16_t);
1062
1063void lpfc_sli4_hba_reset(struct lpfc_hba *);
1064struct lpfc_queue *lpfc_sli4_queue_alloc(struct lpfc_hba *phba,
1065                                         uint32_t page_size,
1066                                         uint32_t entry_size,
1067                                         uint32_t entry_count, int cpu);
1068void lpfc_sli4_queue_free(struct lpfc_queue *);
1069int lpfc_eq_create(struct lpfc_hba *, struct lpfc_queue *, uint32_t);
1070void lpfc_modify_hba_eq_delay(struct lpfc_hba *phba, uint32_t startq,
1071                             uint32_t numq, uint32_t usdelay);
1072int lpfc_cq_create(struct lpfc_hba *, struct lpfc_queue *,
1073                        struct lpfc_queue *, uint32_t, uint32_t);
1074int lpfc_cq_create_set(struct lpfc_hba *phba, struct lpfc_queue **cqp,
1075                        struct lpfc_sli4_hdw_queue *hdwq, uint32_t type,
1076                        uint32_t subtype);
1077int32_t lpfc_mq_create(struct lpfc_hba *, struct lpfc_queue *,
1078                       struct lpfc_queue *, uint32_t);
1079int lpfc_wq_create(struct lpfc_hba *, struct lpfc_queue *,
1080                        struct lpfc_queue *, uint32_t);
1081int lpfc_rq_create(struct lpfc_hba *, struct lpfc_queue *,
1082                        struct lpfc_queue *, struct lpfc_queue *, uint32_t);
1083int lpfc_mrq_create(struct lpfc_hba *phba, struct lpfc_queue **hrqp,
1084                        struct lpfc_queue **drqp, struct lpfc_queue **cqp,
1085                        uint32_t subtype);
1086int lpfc_eq_destroy(struct lpfc_hba *, struct lpfc_queue *);
1087int lpfc_cq_destroy(struct lpfc_hba *, struct lpfc_queue *);
1088int lpfc_mq_destroy(struct lpfc_hba *, struct lpfc_queue *);
1089int lpfc_wq_destroy(struct lpfc_hba *, struct lpfc_queue *);
1090int lpfc_rq_destroy(struct lpfc_hba *, struct lpfc_queue *,
1091                         struct lpfc_queue *);
1092int lpfc_sli4_queue_setup(struct lpfc_hba *);
1093void lpfc_sli4_queue_unset(struct lpfc_hba *);
1094int lpfc_sli4_post_sgl(struct lpfc_hba *, dma_addr_t, dma_addr_t, uint16_t);
1095int lpfc_repost_io_sgl_list(struct lpfc_hba *phba);
1096uint16_t lpfc_sli4_next_xritag(struct lpfc_hba *);
1097void lpfc_sli4_free_xri(struct lpfc_hba *, int);
1098int lpfc_sli4_post_async_mbox(struct lpfc_hba *);
1099struct lpfc_cq_event *__lpfc_sli4_cq_event_alloc(struct lpfc_hba *);
1100struct lpfc_cq_event *lpfc_sli4_cq_event_alloc(struct lpfc_hba *);
1101void __lpfc_sli4_cq_event_release(struct lpfc_hba *, struct lpfc_cq_event *);
1102void lpfc_sli4_cq_event_release(struct lpfc_hba *, struct lpfc_cq_event *);
1103int lpfc_sli4_init_rpi_hdrs(struct lpfc_hba *);
1104int lpfc_sli4_post_rpi_hdr(struct lpfc_hba *, struct lpfc_rpi_hdr *);
1105int lpfc_sli4_post_all_rpi_hdrs(struct lpfc_hba *);
1106struct lpfc_rpi_hdr *lpfc_sli4_create_rpi_hdr(struct lpfc_hba *);
1107void lpfc_sli4_remove_rpi_hdrs(struct lpfc_hba *);
1108int lpfc_sli4_alloc_rpi(struct lpfc_hba *);
1109void lpfc_sli4_free_rpi(struct lpfc_hba *, int);
1110void lpfc_sli4_remove_rpis(struct lpfc_hba *);
1111void lpfc_sli4_async_event_proc(struct lpfc_hba *);
1112void lpfc_sli4_fcf_redisc_event_proc(struct lpfc_hba *);
1113int lpfc_sli4_resume_rpi(struct lpfc_nodelist *,
1114                        void (*)(struct lpfc_hba *, LPFC_MBOXQ_t *), void *);
1115void lpfc_sli4_els_xri_abort_event_proc(struct lpfc_hba *phba);
1116void lpfc_sli4_nvme_xri_aborted(struct lpfc_hba *phba,
1117                                struct sli4_wcqe_xri_aborted *axri,
1118                                struct lpfc_io_buf *lpfc_ncmd);
1119void lpfc_sli4_io_xri_aborted(struct lpfc_hba *phba,
1120                              struct sli4_wcqe_xri_aborted *axri, int idx);
1121void lpfc_sli4_nvmet_xri_aborted(struct lpfc_hba *phba,
1122                                 struct sli4_wcqe_xri_aborted *axri);
1123void lpfc_sli4_els_xri_aborted(struct lpfc_hba *,
1124                               struct sli4_wcqe_xri_aborted *);
1125void lpfc_sli4_vport_delete_els_xri_aborted(struct lpfc_vport *);
1126void lpfc_sli4_vport_delete_fcp_xri_aborted(struct lpfc_vport *);
1127int lpfc_sli4_brdreset(struct lpfc_hba *);
1128int lpfc_sli4_add_fcf_record(struct lpfc_hba *, struct fcf_record *);
1129void lpfc_sli_remove_dflt_fcf(struct lpfc_hba *);
1130int lpfc_sli4_get_els_iocb_cnt(struct lpfc_hba *);
1131int lpfc_sli4_get_iocb_cnt(struct lpfc_hba *phba);
1132int lpfc_sli4_init_vpi(struct lpfc_vport *);
1133void lpfc_sli4_eq_clr_intr(struct lpfc_queue *);
1134void lpfc_sli4_write_cq_db(struct lpfc_hba *phba, struct lpfc_queue *q,
1135                           uint32_t count, bool arm);
1136void lpfc_sli4_write_eq_db(struct lpfc_hba *phba, struct lpfc_queue *q,
1137                           uint32_t count, bool arm);
1138void lpfc_sli4_if6_eq_clr_intr(struct lpfc_queue *q);
1139void lpfc_sli4_if6_write_cq_db(struct lpfc_hba *phba, struct lpfc_queue *q,
1140                               uint32_t count, bool arm);
1141void lpfc_sli4_if6_write_eq_db(struct lpfc_hba *phba, struct lpfc_queue *q,
1142                               uint32_t count, bool arm);
1143void lpfc_sli4_fcfi_unreg(struct lpfc_hba *, uint16_t);
1144int lpfc_sli4_fcf_scan_read_fcf_rec(struct lpfc_hba *, uint16_t);
1145int lpfc_sli4_fcf_rr_read_fcf_rec(struct lpfc_hba *, uint16_t);
1146int lpfc_sli4_read_fcf_rec(struct lpfc_hba *, uint16_t);
1147void lpfc_mbx_cmpl_fcf_scan_read_fcf_rec(struct lpfc_hba *, LPFC_MBOXQ_t *);
1148void lpfc_mbx_cmpl_fcf_rr_read_fcf_rec(struct lpfc_hba *, LPFC_MBOXQ_t *);
1149void lpfc_mbx_cmpl_read_fcf_rec(struct lpfc_hba *, LPFC_MBOXQ_t *);
1150int lpfc_sli4_unregister_fcf(struct lpfc_hba *);
1151int lpfc_sli4_post_status_check(struct lpfc_hba *);
1152uint8_t lpfc_sli_config_mbox_subsys_get(struct lpfc_hba *, LPFC_MBOXQ_t *);
1153uint8_t lpfc_sli_config_mbox_opcode_get(struct lpfc_hba *, LPFC_MBOXQ_t *);
1154void lpfc_sli4_ras_dma_free(struct lpfc_hba *phba);
1155struct sli4_hybrid_sgl *lpfc_get_sgl_per_hdwq(struct lpfc_hba *phba,
1156                                              struct lpfc_io_buf *buf);
1157struct fcp_cmd_rsp_buf *lpfc_get_cmd_rsp_buf_per_hdwq(struct lpfc_hba *phba,
1158                                                      struct lpfc_io_buf *buf);
1159int lpfc_put_sgl_per_hdwq(struct lpfc_hba *phba, struct lpfc_io_buf *buf);
1160int lpfc_put_cmd_rsp_buf_per_hdwq(struct lpfc_hba *phba,
1161                                  struct lpfc_io_buf *buf);
1162void lpfc_free_sgl_per_hdwq(struct lpfc_hba *phba,
1163                            struct lpfc_sli4_hdw_queue *hdwq);
1164void lpfc_free_cmd_rsp_buf_per_hdwq(struct lpfc_hba *phba,
1165                                    struct lpfc_sli4_hdw_queue *hdwq);
1166static inline void *lpfc_sli4_qe(struct lpfc_queue *q, uint16_t idx)
1167{
1168        return q->q_pgs[idx / q->entry_cnt_per_pg] +
1169                (q->entry_size * (idx % q->entry_cnt_per_pg));
1170}
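/*
 * Worked example (illustrative): with the default 4KB queue pages and
 * 64-byte WQEs, entry_cnt_per_pg is 4096 / 64 = 64, so lpfc_sli4_qe(q, 70)
 * resolves to q->q_pgs[1] + 64 * 6, i.e. the 7th entry of the second page.
 */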
1171