linux/drivers/infiniband/hw/bnxt_re/qplib_fp.h
/*
 * Broadcom NetXtreme-E RoCE driver.
 *
 * Copyright (c) 2016 - 2017, Broadcom. All rights reserved.  The term
 * Broadcom refers to Broadcom Limited and/or its subsidiaries.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * BSD license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
 * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Description: Fast Path Operators (header)
 */

#ifndef __BNXT_QPLIB_FP_H__
#define __BNXT_QPLIB_FP_H__

struct bnxt_qplib_srq {
        struct bnxt_qplib_pd            *pd;
        struct bnxt_qplib_dpi           *dpi;
        void __iomem                    *dbr_base;
        u64                             srq_handle;
        u32                             id;
        u32                             max_wqe;
        u32                             max_sge;
        u32                             threshold;
        bool                            arm_req;
        struct bnxt_qplib_cq            *cq;
        struct bnxt_qplib_hwq           hwq;
        struct bnxt_qplib_swq           *swq;
        int                             start_idx;
        int                             last_idx;
        struct bnxt_qplib_sg_info       sg_info;
        u16                             eventq_hw_ring_id;
        spinlock_t                      lock; /* protect SRQE linked list */
};

struct bnxt_qplib_sge {
        u64                             addr;
        u32                             lkey;
        u32                             size;
};

#define BNXT_QPLIB_MAX_SQE_ENTRY_SIZE   sizeof(struct sq_send)

#define SQE_CNT_PER_PG          (PAGE_SIZE / BNXT_QPLIB_MAX_SQE_ENTRY_SIZE)
#define SQE_MAX_IDX_PER_PG      (SQE_CNT_PER_PG - 1)

static inline u32 get_sqe_pg(u32 val)
{
        return ((val & ~SQE_MAX_IDX_PER_PG) / SQE_CNT_PER_PG);
}

static inline u32 get_sqe_idx(u32 val)
{
        return (val & SQE_MAX_IDX_PER_PG);
}
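/*
 * The helpers above (and the analogous PSNE/RQE/CQE/NQE helpers and macros
 * below) split a flat ring index into a (page, offset-within-page) pair for
 * a queue laid out as an array of PAGE_SIZE pages of fixed-size entries.
 * For illustration only, assuming a 4 KiB PAGE_SIZE and a 128-byte
 * struct sq_send: SQE_CNT_PER_PG = 32 and SQE_MAX_IDX_PER_PG = 31, so
 * index 70 maps to page (70 & ~31) / 32 = 2, offset 70 & 31 = 6.
 */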
#define BNXT_QPLIB_MAX_PSNE_ENTRY_SIZE  sizeof(struct sq_psn_search)

#define PSNE_CNT_PER_PG         (PAGE_SIZE / BNXT_QPLIB_MAX_PSNE_ENTRY_SIZE)
#define PSNE_MAX_IDX_PER_PG     (PSNE_CNT_PER_PG - 1)

static inline u32 get_psne_pg(u32 val)
{
        return ((val & ~PSNE_MAX_IDX_PER_PG) / PSNE_CNT_PER_PG);
}

static inline u32 get_psne_idx(u32 val)
{
        return (val & PSNE_MAX_IDX_PER_PG);
}

#define BNXT_QPLIB_QP_MAX_SGL   6

struct bnxt_qplib_swq {
        u64                             wr_id;
        int                             next_idx;
        u8                              type;
        u8                              flags;
        u32                             start_psn;
        u32                             next_psn;
        struct sq_psn_search            *psn_search;
        struct sq_psn_search_ext        *psn_ext;
};

struct bnxt_qplib_swqe {
        /* General */
#define BNXT_QPLIB_FENCE_WRID   0x46454E43      /* "FENC" */
        u64                             wr_id;
        u8                              reqs_type;
        u8                              type;
#define BNXT_QPLIB_SWQE_TYPE_SEND                       0
#define BNXT_QPLIB_SWQE_TYPE_SEND_WITH_IMM              1
#define BNXT_QPLIB_SWQE_TYPE_SEND_WITH_INV              2
#define BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE                 4
#define BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE_WITH_IMM        5
#define BNXT_QPLIB_SWQE_TYPE_RDMA_READ                  6
#define BNXT_QPLIB_SWQE_TYPE_ATOMIC_CMP_AND_SWP         8
#define BNXT_QPLIB_SWQE_TYPE_ATOMIC_FETCH_AND_ADD       11
#define BNXT_QPLIB_SWQE_TYPE_LOCAL_INV                  12
#define BNXT_QPLIB_SWQE_TYPE_FAST_REG_MR                13
#define BNXT_QPLIB_SWQE_TYPE_REG_MR                     13
#define BNXT_QPLIB_SWQE_TYPE_BIND_MW                    14
#define BNXT_QPLIB_SWQE_TYPE_RECV                       128
#define BNXT_QPLIB_SWQE_TYPE_RECV_RDMA_IMM              129
        u8                              flags;
#define BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP               BIT(0)
#define BNXT_QPLIB_SWQE_FLAGS_RD_ATOMIC_FENCE           BIT(1)
#define BNXT_QPLIB_SWQE_FLAGS_UC_FENCE                  BIT(2)
#define BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT             BIT(3)
#define BNXT_QPLIB_SWQE_FLAGS_INLINE                    BIT(4)
        struct bnxt_qplib_sge           sg_list[BNXT_QPLIB_QP_MAX_SGL];
        int                             num_sge;
        /* Max inline data is 96 bytes */
        u32                             inline_len;
#define BNXT_QPLIB_SWQE_MAX_INLINE_LENGTH               96
        u8              inline_data[BNXT_QPLIB_SWQE_MAX_INLINE_LENGTH];
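        /*
         * When BNXT_QPLIB_SWQE_FLAGS_INLINE is set, the payload is
         * expected to come from inline_data[0..inline_len) rather than
         * from sg_list[]/num_sge.
         */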
        union {
                /* Send, with imm, inval key */
                struct {
                        union {
                                __be32  imm_data;
                                u32     inv_key;
                        };
                        u32             q_key;
                        u32             dst_qp;
                        u16             avid;
                } send;

                /* Send Raw Ethernet and QP1 */
                struct {
                        u16             lflags;
                        u16             cfa_action;
                        u32             cfa_meta;
                } rawqp1;

                /* RDMA write, with imm, read */
                struct {
                        union {
                                __be32  imm_data;
                                u32     inv_key;
                        };
                        u64             remote_va;
                        u32             r_key;
                } rdma;

                /* Atomic cmp/swap, fetch/add */
                struct {
                        u64             remote_va;
                        u32             r_key;
                        u64             swap_data;
                        u64             cmp_data;
                } atomic;

                /* Local Invalidate */
                struct {
                        u32             inv_l_key;
                } local_inv;

                /* FR-PMR */
                struct {
                        u8              access_cntl;
                        u8              pg_sz_log;
                        bool            zero_based;
                        u32             l_key;
                        u32             length;
                        u8              pbl_pg_sz_log;
#define BNXT_QPLIB_SWQE_PAGE_SIZE_4K                    0
#define BNXT_QPLIB_SWQE_PAGE_SIZE_8K                    1
#define BNXT_QPLIB_SWQE_PAGE_SIZE_64K                   4
#define BNXT_QPLIB_SWQE_PAGE_SIZE_256K                  6
#define BNXT_QPLIB_SWQE_PAGE_SIZE_1M                    8
#define BNXT_QPLIB_SWQE_PAGE_SIZE_2M                    9
#define BNXT_QPLIB_SWQE_PAGE_SIZE_4M                    10
#define BNXT_QPLIB_SWQE_PAGE_SIZE_1G                    18
                        u8              levels;
#define PAGE_SHIFT_4K   12
                        __le64          *pbl_ptr;
                        dma_addr_t      pbl_dma_ptr;
                        u64             *page_list;
                        u16             page_list_len;
                        u64             va;
                } frmr;

                /* Bind */
                struct {
                        u8              access_cntl;
#define BNXT_QPLIB_BIND_SWQE_ACCESS_LOCAL_WRITE         BIT(0)
#define BNXT_QPLIB_BIND_SWQE_ACCESS_REMOTE_READ         BIT(1)
#define BNXT_QPLIB_BIND_SWQE_ACCESS_REMOTE_WRITE        BIT(2)
#define BNXT_QPLIB_BIND_SWQE_ACCESS_REMOTE_ATOMIC       BIT(3)
#define BNXT_QPLIB_BIND_SWQE_ACCESS_WINDOW_BIND         BIT(4)
                        bool            zero_based;
                        u8              mw_type;
                        u32             parent_l_key;
                        u32             r_key;
                        u64             va;
                        u32             length;
                } bind;
        };
};

#define BNXT_QPLIB_MAX_RQE_ENTRY_SIZE   sizeof(struct rq_wqe)

#define RQE_CNT_PER_PG          (PAGE_SIZE / BNXT_QPLIB_MAX_RQE_ENTRY_SIZE)
#define RQE_MAX_IDX_PER_PG      (RQE_CNT_PER_PG - 1)
#define RQE_PG(x)               (((x) & ~RQE_MAX_IDX_PER_PG) / RQE_CNT_PER_PG)
#define RQE_IDX(x)              ((x) & RQE_MAX_IDX_PER_PG)

struct bnxt_qplib_q {
        struct bnxt_qplib_hwq           hwq;
        struct bnxt_qplib_swq           *swq;
        struct bnxt_qplib_sg_info       sg_info;
        u32                             max_wqe;
        u16                             q_full_delta;
        u16                             max_sge;
        u32                             psn;
        bool                            condition;
        bool                            single;
        bool                            send_phantom;
        u32                             phantom_wqe_cnt;
        u32                             phantom_cqe_cnt;
        u32                             next_cq_cons;
        bool                            flushed;
};

struct bnxt_qplib_qp {
        struct bnxt_qplib_pd            *pd;
        struct bnxt_qplib_dpi           *dpi;
        struct bnxt_qplib_chip_ctx      *cctx;
        u64                             qp_handle;
#define        BNXT_QPLIB_QP_ID_INVALID        0xFFFFFFFF
        u32                             id;
        u8                              type;
        u8                              sig_type;
        u32                             modify_flags;
        u8                              state;
        u8                              cur_qp_state;
        u32                             max_inline_data;
        u32                             mtu;
        u8                              path_mtu;
        bool                            en_sqd_async_notify;
        u16                             pkey_index;
        u32                             qkey;
        u32                             dest_qp_id;
        u8                              access;
        u8                              timeout;
        u8                              retry_cnt;
        u8                              rnr_retry;
        u64                             wqe_cnt;
        u32                             min_rnr_timer;
        u32                             max_rd_atomic;
        u32                             max_dest_rd_atomic;
        u32                             dest_qpn;
        u8                              smac[6];
        u16                             vlan_id;
        u8                              nw_type;
        struct bnxt_qplib_ah            ah;

#define BTH_PSN_MASK                    ((1 << 24) - 1)
        /* SQ */
        struct bnxt_qplib_q             sq;
        /* RQ */
        struct bnxt_qplib_q             rq;
        /* SRQ */
        struct bnxt_qplib_srq           *srq;
        /* CQ */
        struct bnxt_qplib_cq            *scq;
        struct bnxt_qplib_cq            *rcq;
        /* IRRQ and ORRQ */
        struct bnxt_qplib_hwq           irrq;
        struct bnxt_qplib_hwq           orrq;
        /* Header buffer for QP1 */
        int                             sq_hdr_buf_size;
        int                             rq_hdr_buf_size;
/*
 * Buffer space for ETH(14), IP or GRH(40), UDP header(8)
 * and ib_bth + ib_deth (20).
 * Max required is 82 when RoCE V2 is enabled
 */
#define BNXT_QPLIB_MAX_QP1_SQ_HDR_SIZE_V2       86
        /* Ethernet header      =  14 */
        /* ib_grh               =  40 (provided by MAD) */
        /* ib_bth + ib_deth     =  20 */
        /* MAD                  = 256 (provided by MAD) */
        /* iCRC                 =   4 */
#define BNXT_QPLIB_MAX_QP1_RQ_ETH_HDR_SIZE      14
#define BNXT_QPLIB_MAX_QP1_RQ_HDR_SIZE_V2       512
#define BNXT_QPLIB_MAX_GRH_HDR_SIZE_IPV4        20
#define BNXT_QPLIB_MAX_GRH_HDR_SIZE_IPV6        40
#define BNXT_QPLIB_MAX_QP1_RQ_BDETH_HDR_SIZE    20
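/*
 * Worst-case sums of the components listed above, for reference:
 * SQ side 14 + 40 + 8 + 20 = 82 bytes against the 86-byte define;
 * RQ side 14 + 40 + 20 + 256 + 4 = 334 bytes against the 512-byte define.
 * The headroom in the defines is presumably slack for a VLAN tag and/or
 * alignment (an assumption; the original comments do not say).
 */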
        void                            *sq_hdr_buf;
        dma_addr_t                      sq_hdr_buf_map;
        void                            *rq_hdr_buf;
        dma_addr_t                      rq_hdr_buf_map;
        struct list_head                sq_flush;
        struct list_head                rq_flush;
};

#define BNXT_QPLIB_MAX_CQE_ENTRY_SIZE   sizeof(struct cq_base)

#define CQE_CNT_PER_PG          (PAGE_SIZE / BNXT_QPLIB_MAX_CQE_ENTRY_SIZE)
#define CQE_MAX_IDX_PER_PG      (CQE_CNT_PER_PG - 1)
#define CQE_PG(x)               (((x) & ~CQE_MAX_IDX_PER_PG) / CQE_CNT_PER_PG)
#define CQE_IDX(x)              ((x) & CQE_MAX_IDX_PER_PG)

#define ROCE_CQE_CMP_V                  0
#define CQE_CMP_VALID(hdr, raw_cons, cp_bit)                    \
        (!!((hdr)->cqe_type_toggle & CQ_BASE_TOGGLE) ==         \
           !((raw_cons) & (cp_bit)))
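/*
 * CQE_CMP_VALID (and NQE_CMP_VALID below) implement the usual toggle/phase
 * check for these rings: the hardware flips the valid/toggle bit each time
 * the producer wraps, so an entry is accepted only when its toggle bit has
 * the value expected for the current pass over the ring, derived here from
 * the raw consumer counter and the cp_bit wrap mask.
 */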
/*
 * The queue is treated as full q_full_delta entries early: full is reported
 * when the producer index, advanced by q_full_delta, lands on the consumer
 * index, which keeps that much headroom in the ring.
 */
static inline bool bnxt_qplib_queue_full(struct bnxt_qplib_q *qplib_q)
{
        return HWQ_CMP((qplib_q->hwq.prod + qplib_q->q_full_delta),
                       &qplib_q->hwq) == HWQ_CMP(qplib_q->hwq.cons,
                                                 &qplib_q->hwq);
}
struct bnxt_qplib_cqe {
        u8                              status;
        u8                              type;
        u8                              opcode;
        u32                             length;
        u16                             cfa_meta;
        u64                             wr_id;
        union {
                __be32                  immdata;
                u32                     invrkey;
        };
        u64                             qp_handle;
        u64                             mr_handle;
        u16                             flags;
        u8                              smac[6];
        u32                             src_qp;
        u16                             raweth_qp1_flags;
        u16                             raweth_qp1_errors;
        u16                             raweth_qp1_cfa_code;
        u32                             raweth_qp1_flags2;
        u32                             raweth_qp1_metadata;
        u8                              raweth_qp1_payload_offset;
        u16                             pkey_index;
};
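/*
 * Software view of a completion: bnxt_qplib_poll_cq() is expected to decode
 * the various hardware CQE layouts (send, RC/UD receive, raw-ethernet/QP1)
 * into this one structure for the verbs layer.
 */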
#define BNXT_QPLIB_QUEUE_START_PERIOD           0x01
struct bnxt_qplib_cq {
        struct bnxt_qplib_dpi           *dpi;
        void __iomem                    *dbr_base;
        u32                             max_wqe;
        u32                             id;
        u16                             count;
        u16                             period;
        struct bnxt_qplib_hwq           hwq;
        u32                             cnq_hw_ring_id;
        struct bnxt_qplib_nq            *nq;
        bool                            resize_in_progress;
        struct bnxt_qplib_sg_info       sg_info;
        u64                             cq_handle;

#define CQ_RESIZE_WAIT_TIME_MS          500
        unsigned long                   flags;
#define CQ_FLAGS_RESIZE_IN_PROG         1
        wait_queue_head_t               waitq;
        struct list_head                sqf_head, rqf_head;
        atomic_t                        arm_state;
        spinlock_t                      compl_lock; /* synch CQ handlers */
/* Locking Notes:
 * A QP can move to the error state from modify_qp, from an async error
 * event, or from an error CQE seen during poll_cq. When a QP moves to the
 * error state, it is added to two flush lists, one each for its SQ and RQ.
 * Each flush list is protected by qplib_cq->flush_lock. Both the scq and
 * rcq flush_locks should be acquired when the QP is moved to error. The
 * control path operations (modify_qp and async error events) are
 * synchronized with poll_cq using the upper-level CQ locks
 * (bnxt_re_cq->cq_lock) of both the SCQ and the RCQ.
 * The qplib_cq->flush_lock is required to synchronize two instances of
 * poll_cq on the same QP while they manipulate the flush list.
 */
        spinlock_t                      flush_lock; /* QP flush management */
};

#define BNXT_QPLIB_MAX_IRRQE_ENTRY_SIZE sizeof(struct xrrq_irrq)
#define BNXT_QPLIB_MAX_ORRQE_ENTRY_SIZE sizeof(struct xrrq_orrq)
#define IRD_LIMIT_TO_IRRQ_SLOTS(x)      (2 * (x) + 2)
#define IRRQ_SLOTS_TO_IRD_LIMIT(s)      (((s) >> 1) - 1)
#define ORD_LIMIT_TO_ORRQ_SLOTS(x)      ((x) + 1)
#define ORRQ_SLOTS_TO_ORD_LIMIT(s)      ((s) - 1)
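/*
 * Worked example of the conversions above: an IRD limit of 8 requires
 * IRD_LIMIT_TO_IRRQ_SLOTS(8) = 2 * 8 + 2 = 18 IRRQ slots, and
 * IRRQ_SLOTS_TO_IRD_LIMIT(18) = (18 >> 1) - 1 = 8 recovers the limit;
 * likewise an ORD limit of 8 maps to 9 ORRQ slots and back.
 */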
#define BNXT_QPLIB_MAX_NQE_ENTRY_SIZE   sizeof(struct nq_base)

#define NQE_CNT_PER_PG          (PAGE_SIZE / BNXT_QPLIB_MAX_NQE_ENTRY_SIZE)
#define NQE_MAX_IDX_PER_PG      (NQE_CNT_PER_PG - 1)
#define NQE_PG(x)               (((x) & ~NQE_MAX_IDX_PER_PG) / NQE_CNT_PER_PG)
#define NQE_IDX(x)              ((x) & NQE_MAX_IDX_PER_PG)

#define NQE_CMP_VALID(hdr, raw_cons, cp_bit)                    \
        (!!(le32_to_cpu((hdr)->info63_v[0]) & NQ_BASE_V) ==     \
           !((raw_cons) & (cp_bit)))

#define BNXT_QPLIB_NQE_MAX_CNT          (128 * 1024)

#define NQ_CONS_PCI_BAR_REGION          2
#define NQ_DB_KEY_CP                    (0x2 << CMPL_DOORBELL_KEY_SFT)
#define NQ_DB_IDX_VALID                 CMPL_DOORBELL_IDX_VALID
#define NQ_DB_IRQ_DIS                   CMPL_DOORBELL_MASK
#define NQ_DB_CP_FLAGS_REARM            (NQ_DB_KEY_CP |         \
                                         NQ_DB_IDX_VALID)
#define NQ_DB_CP_FLAGS                  (NQ_DB_KEY_CP    |      \
                                         NQ_DB_IDX_VALID |      \
                                         NQ_DB_IRQ_DIS)
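/*
 * NQ doorbell helpers. On Gen-P5 chips the doorbell is a single 64-bit
 * write: the ring id (xid), path and doorbell type go in the upper 32 bits
 * and the consumer index in the lower 32 bits, as built below in
 * bnxt_qplib_ring_nq_db64(). Older chips use the legacy 32-bit completion
 * doorbell written with the NQ_DB_CP_FLAGS* values above.
 */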
static inline void bnxt_qplib_ring_nq_db64(void __iomem *db, u32 index,
                                           u32 xid, bool arm)
{
        u64 val;

        val = xid & DBC_DBC_XID_MASK;
        val |= DBC_DBC_PATH_ROCE;
        val |= arm ? DBC_DBC_TYPE_NQ_ARM : DBC_DBC_TYPE_NQ;
        val <<= 32;
        val |= index & DBC_DBC_INDEX_MASK;
        writeq(val, db);
}
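/*
 * The _rearm variant below rings the doorbell and re-enables (arms) the NQ
 * interrupt; the plain variant only advances the consumer index and leaves
 * the interrupt masked (NQ_DB_IRQ_DIS on legacy chips, the non-ARM doorbell
 * type on Gen-P5).
 */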
static inline void bnxt_qplib_ring_nq_db_rearm(void __iomem *db, u32 raw_cons,
                                               u32 max_elements, u32 xid,
                                               bool gen_p5)
{
        u32 index = raw_cons & (max_elements - 1);

        if (gen_p5)
                bnxt_qplib_ring_nq_db64(db, index, xid, true);
        else
                writel(NQ_DB_CP_FLAGS_REARM | (index & DBC_DBC32_XID_MASK), db);
}

static inline void bnxt_qplib_ring_nq_db(void __iomem *db, u32 raw_cons,
                                         u32 max_elements, u32 xid,
                                         bool gen_p5)
{
        u32 index = raw_cons & (max_elements - 1);

        if (gen_p5)
                bnxt_qplib_ring_nq_db64(db, index, xid, false);
        else
                writel(NQ_DB_CP_FLAGS | (index & DBC_DBC32_XID_MASK), db);
}
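/*
 * Notification queue (NQ) context. The NQ carries CQ and SRQ notification
 * events from the hardware; the tasklet below drains it and dispatches to
 * the cqn_handler()/srqn_handler() callbacks registered through
 * bnxt_qplib_enable_nq().
 */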
struct bnxt_qplib_nq {
        struct pci_dev          *pdev;
        struct bnxt_qplib_res   *res;

        int                     vector;
        cpumask_t               mask;
        int                     budget;
        bool                    requested;
        struct tasklet_struct   worker;
        struct bnxt_qplib_hwq   hwq;

        u16                     bar_reg;
        u32                     bar_reg_off;
        u16                     ring_id;
        void __iomem            *bar_reg_iomem;

        int                     (*cqn_handler)(struct bnxt_qplib_nq *nq,
                                               struct bnxt_qplib_cq *cq);
        int                     (*srqn_handler)(struct bnxt_qplib_nq *nq,
                                                struct bnxt_qplib_srq *srq,
                                                u8 event);
        struct workqueue_struct *cqn_wq;
        char                    name[32];
};

struct bnxt_qplib_nq_work {
        struct work_struct      work;
        struct bnxt_qplib_nq    *nq;
        struct bnxt_qplib_cq    *cq;
};

void bnxt_qplib_nq_stop_irq(struct bnxt_qplib_nq *nq, bool kill);
void bnxt_qplib_disable_nq(struct bnxt_qplib_nq *nq);
int bnxt_qplib_nq_start_irq(struct bnxt_qplib_nq *nq, int nq_indx,
                            int msix_vector, bool need_init);
int bnxt_qplib_enable_nq(struct pci_dev *pdev, struct bnxt_qplib_nq *nq,
                         int nq_idx, int msix_vector, int bar_reg_offset,
                         int (*cqn_handler)(struct bnxt_qplib_nq *nq,
                                            struct bnxt_qplib_cq *cq),
                         int (*srqn_handler)(struct bnxt_qplib_nq *nq,
                                             struct bnxt_qplib_srq *srq,
                                             u8 event));
int bnxt_qplib_create_srq(struct bnxt_qplib_res *res,
                          struct bnxt_qplib_srq *srq);
int bnxt_qplib_modify_srq(struct bnxt_qplib_res *res,
                          struct bnxt_qplib_srq *srq);
int bnxt_qplib_query_srq(struct bnxt_qplib_res *res,
                         struct bnxt_qplib_srq *srq);
void bnxt_qplib_destroy_srq(struct bnxt_qplib_res *res,
                            struct bnxt_qplib_srq *srq);
int bnxt_qplib_post_srq_recv(struct bnxt_qplib_srq *srq,
                             struct bnxt_qplib_swqe *wqe);
int bnxt_qplib_create_qp1(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp);
int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp);
int bnxt_qplib_modify_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp);
int bnxt_qplib_query_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp);
int bnxt_qplib_destroy_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp);
void bnxt_qplib_clean_qp(struct bnxt_qplib_qp *qp);
void bnxt_qplib_free_qp_res(struct bnxt_qplib_res *res,
                            struct bnxt_qplib_qp *qp);
void *bnxt_qplib_get_qp1_sq_buf(struct bnxt_qplib_qp *qp,
                                struct bnxt_qplib_sge *sge);
void *bnxt_qplib_get_qp1_rq_buf(struct bnxt_qplib_qp *qp,
                                struct bnxt_qplib_sge *sge);
u32 bnxt_qplib_get_rq_prod_index(struct bnxt_qplib_qp *qp);
dma_addr_t bnxt_qplib_get_qp_buf_from_index(struct bnxt_qplib_qp *qp,
                                            u32 index);
void bnxt_qplib_post_send_db(struct bnxt_qplib_qp *qp);
int bnxt_qplib_post_send(struct bnxt_qplib_qp *qp,
                         struct bnxt_qplib_swqe *wqe);
void bnxt_qplib_post_recv_db(struct bnxt_qplib_qp *qp);
int bnxt_qplib_post_recv(struct bnxt_qplib_qp *qp,
                         struct bnxt_qplib_swqe *wqe);
int bnxt_qplib_create_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq);
int bnxt_qplib_destroy_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq);
int bnxt_qplib_poll_cq(struct bnxt_qplib_cq *cq, struct bnxt_qplib_cqe *cqe,
                       int num, struct bnxt_qplib_qp **qp);
bool bnxt_qplib_is_cq_empty(struct bnxt_qplib_cq *cq);
void bnxt_qplib_req_notify_cq(struct bnxt_qplib_cq *cq, u32 arm_type);
void bnxt_qplib_free_nq(struct bnxt_qplib_nq *nq);
int bnxt_qplib_alloc_nq(struct pci_dev *pdev, struct bnxt_qplib_nq *nq);
void bnxt_qplib_add_flush_qp(struct bnxt_qplib_qp *qp);
void bnxt_qplib_acquire_cq_locks(struct bnxt_qplib_qp *qp,
                                 unsigned long *flags);
void bnxt_qplib_release_cq_locks(struct bnxt_qplib_qp *qp,
                                 unsigned long *flags);
int bnxt_qplib_process_flush_list(struct bnxt_qplib_cq *cq,
                                  struct bnxt_qplib_cqe *cqe,
                                  int num_cqes);
void bnxt_qplib_flush_cqn_wq(struct bnxt_qplib_qp *qp);
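/*
 * Minimal sketch of how a caller might drain a CQ with the API above; the
 * batch size and the re-arm policy are illustrative assumptions, not taken
 * from this header:
 *
 *      struct bnxt_qplib_cqe cqe[16];
 *      struct bnxt_qplib_qp *lib_qp = NULL;
 *      int ncqe;
 *
 *      ncqe = bnxt_qplib_poll_cq(cq, cqe, 16, &lib_qp);
 *
 * cqe[0..ncqe) then describe completed work requests, and the CQ can be
 * re-armed with bnxt_qplib_req_notify_cq(cq, arm_type) when needed.
 */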
#endif /* __BNXT_QPLIB_FP_H__ */