linux/include/linux/qed/qed_roce_if.h
/* QLogic qed NIC Driver
 * Copyright (c) 2015-2016  QLogic Corporation
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef _QED_ROCE_IF_H
#define _QED_ROCE_IF_H
#include <linux/types.h>
#include <linux/delay.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/qed/qed_if.h>
#include <linux/qed/qed_ll2_if.h>
#include <linux/qed/rdma_common.h>

enum qed_roce_ll2_tx_dest {
        /* Light L2 TX Destination to the Network */
        QED_ROCE_LL2_TX_DEST_NW,

        /* Light L2 TX Destination to the Loopback */
        QED_ROCE_LL2_TX_DEST_LB,
        QED_ROCE_LL2_TX_DEST_MAX
};

#define QED_RDMA_MAX_CNQ_SIZE               (0xFFFF)

/* rdma interface */

enum qed_roce_qp_state {
        QED_ROCE_QP_STATE_RESET,
        QED_ROCE_QP_STATE_INIT,
        QED_ROCE_QP_STATE_RTR,
        QED_ROCE_QP_STATE_RTS,
        QED_ROCE_QP_STATE_SQD,
        QED_ROCE_QP_STATE_ERR,
        QED_ROCE_QP_STATE_SQE
};

enum qed_rdma_tid_type {
        QED_RDMA_TID_REGISTERED_MR,
        QED_RDMA_TID_FMR,
        QED_RDMA_TID_MW_TYPE1,
        QED_RDMA_TID_MW_TYPE2A
};

struct qed_rdma_events {
        void *context;
        void (*affiliated_event)(void *context, u8 fw_event_code,
                                 void *fw_handle);
        void (*unaffiliated_event)(void *context, u8 event_code);
};

struct qed_rdma_device {
        u32 vendor_id;
        u32 vendor_part_id;
        u32 hw_ver;
        u64 fw_ver;

        u64 node_guid;
        u64 sys_image_guid;

        u8 max_cnq;
        u8 max_sge;
        u8 max_srq_sge;
        u16 max_inline;
        u32 max_wqe;
        u32 max_srq_wqe;
        u8 max_qp_resp_rd_atomic_resc;
        u8 max_qp_req_rd_atomic_resc;
        u64 max_dev_resp_rd_atomic_resc;
        u32 max_cq;
        u32 max_qp;
        u32 max_srq;
        u32 max_mr;
        u64 max_mr_size;
        u32 max_cqe;
        u32 max_mw;
        u32 max_fmr;
        u32 max_mr_mw_fmr_pbl;
        u64 max_mr_mw_fmr_size;
        u32 max_pd;
        u32 max_ah;
        u8 max_pkey;
        u16 max_srq_wr;
        u8 max_stats_queues;
        u32 dev_caps;

        /* Ability to support RNR-NAK generation */
#define QED_RDMA_DEV_CAP_RNR_NAK_MASK                           0x1
#define QED_RDMA_DEV_CAP_RNR_NAK_SHIFT                          0
        /* Ability to support shutdown port */
#define QED_RDMA_DEV_CAP_SHUTDOWN_PORT_MASK                     0x1
#define QED_RDMA_DEV_CAP_SHUTDOWN_PORT_SHIFT                    1
        /* Ability to support port active event */
#define QED_RDMA_DEV_CAP_PORT_ACTIVE_EVENT_MASK                 0x1
#define QED_RDMA_DEV_CAP_PORT_ACTIVE_EVENT_SHIFT                2
        /* Ability to support port change event */
#define QED_RDMA_DEV_CAP_PORT_CHANGE_EVENT_MASK                 0x1
#define QED_RDMA_DEV_CAP_PORT_CHANGE_EVENT_SHIFT                3
        /* Ability to support system image GUID */
#define QED_RDMA_DEV_CAP_SYS_IMAGE_MASK                         0x1
#define QED_RDMA_DEV_CAP_SYS_IMAGE_SHIFT                        4
        /* Ability to support a bad P_Key counter */
#define QED_RDMA_DEV_CAP_BAD_PKEY_CNT_MASK                      0x1
#define QED_RDMA_DEV_CAP_BAD_PKEY_CNT_SHIFT                     5
        /* Ability to support atomic operations */
#define QED_RDMA_DEV_CAP_ATOMIC_OP_MASK                         0x1
#define QED_RDMA_DEV_CAP_ATOMIC_OP_SHIFT                        6
#define QED_RDMA_DEV_CAP_RESIZE_CQ_MASK                         0x1
#define QED_RDMA_DEV_CAP_RESIZE_CQ_SHIFT                        7
        /* Ability to support modifying the maximum number of
         * outstanding work requests per QP
         */
#define QED_RDMA_DEV_CAP_RESIZE_MAX_WR_MASK                     0x1
#define QED_RDMA_DEV_CAP_RESIZE_MAX_WR_SHIFT                    8
        /* Ability to support automatic path migration */
#define QED_RDMA_DEV_CAP_AUTO_PATH_MIG_MASK                     0x1
#define QED_RDMA_DEV_CAP_AUTO_PATH_MIG_SHIFT                    9
        /* Ability to support the base memory management extensions */
#define QED_RDMA_DEV_CAP_BASE_MEMORY_EXT_MASK                   0x1
#define QED_RDMA_DEV_CAP_BASE_MEMORY_EXT_SHIFT                  10
#define QED_RDMA_DEV_CAP_BASE_QUEUE_EXT_MASK                    0x1
#define QED_RDMA_DEV_CAP_BASE_QUEUE_EXT_SHIFT                   11
        /* Ability to support multiple page sizes per memory region */
#define QED_RDMA_DEV_CAP_MULTI_PAGE_PER_MR_EXT_MASK             0x1
#define QED_RDMA_DEV_CAP_MULTI_PAGE_PER_MR_EXT_SHIFT            12
        /* Ability to support a block list physical buffer list */
#define QED_RDMA_DEV_CAP_BLOCK_MODE_MASK                        0x1
#define QED_RDMA_DEV_CAP_BLOCK_MODE_SHIFT                       13
        /* Ability to support zero-based virtual addresses */
#define QED_RDMA_DEV_CAP_ZBVA_MASK                              0x1
#define QED_RDMA_DEV_CAP_ZBVA_SHIFT                             14
        /* Ability to support local invalidate fencing */
#define QED_RDMA_DEV_CAP_LOCAL_INV_FENCE_MASK                   0x1
#define QED_RDMA_DEV_CAP_LOCAL_INV_FENCE_SHIFT                  15
        /* Ability to support loopback on a QP */
#define QED_RDMA_DEV_CAP_LB_INDICATOR_MASK                      0x1
#define QED_RDMA_DEV_CAP_LB_INDICATOR_SHIFT                     16
        u64 page_size_caps;
        u8 dev_ack_delay;
        u32 reserved_lkey;
        u32 bad_pkey_counter;
        struct qed_rdma_events events;
};
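
/* Example (illustrative sketch only, not part of the qed API): dev_caps above
 * is a bitmap built from the QED_RDMA_DEV_CAP_*_MASK/_SHIFT pairs, so a given
 * capability is read by shifting first and masking second:
 *
 *        static bool qed_rdma_dev_has_atomics(struct qed_rdma_device *dev)
 *        {
 *                return (dev->dev_caps >> QED_RDMA_DEV_CAP_ATOMIC_OP_SHIFT) &
 *                       QED_RDMA_DEV_CAP_ATOMIC_OP_MASK;
 *        }
 *
 * The helper name is hypothetical; qed code typically expresses the same
 * computation through a generic GET_FIELD()-style accessor.
 */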

enum qed_port_state {
        QED_RDMA_PORT_UP,
        QED_RDMA_PORT_DOWN,
};

enum qed_roce_capability {
        QED_ROCE_V1 = 1 << 0,
        QED_ROCE_V2 = 1 << 1,
};

struct qed_rdma_port {
        enum qed_port_state port_state;
        int link_speed;
        u64 max_msg_size;
        u8 source_gid_table_len;
        void *source_gid_table_ptr;
        u8 pkey_table_len;
        void *pkey_table_ptr;
        u32 pkey_bad_counter;
        enum qed_roce_capability capability;
};

struct qed_rdma_cnq_params {
        u8 num_pbl_pages;
        u64 pbl_ptr;
};

/* The CQ mode affects the CQ doorbell transaction size.
 * 64-bit machines should configure the CQ for 32-bit doorbells and
 * 32-bit machines for 16-bit doorbells.
 */
enum qed_rdma_cq_mode {
        QED_RDMA_CQ_MODE_16_BITS,
        QED_RDMA_CQ_MODE_32_BITS,
};
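
/* Illustrative sketch (hypothetical helper, not part of the qed API): per the
 * note above, a 64-bit machine would pick the 32-bit CQ doorbell mode and a
 * 32-bit machine the 16-bit mode:
 *
 *        static enum qed_rdma_cq_mode qed_rdma_pick_cq_mode(void)
 *        {
 *                return IS_ENABLED(CONFIG_64BIT) ? QED_RDMA_CQ_MODE_32_BITS :
 *                                                  QED_RDMA_CQ_MODE_16_BITS;
 *        }
 */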

struct qed_roce_dcqcn_params {
        u8 notification_point;
        u8 reaction_point;

        /* fields for notification point */
        u32 cnp_send_timeout;

        /* fields for reaction point */
        u32 rl_bc_rate;
        u16 rl_max_rate;
        u16 rl_r_ai;
        u16 rl_r_hai;
        u16 dcqcn_g;
        u32 dcqcn_k_us;
        u32 dcqcn_timeout_us;
};

struct qed_rdma_start_in_params {
        struct qed_rdma_events *events;
        struct qed_rdma_cnq_params cnq_pbl_list[128];
        u8 desired_cnq;
        enum qed_rdma_cq_mode cq_mode;
        struct qed_roce_dcqcn_params dcqcn_params;
        u16 max_mtu;
        u8 mac_addr[ETH_ALEN];
        u8 iwarp_flags;
};

struct qed_rdma_add_user_out_params {
        u16 dpi;
        u64 dpi_addr;
        u64 dpi_phys_addr;
        u32 dpi_size;
};

enum roce_mode {
        ROCE_V1,
        ROCE_V2_IPV4,
        ROCE_V2_IPV6,
        MAX_ROCE_MODE
};

union qed_gid {
        u8 bytes[16];
        u16 words[8];
        u32 dwords[4];
        u64 qwords[2];
        u32 ipv4_addr;
};

struct qed_rdma_register_tid_in_params {
        u32 itid;
        enum qed_rdma_tid_type tid_type;
        u8 key;
        u16 pd;
        bool local_read;
        bool local_write;
        bool remote_read;
        bool remote_write;
        bool remote_atomic;
        bool mw_bind;
        u64 pbl_ptr;
        bool pbl_two_level;
        u8 pbl_page_size_log;
        u8 page_size_log;
        u32 fbo;
        u64 length;
        u64 vaddr;
        bool zbva;
        bool phy_mr;
        bool dma_mr;

        bool dif_enabled;
        u64 dif_error_addr;
        u64 dif_runt_addr;
};

struct qed_rdma_create_cq_in_params {
        u32 cq_handle_lo;
        u32 cq_handle_hi;
        u32 cq_size;
        u16 dpi;
        bool pbl_two_level;
        u64 pbl_ptr;
        u16 pbl_num_pages;
        u8 pbl_page_size_log;
        u8 cnq_id;
        u16 int_timeout;
};
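
/* Illustrative sketch (hypothetical variable names): the CQ ring is described
 * to the device by a PBL; a caller fills in its DMA address and geometry plus
 * the DPI and the CNQ that should receive completion notifications, then
 * invokes the rdma_create_cq op from struct qed_rdma_ops below:
 *
 *        struct qed_rdma_create_cq_in_params in_params = {
 *                .cq_size = n_entries,
 *                .dpi = dpi,
 *                .pbl_ptr = pbl_dma_addr,
 *                .pbl_num_pages = n_pbl_pages,
 *                .pbl_page_size_log = ilog2(PAGE_SIZE),
 *                .cnq_id = 0,
 *        };
 *        u16 icid;
 *
 *        rc = ops->rdma_create_cq(rdma_cxt, &in_params, &icid);
 */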

struct qed_rdma_create_srq_in_params {
        u64 pbl_base_addr;
        u64 prod_pair_addr;
        u16 num_pages;
        u16 pd_id;
        u16 page_size;
};

struct qed_rdma_destroy_cq_in_params {
        u16 icid;
};

struct qed_rdma_destroy_cq_out_params {
        u16 num_cq_notif;
};

struct qed_rdma_create_qp_in_params {
        u32 qp_handle_lo;
        u32 qp_handle_hi;
        u32 qp_handle_async_lo;
        u32 qp_handle_async_hi;
        bool use_srq;
        bool signal_all;
        bool fmr_and_reserved_lkey;
        u16 pd;
        u16 dpi;
        u16 sq_cq_id;
        u16 sq_num_pages;
        u64 sq_pbl_ptr;
        u8 max_sq_sges;
        u16 rq_cq_id;
        u16 rq_num_pages;
        u64 rq_pbl_ptr;
        u16 srq_id;
        u8 stats_queue;
};

struct qed_rdma_create_qp_out_params {
        u32 qp_id;
        u16 icid;
        void *rq_pbl_virt;
        dma_addr_t rq_pbl_phys;
        void *sq_pbl_virt;
        dma_addr_t sq_pbl_phys;
};

struct qed_rdma_modify_qp_in_params {
        u32 modify_flags;
#define QED_RDMA_MODIFY_QP_VALID_NEW_STATE_MASK               0x1
#define QED_RDMA_MODIFY_QP_VALID_NEW_STATE_SHIFT              0
#define QED_ROCE_MODIFY_QP_VALID_PKEY_MASK                    0x1
#define QED_ROCE_MODIFY_QP_VALID_PKEY_SHIFT                   1
#define QED_RDMA_MODIFY_QP_VALID_RDMA_OPS_EN_MASK             0x1
#define QED_RDMA_MODIFY_QP_VALID_RDMA_OPS_EN_SHIFT            2
#define QED_ROCE_MODIFY_QP_VALID_DEST_QP_MASK                 0x1
#define QED_ROCE_MODIFY_QP_VALID_DEST_QP_SHIFT                3
#define QED_ROCE_MODIFY_QP_VALID_ADDRESS_VECTOR_MASK          0x1
#define QED_ROCE_MODIFY_QP_VALID_ADDRESS_VECTOR_SHIFT         4
#define QED_ROCE_MODIFY_QP_VALID_RQ_PSN_MASK                  0x1
#define QED_ROCE_MODIFY_QP_VALID_RQ_PSN_SHIFT                 5
#define QED_ROCE_MODIFY_QP_VALID_SQ_PSN_MASK                  0x1
#define QED_ROCE_MODIFY_QP_VALID_SQ_PSN_SHIFT                 6
#define QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_REQ_MASK       0x1
#define QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_REQ_SHIFT      7
#define QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_RESP_MASK      0x1
#define QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_RESP_SHIFT     8
#define QED_ROCE_MODIFY_QP_VALID_ACK_TIMEOUT_MASK             0x1
#define QED_ROCE_MODIFY_QP_VALID_ACK_TIMEOUT_SHIFT            9
#define QED_ROCE_MODIFY_QP_VALID_RETRY_CNT_MASK               0x1
#define QED_ROCE_MODIFY_QP_VALID_RETRY_CNT_SHIFT              10
#define QED_ROCE_MODIFY_QP_VALID_RNR_RETRY_CNT_MASK           0x1
#define QED_ROCE_MODIFY_QP_VALID_RNR_RETRY_CNT_SHIFT          11
#define QED_ROCE_MODIFY_QP_VALID_MIN_RNR_NAK_TIMER_MASK       0x1
#define QED_ROCE_MODIFY_QP_VALID_MIN_RNR_NAK_TIMER_SHIFT      12
#define QED_ROCE_MODIFY_QP_VALID_E2E_FLOW_CONTROL_EN_MASK     0x1
#define QED_ROCE_MODIFY_QP_VALID_E2E_FLOW_CONTROL_EN_SHIFT    13
#define QED_ROCE_MODIFY_QP_VALID_ROCE_MODE_MASK               0x1
#define QED_ROCE_MODIFY_QP_VALID_ROCE_MODE_SHIFT              14

        enum qed_roce_qp_state new_state;
        u16 pkey;
        bool incoming_rdma_read_en;
        bool incoming_rdma_write_en;
        bool incoming_atomic_en;
        bool e2e_flow_control_en;
        u32 dest_qp;
        bool lb_indication;
        u16 mtu;
        u8 traffic_class_tos;
        u8 hop_limit_ttl;
        u32 flow_label;
        union qed_gid sgid;
        union qed_gid dgid;
        u16 udp_src_port;

        u16 vlan_id;

        u32 rq_psn;
        u32 sq_psn;
        u8 max_rd_atomic_resp;
        u8 max_rd_atomic_req;
        u32 ack_timeout;
        u8 retry_cnt;
        u8 rnr_retry_cnt;
        u8 min_rnr_nak_timer;
        bool sqd_async;
        u8 remote_mac_addr[6];
        u8 local_mac_addr[6];
        bool use_local_mac;
        enum roce_mode roce_mode;
};
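
/* Example (illustrative only): modify_flags is a bitmap built from the
 * *_VALID_* MASK/SHIFT pairs above; each bit that is set marks the matching
 * field of this structure as valid for the modify operation, e.g. moving a
 * QP to RTR while supplying the receive PSN:
 *
 *        params->new_state = QED_ROCE_QP_STATE_RTR;
 *        params->rq_psn = rq_psn;
 *        params->modify_flags =
 *                (QED_RDMA_MODIFY_QP_VALID_NEW_STATE_MASK <<
 *                 QED_RDMA_MODIFY_QP_VALID_NEW_STATE_SHIFT) |
 *                (QED_ROCE_MODIFY_QP_VALID_RQ_PSN_MASK <<
 *                 QED_ROCE_MODIFY_QP_VALID_RQ_PSN_SHIFT);
 *
 * qed code usually builds this with a SET_FIELD()-style helper rather than
 * open-coded shifts; the resulting bitmap is the same.
 */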

struct qed_rdma_query_qp_out_params {
        enum qed_roce_qp_state state;
        u32 rq_psn;
        u32 sq_psn;
        bool draining;
        u16 mtu;
        u32 dest_qp;
        bool incoming_rdma_read_en;
        bool incoming_rdma_write_en;
        bool incoming_atomic_en;
        bool e2e_flow_control_en;
        union qed_gid sgid;
        union qed_gid dgid;
        u32 flow_label;
        u8 hop_limit_ttl;
        u8 traffic_class_tos;
        u32 timeout;
        u8 rnr_retry;
        u8 retry_cnt;
        u8 min_rnr_nak_timer;
        u16 pkey_index;
        u8 max_rd_atomic;
        u8 max_dest_rd_atomic;
        bool sqd_async;
};

struct qed_rdma_create_srq_out_params {
        u16 srq_id;
};

struct qed_rdma_destroy_srq_in_params {
        u16 srq_id;
};

struct qed_rdma_modify_srq_in_params {
        u32 wqe_limit;
        u16 srq_id;
};

struct qed_rdma_stats_out_params {
        u64 sent_bytes;
        u64 sent_pkts;
        u64 rcv_bytes;
        u64 rcv_pkts;
};

struct qed_rdma_counters_out_params {
        u64 pd_count;
        u64 max_pd;
        u64 dpi_count;
        u64 max_dpi;
        u64 cq_count;
        u64 max_cq;
        u64 qp_count;
        u64 max_qp;
        u64 tid_count;
        u64 max_tid;
};

#define QED_ROCE_TX_HEAD_FAILURE        (1)
#define QED_ROCE_TX_FRAG_FAILURE        (2)

struct qed_roce_ll2_header {
        void *vaddr;
        dma_addr_t baddr;
        size_t len;
};

struct qed_roce_ll2_buffer {
        dma_addr_t baddr;
        size_t len;
};

struct qed_roce_ll2_packet {
        struct qed_roce_ll2_header header;
        int n_seg;
        struct qed_roce_ll2_buffer payload[RDMA_MAX_SGE_PER_SQ_WQE];
        int roce_mode;
        enum qed_roce_ll2_tx_dest tx_dest;
};

struct qed_roce_ll2_tx_params {
        int reserved;
};

struct qed_roce_ll2_rx_params {
        u16 vlan_id;
        u8 smac[ETH_ALEN];
        int rc;
};

struct qed_roce_ll2_cbs {
        void (*tx_cb)(void *pdev, struct qed_roce_ll2_packet *pkt);

        void (*rx_cb)(void *pdev, struct qed_roce_ll2_packet *pkt,
                      struct qed_roce_ll2_rx_params *params);
};

struct qed_roce_ll2_params {
        u16 max_rx_buffers;
        u16 max_tx_buffers;
        u16 mtu;
        u8 mac_address[ETH_ALEN];
        struct qed_roce_ll2_cbs cbs;
        void *cb_cookie;
};
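
/* Illustrative sketch (hypothetical callbacks and values): a RoCE driver fills
 * these light-L2 parameters, including its rx/tx completion callbacks, before
 * handing them to the roce_ll2_start op in struct qed_rdma_ops below:
 *
 *        struct qed_roce_ll2_params ll2_params = {
 *                .max_rx_buffers = 32,
 *                .max_tx_buffers = 32,
 *                .mtu = mtu,
 *                .cbs = {
 *                        .tx_cb = my_ll2_tx_done,
 *                        .rx_cb = my_ll2_rx_pkt,
 *                },
 *                .cb_cookie = my_dev,
 *        };
 *
 *        ether_addr_copy(ll2_params.mac_address, ndev->dev_addr);
 *        rc = ops->roce_ll2_start(cdev, &ll2_params);
 */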

struct qed_roce_ll2_info {
        u8 handle;
        struct qed_roce_ll2_cbs cbs;
        u8 mac_address[ETH_ALEN];
        void *cb_cookie;

        /* Lock to protect ll2 */
        struct mutex lock;
};

enum qed_rdma_type {
        QED_RDMA_TYPE_ROCE,
};

struct qed_dev_rdma_info {
        struct qed_dev_info common;
        enum qed_rdma_type rdma_type;
};

struct qed_rdma_ops {
        const struct qed_common_ops *common;

        int (*fill_dev_info)(struct qed_dev *cdev,
                             struct qed_dev_rdma_info *info);
        void *(*rdma_get_rdma_ctx)(struct qed_dev *cdev);

        int (*rdma_init)(struct qed_dev *dev,
                         struct qed_rdma_start_in_params *iparams);

        int (*rdma_add_user)(void *rdma_cxt,
                             struct qed_rdma_add_user_out_params *oparams);

        void (*rdma_remove_user)(void *rdma_cxt, u16 dpi);
        int (*rdma_stop)(void *rdma_cxt);
        struct qed_rdma_device *(*rdma_query_device)(void *rdma_cxt);
        struct qed_rdma_port *(*rdma_query_port)(void *rdma_cxt);
        int (*rdma_get_start_sb)(struct qed_dev *cdev);
        int (*rdma_get_min_cnq_msix)(struct qed_dev *cdev);
        void (*rdma_cnq_prod_update)(void *rdma_cxt, u8 cnq_index, u16 prod);
        int (*rdma_get_rdma_int)(struct qed_dev *cdev,
                                 struct qed_int_info *info);
        int (*rdma_set_rdma_int)(struct qed_dev *cdev, u16 cnt);
        int (*rdma_alloc_pd)(void *rdma_cxt, u16 *pd);
        void (*rdma_dealloc_pd)(void *rdma_cxt, u16 pd);
        int (*rdma_create_cq)(void *rdma_cxt,
                              struct qed_rdma_create_cq_in_params *params,
                              u16 *icid);
        int (*rdma_destroy_cq)(void *rdma_cxt,
                               struct qed_rdma_destroy_cq_in_params *iparams,
                               struct qed_rdma_destroy_cq_out_params *oparams);
        struct qed_rdma_qp *
        (*rdma_create_qp)(void *rdma_cxt,
                          struct qed_rdma_create_qp_in_params *iparams,
                          struct qed_rdma_create_qp_out_params *oparams);

        int (*rdma_modify_qp)(void *roce_cxt, struct qed_rdma_qp *qp,
                              struct qed_rdma_modify_qp_in_params *iparams);

        int (*rdma_query_qp)(void *rdma_cxt, struct qed_rdma_qp *qp,
                             struct qed_rdma_query_qp_out_params *oparams);
        int (*rdma_destroy_qp)(void *rdma_cxt, struct qed_rdma_qp *qp);
        int
        (*rdma_register_tid)(void *rdma_cxt,
                             struct qed_rdma_register_tid_in_params *iparams);
        int (*rdma_deregister_tid)(void *rdma_cxt, u32 itid);
        int (*rdma_alloc_tid)(void *rdma_cxt, u32 *itid);
        void (*rdma_free_tid)(void *rdma_cxt, u32 itid);
        int (*roce_ll2_start)(struct qed_dev *cdev,
                              struct qed_roce_ll2_params *params);
        int (*roce_ll2_stop)(struct qed_dev *cdev);
        int (*roce_ll2_tx)(struct qed_dev *cdev,
                           struct qed_roce_ll2_packet *packet,
                           struct qed_roce_ll2_tx_params *params);
        int (*roce_ll2_post_rx_buffer)(struct qed_dev *cdev,
                                       struct qed_roce_ll2_buffer *buf,
                                       u64 cookie, u8 notify_fw);
        int (*roce_ll2_set_mac_filter)(struct qed_dev *cdev,
                                       u8 *old_mac_address,
                                       u8 *new_mac_address);
        int (*roce_ll2_stats)(struct qed_dev *cdev,
                              struct qed_ll2_stats *stats);
};

const struct qed_rdma_ops *qed_get_rdma_ops(void);
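
/* Typical consumer flow (illustrative sketch; cdev, error handling and the
 * remaining per-object ops are omitted): an RDMA driver obtains the ops table
 * exported here, queries the device, and then drives the other ops with the
 * rdma context returned by the core:
 *
 *        const struct qed_rdma_ops *ops = qed_get_rdma_ops();
 *        struct qed_dev_rdma_info dev_info;
 *        struct qed_rdma_device *attrs;
 *        void *rdma_cxt;
 *
 *        ops->fill_dev_info(cdev, &dev_info);
 *        rdma_cxt = ops->rdma_get_rdma_ctx(cdev);
 *        attrs = ops->rdma_query_device(rdma_cxt);
 */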

#endif