/* linux/drivers/infiniband/hw/irdma/verbs.h */
   1/* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB */
   2/* Copyright (c) 2015 - 2021 Intel Corporation */
   3#ifndef IRDMA_VERBS_H
   4#define IRDMA_VERBS_H
   5
/* Number of physical page addresses cached in irdma_mr::pgaddrmem */
#define IRDMA_MAX_SAVED_PHY_PGADDR      4

/* Single-entry P_Key table with the IB default (full-membership) P_Key */
#define IRDMA_PKEY_TBL_SZ               1
#define IRDMA_DEFAULT_PKEY              0xFFFF
  10
/*
 * Per-process user context. Embeds the core ib_ucontext and tracks the
 * user-registered CQ/QP memory regions plus the doorbell mmap entry.
 */
struct irdma_ucontext {
        struct ib_ucontext ibucontext;
        struct irdma_device *iwdev;
        struct rdma_user_mmap_entry *db_mmap_entry; /* "db" presumably doorbell page - confirm at mmap site */
        struct list_head cq_reg_mem_list;
        spinlock_t cq_reg_mem_list_lock; /* protect CQ memory list */
        struct list_head qp_reg_mem_list;
        spinlock_t qp_reg_mem_list_lock; /* protect QP memory list */
        int abi_ver;       /* user/kernel ABI version negotiated at alloc */
        bool legacy_mode;
};
  22
/* Protection domain: core ib_pd plus the driver's SC-level PD state. */
struct irdma_pd {
        struct ib_pd ibpd;
        struct irdma_sc_pd sc_pd;
};
  27
/*
 * Address vector: destination MAC, the rdma_ah_attr it was built from,
 * and the resolved source/destination socket addresses (v4 or v6).
 */
struct irdma_av {
        u8 macaddr[16];
        struct rdma_ah_attr attrs;
        union {
                struct sockaddr saddr;
                struct sockaddr_in saddr_in;
                struct sockaddr_in6 saddr_in6;
        } sgid_addr, dgid_addr;
        u8 net_type; /* presumably rdma_network_type - confirm at AH create */
};
  38
/* Address handle: core ib_ah plus the SC-level AH and cached AV data. */
struct irdma_ah {
        struct ib_ah ibah;
        struct irdma_sc_ah sc_ah;
        struct irdma_pd *pd;
        struct irdma_av av;
        u8 sgid_index;
        union ib_gid dgid;
};
  47
/*
 * HMC physical buffer list entry: either a PBLE index or a direct DMA
 * address, depending on how the buffer was mapped.
 */
struct irdma_hmc_pble {
        union {
                u32 idx;
                dma_addr_t addr;
        };
};
  54
/* User-registered CQ memory: CQ buffer PBL plus shadow area address. */
struct irdma_cq_mr {
        struct irdma_hmc_pble cq_pbl;
        dma_addr_t shadow;
        bool split; /* set when shadow area lives in a separate region - confirm */
};
  60
/* User-registered QP memory: SQ/RQ PBLs, shadow area, and the SQ page. */
struct irdma_qp_mr {
        struct irdma_hmc_pble sq_pbl;
        struct irdma_hmc_pble rq_pbl;
        dma_addr_t shadow;
        struct page *sq_page;
};
  67
/*
 * Kernel CQ buffer kept around across a CQ resize; linked on a list
 * (see irdma_cq::resize_list) and freed via the embedded work item.
 */
struct irdma_cq_buf {
        struct irdma_dma_mem kmem_buf;
        struct irdma_cq_uk cq_uk;
        struct irdma_hw *hw;
        struct list_head list;
        struct work_struct work;
};
  75
/*
 * Physical buffer list bookkeeping for a user registration. The union
 * holds QP- or CQ-specific memory info depending on what was registered.
 */
struct irdma_pbl {
        struct list_head list;
        union {
                struct irdma_qp_mr qp_mr;
                struct irdma_cq_mr cq_mr;
        };

        bool pbl_allocated:1; /* a PBLE allocation (pble_alloc) is live */
        bool on_list:1;       /* linked on a ucontext reg_mem list */
        u64 user_base;        /* userspace base VA of the registration */
        struct irdma_pble_alloc pble_alloc;
        struct irdma_mr *iwmr; /* back-pointer to the owning MR */
};
  89
/*
 * Memory region / memory window. The leading union lets one object back
 * either an ib_mr or an ib_mw; 'type' presumably discriminates - confirm
 * against the alloc paths.
 */
struct irdma_mr {
        union {
                struct ib_mr ibmr;
                struct ib_mw ibmw;
        };
        struct ib_umem *region; /* pinned user memory (NULL for kernel MRs) */
        u16 type;
        u32 page_cnt;
        u64 page_size;
        u32 npages;
        u32 stag;  /* HW steering tag for this region */
        u64 len;
        u64 pgaddrmem[IRDMA_MAX_SAVED_PHY_PGADDR]; /* small inline page-address cache */
        struct irdma_pbl iwpbl;
};
 105
/* Completion queue: core ib_cq, SC-level CQ, and kernel/user buffers. */
struct irdma_cq {
        struct ib_cq ibcq;
        struct irdma_sc_cq sc_cq;
        u16 cq_head;
        u16 cq_size;
        u16 cq_num;
        bool user_mode; /* true when the CQ buffer lives in userspace */
        u32 polled_cmpls;
        u32 cq_mem_size;
        struct irdma_dma_mem kmem;        /* kernel-mode CQ buffer */
        struct irdma_dma_mem kmem_shadow; /* kernel-mode shadow area */
        spinlock_t lock; /* for poll cq */
        struct irdma_pbl *iwpbl;          /* user-mode CQ buffer PBL */
        struct irdma_pbl *iwpbl_shadow;   /* user-mode shadow area PBL */
        struct list_head resize_list;     /* old buffers (irdma_cq_buf) pending free */
        struct irdma_cq_poll_info cur_cqe;
};
 123
/* Deferred-disconnect work item carrying the QP it applies to. */
struct disconn_work {
        struct work_struct work;
        struct irdma_qp *iwqp;
};
 128
 129struct iw_cm_id;
 130
/* Kernel-mode QP resources: DMA queue memory and SQ/RQ WR-id tracking. */
struct irdma_qp_kmode {
        struct irdma_dma_mem dma_mem;
        struct irdma_sq_uk_wr_trk_info *sq_wrid_mem;
        u64 *rq_wrid_mem;
};
 136
/*
 * Queue pair. Embeds the core ib_qp and the SC-level QP, and carries
 * both iWARP- and RoCE-specific state (the unions below select per
 * transport). Layout is shared across the driver - do not reorder.
 */
struct irdma_qp {
        struct ib_qp ibqp;
        struct irdma_sc_qp sc_qp;
        struct irdma_device *iwdev;
        struct irdma_cq *iwscq; /* send CQ */
        struct irdma_cq *iwrcq; /* receive CQ */
        struct irdma_pd *iwpd;
        struct rdma_user_mmap_entry *push_wqe_mmap_entry;
        struct rdma_user_mmap_entry *push_db_mmap_entry;
        struct irdma_qp_host_ctx_info ctx_info;
        /* transport-specific offload info: iWARP or RoCE, never both */
        union {
                struct irdma_iwarp_offload_info iwarp_info;
                struct irdma_roce_offload_info roce_info;
        };

        /* transport-specific L4 info: TCP (iWARP) or UDP (RoCE) */
        union {
                struct irdma_tcp_offload_info tcp_info;
                struct irdma_udp_offload_info udp_info;
        };

        struct irdma_ah roce_ah;
        struct list_head teardown_entry;
        refcount_t refcnt;
        struct iw_cm_id *cm_id;           /* iWARP connection manager id */
        struct irdma_cm_node *cm_node;
        struct ib_mr *lsmm_mr;
        atomic_t hw_mod_qp_pend;          /* HW modify-QP in flight */
        enum ib_qp_state ibqp_state;
        u32 qp_mem_size;
        u32 last_aeq;                     /* last async event code seen */
        int max_send_wr;
        int max_recv_wr;
        atomic_t close_timer_started;
        spinlock_t lock; /* serialize posting WRs to SQ/RQ */
        struct irdma_qp_context *iwqp_context;
        void *pbl_vbase;
        dma_addr_t pbl_pbase;
        struct page *page;
        /* single-bit state flags */
        u8 active_conn : 1;
        u8 user_mode : 1;
        u8 hte_added : 1;   /* "hte" presumably hash-table entry - confirm */
        u8 flush_issued : 1;
        u8 sig_all : 1;
        u8 pau_mode : 1;
        u8 rsvd : 1;
        u8 iwarp_state;
        u16 term_sq_flush_code;
        u16 term_rq_flush_code;
        u8 hw_iwarp_state;
        u8 hw_tcp_state;
        struct irdma_qp_kmode kqp;        /* kernel-mode queue buffers */
        struct irdma_dma_mem host_ctx;
        struct timer_list terminate_timer;
        struct irdma_pbl *iwpbl;
        struct irdma_dma_mem q2_ctx_mem;
        struct irdma_dma_mem ietf_mem;
        struct completion free_qp;        /* signaled when QP can be freed */
        wait_queue_head_t waitq;
        wait_queue_head_t mod_qp_waitq;   /* waiters for HW modify-QP completion */
        u8 rts_ae_rcvd;                   /* RTS async event received */
};
 198
/* Caching attribute for user BAR mappings (NC/WC per the suffixes). */
enum irdma_mmap_flag {
        IRDMA_MMAP_IO_NC, /* non-cached */
        IRDMA_MMAP_IO_WC, /* write-combined */
};
 203
/* Driver wrapper for an rdma_user_mmap_entry: BAR offset + mmap flag. */
struct irdma_user_mmap_entry {
        struct rdma_user_mmap_entry rdma_entry;
        u64 bar_offset;
        u8 mmap_flag; /* enum irdma_mmap_flag */
};
 209
 210static inline u16 irdma_fw_major_ver(struct irdma_sc_dev *dev)
 211{
 212        return (u16)FIELD_GET(IRDMA_FW_VER_MAJOR, dev->feature_info[IRDMA_FEATURE_FW_INFO]);
 213}
 214
 215static inline u16 irdma_fw_minor_ver(struct irdma_sc_dev *dev)
 216{
 217        return (u16)FIELD_GET(IRDMA_FW_VER_MINOR, dev->feature_info[IRDMA_FEATURE_FW_INFO]);
 218}
 219
/* Build a multicast MAC from an IPv4/IPv6 group address. */
void irdma_mcast_mac(u32 *ip_addr, u8 *mac, bool ipv4);
/* ib_device registration/teardown entry points (defined in verbs.c) */
int irdma_ib_register_device(struct irdma_device *iwdev);
void irdma_ib_unregister_device(struct irdma_device *iwdev);
void irdma_ib_dealloc_device(struct ib_device *ibdev);
/* Dispatch an async QP event to the consumer's event handler. */
void irdma_ib_qp_event(struct irdma_qp *iwqp, enum irdma_qp_event_type event);
 225#endif /* IRDMA_VERBS_H */
 226