#ifndef __QEDR_H__
#define __QEDR_H__

#include <linux/pci.h>
#include <rdma/ib_addr.h>
#include <linux/qed/qed_if.h>
#include <linux/qed/qed_chain.h>
#include <linux/qed/qed_rdma_if.h>
#include <linux/qed/qede_rdma.h>
#include <linux/qed/roce_common.h>
#include "qedr_hsi_rdma.h"

#define QEDR_NODE_DESC "QLogic 579xx RoCE HCA"
#define DP_NAME(dev) ((dev)->ibdev.name)

#define DP_DEBUG(dev, module, fmt, ...) \
	pr_debug("(%s) " module ": " fmt, \
		 DP_NAME(dev) ? DP_NAME(dev) : "", ## __VA_ARGS__)

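/*
 * Per-module tags passed to DP_DEBUG(). Illustrative use only (assuming a
 * valid struct qedr_dev *dev and struct qedr_cq *cq in the caller):
 *
 *	DP_DEBUG(dev, QEDR_MSG_CQ, "created cq, icid=%d\n", cq->icid);
 */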
#define QEDR_MSG_INIT "INIT"
#define QEDR_MSG_MISC "MISC"
#define QEDR_MSG_CQ " CQ"
#define QEDR_MSG_MR " MR"
#define QEDR_MSG_RQ " RQ"
#define QEDR_MSG_SQ " SQ"
#define QEDR_MSG_QP " QP"
#define QEDR_MSG_GSI " GSI"

#define QEDR_CQ_MAGIC_NUMBER (0x11223344)

#define FW_PAGE_SIZE (RDMA_RING_PAGE_SIZE)
#define FW_PAGE_SHIFT (12)

struct qedr_dev;

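/*
 * Completion Notification Queue (CNQ) context. CQ arm notifications from the
 * device are delivered through the PBL-backed chain; hw_cons_ptr mirrors the
 * hardware consumer index kept in the associated status block.
 */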
struct qedr_cnq {
	struct qedr_dev *dev;
	struct qed_chain pbl;
	struct qed_sb_info *sb;
	char name[32];
	u64 n_comp;
	__le16 *hw_cons_ptr;
	u8 index;
};

#define QEDR_MAX_SGID 128

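/* RDMA device limits and capabilities reported by the qed core at probe time. */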
struct qedr_device_attr {
	u32 vendor_id;
	u32 vendor_part_id;
	u32 hw_ver;
	u64 fw_ver;
	u64 node_guid;
	u64 sys_image_guid;
	u8 max_cnq;
	u8 max_sge;
	u16 max_inline;
	u32 max_sqe;
	u32 max_rqe;
	u8 max_qp_resp_rd_atomic_resc;
	u8 max_qp_req_rd_atomic_resc;
	u64 max_dev_resp_rd_atomic_resc;
	u32 max_cq;
	u32 max_qp;
	u32 max_mr;
	u64 max_mr_size;
	u32 max_cqe;
	u32 max_mw;
	u32 max_fmr;
	u32 max_mr_mw_fmr_pbl;
	u64 max_mr_mw_fmr_size;
	u32 max_pd;
	u32 max_ah;
	u8 max_pkey;
	u32 max_srq;
	u32 max_srq_wr;
	u8 max_srq_sge;
	u8 max_stats_queues;
	u32 dev_caps;

	u64 page_size_caps;
	u8 dev_ack_delay;
	u32 reserved_lkey;
	u32 bad_pkey_counter;
	struct qed_rdma_events events;
};

#define QEDR_ENET_STATE_BIT (0)

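/*
 * Per-PCI-function RoCE device state. Embeds the ib_device registered with
 * the RDMA core and caches the qed handles, doorbell BAR mapping, CNQ and
 * status-block arrays, and GSI (QP1) resources.
 */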
struct qedr_dev {
	struct ib_device ibdev;
	struct qed_dev *cdev;
	struct pci_dev *pdev;
	struct net_device *ndev;

	enum ib_atomic_cap atomic_cap;

	void *rdma_ctx;
	struct qedr_device_attr attr;

	const struct qed_rdma_ops *ops;
	struct qed_int_info int_info;

	struct qed_sb_info *sb_array;
	struct qedr_cnq *cnq_array;
	int num_cnq;
	int sb_start;

	void __iomem *db_addr;
	u64 db_phys_addr;
	u32 db_size;
	u16 dpi;

	union ib_gid *sgid_tbl;

	/* Lock for the sgid table */
	spinlock_t sgid_lock;

	u64 guid;

	u32 dp_module;
	u8 dp_level;
	u8 num_hwfns;
	u8 gsi_ll2_handle;

	uint wq_multiplier;
	u8 gsi_ll2_mac_address[ETH_ALEN];
	int gsi_qp_created;
	struct qedr_cq *gsi_sqcq;
	struct qedr_cq *gsi_rqcq;
	struct qedr_qp *gsi_qp;

	unsigned long enet_state;

	u8 user_dpm_enabled;
};

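/*
 * Work queue sizing. The maximum number of WQEs is derived from the PBL
 * capacity: pbl_entries * page_size gives the total ring size in bytes,
 * which is divided by the element size and by the worst-case number of
 * elements a single WQE can occupy.
 */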
#define QEDR_MAX_SQ_PBL			(0x8000)
#define QEDR_MAX_SQ_PBL_ENTRIES		(0x10000 / sizeof(void *))
#define QEDR_SQE_ELEMENT_SIZE		(sizeof(struct rdma_sq_sge))
#define QEDR_MAX_SQE_ELEMENTS_PER_SQE	(ROCE_REQ_MAX_SINGLE_SQ_WQE_SIZE / \
					 QEDR_SQE_ELEMENT_SIZE)
#define QEDR_MAX_SQE_ELEMENTS_PER_PAGE	((RDMA_RING_PAGE_SIZE) / \
					 QEDR_SQE_ELEMENT_SIZE)
#define QEDR_MAX_SQE			((QEDR_MAX_SQ_PBL_ENTRIES) * \
					 (RDMA_RING_PAGE_SIZE) / \
					 (QEDR_SQE_ELEMENT_SIZE) / \
					 (QEDR_MAX_SQE_ELEMENTS_PER_SQE))

#define QEDR_MAX_RQ_PBL			(0x2000)
#define QEDR_MAX_RQ_PBL_ENTRIES		(0x10000 / sizeof(void *))
#define QEDR_RQE_ELEMENT_SIZE		(sizeof(struct rdma_rq_sge))
#define QEDR_MAX_RQE_ELEMENTS_PER_RQE	(RDMA_MAX_SGE_PER_RQ_WQE)
#define QEDR_MAX_RQE_ELEMENTS_PER_PAGE	((RDMA_RING_PAGE_SIZE) / \
					 QEDR_RQE_ELEMENT_SIZE)
#define QEDR_MAX_RQE			((QEDR_MAX_RQ_PBL_ENTRIES) * \
					 (RDMA_RING_PAGE_SIZE) / \
					 (QEDR_RQE_ELEMENT_SIZE) / \
					 (QEDR_MAX_RQE_ELEMENTS_PER_RQE))

#define QEDR_CQE_SIZE			(sizeof(union rdma_cqe))
#define QEDR_MAX_CQE_PBL_SIZE		(512 * 1024)
#define QEDR_MAX_CQE_PBL_ENTRIES	(((QEDR_MAX_CQE_PBL_SIZE) / \
					  sizeof(u64)) - 1)
#define QEDR_MAX_CQES			((u32)((QEDR_MAX_CQE_PBL_ENTRIES) * \
					       (QED_CHAIN_PAGE_SIZE) / QEDR_CQE_SIZE))

#define QEDR_ROCE_MAX_CNQ_SIZE		(0x4000)

#define QEDR_MAX_PORT			(1)
#define QEDR_PORT			(1)

#define QEDR_UVERBS(CMD_NAME) (1ull << IB_USER_VERBS_CMD_##CMD_NAME)

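/* RoCE exposes a single partition key, the default full-membership 0xffff. */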
#define QEDR_ROCE_PKEY_MAX 1
#define QEDR_ROCE_PKEY_TABLE_LEN 1
#define QEDR_ROCE_PKEY_DEFAULT 0xffff

struct qedr_pbl {
	struct list_head list_entry;
	void *va;
	dma_addr_t pa;
};

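/*
 * Per-user-context state: the doorbell page (DPI) exported to user space and
 * the list of regions the driver allows to be mmap()ed.
 */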
struct qedr_ucontext {
	struct ib_ucontext ibucontext;
	struct qedr_dev *dev;
	struct qedr_pd *pd;
	u64 dpi_addr;
	u64 dpi_phys_addr;
	u32 dpi_size;
	u16 dpi;

	struct list_head mm_head;

	/* Lock to protect the mm list */
	struct mutex mm_list_lock;
};

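/*
 * CQ doorbell payload: the PWM doorbell data overlaid on a raw u64 so the
 * whole record can be written to the doorbell BAR in a single 64-bit store.
 */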
union db_prod64 {
	struct rdma_pwm_val32_data data;
	u64 raw;
};

enum qedr_cq_type {
	QEDR_CQ_TYPE_GSI,
	QEDR_CQ_TYPE_KERNEL,
	QEDR_CQ_TYPE_USER,
};

struct qedr_pbl_info {
	u32 num_pbls;
	u32 num_pbes;
	u32 pbl_size;
	u32 pbe_size;
	bool two_layered;
};

struct qedr_userq {
	struct ib_umem *umem;
	struct qedr_pbl_info pbl_info;
	struct qedr_pbl *pbl_tbl;
	u64 buf_addr;
	size_t buf_len;
};

struct qedr_cq {
	struct ib_cq ibcq;

	enum qedr_cq_type cq_type;
	u32 sig;

	u16 icid;

	/* Lock to protect CQ processing */
	spinlock_t cq_lock;
	u8 arm_flags;
	struct qed_chain pbl;

	void __iomem *db_addr;
	union db_prod64 db;

	u8 pbl_toggle;
	union rdma_cqe *latest_cqe;
	union rdma_cqe *toggle_cqe;

	u32 cq_cons;

	struct qedr_userq q;
	u8 destroyed;
	u16 cnq_notif;
};

struct qedr_pd {
	struct ib_pd ibpd;
	u32 pd_id;
	struct qedr_ucontext *uctx;
};

struct qedr_mm {
	struct {
		u64 phy_addr;
		unsigned long len;
	} key;
	struct list_head entry;
};

union db_prod32 {
	struct rdma_pwm_val16_data data;
	u32 raw;
};

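/*
 * Software view of one hardware work queue (SQ or RQ): the PBL-backed qed
 * chain, producer/consumer indices, and the doorbell used to ring it.
 */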
struct qedr_qp_hwq_info {
	/* WQE elements */
	struct qed_chain pbl;
	u64 p_phys_addr_tbl;
	u32 max_sges;

	/* Work request ring indices */
	u16 prod;
	u16 cons;
	u16 wqe_cons;
	u16 gsi_cons;
	u16 max_wr;

	/* Producer doorbell */
	void __iomem *db;
	union db_prod32 db_data;
};

#define QEDR_INC_SW_IDX(p_info, index)					\
	do {								\
		p_info->index = (p_info->index + 1) &			\
				qed_chain_get_capacity(p_info->pbl);	\
	} while (0)

enum qedr_qp_err_bitmap {
	QEDR_QP_ERR_SQ_FULL = 1,
	QEDR_QP_ERR_RQ_FULL = 2,
	QEDR_QP_ERR_BAD_SR = 4,
	QEDR_QP_ERR_BAD_RR = 8,
	QEDR_QP_ERR_SQ_PBL_FULL = 16,
	QEDR_QP_ERR_RQ_PBL_FULL = 32,
};

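/*
 * Driver QP. Kernel QPs shadow every posted work request in wqe_wr_id /
 * rqe_wr_id so completions can be generated when the hardware consumes them;
 * user QPs keep their rings mapped through usq / urq instead.
 */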
struct qedr_qp {
	struct ib_qp ibqp;
	struct qedr_dev *dev;

	struct qedr_qp_hwq_info sq;
	struct qedr_qp_hwq_info rq;

	u32 max_inline_data;

	/* Lock serializing access to the QP's queues */
	spinlock_t q_lock;
	struct qedr_cq *sq_cq;
	struct qedr_cq *rq_cq;
	struct qedr_srq *srq;
	enum qed_roce_qp_state state;
	u32 id;
	struct qedr_pd *pd;
	enum ib_qp_type qp_type;
	struct qed_rdma_qp *qed_qp;
	u32 qp_id;
	u16 icid;
	u16 mtu;
	int sgid_idx;
	u32 rq_psn;
	u32 sq_psn;
	u32 qkey;
	u32 dest_qp_num;

	u8 prev_wqe_size;
	u16 wqe_cons;
	u32 err_bitmap;
	bool signaled;

	/* Shadow of posted SQ work requests (kernel QPs only) */
	struct {
		u64 wr_id;
		enum ib_wc_opcode opcode;
		u32 bytes_len;
		u8 wqe_size;
		bool signaled;
		dma_addr_t icrc_mapping;
		u32 *icrc;
		struct qedr_mr *mr;
	} *wqe_wr_id;

	/* Shadow of posted RQ work requests (kernel QPs only) */
	struct {
		u64 wr_id;
		struct ib_sge sg_list[RDMA_MAX_SGE_PER_RQ_WQE];
		u8 wqe_size;

		u8 smac[ETH_ALEN];
		u16 vlan;
		int rc;
	} *rqe_wr_id;

	/* User-space queue mappings (user QPs only) */
	struct qedr_userq usq;
	struct qedr_userq urq;
};

struct qedr_ah {
	struct ib_ah ibah;
	struct rdma_ah_attr attr;
};

enum qedr_mr_type {
	QEDR_MR_USER,
	QEDR_MR_KERNEL,
	QEDR_MR_DMA,
	QEDR_MR_FRMR,
};

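/*
 * PBL bookkeeping shared by memory regions and fast-register WRs; tables move
 * between the free and in-use lists as registrations complete.
 */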
struct mr_info {
	struct qedr_pbl *pbl_table;
	struct qedr_pbl_info pbl_info;
	struct list_head free_pbl_list;
	struct list_head inuse_pbl_list;
	u32 completed;
	u32 completed_handled;
};

struct qedr_mr {
	struct ib_mr ibmr;
	struct ib_umem *umem;

	struct qed_rdma_register_tid_in_params hw_mr;
	enum qedr_mr_type type;

	struct qedr_dev *dev;
	struct mr_info info;

	u64 *pages;
	u32 npages;
};

#define SET_FIELD2(value, name, flag) ((value) |= ((flag) << (name ## _SHIFT)))

#define QEDR_RESP_IMM	(RDMA_CQE_RESPONDER_IMM_FLG_MASK << \
			 RDMA_CQE_RESPONDER_IMM_FLG_SHIFT)
#define QEDR_RESP_RDMA	(RDMA_CQE_RESPONDER_RDMA_FLG_MASK << \
			 RDMA_CQE_RESPONDER_RDMA_FLG_SHIFT)
#define QEDR_RESP_INV	(RDMA_CQE_RESPONDER_INV_FLG_MASK << \
			 RDMA_CQE_RESPONDER_INV_FLG_SHIFT)

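/*
 * Ring-index helpers: advance the software consumer/producer of a work queue,
 * wrapping at max_wr. wqe_cons keeps a running count of consumed WQEs.
 */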
static inline void qedr_inc_sw_cons(struct qedr_qp_hwq_info *info)
{
	info->cons = (info->cons + 1) % info->max_wr;
	info->wqe_cons++;
}

static inline void qedr_inc_sw_prod(struct qedr_qp_hwq_info *info)
{
	info->prod = (info->prod + 1) % info->max_wr;
}

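/*
 * Resolve the destination MAC for an address handle: a zero DGID is rejected
 * with -EINVAL, otherwise the DMAC already resolved by the RDMA core is
 * copied into mac_addr.
 */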
static inline int qedr_get_dmac(struct qedr_dev *dev,
				struct rdma_ah_attr *ah_attr, u8 *mac_addr)
{
	union ib_gid zero_sgid = { { 0 } };
	struct in6_addr in6;
	const struct ib_global_route *grh = rdma_ah_read_grh(ah_attr);
	u8 *dmac;

	if (!memcmp(&grh->dgid, &zero_sgid, sizeof(union ib_gid))) {
		DP_ERR(dev, "Local port GID not supported\n");
		eth_zero_addr(mac_addr);
		return -EINVAL;
	}

	memcpy(&in6, grh->dgid.raw, sizeof(in6));
	dmac = rdma_ah_retrieve_dmac(ah_attr);
	if (!dmac)
		return -EINVAL;
	ether_addr_copy(mac_addr, dmac);

	return 0;
}

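/*
 * Converters from RDMA core objects to the qedr structures that embed them.
 * Illustrative use only (assuming a valid struct ib_cq *ibcq from the core):
 *
 *	struct qedr_cq *cq = get_qedr_cq(ibcq);
 */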
static inline
struct qedr_ucontext *get_qedr_ucontext(struct ib_ucontext *ibucontext)
{
	return container_of(ibucontext, struct qedr_ucontext, ibucontext);
}

static inline struct qedr_dev *get_qedr_dev(struct ib_device *ibdev)
{
	return container_of(ibdev, struct qedr_dev, ibdev);
}

static inline struct qedr_pd *get_qedr_pd(struct ib_pd *ibpd)
{
	return container_of(ibpd, struct qedr_pd, ibpd);
}

static inline struct qedr_cq *get_qedr_cq(struct ib_cq *ibcq)
{
	return container_of(ibcq, struct qedr_cq, ibcq);
}

static inline struct qedr_qp *get_qedr_qp(struct ib_qp *ibqp)
{
	return container_of(ibqp, struct qedr_qp, ibqp);
}

static inline struct qedr_ah *get_qedr_ah(struct ib_ah *ibah)
{
	return container_of(ibah, struct qedr_ah, ibah);
}

static inline struct qedr_mr *get_qedr_mr(struct ib_mr *ibmr)
{
	return container_of(ibmr, struct qedr_mr, ibmr);
}
#endif