1
2
3
4
5
6
7#ifndef RXE_VERBS_H
8#define RXE_VERBS_H
9
10#include <linux/interrupt.h>
11#include <linux/workqueue.h>
12#include <rdma/rdma_user_rxe.h>
13#include "rxe_pool.h"
14#include "rxe_task.h"
15#include "rxe_hw_counters.h"
16
17static inline int pkey_match(u16 key1, u16 key2)
18{
19 return (((key1 & 0x7fff) != 0) &&
20 ((key1 & 0x7fff) == (key2 & 0x7fff)) &&
21 ((key1 & 0x8000) || (key2 & 0x8000))) ? 1 : 0;
22}
23
24
25
26
27
28static inline int psn_compare(u32 psn_a, u32 psn_b)
29{
30 s32 diff;
31
32 diff = (psn_a - psn_b) << 8;
33 return diff;
34}
35
/* rxe user context: per-process device context wrapping the core
 * ib_ucontext; @elem links it into the device's uc_pool.
 */
struct rxe_ucontext {
	struct ib_ucontext	ibuc;
	struct rxe_pool_elem	elem;
};
40
/* rxe protection domain: wraps the core ib_pd; @elem links it into
 * the device's pd_pool.
 */
struct rxe_pd {
	struct ib_pd		ibpd;
	struct rxe_pool_elem	elem;
};
45
/* rxe address handle */
struct rxe_ah {
	struct ib_ah		ibah;
	struct rxe_pool_elem	elem;
	struct rxe_av		av;	/* resolved address vector for this AH */
	bool			is_user;
	int			ah_num;	/* index identifying this AH to userspace
					 * — NOTE(review): inferred from name;
					 * confirm against rxe_av.c users */
};
53
/* One completion queue entry: kernel consumers read @ibwc, user
 * queues carry the uverbs layout @uibwc (selected by cq->is_user).
 */
struct rxe_cqe {
	union {
		struct ib_wc		ibwc;
		struct ib_uverbs_wc	uibwc;
	};
};
60
/* rxe completion queue */
struct rxe_cq {
	struct ib_cq		ibcq;
	struct rxe_pool_elem	elem;
	struct rxe_queue	*queue;		/* ring buffer holding rxe_cqe entries */
	spinlock_t		cq_lock;	/* serializes posting to @queue */
	u8			notify;		/* pending notification flags */
	bool			is_dying;
	bool			is_user;	/* queue mapped to userspace */
	struct tasklet_struct	comp_task;	/* runs completion callbacks */
};
71
/* lifecycle of a send work queue element as the requester and
 * completer process it
 */
enum wqe_state {
	wqe_state_posted,
	wqe_state_processing,
	wqe_state_pending,
	wqe_state_done,
	wqe_state_error,
};
79
/* send queue state for a QP */
struct rxe_sq {
	int			max_wr;		/* capacity in work requests */
	int			max_sge;	/* max scatter/gather entries per WR */
	int			max_inline;	/* max inline data bytes per WR */
	spinlock_t		sq_lock;	/* serializes posting to @queue */
	struct rxe_queue	*queue;
};
87
/* receive queue state for a QP (or embedded in an SRQ); producer and
 * consumer ends are locked independently.
 */
struct rxe_rq {
	int			max_wr;		/* capacity in work requests */
	int			max_sge;	/* max scatter/gather entries per WR */
	spinlock_t		producer_lock;	/* post-recv side */
	spinlock_t		consumer_lock;	/* responder dequeue side */
	struct rxe_queue	*queue;
};
95
/* rxe shared receive queue */
struct rxe_srq {
	struct ib_srq		ibsrq;
	struct rxe_pool_elem	elem;
	struct rxe_pd		*pd;
	struct rxe_rq		rq;		/* the shared receive queue proper */
	u32			srq_num;

	int			limit;		/* low-watermark for the limit event */
	int			error;
};
106
/* software QP state, mirroring the IB QP state machine
 * (RESET/INIT/RTR-RTS/SQD/ERR) for the requester and responder sides
 */
enum rxe_qp_state {
	QP_STATE_RESET,
	QP_STATE_INIT,
	QP_STATE_READY,
	QP_STATE_DRAIN,		/* req only */
	QP_STATE_DRAINED,	/* req only */
	QP_STATE_ERROR
};
115
/* requester-side state for a QP (generates outbound request packets) */
struct rxe_req_info {
	enum rxe_qp_state	state;
	int			wqe_index;	/* next SQ wqe to work on */
	u32			psn;		/* next PSN to send — inferred from name */
	int			opcode;		/* opcode of last packet built */
	atomic_t		rd_atomic;	/* outstanding RDMA read/atomic credits */
	int			wait_fence;	/* stalled on a fenced WR */
	int			need_rd_atomic;
	int			wait_psn;
	int			need_retry;
	int			noack_pkts;
	struct rxe_task		task;		/* deferred work driving the requester */
};
129
/* completer-side state for a QP (consumes inbound acks/responses and
 * retires send wqes)
 */
struct rxe_comp_info {
	u32			psn;
	int			opcode;
	int			timeout;	/* retransmit timer fired */
	int			timeout_retry;
	int			started_retry;
	u32			retry_cnt;	/* remaining transport retries */
	u32			rnr_retry;	/* remaining RNR retries */
	struct rxe_task		task;		/* deferred work driving the completer */
};
140
/* replay state of a responder read/atomic resource */
enum rdatm_res_state {
	rdatm_res_state_next,
	rdatm_res_state_new,
	rdatm_res_state_replay,
};
146
/* One responder resource: saved state for an RDMA read or atomic
 * request so a duplicate request can be replayed without re-executing.
 */
struct resp_res {
	int			type;		/* read vs atomic; selects union arm */
	int			replay;
	u32			first_psn;
	u32			last_psn;
	u32			cur_psn;
	enum rdatm_res_state	state;

	union {
		struct {
			/* saved atomic response packet for replay */
			struct sk_buff	*skb;
		} atomic;
		struct {
			u64	va_org;		/* original request VA */
			u32	rkey;
			u32	length;		/* total length of the read */
			u64	va;		/* current position */
			u32	resid;		/* bytes remaining */
		} read;
	};
};
168
/* responder-side state for a QP (services inbound request packets) */
struct rxe_resp_info {
	enum rxe_qp_state	state;
	u32			msn;		/* message sequence number */
	u32			psn;		/* next expected PSN — inferred from name */
	u32			ack_psn;
	int			opcode;		/* opcode of last request handled */
	int			drop_msg;
	int			goto_error;
	int			sent_psn_nak;
	enum ib_wc_status	status;
	u8			aeth_syndrome;	/* syndrome for the next ack/nak */

	/* receive wqe currently being filled */
	struct rxe_recv_wqe	*wqe;

	/* RDMA read/write/atomic operation in progress */
	u64			va;
	u64			offset;
	struct rxe_mr		*mr;		/* MR the operation targets */
	u32			resid;		/* bytes remaining */
	u32			rkey;
	u32			length;
	u64			atomic_orig;	/* value returned by an atomic op */

	/* SRQ only: local copy of the dequeued wqe and its SGEs, since
	 * the shared ring slot cannot be held across packets
	 */
	struct {
		struct rxe_recv_wqe	wqe;
		struct ib_sge		sge[RXE_MAX_SGE];
	} srq_wqe;

	/* Responder resources: circular buffer (res_head/res_tail) of
	 * saved read/atomic state used to replay duplicate requests;
	 * @res points at the entry currently in use.
	 */
	struct resp_res		*resources;
	unsigned int		res_head;
	unsigned int		res_tail;
	struct resp_res		*res;
	struct rxe_task		task;		/* deferred work driving the responder */
};
208
/* rxe queue pair: all software state for one QP, combining the core
 * ib_qp with the requester/completer/responder machinery above.
 */
struct rxe_qp {
	struct ib_qp		ibqp;
	struct rxe_pool_elem	elem;
	struct ib_qp_attr	attr;		/* cached QP attributes */
	unsigned int		valid;
	unsigned int		mtu;		/* path MTU in bytes */
	bool			is_user;

	struct rxe_pd		*pd;
	struct rxe_srq		*srq;		/* non-NULL when using a shared RQ */
	struct rxe_cq		*scq;		/* send completion queue */
	struct rxe_cq		*rcq;		/* receive completion queue */

	enum ib_sig_type	sq_sig_type;	/* all-signaled vs selectively-signaled SQ */

	struct rxe_sq		sq;
	struct rxe_rq		rq;		/* unused when @srq is set — TODO confirm */

	struct socket		*sk;		/* UDP socket packets are sent on */
	u32			dst_cookie;
	u16			src_port;	/* UDP source port for this QP's flows */

	struct rxe_av		pri_av;		/* primary path address vector */
	struct rxe_av		alt_av;		/* alternate path address vector */

	atomic_t		mcg_num;	/* multicast groups this QP is attached to */

	struct sk_buff_head	req_pkts;	/* inbound request packets (to responder) */
	struct sk_buff_head	resp_pkts;	/* inbound ack/response packets (to completer) */

	struct rxe_req_info	req;		/* requester state */
	struct rxe_comp_info	comp;		/* completer state */
	struct rxe_resp_info	resp;		/* responder state */

	atomic_t		ssn;		/* send sequence number */
	atomic_t		skb_out;	/* skbs in flight from this QP */
	int			need_req_skb;

	/* Timer for retransmitting an unacked request packet;
	 * qp_timeout_jiffies is the timeout interval derived from the
	 * QP's timeout attribute — NOTE(review): semantics inferred
	 * from names; confirm against rxe_qp.c/rxe_comp.c.
	 */
	struct timer_list retrans_timer;
	u64 qp_timeout_jiffies;

	/* timer to resume sending after an RNR NAK back-off */
	struct timer_list rnr_nak_timer;

	spinlock_t		state_lock;	/* guards QP state transitions */

	struct execute_work	cleanup_work;	/* deferred teardown of the QP */
};
262
/* lifecycle state of a memory region */
enum rxe_mr_state {
	RXE_MR_STATE_INVALID,
	RXE_MR_STATE_FREE,
	RXE_MR_STATE_VALID,
};
268
/* direction of a copy between packet data and MR memory */
enum rxe_mr_copy_dir {
	RXE_TO_MR_OBJ,
	RXE_FROM_MR_OBJ,
};
273
/* whether an MR lookup is keyed by lkey (local) or rkey (remote) */
enum rxe_mr_lookup_type {
	RXE_LOOKUP_LOCAL,
	RXE_LOOKUP_REMOTE,
};
278
/* number of rxe_phys_buf entries that fit in one page-sized rxe_map */
#define RXE_BUF_PER_MAP		(PAGE_SIZE / sizeof(struct rxe_phys_buf))

/* one physically contiguous buffer backing part of an MR */
struct rxe_phys_buf {
	u64			addr;
	u64			size;
};

/* a page-sized chunk of an MR's buffer table */
struct rxe_map {
	struct rxe_phys_buf	buf[RXE_BUF_PER_MAP];
};
289
/* one complete translation table for an MR: an array of rxe_map
 * pages describing the region's physical buffers
 */
struct rxe_map_set {
	struct rxe_map		**map;		/* array of @nbuf-worth of map pages */
	u64			va;		/* starting user virtual address */
	u64			iova;		/* starting IO virtual address */
	size_t			length;		/* region length in bytes */
	u32			offset;		/* offset of start within first buffer */
	u32			nbuf;		/* number of buffers populated */
	int			page_shift;	/* log2 of buffer size, when uniform */
	int			page_mask;
};
300
301static inline int rkey_is_mw(u32 rkey)
302{
303 u32 index = rkey >> 8;
304
305 return (index >= RXE_MIN_MW_INDEX) && (index <= RXE_MAX_MW_INDEX);
306}
307
/* rxe memory region */
struct rxe_mr {
	struct rxe_pool_elem	elem;
	struct ib_mr		ibmr;

	struct ib_umem		*umem;		/* pinned user memory, user MRs only */

	u32			lkey;
	u32			rkey;
	enum rxe_mr_state	state;
	enum ib_mr_type		type;		/* dma/user/fast-reg */
	int			access;		/* IB access flags */

	/* shift/mask used to index into the two-level map table */
	int			map_shift;
	int			map_mask;

	u32			num_buf;	/* buffers currently described */

	u32			max_buf;	/* capacity in buffers */
	u32			num_map;	/* number of map pages allocated */

	atomic_t		num_mw;		/* memory windows bound to this MR */

	/* two translation tables: @cur_map_set is live; @next_map_set
	 * is staged (e.g. by ib_map_mr_sg) and swapped in on a
	 * fast-register WR — NOTE(review): swap point inferred;
	 * confirm against rxe_mr.c.
	 */
	struct rxe_map_set	*cur_map_set;
	struct rxe_map_set	*next_map_set;
};
333
/* memory window state; values deliberately alias rxe_mr_state so
 * common key-checking code can treat MRs and MWs uniformly
 */
enum rxe_mw_state {
	RXE_MW_STATE_INVALID	= RXE_MR_STATE_INVALID,
	RXE_MW_STATE_FREE	= RXE_MR_STATE_FREE,
	RXE_MW_STATE_VALID	= RXE_MR_STATE_VALID,
};
339
/* rxe memory window: a sub-range of an MR granted remote access via
 * a bind operation
 */
struct rxe_mw {
	struct ib_mw		ibmw;
	struct rxe_pool_elem	elem;
	spinlock_t		lock;		/* guards bind/invalidate state changes */
	enum rxe_mw_state	state;
	struct rxe_qp		*qp;		/* QP the window was bound on (type 2) */
	struct rxe_mr		*mr;		/* MR the window is bound to */
	u32			rkey;
	int			access;		/* IB access flags granted */
	u64			addr;		/* start of the bound range */
	u64			length;		/* length of the bound range */
};
352
/* multicast group: keyed by @mgid in the device's mcg_tree, holding
 * the list of attached QPs
 */
struct rxe_mcg {
	struct rb_node		node;		/* entry in rxe->mcg_tree */
	struct kref		ref_cnt;
	struct rxe_dev		*rxe;		/* owning device */
	struct list_head	qp_list;	/* list of rxe_mca attachments */
	union ib_gid		mgid;		/* multicast GID (tree key) */
	atomic_t		qp_num;		/* number of attached QPs */
	u32			qkey;
	u16			pkey;
};
363
/* multicast attachment: links one QP onto a group's qp_list */
struct rxe_mca {
	struct list_head	qp_list;	/* entry in rxe_mcg.qp_list */
	struct rxe_qp		*qp;
};
368
/* per-port state (rxe devices expose a single port) */
struct rxe_port {
	struct ib_port_attr	attr;
	__be64			port_guid;
	__be64			subnet_prefix;
	spinlock_t		port_lock;
	unsigned int		mtu_cap;	/* max MTU supported by the netdev */

	u32			qp_smi_index;	/* index of the special SMI QP */
	u32			qp_gsi_index;	/* index of the special GSI QP */
};
379
/* rxe device: top-level per-device state, embedding the core
 * ib_device and holding the pools for every object type
 */
struct rxe_dev {
	struct ib_device	ib_dev;
	struct ib_device_attr	attr;		/* advertised device capabilities */
	int			max_ucontext;
	int			max_inline_data;
	struct mutex		usdev_lock;	/* serializes device-level updates */

	struct net_device	*ndev;		/* underlying Ethernet device */

	/* object pools, one per verbs object type */
	struct rxe_pool		uc_pool;
	struct rxe_pool		pd_pool;
	struct rxe_pool		ah_pool;
	struct rxe_pool		srq_pool;
	struct rxe_pool		qp_pool;
	struct rxe_pool		cq_pool;
	struct rxe_pool		mr_pool;
	struct rxe_pool		mw_pool;
	struct rxe_pool		mc_grp_pool;

	/* multicast group tree and bookkeeping */
	spinlock_t		mcg_lock;	/* guards mcg_tree and counters */
	struct rb_root		mcg_tree;	/* rxe_mcg nodes keyed by mgid */
	atomic_t		mcg_num;	/* number of groups */
	atomic_t		mcg_attach;	/* total QP attachments */

	spinlock_t		pending_lock;	/* guards pending_mmaps */
	struct list_head	pending_mmaps;	/* queue infos awaiting user mmap */

	spinlock_t		mmap_offset_lock;
	u64			mmap_offset;	/* next offset handed out for mmap */

	atomic64_t		stats_counters[RXE_NUM_OF_COUNTERS];

	struct rxe_port		port;
	struct crypto_shash	*tfm;		/* hash transform (used for ICRC) —
						 * TODO confirm against rxe_icrc.c */
};
416
/* bump one of the device's hw_counters statistics */
static inline void rxe_counter_inc(struct rxe_dev *rxe, enum rxe_counters index)
{
	atomic64_inc(&rxe->stats_counters[index]);
}
421
422static inline struct rxe_dev *to_rdev(struct ib_device *dev)
423{
424 return dev ? container_of(dev, struct rxe_dev, ib_dev) : NULL;
425}
426
427static inline struct rxe_ucontext *to_ruc(struct ib_ucontext *uc)
428{
429 return uc ? container_of(uc, struct rxe_ucontext, ibuc) : NULL;
430}
431
432static inline struct rxe_pd *to_rpd(struct ib_pd *pd)
433{
434 return pd ? container_of(pd, struct rxe_pd, ibpd) : NULL;
435}
436
437static inline struct rxe_ah *to_rah(struct ib_ah *ah)
438{
439 return ah ? container_of(ah, struct rxe_ah, ibah) : NULL;
440}
441
442static inline struct rxe_srq *to_rsrq(struct ib_srq *srq)
443{
444 return srq ? container_of(srq, struct rxe_srq, ibsrq) : NULL;
445}
446
447static inline struct rxe_qp *to_rqp(struct ib_qp *qp)
448{
449 return qp ? container_of(qp, struct rxe_qp, ibqp) : NULL;
450}
451
452static inline struct rxe_cq *to_rcq(struct ib_cq *cq)
453{
454 return cq ? container_of(cq, struct rxe_cq, ibcq) : NULL;
455}
456
457static inline struct rxe_mr *to_rmr(struct ib_mr *mr)
458{
459 return mr ? container_of(mr, struct rxe_mr, ibmr) : NULL;
460}
461
462static inline struct rxe_mw *to_rmw(struct ib_mw *mw)
463{
464 return mw ? container_of(mw, struct rxe_mw, ibmw) : NULL;
465}
466
467static inline struct rxe_pd *rxe_ah_pd(struct rxe_ah *ah)
468{
469 return to_rpd(ah->ibah.pd);
470}
471
472static inline struct rxe_pd *mr_pd(struct rxe_mr *mr)
473{
474 return to_rpd(mr->ibmr.pd);
475}
476
477static inline struct rxe_pd *rxe_mw_pd(struct rxe_mw *mw)
478{
479 return to_rpd(mw->ibmw.pd);
480}
481
/* Register the device with the RDMA core under @ibdev_name; returns 0
 * on success or a negative errno (presumably, per kernel convention —
 * confirm in rxe_verbs.c).
 */
int rxe_register_device(struct rxe_dev *rxe, const char *ibdev_name);
483
484#endif
485