1#ifndef DEF_RDMAVT_INCQP_H
2#define DEF_RDMAVT_INCQP_H
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51#include <rdma/rdma_vt.h>
52#include <rdma/ib_pack.h>
53#include <rdma/ib_verbs.h>
54#include <rdma/rdmavt_cq.h>
55
56
57
/*
 * Atomic bit definitions for r_aflags (qp->r_aflags is an unsigned long,
 * manipulated with the test/set/clear bit operations).
 */
#define RVT_R_WRID_VALID 0
#define RVT_R_REWIND_SGE 1

/*
 * Bit mask definitions for r_flags (non-atomic receive-side state flags).
 *
 * RVT_R_REUSE_SGE - reuse the current receive SGE state
 * RVT_R_RDMAR_SEQ - an RDMA read response arrived out of sequence
 * RVT_R_RSP_NAK - a NAK response is owed
 * RVT_R_RSP_SEND - a send response is owed
 * RVT_R_COMM_EST - communication established event pending
 * NOTE(review): per-flag meanings inferred from names; confirm against
 * the rdmavt/driver receive paths.
 */
#define RVT_R_REUSE_SGE 0x01
#define RVT_R_RDMAR_SEQ 0x02
#define RVT_R_RSP_NAK 0x04
#define RVT_R_RSP_SEND 0x08
#define RVT_R_COMM_EST 0x10
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
/*
 * Bit definitions for s_flags.
 *
 * RVT_S_SIGNAL_REQ_WR - set if QP send WRs contain completion signaled
 * RVT_S_BUSY - send tasklet is processing the QP
 * RVT_S_TIMER - the RC retry timer is active
 * RVT_S_RESP_PENDING - responder is waiting for a ready packet
 * RVT_S_ACK_PENDING - an ACK is waiting to be sent
 * RVT_S_WAIT_FENCE - waiting for all prior RDMA read or atomic SWQEs
 *                    to complete before processing the next SWQE
 * RVT_S_WAIT_RDMAR - waiting for an RDMA read or atomic SWQE to complete
 * RVT_S_WAIT_RNR - waiting for the RNR timeout
 * RVT_S_WAIT_SSN_CREDIT - waiting for RC credits to process next SWQE
 * RVT_S_WAIT_DMA - waiting for the send DMA queue to drain before
 *                  generating the next send completion
 * RVT_S_WAIT_PIO - waiting for a send buffer to be available
 * RVT_S_WAIT_TX - waiting for a transmit descriptor to be available
 * RVT_S_WAIT_DMA_DESC - waiting for DMA descriptors to be available
 * RVT_S_WAIT_KMEM - waiting for kernel memory to be available
 * RVT_S_WAIT_PSN - waiting for a packet to exit the send DMA queue
 * RVT_S_WAIT_ACK - waiting for an ACK packet before sending more requests
 * RVT_S_SEND_ONE - send one packet, request ACK, then wait for ACK
 * RVT_S_UNLIMITED_CREDIT - respond with unlimited credits
 * RVT_S_ECN - a congestion notification was queued
 * RVT_S_MAX_BIT_MASK - the highest bit reserved by rdmavt; bits above
 *                      it are available for driver-private use
 */
#define RVT_S_SIGNAL_REQ_WR 0x0001
#define RVT_S_BUSY 0x0002
#define RVT_S_TIMER 0x0004
#define RVT_S_RESP_PENDING 0x0008
#define RVT_S_ACK_PENDING 0x0010
#define RVT_S_WAIT_FENCE 0x0020
#define RVT_S_WAIT_RDMAR 0x0040
#define RVT_S_WAIT_RNR 0x0080
#define RVT_S_WAIT_SSN_CREDIT 0x0100
#define RVT_S_WAIT_DMA 0x0200
#define RVT_S_WAIT_PIO 0x0400
#define RVT_S_WAIT_TX 0x0800
#define RVT_S_WAIT_DMA_DESC 0x1000
#define RVT_S_WAIT_KMEM 0x2000
#define RVT_S_WAIT_PSN 0x4000
#define RVT_S_WAIT_ACK 0x8000
#define RVT_S_SEND_ONE 0x10000
#define RVT_S_UNLIMITED_CREDIT 0x20000
#define RVT_S_ECN 0x40000
#define RVT_S_MAX_BIT_MASK 0x800000

/* Wait conditions caused by resource (I/O) shortages */
#define RVT_S_ANY_WAIT_IO \
 (RVT_S_WAIT_PIO | RVT_S_WAIT_TX | \
 RVT_S_WAIT_DMA_DESC | RVT_S_WAIT_KMEM)

/* Wait conditions caused by the send state machine / protocol */
#define RVT_S_ANY_WAIT_SEND (RVT_S_WAIT_FENCE | RVT_S_WAIT_RDMAR | \
 RVT_S_WAIT_RNR | RVT_S_WAIT_SSN_CREDIT | RVT_S_WAIT_DMA | \
 RVT_S_WAIT_PSN | RVT_S_WAIT_ACK)

/* Any wait condition at all */
#define RVT_S_ANY_WAIT (RVT_S_ANY_WAIT_IO | RVT_S_ANY_WAIT_SEND)
137
138
/* Number of bits to pay attention to in the opcode for checking qp type */
#define RVT_OPCODE_QP_MASK 0xE0

/*
 * Flags describing what is legal in the QP's current state.
 * NOTE(review): presumably returned by ib_rvt_state_ops[] indexed by
 * qp->state — confirm in rdmavt qp.c.
 */
#define RVT_POST_SEND_OK 0x01
#define RVT_POST_RECV_OK 0x02
#define RVT_PROCESS_RECV_OK 0x04
#define RVT_PROCESS_SEND_OK 0x08
#define RVT_PROCESS_NEXT_SEND_OK 0x10
#define RVT_FLUSH_SEND 0x20
#define RVT_FLUSH_RECV 0x40
#define RVT_PROCESS_OR_FLUSH_SEND \
 (RVT_PROCESS_SEND_OK | RVT_FLUSH_SEND)
#define RVT_SEND_OR_FLUSH_OR_RECV_OK \
 (RVT_PROCESS_SEND_OK | RVT_FLUSH_SEND | RVT_PROCESS_RECV_OK)

/*
 * Internal send flags, stored in wr.send_flags.  They occupy bits at and
 * above IB_SEND_RESERVED_START so they cannot collide with the
 * user-visible IB_SEND_* flags.
 */
#define RVT_SEND_RESERVE_USED IB_SEND_RESERVED_START
#define RVT_SEND_COMPLETION_ONLY (IB_SEND_RESERVED_START << 1)
159
160
161
162
163
164
165struct rvt_swqe {
166 union {
167 struct ib_send_wr wr;
168 struct ib_ud_wr ud_wr;
169 struct ib_reg_wr reg_wr;
170 struct ib_rdma_wr rdma_wr;
171 struct ib_atomic_wr atomic_wr;
172 };
173 u32 psn;
174 u32 lpsn;
175 u32 ssn;
176 u32 length;
177 struct rvt_sge sg_list[0];
178};
179
180
181
182
183
184
185struct rvt_rwqe {
186 u64 wr_id;
187 u8 num_sge;
188 struct ib_sge sg_list[0];
189};
190
191
192
193
194
195
196
197
198
199struct rvt_rwq {
200 u32 head;
201 u32 tail;
202 struct rvt_rwqe wq[0];
203};
204
/* Kernel-side receive queue state wrapping the shared struct rvt_rwq. */
struct rvt_rq {
	struct rvt_rwq *wq;	/* the shared (possibly mmapped) RWQ */
	u32 size;		/* number of RWQE entries */
	u8 max_sge;		/* SGEs per RWQE; fixes the entry stride */
	/* protect changes in this struct */
	spinlock_t lock ____cacheline_aligned_in_smp;
};
212
213
214
215
216
217
/*
 * Bookkeeping for a kernel object that user space can mmap().
 * NOTE(review): presumably used to validate the offset of an incoming
 * mmap() request against pending_mmaps — confirm in rdmavt mmap.c.
 */
struct rvt_mmap_info {
	struct list_head pending_mmaps;	/* entry on the device's pending list */
	struct ib_ucontext *context;	/* owning user context */
	void *obj;			/* kernel address of the shared object */
	__u64 offset;			/* mmap offset identifying this entry */
	struct kref ref;		/* reference count for this mapping */
	unsigned size;			/* size of the region in bytes */
};
226
227
228
229
230
/*
 * This structure holds the information that the send engine needs to
 * send an RDMA read response or atomic operation response.
 */
struct rvt_ack_entry {
	struct rvt_sge rdma_sge;	/* data for an RDMA read response */
	u64 atomic_data;		/* result for an atomic response */
	u32 psn;			/* first PSN of the response */
	u32 lpsn;			/* last PSN of the response */
	u8 opcode;			/* opcode of the request being answered */
	u8 sent;			/* non-zero once the response was sent */
};
239
/* NOTE(review): scaling interval used when sizing RC QP resources — confirm usage in drivers */
#define RC_QP_SCALING_INTERVAL 5

/*
 * Flags for entries in the driver's rvt_operation_params table,
 * describing how each work-request opcode must be handled.
 */
#define RVT_OPERATION_PRIV 0x00000001		/* driver-private operation */
#define RVT_OPERATION_ATOMIC 0x00000002		/* atomic operation */
#define RVT_OPERATION_ATOMIC_SGE 0x00000004	/* atomic op with SGE handling */
#define RVT_OPERATION_LOCAL 0x00000008		/* local (non-wire) operation */
#define RVT_OPERATION_USE_RESERVE 0x00000010	/* may use reserved queue entries */

/* Table size: one slot per IB work-request opcode, reserved ones included */
#define RVT_OPERATION_MAX (IB_WR_RESERVED10 + 1)
249
250
251
252
253
254
255
256
257
258
259
260
261
/**
 * struct rvt_operation_params - per-opcode post-send parameters
 * @length: the length to copy into the swqe entry for this opcode
 * @qpt_support: a bit mask indicating which QP types support the opcode
 * @flags: RVT_OPERATION_* flags
 *
 * Supports table-driven post send so each driver can register a
 * potentially different set of supported operations.
 */
struct rvt_operation_params {
	size_t length;
	u32 qpt_support;
	u32 flags;
};
267
268
269
270
271
272struct rvt_qp {
273 struct ib_qp ibqp;
274 void *priv;
275
276 struct rdma_ah_attr remote_ah_attr;
277 struct rdma_ah_attr alt_ah_attr;
278 struct rvt_qp __rcu *next;
279 struct rvt_swqe *s_wq;
280 struct rvt_mmap_info *ip;
281
282 unsigned long timeout_jiffies;
283
284 int srate_mbps;
285 pid_t pid;
286 u32 remote_qpn;
287 u32 qkey;
288 u32 s_size;
289
290 u16 pmtu;
291 u8 log_pmtu;
292 u8 state;
293 u8 allowed_ops;
294 u8 qp_access_flags;
295 u8 alt_timeout;
296 u8 timeout;
297 u8 s_srate;
298 u8 s_mig_state;
299 u8 port_num;
300 u8 s_pkey_index;
301 u8 s_alt_pkey_index;
302 u8 r_max_rd_atomic;
303 u8 s_max_rd_atomic;
304 u8 s_retry_cnt;
305 u8 s_rnr_retry_cnt;
306 u8 r_min_rnr_timer;
307 u8 s_max_sge;
308 u8 s_draining;
309
310
311 atomic_t refcount ____cacheline_aligned_in_smp;
312 wait_queue_head_t wait;
313
314 struct rvt_ack_entry *s_ack_queue;
315 struct rvt_sge_state s_rdma_read_sge;
316
317 spinlock_t r_lock ____cacheline_aligned_in_smp;
318 u32 r_psn;
319 unsigned long r_aflags;
320 u64 r_wr_id;
321 u32 r_ack_psn;
322 u32 r_len;
323 u32 r_rcv_len;
324 u32 r_msn;
325
326 u8 r_state;
327 u8 r_flags;
328 u8 r_head_ack_queue;
329 u8 r_adefered;
330
331 struct list_head rspwait;
332
333 struct rvt_sge_state r_sge;
334 struct rvt_rq r_rq;
335
336
337 spinlock_t s_hlock ____cacheline_aligned_in_smp;
338 u32 s_head;
339 u32 s_next_psn;
340 u32 s_avail;
341 u32 s_ssn;
342 atomic_t s_reserved_used;
343
344 spinlock_t s_lock ____cacheline_aligned_in_smp;
345 u32 s_flags;
346 struct rvt_sge_state *s_cur_sge;
347 struct rvt_swqe *s_wqe;
348 struct rvt_sge_state s_sge;
349 struct rvt_mregion *s_rdma_mr;
350 u32 s_len;
351 u32 s_rdma_read_len;
352 u32 s_last_psn;
353 u32 s_sending_psn;
354 u32 s_sending_hpsn;
355 u32 s_psn;
356 u32 s_ack_rdma_psn;
357 u32 s_ack_psn;
358 u32 s_tail;
359 u32 s_cur;
360 u32 s_acked;
361 u32 s_last;
362 u32 s_lsn;
363 u32 s_ahgpsn;
364 u16 s_cur_size;
365 u16 s_rdma_ack_cnt;
366 u8 s_hdrwords;
367 s8 s_ahgidx;
368 u8 s_state;
369 u8 s_ack_state;
370 u8 s_nak_state;
371 u8 r_nak_state;
372 u8 s_retry;
373 u8 s_rnr_retry;
374 u8 s_num_rd_atomic;
375 u8 s_tail_ack_queue;
376
377 struct rvt_sge_state s_ack_rdma_sge;
378 struct timer_list s_timer;
379 struct hrtimer s_rnr_timer;
380
381 atomic_t local_ops_pending;
382
383
384
385
386 struct rvt_sge r_sg_list[0]
387 ____cacheline_aligned_in_smp;
388};
389
/* Shared receive queue. */
struct rvt_srq {
	struct ib_srq ibsrq;
	struct rvt_rq rq;		/* the actual receive queue */
	struct rvt_mmap_info *ip;	/* user-space mapping info, if any */
	/* send signal when number of RWQEs < limit */
	u32 limit;
};
397
/*
 * QPN allocator constants: QP numbers are 24 bits wide and tracked in a
 * bitmap split into page-sized chunks of RVT_BITS_PER_PAGE bits each.
 */
#define RVT_QPN_MAX BIT(24)
#define RVT_QPNMAP_ENTRIES (RVT_QPN_MAX / PAGE_SIZE / BITS_PER_BYTE)
#define RVT_BITS_PER_PAGE (PAGE_SIZE * BITS_PER_BYTE)
#define RVT_BITS_PER_PAGE_MASK (RVT_BITS_PER_PAGE - 1)
#define RVT_QPN_MASK IB_QPN_MASK
403
404
405
406
407
408
/*
 * One page of the QPN allocation bitmap.  Pages start out NULL and are
 * allocated on first use (and never freed), so large bitmaps are only
 * paid for when large numbers of QPs are in use.
 */
struct rvt_qpn_map {
	void *page;
};
412
/* QP number allocator state. */
struct rvt_qpn_table {
	spinlock_t lock;	/* protect changes to this struct */
	unsigned flags;		/* flags for QP0/QP1 allocated for each port */
	u32 last;		/* last QP number allocated (search hint) */
	u32 nmaps;		/* size of the map table */
	u16 limit;
	u8 incr;
	/* bit map of free QP numbers other than 0/1 */
	struct rvt_qpn_map map[RVT_QPNMAP_ENTRIES];
};
423
/* Per-device QP state: the QPN hash table and the QPN allocator. */
struct rvt_qp_ibdev {
	u32 qp_table_size;		/* number of hash buckets */
	u32 qp_table_bits;		/* log2 of qp_table_size */
	struct rvt_qp __rcu **qp_table;	/* RCU-protected hash table */
	spinlock_t qpt_lock;		/* serialize qp_table updates */
	struct rvt_qpn_table qpn_table;
};
431
432
433
434
435
436
/* One QP's membership in a multicast group. */
struct rvt_mcast_qp {
	struct list_head list;	/* entry on rvt_mcast.qp_list */
	struct rvt_qp *qp;
};
441
/* Multicast group address (GID + LID), used as the rbtree key. */
struct rvt_mcast_addr {
	union ib_gid mgid;
	u16 lid;
};
446
/* A multicast group and the list of QPs attached to it. */
struct rvt_mcast {
	struct rb_node rb_node;		/* node in the per-device mcast tree */
	struct rvt_mcast_addr mcast_addr;
	struct list_head qp_list;	/* list of rvt_mcast_qp members */
	wait_queue_head_t wait;		/* woken when refcount drops */
	atomic_t refcount;
	int n_attached;			/* number of attached QPs */
};
455
456
457
458
459
460static inline struct rvt_swqe *rvt_get_swqe_ptr(struct rvt_qp *qp,
461 unsigned n)
462{
463 return (struct rvt_swqe *)((char *)qp->s_wq +
464 (sizeof(struct rvt_swqe) +
465 qp->s_max_sge *
466 sizeof(struct rvt_sge)) * n);
467}
468
469
470
471
472
473static inline struct rvt_rwqe *rvt_get_rwqe_ptr(struct rvt_rq *rq, unsigned n)
474{
475 return (struct rvt_rwqe *)
476 ((char *)rq->wq->wq +
477 (sizeof(struct rvt_rwqe) +
478 rq->max_sge * sizeof(struct ib_sge)) * n);
479}
480
481
482
483
484
485static inline bool rvt_is_user_qp(struct rvt_qp *qp)
486{
487 return !!qp->pid;
488}
489
490
491
492
493
/**
 * rvt_get_qp - take a reference on a QP
 * @qp: the QP to hold
 *
 * Pairs with rvt_put_qp().
 */
static inline void rvt_get_qp(struct rvt_qp *qp)
{
	atomic_inc(&qp->refcount);
}
498
499
500
501
502
/**
 * rvt_put_qp - drop a reference on a QP
 * @qp: the QP to release (NULL is allowed and ignored)
 *
 * When the last reference is dropped, wake anyone sleeping on qp->wait
 * (e.g. a destroy path waiting for the QP to become unreferenced).
 */
static inline void rvt_put_qp(struct rvt_qp *qp)
{
	if (qp && atomic_dec_and_test(&qp->refcount))
		wake_up(&qp->wait);
}
508
509
510
511
512
513
514
515static inline void rvt_put_swqe(struct rvt_swqe *wqe)
516{
517 int i;
518
519 for (i = 0; i < wqe->wr.num_sge; i++) {
520 struct rvt_sge *sge = &wqe->sg_list[i];
521
522 rvt_put_mr(sge->mr);
523 }
524}
525
526
527
528
529
530
531
532
533
/**
 * rvt_qp_wqe_reserve - reserve an operation slot
 * @qp: the rvt qp
 * @wqe: the send wqe (currently unused; kept for API symmetry with
 *       rvt_qp_wqe_unreserve())
 *
 * Accounts one more reserved operation in flight on this QP.
 */
static inline void rvt_qp_wqe_reserve(
	struct rvt_qp *qp,
	struct rvt_swqe *wqe)
{
	atomic_inc(&qp->s_reserved_used);
}
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
/**
 * rvt_qp_wqe_unreserve - release a reserved operation slot
 * @qp: the rvt qp
 * @wqe: the send wqe
 *
 * If the WQE was marked as a reserved operation (RVT_SEND_RESERVE_USED
 * set in wr.send_flags), decrement the reserved-use count.  The
 * smp_mb__after_atomic() keeps the decrement ordered before subsequent
 * accesses.  NOTE(review): presumably this must precede the s_last
 * update so post-send sees a stable s_avail — confirm with callers.
 */
static inline void rvt_qp_wqe_unreserve(
	struct rvt_qp *qp,
	struct rvt_swqe *wqe)
{
	if (unlikely(wqe->wr.send_flags & RVT_SEND_RESERVE_USED)) {
		atomic_dec(&qp->s_reserved_used);
		/* insure other fields are seen after the decrement */
		smp_mb__after_atomic();
	}
}
566
/*
 * Translation table from IB send opcodes to work-completion opcodes.
 * NOTE(review): presumably indexed by wr.opcode — confirm in rdmavt.
 */
extern const enum ib_wc_opcode ib_rvt_wc_opcode[];
568
569
570
571
572
573
574
575
576
577
578
579
580
/**
 * rvt_qp_swqe_complete - insert a send completion for a finished SWQE
 * @qp: the qp
 * @wqe: the send wqe whose completion is being generated
 * @opcode: the completion opcode to report
 * @status: the completion status
 *
 * Reserved operations (RVT_SEND_RESERVE_USED) never generate
 * user-visible completions.  Otherwise a work completion is queued when
 * the QP signals all WRs (RVT_S_SIGNAL_REQ_WR clear), the WR itself
 * asked to be signaled (IB_SEND_SIGNALED), or the operation failed —
 * errors are always reported, and also solicit the CQ event handler
 * (the last argument to rvt_cq_enter()).
 */
static inline void rvt_qp_swqe_complete(
	struct rvt_qp *qp,
	struct rvt_swqe *wqe,
	enum ib_wc_opcode opcode,
	enum ib_wc_status status)
{
	if (unlikely(wqe->wr.send_flags & RVT_SEND_RESERVE_USED))
		return;
	if (!(qp->s_flags & RVT_S_SIGNAL_REQ_WR) ||
	    (wqe->wr.send_flags & IB_SEND_SIGNALED) ||
	    status != IB_WC_SUCCESS) {
		struct ib_wc wc;

		/* zero all fields not explicitly set below */
		memset(&wc, 0, sizeof(wc));
		wc.wr_id = wqe->wr.wr_id;
		wc.status = status;
		wc.opcode = opcode;
		wc.qp = &qp->ibqp;
		wc.byte_len = wqe->length;
		rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.send_cq), &wc,
			     status != IB_WC_SUCCESS);
	}
}
604
605
606
607
608
609static inline int rvt_cmp_msn(u32 a, u32 b)
610{
611 return (((int)a) - ((int)b)) << 8;
612}
613
614
615
616
617
618
619
/**
 * rvt_compute_aeth - compute the AETH (ack extended transport header)
 * @qp: the queue pair to compute the AETH for
 *
 * Return: the AETH in network byte order.
 */
__be32 rvt_compute_aeth(struct rvt_qp *qp);

/**
 * rvt_get_credit - process the credit information in an AETH
 * @qp: the queue pair
 * @aeth: the Acknowledge Extended Transport Header
 *
 * NOTE(review): implemented in rdmavt; confirm caller locking
 * requirements (s_lock) against the implementation.
 */
void rvt_get_credit(struct rvt_qp *qp, u32 aeth);
630
631
632
633
634
635
636
637static inline u32 rvt_div_round_up_mtu(struct rvt_qp *qp, u32 len)
638{
639 return (len + qp->pmtu - 1) >> qp->log_pmtu;
640}
641
642
643
644
645
646
647
648static inline u32 rvt_div_mtu(struct rvt_qp *qp, u32 len)
649{
650 return len >> qp->log_pmtu;
651}
652
653
654
655
656
657
658
659static inline unsigned long rvt_timeout_to_jiffies(u8 timeout)
660{
661 if (timeout > 31)
662 timeout = 31;
663
664 return usecs_to_jiffies(1U << timeout) * 4096UL / 1000UL;
665}
666
/*
 * Per-state capability table; values are combinations of
 * RVT_POST_SEND_OK, RVT_PROCESS_RECV_OK, etc.
 * NOTE(review): presumably indexed by qp->state — confirm in rdmavt qp.c.
 */
extern const int ib_rvt_state_ops[];

struct rvt_dev_info;

/* QP receive/state helpers, implemented in rdmavt */
int rvt_get_rwqe(struct rvt_qp *qp, bool wr_id_only);
void rvt_comm_est(struct rvt_qp *qp);
int rvt_error_qp(struct rvt_qp *qp, enum ib_wc_status err);
void rvt_rc_error(struct rvt_qp *qp, enum ib_wc_status err);

/* RC retry / RNR timer management */
unsigned long rvt_rnr_tbl_to_usec(u32 index);
enum hrtimer_restart rvt_rc_rnr_retry(struct hrtimer *t);
void rvt_add_rnr_timer(struct rvt_qp *qp, u32 aeth);
void rvt_del_timers_sync(struct rvt_qp *qp);
void rvt_stop_rc_timers(struct rvt_qp *qp);
void rvt_add_retry_timer(struct rvt_qp *qp);
680
681
682
683
684
685
686
687
688
/**
 * struct rvt_qp_iter - iterator state for walking all QPs of a device
 * @qp: the current QP
 * @rdi: the device this iterator works with
 * @cb: callback invoked for each QP
 * @v: opaque argument passed to @cb
 * @specials: number of special QPs to account for
 *            (NOTE(review): presumably SMI/GSI per port — confirm)
 * @n: current iterator position
 */
struct rvt_qp_iter {
	struct rvt_qp *qp;

	struct rvt_dev_info *rdi;

	void (*cb)(struct rvt_qp *qp, u64 v);

	u64 v;

	int specials;

	int n;
};
702
/* Allocate and initialize an iterator over all QPs of @rdi */
struct rvt_qp_iter *rvt_qp_iter_init(struct rvt_dev_info *rdi,
				     u64 v,
				     void (*cb)(struct rvt_qp *qp, u64 v));
/*
 * Advance the iterator to the next QP.
 * NOTE(review): exact return convention — confirm against rdmavt qp.c.
 */
int rvt_qp_iter_next(struct rvt_qp_iter *iter);
/* Walk all QPs of @rdi, invoking @cb(qp, v) on each */
void rvt_qp_iter(struct rvt_dev_info *rdi,
		 u64 v,
		 void (*cb)(struct rvt_qp *qp, u64 v));
/* Drop cached references to the MR identified by @lkey from @qp */
void rvt_qp_mr_clean(struct rvt_qp *qp, u32 lkey);
#endif          /* DEF_RDMAVT_INCQP_H */
712