1#ifndef DEF_RDMAVT_INCQP_H
2#define DEF_RDMAVT_INCQP_H
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51#include <rdma/rdma_vt.h>
52#include <rdma/ib_pack.h>
53#include <rdma/ib_verbs.h>
54#include <rdma/rdmavt_cq.h>
55
56
57
/*
 * Atomic bit positions for qp->r_aflags (an unsigned long, manipulated
 * with the test/set/clear_bit family).
 */
#define RVT_R_WRID_VALID 0	/* r_wr_id holds a valid receive WR id */
#define RVT_R_REWIND_SGE 1	/* r_sge must be rewound before reuse */

/*
 * Bit masks for qp->r_flags (a plain u8, combined with |).
 * NOTE(review): descriptions below follow the flag names; confirm exact
 * semantics against the driver code that sets/tests them.
 */
#define RVT_R_REUSE_SGE 0x01	/* reuse the current receive SGE state */
#define RVT_R_RDMAR_SEQ 0x02	/* RDMA read responder sequence handling */
#define RVT_R_RSP_NAK 0x04	/* a NAK response is owed to the requester */
#define RVT_R_RSP_SEND 0x08	/* a response send is pending */
#define RVT_R_COMM_EST 0x10	/* communication-established event pending */
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
/*
 * Bit masks for qp->s_flags (send-side state, protected by s_lock).
 *
 * RVT_S_SIGNAL_REQ_WR - QP was created with "signal all WRs" disabled,
 *	so completions are generated only for IB_SEND_SIGNALED WRs
 *	(see rvt_qp_swqe_complete() below)
 * RVT_S_BUSY - the send engine is currently processing this QP
 * RVT_S_TIMER - the RC retry timer is active
 * RVT_S_RESP_PENDING - a response (read/atomic) is waiting to go out
 * RVT_S_ACK_PENDING - an ACK is waiting to be sent
 * RVT_S_WAIT_* - progress is stalled on the named resource/event; a QP
 *	with any of these set must not advance its send queue until the
 *	corresponding resource becomes available
 * RVT_S_SEND_ONE - send a single packet then re-arm
 * RVT_S_UNLIMITED_CREDIT - ignore the SSN credit mechanism
 * RVT_S_ECN - congestion notification handling is pending
 * RVT_S_MAX_BIT_MASK - highest bit reserved here; driver-private s_flags
 *	bits must be above this value
 */
#define RVT_S_SIGNAL_REQ_WR 0x0001
#define RVT_S_BUSY 0x0002
#define RVT_S_TIMER 0x0004
#define RVT_S_RESP_PENDING 0x0008
#define RVT_S_ACK_PENDING 0x0010
#define RVT_S_WAIT_FENCE 0x0020
#define RVT_S_WAIT_RDMAR 0x0040
#define RVT_S_WAIT_RNR 0x0080
#define RVT_S_WAIT_SSN_CREDIT 0x0100
#define RVT_S_WAIT_DMA 0x0200
#define RVT_S_WAIT_PIO 0x0400
#define RVT_S_WAIT_TX 0x0800
#define RVT_S_WAIT_DMA_DESC 0x1000
#define RVT_S_WAIT_KMEM 0x2000
#define RVT_S_WAIT_PSN 0x4000
#define RVT_S_WAIT_ACK 0x8000
#define RVT_S_SEND_ONE 0x10000
#define RVT_S_UNLIMITED_CREDIT 0x20000
#define RVT_S_ECN 0x40000
#define RVT_S_MAX_BIT_MASK 0x800000

/*
 * Wait flags that would prevent any packet type from being sent.
 */
#define RVT_S_ANY_WAIT_IO \
	(RVT_S_WAIT_PIO | RVT_S_WAIT_TX | \
	 RVT_S_WAIT_DMA_DESC | RVT_S_WAIT_KMEM)

/*
 * Wait flags that would prevent send work requests from making progress.
 */
#define RVT_S_ANY_WAIT_SEND (RVT_S_WAIT_FENCE | RVT_S_WAIT_RDMAR | \
	RVT_S_WAIT_RNR | RVT_S_WAIT_SSN_CREDIT | RVT_S_WAIT_DMA | \
	RVT_S_WAIT_PSN | RVT_S_WAIT_ACK)

/* Any condition that stalls the QP, IO or send-queue related. */
#define RVT_S_ANY_WAIT (RVT_S_ANY_WAIT_IO | RVT_S_ANY_WAIT_SEND)
136
137
/* Masks the QP-type bits out of an IB wire opcode (top 3 bits). */
#define RVT_OPCODE_QP_MASK 0xE0

/*
 * Flags describing which verbs operations are legal in the QP's current
 * state (indexed via ib_rvt_state_ops[], declared further below).
 */
#define RVT_POST_SEND_OK 0x01		/* posting send WRs is allowed */
#define RVT_POST_RECV_OK 0x02		/* posting receive WRs is allowed */
#define RVT_PROCESS_RECV_OK 0x04	/* incoming packets may be processed */
#define RVT_PROCESS_SEND_OK 0x08	/* send WRs may be processed */
#define RVT_PROCESS_NEXT_SEND_OK 0x10	/* next send WR may be started */
#define RVT_FLUSH_SEND 0x20		/* send WRs must be flushed (error state) */
#define RVT_FLUSH_RECV 0x40		/* recv WRs must be flushed (error state) */
#define RVT_PROCESS_OR_FLUSH_SEND \
	(RVT_PROCESS_SEND_OK | RVT_FLUSH_SEND)
#define RVT_SEND_OR_FLUSH_OR_RECV_OK \
	(RVT_PROCESS_SEND_OK | RVT_FLUSH_SEND | RVT_PROCESS_RECV_OK)

/*
 * Internal send flags, carved out of the ib_send_wr flag space reserved
 * for providers (IB_SEND_RESERVED_START and up).
 */
#define RVT_SEND_RESERVE_USED IB_SEND_RESERVED_START	/* WQE came from the reserved pool */
#define RVT_SEND_COMPLETION_ONLY (IB_SEND_RESERVED_START << 1)
158
159
160
161
162
163
164struct rvt_swqe {
165 union {
166 struct ib_send_wr wr;
167 struct ib_ud_wr ud_wr;
168 struct ib_reg_wr reg_wr;
169 struct ib_rdma_wr rdma_wr;
170 struct ib_atomic_wr atomic_wr;
171 };
172 u32 psn;
173 u32 lpsn;
174 u32 ssn;
175 u32 length;
176 void *priv;
177 struct rvt_sge sg_list[0];
178};
179
180
181
182
183
184
185struct rvt_rwqe {
186 u64 wr_id;
187 u8 num_sge;
188 struct ib_sge sg_list[0];
189};
190
191
192
193
194
195
196
197
198
199struct rvt_rwq {
200 u32 head;
201 u32 tail;
202 struct rvt_rwqe wq[0];
203};
204
/* Receive queue bookkeeping wrapped around the mmap-able rvt_rwq ring. */
struct rvt_rq {
	struct rvt_rwq *wq;	/* the ring itself (head/tail + RWQEs) */
	u32 size;		/* number of RWQE slots in wq */
	u8 max_sge;		/* SGEs per RWQE; fixes the RWQE stride */
	/* protects changes to this structure; own cache line to avoid
	 * false sharing with the read-mostly fields above */
	spinlock_t lock ____cacheline_aligned_in_smp;
};
212
213
214
215
216
/*
 * One slot of the responder's ack queue (qp->s_ack_queue): the state
 * needed to (re)send an RDMA read response or atomic reply.
 */
struct rvt_ack_entry {
	struct rvt_sge rdma_sge;	/* data source for an RDMA read response */
	u64 atomic_data;		/* result returned for an atomic op */
	u32 psn;			/* first PSN of the response */
	u32 lpsn;			/* last PSN of the response */
	u8 opcode;			/* request opcode this entry answers */
	u8 sent;			/* nonzero once the response went out */
	void *priv;			/* driver-private state */
};
226
#define RC_QP_SCALING_INTERVAL 5

/*
 * Per-opcode capability flags for rvt_operation_params.flags below.
 * NOTE(review): meanings inferred from the names — verify against the
 * driver's operation tables.
 */
#define RVT_OPERATION_PRIV 0x00000001		/* driver-private handling */
#define RVT_OPERATION_ATOMIC 0x00000002		/* atomic operation */
#define RVT_OPERATION_ATOMIC_SGE 0x00000004	/* atomic op that uses an SGE */
#define RVT_OPERATION_LOCAL 0x00000008		/* completed locally, no wire traffic */
#define RVT_OPERATION_USE_RESERVE 0x00000010	/* may use reserved WQE slots */
#define RVT_OPERATION_IGN_RNR_CNT 0x00000020	/* ignore the RNR retry counter */

/* Operation tables are indexed by ib_wr_opcode, up to the last reserved one. */
#define RVT_OPERATION_MAX (IB_WR_RESERVED10 + 1)
237
238
239
240
241
242
243
244
245
246
247
248
249
/*
 * Per-opcode parameters supplied by the driver; one entry per
 * ib_wr_opcode value (array of RVT_OPERATION_MAX entries).
 */
struct rvt_operation_params {
	size_t length;		/* expected WR structure length for this opcode */
	u32 qpt_support;	/* bit mask of QP types that support the opcode */
	u32 flags;		/* RVT_OPERATION_* capability flags */
};
255
256
257
258
259
260struct rvt_qp {
261 struct ib_qp ibqp;
262 void *priv;
263
264 struct rdma_ah_attr remote_ah_attr;
265 struct rdma_ah_attr alt_ah_attr;
266 struct rvt_qp __rcu *next;
267 struct rvt_swqe *s_wq;
268 struct rvt_mmap_info *ip;
269
270 unsigned long timeout_jiffies;
271
272 int srate_mbps;
273 pid_t pid;
274 u32 remote_qpn;
275 u32 qkey;
276 u32 s_size;
277
278 u16 pmtu;
279 u8 log_pmtu;
280 u8 state;
281 u8 allowed_ops;
282 u8 qp_access_flags;
283 u8 alt_timeout;
284 u8 timeout;
285 u8 s_srate;
286 u8 s_mig_state;
287 u8 port_num;
288 u8 s_pkey_index;
289 u8 s_alt_pkey_index;
290 u8 r_max_rd_atomic;
291 u8 s_max_rd_atomic;
292 u8 s_retry_cnt;
293 u8 s_rnr_retry_cnt;
294 u8 r_min_rnr_timer;
295 u8 s_max_sge;
296 u8 s_draining;
297
298
299 atomic_t refcount ____cacheline_aligned_in_smp;
300 wait_queue_head_t wait;
301
302 struct rvt_ack_entry *s_ack_queue;
303 struct rvt_sge_state s_rdma_read_sge;
304
305 spinlock_t r_lock ____cacheline_aligned_in_smp;
306 u32 r_psn;
307 unsigned long r_aflags;
308 u64 r_wr_id;
309 u32 r_ack_psn;
310 u32 r_len;
311 u32 r_rcv_len;
312 u32 r_msn;
313
314 u8 r_state;
315 u8 r_flags;
316 u8 r_head_ack_queue;
317 u8 r_adefered;
318
319 struct list_head rspwait;
320
321 struct rvt_sge_state r_sge;
322 struct rvt_rq r_rq;
323
324
325 spinlock_t s_hlock ____cacheline_aligned_in_smp;
326 u32 s_head;
327 u32 s_next_psn;
328 u32 s_avail;
329 u32 s_ssn;
330 atomic_t s_reserved_used;
331
332 spinlock_t s_lock ____cacheline_aligned_in_smp;
333 u32 s_flags;
334 struct rvt_sge_state *s_cur_sge;
335 struct rvt_swqe *s_wqe;
336 struct rvt_sge_state s_sge;
337 struct rvt_mregion *s_rdma_mr;
338 u32 s_len;
339 u32 s_rdma_read_len;
340 u32 s_last_psn;
341 u32 s_sending_psn;
342 u32 s_sending_hpsn;
343 u32 s_psn;
344 u32 s_ack_rdma_psn;
345 u32 s_ack_psn;
346 u32 s_tail;
347 u32 s_cur;
348 u32 s_acked;
349 u32 s_last;
350 u32 s_lsn;
351 u32 s_ahgpsn;
352 u16 s_cur_size;
353 u16 s_rdma_ack_cnt;
354 u8 s_hdrwords;
355 s8 s_ahgidx;
356 u8 s_state;
357 u8 s_ack_state;
358 u8 s_nak_state;
359 u8 r_nak_state;
360 u8 s_retry;
361 u8 s_rnr_retry;
362 u8 s_num_rd_atomic;
363 u8 s_tail_ack_queue;
364 u8 s_acked_ack_queue;
365
366 struct rvt_sge_state s_ack_rdma_sge;
367 struct timer_list s_timer;
368 struct hrtimer s_rnr_timer;
369
370 atomic_t local_ops_pending;
371
372
373
374
375 struct rvt_sge r_sg_list[0]
376 ____cacheline_aligned_in_smp;
377};
378
/* rdmavt shared receive queue. */
struct rvt_srq {
	struct ib_srq ibsrq;
	struct rvt_rq rq;		/* the shared receive ring */
	struct rvt_mmap_info *ip;	/* mmap info for user-space access */
	/* limit below which an async SRQ-limit event is generated */
	u32 limit;
};
386
387static inline struct rvt_srq *ibsrq_to_rvtsrq(struct ib_srq *ibsrq)
388{
389 return container_of(ibsrq, struct rvt_srq, ibsrq);
390}
391
392static inline struct rvt_qp *ibqp_to_rvtqp(struct ib_qp *ibqp)
393{
394 return container_of(ibqp, struct rvt_qp, ibqp);
395}
396
/* QP numbers are 24 bits wide; the QPN allocator bitmap is split into
 * page-sized segments (see struct rvt_qpn_map below). */
#define RVT_QPN_MAX BIT(24)
#define RVT_QPNMAP_ENTRIES (RVT_QPN_MAX / PAGE_SIZE / BITS_PER_BYTE)
#define RVT_BITS_PER_PAGE (PAGE_SIZE * BITS_PER_BYTE)
#define RVT_BITS_PER_PAGE_MASK (RVT_BITS_PER_PAGE - 1)
#define RVT_QPN_MASK IB_QPN_MASK
402
403
404
405
406
407
/*
 * One page-sized segment of the QPN allocation bitmap; each bit
 * represents one QP number.  Pages are allocated lazily.
 */
struct rvt_qpn_map {
	void *page;
};

/* QPN allocator state. */
struct rvt_qpn_table {
	spinlock_t lock;	/* protects this whole structure */
	unsigned flags;		/* flags for QP0/1 allocated for each port */
	u32 last;		/* last QPN allocated (next search start) */
	u32 nmaps;		/* size of the map table used so far */
	u16 limit;
	u8 incr;		/* QPN increment between allocations */
	/* bitmap of free QP numbers, split into page segments */
	struct rvt_qpn_map map[RVT_QPNMAP_ENTRIES];
};

/* Per-device QP bookkeeping hung off rvt_dev_info (rdi->qp_dev). */
struct rvt_qp_ibdev {
	u32 qp_table_size;		/* number of hash buckets */
	u32 qp_table_bits;		/* log2 of table size, for hash_32() */
	struct rvt_qp __rcu **qp_table;	/* QPN -> QP hash table (RCU) */
	spinlock_t qpt_lock;		/* protects writers of qp_table */
	struct rvt_qpn_table qpn_table;
};
430
431
432
433
434
435
/*
 * Multicast group membership: one rvt_mcast per multicast address,
 * holding a list of attached QPs.
 */
struct rvt_mcast_qp {
	struct list_head list;	/* link on rvt_mcast.qp_list */
	struct rvt_qp *qp;
};

struct rvt_mcast_addr {
	union ib_gid mgid;	/* multicast GID */
	u16 lid;
};

struct rvt_mcast {
	struct rb_node rb_node;		/* node in the per-port mcast rbtree */
	struct rvt_mcast_addr mcast_addr;
	struct list_head qp_list;	/* attached QPs (rvt_mcast_qp) */
	wait_queue_head_t wait;		/* woken when refcount allows teardown */
	atomic_t refcount;
	int n_attached;			/* number of QPs on qp_list */
};
454
455
456
457
458
459static inline struct rvt_swqe *rvt_get_swqe_ptr(struct rvt_qp *qp,
460 unsigned n)
461{
462 return (struct rvt_swqe *)((char *)qp->s_wq +
463 (sizeof(struct rvt_swqe) +
464 qp->s_max_sge *
465 sizeof(struct rvt_sge)) * n);
466}
467
468
469
470
471
472static inline struct rvt_rwqe *rvt_get_rwqe_ptr(struct rvt_rq *rq, unsigned n)
473{
474 return (struct rvt_rwqe *)
475 ((char *)rq->wq->wq +
476 (sizeof(struct rvt_rwqe) +
477 rq->max_sge * sizeof(struct ib_sge)) * n);
478}
479
480
481
482
483
484static inline bool rvt_is_user_qp(struct rvt_qp *qp)
485{
486 return !!qp->pid;
487}
488
489
490
491
492
/*
 * rvt_get_qp - take a reference on a QP.
 * Pair with rvt_put_qp().
 */
static inline void rvt_get_qp(struct rvt_qp *qp)
{
	atomic_inc(&qp->refcount);
}
497
498
499
500
501
/*
 * rvt_put_qp - drop a reference on a QP.
 * Wakes anyone sleeping on qp->wait (QP destroy) when the last
 * reference goes away.  NULL-safe.
 */
static inline void rvt_put_qp(struct rvt_qp *qp)
{
	if (qp && atomic_dec_and_test(&qp->refcount))
		wake_up(&qp->wait);
}
507
508
509
510
511
512
513
514static inline void rvt_put_swqe(struct rvt_swqe *wqe)
515{
516 int i;
517
518 for (i = 0; i < wqe->wr.num_sge; i++) {
519 struct rvt_sge *sge = &wqe->sg_list[i];
520
521 rvt_put_mr(sge->mr);
522 }
523}
524
525
526
527
528
529
530
531
532
/*
 * rvt_qp_wqe_reserve - account for the use of a reserved WQE slot.
 * @qp: the QP
 * @wqe: the reserved WQE (unused here; kept so reserve/unreserve have
 *	symmetric signatures — see rvt_qp_wqe_unreserve())
 */
static inline void rvt_qp_wqe_reserve(
	struct rvt_qp *qp,
	struct rvt_swqe *wqe)
{
	atomic_inc(&qp->s_reserved_used);
}
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
/*
 * rvt_qp_wqe_unreserve - release a reserved WQE slot.
 * @qp: the QP
 * @wqe: the WQE being completed
 *
 * Only acts on WQEs that were posted via the reserved pool
 * (RVT_SEND_RESERVE_USED in the WR's send_flags).  The barrier after
 * the decrement orders the release against subsequent availability
 * checks by the post-send path.
 */
static inline void rvt_qp_wqe_unreserve(
	struct rvt_qp *qp,
	struct rvt_swqe *wqe)
{
	if (unlikely(wqe->wr.send_flags & RVT_SEND_RESERVE_USED)) {
		atomic_dec(&qp->s_reserved_used);
		/* insure other fields are seen after the decrement */
		smp_mb__after_atomic();
	}
}
565
/* Table mapping a send WR opcode to its completion (WC) opcode. */
extern const enum ib_wc_opcode ib_rvt_wc_opcode[];
567
568
569
570
571
572
573
574
575
576
577
578
579
580static inline void rvt_qp_swqe_complete(
581 struct rvt_qp *qp,
582 struct rvt_swqe *wqe,
583 enum ib_wc_opcode opcode,
584 enum ib_wc_status status)
585{
586 if (unlikely(wqe->wr.send_flags & RVT_SEND_RESERVE_USED))
587 return;
588 if (!(qp->s_flags & RVT_S_SIGNAL_REQ_WR) ||
589 (wqe->wr.send_flags & IB_SEND_SIGNALED) ||
590 status != IB_WC_SUCCESS) {
591 struct ib_wc wc;
592
593 memset(&wc, 0, sizeof(wc));
594 wc.wr_id = wqe->wr.wr_id;
595 wc.status = status;
596 wc.opcode = opcode;
597 wc.qp = &qp->ibqp;
598 wc.byte_len = wqe->length;
599 rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.send_cq), &wc,
600 status != IB_WC_SUCCESS);
601 }
602}
603
604
605
606
607
608static inline int rvt_cmp_msn(u32 a, u32 b)
609{
610 return (((int)a) - ((int)b)) << 8;
611}
612
613
614
615
616
617
618
/*
 * rvt_compute_aeth - build the AETH (ACK extended transport header)
 * for @qp, encoding the MSN and receive credits.
 */
__be32 rvt_compute_aeth(struct rvt_qp *qp);

/*
 * rvt_get_credit - process the credit field of an incoming @aeth,
 * updating the QP's send-credit state.
 */
void rvt_get_credit(struct rvt_qp *qp, u32 aeth);

/*
 * rvt_restart_sge - rewind @ss to the point @len bytes into @wqe so a
 * send can be restarted; returns the remaining length.
 */
u32 rvt_restart_sge(struct rvt_sge_state *ss, struct rvt_swqe *wqe, u32 len);
639
640
641
642
643
644
645
646static inline u32 rvt_div_round_up_mtu(struct rvt_qp *qp, u32 len)
647{
648 return (len + qp->pmtu - 1) >> qp->log_pmtu;
649}
650
651
652
653
654
655
656
/*
 * rvt_div_mtu - number of whole path-MTU-sized packets in @len bytes
 * (truncating division via the log2 of the MTU).
 */
static inline u32 rvt_div_mtu(struct rvt_qp *qp, u32 len)
{
	return len >> qp->log_pmtu;
}
661
662
663
664
665
666
667
668static inline unsigned long rvt_timeout_to_jiffies(u8 timeout)
669{
670 if (timeout > 31)
671 timeout = 31;
672
673 return usecs_to_jiffies(1U << timeout) * 4096UL / 1000UL;
674}
675
676
677
678
679
680
681
682
683
/*
 * rvt_lookup_qpn - look up the QP with the given QP number.
 * @rdi: the rvt device
 * @rvp: the port (holds the special QP0/QP1 pointers)
 * @qpn: the QP number to find
 *
 * QP0 and QP1 live on the port structure; all other QPs are found by
 * walking the RCU-protected hash chain in rdi->qp_dev->qp_table.
 *
 * The caller must hold rcu_read_lock() (enforced by __must_hold(RCU))
 * and keep it held for as long as the returned QP is used, or take a
 * reference with rvt_get_qp() before dropping it.
 *
 * Returns the QP, or NULL if no QP with that number exists.
 */
static inline struct rvt_qp *rvt_lookup_qpn(struct rvt_dev_info *rdi,
					    struct rvt_ibport *rvp,
					    u32 qpn) __must_hold(RCU)
{
	struct rvt_qp *qp = NULL;

	if (unlikely(qpn <= 1)) {
		/* special QPs (SMI/GSI) hang directly off the port */
		qp = rcu_dereference(rvp->qp[qpn]);
	} else {
		u32 n = hash_32(qpn, rdi->qp_dev->qp_table_bits);

		for (qp = rcu_dereference(rdi->qp_dev->qp_table[n]); qp;
			qp = rcu_dereference(qp->next))
			if (qp->ibqp.qp_num == qpn)
				break;
	}
	return qp;
}
702
703
704
705
706
707
708
/*
 * rvt_mod_retry_timer_ext - (re)arm the RC retry timer.
 * @qp: the QP
 * @shift: extra left-shift applied to the base timeout (exponential
 *	backoff; 0 for the plain timeout)
 *
 * Caller must hold qp->s_lock (checked via lockdep).  The device's
 * busy_jiffies is added to stretch the timeout under load.
 */
static inline void rvt_mod_retry_timer_ext(struct rvt_qp *qp, u8 shift)
{
	struct ib_qp *ibqp = &qp->ibqp;
	struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);

	lockdep_assert_held(&qp->s_lock);
	qp->s_flags |= RVT_S_TIMER;
	/* 4.096 usec. * (1 << qp->timeout), pre-computed in timeout_jiffies */
	mod_timer(&qp->s_timer, jiffies + rdi->busy_jiffies +
		  (qp->timeout_jiffies << shift));
}
720
/*
 * rvt_mod_retry_timer - (re)arm the RC retry timer with no backoff.
 * Caller must hold qp->s_lock (see rvt_mod_retry_timer_ext()).
 *
 * Fix: the old body used "return <void expression>;", which is an ISO C
 * constraint violation in a void function (C11 6.8.6.4p1) and trips
 * -Wpedantic; a plain call is equivalent.
 */
static inline void rvt_mod_retry_timer(struct rvt_qp *qp)
{
	rvt_mod_retry_timer_ext(qp, 0);
}
725
726
727
728
729
730
731
732
/*
 * rvt_put_qp_swqe - drop the references held by a completed send WQE:
 * the MR refs of its SGEs, plus (for UD QPs) the ref on the AH the
 * WR carried.
 */
static inline void rvt_put_qp_swqe(struct rvt_qp *qp, struct rvt_swqe *wqe)
{
	rvt_put_swqe(wqe);
	if (qp->allowed_ops == IB_OPCODE_UD)
		atomic_dec(&ibah_to_rvtah(wqe->ud_wr.ah)->refcount);
}
739
/* Table of RVT_POST_*/RVT_PROCESS_*/RVT_FLUSH_* flags indexed by QP state. */
extern const int ib_rvt_state_ops[];

struct rvt_dev_info;
/* Get the next RWQE for @qp; if @wr_id_only, only consume the wr_id. */
int rvt_get_rwqe(struct rvt_qp *qp, bool wr_id_only);
/* Generate a communication-established event for @qp. */
void rvt_comm_est(struct rvt_qp *qp);
/* Move @qp to the error state with completion status @err. */
int rvt_error_qp(struct rvt_qp *qp, enum ib_wc_status err);
void rvt_rc_error(struct rvt_qp *qp, enum ib_wc_status err);
/* Convert an RNR-timer table index to microseconds. */
unsigned long rvt_rnr_tbl_to_usec(u32 index);
enum hrtimer_restart rvt_rc_rnr_retry(struct hrtimer *t);
void rvt_add_rnr_timer(struct rvt_qp *qp, u32 aeth);
/* Synchronously stop and wait out all of @qp's timers. */
void rvt_del_timers_sync(struct rvt_qp *qp);
void rvt_stop_rc_timers(struct rvt_qp *qp);
void rvt_add_retry_timer_ext(struct rvt_qp *qp, u8 shift);
/* Arm the RC retry timer with no extra backoff shift. */
static inline void rvt_add_retry_timer(struct rvt_qp *qp)
{
	rvt_add_retry_timer_ext(qp, 0);
}

/* Copy @length bytes of @data into @ss, optionally releasing MRs. */
void rvt_copy_sge(struct rvt_qp *qp, struct rvt_sge_state *ss,
		  void *data, u32 length,
		  bool release, bool copy_last);
/* Complete @wqe with @status and advance the send queue. */
void rvt_send_complete(struct rvt_qp *qp, struct rvt_swqe *wqe,
		       enum ib_wc_status status);
/* Loop back a send on @qp to a QP on the same device. */
void rvt_ruc_loopback(struct rvt_qp *qp);
764
765
766
767
768
769
770
771
772
/*
 * State for iterating over all QPs of a device
 * (see rvt_qp_iter_init()/rvt_qp_iter_next()/rvt_qp_iter()).
 */
struct rvt_qp_iter {
	/* the current QP, valid after a successful rvt_qp_iter_next() */
	struct rvt_qp *qp;
	/* the device being iterated */
	struct rvt_dev_info *rdi;
	/* callback invoked per QP by rvt_qp_iter() */
	void (*cb)(struct rvt_qp *qp, u64 v);
	/* opaque value passed through to the callback */
	u64 v;
	/* number of special (QP0/QP1) slots preceding the hash table */
	int specials;
	/* current position in the iteration */
	int n;
};
786
/* Allocate and initialize a QP iterator for @rdi; returns NULL on failure. */
struct rvt_qp_iter *rvt_qp_iter_init(struct rvt_dev_info *rdi,
				     u64 v,
				     void (*cb)(struct rvt_qp *qp, u64 v));
/* Advance @iter to the next QP; returns non-zero when exhausted. */
int rvt_qp_iter_next(struct rvt_qp_iter *iter);
/* Invoke @cb(qp, v) for every QP on @rdi. */
void rvt_qp_iter(struct rvt_dev_info *rdi,
		 u64 v,
		 void (*cb)(struct rvt_qp *qp, u64 v));
/* Remove references to the MR with lkey @lkey from @qp's cached SGEs. */
void rvt_qp_mr_clean(struct rvt_qp *qp, u32 lkey);
795#endif
796