1
2
3
4
5
6#ifndef DEF_RDMAVT_INCQP_H
7#define DEF_RDMAVT_INCQP_H
8
9#include <rdma/rdma_vt.h>
10#include <rdma/ib_pack.h>
11#include <rdma/ib_verbs.h>
12#include <rdma/rdmavt_cq.h>
13#include <rdma/rvt-abi.h>
14
15
16
/*
 * Bit numbers for rvt_qp.r_aflags (an unsigned long, so presumably
 * manipulated with the atomic bit operations — confirm against callers).
 */
#define RVT_R_WRID_VALID 0
#define RVT_R_REWIND_SGE 1

/*
 * Bit values for rvt_qp.r_flags (plain u8 flag word on the receive side).
 * NOTE(review): individual bit semantics follow their names; confirm
 * against the rdmavt driver sources.
 */
#define RVT_R_REUSE_SGE 0x01
#define RVT_R_RDMAR_SEQ 0x02
#define RVT_R_RSP_NAK 0x04
#define RVT_R_RSP_SEND 0x08
#define RVT_R_COMM_EST 0x10
28
29
30
31
32
33
34#define RVT_KDETH_QP_PREFIX 0x80
35#define RVT_KDETH_QP_SUFFIX 0xffff
36#define RVT_KDETH_QP_PREFIX_MASK 0x00ff0000
37#define RVT_KDETH_QP_PREFIX_SHIFT 16
38#define RVT_KDETH_QP_BASE (u32)(RVT_KDETH_QP_PREFIX << \
39 RVT_KDETH_QP_PREFIX_SHIFT)
40#define RVT_KDETH_QP_MAX (u32)(RVT_KDETH_QP_BASE + RVT_KDETH_QP_SUFFIX)
41
42
43
44
45
46
47#define RVT_AIP_QP_PREFIX 0x81
48#define RVT_AIP_QP_SUFFIX 0xffff
49#define RVT_AIP_QP_PREFIX_MASK 0x00ff0000
50#define RVT_AIP_QP_PREFIX_SHIFT 16
51#define RVT_AIP_QP_BASE (u32)(RVT_AIP_QP_PREFIX << \
52 RVT_AIP_QP_PREFIX_SHIFT)
53#define RVT_AIP_QPN_MAX BIT(RVT_AIP_QP_PREFIX_SHIFT)
54#define RVT_AIP_QP_MAX (u32)(RVT_AIP_QP_BASE + RVT_AIP_QPN_MAX - 1)
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
/*
 * Bit values for rvt_qp.s_flags (u32).
 *
 * The RVT_S_WAIT_* bits record why the send engine is stalled; they are
 * aggregated into the RVT_S_ANY_WAIT* masks below.  NOTE(review): the
 * per-bit semantics are inferred from the names; confirm against the
 * rdmavt/hfi1/qib driver sources.
 */
#define RVT_S_SIGNAL_REQ_WR 0x0001
#define RVT_S_BUSY 0x0002
#define RVT_S_TIMER 0x0004
#define RVT_S_RESP_PENDING 0x0008
#define RVT_S_ACK_PENDING 0x0010
#define RVT_S_WAIT_FENCE 0x0020
#define RVT_S_WAIT_RDMAR 0x0040
#define RVT_S_WAIT_RNR 0x0080
#define RVT_S_WAIT_SSN_CREDIT 0x0100
#define RVT_S_WAIT_DMA 0x0200
#define RVT_S_WAIT_PIO 0x0400
#define RVT_S_WAIT_TX 0x0800
#define RVT_S_WAIT_DMA_DESC 0x1000
#define RVT_S_WAIT_KMEM 0x2000
#define RVT_S_WAIT_PSN 0x4000
#define RVT_S_WAIT_ACK 0x8000
#define RVT_S_SEND_ONE 0x10000
#define RVT_S_UNLIMITED_CREDIT 0x20000
#define RVT_S_ECN 0x40000
#define RVT_S_MAX_BIT_MASK 0x800000

/* Waits caused by I/O resource shortages (buffers, descriptors, memory). */
#define RVT_S_ANY_WAIT_IO \
	(RVT_S_WAIT_PIO | RVT_S_WAIT_TX | \
	 RVT_S_WAIT_DMA_DESC | RVT_S_WAIT_KMEM)

/* Waits caused by the protocol state machine (ordering, credits, ACKs). */
#define RVT_S_ANY_WAIT_SEND (RVT_S_WAIT_FENCE | RVT_S_WAIT_RDMAR |	\
	RVT_S_WAIT_RNR | RVT_S_WAIT_SSN_CREDIT | RVT_S_WAIT_DMA | \
	RVT_S_WAIT_PSN | RVT_S_WAIT_ACK)

/* Any reason the send engine may be stalled. */
#define RVT_S_ANY_WAIT (RVT_S_ANY_WAIT_IO | RVT_S_ANY_WAIT_SEND)
122
123
124#define RVT_OPCODE_QP_MASK 0xE0
125
126
127#define RVT_POST_SEND_OK 0x01
128#define RVT_POST_RECV_OK 0x02
129#define RVT_PROCESS_RECV_OK 0x04
130#define RVT_PROCESS_SEND_OK 0x08
131#define RVT_PROCESS_NEXT_SEND_OK 0x10
132#define RVT_FLUSH_SEND 0x20
133#define RVT_FLUSH_RECV 0x40
134#define RVT_PROCESS_OR_FLUSH_SEND \
135 (RVT_PROCESS_SEND_OK | RVT_FLUSH_SEND)
136#define RVT_SEND_OR_FLUSH_OR_RECV_OK \
137 (RVT_PROCESS_SEND_OK | RVT_FLUSH_SEND | RVT_PROCESS_RECV_OK)
138
139
140
141
142#define RVT_SEND_RESERVE_USED IB_SEND_RESERVED_START
143#define RVT_SEND_COMPLETION_ONLY (IB_SEND_RESERVED_START << 1)
144
145
146
147
148
149
150
151
152
153
154
155
/*
 * struct rvt_ud_wr - UD work request plus a cached AH attribute pointer.
 * @wr: the embedded ib_ud_wr work request
 * @attr: rdma_ah_attr associated with the WR; destroyed in
 *        rvt_put_qp_swqe() when the QP's allowed_ops is IB_OPCODE_UD
 */
struct rvt_ud_wr {
	struct ib_ud_wr wr;
	struct rdma_ah_attr *attr;
};
160
161
162
163
164
165
/*
 * struct rvt_swqe - send work queue entry.
 *
 * The anonymous union overlays the various ib_*_wr flavors; which
 * member is valid depends on the opcode in the common wr header.
 * Allocated with a variable number of trailing SGEs (see
 * rvt_get_swqe_ptr(), which strides by s_max_sge entries).
 */
struct rvt_swqe {
	union {
		struct ib_send_wr wr;   /* common header; always readable */
		struct rvt_ud_wr ud_wr;
		struct ib_reg_wr reg_wr;
		struct ib_rdma_wr rdma_wr;
		struct ib_atomic_wr atomic_wr;
	};
	u32 psn;	/* first packet sequence number of this WQE */
	u32 lpsn;	/* last packet sequence number of this WQE */
	u32 ssn;	/* send sequence number (see rvt_rc_credit_avail) */
	u32 length;	/* total byte length of the data */
	void *priv;	/* driver private data */
	struct rvt_sge sg_list[];	/* flexible array, wr.num_sge entries */
};
181
182
183
184
185
186
187
188
189
190
191
192
193
194
/*
 * struct rvt_krwq - kernel-owned receive work queue.
 *
 * Producer (head) and consumer (tail) state are kept on separate
 * cachelines (____cacheline_aligned_in_smp) so posting and draining on
 * different CPUs do not bounce the same line.
 */
struct rvt_krwq {
	spinlock_t p_lock;	/* protects the producer side */
	u32 head;		/* new work requests are posted at the head */

	/* consumer fields start on their own cacheline */
	spinlock_t c_lock ____cacheline_aligned_in_smp;
	u32 tail;		/* receives are pulled from the tail */
	u32 count;		/* entry count; see rvt_get_rq_count() */
	struct rvt_rwqe *curr_wq;	/* base used by rvt_get_rwqe_ptr() */
	struct rvt_rwqe wq[];		/* flexible array of RWQEs */
};
206
207
208
209
210
211
212static inline struct rvt_ah *rvt_get_swqe_ah(struct rvt_swqe *swqe)
213{
214 return ibah_to_rvtah(swqe->ud_wr.wr.ah);
215}
216
217
218
219
220
221
222static inline struct rdma_ah_attr *rvt_get_swqe_ah_attr(struct rvt_swqe *swqe)
223{
224 return swqe->ud_wr.attr;
225}
226
227
228
229
230
231
232static inline u32 rvt_get_swqe_remote_qpn(struct rvt_swqe *swqe)
233{
234 return swqe->ud_wr.wr.remote_qpn;
235}
236
237
238
239
240
241
242static inline u32 rvt_get_swqe_remote_qkey(struct rvt_swqe *swqe)
243{
244 return swqe->ud_wr.wr.remote_qkey;
245}
246
247
248
249
250
251
252static inline u16 rvt_get_swqe_pkey_index(struct rvt_swqe *swqe)
253{
254 return swqe->ud_wr.wr.pkey_index;
255}
256
/*
 * struct rvt_rq - receive queue.
 * @wq: user-visible RWQ (vfree'd in rvt_free_rq)
 * @kwq: kernel RWQ (kvfree'd in rvt_free_rq)
 * @size: number of RWQE slots
 * @max_sge: SGEs per RWQE (stride factor in rvt_get_rwqe_ptr)
 *
 * NOTE(review): wq vs kwq selection appears to depend on whether the
 * queue is mapped to user space — confirm against the allocation code.
 */
struct rvt_rq {
	struct rvt_rwq *wq;
	struct rvt_krwq *kwq;
	u32 size;
	u8 max_sge;

	/* protect changes to the queue */
	spinlock_t lock ____cacheline_aligned_in_smp;
};
265
266
267
268
269
270
271
272
273
274
275
276static inline u32 rvt_get_rq_count(struct rvt_rq *rq, u32 head, u32 tail)
277{
278 u32 count = head - tail;
279
280 if ((s32)count < 0)
281 count += rq->size;
282 return count;
283}
284
285
286
287
288
/*
 * struct rvt_ack_entry - one slot of a responder's ACK queue
 * (rvt_qp.s_ack_queue), describing an incoming RDMA READ or atomic
 * that must be responded to.
 */
struct rvt_ack_entry {
	struct rvt_sge rdma_sge;	/* data source for an RDMA READ response */
	u64 atomic_data;		/* result payload for an atomic response */
	u32 psn;			/* first PSN of the request */
	u32 lpsn;			/* last PSN of the request */
	u8 opcode;			/* request opcode */
	u8 sent;			/* response has been sent */
	void *priv;			/* driver private data */
};
298
299#define RC_QP_SCALING_INTERVAL 5
300
301#define RVT_OPERATION_PRIV 0x00000001
302#define RVT_OPERATION_ATOMIC 0x00000002
303#define RVT_OPERATION_ATOMIC_SGE 0x00000004
304#define RVT_OPERATION_LOCAL 0x00000008
305#define RVT_OPERATION_USE_RESERVE 0x00000010
306#define RVT_OPERATION_IGN_RNR_CNT 0x00000020
307
308#define RVT_OPERATION_MAX (IB_WR_RESERVED10 + 1)
309
310
311
312
313
314
315
316
317
318
319
320
321
/*
 * struct rvt_operation_params - per-WR-opcode capabilities, indexed up
 * to RVT_OPERATION_MAX.
 * @length: size of the WR structure for this opcode
 * @qpt_support: bitmask of QP types that support the opcode
 * @flags: RVT_OPERATION_* bits
 */
struct rvt_operation_params {
	size_t length;
	u32 qpt_support;
	u32 flags;
};
327
328
329
330
331
/*
 * struct rvt_qp - rdmavt per-queue-pair state.
 *
 * Fields are grouped by the lock that guards them, and each group is
 * pushed onto its own cacheline (____cacheline_aligned_in_smp) so the
 * receive path (r_lock), the post-send path (s_hlock) and the send
 * engine (s_lock) do not share lines.  NOTE(review): per-field meaning
 * below is inferred from names and uses within this header; confirm
 * details against the rdmavt driver sources.
 */
struct rvt_qp {
	struct ib_qp ibqp;
	void *priv;		/* driver private data */
	/* read-mostly fields */
	struct rdma_ah_attr remote_ah_attr;
	struct rdma_ah_attr alt_ah_attr;
	struct rvt_qp __rcu *next;	/* QPN hash chain (see rvt_lookup_qpn) */
	struct rvt_swqe *s_wq;	/* send work queue (see rvt_get_swqe_ptr) */
	struct rvt_mmap_info *ip;

	unsigned long timeout_jiffies;	/* see rvt_timeout_to_jiffies() */

	int srate_mbps;
	pid_t pid;		/* non-zero for user QPs (see rvt_is_user_qp) */
	u32 remote_qpn;
	u32 qkey;
	u32 s_size;		/* number of swqe slots (see rvt_qp_swqe_incr) */

	u16 pmtu;		/* path MTU in bytes (see rvt_div_round_up_mtu) */
	u8 log_pmtu;		/* log2(pmtu) (see rvt_div_mtu) */
	u8 state;
	u8 allowed_ops;		/* e.g. IB_OPCODE_UD (see rvt_put_qp_swqe) */
	u8 qp_access_flags;
	u8 alt_timeout;
	u8 timeout;
	u8 s_srate;
	u8 s_mig_state;
	u8 port_num;		/* 1-based (see rvt_to_iport) */
	u8 s_pkey_index;
	u8 s_alt_pkey_index;
	u8 r_max_rd_atomic;
	u8 s_max_rd_atomic;
	u8 s_retry_cnt;
	u8 s_rnr_retry_cnt;
	u8 r_min_rnr_timer;
	u8 s_max_sge;		/* SGEs per swqe (stride in rvt_get_swqe_ptr) */
	u8 s_draining;

	/* reference counting; droppers wake sleepers (see rvt_put_qp) */
	atomic_t refcount ____cacheline_aligned_in_smp;
	wait_queue_head_t wait;

	struct rvt_ack_entry *s_ack_queue;
	struct rvt_sge_state s_rdma_read_sge;

	/* receive-side state, guarded by r_lock */
	spinlock_t r_lock ____cacheline_aligned_in_smp;
	u32 r_psn;
	unsigned long r_aflags;	/* RVT_R_WRID_VALID / RVT_R_REWIND_SGE bits */
	u64 r_wr_id;
	u32 r_ack_psn;
	u32 r_len;
	u32 r_rcv_len;
	u32 r_msn;

	u8 r_state;
	u8 r_flags;		/* RVT_R_* flag byte values */
	u8 r_head_ack_queue;
	u8 r_adefered;

	struct list_head rspwait;

	struct rvt_sge_state r_sge;
	struct rvt_rq r_rq;

	/* post-send state, guarded by s_hlock */
	spinlock_t s_hlock ____cacheline_aligned_in_smp;
	u32 s_head;
	u32 s_next_psn;
	u32 s_avail;
	u32 s_ssn;
	atomic_t s_reserved_used;	/* see rvt_qp_wqe_reserve/unreserve */

	/* send-engine state, guarded by s_lock */
	spinlock_t s_lock ____cacheline_aligned_in_smp;
	u32 s_flags;		/* RVT_S_* bits */
	struct rvt_sge_state *s_cur_sge;
	struct rvt_swqe *s_wqe;
	struct rvt_sge_state s_sge;
	struct rvt_mregion *s_rdma_mr;
	u32 s_len;
	u32 s_rdma_read_len;
	u32 s_last_psn;
	u32 s_sending_psn;
	u32 s_sending_hpsn;
	u32 s_psn;
	u32 s_ack_rdma_psn;
	u32 s_ack_psn;
	u32 s_tail;
	u32 s_cur;
	u32 s_acked;
	u32 s_last;		/* written with release semantics in rvt_qp_complete_swqe */
	u32 s_lsn;		/* credit limit (see rvt_rc_credit_avail) */
	u32 s_ahgpsn;
	u16 s_cur_size;
	u16 s_rdma_ack_cnt;
	u8 s_hdrwords;
	s8 s_ahgidx;
	u8 s_state;
	u8 s_ack_state;
	u8 s_nak_state;
	u8 r_nak_state;
	u8 s_retry;
	u8 s_rnr_retry;
	u8 s_num_rd_atomic;
	u8 s_tail_ack_queue;
	u8 s_acked_ack_queue;

	struct rvt_sge_state s_ack_rdma_sge;
	struct timer_list s_timer;	/* retry timer (see rvt_mod_retry_timer_ext) */
	struct hrtimer s_rnr_timer;	/* RNR timer (see rvt_rc_rnr_retry) */

	atomic_t local_ops_pending;

	/* trailing SGE list on its own cacheline */
	struct rvt_sge *r_sg_list
		____cacheline_aligned_in_smp;
};
450
/*
 * struct rvt_srq - shared receive queue.
 * @ibsrq: embedded ib_srq (see ibsrq_to_rvtsrq)
 * @rq: the underlying receive queue
 * @ip: mmap info for exposing the queue to user space
 * @limit: SRQ limit for the limit-reached async event
 */
struct rvt_srq {
	struct ib_srq ibsrq;
	struct rvt_rq rq;
	struct rvt_mmap_info *ip;
	/* send signal when number of RWQEs < limit */
	u32 limit;
};
458
459static inline struct rvt_srq *ibsrq_to_rvtsrq(struct ib_srq *ibsrq)
460{
461 return container_of(ibsrq, struct rvt_srq, ibsrq);
462}
463
464static inline struct rvt_qp *ibqp_to_rvtqp(struct ib_qp *ibqp)
465{
466 return container_of(ibqp, struct rvt_qp, ibqp);
467}
468
469#define RVT_QPN_MAX BIT(24)
470#define RVT_QPNMAP_ENTRIES (RVT_QPN_MAX / PAGE_SIZE / BITS_PER_BYTE)
471#define RVT_BITS_PER_PAGE (PAGE_SIZE * BITS_PER_BYTE)
472#define RVT_BITS_PER_PAGE_MASK (RVT_BITS_PER_PAGE - 1)
473#define RVT_QPN_MASK IB_QPN_MASK
474
475
476
477
478
479
/*
 * struct rvt_qpn_map - one page of the QPN allocation bitmap
 * (see rvt_qpn_table.map and the RVT_BITS_PER_PAGE constants above).
 */
struct rvt_qpn_map {
	void *page;
};
483
/*
 * struct rvt_qpn_table - allocator state for QP numbers.
 */
struct rvt_qpn_table {
	spinlock_t lock;	/* protect changes to the table */
	unsigned flags;		/* flags for QP0/QP1 allocation per port */
	u32 last;		/* last QPN allocated */
	u32 nmaps;		/* number of bitmap pages in use */
	u16 limit;
	u8 incr;		/* QPN allocation increment */
	/* bitmap of in-use QP numbers, one page per entry */
	struct rvt_qpn_map map[RVT_QPNMAP_ENTRIES];
};
494
/*
 * struct rvt_qp_ibdev - per-device QP bookkeeping: the QPN hash table
 * (looked up via rvt_lookup_qpn) and the QPN allocator.
 */
struct rvt_qp_ibdev {
	u32 qp_table_size;	/* number of hash buckets */
	u32 qp_table_bits;	/* hash width used by hash_32() */
	struct rvt_qp __rcu **qp_table;	/* RCU-protected bucket array */
	spinlock_t qpt_lock;	/* protects table modifications */
	struct rvt_qpn_table qpn_table;
};
502
503
504
505
506
507
/*
 * struct rvt_mcast_qp - one QP attached to a multicast group; linked
 * into rvt_mcast.qp_list.
 */
struct rvt_mcast_qp {
	struct list_head list;
	struct rvt_qp *qp;
};
512
/*
 * struct rvt_mcast_addr - multicast group address (GID + LID pair).
 */
struct rvt_mcast_addr {
	union ib_gid mgid;
	u16 lid;
};
517
/*
 * struct rvt_mcast - a multicast group, kept in a red-black tree keyed
 * by mcast_addr, with the attached QPs on qp_list.
 */
struct rvt_mcast {
	struct rb_node rb_node;
	struct rvt_mcast_addr mcast_addr;
	struct list_head qp_list;
	wait_queue_head_t wait;	/* for waiting on refcount to drop */
	atomic_t refcount;
	int n_attached;		/* number of QPs attached */
};
526
527
528
529
530
531static inline struct rvt_swqe *rvt_get_swqe_ptr(struct rvt_qp *qp,
532 unsigned n)
533{
534 return (struct rvt_swqe *)((char *)qp->s_wq +
535 (sizeof(struct rvt_swqe) +
536 qp->s_max_sge *
537 sizeof(struct rvt_sge)) * n);
538}
539
540
541
542
543
544static inline struct rvt_rwqe *rvt_get_rwqe_ptr(struct rvt_rq *rq, unsigned n)
545{
546 return (struct rvt_rwqe *)
547 ((char *)rq->kwq->curr_wq +
548 (sizeof(struct rvt_rwqe) +
549 rq->max_sge * sizeof(struct ib_sge)) * n);
550}
551
552
553
554
555
556static inline bool rvt_is_user_qp(struct rvt_qp *qp)
557{
558 return !!qp->pid;
559}
560
561
562
563
564
/*
 * rvt_get_qp - take a reference on a QP.
 * @qp: the QP to hold
 *
 * Pair every call with rvt_put_qp().
 */
static inline void rvt_get_qp(struct rvt_qp *qp)
{
	atomic_inc(&qp->refcount);
}
569
570
571
572
573
574static inline void rvt_put_qp(struct rvt_qp *qp)
575{
576 if (qp && atomic_dec_and_test(&qp->refcount))
577 wake_up(&qp->wait);
578}
579
580
581
582
583
584
585
586static inline void rvt_put_swqe(struct rvt_swqe *wqe)
587{
588 int i;
589
590 for (i = 0; i < wqe->wr.num_sge; i++) {
591 struct rvt_sge *sge = &wqe->sg_list[i];
592
593 rvt_put_mr(sge->mr);
594 }
595}
596
597
598
599
600
601
602
603
604
/*
 * rvt_qp_wqe_reserve - account for the use of a reserved WQE slot.
 * @qp: the QP
 * @wqe: the reserved WQE (unused here; kept for API symmetry with
 *       rvt_qp_wqe_unreserve and driver hooks)
 */
static inline void rvt_qp_wqe_reserve(
	struct rvt_qp *qp,
	struct rvt_swqe *wqe)
{
	atomic_inc(&qp->s_reserved_used);
}
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627static inline void rvt_qp_wqe_unreserve(struct rvt_qp *qp, int flags)
628{
629 if (unlikely(flags & RVT_SEND_RESERVE_USED)) {
630 atomic_dec(&qp->s_reserved_used);
631
632 smp_mb__after_atomic();
633 }
634}
635
636extern const enum ib_wc_opcode ib_rvt_wc_opcode[];
637
638
639
640
641
/*
 * rvt_cmp_msn - serial-number comparison of two MSNs.
 * @a: first sequence number
 * @b: second sequence number
 *
 * The left shift by 8 pushes the top 8 bits of the difference out, so
 * only the low 24 bits participate — presumably MSNs are 24-bit values
 * that wrap (confirm against the protocol code).  The sign of the
 * result then gives the modular ordering.
 *
 * Return: <0 if a < b, 0 if a == b, >0 if a > b (modulo 2^24).
 */
static inline int rvt_cmp_msn(u32 a, u32 b)
{
	return (((int)a) - ((int)b)) << 8;
}
646
647__be32 rvt_compute_aeth(struct rvt_qp *qp);
648
649void rvt_get_credit(struct rvt_qp *qp, u32 aeth);
650
651u32 rvt_restart_sge(struct rvt_sge_state *ss, struct rvt_swqe *wqe, u32 len);
652
653
654
655
656
657
658
659
660static inline u32 rvt_div_round_up_mtu(struct rvt_qp *qp, u32 len)
661{
662 return (len + qp->pmtu - 1) >> qp->log_pmtu;
663}
664
665
666
667
668
669
670
671static inline u32 rvt_div_mtu(struct rvt_qp *qp, u32 len)
672{
673 return len >> qp->log_pmtu;
674}
675
676
677
678
679
680
681
682static inline unsigned long rvt_timeout_to_jiffies(u8 timeout)
683{
684 if (timeout > 31)
685 timeout = 31;
686
687 return usecs_to_jiffies(1U << timeout) * 4096UL / 1000UL;
688}
689
690
691
692
693
694
695
696
697
/*
 * rvt_lookup_qpn - find the QP for a given QP number.
 * @rdi: rvt device info (owns the QPN hash table)
 * @rvp: the ibport (holds the special QP0/QP1 pointers)
 * @qpn: QP number to look up
 *
 * The caller must be inside an RCU read-side critical section
 * (__must_hold(RCU)); the returned pointer is only safe within it
 * unless a reference is taken with rvt_get_qp().
 *
 * Return: the QP, or NULL if no QP with that number exists.
 */
static inline struct rvt_qp *rvt_lookup_qpn(struct rvt_dev_info *rdi,
					    struct rvt_ibport *rvp,
					    u32 qpn) __must_hold(RCU)
{
	struct rvt_qp *qp = NULL;

	if (unlikely(qpn <= 1)) {
		/* QP0/QP1 live in the ibport, not the hash table */
		qp = rcu_dereference(rvp->qp[qpn]);
	} else {
		/* hash into the device-wide table and walk the chain */
		u32 n = hash_32(qpn, rdi->qp_dev->qp_table_bits);

		for (qp = rcu_dereference(rdi->qp_dev->qp_table[n]); qp;
			qp = rcu_dereference(qp->next))
			if (qp->ibqp.qp_num == qpn)
				break;
	}
	return qp;
}
716
717
718
719
720
721
722
/*
 * rvt_mod_retry_timer_ext - (re)arm the QP's retry timer.
 * @qp: the QP
 * @shift: scales timeout_jiffies by 1 << shift for extra backoff
 *
 * Caller must hold qp->s_lock (checked via lockdep).  Sets RVT_S_TIMER
 * and arms s_timer at now + rdi->busy_jiffies + the shifted timeout.
 */
static inline void rvt_mod_retry_timer_ext(struct rvt_qp *qp, u8 shift)
{
	struct ib_qp *ibqp = &qp->ibqp;
	struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);

	lockdep_assert_held(&qp->s_lock);
	qp->s_flags |= RVT_S_TIMER;
	/* 4.096 usec. * (1 << qp->timeout) is baked into timeout_jiffies */
	mod_timer(&qp->s_timer, jiffies + rdi->busy_jiffies +
		  (qp->timeout_jiffies << shift));
}
734
/*
 * rvt_mod_retry_timer - (re)arm the QP's retry timer with no extra
 * backoff shift.
 * @qp: the QP (caller must hold qp->s_lock)
 *
 * Fix: the original wrote "return rvt_mod_retry_timer_ext(qp, 0);".
 * Returning an expression — even a void one — from a void function is
 * an ISO C constraint violation (C11 6.8.6.4p1); it only compiles as a
 * GNU extension.  A plain call is equivalent and conforming.
 */
static inline void rvt_mod_retry_timer(struct rvt_qp *qp)
{
	rvt_mod_retry_timer_ext(qp, 0);
}
739
740
741
742
743
744
745
746
747static inline void rvt_put_qp_swqe(struct rvt_qp *qp, struct rvt_swqe *wqe)
748{
749 rvt_put_swqe(wqe);
750 if (qp->allowed_ops == IB_OPCODE_UD)
751 rdma_destroy_ah_attr(wqe->ud_wr.attr);
752}
753
754
755
756
757
758
759
760
761static inline u32
762rvt_qp_swqe_incr(struct rvt_qp *qp, u32 val)
763{
764 if (++val >= qp->s_size)
765 val = 0;
766 return val;
767}
768
769int rvt_error_qp(struct rvt_qp *qp, enum ib_wc_status err);
770
771
772
773
774
775
776
777
778
779
780
781
782static inline void rvt_recv_cq(struct rvt_qp *qp, struct ib_wc *wc,
783 bool solicited)
784{
785 struct rvt_cq *cq = ibcq_to_rvtcq(qp->ibqp.recv_cq);
786
787 if (unlikely(!rvt_cq_enter(cq, wc, solicited)))
788 rvt_error_qp(qp, IB_WC_LOC_QP_OP_ERR);
789}
790
791
792
793
794
795
796
797
798
799
800
801
802static inline void rvt_send_cq(struct rvt_qp *qp, struct ib_wc *wc,
803 bool solicited)
804{
805 struct rvt_cq *cq = ibcq_to_rvtcq(qp->ibqp.send_cq);
806
807 if (unlikely(!rvt_cq_enter(cq, wc, solicited)))
808 rvt_error_qp(qp, IB_WC_LOC_QP_OP_ERR);
809}
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
/*
 * rvt_qp_complete_swqe - retire a send WQE and maybe post a completion.
 * @qp: the QP
 * @wqe: the WQE at qp->s_last being completed
 * @opcode: wc opcode to report
 * @status: wc status to report
 *
 * Releases the WQE's resources, advances s_last with release
 * semantics, then posts to the send CQ when a completion is required
 * (not a reserved WQE, and either all WRs are signaled, this WR asked
 * for a signal, or the status is an error).
 *
 * Return: the new value of qp->s_last.
 */
static inline u32
rvt_qp_complete_swqe(struct rvt_qp *qp,
		     struct rvt_swqe *wqe,
		     enum ib_wc_opcode opcode,
		     enum ib_wc_status status)
{
	bool need_completion;
	u64 wr_id;
	u32 byte_len, last;
	int flags = wqe->wr.send_flags;

	rvt_qp_wqe_unreserve(qp, flags);
	rvt_put_qp_swqe(qp, wqe);

	need_completion =
		!(flags & RVT_SEND_RESERVE_USED) &&
		(!(qp->s_flags & RVT_S_SIGNAL_REQ_WR) ||
		 (flags & IB_SEND_SIGNALED) ||
		 status != IB_WC_SUCCESS);
	if (need_completion) {
		/*
		 * Snapshot WQE fields *before* s_last advances: once the
		 * release store below is visible, the slot may be reused.
		 */
		wr_id = wqe->wr.wr_id;
		byte_len = wqe->length;
	}
	last = rvt_qp_swqe_incr(qp, qp->s_last);
	/*
	 * Release store orders the snapshot/cleanup above before the new
	 * s_last becomes visible.  NOTE(review): confirm the matching
	 * acquire-side reader of s_last in the driver.
	 */
	smp_store_release(&qp->s_last, last);
	if (need_completion) {
		struct ib_wc w = {
			.wr_id = wr_id,
			.status = status,
			.opcode = opcode,
			.qp = &qp->ibqp,
			.byte_len = byte_len,
		};
		rvt_send_cq(qp, &w, status != IB_WC_SUCCESS);
	}
	return last;
}
866
867extern const int ib_rvt_state_ops[];
868
869struct rvt_dev_info;
870int rvt_get_rwqe(struct rvt_qp *qp, bool wr_id_only);
871void rvt_comm_est(struct rvt_qp *qp);
872void rvt_rc_error(struct rvt_qp *qp, enum ib_wc_status err);
873unsigned long rvt_rnr_tbl_to_usec(u32 index);
874enum hrtimer_restart rvt_rc_rnr_retry(struct hrtimer *t);
875void rvt_add_rnr_timer(struct rvt_qp *qp, u32 aeth);
876void rvt_del_timers_sync(struct rvt_qp *qp);
877void rvt_stop_rc_timers(struct rvt_qp *qp);
878void rvt_add_retry_timer_ext(struct rvt_qp *qp, u8 shift);
879static inline void rvt_add_retry_timer(struct rvt_qp *qp)
880{
881 rvt_add_retry_timer_ext(qp, 0);
882}
883
884void rvt_copy_sge(struct rvt_qp *qp, struct rvt_sge_state *ss,
885 void *data, u32 length,
886 bool release, bool copy_last);
887void rvt_send_complete(struct rvt_qp *qp, struct rvt_swqe *wqe,
888 enum ib_wc_status status);
889void rvt_ruc_loopback(struct rvt_qp *qp);
890
891
892
893
894
895
896
897
898
/*
 * struct rvt_qp_iter - cursor for iterating all QPs of a device
 * (see rvt_qp_iter_init / rvt_qp_iter_next / rvt_qp_iter below).
 */
struct rvt_qp_iter {
	/* current QP under the cursor */
	struct rvt_qp *qp;
	/* device being iterated */
	struct rvt_dev_info *rdi;
	/* callback invoked per QP */
	void (*cb)(struct rvt_qp *qp, u64 v);
	/* opaque value passed through to cb */
	u64 v;
	/* count of special (QP0/QP1) entries */
	int specials;
	/* iteration position */
	int n;
};
912
913
914
915
916
917
918
919
920static inline u32 ib_cq_tail(struct ib_cq *send_cq)
921{
922 struct rvt_cq *cq = ibcq_to_rvtcq(send_cq);
923
924 return ibcq_to_rvtcq(send_cq)->ip ?
925 RDMA_READ_UAPI_ATOMIC(cq->queue->tail) :
926 ibcq_to_rvtcq(send_cq)->kqueue->tail;
927}
928
929
930
931
932
933
934
935
936static inline u32 ib_cq_head(struct ib_cq *send_cq)
937{
938 struct rvt_cq *cq = ibcq_to_rvtcq(send_cq);
939
940 return ibcq_to_rvtcq(send_cq)->ip ?
941 RDMA_READ_UAPI_ATOMIC(cq->queue->head) :
942 ibcq_to_rvtcq(send_cq)->kqueue->head;
943}
944
945
946
947
948
949
950
951
952static inline void rvt_free_rq(struct rvt_rq *rq)
953{
954 kvfree(rq->kwq);
955 rq->kwq = NULL;
956 vfree(rq->wq);
957 rq->wq = NULL;
958}
959
960
961
962
963
964
965
966static inline struct rvt_ibport *rvt_to_iport(struct rvt_qp *qp)
967{
968 struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);
969
970 return rdi->ports[qp->port_num - 1];
971}
972
973
974
975
976
977
978
979
980
981static inline bool rvt_rc_credit_avail(struct rvt_qp *qp, struct rvt_swqe *wqe)
982{
983 lockdep_assert_held(&qp->s_lock);
984 if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT) &&
985 rvt_cmp_msn(wqe->ssn, qp->s_lsn + 1) > 0) {
986 struct rvt_ibport *rvp = rvt_to_iport(qp);
987
988 qp->s_flags |= RVT_S_WAIT_SSN_CREDIT;
989 rvp->n_rc_crwaits++;
990 return false;
991 }
992 return true;
993}
994
995struct rvt_qp_iter *rvt_qp_iter_init(struct rvt_dev_info *rdi,
996 u64 v,
997 void (*cb)(struct rvt_qp *qp, u64 v));
998int rvt_qp_iter_next(struct rvt_qp_iter *iter);
999void rvt_qp_iter(struct rvt_dev_info *rdi,
1000 u64 v,
1001 void (*cb)(struct rvt_qp *qp, u64 v));
1002void rvt_qp_mr_clean(struct rvt_qp *qp, u32 lkey);
1003#endif
1004