#include <linux/err.h>
#include <linux/vmalloc.h>
#include <linux/hash.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <rdma/rdma_vt.h>
#include <rdma/rdmavt_qp.h>
#include <rdma/ib_verbs.h>

#include "hfi.h"
#include "qp.h"
#include "trace.h"
#include "verbs_txreq.h"

unsigned int hfi1_qp_table_size = 256;
module_param_named(qp_table_size, hfi1_qp_table_size, uint, S_IRUGO);
MODULE_PARM_DESC(qp_table_size, "QP table size");

static void flush_tx_list(struct rvt_qp *qp);
static int iowait_sleep(
	struct sdma_engine *sde,
	struct iowait *wait,
	struct sdma_txreq *stx,
	unsigned seq);
static void iowait_wakeup(struct iowait *wait, int reason);
static void iowait_sdma_drained(struct iowait *wait);
static void qp_pio_drain(struct rvt_qp *qp);

static inline unsigned mk_qpn(struct rvt_qpn_table *qpt,
			      struct rvt_qpn_map *map, unsigned off)
{
	return (map - qpt->map) * RVT_BITS_PER_PAGE + off;
}

/*
 * Translate the AETH credit code into the number of credits.
 */
static const u16 credit_table[31] = {
	0,
	1,
	2,
	3,
	4,
	6,
	8,
	12,
	16,
	24,
	32,
	48,
	64,
	96,
	128,
	192,
	256,
	384,
	512,
	768,
	1024,
	1536,
	2048,
	3072,
	4096,
	6144,
	8192,
	12288,
	16384,
	24576,
	32768
};

const struct rvt_operation_params hfi1_post_parms[RVT_OPERATION_MAX] = {
[IB_WR_RDMA_WRITE] = {
	.length = sizeof(struct ib_rdma_wr),
	.qpt_support = BIT(IB_QPT_UC) | BIT(IB_QPT_RC),
},

[IB_WR_RDMA_READ] = {
	.length = sizeof(struct ib_rdma_wr),
	.qpt_support = BIT(IB_QPT_RC),
	.flags = RVT_OPERATION_ATOMIC,
},

[IB_WR_ATOMIC_CMP_AND_SWP] = {
	.length = sizeof(struct ib_atomic_wr),
	.qpt_support = BIT(IB_QPT_RC),
	.flags = RVT_OPERATION_ATOMIC | RVT_OPERATION_ATOMIC_SGE,
},

[IB_WR_ATOMIC_FETCH_AND_ADD] = {
	.length = sizeof(struct ib_atomic_wr),
	.qpt_support = BIT(IB_QPT_RC),
	.flags = RVT_OPERATION_ATOMIC | RVT_OPERATION_ATOMIC_SGE,
},

[IB_WR_RDMA_WRITE_WITH_IMM] = {
	.length = sizeof(struct ib_rdma_wr),
	.qpt_support = BIT(IB_QPT_UC) | BIT(IB_QPT_RC),
},

[IB_WR_SEND] = {
	.length = sizeof(struct ib_send_wr),
	.qpt_support = BIT(IB_QPT_UD) | BIT(IB_QPT_SMI) | BIT(IB_QPT_GSI) |
		       BIT(IB_QPT_UC) | BIT(IB_QPT_RC),
},

[IB_WR_SEND_WITH_IMM] = {
	.length = sizeof(struct ib_send_wr),
	.qpt_support = BIT(IB_QPT_UD) | BIT(IB_QPT_SMI) | BIT(IB_QPT_GSI) |
		       BIT(IB_QPT_UC) | BIT(IB_QPT_RC),
},

[IB_WR_REG_MR] = {
	.length = sizeof(struct ib_reg_wr),
	.qpt_support = BIT(IB_QPT_UC) | BIT(IB_QPT_RC),
	.flags = RVT_OPERATION_LOCAL,
},

[IB_WR_LOCAL_INV] = {
	.length = sizeof(struct ib_send_wr),
	.qpt_support = BIT(IB_QPT_UC) | BIT(IB_QPT_RC),
	.flags = RVT_OPERATION_LOCAL,
},

[IB_WR_SEND_WITH_INV] = {
	.length = sizeof(struct ib_send_wr),
	.qpt_support = BIT(IB_QPT_RC),
},

};

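/* Drop any txreqs still queued on the QP's iowait tx list. */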
static void flush_tx_list(struct rvt_qp *qp)
{
	struct hfi1_qp_priv *priv = qp->priv;

	while (!list_empty(&priv->s_iowait.tx_head)) {
		struct sdma_txreq *tx;

		tx = list_first_entry(
			&priv->s_iowait.tx_head,
			struct sdma_txreq,
			list);
		list_del_init(&tx->list);
		hfi1_put_txreq(
			container_of(tx, struct verbs_txreq, txreq));
	}
}

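/* Remove the QP from the device iowait list, dropping its reference. */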
static void flush_iowait(struct rvt_qp *qp)
{
	struct hfi1_qp_priv *priv = qp->priv;
	struct hfi1_ibdev *dev = to_idev(qp->ibqp.device);
	unsigned long flags;

	write_seqlock_irqsave(&dev->iowait_lock, flags);
	if (!list_empty(&priv->s_iowait.list)) {
		list_del_init(&priv->s_iowait.list);
		rvt_put_qp(qp);
	}
	write_sequnlock_irqrestore(&dev->iowait_lock, flags);
}

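/* Map the OPA extended MTU enum values to bytes; -1 if not an OPA MTU. */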
static inline int opa_mtu_enum_to_int(int mtu)
{
	switch (mtu) {
	case OPA_MTU_8192: return 8192;
	case OPA_MTU_10240: return 10240;
	default: return -1;
	}
}

/*
 * Convert an MTU enum value to a byte count, handling the OPA extended
 * MTU values before falling back to the standard IB conversion.
 */
static inline int verbs_mtu_enum_to_int(struct ib_device *dev, enum ib_mtu mtu)
{
	int val;

	/* Constrain 10KB packets to 8KB packets */
	if (mtu == (enum ib_mtu)OPA_MTU_10240)
		mtu = OPA_MTU_8192;
	val = opa_mtu_enum_to_int((int)mtu);
	if (val > 0)
		return val;
	return ib_mtu_enum_to_int(mtu);
}

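/*
 * hfi1_check_modify_qp - validate a modify_qp request against the hardware
 *
 * Reject a new (or alternate) path whose SL maps to an invalid SC, or for
 * which no SDMA engine or send context can be found.
 */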
int hfi1_check_modify_qp(struct rvt_qp *qp, struct ib_qp_attr *attr,
			 int attr_mask, struct ib_udata *udata)
{
	struct ib_qp *ibqp = &qp->ibqp;
	struct hfi1_ibdev *dev = to_idev(ibqp->device);
	struct hfi1_devdata *dd = dd_from_dev(dev);
	u8 sc;

	if (attr_mask & IB_QP_AV) {
		sc = ah_to_sc(ibqp->device, &attr->ah_attr);
		if (sc == 0xf)
			return -EINVAL;

		if (!qp_to_sdma_engine(qp, sc) &&
		    dd->flags & HFI1_HAS_SEND_DMA)
			return -EINVAL;

		if (!qp_to_send_context(qp, sc))
			return -EINVAL;
	}

	if (attr_mask & IB_QP_ALT_PATH) {
		sc = ah_to_sc(ibqp->device, &attr->alt_ah_attr);
		if (sc == 0xf)
			return -EINVAL;

		if (!qp_to_sdma_engine(qp, sc) &&
		    dd->flags & HFI1_HAS_SEND_DMA)
			return -EINVAL;

		if (!qp_to_send_context(qp, sc))
			return -EINVAL;
	}

	return 0;
}

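/*
 * hfi1_modify_qp - update driver-private QP state after a modify_qp
 *
 * Recompute the SC, SDMA engine, and send context whenever the address
 * vector changes or a path migration completes.
 */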
void hfi1_modify_qp(struct rvt_qp *qp, struct ib_qp_attr *attr,
		    int attr_mask, struct ib_udata *udata)
{
	struct ib_qp *ibqp = &qp->ibqp;
	struct hfi1_qp_priv *priv = qp->priv;

	if (attr_mask & IB_QP_AV) {
		priv->s_sc = ah_to_sc(ibqp->device, &qp->remote_ah_attr);
		priv->s_sde = qp_to_sdma_engine(qp, priv->s_sc);
		priv->s_sendcontext = qp_to_send_context(qp, priv->s_sc);
	}

	if (attr_mask & IB_QP_PATH_MIG_STATE &&
	    attr->path_mig_state == IB_MIG_MIGRATED &&
	    qp->s_mig_state == IB_MIG_ARMED) {
		qp->s_flags |= RVT_S_AHG_CLEAR;
		priv->s_sc = ah_to_sc(ibqp->device, &qp->remote_ah_attr);
		priv->s_sde = qp_to_sdma_engine(qp, priv->s_sc);
		priv->s_sendcontext = qp_to_send_context(qp, priv->s_sc);
	}
}

/**
 * hfi1_check_send_wqe - validate a send wqe
 * @qp: the QP the wqe is posted on
 * @wqe: the built wqe
 *
 * Called prior to inserting the wqe into the ring, but after the wqe
 * has been constructed.
 *
 * Returns a negative errno on failure; otherwise a non-negative value
 * that rdmavt uses to decide whether to call the send engine directly.
 */
int hfi1_check_send_wqe(struct rvt_qp *qp,
			struct rvt_swqe *wqe)
{
	struct hfi1_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
	struct rvt_ah *ah;

	switch (qp->ibqp.qp_type) {
	case IB_QPT_RC:
	case IB_QPT_UC:
		if (wqe->length > 0x80000000U)
			return -EINVAL;
		break;
	case IB_QPT_SMI:
		ah = ibah_to_rvtah(wqe->ud_wr.ah);
		if (wqe->length > (1 << ah->log_pmtu))
			return -EINVAL;
		break;
	case IB_QPT_GSI:
	case IB_QPT_UD:
		ah = ibah_to_rvtah(wqe->ud_wr.ah);
		if (wqe->length > (1 << ah->log_pmtu))
			return -EINVAL;
		if (ibp->sl_to_sc[ah->attr.sl] == 0xf)
			return -EINVAL;
		break;
	default:
		break;
	}
	return wqe->length <= piothreshold;
}

/**
 * hfi1_compute_aeth - compute the AETH (syndrome + MSN)
 * @qp: the queue pair to compute the AETH for
 *
 * Returns the AETH.
 */
__be32 hfi1_compute_aeth(struct rvt_qp *qp)
{
	u32 aeth = qp->r_msn & HFI1_MSN_MASK;

	if (qp->ibqp.srq) {
		/*
		 * Shared receive queues don't generate credits.
		 * Set the credit field to the invalid value.
		 */
		aeth |= HFI1_AETH_CREDIT_INVAL << HFI1_AETH_CREDIT_SHIFT;
	} else {
		u32 min, max, x;
		u32 credits;
		struct rvt_rwq *wq = qp->r_rq.wq;
		u32 head;
		u32 tail;

		/* sanity check pointers before trusting them */
		head = wq->head;
		if (head >= qp->r_rq.size)
			head = 0;
		tail = wq->tail;
		if (tail >= qp->r_rq.size)
			tail = 0;
		/*
		 * Compute the number of credits available (RWQEs).
		 * There is a small chance that the pair of reads are
		 * not atomic, which is OK, since the fuzziness is
		 * resolved as further ACKs go out.
		 */
		credits = head - tail;
		if ((int)credits < 0)
			credits += qp->r_rq.size;
		/*
		 * Binary search the credit table to find the code to
		 * use.
		 */
		min = 0;
		max = 31;
		for (;;) {
			x = (min + max) / 2;
			if (credit_table[x] == credits)
				break;
			if (credit_table[x] > credits) {
				max = x;
			} else {
				if (min == x)
					break;
				min = x;
			}
		}
		aeth |= x << HFI1_AETH_CREDIT_SHIFT;
	}
	return cpu_to_be32(aeth);
}

/**
 * _hfi1_schedule_send - schedule progress
 * @qp: the QP
 *
 * This schedules QP progress without regard to the s_flags.  Callers
 * that hold the s_lock and need the send-allowed check should use
 * hfi1_schedule_send() instead.
 */
void _hfi1_schedule_send(struct rvt_qp *qp)
{
	struct hfi1_qp_priv *priv = qp->priv;
	struct hfi1_ibport *ibp =
		to_iport(qp->ibqp.device, qp->port_num);
	struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
	struct hfi1_devdata *dd = dd_from_ibdev(qp->ibqp.device);

	iowait_schedule(&priv->s_iowait, ppd->hfi1_wq,
			priv->s_sde ?
			priv->s_sde->cpu :
			cpumask_first(cpumask_of_node(dd->node)));
}

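/*
 * qp_pio_drain - wait for any outstanding PIO sends on this QP to complete
 *
 * The send context's "want PIO buffer" interrupt is enabled around each
 * wait so the drain is not stalled waiting for buffer credits.
 */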
static void qp_pio_drain(struct rvt_qp *qp)
{
	struct hfi1_ibdev *dev;
	struct hfi1_qp_priv *priv = qp->priv;

	if (!priv->s_sendcontext)
		return;
	dev = to_idev(qp->ibqp.device);
	while (iowait_pio_pending(&priv->s_iowait)) {
		write_seqlock_irq(&dev->iowait_lock);
		hfi1_sc_wantpiobuf_intr(priv->s_sendcontext, 1);
		write_sequnlock_irq(&dev->iowait_lock);
		iowait_pio_drain(&priv->s_iowait);
		write_seqlock_irq(&dev->iowait_lock);
		hfi1_sc_wantpiobuf_intr(priv->s_sendcontext, 0);
		write_sequnlock_irq(&dev->iowait_lock);
	}
}

/**
 * hfi1_schedule_send - schedule progress
 * @qp: the QP
 *
 * This schedules QP progress; the caller should hold the s_lock.
 */
void hfi1_schedule_send(struct rvt_qp *qp)
{
	lockdep_assert_held(&qp->s_lock);
	if (hfi1_send_ok(qp))
		_hfi1_schedule_send(qp);
}

/**
 * hfi1_get_credit - handle credit in aeth
 * @qp: the QP
 * @aeth: the Acknowledge Extended Transport Header
 *
 * The QP s_lock should be held.
 */
void hfi1_get_credit(struct rvt_qp *qp, u32 aeth)
{
	u32 credit = (aeth >> HFI1_AETH_CREDIT_SHIFT) & HFI1_AETH_CREDIT_MASK;

	lockdep_assert_held(&qp->s_lock);
	/*
	 * If the credit is invalid, we can send
	 * as many packets as we like.  Otherwise, we have to
	 * honor the credit field.
	 */
	if (credit == HFI1_AETH_CREDIT_INVAL) {
		if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT)) {
			qp->s_flags |= RVT_S_UNLIMITED_CREDIT;
			if (qp->s_flags & RVT_S_WAIT_SSN_CREDIT) {
				qp->s_flags &= ~RVT_S_WAIT_SSN_CREDIT;
				hfi1_schedule_send(qp);
			}
		}
	} else if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT)) {
		/* Compute new LSN (i.e., MSN + credit) */
		credit = (aeth + credit_table[credit]) & HFI1_MSN_MASK;
		if (cmp_msn(credit, qp->s_lsn) > 0) {
			qp->s_lsn = credit;
			if (qp->s_flags & RVT_S_WAIT_SSN_CREDIT) {
				qp->s_flags &= ~RVT_S_WAIT_SSN_CREDIT;
				hfi1_schedule_send(qp);
			}
		}
	}
}

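/*
 * hfi1_qp_wakeup - clear a wait flag and reschedule the QP
 * @qp: the QP that was waiting
 * @flag: the RVT_S_WAIT_* flag to clear
 *
 * Drops the reference taken when the QP was queued to wait.
 */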
void hfi1_qp_wakeup(struct rvt_qp *qp, u32 flag)
{
	unsigned long flags;

	spin_lock_irqsave(&qp->s_lock, flags);
	if (qp->s_flags & flag) {
		qp->s_flags &= ~flag;
		trace_hfi1_qpwakeup(qp, flag);
		hfi1_schedule_send(qp);
	}
	spin_unlock_irqrestore(&qp->s_lock, flags);

	rvt_put_qp(qp);
}

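/*
 * iowait_sleep - called by the SDMA engine when its descriptor ring is full
 *
 * Queue the txreq and put the QP on the engine's dmawait list so it is
 * rescheduled when descriptors become available.  Returns -EBUSY when the
 * QP has been queued, -EAGAIN if the engine made progress and the caller
 * should retry, and 0 if the QP is no longer in a state to send.
 */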
static int iowait_sleep(
	struct sdma_engine *sde,
	struct iowait *wait,
	struct sdma_txreq *stx,
	unsigned seq)
{
	struct verbs_txreq *tx = container_of(stx, struct verbs_txreq, txreq);
	struct rvt_qp *qp;
	struct hfi1_qp_priv *priv;
	unsigned long flags;
	int ret = 0;
	struct hfi1_ibdev *dev;

	qp = tx->qp;
	priv = qp->priv;

	spin_lock_irqsave(&qp->s_lock, flags);
	if (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK) {
		/*
		 * If we couldn't queue the DMA request, save the info
		 * and try again later rather than destroying the
		 * buffer and undoing the side effects of the copy.
		 */
		dev = &sde->dd->verbs_dev;
		list_add_tail(&stx->list, &wait->tx_head);
		write_seqlock(&dev->iowait_lock);
		if (sdma_progress(sde, seq, stx))
			goto eagain;
		if (list_empty(&priv->s_iowait.list)) {
			struct hfi1_ibport *ibp =
				to_iport(qp->ibqp.device, qp->port_num);

			ibp->rvp.n_dmawait++;
			qp->s_flags |= RVT_S_WAIT_DMA_DESC;
			list_add_tail(&priv->s_iowait.list, &sde->dmawait);
			trace_hfi1_qpsleep(qp, RVT_S_WAIT_DMA_DESC);
			rvt_get_qp(qp);
		}
		write_sequnlock(&dev->iowait_lock);
		qp->s_flags &= ~RVT_S_BUSY;
		spin_unlock_irqrestore(&qp->s_lock, flags);
		ret = -EBUSY;
	} else {
		spin_unlock_irqrestore(&qp->s_lock, flags);
		hfi1_put_txreq(tx);
	}
	return ret;
eagain:
	write_sequnlock(&dev->iowait_lock);
	spin_unlock_irqrestore(&qp->s_lock, flags);
	list_del_init(&stx->list);
	return -EAGAIN;
}

static void iowait_wakeup(struct iowait *wait, int reason)
{
	struct rvt_qp *qp = iowait_to_qp(wait);

	WARN_ON(reason != SDMA_AVAIL_REASON);
	hfi1_qp_wakeup(qp, RVT_S_WAIT_DMA_DESC);
}

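/*
 * iowait_sdma_drained - callback when all queued SDMA work has completed
 *
 * A QP in the error state cannot be flushed until its outstanding SDMA
 * work has finished; restart the send engine here so that flush work can
 * proceed.
 */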
static void iowait_sdma_drained(struct iowait *wait)
{
	struct rvt_qp *qp = iowait_to_qp(wait);
	unsigned long flags;

	spin_lock_irqsave(&qp->s_lock, flags);
	if (qp->s_flags & RVT_S_WAIT_DMA) {
		qp->s_flags &= ~RVT_S_WAIT_DMA;
		hfi1_schedule_send(qp);
	}
	spin_unlock_irqrestore(&qp->s_lock, flags);
}

/**
 * qp_to_sdma_engine - map a qp to a send engine
 * @qp: the QP
 * @sc5: the 5 bit sc
 *
 * Return:
 * A send engine for the qp, or NULL for SMI type qp.
 */
struct sdma_engine *qp_to_sdma_engine(struct rvt_qp *qp, u8 sc5)
{
	struct hfi1_devdata *dd = dd_from_ibdev(qp->ibqp.device);
	struct sdma_engine *sde;

	if (!(dd->flags & HFI1_HAS_SEND_DMA))
		return NULL;
	switch (qp->ibqp.qp_type) {
	case IB_QPT_SMI:
		return NULL;
	default:
		break;
	}
	sde = sdma_select_engine_sc(dd, qp->ibqp.qp_num >> dd->qos_shift, sc5);
	return sde;
}

/*
 * qp_to_send_context - map a qp to a send context
 * @qp: the QP
 * @sc5: the 5 bit sc
 *
 * Return:
 * A send context for the qp
 */
struct send_context *qp_to_send_context(struct rvt_qp *qp, u8 sc5)
{
	struct hfi1_devdata *dd = dd_from_ibdev(qp->ibqp.device);

	switch (qp->ibqp.qp_type) {
	case IB_QPT_SMI:
		/* SMA packets go out on VL15 */
		return dd->vld[15].sc;
	default:
		break;
	}

	return pio_select_send_context_sc(dd, qp->ibqp.qp_num >> dd->qos_shift,
					  sc5);
}

struct qp_iter {
	struct hfi1_ibdev *dev;
	struct rvt_qp *qp;
	int specials;
	int n;
};

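/*
 * qp_iter_init - allocate an iterator over all QPs on a device
 *
 * "specials" counts the per-port QP0/QP1 pairs that live outside the QP
 * hash table and are visited before it.
 */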
struct qp_iter *qp_iter_init(struct hfi1_ibdev *dev)
{
	struct qp_iter *iter;

	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return NULL;

	iter->dev = dev;
	iter->specials = dev->rdi.ibdev.phys_port_cnt * 2;

	return iter;
}

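/*
 * qp_iter_next - advance the iterator to the next QP
 *
 * Returns 0 and updates the iterator when a QP is found, or 1 when the
 * table has been exhausted.  Must be called with the RCU read lock held.
 */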
int qp_iter_next(struct qp_iter *iter)
{
	struct hfi1_ibdev *dev = iter->dev;
	int n = iter->n;
	int ret = 1;
	struct rvt_qp *pqp = iter->qp;
	struct rvt_qp *qp;

	/*
	 * The approach is to consider the special qps
	 * as additional table entries before the
	 * real hash table.  Since the qp code sets
	 * the qp->next hash link to NULL, this works just fine.
	 *
	 * iter->specials is 2 * # ports
	 *
	 * n = 0..iter->specials are the special qp indices
	 *
	 * n = iter->specials..dev->rdi.qp_dev->qp_table_size+iter->specials
	 * are the potential hash bucket entries
	 */
	for (; n < dev->rdi.qp_dev->qp_table_size + iter->specials; n++) {
		if (pqp) {
			qp = rcu_dereference(pqp->next);
		} else {
			if (n < iter->specials) {
				struct hfi1_pportdata *ppd;
				struct hfi1_ibport *ibp;
				int pidx;

				pidx = n % dev->rdi.ibdev.phys_port_cnt;
				ppd = &dd_from_dev(dev)->pport[pidx];
				ibp = &ppd->ibport_data;

				if (!(n & 1))
					qp = rcu_dereference(ibp->rvp.qp[0]);
				else
					qp = rcu_dereference(ibp->rvp.qp[1]);
			} else {
				qp = rcu_dereference(
					dev->rdi.qp_dev->qp_table[
						(n - iter->specials)]);
			}
		}
		pqp = qp;
		if (qp) {
			iter->qp = qp;
			iter->n = n;
			return 0;
		}
	}
	return ret;
}

static const char * const qp_type_str[] = {
	"SMI", "GSI", "RC", "UC", "UD",
};

static int qp_idle(struct rvt_qp *qp)
{
	return
		qp->s_last == qp->s_acked &&
		qp->s_acked == qp->s_cur &&
		qp->s_cur == qp->s_tail &&
		qp->s_tail == qp->s_head;
}

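/*
 * qp_iter_print - emit a one-line debugfs summary of the iterator's current
 * QP (state, flags, PSNs, ring indices, and the SDMA engine and send
 * context it maps to).
 */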
void qp_iter_print(struct seq_file *s, struct qp_iter *iter)
{
	struct rvt_swqe *wqe;
	struct rvt_qp *qp = iter->qp;
	struct hfi1_qp_priv *priv = qp->priv;
	struct sdma_engine *sde;
	struct send_context *send_context;

	sde = qp_to_sdma_engine(qp, priv->s_sc);
	wqe = rvt_get_swqe_ptr(qp, qp->s_last);
	send_context = qp_to_send_context(qp, priv->s_sc);
	seq_printf(s,
		   "N %d %s QP %x R %u %s %u %u %u f=%x %u %u %u %u %u %u PSN %x %x %x %x %x (%u %u %u %u %u %u %u) RQP %x LID %x SL %u MTU %u %u %u %u SDE %p,%u SC %p,%u SCQ %u %u PID %d\n",
		   iter->n,
		   qp_idle(qp) ? "I" : "B",
		   qp->ibqp.qp_num,
		   atomic_read(&qp->refcount),
		   qp_type_str[qp->ibqp.qp_type],
		   qp->state,
		   wqe ? wqe->wr.opcode : 0,
		   qp->s_hdrwords,
		   qp->s_flags,
		   iowait_sdma_pending(&priv->s_iowait),
		   iowait_pio_pending(&priv->s_iowait),
		   !list_empty(&priv->s_iowait.list),
		   qp->timeout,
		   wqe ? wqe->ssn : 0,
		   qp->s_lsn,
		   qp->s_last_psn,
		   qp->s_psn, qp->s_next_psn,
		   qp->s_sending_psn, qp->s_sending_hpsn,
		   qp->s_last, qp->s_acked, qp->s_cur,
		   qp->s_tail, qp->s_head, qp->s_size,
		   qp->s_avail,
		   qp->remote_qpn,
		   qp->remote_ah_attr.dlid,
		   qp->remote_ah_attr.sl,
		   qp->pmtu,
		   qp->s_retry,
		   qp->s_retry_cnt,
		   qp->s_rnr_retry_cnt,
		   sde,
		   sde ? sde->this_idx : 0,
		   send_context,
		   send_context ? send_context->sw_index : 0,
		   ibcq_to_rvtcq(qp->ibqp.send_cq)->queue->head,
		   ibcq_to_rvtcq(qp->ibqp.send_cq)->queue->tail,
		   qp->pid);
}

void qp_comm_est(struct rvt_qp *qp)
{
	qp->r_flags |= RVT_R_COMM_EST;
	if (qp->ibqp.event_handler) {
		struct ib_event ev;

		ev.device = qp->ibqp.device;
		ev.element.qp = &qp->ibqp;
		ev.event = IB_EVENT_COMM_EST;
		qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
	}
}

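/*
 * qp_priv_alloc - allocate the hfi1-private portion of a QP
 *
 * Sets up the additional header generation state, the iowait structure,
 * and the RNR retry timer used by the RC protocol.
 */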
void *qp_priv_alloc(struct rvt_dev_info *rdi, struct rvt_qp *qp,
		    gfp_t gfp)
{
	struct hfi1_qp_priv *priv;

	priv = kzalloc_node(sizeof(*priv), gfp, rdi->dparms.node);
	if (!priv)
		return ERR_PTR(-ENOMEM);

	priv->owner = qp;

	priv->s_ahg = kzalloc_node(sizeof(*priv->s_ahg), gfp,
				   rdi->dparms.node);
	if (!priv->s_ahg) {
		kfree(priv);
		return ERR_PTR(-ENOMEM);
	}
	iowait_init(
		&priv->s_iowait,
		1,
		_hfi1_do_send,
		iowait_sleep,
		iowait_wakeup,
		iowait_sdma_drained);
	setup_timer(&priv->s_rnr_timer, hfi1_rc_rnr_retry, (unsigned long)qp);
	qp->s_timer.function = hfi1_rc_timeout;
	return priv;
}

void qp_priv_free(struct rvt_dev_info *rdi, struct rvt_qp *qp)
{
	struct hfi1_qp_priv *priv = qp->priv;

	kfree(priv->s_ahg);
	kfree(priv);
}

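/*
 * free_all_qps - rdmavt teardown callback
 *
 * Returns the number of per-port special QPs (QP0/QP1) still allocated so
 * rdmavt can include them in its in-use accounting.
 */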
unsigned free_all_qps(struct rvt_dev_info *rdi)
{
	struct hfi1_ibdev *verbs_dev = container_of(rdi,
						    struct hfi1_ibdev,
						    rdi);
	struct hfi1_devdata *dd = container_of(verbs_dev,
					       struct hfi1_devdata,
					       verbs_dev);
	int n;
	unsigned qp_inuse = 0;

	for (n = 0; n < dd->num_pports; n++) {
		struct hfi1_ibport *ibp = &dd->pport[n].ibport_data;

		rcu_read_lock();
		if (rcu_dereference(ibp->rvp.qp[0]))
			qp_inuse++;
		if (rcu_dereference(ibp->rvp.qp[1]))
			qp_inuse++;
		rcu_read_unlock();
	}

	return qp_inuse;
}

void flush_qp_waiters(struct rvt_qp *qp)
{
	lockdep_assert_held(&qp->s_lock);
	flush_iowait(qp);
	hfi1_stop_rc_timers(qp);
}

void stop_send_queue(struct rvt_qp *qp)
{
	struct hfi1_qp_priv *priv = qp->priv;

	cancel_work_sync(&priv->s_iowait.iowork);
	hfi1_del_timers_sync(qp);
}

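/*
 * quiesce_qp - wait for all in-flight SDMA and PIO sends to finish, then
 * drop any txreqs still queued on the QP.
 */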
void quiesce_qp(struct rvt_qp *qp)
{
	struct hfi1_qp_priv *priv = qp->priv;

	iowait_sdma_drain(&priv->s_iowait);
	qp_pio_drain(qp);
	flush_tx_list(qp);
}

void notify_qp_reset(struct rvt_qp *qp)
{
	struct hfi1_qp_priv *priv = qp->priv;

	priv->r_adefered = 0;
	clear_ahg(qp);
}

/*
 * Switch to the alternate path.
 * The QP s_lock should be held and interrupts disabled.
 */
void hfi1_migrate_qp(struct rvt_qp *qp)
{
	struct hfi1_qp_priv *priv = qp->priv;
	struct ib_event ev;

	qp->s_mig_state = IB_MIG_MIGRATED;
	qp->remote_ah_attr = qp->alt_ah_attr;
	qp->port_num = qp->alt_ah_attr.port_num;
	qp->s_pkey_index = qp->s_alt_pkey_index;
	qp->s_flags |= RVT_S_AHG_CLEAR;
	priv->s_sc = ah_to_sc(qp->ibqp.device, &qp->remote_ah_attr);
	priv->s_sde = qp_to_sdma_engine(qp, priv->s_sc);

	ev.device = qp->ibqp.device;
	ev.element.qp = &qp->ibqp;
	ev.event = IB_EVENT_PATH_MIG;
	qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
}

int mtu_to_path_mtu(u32 mtu)
{
	return mtu_to_enum(mtu, OPA_MTU_8192);
}

u32 mtu_from_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp, u32 pmtu)
{
	u32 mtu;
	struct hfi1_ibdev *verbs_dev = container_of(rdi,
						    struct hfi1_ibdev,
						    rdi);
	struct hfi1_devdata *dd = container_of(verbs_dev,
					       struct hfi1_devdata,
					       verbs_dev);
	struct hfi1_ibport *ibp;
	u8 sc, vl;

	ibp = &dd->pport[qp->port_num - 1].ibport_data;
	sc = ibp->sl_to_sc[qp->remote_ah_attr.sl];
	vl = sc_to_vlt(dd, sc);

	mtu = verbs_mtu_enum_to_int(qp->ibqp.device, pmtu);
	if (vl < PER_VL_SEND_CONTEXTS)
		mtu = min_t(u32, mtu, dd->vld[vl].mtu);
	return mtu;
}

int get_pmtu_from_attr(struct rvt_dev_info *rdi, struct rvt_qp *qp,
		       struct ib_qp_attr *attr)
{
	int mtu, pidx = qp->port_num - 1;
	struct hfi1_ibdev *verbs_dev = container_of(rdi,
						    struct hfi1_ibdev,
						    rdi);
	struct hfi1_devdata *dd = container_of(verbs_dev,
					       struct hfi1_devdata,
					       verbs_dev);
	mtu = verbs_mtu_enum_to_int(qp->ibqp.device, attr->path_mtu);
	if (mtu == -1)
		return -1;

	if (mtu > dd->pport[pidx].ibmtu)
		return mtu_to_enum(dd->pport[pidx].ibmtu, IB_MTU_2048);
	else
		return attr->path_mtu;
}

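/*
 * notify_error_qp - rdmavt callback when a QP transitions to the error state
 *
 * Remove the QP from any iowait list it is queued on, clear its IO wait
 * flags, and release queued send resources if the send engine is not busy.
 */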
void notify_error_qp(struct rvt_qp *qp)
{
	struct hfi1_ibdev *dev = to_idev(qp->ibqp.device);
	struct hfi1_qp_priv *priv = qp->priv;

	write_seqlock(&dev->iowait_lock);
	if (!list_empty(&priv->s_iowait.list) && !(qp->s_flags & RVT_S_BUSY)) {
		qp->s_flags &= ~RVT_S_ANY_WAIT_IO;
		list_del_init(&priv->s_iowait.list);
		rvt_put_qp(qp);
	}
	write_sequnlock(&dev->iowait_lock);

	if (!(qp->s_flags & RVT_S_BUSY)) {
		qp->s_hdrwords = 0;
		if (qp->s_rdma_mr) {
			rvt_put_mr(qp->s_rdma_mr);
			qp->s_rdma_mr = NULL;
		}
		flush_tx_list(qp);
	}
}

/**
 * hfi1_error_port_qps - put a port's RC/UC qps into error state
 * @ibp: the ibport.
 * @sl: the service level.
 *
 * This function places all RC/UC qps with a given service level into error
 * state.  It is generally called to force upper layer apps to abandon stale
 * qps after an sl->sc mapping change.
 */
void hfi1_error_port_qps(struct hfi1_ibport *ibp, u8 sl)
{
	struct rvt_qp *qp = NULL;
	struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
	struct hfi1_ibdev *dev = &ppd->dd->verbs_dev;
	int n;
	int lastwqe;
	struct ib_event ev;

	rcu_read_lock();

	/* Deal only with RC/UC qps that use the given SL. */
	for (n = 0; n < dev->rdi.qp_dev->qp_table_size; n++) {
		for (qp = rcu_dereference(dev->rdi.qp_dev->qp_table[n]); qp;
		     qp = rcu_dereference(qp->next)) {
			if (qp->port_num == ppd->port &&
			    (qp->ibqp.qp_type == IB_QPT_UC ||
			     qp->ibqp.qp_type == IB_QPT_RC) &&
			    qp->remote_ah_attr.sl == sl &&
			    (ib_rvt_state_ops[qp->state] &
			     RVT_POST_SEND_OK)) {
				spin_lock_irq(&qp->r_lock);
				spin_lock(&qp->s_hlock);
				spin_lock(&qp->s_lock);
				lastwqe = rvt_error_qp(qp,
						       IB_WC_WR_FLUSH_ERR);
				spin_unlock(&qp->s_lock);
				spin_unlock(&qp->s_hlock);
				spin_unlock_irq(&qp->r_lock);
				if (lastwqe) {
					ev.device = qp->ibqp.device;
					ev.element.qp = &qp->ibqp;
					ev.event =
						IB_EVENT_QP_LAST_WQE_REACHED;
					qp->ibqp.event_handler(&ev,
							       qp->ibqp.qp_context);
				}
			}
		}
	}

	rcu_read_unlock();
}