#include <linux/spinlock.h>

#include "hfi.h"
#include "mad.h"
#include "qp.h"
#include "verbs_txreq.h"
#include "trace.h"
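
/*
 * init_sge - validate a receive work request's SGEs and build the SGE state
 *
 * Return 1 if OK, 0 if an L_KEY check failed (in which case the references
 * taken so far are dropped and a completion with status IB_WC_LOC_PROT_ERR
 * is queued).
 */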
static int init_sge(struct rvt_qp *qp, struct rvt_rwqe *wqe)
{
	int i, j, ret;
	struct ib_wc wc;
	struct rvt_lkey_table *rkt;
	struct rvt_pd *pd;
	struct rvt_sge_state *ss;

	rkt = &to_idev(qp->ibqp.device)->rdi.lkey_table;
	pd = ibpd_to_rvtpd(qp->ibqp.srq ? qp->ibqp.srq->pd : qp->ibqp.pd);
	ss = &qp->r_sge;
	ss->sg_list = qp->r_sg_list;
	qp->r_len = 0;
	for (i = j = 0; i < wqe->num_sge; i++) {
		if (wqe->sg_list[i].length == 0)
			continue;
		/* Check LKEY */
		ret = rvt_lkey_ok(rkt, pd, j ? &ss->sg_list[j - 1] : &ss->sge,
				  NULL, &wqe->sg_list[i],
				  IB_ACCESS_LOCAL_WRITE);
		if (unlikely(ret <= 0))
			goto bad_lkey;
		qp->r_len += wqe->sg_list[i].length;
		j++;
	}
	ss->num_sge = j;
	ss->total_len = qp->r_len;
	ret = 1;
	goto bail;

bad_lkey:
	while (j) {
		struct rvt_sge *sge = --j ? &ss->sg_list[j - 1] : &ss->sge;

		rvt_put_mr(sge->mr);
	}
	ss->num_sge = 0;
	memset(&wc, 0, sizeof(wc));
	wc.wr_id = wqe->wr_id;
	wc.status = IB_WC_LOC_PROT_ERR;
	wc.opcode = IB_WC_RECV;
	wc.qp = &qp->ibqp;
	/* Signal a solicited completion event. */
	rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc, 1);
	ret = 0;
bail:
	return ret;
}
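
/**
 * hfi1_rvt_get_rwqe - copy the next RWQE into the QP's RWQE
 * @qp: the QP
 * @wr_id_only: update qp->r_wr_id only, not qp->r_sge
 *
 * Return -1 if there is a local error, 0 if no RWQE is available,
 * otherwise return 1.
 *
 * Can be called from interrupt level.
 */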
int hfi1_rvt_get_rwqe(struct rvt_qp *qp, int wr_id_only)
{
	unsigned long flags;
	struct rvt_rq *rq;
	struct rvt_rwq *wq;
	struct rvt_srq *srq;
	struct rvt_rwqe *wqe;
	void (*handler)(struct ib_event *, void *);
	u32 tail;
	int ret;

	if (qp->ibqp.srq) {
		srq = ibsrq_to_rvtsrq(qp->ibqp.srq);
		handler = srq->ibsrq.event_handler;
		rq = &srq->rq;
	} else {
		srq = NULL;
		handler = NULL;
		rq = &qp->r_rq;
	}

	spin_lock_irqsave(&rq->lock, flags);
	if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK)) {
		ret = 0;
		goto unlock;
	}

	wq = rq->wq;
	tail = wq->tail;
	/* Validate tail before using it since it is user writable. */
	if (tail >= rq->size)
		tail = 0;
	if (unlikely(tail == wq->head)) {
		ret = 0;
		goto unlock;
	}
	/* Make sure entry is read after the head index is read. */
	smp_rmb();
	wqe = rvt_get_rwqe_ptr(rq, tail);
	/*
	 * Even though we update the tail index in memory, the verbs
	 * consumer is not supposed to post more entries until a
	 * completion is generated.
	 */
	if (++tail >= rq->size)
		tail = 0;
	wq->tail = tail;
	if (!wr_id_only && !init_sge(qp, wqe)) {
		ret = -1;
		goto unlock;
	}
	qp->r_wr_id = wqe->wr_id;

	ret = 1;
	set_bit(RVT_R_WRID_VALID, &qp->r_aflags);
	if (handler) {
		u32 n;

		/*
		 * Validate head pointer value and compute
		 * the number of remaining WQEs.
		 */
		n = wq->head;
		if (n >= rq->size)
			n = 0;
		if (n < tail)
			n += rq->size - tail;
		else
			n -= tail;
		if (n < srq->limit) {
			struct ib_event ev;

			srq->limit = 0;
			spin_unlock_irqrestore(&rq->lock, flags);
			ev.device = qp->ibqp.device;
			ev.element.srq = qp->ibqp.srq;
			ev.event = IB_EVENT_SRQ_LIMIT_REACHED;
			handler(&ev, srq->ibsrq.srq_context);
			goto bail;
		}
	}
unlock:
	spin_unlock_irqrestore(&rq->lock, flags);
bail:
	return ret;
}

static int gid_ok(union ib_gid *gid, __be64 gid_prefix, __be64 id)
{
	return (gid->global.interface_id == id &&
		(gid->global.subnet_prefix == gid_prefix ||
		 gid->global.subnet_prefix == IB_DEFAULT_GID_PREFIX));
}
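
/*
 * hfi1_ruc_check_hdr - validate the headers of a received packet against
 * the QP's address state
 *
 * Return 1 if the packet should be dropped, 0 otherwise.
 *
 * This should be called with the QP r_lock held.
 *
 * The s_lock will be acquired around the hfi1_migrate_qp() call.
 */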
int hfi1_ruc_check_hdr(struct hfi1_ibport *ibp, struct hfi1_packet *packet)
{
	__be64 guid;
	unsigned long flags;
	struct rvt_qp *qp = packet->qp;
	u8 sc5 = ibp->sl_to_sc[rdma_ah_get_sl(&qp->remote_ah_attr)];
	u32 dlid = packet->dlid;
	u32 slid = packet->slid;
	u32 sl = packet->sl;
	int migrated;
	u32 bth0, bth1;
	u16 pkey;

	bth0 = be32_to_cpu(packet->ohdr->bth[0]);
	bth1 = be32_to_cpu(packet->ohdr->bth[1]);
	if (packet->etype == RHF_RCV_TYPE_BYPASS) {
		pkey = hfi1_16B_get_pkey(packet->hdr);
		migrated = bth1 & OPA_BTH_MIG_REQ;
	} else {
		pkey = ib_bth_get_pkey(packet->ohdr);
		migrated = bth0 & IB_BTH_MIG_REQ;
	}

	if (qp->s_mig_state == IB_MIG_ARMED && migrated) {
		if (!packet->grh) {
			if ((rdma_ah_get_ah_flags(&qp->alt_ah_attr) &
			     IB_AH_GRH) &&
			    (packet->etype != RHF_RCV_TYPE_BYPASS))
				return 1;
		} else {
			const struct ib_global_route *grh;

			if (!(rdma_ah_get_ah_flags(&qp->alt_ah_attr) &
			      IB_AH_GRH))
				return 1;
			grh = rdma_ah_read_grh(&qp->alt_ah_attr);
			guid = get_sguid(ibp, grh->sgid_index);
			if (!gid_ok(&packet->grh->dgid, ibp->rvp.gid_prefix,
				    guid))
				return 1;
			if (!gid_ok(
				&packet->grh->sgid,
				grh->dgid.global.subnet_prefix,
				grh->dgid.global.interface_id))
				return 1;
		}
		if (unlikely(rcv_pkey_check(ppd_from_ibp(ibp), pkey,
					    sc5, slid))) {
			hfi1_bad_pkey(ibp, pkey, sl, 0, qp->ibqp.qp_num,
				      slid, dlid);
			return 1;
		}
		/* Validate the SLID. See Ch. 9.6.1.5 and 17.2.8 */
		if (slid != rdma_ah_get_dlid(&qp->alt_ah_attr) ||
		    ppd_from_ibp(ibp)->port !=
			rdma_ah_get_port_num(&qp->alt_ah_attr))
			return 1;
		spin_lock_irqsave(&qp->s_lock, flags);
		hfi1_migrate_qp(qp);
		spin_unlock_irqrestore(&qp->s_lock, flags);
	} else {
		if (!packet->grh) {
			if ((rdma_ah_get_ah_flags(&qp->remote_ah_attr) &
			     IB_AH_GRH) &&
			    (packet->etype != RHF_RCV_TYPE_BYPASS))
				return 1;
		} else {
			const struct ib_global_route *grh;

			if (!(rdma_ah_get_ah_flags(&qp->remote_ah_attr) &
			      IB_AH_GRH))
				return 1;
			grh = rdma_ah_read_grh(&qp->remote_ah_attr);
			guid = get_sguid(ibp, grh->sgid_index);
			if (!gid_ok(&packet->grh->dgid, ibp->rvp.gid_prefix,
				    guid))
				return 1;
			if (!gid_ok(
				&packet->grh->sgid,
				grh->dgid.global.subnet_prefix,
				grh->dgid.global.interface_id))
				return 1;
		}
		if (unlikely(rcv_pkey_check(ppd_from_ibp(ibp), pkey,
					    sc5, slid))) {
			hfi1_bad_pkey(ibp, pkey, sl, 0, qp->ibqp.qp_num,
				      slid, dlid);
			return 1;
		}
		/* Validate the SLID. See Ch. 9.6.1.5 */
		if ((slid != rdma_ah_get_dlid(&qp->remote_ah_attr)) ||
		    ppd_from_ibp(ibp)->port != qp->port_num)
			return 1;
		if (qp->s_mig_state == IB_MIG_REARM && !migrated)
			qp->s_mig_state = IB_MIG_ARMED;
	}

	return 0;
}
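
/**
 * ruc_loopback - handle UC and RC loopback requests
 * @sqp: the sending QP
 *
 * This is called from hfi1_do_send() to forward a WQE addressed to the
 * same HFI.  Note that although we are single threaded due to the send
 * engine, we still have to protect against post_send().  We don't have
 * to worry about receive interrupts since this is a connected protocol
 * and all packets will pass through here.
 */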
static void ruc_loopback(struct rvt_qp *sqp)
{
	struct hfi1_ibport *ibp = to_iport(sqp->ibqp.device, sqp->port_num);
	struct rvt_qp *qp;
	struct rvt_swqe *wqe;
	struct rvt_sge *sge;
	unsigned long flags;
	struct ib_wc wc;
	u64 sdata;
	atomic64_t *maddr;
	enum ib_wc_status send_status;
	bool release;
	int ret;
	bool copy_last = false;
	int local_ops = 0;

	rcu_read_lock();

	/*
	 * Note that we check the responder QP state after
	 * checking the requester's state.
	 */
	qp = rvt_lookup_qpn(ib_to_rvt(sqp->ibqp.device), &ibp->rvp,
			    sqp->remote_qpn);

	spin_lock_irqsave(&sqp->s_lock, flags);

	/* Return if we are already busy processing a work request. */
	if ((sqp->s_flags & (RVT_S_BUSY | RVT_S_ANY_WAIT)) ||
	    !(ib_rvt_state_ops[sqp->state] & RVT_PROCESS_OR_FLUSH_SEND))
		goto unlock;

	sqp->s_flags |= RVT_S_BUSY;

again:
	smp_read_barrier_depends(); /* see post_one_send() */
	if (sqp->s_last == ACCESS_ONCE(sqp->s_head))
		goto clr_busy;
	wqe = rvt_get_swqe_ptr(sqp, sqp->s_last);

	/* Return if it is not OK to start a new work request. */
	if (!(ib_rvt_state_ops[sqp->state] & RVT_PROCESS_NEXT_SEND_OK)) {
		if (!(ib_rvt_state_ops[sqp->state] & RVT_FLUSH_SEND))
			goto clr_busy;
		/* We are in the error state, flush the work request. */
		send_status = IB_WC_WR_FLUSH_ERR;
		goto flush_send;
	}

	/*
	 * We can rely on the entry not changing without the s_lock
	 * being held until we update s_last.
	 * We increment s_cur to indicate s_last is in progress.
	 */
	if (sqp->s_last == sqp->s_cur) {
		if (++sqp->s_cur >= sqp->s_size)
			sqp->s_cur = 0;
	}
	spin_unlock_irqrestore(&sqp->s_lock, flags);

	if (!qp || !(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK) ||
	    qp->ibqp.qp_type != sqp->ibqp.qp_type) {
		ibp->rvp.n_pkt_drops++;
		/*
		 * For RC, the requester would timeout and retry so
		 * shortcut the timeouts and just signal too many retries.
		 */
		if (sqp->ibqp.qp_type == IB_QPT_RC)
			send_status = IB_WC_RETRY_EXC_ERR;
		else
			send_status = IB_WC_SUCCESS;
		goto serr;
	}

	memset(&wc, 0, sizeof(wc));
	send_status = IB_WC_SUCCESS;

	release = true;
	sqp->s_sge.sge = wqe->sg_list[0];
	sqp->s_sge.sg_list = wqe->sg_list + 1;
	sqp->s_sge.num_sge = wqe->wr.num_sge;
	sqp->s_len = wqe->length;
	switch (wqe->wr.opcode) {
	case IB_WR_REG_MR:
		goto send_comp;

	case IB_WR_LOCAL_INV:
		if (!(wqe->wr.send_flags & RVT_SEND_COMPLETION_ONLY)) {
			if (rvt_invalidate_rkey(sqp,
						wqe->wr.ex.invalidate_rkey))
				send_status = IB_WC_LOC_PROT_ERR;
			local_ops = 1;
		}
		goto send_comp;

	case IB_WR_SEND_WITH_INV:
		if (!rvt_invalidate_rkey(qp, wqe->wr.ex.invalidate_rkey)) {
			wc.wc_flags = IB_WC_WITH_INVALIDATE;
			wc.ex.invalidate_rkey = wqe->wr.ex.invalidate_rkey;
		}
		goto send;

	case IB_WR_SEND_WITH_IMM:
		wc.wc_flags = IB_WC_WITH_IMM;
		wc.ex.imm_data = wqe->wr.ex.imm_data;
		/* FALLTHROUGH */
	case IB_WR_SEND:
send:
		ret = hfi1_rvt_get_rwqe(qp, 0);
		if (ret < 0)
			goto op_err;
		if (!ret)
			goto rnr_nak;
		break;

	case IB_WR_RDMA_WRITE_WITH_IMM:
		if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_WRITE)))
			goto inv_err;
		wc.wc_flags = IB_WC_WITH_IMM;
		wc.ex.imm_data = wqe->wr.ex.imm_data;
		ret = hfi1_rvt_get_rwqe(qp, 1);
		if (ret < 0)
			goto op_err;
		if (!ret)
			goto rnr_nak;
		/* skip copy_last set and qp_access_flags recheck */
		goto do_write;
	case IB_WR_RDMA_WRITE:
		copy_last = rvt_is_user_qp(qp);
		if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_WRITE)))
			goto inv_err;
do_write:
		if (wqe->length == 0)
			break;
		if (unlikely(!rvt_rkey_ok(qp, &qp->r_sge.sge, wqe->length,
					  wqe->rdma_wr.remote_addr,
					  wqe->rdma_wr.rkey,
					  IB_ACCESS_REMOTE_WRITE)))
			goto acc_err;
		qp->r_sge.sg_list = NULL;
		qp->r_sge.num_sge = 1;
		qp->r_sge.total_len = wqe->length;
		break;

	case IB_WR_RDMA_READ:
		if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_READ)))
			goto inv_err;
		if (unlikely(!rvt_rkey_ok(qp, &sqp->s_sge.sge, wqe->length,
					  wqe->rdma_wr.remote_addr,
					  wqe->rdma_wr.rkey,
					  IB_ACCESS_REMOTE_READ)))
			goto acc_err;
		release = false;
		sqp->s_sge.sg_list = NULL;
		sqp->s_sge.num_sge = 1;
		qp->r_sge.sge = wqe->sg_list[0];
		qp->r_sge.sg_list = wqe->sg_list + 1;
		qp->r_sge.num_sge = wqe->wr.num_sge;
		qp->r_sge.total_len = wqe->length;
		break;

	case IB_WR_ATOMIC_CMP_AND_SWP:
	case IB_WR_ATOMIC_FETCH_AND_ADD:
		if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC)))
			goto inv_err;
		if (unlikely(!rvt_rkey_ok(qp, &qp->r_sge.sge, sizeof(u64),
					  wqe->atomic_wr.remote_addr,
					  wqe->atomic_wr.rkey,
					  IB_ACCESS_REMOTE_ATOMIC)))
			goto acc_err;
		/* Perform atomic OP and save result. */
		maddr = (atomic64_t *)qp->r_sge.sge.vaddr;
		sdata = wqe->atomic_wr.compare_add;
		*(u64 *)sqp->s_sge.sge.vaddr =
			(wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) ?
			(u64)atomic64_add_return(sdata, maddr) - sdata :
			(u64)cmpxchg((u64 *)qp->r_sge.sge.vaddr,
				     sdata, wqe->atomic_wr.swap);
		rvt_put_mr(qp->r_sge.sge.mr);
		qp->r_sge.num_sge = 0;
		goto send_comp;

	default:
		send_status = IB_WC_LOC_QP_OP_ERR;
		goto serr;
	}

	sge = &sqp->s_sge.sge;
	while (sqp->s_len) {
		u32 len = sqp->s_len;

		if (len > sge->length)
			len = sge->length;
		if (len > sge->sge_length)
			len = sge->sge_length;
		WARN_ON_ONCE(len == 0);
		hfi1_copy_sge(&qp->r_sge, sge->vaddr, len, release, copy_last);
		sge->vaddr += len;
		sge->length -= len;
		sge->sge_length -= len;
		if (sge->sge_length == 0) {
			if (!release)
				rvt_put_mr(sge->mr);
			if (--sqp->s_sge.num_sge)
				*sge = *sqp->s_sge.sg_list++;
		} else if (sge->length == 0 && sge->mr->lkey) {
			if (++sge->n >= RVT_SEGSZ) {
				if (++sge->m >= sge->mr->mapsz)
					break;
				sge->n = 0;
			}
			sge->vaddr =
				sge->mr->map[sge->m]->segs[sge->n].vaddr;
			sge->length =
				sge->mr->map[sge->m]->segs[sge->n].length;
		}
		sqp->s_len -= len;
	}
	if (release)
		rvt_put_ss(&qp->r_sge);

	if (!test_and_clear_bit(RVT_R_WRID_VALID, &qp->r_aflags))
		goto send_comp;

	if (wqe->wr.opcode == IB_WR_RDMA_WRITE_WITH_IMM)
		wc.opcode = IB_WC_RECV_RDMA_WITH_IMM;
	else
		wc.opcode = IB_WC_RECV;
	wc.wr_id = qp->r_wr_id;
	wc.status = IB_WC_SUCCESS;
	wc.byte_len = wqe->length;
	wc.qp = &qp->ibqp;
	wc.src_qp = qp->remote_qpn;
	wc.slid = rdma_ah_get_dlid(&qp->remote_ah_attr);
	wc.sl = rdma_ah_get_sl(&qp->remote_ah_attr);
	wc.port_num = 1;
	/* Signal completion event if the solicited bit is set. */
	rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc,
		     wqe->wr.send_flags & IB_SEND_SOLICITED);

send_comp:
	spin_lock_irqsave(&sqp->s_lock, flags);
	ibp->rvp.n_loop_pkts++;
flush_send:
	sqp->s_rnr_retry = sqp->s_rnr_retry_cnt;
	hfi1_send_complete(sqp, wqe, send_status);
	if (local_ops) {
		atomic_dec(&sqp->local_ops_pending);
		local_ops = 0;
	}
	goto again;

rnr_nak:
	/* Handle RNR NAK */
	if (qp->ibqp.qp_type == IB_QPT_UC)
		goto send_comp;
	ibp->rvp.n_rnr_naks++;
	/*
	 * Note: we don't need the s_lock held since the BUSY flag
	 * makes this single threaded.
	 */
	if (sqp->s_rnr_retry == 0) {
		send_status = IB_WC_RNR_RETRY_EXC_ERR;
		goto serr;
	}
	if (sqp->s_rnr_retry_cnt < 7)
		sqp->s_rnr_retry--;
	spin_lock_irqsave(&sqp->s_lock, flags);
	if (!(ib_rvt_state_ops[sqp->state] & RVT_PROCESS_RECV_OK))
		goto clr_busy;
	rvt_add_rnr_timer(sqp, qp->r_min_rnr_timer <<
			       IB_AETH_CREDIT_SHIFT);
	goto clr_busy;

op_err:
	send_status = IB_WC_REM_OP_ERR;
	wc.status = IB_WC_LOC_QP_OP_ERR;
	goto err;

inv_err:
	send_status = IB_WC_REM_INV_REQ_ERR;
	wc.status = IB_WC_LOC_QP_OP_ERR;
	goto err;

acc_err:
	send_status = IB_WC_REM_ACCESS_ERR;
	wc.status = IB_WC_LOC_PROT_ERR;
err:
	/* responder goes to error state */
	rvt_rc_error(qp, wc.status);

serr:
	spin_lock_irqsave(&sqp->s_lock, flags);
	hfi1_send_complete(sqp, wqe, send_status);
	if (sqp->ibqp.qp_type == IB_QPT_RC) {
		int lastwqe = rvt_error_qp(sqp, IB_WC_WR_FLUSH_ERR);

		sqp->s_flags &= ~RVT_S_BUSY;
		spin_unlock_irqrestore(&sqp->s_lock, flags);
		if (lastwqe) {
			struct ib_event ev;

			ev.device = sqp->ibqp.device;
			ev.element.qp = &sqp->ibqp;
			ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
			sqp->ibqp.event_handler(&ev, sqp->ibqp.qp_context);
		}
		goto done;
	}
clr_busy:
	sqp->s_flags &= ~RVT_S_BUSY;
unlock:
	spin_unlock_irqrestore(&sqp->s_lock, flags);
done:
	rcu_read_unlock();
}
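
/**
 * hfi1_make_grh - construct a GRH header
 * @ibp: a pointer to the IB port
 * @hdr: a pointer to the GRH header being constructed
 * @grh: the global route address to send to
 * @hwords: the number of 32 bit words of header being sent
 * @nwords: the number of 32 bit words of data being sent
 *
 * Return the size of the header in 32 bit words.
 */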
u32 hfi1_make_grh(struct hfi1_ibport *ibp, struct ib_grh *hdr,
		  const struct ib_global_route *grh, u32 hwords, u32 nwords)
{
	hdr->version_tclass_flow =
		cpu_to_be32((IB_GRH_VERSION << IB_GRH_VERSION_SHIFT) |
			    (grh->traffic_class << IB_GRH_TCLASS_SHIFT) |
			    (grh->flow_label << IB_GRH_FLOW_SHIFT));
	hdr->paylen = cpu_to_be16((hwords + nwords) << 2);
	/* next_hdr is defined by C8-7 in ch. 8.4.1 */
	hdr->next_hdr = IB_GRH_NEXT_HDR;
	hdr->hop_limit = grh->hop_limit;
	/* The SGID is 32-bit aligned. */
	hdr->sgid.global.subnet_prefix = ibp->rvp.gid_prefix;
	hdr->sgid.global.interface_id =
		grh->sgid_index < HFI1_GUIDS_PER_PORT ?
		get_sguid(ibp, grh->sgid_index) :
		get_sguid(ibp, HFI1_PORT_GUID_INDEX);
	hdr->dgid = grh->dgid;

	/* GRH header size in 32-bit words. */
	return sizeof(struct ib_grh) / sizeof(u32);
}

#define BTH2_OFFSET (offsetof(struct hfi1_sdma_header, \
			      hdr.ibh.u.oth.bth[2]) / 4)
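
/**
 * build_ahg - create ahg in s_ahg
 * @qp: a pointer to QP
 * @npsn: the next PSN for the request/response
 *
 * This routine handles the AHG by allocating an ahg entry and causing the
 * copy of the first middle.
 *
 * Subsequent middles use the copied entry, editing the
 * PSN with 1 or 2 edits.
 */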
static inline void build_ahg(struct rvt_qp *qp, u32 npsn)
{
	struct hfi1_qp_priv *priv = qp->priv;

	if (unlikely(qp->s_flags & RVT_S_AHG_CLEAR))
		clear_ahg(qp);
	if (!(qp->s_flags & RVT_S_AHG_VALID)) {
		/* first middle that needs copy */
		if (qp->s_ahgidx < 0)
			qp->s_ahgidx = sdma_ahg_alloc(priv->s_sde);
		if (qp->s_ahgidx >= 0) {
			qp->s_ahgpsn = npsn;
			priv->s_ahg->tx_flags |= SDMA_TXREQ_F_AHG_COPY;
			/* save to protect a change in another thread */
			priv->s_ahg->ahgidx = qp->s_ahgidx;
			qp->s_flags |= RVT_S_AHG_VALID;
		}
	} else {
		/* subsequent middle after valid */
		if (qp->s_ahgidx >= 0) {
			priv->s_ahg->tx_flags |= SDMA_TXREQ_F_USE_AHG;
			priv->s_ahg->ahgidx = qp->s_ahgidx;
			priv->s_ahg->ahgcount++;
			priv->s_ahg->ahgdesc[0] =
				sdma_build_ahg_descriptor(
					(__force u16)cpu_to_be16((u16)npsn),
					BTH2_OFFSET,
					16,
					16);
			if ((npsn & 0xffff0000) !=
					(qp->s_ahgpsn & 0xffff0000)) {
				priv->s_ahg->ahgcount++;
				priv->s_ahg->ahgdesc[1] =
					sdma_build_ahg_descriptor(
						(__force u16)cpu_to_be16(
							(u16)(npsn >> 16)),
						BTH2_OFFSET,
						0,
						16);
			}
		}
	}
}

static inline void hfi1_make_ruc_bth(struct rvt_qp *qp,
				     struct ib_other_headers *ohdr,
				     u32 bth0, u32 bth1, u32 bth2)
{
	bth1 |= qp->remote_qpn;
	ohdr->bth[0] = cpu_to_be32(bth0);
	ohdr->bth[1] = cpu_to_be32(bth1);
	ohdr->bth[2] = cpu_to_be32(bth2);
}

static inline void hfi1_make_ruc_header_16B(struct rvt_qp *qp,
					    struct ib_other_headers *ohdr,
					    u32 bth0, u32 bth2, int middle,
					    struct hfi1_pkt_state *ps)
{
	struct hfi1_qp_priv *priv = qp->priv;
	struct hfi1_ibport *ibp = ps->ibp;
	struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
	u32 bth1 = 0;
	u32 slid;
	u16 pkey = hfi1_get_pkey(ibp, qp->s_pkey_index);
	u8 l4 = OPA_16B_L4_IB_LOCAL;
	u8 extra_bytes = hfi1_get_16b_padding((qp->s_hdrwords << 2),
					      ps->s_txreq->s_cur_size);
	u32 nwords = SIZE_OF_CRC + ((ps->s_txreq->s_cur_size +
				     extra_bytes + SIZE_OF_LT) >> 2);
	u8 becn = 0;

	if (unlikely(rdma_ah_get_ah_flags(&qp->remote_ah_attr) & IB_AH_GRH) &&
	    hfi1_check_mcast(rdma_ah_get_dlid(&qp->remote_ah_attr))) {
		struct ib_grh *grh;
		struct ib_global_route *grd =
			rdma_ah_retrieve_grh(&qp->remote_ah_attr);
		int hdrwords;

		/*
		 * Ensure OPA GIDs are transformed to IB gids
		 * before creating the GRH.
		 */
		if (grd->sgid_index == OPA_GID_INDEX)
			grd->sgid_index = 0;
		grh = &ps->s_txreq->phdr.hdr.opah.u.l.grh;
		l4 = OPA_16B_L4_IB_GLOBAL;
		hdrwords = qp->s_hdrwords - 4;
		qp->s_hdrwords += hfi1_make_grh(ibp, grh, grd,
						hdrwords, nwords);
		middle = 0;
	}

	if (qp->s_mig_state == IB_MIG_MIGRATED)
		bth1 |= OPA_BTH_MIG_REQ;
	else
		middle = 0;

	if (middle)
		build_ahg(qp, bth2);
	else
		qp->s_flags &= ~RVT_S_AHG_VALID;

	bth0 |= pkey;
	bth0 |= extra_bytes << 20;
	if (qp->s_flags & RVT_S_ECN) {
		qp->s_flags &= ~RVT_S_ECN;
		/* we recently received a FECN, so return a BECN */
		becn = 1;
	}
	hfi1_make_ruc_bth(qp, ohdr, bth0, bth1, bth2);

	if (!ppd->lid)
		slid = be32_to_cpu(OPA_LID_PERMISSIVE);
	else
		slid = ppd->lid |
			(rdma_ah_get_path_bits(&qp->remote_ah_attr) &
			 ((1 << ppd->lmc) - 1));

	hfi1_make_16b_hdr(&ps->s_txreq->phdr.hdr.opah,
			  slid,
			  opa_get_lid(rdma_ah_get_dlid(&qp->remote_ah_attr),
				      16B),
			  (qp->s_hdrwords + nwords) >> 1,
			  pkey, becn, 0, l4, priv->s_sc);
}

static inline void hfi1_make_ruc_header_9B(struct rvt_qp *qp,
					   struct ib_other_headers *ohdr,
					   u32 bth0, u32 bth2, int middle,
					   struct hfi1_pkt_state *ps)
{
	struct hfi1_qp_priv *priv = qp->priv;
	struct hfi1_ibport *ibp = ps->ibp;
	struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
	u32 bth1 = 0;
	u16 pkey = hfi1_get_pkey(ibp, qp->s_pkey_index);
	u16 lrh0 = HFI1_LRH_BTH;
	u16 slid;
	u8 extra_bytes = -ps->s_txreq->s_cur_size & 3;
	u32 nwords = SIZE_OF_CRC + ((ps->s_txreq->s_cur_size +
				     extra_bytes) >> 2);

	if (unlikely(rdma_ah_get_ah_flags(&qp->remote_ah_attr) & IB_AH_GRH)) {
		struct ib_grh *grh = &ps->s_txreq->phdr.hdr.ibh.u.l.grh;
		int hdrwords = qp->s_hdrwords - 2;

		lrh0 = HFI1_LRH_GRH;
		qp->s_hdrwords +=
			hfi1_make_grh(ibp, grh,
				      rdma_ah_read_grh(&qp->remote_ah_attr),
				      hdrwords, nwords);
		middle = 0;
	}
	lrh0 |= (priv->s_sc & 0xf) << 12 |
		(rdma_ah_get_sl(&qp->remote_ah_attr) & 0xf) << 4;

	if (qp->s_mig_state == IB_MIG_MIGRATED)
		bth0 |= IB_BTH_MIG_REQ;
	else
		middle = 0;

	if (middle)
		build_ahg(qp, bth2);
	else
		qp->s_flags &= ~RVT_S_AHG_VALID;

	bth0 |= pkey;
	bth0 |= extra_bytes << 20;
	if (qp->s_flags & RVT_S_ECN) {
		qp->s_flags &= ~RVT_S_ECN;
		/* we recently received a FECN, so return a BECN */
		bth1 |= (IB_BECN_MASK << IB_BECN_SHIFT);
	}
	hfi1_make_ruc_bth(qp, ohdr, bth0, bth1, bth2);

	if (!ppd->lid)
		slid = be16_to_cpu(IB_LID_PERMISSIVE);
	else
		slid = ppd->lid |
			(rdma_ah_get_path_bits(&qp->remote_ah_attr) &
			 ((1 << ppd->lmc) - 1));
	/* use the computed SLID (LMC path bits masked, permissive fallback) */
	hfi1_make_ib_hdr(&ps->s_txreq->phdr.hdr.ibh,
			 lrh0,
			 qp->s_hdrwords + nwords,
			 opa_get_lid(rdma_ah_get_dlid(&qp->remote_ah_attr), 9B),
			 slid);
}

typedef void (*hfi1_make_ruc_hdr)(struct rvt_qp *qp,
				  struct ib_other_headers *ohdr,
				  u32 bth0, u32 bth2, int middle,
				  struct hfi1_pkt_state *ps);

/* We support only two header types - 9B and 16B - for now */
static const hfi1_make_ruc_hdr hfi1_ruc_header_tbl[2] = {
	[HFI1_PKT_TYPE_9B] = &hfi1_make_ruc_header_9B,
	[HFI1_PKT_TYPE_16B] = &hfi1_make_ruc_header_16B
};

void hfi1_make_ruc_header(struct rvt_qp *qp, struct ib_other_headers *ohdr,
			  u32 bth0, u32 bth2, int middle,
			  struct hfi1_pkt_state *ps)
{
	struct hfi1_qp_priv *priv = qp->priv;

	/*
	 * reset s_ahg/AHG fields
	 *
	 * This ensures that the ahgentry/ahgcount
	 * are at a non-AHG default to protect
	 * build_verbs_tx_desc() from using
	 * a stale ahgidx.
	 *
	 * build_ahg() will modify as appropriate
	 * to use the AHG feature.
	 */
	priv->s_ahg->tx_flags = 0;
	priv->s_ahg->ahgcount = 0;
	priv->s_ahg->ahgidx = 0;

	/* Make the appropriate header */
	hfi1_ruc_header_tbl[priv->hdr_type](qp, ohdr, bth0, bth2, middle, ps);
}

/* when sending, force a reschedule every one of these periods */
#define SEND_RESCHED_TIMEOUT (5 * HZ)
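
/**
 * schedule_send_yield - test for a yield required for QP send engine
 * @qp: a pointer to the QP
 * @ps: a pointer to a structure with commonly looked-up values for
 *      the send engine progress
 *
 * This routine checks if the time slice for the QP has expired
 * for RC QPs; if so, an additional work entry is queued.  At this
 * point, other QPs have an opportunity to be scheduled.  It
 * returns true if a yield is required, otherwise false.
 */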
static bool schedule_send_yield(struct rvt_qp *qp,
				struct hfi1_pkt_state *ps)
{
	ps->pkts_sent = true;

	if (unlikely(time_after(jiffies, ps->timeout))) {
		if (!ps->in_thread ||
		    workqueue_congested(ps->cpu, ps->ppd->hfi1_wq)) {
			spin_lock_irqsave(&qp->s_lock, ps->flags);
			qp->s_flags &= ~RVT_S_BUSY;
			hfi1_schedule_send(qp);
			spin_unlock_irqrestore(&qp->s_lock, ps->flags);
			this_cpu_inc(*ps->ppd->dd->send_schedule);
			trace_hfi1_rc_expired_time_slice(qp, true);
			return true;
		}

		cond_resched();
		this_cpu_inc(*ps->ppd->dd->send_schedule);
		ps->timeout = jiffies + ps->timeout_int;
	}

	trace_hfi1_rc_expired_time_slice(qp, false);
	return false;
}

void hfi1_do_send_from_rvt(struct rvt_qp *qp)
{
	hfi1_do_send(qp, false);
}

void _hfi1_do_send(struct work_struct *work)
{
	struct iowait *wait = container_of(work, struct iowait, iowork);
	struct rvt_qp *qp = iowait_to_qp(wait);

	hfi1_do_send(qp, true);
}
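
/**
 * hfi1_do_send - perform a send on a QP
 * @qp: a pointer to the QP
 * @in_thread: true if in a workqueue thread
 *
 * Process entries in the send work queue until credit or queue is
 * exhausted.  Only allow one CPU to send a packet per QP.
 * Otherwise, two threads could send packets out of order.
 */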
void hfi1_do_send(struct rvt_qp *qp, bool in_thread)
{
	struct hfi1_pkt_state ps;
	struct hfi1_qp_priv *priv = qp->priv;
	int (*make_req)(struct rvt_qp *qp, struct hfi1_pkt_state *ps);

	ps.dev = to_idev(qp->ibqp.device);
	ps.ibp = to_iport(qp->ibqp.device, qp->port_num);
	ps.ppd = ppd_from_ibp(ps.ibp);
	ps.in_thread = in_thread;

	trace_hfi1_rc_do_send(qp, in_thread);

	switch (qp->ibqp.qp_type) {
	case IB_QPT_RC:
		if (!loopback && ((rdma_ah_get_dlid(&qp->remote_ah_attr) &
				   ~((1 << ps.ppd->lmc) - 1)) ==
				  ps.ppd->lid)) {
			ruc_loopback(qp);
			return;
		}
		make_req = hfi1_make_rc_req;
		ps.timeout_int = qp->timeout_jiffies;
		break;
	case IB_QPT_UC:
		if (!loopback && ((rdma_ah_get_dlid(&qp->remote_ah_attr) &
				   ~((1 << ps.ppd->lmc) - 1)) ==
				  ps.ppd->lid)) {
			ruc_loopback(qp);
			return;
		}
		make_req = hfi1_make_uc_req;
		ps.timeout_int = SEND_RESCHED_TIMEOUT;
		break;
	default:
		make_req = hfi1_make_ud_req;
		ps.timeout_int = SEND_RESCHED_TIMEOUT;
	}

	spin_lock_irqsave(&qp->s_lock, ps.flags);

	/* Return if we are already busy processing a work request. */
	if (!hfi1_send_ok(qp)) {
		spin_unlock_irqrestore(&qp->s_lock, ps.flags);
		return;
	}

	qp->s_flags |= RVT_S_BUSY;

	ps.timeout_int = ps.timeout_int / 8;
	ps.timeout = jiffies + ps.timeout_int;
	ps.cpu = priv->s_sde ? priv->s_sde->cpu :
			cpumask_first(cpumask_of_node(ps.ppd->dd->node));
	ps.pkts_sent = false;

	/* ensure a pre-built packet is handled */
	ps.s_txreq = get_waiting_verbs_txreq(qp);
	do {
		/* Check for a constructed packet to be sent. */
		if (qp->s_hdrwords != 0) {
			spin_unlock_irqrestore(&qp->s_lock, ps.flags);
			/*
			 * If the packet cannot be sent now, return and
			 * the send engine will be woken up later.
			 */
			if (hfi1_verbs_send(qp, &ps))
				return;
			/* Record that s_ahg is empty. */
			qp->s_hdrwords = 0;
			/* allow other tasks to run */
			if (schedule_send_yield(qp, &ps))
				return;

			spin_lock_irqsave(&qp->s_lock, ps.flags);
		}
	} while (make_req(qp, &ps));
	iowait_starve_clear(ps.pkts_sent, &priv->s_iowait);
	spin_unlock_irqrestore(&qp->s_lock, ps.flags);
}
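
/*
 * This should be called with s_lock held.
 */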
void hfi1_send_complete(struct rvt_qp *qp, struct rvt_swqe *wqe,
			enum ib_wc_status status)
{
	u32 old_last, last;

	if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_OR_FLUSH_SEND))
		return;

	last = qp->s_last;
	old_last = last;
	trace_hfi1_qp_send_completion(qp, wqe, last);
	if (++last >= qp->s_size)
		last = 0;
	trace_hfi1_qp_send_completion(qp, wqe, last);
	qp->s_last = last;
	/* See post_send() */
	barrier();
	rvt_put_swqe(wqe);
	if (qp->ibqp.qp_type == IB_QPT_UD ||
	    qp->ibqp.qp_type == IB_QPT_SMI ||
	    qp->ibqp.qp_type == IB_QPT_GSI)
		atomic_dec(&ibah_to_rvtah(wqe->ud_wr.ah)->refcount);

	rvt_qp_swqe_complete(qp,
			     wqe,
			     ib_hfi1_wc_opcode[wqe->wr.opcode],
			     status);

	if (qp->s_acked == old_last)
		qp->s_acked = last;
	if (qp->s_cur == old_last)
		qp->s_cur = last;
	if (qp->s_tail == old_last)
		qp->s_tail = last;
	if (qp->state == IB_QPS_SQD && last == qp->s_cur)
		qp->s_draining = 0;
}