#include <linux/io.h>

#include "qib.h"

/* cut down ridiculously long IB macro names */
#define OP(x) IB_OPCODE_RC_##x

42static u32 restart_sge(struct rvt_sge_state *ss, struct rvt_swqe *wqe,
43 u32 psn, u32 pmtu)
44{
45 u32 len;
46
47 len = ((psn - wqe->psn) & QIB_PSN_MASK) * pmtu;
48 ss->sge = wqe->sg_list[0];
49 ss->sg_list = wqe->sg_list + 1;
50 ss->num_sge = wqe->wr.num_sge;
51 ss->total_len = wqe->length;
52 rvt_skip_sge(ss, len, false);
53 return wqe->length - len;
54}
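
/**
 * qib_make_rc_ack - construct a response packet (ACK, NAK, or RDMA read)
 * @dev: the device for this QP
 * @qp: a pointer to the QP
 * @ohdr: a pointer to the IB header being constructed
 * @pmtu: the path MTU
 *
 * Return 1 if constructed; otherwise, return 0.
 * Note the QP s_lock must be held.
 */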
67static int qib_make_rc_ack(struct qib_ibdev *dev, struct rvt_qp *qp,
68 struct ib_other_headers *ohdr, u32 pmtu)
69{
70 struct rvt_ack_entry *e;
71 u32 hwords;
72 u32 len;
73 u32 bth0;
74 u32 bth2;
75
        /* Don't send an ACK if we aren't supposed to. */
        if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK))
                goto bail;

        /* header size in 32-bit words LRH+BTH = (8+12)/4. */
        hwords = 5;

83 switch (qp->s_ack_state) {
84 case OP(RDMA_READ_RESPONSE_LAST):
85 case OP(RDMA_READ_RESPONSE_ONLY):
86 e = &qp->s_ack_queue[qp->s_tail_ack_queue];
87 if (e->rdma_sge.mr) {
88 rvt_put_mr(e->rdma_sge.mr);
89 e->rdma_sge.mr = NULL;
90 }
                /* FALLTHROUGH */
        case OP(ATOMIC_ACKNOWLEDGE):
                /*
                 * We can increment the tail pointer now that the last
                 * response has been sent instead of only being
                 * constructed.
                 */
                if (++qp->s_tail_ack_queue > QIB_MAX_RDMA_ATOMIC)
                        qp->s_tail_ack_queue = 0;
                /* FALLTHROUGH */
101 case OP(SEND_ONLY):
102 case OP(ACKNOWLEDGE):
                /* Check for no next entry in the queue. */
                if (qp->r_head_ack_queue == qp->s_tail_ack_queue) {
105 if (qp->s_flags & RVT_S_ACK_PENDING)
106 goto normal;
107 goto bail;
108 }
109
110 e = &qp->s_ack_queue[qp->s_tail_ack_queue];
111 if (e->opcode == OP(RDMA_READ_REQUEST)) {
                        /*
                         * If a RDMA read response is being resent and
                         * we haven't seen the duplicate request yet,
                         * then stop sending the remaining responses the
                         * responder has seen until the requester resends it.
                         */
                        len = e->rdma_sge.sge_length;
                        if (len && !e->rdma_sge.mr) {
                                qp->s_tail_ack_queue = qp->r_head_ack_queue;
                                goto bail;
                        }
                        /* Copy SGE state in case we need to resend */
124 qp->s_rdma_mr = e->rdma_sge.mr;
125 if (qp->s_rdma_mr)
126 rvt_get_mr(qp->s_rdma_mr);
127 qp->s_ack_rdma_sge.sge = e->rdma_sge;
128 qp->s_ack_rdma_sge.num_sge = 1;
129 qp->s_cur_sge = &qp->s_ack_rdma_sge;
130 if (len > pmtu) {
131 len = pmtu;
132 qp->s_ack_state = OP(RDMA_READ_RESPONSE_FIRST);
133 } else {
134 qp->s_ack_state = OP(RDMA_READ_RESPONSE_ONLY);
135 e->sent = 1;
136 }
137 ohdr->u.aeth = rvt_compute_aeth(qp);
138 hwords++;
139 qp->s_ack_rdma_psn = e->psn;
140 bth2 = qp->s_ack_rdma_psn++ & QIB_PSN_MASK;
141 } else {
142
143 qp->s_cur_sge = NULL;
144 len = 0;
145 qp->s_ack_state = OP(ATOMIC_ACKNOWLEDGE);
146 ohdr->u.at.aeth = rvt_compute_aeth(qp);
147 ib_u64_put(e->atomic_data, &ohdr->u.at.atomic_ack_eth);
148 hwords += sizeof(ohdr->u.at) / sizeof(u32);
149 bth2 = e->psn & QIB_PSN_MASK;
150 e->sent = 1;
151 }
152 bth0 = qp->s_ack_state << 24;
153 break;
154
155 case OP(RDMA_READ_RESPONSE_FIRST):
156 qp->s_ack_state = OP(RDMA_READ_RESPONSE_MIDDLE);
157
158 case OP(RDMA_READ_RESPONSE_MIDDLE):
159 qp->s_cur_sge = &qp->s_ack_rdma_sge;
160 qp->s_rdma_mr = qp->s_ack_rdma_sge.sge.mr;
161 if (qp->s_rdma_mr)
162 rvt_get_mr(qp->s_rdma_mr);
163 len = qp->s_ack_rdma_sge.sge.sge_length;
164 if (len > pmtu)
165 len = pmtu;
166 else {
167 ohdr->u.aeth = rvt_compute_aeth(qp);
168 hwords++;
169 qp->s_ack_state = OP(RDMA_READ_RESPONSE_LAST);
170 e = &qp->s_ack_queue[qp->s_tail_ack_queue];
171 e->sent = 1;
172 }
173 bth0 = qp->s_ack_state << 24;
174 bth2 = qp->s_ack_rdma_psn++ & QIB_PSN_MASK;
175 break;
176
        default:
normal:
                /*
                 * Send a regular ACK.
                 * Set the s_ack_state so we wait until after sending
                 * the ACK which prevents s_ack_psn changing which
                 * crashes the QP.
                 */
                qp->s_ack_state = OP(SEND_ONLY);
186 qp->s_flags &= ~RVT_S_ACK_PENDING;
187 qp->s_cur_sge = NULL;
188 if (qp->s_nak_state)
189 ohdr->u.aeth =
190 cpu_to_be32((qp->r_msn & IB_MSN_MASK) |
191 (qp->s_nak_state <<
192 IB_AETH_CREDIT_SHIFT));
193 else
194 ohdr->u.aeth = rvt_compute_aeth(qp);
195 hwords++;
196 len = 0;
197 bth0 = OP(ACKNOWLEDGE) << 24;
198 bth2 = qp->s_ack_psn & QIB_PSN_MASK;
199 }
200 qp->s_rdma_ack_cnt++;
201 qp->s_hdrwords = hwords;
202 qp->s_cur_size = len;
203 qib_make_ruc_header(qp, ohdr, bth0, bth2);
204 return 1;
205
206bail:
207 qp->s_ack_state = OP(ACKNOWLEDGE);
208 qp->s_flags &= ~(RVT_S_RESP_PENDING | RVT_S_ACK_PENDING);
209 return 0;
210}
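
/**
 * qib_make_rc_req - construct a request packet (SEND, RDMA r/w, ATOMIC)
 * @qp: a pointer to the QP
 * @flags: unused
 *
 * Assumes the s_lock is held.
 *
 * Return 1 if constructed; otherwise, return 0.
 */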
220int qib_make_rc_req(struct rvt_qp *qp, unsigned long *flags)
221{
222 struct qib_qp_priv *priv = qp->priv;
223 struct qib_ibdev *dev = to_idev(qp->ibqp.device);
224 struct ib_other_headers *ohdr;
225 struct rvt_sge_state *ss;
226 struct rvt_swqe *wqe;
227 u32 hwords;
228 u32 len;
229 u32 bth0;
230 u32 bth2;
231 u32 pmtu = qp->pmtu;
232 char newreq;
233 int ret = 0;
234 int delta;
235
236 ohdr = &priv->s_hdr->u.oth;
237 if (rdma_ah_get_ah_flags(&qp->remote_ah_attr) & IB_AH_GRH)
238 ohdr = &priv->s_hdr->u.l.oth;
239
        /* Sending responses has higher priority over sending requests. */
        if ((qp->s_flags & RVT_S_RESP_PENDING) &&
            qib_make_rc_ack(dev, qp, ohdr, pmtu))
                goto done;
244
245 if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_SEND_OK)) {
246 if (!(ib_rvt_state_ops[qp->state] & RVT_FLUSH_SEND))
247 goto bail;
                /* We are in the error state, flush the work request. */
                smp_read_barrier_depends(); /* see post_one_send() */
                if (qp->s_last == READ_ONCE(qp->s_head))
                        goto bail;
                /* If DMAs are in progress, we can't flush immediately. */
253 if (atomic_read(&priv->s_dma_busy)) {
254 qp->s_flags |= RVT_S_WAIT_DMA;
255 goto bail;
256 }
257 wqe = rvt_get_swqe_ptr(qp, qp->s_last);
258 qib_send_complete(qp, wqe, qp->s_last != qp->s_acked ?
259 IB_WC_SUCCESS : IB_WC_WR_FLUSH_ERR);
260
261 goto done;
262 }
263
264 if (qp->s_flags & (RVT_S_WAIT_RNR | RVT_S_WAIT_ACK))
265 goto bail;
266
267 if (qib_cmp24(qp->s_psn, qp->s_sending_hpsn) <= 0) {
268 if (qib_cmp24(qp->s_sending_psn, qp->s_sending_hpsn) <= 0) {
269 qp->s_flags |= RVT_S_WAIT_PSN;
270 goto bail;
271 }
272 qp->s_sending_psn = qp->s_psn;
273 qp->s_sending_hpsn = qp->s_psn - 1;
274 }

        /* header size in 32-bit words LRH+BTH = (8+12)/4. */
        hwords = 5;
        bth0 = 0;

        /* Send a request. */
        wqe = rvt_get_swqe_ptr(qp, qp->s_cur);
282 switch (qp->s_state) {
283 default:
284 if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_NEXT_SEND_OK))
285 goto bail;
                /*
                 * Resend an old request or start a new one.
                 *
                 * We keep track of the current SWQE so that
                 * we don't reset the "furthest progress" state
                 * if we need to back up.
                 */
                newreq = 0;
                if (qp->s_cur == qp->s_tail) {
                        /* Check if send work queue is empty. */
                        smp_read_barrier_depends(); /* see post_one_send() */
                        if (qp->s_tail == READ_ONCE(qp->s_head))
                                goto bail;
                        /*
                         * If a fence is requested, wait for previous
                         * RDMA read and atomic operations to finish.
                         */
303 if ((wqe->wr.send_flags & IB_SEND_FENCE) &&
304 qp->s_num_rd_atomic) {
305 qp->s_flags |= RVT_S_WAIT_FENCE;
306 goto bail;
307 }
308 newreq = 1;
309 qp->s_psn = wqe->psn;
310 }
                /*
                 * Note that we have to be careful not to modify the
                 * original work request since we may need to resend
                 * it.
                 */
                len = wqe->length;
317 ss = &qp->s_sge;
318 bth2 = qp->s_psn & QIB_PSN_MASK;
319 switch (wqe->wr.opcode) {
320 case IB_WR_SEND:
321 case IB_WR_SEND_WITH_IMM:
322
323 if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT) &&
324 rvt_cmp_msn(wqe->ssn, qp->s_lsn + 1) > 0) {
325 qp->s_flags |= RVT_S_WAIT_SSN_CREDIT;
326 goto bail;
327 }
328 if (len > pmtu) {
329 qp->s_state = OP(SEND_FIRST);
330 len = pmtu;
331 break;
332 }
333 if (wqe->wr.opcode == IB_WR_SEND)
334 qp->s_state = OP(SEND_ONLY);
335 else {
336 qp->s_state = OP(SEND_ONLY_WITH_IMMEDIATE);
337
338 ohdr->u.imm_data = wqe->wr.ex.imm_data;
339 hwords += 1;
340 }
341 if (wqe->wr.send_flags & IB_SEND_SOLICITED)
342 bth0 |= IB_BTH_SOLICITED;
343 bth2 |= IB_BTH_REQ_ACK;
344 if (++qp->s_cur == qp->s_size)
345 qp->s_cur = 0;
346 break;
347
348 case IB_WR_RDMA_WRITE:
349 if (newreq && !(qp->s_flags & RVT_S_UNLIMITED_CREDIT))
350 qp->s_lsn++;
351 goto no_flow_control;
352 case IB_WR_RDMA_WRITE_WITH_IMM:
353
354 if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT) &&
355 rvt_cmp_msn(wqe->ssn, qp->s_lsn + 1) > 0) {
356 qp->s_flags |= RVT_S_WAIT_SSN_CREDIT;
357 goto bail;
358 }
359no_flow_control:
360 ohdr->u.rc.reth.vaddr =
361 cpu_to_be64(wqe->rdma_wr.remote_addr);
362 ohdr->u.rc.reth.rkey =
363 cpu_to_be32(wqe->rdma_wr.rkey);
364 ohdr->u.rc.reth.length = cpu_to_be32(len);
365 hwords += sizeof(struct ib_reth) / sizeof(u32);
366 if (len > pmtu) {
367 qp->s_state = OP(RDMA_WRITE_FIRST);
368 len = pmtu;
369 break;
370 }
371 if (wqe->rdma_wr.wr.opcode == IB_WR_RDMA_WRITE)
372 qp->s_state = OP(RDMA_WRITE_ONLY);
373 else {
374 qp->s_state = OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE);
375
376 ohdr->u.rc.imm_data =
377 wqe->rdma_wr.wr.ex.imm_data;
378 hwords += 1;
379 if (wqe->rdma_wr.wr.send_flags & IB_SEND_SOLICITED)
380 bth0 |= IB_BTH_SOLICITED;
381 }
382 bth2 |= IB_BTH_REQ_ACK;
383 if (++qp->s_cur == qp->s_size)
384 qp->s_cur = 0;
385 break;
386
387 case IB_WR_RDMA_READ:
                        /*
                         * Don't allow more operations to be started
                         * than the QP limits allow.
                         */
                        if (newreq) {
393 if (qp->s_num_rd_atomic >=
394 qp->s_max_rd_atomic) {
395 qp->s_flags |= RVT_S_WAIT_RDMAR;
396 goto bail;
397 }
398 qp->s_num_rd_atomic++;
399 if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT))
400 qp->s_lsn++;
401 }
402
403 ohdr->u.rc.reth.vaddr =
404 cpu_to_be64(wqe->rdma_wr.remote_addr);
405 ohdr->u.rc.reth.rkey =
406 cpu_to_be32(wqe->rdma_wr.rkey);
407 ohdr->u.rc.reth.length = cpu_to_be32(len);
408 qp->s_state = OP(RDMA_READ_REQUEST);
409 hwords += sizeof(ohdr->u.rc.reth) / sizeof(u32);
410 ss = NULL;
411 len = 0;
412 bth2 |= IB_BTH_REQ_ACK;
413 if (++qp->s_cur == qp->s_size)
414 qp->s_cur = 0;
415 break;
416
417 case IB_WR_ATOMIC_CMP_AND_SWP:
418 case IB_WR_ATOMIC_FETCH_AND_ADD:
                        /*
                         * Don't allow more operations to be started
                         * than the QP limits allow.
                         */
                        if (newreq) {
424 if (qp->s_num_rd_atomic >=
425 qp->s_max_rd_atomic) {
426 qp->s_flags |= RVT_S_WAIT_RDMAR;
427 goto bail;
428 }
429 qp->s_num_rd_atomic++;
430 if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT))
431 qp->s_lsn++;
432 }
                        if (wqe->atomic_wr.wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP) {
                                qp->s_state = OP(COMPARE_SWAP);
                                put_ib_ateth_swap(wqe->atomic_wr.swap,
                                                  &ohdr->u.atomic_eth);
                                put_ib_ateth_compare(wqe->atomic_wr.compare_add,
                                                     &ohdr->u.atomic_eth);
                        } else {
                                qp->s_state = OP(FETCH_ADD);
                                put_ib_ateth_swap(wqe->atomic_wr.compare_add,
                                                  &ohdr->u.atomic_eth);
                                put_ib_ateth_compare(0, &ohdr->u.atomic_eth);
                        }
445 put_ib_ateth_vaddr(wqe->atomic_wr.remote_addr,
446 &ohdr->u.atomic_eth);
447 ohdr->u.atomic_eth.rkey = cpu_to_be32(
448 wqe->atomic_wr.rkey);
449 hwords += sizeof(struct ib_atomic_eth) / sizeof(u32);
450 ss = NULL;
451 len = 0;
452 bth2 |= IB_BTH_REQ_ACK;
453 if (++qp->s_cur == qp->s_size)
454 qp->s_cur = 0;
455 break;
456
457 default:
458 goto bail;
459 }
460 qp->s_sge.sge = wqe->sg_list[0];
461 qp->s_sge.sg_list = wqe->sg_list + 1;
462 qp->s_sge.num_sge = wqe->wr.num_sge;
463 qp->s_sge.total_len = wqe->length;
464 qp->s_len = wqe->length;
465 if (newreq) {
466 qp->s_tail++;
467 if (qp->s_tail >= qp->s_size)
468 qp->s_tail = 0;
469 }
470 if (wqe->wr.opcode == IB_WR_RDMA_READ)
471 qp->s_psn = wqe->lpsn + 1;
472 else
473 qp->s_psn++;
474 break;
475
        case OP(RDMA_READ_RESPONSE_FIRST):
                /*
                 * qp->s_state is normally set to the opcode of the
                 * last packet constructed for new requests and therefore
                 * is never set to RDMA read response.
                 * RDMA_READ_RESPONSE_FIRST is used by the ACK processing
                 * thread to indicate a SEND needs to be restarted from an
                 * earlier PSN without interfering with the sending thread.
                 * See qib_restart_rc().
                 */
                qp->s_len = restart_sge(&qp->s_sge, wqe, qp->s_psn, pmtu);
                /* FALLTHROUGH */
        case OP(SEND_FIRST):
                qp->s_state = OP(SEND_MIDDLE);
                /* FALLTHROUGH */
491 case OP(SEND_MIDDLE):
492 bth2 = qp->s_psn++ & QIB_PSN_MASK;
493 ss = &qp->s_sge;
494 len = qp->s_len;
495 if (len > pmtu) {
496 len = pmtu;
497 break;
498 }
499 if (wqe->wr.opcode == IB_WR_SEND)
500 qp->s_state = OP(SEND_LAST);
501 else {
502 qp->s_state = OP(SEND_LAST_WITH_IMMEDIATE);
503
504 ohdr->u.imm_data = wqe->wr.ex.imm_data;
505 hwords += 1;
506 }
507 if (wqe->wr.send_flags & IB_SEND_SOLICITED)
508 bth0 |= IB_BTH_SOLICITED;
509 bth2 |= IB_BTH_REQ_ACK;
510 qp->s_cur++;
511 if (qp->s_cur >= qp->s_size)
512 qp->s_cur = 0;
513 break;
514
        case OP(RDMA_READ_RESPONSE_LAST):
                /*
                 * qp->s_state is normally set to the opcode of the
                 * last packet constructed for new requests and therefore
                 * is never set to RDMA read response.
                 * RDMA_READ_RESPONSE_LAST is used by the ACK processing
                 * thread to indicate a RDMA write needs to be restarted
                 * from an earlier PSN without interfering with the
                 * sending thread.
                 * See qib_restart_rc().
                 */
                qp->s_len = restart_sge(&qp->s_sge, wqe, qp->s_psn, pmtu);
                /* FALLTHROUGH */
        case OP(RDMA_WRITE_FIRST):
                qp->s_state = OP(RDMA_WRITE_MIDDLE);
                /* FALLTHROUGH */
530 case OP(RDMA_WRITE_MIDDLE):
531 bth2 = qp->s_psn++ & QIB_PSN_MASK;
532 ss = &qp->s_sge;
533 len = qp->s_len;
534 if (len > pmtu) {
535 len = pmtu;
536 break;
537 }
538 if (wqe->wr.opcode == IB_WR_RDMA_WRITE)
539 qp->s_state = OP(RDMA_WRITE_LAST);
540 else {
541 qp->s_state = OP(RDMA_WRITE_LAST_WITH_IMMEDIATE);
542
543 ohdr->u.imm_data = wqe->wr.ex.imm_data;
544 hwords += 1;
545 if (wqe->wr.send_flags & IB_SEND_SOLICITED)
546 bth0 |= IB_BTH_SOLICITED;
547 }
548 bth2 |= IB_BTH_REQ_ACK;
549 qp->s_cur++;
550 if (qp->s_cur >= qp->s_size)
551 qp->s_cur = 0;
552 break;
553
        case OP(RDMA_READ_RESPONSE_MIDDLE):
                /*
                 * qp->s_state is normally set to the opcode of the
                 * last packet constructed for new requests and therefore
                 * is never set to RDMA read response.
                 * RDMA_READ_RESPONSE_MIDDLE is used by the ACK processing
                 * thread to indicate a RDMA read needs to be restarted
                 * from an earlier PSN without interfering with the
                 * sending thread.
                 * See qib_restart_rc().
                 */
                len = ((qp->s_psn - wqe->psn) & QIB_PSN_MASK) * pmtu;
565 ohdr->u.rc.reth.vaddr =
566 cpu_to_be64(wqe->rdma_wr.remote_addr + len);
567 ohdr->u.rc.reth.rkey =
568 cpu_to_be32(wqe->rdma_wr.rkey);
569 ohdr->u.rc.reth.length = cpu_to_be32(wqe->length - len);
570 qp->s_state = OP(RDMA_READ_REQUEST);
571 hwords += sizeof(ohdr->u.rc.reth) / sizeof(u32);
572 bth2 = (qp->s_psn & QIB_PSN_MASK) | IB_BTH_REQ_ACK;
573 qp->s_psn = wqe->lpsn + 1;
574 ss = NULL;
575 len = 0;
576 qp->s_cur++;
577 if (qp->s_cur == qp->s_size)
578 qp->s_cur = 0;
579 break;
580 }
581 qp->s_sending_hpsn = bth2;
582 delta = (((int) bth2 - (int) wqe->psn) << 8) >> 8;
583 if (delta && delta % QIB_PSN_CREDIT == 0)
584 bth2 |= IB_BTH_REQ_ACK;
585 if (qp->s_flags & RVT_S_SEND_ONE) {
586 qp->s_flags &= ~RVT_S_SEND_ONE;
587 qp->s_flags |= RVT_S_WAIT_ACK;
588 bth2 |= IB_BTH_REQ_ACK;
589 }
590 qp->s_len -= len;
591 qp->s_hdrwords = hwords;
592 qp->s_cur_sge = ss;
593 qp->s_cur_size = len;
594 qib_make_ruc_header(qp, ohdr, bth0 | (qp->s_state << 24), bth2);
595done:
596 return 1;
597bail:
598 qp->s_flags &= ~RVT_S_BUSY;
599 return ret;
600}
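
/**
 * qib_send_rc_ack - Construct an ACK packet and send it
 * @qp: a pointer to the QP
 *
 * This is called from qib_rc_rcv().
 * Note that RDMA reads and atomics are handled in the
 * send side QP state and tasklet.
 */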
610void qib_send_rc_ack(struct rvt_qp *qp)
611{
612 struct qib_devdata *dd = dd_from_ibdev(qp->ibqp.device);
613 struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
614 struct qib_pportdata *ppd = ppd_from_ibp(ibp);
615 u64 pbc;
616 u16 lrh0;
617 u32 bth0;
618 u32 hwords;
619 u32 pbufn;
620 u32 __iomem *piobuf;
621 struct ib_header hdr;
622 struct ib_other_headers *ohdr;
623 u32 control;
624 unsigned long flags;
625
626 spin_lock_irqsave(&qp->s_lock, flags);
627
628 if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK))
629 goto unlock;
630
        /* Don't send ACK or NAK if a RDMA read or atomic is pending. */
        if ((qp->s_flags & RVT_S_RESP_PENDING) || qp->s_rdma_ack_cnt)
                goto queue_ack;

        /* Construct the header with s_lock held so APM doesn't change it. */
        ohdr = &hdr.u.oth;
        lrh0 = QIB_LRH_BTH;
        /* header size in 32-bit words LRH+BTH+AETH = (8+12+4)/4. */
        hwords = 6;
640 if (unlikely(rdma_ah_get_ah_flags(&qp->remote_ah_attr) &
641 IB_AH_GRH)) {
642 hwords += qib_make_grh(ibp, &hdr.u.l.grh,
643 rdma_ah_read_grh(&qp->remote_ah_attr),
644 hwords, 0);
645 ohdr = &hdr.u.l.oth;
646 lrh0 = QIB_LRH_GRH;
647 }
648
649 bth0 = qib_get_pkey(ibp, qp->s_pkey_index) | (OP(ACKNOWLEDGE) << 24);
650 if (qp->s_mig_state == IB_MIG_MIGRATED)
651 bth0 |= IB_BTH_MIG_REQ;
652 if (qp->r_nak_state)
653 ohdr->u.aeth = cpu_to_be32((qp->r_msn & IB_MSN_MASK) |
654 (qp->r_nak_state <<
655 IB_AETH_CREDIT_SHIFT));
656 else
657 ohdr->u.aeth = rvt_compute_aeth(qp);
658 lrh0 |= ibp->sl_to_vl[rdma_ah_get_sl(&qp->remote_ah_attr)] << 12 |
659 rdma_ah_get_sl(&qp->remote_ah_attr) << 4;
660 hdr.lrh[0] = cpu_to_be16(lrh0);
661 hdr.lrh[1] = cpu_to_be16(rdma_ah_get_dlid(&qp->remote_ah_attr));
662 hdr.lrh[2] = cpu_to_be16(hwords + SIZE_OF_CRC);
663 hdr.lrh[3] = cpu_to_be16(ppd->lid |
664 rdma_ah_get_path_bits(&qp->remote_ah_attr));
665 ohdr->bth[0] = cpu_to_be32(bth0);
666 ohdr->bth[1] = cpu_to_be32(qp->remote_qpn);
667 ohdr->bth[2] = cpu_to_be32(qp->r_ack_psn & QIB_PSN_MASK);
668
669 spin_unlock_irqrestore(&qp->s_lock, flags);

        /* Don't try to send ACKs if the link isn't ACTIVE */
        if (!(ppd->lflags & QIBL_LINKACTIVE))
673 goto done;
674
675 control = dd->f_setpbc_control(ppd, hwords + SIZE_OF_CRC,
676 qp->s_srate, lrh0 >> 12);
677
678 pbc = ((u64) control << 32) | (hwords + 1);
679
680 piobuf = dd->f_getsendbuf(ppd, pbc, &pbufn);
681 if (!piobuf) {
                /*
                 * We are out of PIO buffers at the moment.
                 * Pass responsibility for sending the ACK to the
                 * send tasklet so that when a PIO buffer becomes
                 * available, the ACK is sent ahead of other outgoing
                 * packets.
                 */
689 spin_lock_irqsave(&qp->s_lock, flags);
690 goto queue_ack;
691 }

        /*
         * Write the pbc.
         * We have to flush after the PBC for correctness on some cpus
         * or WC buffer can be written out of order.
         */
        writeq(pbc, piobuf);
699
700 if (dd->flags & QIB_PIO_FLUSH_WC) {
701 u32 *hdrp = (u32 *) &hdr;
702
703 qib_flush_wc();
704 qib_pio_copy(piobuf + 2, hdrp, hwords - 1);
705 qib_flush_wc();
706 __raw_writel(hdrp[hwords - 1], piobuf + hwords + 1);
707 } else
708 qib_pio_copy(piobuf + 2, (u32 *) &hdr, hwords);
709
710 if (dd->flags & QIB_USE_SPCL_TRIG) {
711 u32 spcl_off = (pbufn >= dd->piobcnt2k) ? 2047 : 1023;
712
713 qib_flush_wc();
714 __raw_writel(0xaebecede, piobuf + spcl_off);
715 }
716
717 qib_flush_wc();
718 qib_sendbuf_done(dd, pbufn);
719
720 this_cpu_inc(ibp->pmastats->n_unicast_xmit);
721 goto done;
722
723queue_ack:
724 if (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK) {
725 this_cpu_inc(*ibp->rvp.rc_qacks);
726 qp->s_flags |= RVT_S_ACK_PENDING | RVT_S_RESP_PENDING;
727 qp->s_nak_state = qp->r_nak_state;
728 qp->s_ack_psn = qp->r_ack_psn;
729
730
731 qib_schedule_send(qp);
732 }
733unlock:
734 spin_unlock_irqrestore(&qp->s_lock, flags);
735done:
736 return;
737}
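
/**
 * reset_psn - reset the QP state to send starting from PSN
 * @qp: the QP
 * @psn: the packet sequence number to restart at
 *
 * This is called from do_rc_ack() and qib_restart_rc() with the
 * QP s_lock held.
 */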
748static void reset_psn(struct rvt_qp *qp, u32 psn)
749{
750 u32 n = qp->s_acked;
751 struct rvt_swqe *wqe = rvt_get_swqe_ptr(qp, n);
752 u32 opcode;
753
754 qp->s_cur = n;

        /*
         * If we are starting the request from the beginning,
         * let the normal send code handle initialization.
         */
        if (qib_cmp24(psn, wqe->psn) <= 0) {
                qp->s_state = OP(SEND_LAST);
                goto done;
        }
764
765
766 opcode = wqe->wr.opcode;
767 for (;;) {
768 int diff;
769
770 if (++n == qp->s_size)
771 n = 0;
772 if (n == qp->s_tail)
773 break;
774 wqe = rvt_get_swqe_ptr(qp, n);
775 diff = qib_cmp24(psn, wqe->psn);
776 if (diff < 0)
777 break;
778 qp->s_cur = n;
779
780
781
782
783 if (diff == 0) {
784 qp->s_state = OP(SEND_LAST);
785 goto done;
786 }
787 opcode = wqe->wr.opcode;
788 }

        /*
         * Set the state to restart in the middle of a request.
         * Don't change the s_sge, s_cur_sge, or s_cur_size.
         * See qib_make_rc_req().
         */
        switch (opcode) {
796 case IB_WR_SEND:
797 case IB_WR_SEND_WITH_IMM:
798 qp->s_state = OP(RDMA_READ_RESPONSE_FIRST);
799 break;
800
801 case IB_WR_RDMA_WRITE:
802 case IB_WR_RDMA_WRITE_WITH_IMM:
803 qp->s_state = OP(RDMA_READ_RESPONSE_LAST);
804 break;
805
806 case IB_WR_RDMA_READ:
807 qp->s_state = OP(RDMA_READ_RESPONSE_MIDDLE);
808 break;
809
        default:
                /*
                 * This case shouldn't happen since there is only
                 * one PSN per request.
                 */
                qp->s_state = OP(SEND_LAST);
816 }
817done:
818 qp->s_psn = psn;

        /*
         * Set RVT_S_WAIT_PSN as qib_rc_send_complete() may start the
         * timer asynchronously before the send tasklet can get scheduled.
         * Doing it in qib_make_rc_req() is too late.
         */
        if ((qib_cmp24(qp->s_psn, qp->s_sending_hpsn) <= 0) &&
825 (qib_cmp24(qp->s_sending_psn, qp->s_sending_hpsn) <= 0))
826 qp->s_flags |= RVT_S_WAIT_PSN;
827}
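
/*
 * Back up requester to resend the last un-ACKed request.
 * The QP r_lock and s_lock should be held and interrupts disabled.
 */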
833void qib_restart_rc(struct rvt_qp *qp, u32 psn, int wait)
834{
835 struct rvt_swqe *wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
836 struct qib_ibport *ibp;
837
838 if (qp->s_retry == 0) {
839 if (qp->s_mig_state == IB_MIG_ARMED) {
840 qib_migrate_qp(qp);
841 qp->s_retry = qp->s_retry_cnt;
842 } else if (qp->s_last == qp->s_acked) {
843 qib_send_complete(qp, wqe, IB_WC_RETRY_EXC_ERR);
844 rvt_error_qp(qp, IB_WC_WR_FLUSH_ERR);
845 return;
846 } else
847 return;
848 } else
849 qp->s_retry--;
850
851 ibp = to_iport(qp->ibqp.device, qp->port_num);
852 if (wqe->wr.opcode == IB_WR_RDMA_READ)
853 ibp->rvp.n_rc_resends++;
854 else
855 ibp->rvp.n_rc_resends += (qp->s_psn - psn) & QIB_PSN_MASK;
856
857 qp->s_flags &= ~(RVT_S_WAIT_FENCE | RVT_S_WAIT_RDMAR |
858 RVT_S_WAIT_SSN_CREDIT | RVT_S_WAIT_PSN |
859 RVT_S_WAIT_ACK);
860 if (wait)
861 qp->s_flags |= RVT_S_SEND_ONE;
862 reset_psn(qp, psn);
863}
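
/*
 * Set qp->s_sending_psn to the next PSN after the given one.
 * This would be psn+1 except when RDMA reads are present.
 */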
869static void reset_sending_psn(struct rvt_qp *qp, u32 psn)
870{
871 struct rvt_swqe *wqe;
872 u32 n = qp->s_last;
873
874
875 for (;;) {
876 wqe = rvt_get_swqe_ptr(qp, n);
877 if (qib_cmp24(psn, wqe->lpsn) <= 0) {
878 if (wqe->wr.opcode == IB_WR_RDMA_READ)
879 qp->s_sending_psn = wqe->lpsn + 1;
880 else
881 qp->s_sending_psn = psn + 1;
882 break;
883 }
884 if (++n == qp->s_size)
885 n = 0;
886 if (n == qp->s_tail)
887 break;
888 }
889}
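
/*
 * This should be called with the QP s_lock held and interrupts disabled.
 */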
894void qib_rc_send_complete(struct rvt_qp *qp, struct ib_header *hdr)
895{
896 struct ib_other_headers *ohdr;
897 struct rvt_swqe *wqe;
898 u32 opcode;
899 u32 psn;
900
901 if (!(ib_rvt_state_ops[qp->state] & RVT_SEND_OR_FLUSH_OR_RECV_OK))
902 return;

        /* Find out where the BTH is */
        if ((be16_to_cpu(hdr->lrh[0]) & 3) == QIB_LRH_BTH)
906 ohdr = &hdr->u.oth;
907 else
908 ohdr = &hdr->u.l.oth;
909
910 opcode = be32_to_cpu(ohdr->bth[0]) >> 24;
911 if (opcode >= OP(RDMA_READ_RESPONSE_FIRST) &&
912 opcode <= OP(ATOMIC_ACKNOWLEDGE)) {
913 WARN_ON(!qp->s_rdma_ack_cnt);
914 qp->s_rdma_ack_cnt--;
915 return;
916 }
917
918 psn = be32_to_cpu(ohdr->bth[2]);
919 reset_sending_psn(qp, psn);

        /*
         * Start timer after a packet requesting an ACK has been sent and
         * there are still requests that haven't been acked.
         */
        if ((psn & IB_BTH_REQ_ACK) && qp->s_acked != qp->s_tail &&
            !(qp->s_flags & (RVT_S_TIMER | RVT_S_WAIT_RNR | RVT_S_WAIT_PSN)) &&
            (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK))
                rvt_add_retry_timer(qp);
929
930 while (qp->s_last != qp->s_acked) {
931 u32 s_last;
932
933 wqe = rvt_get_swqe_ptr(qp, qp->s_last);
934 if (qib_cmp24(wqe->lpsn, qp->s_sending_psn) >= 0 &&
935 qib_cmp24(qp->s_sending_psn, qp->s_sending_hpsn) <= 0)
936 break;
937 s_last = qp->s_last;
938 if (++s_last >= qp->s_size)
939 s_last = 0;
940 qp->s_last = s_last;
941
942 barrier();
943 rvt_put_swqe(wqe);
944 rvt_qp_swqe_complete(qp,
945 wqe,
946 ib_qib_wc_opcode[wqe->wr.opcode],
947 IB_WC_SUCCESS);
948 }

        /*
         * If we were waiting for sends to complete before resending,
         * and they are now complete, restart sending.
         */
        if (qp->s_flags & RVT_S_WAIT_PSN &&
954 qib_cmp24(qp->s_sending_psn, qp->s_sending_hpsn) > 0) {
955 qp->s_flags &= ~RVT_S_WAIT_PSN;
956 qp->s_sending_psn = qp->s_psn;
957 qp->s_sending_hpsn = qp->s_psn - 1;
958 qib_schedule_send(qp);
959 }
960}
961
962static inline void update_last_psn(struct rvt_qp *qp, u32 psn)
963{
964 qp->s_last_psn = psn;
965}
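
/*
 * Generate a SWQE completion.
 * This is similar to qib_send_complete but has to check to be sure
 * that the SGEs are not being referenced if the SWQE is being resent.
 */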
972static struct rvt_swqe *do_rc_completion(struct rvt_qp *qp,
973 struct rvt_swqe *wqe,
974 struct qib_ibport *ibp)
975{
        /*
         * Don't decrement refcount and don't generate a
         * completion if the SWQE is being resent until the send
         * is finished.
         */
        if (qib_cmp24(wqe->lpsn, qp->s_sending_psn) < 0 ||
982 qib_cmp24(qp->s_sending_psn, qp->s_sending_hpsn) > 0) {
983 u32 s_last;
984
985 rvt_put_swqe(wqe);
986 s_last = qp->s_last;
987 if (++s_last >= qp->s_size)
988 s_last = 0;
989 qp->s_last = s_last;
990
991 barrier();
992 rvt_qp_swqe_complete(qp,
993 wqe,
994 ib_qib_wc_opcode[wqe->wr.opcode],
995 IB_WC_SUCCESS);
996 } else
997 this_cpu_inc(*ibp->rvp.rc_delayed_comp);
998
999 qp->s_retry = qp->s_retry_cnt;
1000 update_last_psn(qp, wqe->lpsn);

        /*
         * If we are completing a request which is in the process of
         * being resent, we can stop resending it since we know the
         * responder has already seen it.
         */
        if (qp->s_acked == qp->s_cur) {
1008 if (++qp->s_cur >= qp->s_size)
1009 qp->s_cur = 0;
1010 qp->s_acked = qp->s_cur;
1011 wqe = rvt_get_swqe_ptr(qp, qp->s_cur);
1012 if (qp->s_acked != qp->s_tail) {
1013 qp->s_state = OP(SEND_LAST);
1014 qp->s_psn = wqe->psn;
1015 }
1016 } else {
1017 if (++qp->s_acked >= qp->s_size)
1018 qp->s_acked = 0;
1019 if (qp->state == IB_QPS_SQD && qp->s_acked == qp->s_cur)
1020 qp->s_draining = 0;
1021 wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
1022 }
1023 return wqe;
1024}
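
/**
 * do_rc_ack - process an incoming RC ACK
 * @qp: the QP the ACK came in on
 * @aeth: the AETH from the response packet
 * @psn: the packet sequence number of the ACK
 * @opcode: the opcode of the response packet
 * @val: the payload of an atomic ACK, if any
 * @rcd: the context pointer
 *
 * This is called from qib_rc_rcv_resp() to process an incoming RC ACK
 * for the indicated QP.
 * Called with the QP s_lock held.
 * Returns 1 if OK, 0 if current operation should be aborted (NAK).
 */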
1037static int do_rc_ack(struct rvt_qp *qp, u32 aeth, u32 psn, int opcode,
1038 u64 val, struct qib_ctxtdata *rcd)
1039{
1040 struct qib_ibport *ibp;
1041 enum ib_wc_status status;
1042 struct rvt_swqe *wqe;
1043 int ret = 0;
1044 u32 ack_psn;
1045 int diff;

        /*
         * Note that NAKs implicitly ACK outstanding SEND and RDMA write
         * requests and implicitly NAK RDMA read and atomic requests issued
         * before the NAK'ed request.  The MSN won't include the NAK'ed
         * request but will include an ACK'ed request(s).
         */
        ack_psn = psn;
        if (aeth >> IB_AETH_NAK_SHIFT)
                ack_psn--;
1056 wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
1057 ibp = to_iport(qp->ibqp.device, qp->port_num);

        /*
         * The MSN might be for a later WQE than the PSN indicates so
         * only complete WQEs that the PSN finishes.
         */
        while ((diff = qib_cmp24(ack_psn, wqe->lpsn)) >= 0) {
                /*
                 * RDMA_READ_RESPONSE_ONLY is a special case since
                 * we want to generate completion events for everything
                 * before the RDMA read, copy the data, then generate
                 * the completion for the read.
                 */
1070 if (wqe->wr.opcode == IB_WR_RDMA_READ &&
1071 opcode == OP(RDMA_READ_RESPONSE_ONLY) &&
1072 diff == 0) {
1073 ret = 1;
1074 goto bail;
1075 }
                /*
                 * If this request is a RDMA read or atomic, and the ACK is
                 * for a later operation, this ACK NAKs the RDMA read or
                 * atomic.  In other words, only a RDMA_READ_LAST or ONLY
                 * can ACK a RDMA read and likewise for atomic ops.  Note
                 * that the NAK case can only happen if relaxed ordering is
                 * used and requests are sent after an RDMA read or atomic
                 * is sent but before the response is received.
                 */
1085 if ((wqe->wr.opcode == IB_WR_RDMA_READ &&
1086 (opcode != OP(RDMA_READ_RESPONSE_LAST) || diff != 0)) ||
1087 ((wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
1088 wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) &&
1089 (opcode != OP(ATOMIC_ACKNOWLEDGE) || diff != 0))) {
1090
1091 if (!(qp->r_flags & RVT_R_RDMAR_SEQ)) {
1092 qp->r_flags |= RVT_R_RDMAR_SEQ;
1093 qib_restart_rc(qp, qp->s_last_psn + 1, 0);
1094 if (list_empty(&qp->rspwait)) {
1095 qp->r_flags |= RVT_R_RSP_SEND;
1096 rvt_get_qp(qp);
1097 list_add_tail(&qp->rspwait,
1098 &rcd->qp_wait_list);
1099 }
1100 }
                        /*
                         * No need to process the ACK/NAK since we are
                         * restarting an earlier request.
                         */
                        goto bail;
1106 }
1107 if (wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
1108 wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) {
1109 u64 *vaddr = wqe->sg_list[0].vaddr;
1110 *vaddr = val;
1111 }
1112 if (qp->s_num_rd_atomic &&
1113 (wqe->wr.opcode == IB_WR_RDMA_READ ||
1114 wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
1115 wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD)) {
1116 qp->s_num_rd_atomic--;
1117
1118 if ((qp->s_flags & RVT_S_WAIT_FENCE) &&
1119 !qp->s_num_rd_atomic) {
1120 qp->s_flags &= ~(RVT_S_WAIT_FENCE |
1121 RVT_S_WAIT_ACK);
1122 qib_schedule_send(qp);
1123 } else if (qp->s_flags & RVT_S_WAIT_RDMAR) {
1124 qp->s_flags &= ~(RVT_S_WAIT_RDMAR |
1125 RVT_S_WAIT_ACK);
1126 qib_schedule_send(qp);
1127 }
1128 }
1129 wqe = do_rc_completion(qp, wqe, ibp);
1130 if (qp->s_acked == qp->s_tail)
1131 break;
1132 }
1133
1134 switch (aeth >> IB_AETH_NAK_SHIFT) {
1135 case 0:
1136 this_cpu_inc(*ibp->rvp.rc_acks);
1137 if (qp->s_acked != qp->s_tail) {
                        /*
                         * We are expecting more ACKs so
                         * mod the retry timer.
                         */
                        rvt_mod_retry_timer(qp);
                        /*
                         * We can stop resending the earlier packets and
                         * continue with the next packet the receiver wants.
                         */
                        if (qib_cmp24(qp->s_psn, psn) <= 0)
                                reset_psn(qp, psn + 1);
1149 } else {
1150
1151 rvt_stop_rc_timers(qp);
1152 if (qib_cmp24(qp->s_psn, psn) <= 0) {
1153 qp->s_state = OP(SEND_LAST);
1154 qp->s_psn = psn + 1;
1155 }
1156 }
1157 if (qp->s_flags & RVT_S_WAIT_ACK) {
1158 qp->s_flags &= ~RVT_S_WAIT_ACK;
1159 qib_schedule_send(qp);
1160 }
1161 rvt_get_credit(qp, aeth);
1162 qp->s_rnr_retry = qp->s_rnr_retry_cnt;
1163 qp->s_retry = qp->s_retry_cnt;
1164 update_last_psn(qp, psn);
1165 return 1;
1166
1167 case 1:
1168 ibp->rvp.n_rnr_naks++;
1169 if (qp->s_acked == qp->s_tail)
1170 goto bail;
1171 if (qp->s_flags & RVT_S_WAIT_RNR)
1172 goto bail;
1173 if (qp->s_rnr_retry == 0) {
1174 status = IB_WC_RNR_RETRY_EXC_ERR;
1175 goto class_b;
1176 }
1177 if (qp->s_rnr_retry_cnt < 7)
1178 qp->s_rnr_retry--;
1179
1180
1181 update_last_psn(qp, psn - 1);
1182
1183 ibp->rvp.n_rc_resends += (qp->s_psn - psn) & QIB_PSN_MASK;
1184
1185 reset_psn(qp, psn);
1186
1187 qp->s_flags &= ~(RVT_S_WAIT_SSN_CREDIT | RVT_S_WAIT_ACK);
1188 rvt_stop_rc_timers(qp);
1189 rvt_add_rnr_timer(qp, aeth);
1190 return 0;
1191
1192 case 3:
1193 if (qp->s_acked == qp->s_tail)
1194 goto bail;
1195
1196 update_last_psn(qp, psn - 1);
1197 switch ((aeth >> IB_AETH_CREDIT_SHIFT) &
1198 IB_AETH_CREDIT_MASK) {
1199 case 0:
1200 ibp->rvp.n_seq_naks++;
                        /*
                         * Back up to the responder's expected PSN.
                         * Note that we might get a NAK in the middle of an
                         * RDMA READ response which terminates the RDMA
                         * READ.
                         */
                        qib_restart_rc(qp, psn, 0);
1208 qib_schedule_send(qp);
1209 break;
1210
1211 case 1:
1212 status = IB_WC_REM_INV_REQ_ERR;
1213 ibp->rvp.n_other_naks++;
1214 goto class_b;
1215
1216 case 2:
1217 status = IB_WC_REM_ACCESS_ERR;
1218 ibp->rvp.n_other_naks++;
1219 goto class_b;
1220
1221 case 3:
1222 status = IB_WC_REM_OP_ERR;
1223 ibp->rvp.n_other_naks++;
1224class_b:
1225 if (qp->s_last == qp->s_acked) {
1226 qib_send_complete(qp, wqe, status);
1227 rvt_error_qp(qp, IB_WC_WR_FLUSH_ERR);
1228 }
1229 break;
1230
1231 default:
1232
1233 goto reserved;
1234 }
1235 qp->s_retry = qp->s_retry_cnt;
1236 qp->s_rnr_retry = qp->s_rnr_retry_cnt;
1237 goto bail;
1238
1239 default:
1240reserved:
1241
1242 goto bail;
1243 }
1244
1245bail:
1246 rvt_stop_rc_timers(qp);
1247 return ret;
1248}
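
/*
 * We have seen an out of sequence RDMA read middle or last packet.
 * This ACKs SENDs and RDMA writes up to the first RDMA read or atomic SWQE.
 */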
1254static void rdma_seq_err(struct rvt_qp *qp, struct qib_ibport *ibp, u32 psn,
1255 struct qib_ctxtdata *rcd)
1256{
1257 struct rvt_swqe *wqe;
1258
1259
1260 rvt_stop_rc_timers(qp);
1261
1262 wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
1263
1264 while (qib_cmp24(psn, wqe->lpsn) > 0) {
1265 if (wqe->wr.opcode == IB_WR_RDMA_READ ||
1266 wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
1267 wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD)
1268 break;
1269 wqe = do_rc_completion(qp, wqe, ibp);
1270 }
1271
1272 ibp->rvp.n_rdma_seq++;
1273 qp->r_flags |= RVT_R_RDMAR_SEQ;
1274 qib_restart_rc(qp, qp->s_last_psn + 1, 0);
1275 if (list_empty(&qp->rspwait)) {
1276 qp->r_flags |= RVT_R_RSP_SEND;
1277 rvt_get_qp(qp);
1278 list_add_tail(&qp->rspwait, &rcd->qp_wait_list);
1279 }
1280}
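
/**
 * qib_rc_rcv_resp - process an incoming RC response packet
 * @ibp: the port this packet came in on
 * @ohdr: the other headers for this packet
 * @data: the packet data
 * @tlen: the packet length
 * @qp: the QP for this packet
 * @opcode: the opcode for this packet
 * @psn: the packet sequence number for this packet
 * @hdrsize: the header length
 * @pmtu: the path MTU
 * @rcd: the context pointer
 *
 * This is called from qib_rc_rcv() to process an incoming RC response
 * packet for the given QP.
 * Called at interrupt level.
 */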
1298static void qib_rc_rcv_resp(struct qib_ibport *ibp,
1299 struct ib_other_headers *ohdr,
1300 void *data, u32 tlen,
1301 struct rvt_qp *qp,
1302 u32 opcode,
1303 u32 psn, u32 hdrsize, u32 pmtu,
1304 struct qib_ctxtdata *rcd)
1305{
1306 struct rvt_swqe *wqe;
1307 struct qib_pportdata *ppd = ppd_from_ibp(ibp);
1308 enum ib_wc_status status;
1309 unsigned long flags;
1310 int diff;
1311 u32 pad;
1312 u32 aeth;
1313 u64 val;
1314
1315 if (opcode != OP(RDMA_READ_RESPONSE_MIDDLE)) {
1316
1317
1318
1319
1320 if ((qib_cmp24(psn, qp->s_sending_psn) >= 0) &&
1321 (qib_cmp24(qp->s_sending_psn, qp->s_sending_hpsn) <= 0)) {
1322
1323
1324
1325
1326
1327 if (!(qp->s_flags & RVT_S_BUSY)) {
1328
1329 spin_lock_irqsave(&ppd->sdma_lock, flags);
1330
1331 qib_sdma_make_progress(ppd);
1332
1333 spin_unlock_irqrestore(&ppd->sdma_lock, flags);
1334 }
1335 }
1336 }
1337
1338 spin_lock_irqsave(&qp->s_lock, flags);
1339 if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK))
1340 goto ack_done;
1341
        /* Ignore invalid responses. */
        smp_read_barrier_depends(); /* see post_one_send */
        if (qib_cmp24(psn, READ_ONCE(qp->s_next_psn)) >= 0)
                goto ack_done;

        /* Ignore duplicate responses. */
        diff = qib_cmp24(psn, qp->s_last_psn);
        if (unlikely(diff <= 0)) {
                /* Update credits for "ghost" ACKs */
1351 if (diff == 0 && opcode == OP(ACKNOWLEDGE)) {
1352 aeth = be32_to_cpu(ohdr->u.aeth);
1353 if ((aeth >> IB_AETH_NAK_SHIFT) == 0)
1354 rvt_get_credit(qp, aeth);
1355 }
1356 goto ack_done;
1357 }

        /*
         * Skip everything other than the PSN we expect, if we are waiting
         * for a reply to a restarted RDMA read or atomic op.
         */
        if (qp->r_flags & RVT_R_RDMAR_SEQ) {
1364 if (qib_cmp24(psn, qp->s_last_psn + 1) != 0)
1365 goto ack_done;
1366 qp->r_flags &= ~RVT_R_RDMAR_SEQ;
1367 }
1368
1369 if (unlikely(qp->s_acked == qp->s_tail))
1370 goto ack_done;
1371 wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
1372 status = IB_WC_SUCCESS;
1373
1374 switch (opcode) {
1375 case OP(ACKNOWLEDGE):
1376 case OP(ATOMIC_ACKNOWLEDGE):
1377 case OP(RDMA_READ_RESPONSE_FIRST):
1378 aeth = be32_to_cpu(ohdr->u.aeth);
1379 if (opcode == OP(ATOMIC_ACKNOWLEDGE))
1380 val = ib_u64_get(&ohdr->u.at.atomic_ack_eth);
1381 else
1382 val = 0;
1383 if (!do_rc_ack(qp, aeth, psn, opcode, val, rcd) ||
1384 opcode != OP(RDMA_READ_RESPONSE_FIRST))
1385 goto ack_done;
1386 hdrsize += 4;
1387 wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
1388 if (unlikely(wqe->wr.opcode != IB_WR_RDMA_READ))
1389 goto ack_op_err;
                /*
                 * If this is a response to a resent RDMA read, we
                 * have to be careful to copy the data to the right
                 * location.
                 */
                qp->s_rdma_read_len = restart_sge(&qp->s_rdma_read_sge,
                                                  wqe, psn, pmtu);
1397 goto read_middle;
1398
1399 case OP(RDMA_READ_RESPONSE_MIDDLE):
1400
1401 if (unlikely(qib_cmp24(psn, qp->s_last_psn + 1)))
1402 goto ack_seq_err;
1403 if (unlikely(wqe->wr.opcode != IB_WR_RDMA_READ))
1404 goto ack_op_err;
1405read_middle:
1406 if (unlikely(tlen != (hdrsize + pmtu + 4)))
1407 goto ack_len_err;
1408 if (unlikely(pmtu >= qp->s_rdma_read_len))
1409 goto ack_len_err;
1410
1411
1412
1413
1414
1415 rvt_mod_retry_timer(qp);
1416 if (qp->s_flags & RVT_S_WAIT_ACK) {
1417 qp->s_flags &= ~RVT_S_WAIT_ACK;
1418 qib_schedule_send(qp);
1419 }
1420
1421 if (opcode == OP(RDMA_READ_RESPONSE_MIDDLE))
1422 qp->s_retry = qp->s_retry_cnt;
1423
1424
1425
1426
1427
1428 qp->s_rdma_read_len -= pmtu;
1429 update_last_psn(qp, psn);
1430 spin_unlock_irqrestore(&qp->s_lock, flags);
1431 qib_copy_sge(&qp->s_rdma_read_sge, data, pmtu, 0);
1432 goto bail;
1433
1434 case OP(RDMA_READ_RESPONSE_ONLY):
1435 aeth = be32_to_cpu(ohdr->u.aeth);
1436 if (!do_rc_ack(qp, aeth, psn, opcode, 0, rcd))
1437 goto ack_done;
                /* Get the number of bytes the message was padded by. */
                pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
                /*
                 * Check that the data size is >= 0 && <= pmtu.
                 * Remember to account for the AETH header (4) and
                 * ICRC (4).
                 */
                if (unlikely(tlen < (hdrsize + pad + 8)))
                        goto ack_len_err;
                /*
                 * If this is a response to a resent RDMA read, we
                 * have to be careful to copy the data to the right
                 * location.
                 */
                wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
1453 qp->s_rdma_read_len = restart_sge(&qp->s_rdma_read_sge,
1454 wqe, psn, pmtu);
1455 goto read_last;
1456
1457 case OP(RDMA_READ_RESPONSE_LAST):
1458
1459 if (unlikely(qib_cmp24(psn, qp->s_last_psn + 1)))
1460 goto ack_seq_err;
1461 if (unlikely(wqe->wr.opcode != IB_WR_RDMA_READ))
1462 goto ack_op_err;
                /* Get the number of bytes the message was padded by. */
                pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
                /*
                 * Check that the data size is >= 1 && <= pmtu.
                 * Remember to account for the AETH header (4) and
                 * ICRC (4).
                 */
                if (unlikely(tlen <= (hdrsize + pad + 8)))
                        goto ack_len_err;
1472read_last:
1473 tlen -= hdrsize + pad + 8;
1474 if (unlikely(tlen != qp->s_rdma_read_len))
1475 goto ack_len_err;
1476 aeth = be32_to_cpu(ohdr->u.aeth);
1477 qib_copy_sge(&qp->s_rdma_read_sge, data, tlen, 0);
1478 WARN_ON(qp->s_rdma_read_sge.num_sge);
1479 (void) do_rc_ack(qp, aeth, psn,
1480 OP(RDMA_READ_RESPONSE_LAST), 0, rcd);
1481 goto ack_done;
1482 }
1483
1484ack_op_err:
1485 status = IB_WC_LOC_QP_OP_ERR;
1486 goto ack_err;
1487
1488ack_seq_err:
1489 rdma_seq_err(qp, ibp, psn, rcd);
1490 goto ack_done;
1491
1492ack_len_err:
1493 status = IB_WC_LOC_LEN_ERR;
1494ack_err:
1495 if (qp->s_last == qp->s_acked) {
1496 qib_send_complete(qp, wqe, status);
1497 rvt_error_qp(qp, IB_WC_WR_FLUSH_ERR);
1498 }
1499ack_done:
1500 spin_unlock_irqrestore(&qp->s_lock, flags);
1501bail:
1502 return;
1503}
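
/**
 * qib_rc_rcv_error - process an incoming duplicate or error RC packet
 * @ohdr: the other headers for this packet
 * @data: the packet data
 * @qp: the QP for this packet
 * @opcode: the opcode for this packet
 * @psn: the packet sequence number for this packet
 * @diff: the difference between the PSN and the expected PSN
 * @rcd: the context pointer
 *
 * This is called from qib_rc_rcv() to process an unexpected
 * incoming RC packet for the given QP.
 * Called at interrupt level.
 * Return 1 if no more processing is needed; otherwise return 0 to
 * schedule a response to be sent.
 */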
1520static int qib_rc_rcv_error(struct ib_other_headers *ohdr,
1521 void *data,
1522 struct rvt_qp *qp,
1523 u32 opcode,
1524 u32 psn,
1525 int diff,
1526 struct qib_ctxtdata *rcd)
1527{
1528 struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
1529 struct rvt_ack_entry *e;
1530 unsigned long flags;
1531 u8 i, prev;
1532 int old_req;
1533
        if (diff > 0) {
                /*
                 * Packet sequence error.
                 * A NAK will ACK earlier sends and RDMA writes.
                 * Don't queue the NAK if we already sent one.
                 */
1540 if (!qp->r_nak_state) {
1541 ibp->rvp.n_rc_seqnak++;
1542 qp->r_nak_state = IB_NAK_PSN_ERROR;
                        /* Use the expected PSN. */
                        qp->r_ack_psn = qp->r_psn;
                        /*
                         * Wait to send the sequence NAK until all packets
                         * in the receive queue have been processed.
                         * Otherwise, we end up propagating congestion.
                         */
1550 if (list_empty(&qp->rspwait)) {
1551 qp->r_flags |= RVT_R_RSP_NAK;
1552 rvt_get_qp(qp);
1553 list_add_tail(&qp->rspwait, &rcd->qp_wait_list);
1554 }
1555 }
1556 goto done;
1557 }

        /*
         * Handle a duplicate request.  Don't re-execute SEND, RDMA
         * write or atomic op.  Don't NAK errors, just silently drop
         * the duplicate request.  Note that r_sge, r_len, and
         * r_rcv_len may be in use so don't modify them.
         *
         * We are supposed to ACK the earliest duplicate PSN but we
         * can coalesce an outstanding duplicate ACK.  We have to
         * send the earliest so that RDMA reads can be restarted at
         * the requester's expected PSN.
         *
         * First, find where this duplicate PSN falls within the
         * ACKs previously sent.
         * old_req is true if there is an older response that is scheduled
         * to be sent before sending this one.
         */
1575 e = NULL;
1576 old_req = 1;
1577 ibp->rvp.n_rc_dupreq++;
1578
1579 spin_lock_irqsave(&qp->s_lock, flags);
1580
1581 for (i = qp->r_head_ack_queue; ; i = prev) {
1582 if (i == qp->s_tail_ack_queue)
1583 old_req = 0;
1584 if (i)
1585 prev = i - 1;
1586 else
1587 prev = QIB_MAX_RDMA_ATOMIC;
1588 if (prev == qp->r_head_ack_queue) {
1589 e = NULL;
1590 break;
1591 }
1592 e = &qp->s_ack_queue[prev];
1593 if (!e->opcode) {
1594 e = NULL;
1595 break;
1596 }
1597 if (qib_cmp24(psn, e->psn) >= 0) {
1598 if (prev == qp->s_tail_ack_queue &&
1599 qib_cmp24(psn, e->lpsn) <= 0)
1600 old_req = 0;
1601 break;
1602 }
1603 }
1604 switch (opcode) {
1605 case OP(RDMA_READ_REQUEST): {
1606 struct ib_reth *reth;
1607 u32 offset;
1608 u32 len;

                /*
                 * If we didn't find the RDMA read request in the ack
                 * queue, we can ignore this request.
                 */
                if (!e || e->opcode != OP(RDMA_READ_REQUEST))
                        goto unlock_done;
                /* RETH comes after BTH */
                reth = &ohdr->u.rc.reth;
                /*
                 * Address range must be a subset of the original
                 * request and start on pmtu boundaries.
                 * We reuse the old ack_queue slot since the requester
                 * should not back up and request an earlier PSN for the
                 * same request.
                 */
                offset = ((psn - e->psn) & QIB_PSN_MASK) *
                        qp->pmtu;
1627 len = be32_to_cpu(reth->length);
1628 if (unlikely(offset + len != e->rdma_sge.sge_length))
1629 goto unlock_done;
1630 if (e->rdma_sge.mr) {
1631 rvt_put_mr(e->rdma_sge.mr);
1632 e->rdma_sge.mr = NULL;
1633 }
1634 if (len != 0) {
1635 u32 rkey = be32_to_cpu(reth->rkey);
1636 u64 vaddr = be64_to_cpu(reth->vaddr);
1637 int ok;
1638
1639 ok = rvt_rkey_ok(qp, &e->rdma_sge, len, vaddr, rkey,
1640 IB_ACCESS_REMOTE_READ);
1641 if (unlikely(!ok))
1642 goto unlock_done;
1643 } else {
1644 e->rdma_sge.vaddr = NULL;
1645 e->rdma_sge.length = 0;
1646 e->rdma_sge.sge_length = 0;
1647 }
1648 e->psn = psn;
1649 if (old_req)
1650 goto unlock_done;
1651 qp->s_tail_ack_queue = prev;
1652 break;
1653 }
1654
1655 case OP(COMPARE_SWAP):
1656 case OP(FETCH_ADD): {
                /*
                 * If we didn't find the atomic request in the ack queue
                 * or the send tasklet is already backed up to send an
                 * earlier entry, we can ignore this request.
                 */
                if (!e || e->opcode != (u8) opcode || old_req)
1663 goto unlock_done;
1664 qp->s_tail_ack_queue = prev;
1665 break;
1666 }
1667
        default:
                /*
                 * Ignore this operation if it doesn't request an ACK
                 * or an earlier RETH or ATOMIC is pending.
                 */
                if (!(psn & IB_BTH_REQ_ACK) || old_req)
                        goto unlock_done;
                /*
                 * Resend the most recent ACK if this request is
                 * after all the previous RDMA reads and atomics.
                 */
                if (i == qp->r_head_ack_queue) {
1680 spin_unlock_irqrestore(&qp->s_lock, flags);
1681 qp->r_nak_state = 0;
1682 qp->r_ack_psn = qp->r_psn - 1;
1683 goto send_ack;
1684 }
                /*
                 * Try to send a simple ACK to work around a Mellanox bug
                 * which doesn't accept a RDMA read response or atomic
                 * response as an ACK for earlier SENDs or RDMA writes.
                 */
                if (!(qp->s_flags & RVT_S_RESP_PENDING)) {
1691 spin_unlock_irqrestore(&qp->s_lock, flags);
1692 qp->r_nak_state = 0;
1693 qp->r_ack_psn = qp->s_ack_queue[i].psn - 1;
1694 goto send_ack;
1695 }
1696
1697
1698
1699
1700 qp->s_tail_ack_queue = i;
1701 break;
1702 }
1703 qp->s_ack_state = OP(ACKNOWLEDGE);
1704 qp->s_flags |= RVT_S_RESP_PENDING;
1705 qp->r_nak_state = 0;
1706 qib_schedule_send(qp);
1707
1708unlock_done:
1709 spin_unlock_irqrestore(&qp->s_lock, flags);
1710done:
1711 return 1;
1712
1713send_ack:
1714 return 0;
1715}
1716
1717static inline void qib_update_ack_queue(struct rvt_qp *qp, unsigned n)
1718{
1719 unsigned next;
1720
1721 next = n + 1;
1722 if (next > QIB_MAX_RDMA_ATOMIC)
1723 next = 0;
1724 qp->s_tail_ack_queue = next;
1725 qp->s_ack_state = OP(ACKNOWLEDGE);
1726}
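
/**
 * qib_rc_rcv - process an incoming RC packet
 * @rcd: the context pointer
 * @hdr: the header of this packet
 * @has_grh: true if the header has a GRH
 * @data: the packet data
 * @tlen: the packet length
 * @qp: the QP for this packet
 *
 * This is called from qib_qp_rcv() to process an incoming RC packet
 * for the given QP.
 * Called at interrupt level.
 */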
1741void qib_rc_rcv(struct qib_ctxtdata *rcd, struct ib_header *hdr,
1742 int has_grh, void *data, u32 tlen, struct rvt_qp *qp)
1743{
1744 struct qib_ibport *ibp = &rcd->ppd->ibport_data;
1745 struct ib_other_headers *ohdr;
1746 u32 opcode;
1747 u32 hdrsize;
1748 u32 psn;
1749 u32 pad;
1750 struct ib_wc wc;
1751 u32 pmtu = qp->pmtu;
1752 int diff;
1753 struct ib_reth *reth;
1754 unsigned long flags;
1755 int ret;
1756
1757
1758 if (!has_grh) {
1759 ohdr = &hdr->u.oth;
1760 hdrsize = 8 + 12;
1761 } else {
1762 ohdr = &hdr->u.l.oth;
1763 hdrsize = 8 + 40 + 12;
1764 }
1765
1766 opcode = be32_to_cpu(ohdr->bth[0]);
1767 if (qib_ruc_check_hdr(ibp, hdr, has_grh, qp, opcode))
1768 return;
1769
1770 psn = be32_to_cpu(ohdr->bth[2]);
1771 opcode >>= 24;

        /*
         * Process responses (ACKs) before anything else.  Note that the
         * packet sequence number will be for something in the send work
         * queue rather than the expected receive packet sequence number.
         * In other words, this QP is the requester.
         */
        if (opcode >= OP(RDMA_READ_RESPONSE_FIRST) &&
            opcode <= OP(ATOMIC_ACKNOWLEDGE)) {
1781 qib_rc_rcv_resp(ibp, ohdr, data, tlen, qp, opcode, psn,
1782 hdrsize, pmtu, rcd);
1783 return;
1784 }
1785
1786
1787 diff = qib_cmp24(psn, qp->r_psn);
1788 if (unlikely(diff)) {
1789 if (qib_rc_rcv_error(ohdr, data, qp, opcode, psn, diff, rcd))
1790 return;
1791 goto send_ack;
1792 }
1793
1794
1795 switch (qp->r_state) {
1796 case OP(SEND_FIRST):
1797 case OP(SEND_MIDDLE):
1798 if (opcode == OP(SEND_MIDDLE) ||
1799 opcode == OP(SEND_LAST) ||
1800 opcode == OP(SEND_LAST_WITH_IMMEDIATE))
1801 break;
1802 goto nack_inv;
1803
1804 case OP(RDMA_WRITE_FIRST):
1805 case OP(RDMA_WRITE_MIDDLE):
1806 if (opcode == OP(RDMA_WRITE_MIDDLE) ||
1807 opcode == OP(RDMA_WRITE_LAST) ||
1808 opcode == OP(RDMA_WRITE_LAST_WITH_IMMEDIATE))
1809 break;
1810 goto nack_inv;
1811
1812 default:
1813 if (opcode == OP(SEND_MIDDLE) ||
1814 opcode == OP(SEND_LAST) ||
1815 opcode == OP(SEND_LAST_WITH_IMMEDIATE) ||
1816 opcode == OP(RDMA_WRITE_MIDDLE) ||
1817 opcode == OP(RDMA_WRITE_LAST) ||
1818 opcode == OP(RDMA_WRITE_LAST_WITH_IMMEDIATE))
1819 goto nack_inv;

                /*
                 * Note that it is up to the requester to not send a new
                 * RDMA read or atomic operation before receiving an ACK
                 * for the previous operation.
                 */
                break;
1826 }
1827
1828 if (qp->state == IB_QPS_RTR && !(qp->r_flags & RVT_R_COMM_EST))
1829 rvt_comm_est(qp);
1830
1831
1832 switch (opcode) {
1833 case OP(SEND_FIRST):
1834 ret = qib_get_rwqe(qp, 0);
1835 if (ret < 0)
1836 goto nack_op_err;
1837 if (!ret)
1838 goto rnr_nak;
1839 qp->r_rcv_len = 0;
1840
1841 case OP(SEND_MIDDLE):
1842 case OP(RDMA_WRITE_MIDDLE):
1843send_middle:
1844
1845 if (unlikely(tlen != (hdrsize + pmtu + 4)))
1846 goto nack_inv;
1847 qp->r_rcv_len += pmtu;
1848 if (unlikely(qp->r_rcv_len > qp->r_len))
1849 goto nack_inv;
1850 qib_copy_sge(&qp->r_sge, data, pmtu, 1);
1851 break;
1852
1853 case OP(RDMA_WRITE_LAST_WITH_IMMEDIATE):
1854
1855 ret = qib_get_rwqe(qp, 1);
1856 if (ret < 0)
1857 goto nack_op_err;
1858 if (!ret)
1859 goto rnr_nak;
1860 goto send_last_imm;
1861
1862 case OP(SEND_ONLY):
1863 case OP(SEND_ONLY_WITH_IMMEDIATE):
1864 ret = qib_get_rwqe(qp, 0);
1865 if (ret < 0)
1866 goto nack_op_err;
1867 if (!ret)
1868 goto rnr_nak;
1869 qp->r_rcv_len = 0;
1870 if (opcode == OP(SEND_ONLY))
1871 goto no_immediate_data;
1872
1873 case OP(SEND_LAST_WITH_IMMEDIATE):
1874send_last_imm:
1875 wc.ex.imm_data = ohdr->u.imm_data;
1876 hdrsize += 4;
1877 wc.wc_flags = IB_WC_WITH_IMM;
1878 goto send_last;
1879 case OP(SEND_LAST):
1880 case OP(RDMA_WRITE_LAST):
1881no_immediate_data:
1882 wc.wc_flags = 0;
1883 wc.ex.imm_data = 0;
send_last:
                /* Get the number of bytes the message was padded by. */
                pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
                /* Check for invalid length. */
                /* XXX LAST len should be >= 1 */
                if (unlikely(tlen < (hdrsize + pad + 4)))
                        goto nack_inv;
                /* Don't count the CRC. */
                tlen -= (hdrsize + pad + 4);
1893 wc.byte_len = tlen + qp->r_rcv_len;
1894 if (unlikely(wc.byte_len > qp->r_len))
1895 goto nack_inv;
1896 qib_copy_sge(&qp->r_sge, data, tlen, 1);
1897 rvt_put_ss(&qp->r_sge);
1898 qp->r_msn++;
1899 if (!test_and_clear_bit(RVT_R_WRID_VALID, &qp->r_aflags))
1900 break;
1901 wc.wr_id = qp->r_wr_id;
1902 wc.status = IB_WC_SUCCESS;
1903 if (opcode == OP(RDMA_WRITE_LAST_WITH_IMMEDIATE) ||
1904 opcode == OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE))
1905 wc.opcode = IB_WC_RECV_RDMA_WITH_IMM;
1906 else
1907 wc.opcode = IB_WC_RECV;
1908 wc.qp = &qp->ibqp;
1909 wc.src_qp = qp->remote_qpn;
1910 wc.slid = rdma_ah_get_dlid(&qp->remote_ah_attr);
1911 wc.sl = rdma_ah_get_sl(&qp->remote_ah_attr);
1912
1913 wc.vendor_err = 0;
1914 wc.pkey_index = 0;
1915 wc.dlid_path_bits = 0;
1916 wc.port_num = 0;
1917
1918 rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc,
1919 (ohdr->bth[0] &
1920 cpu_to_be32(IB_BTH_SOLICITED)) != 0);
1921 break;
1922
1923 case OP(RDMA_WRITE_FIRST):
1924 case OP(RDMA_WRITE_ONLY):
1925 case OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE):
1926 if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_WRITE)))
1927 goto nack_inv;
1928
1929 reth = &ohdr->u.rc.reth;
1930 hdrsize += sizeof(*reth);
1931 qp->r_len = be32_to_cpu(reth->length);
1932 qp->r_rcv_len = 0;
1933 qp->r_sge.sg_list = NULL;
1934 if (qp->r_len != 0) {
1935 u32 rkey = be32_to_cpu(reth->rkey);
1936 u64 vaddr = be64_to_cpu(reth->vaddr);
1937 int ok;
1938
1939
1940 ok = rvt_rkey_ok(qp, &qp->r_sge.sge, qp->r_len, vaddr,
1941 rkey, IB_ACCESS_REMOTE_WRITE);
1942 if (unlikely(!ok))
1943 goto nack_acc;
1944 qp->r_sge.num_sge = 1;
1945 } else {
1946 qp->r_sge.num_sge = 0;
1947 qp->r_sge.sge.mr = NULL;
1948 qp->r_sge.sge.vaddr = NULL;
1949 qp->r_sge.sge.length = 0;
1950 qp->r_sge.sge.sge_length = 0;
1951 }
1952 if (opcode == OP(RDMA_WRITE_FIRST))
1953 goto send_middle;
1954 else if (opcode == OP(RDMA_WRITE_ONLY))
1955 goto no_immediate_data;
1956 ret = qib_get_rwqe(qp, 1);
1957 if (ret < 0)
1958 goto nack_op_err;
1959 if (!ret) {
1960 rvt_put_ss(&qp->r_sge);
1961 goto rnr_nak;
1962 }
1963 wc.ex.imm_data = ohdr->u.rc.imm_data;
1964 hdrsize += 4;
1965 wc.wc_flags = IB_WC_WITH_IMM;
1966 goto send_last;
1967
1968 case OP(RDMA_READ_REQUEST): {
1969 struct rvt_ack_entry *e;
1970 u32 len;
1971 u8 next;
1972
1973 if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_READ)))
1974 goto nack_inv;
1975 next = qp->r_head_ack_queue + 1;
1976
1977 if (next > QIB_MAX_RDMA_ATOMIC)
1978 next = 0;
1979 spin_lock_irqsave(&qp->s_lock, flags);
1980 if (unlikely(next == qp->s_tail_ack_queue)) {
1981 if (!qp->s_ack_queue[next].sent)
1982 goto nack_inv_unlck;
1983 qib_update_ack_queue(qp, next);
1984 }
1985 e = &qp->s_ack_queue[qp->r_head_ack_queue];
1986 if (e->opcode == OP(RDMA_READ_REQUEST) && e->rdma_sge.mr) {
1987 rvt_put_mr(e->rdma_sge.mr);
1988 e->rdma_sge.mr = NULL;
1989 }
1990 reth = &ohdr->u.rc.reth;
1991 len = be32_to_cpu(reth->length);
1992 if (len) {
1993 u32 rkey = be32_to_cpu(reth->rkey);
1994 u64 vaddr = be64_to_cpu(reth->vaddr);
1995 int ok;
1996
1997
1998 ok = rvt_rkey_ok(qp, &e->rdma_sge, len, vaddr,
1999 rkey, IB_ACCESS_REMOTE_READ);
2000 if (unlikely(!ok))
2001 goto nack_acc_unlck;
2002
2003
2004
2005
2006 qp->r_psn += rvt_div_mtu(qp, len - 1);
2007 } else {
2008 e->rdma_sge.mr = NULL;
2009 e->rdma_sge.vaddr = NULL;
2010 e->rdma_sge.length = 0;
2011 e->rdma_sge.sge_length = 0;
2012 }
2013 e->opcode = opcode;
2014 e->sent = 0;
2015 e->psn = psn;
2016 e->lpsn = qp->r_psn;
                /*
                 * We need to increment the MSN here instead of when we
                 * finish sending the result since a duplicate request would
                 * increment it more than once.
                 */
                qp->r_msn++;
2023 qp->r_psn++;
2024 qp->r_state = opcode;
2025 qp->r_nak_state = 0;
2026 qp->r_head_ack_queue = next;
2027
2028
2029 qp->s_flags |= RVT_S_RESP_PENDING;
2030 qib_schedule_send(qp);
2031
2032 goto sunlock;
2033 }
2034
2035 case OP(COMPARE_SWAP):
2036 case OP(FETCH_ADD): {
2037 struct ib_atomic_eth *ateth;
2038 struct rvt_ack_entry *e;
2039 u64 vaddr;
2040 atomic64_t *maddr;
2041 u64 sdata;
2042 u32 rkey;
2043 u8 next;
2044
2045 if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC)))
2046 goto nack_inv;
2047 next = qp->r_head_ack_queue + 1;
2048 if (next > QIB_MAX_RDMA_ATOMIC)
2049 next = 0;
2050 spin_lock_irqsave(&qp->s_lock, flags);
2051 if (unlikely(next == qp->s_tail_ack_queue)) {
2052 if (!qp->s_ack_queue[next].sent)
2053 goto nack_inv_unlck;
2054 qib_update_ack_queue(qp, next);
2055 }
2056 e = &qp->s_ack_queue[qp->r_head_ack_queue];
2057 if (e->opcode == OP(RDMA_READ_REQUEST) && e->rdma_sge.mr) {
2058 rvt_put_mr(e->rdma_sge.mr);
2059 e->rdma_sge.mr = NULL;
2060 }
2061 ateth = &ohdr->u.atomic_eth;
2062 vaddr = get_ib_ateth_vaddr(ateth);
2063 if (unlikely(vaddr & (sizeof(u64) - 1)))
2064 goto nack_inv_unlck;
2065 rkey = be32_to_cpu(ateth->rkey);
2066
2067 if (unlikely(!rvt_rkey_ok(qp, &qp->r_sge.sge, sizeof(u64),
2068 vaddr, rkey,
2069 IB_ACCESS_REMOTE_ATOMIC)))
2070 goto nack_acc_unlck;
2071
2072 maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
2073 sdata = get_ib_ateth_swap(ateth);
2074 e->atomic_data = (opcode == OP(FETCH_ADD)) ?
2075 (u64) atomic64_add_return(sdata, maddr) - sdata :
2076 (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
2077 get_ib_ateth_compare(ateth),
2078 sdata);
2079 rvt_put_mr(qp->r_sge.sge.mr);
2080 qp->r_sge.num_sge = 0;
2081 e->opcode = opcode;
2082 e->sent = 0;
2083 e->psn = psn;
2084 e->lpsn = psn;
2085 qp->r_msn++;
2086 qp->r_psn++;
2087 qp->r_state = opcode;
2088 qp->r_nak_state = 0;
2089 qp->r_head_ack_queue = next;
2090
2091
2092 qp->s_flags |= RVT_S_RESP_PENDING;
2093 qib_schedule_send(qp);
2094
2095 goto sunlock;
2096 }
2097
2098 default:
2099
2100 goto nack_inv;
2101 }
2102 qp->r_psn++;
2103 qp->r_state = opcode;
2104 qp->r_ack_psn = psn;
2105 qp->r_nak_state = 0;
        /* Send an ACK if requested or required. */
        if (psn & (1 << 31))
                goto send_ack;
2109 return;
2110
2111rnr_nak:
2112 qp->r_nak_state = IB_RNR_NAK | qp->r_min_rnr_timer;
2113 qp->r_ack_psn = qp->r_psn;
2114
2115 if (list_empty(&qp->rspwait)) {
2116 qp->r_flags |= RVT_R_RSP_NAK;
2117 rvt_get_qp(qp);
2118 list_add_tail(&qp->rspwait, &rcd->qp_wait_list);
2119 }
2120 return;
2121
2122nack_op_err:
2123 rvt_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
2124 qp->r_nak_state = IB_NAK_REMOTE_OPERATIONAL_ERROR;
2125 qp->r_ack_psn = qp->r_psn;
2126
2127 if (list_empty(&qp->rspwait)) {
2128 qp->r_flags |= RVT_R_RSP_NAK;
2129 rvt_get_qp(qp);
2130 list_add_tail(&qp->rspwait, &rcd->qp_wait_list);
2131 }
2132 return;
2133
2134nack_inv_unlck:
2135 spin_unlock_irqrestore(&qp->s_lock, flags);
2136nack_inv:
2137 rvt_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
2138 qp->r_nak_state = IB_NAK_INVALID_REQUEST;
2139 qp->r_ack_psn = qp->r_psn;
2140
2141 if (list_empty(&qp->rspwait)) {
2142 qp->r_flags |= RVT_R_RSP_NAK;
2143 rvt_get_qp(qp);
2144 list_add_tail(&qp->rspwait, &rcd->qp_wait_list);
2145 }
2146 return;
2147
2148nack_acc_unlck:
2149 spin_unlock_irqrestore(&qp->s_lock, flags);
2150nack_acc:
2151 rvt_rc_error(qp, IB_WC_LOC_PROT_ERR);
2152 qp->r_nak_state = IB_NAK_REMOTE_ACCESS_ERROR;
2153 qp->r_ack_psn = qp->r_psn;
2154send_ack:
2155 qib_send_rc_ack(qp);
2156 return;
2157
2158sunlock:
2159 spin_unlock_irqrestore(&qp->s_lock, flags);
2160}
2161