#include <linux/skbuff.h>

#include "rxe.h"
#include "rxe_loc.h"
#include "rxe_queue.h"

enum resp_states {
	RESPST_NONE,
	RESPST_GET_REQ,
	RESPST_CHK_PSN,
	RESPST_CHK_OP_SEQ,
	RESPST_CHK_OP_VALID,
	RESPST_CHK_RESOURCE,
	RESPST_CHK_LENGTH,
	RESPST_CHK_RKEY,
	RESPST_EXECUTE,
	RESPST_READ_REPLY,
	RESPST_COMPLETE,
	RESPST_ACKNOWLEDGE,
	RESPST_CLEANUP,
	RESPST_DUPLICATE_REQUEST,
	RESPST_ERR_MALFORMED_WQE,
	RESPST_ERR_UNSUPPORTED_OPCODE,
	RESPST_ERR_MISALIGNED_ATOMIC,
	RESPST_ERR_PSN_OUT_OF_SEQ,
	RESPST_ERR_MISSING_OPCODE_FIRST,
	RESPST_ERR_MISSING_OPCODE_LAST_C,
	RESPST_ERR_MISSING_OPCODE_LAST_D1E,
	RESPST_ERR_TOO_MANY_RDMA_ATM_REQ,
	RESPST_ERR_RNR,
	RESPST_ERR_RKEY_VIOLATION,
	RESPST_ERR_LENGTH,
	RESPST_ERR_CQ_OVERFLOW,
	RESPST_ERROR,
	RESPST_RESET,
	RESPST_DONE,
	RESPST_EXIT,
};

static char *resp_state_name[] = {
	[RESPST_NONE] = "NONE",
	[RESPST_GET_REQ] = "GET_REQ",
	[RESPST_CHK_PSN] = "CHK_PSN",
	[RESPST_CHK_OP_SEQ] = "CHK_OP_SEQ",
	[RESPST_CHK_OP_VALID] = "CHK_OP_VALID",
	[RESPST_CHK_RESOURCE] = "CHK_RESOURCE",
	[RESPST_CHK_LENGTH] = "CHK_LENGTH",
	[RESPST_CHK_RKEY] = "CHK_RKEY",
	[RESPST_EXECUTE] = "EXECUTE",
	[RESPST_READ_REPLY] = "READ_REPLY",
	[RESPST_COMPLETE] = "COMPLETE",
	[RESPST_ACKNOWLEDGE] = "ACKNOWLEDGE",
	[RESPST_CLEANUP] = "CLEANUP",
	[RESPST_DUPLICATE_REQUEST] = "DUPLICATE_REQUEST",
	[RESPST_ERR_MALFORMED_WQE] = "ERR_MALFORMED_WQE",
	[RESPST_ERR_UNSUPPORTED_OPCODE] = "ERR_UNSUPPORTED_OPCODE",
	[RESPST_ERR_MISALIGNED_ATOMIC] = "ERR_MISALIGNED_ATOMIC",
	[RESPST_ERR_PSN_OUT_OF_SEQ] = "ERR_PSN_OUT_OF_SEQ",
	[RESPST_ERR_MISSING_OPCODE_FIRST] = "ERR_MISSING_OPCODE_FIRST",
	[RESPST_ERR_MISSING_OPCODE_LAST_C] = "ERR_MISSING_OPCODE_LAST_C",
	[RESPST_ERR_MISSING_OPCODE_LAST_D1E] = "ERR_MISSING_OPCODE_LAST_D1E",
	[RESPST_ERR_TOO_MANY_RDMA_ATM_REQ] = "ERR_TOO_MANY_RDMA_ATM_REQ",
	[RESPST_ERR_RNR] = "ERR_RNR",
	[RESPST_ERR_RKEY_VIOLATION] = "ERR_RKEY_VIOLATION",
	[RESPST_ERR_LENGTH] = "ERR_LENGTH",
	[RESPST_ERR_CQ_OVERFLOW] = "ERR_CQ_OVERFLOW",
	[RESPST_ERROR] = "ERROR",
	[RESPST_RESET] = "RESET",
	[RESPST_DONE] = "DONE",
	[RESPST_EXIT] = "EXIT",
};

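/* rxe_recv calls here to add a request packet to the input queue */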
void rxe_resp_queue_pkt(struct rxe_dev *rxe, struct rxe_qp *qp,
			struct sk_buff *skb)
{
	int must_sched;
	struct rxe_pkt_info *pkt = SKB_TO_PKT(skb);

	skb_queue_tail(&qp->req_pkts, skb);

	must_sched = (pkt->opcode == IB_OPCODE_RC_RDMA_READ_REQUEST) ||
			(skb_queue_len(&qp->req_pkts) > 1);

	rxe_run_task(&qp->resp.task, must_sched);
}

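/* peek at the next request packet, or drain the queue if the QP is in error */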
static inline enum resp_states get_req(struct rxe_qp *qp,
				       struct rxe_pkt_info **pkt_p)
{
	struct sk_buff *skb;

	if (qp->resp.state == QP_STATE_ERROR) {
		skb = skb_dequeue(&qp->req_pkts);
		if (skb) {
			/* drain request packet queue */
			rxe_drop_ref(qp);
			kfree_skb(skb);
			return RESPST_GET_REQ;
		}

		/* go drain recv wr queue */
		return RESPST_CHK_RESOURCE;
	}

	skb = skb_peek(&qp->req_pkts);
	if (!skb)
		return RESPST_EXIT;

	*pkt_p = SKB_TO_PKT(skb);

	return (qp->resp.res) ? RESPST_READ_REPLY : RESPST_CHK_PSN;
}

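/* compare the packet PSN against the expected PSN and detect duplicate,
 * out of sequence and dropped-message cases
 */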
static enum resp_states check_psn(struct rxe_qp *qp,
				  struct rxe_pkt_info *pkt)
{
	int diff = psn_compare(pkt->psn, qp->resp.psn);
	struct rxe_dev *rxe = to_rdev(qp->ibqp.device);

	switch (qp_type(qp)) {
	case IB_QPT_RC:
		if (diff > 0) {
			if (qp->resp.sent_psn_nak)
				return RESPST_CLEANUP;

			qp->resp.sent_psn_nak = 1;
			rxe_counter_inc(rxe, RXE_CNT_OUT_OF_SEQ_REQ);
			return RESPST_ERR_PSN_OUT_OF_SEQ;

		} else if (diff < 0) {
			rxe_counter_inc(rxe, RXE_CNT_DUP_REQ);
			return RESPST_DUPLICATE_REQUEST;
		}

		if (qp->resp.sent_psn_nak)
			qp->resp.sent_psn_nak = 0;

		break;

	case IB_QPT_UC:
		if (qp->resp.drop_msg || diff != 0) {
			if (pkt->mask & RXE_START_MASK) {
				qp->resp.drop_msg = 0;
				return RESPST_CHK_OP_SEQ;
			}

			qp->resp.drop_msg = 1;
			return RESPST_CLEANUP;
		}
		break;
	default:
		break;
	}

	return RESPST_CHK_OP_SEQ;
}

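/* verify that the opcode continues the current message correctly, e.g. that
 * a MIDDLE or LAST packet only follows a FIRST or MIDDLE packet
 */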
static enum resp_states check_op_seq(struct rxe_qp *qp,
				     struct rxe_pkt_info *pkt)
{
	switch (qp_type(qp)) {
	case IB_QPT_RC:
		switch (qp->resp.opcode) {
		case IB_OPCODE_RC_SEND_FIRST:
		case IB_OPCODE_RC_SEND_MIDDLE:
			switch (pkt->opcode) {
			case IB_OPCODE_RC_SEND_MIDDLE:
			case IB_OPCODE_RC_SEND_LAST:
			case IB_OPCODE_RC_SEND_LAST_WITH_IMMEDIATE:
			case IB_OPCODE_RC_SEND_LAST_WITH_INVALIDATE:
				return RESPST_CHK_OP_VALID;
			default:
				return RESPST_ERR_MISSING_OPCODE_LAST_C;
			}

		case IB_OPCODE_RC_RDMA_WRITE_FIRST:
		case IB_OPCODE_RC_RDMA_WRITE_MIDDLE:
			switch (pkt->opcode) {
			case IB_OPCODE_RC_RDMA_WRITE_MIDDLE:
			case IB_OPCODE_RC_RDMA_WRITE_LAST:
			case IB_OPCODE_RC_RDMA_WRITE_LAST_WITH_IMMEDIATE:
				return RESPST_CHK_OP_VALID;
			default:
				return RESPST_ERR_MISSING_OPCODE_LAST_C;
			}

		default:
			switch (pkt->opcode) {
			case IB_OPCODE_RC_SEND_MIDDLE:
			case IB_OPCODE_RC_SEND_LAST:
			case IB_OPCODE_RC_SEND_LAST_WITH_IMMEDIATE:
			case IB_OPCODE_RC_SEND_LAST_WITH_INVALIDATE:
			case IB_OPCODE_RC_RDMA_WRITE_MIDDLE:
			case IB_OPCODE_RC_RDMA_WRITE_LAST:
			case IB_OPCODE_RC_RDMA_WRITE_LAST_WITH_IMMEDIATE:
				return RESPST_ERR_MISSING_OPCODE_FIRST;
			default:
				return RESPST_CHK_OP_VALID;
			}
		}
		break;

	case IB_QPT_UC:
		switch (qp->resp.opcode) {
		case IB_OPCODE_UC_SEND_FIRST:
		case IB_OPCODE_UC_SEND_MIDDLE:
			switch (pkt->opcode) {
			case IB_OPCODE_UC_SEND_MIDDLE:
			case IB_OPCODE_UC_SEND_LAST:
			case IB_OPCODE_UC_SEND_LAST_WITH_IMMEDIATE:
				return RESPST_CHK_OP_VALID;
			default:
				return RESPST_ERR_MISSING_OPCODE_LAST_D1E;
			}

		case IB_OPCODE_UC_RDMA_WRITE_FIRST:
		case IB_OPCODE_UC_RDMA_WRITE_MIDDLE:
			switch (pkt->opcode) {
			case IB_OPCODE_UC_RDMA_WRITE_MIDDLE:
			case IB_OPCODE_UC_RDMA_WRITE_LAST:
			case IB_OPCODE_UC_RDMA_WRITE_LAST_WITH_IMMEDIATE:
				return RESPST_CHK_OP_VALID;
			default:
				return RESPST_ERR_MISSING_OPCODE_LAST_D1E;
			}

		default:
			switch (pkt->opcode) {
			case IB_OPCODE_UC_SEND_MIDDLE:
			case IB_OPCODE_UC_SEND_LAST:
			case IB_OPCODE_UC_SEND_LAST_WITH_IMMEDIATE:
			case IB_OPCODE_UC_RDMA_WRITE_MIDDLE:
			case IB_OPCODE_UC_RDMA_WRITE_LAST:
			case IB_OPCODE_UC_RDMA_WRITE_LAST_WITH_IMMEDIATE:
				qp->resp.drop_msg = 1;
				return RESPST_CLEANUP;
			default:
				return RESPST_CHK_OP_VALID;
			}
		}
		break;

	default:
		return RESPST_CHK_OP_VALID;
	}
}

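/* verify that the QP access flags permit the requested operation */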
static enum resp_states check_op_valid(struct rxe_qp *qp,
				       struct rxe_pkt_info *pkt)
{
	switch (qp_type(qp)) {
	case IB_QPT_RC:
		if (((pkt->mask & RXE_READ_MASK) &&
		     !(qp->attr.qp_access_flags & IB_ACCESS_REMOTE_READ)) ||
		    ((pkt->mask & RXE_WRITE_MASK) &&
		     !(qp->attr.qp_access_flags & IB_ACCESS_REMOTE_WRITE)) ||
		    ((pkt->mask & RXE_ATOMIC_MASK) &&
		     !(qp->attr.qp_access_flags & IB_ACCESS_REMOTE_ATOMIC))) {
			return RESPST_ERR_UNSUPPORTED_OPCODE;
		}

		break;

	case IB_QPT_UC:
		if ((pkt->mask & RXE_WRITE_MASK) &&
		    !(qp->attr.qp_access_flags & IB_ACCESS_REMOTE_WRITE)) {
			qp->resp.drop_msg = 1;
			return RESPST_CLEANUP;
		}

		break;

	case IB_QPT_UD:
	case IB_QPT_SMI:
	case IB_QPT_GSI:
		break;

	default:
		WARN_ON_ONCE(1);
		break;
	}

	return RESPST_CHK_RESOURCE;
}

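/* copy the next receive wqe from the SRQ and report an SRQ limit event if
 * the queue has dropped below the armed limit
 */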
static enum resp_states get_srq_wqe(struct rxe_qp *qp)
{
	struct rxe_srq *srq = qp->srq;
	struct rxe_queue *q = srq->rq.queue;
	struct rxe_recv_wqe *wqe;
	struct ib_event ev;

	if (srq->error)
		return RESPST_ERR_RNR;

	spin_lock_bh(&srq->rq.consumer_lock);

	wqe = queue_head(q);
	if (!wqe) {
		spin_unlock_bh(&srq->rq.consumer_lock);
		return RESPST_ERR_RNR;
	}

	/* note kernel and user space recv wqes have same size */
	memcpy(&qp->resp.srq_wqe, wqe, sizeof(qp->resp.srq_wqe));

	qp->resp.wqe = &qp->resp.srq_wqe.wqe;
	advance_consumer(q);

	if (srq->limit && srq->ibsrq.event_handler &&
	    (queue_count(q) < srq->limit)) {
		srq->limit = 0;
		goto event;
	}

	spin_unlock_bh(&srq->rq.consumer_lock);
	return RESPST_CHK_LENGTH;

event:
	spin_unlock_bh(&srq->rq.consumer_lock);
	ev.device = qp->ibqp.device;
	ev.element.srq = qp->ibqp.srq;
	ev.event = IB_EVENT_SRQ_LIMIT_REACHED;
	srq->ibsrq.event_handler(&ev, srq->ibsrq.srq_context);
	return RESPST_CHK_LENGTH;
}

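/* make sure a receive wqe or a read/atomic resource is available for the
 * incoming request; flush the receive queue if the QP is in error
 */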
static enum resp_states check_resource(struct rxe_qp *qp,
				       struct rxe_pkt_info *pkt)
{
	struct rxe_srq *srq = qp->srq;

	if (qp->resp.state == QP_STATE_ERROR) {
		if (qp->resp.wqe) {
			qp->resp.status = IB_WC_WR_FLUSH_ERR;
			return RESPST_COMPLETE;
		} else if (!srq) {
			qp->resp.wqe = queue_head(qp->rq.queue);
			if (qp->resp.wqe) {
				qp->resp.status = IB_WC_WR_FLUSH_ERR;
				return RESPST_COMPLETE;
			} else {
				return RESPST_EXIT;
			}
		} else {
			return RESPST_EXIT;
		}
	}

	if (pkt->mask & RXE_READ_OR_ATOMIC) {
		/* it is the requesters job to not send
		 * too many read/atomic ops, we just
		 * recycle the responder resource queue
		 */
		if (likely(qp->attr.max_dest_rd_atomic > 0))
			return RESPST_CHK_LENGTH;
		else
			return RESPST_ERR_TOO_MANY_RDMA_ATM_REQ;
	}

	if (pkt->mask & RXE_RWR_MASK) {
		if (srq)
			return get_srq_wqe(qp);

		qp->resp.wqe = queue_head(qp->rq.queue);
		return (qp->resp.wqe) ? RESPST_CHK_LENGTH : RESPST_ERR_RNR;
	}

	return RESPST_CHK_LENGTH;
}

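/* placeholder; every QP type currently proceeds straight to the rkey check */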
static enum resp_states check_length(struct rxe_qp *qp,
				     struct rxe_pkt_info *pkt)
{
	switch (qp_type(qp)) {
	case IB_QPT_RC:
		return RESPST_CHK_RKEY;

	case IB_QPT_UC:
		return RESPST_CHK_RKEY;

	default:
		return RESPST_CHK_RKEY;
	}
}

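/* validate the rkey, va and length of an RDMA or atomic request against the
 * target memory region
 */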
static enum resp_states check_rkey(struct rxe_qp *qp,
				   struct rxe_pkt_info *pkt)
{
	struct rxe_mem *mem = NULL;
	u64 va;
	u32 rkey;
	u32 resid;
	u32 pktlen;
	int mtu = qp->mtu;
	enum resp_states state;
	int access;

	if (pkt->mask & (RXE_READ_MASK | RXE_WRITE_MASK)) {
		if (pkt->mask & RXE_RETH_MASK) {
			qp->resp.va = reth_va(pkt);
			qp->resp.rkey = reth_rkey(pkt);
			qp->resp.resid = reth_len(pkt);
		}
		access = (pkt->mask & RXE_READ_MASK) ? IB_ACCESS_REMOTE_READ
						     : IB_ACCESS_REMOTE_WRITE;
	} else if (pkt->mask & RXE_ATOMIC_MASK) {
		qp->resp.va = atmeth_va(pkt);
		qp->resp.rkey = atmeth_rkey(pkt);
		qp->resp.resid = sizeof(u64);
		access = IB_ACCESS_REMOTE_ATOMIC;
	} else {
		return RESPST_EXECUTE;
	}

	/* A zero-byte op is not required to set an addr or rkey. */
	if ((pkt->mask & (RXE_READ_MASK | RXE_WRITE_OR_SEND)) &&
	    (pkt->mask & RXE_RETH_MASK) &&
	    reth_len(pkt) == 0) {
		return RESPST_EXECUTE;
	}

	va = qp->resp.va;
	rkey = qp->resp.rkey;
	resid = qp->resp.resid;
	pktlen = payload_size(pkt);

	mem = lookup_mem(qp->pd, access, rkey, lookup_remote);
	if (!mem) {
		state = RESPST_ERR_RKEY_VIOLATION;
		goto err;
	}

	if (unlikely(mem->state == RXE_MEM_STATE_FREE)) {
		state = RESPST_ERR_RKEY_VIOLATION;
		goto err;
	}

	if (mem_check_range(mem, va, resid)) {
		state = RESPST_ERR_RKEY_VIOLATION;
		goto err;
	}

	if (pkt->mask & RXE_WRITE_MASK) {
		if (resid > mtu) {
			if (pktlen != mtu || bth_pad(pkt)) {
				state = RESPST_ERR_LENGTH;
				goto err;
			}
		} else {
			if (pktlen != resid) {
				state = RESPST_ERR_LENGTH;
				goto err;
			}
			if ((bth_pad(pkt) != (0x3 & (-resid)))) {
				/* This case may not be exactly that
				 * but nothing else fits.
				 */
				state = RESPST_ERR_LENGTH;
				goto err;
			}
		}
	}

	WARN_ON_ONCE(qp->resp.mr);

	qp->resp.mr = mem;
	return RESPST_EXECUTE;

err:
	if (mem)
		rxe_drop_ref(mem);
	return state;
}

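/* copy payload data into the receive wqe for send operations */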
static enum resp_states send_data_in(struct rxe_qp *qp, void *data_addr,
				     int data_len)
{
	int err;
	struct rxe_dev *rxe = to_rdev(qp->ibqp.device);

	err = copy_data(rxe, qp->pd, IB_ACCESS_LOCAL_WRITE, &qp->resp.wqe->dma,
			data_addr, data_len, to_mem_obj, NULL);
	if (unlikely(err))
		return (err == -ENOSPC) ? RESPST_ERR_LENGTH
					: RESPST_ERR_MALFORMED_WQE;

	return RESPST_NONE;
}

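/* copy the payload of an RDMA write into the target memory region */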
static enum resp_states write_data_in(struct rxe_qp *qp,
				      struct rxe_pkt_info *pkt)
{
	enum resp_states rc = RESPST_NONE;
	int err;
	int data_len = payload_size(pkt);

	err = rxe_mem_copy(qp->resp.mr, qp->resp.va, payload_addr(pkt),
			   data_len, to_mem_obj, NULL);
	if (err) {
		rc = RESPST_ERR_RKEY_VIOLATION;
		goto out;
	}

	qp->resp.va += data_len;
	qp->resp.resid -= data_len;

out:
	return rc;
}

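/* Guarantee atomicity of atomic operations at the machine level. */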
static DEFINE_SPINLOCK(atomic_ops_lock);

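/* execute an atomic operation (compare & swap or fetch & add) on the target
 * memory under atomic_ops_lock and save the original value for the ack
 */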
static enum resp_states process_atomic(struct rxe_qp *qp,
				       struct rxe_pkt_info *pkt)
{
	u64 iova = atmeth_va(pkt);
	u64 *vaddr;
	enum resp_states ret;
	struct rxe_mem *mr = qp->resp.mr;

	if (mr->state != RXE_MEM_STATE_VALID) {
		ret = RESPST_ERR_RKEY_VIOLATION;
		goto out;
	}

	vaddr = iova_to_vaddr(mr, iova, sizeof(u64));

	/* check vaddr is 8 bytes aligned. */
	if (!vaddr || (uintptr_t)vaddr & 7) {
		ret = RESPST_ERR_MISALIGNED_ATOMIC;
		goto out;
	}

	spin_lock_bh(&atomic_ops_lock);

	qp->resp.atomic_orig = *vaddr;

	if (pkt->opcode == IB_OPCODE_RC_COMPARE_SWAP ||
	    pkt->opcode == IB_OPCODE_RD_COMPARE_SWAP) {
		if (*vaddr == atmeth_comp(pkt))
			*vaddr = atmeth_swap_add(pkt);
	} else {
		*vaddr += atmeth_swap_add(pkt);
	}

	spin_unlock_bh(&atomic_ops_lock);

	ret = RESPST_NONE;
out:
	return ret;
}

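/* build an ACK, atomic ACK or read response packet based on the headers of
 * the request packet
 */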
static struct sk_buff *prepare_ack_packet(struct rxe_qp *qp,
					  struct rxe_pkt_info *pkt,
					  struct rxe_pkt_info *ack,
					  int opcode,
					  int payload,
					  u32 psn,
					  u8 syndrome,
					  u32 *crcp)
{
	struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
	struct sk_buff *skb;
	u32 crc = 0;
	u32 *p;
	int paylen;
	int pad;
	int err;

	/*
	 * allocate packet
	 */
	pad = (-payload) & 0x3;
	paylen = rxe_opcode[opcode].length + payload + pad + RXE_ICRC_SIZE;

	skb = rxe_init_packet(rxe, &qp->pri_av, paylen, ack);
	if (!skb)
		return NULL;

	ack->qp = qp;
	ack->opcode = opcode;
	ack->mask = rxe_opcode[opcode].mask;
	ack->offset = pkt->offset;
	ack->paylen = paylen;

	/* fill in bth using the request packet headers */
	memcpy(ack->hdr, pkt->hdr, pkt->offset + RXE_BTH_BYTES);

	bth_set_opcode(ack, opcode);
	bth_set_qpn(ack, qp->attr.dest_qp_num);
	bth_set_pad(ack, pad);
	bth_set_se(ack, 0);
	bth_set_psn(ack, psn);
	bth_set_ack(ack, 0);
	ack->psn = psn;

	if (ack->mask & RXE_AETH_MASK) {
		aeth_set_syn(ack, syndrome);
		aeth_set_msn(ack, qp->resp.msn);
	}

	if (ack->mask & RXE_ATMACK_MASK)
		atmack_set_orig(ack, qp->resp.atomic_orig);

	err = rxe_prepare(rxe, ack, skb, &crc);
	if (err) {
		kfree_skb(skb);
		return NULL;
	}

	if (crcp) {
		/* CRC computation will be continued by the caller */
		*crcp = crc;
	} else {
		p = payload_addr(ack) + payload + bth_pad(ack);
		*p = ~crc;
	}

	return skb;
}

/* RDMA read response. If res is not NULL, then we have a current RDMA request
 * being processed or replayed.
 */
static enum resp_states read_reply(struct rxe_qp *qp,
				   struct rxe_pkt_info *req_pkt)
{
	struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
	struct rxe_pkt_info ack_pkt;
	struct sk_buff *skb;
	int mtu = qp->mtu;
	enum resp_states state;
	int payload;
	int opcode;
	int err;
	struct resp_res *res = qp->resp.res;
	u32 icrc;
	u32 *p;

	if (!res) {
		/* This is the first time we process that request. Get a
		 * resource
		 */
		res = &qp->resp.resources[qp->resp.res_head];

		free_rd_atomic_resource(qp, res);
		rxe_advance_resp_resource(qp);

		res->type = RXE_READ_MASK;

		res->read.va = qp->resp.va;
		res->read.va_org = qp->resp.va;

		res->first_psn = req_pkt->psn;

		if (reth_len(req_pkt)) {
			res->last_psn = (req_pkt->psn +
					 (reth_len(req_pkt) + mtu - 1) /
					 mtu - 1) & BTH_PSN_MASK;
		} else {
			res->last_psn = res->first_psn;
		}
		res->cur_psn = req_pkt->psn;

		res->read.resid = qp->resp.resid;
		res->read.length = qp->resp.resid;
		res->read.rkey = qp->resp.rkey;

		/* note res inherits the reference to mr from qp */
		res->read.mr = qp->resp.mr;
		qp->resp.mr = NULL;

		qp->resp.res = res;
		res->state = rdatm_res_state_new;
	}

	if (res->state == rdatm_res_state_new) {
		if (res->read.resid <= mtu)
			opcode = IB_OPCODE_RC_RDMA_READ_RESPONSE_ONLY;
		else
			opcode = IB_OPCODE_RC_RDMA_READ_RESPONSE_FIRST;
	} else {
		if (res->read.resid > mtu)
			opcode = IB_OPCODE_RC_RDMA_READ_RESPONSE_MIDDLE;
		else
			opcode = IB_OPCODE_RC_RDMA_READ_RESPONSE_LAST;
	}

	res->state = rdatm_res_state_next;

	payload = min_t(int, res->read.resid, mtu);

	skb = prepare_ack_packet(qp, req_pkt, &ack_pkt, opcode, payload,
				 res->cur_psn, AETH_ACK_UNLIMITED, &icrc);
	if (!skb)
		return RESPST_ERR_RNR;

	err = rxe_mem_copy(res->read.mr, res->read.va, payload_addr(&ack_pkt),
			   payload, from_mem_obj, &icrc);
	if (err)
		pr_err("Failed copying memory\n");

	p = payload_addr(&ack_pkt) + payload + bth_pad(&ack_pkt);
	*p = ~icrc;

	err = rxe_xmit_packet(rxe, qp, &ack_pkt, skb);
	if (err) {
		pr_err("Failed sending RDMA reply.\n");
		return RESPST_ERR_RNR;
	}

	res->read.va += payload;
	res->read.resid -= payload;
	res->cur_psn = (res->cur_psn + 1) & BTH_PSN_MASK;

	if (res->read.resid > 0) {
		state = RESPST_DONE;
	} else {
		qp->resp.res = NULL;
		qp->resp.opcode = -1;
		if (psn_compare(res->cur_psn, qp->resp.psn) >= 0)
			qp->resp.psn = res->cur_psn;
		state = RESPST_CLEANUP;
	}

	return state;
}

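/* reconstruct the IPv4 or GRH (IPv6) header of the request from the skb */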
static void build_rdma_network_hdr(union rdma_network_hdr *hdr,
				   struct rxe_pkt_info *pkt)
{
	struct sk_buff *skb = PKT_TO_SKB(pkt);

	memset(hdr, 0, sizeof(*hdr));
	if (skb->protocol == htons(ETH_P_IP))
		memcpy(&hdr->roce4grh, ip_hdr(skb), sizeof(hdr->roce4grh));
	else if (skb->protocol == htons(ETH_P_IPV6))
		memcpy(&hdr->ibgrh, ipv6_hdr(skb), sizeof(hdr->ibgrh));
}

/* Executes a new request. A retried request never reach that function (send
 * and writes are discarded, and reads and atomics are retried elsewhere.
 */
static enum resp_states execute(struct rxe_qp *qp, struct rxe_pkt_info *pkt)
{
	enum resp_states err;

	if (pkt->mask & RXE_SEND_MASK) {
		if (qp_type(qp) == IB_QPT_UD ||
		    qp_type(qp) == IB_QPT_SMI ||
		    qp_type(qp) == IB_QPT_GSI) {
			union rdma_network_hdr hdr;

			build_rdma_network_hdr(&hdr, pkt);

			err = send_data_in(qp, &hdr, sizeof(hdr));
			if (err)
				return err;
		}
		err = send_data_in(qp, payload_addr(pkt), payload_size(pkt));
		if (err)
			return err;
	} else if (pkt->mask & RXE_WRITE_MASK) {
		err = write_data_in(qp, pkt);
		if (err)
			return err;
	} else if (pkt->mask & RXE_READ_MASK) {
		/* For RDMA Read we can increment the msn now. See C9-148. */
		qp->resp.msn++;
		return RESPST_READ_REPLY;
	} else if (pkt->mask & RXE_ATOMIC_MASK) {
		err = process_atomic(qp, pkt);
		if (err)
			return err;
	} else {
		/* Unreachable */
		WARN_ON_ONCE(1);
	}

	/* next expected psn, read handles this separately */
	qp->resp.psn = (pkt->psn + 1) & BTH_PSN_MASK;

	qp->resp.opcode = pkt->opcode;
	qp->resp.status = IB_WC_SUCCESS;

	if (pkt->mask & RXE_COMP_MASK) {
		/* We successfully processed this new request. */
		qp->resp.msn++;
		return RESPST_COMPLETE;
	} else if (qp_type(qp) == IB_QPT_RC)
		return RESPST_ACKNOWLEDGE;
	else
		return RESPST_CLEANUP;
}

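/* post a work completion for the current receive wqe and retire the wqe */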
static enum resp_states do_complete(struct rxe_qp *qp,
				    struct rxe_pkt_info *pkt)
{
	struct rxe_cqe cqe;
	struct ib_wc *wc = &cqe.ibwc;
	struct ib_uverbs_wc *uwc = &cqe.uibwc;
	struct rxe_recv_wqe *wqe = qp->resp.wqe;

	if (unlikely(!wqe))
		return RESPST_CLEANUP;

	memset(&cqe, 0, sizeof(cqe));

	wc->wr_id = wqe->wr_id;
	wc->status = qp->resp.status;
	wc->qp = &qp->ibqp;

	/* fields after status are not required for errors */
	if (wc->status == IB_WC_SUCCESS) {
		wc->opcode = (pkt->mask & RXE_IMMDT_MASK &&
				pkt->mask & RXE_WRITE_MASK) ?
					IB_WC_RECV_RDMA_WITH_IMM : IB_WC_RECV;
		wc->vendor_err = 0;
		wc->byte_len = wqe->dma.length - wqe->dma.resid;

		/* fields after byte_len are different between kernel and user
		 * space
		 */
		if (qp->rcq->is_user) {
			uwc->wc_flags = IB_WC_GRH;

			if (pkt->mask & RXE_IMMDT_MASK) {
				uwc->wc_flags |= IB_WC_WITH_IMM;
				uwc->ex.imm_data = immdt_imm(pkt);
			}

			if (pkt->mask & RXE_IETH_MASK) {
				uwc->wc_flags |= IB_WC_WITH_INVALIDATE;
				uwc->ex.invalidate_rkey = ieth_rkey(pkt);
			}

			uwc->qp_num = qp->ibqp.qp_num;

			if (pkt->mask & RXE_DETH_MASK)
				uwc->src_qp = deth_sqp(pkt);

			uwc->port_num = qp->attr.port_num;
		} else {
			struct sk_buff *skb = PKT_TO_SKB(pkt);

			wc->wc_flags = IB_WC_GRH | IB_WC_WITH_NETWORK_HDR_TYPE;
			if (skb->protocol == htons(ETH_P_IP))
				wc->network_hdr_type = RDMA_NETWORK_IPV4;
			else
				wc->network_hdr_type = RDMA_NETWORK_IPV6;

			if (pkt->mask & RXE_IMMDT_MASK) {
				wc->wc_flags |= IB_WC_WITH_IMM;
				wc->ex.imm_data = immdt_imm(pkt);
			}

			if (pkt->mask & RXE_IETH_MASK) {
				struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
				struct rxe_mem *rmr;

				wc->wc_flags |= IB_WC_WITH_INVALIDATE;
				wc->ex.invalidate_rkey = ieth_rkey(pkt);

				rmr = rxe_pool_get_index(&rxe->mr_pool,
							 wc->ex.invalidate_rkey >> 8);
				if (unlikely(!rmr)) {
					pr_err("Bad rkey %#x invalidation\n",
					       wc->ex.invalidate_rkey);
					return RESPST_ERROR;
				}
				rmr->state = RXE_MEM_STATE_FREE;
				rxe_drop_ref(rmr);
			}

			wc->qp = &qp->ibqp;

			if (pkt->mask & RXE_DETH_MASK)
				wc->src_qp = deth_sqp(pkt);

			wc->port_num = qp->attr.port_num;
		}
	}

	/* have copy for srq and reference for !srq */
	if (!qp->srq)
		advance_consumer(qp->rq.queue);

	qp->resp.wqe = NULL;

	if (rxe_cq_post(qp->rcq, &cqe, pkt ? bth_se(pkt) : 1))
		return RESPST_ERR_CQ_OVERFLOW;

	if (qp->resp.state == QP_STATE_ERROR)
		return RESPST_CHK_RESOURCE;

	if (!pkt)
		return RESPST_DONE;
	else if (qp_type(qp) == IB_QPT_RC)
		return RESPST_ACKNOWLEDGE;
	else
		return RESPST_CLEANUP;
}

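/* send an ACK or NAK packet carrying the given syndrome and psn */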
static int send_ack(struct rxe_qp *qp, struct rxe_pkt_info *pkt,
		    u8 syndrome, u32 psn)
{
	int err = 0;
	struct rxe_pkt_info ack_pkt;
	struct sk_buff *skb;
	struct rxe_dev *rxe = to_rdev(qp->ibqp.device);

	skb = prepare_ack_packet(qp, pkt, &ack_pkt, IB_OPCODE_RC_ACKNOWLEDGE,
				 0, psn, syndrome, NULL);
	if (!skb) {
		err = -ENOMEM;
		goto err1;
	}

	err = rxe_xmit_packet(rxe, qp, &ack_pkt, skb);
	if (err)
		pr_err_ratelimited("Failed sending ack\n");

err1:
	return err;
}

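/* send an atomic ACK and keep a copy of it in a responder resource so that a
 * duplicate atomic request can be answered by resending it
 */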
static int send_atomic_ack(struct rxe_qp *qp, struct rxe_pkt_info *pkt,
			   u8 syndrome)
{
	int rc = 0;
	struct rxe_pkt_info ack_pkt;
	struct sk_buff *skb;
	struct sk_buff *skb_copy;
	struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
	struct resp_res *res;

	skb = prepare_ack_packet(qp, pkt, &ack_pkt,
				 IB_OPCODE_RC_ATOMIC_ACKNOWLEDGE, 0, pkt->psn,
				 syndrome, NULL);
	if (!skb) {
		rc = -ENOMEM;
		goto out;
	}

	skb_copy = skb_clone(skb, GFP_ATOMIC);
	if (skb_copy)
		rxe_add_ref(qp);
	else {
		pr_warn("Could not clone atomic response\n");
		rc = -ENOMEM;
		goto out;
	}

	res = &qp->resp.resources[qp->resp.res_head];
	free_rd_atomic_resource(qp, res);
	rxe_advance_resp_resource(qp);

	memcpy(SKB_TO_PKT(skb), &ack_pkt, sizeof(ack_pkt));
	memset((unsigned char *)SKB_TO_PKT(skb) + sizeof(ack_pkt), 0,
	       sizeof(skb->cb) - sizeof(ack_pkt));

	res->type = RXE_ATOMIC_MASK;
	res->atomic.skb = skb;
	res->first_psn = ack_pkt.psn;
	res->last_psn = ack_pkt.psn;
	res->cur_psn = ack_pkt.psn;

	rc = rxe_xmit_packet(rxe, qp, &ack_pkt, skb_copy);
	if (rc) {
		pr_err_ratelimited("Failed sending ack\n");
		rxe_drop_ref(qp);
		kfree_skb(skb_copy);
	}

out:
	return rc;
}

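/* acknowledge the request if needed; only RC QPs generate acks */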
static enum resp_states acknowledge(struct rxe_qp *qp,
				    struct rxe_pkt_info *pkt)
{
	if (qp_type(qp) != IB_QPT_RC)
		return RESPST_CLEANUP;

	if (qp->resp.aeth_syndrome != AETH_ACK_UNLIMITED)
		send_ack(qp, pkt, qp->resp.aeth_syndrome, pkt->psn);
	else if (pkt->mask & RXE_ATOMIC_MASK)
		send_atomic_ack(qp, pkt, AETH_ACK_UNLIMITED);
	else if (bth_ack(pkt))
		send_ack(qp, pkt, AETH_ACK_UNLIMITED, pkt->psn);

	return RESPST_CLEANUP;
}

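/* free the current request packet and drop any memory region reference still
 * held by the responder
 */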
static enum resp_states cleanup(struct rxe_qp *qp,
				struct rxe_pkt_info *pkt)
{
	struct sk_buff *skb;

	if (pkt) {
		skb = skb_dequeue(&qp->req_pkts);
		rxe_drop_ref(qp);
		kfree_skb(skb);
	}

	if (qp->resp.mr) {
		rxe_drop_ref(qp->resp.mr);
		qp->resp.mr = NULL;
	}

	return RESPST_DONE;
}

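/* look up the responder resource whose psn range covers the given psn */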
static struct resp_res *find_resource(struct rxe_qp *qp, u32 psn)
{
	int i;

	for (i = 0; i < qp->attr.max_dest_rd_atomic; i++) {
		struct resp_res *res = &qp->resp.resources[i];

		if (res->type == 0)
			continue;

		if (psn_compare(psn, res->first_psn) >= 0 &&
		    psn_compare(psn, res->last_psn) <= 0) {
			return res;
		}
	}

	return NULL;
}

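/* handle a duplicate request: re-ack sends and writes, and replay read and
 * atomic responses from the responder resources
 */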
static enum resp_states duplicate_request(struct rxe_qp *qp,
					  struct rxe_pkt_info *pkt)
{
	enum resp_states rc;
	u32 prev_psn = (qp->resp.psn - 1) & BTH_PSN_MASK;

	if (pkt->mask & RXE_SEND_MASK ||
	    pkt->mask & RXE_WRITE_MASK) {
		/* SEND. Ack again and cleanup. C9-105. */
		if (bth_ack(pkt))
			send_ack(qp, pkt, AETH_ACK_UNLIMITED, prev_psn);
		rc = RESPST_CLEANUP;
		goto out;
	} else if (pkt->mask & RXE_READ_MASK) {
		struct resp_res *res;

		res = find_resource(qp, pkt->psn);
		if (!res) {
			/* Resource not found. Class D error. Drop the
			 * request.
			 */
			rc = RESPST_CLEANUP;
			goto out;
		} else {
			/* Ensure this new request is the same as the previous
			 * one or a subset of it.
			 */
			u64 iova = reth_va(pkt);
			u32 resid = reth_len(pkt);

			if (iova < res->read.va_org ||
			    resid > res->read.length ||
			    (iova + resid) > (res->read.va_org +
					      res->read.length)) {
				rc = RESPST_CLEANUP;
				goto out;
			}

			if (reth_rkey(pkt) != res->read.rkey) {
				rc = RESPST_CLEANUP;
				goto out;
			}

			res->cur_psn = pkt->psn;
			res->state = (pkt->psn == res->first_psn) ?
					rdatm_res_state_new :
					rdatm_res_state_replay;

			/* Reset the resource */
			res->read.va_org = iova;
			res->read.va = iova;
			res->read.resid = resid;

			/* Replay the RDMA read reply. */
			qp->resp.res = res;
			rc = RESPST_READ_REPLY;
			goto out;
		}
	} else {
		struct resp_res *res;

		/* Find the operation in our list of responder resources. */
		res = find_resource(qp, pkt->psn);
		if (res) {
			struct sk_buff *skb_copy;

			skb_copy = skb_clone(res->atomic.skb, GFP_ATOMIC);
			if (skb_copy) {
				rxe_add_ref(qp);
			} else {
				pr_warn("Couldn't clone atomic resp\n");
				rc = RESPST_CLEANUP;
				goto out;
			}

			/* Resend the result. */
			rc = rxe_xmit_packet(to_rdev(qp->ibqp.device), qp,
					     pkt, skb_copy);
			if (rc) {
				pr_err("Failed resending result. This flow is not handled - skb ignored\n");
				rxe_drop_ref(qp);
				rc = RESPST_CLEANUP;
				goto out;
			}
		}

		/* Resource not found. Class D error. Drop the request. */
		rc = RESPST_CLEANUP;
		goto out;
	}
out:
	return rc;
}

/* Process a class A or C. Both are treated the same in this implementation. */
static void do_class_ac_error(struct rxe_qp *qp, u8 syndrome,
			      enum ib_wc_status status)
{
	qp->resp.aeth_syndrome = syndrome;
	qp->resp.status = status;

	/* indicate that we should go through the ERROR state */
	qp->resp.goto_error = 1;
}

static enum resp_states do_class_d1e_error(struct rxe_qp *qp)
{
	/* UC */
	if (qp->srq) {
		/* Class E */
		qp->resp.drop_msg = 1;
		if (qp->resp.wqe) {
			qp->resp.status = IB_WC_REM_INV_REQ_ERR;
			return RESPST_COMPLETE;
		} else {
			return RESPST_CLEANUP;
		}
	} else {
		/* Class D1. This packet may be the start of a
		 * new message and could be valid. The previous
		 * message is invalid and ignored. reset the
		 * recv wr to its original state
		 */
		if (qp->resp.wqe) {
			qp->resp.wqe->dma.resid = qp->resp.wqe->dma.length;
			qp->resp.wqe->dma.cur_sge = 0;
			qp->resp.wqe->dma.sge_offset = 0;
			qp->resp.opcode = -1;
		}

		if (qp->resp.mr) {
			rxe_drop_ref(qp->resp.mr);
			qp->resp.mr = NULL;
		}

		return RESPST_CLEANUP;
	}
}

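/* empty the request packet queue and, when not notifying, also consume any
 * receive wqes that are still outstanding
 */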
static void rxe_drain_req_pkts(struct rxe_qp *qp, bool notify)
{
	struct sk_buff *skb;

	while ((skb = skb_dequeue(&qp->req_pkts))) {
		rxe_drop_ref(qp);
		kfree_skb(skb);
	}

	if (notify)
		return;

	while (!qp->srq && qp->rq.queue && queue_head(qp->rq.queue))
		advance_consumer(qp->rq.queue);
}

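/* responder state machine; processes request packets for one QP until the
 * queue is empty or an exit condition is reached
 */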
int rxe_responder(void *arg)
{
	struct rxe_qp *qp = (struct rxe_qp *)arg;
	struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
	enum resp_states state;
	struct rxe_pkt_info *pkt = NULL;
	int ret = 0;

	rxe_add_ref(qp);

	qp->resp.aeth_syndrome = AETH_ACK_UNLIMITED;

	if (!qp->valid) {
		ret = -EINVAL;
		goto done;
	}

	switch (qp->resp.state) {
	case QP_STATE_RESET:
		state = RESPST_RESET;
		break;

	default:
		state = RESPST_GET_REQ;
		break;
	}

	while (1) {
		pr_debug("qp#%d state = %s\n", qp_num(qp),
			 resp_state_name[state]);
		switch (state) {
		case RESPST_GET_REQ:
			state = get_req(qp, &pkt);
			break;
		case RESPST_CHK_PSN:
			state = check_psn(qp, pkt);
			break;
		case RESPST_CHK_OP_SEQ:
			state = check_op_seq(qp, pkt);
			break;
		case RESPST_CHK_OP_VALID:
			state = check_op_valid(qp, pkt);
			break;
		case RESPST_CHK_RESOURCE:
			state = check_resource(qp, pkt);
			break;
		case RESPST_CHK_LENGTH:
			state = check_length(qp, pkt);
			break;
		case RESPST_CHK_RKEY:
			state = check_rkey(qp, pkt);
			break;
		case RESPST_EXECUTE:
			state = execute(qp, pkt);
			break;
		case RESPST_COMPLETE:
			state = do_complete(qp, pkt);
			break;
		case RESPST_READ_REPLY:
			state = read_reply(qp, pkt);
			break;
		case RESPST_ACKNOWLEDGE:
			state = acknowledge(qp, pkt);
			break;
		case RESPST_CLEANUP:
			state = cleanup(qp, pkt);
			break;
		case RESPST_DUPLICATE_REQUEST:
			state = duplicate_request(qp, pkt);
			break;
		case RESPST_ERR_PSN_OUT_OF_SEQ:
			/* RC only - Class B. Drop the request. */
			send_ack(qp, pkt, AETH_NAK_PSN_SEQ_ERROR, qp->resp.psn);
			state = RESPST_CLEANUP;
			break;

		case RESPST_ERR_TOO_MANY_RDMA_ATM_REQ:
		case RESPST_ERR_MISSING_OPCODE_FIRST:
		case RESPST_ERR_MISSING_OPCODE_LAST_C:
		case RESPST_ERR_UNSUPPORTED_OPCODE:
		case RESPST_ERR_MISALIGNED_ATOMIC:
			/* RC Only - Class C. */
			do_class_ac_error(qp, AETH_NAK_INVALID_REQ,
					  IB_WC_REM_INV_REQ_ERR);
			state = RESPST_COMPLETE;
			break;

		case RESPST_ERR_MISSING_OPCODE_LAST_D1E:
			state = do_class_d1e_error(qp);
			break;
		case RESPST_ERR_RNR:
			if (qp_type(qp) == IB_QPT_RC) {
				rxe_counter_inc(rxe, RXE_CNT_SND_RNR);
				/* RC - class B */
				send_ack(qp, pkt, AETH_RNR_NAK |
					 (~AETH_TYPE_MASK &
					 qp->attr.min_rnr_timer),
					 pkt->psn);
			} else {
				/* UD/UC - class D */
				qp->resp.drop_msg = 1;
			}
			state = RESPST_CLEANUP;
			break;

		case RESPST_ERR_RKEY_VIOLATION:
			if (qp_type(qp) == IB_QPT_RC) {
				/* Class C */
				do_class_ac_error(qp, AETH_NAK_REM_ACC_ERR,
						  IB_WC_REM_ACCESS_ERR);
				state = RESPST_COMPLETE;
			} else {
				qp->resp.drop_msg = 1;
				if (qp->srq) {
					/* UC/SRQ Class D */
					qp->resp.status = IB_WC_REM_ACCESS_ERR;
					state = RESPST_COMPLETE;
				} else {
					/* UC/non-SRQ Class E. */
					state = RESPST_CLEANUP;
				}
			}
			break;

		case RESPST_ERR_LENGTH:
			if (qp_type(qp) == IB_QPT_RC) {
				/* Class C */
				do_class_ac_error(qp, AETH_NAK_INVALID_REQ,
						  IB_WC_REM_INV_REQ_ERR);
				state = RESPST_COMPLETE;
			} else if (qp->srq) {
				/* UC/UD - class E */
				qp->resp.status = IB_WC_REM_INV_REQ_ERR;
				state = RESPST_COMPLETE;
			} else {
				/* UC/UD - class D */
				qp->resp.drop_msg = 1;
				state = RESPST_CLEANUP;
			}
			break;

		case RESPST_ERR_MALFORMED_WQE:
			/* All - Class A */
			do_class_ac_error(qp, AETH_NAK_REM_OP_ERR,
					  IB_WC_LOC_QP_OP_ERR);
			state = RESPST_COMPLETE;
			break;

		case RESPST_ERR_CQ_OVERFLOW:
			/* All - Class G */
			state = RESPST_ERROR;
			break;

		case RESPST_DONE:
			if (qp->resp.goto_error) {
				state = RESPST_ERROR;
				break;
			}

			goto done;

		case RESPST_EXIT:
			if (qp->resp.goto_error) {
				state = RESPST_ERROR;
				break;
			}

			goto exit;

		case RESPST_RESET:
			rxe_drain_req_pkts(qp, false);
			qp->resp.wqe = NULL;
			goto exit;

		case RESPST_ERROR:
			qp->resp.goto_error = 0;
			pr_warn("qp#%d moved to error state\n", qp_num(qp));
			rxe_qp_error(qp);
			goto exit;

		default:
			WARN_ON_ONCE(1);
		}
	}

exit:
	ret = -EAGAIN;
done:
	rxe_drop_ref(qp);
	return ret;
}