// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 */

#include <linux/skbuff.h>

#include "rxe.h"
#include "rxe_loc.h"
#include "rxe_queue.h"

enum resp_states {
	RESPST_NONE,
	RESPST_GET_REQ,
	RESPST_CHK_PSN,
	RESPST_CHK_OP_SEQ,
	RESPST_CHK_OP_VALID,
	RESPST_CHK_RESOURCE,
	RESPST_CHK_LENGTH,
	RESPST_CHK_RKEY,
	RESPST_EXECUTE,
	RESPST_READ_REPLY,
	RESPST_COMPLETE,
	RESPST_ACKNOWLEDGE,
	RESPST_CLEANUP,
	RESPST_DUPLICATE_REQUEST,
	RESPST_ERR_MALFORMED_WQE,
	RESPST_ERR_UNSUPPORTED_OPCODE,
	RESPST_ERR_MISALIGNED_ATOMIC,
	RESPST_ERR_PSN_OUT_OF_SEQ,
	RESPST_ERR_MISSING_OPCODE_FIRST,
	RESPST_ERR_MISSING_OPCODE_LAST_C,
	RESPST_ERR_MISSING_OPCODE_LAST_D1E,
	RESPST_ERR_TOO_MANY_RDMA_ATM_REQ,
	RESPST_ERR_RNR,
	RESPST_ERR_RKEY_VIOLATION,
	RESPST_ERR_INVALIDATE_RKEY,
	RESPST_ERR_LENGTH,
	RESPST_ERR_CQ_OVERFLOW,
	RESPST_ERROR,
	RESPST_RESET,
	RESPST_DONE,
	RESPST_EXIT,
};

static char *resp_state_name[] = {
	[RESPST_NONE] = "NONE",
	[RESPST_GET_REQ] = "GET_REQ",
	[RESPST_CHK_PSN] = "CHK_PSN",
	[RESPST_CHK_OP_SEQ] = "CHK_OP_SEQ",
	[RESPST_CHK_OP_VALID] = "CHK_OP_VALID",
	[RESPST_CHK_RESOURCE] = "CHK_RESOURCE",
	[RESPST_CHK_LENGTH] = "CHK_LENGTH",
	[RESPST_CHK_RKEY] = "CHK_RKEY",
	[RESPST_EXECUTE] = "EXECUTE",
	[RESPST_READ_REPLY] = "READ_REPLY",
	[RESPST_COMPLETE] = "COMPLETE",
	[RESPST_ACKNOWLEDGE] = "ACKNOWLEDGE",
	[RESPST_CLEANUP] = "CLEANUP",
	[RESPST_DUPLICATE_REQUEST] = "DUPLICATE_REQUEST",
	[RESPST_ERR_MALFORMED_WQE] = "ERR_MALFORMED_WQE",
	[RESPST_ERR_UNSUPPORTED_OPCODE] = "ERR_UNSUPPORTED_OPCODE",
	[RESPST_ERR_MISALIGNED_ATOMIC] = "ERR_MISALIGNED_ATOMIC",
	[RESPST_ERR_PSN_OUT_OF_SEQ] = "ERR_PSN_OUT_OF_SEQ",
	[RESPST_ERR_MISSING_OPCODE_FIRST] = "ERR_MISSING_OPCODE_FIRST",
	[RESPST_ERR_MISSING_OPCODE_LAST_C] = "ERR_MISSING_OPCODE_LAST_C",
	[RESPST_ERR_MISSING_OPCODE_LAST_D1E] = "ERR_MISSING_OPCODE_LAST_D1E",
	[RESPST_ERR_TOO_MANY_RDMA_ATM_REQ] = "ERR_TOO_MANY_RDMA_ATM_REQ",
	[RESPST_ERR_RNR] = "ERR_RNR",
	[RESPST_ERR_RKEY_VIOLATION] = "ERR_RKEY_VIOLATION",
	[RESPST_ERR_INVALIDATE_RKEY] = "ERR_INVALIDATE_RKEY",
	[RESPST_ERR_LENGTH] = "ERR_LENGTH",
	[RESPST_ERR_CQ_OVERFLOW] = "ERR_CQ_OVERFLOW",
	[RESPST_ERROR] = "ERROR",
	[RESPST_RESET] = "RESET",
	[RESPST_DONE] = "DONE",
	[RESPST_EXIT] = "EXIT",
};
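
/* The responder is driven by the state machine below: each request packet
 * normally flows GET_REQ -> CHK_PSN -> CHK_OP_SEQ -> CHK_OP_VALID ->
 * CHK_RESOURCE -> CHK_LENGTH -> CHK_RKEY -> EXECUTE, then on through
 * COMPLETE/ACKNOWLEDGE/CLEANUP (or READ_REPLY for RDMA reads), with the
 * ERR_* states producing the appropriate NAKs and completion errors.
 * See rxe_responder() at the bottom of this file.
 */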

/* rxe_recv calls here to add a request packet to the input queue */
void rxe_resp_queue_pkt(struct rxe_qp *qp, struct sk_buff *skb)
{
	int must_sched;
	struct rxe_pkt_info *pkt = SKB_TO_PKT(skb);

	skb_queue_tail(&qp->req_pkts, skb);

	must_sched = (pkt->opcode == IB_OPCODE_RC_RDMA_READ_REQUEST) ||
			(skb_queue_len(&qp->req_pkts) > 1);

	rxe_run_task(&qp->resp.task, must_sched);
}

static inline enum resp_states get_req(struct rxe_qp *qp,
				       struct rxe_pkt_info **pkt_p)
{
	struct sk_buff *skb;

	if (qp->resp.state == QP_STATE_ERROR) {
		while ((skb = skb_dequeue(&qp->req_pkts))) {
			rxe_drop_ref(qp);
			kfree_skb(skb);
			ib_device_put(qp->ibqp.device);
		}

		/* go drain recv wr queue */
		return RESPST_CHK_RESOURCE;
	}

	skb = skb_peek(&qp->req_pkts);
	if (!skb)
		return RESPST_EXIT;

	*pkt_p = SKB_TO_PKT(skb);

	return (qp->resp.res) ? RESPST_READ_REPLY : RESPST_CHK_PSN;
}

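/* Compare the packet PSN against the PSN the responder expects next and
 * classify the packet as in-order, duplicate, or out of sequence.
 */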
static enum resp_states check_psn(struct rxe_qp *qp,
				  struct rxe_pkt_info *pkt)
{
	int diff = psn_compare(pkt->psn, qp->resp.psn);
	struct rxe_dev *rxe = to_rdev(qp->ibqp.device);

	switch (qp_type(qp)) {
	case IB_QPT_RC:
		if (diff > 0) {
			if (qp->resp.sent_psn_nak)
				return RESPST_CLEANUP;

			qp->resp.sent_psn_nak = 1;
			rxe_counter_inc(rxe, RXE_CNT_OUT_OF_SEQ_REQ);
			return RESPST_ERR_PSN_OUT_OF_SEQ;

		} else if (diff < 0) {
			rxe_counter_inc(rxe, RXE_CNT_DUP_REQ);
			return RESPST_DUPLICATE_REQUEST;
		}

		if (qp->resp.sent_psn_nak)
			qp->resp.sent_psn_nak = 0;

		break;

	case IB_QPT_UC:
		if (qp->resp.drop_msg || diff != 0) {
			if (pkt->mask & RXE_START_MASK) {
				qp->resp.drop_msg = 0;
				return RESPST_CHK_OP_SEQ;
			}

			qp->resp.drop_msg = 1;
			return RESPST_CLEANUP;
		}
		break;
	default:
		break;
	}

	return RESPST_CHK_OP_SEQ;
}

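/* Verify that the packet opcode is a legal successor to the last opcode
 * received on this QP (e.g. a MIDDLE or LAST must follow a FIRST/MIDDLE).
 */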
static enum resp_states check_op_seq(struct rxe_qp *qp,
				     struct rxe_pkt_info *pkt)
{
	switch (qp_type(qp)) {
	case IB_QPT_RC:
		switch (qp->resp.opcode) {
		case IB_OPCODE_RC_SEND_FIRST:
		case IB_OPCODE_RC_SEND_MIDDLE:
			switch (pkt->opcode) {
			case IB_OPCODE_RC_SEND_MIDDLE:
			case IB_OPCODE_RC_SEND_LAST:
			case IB_OPCODE_RC_SEND_LAST_WITH_IMMEDIATE:
			case IB_OPCODE_RC_SEND_LAST_WITH_INVALIDATE:
				return RESPST_CHK_OP_VALID;
			default:
				return RESPST_ERR_MISSING_OPCODE_LAST_C;
			}

		case IB_OPCODE_RC_RDMA_WRITE_FIRST:
		case IB_OPCODE_RC_RDMA_WRITE_MIDDLE:
			switch (pkt->opcode) {
			case IB_OPCODE_RC_RDMA_WRITE_MIDDLE:
			case IB_OPCODE_RC_RDMA_WRITE_LAST:
			case IB_OPCODE_RC_RDMA_WRITE_LAST_WITH_IMMEDIATE:
				return RESPST_CHK_OP_VALID;
			default:
				return RESPST_ERR_MISSING_OPCODE_LAST_C;
			}

		default:
			switch (pkt->opcode) {
			case IB_OPCODE_RC_SEND_MIDDLE:
			case IB_OPCODE_RC_SEND_LAST:
			case IB_OPCODE_RC_SEND_LAST_WITH_IMMEDIATE:
			case IB_OPCODE_RC_SEND_LAST_WITH_INVALIDATE:
			case IB_OPCODE_RC_RDMA_WRITE_MIDDLE:
			case IB_OPCODE_RC_RDMA_WRITE_LAST:
			case IB_OPCODE_RC_RDMA_WRITE_LAST_WITH_IMMEDIATE:
				return RESPST_ERR_MISSING_OPCODE_FIRST;
			default:
				return RESPST_CHK_OP_VALID;
			}
		}
		break;

	case IB_QPT_UC:
		switch (qp->resp.opcode) {
		case IB_OPCODE_UC_SEND_FIRST:
		case IB_OPCODE_UC_SEND_MIDDLE:
			switch (pkt->opcode) {
			case IB_OPCODE_UC_SEND_MIDDLE:
			case IB_OPCODE_UC_SEND_LAST:
			case IB_OPCODE_UC_SEND_LAST_WITH_IMMEDIATE:
				return RESPST_CHK_OP_VALID;
			default:
				return RESPST_ERR_MISSING_OPCODE_LAST_D1E;
			}

		case IB_OPCODE_UC_RDMA_WRITE_FIRST:
		case IB_OPCODE_UC_RDMA_WRITE_MIDDLE:
			switch (pkt->opcode) {
			case IB_OPCODE_UC_RDMA_WRITE_MIDDLE:
			case IB_OPCODE_UC_RDMA_WRITE_LAST:
			case IB_OPCODE_UC_RDMA_WRITE_LAST_WITH_IMMEDIATE:
				return RESPST_CHK_OP_VALID;
			default:
				return RESPST_ERR_MISSING_OPCODE_LAST_D1E;
			}

		default:
			switch (pkt->opcode) {
			case IB_OPCODE_UC_SEND_MIDDLE:
			case IB_OPCODE_UC_SEND_LAST:
			case IB_OPCODE_UC_SEND_LAST_WITH_IMMEDIATE:
			case IB_OPCODE_UC_RDMA_WRITE_MIDDLE:
			case IB_OPCODE_UC_RDMA_WRITE_LAST:
			case IB_OPCODE_UC_RDMA_WRITE_LAST_WITH_IMMEDIATE:
				qp->resp.drop_msg = 1;
				return RESPST_CLEANUP;
			default:
				return RESPST_CHK_OP_VALID;
			}
		}
		break;

	default:
		return RESPST_CHK_OP_VALID;
	}
}

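/* Check that the QP's access flags permit the requested remote operation
 * (read, write, or atomic).
 */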
static enum resp_states check_op_valid(struct rxe_qp *qp,
				       struct rxe_pkt_info *pkt)
{
	switch (qp_type(qp)) {
	case IB_QPT_RC:
		if (((pkt->mask & RXE_READ_MASK) &&
		     !(qp->attr.qp_access_flags & IB_ACCESS_REMOTE_READ)) ||
		    ((pkt->mask & RXE_WRITE_MASK) &&
		     !(qp->attr.qp_access_flags & IB_ACCESS_REMOTE_WRITE)) ||
		    ((pkt->mask & RXE_ATOMIC_MASK) &&
		     !(qp->attr.qp_access_flags & IB_ACCESS_REMOTE_ATOMIC))) {
			return RESPST_ERR_UNSUPPORTED_OPCODE;
		}

		break;

	case IB_QPT_UC:
		if ((pkt->mask & RXE_WRITE_MASK) &&
		    !(qp->attr.qp_access_flags & IB_ACCESS_REMOTE_WRITE)) {
			qp->resp.drop_msg = 1;
			return RESPST_CLEANUP;
		}

		break;

	case IB_QPT_UD:
	case IB_QPT_SMI:
	case IB_QPT_GSI:
		break;

	default:
		WARN_ON_ONCE(1);
		break;
	}

	return RESPST_CHK_RESOURCE;
}

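/* Copy the next receive WQE off the shared receive queue into qp->resp.srq_wqe
 * and raise the SRQ limit event if consuming it drops the queue below the
 * armed limit.
 */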
static enum resp_states get_srq_wqe(struct rxe_qp *qp)
{
	struct rxe_srq *srq = qp->srq;
	struct rxe_queue *q = srq->rq.queue;
	struct rxe_recv_wqe *wqe;
	struct ib_event ev;
	unsigned int count;
	size_t size;

	if (srq->error)
		return RESPST_ERR_RNR;

	spin_lock_bh(&srq->rq.consumer_lock);

	if (qp->is_user)
		wqe = queue_head(q, QUEUE_TYPE_FROM_USER);
	else
		wqe = queue_head(q, QUEUE_TYPE_KERNEL);
	if (!wqe) {
		spin_unlock_bh(&srq->rq.consumer_lock);
		return RESPST_ERR_RNR;
	}

	/* don't trust user space data */
	if (unlikely(wqe->dma.num_sge > srq->rq.max_sge)) {
		spin_unlock_bh(&srq->rq.consumer_lock);
		pr_warn("%s: invalid num_sge in SRQ entry\n", __func__);
		return RESPST_ERR_MALFORMED_WQE;
	}
	size = sizeof(*wqe) + wqe->dma.num_sge * sizeof(struct rxe_sge);
	memcpy(&qp->resp.srq_wqe, wqe, size);

	qp->resp.wqe = &qp->resp.srq_wqe.wqe;
	if (qp->is_user) {
		advance_consumer(q, QUEUE_TYPE_FROM_USER);
		count = queue_count(q, QUEUE_TYPE_FROM_USER);
	} else {
		advance_consumer(q, QUEUE_TYPE_KERNEL);
		count = queue_count(q, QUEUE_TYPE_KERNEL);
	}

	if (srq->limit && srq->ibsrq.event_handler && (count < srq->limit)) {
		srq->limit = 0;
		goto event;
	}

	spin_unlock_bh(&srq->rq.consumer_lock);
	return RESPST_CHK_LENGTH;

event:
	spin_unlock_bh(&srq->rq.consumer_lock);
	ev.device = qp->ibqp.device;
	ev.element.srq = qp->ibqp.srq;
	ev.event = IB_EVENT_SRQ_LIMIT_REACHED;
	srq->ibsrq.event_handler(&ev, srq->ibsrq.srq_context);
	return RESPST_CHK_LENGTH;
}

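/* Make sure the responder has what it needs to proceed: flush the receive
 * queue if the QP is in error, verify rd/atomic resources are configured,
 * and fetch a receive WQE for operations that consume one.
 */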
static enum resp_states check_resource(struct rxe_qp *qp,
				       struct rxe_pkt_info *pkt)
{
	struct rxe_srq *srq = qp->srq;

	if (qp->resp.state == QP_STATE_ERROR) {
		if (qp->resp.wqe) {
			qp->resp.status = IB_WC_WR_FLUSH_ERR;
			return RESPST_COMPLETE;
		} else if (!srq) {
			if (qp->is_user)
				qp->resp.wqe = queue_head(qp->rq.queue,
						QUEUE_TYPE_FROM_USER);
			else
				qp->resp.wqe = queue_head(qp->rq.queue,
						QUEUE_TYPE_KERNEL);
			if (qp->resp.wqe) {
				qp->resp.status = IB_WC_WR_FLUSH_ERR;
				return RESPST_COMPLETE;
			} else {
				return RESPST_EXIT;
			}
		} else {
			return RESPST_EXIT;
		}
	}

	if (pkt->mask & RXE_READ_OR_ATOMIC) {
		/* it is the requesters job to not send
		 * too many read/atomic ops, we just
		 * recycle the responder resource queue
		 */
		if (likely(qp->attr.max_dest_rd_atomic > 0))
			return RESPST_CHK_LENGTH;
		else
			return RESPST_ERR_TOO_MANY_RDMA_ATM_REQ;
	}

	if (pkt->mask & RXE_RWR_MASK) {
		if (srq)
			return get_srq_wqe(qp);

		if (qp->is_user)
			qp->resp.wqe = queue_head(qp->rq.queue,
					QUEUE_TYPE_FROM_USER);
		else
			qp->resp.wqe = queue_head(qp->rq.queue,
					QUEUE_TYPE_KERNEL);
		return (qp->resp.wqe) ? RESPST_CHK_LENGTH : RESPST_ERR_RNR;
	}

	return RESPST_CHK_LENGTH;
}

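/* Length checking is currently a no-op for every QP type; the payload and
 * padding validation actually happens against the MR in check_rkey() below.
 */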
static enum resp_states check_length(struct rxe_qp *qp,
				     struct rxe_pkt_info *pkt)
{
	switch (qp_type(qp)) {
	case IB_QPT_RC:
		return RESPST_CHK_RKEY;

	case IB_QPT_UC:
		return RESPST_CHK_RKEY;

	default:
		return RESPST_CHK_RKEY;
	}
}

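/* Look up the MR (or MW) named by the rkey in the RETH/ATMETH header, check
 * access rights and address range, and for writes verify the payload length
 * and padding against the MTU and residual count.
 */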
static enum resp_states check_rkey(struct rxe_qp *qp,
				   struct rxe_pkt_info *pkt)
{
	struct rxe_mr *mr = NULL;
	struct rxe_mw *mw = NULL;
	u64 va;
	u32 rkey;
	u32 resid;
	u32 pktlen;
	int mtu = qp->mtu;
	enum resp_states state;
	int access;

	if (pkt->mask & (RXE_READ_MASK | RXE_WRITE_MASK)) {
		if (pkt->mask & RXE_RETH_MASK) {
			qp->resp.va = reth_va(pkt);
			qp->resp.offset = 0;
			qp->resp.rkey = reth_rkey(pkt);
			qp->resp.resid = reth_len(pkt);
			qp->resp.length = reth_len(pkt);
		}
		access = (pkt->mask & RXE_READ_MASK) ? IB_ACCESS_REMOTE_READ
						     : IB_ACCESS_REMOTE_WRITE;
	} else if (pkt->mask & RXE_ATOMIC_MASK) {
		qp->resp.va = atmeth_va(pkt);
		qp->resp.offset = 0;
		qp->resp.rkey = atmeth_rkey(pkt);
		qp->resp.resid = sizeof(u64);
		access = IB_ACCESS_REMOTE_ATOMIC;
	} else {
		return RESPST_EXECUTE;
	}

	/* A zero-byte op is not required to set an addr or rkey. */
	if ((pkt->mask & (RXE_READ_MASK | RXE_WRITE_OR_SEND)) &&
	    (pkt->mask & RXE_RETH_MASK) &&
	    reth_len(pkt) == 0) {
		return RESPST_EXECUTE;
	}

	va = qp->resp.va;
	rkey = qp->resp.rkey;
	resid = qp->resp.resid;
	pktlen = payload_size(pkt);

	if (rkey_is_mw(rkey)) {
		mw = rxe_lookup_mw(qp, access, rkey);
		if (!mw) {
			pr_err("%s: no MW matches rkey %#x\n", __func__, rkey);
			state = RESPST_ERR_RKEY_VIOLATION;
			goto err;
		}

		mr = mw->mr;
		if (!mr) {
			pr_err("%s: MW doesn't have an MR\n", __func__);
			state = RESPST_ERR_RKEY_VIOLATION;
			goto err;
		}

		if (mw->access & IB_ZERO_BASED)
			qp->resp.offset = mw->addr;

		rxe_drop_ref(mw);
		rxe_add_ref(mr);
	} else {
		mr = lookup_mr(qp->pd, access, rkey, RXE_LOOKUP_REMOTE);
		if (!mr) {
			pr_err("%s: no MR matches rkey %#x\n", __func__, rkey);
			state = RESPST_ERR_RKEY_VIOLATION;
			goto err;
		}
	}

	if (mr_check_range(mr, va + qp->resp.offset, resid)) {
		state = RESPST_ERR_RKEY_VIOLATION;
		goto err;
	}

	if (pkt->mask & RXE_WRITE_MASK) {
		if (resid > mtu) {
			if (pktlen != mtu || bth_pad(pkt)) {
				state = RESPST_ERR_LENGTH;
				goto err;
			}
		} else {
			if (pktlen != resid) {
				state = RESPST_ERR_LENGTH;
				goto err;
			}
			if ((bth_pad(pkt) != (0x3 & (-resid)))) {
				/* This case may not be exactly that
				 * but nothing else fits.
				 */
				state = RESPST_ERR_LENGTH;
				goto err;
			}
		}
	}

	WARN_ON_ONCE(qp->resp.mr);

	qp->resp.mr = mr;
	return RESPST_EXECUTE;

err:
	if (mr)
		rxe_drop_ref(mr);
	if (mw)
		rxe_drop_ref(mw);

	return state;
}

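/* Copy inbound send payload into the receive WQE's scatter/gather list. */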
static enum resp_states send_data_in(struct rxe_qp *qp, void *data_addr,
				     int data_len)
{
	int err;

	err = copy_data(qp->pd, IB_ACCESS_LOCAL_WRITE, &qp->resp.wqe->dma,
			data_addr, data_len, RXE_TO_MR_OBJ);
	if (unlikely(err))
		return (err == -ENOSPC) ? RESPST_ERR_LENGTH
					: RESPST_ERR_MALFORMED_WQE;

	return RESPST_NONE;
}

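/* Copy an RDMA write payload directly into the target MR and advance the
 * responder's virtual address and residual count.
 */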
static enum resp_states write_data_in(struct rxe_qp *qp,
				      struct rxe_pkt_info *pkt)
{
	enum resp_states rc = RESPST_NONE;
	int err;
	int data_len = payload_size(pkt);

	err = rxe_mr_copy(qp->resp.mr, qp->resp.va + qp->resp.offset,
			  payload_addr(pkt), data_len, RXE_TO_MR_OBJ);
	if (err) {
		rc = RESPST_ERR_RKEY_VIOLATION;
		goto out;
	}

	qp->resp.va += data_len;
	qp->resp.resid -= data_len;

out:
	return rc;
}

/* Guarantee atomicity of atomic operations at the machine level. */
static DEFINE_SPINLOCK(atomic_ops_lock);

static enum resp_states process_atomic(struct rxe_qp *qp,
				       struct rxe_pkt_info *pkt)
{
	u64 *vaddr;
	enum resp_states ret;
	struct rxe_mr *mr = qp->resp.mr;

	if (mr->state != RXE_MR_STATE_VALID) {
		ret = RESPST_ERR_RKEY_VIOLATION;
		goto out;
	}

	vaddr = iova_to_vaddr(mr, qp->resp.va + qp->resp.offset, sizeof(u64));

	/* check vaddr is 8 bytes aligned. */
	if (!vaddr || (uintptr_t)vaddr & 7) {
		ret = RESPST_ERR_MISALIGNED_ATOMIC;
		goto out;
	}

	spin_lock_bh(&atomic_ops_lock);

	qp->resp.atomic_orig = *vaddr;

	if (pkt->opcode == IB_OPCODE_RC_COMPARE_SWAP ||
	    pkt->opcode == IB_OPCODE_RD_COMPARE_SWAP) {
		if (*vaddr == atmeth_comp(pkt))
			*vaddr = atmeth_swap_add(pkt);
	} else {
		*vaddr += atmeth_swap_add(pkt);
	}

	spin_unlock_bh(&atomic_ops_lock);

	ret = RESPST_NONE;
out:
	return ret;
}

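/* Build an ACK/response packet: allocate an skb and fill in the BTH and,
 * when the opcode carries them, the AETH (syndrome + MSN) and atomic-ack
 * headers.
 */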
static struct sk_buff *prepare_ack_packet(struct rxe_qp *qp,
					  struct rxe_pkt_info *pkt,
					  struct rxe_pkt_info *ack,
					  int opcode,
					  int payload,
					  u32 psn,
					  u8 syndrome)
{
	struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
	struct sk_buff *skb;
	int paylen;
	int pad;
	int err;

	/*
	 * allocate packet
	 */
	pad = (-payload) & 0x3;
	paylen = rxe_opcode[opcode].length + payload + pad + RXE_ICRC_SIZE;

	skb = rxe_init_packet(rxe, &qp->pri_av, paylen, ack);
	if (!skb)
		return NULL;

	ack->qp = qp;
	ack->opcode = opcode;
	ack->mask = rxe_opcode[opcode].mask;
	ack->paylen = paylen;
	ack->psn = psn;

	bth_init(ack, opcode, 0, 0, pad, IB_DEFAULT_PKEY_FULL,
		 qp->attr.dest_qp_num, 0, psn);

	if (ack->mask & RXE_AETH_MASK) {
		aeth_set_syn(ack, syndrome);
		aeth_set_msn(ack, qp->resp.msn);
	}

	if (ack->mask & RXE_ATMACK_MASK)
		atmack_set_orig(ack, qp->resp.atomic_orig);

	err = rxe_prepare(ack, skb);
	if (err) {
		kfree_skb(skb);
		return NULL;
	}

	return skb;
}

/* RDMA read response. If res is not NULL, then we have a current RDMA request
 * being processed or replayed.
 */
static enum resp_states read_reply(struct rxe_qp *qp,
				   struct rxe_pkt_info *req_pkt)
{
	struct rxe_pkt_info ack_pkt;
	struct sk_buff *skb;
	int mtu = qp->mtu;
	enum resp_states state;
	int payload;
	int opcode;
	int err;
	struct resp_res *res = qp->resp.res;

	if (!res) {
		/* This is the first time we process this request. Get a
		 * resource
		 */
		res = &qp->resp.resources[qp->resp.res_head];

		free_rd_atomic_resource(qp, res);
		rxe_advance_resp_resource(qp);

		res->type = RXE_READ_MASK;
		res->replay = 0;

		res->read.va = qp->resp.va + qp->resp.offset;
		res->read.va_org = qp->resp.va + qp->resp.offset;

		res->first_psn = req_pkt->psn;

		if (reth_len(req_pkt)) {
			res->last_psn = (req_pkt->psn +
					 (reth_len(req_pkt) + mtu - 1) /
					 mtu - 1) & BTH_PSN_MASK;
		} else {
			res->last_psn = res->first_psn;
		}
		res->cur_psn = req_pkt->psn;

		res->read.resid = qp->resp.resid;
		res->read.length = qp->resp.resid;
		res->read.rkey = qp->resp.rkey;

		/* note res inherits the reference to mr from qp */
		res->read.mr = qp->resp.mr;
		qp->resp.mr = NULL;

		qp->resp.res = res;
		res->state = rdatm_res_state_new;
	}

	if (res->state == rdatm_res_state_new) {
		if (res->read.resid <= mtu)
			opcode = IB_OPCODE_RC_RDMA_READ_RESPONSE_ONLY;
		else
			opcode = IB_OPCODE_RC_RDMA_READ_RESPONSE_FIRST;
	} else {
		if (res->read.resid > mtu)
			opcode = IB_OPCODE_RC_RDMA_READ_RESPONSE_MIDDLE;
		else
			opcode = IB_OPCODE_RC_RDMA_READ_RESPONSE_LAST;
	}

	res->state = rdatm_res_state_next;

	payload = min_t(int, res->read.resid, mtu);

	skb = prepare_ack_packet(qp, req_pkt, &ack_pkt, opcode, payload,
				 res->cur_psn, AETH_ACK_UNLIMITED);
	if (!skb)
		return RESPST_ERR_RNR;

	err = rxe_mr_copy(res->read.mr, res->read.va, payload_addr(&ack_pkt),
			  payload, RXE_FROM_MR_OBJ);
	if (err)
		pr_err("Failed copying memory\n");

	if (bth_pad(&ack_pkt)) {
		u8 *pad = payload_addr(&ack_pkt) + payload;

		memset(pad, 0, bth_pad(&ack_pkt));
	}

	err = rxe_xmit_packet(qp, &ack_pkt, skb);
	if (err) {
		pr_err("Failed sending RDMA reply.\n");
		return RESPST_ERR_RNR;
	}

	res->read.va += payload;
	res->read.resid -= payload;
	res->cur_psn = (res->cur_psn + 1) & BTH_PSN_MASK;

	if (res->read.resid > 0) {
		state = RESPST_DONE;
	} else {
		qp->resp.res = NULL;
		if (!res->replay)
			qp->resp.opcode = -1;
		if (psn_compare(res->cur_psn, qp->resp.psn) >= 0)
			qp->resp.psn = res->cur_psn;
		state = RESPST_CLEANUP;
	}

	return state;
}

static int invalidate_rkey(struct rxe_qp *qp, u32 rkey)
{
	if (rkey_is_mw(rkey))
		return rxe_invalidate_mw(qp, rkey);
	else
		return rxe_invalidate_mr(qp, rkey);
}

/* Executes a new request. A retried request never reaches this function
 * (sends and writes are discarded, and reads and atomics are retried
 * elsewhere).
 */
static enum resp_states execute(struct rxe_qp *qp, struct rxe_pkt_info *pkt)
{
	enum resp_states err;
	struct sk_buff *skb = PKT_TO_SKB(pkt);
	union rdma_network_hdr hdr;

	if (pkt->mask & RXE_SEND_MASK) {
		if (qp_type(qp) == IB_QPT_UD ||
		    qp_type(qp) == IB_QPT_SMI ||
		    qp_type(qp) == IB_QPT_GSI) {
			if (skb->protocol == htons(ETH_P_IP)) {
				memset(&hdr.reserved, 0,
				       sizeof(hdr.reserved));
				memcpy(&hdr.roce4grh, ip_hdr(skb),
				       sizeof(hdr.roce4grh));
				err = send_data_in(qp, &hdr, sizeof(hdr));
			} else {
				err = send_data_in(qp, ipv6_hdr(skb),
						   sizeof(hdr));
			}
			if (err)
				return err;
		}
		err = send_data_in(qp, payload_addr(pkt), payload_size(pkt));
		if (err)
			return err;
	} else if (pkt->mask & RXE_WRITE_MASK) {
		err = write_data_in(qp, pkt);
		if (err)
			return err;
	} else if (pkt->mask & RXE_READ_MASK) {
		/* For RDMA Read we can increment the msn now. See C9-148. */
		qp->resp.msn++;
		return RESPST_READ_REPLY;
	} else if (pkt->mask & RXE_ATOMIC_MASK) {
		err = process_atomic(qp, pkt);
		if (err)
			return err;
	} else {
		/* Unreachable */
		WARN_ON_ONCE(1);
	}

	if (pkt->mask & RXE_IETH_MASK) {
		u32 rkey = ieth_rkey(pkt);

		err = invalidate_rkey(qp, rkey);
		if (err)
			return RESPST_ERR_INVALIDATE_RKEY;
	}

	/* next expected psn, read handles this separately */
	qp->resp.psn = (pkt->psn + 1) & BTH_PSN_MASK;
	qp->resp.ack_psn = qp->resp.psn;

	qp->resp.opcode = pkt->opcode;
	qp->resp.status = IB_WC_SUCCESS;

	if (pkt->mask & RXE_COMP_MASK) {
		/* We successfully processed this new request. */
		qp->resp.msn++;
		return RESPST_COMPLETE;
	} else if (qp_type(qp) == IB_QPT_RC)
		return RESPST_ACKNOWLEDGE;
	else
		return RESPST_CLEANUP;
}

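/* Retire the current receive WQE: build a work completion (kernel or user
 * layout) describing the received message and post it to the receive CQ.
 */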
static enum resp_states do_complete(struct rxe_qp *qp,
				    struct rxe_pkt_info *pkt)
{
	struct rxe_cqe cqe;
	struct ib_wc *wc = &cqe.ibwc;
	struct ib_uverbs_wc *uwc = &cqe.uibwc;
	struct rxe_recv_wqe *wqe = qp->resp.wqe;
	struct rxe_dev *rxe = to_rdev(qp->ibqp.device);

	if (!wqe)
		goto finish;

	memset(&cqe, 0, sizeof(cqe));

	if (qp->rcq->is_user) {
		uwc->status = qp->resp.status;
		uwc->qp_num = qp->ibqp.qp_num;
		uwc->wr_id = wqe->wr_id;
	} else {
		wc->status = qp->resp.status;
		wc->qp = &qp->ibqp;
		wc->wr_id = wqe->wr_id;
	}

	if (wc->status == IB_WC_SUCCESS) {
		rxe_counter_inc(rxe, RXE_CNT_RDMA_RECV);
		wc->opcode = (pkt->mask & RXE_IMMDT_MASK &&
			      pkt->mask & RXE_WRITE_MASK) ?
					IB_WC_RECV_RDMA_WITH_IMM : IB_WC_RECV;
		wc->vendor_err = 0;
		wc->byte_len = (pkt->mask & RXE_IMMDT_MASK &&
				pkt->mask & RXE_WRITE_MASK) ?
					qp->resp.length :
					wqe->dma.length - wqe->dma.resid;

		/* fields after byte_len are different between kernel and user
		 * space
		 */
		if (qp->rcq->is_user) {
			uwc->wc_flags = IB_WC_GRH;

			if (pkt->mask & RXE_IMMDT_MASK) {
				uwc->wc_flags |= IB_WC_WITH_IMM;
				uwc->ex.imm_data = immdt_imm(pkt);
			}

			if (pkt->mask & RXE_IETH_MASK) {
				uwc->wc_flags |= IB_WC_WITH_INVALIDATE;
				uwc->ex.invalidate_rkey = ieth_rkey(pkt);
			}

			uwc->qp_num = qp->ibqp.qp_num;

			if (pkt->mask & RXE_DETH_MASK)
				uwc->src_qp = deth_sqp(pkt);

			uwc->port_num = qp->attr.port_num;
		} else {
			struct sk_buff *skb = PKT_TO_SKB(pkt);

			wc->wc_flags = IB_WC_GRH | IB_WC_WITH_NETWORK_HDR_TYPE;
			if (skb->protocol == htons(ETH_P_IP))
				wc->network_hdr_type = RDMA_NETWORK_IPV4;
			else
				wc->network_hdr_type = RDMA_NETWORK_IPV6;

			if (is_vlan_dev(skb->dev)) {
				wc->wc_flags |= IB_WC_WITH_VLAN;
				wc->vlan_id = vlan_dev_vlan_id(skb->dev);
			}

			if (pkt->mask & RXE_IMMDT_MASK) {
				wc->wc_flags |= IB_WC_WITH_IMM;
				wc->ex.imm_data = immdt_imm(pkt);
			}

			if (pkt->mask & RXE_IETH_MASK) {
				wc->wc_flags |= IB_WC_WITH_INVALIDATE;
				wc->ex.invalidate_rkey = ieth_rkey(pkt);
			}

			if (pkt->mask & RXE_DETH_MASK)
				wc->src_qp = deth_sqp(pkt);

			wc->qp = &qp->ibqp;
			wc->port_num = qp->attr.port_num;
		}
	}

	/* have copy for srq and reference for !srq */
	if (!qp->srq) {
		if (qp->is_user)
			advance_consumer(qp->rq.queue, QUEUE_TYPE_FROM_USER);
		else
			advance_consumer(qp->rq.queue, QUEUE_TYPE_KERNEL);
	}

	qp->resp.wqe = NULL;

	if (rxe_cq_post(qp->rcq, &cqe, pkt ? bth_se(pkt) : 1))
		return RESPST_ERR_CQ_OVERFLOW;

finish:
	if (unlikely(qp->resp.state == QP_STATE_ERROR))
		return RESPST_CHK_RESOURCE;
	if (unlikely(!pkt))
		return RESPST_DONE;
	if (qp_type(qp) == IB_QPT_RC)
		return RESPST_ACKNOWLEDGE;
	else
		return RESPST_CLEANUP;
}

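/* Send a bare acknowledge packet carrying the given AETH syndrome and PSN. */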
static int send_ack(struct rxe_qp *qp, struct rxe_pkt_info *pkt,
		    u8 syndrome, u32 psn)
{
	int err = 0;
	struct rxe_pkt_info ack_pkt;
	struct sk_buff *skb;

	skb = prepare_ack_packet(qp, pkt, &ack_pkt, IB_OPCODE_RC_ACKNOWLEDGE,
				 0, psn, syndrome);
	if (!skb) {
		err = -ENOMEM;
		goto err1;
	}

	err = rxe_xmit_packet(qp, &ack_pkt, skb);
	if (err)
		pr_err_ratelimited("Failed sending ack\n");

err1:
	return err;
}

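/* Send an atomic acknowledge and stash a reference to the skb in a responder
 * resource so a duplicate atomic request can be answered by resending it.
 */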
static int send_atomic_ack(struct rxe_qp *qp, struct rxe_pkt_info *pkt,
			   u8 syndrome)
{
	int rc = 0;
	struct rxe_pkt_info ack_pkt;
	struct sk_buff *skb;
	struct resp_res *res;

	skb = prepare_ack_packet(qp, pkt, &ack_pkt,
				 IB_OPCODE_RC_ATOMIC_ACKNOWLEDGE, 0, pkt->psn,
				 syndrome);
	if (!skb) {
		rc = -ENOMEM;
		goto out;
	}

	res = &qp->resp.resources[qp->resp.res_head];
	free_rd_atomic_resource(qp, res);
	rxe_advance_resp_resource(qp);

	skb_get(skb);
	res->type = RXE_ATOMIC_MASK;
	res->atomic.skb = skb;
	res->first_psn = ack_pkt.psn;
	res->last_psn = ack_pkt.psn;
	res->cur_psn = ack_pkt.psn;

	rc = rxe_xmit_packet(qp, &ack_pkt, skb);
	if (rc) {
		pr_err_ratelimited("Failed sending ack\n");
		rxe_drop_ref(qp);
	}
out:
	return rc;
}

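/* RC only: send the appropriate acknowledge (NAK, atomic ack, or plain ack)
 * for the packet just executed.
 */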
static enum resp_states acknowledge(struct rxe_qp *qp,
				    struct rxe_pkt_info *pkt)
{
	if (qp_type(qp) != IB_QPT_RC)
		return RESPST_CLEANUP;

	if (qp->resp.aeth_syndrome != AETH_ACK_UNLIMITED)
		send_ack(qp, pkt, qp->resp.aeth_syndrome, pkt->psn);
	else if (pkt->mask & RXE_ATOMIC_MASK)
		send_atomic_ack(qp, pkt, AETH_ACK_UNLIMITED);
	else if (bth_ack(pkt))
		send_ack(qp, pkt, AETH_ACK_UNLIMITED, pkt->psn);

	return RESPST_CLEANUP;
}

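/* Release the request packet and any MR reference still held by the
 * responder before moving on to the next request.
 */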
static enum resp_states cleanup(struct rxe_qp *qp,
				struct rxe_pkt_info *pkt)
{
	struct sk_buff *skb;

	if (pkt) {
		skb = skb_dequeue(&qp->req_pkts);
		rxe_drop_ref(qp);
		kfree_skb(skb);
		ib_device_put(qp->ibqp.device);
	}

	if (qp->resp.mr) {
		rxe_drop_ref(qp->resp.mr);
		qp->resp.mr = NULL;
	}

	return RESPST_DONE;
}

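/* Find the responder resource (pending read or atomic reply) whose PSN range
 * covers the given PSN, if any.
 */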
static struct resp_res *find_resource(struct rxe_qp *qp, u32 psn)
{
	int i;

	for (i = 0; i < qp->attr.max_dest_rd_atomic; i++) {
		struct resp_res *res = &qp->resp.resources[i];

		if (res->type == 0)
			continue;

		if (psn_compare(psn, res->first_psn) >= 0 &&
		    psn_compare(psn, res->last_psn) <= 0) {
			return res;
		}
	}

	return NULL;
}

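/* Handle a duplicate request: re-ack duplicate sends and writes, replay
 * duplicate reads from the saved responder resource, and resend the cached
 * ack skb for duplicate atomics.
 */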
static enum resp_states duplicate_request(struct rxe_qp *qp,
					  struct rxe_pkt_info *pkt)
{
	enum resp_states rc;
	u32 prev_psn = (qp->resp.ack_psn - 1) & BTH_PSN_MASK;

	if (pkt->mask & RXE_SEND_MASK ||
	    pkt->mask & RXE_WRITE_MASK) {
		/* SEND. Ack again and cleanup. C9-105. */
		send_ack(qp, pkt, AETH_ACK_UNLIMITED, prev_psn);
		return RESPST_CLEANUP;
	} else if (pkt->mask & RXE_READ_MASK) {
		struct resp_res *res;

		res = find_resource(qp, pkt->psn);
		if (!res) {
			/* Resource not found. Class D error. Drop the
			 * request.
			 */
			rc = RESPST_CLEANUP;
			goto out;
		} else {
			/* Ensure this new request is the same as the previous
			 * one or a subset of it.
			 */
			u64 iova = reth_va(pkt);
			u32 resid = reth_len(pkt);

			if (iova < res->read.va_org ||
			    resid > res->read.length ||
			    (iova + resid) > (res->read.va_org +
					      res->read.length)) {
				rc = RESPST_CLEANUP;
				goto out;
			}

			if (reth_rkey(pkt) != res->read.rkey) {
				rc = RESPST_CLEANUP;
				goto out;
			}

			res->cur_psn = pkt->psn;
			res->state = (pkt->psn == res->first_psn) ?
					rdatm_res_state_new :
					rdatm_res_state_replay;
			res->replay = 1;

			/* Reset the resource, except length. */
			res->read.va_org = iova;
			res->read.va = iova;
			res->read.resid = resid;

			/* Replay the RDMA read reply. */
			qp->resp.res = res;
			rc = RESPST_READ_REPLY;
			goto out;
		}
	} else {
		struct resp_res *res;

		/* Find the operation in our list of responder resources. */
		res = find_resource(qp, pkt->psn);
		if (res) {
			skb_get(res->atomic.skb);
			/* Resend the result. */
			rc = rxe_xmit_packet(qp, pkt, res->atomic.skb);
			if (rc) {
				pr_err("Failed resending result. This flow is not handled - skb ignored\n");
				rc = RESPST_CLEANUP;
				goto out;
			}
		}

		/* Resource not found. Class D error. Drop the request. */
		rc = RESPST_CLEANUP;
		goto out;
	}
out:
	return rc;
}

/* Process a class A or C. Both are treated the same in this implementation. */
static void do_class_ac_error(struct rxe_qp *qp, u8 syndrome,
			      enum ib_wc_status status)
{
	qp->resp.aeth_syndrome = syndrome;
	qp->resp.status = status;

	/* indicate that we should go through the ERROR state */
	qp->resp.goto_error = 1;
}

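/* Handle a class D1 or E error on a UC QP: with an SRQ attached, complete the
 * WQE in error; otherwise reset the current WQE so the rest of the message
 * can be received into it and then dropped.
 */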
static enum resp_states do_class_d1e_error(struct rxe_qp *qp)
{
	/* UC */
	if (qp->srq) {
		/* Class E */
		qp->resp.drop_msg = 1;
		if (qp->resp.wqe) {
			qp->resp.status = IB_WC_REM_INV_REQ_ERR;
			return RESPST_COMPLETE;
		} else {
			return RESPST_CLEANUP;
		}
	} else {
		/* Class D1. This packet may be the start of a new message,
		 * so reset the wqe back to its full length before the next
		 * packet arrives.
		 */
		if (qp->resp.wqe) {
			qp->resp.wqe->dma.resid = qp->resp.wqe->dma.length;
			qp->resp.wqe->dma.cur_sge = 0;
			qp->resp.wqe->dma.sge_offset = 0;
			qp->resp.opcode = -1;
		}

		if (qp->resp.mr) {
			rxe_drop_ref(qp->resp.mr);
			qp->resp.mr = NULL;
		}

		return RESPST_CLEANUP;
	}
}

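/* Free all request packets queued on the QP; unless the caller will notify
 * the user of the flush, also consume any remaining receive WQEs.
 */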
static void rxe_drain_req_pkts(struct rxe_qp *qp, bool notify)
{
	struct sk_buff *skb;
	struct rxe_queue *q = qp->rq.queue;

	while ((skb = skb_dequeue(&qp->req_pkts))) {
		rxe_drop_ref(qp);
		kfree_skb(skb);
		ib_device_put(qp->ibqp.device);
	}

	if (notify)
		return;

	while (!qp->srq && q && queue_head(q, q->type))
		advance_consumer(q, q->type);
}

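/* Main entry point for the responder task: run the state machine over the
 * next queued request packet until it reaches DONE (more work may follow)
 * or EXIT (stop until rescheduled).
 */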
int rxe_responder(void *arg)
{
	struct rxe_qp *qp = (struct rxe_qp *)arg;
	struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
	enum resp_states state;
	struct rxe_pkt_info *pkt = NULL;
	int ret = 0;

	rxe_add_ref(qp);

	qp->resp.aeth_syndrome = AETH_ACK_UNLIMITED;

	if (!qp->valid) {
		ret = -EINVAL;
		goto done;
	}

	switch (qp->resp.state) {
	case QP_STATE_RESET:
		state = RESPST_RESET;
		break;

	default:
		state = RESPST_GET_REQ;
		break;
	}

	while (1) {
		pr_debug("qp#%d state = %s\n", qp_num(qp),
			 resp_state_name[state]);
		switch (state) {
		case RESPST_GET_REQ:
			state = get_req(qp, &pkt);
			break;
		case RESPST_CHK_PSN:
			state = check_psn(qp, pkt);
			break;
		case RESPST_CHK_OP_SEQ:
			state = check_op_seq(qp, pkt);
			break;
		case RESPST_CHK_OP_VALID:
			state = check_op_valid(qp, pkt);
			break;
		case RESPST_CHK_RESOURCE:
			state = check_resource(qp, pkt);
			break;
		case RESPST_CHK_LENGTH:
			state = check_length(qp, pkt);
			break;
		case RESPST_CHK_RKEY:
			state = check_rkey(qp, pkt);
			break;
		case RESPST_EXECUTE:
			state = execute(qp, pkt);
			break;
		case RESPST_COMPLETE:
			state = do_complete(qp, pkt);
			break;
		case RESPST_READ_REPLY:
			state = read_reply(qp, pkt);
			break;
		case RESPST_ACKNOWLEDGE:
			state = acknowledge(qp, pkt);
			break;
		case RESPST_CLEANUP:
			state = cleanup(qp, pkt);
			break;
		case RESPST_DUPLICATE_REQUEST:
			state = duplicate_request(qp, pkt);
			break;
		case RESPST_ERR_PSN_OUT_OF_SEQ:
			/* RC only - Class B. Drop the request. */
			send_ack(qp, pkt, AETH_NAK_PSN_SEQ_ERROR,
				 qp->resp.psn);
			state = RESPST_CLEANUP;
			break;

		case RESPST_ERR_TOO_MANY_RDMA_ATM_REQ:
		case RESPST_ERR_MISSING_OPCODE_FIRST:
		case RESPST_ERR_MISSING_OPCODE_LAST_C:
		case RESPST_ERR_UNSUPPORTED_OPCODE:
		case RESPST_ERR_MISALIGNED_ATOMIC:
			/* RC Only - Class C. */
			do_class_ac_error(qp, AETH_NAK_INVALID_REQ,
					  IB_WC_REM_INV_REQ_ERR);
			state = RESPST_COMPLETE;
			break;

		case RESPST_ERR_MISSING_OPCODE_LAST_D1E:
			state = do_class_d1e_error(qp);
			break;
		case RESPST_ERR_RNR:
			if (qp_type(qp) == IB_QPT_RC) {
				rxe_counter_inc(rxe, RXE_CNT_SND_RNR);
				/* RC - class B */
				send_ack(qp, pkt, AETH_RNR_NAK |
					 (~AETH_TYPE_MASK &
					 qp->attr.min_rnr_timer),
					 pkt->psn);
			} else {
				/* UD/UC - class D */
				qp->resp.drop_msg = 1;
			}
			state = RESPST_CLEANUP;
			break;

		case RESPST_ERR_RKEY_VIOLATION:
			if (qp_type(qp) == IB_QPT_RC) {
				/* Class C */
				do_class_ac_error(qp, AETH_NAK_REM_ACC_ERR,
						  IB_WC_REM_ACCESS_ERR);
				state = RESPST_COMPLETE;
			} else {
				qp->resp.drop_msg = 1;
				if (qp->srq) {
					/* UC with SRQ, Class E */
					qp->resp.status = IB_WC_REM_ACCESS_ERR;
					state = RESPST_COMPLETE;
				} else {
					/* UC without SRQ, Class D1 */
					state = RESPST_CLEANUP;
				}
			}
			break;

		case RESPST_ERR_INVALIDATE_RKEY:
			/* RC - Class J. */
			qp->resp.goto_error = 1;
			qp->resp.status = IB_WC_REM_INV_REQ_ERR;
			state = RESPST_COMPLETE;
			break;

		case RESPST_ERR_LENGTH:
			if (qp_type(qp) == IB_QPT_RC) {
				/* Class C */
				do_class_ac_error(qp, AETH_NAK_INVALID_REQ,
						  IB_WC_REM_INV_REQ_ERR);
				state = RESPST_COMPLETE;
			} else if (qp->srq) {
				/* UC with SRQ, Class E */
				qp->resp.status = IB_WC_REM_INV_REQ_ERR;
				state = RESPST_COMPLETE;
			} else {
				/* UC without SRQ, Class D1 */
				qp->resp.drop_msg = 1;
				state = RESPST_CLEANUP;
			}
			break;

		case RESPST_ERR_MALFORMED_WQE:
			/* All, Class A. */
			do_class_ac_error(qp, AETH_NAK_REM_OP_ERR,
					  IB_WC_LOC_QP_OP_ERR);
			state = RESPST_COMPLETE;
			break;

		case RESPST_ERR_CQ_OVERFLOW:
			/* All - Class G */
			state = RESPST_ERROR;
			break;

		case RESPST_DONE:
			if (qp->resp.goto_error) {
				state = RESPST_ERROR;
				break;
			}

			goto done;

		case RESPST_EXIT:
			if (qp->resp.goto_error) {
				state = RESPST_ERROR;
				break;
			}

			goto exit;

		case RESPST_RESET:
			rxe_drain_req_pkts(qp, false);
			qp->resp.wqe = NULL;
			goto exit;

		case RESPST_ERROR:
			qp->resp.goto_error = 0;
			pr_warn("qp#%d moved to error state\n", qp_num(qp));
			rxe_qp_error(qp);
			goto exit;

		default:
			WARN_ON_ONCE(1);
		}
	}

exit:
	ret = -EAGAIN;
done:
	rxe_drop_ref(qp);
	return ret;
}