// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 */

#include <linux/skbuff.h>
#include <crypto/hash.h>

#include "rxe.h"
#include "rxe_loc.h"
#include "rxe_queue.h"

static int next_opcode(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
		       u32 opcode);

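/* Re-advance the DMA state of the first (partially acknowledged) send or
 * write WQE past the npsn packets that were already acked, so that a retry
 * resumes at the correct offset and opcode.
 */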
static inline void retry_first_write_send(struct rxe_qp *qp,
					  struct rxe_send_wqe *wqe,
					  unsigned int mask, int npsn)
{
	int i;

	for (i = 0; i < npsn; i++) {
		int to_send = (wqe->dma.resid > qp->mtu) ?
				qp->mtu : wqe->dma.resid;

		qp->req.opcode = next_opcode(qp, wqe,
					     wqe->wr.opcode);

		if (wqe->wr.send_flags & IB_SEND_INLINE) {
			wqe->dma.resid -= to_send;
			wqe->dma.sge_offset += to_send;
		} else {
			advance_dma_data(&wqe->dma, to_send);
		}
		if (mask & WR_WRITE_MASK)
			wqe->iova += qp->mtu;
	}
}

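/* Rewind the requester to the completer PSN and mark every WQE that has not
 * yet completed as posted again so that it will be resent.
 */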
static void req_retry(struct rxe_qp *qp)
{
	struct rxe_send_wqe *wqe;
	unsigned int wqe_index;
	unsigned int mask;
	int npsn;
	int first = 1;
	struct rxe_queue *q = qp->sq.queue;
	unsigned int cons;
	unsigned int prod;

	if (qp->is_user) {
		cons = consumer_index(q, QUEUE_TYPE_FROM_USER);
		prod = producer_index(q, QUEUE_TYPE_FROM_USER);
	} else {
		cons = consumer_index(q, QUEUE_TYPE_KERNEL);
		prod = producer_index(q, QUEUE_TYPE_KERNEL);
	}

	qp->req.wqe_index = cons;
	qp->req.psn = qp->comp.psn;
	qp->req.opcode = -1;

	for (wqe_index = cons; wqe_index != prod;
	     wqe_index = next_index(q, wqe_index)) {
		wqe = addr_from_index(qp->sq.queue, wqe_index);
		mask = wr_opcode_mask(wqe->wr.opcode, qp);

		if (wqe->state == wqe_state_posted)
			break;

		if (wqe->state == wqe_state_done)
			continue;

		wqe->iova = (mask & WR_ATOMIC_MASK) ?
			     wqe->wr.wr.atomic.remote_addr :
			     (mask & WR_READ_OR_WRITE_MASK) ?
			     wqe->wr.wr.rdma.remote_addr :
			     0;

		if (!first || (mask & WR_READ_MASK) == 0) {
			wqe->dma.resid = wqe->dma.length;
			wqe->dma.cur_sge = 0;
			wqe->dma.sge_offset = 0;
		}

		if (first) {
			first = 0;

			if (mask & WR_WRITE_OR_SEND_MASK) {
				npsn = (qp->comp.psn - wqe->first_psn) &
					BTH_PSN_MASK;
				retry_first_write_send(qp, wqe, mask, npsn);
			}

			if (mask & WR_READ_MASK) {
				npsn = (wqe->dma.length - wqe->dma.resid) /
					qp->mtu;
				wqe->iova += npsn * qp->mtu;
			}
		}

		wqe->state = wqe_state_posted;
	}
}

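/* The RNR NAK timer expired; kick the requester task so it retries the
 * send queue.
 */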
void rnr_nak_timer(struct timer_list *t)
{
	struct rxe_qp *qp = from_timer(qp, t, rnr_nak_timer);

	pr_debug("qp#%d rnr nak timer fired\n", qp_num(qp));
	rxe_run_task(&qp->req.task, 1);
}

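/* Return the next WQE the requester should work on, or NULL if the send
 * queue is empty, the QP is draining/drained, or a fenced WQE must wait
 * for earlier work requests to complete.
 */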
static struct rxe_send_wqe *req_next_wqe(struct rxe_qp *qp)
{
	struct rxe_send_wqe *wqe;
	unsigned long flags;
	struct rxe_queue *q = qp->sq.queue;
	unsigned int index = qp->req.wqe_index;
	unsigned int cons;
	unsigned int prod;

	if (qp->is_user) {
		wqe = queue_head(q, QUEUE_TYPE_FROM_USER);
		cons = consumer_index(q, QUEUE_TYPE_FROM_USER);
		prod = producer_index(q, QUEUE_TYPE_FROM_USER);
	} else {
		wqe = queue_head(q, QUEUE_TYPE_KERNEL);
		cons = consumer_index(q, QUEUE_TYPE_KERNEL);
		prod = producer_index(q, QUEUE_TYPE_KERNEL);
	}

	if (unlikely(qp->req.state == QP_STATE_DRAIN)) {
		/* check to see if we are drained;
		 * state_lock used by requester and completer
		 */
		spin_lock_irqsave(&qp->state_lock, flags);
		do {
			if (qp->req.state != QP_STATE_DRAIN) {
				/* comp just finished */
				spin_unlock_irqrestore(&qp->state_lock,
						       flags);
				break;
			}

			if (wqe && ((index != cons) ||
				    (wqe->state != wqe_state_posted))) {
				/* comp not done yet */
				spin_unlock_irqrestore(&qp->state_lock,
						       flags);
				break;
			}

			qp->req.state = QP_STATE_DRAINED;
			spin_unlock_irqrestore(&qp->state_lock, flags);

			if (qp->ibqp.event_handler) {
				struct ib_event ev;

				ev.device = qp->ibqp.device;
				ev.element.qp = &qp->ibqp;
				ev.event = IB_EVENT_SQ_DRAINED;
				qp->ibqp.event_handler(&ev,
					qp->ibqp.qp_context);
			}
		} while (0);
	}

	if (index == prod)
		return NULL;

	wqe = addr_from_index(q, index);

	if (unlikely((qp->req.state == QP_STATE_DRAIN ||
		      qp->req.state == QP_STATE_DRAINED) &&
		     (wqe->state != wqe_state_processing)))
		return NULL;

	if (unlikely((wqe->wr.send_flags & IB_SEND_FENCE) &&
		     (index != cons))) {
		qp->req.wait_fence = 1;
		return NULL;
	}

	wqe->mask = wr_opcode_mask(wqe->wr.opcode, qp);
	return wqe;
}

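/* Pick the RC wire opcode for the next packet of this work request, based
 * on the opcode of the previous packet and on whether the remaining
 * payload fits in a single MTU.
 */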
static int next_opcode_rc(struct rxe_qp *qp, u32 opcode, int fits)
{
	switch (opcode) {
	case IB_WR_RDMA_WRITE:
		if (qp->req.opcode == IB_OPCODE_RC_RDMA_WRITE_FIRST ||
		    qp->req.opcode == IB_OPCODE_RC_RDMA_WRITE_MIDDLE)
			return fits ?
				IB_OPCODE_RC_RDMA_WRITE_LAST :
				IB_OPCODE_RC_RDMA_WRITE_MIDDLE;
		else
			return fits ?
				IB_OPCODE_RC_RDMA_WRITE_ONLY :
				IB_OPCODE_RC_RDMA_WRITE_FIRST;

	case IB_WR_RDMA_WRITE_WITH_IMM:
		if (qp->req.opcode == IB_OPCODE_RC_RDMA_WRITE_FIRST ||
		    qp->req.opcode == IB_OPCODE_RC_RDMA_WRITE_MIDDLE)
			return fits ?
				IB_OPCODE_RC_RDMA_WRITE_LAST_WITH_IMMEDIATE :
				IB_OPCODE_RC_RDMA_WRITE_MIDDLE;
		else
			return fits ?
				IB_OPCODE_RC_RDMA_WRITE_ONLY_WITH_IMMEDIATE :
				IB_OPCODE_RC_RDMA_WRITE_FIRST;

	case IB_WR_SEND:
		if (qp->req.opcode == IB_OPCODE_RC_SEND_FIRST ||
		    qp->req.opcode == IB_OPCODE_RC_SEND_MIDDLE)
			return fits ?
				IB_OPCODE_RC_SEND_LAST :
				IB_OPCODE_RC_SEND_MIDDLE;
		else
			return fits ?
				IB_OPCODE_RC_SEND_ONLY :
				IB_OPCODE_RC_SEND_FIRST;

	case IB_WR_SEND_WITH_IMM:
		if (qp->req.opcode == IB_OPCODE_RC_SEND_FIRST ||
		    qp->req.opcode == IB_OPCODE_RC_SEND_MIDDLE)
			return fits ?
				IB_OPCODE_RC_SEND_LAST_WITH_IMMEDIATE :
				IB_OPCODE_RC_SEND_MIDDLE;
		else
			return fits ?
				IB_OPCODE_RC_SEND_ONLY_WITH_IMMEDIATE :
				IB_OPCODE_RC_SEND_FIRST;

	case IB_WR_RDMA_READ:
		return IB_OPCODE_RC_RDMA_READ_REQUEST;

	case IB_WR_ATOMIC_CMP_AND_SWP:
		return IB_OPCODE_RC_COMPARE_SWAP;

	case IB_WR_ATOMIC_FETCH_AND_ADD:
		return IB_OPCODE_RC_FETCH_ADD;

	case IB_WR_SEND_WITH_INV:
		if (qp->req.opcode == IB_OPCODE_RC_SEND_FIRST ||
		    qp->req.opcode == IB_OPCODE_RC_SEND_MIDDLE)
			return fits ? IB_OPCODE_RC_SEND_LAST_WITH_INVALIDATE :
				IB_OPCODE_RC_SEND_MIDDLE;
		else
			return fits ? IB_OPCODE_RC_SEND_ONLY_WITH_INVALIDATE :
				IB_OPCODE_RC_SEND_FIRST;
	case IB_WR_REG_MR:
	case IB_WR_LOCAL_INV:
		return opcode;
	}

	return -EINVAL;
}

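/* Same as next_opcode_rc() but for UC QPs, which only carry send and
 * RDMA write work requests.
 */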
static int next_opcode_uc(struct rxe_qp *qp, u32 opcode, int fits)
{
	switch (opcode) {
	case IB_WR_RDMA_WRITE:
		if (qp->req.opcode == IB_OPCODE_UC_RDMA_WRITE_FIRST ||
		    qp->req.opcode == IB_OPCODE_UC_RDMA_WRITE_MIDDLE)
			return fits ?
				IB_OPCODE_UC_RDMA_WRITE_LAST :
				IB_OPCODE_UC_RDMA_WRITE_MIDDLE;
		else
			return fits ?
				IB_OPCODE_UC_RDMA_WRITE_ONLY :
				IB_OPCODE_UC_RDMA_WRITE_FIRST;

	case IB_WR_RDMA_WRITE_WITH_IMM:
		if (qp->req.opcode == IB_OPCODE_UC_RDMA_WRITE_FIRST ||
		    qp->req.opcode == IB_OPCODE_UC_RDMA_WRITE_MIDDLE)
			return fits ?
				IB_OPCODE_UC_RDMA_WRITE_LAST_WITH_IMMEDIATE :
				IB_OPCODE_UC_RDMA_WRITE_MIDDLE;
		else
			return fits ?
				IB_OPCODE_UC_RDMA_WRITE_ONLY_WITH_IMMEDIATE :
				IB_OPCODE_UC_RDMA_WRITE_FIRST;

	case IB_WR_SEND:
		if (qp->req.opcode == IB_OPCODE_UC_SEND_FIRST ||
		    qp->req.opcode == IB_OPCODE_UC_SEND_MIDDLE)
			return fits ?
				IB_OPCODE_UC_SEND_LAST :
				IB_OPCODE_UC_SEND_MIDDLE;
		else
			return fits ?
				IB_OPCODE_UC_SEND_ONLY :
				IB_OPCODE_UC_SEND_FIRST;

	case IB_WR_SEND_WITH_IMM:
		if (qp->req.opcode == IB_OPCODE_UC_SEND_FIRST ||
		    qp->req.opcode == IB_OPCODE_UC_SEND_MIDDLE)
			return fits ?
				IB_OPCODE_UC_SEND_LAST_WITH_IMMEDIATE :
				IB_OPCODE_UC_SEND_MIDDLE;
		else
			return fits ?
				IB_OPCODE_UC_SEND_ONLY_WITH_IMMEDIATE :
				IB_OPCODE_UC_SEND_FIRST;
	}

	return -EINVAL;
}

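/* Choose the wire opcode for the next packet based on the QP type;
 * UD-style QPs (UD, SMI, GSI) only ever generate single-packet sends.
 */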
static int next_opcode(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
		       u32 opcode)
{
	int fits = (wqe->dma.resid <= qp->mtu);

	switch (qp_type(qp)) {
	case IB_QPT_RC:
		return next_opcode_rc(qp, opcode, fits);

	case IB_QPT_UC:
		return next_opcode_uc(qp, opcode, fits);

	case IB_QPT_SMI:
	case IB_QPT_UD:
	case IB_QPT_GSI:
		switch (opcode) {
		case IB_WR_SEND:
			return IB_OPCODE_UD_SEND_ONLY;

		case IB_WR_SEND_WITH_IMM:
			return IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE;
		}
		break;

	default:
		break;
	}

	return -EINVAL;
}

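/* Reserve one slot of the initiator read/atomic depth for this WQE;
 * return -EAGAIN if the QP already has the maximum number of outstanding
 * read/atomic requests.
 */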
static inline int check_init_depth(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
{
	int depth;

	if (wqe->has_rd_atomic)
		return 0;

	qp->req.need_rd_atomic = 1;
	depth = atomic_dec_return(&qp->req.rd_atomic);

	if (depth >= 0) {
		qp->req.need_rd_atomic = 0;
		wqe->has_rd_atomic = 1;
		return 0;
	}

	atomic_inc(&qp->req.rd_atomic);
	return -EAGAIN;
}

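/* RC/UC use the path MTU negotiated for the QP; UD-style QPs are limited
 * only by the port MTU.
 */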
static inline int get_mtu(struct rxe_qp *qp)
{
	struct rxe_dev *rxe = to_rdev(qp->ibqp.device);

	if ((qp_type(qp) == IB_QPT_RC) || (qp_type(qp) == IB_QPT_UC))
		return qp->mtu;

	return rxe->port.mtu_cap;
}

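/* Allocate the skb for the next request packet and build its headers
 * (BTH plus any RETH/IMMDT/IETH/ATMETH/DETH) from the WQE and the
 * current requester state.
 */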
static struct sk_buff *init_req_packet(struct rxe_qp *qp,
				       struct rxe_send_wqe *wqe,
				       int opcode, int payload,
				       struct rxe_pkt_info *pkt)
{
	struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
	struct sk_buff *skb;
	struct rxe_send_wr *ibwr = &wqe->wr;
	struct rxe_av *av;
	int pad = (-payload) & 0x3;
	int paylen;
	int solicited;
	u16 pkey;
	u32 qp_num;
	int ack_req;

	/* length from start of bth to end of icrc */
	paylen = rxe_opcode[opcode].length + payload + pad + RXE_ICRC_SIZE;

	/* pkt->hdr, port_num and mask are initialized in the
	 * interface (ifc) layer
	 */
	pkt->opcode = opcode;
	pkt->qp = qp;
	pkt->psn = qp->req.psn;
	pkt->mask = rxe_opcode[opcode].mask;
	pkt->paylen = paylen;
	pkt->wqe = wqe;

	/* init skb */
	av = rxe_get_av(pkt);
	skb = rxe_init_packet(rxe, av, paylen, pkt);
	if (unlikely(!skb))
		return NULL;

	/* init bth */
	solicited = (ibwr->send_flags & IB_SEND_SOLICITED) &&
			(pkt->mask & RXE_END_MASK) &&
			((pkt->mask & (RXE_SEND_MASK)) ||
			(pkt->mask & (RXE_WRITE_MASK | RXE_IMMDT_MASK)) ==
			(RXE_WRITE_MASK | RXE_IMMDT_MASK));

	pkey = IB_DEFAULT_PKEY_FULL;

	qp_num = (pkt->mask & RXE_DETH_MASK) ? ibwr->wr.ud.remote_qpn :
					       qp->attr.dest_qp_num;

	ack_req = ((pkt->mask & RXE_END_MASK) ||
		   (qp->req.noack_pkts++ > RXE_MAX_PKT_PER_ACK));
	if (ack_req)
		qp->req.noack_pkts = 0;

	bth_init(pkt, pkt->opcode, solicited, 0, pad, pkey, qp_num,
		 ack_req, pkt->psn);

	/* init optional headers */
	if (pkt->mask & RXE_RETH_MASK) {
		reth_set_rkey(pkt, ibwr->wr.rdma.rkey);
		reth_set_va(pkt, wqe->iova);
		reth_set_len(pkt, wqe->dma.resid);
	}

	if (pkt->mask & RXE_IMMDT_MASK)
		immdt_set_imm(pkt, ibwr->ex.imm_data);

	if (pkt->mask & RXE_IETH_MASK)
		ieth_set_rkey(pkt, ibwr->ex.invalidate_rkey);

	if (pkt->mask & RXE_ATMETH_MASK) {
		atmeth_set_va(pkt, wqe->iova);
		if (opcode == IB_OPCODE_RC_COMPARE_SWAP ||
		    opcode == IB_OPCODE_RD_COMPARE_SWAP) {
			atmeth_set_swap_add(pkt, ibwr->wr.atomic.swap);
			atmeth_set_comp(pkt, ibwr->wr.atomic.compare_add);
		} else {
			atmeth_set_swap_add(pkt, ibwr->wr.atomic.compare_add);
		}
		atmeth_set_rkey(pkt, ibwr->wr.atomic.rkey);
	}

	if (pkt->mask & RXE_DETH_MASK) {
		if (qp->ibqp.qp_num == 1)
			deth_set_qkey(pkt, GSI_QKEY);
		else
			deth_set_qkey(pkt, ibwr->wr.ud.remote_qkey);
		deth_set_sqp(pkt, qp->ibqp.qp_num);
	}

	return skb;
}

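/* Build the lower-level network headers for the skb and copy the payload
 * into it, either from inline data or from the SGE list, zeroing any pad
 * bytes at the end.
 */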
static int finish_packet(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
			 struct rxe_pkt_info *pkt, struct sk_buff *skb,
			 int paylen)
{
	int err;

	err = rxe_prepare(pkt, skb);
	if (err)
		return err;

	if (pkt->mask & RXE_WRITE_OR_SEND) {
		if (wqe->wr.send_flags & IB_SEND_INLINE) {
			u8 *tmp = &wqe->dma.inline_data[wqe->dma.sge_offset];

			memcpy(payload_addr(pkt), tmp, paylen);

			wqe->dma.resid -= paylen;
			wqe->dma.sge_offset += paylen;
		} else {
			err = copy_data(qp->pd, 0, &wqe->dma,
					payload_addr(pkt), paylen,
					RXE_FROM_MR_OBJ);
			if (err)
				return err;
		}
		if (bth_pad(pkt)) {
			u8 *pad = payload_addr(pkt) + paylen;

			memset(pad, 0, bth_pad(pkt));
		}
	}

	return 0;
}

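/* After the last packet of an RC request is built the WQE waits for its
 * acknowledgement (pending); a packet in the middle of a request leaves
 * the WQE in the processing state.
 */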
static void update_wqe_state(struct rxe_qp *qp,
			     struct rxe_send_wqe *wqe,
			     struct rxe_pkt_info *pkt)
{
	if (pkt->mask & RXE_END_MASK) {
		if (qp_type(qp) == IB_QPT_RC)
			wqe->state = wqe_state_pending;
	} else {
		wqe->state = wqe_state_processing;
	}
}

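/* Record the PSN range covered by this WQE on its first packet and advance
 * the requester PSN: reads consume one PSN per expected response packet,
 * everything else consumes one PSN per request packet.
 */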
static void update_wqe_psn(struct rxe_qp *qp,
			   struct rxe_send_wqe *wqe,
			   struct rxe_pkt_info *pkt,
			   int payload)
{
	/* number of packets left to send including current one */
	int num_pkt = (wqe->dma.resid + payload + qp->mtu - 1) / qp->mtu;

	/* handle zero length packet case */
	if (num_pkt == 0)
		num_pkt = 1;

	if (pkt->mask & RXE_START_MASK) {
		wqe->first_psn = qp->req.psn;
		wqe->last_psn = (qp->req.psn + num_pkt - 1) & BTH_PSN_MASK;
	}

	if (pkt->mask & RXE_READ_MASK)
		qp->req.psn = (wqe->first_psn + num_pkt) & BTH_PSN_MASK;
	else
		qp->req.psn = (qp->req.psn + 1) & BTH_PSN_MASK;
}

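/* Snapshot the WQE state and PSN fields so that a failed transmit can be
 * undone by rollback_state().
 */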
static void save_state(struct rxe_send_wqe *wqe,
		       struct rxe_qp *qp,
		       struct rxe_send_wqe *rollback_wqe,
		       u32 *rollback_psn)
{
	rollback_wqe->state = wqe->state;
	rollback_wqe->first_psn = wqe->first_psn;
	rollback_wqe->last_psn = wqe->last_psn;
	*rollback_psn = qp->req.psn;
}

static void rollback_state(struct rxe_send_wqe *wqe,
			   struct rxe_qp *qp,
			   struct rxe_send_wqe *rollback_wqe,
			   u32 rollback_psn)
{
	wqe->state = rollback_wqe->state;
	wqe->first_psn = rollback_wqe->first_psn;
	wqe->last_psn = rollback_wqe->last_psn;
	qp->req.psn = rollback_psn;
}

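/* Advance the requester state after a packet has been successfully queued
 * for transmission and arm the retransmit timer if needed.
 */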
static void update_state(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
			 struct rxe_pkt_info *pkt, int payload)
{
	qp->req.opcode = pkt->opcode;

	if (pkt->mask & RXE_END_MASK)
		qp->req.wqe_index = next_index(qp->sq.queue, qp->req.wqe_index);

	qp->need_req_skb = 0;

	if (qp->qp_timeout_jiffies && !timer_pending(&qp->retrans_timer))
		mod_timer(&qp->retrans_timer,
			  jiffies + qp->qp_timeout_jiffies);
}

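/* Execute work requests that are handled locally (memory registration,
 * invalidate and memory window bind) without putting anything on the wire.
 */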
static int rxe_do_local_ops(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
{
	u8 opcode = wqe->wr.opcode;
	struct rxe_mr *mr;
	u32 rkey;
	int ret;

	switch (opcode) {
	case IB_WR_LOCAL_INV:
		rkey = wqe->wr.ex.invalidate_rkey;
		if (rkey_is_mw(rkey))
			ret = rxe_invalidate_mw(qp, rkey);
		else
			ret = rxe_invalidate_mr(qp, rkey);

		if (unlikely(ret)) {
			wqe->status = IB_WC_LOC_QP_OP_ERR;
			return ret;
		}
		break;
	case IB_WR_REG_MR:
		mr = to_rmr(wqe->wr.wr.reg.mr);
		rxe_add_ref(mr);
		mr->state = RXE_MR_STATE_VALID;
		mr->access = wqe->wr.wr.reg.access;
		mr->ibmr.lkey = wqe->wr.wr.reg.key;
		mr->ibmr.rkey = wqe->wr.wr.reg.key;
		mr->iova = wqe->wr.wr.reg.mr->iova;
		rxe_drop_ref(mr);
		break;
	case IB_WR_BIND_MW:
		ret = rxe_bind_mw(qp, wqe);
		if (unlikely(ret)) {
			wqe->status = IB_WC_MW_BIND_ERR;
			return ret;
		}
		break;
	default:
		pr_err("Unexpected send wqe opcode %d\n", opcode);
		wqe->status = IB_WC_LOC_QP_OP_ERR;
		return -EINVAL;
	}

	wqe->state = wqe_state_done;
	wqe->status = IB_WC_SUCCESS;
	qp->req.wqe_index = next_index(qp->sq.queue, qp->req.wqe_index);

	if ((wqe->wr.send_flags & IB_SEND_SIGNALED) ||
	    qp->sq_sig_type == IB_SIGNAL_ALL_WR)
		rxe_run_task(&qp->comp.task, 1);

	return 0;
}

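/* Requester task: repeatedly take the next WQE from the send queue, build
 * one request packet per iteration and hand it to the transmit path, until
 * there is no more work to do or an error stops processing.
 */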
int rxe_requester(void *arg)
{
	struct rxe_qp *qp = (struct rxe_qp *)arg;
	struct rxe_pkt_info pkt;
	struct sk_buff *skb;
	struct rxe_send_wqe *wqe;
	enum rxe_hdr_mask mask;
	int payload;
	int mtu;
	int opcode;
	int ret;
	struct rxe_send_wqe rollback_wqe;
	u32 rollback_psn;
	struct rxe_queue *q = qp->sq.queue;

	rxe_add_ref(qp);

next_wqe:
	if (unlikely(!qp->valid || qp->req.state == QP_STATE_ERROR))
		goto exit;

	if (unlikely(qp->req.state == QP_STATE_RESET)) {
		qp->req.wqe_index = consumer_index(q, q->type);
		qp->req.opcode = -1;
		qp->req.need_rd_atomic = 0;
		qp->req.wait_psn = 0;
		qp->req.need_retry = 0;
		goto exit;
	}

	if (unlikely(qp->req.need_retry)) {
		req_retry(qp);
		qp->req.need_retry = 0;
	}

	wqe = req_next_wqe(qp);
	if (unlikely(!wqe))
		goto exit;

	if (wqe->mask & WR_LOCAL_OP_MASK) {
		ret = rxe_do_local_ops(qp, wqe);
		if (unlikely(ret))
			goto err;
		else
			goto next_wqe;
	}

	if (unlikely(qp_type(qp) == IB_QPT_RC &&
		     psn_compare(qp->req.psn, (qp->comp.psn +
				 RXE_MAX_UNACKED_PSNS)) > 0)) {
		qp->req.wait_psn = 1;
		goto exit;
	}

	/* Limit the number of inflight SKBs per QP */
	if (unlikely(atomic_read(&qp->skb_out) >
		     RXE_INFLIGHT_SKBS_PER_QP_HIGH)) {
		qp->need_req_skb = 1;
		goto exit;
	}

	opcode = next_opcode(qp, wqe, wqe->wr.opcode);
	if (unlikely(opcode < 0)) {
		wqe->status = IB_WC_LOC_QP_OP_ERR;
		goto exit;
	}

	mask = rxe_opcode[opcode].mask;
	if (unlikely(mask & RXE_READ_OR_ATOMIC)) {
		if (check_init_depth(qp, wqe))
			goto exit;
	}

	mtu = get_mtu(qp);
	payload = (mask & RXE_WRITE_OR_SEND) ? wqe->dma.resid : 0;
	if (payload > mtu) {
		if (qp_type(qp) == IB_QPT_UD) {
			/* A UD send whose total length exceeds the port MTU
			 * must not be transmitted and must not raise an
			 * error completion, so mark the WQE done with a
			 * success status and let the completer report it.
			 */
			wqe->first_psn = qp->req.psn;
			wqe->last_psn = qp->req.psn;
			qp->req.psn = (qp->req.psn + 1) & BTH_PSN_MASK;
			qp->req.opcode = IB_OPCODE_UD_SEND_ONLY;
			qp->req.wqe_index = next_index(qp->sq.queue,
						       qp->req.wqe_index);
			wqe->state = wqe_state_done;
			wqe->status = IB_WC_SUCCESS;
			__rxe_do_task(&qp->comp.task);
			rxe_drop_ref(qp);
			return 0;
		}
		payload = mtu;
	}

724
725 skb = init_req_packet(qp, wqe, opcode, payload, &pkt);
726 if (unlikely(!skb)) {
727 pr_err("qp#%d Failed allocating skb\n", qp_num(qp));
728 wqe->status = IB_WC_LOC_QP_OP_ERR;
729 goto err;
730 }
731
732 ret = finish_packet(qp, wqe, &pkt, skb, payload);
733 if (unlikely(ret)) {
734 pr_debug("qp#%d Error during finish packet\n", qp_num(qp));
735 if (ret == -EFAULT)
736 wqe->status = IB_WC_LOC_PROT_ERR;
737 else
738 wqe->status = IB_WC_LOC_QP_OP_ERR;
739 kfree_skb(skb);
740 goto err;
741 }
742
743
744
745
746
747
748
749 save_state(wqe, qp, &rollback_wqe, &rollback_psn);
750 update_wqe_state(qp, wqe, &pkt);
751 update_wqe_psn(qp, wqe, &pkt, payload);
752 ret = rxe_xmit_packet(qp, &pkt, skb);
753 if (ret) {
754 qp->need_req_skb = 1;
755
756 rollback_state(wqe, qp, &rollback_wqe, rollback_psn);
757
758 if (ret == -EAGAIN) {
759 rxe_run_task(&qp->req.task, 1);
760 goto exit;
761 }
762
763 wqe->status = IB_WC_LOC_QP_OP_ERR;
764 goto err;
765 }
766
767 update_state(qp, wqe, &pkt, payload);
768
769 goto next_wqe;
770
771err:
772 wqe->state = wqe_state_error;
773 __rxe_do_task(&qp->comp.task);
774
775exit:
776 rxe_drop_ref(qp);
777 return -EAGAIN;
778}
779