#include <rdma/uverbs_ioctl.h>

#include "iw_cxgb4.h"
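
/*
 * destroy_cq - post an FW_RI_RES_WR RESET command for the CQ, then free
 * the software queue, the DMA queue memory and the CQID.
 */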
static void destroy_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
		       struct c4iw_dev_ucontext *uctx, struct sk_buff *skb,
		       struct c4iw_wr_wait *wr_waitp)
{
	struct fw_ri_res_wr *res_wr;
	struct fw_ri_res *res;
	int wr_len;

	wr_len = sizeof(*res_wr) + sizeof(*res);
	set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);

	res_wr = __skb_put_zero(skb, wr_len);
	res_wr->op_nres = cpu_to_be32(
			FW_WR_OP_V(FW_RI_RES_WR) |
			FW_RI_RES_WR_NRES_V(1) |
			FW_WR_COMPL_F);
	res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16));
	res_wr->cookie = (uintptr_t)wr_waitp;
	res = res_wr->res;
	res->u.cq.restype = FW_RI_RES_TYPE_CQ;
	res->u.cq.op = FW_RI_RES_OP_RESET;
	res->u.cq.iqid = cpu_to_be32(cq->cqid);

	c4iw_init_wr_wait(wr_waitp);
	c4iw_ref_send_wait(rdev, skb, wr_waitp, 0, 0, __func__);

	kfree(cq->sw_queue);
	dma_free_coherent(&(rdev->lldi.pdev->dev),
			  cq->memsize, cq->queue,
			  dma_unmap_addr(cq, mapping));
	c4iw_put_cqid(rdev, cq->cqid, uctx);
}
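
/*
 * create_cq - allocate a CQID, the software queue and the DMA queue memory,
 * then post an FW_RI_RES_WR WRITE command to create the CQ in hardware.
 */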
static int create_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
		     struct c4iw_dev_ucontext *uctx,
		     struct c4iw_wr_wait *wr_waitp)
{
	struct fw_ri_res_wr *res_wr;
	struct fw_ri_res *res;
	int wr_len;
	int user = (uctx != &rdev->uctx);
	int ret;
	struct sk_buff *skb;
	struct c4iw_ucontext *ucontext = NULL;

	if (user)
		ucontext = container_of(uctx, struct c4iw_ucontext, uctx);

	cq->cqid = c4iw_get_cqid(rdev, uctx);
	if (!cq->cqid) {
		ret = -ENOMEM;
		goto err1;
	}

	if (!user) {
		cq->sw_queue = kzalloc(cq->memsize, GFP_KERNEL);
		if (!cq->sw_queue) {
			ret = -ENOMEM;
			goto err2;
		}
	}
	cq->queue = dma_alloc_coherent(&rdev->lldi.pdev->dev, cq->memsize,
				       &cq->dma_addr, GFP_KERNEL);
	if (!cq->queue) {
		ret = -ENOMEM;
		goto err3;
	}
	dma_unmap_addr_set(cq, mapping, cq->dma_addr);

	if (user && ucontext->is_32b_cqe) {
		cq->qp_errp = &((struct t4_status_page *)
		((u8 *)cq->queue + (cq->size - 1) *
		 (sizeof(*cq->queue) / 2)))->qp_err;
	} else {
		cq->qp_errp = &((struct t4_status_page *)
		((u8 *)cq->queue + (cq->size - 1) *
		 sizeof(*cq->queue)))->qp_err;
	}

	/* build fw_ri_res_wr */
	wr_len = sizeof(*res_wr) + sizeof(*res);

	skb = alloc_skb(wr_len, GFP_KERNEL);
	if (!skb) {
		ret = -ENOMEM;
		goto err4;
	}
	set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);

	res_wr = __skb_put_zero(skb, wr_len);
	res_wr->op_nres = cpu_to_be32(
			FW_WR_OP_V(FW_RI_RES_WR) |
			FW_RI_RES_WR_NRES_V(1) |
			FW_WR_COMPL_F);
	res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16));
	res_wr->cookie = (uintptr_t)wr_waitp;
	res = res_wr->res;
	res->u.cq.restype = FW_RI_RES_TYPE_CQ;
	res->u.cq.op = FW_RI_RES_OP_WRITE;
	res->u.cq.iqid = cpu_to_be32(cq->cqid);
	res->u.cq.iqandst_to_iqandstindex = cpu_to_be32(
			FW_RI_RES_WR_IQANUS_V(0) |
			FW_RI_RES_WR_IQANUD_V(1) |
			FW_RI_RES_WR_IQANDST_F |
			FW_RI_RES_WR_IQANDSTINDEX_V(
				rdev->lldi.ciq_ids[cq->vector]));
	res->u.cq.iqdroprss_to_iqesize = cpu_to_be16(
			FW_RI_RES_WR_IQDROPRSS_F |
			FW_RI_RES_WR_IQPCIECH_V(2) |
			FW_RI_RES_WR_IQINTCNTTHRESH_V(0) |
			FW_RI_RES_WR_IQO_F |
			((user && ucontext->is_32b_cqe) ?
			 FW_RI_RES_WR_IQESIZE_V(1) :
			 FW_RI_RES_WR_IQESIZE_V(2)));
	res->u.cq.iqsize = cpu_to_be16(cq->size);
	res->u.cq.iqaddr = cpu_to_be64(cq->dma_addr);

	c4iw_init_wr_wait(wr_waitp);
	ret = c4iw_ref_send_wait(rdev, skb, wr_waitp, 0, 0, __func__);
	if (ret)
		goto err4;

	cq->gen = 1;
	cq->gts = rdev->lldi.gts_reg;
	cq->rdev = rdev;

	cq->bar2_va = c4iw_bar2_addrs(rdev, cq->cqid, CXGB4_BAR2_QTYPE_INGRESS,
				      &cq->bar2_qid,
				      user ? &cq->bar2_pa : NULL);
	if (user && !cq->bar2_pa) {
		pr_warn("%s: cqid %u not in BAR2 range\n",
			pci_name(rdev->lldi.pdev), cq->cqid);
		ret = -EINVAL;
		goto err4;
	}
	return 0;
err4:
	dma_free_coherent(&rdev->lldi.pdev->dev, cq->memsize, cq->queue,
			  dma_unmap_addr(cq, mapping));
err3:
	kfree(cq->sw_queue);
err2:
	c4iw_put_cqid(rdev, cq->cqid, uctx);
err1:
	return ret;
}
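
/*
 * insert_recv_cqe - place a software-generated flush CQE for an RQ/SRQ
 * work request on the CQ's software queue.
 */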
static void insert_recv_cqe(struct t4_wq *wq, struct t4_cq *cq, u32 srqidx)
{
	struct t4_cqe cqe;

	pr_debug("wq %p cq %p sw_cidx %u sw_pidx %u\n",
		 wq, cq, cq->sw_cidx, cq->sw_pidx);
	memset(&cqe, 0, sizeof(cqe));
	cqe.header = cpu_to_be32(CQE_STATUS_V(T4_ERR_SWFLUSH) |
				 CQE_OPCODE_V(FW_RI_SEND) |
				 CQE_TYPE_V(0) |
				 CQE_SWCQE_V(1) |
				 CQE_QPID_V(wq->sq.qid));
	cqe.bits_type_ts = cpu_to_be64(CQE_GENBIT_V((u64)cq->gen));
	if (srqidx)
		cqe.u.srcqe.abs_rqe_idx = cpu_to_be32(srqidx);
	cq->sw_queue[cq->sw_pidx] = cqe;
	t4_swcq_produce(cq);
}
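
/*
 * c4iw_flush_rq - generate flush CQEs for all RQ WRs still in use,
 * skipping the first @count entries.  Returns the number flushed.
 */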
int c4iw_flush_rq(struct t4_wq *wq, struct t4_cq *cq, int count)
{
	int flushed = 0;
	int in_use = wq->rq.in_use - count;

	pr_debug("wq %p cq %p rq.in_use %u skip count %u\n",
		 wq, cq, wq->rq.in_use, count);
	while (in_use--) {
		insert_recv_cqe(wq, cq, 0);
		flushed++;
	}
	return flushed;
}

static void insert_sq_cqe(struct t4_wq *wq, struct t4_cq *cq,
			  struct t4_swsqe *swcqe)
{
	struct t4_cqe cqe;

	pr_debug("wq %p cq %p sw_cidx %u sw_pidx %u\n",
		 wq, cq, cq->sw_cidx, cq->sw_pidx);
	memset(&cqe, 0, sizeof(cqe));
	cqe.header = cpu_to_be32(CQE_STATUS_V(T4_ERR_SWFLUSH) |
				 CQE_OPCODE_V(swcqe->opcode) |
				 CQE_TYPE_V(1) |
				 CQE_SWCQE_V(1) |
				 CQE_QPID_V(wq->sq.qid));
	CQE_WRID_SQ_IDX(&cqe) = swcqe->idx;
	cqe.bits_type_ts = cpu_to_be64(CQE_GENBIT_V((u64)cq->gen));
	cq->sw_queue[cq->sw_pidx] = cqe;
	t4_swcq_produce(cq);
}

static void advance_oldest_read(struct t4_wq *wq);

int c4iw_flush_sq(struct c4iw_qp *qhp)
{
	int flushed = 0;
	struct t4_wq *wq = &qhp->wq;
	struct c4iw_cq *chp = to_c4iw_cq(qhp->ibqp.send_cq);
	struct t4_cq *cq = &chp->cq;
	int idx;
	struct t4_swsqe *swsqe;

	if (wq->sq.flush_cidx == -1)
		wq->sq.flush_cidx = wq->sq.cidx;
	idx = wq->sq.flush_cidx;
	while (idx != wq->sq.pidx) {
		swsqe = &wq->sq.sw_sq[idx];
		swsqe->flushed = 1;
		insert_sq_cqe(wq, cq, swsqe);
		if (wq->sq.oldest_read == swsqe) {
			advance_oldest_read(wq);
		}
		flushed++;
		if (++idx == wq->sq.size)
			idx = 0;
	}
	wq->sq.flush_cidx += flushed;
	if (wq->sq.flush_cidx >= wq->sq.size)
		wq->sq.flush_cidx -= wq->sq.size;
	return flushed;
}
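
/*
 * flush_completed_wrs - move any completed-but-unsignaled SQ WRs that are
 * now in order into the software CQ.
 */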
static void flush_completed_wrs(struct t4_wq *wq, struct t4_cq *cq)
{
	struct t4_swsqe *swsqe;
	int cidx;

	if (wq->sq.flush_cidx == -1)
		wq->sq.flush_cidx = wq->sq.cidx;
	cidx = wq->sq.flush_cidx;

	while (cidx != wq->sq.pidx) {
		swsqe = &wq->sq.sw_sq[cidx];
		if (!swsqe->signaled) {
			if (++cidx == wq->sq.size)
				cidx = 0;
		} else if (swsqe->complete) {

			/*
			 * Insert this completed cqe into the swcq.
			 */
			pr_debug("moving cqe into swcq sq idx %u cq idx %u\n",
				 cidx, cq->sw_pidx);
			swsqe->cqe.header |= htonl(CQE_SWCQE_V(1));
			cq->sw_queue[cq->sw_pidx] = swsqe->cqe;
			t4_swcq_produce(cq);
			swsqe->flushed = 1;
			if (++cidx == wq->sq.size)
				cidx = 0;
			wq->sq.flush_cidx = cidx;
		} else
			break;
	}
}
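
/*
 * create_read_req_cqe - build a read-request SQ CQE in local memory from
 * the read-response CQE delivered by the hardware.
 */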
static void create_read_req_cqe(struct t4_wq *wq, struct t4_cqe *hw_cqe,
				struct t4_cqe *read_cqe)
{
	read_cqe->u.scqe.cidx = wq->sq.oldest_read->idx;
	read_cqe->len = htonl(wq->sq.oldest_read->read_len);
	read_cqe->header = htonl(CQE_QPID_V(CQE_QPID(hw_cqe)) |
				 CQE_SWCQE_V(SW_CQE(hw_cqe)) |
				 CQE_OPCODE_V(FW_RI_READ_REQ) |
				 CQE_TYPE_V(1));
	read_cqe->bits_type_ts = hw_cqe->bits_type_ts;
}

static void advance_oldest_read(struct t4_wq *wq)
{

	u32 rptr = wq->sq.oldest_read - wq->sq.sw_sq + 1;

	if (rptr == wq->sq.size)
		rptr = 0;
	while (rptr != wq->sq.pidx) {
		wq->sq.oldest_read = &wq->sq.sw_sq[rptr];

		if (wq->sq.oldest_read->opcode == FW_RI_READ_REQ)
			return;
		if (++rptr == wq->sq.size)
			rptr = 0;
	}
	wq->sq.oldest_read = NULL;
}

/*
 * Move all CQEs from the HWCQ into the SWCQ.
 * Deal with out-of-order and/or completions that complete
 * prior unsignaled WRs.
 */
void c4iw_flush_hw_cq(struct c4iw_cq *chp, struct c4iw_qp *flush_qhp)
{
	struct t4_cqe *hw_cqe, *swcqe, read_cqe;
	struct c4iw_qp *qhp;
	struct t4_swsqe *swsqe;
	int ret;

	pr_debug("cqid 0x%x\n", chp->cq.cqid);
	ret = t4_next_hw_cqe(&chp->cq, &hw_cqe);

	/*
	 * This logic is similar to poll_cq(), but not quite the same:
	 * pertinent HW CQEs must be moved into the SW CQ, while read
	 * responses and unsignaled completions are handled here as well.
	 */
	while (!ret) {
		qhp = get_qhp(chp->rhp, CQE_QPID(hw_cqe));

		/*
		 * drop CQEs with no associated QP
		 */
		if (qhp == NULL)
			goto next_cqe;

		if (flush_qhp != qhp) {
			spin_lock(&qhp->lock);

			if (qhp->wq.flushed == 1)
				goto next_cqe;
		}

		if (CQE_OPCODE(hw_cqe) == FW_RI_TERMINATE)
			goto next_cqe;

		if (CQE_OPCODE(hw_cqe) == FW_RI_READ_RESP) {

			/* If we have reached here because of async
			 * event or other error, and have egress error
			 * then drop
			 */
			if (CQE_TYPE(hw_cqe) == 1)
				goto next_cqe;

			/* drop peer2peer RTR reads.
			 */
			if (CQE_WRID_STAG(hw_cqe) == 1)
				goto next_cqe;

			/*
			 * Eat completions for unsignaled read WRs.
			 */
			if (!qhp->wq.sq.oldest_read->signaled) {
				advance_oldest_read(&qhp->wq);
				goto next_cqe;
			}

			/*
			 * Don't write to the HWCQ, create a new read req CQE
			 * in local memory and move it into the swcq.
			 */
			create_read_req_cqe(&qhp->wq, hw_cqe, &read_cqe);
			hw_cqe = &read_cqe;
			advance_oldest_read(&qhp->wq);
		}

		/* if its a SQ completion, then do the magic to move all the
		 * unsignaled and now in-order completions into the swcq.
		 */
		if (SQ_TYPE(hw_cqe)) {
			swsqe = &qhp->wq.sq.sw_sq[CQE_WRID_SQ_IDX(hw_cqe)];
			swsqe->cqe = *hw_cqe;
			swsqe->complete = 1;
			flush_completed_wrs(&qhp->wq, &chp->cq);
		} else {
			swcqe = &chp->cq.sw_queue[chp->cq.sw_pidx];
			*swcqe = *hw_cqe;
			swcqe->header |= cpu_to_be32(CQE_SWCQE_V(1));
			t4_swcq_produce(&chp->cq);
		}
next_cqe:
		t4_hwcq_consume(&chp->cq);
		ret = t4_next_hw_cqe(&chp->cq, &hw_cqe);
		if (qhp && flush_qhp != qhp)
			spin_unlock(&qhp->lock);
	}
}
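
/*
 * cqe_completes_wr - return 1 if this software CQE completes an actual
 * user receive work request, 0 if it should not be counted (drain CQEs,
 * terminates, RQ writes, SQ read responses, or sends with an empty RQ).
 */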
static int cqe_completes_wr(struct t4_cqe *cqe, struct t4_wq *wq)
{
	if (DRAIN_CQE(cqe)) {
		WARN_ONCE(1, "Unexpected DRAIN CQE qp id %u!\n", wq->sq.qid);
		return 0;
	}

	if (CQE_OPCODE(cqe) == FW_RI_TERMINATE)
		return 0;

	if ((CQE_OPCODE(cqe) == FW_RI_RDMA_WRITE) && RQ_TYPE(cqe))
		return 0;

	if ((CQE_OPCODE(cqe) == FW_RI_READ_RESP) && SQ_TYPE(cqe))
		return 0;

	if (CQE_SEND_OPCODE(cqe) && RQ_TYPE(cqe) && t4_rq_empty(wq))
		return 0;
	return 1;
}

void c4iw_count_rcqes(struct t4_cq *cq, struct t4_wq *wq, int *count)
{
	struct t4_cqe *cqe;
	u32 ptr;

	*count = 0;
	pr_debug("count zero %d\n", *count);
	ptr = cq->sw_cidx;
	while (ptr != cq->sw_pidx) {
		cqe = &cq->sw_queue[ptr];
		if (RQ_TYPE(cqe) && (CQE_OPCODE(cqe) != FW_RI_READ_RESP) &&
		    (CQE_QPID(cqe) == wq->sq.qid) && cqe_completes_wr(cqe, wq))
			(*count)++;
		if (++ptr == cq->size)
			ptr = 0;
	}
	pr_debug("cq %p count %d\n", cq, *count);
}
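
/*
 * post_pending_srq_wrs - copy any deferred SRQ WRs into the queue once the
 * out-of-order completions have been reaped, then ring the doorbell.
 */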
static void post_pending_srq_wrs(struct t4_srq *srq)
{
	struct t4_srq_pending_wr *pwr;
	u16 idx = 0;

	while (srq->pending_in_use) {
		pwr = &srq->pending_wrs[srq->pending_cidx];
		srq->sw_rq[srq->pidx].wr_id = pwr->wr_id;
		srq->sw_rq[srq->pidx].valid = 1;

		pr_debug("%s posting pending cidx %u pidx %u wq_pidx %u in_use %u rq_size %u wr_id %llx\n",
			 __func__,
			 srq->cidx, srq->pidx, srq->wq_pidx,
			 srq->in_use, srq->size,
			 (unsigned long long)pwr->wr_id);

		c4iw_copy_wr_to_srq(srq, &pwr->wqe, pwr->len16);
		t4_srq_consume_pending_wr(srq);
		t4_srq_produce(srq, pwr->len16);
		idx += DIV_ROUND_UP(pwr->len16 * 16, T4_EQ_ENTRY_SIZE);
	}

	if (idx) {
		t4_ring_srq_db(srq, idx, pwr->len16, &pwr->wqe);
		srq->queue[srq->size].status.host_wq_pidx =
			srq->wq_pidx;
	}
}
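
/*
 * reap_srq_cqe - consume the SRQ entry referenced by the CQE.  Entries can
 * complete out of order, so track out-of-order completions and only advance
 * cidx (and post deferred WRs) once the in-order entry is reaped.
 */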
static u64 reap_srq_cqe(struct t4_cqe *hw_cqe, struct t4_srq *srq)
{
	int rel_idx = CQE_ABS_RQE_IDX(hw_cqe) - srq->rqt_abs_idx;
	u64 wr_id;

	srq->sw_rq[rel_idx].valid = 0;
	wr_id = srq->sw_rq[rel_idx].wr_id;

	if (rel_idx == srq->cidx) {
		pr_debug("%s in order cqe rel_idx %u cidx %u pidx %u wq_pidx %u in_use %u rq_size %u wr_id %llx\n",
			 __func__, rel_idx, srq->cidx, srq->pidx,
			 srq->wq_pidx, srq->in_use, srq->size,
			 (unsigned long long)srq->sw_rq[rel_idx].wr_id);
		t4_srq_consume(srq);
		while (srq->ooo_count && !srq->sw_rq[srq->cidx].valid) {
			pr_debug("%s eat ooo cidx %u pidx %u wq_pidx %u in_use %u rq_size %u ooo_count %u wr_id %llx\n",
				 __func__, srq->cidx, srq->pidx,
				 srq->wq_pidx, srq->in_use,
				 srq->size, srq->ooo_count,
				 (unsigned long long)
				 srq->sw_rq[srq->cidx].wr_id);
			t4_srq_consume_ooo(srq);
		}
		if (srq->ooo_count == 0 && srq->pending_in_use)
			post_pending_srq_wrs(srq);
	} else {
		pr_debug("%s ooo cqe rel_idx %u cidx %u pidx %u wq_pidx %u in_use %u rq_size %u ooo_count %u wr_id %llx\n",
			 __func__, rel_idx, srq->cidx,
			 srq->pidx, srq->wq_pidx,
			 srq->in_use, srq->size,
			 srq->ooo_count,
			 (unsigned long long)srq->sw_rq[rel_idx].wr_id);
		t4_srq_produce_ooo(srq);
	}
	return wr_id;
}

/*
 * poll_cq
 *
 * Caller must:
 *     check the validity of the first CQE,
 *     supply the wq associated with the qpid.
 *
 * credit: cq credit to return to sge.
 * cqe_flushed: 1 iff the CQE is flushed.
 * cqe: copy of the polled CQE.
 *
 * return value:
 *   0             CQE returned ok.
 *   -EAGAIN       CQE skipped, try again.
 *   -ENODATA      CQE is empty.
 */
static int poll_cq(struct t4_wq *wq, struct t4_cq *cq, struct t4_cqe *cqe,
		   u8 *cqe_flushed, u64 *cookie, u32 *credit,
		   struct t4_srq *srq)
{
	int ret = 0;
	struct t4_cqe *hw_cqe, read_cqe;

	*cqe_flushed = 0;
	*credit = 0;
	ret = t4_next_cqe(cq, &hw_cqe);
	if (ret)
		return ret;

	pr_debug("CQE OVF %u qpid 0x%0x genbit %u type %u status 0x%0x opcode 0x%0x len 0x%0x wrid_hi_stag 0x%x wrid_low_msn 0x%x\n",
		 CQE_OVFBIT(hw_cqe), CQE_QPID(hw_cqe),
		 CQE_GENBIT(hw_cqe), CQE_TYPE(hw_cqe), CQE_STATUS(hw_cqe),
		 CQE_OPCODE(hw_cqe), CQE_LEN(hw_cqe), CQE_WRID_HI(hw_cqe),
		 CQE_WRID_LOW(hw_cqe));

	/*
	 * skip cqe's not affiliated with a QP.
	 */
	if (wq == NULL) {
		ret = -EAGAIN;
		goto skip_cqe;
	}

	/*
	 * skip hw cqe's if the wq is already flushed.
	 */
	if (wq->flushed && !SW_CQE(hw_cqe)) {
		ret = -EAGAIN;
		goto skip_cqe;
	}

	/*
	 * skip TERMINATE cqes...
	 */
	if (CQE_OPCODE(hw_cqe) == FW_RI_TERMINATE) {
		ret = -EAGAIN;
		goto skip_cqe;
	}

	/*
	 * Special cqe for drain WR completions...
	 */
	if (DRAIN_CQE(hw_cqe)) {
		*cookie = CQE_DRAIN_COOKIE(hw_cqe);
		*cqe = *hw_cqe;
		goto skip_cqe;
	}

	/*
	 * Gotta tweak READ completions:
	 *	1) the cqe doesn't contain the sq_wptr from the wr.
	 *	2) opcode not reflected from the wr.
	 *	3) read_len not reflected from the wr.
	 *	4) cq_type is RQ_TYPE not SQ_TYPE.
	 */
	if (RQ_TYPE(hw_cqe) && (CQE_OPCODE(hw_cqe) == FW_RI_READ_RESP)) {

		/* If we have reached here because of async
		 * event or other error, and have egress error
		 * then drop
		 */
		if (CQE_TYPE(hw_cqe) == 1) {
			if (CQE_STATUS(hw_cqe))
				t4_set_wq_in_error(wq, 0);
			ret = -EAGAIN;
			goto skip_cqe;
		}

		/* If this is an unsolicited read response, then the read
		 * was generated by the kernel driver as part of peer-2-peer
		 * connection setup.  So ignore the completion.
		 */
		if (CQE_WRID_STAG(hw_cqe) == 1) {
			if (CQE_STATUS(hw_cqe))
				t4_set_wq_in_error(wq, 0);
			ret = -EAGAIN;
			goto skip_cqe;
		}

		/*
		 * Eat completions for unsignaled read WRs.
		 */
		if (!wq->sq.oldest_read->signaled) {
			advance_oldest_read(wq);
			ret = -EAGAIN;
			goto skip_cqe;
		}

		/*
		 * Don't write to the HWCQ, so create a new read req CQE
		 * in local memory.
		 */
		create_read_req_cqe(wq, hw_cqe, &read_cqe);
		hw_cqe = &read_cqe;
		advance_oldest_read(wq);
	}

	if (CQE_STATUS(hw_cqe) || t4_wq_in_error(wq)) {
		*cqe_flushed = (CQE_STATUS(hw_cqe) == T4_ERR_SWFLUSH);
		t4_set_wq_in_error(wq, 0);
	}

	/*
	 * RECV completion.
	 */
	if (RQ_TYPE(hw_cqe)) {

		/*
		 * HW only validates 4 bits of MSN.  So we must validate that
		 * the MSN in the SEND is the next expected MSN.  The driver
		 * tracks the full MSN and flags an error on a mismatch.
		 */
		if (unlikely(!CQE_STATUS(hw_cqe) &&
			     CQE_WRID_MSN(hw_cqe) != wq->rq.msn)) {
			t4_set_wq_in_error(wq, 0);
			hw_cqe->header |= cpu_to_be32(CQE_STATUS_V(T4_ERR_MSN));
		}
		goto proc_cqe;
	}

	/*
	 * If we get here its a send completion.
	 *
	 * Handle out of order completion. These get stuffed
	 * in the SW SQ. Then the SW SQ will be walked to move any
	 * now in-order completions into the SW CQ.  This handles
	 * 2 cases:
	 *	1) reaping unsignaled WRs when the first subsequent
	 *	   signaled WR is completed.
	 *	2) out of order read completions.
	 */
	if (!SW_CQE(hw_cqe) && (CQE_WRID_SQ_IDX(hw_cqe) != wq->sq.cidx)) {
		struct t4_swsqe *swsqe;

		pr_debug("out of order completion going in sw_sq at idx %u\n",
			 CQE_WRID_SQ_IDX(hw_cqe));
		swsqe = &wq->sq.sw_sq[CQE_WRID_SQ_IDX(hw_cqe)];
		swsqe->cqe = *hw_cqe;
		swsqe->complete = 1;
		ret = -EAGAIN;
		goto flush_wq;
	}

proc_cqe:
	*cqe = *hw_cqe;

	/*
	 * Reap the associated WR(s) that are freed up with this
	 * completion.
	 */
	if (SQ_TYPE(hw_cqe)) {
		int idx = CQE_WRID_SQ_IDX(hw_cqe);

		/*
		 * Account for any unsignaled completions completed by
		 * this signaled completion.  In this case, cidx points
		 * to the first unsignaled one, and idx points to the
		 * signaled one.  So adjust in_use based on this delta.
		 * if this is not completing any unsigned wrs, then the
		 * delta will be 0. Handle wrapping also!
		 */
		if (idx < wq->sq.cidx)
			wq->sq.in_use -= wq->sq.size + idx - wq->sq.cidx;
		else
			wq->sq.in_use -= idx - wq->sq.cidx;

		wq->sq.cidx = (uint16_t)idx;
		pr_debug("completing sq idx %u\n", wq->sq.cidx);
		*cookie = wq->sq.sw_sq[wq->sq.cidx].wr_id;
		if (c4iw_wr_log)
			c4iw_log_wr_stats(wq, hw_cqe);
		t4_sq_consume(wq);
	} else {
		if (!srq) {
			pr_debug("completing rq idx %u\n", wq->rq.cidx);
			*cookie = wq->rq.sw_rq[wq->rq.cidx].wr_id;
			if (c4iw_wr_log)
				c4iw_log_wr_stats(wq, hw_cqe);
			t4_rq_consume(wq);
		} else {
			*cookie = reap_srq_cqe(hw_cqe, srq);
		}
		wq->rq.msn++;
		goto skip_cqe;
	}

flush_wq:
	/*
	 * Flush any completed cqes that are now in-order.
	 */
	flush_completed_wrs(wq, cq);

skip_cqe:
	if (SW_CQE(hw_cqe)) {
		pr_debug("cq %p cqid 0x%x skip sw cqe cidx %u\n",
			 cq, cq->cqid, cq->sw_cidx);
		t4_swcq_consume(cq);
	} else {
		pr_debug("cq %p cqid 0x%x skip hw cqe cidx %u\n",
			 cq, cq->cqid, cq->cidx);
		t4_hwcq_consume(cq);
	}
	return ret;
}
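
/*
 * __c4iw_poll_cq_one - poll one CQE off the CQ and map it to an ib_wc,
 * translating the T4 opcode and status into their ib_verbs equivalents.
 */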
static int __c4iw_poll_cq_one(struct c4iw_cq *chp, struct c4iw_qp *qhp,
			      struct ib_wc *wc, struct c4iw_srq *srq)
{
	struct t4_cqe cqe;
	struct t4_wq *wq = qhp ? &qhp->wq : NULL;
	u32 credit = 0;
	u8 cqe_flushed;
	u64 cookie = 0;
	int ret;

	ret = poll_cq(wq, &(chp->cq), &cqe, &cqe_flushed, &cookie, &credit,
		      srq ? &srq->wq : NULL);
	if (ret)
		goto out;

	wc->wr_id = cookie;
	wc->qp = qhp ? &qhp->ibqp : NULL;
	wc->vendor_err = CQE_STATUS(&cqe);
	wc->wc_flags = 0;

	/*
	 * Simulate a SRQ_LIMIT_REACHED HW notification if required.
	 */
	if (srq && !(srq->flags & T4_SRQ_LIMIT_SUPPORT) && srq->armed &&
	    srq->wq.in_use < srq->srq_limit)
		c4iw_dispatch_srq_limit_reached_event(srq);

	pr_debug("qpid 0x%x type %d opcode %d status 0x%x len %u wrid hi 0x%x lo 0x%x cookie 0x%llx\n",
		 CQE_QPID(&cqe),
		 CQE_TYPE(&cqe), CQE_OPCODE(&cqe),
		 CQE_STATUS(&cqe), CQE_LEN(&cqe),
		 CQE_WRID_HI(&cqe), CQE_WRID_LOW(&cqe),
		 (unsigned long long)cookie);

	if (CQE_TYPE(&cqe) == 0) {
		if (!CQE_STATUS(&cqe))
			wc->byte_len = CQE_LEN(&cqe);
		else
			wc->byte_len = 0;

		switch (CQE_OPCODE(&cqe)) {
		case FW_RI_SEND:
			wc->opcode = IB_WC_RECV;
			break;
		case FW_RI_SEND_WITH_INV:
		case FW_RI_SEND_WITH_SE_INV:
			wc->opcode = IB_WC_RECV;
			wc->ex.invalidate_rkey = CQE_WRID_STAG(&cqe);
			wc->wc_flags |= IB_WC_WITH_INVALIDATE;
			c4iw_invalidate_mr(qhp->rhp, wc->ex.invalidate_rkey);
			break;
		case FW_RI_WRITE_IMMEDIATE:
			wc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
			wc->ex.imm_data = CQE_IMM_DATA(&cqe);
			wc->wc_flags |= IB_WC_WITH_IMM;
			break;
		default:
			pr_err("Unexpected opcode %d in the CQE received for QPID=0x%0x\n",
			       CQE_OPCODE(&cqe), CQE_QPID(&cqe));
			ret = -EINVAL;
			goto out;
		}
	} else {
		switch (CQE_OPCODE(&cqe)) {
		case FW_RI_WRITE_IMMEDIATE:
		case FW_RI_RDMA_WRITE:
			wc->opcode = IB_WC_RDMA_WRITE;
			break;
		case FW_RI_READ_REQ:
			wc->opcode = IB_WC_RDMA_READ;
			wc->byte_len = CQE_LEN(&cqe);
			break;
		case FW_RI_SEND_WITH_INV:
		case FW_RI_SEND_WITH_SE_INV:
			wc->opcode = IB_WC_SEND;
			wc->wc_flags |= IB_WC_WITH_INVALIDATE;
			break;
		case FW_RI_SEND:
		case FW_RI_SEND_WITH_SE:
			wc->opcode = IB_WC_SEND;
			break;

		case FW_RI_LOCAL_INV:
			wc->opcode = IB_WC_LOCAL_INV;
			break;
		case FW_RI_FAST_REGISTER:
			wc->opcode = IB_WC_REG_MR;

			/* Invalidate the MR if the fastreg failed */
			if (CQE_STATUS(&cqe) != T4_ERR_SUCCESS)
				c4iw_invalidate_mr(qhp->rhp,
						   CQE_WRID_FR_STAG(&cqe));
			break;
		default:
			pr_err("Unexpected opcode %d in the CQE received for QPID=0x%0x\n",
			       CQE_OPCODE(&cqe), CQE_QPID(&cqe));
			ret = -EINVAL;
			goto out;
		}
	}

	if (cqe_flushed)
		wc->status = IB_WC_WR_FLUSH_ERR;
	else {

		switch (CQE_STATUS(&cqe)) {
		case T4_ERR_SUCCESS:
			wc->status = IB_WC_SUCCESS;
			break;
		case T4_ERR_STAG:
			wc->status = IB_WC_LOC_ACCESS_ERR;
			break;
		case T4_ERR_PDID:
			wc->status = IB_WC_LOC_PROT_ERR;
			break;
		case T4_ERR_QPID:
		case T4_ERR_ACCESS:
			wc->status = IB_WC_LOC_ACCESS_ERR;
			break;
		case T4_ERR_WRAP:
			wc->status = IB_WC_GENERAL_ERR;
			break;
		case T4_ERR_BOUND:
			wc->status = IB_WC_LOC_LEN_ERR;
			break;
		case T4_ERR_INVALIDATE_SHARED_MR:
		case T4_ERR_INVALIDATE_MR_WITH_MW_BOUND:
			wc->status = IB_WC_MW_BIND_ERR;
			break;
		case T4_ERR_CRC:
		case T4_ERR_MARKER:
		case T4_ERR_PDU_LEN_ERR:
		case T4_ERR_OUT_OF_RQE:
		case T4_ERR_DDP_VERSION:
		case T4_ERR_RDMA_VERSION:
		case T4_ERR_DDP_QUEUE_NUM:
		case T4_ERR_MSN:
		case T4_ERR_TBIT:
		case T4_ERR_MO:
		case T4_ERR_MSN_RANGE:
		case T4_ERR_IRD_OVERFLOW:
		case T4_ERR_OPCODE:
		case T4_ERR_INTERNAL_ERR:
			wc->status = IB_WC_FATAL_ERR;
			break;
		case T4_ERR_SWFLUSH:
			wc->status = IB_WC_WR_FLUSH_ERR;
			break;
		default:
			pr_err("Unexpected cqe_status 0x%x for QPID=0x%0x\n",
			       CQE_STATUS(&cqe), CQE_QPID(&cqe));
			wc->status = IB_WC_FATAL_ERR;
		}
	}
out:
	return ret;
}

/*
 * Get one cq entry from c4iw and map it to openib.
 *
 * Returns:
 *	0			cqe returned
 *	-ENODATA		EMPTY;
 *	-EAGAIN			caller must try again
 *	any other -errno	fatal error
 */
static int c4iw_poll_cq_one(struct c4iw_cq *chp, struct ib_wc *wc)
{
	struct c4iw_srq *srq = NULL;
	struct c4iw_qp *qhp = NULL;
	struct t4_cqe *rd_cqe;
	int ret;

	ret = t4_next_cqe(&chp->cq, &rd_cqe);

	if (ret)
		return ret;

	qhp = get_qhp(chp->rhp, CQE_QPID(rd_cqe));
	if (qhp) {
		spin_lock(&qhp->lock);
		srq = qhp->srq;
		if (srq)
			spin_lock(&srq->lock);
		ret = __c4iw_poll_cq_one(chp, qhp, wc, srq);
		spin_unlock(&qhp->lock);
		if (srq)
			spin_unlock(&srq->lock);
	} else {
		ret = __c4iw_poll_cq_one(chp, NULL, wc, NULL);
	}
	return ret;
}
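
/*
 * c4iw_poll_cq - ib_poll_cq() entry point.  Polls up to @num_entries
 * completions off the CQ under the CQ lock, retrying skipped CQEs.
 */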
int c4iw_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
{
	struct c4iw_cq *chp;
	unsigned long flags;
	int npolled;
	int err = 0;

	chp = to_c4iw_cq(ibcq);

	spin_lock_irqsave(&chp->lock, flags);
	for (npolled = 0; npolled < num_entries; ++npolled) {
		do {
			err = c4iw_poll_cq_one(chp, wc + npolled);
		} while (err == -EAGAIN);
		if (err)
			break;
	}
	spin_unlock_irqrestore(&chp->lock, flags);
	return !err || err == -ENODATA ? npolled : err;
}
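
/*
 * c4iw_destroy_cq - ib_destroy_cq() entry point.  Waits for all references
 * to drop, then tears down the hardware CQ and frees its resources.
 */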
int c4iw_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
{
	struct c4iw_cq *chp;
	struct c4iw_ucontext *ucontext;

	pr_debug("ib_cq %p\n", ib_cq);
	chp = to_c4iw_cq(ib_cq);

	xa_erase_irq(&chp->rhp->cqs, chp->cq.cqid);
	atomic_dec(&chp->refcnt);
	wait_event(chp->wait, !atomic_read(&chp->refcnt));

	ucontext = rdma_udata_to_drv_context(udata, struct c4iw_ucontext,
					     ibucontext);
	destroy_cq(&chp->rhp->rdev, &chp->cq,
		   ucontext ? &ucontext->uctx : &chp->cq.rdev->uctx,
		   chp->destroy_skb, chp->wr_waitp);
	c4iw_put_wr_wait(chp->wr_waitp);
	return 0;
}
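
/*
 * c4iw_create_cq - ib_create_cq() entry point.  Sizes the hardware IQ,
 * creates the CQ and, for user CQs, sets up the mmap entries for the
 * queue memory and the GTS doorbell page.
 */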
int c4iw_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
		   struct ib_udata *udata)
{
	struct ib_device *ibdev = ibcq->device;
	int entries = attr->cqe;
	int vector = attr->comp_vector;
	struct c4iw_dev *rhp = to_c4iw_dev(ibcq->device);
	struct c4iw_cq *chp = to_c4iw_cq(ibcq);
	struct c4iw_create_cq ucmd;
	struct c4iw_create_cq_resp uresp;
	int ret, wr_len;
	size_t memsize, hwentries;
	struct c4iw_mm_entry *mm, *mm2;
	struct c4iw_ucontext *ucontext = rdma_udata_to_drv_context(
		udata, struct c4iw_ucontext, ibucontext);

	pr_debug("ib_dev %p entries %d\n", ibdev, entries);
	if (attr->flags)
		return -EINVAL;

	if (vector >= rhp->rdev.lldi.nciq)
		return -EINVAL;

	if (udata) {
		if (udata->inlen < sizeof(ucmd))
			ucontext->is_32b_cqe = 1;
	}

	chp->wr_waitp = c4iw_alloc_wr_wait(GFP_KERNEL);
	if (!chp->wr_waitp) {
		ret = -ENOMEM;
		goto err_free_chp;
	}
	c4iw_init_wr_wait(chp->wr_waitp);

	wr_len = sizeof(struct fw_ri_res_wr) + sizeof(struct fw_ri_res);
	chp->destroy_skb = alloc_skb(wr_len, GFP_KERNEL);
	if (!chp->destroy_skb) {
		ret = -ENOMEM;
		goto err_free_wr_wait;
	}

	/* account for the status page. */
	entries++;

	/* IQ needs one extra entry to differentiate full vs empty. */
	entries++;

	/*
	 * entries must be multiple of 16 for HW.
	 */
	entries = roundup(entries, 16);

	/*
	 * Make actual HW queue 2x to avoid cidx_inc overflows.
	 */
	hwentries = min(entries * 2, rhp->rdev.hw_queue.t4_max_iq_size);

	/*
	 * Make HW queue at least 64 entries so GTS updates aren't too
	 * frequent.
	 */
	if (hwentries < 64)
		hwentries = 64;

	memsize = hwentries * ((ucontext && ucontext->is_32b_cqe) ?
			(sizeof(*chp->cq.queue) / 2) : sizeof(*chp->cq.queue));

	/*
	 * memsize must be a multiple of the page size if its a user cq.
	 */
	if (udata)
		memsize = roundup(memsize, PAGE_SIZE);

	chp->cq.size = hwentries;
	chp->cq.memsize = memsize;
	chp->cq.vector = vector;

	ret = create_cq(&rhp->rdev, &chp->cq,
			ucontext ? &ucontext->uctx : &rhp->rdev.uctx,
			chp->wr_waitp);
	if (ret)
		goto err_free_skb;

	chp->rhp = rhp;
	chp->cq.size--;				/* status page */
	chp->ibcq.cqe = entries - 2;
	spin_lock_init(&chp->lock);
	spin_lock_init(&chp->comp_handler_lock);
	atomic_set(&chp->refcnt, 1);
	init_waitqueue_head(&chp->wait);
	ret = xa_insert_irq(&rhp->cqs, chp->cq.cqid, chp, GFP_KERNEL);
	if (ret)
		goto err_destroy_cq;

	if (ucontext) {
		ret = -ENOMEM;
		mm = kmalloc(sizeof(*mm), GFP_KERNEL);
		if (!mm)
			goto err_remove_handle;
		mm2 = kmalloc(sizeof(*mm2), GFP_KERNEL);
		if (!mm2)
			goto err_free_mm;

		memset(&uresp, 0, sizeof(uresp));
		uresp.qid_mask = rhp->rdev.cqmask;
		uresp.cqid = chp->cq.cqid;
		uresp.size = chp->cq.size;
		uresp.memsize = chp->cq.memsize;
		spin_lock(&ucontext->mmap_lock);
		uresp.key = ucontext->key;
		ucontext->key += PAGE_SIZE;
		uresp.gts_key = ucontext->key;
		ucontext->key += PAGE_SIZE;
		/* communicate to the userspace that
		 * kernel driver supports 64B CQE
		 */
		uresp.flags |= C4IW_64B_CQE;

		spin_unlock(&ucontext->mmap_lock);
		ret = ib_copy_to_udata(udata, &uresp,
				       ucontext->is_32b_cqe ?
				       sizeof(uresp) - sizeof(uresp.flags) :
				       sizeof(uresp));
		if (ret)
			goto err_free_mm2;

		mm->key = uresp.key;
		mm->addr = virt_to_phys(chp->cq.queue);
		mm->len = chp->cq.memsize;
		insert_mmap(ucontext, mm);

		mm2->key = uresp.gts_key;
		mm2->addr = chp->cq.bar2_pa;
		mm2->len = PAGE_SIZE;
		insert_mmap(ucontext, mm2);
	}

	pr_debug("cqid 0x%0x chp %p size %u memsize %zu, dma_addr %pad\n",
		 chp->cq.cqid, chp, chp->cq.size, chp->cq.memsize,
		 &chp->cq.dma_addr);
	return 0;
err_free_mm2:
	kfree(mm2);
err_free_mm:
	kfree(mm);
err_remove_handle:
	xa_erase_irq(&rhp->cqs, chp->cq.cqid);
err_destroy_cq:
	destroy_cq(&chp->rhp->rdev, &chp->cq,
		   ucontext ? &ucontext->uctx : &rhp->rdev.uctx,
		   chp->destroy_skb, chp->wr_waitp);
err_free_skb:
	kfree_skb(chp->destroy_skb);
err_free_wr_wait:
	c4iw_put_wr_wait(chp->wr_waitp);
err_free_chp:
	return ret;
}
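
/*
 * c4iw_arm_cq - ib_req_notify_cq() entry point.  Arms the CQ for the
 * requested notification type and optionally reports missed events.
 */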
int c4iw_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
{
	struct c4iw_cq *chp;
	int ret = 0;
	unsigned long flag;

	chp = to_c4iw_cq(ibcq);
	spin_lock_irqsave(&chp->lock, flag);
	t4_arm_cq(&chp->cq,
		  (flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED);
	if (flags & IB_CQ_REPORT_MISSED_EVENTS)
		ret = t4_cq_notempty(&chp->cq);
	spin_unlock_irqrestore(&chp->lock, flag);
	return ret;
}
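
/*
 * c4iw_flush_srqidx - insert a flush CQE for the given SRQ index on the
 * QP's receive CQ.
 */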
void c4iw_flush_srqidx(struct c4iw_qp *qhp, u32 srqidx)
{
	struct c4iw_cq *rchp = to_c4iw_cq(qhp->ibqp.recv_cq);
	unsigned long flag;

	/* locking hierarchy: cq lock first, then qp lock. */
	spin_lock_irqsave(&rchp->lock, flag);
	spin_lock(&qhp->lock);

	/* create a SRQ RECV CQE for srqidx */
	insert_recv_cqe(&qhp->wq, &rchp->cq, srqidx);

	spin_unlock(&qhp->lock);
	spin_unlock_irqrestore(&rchp->lock, flag);
}