#include <linux/mlx4/cq.h>
#include <linux/mlx4/qp.h>
#include <linux/slab.h>

#include "mlx4_ib.h"
#include "user.h"

static void mlx4_ib_cq_comp(struct mlx4_cq *cq)
{
	struct ib_cq *ibcq = &to_mibcq(cq)->ibcq;
	ibcq->comp_handler(ibcq, ibcq->cq_context);
}

static void mlx4_ib_cq_event(struct mlx4_cq *cq, enum mlx4_event type)
{
	struct ib_event event;
	struct ib_cq *ibcq;

	if (type != MLX4_EVENT_TYPE_CQ_ERROR) {
		pr_warn("Unexpected event type %d "
			"on CQ %06x\n", type, cq->cqn);
		return;
	}

	ibcq = &to_mibcq(cq)->ibcq;
	if (ibcq->event_handler) {
		event.device = ibcq->device;
		event.event = IB_EVENT_CQ_ERR;
		event.element.cq = ibcq;
		ibcq->event_handler(&event, ibcq->cq_context);
	}
}

static void *get_cqe_from_buf(struct mlx4_ib_cq_buf *buf, int n)
{
	return mlx4_buf_offset(&buf->buf, n * sizeof (struct mlx4_cqe));
}

static void *get_cqe(struct mlx4_ib_cq *cq, int n)
{
	return get_cqe_from_buf(&cq->buf, n);
}

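/*
 * A CQE is owned by software only when its ownership bit matches the
 * parity of the current pass over the ring, i.e. the bit of n selected
 * by the ring size (cq->ibcq.cqe + 1).  When the XOR below is non-zero
 * the slot still belongs to hardware and NULL is returned.
 */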
static void *get_sw_cqe(struct mlx4_ib_cq *cq, int n)
{
	struct mlx4_cqe *cqe = get_cqe(cq, n & cq->ibcq.cqe);

	return (!!(cqe->owner_sr_opcode & MLX4_CQE_OWNER_MASK) ^
		!!(n & (cq->ibcq.cqe + 1))) ? NULL : cqe;
}

static struct mlx4_cqe *next_cqe_sw(struct mlx4_ib_cq *cq)
{
	return get_sw_cqe(cq, cq->mcq.cons_index);
}

int mlx4_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
{
	struct mlx4_ib_cq *mcq = to_mcq(cq);
	struct mlx4_ib_dev *dev = to_mdev(cq->device);

	return mlx4_cq_modify(dev->dev, &mcq->mcq, cq_count, cq_period);
}

static int mlx4_ib_alloc_cq_buf(struct mlx4_ib_dev *dev, struct mlx4_ib_cq_buf *buf, int nent)
{
	int err;

	err = mlx4_buf_alloc(dev->dev, nent * sizeof(struct mlx4_cqe),
			     PAGE_SIZE * 2, &buf->buf);
	if (err)
		goto out;

	err = mlx4_mtt_init(dev->dev, buf->buf.npages, buf->buf.page_shift,
			    &buf->mtt);
	if (err)
		goto err_buf;

	err = mlx4_buf_write_mtt(dev->dev, &buf->mtt, &buf->buf);
	if (err)
		goto err_mtt;

	return 0;

err_mtt:
	mlx4_mtt_cleanup(dev->dev, &buf->mtt);

err_buf:
	mlx4_buf_free(dev->dev, nent * sizeof(struct mlx4_cqe),
		      &buf->buf);

out:
	return err;
}

static void mlx4_ib_free_cq_buf(struct mlx4_ib_dev *dev, struct mlx4_ib_cq_buf *buf, int cqe)
{
	mlx4_buf_free(dev->dev, (cqe + 1) * sizeof(struct mlx4_cqe), &buf->buf);
}

static int mlx4_ib_get_cq_umem(struct mlx4_ib_dev *dev, struct ib_ucontext *context,
			       struct mlx4_ib_cq_buf *buf, struct ib_umem **umem,
			       u64 buf_addr, int cqe)
{
	int err;

	*umem = ib_umem_get(context, buf_addr, cqe * sizeof (struct mlx4_cqe),
			    IB_ACCESS_LOCAL_WRITE, 1);
	if (IS_ERR(*umem))
		return PTR_ERR(*umem);

	err = mlx4_mtt_init(dev->dev, ib_umem_page_count(*umem),
			    ilog2((*umem)->page_size), &buf->mtt);
	if (err)
		goto err_buf;

	err = mlx4_ib_umem_write_mtt(dev, &buf->mtt, *umem);
	if (err)
		goto err_mtt;

	return 0;

err_mtt:
	mlx4_mtt_cleanup(dev->dev, &buf->mtt);

err_buf:
	ib_umem_release(*umem);

	return err;
}

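/*
 * Create a CQ with at least @entries entries.  The ring is sized to
 * roundup_pow_of_two(entries + 1) and entries - 1 is reported back as
 * ibcq.cqe; the CQE buffer and doorbell record come from user memory
 * when a ucontext is supplied and from kernel memory otherwise, and
 * the CQ is finally installed in the HCA with mlx4_cq_alloc().
 */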
struct ib_cq *mlx4_ib_create_cq(struct ib_device *ibdev, int entries, int vector,
				struct ib_ucontext *context,
				struct ib_udata *udata)
{
	struct mlx4_ib_dev *dev = to_mdev(ibdev);
	struct mlx4_ib_cq *cq;
	struct mlx4_uar *uar;
	int err;

	if (entries < 1 || entries > dev->dev->caps.max_cqes)
		return ERR_PTR(-EINVAL);

	cq = kmalloc(sizeof *cq, GFP_KERNEL);
	if (!cq)
		return ERR_PTR(-ENOMEM);

	entries = roundup_pow_of_two(entries + 1);
	cq->ibcq.cqe = entries - 1;
	mutex_init(&cq->resize_mutex);
	spin_lock_init(&cq->lock);
	cq->resize_buf = NULL;
	cq->resize_umem = NULL;

	if (context) {
		struct mlx4_ib_create_cq ucmd;

		if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd)) {
			err = -EFAULT;
			goto err_cq;
		}

		err = mlx4_ib_get_cq_umem(dev, context, &cq->buf, &cq->umem,
					  ucmd.buf_addr, entries);
		if (err)
			goto err_cq;

		err = mlx4_ib_db_map_user(to_mucontext(context), ucmd.db_addr,
					  &cq->db);
		if (err)
			goto err_mtt;

		uar = &to_mucontext(context)->uar;
	} else {
		err = mlx4_db_alloc(dev->dev, &cq->db, 1);
		if (err)
			goto err_cq;

		cq->mcq.set_ci_db = cq->db.db;
		cq->mcq.arm_db = cq->db.db + 1;
		*cq->mcq.set_ci_db = 0;
		*cq->mcq.arm_db = 0;

		err = mlx4_ib_alloc_cq_buf(dev, &cq->buf, entries);
		if (err)
			goto err_db;

		uar = &dev->priv_uar;
	}

	if (dev->eq_table)
		vector = dev->eq_table[vector % ibdev->num_comp_vectors];

	err = mlx4_cq_alloc(dev->dev, entries, &cq->buf.mtt, uar,
			    cq->db.dma, &cq->mcq, vector, 0);
	if (err)
		goto err_dbmap;

	cq->mcq.comp = mlx4_ib_cq_comp;
	cq->mcq.event = mlx4_ib_cq_event;

	if (context)
		if (ib_copy_to_udata(udata, &cq->mcq.cqn, sizeof (__u32))) {
			err = -EFAULT;
			goto err_dbmap;
		}

	return &cq->ibcq;

err_dbmap:
	if (context)
		mlx4_ib_db_unmap_user(to_mucontext(context), &cq->db);

err_mtt:
	mlx4_mtt_cleanup(dev->dev, &cq->buf.mtt);

	if (context)
		ib_umem_release(cq->umem);
	else
		mlx4_ib_free_cq_buf(dev, &cq->buf, cq->ibcq.cqe);

err_db:
	if (!context)
		mlx4_db_free(dev->dev, &cq->db);

err_cq:
	kfree(cq);

	return ERR_PTR(err);
}

static int mlx4_alloc_resize_buf(struct mlx4_ib_dev *dev, struct mlx4_ib_cq *cq,
				 int entries)
{
	int err;

	if (cq->resize_buf)
		return -EBUSY;

	cq->resize_buf = kmalloc(sizeof *cq->resize_buf, GFP_ATOMIC);
	if (!cq->resize_buf)
		return -ENOMEM;

	err = mlx4_ib_alloc_cq_buf(dev, &cq->resize_buf->buf, entries);
	if (err) {
		kfree(cq->resize_buf);
		cq->resize_buf = NULL;
		return err;
	}

	cq->resize_buf->cqe = entries - 1;

	return 0;
}

static int mlx4_alloc_resize_umem(struct mlx4_ib_dev *dev, struct mlx4_ib_cq *cq,
				  int entries, struct ib_udata *udata)
{
	struct mlx4_ib_resize_cq ucmd;
	int err;

	if (cq->resize_umem)
		return -EBUSY;

	if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd))
		return -EFAULT;

	cq->resize_buf = kmalloc(sizeof *cq->resize_buf, GFP_ATOMIC);
	if (!cq->resize_buf)
		return -ENOMEM;

	err = mlx4_ib_get_cq_umem(dev, cq->umem->context, &cq->resize_buf->buf,
				  &cq->resize_umem, ucmd.buf_addr, entries);
	if (err) {
		kfree(cq->resize_buf);
		cq->resize_buf = NULL;
		return err;
	}

	cq->resize_buf->cqe = entries - 1;

	return 0;
}

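/*
 * Count the CQEs between the current consumer index and the first
 * entry still owned by hardware, i.e. completions that have been
 * reported but not yet polled.
 */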
static int mlx4_ib_get_outstanding_cqes(struct mlx4_ib_cq *cq)
{
	u32 i;

	i = cq->mcq.cons_index;
	while (get_sw_cqe(cq, i & cq->ibcq.cqe))
		++i;

	return i - cq->mcq.cons_index;
}

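/*
 * Copy every outstanding CQE from the old buffer into the resize
 * buffer, stopping at the CQE carrying the RESIZE opcode.  The owner
 * bit of each copied entry is rewritten to match the parity of its
 * index in the new (possibly different sized) ring.
 */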
static void mlx4_ib_cq_resize_copy_cqes(struct mlx4_ib_cq *cq)
{
	struct mlx4_cqe *cqe, *new_cqe;
	int i;

	i = cq->mcq.cons_index;
	cqe = get_cqe(cq, i & cq->ibcq.cqe);
	while ((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) != MLX4_CQE_OPCODE_RESIZE) {
		new_cqe = get_cqe_from_buf(&cq->resize_buf->buf,
					   (i + 1) & cq->resize_buf->cqe);
		memcpy(new_cqe, get_cqe(cq, i & cq->ibcq.cqe), sizeof(struct mlx4_cqe));
		new_cqe->owner_sr_opcode = (cqe->owner_sr_opcode & ~MLX4_CQE_OWNER_MASK) |
			(((i + 1) & (cq->resize_buf->cqe + 1)) ? MLX4_CQE_OWNER_MASK : 0);
		cqe = get_cqe(cq, ++i & cq->ibcq.cqe);
	}
	++cq->mcq.cons_index;
}

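/*
 * Resize a CQ: allocate a new buffer (user memory or kernel memory),
 * ask the HCA to switch to it with mlx4_cq_resize(), and then adopt
 * the new buffer.  For kernel CQs the outstanding CQEs are copied over
 * under cq->lock unless the polling path already handled the RESIZE
 * CQE and performed the switch itself.
 */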
int mlx4_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
{
	struct mlx4_ib_dev *dev = to_mdev(ibcq->device);
	struct mlx4_ib_cq *cq = to_mcq(ibcq);
	struct mlx4_mtt mtt;
	int outst_cqe;
	int err;

	mutex_lock(&cq->resize_mutex);

	if (entries < 1 || entries > dev->dev->caps.max_cqes) {
		err = -EINVAL;
		goto out;
	}

	entries = roundup_pow_of_two(entries + 1);
	if (entries == ibcq->cqe + 1) {
		err = 0;
		goto out;
	}

	if (ibcq->uobject) {
		err = mlx4_alloc_resize_umem(dev, cq, entries, udata);
		if (err)
			goto out;
	} else {
		/* Can't be smaller than the number of outstanding CQEs */
		outst_cqe = mlx4_ib_get_outstanding_cqes(cq);
		if (entries < outst_cqe + 1) {
			err = 0;
			goto out;
		}

		err = mlx4_alloc_resize_buf(dev, cq, entries);
		if (err)
			goto out;
	}

	mtt = cq->buf.mtt;

	err = mlx4_cq_resize(dev->dev, &cq->mcq, entries, &cq->resize_buf->buf.mtt);
	if (err)
		goto err_buf;

	mlx4_mtt_cleanup(dev->dev, &mtt);
	if (ibcq->uobject) {
		cq->buf = cq->resize_buf->buf;
		cq->ibcq.cqe = cq->resize_buf->cqe;
		ib_umem_release(cq->umem);
		cq->umem = cq->resize_umem;

		kfree(cq->resize_buf);
		cq->resize_buf = NULL;
		cq->resize_umem = NULL;
	} else {
		struct mlx4_ib_cq_buf tmp_buf;
		int tmp_cqe = 0;

		spin_lock_irq(&cq->lock);
		if (cq->resize_buf) {
			mlx4_ib_cq_resize_copy_cqes(cq);
			tmp_buf = cq->buf;
			tmp_cqe = cq->ibcq.cqe;
			cq->buf = cq->resize_buf->buf;
			cq->ibcq.cqe = cq->resize_buf->cqe;

			kfree(cq->resize_buf);
			cq->resize_buf = NULL;
		}
		spin_unlock_irq(&cq->lock);

		if (tmp_cqe)
			mlx4_ib_free_cq_buf(dev, &tmp_buf, tmp_cqe);
	}

	goto out;

err_buf:
	mlx4_mtt_cleanup(dev->dev, &cq->resize_buf->buf.mtt);
	if (!ibcq->uobject)
		mlx4_ib_free_cq_buf(dev, &cq->resize_buf->buf,
				    cq->resize_buf->cqe);

	kfree(cq->resize_buf);
	cq->resize_buf = NULL;

	if (cq->resize_umem) {
		ib_umem_release(cq->resize_umem);
		cq->resize_umem = NULL;
	}

out:
	mutex_unlock(&cq->resize_mutex);
	return err;
}

int mlx4_ib_destroy_cq(struct ib_cq *cq)
{
	struct mlx4_ib_dev *dev = to_mdev(cq->device);
	struct mlx4_ib_cq *mcq = to_mcq(cq);

	mlx4_cq_free(dev->dev, &mcq->mcq);
	mlx4_mtt_cleanup(dev->dev, &mcq->buf.mtt);

	if (cq->uobject) {
		mlx4_ib_db_unmap_user(to_mucontext(cq->uobject->context), &mcq->db);
		ib_umem_release(mcq->umem);
	} else {
		mlx4_ib_free_cq_buf(dev, &mcq->buf, cq->cqe);
		mlx4_db_free(dev->dev, &mcq->db);
	}

	kfree(mcq);

	return 0;
}

static void dump_cqe(void *cqe)
{
	__be32 *buf = cqe;

	pr_debug("CQE contents %08x %08x %08x %08x %08x %08x %08x %08x\n",
		 be32_to_cpu(buf[0]), be32_to_cpu(buf[1]), be32_to_cpu(buf[2]),
		 be32_to_cpu(buf[3]), be32_to_cpu(buf[4]), be32_to_cpu(buf[5]),
		 be32_to_cpu(buf[6]), be32_to_cpu(buf[7]));
}

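/*
 * Translate the hardware completion-with-error syndrome into the
 * corresponding ib_wc_status value and report the vendor syndrome.
 */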
static void mlx4_ib_handle_error_cqe(struct mlx4_err_cqe *cqe,
				     struct ib_wc *wc)
{
	if (cqe->syndrome == MLX4_CQE_SYNDROME_LOCAL_QP_OP_ERR) {
		pr_debug("local QP operation err "
			 "(QPN %06x, WQE index %x, vendor syndrome %02x, "
			 "opcode = %02x)\n",
			 be32_to_cpu(cqe->my_qpn), be16_to_cpu(cqe->wqe_index),
			 cqe->vendor_err_syndrome,
			 cqe->owner_sr_opcode & ~MLX4_CQE_OWNER_MASK);
		dump_cqe(cqe);
	}

	switch (cqe->syndrome) {
	case MLX4_CQE_SYNDROME_LOCAL_LENGTH_ERR:
		wc->status = IB_WC_LOC_LEN_ERR;
		break;
	case MLX4_CQE_SYNDROME_LOCAL_QP_OP_ERR:
		wc->status = IB_WC_LOC_QP_OP_ERR;
		break;
	case MLX4_CQE_SYNDROME_LOCAL_PROT_ERR:
		wc->status = IB_WC_LOC_PROT_ERR;
		break;
	case MLX4_CQE_SYNDROME_WR_FLUSH_ERR:
		wc->status = IB_WC_WR_FLUSH_ERR;
		break;
	case MLX4_CQE_SYNDROME_MW_BIND_ERR:
		wc->status = IB_WC_MW_BIND_ERR;
		break;
	case MLX4_CQE_SYNDROME_BAD_RESP_ERR:
		wc->status = IB_WC_BAD_RESP_ERR;
		break;
	case MLX4_CQE_SYNDROME_LOCAL_ACCESS_ERR:
		wc->status = IB_WC_LOC_ACCESS_ERR;
		break;
	case MLX4_CQE_SYNDROME_REMOTE_INVAL_REQ_ERR:
		wc->status = IB_WC_REM_INV_REQ_ERR;
		break;
	case MLX4_CQE_SYNDROME_REMOTE_ACCESS_ERR:
		wc->status = IB_WC_REM_ACCESS_ERR;
		break;
	case MLX4_CQE_SYNDROME_REMOTE_OP_ERR:
		wc->status = IB_WC_REM_OP_ERR;
		break;
	case MLX4_CQE_SYNDROME_TRANSPORT_RETRY_EXC_ERR:
		wc->status = IB_WC_RETRY_EXC_ERR;
		break;
	case MLX4_CQE_SYNDROME_RNR_RETRY_EXC_ERR:
		wc->status = IB_WC_RNR_RETRY_EXC_ERR;
		break;
	case MLX4_CQE_SYNDROME_REMOTE_ABORTED_ERR:
		wc->status = IB_WC_REM_ABORT_ERR;
		break;
	default:
		wc->status = IB_WC_GENERAL_ERR;
		break;
	}

	wc->vendor_err = cqe->vendor_err_syndrome;
}

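/*
 * Checksum offload is reported as OK only for a non-fragmented IPv4
 * packet without IP options (and not IPv6) that carries TCP or UDP,
 * has the IPOK status bit set, and whose checksum field is 0xffff.
 */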
static int mlx4_ib_ipoib_csum_ok(__be16 status, __be16 checksum)
{
	return ((status & cpu_to_be16(MLX4_CQE_STATUS_IPV4 |
				      MLX4_CQE_STATUS_IPV4F |
				      MLX4_CQE_STATUS_IPV4OPT |
				      MLX4_CQE_STATUS_IPV6 |
				      MLX4_CQE_STATUS_IPOK)) ==
		cpu_to_be16(MLX4_CQE_STATUS_IPV4 |
			    MLX4_CQE_STATUS_IPOK)) &&
		(status & cpu_to_be16(MLX4_CQE_STATUS_UDP |
				      MLX4_CQE_STATUS_TCP)) &&
		checksum == cpu_to_be16(0xffff);
}

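/*
 * Process a single CQE: advance the consumer index, resolve the QP it
 * belongs to, fill in the ib_wc, and consume the matching send,
 * receive or SRQ work-request slot.  Returns -EAGAIN when no
 * software-owned CQE is available.
 */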
static int mlx4_ib_poll_one(struct mlx4_ib_cq *cq,
			    struct mlx4_ib_qp **cur_qp,
			    struct ib_wc *wc)
{
	struct mlx4_cqe *cqe;
	struct mlx4_qp *mqp;
	struct mlx4_ib_wq *wq;
	struct mlx4_ib_srq *srq;
	int is_send;
	int is_error;
	u32 g_mlpath_rqpn;
	u16 wqe_ctr;

repoll:
	cqe = next_cqe_sw(cq);
	if (!cqe)
		return -EAGAIN;

	++cq->mcq.cons_index;

	/*
	 * Make sure we read CQ entry contents after we've checked the
	 * ownership bit.
	 */
	rmb();

	is_send = cqe->owner_sr_opcode & MLX4_CQE_IS_SEND_MASK;
	is_error = (cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) ==
		MLX4_CQE_OPCODE_ERROR;

	if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) == MLX4_OPCODE_NOP &&
		     is_send)) {
		pr_warn("Completion for NOP opcode detected!\n");
		return -EINVAL;
	}

	/* Resize CQ in progress */
	if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) == MLX4_CQE_OPCODE_RESIZE)) {
		if (cq->resize_buf) {
			struct mlx4_ib_dev *dev = to_mdev(cq->ibcq.device);

			mlx4_ib_free_cq_buf(dev, &cq->buf, cq->ibcq.cqe);
			cq->buf = cq->resize_buf->buf;
			cq->ibcq.cqe = cq->resize_buf->cqe;

			kfree(cq->resize_buf);
			cq->resize_buf = NULL;
		}

		goto repoll;
	}

	if (!*cur_qp ||
	    (be32_to_cpu(cqe->vlan_my_qpn) & MLX4_CQE_QPN_MASK) != (*cur_qp)->mqp.qpn) {
		/*
		 * We do not have to take the QP table lock here,
		 * because CQs will be locked while QPs are removed
		 * from the table.
		 */
		mqp = __mlx4_qp_lookup(to_mdev(cq->ibcq.device)->dev,
				       be32_to_cpu(cqe->vlan_my_qpn));
		if (unlikely(!mqp)) {
			pr_warn("CQ %06x with entry for unknown QPN %06x\n",
				cq->mcq.cqn, be32_to_cpu(cqe->vlan_my_qpn) & MLX4_CQE_QPN_MASK);
			return -EINVAL;
		}

		*cur_qp = to_mibqp(mqp);
	}

	wc->qp = &(*cur_qp)->ibqp;

	if (is_send) {
		wq = &(*cur_qp)->sq;
		if (!(*cur_qp)->sq_signal_bits) {
			wqe_ctr = be16_to_cpu(cqe->wqe_index);
			wq->tail += (u16) (wqe_ctr - (u16) wq->tail);
		}
		wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
		++wq->tail;
	} else if ((*cur_qp)->ibqp.srq) {
		srq = to_msrq((*cur_qp)->ibqp.srq);
		wqe_ctr = be16_to_cpu(cqe->wqe_index);
		wc->wr_id = srq->wrid[wqe_ctr];
		mlx4_ib_free_srq_wqe(srq, wqe_ctr);
	} else {
		wq = &(*cur_qp)->rq;
		wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
		++wq->tail;
	}

	if (unlikely(is_error)) {
		mlx4_ib_handle_error_cqe((struct mlx4_err_cqe *) cqe, wc);
		return 0;
	}

	wc->status = IB_WC_SUCCESS;

	if (is_send) {
		wc->wc_flags = 0;
		switch (cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) {
		case MLX4_OPCODE_RDMA_WRITE_IMM:
			wc->wc_flags |= IB_WC_WITH_IMM;
		case MLX4_OPCODE_RDMA_WRITE:
			wc->opcode = IB_WC_RDMA_WRITE;
			break;
		case MLX4_OPCODE_SEND_IMM:
			wc->wc_flags |= IB_WC_WITH_IMM;
		case MLX4_OPCODE_SEND:
		case MLX4_OPCODE_SEND_INVAL:
			wc->opcode = IB_WC_SEND;
			break;
		case MLX4_OPCODE_RDMA_READ:
			wc->opcode = IB_WC_RDMA_READ;
			wc->byte_len = be32_to_cpu(cqe->byte_cnt);
			break;
		case MLX4_OPCODE_ATOMIC_CS:
			wc->opcode = IB_WC_COMP_SWAP;
			wc->byte_len = 8;
			break;
		case MLX4_OPCODE_ATOMIC_FA:
			wc->opcode = IB_WC_FETCH_ADD;
			wc->byte_len = 8;
			break;
		case MLX4_OPCODE_MASKED_ATOMIC_CS:
			wc->opcode = IB_WC_MASKED_COMP_SWAP;
			wc->byte_len = 8;
			break;
		case MLX4_OPCODE_MASKED_ATOMIC_FA:
			wc->opcode = IB_WC_MASKED_FETCH_ADD;
			wc->byte_len = 8;
			break;
		case MLX4_OPCODE_BIND_MW:
			wc->opcode = IB_WC_BIND_MW;
			break;
		case MLX4_OPCODE_LSO:
			wc->opcode = IB_WC_LSO;
			break;
		case MLX4_OPCODE_FMR:
			wc->opcode = IB_WC_FAST_REG_MR;
			break;
		case MLX4_OPCODE_LOCAL_INVAL:
			wc->opcode = IB_WC_LOCAL_INV;
			break;
		}
	} else {
		wc->byte_len = be32_to_cpu(cqe->byte_cnt);

		switch (cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) {
		case MLX4_RECV_OPCODE_RDMA_WRITE_IMM:
			wc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
			wc->wc_flags = IB_WC_WITH_IMM;
			wc->ex.imm_data = cqe->immed_rss_invalid;
			break;
		case MLX4_RECV_OPCODE_SEND_INVAL:
			wc->opcode = IB_WC_RECV;
			wc->wc_flags = IB_WC_WITH_INVALIDATE;
			wc->ex.invalidate_rkey = be32_to_cpu(cqe->immed_rss_invalid);
			break;
		case MLX4_RECV_OPCODE_SEND:
			wc->opcode = IB_WC_RECV;
			wc->wc_flags = 0;
			break;
		case MLX4_RECV_OPCODE_SEND_IMM:
			wc->opcode = IB_WC_RECV;
			wc->wc_flags = IB_WC_WITH_IMM;
			wc->ex.imm_data = cqe->immed_rss_invalid;
			break;
		}

		wc->slid = be16_to_cpu(cqe->rlid);
		g_mlpath_rqpn = be32_to_cpu(cqe->g_mlpath_rqpn);
		wc->src_qp = g_mlpath_rqpn & 0xffffff;
		wc->dlid_path_bits = (g_mlpath_rqpn >> 24) & 0x7f;
		wc->wc_flags |= g_mlpath_rqpn & 0x80000000 ? IB_WC_GRH : 0;
		wc->pkey_index = be32_to_cpu(cqe->immed_rss_invalid) & 0x7f;
		wc->wc_flags |= mlx4_ib_ipoib_csum_ok(cqe->status,
				cqe->checksum) ? IB_WC_IP_CSUM_OK : 0;
		if (rdma_port_get_link_layer(wc->qp->device,
				(*cur_qp)->port) == IB_LINK_LAYER_ETHERNET)
			wc->sl = be16_to_cpu(cqe->sl_vid) >> 13;
		else
			wc->sl = be16_to_cpu(cqe->sl_vid) >> 12;
	}

	return 0;
}

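/*
 * Poll up to @num_entries completions while holding cq->lock, then
 * publish the new consumer index with mlx4_cq_set_ci() and return the
 * number of work completions written to @wc.
 */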
int mlx4_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
{
	struct mlx4_ib_cq *cq = to_mcq(ibcq);
	struct mlx4_ib_qp *cur_qp = NULL;
	unsigned long flags;
	int npolled;
	int err = 0;

	spin_lock_irqsave(&cq->lock, flags);

	for (npolled = 0; npolled < num_entries; ++npolled) {
		err = mlx4_ib_poll_one(cq, &cur_qp, wc + npolled);
		if (err)
			break;
	}

	mlx4_cq_set_ci(&cq->mcq);

	spin_unlock_irqrestore(&cq->lock, flags);

	if (err == 0 || err == -EAGAIN)
		return npolled;
	else
		return err;
}

int mlx4_ib_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
{
	mlx4_cq_arm(&to_mcq(ibcq)->mcq,
		    (flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED ?
		    MLX4_CQ_DB_REQ_NOT_SOL : MLX4_CQ_DB_REQ_NOT,
		    to_mdev(ibcq->device)->uar_map,
		    MLX4_GET_DOORBELL_LOCK(&to_mdev(ibcq->device)->uar_lock));

	return 0;
}

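/*
 * Remove every CQE that belongs to @qpn, compacting the remaining
 * entries and, for receive completions on an SRQ, returning the WQEs
 * to the SRQ.  Runs with cq->lock held; mlx4_ib_cq_clean() below is
 * the locking wrapper.
 */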
void __mlx4_ib_cq_clean(struct mlx4_ib_cq *cq, u32 qpn, struct mlx4_ib_srq *srq)
{
	u32 prod_index;
	int nfreed = 0;
	struct mlx4_cqe *cqe, *dest;
	u8 owner_bit;

	/*
	 * First we need to find the current producer index, so we
	 * know where to start cleaning from.  It doesn't matter if HW
	 * adds new entries after this loop -- the QP we're worried
	 * about is already in RESET, so the new entries won't come
	 * from our QP and therefore don't need to be checked.
	 */
	for (prod_index = cq->mcq.cons_index; get_sw_cqe(cq, prod_index); ++prod_index)
		if (prod_index == cq->mcq.cons_index + cq->ibcq.cqe)
			break;

	/*
	 * Now sweep backwards through the CQ, removing CQ entries
	 * that match our QP by copying older entries on top of them.
	 */
	while ((int) --prod_index - (int) cq->mcq.cons_index >= 0) {
		cqe = get_cqe(cq, prod_index & cq->ibcq.cqe);
		if ((be32_to_cpu(cqe->vlan_my_qpn) & MLX4_CQE_QPN_MASK) == qpn) {
			if (srq && !(cqe->owner_sr_opcode & MLX4_CQE_IS_SEND_MASK))
				mlx4_ib_free_srq_wqe(srq, be16_to_cpu(cqe->wqe_index));
			++nfreed;
		} else if (nfreed) {
			dest = get_cqe(cq, (prod_index + nfreed) & cq->ibcq.cqe);
			owner_bit = dest->owner_sr_opcode & MLX4_CQE_OWNER_MASK;
			memcpy(dest, cqe, sizeof *cqe);
			dest->owner_sr_opcode = owner_bit |
				(dest->owner_sr_opcode & ~MLX4_CQE_OWNER_MASK);
		}
	}

	if (nfreed) {
		cq->mcq.cons_index += nfreed;
		/*
		 * Make sure update of buffer contents is done before
		 * updating consumer index.
		 */
		wmb();
		mlx4_cq_set_ci(&cq->mcq);
	}
}

void mlx4_ib_cq_clean(struct mlx4_ib_cq *cq, u32 qpn, struct mlx4_ib_srq *srq)
{
	spin_lock_irq(&cq->lock);
	__mlx4_ib_cq_clean(cq, qpn, srq);
	spin_unlock_irq(&cq->lock);
}