#include <linux/mlx4/cq.h>
#include <linux/mlx4/qp.h>
#include <linux/mlx4/srq.h>
#include <linux/slab.h>

#include "mlx4_ib.h"
#include <rdma/mlx4-abi.h>

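/*
 * Callbacks registered with the mlx4 core: mlx4_ib_cq_comp() forwards a
 * completion notification to the consumer's comp_handler, and
 * mlx4_ib_cq_event() reports CQ error events via the consumer's
 * event_handler.
 */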
static void mlx4_ib_cq_comp(struct mlx4_cq *cq)
{
	struct ib_cq *ibcq = &to_mibcq(cq)->ibcq;

	ibcq->comp_handler(ibcq, ibcq->cq_context);
}

static void mlx4_ib_cq_event(struct mlx4_cq *cq, enum mlx4_event type)
{
	struct ib_event event;
	struct ib_cq *ibcq;

	if (type != MLX4_EVENT_TYPE_CQ_ERROR) {
		pr_warn("Unexpected event type %d on CQ %06x\n",
			type, cq->cqn);
		return;
	}

	ibcq = &to_mibcq(cq)->ibcq;
	if (ibcq->event_handler) {
		event.device = ibcq->device;
		event.event = IB_EVENT_CQ_ERR;
		event.element.cq = ibcq;
		ibcq->event_handler(&event, ibcq->cq_context);
	}
}

static void *get_cqe_from_buf(struct mlx4_ib_cq_buf *buf, int n)
{
	return mlx4_buf_offset(&buf->buf, n * buf->entry_size);
}

static void *get_cqe(struct mlx4_ib_cq *cq, int n)
{
	return get_cqe_from_buf(&cq->buf, n);
}

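/*
 * A CQE is software-owned when its ownership bit matches the pass parity of
 * the requested index: hardware flips the bit it writes on each pass through
 * the ring.  For 64-byte CQEs the ownership byte lives in the second 32-byte
 * half of the entry, hence the "cqe + 1" below.
 */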
static void *get_sw_cqe(struct mlx4_ib_cq *cq, int n)
{
	struct mlx4_cqe *cqe = get_cqe(cq, n & cq->ibcq.cqe);
	struct mlx4_cqe *tcqe = ((cq->buf.entry_size == 64) ? (cqe + 1) : cqe);

	return (!!(tcqe->owner_sr_opcode & MLX4_CQE_OWNER_MASK) ^
		!!(n & (cq->ibcq.cqe + 1))) ? NULL : cqe;
}

static struct mlx4_cqe *next_cqe_sw(struct mlx4_ib_cq *cq)
{
	return get_sw_cqe(cq, cq->mcq.cons_index);
}

int mlx4_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
{
	struct mlx4_ib_cq *mcq = to_mcq(cq);
	struct mlx4_ib_dev *dev = to_mdev(cq->device);

	return mlx4_cq_modify(dev->dev, &mcq->mcq, cq_count, cq_period);
}

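/*
 * Kernel-owned CQ buffers: allocate the ring with mlx4_buf_alloc(), then
 * initialize and program the MTT so the HCA can address the pages.  The
 * error labels unwind these steps in reverse order.
 */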
static int mlx4_ib_alloc_cq_buf(struct mlx4_ib_dev *dev, struct mlx4_ib_cq_buf *buf, int nent)
{
	int err;

	err = mlx4_buf_alloc(dev->dev, nent * dev->dev->caps.cqe_size,
			     PAGE_SIZE * 2, &buf->buf);
	if (err)
		goto out;

	buf->entry_size = dev->dev->caps.cqe_size;
	err = mlx4_mtt_init(dev->dev, buf->buf.npages, buf->buf.page_shift,
			    &buf->mtt);
	if (err)
		goto err_buf;

	err = mlx4_buf_write_mtt(dev->dev, &buf->mtt, &buf->buf);
	if (err)
		goto err_mtt;

	return 0;

err_mtt:
	mlx4_mtt_cleanup(dev->dev, &buf->mtt);

err_buf:
	mlx4_buf_free(dev->dev, nent * buf->entry_size, &buf->buf);

out:
	return err;
}

static void mlx4_ib_free_cq_buf(struct mlx4_ib_dev *dev, struct mlx4_ib_cq_buf *buf, int cqe)
{
	mlx4_buf_free(dev->dev, (cqe + 1) * buf->entry_size, &buf->buf);
}

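/*
 * User-owned CQ buffers: pin the userspace ring with ib_umem_get() and
 * program an MTT covering it, so CQs created through uverbs get the same
 * hardware view as kernel CQs.
 */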
static int mlx4_ib_get_cq_umem(struct mlx4_ib_dev *dev, struct ib_ucontext *context,
			       struct mlx4_ib_cq_buf *buf, struct ib_umem **umem,
			       u64 buf_addr, int cqe)
{
	int err;
	int cqe_size = dev->dev->caps.cqe_size;
	int shift;
	int n;

	*umem = ib_umem_get(context, buf_addr, cqe * cqe_size,
			    IB_ACCESS_LOCAL_WRITE, 1);
	if (IS_ERR(*umem))
		return PTR_ERR(*umem);

	n = ib_umem_page_count(*umem);
	shift = mlx4_ib_umem_calc_optimal_mtt_size(*umem, 0, &n);
	err = mlx4_mtt_init(dev->dev, n, shift, &buf->mtt);
	if (err)
		goto err_buf;

	err = mlx4_ib_umem_write_mtt(dev, &buf->mtt, *umem);
	if (err)
		goto err_mtt;

	return 0;

err_mtt:
	mlx4_mtt_cleanup(dev->dev, &buf->mtt);

err_buf:
	ib_umem_release(*umem);

	return err;
}

#define CQ_CREATE_FLAGS_SUPPORTED IB_UVERBS_CQ_FLAGS_TIMESTAMP_COMPLETION
struct ib_cq *mlx4_ib_create_cq(struct ib_device *ibdev,
				const struct ib_cq_init_attr *attr,
				struct ib_ucontext *context,
				struct ib_udata *udata)
{
	int entries = attr->cqe;
	int vector = attr->comp_vector;
	struct mlx4_ib_dev *dev = to_mdev(ibdev);
	struct mlx4_ib_cq *cq;
	struct mlx4_uar *uar;
	int err;

	if (entries < 1 || entries > dev->dev->caps.max_cqes)
		return ERR_PTR(-EINVAL);

	if (attr->flags & ~CQ_CREATE_FLAGS_SUPPORTED)
		return ERR_PTR(-EINVAL);

	cq = kmalloc(sizeof *cq, GFP_KERNEL);
	if (!cq)
		return ERR_PTR(-ENOMEM);

	entries = roundup_pow_of_two(entries + 1);
	cq->ibcq.cqe = entries - 1;
	mutex_init(&cq->resize_mutex);
	spin_lock_init(&cq->lock);
	cq->resize_buf = NULL;
	cq->resize_umem = NULL;
	cq->create_flags = attr->flags;
	INIT_LIST_HEAD(&cq->send_qp_list);
	INIT_LIST_HEAD(&cq->recv_qp_list);

	if (context) {
		struct mlx4_ib_create_cq ucmd;

		if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd)) {
			err = -EFAULT;
			goto err_cq;
		}

		err = mlx4_ib_get_cq_umem(dev, context, &cq->buf, &cq->umem,
					  ucmd.buf_addr, entries);
		if (err)
			goto err_cq;

		err = mlx4_ib_db_map_user(to_mucontext(context), ucmd.db_addr,
					  &cq->db);
		if (err)
			goto err_mtt;

		uar = &to_mucontext(context)->uar;
		cq->mcq.usage = MLX4_RES_USAGE_USER_VERBS;
	} else {
		err = mlx4_db_alloc(dev->dev, &cq->db, 1);
		if (err)
			goto err_cq;

		cq->mcq.set_ci_db = cq->db.db;
		cq->mcq.arm_db = cq->db.db + 1;
		*cq->mcq.set_ci_db = 0;
		*cq->mcq.arm_db = 0;

		err = mlx4_ib_alloc_cq_buf(dev, &cq->buf, entries);
		if (err)
			goto err_db;

		uar = &dev->priv_uar;
		cq->mcq.usage = MLX4_RES_USAGE_DRIVER;
	}

	if (dev->eq_table)
		vector = dev->eq_table[vector % ibdev->num_comp_vectors];

	err = mlx4_cq_alloc(dev->dev, entries, &cq->buf.mtt, uar,
			    cq->db.dma, &cq->mcq, vector, 0,
			    !!(cq->create_flags & IB_UVERBS_CQ_FLAGS_TIMESTAMP_COMPLETION));
	if (err)
		goto err_dbmap;

	if (context)
		cq->mcq.tasklet_ctx.comp = mlx4_ib_cq_comp;
	else
		cq->mcq.comp = mlx4_ib_cq_comp;
	cq->mcq.event = mlx4_ib_cq_event;

	if (context)
		if (ib_copy_to_udata(udata, &cq->mcq.cqn, sizeof (__u32))) {
			err = -EFAULT;
			goto err_cq_free;
		}

	return &cq->ibcq;

err_cq_free:
	mlx4_cq_free(dev->dev, &cq->mcq);

err_dbmap:
	if (context)
		mlx4_ib_db_unmap_user(to_mucontext(context), &cq->db);

err_mtt:
	mlx4_mtt_cleanup(dev->dev, &cq->buf.mtt);

	if (context)
		ib_umem_release(cq->umem);
	else
		mlx4_ib_free_cq_buf(dev, &cq->buf, cq->ibcq.cqe);

err_db:
	if (!context)
		mlx4_db_free(dev->dev, &cq->db);

err_cq:
	kfree(cq);

	return ERR_PTR(err);
}

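/*
 * CQ resize uses a staging buffer: mlx4_alloc_resize_buf() (kernel CQs) or
 * mlx4_alloc_resize_umem() (user CQs) prepares the new ring, and the switch
 * to it completes either in mlx4_ib_resize_cq() or lazily in the poll path
 * when the RESIZE CQE is seen.
 */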
static int mlx4_alloc_resize_buf(struct mlx4_ib_dev *dev, struct mlx4_ib_cq *cq,
				 int entries)
{
	int err;

	if (cq->resize_buf)
		return -EBUSY;

	cq->resize_buf = kmalloc(sizeof *cq->resize_buf, GFP_KERNEL);
	if (!cq->resize_buf)
		return -ENOMEM;

	err = mlx4_ib_alloc_cq_buf(dev, &cq->resize_buf->buf, entries);
	if (err) {
		kfree(cq->resize_buf);
		cq->resize_buf = NULL;
		return err;
	}

	cq->resize_buf->cqe = entries - 1;

	return 0;
}

static int mlx4_alloc_resize_umem(struct mlx4_ib_dev *dev, struct mlx4_ib_cq *cq,
				  int entries, struct ib_udata *udata)
{
	struct mlx4_ib_resize_cq ucmd;
	int err;

	if (cq->resize_umem)
		return -EBUSY;

	if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd))
		return -EFAULT;

	cq->resize_buf = kmalloc(sizeof *cq->resize_buf, GFP_KERNEL);
	if (!cq->resize_buf)
		return -ENOMEM;

	err = mlx4_ib_get_cq_umem(dev, cq->umem->context, &cq->resize_buf->buf,
				  &cq->resize_umem, ucmd.buf_addr, entries);
	if (err) {
		kfree(cq->resize_buf);
		cq->resize_buf = NULL;
		return err;
	}

	cq->resize_buf->cqe = entries - 1;

	return 0;
}

static int mlx4_ib_get_outstanding_cqes(struct mlx4_ib_cq *cq)
{
	u32 i;

	i = cq->mcq.cons_index;
	while (get_sw_cqe(cq, i))
		++i;

	return i - cq->mcq.cons_index;
}

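/*
 * Copy the still-outstanding CQEs from the old ring into the resized ring,
 * stopping at the RESIZE CQE written by the hardware.  The ownership bit of
 * each copied entry is recomputed for its position in the new ring.
 */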
static void mlx4_ib_cq_resize_copy_cqes(struct mlx4_ib_cq *cq)
{
	struct mlx4_cqe *cqe, *new_cqe;
	int i;
	int cqe_size = cq->buf.entry_size;
	int cqe_inc = cqe_size == 64 ? 1 : 0;

	i = cq->mcq.cons_index;
	cqe = get_cqe(cq, i & cq->ibcq.cqe);
	cqe += cqe_inc;

	while ((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) != MLX4_CQE_OPCODE_RESIZE) {
		new_cqe = get_cqe_from_buf(&cq->resize_buf->buf,
					   (i + 1) & cq->resize_buf->cqe);
		memcpy(new_cqe, get_cqe(cq, i & cq->ibcq.cqe), cqe_size);
		new_cqe += cqe_inc;

		new_cqe->owner_sr_opcode = (cqe->owner_sr_opcode & ~MLX4_CQE_OWNER_MASK) |
			(((i + 1) & (cq->resize_buf->cqe + 1)) ? MLX4_CQE_OWNER_MASK : 0);
		cqe = get_cqe(cq, ++i & cq->ibcq.cqe);
		cqe += cqe_inc;
	}
	++cq->mcq.cons_index;
}

int mlx4_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
{
	struct mlx4_ib_dev *dev = to_mdev(ibcq->device);
	struct mlx4_ib_cq *cq = to_mcq(ibcq);
	struct mlx4_mtt mtt;
	int outst_cqe;
	int err;

	mutex_lock(&cq->resize_mutex);
	if (entries < 1 || entries > dev->dev->caps.max_cqes) {
		err = -EINVAL;
		goto out;
	}

	entries = roundup_pow_of_two(entries + 1);
	if (entries == ibcq->cqe + 1) {
		err = 0;
		goto out;
	}

	if (entries > dev->dev->caps.max_cqes + 1) {
		err = -EINVAL;
		goto out;
	}

	if (ibcq->uobject) {
		err = mlx4_alloc_resize_umem(dev, cq, entries, udata);
		if (err)
			goto out;
	} else {
		/* Can't be smaller than the number of outstanding CQEs */
		outst_cqe = mlx4_ib_get_outstanding_cqes(cq);
		if (entries < outst_cqe + 1) {
			err = -EINVAL;
			goto out;
		}

		err = mlx4_alloc_resize_buf(dev, cq, entries);
		if (err)
			goto out;
	}

	mtt = cq->buf.mtt;

	err = mlx4_cq_resize(dev->dev, &cq->mcq, entries, &cq->resize_buf->buf.mtt);
	if (err)
		goto err_buf;

	mlx4_mtt_cleanup(dev->dev, &mtt);
	if (ibcq->uobject) {
		cq->buf = cq->resize_buf->buf;
		cq->ibcq.cqe = cq->resize_buf->cqe;
		ib_umem_release(cq->umem);
		cq->umem = cq->resize_umem;

		kfree(cq->resize_buf);
		cq->resize_buf = NULL;
		cq->resize_umem = NULL;
	} else {
		struct mlx4_ib_cq_buf tmp_buf;
		int tmp_cqe = 0;

		spin_lock_irq(&cq->lock);
		if (cq->resize_buf) {
			mlx4_ib_cq_resize_copy_cqes(cq);
			tmp_buf = cq->buf;
			tmp_cqe = cq->ibcq.cqe;
			cq->buf = cq->resize_buf->buf;
			cq->ibcq.cqe = cq->resize_buf->cqe;

			kfree(cq->resize_buf);
			cq->resize_buf = NULL;
		}
		spin_unlock_irq(&cq->lock);

		if (tmp_cqe)
			mlx4_ib_free_cq_buf(dev, &tmp_buf, tmp_cqe);
	}

	goto out;

err_buf:
	mlx4_mtt_cleanup(dev->dev, &cq->resize_buf->buf.mtt);
	if (!ibcq->uobject)
		mlx4_ib_free_cq_buf(dev, &cq->resize_buf->buf,
				    cq->resize_buf->cqe);

	kfree(cq->resize_buf);
	cq->resize_buf = NULL;

	if (cq->resize_umem) {
		ib_umem_release(cq->resize_umem);
		cq->resize_umem = NULL;
	}

out:
	mutex_unlock(&cq->resize_mutex);

	return err;
}

int mlx4_ib_destroy_cq(struct ib_cq *cq)
{
	struct mlx4_ib_dev *dev = to_mdev(cq->device);
	struct mlx4_ib_cq *mcq = to_mcq(cq);

	mlx4_cq_free(dev->dev, &mcq->mcq);
	mlx4_mtt_cleanup(dev->dev, &mcq->buf.mtt);

	if (cq->uobject) {
		mlx4_ib_db_unmap_user(to_mucontext(cq->uobject->context), &mcq->db);
		ib_umem_release(mcq->umem);
	} else {
		mlx4_ib_free_cq_buf(dev, &mcq->buf, cq->cqe);
		mlx4_db_free(dev->dev, &mcq->db);
	}

	kfree(mcq);

	return 0;
}

static void dump_cqe(void *cqe)
{
	__be32 *buf = cqe;

	pr_debug("CQE contents %08x %08x %08x %08x %08x %08x %08x %08x\n",
		 be32_to_cpu(buf[0]), be32_to_cpu(buf[1]), be32_to_cpu(buf[2]),
		 be32_to_cpu(buf[3]), be32_to_cpu(buf[4]), be32_to_cpu(buf[5]),
		 be32_to_cpu(buf[6]), be32_to_cpu(buf[7]));
}

static void mlx4_ib_handle_error_cqe(struct mlx4_err_cqe *cqe,
				     struct ib_wc *wc)
{
	if (cqe->syndrome == MLX4_CQE_SYNDROME_LOCAL_QP_OP_ERR) {
		pr_debug("local QP operation err "
			 "(QPN %06x, WQE index %x, vendor syndrome %02x, "
			 "opcode = %02x)\n",
			 be32_to_cpu(cqe->my_qpn), be16_to_cpu(cqe->wqe_index),
			 cqe->vendor_err_syndrome,
			 cqe->owner_sr_opcode & ~MLX4_CQE_OWNER_MASK);
		dump_cqe(cqe);
	}

	switch (cqe->syndrome) {
	case MLX4_CQE_SYNDROME_LOCAL_LENGTH_ERR:
		wc->status = IB_WC_LOC_LEN_ERR;
		break;
	case MLX4_CQE_SYNDROME_LOCAL_QP_OP_ERR:
		wc->status = IB_WC_LOC_QP_OP_ERR;
		break;
	case MLX4_CQE_SYNDROME_LOCAL_PROT_ERR:
		wc->status = IB_WC_LOC_PROT_ERR;
		break;
	case MLX4_CQE_SYNDROME_WR_FLUSH_ERR:
		wc->status = IB_WC_WR_FLUSH_ERR;
		break;
	case MLX4_CQE_SYNDROME_MW_BIND_ERR:
		wc->status = IB_WC_MW_BIND_ERR;
		break;
	case MLX4_CQE_SYNDROME_BAD_RESP_ERR:
		wc->status = IB_WC_BAD_RESP_ERR;
		break;
	case MLX4_CQE_SYNDROME_LOCAL_ACCESS_ERR:
		wc->status = IB_WC_LOC_ACCESS_ERR;
		break;
	case MLX4_CQE_SYNDROME_REMOTE_INVAL_REQ_ERR:
		wc->status = IB_WC_REM_INV_REQ_ERR;
		break;
	case MLX4_CQE_SYNDROME_REMOTE_ACCESS_ERR:
		wc->status = IB_WC_REM_ACCESS_ERR;
		break;
	case MLX4_CQE_SYNDROME_REMOTE_OP_ERR:
		wc->status = IB_WC_REM_OP_ERR;
		break;
	case MLX4_CQE_SYNDROME_TRANSPORT_RETRY_EXC_ERR:
		wc->status = IB_WC_RETRY_EXC_ERR;
		break;
	case MLX4_CQE_SYNDROME_RNR_RETRY_EXC_ERR:
		wc->status = IB_WC_RNR_RETRY_EXC_ERR;
		break;
	case MLX4_CQE_SYNDROME_REMOTE_ABORTED_ERR:
		wc->status = IB_WC_REM_ABORT_ERR;
		break;
	default:
		wc->status = IB_WC_GENERAL_ERR;
		break;
	}

	wc->vendor_err = cqe->vendor_err_syndrome;
}

static int mlx4_ib_ipoib_csum_ok(__be16 status, __be16 checksum)
{
	return ((status & cpu_to_be16(MLX4_CQE_STATUS_IPV4 |
				      MLX4_CQE_STATUS_IPV4F |
				      MLX4_CQE_STATUS_IPV4OPT |
				      MLX4_CQE_STATUS_IPV6 |
				      MLX4_CQE_STATUS_IPOK)) ==
		cpu_to_be16(MLX4_CQE_STATUS_IPV4 |
			    MLX4_CQE_STATUS_IPOK)) &&
		(status & cpu_to_be16(MLX4_CQE_STATUS_UDP |
				      MLX4_CQE_STATUS_TCP)) &&
		checksum == cpu_to_be16(0xffff);
}

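/*
 * Under SR-IOV (mlx4_is_mfunc()), special QP traffic for the functions is
 * tunnelled through proxy QPs.  The real completion metadata (pkey index,
 * source QP, SL/VLAN, source MAC) arrives in a mlx4_ib_proxy_sqp_hdr at the
 * start of the receive buffer; unpack it into the work completion here.
 */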
static void use_tunnel_data(struct mlx4_ib_qp *qp, struct mlx4_ib_cq *cq, struct ib_wc *wc,
			    unsigned tail, struct mlx4_cqe *cqe, int is_eth)
{
	struct mlx4_ib_proxy_sqp_hdr *hdr;

	ib_dma_sync_single_for_cpu(qp->ibqp.device,
				   qp->sqp_proxy_rcv[tail].map,
				   sizeof (struct mlx4_ib_proxy_sqp_hdr),
				   DMA_FROM_DEVICE);
	hdr = (struct mlx4_ib_proxy_sqp_hdr *) (qp->sqp_proxy_rcv[tail].addr);
	wc->pkey_index = be16_to_cpu(hdr->tun.pkey_index);
	wc->src_qp = be32_to_cpu(hdr->tun.flags_src_qp) & 0xFFFFFF;
	wc->wc_flags |= (hdr->tun.g_ml_path & 0x80) ? (IB_WC_GRH) : 0;
	wc->dlid_path_bits = 0;

	if (is_eth) {
		wc->slid = 0;
		wc->vlan_id = be16_to_cpu(hdr->tun.sl_vid);
		memcpy(&(wc->smac[0]), (char *)&hdr->tun.mac_31_0, 4);
		memcpy(&(wc->smac[4]), (char *)&hdr->tun.slid_mac_47_32, 2);
		wc->wc_flags |= (IB_WC_WITH_VLAN | IB_WC_WITH_SMAC);
	} else {
		wc->slid = be16_to_cpu(hdr->tun.slid_mac_47_32);
		wc->sl = (u8) (be16_to_cpu(hdr->tun.sl_vid) >> 12);
	}
}

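/*
 * Software completion path used when the device is in internal error state:
 * every WQE still posted on the QPs attached to this CQ is completed with
 * IB_WC_WR_FLUSH_ERR so consumers can drain and tear down cleanly.
 */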
static void mlx4_ib_qp_sw_comp(struct mlx4_ib_qp *qp, int num_entries,
			       struct ib_wc *wc, int *npolled, int is_send)
{
	struct mlx4_ib_wq *wq;
	unsigned cur;
	int i;

	wq = is_send ? &qp->sq : &qp->rq;
	cur = wq->head - wq->tail;

	if (cur == 0)
		return;

	for (i = 0; i < cur && *npolled < num_entries; i++) {
		wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
		wc->status = IB_WC_WR_FLUSH_ERR;
		wc->vendor_err = MLX4_CQE_SYNDROME_WR_FLUSH_ERR;
		wq->tail++;
		(*npolled)++;
		wc->qp = &qp->ibqp;
		wc++;
	}
}

static void mlx4_ib_poll_sw_comp(struct mlx4_ib_cq *cq, int num_entries,
				 struct ib_wc *wc, int *npolled)
{
	struct mlx4_ib_qp *qp;

	*npolled = 0;
	/* Find uncompleted WQEs belonging to that cq and return
	 * simulated FLUSH_ERR completions
	 */
	list_for_each_entry(qp, &cq->send_qp_list, cq_send_list) {
		mlx4_ib_qp_sw_comp(qp, num_entries, wc + *npolled, npolled, 1);
		if (*npolled >= num_entries)
			goto out;
	}

	list_for_each_entry(qp, &cq->recv_qp_list, cq_recv_list) {
		mlx4_ib_qp_sw_comp(qp, num_entries, wc + *npolled, npolled, 0);
		if (*npolled >= num_entries)
			goto out;
	}

out:
	return;
}

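/*
 * Process a single CQE.  Returns 0 if a work completion was filled in, or
 * -EAGAIN if no software-owned CQE is available.  The caller holds cq->lock
 * and is responsible for ringing the consumer-index doorbell afterwards.
 */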
static int mlx4_ib_poll_one(struct mlx4_ib_cq *cq,
			    struct mlx4_ib_qp **cur_qp,
			    struct ib_wc *wc)
{
	struct mlx4_cqe *cqe;
	struct mlx4_qp *mqp;
	struct mlx4_ib_wq *wq;
	struct mlx4_ib_srq *srq;
	struct mlx4_srq *msrq = NULL;
	int is_send;
	int is_error;
	int is_eth;
	u32 g_mlpath_rqpn;
	u16 wqe_ctr;
	unsigned tail = 0;

repoll:
	cqe = next_cqe_sw(cq);
	if (!cqe)
		return -EAGAIN;

	if (cq->buf.entry_size == 64)
		cqe++;

	++cq->mcq.cons_index;

	/*
	 * Make sure we read CQ entry contents after we've checked the
	 * ownership bit.
	 */
	rmb();

	is_send = cqe->owner_sr_opcode & MLX4_CQE_IS_SEND_MASK;
	is_error = (cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) ==
		MLX4_CQE_OPCODE_ERROR;

	/* Resize CQ in progress: switch to the new buffer and re-poll */
	if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) == MLX4_CQE_OPCODE_RESIZE)) {
		if (cq->resize_buf) {
			struct mlx4_ib_dev *dev = to_mdev(cq->ibcq.device);

			mlx4_ib_free_cq_buf(dev, &cq->buf, cq->ibcq.cqe);
			cq->buf = cq->resize_buf->buf;
			cq->ibcq.cqe = cq->resize_buf->cqe;

			kfree(cq->resize_buf);
			cq->resize_buf = NULL;
		}

		goto repoll;
	}

	if (!*cur_qp ||
	    (be32_to_cpu(cqe->vlan_my_qpn) & MLX4_CQE_QPN_MASK) != (*cur_qp)->mqp.qpn) {
		/*
		 * We do not have to take the QP table lock here,
		 * because CQs will be locked while QPs are removed
		 * from the table.
		 */
		mqp = __mlx4_qp_lookup(to_mdev(cq->ibcq.device)->dev,
				       be32_to_cpu(cqe->vlan_my_qpn));
		*cur_qp = to_mibqp(mqp);
	}

	wc->qp = &(*cur_qp)->ibqp;

	if (wc->qp->qp_type == IB_QPT_XRC_TGT) {
		u32 srq_num;

		g_mlpath_rqpn = be32_to_cpu(cqe->g_mlpath_rqpn);
		srq_num = g_mlpath_rqpn & 0xffffff;
		/* SRQ is also in the radix tree */
		msrq = mlx4_srq_lookup(to_mdev(cq->ibcq.device)->dev,
				       srq_num);
	}

	if (is_send) {
		wq = &(*cur_qp)->sq;
		if (!(*cur_qp)->sq_signal_bits) {
			wqe_ctr = be16_to_cpu(cqe->wqe_index);
			wq->tail += (u16) (wqe_ctr - (u16) wq->tail);
		}
		wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
		++wq->tail;
	} else if ((*cur_qp)->ibqp.srq) {
		srq = to_msrq((*cur_qp)->ibqp.srq);
		wqe_ctr = be16_to_cpu(cqe->wqe_index);
		wc->wr_id = srq->wrid[wqe_ctr];
		mlx4_ib_free_srq_wqe(srq, wqe_ctr);
	} else if (msrq) {
		srq = to_mibsrq(msrq);
		wqe_ctr = be16_to_cpu(cqe->wqe_index);
		wc->wr_id = srq->wrid[wqe_ctr];
		mlx4_ib_free_srq_wqe(srq, wqe_ctr);
	} else {
		wq = &(*cur_qp)->rq;
		tail = wq->tail & (wq->wqe_cnt - 1);
		wc->wr_id = wq->wrid[tail];
		++wq->tail;
	}

	if (unlikely(is_error)) {
		mlx4_ib_handle_error_cqe((struct mlx4_err_cqe *) cqe, wc);
		return 0;
	}

	wc->status = IB_WC_SUCCESS;

	if (is_send) {
		wc->wc_flags = 0;
		switch (cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) {
		case MLX4_OPCODE_RDMA_WRITE_IMM:
			wc->wc_flags |= IB_WC_WITH_IMM;
			/* fall through */
		case MLX4_OPCODE_RDMA_WRITE:
			wc->opcode = IB_WC_RDMA_WRITE;
			break;
		case MLX4_OPCODE_SEND_IMM:
			wc->wc_flags |= IB_WC_WITH_IMM;
			/* fall through */
		case MLX4_OPCODE_SEND:
		case MLX4_OPCODE_SEND_INVAL:
			wc->opcode = IB_WC_SEND;
			break;
		case MLX4_OPCODE_RDMA_READ:
			wc->opcode = IB_WC_RDMA_READ;
			wc->byte_len = be32_to_cpu(cqe->byte_cnt);
			break;
		case MLX4_OPCODE_ATOMIC_CS:
			wc->opcode = IB_WC_COMP_SWAP;
			wc->byte_len = 8;
			break;
		case MLX4_OPCODE_ATOMIC_FA:
			wc->opcode = IB_WC_FETCH_ADD;
			wc->byte_len = 8;
			break;
		case MLX4_OPCODE_MASKED_ATOMIC_CS:
			wc->opcode = IB_WC_MASKED_COMP_SWAP;
			wc->byte_len = 8;
			break;
		case MLX4_OPCODE_MASKED_ATOMIC_FA:
			wc->opcode = IB_WC_MASKED_FETCH_ADD;
			wc->byte_len = 8;
			break;
		case MLX4_OPCODE_LSO:
			wc->opcode = IB_WC_LSO;
			break;
		case MLX4_OPCODE_FMR:
			wc->opcode = IB_WC_REG_MR;
			break;
		case MLX4_OPCODE_LOCAL_INVAL:
			wc->opcode = IB_WC_LOCAL_INV;
			break;
		}
	} else {
		wc->byte_len = be32_to_cpu(cqe->byte_cnt);

		switch (cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) {
		case MLX4_RECV_OPCODE_RDMA_WRITE_IMM:
			wc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
			wc->wc_flags = IB_WC_WITH_IMM;
			wc->ex.imm_data = cqe->immed_rss_invalid;
			break;
		case MLX4_RECV_OPCODE_SEND_INVAL:
			wc->opcode = IB_WC_RECV;
			wc->wc_flags = IB_WC_WITH_INVALIDATE;
			wc->ex.invalidate_rkey = be32_to_cpu(cqe->immed_rss_invalid);
			break;
		case MLX4_RECV_OPCODE_SEND:
			wc->opcode = IB_WC_RECV;
			wc->wc_flags = 0;
			break;
		case MLX4_RECV_OPCODE_SEND_IMM:
			wc->opcode = IB_WC_RECV;
			wc->wc_flags = IB_WC_WITH_IMM;
			wc->ex.imm_data = cqe->immed_rss_invalid;
			break;
		}

		is_eth = (rdma_port_get_link_layer(wc->qp->device,
						   (*cur_qp)->port) ==
			  IB_LINK_LAYER_ETHERNET);
		if (mlx4_is_mfunc(to_mdev(cq->ibcq.device)->dev)) {
			if ((*cur_qp)->mlx4_ib_qp_type &
			    (MLX4_IB_QPT_PROXY_SMI_OWNER |
			     MLX4_IB_QPT_PROXY_SMI | MLX4_IB_QPT_PROXY_GSI)) {
				use_tunnel_data(*cur_qp, cq, wc, tail, cqe,
						is_eth);
				return 0;
			}
		}

		g_mlpath_rqpn = be32_to_cpu(cqe->g_mlpath_rqpn);
		wc->src_qp = g_mlpath_rqpn & 0xffffff;
		wc->dlid_path_bits = (g_mlpath_rqpn >> 24) & 0x7f;
		wc->wc_flags |= g_mlpath_rqpn & 0x80000000 ? IB_WC_GRH : 0;
		wc->pkey_index = be32_to_cpu(cqe->immed_rss_invalid) & 0x7f;
		wc->wc_flags |= mlx4_ib_ipoib_csum_ok(cqe->status,
				      cqe->checksum) ? IB_WC_IP_CSUM_OK : 0;
		if (is_eth) {
			wc->slid = 0;
			wc->sl = be16_to_cpu(cqe->sl_vid) >> 13;
			if (be32_to_cpu(cqe->vlan_my_qpn) &
			    MLX4_CQE_CVLAN_PRESENT_MASK) {
				wc->vlan_id = be16_to_cpu(cqe->sl_vid) &
					MLX4_CQE_VID_MASK;
			} else {
				wc->vlan_id = 0xffff;
			}
			memcpy(wc->smac, cqe->smac, ETH_ALEN);
			wc->wc_flags |= (IB_WC_WITH_VLAN | IB_WC_WITH_SMAC);
		} else {
			wc->slid = be16_to_cpu(cqe->rlid);
			wc->sl = be16_to_cpu(cqe->sl_vid) >> 12;
			wc->vlan_id = 0xffff;
		}
	}

	return 0;
}

int mlx4_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
{
	struct mlx4_ib_cq *cq = to_mcq(ibcq);
	struct mlx4_ib_qp *cur_qp = NULL;
	unsigned long flags;
	int npolled;
	struct mlx4_ib_dev *mdev = to_mdev(cq->ibcq.device);

	spin_lock_irqsave(&cq->lock, flags);
	if (mdev->dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR) {
		mlx4_ib_poll_sw_comp(cq, num_entries, wc, &npolled);
		goto out;
	}

	for (npolled = 0; npolled < num_entries; ++npolled) {
		if (mlx4_ib_poll_one(cq, &cur_qp, wc + npolled))
			break;
	}

	mlx4_cq_set_ci(&cq->mcq);

out:
	spin_unlock_irqrestore(&cq->lock, flags);

	return npolled;
}

int mlx4_ib_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
{
	mlx4_cq_arm(&to_mcq(ibcq)->mcq,
		    (flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED ?
		    MLX4_CQ_DB_REQ_NOT_SOL : MLX4_CQ_DB_REQ_NOT,
		    to_mdev(ibcq->device)->uar_map,
		    MLX4_GET_DOORBELL_LOCK(&to_mdev(ibcq->device)->uar_lock));

	return 0;
}

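/*
 * Remove all CQEs belonging to a given QP from the CQ (used when a QP is
 * reset or destroyed).  The caller must already hold cq->lock;
 * mlx4_ib_cq_clean() below is the locking wrapper.
 */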
void __mlx4_ib_cq_clean(struct mlx4_ib_cq *cq, u32 qpn, struct mlx4_ib_srq *srq)
{
	u32 prod_index;
	int nfreed = 0;
	struct mlx4_cqe *cqe, *dest;
	u8 owner_bit;
	int cqe_inc = cq->buf.entry_size == 64 ? 1 : 0;

	/*
	 * First we need to find the current producer index, so we
	 * know where to start cleaning from.  It doesn't matter if HW
	 * adds new entries after this loop -- the QP we're worried
	 * about is already in RESET, so the new entries won't come
	 * from our QP and therefore don't need to be checked.
	 */
	for (prod_index = cq->mcq.cons_index; get_sw_cqe(cq, prod_index); ++prod_index)
		if (prod_index == cq->mcq.cons_index + cq->ibcq.cqe)
			break;

	/*
	 * Now sweep backwards through the CQ, removing CQ entries
	 * that match our QP by copying older entries on top of them.
	 */
	while ((int) --prod_index - (int) cq->mcq.cons_index >= 0) {
		cqe = get_cqe(cq, prod_index & cq->ibcq.cqe);
		cqe += cqe_inc;

		if ((be32_to_cpu(cqe->vlan_my_qpn) & MLX4_CQE_QPN_MASK) == qpn) {
			if (srq && !(cqe->owner_sr_opcode & MLX4_CQE_IS_SEND_MASK))
				mlx4_ib_free_srq_wqe(srq, be16_to_cpu(cqe->wqe_index));
			++nfreed;
		} else if (nfreed) {
			dest = get_cqe(cq, (prod_index + nfreed) & cq->ibcq.cqe);
			dest += cqe_inc;

			owner_bit = dest->owner_sr_opcode & MLX4_CQE_OWNER_MASK;
			memcpy(dest, cqe, sizeof *cqe);
			dest->owner_sr_opcode = owner_bit |
				(dest->owner_sr_opcode & ~MLX4_CQE_OWNER_MASK);
		}
	}

	if (nfreed) {
		cq->mcq.cons_index += nfreed;
		/*
		 * Make sure update of buffer contents is done before
		 * updating consumer index.
		 */
		wmb();
		mlx4_cq_set_ci(&cq->mcq);
	}
}

void mlx4_ib_cq_clean(struct mlx4_ib_cq *cq, u32 qpn, struct mlx4_ib_srq *srq)
{
	spin_lock_irq(&cq->lock);
	__mlx4_ib_cq_clean(cq, qpn, srq);
	spin_unlock_irq(&cq->lock);
}