// SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause

/* Authors: Bernard Metzler <bmt@zurich.ibm.com> */
/* Copyright (c) 2008-2019, IBM Corporation */

#include <linux/errno.h>
#include <linux/types.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/xarray.h>

#include <rdma/iw_cm.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/uverbs_ioctl.h>

#include "siw.h"
#include "siw_verbs.h"
#include "siw_mem.h"

static int ib_qp_state_to_siw_qp_state[IB_QPS_ERR + 1] = {
	[IB_QPS_RESET] = SIW_QP_STATE_IDLE,
	[IB_QPS_INIT] = SIW_QP_STATE_IDLE,
	[IB_QPS_RTR] = SIW_QP_STATE_RTR,
	[IB_QPS_RTS] = SIW_QP_STATE_RTS,
	[IB_QPS_SQD] = SIW_QP_STATE_CLOSING,
	[IB_QPS_SQE] = SIW_QP_STATE_TERMINATE,
	[IB_QPS_ERR] = SIW_QP_STATE_ERROR
};

static char ib_qp_state_to_string[IB_QPS_ERR + 1][sizeof("RESET")] = {
	[IB_QPS_RESET] = "RESET", [IB_QPS_INIT] = "INIT", [IB_QPS_RTR] = "RTR",
	[IB_QPS_RTS] = "RTS", [IB_QPS_SQD] = "SQD", [IB_QPS_SQE] = "SQE",
	[IB_QPS_ERR] = "ERR"
};

void siw_mmap_free(struct rdma_user_mmap_entry *rdma_entry)
{
	struct siw_user_mmap_entry *entry = to_siw_mmap_entry(rdma_entry);

	kfree(entry);
}

int siw_mmap(struct ib_ucontext *ctx, struct vm_area_struct *vma)
{
	struct siw_ucontext *uctx = to_siw_ctx(ctx);
	size_t size = vma->vm_end - vma->vm_start;
	struct rdma_user_mmap_entry *rdma_entry;
	struct siw_user_mmap_entry *entry;
	int rv = -EINVAL;

	/*
	 * Must be page aligned
	 */
	if (vma->vm_start & (PAGE_SIZE - 1)) {
		pr_warn("siw: mmap not page aligned\n");
		return -EINVAL;
	}
	rdma_entry = rdma_user_mmap_entry_get(&uctx->base_ucontext, vma);
	if (!rdma_entry) {
		siw_dbg(&uctx->sdev->base_dev, "mmap lookup failed: %lu, %#zx\n",
			vma->vm_pgoff, size);
		return -EINVAL;
	}
	entry = to_siw_mmap_entry(rdma_entry);

	rv = remap_vmalloc_range(vma, entry->address, 0);
	if (rv) {
		pr_warn("remap_vmalloc_range failed: %lu, %zu\n", vma->vm_pgoff,
			size);
		goto out;
	}
out:
	rdma_user_mmap_entry_put(rdma_entry);

	return rv;
}

int siw_alloc_ucontext(struct ib_ucontext *base_ctx, struct ib_udata *udata)
{
	struct siw_device *sdev = to_siw_dev(base_ctx->device);
	struct siw_ucontext *ctx = to_siw_ctx(base_ctx);
	struct siw_uresp_alloc_ctx uresp = {};
	int rv;

	if (atomic_inc_return(&sdev->num_ctx) > SIW_MAX_CONTEXT) {
		rv = -ENOMEM;
		goto err_out;
	}
	ctx->sdev = sdev;

	uresp.dev_id = sdev->vendor_part_id;

	if (udata->outlen < sizeof(uresp)) {
		rv = -EINVAL;
		goto err_out;
	}
	rv = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
	if (rv)
		goto err_out;

	siw_dbg(base_ctx->device, "success. now %d context(s)\n",
		atomic_read(&sdev->num_ctx));

	return 0;

err_out:
	atomic_dec(&sdev->num_ctx);
	siw_dbg(base_ctx->device, "failure %d. now %d context(s)\n", rv,
		atomic_read(&sdev->num_ctx));

	return rv;
}

void siw_dealloc_ucontext(struct ib_ucontext *base_ctx)
{
	struct siw_ucontext *uctx = to_siw_ctx(base_ctx);

	atomic_dec(&uctx->sdev->num_ctx);
}

int siw_query_device(struct ib_device *base_dev, struct ib_device_attr *attr,
		     struct ib_udata *udata)
{
	struct siw_device *sdev = to_siw_dev(base_dev);

	if (udata->inlen || udata->outlen)
		return -EINVAL;

	memset(attr, 0, sizeof(*attr));

	/* Revisit atomic caps if RFC 7306 gets supported */
	attr->atomic_cap = 0;
	attr->device_cap_flags =
		IB_DEVICE_MEM_MGT_EXTENSIONS | IB_DEVICE_ALLOW_USER_UNREG;
	attr->max_cq = sdev->attrs.max_cq;
	attr->max_cqe = sdev->attrs.max_cqe;
	attr->max_fast_reg_page_list_len = SIW_MAX_SGE_PBL;
	attr->max_mr = sdev->attrs.max_mr;
	attr->max_mw = sdev->attrs.max_mw;
	attr->max_mr_size = ~0ull;
	attr->max_pd = sdev->attrs.max_pd;
	attr->max_qp = sdev->attrs.max_qp;
	attr->max_qp_init_rd_atom = sdev->attrs.max_ird;
	attr->max_qp_rd_atom = sdev->attrs.max_ord;
	attr->max_qp_wr = sdev->attrs.max_qp_wr;
	attr->max_recv_sge = sdev->attrs.max_sge;
	attr->max_res_rd_atom = sdev->attrs.max_qp * sdev->attrs.max_ird;
	attr->max_send_sge = sdev->attrs.max_sge;
	attr->max_sge_rd = sdev->attrs.max_sge_rd;
	attr->max_srq = sdev->attrs.max_srq;
	attr->max_srq_sge = sdev->attrs.max_srq_sge;
	attr->max_srq_wr = sdev->attrs.max_srq_wr;
	attr->page_size_cap = PAGE_SIZE;
	attr->vendor_id = SIW_VENDOR_ID;
	attr->vendor_part_id = sdev->vendor_part_id;

	memcpy(&attr->sys_image_guid, sdev->netdev->dev_addr, 6);

	return 0;
}

int siw_query_port(struct ib_device *base_dev, u8 port,
		   struct ib_port_attr *attr)
{
	struct siw_device *sdev = to_siw_dev(base_dev);
	int rv;

	memset(attr, 0, sizeof(*attr));

	rv = ib_get_eth_speed(base_dev, port, &attr->active_speed,
			      &attr->active_width);
	attr->gid_tbl_len = 1;
	attr->max_msg_sz = -1;
	attr->max_mtu = ib_mtu_int_to_enum(sdev->netdev->mtu);
	attr->active_mtu = ib_mtu_int_to_enum(sdev->netdev->mtu);
	attr->phys_state = sdev->state == IB_PORT_ACTIVE ?
		IB_PORT_PHYS_STATE_LINK_UP : IB_PORT_PHYS_STATE_DISABLED;
	attr->port_cap_flags = IB_PORT_CM_SUP | IB_PORT_DEVICE_MGMT_SUP;
	attr->state = sdev->state;
	/*
	 * All remaining port attributes (lid, sm_lid, lmc, pkey/qkey
	 * violation counters, max_vl_num, subnet_timeout, ...) stay zero
	 * from the memset() above; they do not apply to iWARP.
	 */
	return rv;
}

int siw_get_port_immutable(struct ib_device *base_dev, u8 port,
			   struct ib_port_immutable *port_immutable)
{
	struct ib_port_attr attr;
	int rv = siw_query_port(base_dev, port, &attr);

	if (rv)
		return rv;

	port_immutable->gid_tbl_len = attr.gid_tbl_len;
	port_immutable->core_cap_flags = RDMA_CORE_PORT_IWARP;

	return 0;
}

int siw_query_gid(struct ib_device *base_dev, u8 port, int idx,
		  union ib_gid *gid)
{
	struct siw_device *sdev = to_siw_dev(base_dev);

	/* subnet_prefix == interface_id == 0 */
	memset(gid, 0, sizeof(*gid));
	memcpy(&gid->raw[0], sdev->netdev->dev_addr, 6);

	return 0;
}

int siw_alloc_pd(struct ib_pd *pd, struct ib_udata *udata)
{
	struct siw_device *sdev = to_siw_dev(pd->device);

	if (atomic_inc_return(&sdev->num_pd) > SIW_MAX_PD) {
		atomic_dec(&sdev->num_pd);
		return -ENOMEM;
	}
	siw_dbg_pd(pd, "now %d PD(s)\n", atomic_read(&sdev->num_pd));

	return 0;
}

void siw_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata)
{
	struct siw_device *sdev = to_siw_dev(pd->device);

	siw_dbg_pd(pd, "free PD\n");
	atomic_dec(&sdev->num_pd);
}

void siw_qp_get_ref(struct ib_qp *base_qp)
{
	siw_qp_get(to_siw_qp(base_qp));
}

void siw_qp_put_ref(struct ib_qp *base_qp)
{
	siw_qp_put(to_siw_qp(base_qp));
}

static struct rdma_user_mmap_entry *
siw_mmap_entry_insert(struct siw_ucontext *uctx,
		      void *address, size_t length,
		      u64 *offset)
{
	struct siw_user_mmap_entry *entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	int rv;

	*offset = SIW_INVAL_UOBJ_KEY;
	if (!entry)
		return NULL;

	entry->address = address;

	rv = rdma_user_mmap_entry_insert(&uctx->base_ucontext,
					 &entry->rdma_entry,
					 length);
	if (rv) {
		kfree(entry);
		return NULL;
	}

	*offset = rdma_user_mmap_get_offset(&entry->rdma_entry);

	return &entry->rdma_entry;
}

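/*
 * siw_create_qp()
 *
 * Create QP of requested size on given device.
 *
 * @pd:		Protection Domain
 * @attrs:	Initial QP attributes.
 * @udata:	used to provide QP ID, SQ and RQ size back to user.
 */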
struct ib_qp *siw_create_qp(struct ib_pd *pd,
			    struct ib_qp_init_attr *attrs,
			    struct ib_udata *udata)
{
	struct siw_qp *qp = NULL;
	struct ib_device *base_dev = pd->device;
	struct siw_device *sdev = to_siw_dev(base_dev);
	struct siw_ucontext *uctx =
		rdma_udata_to_drv_context(udata, struct siw_ucontext,
					  base_ucontext);
	struct siw_cq *scq = NULL, *rcq = NULL;
	unsigned long flags;
	int num_sqe, num_rqe, rv = 0;
	size_t length;

	siw_dbg(base_dev, "create new QP\n");

	if (atomic_inc_return(&sdev->num_qp) > SIW_MAX_QP) {
		siw_dbg(base_dev, "too many QP's\n");
		rv = -ENOMEM;
		goto err_out;
	}
	if (attrs->qp_type != IB_QPT_RC) {
		siw_dbg(base_dev, "only RC QP's supported\n");
		rv = -EOPNOTSUPP;
		goto err_out;
	}
	if ((attrs->cap.max_send_wr > SIW_MAX_QP_WR) ||
	    (attrs->cap.max_recv_wr > SIW_MAX_QP_WR) ||
	    (attrs->cap.max_send_sge > SIW_MAX_SGE) ||
	    (attrs->cap.max_recv_sge > SIW_MAX_SGE)) {
		siw_dbg(base_dev, "QP size error\n");
		rv = -EINVAL;
		goto err_out;
	}
	if (attrs->cap.max_inline_data > SIW_MAX_INLINE) {
		siw_dbg(base_dev, "max inline send: %d > %d\n",
			attrs->cap.max_inline_data, (int)SIW_MAX_INLINE);
		rv = -EINVAL;
		goto err_out;
	}
	/*
	 * NOTE: we allow for zero element SQ and RQ WQE's SGL's
	 * but not for a QP unable to hold any WQE (SQ + RQ)
	 */
	if (attrs->cap.max_send_wr + attrs->cap.max_recv_wr == 0) {
		siw_dbg(base_dev, "QP must have send or receive queue\n");
		rv = -EINVAL;
		goto err_out;
	}
	scq = to_siw_cq(attrs->send_cq);
	rcq = to_siw_cq(attrs->recv_cq);

	if (!scq || (!rcq && !attrs->srq)) {
		siw_dbg(base_dev, "send CQ or receive CQ invalid\n");
		rv = -EINVAL;
		goto err_out;
	}
	qp = kzalloc(sizeof(*qp), GFP_KERNEL);
	if (!qp) {
		rv = -ENOMEM;
		goto err_out;
	}
	init_rwsem(&qp->state_lock);
	spin_lock_init(&qp->sq_lock);
	spin_lock_init(&qp->rq_lock);
	spin_lock_init(&qp->orq_lock);

	rv = siw_qp_add(sdev, qp);
	if (rv)
		goto err_out;

	/* All queue indices are derived from modulo operations
	 * on a free running 'get' (consumer) and 'put' (producer)
	 * unsigned counter. Having queue sizes at power of two
	 * avoids handling counter wrap around.
	 */
	num_sqe = roundup_pow_of_two(attrs->cap.max_send_wr);
	num_rqe = roundup_pow_of_two(attrs->cap.max_recv_wr);

	if (udata)
		qp->sendq = vmalloc_user(num_sqe * sizeof(struct siw_sqe));
	else
		qp->sendq = vzalloc(num_sqe * sizeof(struct siw_sqe));

	if (qp->sendq == NULL) {
		siw_dbg(base_dev, "SQ size %d alloc failed\n", num_sqe);
		rv = -ENOMEM;
		goto err_out_xa;
	}
	if (attrs->sq_sig_type != IB_SIGNAL_REQ_WR) {
		if (attrs->sq_sig_type == IB_SIGNAL_ALL_WR)
			qp->attrs.flags |= SIW_SIGNAL_ALL_WR;
		else {
			rv = -EINVAL;
			goto err_out_xa;
		}
	}
	qp->pd = pd;
	qp->scq = scq;
	qp->rcq = rcq;

	if (attrs->srq) {
		/*
		 * SRQ usage QP: ignore RQ size, if RQ requested.
		 * The receive queue is provided by the attached SRQ.
		 */
		qp->srq = to_siw_srq(attrs->srq);
		qp->attrs.rq_size = 0;
		siw_dbg(base_dev, "QP [%u]: SRQ attached\n",
			qp->base_qp.qp_num);
	} else if (num_rqe) {
		if (udata)
			qp->recvq =
				vmalloc_user(num_rqe * sizeof(struct siw_rqe));
		else
			qp->recvq = vzalloc(num_rqe * sizeof(struct siw_rqe));

		if (qp->recvq == NULL) {
			siw_dbg(base_dev, "RQ size %d alloc failed\n", num_rqe);
			rv = -ENOMEM;
			goto err_out_xa;
		}
		qp->attrs.rq_size = num_rqe;
	}
	qp->attrs.sq_size = num_sqe;
	qp->attrs.sq_max_sges = attrs->cap.max_send_sge;
	qp->attrs.rq_max_sges = attrs->cap.max_recv_sge;

	/* Make those two tunables fixed for now. */
	qp->tx_ctx.gso_seg_limit = 1;
	qp->tx_ctx.zcopy_tx = zcopy_tx;

	qp->attrs.state = SIW_QP_STATE_IDLE;

	if (udata) {
		struct siw_uresp_create_qp uresp = {};

		uresp.num_sqe = num_sqe;
		uresp.num_rqe = num_rqe;
		uresp.qp_id = qp_id(qp);

		if (qp->sendq) {
			length = num_sqe * sizeof(struct siw_sqe);
			qp->sq_entry =
				siw_mmap_entry_insert(uctx, qp->sendq,
						      length, &uresp.sq_key);
			if (!qp->sq_entry) {
				rv = -ENOMEM;
				goto err_out_xa;
			}
		}

		if (qp->recvq) {
			length = num_rqe * sizeof(struct siw_rqe);
			qp->rq_entry =
				siw_mmap_entry_insert(uctx, qp->recvq,
						      length, &uresp.rq_key);
			if (!qp->rq_entry) {
				uresp.sq_key = SIW_INVAL_UOBJ_KEY;
				rv = -ENOMEM;
				goto err_out_xa;
			}
		}

		if (udata->outlen < sizeof(uresp)) {
			rv = -EINVAL;
			goto err_out_xa;
		}
		rv = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
		if (rv)
			goto err_out_xa;
	}
	qp->tx_cpu = siw_get_tx_cpu(sdev);
	if (qp->tx_cpu < 0) {
		rv = -EINVAL;
		goto err_out_xa;
	}
	INIT_LIST_HEAD(&qp->devq);
	spin_lock_irqsave(&sdev->lock, flags);
	list_add_tail(&qp->devq, &sdev->qp_list);
	spin_unlock_irqrestore(&sdev->lock, flags);

	return &qp->base_qp;

err_out_xa:
	xa_erase(&sdev->qp_xa, qp_id(qp));
err_out:
	if (qp) {
		if (uctx) {
			rdma_user_mmap_entry_remove(qp->sq_entry);
			rdma_user_mmap_entry_remove(qp->rq_entry);
		}
		vfree(qp->sendq);
		vfree(qp->recvq);
		kfree(qp);
	}
	atomic_dec(&sdev->num_qp);

	return ERR_PTR(rv);
}

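/*
 * Minimum siw_query_qp() verb interface.
 *
 * @qp_attr_mask is not used but all available information is provided.
 */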
int siw_query_qp(struct ib_qp *base_qp, struct ib_qp_attr *qp_attr,
		 int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr)
{
	struct siw_qp *qp;
	struct siw_device *sdev;

	if (base_qp && qp_attr && qp_init_attr) {
		qp = to_siw_qp(base_qp);
		sdev = to_siw_dev(base_qp->device);
	} else {
		return -EINVAL;
	}
	qp_attr->cap.max_inline_data = SIW_MAX_INLINE;
	qp_attr->cap.max_send_wr = qp->attrs.sq_size;
	qp_attr->cap.max_send_sge = qp->attrs.sq_max_sges;
	qp_attr->cap.max_recv_wr = qp->attrs.rq_size;
	qp_attr->cap.max_recv_sge = qp->attrs.rq_max_sges;
	qp_attr->path_mtu = ib_mtu_int_to_enum(sdev->netdev->mtu);
	qp_attr->max_rd_atomic = qp->attrs.irq_size;
	qp_attr->max_dest_rd_atomic = qp->attrs.orq_size;

	qp_attr->qp_access_flags = IB_ACCESS_LOCAL_WRITE |
				   IB_ACCESS_REMOTE_WRITE |
				   IB_ACCESS_REMOTE_READ;

	qp_init_attr->qp_type = base_qp->qp_type;
	qp_init_attr->send_cq = base_qp->send_cq;
	qp_init_attr->recv_cq = base_qp->recv_cq;
	qp_init_attr->srq = base_qp->srq;

	qp_init_attr->cap = qp_attr->cap;

	return 0;
}

int siw_verbs_modify_qp(struct ib_qp *base_qp, struct ib_qp_attr *attr,
			int attr_mask, struct ib_udata *udata)
{
	struct siw_qp_attrs new_attrs;
	enum siw_qp_attr_mask siw_attr_mask = 0;
	struct siw_qp *qp = to_siw_qp(base_qp);
	int rv = 0;

	if (!attr_mask)
		return 0;

	memset(&new_attrs, 0, sizeof(new_attrs));

	if (attr_mask & IB_QP_ACCESS_FLAGS) {
		siw_attr_mask = SIW_QP_ATTR_ACCESS_FLAGS;

		if (attr->qp_access_flags & IB_ACCESS_REMOTE_READ)
			new_attrs.flags |= SIW_RDMA_READ_ENABLED;
		if (attr->qp_access_flags & IB_ACCESS_REMOTE_WRITE)
			new_attrs.flags |= SIW_RDMA_WRITE_ENABLED;
		if (attr->qp_access_flags & IB_ACCESS_MW_BIND)
			new_attrs.flags |= SIW_RDMA_BIND_ENABLED;
	}
	if (attr_mask & IB_QP_STATE) {
		siw_dbg_qp(qp, "desired IB QP state: %s\n",
			   ib_qp_state_to_string[attr->qp_state]);

		new_attrs.state = ib_qp_state_to_siw_qp_state[attr->qp_state];

		if (new_attrs.state > SIW_QP_STATE_RTS)
			qp->tx_ctx.tx_suspend = 1;

		siw_attr_mask |= SIW_QP_ATTR_STATE;
	}
	if (!siw_attr_mask)
		goto out;

	down_write(&qp->state_lock);

	rv = siw_qp_modify(qp, &new_attrs, siw_attr_mask);

	up_write(&qp->state_lock);
out:
	return rv;
}

int siw_destroy_qp(struct ib_qp *base_qp, struct ib_udata *udata)
{
	struct siw_qp *qp = to_siw_qp(base_qp);
	struct siw_ucontext *uctx =
		rdma_udata_to_drv_context(udata, struct siw_ucontext,
					  base_ucontext);
	struct siw_qp_attrs qp_attrs;

	siw_dbg_qp(qp, "state %d\n", qp->attrs.state);

	/*
	 * Mark QP as in process of destruction to prevent from
	 * any async callbacks to RDMA core
	 */
	qp->attrs.flags |= SIW_QP_IN_DESTROY;
	qp->rx_stream.rx_suspend = 1;

	if (uctx) {
		rdma_user_mmap_entry_remove(qp->sq_entry);
		rdma_user_mmap_entry_remove(qp->rq_entry);
	}

	down_write(&qp->state_lock);

	qp_attrs.state = SIW_QP_STATE_ERROR;
	siw_qp_modify(qp, &qp_attrs, SIW_QP_ATTR_STATE);

	if (qp->cep) {
		siw_cep_put(qp->cep);
		qp->cep = NULL;
	}
	up_write(&qp->state_lock);

	kfree(qp->tx_ctx.mpa_crc_hd);
	kfree(qp->rx_stream.mpa_crc_hd);

	qp->scq = qp->rcq = NULL;

	siw_qp_put(qp);

	return 0;
}

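/*
 * siw_copy_inline_sgl()
 *
 * Collect all inline data given in the SGL of a send WR into the
 * SQE, referenced by a single SGE. Returns the number of bytes
 * copied, or a negative value if the data exceeds SIW_MAX_INLINE.
 */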
static int siw_copy_inline_sgl(const struct ib_send_wr *core_wr,
			       struct siw_sqe *sqe)
{
	struct ib_sge *core_sge = core_wr->sg_list;
	void *kbuf = &sqe->sge[1];
	int num_sge = core_wr->num_sge, bytes = 0;

	sqe->sge[0].laddr = (uintptr_t)kbuf;
	sqe->sge[0].lkey = 0;

	while (num_sge--) {
		if (!core_sge->length) {
			core_sge++;
			continue;
		}
		bytes += core_sge->length;
		if (bytes > SIW_MAX_INLINE) {
			bytes = -EINVAL;
			break;
		}
		memcpy(kbuf, (void *)(uintptr_t)core_sge->addr,
		       core_sge->length);

		kbuf += core_sge->length;
		core_sge++;
	}
	sqe->sge[0].length = bytes > 0 ? bytes : 0;
	sqe->num_sge = bytes > 0 ? 1 : 0;

	return bytes;
}

/* Complete SQ WR's without processing */
static int siw_sq_flush_wr(struct siw_qp *qp, const struct ib_send_wr *wr,
			   const struct ib_send_wr **bad_wr)
{
	struct siw_sqe sqe = {};
	int rv = 0;

	while (wr) {
		sqe.id = wr->wr_id;
		sqe.opcode = wr->opcode;
		rv = siw_sqe_complete(qp, &sqe, 0, SIW_WC_WR_FLUSH_ERR);
		if (rv) {
			if (bad_wr)
				*bad_wr = wr;
			break;
		}
		wr = wr->next;
	}
	return rv;
}

/* Complete RQ WR's without processing */
static int siw_rq_flush_wr(struct siw_qp *qp, const struct ib_recv_wr *wr,
			   const struct ib_recv_wr **bad_wr)
{
	struct siw_rqe rqe = {};
	int rv = 0;

	while (wr) {
		rqe.id = wr->wr_id;
		rv = siw_rqe_complete(qp, &rqe, 0, 0, SIW_WC_WR_FLUSH_ERR);
		if (rv) {
			if (bad_wr)
				*bad_wr = wr;
			break;
		}
		wr = wr->next;
	}
	return rv;
}

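/*
 * siw_post_send()
 *
 * Post a list of S-WR's to a SQ.
 *
 * @base_qp:	Base QP contained in siw QP
 * @wr:		Null terminated list of user WR's
 * @bad_wr:	Points to failing WR in case of synchronous failure.
 */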
int siw_post_send(struct ib_qp *base_qp, const struct ib_send_wr *wr,
		  const struct ib_send_wr **bad_wr)
{
	struct siw_qp *qp = to_siw_qp(base_qp);
	struct siw_wqe *wqe = tx_wqe(qp);

	unsigned long flags;
	int rv = 0;

	if (wr && !rdma_is_kernel_res(&qp->base_qp.res)) {
		siw_dbg_qp(qp, "wr must be empty for user mapped sq\n");
		*bad_wr = wr;
		return -EINVAL;
	}

	/*
	 * Try to acquire QP state lock. Must be non-blocking
	 * to accommodate kernel clients needs.
	 */
	if (!down_read_trylock(&qp->state_lock)) {
		if (qp->attrs.state == SIW_QP_STATE_ERROR) {
			/*
			 * ERROR state is final, so we can be sure
			 * this state will not change as long as the QP
			 * exists.
			 *
			 * This handles an ib_drain_sq() call with
			 * a concurrent request to set the QP state
			 * to ERROR.
			 */
			rv = siw_sq_flush_wr(qp, wr, bad_wr);
		} else {
			siw_dbg_qp(qp, "QP locked, state %d\n",
				   qp->attrs.state);
			*bad_wr = wr;
			rv = -ENOTCONN;
		}
		return rv;
	}
	if (unlikely(qp->attrs.state != SIW_QP_STATE_RTS)) {
		if (qp->attrs.state == SIW_QP_STATE_ERROR) {
			/*
			 * Immediately flush this WR to CQ, if QP
			 * is in ERROR state. SQ is guaranteed to
			 * be empty, so WR completes in-order.
			 *
			 * Typically triggered by ib_drain_sq().
			 */
			rv = siw_sq_flush_wr(qp, wr, bad_wr);
		} else {
			siw_dbg_qp(qp, "QP out of state %d\n",
				   qp->attrs.state);
			*bad_wr = wr;
			rv = -ENOTCONN;
		}
		up_read(&qp->state_lock);
		return rv;
	}
	spin_lock_irqsave(&qp->sq_lock, flags);

	while (wr) {
		u32 idx = qp->sq_put % qp->attrs.sq_size;
		struct siw_sqe *sqe = &qp->sendq[idx];

		if (sqe->flags) {
			siw_dbg_qp(qp, "sq full\n");
			rv = -ENOMEM;
			break;
		}
		if (wr->num_sge > qp->attrs.sq_max_sges) {
			siw_dbg_qp(qp, "too many sge's: %d\n", wr->num_sge);
			rv = -EINVAL;
			break;
		}
		sqe->id = wr->wr_id;

		if ((wr->send_flags & IB_SEND_SIGNALED) ||
		    (qp->attrs.flags & SIW_SIGNAL_ALL_WR))
			sqe->flags |= SIW_WQE_SIGNALLED;

		if (wr->send_flags & IB_SEND_FENCE)
			sqe->flags |= SIW_WQE_READ_FENCE;

		switch (wr->opcode) {
		case IB_WR_SEND:
		case IB_WR_SEND_WITH_INV:
			if (wr->send_flags & IB_SEND_SOLICITED)
				sqe->flags |= SIW_WQE_SOLICITED;

			if (!(wr->send_flags & IB_SEND_INLINE)) {
				siw_copy_sgl(wr->sg_list, sqe->sge,
					     wr->num_sge);
				sqe->num_sge = wr->num_sge;
			} else {
				rv = siw_copy_inline_sgl(wr, sqe);
				if (rv <= 0) {
					rv = -EINVAL;
					break;
				}
				sqe->flags |= SIW_WQE_INLINE;
				sqe->num_sge = 1;
			}
			if (wr->opcode == IB_WR_SEND)
				sqe->opcode = SIW_OP_SEND;
			else {
				sqe->opcode = SIW_OP_SEND_REMOTE_INV;
				sqe->rkey = wr->ex.invalidate_rkey;
			}
			break;

		case IB_WR_RDMA_READ_WITH_INV:
		case IB_WR_RDMA_READ:
			/*
			 * iWarp restricts RREAD sink to SGL containing
			 * 1 SGE only. we could relax to SGL with multiple
			 * elements referring the SAME ltag or even sending
			 * a private per-rreq tag referring to a checked
			 * local sgl with MULTIPLE ltag's.
			 */
			if (unlikely(wr->num_sge != 1)) {
				rv = -EINVAL;
				break;
			}
			siw_copy_sgl(wr->sg_list, &sqe->sge[0], 1);
			/*
			 * NOTE: zero length RREAD is allowed!
			 */
			sqe->raddr = rdma_wr(wr)->remote_addr;
			sqe->rkey = rdma_wr(wr)->rkey;
			sqe->num_sge = 1;

			if (wr->opcode == IB_WR_RDMA_READ)
				sqe->opcode = SIW_OP_READ;
			else
				sqe->opcode = SIW_OP_READ_LOCAL_INV;
			break;

		case IB_WR_RDMA_WRITE:
			if (!(wr->send_flags & IB_SEND_INLINE)) {
				siw_copy_sgl(wr->sg_list, &sqe->sge[0],
					     wr->num_sge);
				sqe->num_sge = wr->num_sge;
			} else {
				rv = siw_copy_inline_sgl(wr, sqe);
				if (unlikely(rv < 0)) {
					rv = -EINVAL;
					break;
				}
				sqe->flags |= SIW_WQE_INLINE;
				sqe->num_sge = 1;
			}
			sqe->raddr = rdma_wr(wr)->remote_addr;
			sqe->rkey = rdma_wr(wr)->rkey;
			sqe->opcode = SIW_OP_WRITE;
			break;

		case IB_WR_REG_MR:
			sqe->base_mr = (uintptr_t)reg_wr(wr)->mr;
			sqe->rkey = reg_wr(wr)->key;
			sqe->access = reg_wr(wr)->access & IWARP_ACCESS_MASK;
			sqe->opcode = SIW_OP_REG_MR;
			break;

		case IB_WR_LOCAL_INV:
			sqe->rkey = wr->ex.invalidate_rkey;
			sqe->opcode = SIW_OP_INVAL_STAG;
			break;

		default:
			siw_dbg_qp(qp, "ib wr type %d unsupported\n",
				   wr->opcode);
			rv = -EINVAL;
			break;
		}
		siw_dbg_qp(qp, "opcode %d, flags 0x%x, wr_id 0x%pK\n",
			   sqe->opcode, sqe->flags,
			   (void *)(uintptr_t)sqe->id);

		if (unlikely(rv < 0))
			break;

		/* make SQE only valid after completely written */
		smp_wmb();
		sqe->flags |= SIW_WQE_VALID;

		qp->sq_put++;
		wr = wr->next;
	}

	/*
	 * Send directly if SQ processing is not in progress.
	 * Eventual immediate errors (rv < 0) do not affect the involved
	 * RI resources (Verbs, 8.3.1) and thus do not prevent from SQ
	 * processing, if new work is already pending. But rv must be passed
	 * to caller.
	 */
	if (wqe->wr_status != SIW_WR_IDLE) {
		spin_unlock_irqrestore(&qp->sq_lock, flags);
		goto skip_direct_sending;
	}
	rv = siw_activate_tx(qp);
	spin_unlock_irqrestore(&qp->sq_lock, flags);

	if (rv <= 0)
		goto skip_direct_sending;

	if (rdma_is_kernel_res(&qp->base_qp.res)) {
		rv = siw_sq_start(qp);
	} else {
		qp->tx_ctx.in_syscall = 1;

		if (siw_qp_sq_process(qp) != 0 && !(qp->tx_ctx.tx_suspend))
			siw_qp_cm_drop(qp, 0);

		qp->tx_ctx.in_syscall = 0;
	}
skip_direct_sending:

	up_read(&qp->state_lock);

	if (rv >= 0)
		return 0;
	/*
	 * Immediate error
	 */
	siw_dbg_qp(qp, "error %d\n", rv);

	*bad_wr = wr;
	return rv;
}

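/*
 * siw_post_receive()
 *
 * Post a list of R-WR's to a RQ.
 *
 * @base_qp:	Base QP contained in siw QP
 * @wr:		Null terminated list of user WR's
 * @bad_wr:	Points to failing WR in case of synchronous failure.
 */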
int siw_post_receive(struct ib_qp *base_qp, const struct ib_recv_wr *wr,
		     const struct ib_recv_wr **bad_wr)
{
	struct siw_qp *qp = to_siw_qp(base_qp);
	unsigned long flags;
	int rv = 0;

	if (qp->srq) {
		*bad_wr = wr;
		return -EOPNOTSUPP;
	}
	if (!rdma_is_kernel_res(&qp->base_qp.res)) {
		siw_dbg_qp(qp, "no kernel post_recv for user mapped rq\n");
		*bad_wr = wr;
		return -EINVAL;
	}

	/*
	 * Try to acquire QP state lock. Must be non-blocking
	 * to accommodate kernel clients needs.
	 */
	if (!down_read_trylock(&qp->state_lock)) {
		if (qp->attrs.state == SIW_QP_STATE_ERROR) {
			/*
			 * ERROR state is final, so we can be sure
			 * this state will not change as long as the QP
			 * exists.
			 *
			 * This handles an ib_drain_rq() call with
			 * a concurrent request to set the QP state
			 * to ERROR.
			 */
			rv = siw_rq_flush_wr(qp, wr, bad_wr);
		} else {
			siw_dbg_qp(qp, "QP locked, state %d\n",
				   qp->attrs.state);
			*bad_wr = wr;
			rv = -ENOTCONN;
		}
		return rv;
	}
	if (qp->attrs.state > SIW_QP_STATE_RTS) {
		if (qp->attrs.state == SIW_QP_STATE_ERROR) {
			/*
			 * Immediately flush this WR to CQ, if QP
			 * is in ERROR state. RQ is guaranteed to
			 * be empty, so WR completes in-order.
			 *
			 * Typically triggered by ib_drain_rq().
			 */
			rv = siw_rq_flush_wr(qp, wr, bad_wr);
		} else {
			siw_dbg_qp(qp, "QP out of state %d\n",
				   qp->attrs.state);
			*bad_wr = wr;
			rv = -ENOTCONN;
		}
		up_read(&qp->state_lock);
		return rv;
	}
	/*
	 * Serialize potentially multiple producers.
	 * Not needed for single threaded consumer side.
	 */
	spin_lock_irqsave(&qp->rq_lock, flags);

	while (wr) {
		u32 idx = qp->rq_put % qp->attrs.rq_size;
		struct siw_rqe *rqe = &qp->recvq[idx];

		if (rqe->flags) {
			siw_dbg_qp(qp, "RQ full\n");
			rv = -ENOMEM;
			break;
		}
		if (wr->num_sge > qp->attrs.rq_max_sges) {
			siw_dbg_qp(qp, "too many sge's: %d\n", wr->num_sge);
			rv = -EINVAL;
			break;
		}
		rqe->id = wr->wr_id;
		rqe->num_sge = wr->num_sge;
		siw_copy_sgl(wr->sg_list, rqe->sge, wr->num_sge);

		/* make sure RQE is completely written before valid */
		smp_wmb();

		rqe->flags = SIW_WQE_VALID;

		qp->rq_put++;
		wr = wr->next;
	}
	spin_unlock_irqrestore(&qp->rq_lock, flags);

	up_read(&qp->state_lock);

	if (rv < 0) {
		siw_dbg_qp(qp, "error %d\n", rv);
		*bad_wr = wr;
	}
	return rv > 0 ? 0 : rv;
}

void siw_destroy_cq(struct ib_cq *base_cq, struct ib_udata *udata)
{
	struct siw_cq *cq = to_siw_cq(base_cq);
	struct siw_device *sdev = to_siw_dev(base_cq->device);
	struct siw_ucontext *ctx =
		rdma_udata_to_drv_context(udata, struct siw_ucontext,
					  base_ucontext);

	siw_dbg_cq(cq, "free CQ resources\n");

	siw_cq_flush(cq);

	if (ctx)
		rdma_user_mmap_entry_remove(cq->cq_entry);

	atomic_dec(&sdev->num_cq);

	vfree(cq->queue);
}

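/*
 * siw_create_cq()
 *
 * Populate CQ of requested size
 *
 * @base_cq: CQ as allocated by RDMA midlayer
 * @attr: Initial CQ attributes
 * @udata: relates to user context
 */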
int siw_create_cq(struct ib_cq *base_cq, const struct ib_cq_init_attr *attr,
		  struct ib_udata *udata)
{
	struct siw_device *sdev = to_siw_dev(base_cq->device);
	struct siw_cq *cq = to_siw_cq(base_cq);
	int rv, size = attr->cqe;

	if (atomic_inc_return(&sdev->num_cq) > SIW_MAX_CQ) {
		siw_dbg(base_cq->device, "too many CQ's\n");
		rv = -ENOMEM;
		goto err_out;
	}
	if (size < 1 || size > sdev->attrs.max_cqe) {
		siw_dbg(base_cq->device, "CQ size error: %d\n", size);
		rv = -EINVAL;
		goto err_out;
	}
	size = roundup_pow_of_two(size);
	cq->base_cq.cqe = size;
	cq->num_cqe = size;

	if (udata)
		cq->queue = vmalloc_user(size * sizeof(struct siw_cqe) +
					 sizeof(struct siw_cq_ctrl));
	else
		cq->queue = vzalloc(size * sizeof(struct siw_cqe) +
				    sizeof(struct siw_cq_ctrl));

	if (cq->queue == NULL) {
		rv = -ENOMEM;
		goto err_out;
	}
	get_random_bytes(&cq->id, 4);
	siw_dbg(base_cq->device, "new CQ [%u]\n", cq->id);

	spin_lock_init(&cq->lock);

	cq->notify = (struct siw_cq_ctrl *)&cq->queue[size];

	if (udata) {
		struct siw_uresp_create_cq uresp = {};
		struct siw_ucontext *ctx =
			rdma_udata_to_drv_context(udata, struct siw_ucontext,
						  base_ucontext);
		size_t length = size * sizeof(struct siw_cqe) +
			sizeof(struct siw_cq_ctrl);

		cq->cq_entry =
			siw_mmap_entry_insert(ctx, cq->queue,
					      length, &uresp.cq_key);
		if (!cq->cq_entry) {
			rv = -ENOMEM;
			goto err_out;
		}

		uresp.cq_id = cq->id;
		uresp.num_cqe = size;

		if (udata->outlen < sizeof(uresp)) {
			rv = -EINVAL;
			goto err_out;
		}
		rv = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
		if (rv)
			goto err_out;
	}
	return 0;

err_out:
	siw_dbg(base_cq->device, "CQ creation failed: %d", rv);

	if (cq && cq->queue) {
		struct siw_ucontext *ctx =
			rdma_udata_to_drv_context(udata, struct siw_ucontext,
						  base_ucontext);
		if (ctx)
			rdma_user_mmap_entry_remove(cq->cq_entry);
		vfree(cq->queue);
	}
	atomic_dec(&sdev->num_cq);

	return rv;
}

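/*
 * siw_poll_cq()
 *
 * Reap CQ entries if available and copy work completion status into
 * array of WC's provided by caller. Returns number of reaped CQE's.
 *
 * @base_cq:	Base CQ contained in siw CQ.
 * @num_cqe:	Maximum number of CQE's to reap.
 * @wc:		Array of work completions provided by caller.
 */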
int siw_poll_cq(struct ib_cq *base_cq, int num_cqe, struct ib_wc *wc)
{
	struct siw_cq *cq = to_siw_cq(base_cq);
	int i;

	for (i = 0; i < num_cqe; i++) {
		if (!siw_reap_cqe(cq, wc))
			break;
		wc++;
	}
	return i;
}

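/*
 * siw_req_notify_cq()
 *
 * Request notification for new CQE's added to that CQ.
 * Defined flags:
 * o IB_CQ_SOLICITED lets siw trigger a notification event only for
 *   completions carrying the solicited flag.
 * o IB_CQ_NEXT_COMP lets siw trigger a notification event for any
 *   new completion.
 * o IB_CQ_REPORT_MISSED_EVENTS makes the return value report the
 *   number of CQE's which are posted but not yet reaped.
 *
 * @base_cq:	Base CQ contained in siw CQ.
 * @flags:	Requested notification flags.
 */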
int siw_req_notify_cq(struct ib_cq *base_cq, enum ib_cq_notify_flags flags)
{
	struct siw_cq *cq = to_siw_cq(base_cq);

	siw_dbg_cq(cq, "flags: 0x%02x\n", flags);

	if ((flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED)
		/*
		 * Enable CQ event for next solicited completion
		 * and make it visible to all associated producers.
		 */
		smp_store_mb(cq->notify->flags, SIW_NOTIFY_SOLICITED);
	else
		/*
		 * Enable CQ event for any signalled completion
		 * and make it visible to all associated producers.
		 */
		smp_store_mb(cq->notify->flags, SIW_NOTIFY_ALL);

	if (flags & IB_CQ_REPORT_MISSED_EVENTS)
		return cq->cq_put - cq->cq_get;

	return 0;
}

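/*
 * siw_dereg_mr()
 *
 * Release Memory Region.
 *
 * @base_mr: Base MR contained in siw MR.
 * @udata: points to user context, unused.
 */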
int siw_dereg_mr(struct ib_mr *base_mr, struct ib_udata *udata)
{
	struct siw_mr *mr = to_siw_mr(base_mr);
	struct siw_device *sdev = to_siw_dev(base_mr->device);

	siw_dbg_mem(mr->mem, "deregister MR\n");

	atomic_dec(&sdev->num_mr);

	siw_mr_drop_mem(mr);
	kfree_rcu(mr, rcu);

	return 0;
}

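/*
 * siw_reg_user_mr()
 *
 * Register Memory Region.
 *
 * @pd:		Protection Domain
 * @start:	starting address of MR (virtual address)
 * @len:	len of MR
 * @rnic_va:	not used by siw
 * @rights:	MR access rights
 * @udata:	user buffer to communicate STag and Key.
 */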
struct ib_mr *siw_reg_user_mr(struct ib_pd *pd, u64 start, u64 len,
			      u64 rnic_va, int rights, struct ib_udata *udata)
{
	struct siw_mr *mr = NULL;
	struct siw_umem *umem = NULL;
	struct siw_ureq_reg_mr ureq;
	struct siw_device *sdev = to_siw_dev(pd->device);

	unsigned long mem_limit = rlimit(RLIMIT_MEMLOCK);
	int rv;

	siw_dbg_pd(pd, "start: 0x%pK, va: 0x%pK, len: %llu\n",
		   (void *)(uintptr_t)start, (void *)(uintptr_t)rnic_va,
		   (unsigned long long)len);

	if (atomic_inc_return(&sdev->num_mr) > SIW_MAX_MR) {
		siw_dbg_pd(pd, "too many mr's\n");
		rv = -ENOMEM;
		goto err_out;
	}
	if (!len) {
		rv = -EINVAL;
		goto err_out;
	}
	if (mem_limit != RLIM_INFINITY) {
		unsigned long num_pages =
			(PAGE_ALIGN(len + (start & ~PAGE_MASK))) >> PAGE_SHIFT;
		mem_limit >>= PAGE_SHIFT;

		if (num_pages > mem_limit - current->mm->locked_vm) {
			siw_dbg_pd(pd, "pages req %lu, max %lu, lock %lu\n",
				   num_pages, mem_limit,
				   current->mm->locked_vm);
			rv = -ENOMEM;
			goto err_out;
		}
	}
	umem = siw_umem_get(start, len, ib_access_writable(rights));
	if (IS_ERR(umem)) {
		rv = PTR_ERR(umem);
		siw_dbg_pd(pd, "getting user memory failed: %d\n", rv);
		umem = NULL;
		goto err_out;
	}
	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr) {
		rv = -ENOMEM;
		goto err_out;
	}
	rv = siw_mr_add_mem(mr, pd, umem, start, len, rights);
	if (rv)
		goto err_out;

	if (udata) {
		struct siw_uresp_reg_mr uresp = {};
		struct siw_mem *mem = mr->mem;

		if (udata->inlen < sizeof(ureq)) {
			rv = -EINVAL;
			goto err_out;
		}
		rv = ib_copy_from_udata(&ureq, udata, sizeof(ureq));
		if (rv)
			goto err_out;

		mr->base_mr.lkey |= ureq.stag_key;
		mr->base_mr.rkey |= ureq.stag_key;
		mem->stag |= ureq.stag_key;
		uresp.stag = mem->stag;

		if (udata->outlen < sizeof(uresp)) {
			rv = -EINVAL;
			goto err_out;
		}
		rv = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
		if (rv)
			goto err_out;
	}
	mr->mem->stag_valid = 1;

	return &mr->base_mr;

err_out:
	atomic_dec(&sdev->num_mr);
	if (mr) {
		if (mr->mem)
			siw_mr_drop_mem(mr);
		kfree_rcu(mr, rcu);
	} else {
		if (umem)
			siw_umem_release(umem, false);
	}
	return ERR_PTR(rv);
}

struct ib_mr *siw_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
			   u32 max_sge)
{
	struct siw_device *sdev = to_siw_dev(pd->device);
	struct siw_mr *mr = NULL;
	struct siw_pbl *pbl = NULL;
	int rv;

	if (atomic_inc_return(&sdev->num_mr) > SIW_MAX_MR) {
		siw_dbg_pd(pd, "too many mr's\n");
		rv = -ENOMEM;
		goto err_out;
	}
	if (mr_type != IB_MR_TYPE_MEM_REG) {
		siw_dbg_pd(pd, "mr type %d unsupported\n", mr_type);
		rv = -EOPNOTSUPP;
		goto err_out;
	}
	if (max_sge > SIW_MAX_SGE_PBL) {
		siw_dbg_pd(pd, "too many sge's: %d\n", max_sge);
		rv = -ENOMEM;
		goto err_out;
	}
	pbl = siw_pbl_alloc(max_sge);
	if (IS_ERR(pbl)) {
		rv = PTR_ERR(pbl);
		siw_dbg_pd(pd, "pbl allocation failed: %d\n", rv);
		pbl = NULL;
		goto err_out;
	}
	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr) {
		rv = -ENOMEM;
		goto err_out;
	}
	rv = siw_mr_add_mem(mr, pd, pbl, 0, max_sge * PAGE_SIZE, 0);
	if (rv)
		goto err_out;

	mr->mem->is_pbl = 1;

	siw_dbg_pd(pd, "[MEM %u]: success\n", mr->mem->stag);

	return &mr->base_mr;

err_out:
	atomic_dec(&sdev->num_mr);

	if (!mr) {
		kfree(pbl);
	} else {
		if (mr->mem)
			siw_mr_drop_mem(mr);
		kfree_rcu(mr, rcu);
	}
	siw_dbg_pd(pd, "failed: %d\n", rv);

	return ERR_PTR(rv);
}

/* Just used to count number of pages being mapped */
static int siw_set_pbl_page(struct ib_mr *base_mr, u64 buf_addr)
{
	return 0;
}

int siw_map_mr_sg(struct ib_mr *base_mr, struct scatterlist *sl, int num_sle,
		  unsigned int *sg_off)
{
	struct scatterlist *slp;
	struct siw_mr *mr = to_siw_mr(base_mr);
	struct siw_mem *mem = mr->mem;
	struct siw_pbl *pbl = mem->pbl;
	struct siw_pble *pble;
	unsigned long pbl_size;
	int i, rv;

	if (!pbl) {
		siw_dbg_mem(mem, "no PBL allocated\n");
		return -EINVAL;
	}
	pble = pbl->pbe;

	if (pbl->max_buf < num_sle) {
		siw_dbg_mem(mem, "too many SGE's: %d > %d\n",
			    mem->pbl->max_buf, num_sle);
		return -ENOMEM;
	}
	for_each_sg(sl, slp, num_sle, i) {
		if (sg_dma_len(slp) == 0) {
			siw_dbg_mem(mem, "empty SGE\n");
			return -EINVAL;
		}
		if (i == 0) {
			pble->addr = sg_dma_address(slp);
			pble->size = sg_dma_len(slp);
			pble->pbl_off = 0;
			pbl_size = pble->size;
			pbl->num_buf = 1;
		} else {
			/* Merge PBL entries if adjacent */
			if (pble->addr + pble->size == sg_dma_address(slp)) {
				pble->size += sg_dma_len(slp);
			} else {
				pble++;
				pbl->num_buf++;
				pble->addr = sg_dma_address(slp);
				pble->size = sg_dma_len(slp);
				pble->pbl_off = pbl_size;
			}
			pbl_size += sg_dma_len(slp);
		}
		siw_dbg_mem(mem,
			    "sge[%d], size %u, addr 0x%p, total %lu\n",
			    i, pble->size, (void *)(uintptr_t)pble->addr,
			    pbl_size);
	}
	rv = ib_sg_to_pages(base_mr, sl, num_sle, sg_off, siw_set_pbl_page);
	if (rv > 0) {
		mem->len = base_mr->length;
		mem->va = base_mr->iova;
		siw_dbg_mem(mem,
			    "%llu bytes, start 0x%pK, %u SLE to %u entries\n",
			    mem->len, (void *)(uintptr_t)mem->va, num_sle,
			    pbl->num_buf);
	}
	return rv;
}

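/*
 * siw_get_dma_mr()
 *
 * Create an (empty) DMA memory region, where no umem is attached.
 */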
struct ib_mr *siw_get_dma_mr(struct ib_pd *pd, int rights)
{
	struct siw_device *sdev = to_siw_dev(pd->device);
	struct siw_mr *mr = NULL;
	int rv;

	if (atomic_inc_return(&sdev->num_mr) > SIW_MAX_MR) {
		siw_dbg_pd(pd, "too many mr's\n");
		rv = -ENOMEM;
		goto err_out;
	}
	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr) {
		rv = -ENOMEM;
		goto err_out;
	}
	rv = siw_mr_add_mem(mr, pd, NULL, 0, ULONG_MAX, rights);
	if (rv)
		goto err_out;

	mr->mem->stag_valid = 1;

	siw_dbg_pd(pd, "[MEM %u]: success\n", mr->mem->stag);

	return &mr->base_mr;

err_out:
	if (rv)
		kfree(mr);

	atomic_dec(&sdev->num_mr);

	return ERR_PTR(rv);
}

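/*
 * siw_create_srq()
 *
 * Create Shared Receive Queue of attributes @init_attrs
 * within protection domain given by @pd.
 *
 * @base_srq:	Base SRQ contained in siw SRQ.
 * @init_attrs:	SRQ init attributes.
 * @udata:	points to user context
 */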
int siw_create_srq(struct ib_srq *base_srq,
		   struct ib_srq_init_attr *init_attrs, struct ib_udata *udata)
{
	struct siw_srq *srq = to_siw_srq(base_srq);
	struct ib_srq_attr *attrs = &init_attrs->attr;
	struct siw_device *sdev = to_siw_dev(base_srq->device);
	struct siw_ucontext *ctx =
		rdma_udata_to_drv_context(udata, struct siw_ucontext,
					  base_ucontext);
	int rv;

	if (atomic_inc_return(&sdev->num_srq) > SIW_MAX_SRQ) {
		siw_dbg_pd(base_srq->pd, "too many SRQ's\n");
		rv = -ENOMEM;
		goto err_out;
	}
	if (attrs->max_wr == 0 || attrs->max_wr > SIW_MAX_SRQ_WR ||
	    attrs->max_sge > SIW_MAX_SGE || attrs->srq_limit > attrs->max_wr) {
		rv = -EINVAL;
		goto err_out;
	}
	srq->max_sge = attrs->max_sge;
	srq->num_rqe = roundup_pow_of_two(attrs->max_wr);
	srq->limit = attrs->srq_limit;
	if (srq->limit)
		srq->armed = true;

	srq->is_kernel_res = !udata;

	if (udata)
		srq->recvq =
			vmalloc_user(srq->num_rqe * sizeof(struct siw_rqe));
	else
		srq->recvq = vzalloc(srq->num_rqe * sizeof(struct siw_rqe));

	if (srq->recvq == NULL) {
		rv = -ENOMEM;
		goto err_out;
	}
	if (udata) {
		struct siw_uresp_create_srq uresp = {};
		size_t length = srq->num_rqe * sizeof(struct siw_rqe);

		srq->srq_entry =
			siw_mmap_entry_insert(ctx, srq->recvq,
					      length, &uresp.srq_key);
		if (!srq->srq_entry) {
			rv = -ENOMEM;
			goto err_out;
		}

		uresp.num_rqe = srq->num_rqe;

		if (udata->outlen < sizeof(uresp)) {
			rv = -EINVAL;
			goto err_out;
		}
		rv = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
		if (rv)
			goto err_out;
	}
	spin_lock_init(&srq->lock);

	siw_dbg_pd(base_srq->pd, "[SRQ]: success\n");

	return 0;

err_out:
	if (srq->recvq) {
		if (ctx)
			rdma_user_mmap_entry_remove(srq->srq_entry);
		vfree(srq->recvq);
	}
	atomic_dec(&sdev->num_srq);

	return rv;
}

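/*
 * siw_modify_srq()
 *
 * Modify SRQ. Resizing the SRQ is not supported; the caller may
 * set/reset the notification limit and thereby (re)arm the
 * IB_EVENT_SRQ_LIMIT_REACHED notification.
 *
 * NOTE: attrs->max_sge is neither checked nor changed here.
 */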
int siw_modify_srq(struct ib_srq *base_srq, struct ib_srq_attr *attrs,
		   enum ib_srq_attr_mask attr_mask, struct ib_udata *udata)
{
	struct siw_srq *srq = to_siw_srq(base_srq);
	unsigned long flags;
	int rv = 0;

	spin_lock_irqsave(&srq->lock, flags);

	if (attr_mask & IB_SRQ_MAX_WR) {
		/* resize request not yet supported */
		rv = -EOPNOTSUPP;
		goto out;
	}
	if (attr_mask & IB_SRQ_LIMIT) {
		if (attrs->srq_limit) {
			if (unlikely(attrs->srq_limit > srq->num_rqe)) {
				rv = -EINVAL;
				goto out;
			}
			srq->armed = true;
		} else {
			srq->armed = false;
		}
		srq->limit = attrs->srq_limit;
	}
out:
	spin_unlock_irqrestore(&srq->lock, flags);

	return rv;
}

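/*
 * siw_query_srq()
 *
 * Query SRQ attributes.
 */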
int siw_query_srq(struct ib_srq *base_srq, struct ib_srq_attr *attrs)
{
	struct siw_srq *srq = to_siw_srq(base_srq);
	unsigned long flags;

	spin_lock_irqsave(&srq->lock, flags);

	attrs->max_wr = srq->num_rqe;
	attrs->max_sge = srq->max_sge;
	attrs->srq_limit = srq->limit;

	spin_unlock_irqrestore(&srq->lock, flags);

	return 0;
}

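/*
 * siw_destroy_srq()
 *
 * Destroy SRQ.
 * It is assumed that the SRQ is not referenced by any QP anymore;
 * the code trusts the RDMA core environment to keep track of QP
 * references.
 */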
void siw_destroy_srq(struct ib_srq *base_srq, struct ib_udata *udata)
{
	struct siw_srq *srq = to_siw_srq(base_srq);
	struct siw_device *sdev = to_siw_dev(base_srq->device);
	struct siw_ucontext *ctx =
		rdma_udata_to_drv_context(udata, struct siw_ucontext,
					  base_ucontext);

	if (ctx)
		rdma_user_mmap_entry_remove(srq->srq_entry);
	vfree(srq->recvq);
	atomic_dec(&sdev->num_srq);
}

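/*
 * siw_post_srq_recv()
 *
 * Post a list of receive queue elements to SRQ.
 * NOTE: The function does not check or lock a certain SRQ state
 *       during the post operation. The code simply trusts the
 *       RDMA core environment.
 *
 * @base_srq:	Base SRQ contained in siw SRQ
 * @wr:		List of R-WR's
 * @bad_wr:	Updated to failing WR if posting fails.
 */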
int siw_post_srq_recv(struct ib_srq *base_srq, const struct ib_recv_wr *wr,
		      const struct ib_recv_wr **bad_wr)
{
	struct siw_srq *srq = to_siw_srq(base_srq);
	unsigned long flags;
	int rv = 0;

	if (unlikely(!srq->is_kernel_res)) {
		siw_dbg_pd(base_srq->pd,
			   "[SRQ]: no kernel post_recv for mapped srq\n");
		rv = -EINVAL;
		goto out;
	}
	/*
	 * Serialize potentially multiple producers.
	 * Also needed to serialize potentially multiple
	 * consumers.
	 */
	spin_lock_irqsave(&srq->lock, flags);

	while (wr) {
		u32 idx = srq->rq_put % srq->num_rqe;
		struct siw_rqe *rqe = &srq->recvq[idx];

		if (rqe->flags) {
			siw_dbg_pd(base_srq->pd, "SRQ full\n");
			rv = -ENOMEM;
			break;
		}
		if (unlikely(wr->num_sge > srq->max_sge)) {
			siw_dbg_pd(base_srq->pd,
				   "[SRQ]: too many sge's: %d\n", wr->num_sge);
			rv = -EINVAL;
			break;
		}
		rqe->id = wr->wr_id;
		rqe->num_sge = wr->num_sge;
		siw_copy_sgl(wr->sg_list, rqe->sge, wr->num_sge);

		/* make sure S-RQE is completely written before valid */
		smp_wmb();

		rqe->flags = SIW_WQE_VALID;

		srq->rq_put++;
		wr = wr->next;
	}
	spin_unlock_irqrestore(&srq->lock, flags);
out:
	if (unlikely(rv < 0)) {
		siw_dbg_pd(base_srq->pd, "[SRQ]: error %d\n", rv);
		*bad_wr = wr;
	}
	return rv;
}

void siw_qp_event(struct siw_qp *qp, enum ib_event_type etype)
{
	struct ib_event event;
	struct ib_qp *base_qp = &qp->base_qp;

	/*
	 * Do not report asynchronous errors on QP which gets
	 * destroyed via verbs interface (siw_destroy_qp())
	 */
	if (qp->attrs.flags & SIW_QP_IN_DESTROY)
		return;

	event.event = etype;
	event.device = base_qp->device;
	event.element.qp = base_qp;

	if (base_qp->event_handler) {
		siw_dbg_qp(qp, "reporting event %d\n", etype);
		base_qp->event_handler(&event, base_qp->qp_context);
	}
}

void siw_cq_event(struct siw_cq *cq, enum ib_event_type etype)
{
	struct ib_event event;
	struct ib_cq *base_cq = &cq->base_cq;

	event.event = etype;
	event.device = base_cq->device;
	event.element.cq = base_cq;

	if (base_cq->event_handler) {
		siw_dbg_cq(cq, "reporting CQ event %d\n", etype);
		base_cq->event_handler(&event, base_cq->cq_context);
	}
}

void siw_srq_event(struct siw_srq *srq, enum ib_event_type etype)
{
	struct ib_event event;
	struct ib_srq *base_srq = &srq->base_srq;

	event.event = etype;
	event.device = base_srq->device;
	event.element.srq = base_srq;

	if (base_srq->event_handler) {
		siw_dbg_pd(srq->base_srq.pd,
			   "reporting SRQ event %d\n", etype);
		base_srq->event_handler(&event, base_srq->srq_context);
	}
}

void siw_port_event(struct siw_device *sdev, u8 port, enum ib_event_type etype)
{
	struct ib_event event;

	event.event = etype;
	event.device = &sdev->base_dev;
	event.element.port_num = port;

	siw_dbg(&sdev->base_dev, "reporting port event %d\n", etype);

	ib_dispatch_event(&event);
}