1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16#include "qemu/osdep.h"
17#include "cpu.h"
18#include "hw/pci/pci.h"
19#include "hw/pci/pci_ids.h"
20
21#include "../rdma_backend.h"
22#include "../rdma_rm.h"
23#include "../rdma_utils.h"
24
25#include "trace.h"
26#include "pvrdma.h"
27#include "standard-headers/rdma/vmw_pvrdma-abi.h"
28
29static void *pvrdma_map_to_pdir(PCIDevice *pdev, uint64_t pdir_dma,
30 uint32_t nchunks, size_t length)
31{
32 uint64_t *dir, *tbl;
33 int tbl_idx, dir_idx, addr_idx;
34 void *host_virt = NULL, *curr_page;
35
36 if (!nchunks) {
37 rdma_error_report("Got nchunks=0");
38 return NULL;
39 }
40
41 length = ROUND_UP(length, TARGET_PAGE_SIZE);
42 if (nchunks * TARGET_PAGE_SIZE != length) {
43 rdma_error_report("Invalid nchunks/length (%u, %lu)", nchunks,
44 (unsigned long)length);
45 return NULL;
46 }
47
48 dir = rdma_pci_dma_map(pdev, pdir_dma, TARGET_PAGE_SIZE);
49 if (!dir) {
50 rdma_error_report("Failed to map to page directory");
51 return NULL;
52 }
53
54 tbl = rdma_pci_dma_map(pdev, dir[0], TARGET_PAGE_SIZE);
55 if (!tbl) {
56 rdma_error_report("Failed to map to page table 0");
57 goto out_unmap_dir;
58 }
59
60 curr_page = rdma_pci_dma_map(pdev, (dma_addr_t)tbl[0], TARGET_PAGE_SIZE);
61 if (!curr_page) {
62 rdma_error_report("Failed to map the page 0");
63 goto out_unmap_tbl;
64 }
65
66 host_virt = mremap(curr_page, 0, length, MREMAP_MAYMOVE);
67 if (host_virt == MAP_FAILED) {
68 host_virt = NULL;
69 rdma_error_report("Failed to remap memory for host_virt");
70 goto out_unmap_tbl;
71 }
72 trace_pvrdma_map_to_pdir_host_virt(curr_page, host_virt);
73
74 rdma_pci_dma_unmap(pdev, curr_page, TARGET_PAGE_SIZE);
75
76 dir_idx = 0;
77 tbl_idx = 1;
78 addr_idx = 1;
79 while (addr_idx < nchunks) {
80 if (tbl_idx == TARGET_PAGE_SIZE / sizeof(uint64_t)) {
81 tbl_idx = 0;
82 dir_idx++;
83 rdma_pci_dma_unmap(pdev, tbl, TARGET_PAGE_SIZE);
84 tbl = rdma_pci_dma_map(pdev, dir[dir_idx], TARGET_PAGE_SIZE);
85 if (!tbl) {
86 rdma_error_report("Failed to map to page table %d", dir_idx);
87 goto out_unmap_host_virt;
88 }
89 }
90
91 curr_page = rdma_pci_dma_map(pdev, (dma_addr_t)tbl[tbl_idx],
92 TARGET_PAGE_SIZE);
93 if (!curr_page) {
94 rdma_error_report("Failed to map to page %d, dir %d", tbl_idx,
95 dir_idx);
96 goto out_unmap_host_virt;
97 }
98
99 mremap(curr_page, 0, TARGET_PAGE_SIZE, MREMAP_MAYMOVE | MREMAP_FIXED,
100 host_virt + TARGET_PAGE_SIZE * addr_idx);
101
102 trace_pvrdma_map_to_pdir_next_page(addr_idx, curr_page, host_virt +
103 TARGET_PAGE_SIZE * addr_idx);
104
105 rdma_pci_dma_unmap(pdev, curr_page, TARGET_PAGE_SIZE);
106
107 addr_idx++;
108
109 tbl_idx++;
110 }
111
112 goto out_unmap_tbl;
113
114out_unmap_host_virt:
115 munmap(host_virt, length);
116 host_virt = NULL;
117
118out_unmap_tbl:
119 rdma_pci_dma_unmap(pdev, tbl, TARGET_PAGE_SIZE);
120
121out_unmap_dir:
122 rdma_pci_dma_unmap(pdev, dir, TARGET_PAGE_SIZE);
123
124 return host_virt;
125}
126
127static int query_port(PVRDMADev *dev, union pvrdma_cmd_req *req,
128 union pvrdma_cmd_resp *rsp)
129{
130 struct pvrdma_cmd_query_port *cmd = &req->query_port;
131 struct pvrdma_cmd_query_port_resp *resp = &rsp->query_port_resp;
132 struct pvrdma_port_attr attrs = {};
133
134 if (cmd->port_num > MAX_PORTS) {
135 return -EINVAL;
136 }
137
138 if (rdma_backend_query_port(&dev->backend_dev,
139 (struct ibv_port_attr *)&attrs)) {
140 return -ENOMEM;
141 }
142
143 memset(resp, 0, sizeof(*resp));
144
145 resp->attrs.state = dev->func0->device_active ? attrs.state :
146 PVRDMA_PORT_DOWN;
147 resp->attrs.max_mtu = attrs.max_mtu;
148 resp->attrs.active_mtu = attrs.active_mtu;
149 resp->attrs.phys_state = attrs.phys_state;
150 resp->attrs.gid_tbl_len = MIN(MAX_PORT_GIDS, attrs.gid_tbl_len);
151 resp->attrs.max_msg_sz = 1024;
152 resp->attrs.pkey_tbl_len = MIN(MAX_PORT_PKEYS, attrs.pkey_tbl_len);
153 resp->attrs.active_width = 1;
154 resp->attrs.active_speed = 1;
155
156 return 0;
157}
158
159static int query_pkey(PVRDMADev *dev, union pvrdma_cmd_req *req,
160 union pvrdma_cmd_resp *rsp)
161{
162 struct pvrdma_cmd_query_pkey *cmd = &req->query_pkey;
163 struct pvrdma_cmd_query_pkey_resp *resp = &rsp->query_pkey_resp;
164
165 if (cmd->port_num > MAX_PORTS) {
166 return -EINVAL;
167 }
168
169 if (cmd->index > MAX_PKEYS) {
170 return -EINVAL;
171 }
172
173 memset(resp, 0, sizeof(*resp));
174
175 resp->pkey = PVRDMA_PKEY;
176
177 return 0;
178}
179
180static int create_pd(PVRDMADev *dev, union pvrdma_cmd_req *req,
181 union pvrdma_cmd_resp *rsp)
182{
183 struct pvrdma_cmd_create_pd *cmd = &req->create_pd;
184 struct pvrdma_cmd_create_pd_resp *resp = &rsp->create_pd_resp;
185
186 memset(resp, 0, sizeof(*resp));
187 return rdma_rm_alloc_pd(&dev->rdma_dev_res, &dev->backend_dev,
188 &resp->pd_handle, cmd->ctx_handle);
189}
190
191static int destroy_pd(PVRDMADev *dev, union pvrdma_cmd_req *req,
192 union pvrdma_cmd_resp *rsp)
193{
194 struct pvrdma_cmd_destroy_pd *cmd = &req->destroy_pd;
195
196 rdma_rm_dealloc_pd(&dev->rdma_dev_res, cmd->pd_handle);
197
198 return 0;
199}
200
201static int create_mr(PVRDMADev *dev, union pvrdma_cmd_req *req,
202 union pvrdma_cmd_resp *rsp)
203{
204 struct pvrdma_cmd_create_mr *cmd = &req->create_mr;
205 struct pvrdma_cmd_create_mr_resp *resp = &rsp->create_mr_resp;
206 PCIDevice *pci_dev = PCI_DEVICE(dev);
207 void *host_virt = NULL;
208 int rc = 0;
209
210 memset(resp, 0, sizeof(*resp));
211
212 if (!(cmd->flags & PVRDMA_MR_FLAG_DMA)) {
213 host_virt = pvrdma_map_to_pdir(pci_dev, cmd->pdir_dma, cmd->nchunks,
214 cmd->length);
215 if (!host_virt) {
216 rdma_error_report("Failed to map to pdir");
217 return -EINVAL;
218 }
219 }
220
221 rc = rdma_rm_alloc_mr(&dev->rdma_dev_res, cmd->pd_handle, cmd->start,
222 cmd->length, host_virt, cmd->access_flags,
223 &resp->mr_handle, &resp->lkey, &resp->rkey);
224 if (rc && host_virt) {
225 munmap(host_virt, cmd->length);
226 }
227
228 return rc;
229}
230
231static int destroy_mr(PVRDMADev *dev, union pvrdma_cmd_req *req,
232 union pvrdma_cmd_resp *rsp)
233{
234 struct pvrdma_cmd_destroy_mr *cmd = &req->destroy_mr;
235
236 rdma_rm_dealloc_mr(&dev->rdma_dev_res, cmd->mr_handle);
237
238 return 0;
239}
240
241static int create_cq_ring(PCIDevice *pci_dev , PvrdmaRing **ring,
242 uint64_t pdir_dma, uint32_t nchunks, uint32_t cqe)
243{
244 uint64_t *dir = NULL, *tbl = NULL;
245 PvrdmaRing *r;
246 int rc = -EINVAL;
247 char ring_name[MAX_RING_NAME_SZ];
248
249 if (!nchunks || nchunks > PVRDMA_MAX_FAST_REG_PAGES) {
250 rdma_error_report("Got invalid nchunks: %d", nchunks);
251 return rc;
252 }
253
254 dir = rdma_pci_dma_map(pci_dev, pdir_dma, TARGET_PAGE_SIZE);
255 if (!dir) {
256 rdma_error_report("Failed to map to CQ page directory");
257 goto out;
258 }
259
260 tbl = rdma_pci_dma_map(pci_dev, dir[0], TARGET_PAGE_SIZE);
261 if (!tbl) {
262 rdma_error_report("Failed to map to CQ page table");
263 goto out;
264 }
265
266 r = g_malloc(sizeof(*r));
267 *ring = r;
268
269 r->ring_state = rdma_pci_dma_map(pci_dev, tbl[0], TARGET_PAGE_SIZE);
270
271 if (!r->ring_state) {
272 rdma_error_report("Failed to map to CQ ring state");
273 goto out_free_ring;
274 }
275
276 sprintf(ring_name, "cq_ring_%" PRIx64, pdir_dma);
277 rc = pvrdma_ring_init(r, ring_name, pci_dev, &r->ring_state[1],
278 cqe, sizeof(struct pvrdma_cqe),
279
280 (dma_addr_t *)&tbl[1], nchunks - 1);
281 if (rc) {
282 goto out_unmap_ring_state;
283 }
284
285 goto out;
286
287out_unmap_ring_state:
288
289 rdma_pci_dma_unmap(pci_dev, --r->ring_state, TARGET_PAGE_SIZE);
290
291out_free_ring:
292 g_free(r);
293
294out:
295 rdma_pci_dma_unmap(pci_dev, tbl, TARGET_PAGE_SIZE);
296 rdma_pci_dma_unmap(pci_dev, dir, TARGET_PAGE_SIZE);
297
298 return rc;
299}
300
/*
 * Tear down a CQ ring created by create_cq_ring(): free the ring's data
 * pages, unmap the ring-state page and release the PvrdmaRing itself.
 */
static void destroy_cq_ring(PvrdmaRing *ring)
{
    pvrdma_ring_free(ring);

    /* ring->ring_state points at slot 1 of the mapped state page (it was
     * initialized from &ring_state[1] in create_cq_ring); step back to
     * the page start before unmapping. */
    rdma_pci_dma_unmap(ring->dev, --ring->ring_state, TARGET_PAGE_SIZE);
    g_free(ring);
}
308
309static int create_cq(PVRDMADev *dev, union pvrdma_cmd_req *req,
310 union pvrdma_cmd_resp *rsp)
311{
312 struct pvrdma_cmd_create_cq *cmd = &req->create_cq;
313 struct pvrdma_cmd_create_cq_resp *resp = &rsp->create_cq_resp;
314 PvrdmaRing *ring = NULL;
315 int rc;
316
317 memset(resp, 0, sizeof(*resp));
318
319 resp->cqe = cmd->cqe;
320
321 rc = create_cq_ring(PCI_DEVICE(dev), &ring, cmd->pdir_dma, cmd->nchunks,
322 cmd->cqe);
323 if (rc) {
324 return rc;
325 }
326
327 rc = rdma_rm_alloc_cq(&dev->rdma_dev_res, &dev->backend_dev, cmd->cqe,
328 &resp->cq_handle, ring);
329 if (rc) {
330 destroy_cq_ring(ring);
331 }
332
333 resp->cqe = cmd->cqe;
334
335 return rc;
336}
337
338static int destroy_cq(PVRDMADev *dev, union pvrdma_cmd_req *req,
339 union pvrdma_cmd_resp *rsp)
340{
341 struct pvrdma_cmd_destroy_cq *cmd = &req->destroy_cq;
342 RdmaRmCQ *cq;
343 PvrdmaRing *ring;
344
345 cq = rdma_rm_get_cq(&dev->rdma_dev_res, cmd->cq_handle);
346 if (!cq) {
347 rdma_error_report("Got invalid CQ handle");
348 return -EINVAL;
349 }
350
351 ring = (PvrdmaRing *)cq->opaque;
352 destroy_cq_ring(ring);
353
354 rdma_rm_dealloc_cq(&dev->rdma_dev_res, cmd->cq_handle);
355
356 return 0;
357}
358
359static int create_qp_rings(PCIDevice *pci_dev, uint64_t pdir_dma,
360 PvrdmaRing **rings, uint32_t scqe, uint32_t smax_sge,
361 uint32_t spages, uint32_t rcqe, uint32_t rmax_sge,
362 uint32_t rpages, uint8_t is_srq)
363{
364 uint64_t *dir = NULL, *tbl = NULL;
365 PvrdmaRing *sr, *rr;
366 int rc = -EINVAL;
367 char ring_name[MAX_RING_NAME_SZ];
368 uint32_t wqe_sz;
369
370 if (!spages || spages > PVRDMA_MAX_FAST_REG_PAGES) {
371 rdma_error_report("Got invalid send page count for QP ring: %d",
372 spages);
373 return rc;
374 }
375
376 if (!is_srq && (!rpages || rpages > PVRDMA_MAX_FAST_REG_PAGES)) {
377 rdma_error_report("Got invalid recv page count for QP ring: %d",
378 rpages);
379 return rc;
380 }
381
382 dir = rdma_pci_dma_map(pci_dev, pdir_dma, TARGET_PAGE_SIZE);
383 if (!dir) {
384 rdma_error_report("Failed to map to QP page directory");
385 goto out;
386 }
387
388 tbl = rdma_pci_dma_map(pci_dev, dir[0], TARGET_PAGE_SIZE);
389 if (!tbl) {
390 rdma_error_report("Failed to map to QP page table");
391 goto out;
392 }
393
394 if (!is_srq) {
395 sr = g_malloc(2 * sizeof(*rr));
396 rr = &sr[1];
397 } else {
398 sr = g_malloc(sizeof(*sr));
399 }
400
401 *rings = sr;
402
403
404 sr->ring_state = rdma_pci_dma_map(pci_dev, tbl[0], TARGET_PAGE_SIZE);
405 if (!sr->ring_state) {
406 rdma_error_report("Failed to map to QP ring state");
407 goto out_free_sr_mem;
408 }
409
410 wqe_sz = pow2ceil(sizeof(struct pvrdma_sq_wqe_hdr) +
411 sizeof(struct pvrdma_sge) * smax_sge - 1);
412
413 sprintf(ring_name, "qp_sring_%" PRIx64, pdir_dma);
414 rc = pvrdma_ring_init(sr, ring_name, pci_dev, sr->ring_state,
415 scqe, wqe_sz, (dma_addr_t *)&tbl[1], spages);
416 if (rc) {
417 goto out_unmap_ring_state;
418 }
419
420 if (!is_srq) {
421
422 rr->ring_state = &sr->ring_state[1];
423 wqe_sz = pow2ceil(sizeof(struct pvrdma_rq_wqe_hdr) +
424 sizeof(struct pvrdma_sge) * rmax_sge - 1);
425 sprintf(ring_name, "qp_rring_%" PRIx64, pdir_dma);
426 rc = pvrdma_ring_init(rr, ring_name, pci_dev, rr->ring_state,
427 rcqe, wqe_sz, (dma_addr_t *)&tbl[1 + spages],
428 rpages);
429 if (rc) {
430 goto out_free_sr;
431 }
432 }
433
434 goto out;
435
436out_free_sr:
437 pvrdma_ring_free(sr);
438
439out_unmap_ring_state:
440 rdma_pci_dma_unmap(pci_dev, sr->ring_state, TARGET_PAGE_SIZE);
441
442out_free_sr_mem:
443 g_free(sr);
444
445out:
446 rdma_pci_dma_unmap(pci_dev, tbl, TARGET_PAGE_SIZE);
447 rdma_pci_dma_unmap(pci_dev, dir, TARGET_PAGE_SIZE);
448
449 return rc;
450}
451
452static void destroy_qp_rings(PvrdmaRing *ring, uint8_t is_srq)
453{
454 pvrdma_ring_free(&ring[0]);
455 if (!is_srq) {
456 pvrdma_ring_free(&ring[1]);
457 }
458
459 rdma_pci_dma_unmap(ring->dev, ring->ring_state, TARGET_PAGE_SIZE);
460 g_free(ring);
461}
462
/*
 * PVRDMA_CMD_CREATE_QP: build the QP's send/recv rings from the guest
 * page directory and allocate the QP resource.  On success the granted
 * capabilities are echoed back to the guest.
 */
static int create_qp(PVRDMADev *dev, union pvrdma_cmd_req *req,
                     union pvrdma_cmd_resp *rsp)
{
    struct pvrdma_cmd_create_qp *cmd = &req->create_qp;
    struct pvrdma_cmd_create_qp_resp *resp = &rsp->create_qp_resp;
    PvrdmaRing *rings = NULL;
    int rc;

    memset(resp, 0, sizeof(*resp));

    /* Recv pages = total - send - 1 (one page is the ring state); if
     * total_chunks <= send_chunks this unsigned expression wraps, which
     * relies on create_qp_rings() rejecting out-of-range page counts. */
    rc = create_qp_rings(PCI_DEVICE(dev), cmd->pdir_dma, &rings,
                         cmd->max_send_wr, cmd->max_send_sge, cmd->send_chunks,
                         cmd->max_recv_wr, cmd->max_recv_sge,
                         cmd->total_chunks - cmd->send_chunks - 1, cmd->is_srq);
    if (rc) {
        return rc;
    }

    rc = rdma_rm_alloc_qp(&dev->rdma_dev_res, cmd->pd_handle, cmd->qp_type,
                          cmd->max_send_wr, cmd->max_send_sge,
                          cmd->send_cq_handle, cmd->max_recv_wr,
                          cmd->max_recv_sge, cmd->recv_cq_handle, rings,
                          &resp->qpn, cmd->is_srq, cmd->srq_handle);
    if (rc) {
        /* QP allocation failed: release the rings built above. */
        destroy_qp_rings(rings, cmd->is_srq);
        return rc;
    }

    /* Echo the granted capabilities back to the guest. */
    resp->max_send_wr = cmd->max_send_wr;
    resp->max_recv_wr = cmd->max_recv_wr;
    resp->max_send_sge = cmd->max_send_sge;
    resp->max_recv_sge = cmd->max_recv_sge;
    resp->max_inline_data = cmd->max_inline_data;

    return 0;
}
499
/*
 * PVRDMA_CMD_MODIFY_QP: forward the guest's QP attribute update to the
 * resource manager / backend.  No data is returned in rsp.
 */
static int modify_qp(PVRDMADev *dev, union pvrdma_cmd_req *req,
                     union pvrdma_cmd_resp *rsp)
{
    struct pvrdma_cmd_modify_qp *cmd = &req->modify_qp;

    /* Only the attribute fields used by the resource manager are passed;
     * attr_mask indicates which of them are to be applied. */
    return rdma_rm_modify_qp(&dev->rdma_dev_res, &dev->backend_dev,
                             cmd->qp_handle, cmd->attr_mask,
                             cmd->attrs.ah_attr.grh.sgid_index,
                             (union ibv_gid *)&cmd->attrs.ah_attr.grh.dgid,
                             cmd->attrs.dest_qp_num,
                             (enum ibv_qp_state)cmd->attrs.qp_state,
                             cmd->attrs.qkey, cmd->attrs.rq_psn,
                             cmd->attrs.sq_psn);
}
516
517static int query_qp(PVRDMADev *dev, union pvrdma_cmd_req *req,
518 union pvrdma_cmd_resp *rsp)
519{
520 struct pvrdma_cmd_query_qp *cmd = &req->query_qp;
521 struct pvrdma_cmd_query_qp_resp *resp = &rsp->query_qp_resp;
522 struct ibv_qp_init_attr init_attr;
523
524 memset(resp, 0, sizeof(*resp));
525
526 return rdma_rm_query_qp(&dev->rdma_dev_res, &dev->backend_dev,
527 cmd->qp_handle,
528 (struct ibv_qp_attr *)&resp->attrs,
529 cmd->attr_mask,
530 &init_attr);
531}
532
533static int destroy_qp(PVRDMADev *dev, union pvrdma_cmd_req *req,
534 union pvrdma_cmd_resp *rsp)
535{
536 struct pvrdma_cmd_destroy_qp *cmd = &req->destroy_qp;
537 RdmaRmQP *qp;
538 PvrdmaRing *ring;
539
540 qp = rdma_rm_get_qp(&dev->rdma_dev_res, cmd->qp_handle);
541 if (!qp) {
542 return -EINVAL;
543 }
544
545 ring = (PvrdmaRing *)qp->opaque;
546 destroy_qp_rings(ring, qp->is_srq);
547 rdma_rm_dealloc_qp(&dev->rdma_dev_res, cmd->qp_handle);
548
549 return 0;
550}
551
552static int create_bind(PVRDMADev *dev, union pvrdma_cmd_req *req,
553 union pvrdma_cmd_resp *rsp)
554{
555 struct pvrdma_cmd_create_bind *cmd = &req->create_bind;
556 union ibv_gid *gid = (union ibv_gid *)&cmd->new_gid;
557
558 if (cmd->index >= MAX_PORT_GIDS) {
559 return -EINVAL;
560 }
561
562 return rdma_rm_add_gid(&dev->rdma_dev_res, &dev->backend_dev,
563 dev->backend_eth_device_name, gid, cmd->index);
564}
565
566static int destroy_bind(PVRDMADev *dev, union pvrdma_cmd_req *req,
567 union pvrdma_cmd_resp *rsp)
568{
569 struct pvrdma_cmd_destroy_bind *cmd = &req->destroy_bind;
570
571 if (cmd->index >= MAX_PORT_GIDS) {
572 return -EINVAL;
573 }
574
575 return rdma_rm_del_gid(&dev->rdma_dev_res, &dev->backend_dev,
576 dev->backend_eth_device_name, cmd->index);
577}
578
579static int create_uc(PVRDMADev *dev, union pvrdma_cmd_req *req,
580 union pvrdma_cmd_resp *rsp)
581{
582 struct pvrdma_cmd_create_uc *cmd = &req->create_uc;
583 struct pvrdma_cmd_create_uc_resp *resp = &rsp->create_uc_resp;
584
585 memset(resp, 0, sizeof(*resp));
586 return rdma_rm_alloc_uc(&dev->rdma_dev_res, cmd->pfn, &resp->ctx_handle);
587}
588
589static int destroy_uc(PVRDMADev *dev, union pvrdma_cmd_req *req,
590 union pvrdma_cmd_resp *rsp)
591{
592 struct pvrdma_cmd_destroy_uc *cmd = &req->destroy_uc;
593
594 rdma_rm_dealloc_uc(&dev->rdma_dev_res, cmd->ctx_handle);
595
596 return 0;
597}
598
599static int create_srq_ring(PCIDevice *pci_dev, PvrdmaRing **ring,
600 uint64_t pdir_dma, uint32_t max_wr,
601 uint32_t max_sge, uint32_t nchunks)
602{
603 uint64_t *dir = NULL, *tbl = NULL;
604 PvrdmaRing *r;
605 int rc = -EINVAL;
606 char ring_name[MAX_RING_NAME_SZ];
607 uint32_t wqe_sz;
608
609 if (!nchunks || nchunks > PVRDMA_MAX_FAST_REG_PAGES) {
610 rdma_error_report("Got invalid page count for SRQ ring: %d",
611 nchunks);
612 return rc;
613 }
614
615 dir = rdma_pci_dma_map(pci_dev, pdir_dma, TARGET_PAGE_SIZE);
616 if (!dir) {
617 rdma_error_report("Failed to map to SRQ page directory");
618 goto out;
619 }
620
621 tbl = rdma_pci_dma_map(pci_dev, dir[0], TARGET_PAGE_SIZE);
622 if (!tbl) {
623 rdma_error_report("Failed to map to SRQ page table");
624 goto out;
625 }
626
627 r = g_malloc(sizeof(*r));
628 *ring = r;
629
630 r->ring_state = rdma_pci_dma_map(pci_dev, tbl[0], TARGET_PAGE_SIZE);
631 if (!r->ring_state) {
632 rdma_error_report("Failed to map tp SRQ ring state");
633 goto out_free_ring_mem;
634 }
635
636 wqe_sz = pow2ceil(sizeof(struct pvrdma_rq_wqe_hdr) +
637 sizeof(struct pvrdma_sge) * max_sge - 1);
638 sprintf(ring_name, "srq_ring_%" PRIx64, pdir_dma);
639 rc = pvrdma_ring_init(r, ring_name, pci_dev, &r->ring_state[1], max_wr,
640 wqe_sz, (dma_addr_t *)&tbl[1], nchunks - 1);
641 if (rc) {
642 goto out_unmap_ring_state;
643 }
644
645 goto out;
646
647out_unmap_ring_state:
648 rdma_pci_dma_unmap(pci_dev, r->ring_state, TARGET_PAGE_SIZE);
649
650out_free_ring_mem:
651 g_free(r);
652
653out:
654 rdma_pci_dma_unmap(pci_dev, tbl, TARGET_PAGE_SIZE);
655 rdma_pci_dma_unmap(pci_dev, dir, TARGET_PAGE_SIZE);
656
657 return rc;
658}
659
/*
 * Tear down an SRQ ring created by create_srq_ring(): free the ring's
 * data pages, unmap the ring-state page and release the PvrdmaRing.
 */
static void destroy_srq_ring(PvrdmaRing *ring)
{
    pvrdma_ring_free(ring);
    /* NOTE(review): destroy_cq_ring() decrements ring->ring_state before
     * unmapping because the ring was initialized from &ring_state[1];
     * create_srq_ring() passes the same &ring_state[1], yet no decrement
     * happens here — verify whether this unmap is off by one slot. */
    rdma_pci_dma_unmap(ring->dev, ring->ring_state, TARGET_PAGE_SIZE);
    g_free(ring);
}
666
667static int create_srq(PVRDMADev *dev, union pvrdma_cmd_req *req,
668 union pvrdma_cmd_resp *rsp)
669{
670 struct pvrdma_cmd_create_srq *cmd = &req->create_srq;
671 struct pvrdma_cmd_create_srq_resp *resp = &rsp->create_srq_resp;
672 PvrdmaRing *ring = NULL;
673 int rc;
674
675 memset(resp, 0, sizeof(*resp));
676
677 rc = create_srq_ring(PCI_DEVICE(dev), &ring, cmd->pdir_dma,
678 cmd->attrs.max_wr, cmd->attrs.max_sge,
679 cmd->nchunks);
680 if (rc) {
681 return rc;
682 }
683
684 rc = rdma_rm_alloc_srq(&dev->rdma_dev_res, cmd->pd_handle,
685 cmd->attrs.max_wr, cmd->attrs.max_sge,
686 cmd->attrs.srq_limit, &resp->srqn, ring);
687 if (rc) {
688 destroy_srq_ring(ring);
689 return rc;
690 }
691
692 return 0;
693}
694
695static int query_srq(PVRDMADev *dev, union pvrdma_cmd_req *req,
696 union pvrdma_cmd_resp *rsp)
697{
698 struct pvrdma_cmd_query_srq *cmd = &req->query_srq;
699 struct pvrdma_cmd_query_srq_resp *resp = &rsp->query_srq_resp;
700
701 memset(resp, 0, sizeof(*resp));
702
703 return rdma_rm_query_srq(&dev->rdma_dev_res, cmd->srq_handle,
704 (struct ibv_srq_attr *)&resp->attrs);
705}
706
707static int modify_srq(PVRDMADev *dev, union pvrdma_cmd_req *req,
708 union pvrdma_cmd_resp *rsp)
709{
710 struct pvrdma_cmd_modify_srq *cmd = &req->modify_srq;
711
712
713 if (!(cmd->attr_mask & IBV_SRQ_LIMIT) ||
714 (cmd->attr_mask & IBV_SRQ_MAX_WR))
715 return -EINVAL;
716
717 return rdma_rm_modify_srq(&dev->rdma_dev_res, cmd->srq_handle,
718 (struct ibv_srq_attr *)&cmd->attrs,
719 cmd->attr_mask);
720}
721
722static int destroy_srq(PVRDMADev *dev, union pvrdma_cmd_req *req,
723 union pvrdma_cmd_resp *rsp)
724{
725 struct pvrdma_cmd_destroy_srq *cmd = &req->destroy_srq;
726 RdmaRmSRQ *srq;
727 PvrdmaRing *ring;
728
729 srq = rdma_rm_get_srq(&dev->rdma_dev_res, cmd->srq_handle);
730 if (!srq) {
731 return -EINVAL;
732 }
733
734 ring = (PvrdmaRing *)srq->opaque;
735 destroy_srq_ring(ring);
736 rdma_rm_dealloc_srq(&dev->rdma_dev_res, cmd->srq_handle);
737
738 return 0;
739}
740
/* Dispatch-table entry: the guest command code, the ack code written into
 * the response header, and the handler that executes the command (NULL
 * marks an unimplemented command). */
struct cmd_handler {
    uint32_t cmd;
    uint32_t ack;
    int (*exec)(PVRDMADev *dev, union pvrdma_cmd_req *req,
                union pvrdma_cmd_resp *rsp);
};
747
/*
 * Command dispatch table.  pvrdma_exec_cmd() indexes this array directly
 * with the guest's hdr.cmd value, so entries must stay in command-code
 * order with no gaps.
 */
static struct cmd_handler cmd_handlers[] = {
    {PVRDMA_CMD_QUERY_PORT, PVRDMA_CMD_QUERY_PORT_RESP, query_port},
    {PVRDMA_CMD_QUERY_PKEY, PVRDMA_CMD_QUERY_PKEY_RESP, query_pkey},
    {PVRDMA_CMD_CREATE_PD, PVRDMA_CMD_CREATE_PD_RESP, create_pd},
    {PVRDMA_CMD_DESTROY_PD, PVRDMA_CMD_DESTROY_PD_RESP_NOOP, destroy_pd},
    {PVRDMA_CMD_CREATE_MR, PVRDMA_CMD_CREATE_MR_RESP, create_mr},
    {PVRDMA_CMD_DESTROY_MR, PVRDMA_CMD_DESTROY_MR_RESP_NOOP, destroy_mr},
    {PVRDMA_CMD_CREATE_CQ, PVRDMA_CMD_CREATE_CQ_RESP, create_cq},
    {PVRDMA_CMD_RESIZE_CQ, PVRDMA_CMD_RESIZE_CQ_RESP, NULL},
    {PVRDMA_CMD_DESTROY_CQ, PVRDMA_CMD_DESTROY_CQ_RESP_NOOP, destroy_cq},
    {PVRDMA_CMD_CREATE_QP, PVRDMA_CMD_CREATE_QP_RESP, create_qp},
    {PVRDMA_CMD_MODIFY_QP, PVRDMA_CMD_MODIFY_QP_RESP, modify_qp},
    {PVRDMA_CMD_QUERY_QP, PVRDMA_CMD_QUERY_QP_RESP, query_qp},
    {PVRDMA_CMD_DESTROY_QP, PVRDMA_CMD_DESTROY_QP_RESP, destroy_qp},
    {PVRDMA_CMD_CREATE_UC, PVRDMA_CMD_CREATE_UC_RESP, create_uc},
    {PVRDMA_CMD_DESTROY_UC, PVRDMA_CMD_DESTROY_UC_RESP_NOOP, destroy_uc},
    {PVRDMA_CMD_CREATE_BIND, PVRDMA_CMD_CREATE_BIND_RESP_NOOP, create_bind},
    {PVRDMA_CMD_DESTROY_BIND, PVRDMA_CMD_DESTROY_BIND_RESP_NOOP, destroy_bind},
    {PVRDMA_CMD_CREATE_SRQ, PVRDMA_CMD_CREATE_SRQ_RESP, create_srq},
    {PVRDMA_CMD_QUERY_SRQ, PVRDMA_CMD_QUERY_SRQ_RESP, query_srq},
    {PVRDMA_CMD_MODIFY_SRQ, PVRDMA_CMD_MODIFY_SRQ_RESP, modify_srq},
    {PVRDMA_CMD_DESTROY_SRQ, PVRDMA_CMD_DESTROY_SRQ_RESP, destroy_srq},
};
771
/*
 * Execute the command currently posted in the device-shared region (DSR)
 * request buffer: dispatch it through cmd_handlers, fill in the response
 * header, latch the status into the ERR register and raise the
 * command-ring interrupt.
 *
 * Returns 0 on success, -EINVAL on any failure.
 */
int pvrdma_exec_cmd(PVRDMADev *dev)
{
    /* 0xFFFF is the sentinel written to PVRDMA_REG_ERR when the command
     * could not be dispatched at all. */
    int err = 0xFFFF;
    DSRInfo *dsr_info;

    dsr_info = &dev->dsr_info;

    if (!dsr_info->dsr) {
        /* Buggy or malicious guest driver */
        rdma_error_report("Exec command without dsr, req or rsp buffers");
        goto out;
    }

    /* hdr.cmd indexes cmd_handlers directly; bound-check it first. */
    if (dsr_info->req->hdr.cmd >= sizeof(cmd_handlers) /
                      sizeof(struct cmd_handler)) {
        rdma_error_report("Unsupported command");
        goto out;
    }

    if (!cmd_handlers[dsr_info->req->hdr.cmd].exec) {
        rdma_error_report("Unsupported command (not implemented yet)");
        goto out;
    }

    err = cmd_handlers[dsr_info->req->hdr.cmd].exec(dev, dsr_info->req,
                                                    dsr_info->rsp);
    dsr_info->rsp->hdr.response = dsr_info->req->hdr.response;
    dsr_info->rsp->hdr.ack = cmd_handlers[dsr_info->req->hdr.cmd].ack;
    /* Handlers return negative errno; the guest sees a positive code. */
    dsr_info->rsp->hdr.err = err < 0 ? -err : 0;

    trace_pvrdma_exec_cmd(dsr_info->req->hdr.cmd, dsr_info->rsp->hdr.err);

    dev->stats.commands++;

out:
    set_reg_val(dev, PVRDMA_REG_ERR, err);
    post_interrupt(dev, INTR_VEC_CMD_RING);

    return (err == 0) ? 0 : -EINVAL;
}
812