1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16#include "qemu/osdep.h"
17#include "cpu.h"
18#include "hw/hw.h"
19#include "hw/pci/pci.h"
20#include "hw/pci/pci_ids.h"
21
22#include "../rdma_backend.h"
23#include "../rdma_rm.h"
24#include "../rdma_utils.h"
25
26#include "trace.h"
27#include "pvrdma.h"
28#include "standard-headers/rdma/vmw_pvrdma-abi.h"
29
30static void *pvrdma_map_to_pdir(PCIDevice *pdev, uint64_t pdir_dma,
31 uint32_t nchunks, size_t length)
32{
33 uint64_t *dir, *tbl;
34 int tbl_idx, dir_idx, addr_idx;
35 void *host_virt = NULL, *curr_page;
36
37 if (!nchunks) {
38 rdma_error_report("Got nchunks=0");
39 return NULL;
40 }
41
42 dir = rdma_pci_dma_map(pdev, pdir_dma, TARGET_PAGE_SIZE);
43 if (!dir) {
44 rdma_error_report("Failed to map to page directory");
45 return NULL;
46 }
47
48 tbl = rdma_pci_dma_map(pdev, dir[0], TARGET_PAGE_SIZE);
49 if (!tbl) {
50 rdma_error_report("Failed to map to page table 0");
51 goto out_unmap_dir;
52 }
53
54 curr_page = rdma_pci_dma_map(pdev, (dma_addr_t)tbl[0], TARGET_PAGE_SIZE);
55 if (!curr_page) {
56 rdma_error_report("Failed to map the page 0");
57 goto out_unmap_tbl;
58 }
59
60 host_virt = mremap(curr_page, 0, length, MREMAP_MAYMOVE);
61 if (host_virt == MAP_FAILED) {
62 host_virt = NULL;
63 rdma_error_report("Failed to remap memory for host_virt");
64 goto out_unmap_tbl;
65 }
66 trace_pvrdma_map_to_pdir_host_virt(curr_page, host_virt);
67
68 rdma_pci_dma_unmap(pdev, curr_page, TARGET_PAGE_SIZE);
69
70 dir_idx = 0;
71 tbl_idx = 1;
72 addr_idx = 1;
73 while (addr_idx < nchunks) {
74 if (tbl_idx == TARGET_PAGE_SIZE / sizeof(uint64_t)) {
75 tbl_idx = 0;
76 dir_idx++;
77 rdma_pci_dma_unmap(pdev, tbl, TARGET_PAGE_SIZE);
78 tbl = rdma_pci_dma_map(pdev, dir[dir_idx], TARGET_PAGE_SIZE);
79 if (!tbl) {
80 rdma_error_report("Failed to map to page table %d", dir_idx);
81 goto out_unmap_host_virt;
82 }
83 }
84
85 curr_page = rdma_pci_dma_map(pdev, (dma_addr_t)tbl[tbl_idx],
86 TARGET_PAGE_SIZE);
87 if (!curr_page) {
88 rdma_error_report("Failed to map to page %d, dir %d", tbl_idx,
89 dir_idx);
90 goto out_unmap_host_virt;
91 }
92
93 mremap(curr_page, 0, TARGET_PAGE_SIZE, MREMAP_MAYMOVE | MREMAP_FIXED,
94 host_virt + TARGET_PAGE_SIZE * addr_idx);
95
96 trace_pvrdma_map_to_pdir_next_page(addr_idx, curr_page, host_virt +
97 TARGET_PAGE_SIZE * addr_idx);
98
99 rdma_pci_dma_unmap(pdev, curr_page, TARGET_PAGE_SIZE);
100
101 addr_idx++;
102
103 tbl_idx++;
104 }
105
106 goto out_unmap_tbl;
107
108out_unmap_host_virt:
109 munmap(host_virt, length);
110 host_virt = NULL;
111
112out_unmap_tbl:
113 rdma_pci_dma_unmap(pdev, tbl, TARGET_PAGE_SIZE);
114
115out_unmap_dir:
116 rdma_pci_dma_unmap(pdev, dir, TARGET_PAGE_SIZE);
117
118 return host_virt;
119}
120
121static int query_port(PVRDMADev *dev, union pvrdma_cmd_req *req,
122 union pvrdma_cmd_resp *rsp)
123{
124 struct pvrdma_cmd_query_port *cmd = &req->query_port;
125 struct pvrdma_cmd_query_port_resp *resp = &rsp->query_port_resp;
126 struct pvrdma_port_attr attrs = {};
127
128 if (cmd->port_num > MAX_PORTS) {
129 return -EINVAL;
130 }
131
132 if (rdma_backend_query_port(&dev->backend_dev,
133 (struct ibv_port_attr *)&attrs)) {
134 return -ENOMEM;
135 }
136
137 memset(resp, 0, sizeof(*resp));
138
139 resp->attrs.state = dev->func0->device_active ? attrs.state :
140 PVRDMA_PORT_DOWN;
141 resp->attrs.max_mtu = attrs.max_mtu;
142 resp->attrs.active_mtu = attrs.active_mtu;
143 resp->attrs.phys_state = attrs.phys_state;
144 resp->attrs.gid_tbl_len = MIN(MAX_PORT_GIDS, attrs.gid_tbl_len);
145 resp->attrs.max_msg_sz = 1024;
146 resp->attrs.pkey_tbl_len = MIN(MAX_PORT_PKEYS, attrs.pkey_tbl_len);
147 resp->attrs.active_width = 1;
148 resp->attrs.active_speed = 1;
149
150 return 0;
151}
152
153static int query_pkey(PVRDMADev *dev, union pvrdma_cmd_req *req,
154 union pvrdma_cmd_resp *rsp)
155{
156 struct pvrdma_cmd_query_pkey *cmd = &req->query_pkey;
157 struct pvrdma_cmd_query_pkey_resp *resp = &rsp->query_pkey_resp;
158
159 if (cmd->port_num > MAX_PORTS) {
160 return -EINVAL;
161 }
162
163 if (cmd->index > MAX_PKEYS) {
164 return -EINVAL;
165 }
166
167 memset(resp, 0, sizeof(*resp));
168
169 resp->pkey = PVRDMA_PKEY;
170
171 return 0;
172}
173
174static int create_pd(PVRDMADev *dev, union pvrdma_cmd_req *req,
175 union pvrdma_cmd_resp *rsp)
176{
177 struct pvrdma_cmd_create_pd *cmd = &req->create_pd;
178 struct pvrdma_cmd_create_pd_resp *resp = &rsp->create_pd_resp;
179 int rc;
180
181 memset(resp, 0, sizeof(*resp));
182 rc = rdma_rm_alloc_pd(&dev->rdma_dev_res, &dev->backend_dev,
183 &resp->pd_handle, cmd->ctx_handle);
184
185 return rc;
186}
187
188static int destroy_pd(PVRDMADev *dev, union pvrdma_cmd_req *req,
189 union pvrdma_cmd_resp *rsp)
190{
191 struct pvrdma_cmd_destroy_pd *cmd = &req->destroy_pd;
192
193 rdma_rm_dealloc_pd(&dev->rdma_dev_res, cmd->pd_handle);
194
195 return 0;
196}
197
198static int create_mr(PVRDMADev *dev, union pvrdma_cmd_req *req,
199 union pvrdma_cmd_resp *rsp)
200{
201 struct pvrdma_cmd_create_mr *cmd = &req->create_mr;
202 struct pvrdma_cmd_create_mr_resp *resp = &rsp->create_mr_resp;
203 PCIDevice *pci_dev = PCI_DEVICE(dev);
204 void *host_virt = NULL;
205 int rc = 0;
206
207 memset(resp, 0, sizeof(*resp));
208
209 if (!(cmd->flags & PVRDMA_MR_FLAG_DMA)) {
210 host_virt = pvrdma_map_to_pdir(pci_dev, cmd->pdir_dma, cmd->nchunks,
211 cmd->length);
212 if (!host_virt) {
213 rdma_error_report("Failed to map to pdir");
214 return -EINVAL;
215 }
216 }
217
218 rc = rdma_rm_alloc_mr(&dev->rdma_dev_res, cmd->pd_handle, cmd->start,
219 cmd->length, host_virt, cmd->access_flags,
220 &resp->mr_handle, &resp->lkey, &resp->rkey);
221 if (rc && host_virt) {
222 munmap(host_virt, cmd->length);
223 }
224
225 return rc;
226}
227
228static int destroy_mr(PVRDMADev *dev, union pvrdma_cmd_req *req,
229 union pvrdma_cmd_resp *rsp)
230{
231 struct pvrdma_cmd_destroy_mr *cmd = &req->destroy_mr;
232
233 rdma_rm_dealloc_mr(&dev->rdma_dev_res, cmd->mr_handle);
234
235 return 0;
236}
237
/*
 * Map the guest's CQ page directory and build a PvrdmaRing on top of it.
 * tbl[0] is the producer/consumer ring-state page; the remaining
 * nchunks - 1 pages hold the CQEs. Returns 0 on success, negative errno
 * otherwise; *ring is only valid on success.
 */
static int create_cq_ring(PCIDevice *pci_dev , PvrdmaRing **ring,
                          uint64_t pdir_dma, uint32_t nchunks, uint32_t cqe)
{
    uint64_t *dir = NULL, *tbl = NULL;
    PvrdmaRing *r;
    int rc = -EINVAL;
    char ring_name[MAX_RING_NAME_SZ];

    /* nchunks is guest-controlled; bound it before mapping anything. */
    if (!nchunks || nchunks > PVRDMA_MAX_FAST_REG_PAGES) {
        rdma_error_report("Got invalid nchunks: %d", nchunks);
        return rc;
    }

    /* Page directory: one page of DMA addresses of page-table pages. */
    dir = rdma_pci_dma_map(pci_dev, pdir_dma, TARGET_PAGE_SIZE);
    if (!dir) {
        rdma_error_report("Failed to map to CQ page directory");
        goto out;
    }

    /* First page table: DMA addresses of the actual ring pages. */
    tbl = rdma_pci_dma_map(pci_dev, dir[0], TARGET_PAGE_SIZE);
    if (!tbl) {
        rdma_error_report("Failed to map to CQ page table");
        goto out;
    }

    r = g_malloc(sizeof(*r));
    *ring = r;

    /* Page 0 of the ring carries the shared ring state. */
    r->ring_state = (struct pvrdma_ring *)
        rdma_pci_dma_map(pci_dev, tbl[0], TARGET_PAGE_SIZE);

    if (!r->ring_state) {
        rdma_error_report("Failed to map to CQ ring state");
        goto out_free_ring;
    }

    sprintf(ring_name, "cq_ring_%" PRIx64, pdir_dma);
    /* Ring data starts at slot 1 of the state page and at tbl[1]. */
    rc = pvrdma_ring_init(r, ring_name, pci_dev, &r->ring_state[1],
                          cqe, sizeof(struct pvrdma_cqe),

                          (dma_addr_t *)&tbl[1], nchunks - 1);
    if (rc) {
        goto out_unmap_ring_state;
    }

    goto out;

out_unmap_ring_state:
    /*
     * ring_state was handed to pvrdma_ring_init as &r->ring_state[1];
     * presumably it is stored back there — the decrement returns to the
     * mapped page base before unmapping (NOTE(review): verify against
     * pvrdma_ring_init).
     */
    rdma_pci_dma_unmap(pci_dev, --r->ring_state, TARGET_PAGE_SIZE);

out_free_ring:
    g_free(r);

out:
    /* dir/tbl are only needed during setup; unmap on every path. */
    rdma_pci_dma_unmap(pci_dev, tbl, TARGET_PAGE_SIZE);
    rdma_pci_dma_unmap(pci_dev, dir, TARGET_PAGE_SIZE);

    return rc;
}
298
299static void destroy_cq_ring(PvrdmaRing *ring)
300{
301 pvrdma_ring_free(ring);
302
303 rdma_pci_dma_unmap(ring->dev, --ring->ring_state, TARGET_PAGE_SIZE);
304 g_free(ring);
305}
306
307static int create_cq(PVRDMADev *dev, union pvrdma_cmd_req *req,
308 union pvrdma_cmd_resp *rsp)
309{
310 struct pvrdma_cmd_create_cq *cmd = &req->create_cq;
311 struct pvrdma_cmd_create_cq_resp *resp = &rsp->create_cq_resp;
312 PvrdmaRing *ring = NULL;
313 int rc;
314
315 memset(resp, 0, sizeof(*resp));
316
317 resp->cqe = cmd->cqe;
318
319 rc = create_cq_ring(PCI_DEVICE(dev), &ring, cmd->pdir_dma, cmd->nchunks,
320 cmd->cqe);
321 if (rc) {
322 return rc;
323 }
324
325 rc = rdma_rm_alloc_cq(&dev->rdma_dev_res, &dev->backend_dev, cmd->cqe,
326 &resp->cq_handle, ring);
327 if (rc) {
328 destroy_cq_ring(ring);
329 }
330
331 resp->cqe = cmd->cqe;
332
333 return rc;
334}
335
336static int destroy_cq(PVRDMADev *dev, union pvrdma_cmd_req *req,
337 union pvrdma_cmd_resp *rsp)
338{
339 struct pvrdma_cmd_destroy_cq *cmd = &req->destroy_cq;
340 RdmaRmCQ *cq;
341 PvrdmaRing *ring;
342
343 cq = rdma_rm_get_cq(&dev->rdma_dev_res, cmd->cq_handle);
344 if (!cq) {
345 rdma_error_report("Got invalid CQ handle");
346 return -EINVAL;
347 }
348
349 ring = (PvrdmaRing *)cq->opaque;
350 destroy_cq_ring(ring);
351
352 rdma_rm_dealloc_cq(&dev->rdma_dev_res, cmd->cq_handle);
353
354 return 0;
355}
356
/*
 * Map the guest's QP page directory and build the send and receive rings.
 * Page-table layout: tbl[0] is the shared ring-state page, tbl[1..spages]
 * back the send ring, and the next rpages entries back the receive ring.
 * Returns 0 on success, negative errno otherwise; *rings is only valid
 * on success.
 */
static int create_qp_rings(PCIDevice *pci_dev, uint64_t pdir_dma,
                           PvrdmaRing **rings, uint32_t scqe, uint32_t smax_sge,
                           uint32_t spages, uint32_t rcqe, uint32_t rmax_sge,
                           uint32_t rpages)
{
    uint64_t *dir = NULL, *tbl = NULL;
    PvrdmaRing *sr, *rr;
    int rc = -EINVAL;
    char ring_name[MAX_RING_NAME_SZ];
    uint32_t wqe_sz;

    /* Both page counts are guest-controlled; bound them before mapping. */
    if (!spages || spages > PVRDMA_MAX_FAST_REG_PAGES
        || !rpages || rpages > PVRDMA_MAX_FAST_REG_PAGES) {
        rdma_error_report("Got invalid page count for QP ring: %d, %d", spages,
                          rpages);
        return rc;
    }

    /* Page directory: one page of DMA addresses of page-table pages. */
    dir = rdma_pci_dma_map(pci_dev, pdir_dma, TARGET_PAGE_SIZE);
    if (!dir) {
        rdma_error_report("Failed to map to QP page directory");
        goto out;
    }

    tbl = rdma_pci_dma_map(pci_dev, dir[0], TARGET_PAGE_SIZE);
    if (!tbl) {
        rdma_error_report("Failed to map to QP page table");
        goto out;
    }

    /* One allocation carries both rings: send at [0], receive at [1]. */
    sr = g_malloc(2 * sizeof(*rr));
    rr = &sr[1];

    *rings = sr;


    /* Shared ring-state page; the receive ring uses its second slot. */
    sr->ring_state = (struct pvrdma_ring *)
        rdma_pci_dma_map(pci_dev, tbl[0], TARGET_PAGE_SIZE);
    if (!sr->ring_state) {
        rdma_error_report("Failed to map to QP ring state");
        goto out_free_sr_mem;
    }

    /* WQE size: header plus max SGE array, rounded to a power of two. */
    wqe_sz = pow2ceil(sizeof(struct pvrdma_sq_wqe_hdr) +
                      sizeof(struct pvrdma_sge) * smax_sge - 1);

    sprintf(ring_name, "qp_sring_%" PRIx64, pdir_dma);
    rc = pvrdma_ring_init(sr, ring_name, pci_dev, sr->ring_state,
                          scqe, wqe_sz, (dma_addr_t *)&tbl[1], spages);
    if (rc) {
        goto out_unmap_ring_state;
    }

    /* Receive ring: state in slot 1, pages right after the send pages. */
    rr->ring_state = &sr->ring_state[1];
    wqe_sz = pow2ceil(sizeof(struct pvrdma_rq_wqe_hdr) +
                      sizeof(struct pvrdma_sge) * rmax_sge - 1);
    sprintf(ring_name, "qp_rring_%" PRIx64, pdir_dma);
    rc = pvrdma_ring_init(rr, ring_name, pci_dev, rr->ring_state,
                          rcqe, wqe_sz, (dma_addr_t *)&tbl[1 + spages], rpages);
    if (rc) {
        goto out_free_sr;
    }

    goto out;

out_free_sr:
    pvrdma_ring_free(sr);

out_unmap_ring_state:
    rdma_pci_dma_unmap(pci_dev, sr->ring_state, TARGET_PAGE_SIZE);

out_free_sr_mem:
    g_free(sr);

out:
    /* dir/tbl are only needed during setup; unmap on every path. */
    rdma_pci_dma_unmap(pci_dev, tbl, TARGET_PAGE_SIZE);
    rdma_pci_dma_unmap(pci_dev, dir, TARGET_PAGE_SIZE);

    return rc;
}
438
439static void destroy_qp_rings(PvrdmaRing *ring)
440{
441 pvrdma_ring_free(&ring[0]);
442 pvrdma_ring_free(&ring[1]);
443
444 rdma_pci_dma_unmap(ring->dev, ring->ring_state, TARGET_PAGE_SIZE);
445 g_free(ring);
446}
447
448static int create_qp(PVRDMADev *dev, union pvrdma_cmd_req *req,
449 union pvrdma_cmd_resp *rsp)
450{
451 struct pvrdma_cmd_create_qp *cmd = &req->create_qp;
452 struct pvrdma_cmd_create_qp_resp *resp = &rsp->create_qp_resp;
453 PvrdmaRing *rings = NULL;
454 int rc;
455
456 memset(resp, 0, sizeof(*resp));
457
458 rc = create_qp_rings(PCI_DEVICE(dev), cmd->pdir_dma, &rings,
459 cmd->max_send_wr, cmd->max_send_sge, cmd->send_chunks,
460 cmd->max_recv_wr, cmd->max_recv_sge,
461 cmd->total_chunks - cmd->send_chunks - 1);
462 if (rc) {
463 return rc;
464 }
465
466 rc = rdma_rm_alloc_qp(&dev->rdma_dev_res, cmd->pd_handle, cmd->qp_type,
467 cmd->max_send_wr, cmd->max_send_sge,
468 cmd->send_cq_handle, cmd->max_recv_wr,
469 cmd->max_recv_sge, cmd->recv_cq_handle, rings,
470 &resp->qpn);
471 if (rc) {
472 destroy_qp_rings(rings);
473 return rc;
474 }
475
476 resp->max_send_wr = cmd->max_send_wr;
477 resp->max_recv_wr = cmd->max_recv_wr;
478 resp->max_send_sge = cmd->max_send_sge;
479 resp->max_recv_sge = cmd->max_recv_sge;
480 resp->max_inline_data = cmd->max_inline_data;
481
482 return 0;
483}
484
485static int modify_qp(PVRDMADev *dev, union pvrdma_cmd_req *req,
486 union pvrdma_cmd_resp *rsp)
487{
488 struct pvrdma_cmd_modify_qp *cmd = &req->modify_qp;
489 int rc;
490
491
492
493 rc = rdma_rm_modify_qp(&dev->rdma_dev_res, &dev->backend_dev,
494 cmd->qp_handle, cmd->attr_mask,
495 cmd->attrs.ah_attr.grh.sgid_index,
496 (union ibv_gid *)&cmd->attrs.ah_attr.grh.dgid,
497 cmd->attrs.dest_qp_num,
498 (enum ibv_qp_state)cmd->attrs.qp_state,
499 cmd->attrs.qkey, cmd->attrs.rq_psn,
500 cmd->attrs.sq_psn);
501
502 return rc;
503}
504
505static int query_qp(PVRDMADev *dev, union pvrdma_cmd_req *req,
506 union pvrdma_cmd_resp *rsp)
507{
508 struct pvrdma_cmd_query_qp *cmd = &req->query_qp;
509 struct pvrdma_cmd_query_qp_resp *resp = &rsp->query_qp_resp;
510 struct ibv_qp_init_attr init_attr;
511 int rc;
512
513 memset(resp, 0, sizeof(*resp));
514
515 rc = rdma_rm_query_qp(&dev->rdma_dev_res, &dev->backend_dev, cmd->qp_handle,
516 (struct ibv_qp_attr *)&resp->attrs, cmd->attr_mask,
517 &init_attr);
518
519 return rc;
520}
521
522static int destroy_qp(PVRDMADev *dev, union pvrdma_cmd_req *req,
523 union pvrdma_cmd_resp *rsp)
524{
525 struct pvrdma_cmd_destroy_qp *cmd = &req->destroy_qp;
526 RdmaRmQP *qp;
527 PvrdmaRing *ring;
528
529 qp = rdma_rm_get_qp(&dev->rdma_dev_res, cmd->qp_handle);
530 if (!qp) {
531 return -EINVAL;
532 }
533
534 rdma_rm_dealloc_qp(&dev->rdma_dev_res, cmd->qp_handle);
535
536 ring = (PvrdmaRing *)qp->opaque;
537 destroy_qp_rings(ring);
538
539 return 0;
540}
541
542static int create_bind(PVRDMADev *dev, union pvrdma_cmd_req *req,
543 union pvrdma_cmd_resp *rsp)
544{
545 struct pvrdma_cmd_create_bind *cmd = &req->create_bind;
546 int rc;
547 union ibv_gid *gid = (union ibv_gid *)&cmd->new_gid;
548
549 if (cmd->index >= MAX_PORT_GIDS) {
550 return -EINVAL;
551 }
552
553 rc = rdma_rm_add_gid(&dev->rdma_dev_res, &dev->backend_dev,
554 dev->backend_eth_device_name, gid, cmd->index);
555
556 return rc;
557}
558
559static int destroy_bind(PVRDMADev *dev, union pvrdma_cmd_req *req,
560 union pvrdma_cmd_resp *rsp)
561{
562 int rc;
563
564 struct pvrdma_cmd_destroy_bind *cmd = &req->destroy_bind;
565
566 if (cmd->index >= MAX_PORT_GIDS) {
567 return -EINVAL;
568 }
569
570 rc = rdma_rm_del_gid(&dev->rdma_dev_res, &dev->backend_dev,
571 dev->backend_eth_device_name, cmd->index);
572
573 return rc;
574}
575
576static int create_uc(PVRDMADev *dev, union pvrdma_cmd_req *req,
577 union pvrdma_cmd_resp *rsp)
578{
579 struct pvrdma_cmd_create_uc *cmd = &req->create_uc;
580 struct pvrdma_cmd_create_uc_resp *resp = &rsp->create_uc_resp;
581 int rc;
582
583 memset(resp, 0, sizeof(*resp));
584 rc = rdma_rm_alloc_uc(&dev->rdma_dev_res, cmd->pfn, &resp->ctx_handle);
585
586 return rc;
587}
588
589static int destroy_uc(PVRDMADev *dev, union pvrdma_cmd_req *req,
590 union pvrdma_cmd_resp *rsp)
591{
592 struct pvrdma_cmd_destroy_uc *cmd = &req->destroy_uc;
593
594 rdma_rm_dealloc_uc(&dev->rdma_dev_res, cmd->ctx_handle);
595
596 return 0;
597}
598
/* Dispatch-table entry tying a guest command code to its handler. */
struct cmd_handler {
    uint32_t cmd;   /* PVRDMA_CMD_* request code */
    uint32_t ack;   /* PVRDMA_CMD_*_RESP code written to rsp->hdr.ack */
    int (*exec)(PVRDMADev *dev, union pvrdma_cmd_req *req,
                union pvrdma_cmd_resp *rsp);
};
605
/*
 * Handler table, indexed by request code in pvrdma_exec_cmd() (which
 * bounds-checks the index before dispatch). Entries are assumed ordered
 * so that .cmd equals the array index — verify against vmw_pvrdma-abi.h.
 * A NULL exec marks a command accepted by the ABI but not implemented
 * (e.g. RESIZE_CQ).
 */
static struct cmd_handler cmd_handlers[] = {
    {PVRDMA_CMD_QUERY_PORT, PVRDMA_CMD_QUERY_PORT_RESP, query_port},
    {PVRDMA_CMD_QUERY_PKEY, PVRDMA_CMD_QUERY_PKEY_RESP, query_pkey},
    {PVRDMA_CMD_CREATE_PD, PVRDMA_CMD_CREATE_PD_RESP, create_pd},
    {PVRDMA_CMD_DESTROY_PD, PVRDMA_CMD_DESTROY_PD_RESP_NOOP, destroy_pd},
    {PVRDMA_CMD_CREATE_MR, PVRDMA_CMD_CREATE_MR_RESP, create_mr},
    {PVRDMA_CMD_DESTROY_MR, PVRDMA_CMD_DESTROY_MR_RESP_NOOP, destroy_mr},
    {PVRDMA_CMD_CREATE_CQ, PVRDMA_CMD_CREATE_CQ_RESP, create_cq},
    {PVRDMA_CMD_RESIZE_CQ, PVRDMA_CMD_RESIZE_CQ_RESP, NULL},
    {PVRDMA_CMD_DESTROY_CQ, PVRDMA_CMD_DESTROY_CQ_RESP_NOOP, destroy_cq},
    {PVRDMA_CMD_CREATE_QP, PVRDMA_CMD_CREATE_QP_RESP, create_qp},
    {PVRDMA_CMD_MODIFY_QP, PVRDMA_CMD_MODIFY_QP_RESP, modify_qp},
    {PVRDMA_CMD_QUERY_QP, PVRDMA_CMD_QUERY_QP_RESP, query_qp},
    {PVRDMA_CMD_DESTROY_QP, PVRDMA_CMD_DESTROY_QP_RESP, destroy_qp},
    {PVRDMA_CMD_CREATE_UC, PVRDMA_CMD_CREATE_UC_RESP, create_uc},
    {PVRDMA_CMD_DESTROY_UC, PVRDMA_CMD_DESTROY_UC_RESP_NOOP, destroy_uc},
    {PVRDMA_CMD_CREATE_BIND, PVRDMA_CMD_CREATE_BIND_RESP_NOOP, create_bind},
    {PVRDMA_CMD_DESTROY_BIND, PVRDMA_CMD_DESTROY_BIND_RESP_NOOP, destroy_bind},
};
625
626int pvrdma_exec_cmd(PVRDMADev *dev)
627{
628 int err = 0xFFFF;
629 DSRInfo *dsr_info;
630
631 dsr_info = &dev->dsr_info;
632
633 if (dsr_info->req->hdr.cmd >= sizeof(cmd_handlers) /
634 sizeof(struct cmd_handler)) {
635 rdma_error_report("Unsupported command");
636 goto out;
637 }
638
639 if (!cmd_handlers[dsr_info->req->hdr.cmd].exec) {
640 rdma_error_report("Unsupported command (not implemented yet)");
641 goto out;
642 }
643
644 err = cmd_handlers[dsr_info->req->hdr.cmd].exec(dev, dsr_info->req,
645 dsr_info->rsp);
646 dsr_info->rsp->hdr.response = dsr_info->req->hdr.response;
647 dsr_info->rsp->hdr.ack = cmd_handlers[dsr_info->req->hdr.cmd].ack;
648 dsr_info->rsp->hdr.err = err < 0 ? -err : 0;
649
650 trace_pvrdma_exec_cmd(dsr_info->req->hdr.cmd, dsr_info->rsp->hdr.err);
651
652 dev->stats.commands++;
653
654out:
655 set_reg_val(dev, PVRDMA_REG_ERR, err);
656 post_interrupt(dev, INTR_VEC_CMD_RING);
657
658 return (err == 0) ? 0 : -EINVAL;
659}
660