qemu/hw/rdma/vmw/pvrdma_cmd.c
/*
 * QEMU paravirtual RDMA - Command channel
 *
 * Copyright (C) 2018 Oracle
 * Copyright (C) 2018 Red Hat Inc
 *
 * Authors:
 *     Yuval Shaia <yuval.shaia@oracle.com>
 *     Marcel Apfelbaum <marcel@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include "qemu/error-report.h"
#include "cpu.h"
#include "hw/hw.h"
#include "hw/pci/pci.h"
#include "hw/pci/pci_ids.h"

#include "../rdma_backend.h"
#include "../rdma_rm.h"
#include "../rdma_utils.h"

#include "pvrdma.h"
#include "standard-headers/rdma/vmw_pvrdma-abi.h"

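/*
 * Map a guest buffer that is described by a pvrdma page directory (a page
 * of page-table addresses, where each page table holds guest page
 * addresses) into one contiguous host virtual mapping, stitching the
 * individual pages together with mremap(). Returns the host virtual
 * address on success, NULL on failure.
 */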
static void *pvrdma_map_to_pdir(PCIDevice *pdev, uint64_t pdir_dma,
                                uint32_t nchunks, size_t length)
{
    uint64_t *dir, *tbl;
    int tbl_idx, dir_idx, addr_idx;
    void *host_virt = NULL, *curr_page;

    if (!nchunks) {
        pr_dbg("nchunks=0\n");
        return NULL;
    }

    dir = rdma_pci_dma_map(pdev, pdir_dma, TARGET_PAGE_SIZE);
    if (!dir) {
        error_report("PVRDMA: Failed to map to page directory");
        return NULL;
    }

    tbl = rdma_pci_dma_map(pdev, dir[0], TARGET_PAGE_SIZE);
    if (!tbl) {
        error_report("PVRDMA: Failed to map to page table 0");
        goto out_unmap_dir;
    }

    curr_page = rdma_pci_dma_map(pdev, (dma_addr_t)tbl[0], TARGET_PAGE_SIZE);
    if (!curr_page) {
        error_report("PVRDMA: Failed to map the first page");
        goto out_unmap_tbl;
    }

    host_virt = mremap(curr_page, 0, length, MREMAP_MAYMOVE);
    pr_dbg("mremap %p -> %p\n", curr_page, host_virt);
    if (host_virt == MAP_FAILED) {
        host_virt = NULL;
        error_report("PVRDMA: Failed to remap memory for host_virt");
        goto out_unmap_tbl;
    }

    rdma_pci_dma_unmap(pdev, curr_page, TARGET_PAGE_SIZE);

    pr_dbg("host_virt=%p\n", host_virt);

    dir_idx = 0;
    tbl_idx = 1;
    addr_idx = 1;
    while (addr_idx < nchunks) {
        if (tbl_idx == TARGET_PAGE_SIZE / sizeof(uint64_t)) {
            tbl_idx = 0;
            dir_idx++;
            pr_dbg("Mapping to table %d\n", dir_idx);
            rdma_pci_dma_unmap(pdev, tbl, TARGET_PAGE_SIZE);
            tbl = rdma_pci_dma_map(pdev, dir[dir_idx], TARGET_PAGE_SIZE);
            if (!tbl) {
                error_report("PVRDMA: Failed to map to page table %d", dir_idx);
                goto out_unmap_host_virt;
            }
        }

        pr_dbg("guest_dma[%d]=0x%" PRIx64 "\n", addr_idx, tbl[tbl_idx]);

        curr_page = rdma_pci_dma_map(pdev, (dma_addr_t)tbl[tbl_idx],
                                     TARGET_PAGE_SIZE);
        if (!curr_page) {
            error_report("PVRDMA: Failed to map to page %d, dir %d", tbl_idx,
                         dir_idx);
            goto out_unmap_host_virt;
        }

        mremap(curr_page, 0, TARGET_PAGE_SIZE, MREMAP_MAYMOVE | MREMAP_FIXED,
               host_virt + TARGET_PAGE_SIZE * addr_idx);

        rdma_pci_dma_unmap(pdev, curr_page, TARGET_PAGE_SIZE);

        addr_idx++;

        tbl_idx++;
    }

    goto out_unmap_tbl;

out_unmap_host_virt:
    munmap(host_virt, length);
    host_virt = NULL;

out_unmap_tbl:
    rdma_pci_dma_unmap(pdev, tbl, TARGET_PAGE_SIZE);

out_unmap_dir:
    rdma_pci_dma_unmap(pdev, dir, TARGET_PAGE_SIZE);

    return host_virt;
}

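/*
 * Query the port attributes of the backend device and return the subset
 * that the guest driver needs, clamping the GID and pkey table lengths to
 * the emulated device's limits.
 */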
static int query_port(PVRDMADev *dev, union pvrdma_cmd_req *req,
                      union pvrdma_cmd_resp *rsp)
{
    struct pvrdma_cmd_query_port *cmd = &req->query_port;
    struct pvrdma_cmd_query_port_resp *resp = &rsp->query_port_resp;
    struct pvrdma_port_attr attrs = {0};

    pr_dbg("port=%d\n", cmd->port_num);

    if (rdma_backend_query_port(&dev->backend_dev,
                                (struct ibv_port_attr *)&attrs)) {
        return -ENOMEM;
    }

    memset(resp, 0, sizeof(*resp));
    resp->hdr.response = cmd->hdr.response;
    resp->hdr.ack = PVRDMA_CMD_QUERY_PORT_RESP;
    resp->hdr.err = 0;

    resp->attrs.state = attrs.state;
    resp->attrs.max_mtu = attrs.max_mtu;
    resp->attrs.active_mtu = attrs.active_mtu;
    resp->attrs.phys_state = attrs.phys_state;
    resp->attrs.gid_tbl_len = MIN(MAX_PORT_GIDS, attrs.gid_tbl_len);
    resp->attrs.max_msg_sz = 1024;
    resp->attrs.pkey_tbl_len = MIN(MAX_PORT_PKEYS, attrs.pkey_tbl_len);
    resp->attrs.active_width = 1;
    resp->attrs.active_speed = 1;

    return 0;
}

static int query_pkey(PVRDMADev *dev, union pvrdma_cmd_req *req,
                      union pvrdma_cmd_resp *rsp)
{
    struct pvrdma_cmd_query_pkey *cmd = &req->query_pkey;
    struct pvrdma_cmd_query_pkey_resp *resp = &rsp->query_pkey_resp;

    pr_dbg("port=%d\n", cmd->port_num);
    pr_dbg("index=%d\n", cmd->index);

    memset(resp, 0, sizeof(*resp));
    resp->hdr.response = cmd->hdr.response;
    resp->hdr.ack = PVRDMA_CMD_QUERY_PKEY_RESP;
    resp->hdr.err = 0;

    resp->pkey = PVRDMA_PKEY;
    pr_dbg("pkey=0x%x\n", resp->pkey);

    return 0;
}

static int create_pd(PVRDMADev *dev, union pvrdma_cmd_req *req,
                     union pvrdma_cmd_resp *rsp)
{
    struct pvrdma_cmd_create_pd *cmd = &req->create_pd;
    struct pvrdma_cmd_create_pd_resp *resp = &rsp->create_pd_resp;

    pr_dbg("context=0x%x\n", cmd->ctx_handle ? cmd->ctx_handle : 0);

    memset(resp, 0, sizeof(*resp));
    resp->hdr.response = cmd->hdr.response;
    resp->hdr.ack = PVRDMA_CMD_CREATE_PD_RESP;
    resp->hdr.err = rdma_rm_alloc_pd(&dev->rdma_dev_res, &dev->backend_dev,
                                     &resp->pd_handle, cmd->ctx_handle);

    pr_dbg("ret=%d\n", resp->hdr.err);
    return resp->hdr.err;
}

static int destroy_pd(PVRDMADev *dev, union pvrdma_cmd_req *req,
                      union pvrdma_cmd_resp *rsp)
{
    struct pvrdma_cmd_destroy_pd *cmd = &req->destroy_pd;

    pr_dbg("pd_handle=%d\n", cmd->pd_handle);

    rdma_rm_dealloc_pd(&dev->rdma_dev_res, cmd->pd_handle);

    return 0;
}

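/*
 * Register a memory region. Unless the DMA flag is set, the guest pages
 * listed in the page directory are first stitched into one contiguous host
 * mapping, which is then passed to the resource manager for registration
 * with the backend device.
 */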
static int create_mr(PVRDMADev *dev, union pvrdma_cmd_req *req,
                     union pvrdma_cmd_resp *rsp)
{
    struct pvrdma_cmd_create_mr *cmd = &req->create_mr;
    struct pvrdma_cmd_create_mr_resp *resp = &rsp->create_mr_resp;
    PCIDevice *pci_dev = PCI_DEVICE(dev);
    void *host_virt = NULL;

    memset(resp, 0, sizeof(*resp));
    resp->hdr.response = cmd->hdr.response;
    resp->hdr.ack = PVRDMA_CMD_CREATE_MR_RESP;

    pr_dbg("pd_handle=%d\n", cmd->pd_handle);
    pr_dbg("access_flags=0x%x\n", cmd->access_flags);
    pr_dbg("flags=0x%x\n", cmd->flags);

    if (!(cmd->flags & PVRDMA_MR_FLAG_DMA)) {
        host_virt = pvrdma_map_to_pdir(pci_dev, cmd->pdir_dma, cmd->nchunks,
                                       cmd->length);
        if (!host_virt) {
            pr_dbg("Failed to map to pdir\n");
            resp->hdr.err = -EINVAL;
            goto out;
        }
    }

    resp->hdr.err = rdma_rm_alloc_mr(&dev->rdma_dev_res, cmd->pd_handle,
                                     cmd->start, cmd->length, host_virt,
                                     cmd->access_flags, &resp->mr_handle,
                                     &resp->lkey, &resp->rkey);
    if (host_virt && !resp->hdr.err) {
        munmap(host_virt, cmd->length);
    }

out:
    pr_dbg("ret=%d\n", resp->hdr.err);
    return resp->hdr.err;
}

static int destroy_mr(PVRDMADev *dev, union pvrdma_cmd_req *req,
                      union pvrdma_cmd_resp *rsp)
{
    struct pvrdma_cmd_destroy_mr *cmd = &req->destroy_mr;

    pr_dbg("mr_handle=%d\n", cmd->mr_handle);

    rdma_rm_dealloc_mr(&dev->rdma_dev_res, cmd->mr_handle);

    return 0;
}

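/*
 * Map the CQ ring described by the page directory: the first data page
 * holds the shared ring-state structures, the remaining nchunks - 1 pages
 * hold the CQEs themselves.
 */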
static int create_cq_ring(PCIDevice *pci_dev, PvrdmaRing **ring,
                          uint64_t pdir_dma, uint32_t nchunks, uint32_t cqe)
{
    uint64_t *dir = NULL, *tbl = NULL;
    PvrdmaRing *r;
    int rc = -EINVAL;
    char ring_name[MAX_RING_NAME_SZ];

    if (!nchunks || nchunks > PVRDMA_MAX_FAST_REG_PAGES) {
        pr_dbg("invalid nchunks: %d\n", nchunks);
        return rc;
    }

    pr_dbg("pdir_dma=0x%llx\n", (long long unsigned int)pdir_dma);
    dir = rdma_pci_dma_map(pci_dev, pdir_dma, TARGET_PAGE_SIZE);
    if (!dir) {
        pr_dbg("Failed to map to CQ page directory\n");
        goto out;
    }

    tbl = rdma_pci_dma_map(pci_dev, dir[0], TARGET_PAGE_SIZE);
    if (!tbl) {
        pr_dbg("Failed to map to CQ page table\n");
        goto out;
    }

    r = g_malloc(sizeof(*r));
    *ring = r;

    r->ring_state = (struct pvrdma_ring *)
        rdma_pci_dma_map(pci_dev, tbl[0], TARGET_PAGE_SIZE);

    if (!r->ring_state) {
        pr_dbg("Failed to map to CQ ring state\n");
        goto out_free_ring;
    }

    sprintf(ring_name, "cq_ring_%" PRIx64, pdir_dma);
    rc = pvrdma_ring_init(r, ring_name, pci_dev, &r->ring_state[1],
                          cqe, sizeof(struct pvrdma_cqe),
                          /* first page is ring state */
                          (dma_addr_t *)&tbl[1], nchunks - 1);
    if (rc) {
        goto out_unmap_ring_state;
    }

    goto out;

out_unmap_ring_state:
    /* ring_state was in slot 1, not 0, so we need to jump back */
    rdma_pci_dma_unmap(pci_dev, --r->ring_state, TARGET_PAGE_SIZE);

out_free_ring:
    g_free(r);

out:
    rdma_pci_dma_unmap(pci_dev, tbl, TARGET_PAGE_SIZE);
    rdma_pci_dma_unmap(pci_dev, dir, TARGET_PAGE_SIZE);

    return rc;
}

static void destroy_cq_ring(PvrdmaRing *ring)
{
    pvrdma_ring_free(ring);
    /* ring_state was in slot 1, not 0, so we need to jump back */
    rdma_pci_dma_unmap(ring->dev, --ring->ring_state, TARGET_PAGE_SIZE);
    g_free(ring);
}

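/*
 * Create a completion queue: map its guest ring, then allocate the CQ in
 * the resource manager and the backend device. The ring is torn down again
 * if that allocation fails.
 */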
static int create_cq(PVRDMADev *dev, union pvrdma_cmd_req *req,
                     union pvrdma_cmd_resp *rsp)
{
    struct pvrdma_cmd_create_cq *cmd = &req->create_cq;
    struct pvrdma_cmd_create_cq_resp *resp = &rsp->create_cq_resp;
    PvrdmaRing *ring = NULL;

    memset(resp, 0, sizeof(*resp));
    resp->hdr.response = cmd->hdr.response;
    resp->hdr.ack = PVRDMA_CMD_CREATE_CQ_RESP;

    resp->cqe = cmd->cqe;

    resp->hdr.err = create_cq_ring(PCI_DEVICE(dev), &ring, cmd->pdir_dma,
                                   cmd->nchunks, cmd->cqe);
    if (resp->hdr.err) {
        goto out;
    }

    pr_dbg("ring=%p\n", ring);

    resp->hdr.err = rdma_rm_alloc_cq(&dev->rdma_dev_res, &dev->backend_dev,
                                     cmd->cqe, &resp->cq_handle, ring);
    if (resp->hdr.err) {
        destroy_cq_ring(ring);
    }

    resp->cqe = cmd->cqe;

out:
    pr_dbg("ret=%d\n", resp->hdr.err);
    return resp->hdr.err;
}

static int destroy_cq(PVRDMADev *dev, union pvrdma_cmd_req *req,
                      union pvrdma_cmd_resp *rsp)
{
    struct pvrdma_cmd_destroy_cq *cmd = &req->destroy_cq;
    RdmaRmCQ *cq;
    PvrdmaRing *ring;

    pr_dbg("cq_handle=%d\n", cmd->cq_handle);

    cq = rdma_rm_get_cq(&dev->rdma_dev_res, cmd->cq_handle);
    if (!cq) {
        pr_dbg("Invalid CQ handle\n");
        return -EINVAL;
    }

    ring = (PvrdmaRing *)cq->opaque;
    destroy_cq_ring(ring);

    rdma_rm_dealloc_cq(&dev->rdma_dev_res, cmd->cq_handle);

    return 0;
}

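/*
 * Map the send and receive rings of a QP from a single page directory:
 * the first data page holds both ring-state structures, followed by
 * spages pages for the send ring and rpages pages for the receive ring.
 */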
static int create_qp_rings(PCIDevice *pci_dev, uint64_t pdir_dma,
                           PvrdmaRing **rings, uint32_t scqe, uint32_t smax_sge,
                           uint32_t spages, uint32_t rcqe, uint32_t rmax_sge,
                           uint32_t rpages)
{
    uint64_t *dir = NULL, *tbl = NULL;
    PvrdmaRing *sr, *rr;
    int rc = -EINVAL;
    char ring_name[MAX_RING_NAME_SZ];
    uint32_t wqe_sz;

    if (!spages || spages > PVRDMA_MAX_FAST_REG_PAGES
        || !rpages || rpages > PVRDMA_MAX_FAST_REG_PAGES) {
        pr_dbg("invalid pages: %d, %d\n", spages, rpages);
        return rc;
    }

    pr_dbg("pdir_dma=0x%llx\n", (long long unsigned int)pdir_dma);
    dir = rdma_pci_dma_map(pci_dev, pdir_dma, TARGET_PAGE_SIZE);
    if (!dir) {
        pr_dbg("Failed to map to QP page directory\n");
        goto out;
    }

    tbl = rdma_pci_dma_map(pci_dev, dir[0], TARGET_PAGE_SIZE);
    if (!tbl) {
        pr_dbg("Failed to map to QP page table\n");
        goto out;
    }

    sr = g_malloc(2 * sizeof(*rr));
    rr = &sr[1];
    pr_dbg("sring=%p\n", sr);
    pr_dbg("rring=%p\n", rr);

    *rings = sr;

    pr_dbg("scqe=%d\n", scqe);
    pr_dbg("smax_sge=%d\n", smax_sge);
    pr_dbg("spages=%d\n", spages);
    pr_dbg("rcqe=%d\n", rcqe);
    pr_dbg("rmax_sge=%d\n", rmax_sge);
    pr_dbg("rpages=%d\n", rpages);

    /* Create send ring */
    sr->ring_state = (struct pvrdma_ring *)
        rdma_pci_dma_map(pci_dev, tbl[0], TARGET_PAGE_SIZE);
    if (!sr->ring_state) {
        pr_dbg("Failed to map to QP ring state\n");
        goto out_free_sr_mem;
    }

    wqe_sz = pow2ceil(sizeof(struct pvrdma_sq_wqe_hdr) +
                      sizeof(struct pvrdma_sge) * smax_sge - 1);

    sprintf(ring_name, "qp_sring_%" PRIx64, pdir_dma);
    rc = pvrdma_ring_init(sr, ring_name, pci_dev, sr->ring_state,
                          scqe, wqe_sz, (dma_addr_t *)&tbl[1], spages);
    if (rc) {
        goto out_unmap_ring_state;
    }

    /* Create recv ring */
    rr->ring_state = &sr->ring_state[1];
    wqe_sz = pow2ceil(sizeof(struct pvrdma_rq_wqe_hdr) +
                      sizeof(struct pvrdma_sge) * rmax_sge - 1);
    sprintf(ring_name, "qp_rring_%" PRIx64, pdir_dma);
    rc = pvrdma_ring_init(rr, ring_name, pci_dev, rr->ring_state,
                          rcqe, wqe_sz, (dma_addr_t *)&tbl[1 + spages], rpages);
    if (rc) {
        goto out_free_sr;
    }

    goto out;

out_free_sr:
    pvrdma_ring_free(sr);

out_unmap_ring_state:
    rdma_pci_dma_unmap(pci_dev, sr->ring_state, TARGET_PAGE_SIZE);

out_free_sr_mem:
    g_free(sr);

out:
    rdma_pci_dma_unmap(pci_dev, tbl, TARGET_PAGE_SIZE);
    rdma_pci_dma_unmap(pci_dev, dir, TARGET_PAGE_SIZE);

    return rc;
}

static void destroy_qp_rings(PvrdmaRing *ring)
{
    pr_dbg("sring=%p\n", &ring[0]);
    pvrdma_ring_free(&ring[0]);
    pr_dbg("rring=%p\n", &ring[1]);
    pvrdma_ring_free(&ring[1]);

    rdma_pci_dma_unmap(ring->dev, ring->ring_state, TARGET_PAGE_SIZE);
    g_free(ring);
}

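/*
 * Create a queue pair: map its send/receive rings, then allocate the QP
 * through the resource manager. The number of receive-ring pages is what
 * remains of total_chunks after the send pages and the shared ring-state
 * page are accounted for.
 */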
static int create_qp(PVRDMADev *dev, union pvrdma_cmd_req *req,
                     union pvrdma_cmd_resp *rsp)
{
    struct pvrdma_cmd_create_qp *cmd = &req->create_qp;
    struct pvrdma_cmd_create_qp_resp *resp = &rsp->create_qp_resp;
    PvrdmaRing *rings = NULL;

    memset(resp, 0, sizeof(*resp));
    resp->hdr.response = cmd->hdr.response;
    resp->hdr.ack = PVRDMA_CMD_CREATE_QP_RESP;

    pr_dbg("total_chunks=%d\n", cmd->total_chunks);
    pr_dbg("send_chunks=%d\n", cmd->send_chunks);

    resp->hdr.err = create_qp_rings(PCI_DEVICE(dev), cmd->pdir_dma, &rings,
                                    cmd->max_send_wr, cmd->max_send_sge,
                                    cmd->send_chunks, cmd->max_recv_wr,
                                    cmd->max_recv_sge, cmd->total_chunks -
                                    cmd->send_chunks - 1);
    if (resp->hdr.err) {
        goto out;
    }

    pr_dbg("rings=%p\n", rings);

    resp->hdr.err = rdma_rm_alloc_qp(&dev->rdma_dev_res, cmd->pd_handle,
                                     cmd->qp_type, cmd->max_send_wr,
                                     cmd->max_send_sge, cmd->send_cq_handle,
                                     cmd->max_recv_wr, cmd->max_recv_sge,
                                     cmd->recv_cq_handle, rings, &resp->qpn);
    if (resp->hdr.err) {
        destroy_qp_rings(rings);
        goto out;
    }

    resp->max_send_wr = cmd->max_send_wr;
    resp->max_recv_wr = cmd->max_recv_wr;
    resp->max_send_sge = cmd->max_send_sge;
    resp->max_recv_sge = cmd->max_recv_sge;
    resp->max_inline_data = cmd->max_inline_data;

out:
    pr_dbg("ret=%d\n", resp->hdr.err);
    return resp->hdr.err;
}

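/*
 * Apply a modify-QP request by forwarding the masked attributes
 * (destination GID and QPN, target state, qkey and PSNs) to the resource
 * manager, which drives the backend QP state transitions.
 */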
static int modify_qp(PVRDMADev *dev, union pvrdma_cmd_req *req,
                     union pvrdma_cmd_resp *rsp)
{
    struct pvrdma_cmd_modify_qp *cmd = &req->modify_qp;

    pr_dbg("qp_handle=%d\n", cmd->qp_handle);

    memset(rsp, 0, sizeof(*rsp));
    rsp->hdr.response = cmd->hdr.response;
    rsp->hdr.ack = PVRDMA_CMD_MODIFY_QP_RESP;

    rsp->hdr.err = rdma_rm_modify_qp(&dev->rdma_dev_res, &dev->backend_dev,
                                 cmd->qp_handle, cmd->attr_mask,
                                 (union ibv_gid *)&cmd->attrs.ah_attr.grh.dgid,
                                 cmd->attrs.dest_qp_num,
                                 (enum ibv_qp_state)cmd->attrs.qp_state,
                                 cmd->attrs.qkey, cmd->attrs.rq_psn,
                                 cmd->attrs.sq_psn);

    pr_dbg("ret=%d\n", rsp->hdr.err);
    return rsp->hdr.err;
}

static int query_qp(PVRDMADev *dev, union pvrdma_cmd_req *req,
                     union pvrdma_cmd_resp *rsp)
{
    struct pvrdma_cmd_query_qp *cmd = &req->query_qp;
    struct pvrdma_cmd_query_qp_resp *resp = &rsp->query_qp_resp;
    struct ibv_qp_init_attr init_attr;

    pr_dbg("qp_handle=%d\n", cmd->qp_handle);
    pr_dbg("attr_mask=0x%x\n", cmd->attr_mask);

    memset(rsp, 0, sizeof(*rsp));
    rsp->hdr.response = cmd->hdr.response;
    rsp->hdr.ack = PVRDMA_CMD_QUERY_QP_RESP;

    rsp->hdr.err = rdma_rm_query_qp(&dev->rdma_dev_res, &dev->backend_dev,
                                    cmd->qp_handle,
                                    (struct ibv_qp_attr *)&resp->attrs,
                                    cmd->attr_mask, &init_attr);

    pr_dbg("ret=%d\n", rsp->hdr.err);
    return rsp->hdr.err;
}

static int destroy_qp(PVRDMADev *dev, union pvrdma_cmd_req *req,
                      union pvrdma_cmd_resp *rsp)
{
    struct pvrdma_cmd_destroy_qp *cmd = &req->destroy_qp;
    RdmaRmQP *qp;
    PvrdmaRing *ring;

    qp = rdma_rm_get_qp(&dev->rdma_dev_res, cmd->qp_handle);
    if (!qp) {
        pr_dbg("Invalid QP handle\n");
        return -EINVAL;
    }

    rdma_rm_dealloc_qp(&dev->rdma_dev_res, cmd->qp_handle);

    ring = (PvrdmaRing *)qp->opaque;
    destroy_qp_rings(ring);

    return 0;
}

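/*
 * Bind a GID: store the guest-supplied GID in the per-port GID table at
 * the requested index. The emulated device exposes a single port.
 */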
static int create_bind(PVRDMADev *dev, union pvrdma_cmd_req *req,
                       union pvrdma_cmd_resp *rsp)
{
    struct pvrdma_cmd_create_bind *cmd = &req->create_bind;
#ifdef PVRDMA_DEBUG
    __be64 *subnet = (__be64 *)&cmd->new_gid[0];
    __be64 *if_id = (__be64 *)&cmd->new_gid[8];
#endif

    pr_dbg("index=%d\n", cmd->index);

    if (cmd->index >= MAX_PORT_GIDS) {
        return -EINVAL;
    }

    pr_dbg("gid[%d]=0x%llx,0x%llx\n", cmd->index,
           (long long unsigned int)be64_to_cpu(*subnet),
           (long long unsigned int)be64_to_cpu(*if_id));

    /* The driver supports only one port */
    memcpy(dev->rdma_dev_res.ports[0].gid_tbl[cmd->index].raw, &cmd->new_gid,
           sizeof(cmd->new_gid));

    /* TODO: Since the driver stores node_guid at load_dsr phase, this
     * assignment is not relevant; we need to figure out a way to
     * retrieve the MAC of our netdev */
    dev->node_guid = dev->rdma_dev_res.ports[0].gid_tbl[0].global.interface_id;
    pr_dbg("dev->node_guid=0x%llx\n",
           (long long unsigned int)be64_to_cpu(dev->node_guid));

    return 0;
}

static int destroy_bind(PVRDMADev *dev, union pvrdma_cmd_req *req,
                        union pvrdma_cmd_resp *rsp)
{
    struct pvrdma_cmd_destroy_bind *cmd = &req->destroy_bind;

    pr_dbg("index=%d\n", cmd->index);

    if (cmd->index >= MAX_PORT_GIDS) {
        return -EINVAL;
    }

    memset(dev->rdma_dev_res.ports[0].gid_tbl[cmd->index].raw, 0,
           sizeof(dev->rdma_dev_res.ports[0].gid_tbl[cmd->index].raw));

    return 0;
}

static int create_uc(PVRDMADev *dev, union pvrdma_cmd_req *req,
                     union pvrdma_cmd_resp *rsp)
{
    struct pvrdma_cmd_create_uc *cmd = &req->create_uc;
    struct pvrdma_cmd_create_uc_resp *resp = &rsp->create_uc_resp;

    pr_dbg("pfn=%d\n", cmd->pfn);

    memset(resp, 0, sizeof(*resp));
    resp->hdr.response = cmd->hdr.response;
    resp->hdr.ack = PVRDMA_CMD_CREATE_UC_RESP;
    resp->hdr.err = rdma_rm_alloc_uc(&dev->rdma_dev_res, cmd->pfn,
                                     &resp->ctx_handle);

    pr_dbg("ret=%d\n", resp->hdr.err);

    return 0;
}

static int destroy_uc(PVRDMADev *dev, union pvrdma_cmd_req *req,
                      union pvrdma_cmd_resp *rsp)
{
    struct pvrdma_cmd_destroy_uc *cmd = &req->destroy_uc;

    pr_dbg("ctx_handle=%d\n", cmd->ctx_handle);

    rdma_rm_dealloc_uc(&dev->rdma_dev_res, cmd->ctx_handle);

    return 0;
}

struct cmd_handler {
    uint32_t cmd;
    int (*exec)(PVRDMADev *dev, union pvrdma_cmd_req *req,
            union pvrdma_cmd_resp *rsp);
};

static struct cmd_handler cmd_handlers[] = {
    {PVRDMA_CMD_QUERY_PORT, query_port},
    {PVRDMA_CMD_QUERY_PKEY, query_pkey},
    {PVRDMA_CMD_CREATE_PD, create_pd},
    {PVRDMA_CMD_DESTROY_PD, destroy_pd},
    {PVRDMA_CMD_CREATE_MR, create_mr},
    {PVRDMA_CMD_DESTROY_MR, destroy_mr},
    {PVRDMA_CMD_CREATE_CQ, create_cq},
    {PVRDMA_CMD_RESIZE_CQ, NULL},
    {PVRDMA_CMD_DESTROY_CQ, destroy_cq},
    {PVRDMA_CMD_CREATE_QP, create_qp},
    {PVRDMA_CMD_MODIFY_QP, modify_qp},
    {PVRDMA_CMD_QUERY_QP, query_qp},
    {PVRDMA_CMD_DESTROY_QP, destroy_qp},
    {PVRDMA_CMD_CREATE_UC, create_uc},
    {PVRDMA_CMD_DESTROY_UC, destroy_uc},
    {PVRDMA_CMD_CREATE_BIND, create_bind},
    {PVRDMA_CMD_DESTROY_BIND, destroy_bind},
};

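/*
 * Command dispatcher: look up the handler for the request currently in the
 * DSR command slot, run it, publish the result through the ERR register
 * and signal completion with an interrupt on the command-ring vector.
 */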
int execute_command(PVRDMADev *dev)
{
    int err = 0xFFFF;
    DSRInfo *dsr_info;

    dsr_info = &dev->dsr_info;

    pr_dbg("cmd=%d\n", dsr_info->req->hdr.cmd);
    if (dsr_info->req->hdr.cmd >= sizeof(cmd_handlers) /
                      sizeof(struct cmd_handler)) {
        pr_dbg("Unsupported command\n");
        goto out;
    }

    if (!cmd_handlers[dsr_info->req->hdr.cmd].exec) {
        pr_dbg("Unsupported command (not implemented yet)\n");
        goto out;
    }

    err = cmd_handlers[dsr_info->req->hdr.cmd].exec(dev, dsr_info->req,
                            dsr_info->rsp);
out:
    set_reg_val(dev, PVRDMA_REG_ERR, err);
    post_interrupt(dev, INTR_VEC_CMD_RING);

    return (err == 0) ? 0 : -EINVAL;
}