qemu/hw/rdma/vmw/pvrdma_cmd.c
<<
>>
Prefs
   1/*
   2 * QEMU paravirtual RDMA - Command channel
   3 *
   4 * Copyright (C) 2018 Oracle
   5 * Copyright (C) 2018 Red Hat Inc
   6 *
   7 * Authors:
   8 *     Yuval Shaia <yuval.shaia@oracle.com>
   9 *     Marcel Apfelbaum <marcel@redhat.com>
  10 *
  11 * This work is licensed under the terms of the GNU GPL, version 2 or later.
  12 * See the COPYING file in the top-level directory.
  13 *
  14 */
  15
  16#include "qemu/osdep.h"
  17#include "qemu/error-report.h"
  18#include "cpu.h"
  19#include "hw/hw.h"
  20#include "hw/pci/pci.h"
  21#include "hw/pci/pci_ids.h"
  22
  23#include "../rdma_backend.h"
  24#include "../rdma_rm.h"
  25#include "../rdma_utils.h"
  26
  27#include "pvrdma.h"
  28#include "standard-headers/rdma/vmw_pvrdma-abi.h"
  29
  30static void *pvrdma_map_to_pdir(PCIDevice *pdev, uint64_t pdir_dma,
  31                                uint32_t nchunks, size_t length)
  32{
  33    uint64_t *dir, *tbl;
  34    int tbl_idx, dir_idx, addr_idx;
  35    void *host_virt = NULL, *curr_page;
  36
  37    if (!nchunks) {
  38        pr_dbg("nchunks=0\n");
  39        return NULL;
  40    }
  41
  42    dir = rdma_pci_dma_map(pdev, pdir_dma, TARGET_PAGE_SIZE);
  43    if (!dir) {
  44        error_report("PVRDMA: Failed to map to page directory");
  45        return NULL;
  46    }
  47
  48    tbl = rdma_pci_dma_map(pdev, dir[0], TARGET_PAGE_SIZE);
  49    if (!tbl) {
  50        error_report("PVRDMA: Failed to map to page table 0");
  51        goto out_unmap_dir;
  52    }
  53
  54    curr_page = rdma_pci_dma_map(pdev, (dma_addr_t)tbl[0], TARGET_PAGE_SIZE);
  55    if (!curr_page) {
  56        error_report("PVRDMA: Failed to map the first page");
  57        goto out_unmap_tbl;
  58    }
  59
  60    host_virt = mremap(curr_page, 0, length, MREMAP_MAYMOVE);
  61    pr_dbg("mremap %p -> %p\n", curr_page, host_virt);
  62    if (host_virt == MAP_FAILED) {
  63        host_virt = NULL;
  64        error_report("PVRDMA: Failed to remap memory for host_virt");
  65        goto out_unmap_tbl;
  66    }
  67
  68    rdma_pci_dma_unmap(pdev, curr_page, TARGET_PAGE_SIZE);
  69
  70    pr_dbg("host_virt=%p\n", host_virt);
  71
  72    dir_idx = 0;
  73    tbl_idx = 1;
  74    addr_idx = 1;
  75    while (addr_idx < nchunks) {
  76        if (tbl_idx == TARGET_PAGE_SIZE / sizeof(uint64_t)) {
  77            tbl_idx = 0;
  78            dir_idx++;
  79            pr_dbg("Mapping to table %d\n", dir_idx);
  80            rdma_pci_dma_unmap(pdev, tbl, TARGET_PAGE_SIZE);
  81            tbl = rdma_pci_dma_map(pdev, dir[dir_idx], TARGET_PAGE_SIZE);
  82            if (!tbl) {
  83                error_report("PVRDMA: Failed to map to page table %d", dir_idx);
  84                goto out_unmap_host_virt;
  85            }
  86        }
  87
  88        pr_dbg("guest_dma[%d]=0x%" PRIx64 "\n", addr_idx, tbl[tbl_idx]);
  89
  90        curr_page = rdma_pci_dma_map(pdev, (dma_addr_t)tbl[tbl_idx],
  91                                     TARGET_PAGE_SIZE);
  92        if (!curr_page) {
  93            error_report("PVRDMA: Failed to map to page %d, dir %d", tbl_idx,
  94                         dir_idx);
  95            goto out_unmap_host_virt;
  96        }
  97
  98        mremap(curr_page, 0, TARGET_PAGE_SIZE, MREMAP_MAYMOVE | MREMAP_FIXED,
  99               host_virt + TARGET_PAGE_SIZE * addr_idx);
 100
 101        rdma_pci_dma_unmap(pdev, curr_page, TARGET_PAGE_SIZE);
 102
 103        addr_idx++;
 104
 105        tbl_idx++;
 106    }
 107
 108    goto out_unmap_tbl;
 109
 110out_unmap_host_virt:
 111    munmap(host_virt, length);
 112    host_virt = NULL;
 113
 114out_unmap_tbl:
 115    rdma_pci_dma_unmap(pdev, tbl, TARGET_PAGE_SIZE);
 116
 117out_unmap_dir:
 118    rdma_pci_dma_unmap(pdev, dir, TARGET_PAGE_SIZE);
 119
 120    return host_virt;
 121}
 122
 123static int query_port(PVRDMADev *dev, union pvrdma_cmd_req *req,
 124                      union pvrdma_cmd_resp *rsp)
 125{
 126    struct pvrdma_cmd_query_port *cmd = &req->query_port;
 127    struct pvrdma_cmd_query_port_resp *resp = &rsp->query_port_resp;
 128    struct pvrdma_port_attr attrs = {0};
 129
 130    pr_dbg("port=%d\n", cmd->port_num);
 131
 132    if (rdma_backend_query_port(&dev->backend_dev,
 133                                (struct ibv_port_attr *)&attrs)) {
 134        return -ENOMEM;
 135    }
 136
 137    memset(resp, 0, sizeof(*resp));
 138    resp->hdr.response = cmd->hdr.response;
 139    resp->hdr.ack = PVRDMA_CMD_QUERY_PORT_RESP;
 140    resp->hdr.err = 0;
 141
 142    resp->attrs.state = attrs.state;
 143    resp->attrs.max_mtu = attrs.max_mtu;
 144    resp->attrs.active_mtu = attrs.active_mtu;
 145    resp->attrs.phys_state = attrs.phys_state;
 146    resp->attrs.gid_tbl_len = MIN(MAX_PORT_GIDS, attrs.gid_tbl_len);
 147    resp->attrs.max_msg_sz = 1024;
 148    resp->attrs.pkey_tbl_len = MIN(MAX_PORT_PKEYS, attrs.pkey_tbl_len);
 149    resp->attrs.active_width = 1;
 150    resp->attrs.active_speed = 1;
 151
 152    return 0;
 153}
 154
 155static int query_pkey(PVRDMADev *dev, union pvrdma_cmd_req *req,
 156                      union pvrdma_cmd_resp *rsp)
 157{
 158    struct pvrdma_cmd_query_pkey *cmd = &req->query_pkey;
 159    struct pvrdma_cmd_query_pkey_resp *resp = &rsp->query_pkey_resp;
 160
 161    pr_dbg("port=%d\n", cmd->port_num);
 162    pr_dbg("index=%d\n", cmd->index);
 163
 164    memset(resp, 0, sizeof(*resp));
 165    resp->hdr.response = cmd->hdr.response;
 166    resp->hdr.ack = PVRDMA_CMD_QUERY_PKEY_RESP;
 167    resp->hdr.err = 0;
 168
 169    resp->pkey = PVRDMA_PKEY;
 170    pr_dbg("pkey=0x%x\n", resp->pkey);
 171
 172    return 0;
 173}
 174
 175static int create_pd(PVRDMADev *dev, union pvrdma_cmd_req *req,
 176                     union pvrdma_cmd_resp *rsp)
 177{
 178    struct pvrdma_cmd_create_pd *cmd = &req->create_pd;
 179    struct pvrdma_cmd_create_pd_resp *resp = &rsp->create_pd_resp;
 180
 181    pr_dbg("context=0x%x\n", cmd->ctx_handle ? cmd->ctx_handle : 0);
 182
 183    memset(resp, 0, sizeof(*resp));
 184    resp->hdr.response = cmd->hdr.response;
 185    resp->hdr.ack = PVRDMA_CMD_CREATE_PD_RESP;
 186    resp->hdr.err = rdma_rm_alloc_pd(&dev->rdma_dev_res, &dev->backend_dev,
 187                                     &resp->pd_handle, cmd->ctx_handle);
 188
 189    pr_dbg("ret=%d\n", resp->hdr.err);
 190    return resp->hdr.err;
 191}
 192
 193static int destroy_pd(PVRDMADev *dev, union pvrdma_cmd_req *req,
 194                      union pvrdma_cmd_resp *rsp)
 195{
 196    struct pvrdma_cmd_destroy_pd *cmd = &req->destroy_pd;
 197
 198    pr_dbg("pd_handle=%d\n", cmd->pd_handle);
 199
 200    rdma_rm_dealloc_pd(&dev->rdma_dev_res, cmd->pd_handle);
 201
 202    return 0;
 203}
 204
 205static int create_mr(PVRDMADev *dev, union pvrdma_cmd_req *req,
 206                     union pvrdma_cmd_resp *rsp)
 207{
 208    struct pvrdma_cmd_create_mr *cmd = &req->create_mr;
 209    struct pvrdma_cmd_create_mr_resp *resp = &rsp->create_mr_resp;
 210    PCIDevice *pci_dev = PCI_DEVICE(dev);
 211    void *host_virt = NULL;
 212
 213    memset(resp, 0, sizeof(*resp));
 214    resp->hdr.response = cmd->hdr.response;
 215    resp->hdr.ack = PVRDMA_CMD_CREATE_MR_RESP;
 216
 217    pr_dbg("pd_handle=%d\n", cmd->pd_handle);
 218    pr_dbg("access_flags=0x%x\n", cmd->access_flags);
 219    pr_dbg("flags=0x%x\n", cmd->flags);
 220
 221    if (!(cmd->flags & PVRDMA_MR_FLAG_DMA)) {
 222        host_virt = pvrdma_map_to_pdir(pci_dev, cmd->pdir_dma, cmd->nchunks,
 223                                       cmd->length);
 224        if (!host_virt) {
 225            pr_dbg("Failed to map to pdir\n");
 226            resp->hdr.err = -EINVAL;
 227            goto out;
 228        }
 229    }
 230
 231    resp->hdr.err = rdma_rm_alloc_mr(&dev->rdma_dev_res, cmd->pd_handle,
 232                                     cmd->start, cmd->length, host_virt,
 233                                     cmd->access_flags, &resp->mr_handle,
 234                                     &resp->lkey, &resp->rkey);
 235    if (host_virt && !resp->hdr.err) {
 236        munmap(host_virt, cmd->length);
 237    }
 238
 239out:
 240    pr_dbg("ret=%d\n", resp->hdr.err);
 241    return resp->hdr.err;
 242}
 243
 244static int destroy_mr(PVRDMADev *dev, union pvrdma_cmd_req *req,
 245                      union pvrdma_cmd_resp *rsp)
 246{
 247    struct pvrdma_cmd_destroy_mr *cmd = &req->destroy_mr;
 248
 249    pr_dbg("mr_handle=%d\n", cmd->mr_handle);
 250
 251    rdma_rm_dealloc_mr(&dev->rdma_dev_res, cmd->mr_handle);
 252
 253    return 0;
 254}
 255
 256static int create_cq_ring(PCIDevice *pci_dev , PvrdmaRing **ring,
 257                          uint64_t pdir_dma, uint32_t nchunks, uint32_t cqe)
 258{
 259    uint64_t *dir = NULL, *tbl = NULL;
 260    PvrdmaRing *r;
 261    int rc = -EINVAL;
 262    char ring_name[MAX_RING_NAME_SZ];
 263
 264    pr_dbg("pdir_dma=0x%llx\n", (long long unsigned int)pdir_dma);
 265    dir = rdma_pci_dma_map(pci_dev, pdir_dma, TARGET_PAGE_SIZE);
 266    if (!dir) {
 267        pr_dbg("Failed to map to CQ page directory\n");
 268        goto out;
 269    }
 270
 271    tbl = rdma_pci_dma_map(pci_dev, dir[0], TARGET_PAGE_SIZE);
 272    if (!tbl) {
 273        pr_dbg("Failed to map to CQ page table\n");
 274        goto out;
 275    }
 276
 277    r = g_malloc(sizeof(*r));
 278    *ring = r;
 279
 280    r->ring_state = (struct pvrdma_ring *)
 281        rdma_pci_dma_map(pci_dev, tbl[0], TARGET_PAGE_SIZE);
 282
 283    if (!r->ring_state) {
 284        pr_dbg("Failed to map to CQ ring state\n");
 285        goto out_free_ring;
 286    }
 287
 288    sprintf(ring_name, "cq_ring_%" PRIx64, pdir_dma);
 289    rc = pvrdma_ring_init(r, ring_name, pci_dev, &r->ring_state[1],
 290                          cqe, sizeof(struct pvrdma_cqe),
 291                          /* first page is ring state */
 292                          (dma_addr_t *)&tbl[1], nchunks - 1);
 293    if (rc) {
 294        goto out_unmap_ring_state;
 295    }
 296
 297    goto out;
 298
 299out_unmap_ring_state:
 300    /* ring_state was in slot 1, not 0 so need to jump back */
 301    rdma_pci_dma_unmap(pci_dev, --r->ring_state, TARGET_PAGE_SIZE);
 302
 303out_free_ring:
 304    g_free(r);
 305
 306out:
 307    rdma_pci_dma_unmap(pci_dev, tbl, TARGET_PAGE_SIZE);
 308    rdma_pci_dma_unmap(pci_dev, dir, TARGET_PAGE_SIZE);
 309
 310    return rc;
 311}
 312
 313static int create_cq(PVRDMADev *dev, union pvrdma_cmd_req *req,
 314                     union pvrdma_cmd_resp *rsp)
 315{
 316    struct pvrdma_cmd_create_cq *cmd = &req->create_cq;
 317    struct pvrdma_cmd_create_cq_resp *resp = &rsp->create_cq_resp;
 318    PvrdmaRing *ring = NULL;
 319
 320    memset(resp, 0, sizeof(*resp));
 321    resp->hdr.response = cmd->hdr.response;
 322    resp->hdr.ack = PVRDMA_CMD_CREATE_CQ_RESP;
 323
 324    resp->cqe = cmd->cqe;
 325
 326    resp->hdr.err = create_cq_ring(PCI_DEVICE(dev), &ring, cmd->pdir_dma,
 327                                   cmd->nchunks, cmd->cqe);
 328    if (resp->hdr.err) {
 329        goto out;
 330    }
 331
 332    pr_dbg("ring=%p\n", ring);
 333
 334    resp->hdr.err = rdma_rm_alloc_cq(&dev->rdma_dev_res, &dev->backend_dev,
 335                                     cmd->cqe, &resp->cq_handle, ring);
 336    resp->cqe = cmd->cqe;
 337
 338out:
 339    pr_dbg("ret=%d\n", resp->hdr.err);
 340    return resp->hdr.err;
 341}
 342
/*
 * PVRDMA_CMD_DESTROY_CQ: free the ring attached to the CQ, unmap its
 * ring-state page and release the CQ resource.
 * Returns 0 on success, -EINVAL for an unknown handle.
 */
static int destroy_cq(PVRDMADev *dev, union pvrdma_cmd_req *req,
                      union pvrdma_cmd_resp *rsp)
{
    struct pvrdma_cmd_destroy_cq *cmd = &req->destroy_cq;
    RdmaRmCQ *cq;
    PvrdmaRing *ring;

    pr_dbg("cq_handle=%d\n", cmd->cq_handle);

    cq = rdma_rm_get_cq(&dev->rdma_dev_res, cmd->cq_handle);
    if (!cq) {
        pr_dbg("Invalid CQ handle\n");
        return -EINVAL;
    }

    /* The ring was stashed in the CQ's opaque slot by create_cq() */
    ring = (PvrdmaRing *)cq->opaque;
    pvrdma_ring_free(ring);
    /*
     * ring_state was in slot 1, not 0 so need to jump back:
     * create_cq_ring() mapped the state page and handed &state[1] to
     * pvrdma_ring_init(), so the mapped page actually starts one
     * struct pvrdma_ring before the stored pointer.
     */
    rdma_pci_dma_unmap(PCI_DEVICE(dev), --ring->ring_state, TARGET_PAGE_SIZE);
    g_free(ring);

    rdma_rm_dealloc_cq(&dev->rdma_dev_res, cmd->cq_handle);

    return 0;
}
 368
 369static int create_qp_rings(PCIDevice *pci_dev, uint64_t pdir_dma,
 370                           PvrdmaRing **rings, uint32_t scqe, uint32_t smax_sge,
 371                           uint32_t spages, uint32_t rcqe, uint32_t rmax_sge,
 372                           uint32_t rpages)
 373{
 374    uint64_t *dir = NULL, *tbl = NULL;
 375    PvrdmaRing *sr, *rr;
 376    int rc = -EINVAL;
 377    char ring_name[MAX_RING_NAME_SZ];
 378    uint32_t wqe_sz;
 379
 380    pr_dbg("pdir_dma=0x%llx\n", (long long unsigned int)pdir_dma);
 381    dir = rdma_pci_dma_map(pci_dev, pdir_dma, TARGET_PAGE_SIZE);
 382    if (!dir) {
 383        pr_dbg("Failed to map to CQ page directory\n");
 384        goto out;
 385    }
 386
 387    tbl = rdma_pci_dma_map(pci_dev, dir[0], TARGET_PAGE_SIZE);
 388    if (!tbl) {
 389        pr_dbg("Failed to map to CQ page table\n");
 390        goto out;
 391    }
 392
 393    sr = g_malloc(2 * sizeof(*rr));
 394    rr = &sr[1];
 395    pr_dbg("sring=%p\n", sr);
 396    pr_dbg("rring=%p\n", rr);
 397
 398    *rings = sr;
 399
 400    pr_dbg("scqe=%d\n", scqe);
 401    pr_dbg("smax_sge=%d\n", smax_sge);
 402    pr_dbg("spages=%d\n", spages);
 403    pr_dbg("rcqe=%d\n", rcqe);
 404    pr_dbg("rmax_sge=%d\n", rmax_sge);
 405    pr_dbg("rpages=%d\n", rpages);
 406
 407    /* Create send ring */
 408    sr->ring_state = (struct pvrdma_ring *)
 409        rdma_pci_dma_map(pci_dev, tbl[0], TARGET_PAGE_SIZE);
 410    if (!sr->ring_state) {
 411        pr_dbg("Failed to map to CQ ring state\n");
 412        goto out_free_sr_mem;
 413    }
 414
 415    wqe_sz = pow2ceil(sizeof(struct pvrdma_sq_wqe_hdr) +
 416                      sizeof(struct pvrdma_sge) * smax_sge - 1);
 417
 418    sprintf(ring_name, "qp_sring_%" PRIx64, pdir_dma);
 419    rc = pvrdma_ring_init(sr, ring_name, pci_dev, sr->ring_state,
 420                          scqe, wqe_sz, (dma_addr_t *)&tbl[1], spages);
 421    if (rc) {
 422        goto out_unmap_ring_state;
 423    }
 424
 425    /* Create recv ring */
 426    rr->ring_state = &sr->ring_state[1];
 427    wqe_sz = pow2ceil(sizeof(struct pvrdma_rq_wqe_hdr) +
 428                      sizeof(struct pvrdma_sge) * rmax_sge - 1);
 429    sprintf(ring_name, "qp_rring_%" PRIx64, pdir_dma);
 430    rc = pvrdma_ring_init(rr, ring_name, pci_dev, rr->ring_state,
 431                          rcqe, wqe_sz, (dma_addr_t *)&tbl[1 + spages], rpages);
 432    if (rc) {
 433        goto out_free_sr;
 434    }
 435
 436    goto out;
 437
 438out_free_sr:
 439    pvrdma_ring_free(sr);
 440
 441out_unmap_ring_state:
 442    rdma_pci_dma_unmap(pci_dev, sr->ring_state, TARGET_PAGE_SIZE);
 443
 444out_free_sr_mem:
 445    g_free(sr);
 446
 447out:
 448    rdma_pci_dma_unmap(pci_dev, tbl, TARGET_PAGE_SIZE);
 449    rdma_pci_dma_unmap(pci_dev, dir, TARGET_PAGE_SIZE);
 450
 451    return rc;
 452}
 453
 454static int create_qp(PVRDMADev *dev, union pvrdma_cmd_req *req,
 455                     union pvrdma_cmd_resp *rsp)
 456{
 457    struct pvrdma_cmd_create_qp *cmd = &req->create_qp;
 458    struct pvrdma_cmd_create_qp_resp *resp = &rsp->create_qp_resp;
 459    PvrdmaRing *rings = NULL;
 460
 461    memset(resp, 0, sizeof(*resp));
 462    resp->hdr.response = cmd->hdr.response;
 463    resp->hdr.ack = PVRDMA_CMD_CREATE_QP_RESP;
 464
 465    pr_dbg("total_chunks=%d\n", cmd->total_chunks);
 466    pr_dbg("send_chunks=%d\n", cmd->send_chunks);
 467
 468    resp->hdr.err = create_qp_rings(PCI_DEVICE(dev), cmd->pdir_dma, &rings,
 469                                    cmd->max_send_wr, cmd->max_send_sge,
 470                                    cmd->send_chunks, cmd->max_recv_wr,
 471                                    cmd->max_recv_sge, cmd->total_chunks -
 472                                    cmd->send_chunks - 1);
 473    if (resp->hdr.err) {
 474        goto out;
 475    }
 476
 477    pr_dbg("rings=%p\n", rings);
 478
 479    resp->hdr.err = rdma_rm_alloc_qp(&dev->rdma_dev_res, cmd->pd_handle,
 480                                     cmd->qp_type, cmd->max_send_wr,
 481                                     cmd->max_send_sge, cmd->send_cq_handle,
 482                                     cmd->max_recv_wr, cmd->max_recv_sge,
 483                                     cmd->recv_cq_handle, rings, &resp->qpn);
 484
 485    resp->max_send_wr = cmd->max_send_wr;
 486    resp->max_recv_wr = cmd->max_recv_wr;
 487    resp->max_send_sge = cmd->max_send_sge;
 488    resp->max_recv_sge = cmd->max_recv_sge;
 489    resp->max_inline_data = cmd->max_inline_data;
 490
 491out:
 492    pr_dbg("ret=%d\n", resp->hdr.err);
 493    return resp->hdr.err;
 494}
 495
 496static int modify_qp(PVRDMADev *dev, union pvrdma_cmd_req *req,
 497                     union pvrdma_cmd_resp *rsp)
 498{
 499    struct pvrdma_cmd_modify_qp *cmd = &req->modify_qp;
 500
 501    pr_dbg("qp_handle=%d\n", cmd->qp_handle);
 502
 503    memset(rsp, 0, sizeof(*rsp));
 504    rsp->hdr.response = cmd->hdr.response;
 505    rsp->hdr.ack = PVRDMA_CMD_MODIFY_QP_RESP;
 506
 507    rsp->hdr.err = rdma_rm_modify_qp(&dev->rdma_dev_res, &dev->backend_dev,
 508                                 cmd->qp_handle, cmd->attr_mask,
 509                                 (union ibv_gid *)&cmd->attrs.ah_attr.grh.dgid,
 510                                 cmd->attrs.dest_qp_num,
 511                                 (enum ibv_qp_state)cmd->attrs.qp_state,
 512                                 cmd->attrs.qkey, cmd->attrs.rq_psn,
 513                                 cmd->attrs.sq_psn);
 514
 515    pr_dbg("ret=%d\n", rsp->hdr.err);
 516    return rsp->hdr.err;
 517}
 518
 519static int query_qp(PVRDMADev *dev, union pvrdma_cmd_req *req,
 520                     union pvrdma_cmd_resp *rsp)
 521{
 522    struct pvrdma_cmd_query_qp *cmd = &req->query_qp;
 523    struct pvrdma_cmd_query_qp_resp *resp = &rsp->query_qp_resp;
 524    struct ibv_qp_init_attr init_attr;
 525
 526    pr_dbg("qp_handle=%d\n", cmd->qp_handle);
 527    pr_dbg("attr_mask=0x%x\n", cmd->attr_mask);
 528
 529    memset(rsp, 0, sizeof(*rsp));
 530    rsp->hdr.response = cmd->hdr.response;
 531    rsp->hdr.ack = PVRDMA_CMD_QUERY_QP_RESP;
 532
 533    rsp->hdr.err = rdma_rm_query_qp(&dev->rdma_dev_res, &dev->backend_dev,
 534                                    cmd->qp_handle,
 535                                    (struct ibv_qp_attr *)&resp->attrs,
 536                                    cmd->attr_mask, &init_attr);
 537
 538    pr_dbg("ret=%d\n", rsp->hdr.err);
 539    return rsp->hdr.err;
 540}
 541
 542static int destroy_qp(PVRDMADev *dev, union pvrdma_cmd_req *req,
 543                      union pvrdma_cmd_resp *rsp)
 544{
 545    struct pvrdma_cmd_destroy_qp *cmd = &req->destroy_qp;
 546    RdmaRmQP *qp;
 547    PvrdmaRing *ring;
 548
 549    qp = rdma_rm_get_qp(&dev->rdma_dev_res, cmd->qp_handle);
 550    if (!qp) {
 551        pr_dbg("Invalid QP handle\n");
 552        return -EINVAL;
 553    }
 554
 555    rdma_rm_dealloc_qp(&dev->rdma_dev_res, cmd->qp_handle);
 556
 557    ring = (PvrdmaRing *)qp->opaque;
 558    pr_dbg("sring=%p\n", &ring[0]);
 559    pvrdma_ring_free(&ring[0]);
 560    pr_dbg("rring=%p\n", &ring[1]);
 561    pvrdma_ring_free(&ring[1]);
 562
 563    rdma_pci_dma_unmap(PCI_DEVICE(dev), ring->ring_state, TARGET_PAGE_SIZE);
 564    g_free(ring);
 565
 566    return 0;
 567}
 568
/*
 * PVRDMA_CMD_CREATE_BIND: install a GID at the given index of port 0's
 * GID table and refresh the device's node GUID from GID 0.
 * Returns 0 on success, -EINVAL for an out-of-range index.
 */
static int create_bind(PVRDMADev *dev, union pvrdma_cmd_req *req,
                       union pvrdma_cmd_resp *rsp)
{
    struct pvrdma_cmd_create_bind *cmd = &req->create_bind;
#ifdef PVRDMA_DEBUG
    /* GID = 8-byte subnet prefix followed by 8-byte interface id */
    __be64 *subnet = (__be64 *)&cmd->new_gid[0];
    __be64 *if_id = (__be64 *)&cmd->new_gid[8];
#endif

    pr_dbg("index=%d\n", cmd->index);

    /* index is guest-controlled; bound it before it indexes gid_tbl */
    if (cmd->index >= MAX_PORT_GIDS) {
        return -EINVAL;
    }

    /*
     * NOTE(review): subnet/if_id only exist under PVRDMA_DEBUG —
     * presumably pr_dbg() compiles to nothing otherwise, so this
     * reference disappears too; confirm against pvrdma.h.
     */
    pr_dbg("gid[%d]=0x%llx,0x%llx\n", cmd->index,
           (long long unsigned int)be64_to_cpu(*subnet),
           (long long unsigned int)be64_to_cpu(*if_id));

    /* Driver forces to one port only */
    memcpy(dev->rdma_dev_res.ports[0].gid_tbl[cmd->index].raw, &cmd->new_gid,
           sizeof(cmd->new_gid));

    /* TODO: Since drivers stores node_guid at load_dsr phase then this
     * assignment is not relevant, i need to figure out a way how to
     * retrieve MAC of our netdev */
    dev->node_guid = dev->rdma_dev_res.ports[0].gid_tbl[0].global.interface_id;
    pr_dbg("dev->node_guid=0x%llx\n",
           (long long unsigned int)be64_to_cpu(dev->node_guid));

    return 0;
}
 601
 602static int destroy_bind(PVRDMADev *dev, union pvrdma_cmd_req *req,
 603                        union pvrdma_cmd_resp *rsp)
 604{
 605    struct pvrdma_cmd_destroy_bind *cmd = &req->destroy_bind;
 606
 607    pr_dbg("index=%d\n", cmd->index);
 608
 609    if (cmd->index >= MAX_PORT_GIDS) {
 610        return -EINVAL;
 611    }
 612
 613    memset(dev->rdma_dev_res.ports[0].gid_tbl[cmd->index].raw, 0,
 614           sizeof(dev->rdma_dev_res.ports[0].gid_tbl[cmd->index].raw));
 615
 616    return 0;
 617}
 618
 619static int create_uc(PVRDMADev *dev, union pvrdma_cmd_req *req,
 620                     union pvrdma_cmd_resp *rsp)
 621{
 622    struct pvrdma_cmd_create_uc *cmd = &req->create_uc;
 623    struct pvrdma_cmd_create_uc_resp *resp = &rsp->create_uc_resp;
 624
 625    pr_dbg("pfn=%d\n", cmd->pfn);
 626
 627    memset(resp, 0, sizeof(*resp));
 628    resp->hdr.response = cmd->hdr.response;
 629    resp->hdr.ack = PVRDMA_CMD_CREATE_UC_RESP;
 630    resp->hdr.err = rdma_rm_alloc_uc(&dev->rdma_dev_res, cmd->pfn,
 631                                     &resp->ctx_handle);
 632
 633    pr_dbg("ret=%d\n", resp->hdr.err);
 634
 635    return 0;
 636}
 637
 638static int destroy_uc(PVRDMADev *dev, union pvrdma_cmd_req *req,
 639                      union pvrdma_cmd_resp *rsp)
 640{
 641    struct pvrdma_cmd_destroy_uc *cmd = &req->destroy_uc;
 642
 643    pr_dbg("ctx_handle=%d\n", cmd->ctx_handle);
 644
 645    rdma_rm_dealloc_uc(&dev->rdma_dev_res, cmd->ctx_handle);
 646
 647    return 0;
 648}
/*
 * One command-channel verb: its PVRDMA_CMD_* id and the function that
 * executes it.  A NULL exec marks a verb that is accepted but not
 * implemented yet.
 */
struct cmd_handler {
    uint32_t cmd;
    int (*exec)(PVRDMADev *dev, union pvrdma_cmd_req *req,
            union pvrdma_cmd_resp *rsp);
};

/*
 * Dispatch table for execute_command().
 *
 * NOTE(review): execute_command() indexes this array directly with the
 * guest's hdr.cmd value; the .cmd field of each entry is never compared
 * against it.  Correctness therefore relies on the PVRDMA_CMD_* values
 * being consecutive from 0 and matching each entry's position — confirm
 * against standard-headers/rdma/vmw_pvrdma-abi.h before reordering.
 */
static struct cmd_handler cmd_handlers[] = {
    {PVRDMA_CMD_QUERY_PORT, query_port},
    {PVRDMA_CMD_QUERY_PKEY, query_pkey},
    {PVRDMA_CMD_CREATE_PD, create_pd},
    {PVRDMA_CMD_DESTROY_PD, destroy_pd},
    {PVRDMA_CMD_CREATE_MR, create_mr},
    {PVRDMA_CMD_DESTROY_MR, destroy_mr},
    {PVRDMA_CMD_CREATE_CQ, create_cq},
    {PVRDMA_CMD_RESIZE_CQ, NULL},
    {PVRDMA_CMD_DESTROY_CQ, destroy_cq},
    {PVRDMA_CMD_CREATE_QP, create_qp},
    {PVRDMA_CMD_MODIFY_QP, modify_qp},
    {PVRDMA_CMD_QUERY_QP, query_qp},
    {PVRDMA_CMD_DESTROY_QP, destroy_qp},
    {PVRDMA_CMD_CREATE_UC, create_uc},
    {PVRDMA_CMD_DESTROY_UC, destroy_uc},
    {PVRDMA_CMD_CREATE_BIND, create_bind},
    {PVRDMA_CMD_DESTROY_BIND, destroy_bind},
};
 674
 675int execute_command(PVRDMADev *dev)
 676{
 677    int err = 0xFFFF;
 678    DSRInfo *dsr_info;
 679
 680    dsr_info = &dev->dsr_info;
 681
 682    pr_dbg("cmd=%d\n", dsr_info->req->hdr.cmd);
 683    if (dsr_info->req->hdr.cmd >= sizeof(cmd_handlers) /
 684                      sizeof(struct cmd_handler)) {
 685        pr_dbg("Unsupported command\n");
 686        goto out;
 687    }
 688
 689    if (!cmd_handlers[dsr_info->req->hdr.cmd].exec) {
 690        pr_dbg("Unsupported command (not implemented yet)\n");
 691        goto out;
 692    }
 693
 694    err = cmd_handlers[dsr_info->req->hdr.cmd].exec(dev, dsr_info->req,
 695                            dsr_info->rsp);
 696out:
 697    set_reg_val(dev, PVRDMA_REG_ERR, err);
 698    post_interrupt(dev, INTR_VEC_CMD_RING);
 699
 700    return (err == 0) ? 0 : -EINVAL;
 701}
 702