/* qemu/hw/rdma/vmw/pvrdma_dev_ring.c */
/*
 * QEMU paravirtual RDMA - Device rings
 *
 * Copyright (C) 2018 Oracle
 * Copyright (C) 2018 Red Hat Inc
 *
 * Authors:
 *     Yuval Shaia <yuval.shaia@oracle.com>
 *     Marcel Apfelbaum <marcel@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include "hw/pci/pci.h"
#include "cpu.h"

#include "trace.h"

#include "../rdma_utils.h"
#include "standard-headers/drivers/infiniband/hw/vmw_pvrdma/pvrdma_ring.h"
#include "pvrdma_dev_ring.h"

  26int pvrdma_ring_init(PvrdmaRing *ring, const char *name, PCIDevice *dev,
  27                     struct pvrdma_ring *ring_state, uint32_t max_elems,
  28                     size_t elem_sz, dma_addr_t *tbl, uint32_t npages)
  29{
  30    int i;
  31    int rc = 0;
  32
  33    strncpy(ring->name, name, MAX_RING_NAME_SZ);
  34    ring->name[MAX_RING_NAME_SZ - 1] = 0;
  35    ring->dev = dev;
  36    ring->ring_state = ring_state;
  37    ring->max_elems = max_elems;
  38    ring->elem_sz = elem_sz;
  39    /* TODO: Give a moment to think if we want to redo driver settings
  40    atomic_set(&ring->ring_state->prod_tail, 0);
  41    atomic_set(&ring->ring_state->cons_head, 0);
  42    */
  43    ring->npages = npages;
  44    ring->pages = g_malloc(npages * sizeof(void *));
  45
  46    for (i = 0; i < npages; i++) {
  47        if (!tbl[i]) {
  48            rdma_error_report("npages=%d but tbl[%d] is NULL", npages, i);
  49            continue;
  50        }
  51
  52        ring->pages[i] = rdma_pci_dma_map(dev, tbl[i], TARGET_PAGE_SIZE);
  53        if (!ring->pages[i]) {
  54            rc = -ENOMEM;
  55            rdma_error_report("Failed to map to page %d in ring %s", i, name);
  56            goto out_free;
  57        }
  58        memset(ring->pages[i], 0, TARGET_PAGE_SIZE);
  59    }
  60
  61    goto out;
  62
  63out_free:
  64    while (i--) {
  65        rdma_pci_dma_unmap(dev, ring->pages[i], TARGET_PAGE_SIZE);
  66    }
  67    g_free(ring->pages);
  68
  69out:
  70    return rc;
  71}
  72
  73void *pvrdma_ring_next_elem_read(PvrdmaRing *ring)
  74{
  75    int e;
  76    unsigned int idx = 0, offset;
  77
  78    e = pvrdma_idx_ring_has_data(ring->ring_state, ring->max_elems, &idx);
  79    if (e <= 0) {
  80        trace_pvrdma_ring_next_elem_read_no_data(ring->name);
  81        return NULL;
  82    }
  83
  84    offset = idx * ring->elem_sz;
  85    return ring->pages[offset / TARGET_PAGE_SIZE] + (offset % TARGET_PAGE_SIZE);
  86}
  87
  88void pvrdma_ring_read_inc(PvrdmaRing *ring)
  89{
  90    pvrdma_idx_ring_inc(&ring->ring_state->cons_head, ring->max_elems);
  91}
  92
  93void *pvrdma_ring_next_elem_write(PvrdmaRing *ring)
  94{
  95    int idx;
  96    unsigned int offset, tail;
  97
  98    idx = pvrdma_idx_ring_has_space(ring->ring_state, ring->max_elems, &tail);
  99    if (idx <= 0) {
 100        rdma_error_report("CQ is full");
 101        return NULL;
 102    }
 103
 104    idx = pvrdma_idx(&ring->ring_state->prod_tail, ring->max_elems);
 105    if (idx < 0 || tail != idx) {
 106        rdma_error_report("Invalid idx %d", idx);
 107        return NULL;
 108    }
 109
 110    offset = idx * ring->elem_sz;
 111    return ring->pages[offset / TARGET_PAGE_SIZE] + (offset % TARGET_PAGE_SIZE);
 112}
 113
 114void pvrdma_ring_write_inc(PvrdmaRing *ring)
 115{
 116    pvrdma_idx_ring_inc(&ring->ring_state->prod_tail, ring->max_elems);
 117}
 118
 119void pvrdma_ring_free(PvrdmaRing *ring)
 120{
 121    if (!ring) {
 122        return;
 123    }
 124
 125    if (!ring->pages) {
 126        return;
 127    }
 128
 129    while (ring->npages--) {
 130        rdma_pci_dma_unmap(ring->dev, ring->pages[ring->npages],
 131                           TARGET_PAGE_SIZE);
 132    }
 133
 134    g_free(ring->pages);
 135    ring->pages = NULL;
 136}
 137