qemu/hw/rdma/vmw/pvrdma_dev_ring.c
/*
 * QEMU paravirtual RDMA - Device rings
 *
 * Copyright (C) 2018 Oracle
 * Copyright (C) 2018 Red Hat Inc
 *
 * Authors:
 *     Yuval Shaia <yuval.shaia@oracle.com>
 *     Marcel Apfelbaum <marcel@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include "hw/pci/pci.h"
#include "cpu.h"

#include "../rdma_utils.h"
#include "standard-headers/drivers/infiniband/hw/vmw_pvrdma/pvrdma_ring.h"
#include "pvrdma_dev_ring.h"

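/*
 * Map the guest-provided page table into host memory and initialize the
 * ring descriptor. Returns 0 on success, a negative errno value on failure.
 */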
int pvrdma_ring_init(PvrdmaRing *ring, const char *name, PCIDevice *dev,
                     struct pvrdma_ring *ring_state, uint32_t max_elems,
                     size_t elem_sz, dma_addr_t *tbl, uint32_t npages)
{
    int i;
    int rc = 0;

    strncpy(ring->name, name, MAX_RING_NAME_SZ);
    ring->name[MAX_RING_NAME_SZ - 1] = 0;
    pr_dbg("Initializing %s ring\n", ring->name);
    ring->dev = dev;
    ring->ring_state = ring_state;
    ring->max_elems = max_elems;
    ring->elem_sz = elem_sz;
    pr_dbg("ring->elem_sz=%zu\n", ring->elem_sz);
    pr_dbg("npages=%u\n", npages);
    /* TODO: Decide whether to reset the driver-initialized ring state here
    atomic_set(&ring->ring_state->prod_tail, 0);
    atomic_set(&ring->ring_state->cons_head, 0);
    */
    ring->npages = npages;
    /* Zero-initialize so skipped slots stay NULL for the cleanup paths */
    ring->pages = g_new0(void *, npages);

    for (i = 0; i < npages; i++) {
        if (!tbl[i]) {
            pr_err("npages=%u but tbl[%d] is NULL\n", npages, i);
            continue;
        }

        ring->pages[i] = rdma_pci_dma_map(dev, tbl[i], TARGET_PAGE_SIZE);
        if (!ring->pages[i]) {
            rc = -ENOMEM;
            pr_dbg("Failed to map page %d\n", i);
            goto out_free;
        }
        memset(ring->pages[i], 0, TARGET_PAGE_SIZE);
    }

    goto out;

out_free:
    while (i--) {
        rdma_pci_dma_unmap(dev, ring->pages[i], TARGET_PAGE_SIZE);
    }
    g_free(ring->pages);

out:
    return rc;
}

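/*
 * Return a pointer to the element at the current consumer index, or NULL
 * if the ring has no pending data. The caller advances the consumer index
 * with pvrdma_ring_read_inc() once it is done with the element.
 */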
void *pvrdma_ring_next_elem_read(PvrdmaRing *ring)
{
    unsigned int idx = 0, offset;

    /*
    pr_dbg("%s: t=%d, h=%d\n", ring->name, ring->ring_state->prod_tail,
           ring->ring_state->cons_head);
    */

    if (!pvrdma_idx_ring_has_data(ring->ring_state, ring->max_elems, &idx)) {
        pr_dbg("No more data in ring\n");
        return NULL;
    }

    offset = idx * ring->elem_sz;
    /*
    pr_dbg("idx=%d\n", idx);
    pr_dbg("offset=%d\n", offset);
    */
    return ring->pages[offset / TARGET_PAGE_SIZE] + (offset % TARGET_PAGE_SIZE);
}

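/* Advance the ring's consumer index after a completed read */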
void pvrdma_ring_read_inc(PvrdmaRing *ring)
{
    pvrdma_idx_ring_inc(&ring->ring_state->cons_head, ring->max_elems);
    /*
    pr_dbg("%s: t=%d, h=%d, m=%ld\n", ring->name,
           ring->ring_state->prod_tail, ring->ring_state->cons_head,
           ring->max_elems);
    */
}

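/*
 * Return a pointer to the element at the current producer index, or NULL
 * if the ring is full. The caller advances the producer index with
 * pvrdma_ring_write_inc() once the element has been filled in.
 */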
void *pvrdma_ring_next_elem_write(PvrdmaRing *ring)
{
    unsigned int idx, offset, tail;

    /*
    pr_dbg("%s: t=%d, h=%d\n", ring->name, ring->ring_state->prod_tail,
           ring->ring_state->cons_head);
    */

    if (!pvrdma_idx_ring_has_space(ring->ring_state, ring->max_elems, &tail)) {
        pr_dbg("Ring is full\n");
        return NULL;
    }

    idx = pvrdma_idx(&ring->ring_state->prod_tail, ring->max_elems);
    /* TODO: idx should equal the tail reported by pvrdma_idx_ring_has_space */

    offset = idx * ring->elem_sz;
    return ring->pages[offset / TARGET_PAGE_SIZE] + (offset % TARGET_PAGE_SIZE);
}

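/* Advance the ring's producer index after a completed write */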
void pvrdma_ring_write_inc(PvrdmaRing *ring)
{
    pvrdma_idx_ring_inc(&ring->ring_state->prod_tail, ring->max_elems);
    /*
    pr_dbg("%s: t=%d, h=%d, m=%ld\n", ring->name,
           ring->ring_state->prod_tail, ring->ring_state->cons_head,
           ring->max_elems);
    */
}

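/* Unmap all ring pages and release the page array */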
void pvrdma_ring_free(PvrdmaRing *ring)
{
    if (!ring || !ring->pages) {
        return;
    }

    pr_dbg("ring->npages=%u\n", ring->npages);
    while (ring->npages--) {
        rdma_pci_dma_unmap(ring->dev, ring->pages[ring->npages],
                           TARGET_PAGE_SIZE);
    }

    g_free(ring->pages);
    ring->pages = NULL;
}