/* qemu/hw/rdma/rdma_utils.c */
/*
 * QEMU paravirtual RDMA - Generic RDMA backend
 *
 * Copyright (C) 2018 Oracle
 * Copyright (C) 2018 Red Hat Inc
 *
 * Authors:
 *     Yuval Shaia <yuval.shaia@oracle.com>
 *     Marcel Apfelbaum <marcel@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include "trace.h"
#include "rdma_utils.h"

  20void *rdma_pci_dma_map(PCIDevice *dev, dma_addr_t addr, dma_addr_t plen)
  21{
  22    void *p;
  23    hwaddr len = plen;
  24
  25    if (!addr) {
  26        rdma_error_report("addr is NULL");
  27        return NULL;
  28    }
  29
  30    p = pci_dma_map(dev, addr, &len, DMA_DIRECTION_TO_DEVICE);
  31    if (!p) {
  32        rdma_error_report("pci_dma_map fail, addr=0x%"PRIx64", len=%"PRId64,
  33                          addr, len);
  34        return NULL;
  35    }
  36
  37    if (len != plen) {
  38        rdma_pci_dma_unmap(dev, p, len);
  39        return NULL;
  40    }
  41
  42    trace_rdma_pci_dma_map(addr, p, len);
  43
  44    return p;
  45}
  46
  47void rdma_pci_dma_unmap(PCIDevice *dev, void *buffer, dma_addr_t len)
  48{
  49    trace_rdma_pci_dma_unmap(buffer);
  50    if (buffer) {
  51        pci_dma_unmap(dev, buffer, len, DMA_DIRECTION_TO_DEVICE, 0);
  52    }
  53}
  54
  55void rdma_protected_gqueue_init(RdmaProtectedGQueue *list)
  56{
  57    qemu_mutex_init(&list->lock);
  58    list->list = g_queue_new();
  59}
  60
  61void rdma_protected_gqueue_destroy(RdmaProtectedGQueue *list)
  62{
  63    if (list->list) {
  64        g_queue_free_full(list->list, g_free);
  65        qemu_mutex_destroy(&list->lock);
  66        list->list = NULL;
  67    }
  68}
  69
  70void rdma_protected_gqueue_append_int64(RdmaProtectedGQueue *list,
  71                                        int64_t value)
  72{
  73    qemu_mutex_lock(&list->lock);
  74    g_queue_push_tail(list->list, g_memdup(&value, sizeof(value)));
  75    qemu_mutex_unlock(&list->lock);
  76}
  77
  78int64_t rdma_protected_gqueue_pop_int64(RdmaProtectedGQueue *list)
  79{
  80    int64_t *valp;
  81    int64_t val;
  82
  83    qemu_mutex_lock(&list->lock);
  84
  85    valp = g_queue_pop_head(list->list);
  86    qemu_mutex_unlock(&list->lock);
  87
  88    if (!valp) {
  89        return -ENOENT;
  90    }
  91
  92    val = *valp;
  93    g_free(valp);
  94    return val;
  95}
  96
/*
 * Initialize the lock of a mutex-protected GSList.
 * NOTE(review): list->list is not initialized here — presumably the
 * embedding struct is zero-allocated so it starts out NULL; confirm
 * with callers.
 */
void rdma_protected_gslist_init(RdmaProtectedGSList *list)
{
    qemu_mutex_init(&list->lock);
}
 101
 102void rdma_protected_gslist_destroy(RdmaProtectedGSList *list)
 103{
 104    if (list->list) {
 105        g_slist_free(list->list);
 106        qemu_mutex_destroy(&list->lock);
 107        list->list = NULL;
 108    }
 109}
 110
 111void rdma_protected_gslist_append_int32(RdmaProtectedGSList *list,
 112                                        int32_t value)
 113{
 114    qemu_mutex_lock(&list->lock);
 115    list->list = g_slist_prepend(list->list, GINT_TO_POINTER(value));
 116    qemu_mutex_unlock(&list->lock);
 117}
 118
 119void rdma_protected_gslist_remove_int32(RdmaProtectedGSList *list,
 120                                        int32_t value)
 121{
 122    qemu_mutex_lock(&list->lock);
 123    list->list = g_slist_remove(list->list, GINT_TO_POINTER(value));
 124    qemu_mutex_unlock(&list->lock);
 125}
 126