qemu/contrib/vhost-user-gpu/vugbm.c
/*
 * Virtio vhost-user GPU Device
 *
 * DRM helpers
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#include "vugbm.h"

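/*
 * This file provides three interchangeable buffer backends behind the
 * vugbm_device function pointers: a GBM backend (CONFIG_GBM), a udmabuf
 * backend (CONFIG_MEMFD plus /dev/udmabuf), and the plain heap-memory
 * fallback below.  The fallback never sets dev->get_fd, so its buffers
 * cannot be exported as dmabufs.
 */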
static bool
mem_alloc_bo(struct vugbm_buffer *buf)
{
    buf->mmap = g_malloc(buf->width * buf->height * 4);
    buf->stride = buf->width * 4;
    return true;
}

static void
mem_free_bo(struct vugbm_buffer *buf)
{
    g_free(buf->mmap);
}

static bool
mem_map_bo(struct vugbm_buffer *buf)
{
    return buf->mmap != NULL;
}

static void
mem_unmap_bo(struct vugbm_buffer *buf)
{
}

static void
mem_device_destroy(struct vugbm_device *dev)
{
}

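/*
 * udmabuf backend: each buffer is backed by a sealed memfd, and a dmabuf
 * fd is created from it on demand through the /dev/udmabuf ioctl.  The
 * struct and ioctl number below mirror the Linux udmabuf uAPI
 * (linux/udmabuf.h), so the kernel header is not required at build time.
 */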
#ifdef CONFIG_MEMFD
struct udmabuf_create {
        uint32_t memfd;
        uint32_t flags;
        uint64_t offset;
        uint64_t size;
};

#define UDMABUF_CREATE _IOW('u', 0x42, struct udmabuf_create)

static size_t
udmabuf_get_size(struct vugbm_buffer *buf)
{
    return ROUND_UP(buf->width * buf->height * 4, getpagesize());
}

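/*
 * The kernel udmabuf driver only accepts memfds that are sealed against
 * shrinking, hence MFD_ALLOW_SEALING at creation time and the
 * F_SEAL_SHRINK seal below.
 */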
static bool
udmabuf_alloc_bo(struct vugbm_buffer *buf)
{
    int ret;

    buf->memfd = memfd_create("udmabuf-bo", MFD_ALLOW_SEALING);
    if (buf->memfd < 0) {
        return false;
    }

    ret = ftruncate(buf->memfd, udmabuf_get_size(buf));
    if (ret < 0) {
        close(buf->memfd);
        return false;
    }

    ret = fcntl(buf->memfd, F_ADD_SEALS, F_SEAL_SHRINK);
    if (ret < 0) {
        close(buf->memfd);
        return false;
    }

    buf->stride = buf->width * 4;

    return true;
}

static void
udmabuf_free_bo(struct vugbm_buffer *buf)
{
    close(buf->memfd);
}

static bool
udmabuf_map_bo(struct vugbm_buffer *buf)
{
    buf->mmap = mmap(NULL, udmabuf_get_size(buf),
                     PROT_READ | PROT_WRITE, MAP_SHARED, buf->memfd, 0);
    if (buf->mmap == MAP_FAILED) {
        return false;
    }

    return true;
}

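/*
 * Turn the buffer's memfd into a dmabuf fd via the UDMABUF_CREATE ioctl
 * on the /dev/udmabuf fd opened in vugbm_device_init().
 */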
static bool
udmabuf_get_fd(struct vugbm_buffer *buf, int *fd)
{
    struct udmabuf_create create = {
        .memfd = buf->memfd,
        .offset = 0,
        .size = udmabuf_get_size(buf),
    };

    *fd = ioctl(buf->dev->fd, UDMABUF_CREATE, &create);

    return *fd >= 0;
}

static void
udmabuf_unmap_bo(struct vugbm_buffer *buf)
{
    munmap(buf->mmap, udmabuf_get_size(buf));
}

static void
udmabuf_device_destroy(struct vugbm_device *dev)
{
    close(dev->fd);
}
#endif

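/*
 * GBM backend: buffer objects are allocated and mapped through libgbm on
 * the DRM fd passed to vugbm_device_init(), and exported as dmabufs with
 * gbm_bo_get_fd().
 */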
#ifdef CONFIG_GBM
static bool
alloc_bo(struct vugbm_buffer *buf)
{
    struct gbm_device *dev = buf->dev->dev;

    assert(!buf->bo);

    buf->bo = gbm_bo_create(dev, buf->width, buf->height,
                            buf->format,
                            GBM_BO_USE_RENDERING | GBM_BO_USE_LINEAR);

    if (buf->bo) {
        buf->stride = gbm_bo_get_stride(buf->bo);
        return true;
    }

    return false;
}

static void
free_bo(struct vugbm_buffer *buf)
{
    gbm_bo_destroy(buf->bo);
}

static bool
map_bo(struct vugbm_buffer *buf)
{
    uint32_t stride;

    buf->mmap = gbm_bo_map(buf->bo, 0, 0, buf->width, buf->height,
                           GBM_BO_TRANSFER_READ_WRITE, &stride,
                           &buf->mmap_data);

    assert(stride == buf->stride);

    return buf->mmap != NULL;
}

static void
unmap_bo(struct vugbm_buffer *buf)
{
    gbm_bo_unmap(buf->bo, buf->mmap_data);
}

static bool
get_fd(struct vugbm_buffer *buf, int *fd)
{
    *fd = gbm_bo_get_fd(buf->bo);

    return *fd >= 0;
}

static void
device_destroy(struct vugbm_device *dev)
{
    gbm_device_destroy(dev->dev);
}
#endif

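/*
 * Public API.  vugbm_device_destroy() is a no-op unless the device was
 * fully initialised (dev->inited set).
 */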
void
vugbm_device_destroy(struct vugbm_device *dev)
{
    if (!dev->inited) {
        return;
    }

    dev->device_destroy(dev);
}

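/*
 * Backend selection: prefer GBM when gbm_create_device() succeeds on the
 * given fd, then the experimental udmabuf backend when /dev/udmabuf is
 * present, and finally the plain memory fallback.  The function returns
 * false when /dev/udmabuf cannot be opened or when only the memory
 * fallback is available (i.e. no dmabuf sharing is possible).
 */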
bool
vugbm_device_init(struct vugbm_device *dev, int fd)
{
    dev->fd = fd;

#ifdef CONFIG_GBM
    dev->dev = gbm_create_device(fd);
#endif

    if (0) {
        /* nothing */
    }
#ifdef CONFIG_GBM
    else if (dev->dev != NULL) {
        dev->alloc_bo = alloc_bo;
        dev->free_bo = free_bo;
        dev->get_fd = get_fd;
        dev->map_bo = map_bo;
        dev->unmap_bo = unmap_bo;
        dev->device_destroy = device_destroy;
    }
#endif
#ifdef CONFIG_MEMFD
    else if (g_file_test("/dev/udmabuf", G_FILE_TEST_EXISTS)) {
        dev->fd = open("/dev/udmabuf", O_RDWR);
        if (dev->fd < 0) {
            return false;
        }
        g_debug("Using experimental udmabuf backend");
        dev->alloc_bo = udmabuf_alloc_bo;
        dev->free_bo = udmabuf_free_bo;
        dev->get_fd = udmabuf_get_fd;
        dev->map_bo = udmabuf_map_bo;
        dev->unmap_bo = udmabuf_unmap_bo;
        dev->device_destroy = udmabuf_device_destroy;
    }
#endif
    else {
        g_debug("Using mem fallback");
        dev->alloc_bo = mem_alloc_bo;
        dev->free_bo = mem_free_bo;
        dev->map_bo = mem_map_bo;
        dev->unmap_bo = mem_unmap_bo;
        dev->device_destroy = mem_device_destroy;
        return false;
    }

    dev->inited = true;
    return true;
}

static bool
vugbm_buffer_map(struct vugbm_buffer *buf)
{
    struct vugbm_device *dev = buf->dev;

    return dev->map_bo(buf);
}

static void
vugbm_buffer_unmap(struct vugbm_buffer *buf)
{
    struct vugbm_device *dev = buf->dev;

    dev->unmap_bo(buf);
}

bool
vugbm_buffer_can_get_dmabuf_fd(struct vugbm_buffer *buffer)
{
    if (!buffer->dev->get_fd) {
        return false;
    }

    return true;
}

bool
vugbm_buffer_get_dmabuf_fd(struct vugbm_buffer *buffer, int *fd)
{
    if (!vugbm_buffer_can_get_dmabuf_fd(buffer) ||
        !buffer->dev->get_fd(buffer, fd)) {
        g_warning("Failed to get dmabuf");
        return false;
    }

    if (*fd < 0) {
        g_warning("error: dmabuf_fd < 0");
        return false;
    }

    return true;
}

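/*
 * Buffers are always created as linear XRGB8888 and mapped right away;
 * the backend's alloc_bo() fills in the stride.
 */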
bool
vugbm_buffer_create(struct vugbm_buffer *buffer, struct vugbm_device *dev,
                    uint32_t width, uint32_t height)
{
    buffer->dev = dev;
    buffer->width = width;
    buffer->height = height;
    buffer->format = GBM_FORMAT_XRGB8888;
    buffer->stride = 0; /* modified during alloc */
    if (!dev->alloc_bo(buffer)) {
        g_warning("alloc_bo failed");
        return false;
    }

    if (!vugbm_buffer_map(buffer)) {
        g_warning("map_bo failed");
        goto err;
    }

    return true;

err:
    dev->free_bo(buffer);
    return false;
}

void
vugbm_buffer_destroy(struct vugbm_buffer *buffer)
{
    struct vugbm_device *dev = buffer->dev;

    vugbm_buffer_unmap(buffer);
    dev->free_bo(buffer);
}
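
/*
 * A minimal caller sketch (hypothetical, not part of this file), assuming
 * "drm_fd" is a DRM device fd obtained elsewhere.  It initialises a
 * device, creates and fills one 64x64 buffer, exports it as a dmabuf when
 * the backend supports it, and tears everything down again:
 *
 *     struct vugbm_device gdev = { 0 };
 *     struct vugbm_buffer buf;
 *     int dmabuf_fd = -1;
 *
 *     vugbm_device_init(&gdev, drm_fd);
 *     if (vugbm_buffer_create(&buf, &gdev, 64, 64)) {
 *         memset(buf.mmap, 0xff, buf.stride * buf.height);
 *         if (vugbm_buffer_can_get_dmabuf_fd(&buf) &&
 *             vugbm_buffer_get_dmabuf_fd(&buf, &dmabuf_fd)) {
 *             ... share dmabuf_fd with the display frontend, then close it ...
 *         }
 *         vugbm_buffer_destroy(&buf);
 *     }
 *     vugbm_device_destroy(&gdev);
 */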