linux/drivers/gpu/drm/virtio/virtgpu_vq.c
/*
 * Copyright (C) 2015 Red Hat, Inc.
 * All Rights Reserved.
 *
 * Authors:
 *    Dave Airlie <airlied@redhat.com>
 *    Gerd Hoffmann <kraxel@redhat.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <drm/drmP.h>
#include "virtgpu_drv.h"
#include <linux/virtio.h>
#include <linux/virtio_config.h>
#include <linux/virtio_ring.h>

#define MAX_INLINE_CMD_SIZE   96
#define MAX_INLINE_RESP_SIZE  24
#define VBUFFER_SIZE          (sizeof(struct virtio_gpu_vbuffer) \
                               + MAX_INLINE_CMD_SIZE             \
                               + MAX_INLINE_RESP_SIZE)

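/*
 * Resource ids are handed out from a device-wide IDR so guest-side objects
 * can be named in commands sent to the host.  idr_preload() plus GFP_NOWAIT
 * keeps the allocation safe under the spinlock.
 */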
void virtio_gpu_resource_id_get(struct virtio_gpu_device *vgdev,
                                uint32_t *resid)
{
        int handle;

        idr_preload(GFP_KERNEL);
        spin_lock(&vgdev->resource_idr_lock);
        handle = idr_alloc(&vgdev->resource_idr, NULL, 1, 0, GFP_NOWAIT);
        spin_unlock(&vgdev->resource_idr_lock);
        idr_preload_end();
        *resid = handle;
}

void virtio_gpu_resource_id_put(struct virtio_gpu_device *vgdev, uint32_t id)
{
        spin_lock(&vgdev->resource_idr_lock);
        idr_remove(&vgdev->resource_idr, id);
        spin_unlock(&vgdev->resource_idr_lock);
}

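/*
 * Virtqueue callbacks: the device has used one or more buffers, so schedule
 * the matching dequeue work to reclaim them outside interrupt context.
 */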
void virtio_gpu_ctrl_ack(struct virtqueue *vq)
{
        struct drm_device *dev = vq->vdev->priv;
        struct virtio_gpu_device *vgdev = dev->dev_private;

        schedule_work(&vgdev->ctrlq.dequeue_work);
}

void virtio_gpu_cursor_ack(struct virtqueue *vq)
{
        struct drm_device *dev = vq->vdev->priv;
        struct virtio_gpu_device *vgdev = dev->dev_private;

        schedule_work(&vgdev->cursorq.dequeue_work);
}

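/*
 * Every command goes through a virtio_gpu_vbuffer, carved out of a dedicated
 * slab cache.  Each vbuffer carries room for a small inline command and a
 * small inline response right behind the struct itself (see VBUFFER_SIZE).
 */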
int virtio_gpu_alloc_vbufs(struct virtio_gpu_device *vgdev)
{
        vgdev->vbufs = kmem_cache_create("virtio-gpu-vbufs",
                                         VBUFFER_SIZE,
                                         __alignof__(struct virtio_gpu_vbuffer),
                                         0, NULL);
        if (!vgdev->vbufs)
                return -ENOMEM;
        return 0;
}

void virtio_gpu_free_vbufs(struct virtio_gpu_device *vgdev)
{
        kmem_cache_destroy(vgdev->vbufs);
        vgdev->vbufs = NULL;
}

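/*
 * Allocate a vbuffer and lay out its command/response pointers: the command
 * always lives in the inline area; the response uses the inline area too
 * when it fits, otherwise the caller-provided resp_buf is used.
 */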
static struct virtio_gpu_vbuffer*
virtio_gpu_get_vbuf(struct virtio_gpu_device *vgdev,
                    int size, int resp_size, void *resp_buf,
                    virtio_gpu_resp_cb resp_cb)
{
        struct virtio_gpu_vbuffer *vbuf;

        vbuf = kmem_cache_alloc(vgdev->vbufs, GFP_KERNEL);
        if (!vbuf)
                return ERR_PTR(-ENOMEM);
        memset(vbuf, 0, VBUFFER_SIZE);

        BUG_ON(size > MAX_INLINE_CMD_SIZE);
        vbuf->buf = (void *)vbuf + sizeof(*vbuf);
        vbuf->size = size;

        vbuf->resp_cb = resp_cb;
        vbuf->resp_size = resp_size;
        if (resp_size <= MAX_INLINE_RESP_SIZE)
                vbuf->resp_buf = (void *)vbuf->buf + size;
        else
                vbuf->resp_buf = resp_buf;
        BUG_ON(!vbuf->resp_buf);
        return vbuf;
}

static void *virtio_gpu_alloc_cmd(struct virtio_gpu_device *vgdev,
                                  struct virtio_gpu_vbuffer **vbuffer_p,
                                  int size)
{
        struct virtio_gpu_vbuffer *vbuf;

        vbuf = virtio_gpu_get_vbuf(vgdev, size,
                                   sizeof(struct virtio_gpu_ctrl_hdr),
                                   NULL, NULL);
        if (IS_ERR(vbuf)) {
                *vbuffer_p = NULL;
                return ERR_CAST(vbuf);
        }
        *vbuffer_p = vbuf;
        return vbuf->buf;
}

static struct virtio_gpu_update_cursor*
virtio_gpu_alloc_cursor(struct virtio_gpu_device *vgdev,
                        struct virtio_gpu_vbuffer **vbuffer_p)
{
        struct virtio_gpu_vbuffer *vbuf;

        vbuf = virtio_gpu_get_vbuf
                (vgdev, sizeof(struct virtio_gpu_update_cursor),
                 0, NULL, NULL);
        if (IS_ERR(vbuf)) {
                *vbuffer_p = NULL;
                return ERR_CAST(vbuf);
        }
        *vbuffer_p = vbuf;
        return (struct virtio_gpu_update_cursor *)vbuf->buf;
}

static void *virtio_gpu_alloc_cmd_resp(struct virtio_gpu_device *vgdev,
                                       virtio_gpu_resp_cb cb,
                                       struct virtio_gpu_vbuffer **vbuffer_p,
                                       int cmd_size, int resp_size,
                                       void *resp_buf)
{
        struct virtio_gpu_vbuffer *vbuf;

        vbuf = virtio_gpu_get_vbuf(vgdev, cmd_size,
                                   resp_size, resp_buf, cb);
        if (IS_ERR(vbuf)) {
                *vbuffer_p = NULL;
                return ERR_CAST(vbuf);
        }
        *vbuffer_p = vbuf;
        return (struct virtio_gpu_command *)vbuf->buf;
}

static void free_vbuf(struct virtio_gpu_device *vgdev,
                      struct virtio_gpu_vbuffer *vbuf)
{
        if (vbuf->resp_size > MAX_INLINE_RESP_SIZE)
                kfree(vbuf->resp_buf);
        kfree(vbuf->data_buf);
        kmem_cache_free(vgdev->vbufs, vbuf);
}

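/*
 * Pull all completed buffers off a virtqueue and collect them on
 * reclaim_list so the caller can process them outside the queue lock.
 */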
static void reclaim_vbufs(struct virtqueue *vq, struct list_head *reclaim_list)
{
        struct virtio_gpu_vbuffer *vbuf;
        unsigned int len;
        int freed = 0;

        while ((vbuf = virtqueue_get_buf(vq, &len))) {
                list_add_tail(&vbuf->list, reclaim_list);
                freed++;
        }
        if (freed == 0)
                DRM_DEBUG("Huh? zero vbufs reclaimed");
}

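/*
 * Work handler for the control queue: reclaim completed vbuffers, track the
 * highest fence id that signalled, run per-buffer response callbacks, free
 * the buffers, then wake anyone waiting for ring space and process fences.
 */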
void virtio_gpu_dequeue_ctrl_func(struct work_struct *work)
{
        struct virtio_gpu_device *vgdev =
                container_of(work, struct virtio_gpu_device,
                             ctrlq.dequeue_work);
        struct list_head reclaim_list;
        struct virtio_gpu_vbuffer *entry, *tmp;
        struct virtio_gpu_ctrl_hdr *resp;
        u64 fence_id = 0;

        INIT_LIST_HEAD(&reclaim_list);
        spin_lock(&vgdev->ctrlq.qlock);
        do {
                virtqueue_disable_cb(vgdev->ctrlq.vq);
                reclaim_vbufs(vgdev->ctrlq.vq, &reclaim_list);
        } while (!virtqueue_enable_cb(vgdev->ctrlq.vq));
        spin_unlock(&vgdev->ctrlq.qlock);

        list_for_each_entry_safe(entry, tmp, &reclaim_list, list) {
                resp = (struct virtio_gpu_ctrl_hdr *)entry->resp_buf;
                if (resp->type != cpu_to_le32(VIRTIO_GPU_RESP_OK_NODATA))
                        DRM_DEBUG("response 0x%x\n", le32_to_cpu(resp->type));
                if (resp->flags & cpu_to_le32(VIRTIO_GPU_FLAG_FENCE)) {
                        u64 f = le64_to_cpu(resp->fence_id);

                        if (fence_id > f) {
                                DRM_ERROR("%s: Oops: fence %llx -> %llx\n",
                                          __func__, fence_id, f);
                        } else {
                                fence_id = f;
                        }
                }
                if (entry->resp_cb)
                        entry->resp_cb(vgdev, entry);

                list_del(&entry->list);
                free_vbuf(vgdev, entry);
        }
        wake_up(&vgdev->ctrlq.ack_queue);

        if (fence_id)
                virtio_gpu_fence_event_process(vgdev, fence_id);
}

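/*
 * Work handler for the cursor queue: cursor commands carry no responses, so
 * completed buffers are simply reclaimed and freed.
 */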
void virtio_gpu_dequeue_cursor_func(struct work_struct *work)
{
        struct virtio_gpu_device *vgdev =
                container_of(work, struct virtio_gpu_device,
                             cursorq.dequeue_work);
        struct list_head reclaim_list;
        struct virtio_gpu_vbuffer *entry, *tmp;

        INIT_LIST_HEAD(&reclaim_list);
        spin_lock(&vgdev->cursorq.qlock);
        do {
                virtqueue_disable_cb(vgdev->cursorq.vq);
                reclaim_vbufs(vgdev->cursorq.vq, &reclaim_list);
        } while (!virtqueue_enable_cb(vgdev->cursorq.vq));
        spin_unlock(&vgdev->cursorq.qlock);

        list_for_each_entry_safe(entry, tmp, &reclaim_list, list) {
                list_del(&entry->list);
                free_vbuf(vgdev, entry);
        }
        wake_up(&vgdev->cursorq.ack_queue);
}

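/*
 * Queue a command on the control virtqueue: up to three scatterlist entries
 * are used (command, optional data payload, optional response buffer).  If
 * the ring is full, the qlock is dropped while waiting for space, hence the
 * __releases/__acquires annotations.
 */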
static int virtio_gpu_queue_ctrl_buffer_locked(struct virtio_gpu_device *vgdev,
                                               struct virtio_gpu_vbuffer *vbuf)
                __releases(&vgdev->ctrlq.qlock)
                __acquires(&vgdev->ctrlq.qlock)
{
        struct virtqueue *vq = vgdev->ctrlq.vq;
        struct scatterlist *sgs[3], vcmd, vout, vresp;
        int outcnt = 0, incnt = 0;
        int ret;

        if (!vgdev->vqs_ready)
                return -ENODEV;

        sg_init_one(&vcmd, vbuf->buf, vbuf->size);
        sgs[outcnt + incnt] = &vcmd;
        outcnt++;

        if (vbuf->data_size) {
                sg_init_one(&vout, vbuf->data_buf, vbuf->data_size);
                sgs[outcnt + incnt] = &vout;
                outcnt++;
        }

        if (vbuf->resp_size) {
                sg_init_one(&vresp, vbuf->resp_buf, vbuf->resp_size);
                sgs[outcnt + incnt] = &vresp;
                incnt++;
        }

retry:
        ret = virtqueue_add_sgs(vq, sgs, outcnt, incnt, vbuf, GFP_ATOMIC);
        if (ret == -ENOSPC) {
                spin_unlock(&vgdev->ctrlq.qlock);
                wait_event(vgdev->ctrlq.ack_queue, vq->num_free >= outcnt + incnt);
                spin_lock(&vgdev->ctrlq.qlock);
                goto retry;
        } else {
                virtqueue_kick(vq);
        }

        if (!ret)
                ret = vq->num_free;
        return ret;
}

static int virtio_gpu_queue_ctrl_buffer(struct virtio_gpu_device *vgdev,
                                        struct virtio_gpu_vbuffer *vbuf)
{
        int rc;

        spin_lock(&vgdev->ctrlq.qlock);
        rc = virtio_gpu_queue_ctrl_buffer_locked(vgdev, vbuf);
        spin_unlock(&vgdev->ctrlq.qlock);
        return rc;
}

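/*
 * Queue a command that may carry a fence.  The fence must be emitted and
 * the buffer queued under the same qlock hold, so check for the worst-case
 * three free descriptors up front before emitting the fence (see the
 * comment below).
 */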
static int virtio_gpu_queue_fenced_ctrl_buffer(struct virtio_gpu_device *vgdev,
                                               struct virtio_gpu_vbuffer *vbuf,
                                               struct virtio_gpu_ctrl_hdr *hdr,
                                               struct virtio_gpu_fence **fence)
{
        struct virtqueue *vq = vgdev->ctrlq.vq;
        int rc;

again:
        spin_lock(&vgdev->ctrlq.qlock);

        /*
         * Make sure we have enough space in the virtqueue.  If not,
         * wait here until we do.
         *
         * Without this check virtio_gpu_queue_ctrl_buffer_locked might
         * have to drop the lock and wait for free space after the fence
         * has already been emitted, which can result in fence ids being
         * submitted out-of-order.
         */
        if (vq->num_free < 3) {
                spin_unlock(&vgdev->ctrlq.qlock);
                wait_event(vgdev->ctrlq.ack_queue, vq->num_free >= 3);
                goto again;
        }

        if (fence)
                virtio_gpu_fence_emit(vgdev, hdr, fence);
        rc = virtio_gpu_queue_ctrl_buffer_locked(vgdev, vbuf);
        spin_unlock(&vgdev->ctrlq.qlock);
        return rc;
}

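/*
 * Queue a cursor update: a single scatterlist entry on the cursor
 * virtqueue, retrying if the ring is temporarily full.
 */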
static int virtio_gpu_queue_cursor(struct virtio_gpu_device *vgdev,
                                   struct virtio_gpu_vbuffer *vbuf)
{
        struct virtqueue *vq = vgdev->cursorq.vq;
        struct scatterlist *sgs[1], ccmd;
        int ret;
        int outcnt;

        if (!vgdev->vqs_ready)
                return -ENODEV;

        sg_init_one(&ccmd, vbuf->buf, vbuf->size);
        sgs[0] = &ccmd;
        outcnt = 1;

        spin_lock(&vgdev->cursorq.qlock);
retry:
        ret = virtqueue_add_sgs(vq, sgs, outcnt, 0, vbuf, GFP_ATOMIC);
        if (ret == -ENOSPC) {
                spin_unlock(&vgdev->cursorq.qlock);
                wait_event(vgdev->cursorq.ack_queue, vq->num_free >= outcnt);
                spin_lock(&vgdev->cursorq.qlock);
                goto retry;
        } else {
                virtqueue_kick(vq);
        }

        spin_unlock(&vgdev->cursorq.qlock);

        if (!ret)
                ret = vq->num_free;
        return ret;
}

/* Just create gem objects for userspace and other long-lived objects;
 * could the queue objects simply use dma_alloc'ed pages instead?
 */

/* create a basic resource */
void virtio_gpu_cmd_create_resource(struct virtio_gpu_device *vgdev,
                                    uint32_t resource_id,
                                    uint32_t format,
                                    uint32_t width,
                                    uint32_t height)
{
        struct virtio_gpu_resource_create_2d *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;

        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_CREATE_2D);
        cmd_p->resource_id = cpu_to_le32(resource_id);
        cmd_p->format = cpu_to_le32(format);
        cmd_p->width = cpu_to_le32(width);
        cmd_p->height = cpu_to_le32(height);

        virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_unref_resource(struct virtio_gpu_device *vgdev,
                                   uint32_t resource_id)
{
        struct virtio_gpu_resource_unref *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;

        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_UNREF);
        cmd_p->resource_id = cpu_to_le32(resource_id);

        virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_resource_inval_backing(struct virtio_gpu_device *vgdev,
                                           uint32_t resource_id)
{
        struct virtio_gpu_resource_detach_backing *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;

        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING);
        cmd_p->resource_id = cpu_to_le32(resource_id);

        virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_set_scanout(struct virtio_gpu_device *vgdev,
                                uint32_t scanout_id, uint32_t resource_id,
                                uint32_t width, uint32_t height,
                                uint32_t x, uint32_t y)
{
        struct virtio_gpu_set_scanout *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;

        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_SET_SCANOUT);
        cmd_p->resource_id = cpu_to_le32(resource_id);
        cmd_p->scanout_id = cpu_to_le32(scanout_id);
        cmd_p->r.width = cpu_to_le32(width);
        cmd_p->r.height = cpu_to_le32(height);
        cmd_p->r.x = cpu_to_le32(x);
        cmd_p->r.y = cpu_to_le32(y);

        virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_resource_flush(struct virtio_gpu_device *vgdev,
                                   uint32_t resource_id,
                                   uint32_t x, uint32_t y,
                                   uint32_t width, uint32_t height)
{
        struct virtio_gpu_resource_flush *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;

        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_FLUSH);
        cmd_p->resource_id = cpu_to_le32(resource_id);
        cmd_p->r.width = cpu_to_le32(width);
        cmd_p->r.height = cpu_to_le32(height);
        cmd_p->r.x = cpu_to_le32(x);
        cmd_p->r.y = cpu_to_le32(y);

        virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev,
                                        uint32_t resource_id, uint64_t offset,
                                        __le32 width, __le32 height,
                                        __le32 x, __le32 y,
                                        struct virtio_gpu_fence **fence)
{
        struct virtio_gpu_transfer_to_host_2d *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;

        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D);
        cmd_p->resource_id = cpu_to_le32(resource_id);
        cmd_p->offset = cpu_to_le64(offset);
        cmd_p->r.width = width;
        cmd_p->r.height = height;
        cmd_p->r.x = x;
        cmd_p->r.y = y;

        virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
}

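/*
 * Attach guest backing pages to a host resource.  The mem entry array is
 * handed over via vbuf->data_buf and freed by free_vbuf() once the host
 * has consumed the command.
 */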
static void
virtio_gpu_cmd_resource_attach_backing(struct virtio_gpu_device *vgdev,
                                       uint32_t resource_id,
                                       struct virtio_gpu_mem_entry *ents,
                                       uint32_t nents,
                                       struct virtio_gpu_fence **fence)
{
        struct virtio_gpu_resource_attach_backing *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;

        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING);
        cmd_p->resource_id = cpu_to_le32(resource_id);
        cmd_p->nr_entries = cpu_to_le32(nents);

        vbuf->data_buf = ents;
        vbuf->data_size = sizeof(*ents) * nents;

        virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
}

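/*
 * Response callback for GET_DISPLAY_INFO: cache the per-scanout modes,
 * wake up waiters and generate a hotplug event.
 */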
static void virtio_gpu_cmd_get_display_info_cb(struct virtio_gpu_device *vgdev,
                                               struct virtio_gpu_vbuffer *vbuf)
{
        struct virtio_gpu_resp_display_info *resp =
                (struct virtio_gpu_resp_display_info *)vbuf->resp_buf;
        int i;

        spin_lock(&vgdev->display_info_lock);
        for (i = 0; i < vgdev->num_scanouts; i++) {
                vgdev->outputs[i].info = resp->pmodes[i];
                if (resp->pmodes[i].enabled) {
                        DRM_DEBUG("output %d: %dx%d+%d+%d", i,
                                  le32_to_cpu(resp->pmodes[i].r.width),
                                  le32_to_cpu(resp->pmodes[i].r.height),
                                  le32_to_cpu(resp->pmodes[i].r.x),
                                  le32_to_cpu(resp->pmodes[i].r.y));
                } else {
                        DRM_DEBUG("output %d: disabled", i);
                }
        }

        vgdev->display_info_pending = false;
        spin_unlock(&vgdev->display_info_lock);
        wake_up(&vgdev->resp_wq);

        if (!drm_helper_hpd_irq_event(vgdev->ddev))
                drm_kms_helper_hotplug_event(vgdev->ddev);
}

static void virtio_gpu_cmd_get_capset_info_cb(struct virtio_gpu_device *vgdev,
                                              struct virtio_gpu_vbuffer *vbuf)
{
        struct virtio_gpu_get_capset_info *cmd =
                (struct virtio_gpu_get_capset_info *)vbuf->buf;
        struct virtio_gpu_resp_capset_info *resp =
                (struct virtio_gpu_resp_capset_info *)vbuf->resp_buf;
        int i = le32_to_cpu(cmd->capset_index);

        spin_lock(&vgdev->display_info_lock);
        vgdev->capsets[i].id = le32_to_cpu(resp->capset_id);
        vgdev->capsets[i].max_version = le32_to_cpu(resp->capset_max_version);
        vgdev->capsets[i].max_size = le32_to_cpu(resp->capset_max_size);
        spin_unlock(&vgdev->display_info_lock);
        wake_up(&vgdev->resp_wq);
}

static void virtio_gpu_cmd_capset_cb(struct virtio_gpu_device *vgdev,
                                     struct virtio_gpu_vbuffer *vbuf)
{
        struct virtio_gpu_get_capset *cmd =
                (struct virtio_gpu_get_capset *)vbuf->buf;
        struct virtio_gpu_resp_capset *resp =
                (struct virtio_gpu_resp_capset *)vbuf->resp_buf;
        struct virtio_gpu_drv_cap_cache *cache_ent;

        spin_lock(&vgdev->display_info_lock);
        list_for_each_entry(cache_ent, &vgdev->cap_cache, head) {
                if (cache_ent->version == le32_to_cpu(cmd->capset_version) &&
                    cache_ent->id == le32_to_cpu(cmd->capset_id)) {
                        memcpy(cache_ent->caps_cache, resp->capset_data,
                               cache_ent->size);
                        atomic_set(&cache_ent->is_valid, 1);
                        break;
                }
        }
        spin_unlock(&vgdev->display_info_lock);
        wake_up(&vgdev->resp_wq);
}

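/*
 * The GET_* commands below need responses too big for the inline response
 * area, so they allocate a separate resp_buf and rely on the response
 * callbacks above to fill in the driver state once the host answers.
 */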
int virtio_gpu_cmd_get_display_info(struct virtio_gpu_device *vgdev)
{
        struct virtio_gpu_ctrl_hdr *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;
        void *resp_buf;

        resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_display_info),
                           GFP_KERNEL);
        if (!resp_buf)
                return -ENOMEM;

        cmd_p = virtio_gpu_alloc_cmd_resp
                (vgdev, &virtio_gpu_cmd_get_display_info_cb, &vbuf,
                 sizeof(*cmd_p), sizeof(struct virtio_gpu_resp_display_info),
                 resp_buf);
        memset(cmd_p, 0, sizeof(*cmd_p));

        vgdev->display_info_pending = true;
        cmd_p->type = cpu_to_le32(VIRTIO_GPU_CMD_GET_DISPLAY_INFO);
        virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
        return 0;
}

int virtio_gpu_cmd_get_capset_info(struct virtio_gpu_device *vgdev, int idx)
{
        struct virtio_gpu_get_capset_info *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;
        void *resp_buf;

        resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_capset_info),
                           GFP_KERNEL);
        if (!resp_buf)
                return -ENOMEM;

        cmd_p = virtio_gpu_alloc_cmd_resp
                (vgdev, &virtio_gpu_cmd_get_capset_info_cb, &vbuf,
                 sizeof(*cmd_p), sizeof(struct virtio_gpu_resp_capset_info),
                 resp_buf);
        memset(cmd_p, 0, sizeof(*cmd_p));

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_GET_CAPSET_INFO);
        cmd_p->capset_index = cpu_to_le32(idx);
        virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
        return 0;
}

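/*
 * Fetch a capability set from the host.  A cache entry is allocated and put
 * on vgdev->cap_cache immediately; virtio_gpu_cmd_capset_cb() marks it valid
 * once the response data has been copied in.
 */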
int virtio_gpu_cmd_get_capset(struct virtio_gpu_device *vgdev,
                              int idx, int version,
                              struct virtio_gpu_drv_cap_cache **cache_p)
{
        struct virtio_gpu_get_capset *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;
        int max_size;
        struct virtio_gpu_drv_cap_cache *cache_ent;
        void *resp_buf;

        if (idx >= vgdev->num_capsets)
                return -EINVAL;

        if (version > vgdev->capsets[idx].max_version)
                return -EINVAL;

        max_size = vgdev->capsets[idx].max_size;

        cache_ent = kzalloc(sizeof(*cache_ent), GFP_KERNEL);
        if (!cache_ent)
                return -ENOMEM;

        cache_ent->caps_cache = kmalloc(max_size, GFP_KERNEL);
        if (!cache_ent->caps_cache) {
                kfree(cache_ent);
                return -ENOMEM;
        }

        resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_capset) + max_size,
                           GFP_KERNEL);
        if (!resp_buf) {
                kfree(cache_ent->caps_cache);
                kfree(cache_ent);
                return -ENOMEM;
        }

        cache_ent->version = version;
        cache_ent->id = vgdev->capsets[idx].id;
        atomic_set(&cache_ent->is_valid, 0);
        cache_ent->size = max_size;
        spin_lock(&vgdev->display_info_lock);
        list_add_tail(&cache_ent->head, &vgdev->cap_cache);
        spin_unlock(&vgdev->display_info_lock);

        cmd_p = virtio_gpu_alloc_cmd_resp
                (vgdev, &virtio_gpu_cmd_capset_cb, &vbuf, sizeof(*cmd_p),
                 sizeof(struct virtio_gpu_resp_capset) + max_size,
                 resp_buf);
        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_GET_CAPSET);
        cmd_p->capset_id = cpu_to_le32(vgdev->capsets[idx].id);
        cmd_p->capset_version = cpu_to_le32(version);
        *cache_p = cache_ent;
        virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);

        return 0;
}

void virtio_gpu_cmd_context_create(struct virtio_gpu_device *vgdev, uint32_t id,
                                   uint32_t nlen, const char *name)
{
        struct virtio_gpu_ctx_create *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;

        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_CREATE);
        cmd_p->hdr.ctx_id = cpu_to_le32(id);
        cmd_p->nlen = cpu_to_le32(nlen);
        strncpy(cmd_p->debug_name, name, sizeof(cmd_p->debug_name) - 1);
        cmd_p->debug_name[sizeof(cmd_p->debug_name) - 1] = 0;
        virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_context_destroy(struct virtio_gpu_device *vgdev,
                                    uint32_t id)
{
        struct virtio_gpu_ctx_destroy *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;

        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_DESTROY);
        cmd_p->hdr.ctx_id = cpu_to_le32(id);
        virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_context_attach_resource(struct virtio_gpu_device *vgdev,
                                            uint32_t ctx_id,
                                            uint32_t resource_id)
{
        struct virtio_gpu_ctx_resource *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;

        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_ATTACH_RESOURCE);
        cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
        cmd_p->resource_id = cpu_to_le32(resource_id);
        virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_context_detach_resource(struct virtio_gpu_device *vgdev,
                                            uint32_t ctx_id,
                                            uint32_t resource_id)
{
        struct virtio_gpu_ctx_resource *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;

        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_DETACH_RESOURCE);
        cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
        cmd_p->resource_id = cpu_to_le32(resource_id);
        virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void
virtio_gpu_cmd_resource_create_3d(struct virtio_gpu_device *vgdev,
                                  struct virtio_gpu_resource_create_3d *rc_3d,
                                  struct virtio_gpu_fence **fence)
{
        struct virtio_gpu_resource_create_3d *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;

        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));

        *cmd_p = *rc_3d;
        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_CREATE_3D);
        cmd_p->hdr.flags = 0;

        virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
}

void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev,
                                        uint32_t resource_id, uint32_t ctx_id,
                                        uint64_t offset, uint32_t level,
                                        struct virtio_gpu_box *box,
                                        struct virtio_gpu_fence **fence)
{
        struct virtio_gpu_transfer_host_3d *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;

        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_TO_HOST_3D);
        cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
        cmd_p->resource_id = cpu_to_le32(resource_id);
        cmd_p->box = *box;
        cmd_p->offset = cpu_to_le64(offset);
        cmd_p->level = cpu_to_le32(level);

        virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
}

void virtio_gpu_cmd_transfer_from_host_3d(struct virtio_gpu_device *vgdev,
                                          uint32_t resource_id, uint32_t ctx_id,
                                          uint64_t offset, uint32_t level,
                                          struct virtio_gpu_box *box,
                                          struct virtio_gpu_fence **fence)
{
        struct virtio_gpu_transfer_host_3d *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;

        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_FROM_HOST_3D);
        cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
        cmd_p->resource_id = cpu_to_le32(resource_id);
        cmd_p->box = *box;
        cmd_p->offset = cpu_to_le64(offset);
        cmd_p->level = cpu_to_le32(level);

        virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
}

void virtio_gpu_cmd_submit(struct virtio_gpu_device *vgdev,
                           void *data, uint32_t data_size,
                           uint32_t ctx_id, struct virtio_gpu_fence **fence)
{
        struct virtio_gpu_cmd_submit *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;

        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));

        vbuf->data_buf = data;
        vbuf->data_size = data_size;

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_SUBMIT_3D);
        cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
        cmd_p->size = cpu_to_le32(data_size);

        virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
}

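/*
 * Build the mem entry array from the object's sg table and attach it as
 * backing storage for the given host resource id.
 */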
int virtio_gpu_object_attach(struct virtio_gpu_device *vgdev,
                             struct virtio_gpu_object *obj,
                             uint32_t resource_id,
                             struct virtio_gpu_fence **fence)
{
        struct virtio_gpu_mem_entry *ents;
        struct scatterlist *sg;
        int si;

        if (!obj->pages) {
                int ret;

                ret = virtio_gpu_object_get_sg_table(vgdev, obj);
                if (ret)
                        return ret;
        }

        /* gets freed when the ring has consumed it */
        ents = kmalloc_array(obj->pages->nents,
                             sizeof(struct virtio_gpu_mem_entry),
                             GFP_KERNEL);
        if (!ents) {
                DRM_ERROR("failed to allocate ent list\n");
                return -ENOMEM;
        }

        for_each_sg(obj->pages->sgl, sg, obj->pages->nents, si) {
                ents[si].addr = cpu_to_le64(sg_phys(sg));
                ents[si].length = cpu_to_le32(sg->length);
                ents[si].padding = 0;
        }

        virtio_gpu_cmd_resource_attach_backing(vgdev, resource_id,
                                               ents, obj->pages->nents,
                                               fence);
        obj->hw_res_handle = resource_id;
        return 0;
}

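/*
 * Push the current cursor state for an output to the host via the cursor
 * virtqueue.
 */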
void virtio_gpu_cursor_ping(struct virtio_gpu_device *vgdev,
                            struct virtio_gpu_output *output)
{
        struct virtio_gpu_vbuffer *vbuf;
        struct virtio_gpu_update_cursor *cur_p;

        output->cursor.pos.scanout_id = cpu_to_le32(output->index);
        cur_p = virtio_gpu_alloc_cursor(vgdev, &vbuf);
        memcpy(cur_p, &output->cursor, sizeof(output->cursor));
        virtio_gpu_queue_cursor(vgdev, vbuf);
}