linux/drivers/gpu/drm/virtio/virtgpu_vq.c
/*
 * Copyright (C) 2015 Red Hat, Inc.
 * All Rights Reserved.
 *
 * Authors:
 *    Dave Airlie <airlied@redhat.com>
 *    Gerd Hoffmann <kraxel@redhat.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/dma-mapping.h>
#include <linux/virtio.h>
#include <linux/virtio_config.h>
#include <linux/virtio_ring.h>

#include "virtgpu_drv.h"
#include "virtgpu_trace.h"

#define MAX_INLINE_CMD_SIZE   96
#define MAX_INLINE_RESP_SIZE  24
#define VBUFFER_SIZE          (sizeof(struct virtio_gpu_vbuffer) \
                               + MAX_INLINE_CMD_SIZE             \
                               + MAX_INLINE_RESP_SIZE)

static void convert_to_hw_box(struct virtio_gpu_box *dst,
                              const struct drm_virtgpu_3d_box *src)
{
        dst->x = cpu_to_le32(src->x);
        dst->y = cpu_to_le32(src->y);
        dst->z = cpu_to_le32(src->z);
        dst->w = cpu_to_le32(src->w);
        dst->h = cpu_to_le32(src->h);
        dst->d = cpu_to_le32(src->d);
}

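/*
 * Virtqueue interrupt callbacks.  These run in atomic context, so they
 * only schedule the per-queue dequeue work; the actual reclaim of
 * completed buffers happens in the dequeue functions further down.
 */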
void virtio_gpu_ctrl_ack(struct virtqueue *vq)
{
        struct drm_device *dev = vq->vdev->priv;
        struct virtio_gpu_device *vgdev = dev->dev_private;

        schedule_work(&vgdev->ctrlq.dequeue_work);
}

void virtio_gpu_cursor_ack(struct virtqueue *vq)
{
        struct drm_device *dev = vq->vdev->priv;
        struct virtio_gpu_device *vgdev = dev->dev_private;

        schedule_work(&vgdev->cursorq.dequeue_work);
}

int virtio_gpu_alloc_vbufs(struct virtio_gpu_device *vgdev)
{
        vgdev->vbufs = kmem_cache_create("virtio-gpu-vbufs",
                                         VBUFFER_SIZE,
                                         __alignof__(struct virtio_gpu_vbuffer),
                                         0, NULL);
        if (!vgdev->vbufs)
                return -ENOMEM;
        return 0;
}

void virtio_gpu_free_vbufs(struct virtio_gpu_device *vgdev)
{
        kmem_cache_destroy(vgdev->vbufs);
        vgdev->vbufs = NULL;
}

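/*
 * Each slab object holds the vbuffer bookkeeping plus inline storage for
 * the command and (optionally) the response, roughly:
 *
 *   +---------------------------+-------------------+--------------------+
 *   | struct virtio_gpu_vbuffer | cmd (<= 96 bytes) | resp (<= 24 bytes) |
 *   +---------------------------+-------------------+--------------------+
 *
 * Responses larger than MAX_INLINE_RESP_SIZE must be passed in by the
 * caller via resp_buf; those are kfree'd in free_vbuf().
 */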
static struct virtio_gpu_vbuffer*
virtio_gpu_get_vbuf(struct virtio_gpu_device *vgdev,
                    int size, int resp_size, void *resp_buf,
                    virtio_gpu_resp_cb resp_cb)
{
        struct virtio_gpu_vbuffer *vbuf;

        vbuf = kmem_cache_zalloc(vgdev->vbufs, GFP_KERNEL);
        if (!vbuf)
                return ERR_PTR(-ENOMEM);

        BUG_ON(size > MAX_INLINE_CMD_SIZE ||
               size < sizeof(struct virtio_gpu_ctrl_hdr));
        vbuf->buf = (void *)vbuf + sizeof(*vbuf);
        vbuf->size = size;

        vbuf->resp_cb = resp_cb;
        vbuf->resp_size = resp_size;
        if (resp_size <= MAX_INLINE_RESP_SIZE)
                vbuf->resp_buf = (void *)vbuf->buf + size;
        else
                vbuf->resp_buf = resp_buf;
        BUG_ON(!vbuf->resp_buf);
        return vbuf;
}

static struct virtio_gpu_ctrl_hdr *
virtio_gpu_vbuf_ctrl_hdr(struct virtio_gpu_vbuffer *vbuf)
{
        /* this assumes a vbuf contains a command that starts with a
         * virtio_gpu_ctrl_hdr, which is true for both ctrl and cursor
         * virtqueues.
         */
        return (struct virtio_gpu_ctrl_hdr *)vbuf->buf;
}

static struct virtio_gpu_update_cursor*
virtio_gpu_alloc_cursor(struct virtio_gpu_device *vgdev,
                        struct virtio_gpu_vbuffer **vbuffer_p)
{
        struct virtio_gpu_vbuffer *vbuf;

        vbuf = virtio_gpu_get_vbuf
                (vgdev, sizeof(struct virtio_gpu_update_cursor),
                 0, NULL, NULL);
        if (IS_ERR(vbuf)) {
                *vbuffer_p = NULL;
                return ERR_CAST(vbuf);
        }
        *vbuffer_p = vbuf;
        return (struct virtio_gpu_update_cursor *)vbuf->buf;
}

static void *virtio_gpu_alloc_cmd_resp(struct virtio_gpu_device *vgdev,
                                       virtio_gpu_resp_cb cb,
                                       struct virtio_gpu_vbuffer **vbuffer_p,
                                       int cmd_size, int resp_size,
                                       void *resp_buf)
{
        struct virtio_gpu_vbuffer *vbuf;

        vbuf = virtio_gpu_get_vbuf(vgdev, cmd_size,
                                   resp_size, resp_buf, cb);
        if (IS_ERR(vbuf)) {
                *vbuffer_p = NULL;
                return ERR_CAST(vbuf);
        }
        *vbuffer_p = vbuf;
        return (struct virtio_gpu_command *)vbuf->buf;
}

static void *virtio_gpu_alloc_cmd(struct virtio_gpu_device *vgdev,
                                  struct virtio_gpu_vbuffer **vbuffer_p,
                                  int size)
{
        return virtio_gpu_alloc_cmd_resp(vgdev, NULL, vbuffer_p, size,
                                         sizeof(struct virtio_gpu_ctrl_hdr),
                                         NULL);
}

static void *virtio_gpu_alloc_cmd_cb(struct virtio_gpu_device *vgdev,
                                     struct virtio_gpu_vbuffer **vbuffer_p,
                                     int size,
                                     virtio_gpu_resp_cb cb)
{
        return virtio_gpu_alloc_cmd_resp(vgdev, cb, vbuffer_p, size,
                                         sizeof(struct virtio_gpu_ctrl_hdr),
                                         NULL);
}

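/*
 * Typical command emission, as used by the virtio_gpu_cmd_*() helpers
 * below (sketch):
 *
 *      cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
 *      memset(cmd_p, 0, sizeof(*cmd_p));
 *      cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_...);
 *      virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
 *
 * The vbuffer, its inline response storage and any data payload are
 * released in free_vbuf() once the host has completed the command.
 */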
static void free_vbuf(struct virtio_gpu_device *vgdev,
                      struct virtio_gpu_vbuffer *vbuf)
{
        if (vbuf->resp_size > MAX_INLINE_RESP_SIZE)
                kfree(vbuf->resp_buf);
        kvfree(vbuf->data_buf);
        kmem_cache_free(vgdev->vbufs, vbuf);
}

static void reclaim_vbufs(struct virtqueue *vq, struct list_head *reclaim_list)
{
        struct virtio_gpu_vbuffer *vbuf;
        unsigned int len;
        int freed = 0;

        while ((vbuf = virtqueue_get_buf(vq, &len))) {
                list_add_tail(&vbuf->list, reclaim_list);
                freed++;
        }
        if (freed == 0)
                DRM_DEBUG("Huh? zero vbufs reclaimed\n");
}

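/*
 * Dequeue work for the control queue.  Completed buffers are collected
 * under the queue lock with callbacks disabled; the disable/enable loop
 * re-checks the queue so completions racing with callback re-enabling are
 * not lost.  Response handling then runs outside the lock.
 */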
void virtio_gpu_dequeue_ctrl_func(struct work_struct *work)
{
        struct virtio_gpu_device *vgdev =
                container_of(work, struct virtio_gpu_device,
                             ctrlq.dequeue_work);
        struct list_head reclaim_list;
        struct virtio_gpu_vbuffer *entry, *tmp;
        struct virtio_gpu_ctrl_hdr *resp;
        u64 fence_id = 0;

        INIT_LIST_HEAD(&reclaim_list);
        spin_lock(&vgdev->ctrlq.qlock);
        do {
                virtqueue_disable_cb(vgdev->ctrlq.vq);
                reclaim_vbufs(vgdev->ctrlq.vq, &reclaim_list);

        } while (!virtqueue_enable_cb(vgdev->ctrlq.vq));
        spin_unlock(&vgdev->ctrlq.qlock);

        list_for_each_entry(entry, &reclaim_list, list) {
                resp = (struct virtio_gpu_ctrl_hdr *)entry->resp_buf;

                trace_virtio_gpu_cmd_response(vgdev->ctrlq.vq, resp);

                if (resp->type != cpu_to_le32(VIRTIO_GPU_RESP_OK_NODATA)) {
                        if (le32_to_cpu(resp->type) >= VIRTIO_GPU_RESP_ERR_UNSPEC) {
                                struct virtio_gpu_ctrl_hdr *cmd;
                                cmd = virtio_gpu_vbuf_ctrl_hdr(entry);
                                DRM_ERROR_RATELIMITED("response 0x%x (command 0x%x)\n",
                                                      le32_to_cpu(resp->type),
                                                      le32_to_cpu(cmd->type));
                        } else
                                DRM_DEBUG("response 0x%x\n", le32_to_cpu(resp->type));
                }
                if (resp->flags & cpu_to_le32(VIRTIO_GPU_FLAG_FENCE)) {
                        u64 f = le64_to_cpu(resp->fence_id);

                        if (fence_id > f) {
                                DRM_ERROR("%s: Oops: fence %llx -> %llx\n",
                                          __func__, fence_id, f);
                        } else {
                                fence_id = f;
                        }
                }
                if (entry->resp_cb)
                        entry->resp_cb(vgdev, entry);
        }
        wake_up(&vgdev->ctrlq.ack_queue);

        if (fence_id)
                virtio_gpu_fence_event_process(vgdev, fence_id);

        list_for_each_entry_safe(entry, tmp, &reclaim_list, list) {
                if (entry->objs)
                        virtio_gpu_array_put_free_delayed(vgdev, entry->objs);
                list_del(&entry->list);
                free_vbuf(vgdev, entry);
        }
}

void virtio_gpu_dequeue_cursor_func(struct work_struct *work)
{
        struct virtio_gpu_device *vgdev =
                container_of(work, struct virtio_gpu_device,
                             cursorq.dequeue_work);
        struct list_head reclaim_list;
        struct virtio_gpu_vbuffer *entry, *tmp;

        INIT_LIST_HEAD(&reclaim_list);
        spin_lock(&vgdev->cursorq.qlock);
        do {
                virtqueue_disable_cb(vgdev->cursorq.vq);
                reclaim_vbufs(vgdev->cursorq.vq, &reclaim_list);
        } while (!virtqueue_enable_cb(vgdev->cursorq.vq));
        spin_unlock(&vgdev->cursorq.qlock);

        list_for_each_entry_safe(entry, tmp, &reclaim_list, list) {
                list_del(&entry->list);
                free_vbuf(vgdev, entry);
        }
        wake_up(&vgdev->cursorq.ack_queue);
}

/*
 * Create a sg_table from a vmalloc'd buffer.  vmalloc memory is not
 * physically contiguous, so every page gets its own sg entry.
 */
static struct sg_table *vmalloc_to_sgt(char *data, uint32_t size, int *sg_ents)
{
        int ret, s, i;
        struct sg_table *sgt;
        struct scatterlist *sg;
        struct page *pg;

        if (WARN_ON(!PAGE_ALIGNED(data)))
                return NULL;

        sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
        if (!sgt)
                return NULL;

        *sg_ents = DIV_ROUND_UP(size, PAGE_SIZE);
        ret = sg_alloc_table(sgt, *sg_ents, GFP_KERNEL);
        if (ret) {
                kfree(sgt);
                return NULL;
        }

        for_each_sg(sgt->sgl, sg, *sg_ents, i) {
                pg = vmalloc_to_page(data);
                if (!pg) {
                        sg_free_table(sgt);
                        kfree(sgt);
                        return NULL;
                }

                s = min_t(int, PAGE_SIZE, size);
                sg_set_page(sg, pg, s, 0);

                size -= s;
                data += s;
        }

        return sgt;
}

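/*
 * Queue one command on the control queue.  When the device supports
 * indirect descriptors (has_indirect), the whole sg list is placed in an
 * indirect table and consumes only a single ring slot, which is why
 * elemcnt is forced to 1 below.
 */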
static void virtio_gpu_queue_ctrl_sgs(struct virtio_gpu_device *vgdev,
                                      struct virtio_gpu_vbuffer *vbuf,
                                      struct virtio_gpu_fence *fence,
                                      int elemcnt,
                                      struct scatterlist **sgs,
                                      int outcnt,
                                      int incnt)
{
        struct virtqueue *vq = vgdev->ctrlq.vq;
        int ret, idx;

        if (!drm_dev_enter(vgdev->ddev, &idx)) {
                if (fence && vbuf->objs)
                        virtio_gpu_array_unlock_resv(vbuf->objs);
                free_vbuf(vgdev, vbuf);
                return;
        }

        if (vgdev->has_indirect)
                elemcnt = 1;

again:
        spin_lock(&vgdev->ctrlq.qlock);

        if (vq->num_free < elemcnt) {
                spin_unlock(&vgdev->ctrlq.qlock);
                virtio_gpu_notify(vgdev);
                wait_event(vgdev->ctrlq.ack_queue, vq->num_free >= elemcnt);
                goto again;
        }

        /* now that the position of the vbuf in the virtqueue is known, we can
         * finally set the fence id
         */
        if (fence) {
                virtio_gpu_fence_emit(vgdev, virtio_gpu_vbuf_ctrl_hdr(vbuf),
                                      fence);
                if (vbuf->objs) {
                        virtio_gpu_array_add_fence(vbuf->objs, &fence->f);
                        virtio_gpu_array_unlock_resv(vbuf->objs);
                }
        }

        ret = virtqueue_add_sgs(vq, sgs, outcnt, incnt, vbuf, GFP_ATOMIC);
        WARN_ON(ret);

        trace_virtio_gpu_cmd_queue(vq, virtio_gpu_vbuf_ctrl_hdr(vbuf));

        atomic_inc(&vgdev->pending_commands);

        spin_unlock(&vgdev->ctrlq.qlock);

        drm_dev_exit(idx);
}

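/*
 * Assemble the scatterlist for one control command.  The ring layout is,
 * in order:
 *
 *      sgs[0]: the command itself          (driver -> device)
 *      sgs[1]: optional data payload       (driver -> device)
 *      sgs[n]: optional response buffer    (device -> driver)
 */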
static void virtio_gpu_queue_fenced_ctrl_buffer(struct virtio_gpu_device *vgdev,
                                                struct virtio_gpu_vbuffer *vbuf,
                                                struct virtio_gpu_fence *fence)
{
        struct scatterlist *sgs[3], vcmd, vout, vresp;
        struct sg_table *sgt = NULL;
        int elemcnt = 0, outcnt = 0, incnt = 0;

        /* set up vcmd */
        sg_init_one(&vcmd, vbuf->buf, vbuf->size);
        elemcnt++;
        sgs[outcnt] = &vcmd;
        outcnt++;

        /* set up vout */
        if (vbuf->data_size) {
                if (is_vmalloc_addr(vbuf->data_buf)) {
                        int sg_ents;
                        sgt = vmalloc_to_sgt(vbuf->data_buf, vbuf->data_size,
                                             &sg_ents);
                        if (!sgt) {
                                if (fence && vbuf->objs)
                                        virtio_gpu_array_unlock_resv(vbuf->objs);
                                /* nothing was queued yet, don't leak the vbuf */
                                free_vbuf(vgdev, vbuf);
                                return;
                        }

                        elemcnt += sg_ents;
                        sgs[outcnt] = sgt->sgl;
                } else {
                        sg_init_one(&vout, vbuf->data_buf, vbuf->data_size);
                        elemcnt++;
                        sgs[outcnt] = &vout;
                }
                outcnt++;
        }

        /* set up vresp */
        if (vbuf->resp_size) {
                sg_init_one(&vresp, vbuf->resp_buf, vbuf->resp_size);
                elemcnt++;
                sgs[outcnt + incnt] = &vresp;
                incnt++;
        }

        virtio_gpu_queue_ctrl_sgs(vgdev, vbuf, fence, elemcnt, sgs, outcnt,
                                  incnt);

        if (sgt) {
                sg_free_table(sgt);
                kfree(sgt);
        }
}

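/*
 * Commands are only queued above; the device is deliberately not kicked
 * per command.  Callers batch several commands and flush them with a
 * single notify, e.g. (sketch):
 *
 *      virtio_gpu_cmd_resource_flush(vgdev, ...);
 *      virtio_gpu_cmd_set_scanout(vgdev, ...);
 *      virtio_gpu_notify(vgdev);        <- one kick for the whole batch
 *
 * pending_commands tracks whether there is anything left to flush.
 */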
void virtio_gpu_notify(struct virtio_gpu_device *vgdev)
{
        bool notify;

        if (!atomic_read(&vgdev->pending_commands))
                return;

        spin_lock(&vgdev->ctrlq.qlock);
        atomic_set(&vgdev->pending_commands, 0);
        notify = virtqueue_kick_prepare(vgdev->ctrlq.vq);
        spin_unlock(&vgdev->ctrlq.qlock);

        if (notify)
                virtqueue_notify(vgdev->ctrlq.vq);
}

static void virtio_gpu_queue_ctrl_buffer(struct virtio_gpu_device *vgdev,
                                         struct virtio_gpu_vbuffer *vbuf)
{
        virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, NULL);
}

static void virtio_gpu_queue_cursor(struct virtio_gpu_device *vgdev,
                                    struct virtio_gpu_vbuffer *vbuf)
{
        struct virtqueue *vq = vgdev->cursorq.vq;
        struct scatterlist *sgs[1], ccmd;
        int idx, ret, outcnt;
        bool notify;

        if (!drm_dev_enter(vgdev->ddev, &idx)) {
                free_vbuf(vgdev, vbuf);
                return;
        }

        sg_init_one(&ccmd, vbuf->buf, vbuf->size);
        sgs[0] = &ccmd;
        outcnt = 1;

        spin_lock(&vgdev->cursorq.qlock);
retry:
        ret = virtqueue_add_sgs(vq, sgs, outcnt, 0, vbuf, GFP_ATOMIC);
        if (ret == -ENOSPC) {
                spin_unlock(&vgdev->cursorq.qlock);
                wait_event(vgdev->cursorq.ack_queue, vq->num_free >= outcnt);
                spin_lock(&vgdev->cursorq.qlock);
                goto retry;
        } else {
                trace_virtio_gpu_cmd_queue(vq,
                        virtio_gpu_vbuf_ctrl_hdr(vbuf));

                notify = virtqueue_kick_prepare(vq);
        }

        spin_unlock(&vgdev->cursorq.qlock);

        if (notify)
                virtqueue_notify(vq);

        drm_dev_exit(idx);
}

/* Open question: create gem objects only for userspace and long-lived
 * objects, and just use dma_alloc'ed pages for the queue objects?
 */

/* create a basic resource */
void virtio_gpu_cmd_create_resource(struct virtio_gpu_device *vgdev,
                                    struct virtio_gpu_object *bo,
                                    struct virtio_gpu_object_params *params,
                                    struct virtio_gpu_object_array *objs,
                                    struct virtio_gpu_fence *fence)
{
        struct virtio_gpu_resource_create_2d *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;

        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));
        vbuf->objs = objs;

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_CREATE_2D);
        cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
        cmd_p->format = cpu_to_le32(params->format);
        cmd_p->width = cpu_to_le32(params->width);
        cmd_p->height = cpu_to_le32(params->height);

        virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
        bo->created = true;
}

static void virtio_gpu_cmd_unref_cb(struct virtio_gpu_device *vgdev,
                                    struct virtio_gpu_vbuffer *vbuf)
{
        struct virtio_gpu_object *bo;

        bo = vbuf->resp_cb_data;
        vbuf->resp_cb_data = NULL;

        virtio_gpu_cleanup_object(bo);
}

void virtio_gpu_cmd_unref_resource(struct virtio_gpu_device *vgdev,
                                   struct virtio_gpu_object *bo)
{
        struct virtio_gpu_resource_unref *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;

        cmd_p = virtio_gpu_alloc_cmd_cb(vgdev, &vbuf, sizeof(*cmd_p),
                                        virtio_gpu_cmd_unref_cb);
        memset(cmd_p, 0, sizeof(*cmd_p));

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_UNREF);
        cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);

        vbuf->resp_cb_data = bo;
        virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_set_scanout(struct virtio_gpu_device *vgdev,
                                uint32_t scanout_id, uint32_t resource_id,
                                uint32_t width, uint32_t height,
                                uint32_t x, uint32_t y)
{
        struct virtio_gpu_set_scanout *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;

        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_SET_SCANOUT);
        cmd_p->resource_id = cpu_to_le32(resource_id);
        cmd_p->scanout_id = cpu_to_le32(scanout_id);
        cmd_p->r.width = cpu_to_le32(width);
        cmd_p->r.height = cpu_to_le32(height);
        cmd_p->r.x = cpu_to_le32(x);
        cmd_p->r.y = cpu_to_le32(y);

        virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_resource_flush(struct virtio_gpu_device *vgdev,
                                   uint32_t resource_id,
                                   uint32_t x, uint32_t y,
                                   uint32_t width, uint32_t height)
{
        struct virtio_gpu_resource_flush *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;

        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_FLUSH);
        cmd_p->resource_id = cpu_to_le32(resource_id);
        cmd_p->r.width = cpu_to_le32(width);
        cmd_p->r.height = cpu_to_le32(height);
        cmd_p->r.x = cpu_to_le32(x);
        cmd_p->r.y = cpu_to_le32(y);

        virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev,
                                        uint64_t offset,
                                        uint32_t width, uint32_t height,
                                        uint32_t x, uint32_t y,
                                        struct virtio_gpu_object_array *objs,
                                        struct virtio_gpu_fence *fence)
{
        struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
        struct virtio_gpu_transfer_to_host_2d *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;
        bool use_dma_api = !virtio_has_dma_quirk(vgdev->vdev);
        struct virtio_gpu_object_shmem *shmem = to_virtio_gpu_shmem(bo);

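        /* Flush CPU writes to the backing pages before the host reads them. */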
        if (use_dma_api)
                dma_sync_sg_for_device(vgdev->vdev->dev.parent,
                                       shmem->pages->sgl, shmem->pages->nents,
                                       DMA_TO_DEVICE);

        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));
        vbuf->objs = objs;

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D);
        cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
        cmd_p->offset = cpu_to_le64(offset);
        cmd_p->r.width = cpu_to_le32(width);
        cmd_p->r.height = cpu_to_le32(height);
        cmd_p->r.x = cpu_to_le32(x);
        cmd_p->r.y = cpu_to_le32(y);

        virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
}

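/*
 * Note: ents becomes the vbuf's data payload (vbuf->data_buf) and is
 * kvfree'd together with the vbuf in free_vbuf(); the caller must not
 * free it.
 */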
static void
virtio_gpu_cmd_resource_attach_backing(struct virtio_gpu_device *vgdev,
                                       uint32_t resource_id,
                                       struct virtio_gpu_mem_entry *ents,
                                       uint32_t nents,
                                       struct virtio_gpu_fence *fence)
{
        struct virtio_gpu_resource_attach_backing *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;

        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING);
        cmd_p->resource_id = cpu_to_le32(resource_id);
        cmd_p->nr_entries = cpu_to_le32(nents);

        vbuf->data_buf = ents;
        vbuf->data_size = sizeof(*ents) * nents;

        virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
}

static void virtio_gpu_cmd_get_display_info_cb(struct virtio_gpu_device *vgdev,
                                               struct virtio_gpu_vbuffer *vbuf)
{
        struct virtio_gpu_resp_display_info *resp =
                (struct virtio_gpu_resp_display_info *)vbuf->resp_buf;
        int i;

        spin_lock(&vgdev->display_info_lock);
        for (i = 0; i < vgdev->num_scanouts; i++) {
                vgdev->outputs[i].info = resp->pmodes[i];
                if (resp->pmodes[i].enabled) {
                        DRM_DEBUG("output %d: %dx%d+%d+%d\n", i,
                                  le32_to_cpu(resp->pmodes[i].r.width),
                                  le32_to_cpu(resp->pmodes[i].r.height),
                                  le32_to_cpu(resp->pmodes[i].r.x),
                                  le32_to_cpu(resp->pmodes[i].r.y));
                } else {
                        DRM_DEBUG("output %d: disabled\n", i);
                }
        }

        vgdev->display_info_pending = false;
        spin_unlock(&vgdev->display_info_lock);
        wake_up(&vgdev->resp_wq);

        if (!drm_helper_hpd_irq_event(vgdev->ddev))
                drm_kms_helper_hotplug_event(vgdev->ddev);
}

static void virtio_gpu_cmd_get_capset_info_cb(struct virtio_gpu_device *vgdev,
                                              struct virtio_gpu_vbuffer *vbuf)
{
        struct virtio_gpu_get_capset_info *cmd =
                (struct virtio_gpu_get_capset_info *)vbuf->buf;
        struct virtio_gpu_resp_capset_info *resp =
                (struct virtio_gpu_resp_capset_info *)vbuf->resp_buf;
        int i = le32_to_cpu(cmd->capset_index);

        spin_lock(&vgdev->display_info_lock);
        vgdev->capsets[i].id = le32_to_cpu(resp->capset_id);
        vgdev->capsets[i].max_version = le32_to_cpu(resp->capset_max_version);
        vgdev->capsets[i].max_size = le32_to_cpu(resp->capset_max_size);
        spin_unlock(&vgdev->display_info_lock);
        wake_up(&vgdev->resp_wq);
}

static void virtio_gpu_cmd_capset_cb(struct virtio_gpu_device *vgdev,
                                     struct virtio_gpu_vbuffer *vbuf)
{
        struct virtio_gpu_get_capset *cmd =
                (struct virtio_gpu_get_capset *)vbuf->buf;
        struct virtio_gpu_resp_capset *resp =
                (struct virtio_gpu_resp_capset *)vbuf->resp_buf;
        struct virtio_gpu_drv_cap_cache *cache_ent;

        spin_lock(&vgdev->display_info_lock);
        list_for_each_entry(cache_ent, &vgdev->cap_cache, head) {
                if (cache_ent->version == le32_to_cpu(cmd->capset_version) &&
                    cache_ent->id == le32_to_cpu(cmd->capset_id)) {
                        memcpy(cache_ent->caps_cache, resp->capset_data,
                               cache_ent->size);
                        /* Copy must occur before is_valid is signalled. */
                        smp_wmb();
                        atomic_set(&cache_ent->is_valid, 1);
                        break;
                }
        }
        spin_unlock(&vgdev->display_info_lock);
        wake_up_all(&vgdev->resp_wq);
}

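/*
 * Block-fetch callback for drm_do_get_edid(): copy one EDID block out of
 * the response buffer the host has already filled in.
 */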
static int virtio_get_edid_block(void *data, u8 *buf,
                                 unsigned int block, size_t len)
{
        struct virtio_gpu_resp_edid *resp = data;
        size_t start = block * EDID_LENGTH;

        if (start + len > le32_to_cpu(resp->size))
                return -1;
        memcpy(buf, resp->edid + start, len);
        return 0;
}

static void virtio_gpu_cmd_get_edid_cb(struct virtio_gpu_device *vgdev,
                                       struct virtio_gpu_vbuffer *vbuf)
{
        struct virtio_gpu_cmd_get_edid *cmd =
                (struct virtio_gpu_cmd_get_edid *)vbuf->buf;
        struct virtio_gpu_resp_edid *resp =
                (struct virtio_gpu_resp_edid *)vbuf->resp_buf;
        uint32_t scanout = le32_to_cpu(cmd->scanout);
        struct virtio_gpu_output *output;
        struct edid *new_edid, *old_edid;

        if (scanout >= vgdev->num_scanouts)
                return;
        output = vgdev->outputs + scanout;

        new_edid = drm_do_get_edid(&output->conn, virtio_get_edid_block, resp);
        drm_connector_update_edid_property(&output->conn, new_edid);

        spin_lock(&vgdev->display_info_lock);
        old_edid = output->edid;
        output->edid = new_edid;
        spin_unlock(&vgdev->display_info_lock);

        kfree(old_edid);
        wake_up(&vgdev->resp_wq);
}

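/*
 * The query commands below are asynchronous: they queue a command with a
 * dedicated response buffer and return immediately.  The _cb handlers
 * above run from the dequeue work once the host answers, and waiters are
 * woken through resp_wq (display_info_pending flags an in-flight query).
 */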
int virtio_gpu_cmd_get_display_info(struct virtio_gpu_device *vgdev)
{
        struct virtio_gpu_ctrl_hdr *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;
        void *resp_buf;

        resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_display_info),
                           GFP_KERNEL);
        if (!resp_buf)
                return -ENOMEM;

        cmd_p = virtio_gpu_alloc_cmd_resp
                (vgdev, &virtio_gpu_cmd_get_display_info_cb, &vbuf,
                 sizeof(*cmd_p), sizeof(struct virtio_gpu_resp_display_info),
                 resp_buf);
        memset(cmd_p, 0, sizeof(*cmd_p));

        vgdev->display_info_pending = true;
        cmd_p->type = cpu_to_le32(VIRTIO_GPU_CMD_GET_DISPLAY_INFO);
        virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
        return 0;
}

int virtio_gpu_cmd_get_capset_info(struct virtio_gpu_device *vgdev, int idx)
{
        struct virtio_gpu_get_capset_info *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;
        void *resp_buf;

        resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_capset_info),
                           GFP_KERNEL);
        if (!resp_buf)
                return -ENOMEM;

        cmd_p = virtio_gpu_alloc_cmd_resp
                (vgdev, &virtio_gpu_cmd_get_capset_info_cb, &vbuf,
                 sizeof(*cmd_p), sizeof(struct virtio_gpu_resp_capset_info),
                 resp_buf);
        memset(cmd_p, 0, sizeof(*cmd_p));

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_GET_CAPSET_INFO);
        cmd_p->capset_index = cpu_to_le32(idx);
        virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
        return 0;
}

int virtio_gpu_cmd_get_capset(struct virtio_gpu_device *vgdev,
                              int idx, int version,
                              struct virtio_gpu_drv_cap_cache **cache_p)
{
        struct virtio_gpu_get_capset *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;
        int max_size;
        struct virtio_gpu_drv_cap_cache *cache_ent;
        struct virtio_gpu_drv_cap_cache *search_ent;
        void *resp_buf;

        *cache_p = NULL;

        if (idx >= vgdev->num_capsets)
                return -EINVAL;

        if (version > vgdev->capsets[idx].max_version)
                return -EINVAL;

        cache_ent = kzalloc(sizeof(*cache_ent), GFP_KERNEL);
        if (!cache_ent)
                return -ENOMEM;

        max_size = vgdev->capsets[idx].max_size;
        cache_ent->caps_cache = kmalloc(max_size, GFP_KERNEL);
        if (!cache_ent->caps_cache) {
                kfree(cache_ent);
                return -ENOMEM;
        }

        resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_capset) + max_size,
                           GFP_KERNEL);
        if (!resp_buf) {
                kfree(cache_ent->caps_cache);
                kfree(cache_ent);
                return -ENOMEM;
        }

        cache_ent->version = version;
        cache_ent->id = vgdev->capsets[idx].id;
        atomic_set(&cache_ent->is_valid, 0);
        cache_ent->size = max_size;
        spin_lock(&vgdev->display_info_lock);
        /* Search while under lock in case it was added by another task. */
        list_for_each_entry(search_ent, &vgdev->cap_cache, head) {
                if (search_ent->id == vgdev->capsets[idx].id &&
                    search_ent->version == version) {
                        *cache_p = search_ent;
                        break;
                }
        }
        if (!*cache_p)
                list_add_tail(&cache_ent->head, &vgdev->cap_cache);
        spin_unlock(&vgdev->display_info_lock);

        if (*cache_p) {
                /* Entry was found, so free everything that was just created. */
                kfree(resp_buf);
                kfree(cache_ent->caps_cache);
                kfree(cache_ent);
                return 0;
        }

        cmd_p = virtio_gpu_alloc_cmd_resp
                (vgdev, &virtio_gpu_cmd_capset_cb, &vbuf, sizeof(*cmd_p),
                 sizeof(struct virtio_gpu_resp_capset) + max_size,
                 resp_buf);
        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_GET_CAPSET);
        cmd_p->capset_id = cpu_to_le32(vgdev->capsets[idx].id);
        cmd_p->capset_version = cpu_to_le32(version);
        *cache_p = cache_ent;
        virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);

        return 0;
}

int virtio_gpu_cmd_get_edids(struct virtio_gpu_device *vgdev)
{
        struct virtio_gpu_cmd_get_edid *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;
        void *resp_buf;
        int scanout;

        if (WARN_ON(!vgdev->has_edid))
                return -EINVAL;

        for (scanout = 0; scanout < vgdev->num_scanouts; scanout++) {
                resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_edid),
                                   GFP_KERNEL);
                if (!resp_buf)
                        return -ENOMEM;

                cmd_p = virtio_gpu_alloc_cmd_resp
                        (vgdev, &virtio_gpu_cmd_get_edid_cb, &vbuf,
                         sizeof(*cmd_p), sizeof(struct virtio_gpu_resp_edid),
                         resp_buf);
                cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_GET_EDID);
                cmd_p->scanout = cpu_to_le32(scanout);
                virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
        }

        return 0;
}

void virtio_gpu_cmd_context_create(struct virtio_gpu_device *vgdev, uint32_t id,
                                   uint32_t nlen, const char *name)
{
        struct virtio_gpu_ctx_create *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;

        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_CREATE);
        cmd_p->hdr.ctx_id = cpu_to_le32(id);
        cmd_p->nlen = cpu_to_le32(nlen);
        strncpy(cmd_p->debug_name, name, sizeof(cmd_p->debug_name) - 1);
        cmd_p->debug_name[sizeof(cmd_p->debug_name) - 1] = 0;
        virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_context_destroy(struct virtio_gpu_device *vgdev,
                                    uint32_t id)
{
        struct virtio_gpu_ctx_destroy *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;

        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_DESTROY);
        cmd_p->hdr.ctx_id = cpu_to_le32(id);
        virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_context_attach_resource(struct virtio_gpu_device *vgdev,
                                            uint32_t ctx_id,
                                            struct virtio_gpu_object_array *objs)
{
        struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
        struct virtio_gpu_ctx_resource *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;

        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));
        vbuf->objs = objs;

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_ATTACH_RESOURCE);
        cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
        cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
        virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_context_detach_resource(struct virtio_gpu_device *vgdev,
                                            uint32_t ctx_id,
                                            struct virtio_gpu_object_array *objs)
{
        struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
        struct virtio_gpu_ctx_resource *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;

        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));
        vbuf->objs = objs;

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_DETACH_RESOURCE);
        cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
        cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
        virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void
virtio_gpu_cmd_resource_create_3d(struct virtio_gpu_device *vgdev,
                                  struct virtio_gpu_object *bo,
                                  struct virtio_gpu_object_params *params,
                                  struct virtio_gpu_object_array *objs,
                                  struct virtio_gpu_fence *fence)
{
        struct virtio_gpu_resource_create_3d *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;

        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));
        vbuf->objs = objs;

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_CREATE_3D);
        cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
        cmd_p->format = cpu_to_le32(params->format);
        cmd_p->width = cpu_to_le32(params->width);
        cmd_p->height = cpu_to_le32(params->height);

        cmd_p->target = cpu_to_le32(params->target);
        cmd_p->bind = cpu_to_le32(params->bind);
        cmd_p->depth = cpu_to_le32(params->depth);
        cmd_p->array_size = cpu_to_le32(params->array_size);
        cmd_p->last_level = cpu_to_le32(params->last_level);
        cmd_p->nr_samples = cpu_to_le32(params->nr_samples);
        cmd_p->flags = cpu_to_le32(params->flags);

        virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);

        bo->created = true;
}

void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev,
                                        uint32_t ctx_id,
                                        uint64_t offset, uint32_t level,
                                        struct drm_virtgpu_3d_box *box,
                                        struct virtio_gpu_object_array *objs,
                                        struct virtio_gpu_fence *fence)
{
        struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
        struct virtio_gpu_transfer_host_3d *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;
        bool use_dma_api = !virtio_has_dma_quirk(vgdev->vdev);
        struct virtio_gpu_object_shmem *shmem = to_virtio_gpu_shmem(bo);

        if (use_dma_api)
                dma_sync_sg_for_device(vgdev->vdev->dev.parent,
                                       shmem->pages->sgl, shmem->pages->nents,
                                       DMA_TO_DEVICE);

        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));

        vbuf->objs = objs;

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_TO_HOST_3D);
        cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
        cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
        convert_to_hw_box(&cmd_p->box, box);
        cmd_p->offset = cpu_to_le64(offset);
        cmd_p->level = cpu_to_le32(level);

        virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
}

void virtio_gpu_cmd_transfer_from_host_3d(struct virtio_gpu_device *vgdev,
                                          uint32_t ctx_id,
                                          uint64_t offset, uint32_t level,
                                          struct drm_virtgpu_3d_box *box,
                                          struct virtio_gpu_object_array *objs,
                                          struct virtio_gpu_fence *fence)
{
        struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
        struct virtio_gpu_transfer_host_3d *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;

        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));

        vbuf->objs = objs;

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_FROM_HOST_3D);
        cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
        cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
        convert_to_hw_box(&cmd_p->box, box);
        cmd_p->offset = cpu_to_le64(offset);
        cmd_p->level = cpu_to_le32(level);

        virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
}

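/*
 * Submit a command stream for execution.  Ownership of @data passes to
 * the vbuf (as vbuf->data_buf); it is kvfree'd in free_vbuf() once the
 * host is done with it.
 */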
void virtio_gpu_cmd_submit(struct virtio_gpu_device *vgdev,
                           void *data, uint32_t data_size,
                           uint32_t ctx_id,
                           struct virtio_gpu_object_array *objs,
                           struct virtio_gpu_fence *fence)
{
        struct virtio_gpu_cmd_submit *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;

        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));

        vbuf->data_buf = data;
        vbuf->data_size = data_size;
        vbuf->objs = objs;

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_SUBMIT_3D);
        cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
        cmd_p->size = cpu_to_le32(data_size);

        virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
}

void virtio_gpu_object_attach(struct virtio_gpu_device *vgdev,
                              struct virtio_gpu_object *obj,
                              struct virtio_gpu_mem_entry *ents,
                              unsigned int nents)
{
        virtio_gpu_cmd_resource_attach_backing(vgdev, obj->hw_res_handle,
                                               ents, nents, NULL);
}

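/*
 * Cursor updates travel over the dedicated cursor queue and are
 * fire-and-forget: no response payload is read back (resp_size is 0 in
 * virtio_gpu_alloc_cursor(), and only an out-sg is queued).
 */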
void virtio_gpu_cursor_ping(struct virtio_gpu_device *vgdev,
                            struct virtio_gpu_output *output)
{
        struct virtio_gpu_vbuffer *vbuf;
        struct virtio_gpu_update_cursor *cur_p;

        output->cursor.pos.scanout_id = cpu_to_le32(output->index);
        cur_p = virtio_gpu_alloc_cursor(vgdev, &vbuf);
        /* don't write through an ERR_PTR if allocation failed */
        if (IS_ERR(cur_p))
                return;
        memcpy(cur_p, &output->cursor, sizeof(output->cursor));
        virtio_gpu_queue_cursor(vgdev, vbuf);
}