linux/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
/**************************************************************************
 *
 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_drv.h"
#include <drm/drmP.h>
#include <drm/ttm/ttm_placement.h>

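/**
 * vmw_fifo_have_3d - Check whether the device supports 3D.
 *
 * @dev_priv: Pointer to the device private struct.
 *
 * Returns true only if the device has an extended FIFO, reports a 3D
 * hardware version of at least SVGA3D_HWVERSION_WS8_B1, and supports
 * the Screen Object path, which surfaces depend on.
 */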
bool vmw_fifo_have_3d(struct vmw_private *dev_priv)
{
        __le32 __iomem *fifo_mem = dev_priv->mmio_virt;
        uint32_t fifo_min, hwversion;
        const struct vmw_fifo_state *fifo = &dev_priv->fifo;

        if (!(dev_priv->capabilities & SVGA_CAP_EXTENDED_FIFO))
                return false;

        fifo_min = ioread32(fifo_mem + SVGA_FIFO_MIN);
        if (fifo_min <= SVGA_FIFO_3D_HWVERSION * sizeof(unsigned int))
                return false;

        hwversion = ioread32(fifo_mem +
                             ((fifo->capabilities &
                               SVGA_FIFO_CAP_3D_HWVERSION_REVISED) ?
                              SVGA_FIFO_3D_HWVERSION_REVISED :
                              SVGA_FIFO_3D_HWVERSION));

        if (hwversion == 0)
                return false;

        if (hwversion < SVGA3D_HWVERSION_WS8_B1)
                return false;

        /* Non-Screen Object path does not support surfaces */
        if (!dev_priv->sou_priv)
                return false;

        return true;
}

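/**
 * vmw_fifo_have_pitchlock - Check whether the device supports pitchlock.
 *
 * @dev_priv: Pointer to the device private struct.
 *
 * Returns true if the extended FIFO is present and advertises
 * SVGA_FIFO_CAP_PITCHLOCK.
 */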
bool vmw_fifo_have_pitchlock(struct vmw_private *dev_priv)
{
        __le32 __iomem *fifo_mem = dev_priv->mmio_virt;
        uint32_t caps;

        if (!(dev_priv->capabilities & SVGA_CAP_EXTENDED_FIFO))
                return false;

        caps = ioread32(fifo_mem + SVGA_FIFO_CAPABILITIES);
        if (caps & SVGA_FIFO_CAP_PITCHLOCK)
                return true;

        return false;
}

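/**
 * vmw_fifo_init - Initialize the command FIFO and enable the device.
 *
 * @dev_priv: Pointer to the device private struct.
 * @fifo: The FIFO state to initialize.
 *
 * Allocates the static bounce buffer, saves the register state needed
 * to restore the device in vmw_fifo_release(), enables the SVGA device,
 * sets up the FIFO bounds and read/write pointers, and finally emits an
 * initial fence.
 */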
int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
{
        __le32 __iomem *fifo_mem = dev_priv->mmio_virt;
        uint32_t max;
        uint32_t min;
        uint32_t dummy;

        fifo->static_buffer_size = VMWGFX_FIFO_STATIC_SIZE;
        fifo->static_buffer = vmalloc(fifo->static_buffer_size);
        if (unlikely(fifo->static_buffer == NULL))
                return -ENOMEM;

        fifo->dynamic_buffer = NULL;
        fifo->reserved_size = 0;
        fifo->using_bounce_buffer = false;

        mutex_init(&fifo->fifo_mutex);
        init_rwsem(&fifo->rwsem);

        /*
         * Allow mapping the first page read-only to user-space.
         */

        DRM_INFO("width %d\n", vmw_read(dev_priv, SVGA_REG_WIDTH));
        DRM_INFO("height %d\n", vmw_read(dev_priv, SVGA_REG_HEIGHT));
        DRM_INFO("bpp %d\n", vmw_read(dev_priv, SVGA_REG_BITS_PER_PIXEL));

        mutex_lock(&dev_priv->hw_mutex);
        dev_priv->enable_state = vmw_read(dev_priv, SVGA_REG_ENABLE);
        dev_priv->config_done_state = vmw_read(dev_priv, SVGA_REG_CONFIG_DONE);
        dev_priv->traces_state = vmw_read(dev_priv, SVGA_REG_TRACES);
        vmw_write(dev_priv, SVGA_REG_ENABLE, 1);

        min = 4;
        if (dev_priv->capabilities & SVGA_CAP_EXTENDED_FIFO)
                min = vmw_read(dev_priv, SVGA_REG_MEM_REGS);
        min <<= 2;

        if (min < PAGE_SIZE)
                min = PAGE_SIZE;

        iowrite32(min, fifo_mem + SVGA_FIFO_MIN);
        iowrite32(dev_priv->mmio_size, fifo_mem + SVGA_FIFO_MAX);
        wmb();
        iowrite32(min, fifo_mem + SVGA_FIFO_NEXT_CMD);
        iowrite32(min, fifo_mem + SVGA_FIFO_STOP);
        iowrite32(0, fifo_mem + SVGA_FIFO_BUSY);
        mb();

        vmw_write(dev_priv, SVGA_REG_CONFIG_DONE, 1);
        mutex_unlock(&dev_priv->hw_mutex);

        max = ioread32(fifo_mem + SVGA_FIFO_MAX);
        min = ioread32(fifo_mem + SVGA_FIFO_MIN);
        fifo->capabilities = ioread32(fifo_mem + SVGA_FIFO_CAPABILITIES);

        DRM_INFO("Fifo max 0x%08x min 0x%08x cap 0x%08x\n",
                 (unsigned int) max,
                 (unsigned int) min,
                 (unsigned int) fifo->capabilities);

        atomic_set(&dev_priv->marker_seq, dev_priv->last_read_seqno);
        iowrite32(dev_priv->last_read_seqno, fifo_mem + SVGA_FIFO_FENCE);
        vmw_marker_queue_init(&fifo->marker_queue);
        return vmw_fifo_send_fence(dev_priv, &dummy);
}

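/**
 * vmw_fifo_ping_host - Ask the host to process pending FIFO commands.
 *
 * @dev_priv: Pointer to the device private struct.
 * @reason: SVGA_SYNC_* reason code written to the sync register.
 *
 * Only writes the sync register when the FIFO is not already marked
 * busy, avoiding redundant notifications to the host.
 */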
void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason)
{
        __le32 __iomem *fifo_mem = dev_priv->mmio_virt;

        mutex_lock(&dev_priv->hw_mutex);

        if (unlikely(ioread32(fifo_mem + SVGA_FIFO_BUSY) == 0)) {
                iowrite32(1, fifo_mem + SVGA_FIFO_BUSY);
                vmw_write(dev_priv, SVGA_REG_SYNC, reason);
        }

        mutex_unlock(&dev_priv->hw_mutex);
}

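/**
 * vmw_fifo_release - Drain the FIFO and restore saved device state.
 *
 * @dev_priv: Pointer to the device private struct.
 * @fifo: The FIFO state to take down.
 *
 * Waits for the device to go idle, records the last read seqno,
 * restores the register state saved in vmw_fifo_init(), and frees the
 * bounce buffers.
 */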
void vmw_fifo_release(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
{
        __le32 __iomem *fifo_mem = dev_priv->mmio_virt;

        mutex_lock(&dev_priv->hw_mutex);

        while (vmw_read(dev_priv, SVGA_REG_BUSY) != 0)
                vmw_write(dev_priv, SVGA_REG_SYNC, SVGA_SYNC_GENERIC);

        dev_priv->last_read_seqno = ioread32(fifo_mem + SVGA_FIFO_FENCE);

        vmw_write(dev_priv, SVGA_REG_CONFIG_DONE,
                  dev_priv->config_done_state);
        vmw_write(dev_priv, SVGA_REG_ENABLE,
                  dev_priv->enable_state);
        vmw_write(dev_priv, SVGA_REG_TRACES,
                  dev_priv->traces_state);

        mutex_unlock(&dev_priv->hw_mutex);
        vmw_marker_queue_takedown(&fifo->marker_queue);

        if (likely(fifo->static_buffer != NULL)) {
                vfree(fifo->static_buffer);
                fifo->static_buffer = NULL;
        }

        if (likely(fifo->dynamic_buffer != NULL)) {
                vfree(fifo->dynamic_buffer);
                fifo->dynamic_buffer = NULL;
        }
}

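/**
 * vmw_fifo_is_full - Check whether @bytes of FIFO space are unavailable.
 *
 * @dev_priv: Pointer to the device private struct.
 * @bytes: Number of bytes needed.
 *
 * Returns true if the free space between the write pointer (NEXT_CMD)
 * and the read pointer (STOP), accounting for wrap-around, is at most
 * @bytes.
 */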
static bool vmw_fifo_is_full(struct vmw_private *dev_priv, uint32_t bytes)
{
        __le32 __iomem *fifo_mem = dev_priv->mmio_virt;
        uint32_t max = ioread32(fifo_mem + SVGA_FIFO_MAX);
        uint32_t next_cmd = ioread32(fifo_mem + SVGA_FIFO_NEXT_CMD);
        uint32_t min = ioread32(fifo_mem + SVGA_FIFO_MIN);
        uint32_t stop = ioread32(fifo_mem + SVGA_FIFO_STOP);

        return ((max - next_cmd) + (stop - min) <= bytes);
}

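/**
 * vmw_fifo_wait_noirq - Poll for FIFO space without using interrupts.
 *
 * @dev_priv: Pointer to the device private struct.
 * @bytes: Number of bytes to wait for.
 * @interruptible: Whether the wait may be interrupted by a signal.
 * @timeout: Timeout in jiffies.
 *
 * Fallback used when the device lacks SVGA_CAP_IRQMASK. Returns 0 on
 * success, -EBUSY on timeout or -ERESTARTSYS on signal.
 */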
static int vmw_fifo_wait_noirq(struct vmw_private *dev_priv,
                               uint32_t bytes, bool interruptible,
                               unsigned long timeout)
{
        int ret = 0;
        unsigned long end_jiffies = jiffies + timeout;
        DEFINE_WAIT(__wait);

        DRM_INFO("Fifo wait noirq.\n");

        for (;;) {
                prepare_to_wait(&dev_priv->fifo_queue, &__wait,
                                (interruptible) ?
                                TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE);
                if (!vmw_fifo_is_full(dev_priv, bytes))
                        break;
                if (time_after_eq(jiffies, end_jiffies)) {
                        ret = -EBUSY;
                        DRM_ERROR("SVGA device lockup.\n");
                        break;
                }
                schedule_timeout(1);
                if (interruptible && signal_pending(current)) {
                        ret = -ERESTARTSYS;
                        break;
                }
        }
        finish_wait(&dev_priv->fifo_queue, &__wait);
        wake_up_all(&dev_priv->fifo_queue);
        DRM_INFO("Fifo noirq exit.\n");
        return ret;
}

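/**
 * vmw_fifo_wait - Wait for FIFO space, using the FIFO progress irq
 * when available.
 *
 * @dev_priv: Pointer to the device private struct.
 * @bytes: Number of bytes to wait for.
 * @interruptible: Whether the wait may be interrupted by a signal.
 * @timeout: Timeout in jiffies.
 *
 * Pings the host with SVGA_SYNC_FIFOFULL, unmasks the FIFO progress
 * irq while there are waiters, and masks it again when the last waiter
 * leaves. Returns 0 on success, -EBUSY on timeout or -ERESTARTSYS on
 * signal.
 */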
static int vmw_fifo_wait(struct vmw_private *dev_priv,
                         uint32_t bytes, bool interruptible,
                         unsigned long timeout)
{
        long ret = 1L;
        unsigned long irq_flags;

        if (likely(!vmw_fifo_is_full(dev_priv, bytes)))
                return 0;

        vmw_fifo_ping_host(dev_priv, SVGA_SYNC_FIFOFULL);
        if (!(dev_priv->capabilities & SVGA_CAP_IRQMASK))
                return vmw_fifo_wait_noirq(dev_priv, bytes,
                                           interruptible, timeout);

        mutex_lock(&dev_priv->hw_mutex);
        if (atomic_add_return(1, &dev_priv->fifo_queue_waiters) > 0) {
                spin_lock_irqsave(&dev_priv->irq_lock, irq_flags);
                outl(SVGA_IRQFLAG_FIFO_PROGRESS,
                     dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
                dev_priv->irq_mask |= SVGA_IRQFLAG_FIFO_PROGRESS;
                vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
                spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
        }
        mutex_unlock(&dev_priv->hw_mutex);

        if (interruptible)
                ret = wait_event_interruptible_timeout
                    (dev_priv->fifo_queue,
                     !vmw_fifo_is_full(dev_priv, bytes), timeout);
        else
                ret = wait_event_timeout
                    (dev_priv->fifo_queue,
                     !vmw_fifo_is_full(dev_priv, bytes), timeout);

        if (unlikely(ret == 0))
                ret = -EBUSY;
        else if (likely(ret > 0))
                ret = 0;

        mutex_lock(&dev_priv->hw_mutex);
        if (atomic_dec_and_test(&dev_priv->fifo_queue_waiters)) {
                spin_lock_irqsave(&dev_priv->irq_lock, irq_flags);
                dev_priv->irq_mask &= ~SVGA_IRQFLAG_FIFO_PROGRESS;
                vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
                spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
        }
        mutex_unlock(&dev_priv->hw_mutex);

        return ret;
}

/**
 * vmw_fifo_reserve - Reserve @bytes of space in the fifo.
 *
 * This function will return NULL (error) on two conditions:
 * if it times out waiting for fifo space, or if @bytes is larger than
 * the total fifo space.
 *
 * On success, the fifo_mutex is held until the reservation is committed
 * with vmw_fifo_commit().
 *
 * Returns:
 *   Pointer to the fifo, or NULL on error (possible hardware hang).
 */
void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes)
{
        struct vmw_fifo_state *fifo_state = &dev_priv->fifo;
        __le32 __iomem *fifo_mem = dev_priv->mmio_virt;
        uint32_t max;
        uint32_t min;
        uint32_t next_cmd;
        uint32_t reserveable = fifo_state->capabilities & SVGA_FIFO_CAP_RESERVE;
        int ret;

        mutex_lock(&fifo_state->fifo_mutex);
        max = ioread32(fifo_mem + SVGA_FIFO_MAX);
        min = ioread32(fifo_mem + SVGA_FIFO_MIN);
        next_cmd = ioread32(fifo_mem + SVGA_FIFO_NEXT_CMD);

        if (unlikely(bytes >= (max - min)))
                goto out_err;

        BUG_ON(fifo_state->reserved_size != 0);
        BUG_ON(fifo_state->dynamic_buffer != NULL);

        fifo_state->reserved_size = bytes;

        while (1) {
                uint32_t stop = ioread32(fifo_mem + SVGA_FIFO_STOP);
                bool need_bounce = false;
                bool reserve_in_place = false;

                if (next_cmd >= stop) {
                        if (likely((next_cmd + bytes < max ||
                                    (next_cmd + bytes == max && stop > min))))
                                reserve_in_place = true;

                        else if (vmw_fifo_is_full(dev_priv, bytes)) {
                                ret = vmw_fifo_wait(dev_priv, bytes,
                                                    false, 3 * HZ);
                                if (unlikely(ret != 0))
                                        goto out_err;
                        } else
                                need_bounce = true;

                } else {

                        if (likely((next_cmd + bytes < stop)))
                                reserve_in_place = true;
                        else {
                                ret = vmw_fifo_wait(dev_priv, bytes,
                                                    false, 3 * HZ);
                                if (unlikely(ret != 0))
                                        goto out_err;
                        }
                }

                if (reserve_in_place) {
                        if (reserveable || bytes <= sizeof(uint32_t)) {
                                fifo_state->using_bounce_buffer = false;

                                if (reserveable)
                                        iowrite32(bytes, fifo_mem +
                                                  SVGA_FIFO_RESERVED);
                                return fifo_mem + (next_cmd >> 2);
                        } else {
                                need_bounce = true;
                        }
                }

                if (need_bounce) {
                        fifo_state->using_bounce_buffer = true;
                        if (bytes < fifo_state->static_buffer_size)
                                return fifo_state->static_buffer;
                        else {
                                fifo_state->dynamic_buffer = vmalloc(bytes);
                                /*
                                 * Don't leak the mutex or the reservation
                                 * if the allocation fails.
                                 */
                                if (unlikely(fifo_state->dynamic_buffer == NULL))
                                        goto out_err;
                                return fifo_state->dynamic_buffer;
                        }
                }
        }
out_err:
        fifo_state->reserved_size = 0;
        mutex_unlock(&fifo_state->fifo_mutex);
        return NULL;
}

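/**
 * vmw_fifo_res_copy - Copy a bounce buffer into the fifo using the
 * SVGA_FIFO_RESERVED register.
 *
 * Publishes the full command size through SVGA_FIFO_RESERVED, then
 * copies the bounce buffer in at most two chunks, wrapping around from
 * @max to @min, so the write pointer can be advanced once afterwards.
 */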
static void vmw_fifo_res_copy(struct vmw_fifo_state *fifo_state,
                              __le32 __iomem *fifo_mem,
                              uint32_t next_cmd,
                              uint32_t max, uint32_t min, uint32_t bytes)
{
        uint32_t chunk_size = max - next_cmd;
        uint32_t rest;
        uint32_t *buffer = (fifo_state->dynamic_buffer != NULL) ?
            fifo_state->dynamic_buffer : fifo_state->static_buffer;

        if (bytes < chunk_size)
                chunk_size = bytes;

        iowrite32(bytes, fifo_mem + SVGA_FIFO_RESERVED);
        mb();
        memcpy_toio(fifo_mem + (next_cmd >> 2), buffer, chunk_size);
        rest = bytes - chunk_size;
        if (rest)
                memcpy_toio(fifo_mem + (min >> 2), buffer + (chunk_size >> 2),
                            rest);
}

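/**
 * vmw_fifo_slow_copy - Copy a bounce buffer into the fifo one 32-bit
 * word at a time.
 *
 * Used when SVGA_FIFO_CAP_RESERVE is not supported. Each word is
 * written and SVGA_FIFO_NEXT_CMD advanced, wrapping from @max to @min
 * as needed.
 */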
static void vmw_fifo_slow_copy(struct vmw_fifo_state *fifo_state,
                               __le32 __iomem *fifo_mem,
                               uint32_t next_cmd,
                               uint32_t max, uint32_t min, uint32_t bytes)
{
        uint32_t *buffer = (fifo_state->dynamic_buffer != NULL) ?
            fifo_state->dynamic_buffer : fifo_state->static_buffer;

        while (bytes > 0) {
                iowrite32(*buffer++, fifo_mem + (next_cmd >> 2));
                next_cmd += sizeof(uint32_t);
                if (unlikely(next_cmd == max))
                        next_cmd = min;
                mb();
                iowrite32(next_cmd, fifo_mem + SVGA_FIFO_NEXT_CMD);
                mb();
                bytes -= sizeof(uint32_t);
        }
}

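/**
 * vmw_fifo_commit - Commit a previously reserved range of fifo space.
 *
 * @dev_priv: Pointer to the device private struct.
 * @bytes: Number of bytes to commit; must not exceed the reserved size.
 *
 * Copies out the bounce buffer if one was used, advances the write
 * pointer with wrap-around, clears SVGA_FIFO_RESERVED, pings the host,
 * and releases the fifo_mutex taken in vmw_fifo_reserve().
 */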
void vmw_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes)
{
        struct vmw_fifo_state *fifo_state = &dev_priv->fifo;
        __le32 __iomem *fifo_mem = dev_priv->mmio_virt;
        uint32_t next_cmd = ioread32(fifo_mem + SVGA_FIFO_NEXT_CMD);
        uint32_t max = ioread32(fifo_mem + SVGA_FIFO_MAX);
        uint32_t min = ioread32(fifo_mem + SVGA_FIFO_MIN);
        bool reserveable = fifo_state->capabilities & SVGA_FIFO_CAP_RESERVE;

        BUG_ON((bytes & 3) != 0);
        BUG_ON(bytes > fifo_state->reserved_size);

        fifo_state->reserved_size = 0;

        if (fifo_state->using_bounce_buffer) {
                if (reserveable)
                        vmw_fifo_res_copy(fifo_state, fifo_mem,
                                          next_cmd, max, min, bytes);
                else
                        vmw_fifo_slow_copy(fifo_state, fifo_mem,
                                           next_cmd, max, min, bytes);

                if (fifo_state->dynamic_buffer) {
                        vfree(fifo_state->dynamic_buffer);
                        fifo_state->dynamic_buffer = NULL;
                }

        }

        down_write(&fifo_state->rwsem);
        if (fifo_state->using_bounce_buffer || reserveable) {
                next_cmd += bytes;
                if (next_cmd >= max)
                        next_cmd -= max - min;
                mb();
                iowrite32(next_cmd, fifo_mem + SVGA_FIFO_NEXT_CMD);
        }

        if (reserveable)
                iowrite32(0, fifo_mem + SVGA_FIFO_RESERVED);
        mb();
        up_write(&fifo_state->rwsem);
        vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
        mutex_unlock(&fifo_state->fifo_mutex);
}

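/**
 * vmw_fifo_send_fence - Emit a fence command to the fifo.
 *
 * @dev_priv: Pointer to the device private struct.
 * @seqno: Returns the sequence number assigned to the fence.
 *
 * If the fifo lacks SVGA_FIFO_CAP_FENCE, no command is emitted and the
 * waiting code in vmwgfx_irq.c emulates fence completion instead.
 * Returns -ENOMEM if no fifo space could be reserved.
 */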
int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
{
        struct vmw_fifo_state *fifo_state = &dev_priv->fifo;
        struct svga_fifo_cmd_fence *cmd_fence;
        void *fm;
        int ret = 0;
        uint32_t bytes = sizeof(__le32) + sizeof(*cmd_fence);

        fm = vmw_fifo_reserve(dev_priv, bytes);
        if (unlikely(fm == NULL)) {
                *seqno = atomic_read(&dev_priv->marker_seq);
                ret = -ENOMEM;
                (void)vmw_fallback_wait(dev_priv, false, true, *seqno,
                                        false, 3*HZ);
                goto out_err;
        }

        do {
                *seqno = atomic_add_return(1, &dev_priv->marker_seq);
        } while (*seqno == 0);

        if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE)) {

                /*
                 * Don't request hardware to send a fence. The
                 * waiting code in vmwgfx_irq.c will emulate this.
                 */

                vmw_fifo_commit(dev_priv, 0);
                return 0;
        }

        *(__le32 *) fm = cpu_to_le32(SVGA_CMD_FENCE);
        cmd_fence = (struct svga_fifo_cmd_fence *)
            ((unsigned long)fm + sizeof(__le32));

        iowrite32(*seqno, &cmd_fence->fence);
        vmw_fifo_commit(dev_priv, bytes);
        (void) vmw_marker_push(&fifo_state->marker_queue, *seqno);
        vmw_update_seqno(dev_priv, fifo_state);

out_err:
        return ret;
}

/**
 * vmw_fifo_emit_dummy_query - emits a dummy query to the fifo.
 *
 * @dev_priv: The device private structure.
 * @cid: The hardware context id used for the query.
 *
 * This function is used to emit a dummy occlusion query with
 * no primitives rendered between query begin and query end.
 * It's used to provide a query barrier, in order to know that when
 * this query is finished, all preceding queries are also finished.
 *
 * A query result structure must have been initialized at the start
 * of the dev_priv->dummy_query_bo buffer object, and that buffer object
 * must be either reserved or pinned when this function is called.
 *
 * Returns -ENOMEM on failure to reserve fifo space.
 */
int vmw_fifo_emit_dummy_query(struct vmw_private *dev_priv,
                              uint32_t cid)
{
        /*
         * A query wait without a preceding query end will
         * actually finish all queries for this cid
         * without writing to the query result structure.
         */

        struct ttm_buffer_object *bo = dev_priv->dummy_query_bo;
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdWaitForQuery body;
        } *cmd;

        cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));

        if (unlikely(cmd == NULL)) {
                DRM_ERROR("Out of fifo space for dummy query.\n");
                return -ENOMEM;
        }

        cmd->header.id = SVGA_3D_CMD_WAIT_FOR_QUERY;
        cmd->header.size = sizeof(cmd->body);
        cmd->body.cid = cid;
        cmd->body.type = SVGA3D_QUERYTYPE_OCCLUSION;

        if (bo->mem.mem_type == TTM_PL_VRAM) {
                cmd->body.guestResult.gmrId = SVGA_GMR_FRAMEBUFFER;
                cmd->body.guestResult.offset = bo->offset;
        } else {
                cmd->body.guestResult.gmrId = bo->mem.start;
                cmd->body.guestResult.offset = 0;
        }

        vmw_fifo_commit(dev_priv, sizeof(*cmd));

        return 0;
}