linux/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
/**************************************************************************
 *
 * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_drv.h"
#include <drm/drmP.h>
#include <drm/ttm/ttm_placement.h>

struct vmw_temp_set_context {
        SVGA3dCmdHeader header;
        SVGA3dCmdDXTempSetContext body;
};

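/**
 * vmw_fifo_have_3d - Check whether the device supports 3D.
 *
 * @dev_priv: Pointer to device private structure.
 *
 * With guest-backed objects, 3D support is queried through the
 * SVGA3D_DEVCAP_3D device capability; otherwise the extended FIFO's
 * 3D hardware version is inspected. Returns true if 3D commands may
 * be submitted.
 */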
bool vmw_fifo_have_3d(struct vmw_private *dev_priv)
{
        u32 *fifo_mem = dev_priv->mmio_virt;
        uint32_t fifo_min, hwversion;
        const struct vmw_fifo_state *fifo = &dev_priv->fifo;

        if (!(dev_priv->capabilities & SVGA_CAP_3D))
                return false;

        if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) {
                uint32_t result;

                if (!dev_priv->has_mob)
                        return false;

                spin_lock(&dev_priv->cap_lock);
                vmw_write(dev_priv, SVGA_REG_DEV_CAP, SVGA3D_DEVCAP_3D);
                result = vmw_read(dev_priv, SVGA_REG_DEV_CAP);
                spin_unlock(&dev_priv->cap_lock);

                return (result != 0);
        }

        if (!(dev_priv->capabilities & SVGA_CAP_EXTENDED_FIFO))
                return false;

        fifo_min = vmw_mmio_read(fifo_mem + SVGA_FIFO_MIN);
        if (fifo_min <= SVGA_FIFO_3D_HWVERSION * sizeof(unsigned int))
                return false;

        hwversion = vmw_mmio_read(fifo_mem +
                                  ((fifo->capabilities &
                                    SVGA_FIFO_CAP_3D_HWVERSION_REVISED) ?
                                   SVGA_FIFO_3D_HWVERSION_REVISED :
                                   SVGA_FIFO_3D_HWVERSION));

        if (hwversion == 0)
                return false;

        if (hwversion < SVGA3D_HWVERSION_WS8_B1)
                return false;

        /* Legacy Display Unit does not support surfaces */
        if (dev_priv->active_display_unit == vmw_du_legacy)
                return false;

        return true;
}

bool vmw_fifo_have_pitchlock(struct vmw_private *dev_priv)
{
        u32 *fifo_mem = dev_priv->mmio_virt;
        uint32_t caps;

        if (!(dev_priv->capabilities & SVGA_CAP_EXTENDED_FIFO))
                return false;

        caps = vmw_mmio_read(fifo_mem + SVGA_FIFO_CAPABILITIES);
        if (caps & SVGA_FIFO_CAP_PITCHLOCK)
                return true;

        return false;
}

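/**
 * vmw_fifo_init - Initialize the command FIFO.
 *
 * @dev_priv: Pointer to device private structure.
 * @fifo: Pointer to the FIFO state to set up.
 *
 * Allocates the static bounce buffer, saves the register state that
 * vmw_fifo_release() later restores, enables the device and writes
 * the FIFO ring bounds before signalling SVGA_REG_CONFIG_DONE.
 */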
int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
{
        u32 *fifo_mem = dev_priv->mmio_virt;
        uint32_t max;
        uint32_t min;

        fifo->dx = false;
        fifo->static_buffer_size = VMWGFX_FIFO_STATIC_SIZE;
        fifo->static_buffer = vmalloc(fifo->static_buffer_size);
        if (unlikely(fifo->static_buffer == NULL))
                return -ENOMEM;

        fifo->dynamic_buffer = NULL;
        fifo->reserved_size = 0;
        fifo->using_bounce_buffer = false;

        mutex_init(&fifo->fifo_mutex);
        init_rwsem(&fifo->rwsem);

        DRM_INFO("width %d\n", vmw_read(dev_priv, SVGA_REG_WIDTH));
        DRM_INFO("height %d\n", vmw_read(dev_priv, SVGA_REG_HEIGHT));
        DRM_INFO("bpp %d\n", vmw_read(dev_priv, SVGA_REG_BITS_PER_PIXEL));

        dev_priv->enable_state = vmw_read(dev_priv, SVGA_REG_ENABLE);
        dev_priv->config_done_state = vmw_read(dev_priv, SVGA_REG_CONFIG_DONE);
        dev_priv->traces_state = vmw_read(dev_priv, SVGA_REG_TRACES);

        vmw_write(dev_priv, SVGA_REG_ENABLE, SVGA_REG_ENABLE_ENABLE |
                  SVGA_REG_ENABLE_HIDE);
        vmw_write(dev_priv, SVGA_REG_TRACES, 0);

        min = 4;
        if (dev_priv->capabilities & SVGA_CAP_EXTENDED_FIFO)
                min = vmw_read(dev_priv, SVGA_REG_MEM_REGS);
        min <<= 2;

        if (min < PAGE_SIZE)
                min = PAGE_SIZE;

        vmw_mmio_write(min, fifo_mem + SVGA_FIFO_MIN);
        vmw_mmio_write(dev_priv->mmio_size, fifo_mem + SVGA_FIFO_MAX);
        wmb();
        vmw_mmio_write(min, fifo_mem + SVGA_FIFO_NEXT_CMD);
        vmw_mmio_write(min, fifo_mem + SVGA_FIFO_STOP);
        vmw_mmio_write(0, fifo_mem + SVGA_FIFO_BUSY);
        mb();

        vmw_write(dev_priv, SVGA_REG_CONFIG_DONE, 1);

        max = vmw_mmio_read(fifo_mem + SVGA_FIFO_MAX);
        min = vmw_mmio_read(fifo_mem + SVGA_FIFO_MIN);
        fifo->capabilities = vmw_mmio_read(fifo_mem + SVGA_FIFO_CAPABILITIES);

        DRM_INFO("Fifo max 0x%08x min 0x%08x cap 0x%08x\n",
                 (unsigned int) max,
                 (unsigned int) min,
                 (unsigned int) fifo->capabilities);

        atomic_set(&dev_priv->marker_seq, dev_priv->last_read_seqno);
        vmw_mmio_write(dev_priv->last_read_seqno, fifo_mem + SVGA_FIFO_FENCE);
        vmw_marker_queue_init(&fifo->marker_queue);

        return 0;
}

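/*
 * A rough sketch of the FIFO memory layout set up above (the register
 * area below SVGA_FIFO_MIN holds the ring bookkeeping itself):
 *
 *      fifo_mem
 *      +------------------------+ 0
 *      | FIFO registers         |  MIN/MAX/NEXT_CMD/STOP, caps, fence, ...
 *      +------------------------+ SVGA_FIFO_MIN
 *      | command ring           |  driver writes at NEXT_CMD,
 *      |                        |  device reads at STOP
 *      +------------------------+ SVGA_FIFO_MAX (mmio_size)
 *
 * NEXT_CMD == STOP means the ring is empty; both pointers wrap from
 * MAX back to MIN.
 */
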
void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason)
{
        u32 *fifo_mem = dev_priv->mmio_virt;

        preempt_disable();
        /*
         * Only ring the doorbell if the device isn't already busy
         * processing the FIFO; the device clears SVGA_FIFO_BUSY again
         * when it goes idle.
         */
        if (cmpxchg(fifo_mem + SVGA_FIFO_BUSY, 0, 1) == 0)
                vmw_write(dev_priv, SVGA_REG_SYNC, reason);
        preempt_enable();
}

void vmw_fifo_release(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
{
        u32 *fifo_mem = dev_priv->mmio_virt;

        vmw_write(dev_priv, SVGA_REG_SYNC, SVGA_SYNC_GENERIC);
        while (vmw_read(dev_priv, SVGA_REG_BUSY) != 0)
                ;

        dev_priv->last_read_seqno = vmw_mmio_read(fifo_mem + SVGA_FIFO_FENCE);

        vmw_write(dev_priv, SVGA_REG_CONFIG_DONE,
                  dev_priv->config_done_state);
        vmw_write(dev_priv, SVGA_REG_ENABLE,
                  dev_priv->enable_state);
        vmw_write(dev_priv, SVGA_REG_TRACES,
                  dev_priv->traces_state);

        vmw_marker_queue_takedown(&fifo->marker_queue);

        if (likely(fifo->static_buffer != NULL)) {
                vfree(fifo->static_buffer);
                fifo->static_buffer = NULL;
        }

        if (likely(fifo->dynamic_buffer != NULL)) {
                vfree(fifo->dynamic_buffer);
                fifo->dynamic_buffer = NULL;
        }
}

static bool vmw_fifo_is_full(struct vmw_private *dev_priv, uint32_t bytes)
{
        u32 *fifo_mem = dev_priv->mmio_virt;
        uint32_t max = vmw_mmio_read(fifo_mem + SVGA_FIFO_MAX);
        uint32_t next_cmd = vmw_mmio_read(fifo_mem + SVGA_FIFO_NEXT_CMD);
        uint32_t min = vmw_mmio_read(fifo_mem + SVGA_FIFO_MIN);
        uint32_t stop = vmw_mmio_read(fifo_mem + SVGA_FIFO_STOP);

        return ((max - next_cmd) + (stop - min) <= bytes);
}

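/*
 * Worked example of the free-space arithmetic above, for the case where
 * the write pointer is ahead of the read pointer: with min = 0x1000,
 * max = 0x10000, next_cmd = 0xf000 and stop = 0x3000, the writable space
 * is (max - next_cmd) + (stop - min) = 0x1000 + 0x2000 = 0x3000 bytes,
 * i.e. the tail of the ring plus the wrapped-around head up to the
 * device's read pointer.
 */
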
static int vmw_fifo_wait_noirq(struct vmw_private *dev_priv,
                               uint32_t bytes, bool interruptible,
                               unsigned long timeout)
{
        int ret = 0;
        unsigned long end_jiffies = jiffies + timeout;
        DEFINE_WAIT(__wait);

        DRM_INFO("Fifo wait noirq.\n");

        for (;;) {
                prepare_to_wait(&dev_priv->fifo_queue, &__wait,
                                (interruptible) ?
                                TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE);
                if (!vmw_fifo_is_full(dev_priv, bytes))
                        break;
                if (time_after_eq(jiffies, end_jiffies)) {
                        ret = -EBUSY;
                        DRM_ERROR("SVGA device lockup.\n");
                        break;
                }
                schedule_timeout(1);
                if (interruptible && signal_pending(current)) {
                        ret = -ERESTARTSYS;
                        break;
                }
        }
        finish_wait(&dev_priv->fifo_queue, &__wait);
        wake_up_all(&dev_priv->fifo_queue);
        DRM_INFO("Fifo noirq exit.\n");
        return ret;
}

static int vmw_fifo_wait(struct vmw_private *dev_priv,
                         uint32_t bytes, bool interruptible,
                         unsigned long timeout)
{
        long ret = 1L;

        if (likely(!vmw_fifo_is_full(dev_priv, bytes)))
                return 0;

        vmw_fifo_ping_host(dev_priv, SVGA_SYNC_FIFOFULL);
        if (!(dev_priv->capabilities & SVGA_CAP_IRQMASK))
                return vmw_fifo_wait_noirq(dev_priv, bytes,
                                           interruptible, timeout);

        vmw_generic_waiter_add(dev_priv, SVGA_IRQFLAG_FIFO_PROGRESS,
                               &dev_priv->fifo_queue_waiters);

        if (interruptible)
                ret = wait_event_interruptible_timeout
                    (dev_priv->fifo_queue,
                     !vmw_fifo_is_full(dev_priv, bytes), timeout);
        else
                ret = wait_event_timeout
                    (dev_priv->fifo_queue,
                     !vmw_fifo_is_full(dev_priv, bytes), timeout);

        if (unlikely(ret == 0))
                ret = -EBUSY;
        else if (likely(ret > 0))
                ret = 0;

        vmw_generic_waiter_remove(dev_priv, SVGA_IRQFLAG_FIFO_PROGRESS,
                                  &dev_priv->fifo_queue_waiters);

        return ret;
}

/**
 * Reserve @bytes number of bytes in the fifo.
 *
 * This function will return NULL (error) on two conditions:
 *  If it times out waiting for fifo space, or if @bytes is larger than the
 *   available fifo space.
 *
 * Returns:
 *   Pointer to the fifo, or NULL on error (possible hardware hang).
 */
static void *vmw_local_fifo_reserve(struct vmw_private *dev_priv,
                                    uint32_t bytes)
{
        struct vmw_fifo_state *fifo_state = &dev_priv->fifo;
        u32 *fifo_mem = dev_priv->mmio_virt;
        uint32_t max;
        uint32_t min;
        uint32_t next_cmd;
        uint32_t reserveable = fifo_state->capabilities & SVGA_FIFO_CAP_RESERVE;
        int ret;

        mutex_lock(&fifo_state->fifo_mutex);
        max = vmw_mmio_read(fifo_mem + SVGA_FIFO_MAX);
        min = vmw_mmio_read(fifo_mem + SVGA_FIFO_MIN);
        next_cmd = vmw_mmio_read(fifo_mem + SVGA_FIFO_NEXT_CMD);

        if (unlikely(bytes >= (max - min)))
                goto out_err;

        BUG_ON(fifo_state->reserved_size != 0);
        BUG_ON(fifo_state->dynamic_buffer != NULL);

        fifo_state->reserved_size = bytes;

        while (1) {
                uint32_t stop = vmw_mmio_read(fifo_mem + SVGA_FIFO_STOP);
                bool need_bounce = false;
                bool reserve_in_place = false;

                if (next_cmd >= stop) {
                        if (likely((next_cmd + bytes < max ||
                                    (next_cmd + bytes == max && stop > min))))
                                reserve_in_place = true;

                        else if (vmw_fifo_is_full(dev_priv, bytes)) {
                                ret = vmw_fifo_wait(dev_priv, bytes,
                                                    false, 3 * HZ);
                                if (unlikely(ret != 0))
                                        goto out_err;
                        } else
                                need_bounce = true;

                } else {

                        if (likely((next_cmd + bytes < stop)))
                                reserve_in_place = true;
                        else {
                                ret = vmw_fifo_wait(dev_priv, bytes,
                                                    false, 3 * HZ);
                                if (unlikely(ret != 0))
                                        goto out_err;
                        }
                }

                if (reserve_in_place) {
                        if (reserveable || bytes <= sizeof(uint32_t)) {
                                fifo_state->using_bounce_buffer = false;

                                if (reserveable)
                                        vmw_mmio_write(bytes, fifo_mem +
                                                       SVGA_FIFO_RESERVED);
                                return (void __force *) (fifo_mem +
                                                         (next_cmd >> 2));
                        } else {
                                need_bounce = true;
                        }
                }

                if (need_bounce) {
                        fifo_state->using_bounce_buffer = true;
                        if (bytes < fifo_state->static_buffer_size)
                                return fifo_state->static_buffer;
                        else {
                                fifo_state->dynamic_buffer = vmalloc(bytes);
                                /*
                                 * Bail out through the error path on
                                 * allocation failure so that the mutex is
                                 * released and reserved_size is reset.
                                 */
                                if (unlikely(fifo_state->dynamic_buffer == NULL))
                                        goto out_err;
                                return fifo_state->dynamic_buffer;
                        }
                }
        }
out_err:
        fifo_state->reserved_size = 0;
        mutex_unlock(&fifo_state->fifo_mutex);

        return NULL;
}

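/*
 * Illustrative sketch of the reserve/commit pairing, modeled on existing
 * callers elsewhere in the driver (the local variables are hypothetical):
 *
 *      struct {
 *              uint32_t cmd;
 *              SVGAFifoCmdUpdate body;
 *      } *cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
 *
 *      if (unlikely(cmd == NULL))
 *              return -ENOMEM;
 *      cmd->cmd = SVGA_CMD_UPDATE;
 *      cmd->body.x = cmd->body.y = 0;
 *      cmd->body.width = width;
 *      cmd->body.height = height;
 *      vmw_fifo_commit(dev_priv, sizeof(*cmd));
 *
 * The returned pointer may be FIFO memory or a bounce buffer; it must
 * not be kept past the matching commit.
 */
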
void *vmw_fifo_reserve_dx(struct vmw_private *dev_priv, uint32_t bytes,
                          int ctx_id)
{
        void *ret;

        if (dev_priv->cman)
                ret = vmw_cmdbuf_reserve(dev_priv->cman, bytes,
                                         ctx_id, false, NULL);
        else if (ctx_id == SVGA3D_INVALID_ID)
                ret = vmw_local_fifo_reserve(dev_priv, bytes);
        else {
                WARN(1, "Command buffer has not been allocated.\n");
                ret = NULL;
        }
        if (IS_ERR_OR_NULL(ret)) {
                DRM_ERROR("Fifo reserve failure of %u bytes.\n",
                          (unsigned) bytes);
                dump_stack();
                return NULL;
        }

        return ret;
}

static void vmw_fifo_res_copy(struct vmw_fifo_state *fifo_state,
                              u32 *fifo_mem,
                              uint32_t next_cmd,
                              uint32_t max, uint32_t min, uint32_t bytes)
{
        uint32_t chunk_size = max - next_cmd;
        uint32_t rest;
        uint32_t *buffer = (fifo_state->dynamic_buffer != NULL) ?
            fifo_state->dynamic_buffer : fifo_state->static_buffer;

        if (bytes < chunk_size)
                chunk_size = bytes;

        vmw_mmio_write(bytes, fifo_mem + SVGA_FIFO_RESERVED);
        mb();
        /* Copy up to the end of the ring, then wrap to its start. */
        memcpy(fifo_mem + (next_cmd >> 2), buffer, chunk_size);
        rest = bytes - chunk_size;
        if (rest)
                memcpy(fifo_mem + (min >> 2), buffer + (chunk_size >> 2), rest);
}

static void vmw_fifo_slow_copy(struct vmw_fifo_state *fifo_state,
                               u32 *fifo_mem,
                               uint32_t next_cmd,
                               uint32_t max, uint32_t min, uint32_t bytes)
{
        uint32_t *buffer = (fifo_state->dynamic_buffer != NULL) ?
            fifo_state->dynamic_buffer : fifo_state->static_buffer;

        /*
         * Without SVGA_FIFO_CAP_RESERVE the device may read concurrently,
         * so copy one word at a time and advance NEXT_CMD after each one.
         */
        while (bytes > 0) {
                vmw_mmio_write(*buffer++, fifo_mem + (next_cmd >> 2));
                next_cmd += sizeof(uint32_t);
                if (unlikely(next_cmd == max))
                        next_cmd = min;
                mb();
                vmw_mmio_write(next_cmd, fifo_mem + SVGA_FIFO_NEXT_CMD);
                mb();
                bytes -= sizeof(uint32_t);
        }
}

static void vmw_local_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes)
{
        struct vmw_fifo_state *fifo_state = &dev_priv->fifo;
        u32 *fifo_mem = dev_priv->mmio_virt;
        uint32_t next_cmd = vmw_mmio_read(fifo_mem + SVGA_FIFO_NEXT_CMD);
        uint32_t max = vmw_mmio_read(fifo_mem + SVGA_FIFO_MAX);
        uint32_t min = vmw_mmio_read(fifo_mem + SVGA_FIFO_MIN);
        bool reserveable = fifo_state->capabilities & SVGA_FIFO_CAP_RESERVE;

        if (fifo_state->dx)
                bytes += sizeof(struct vmw_temp_set_context);

        fifo_state->dx = false;
        BUG_ON((bytes & 3) != 0);
        BUG_ON(bytes > fifo_state->reserved_size);

        fifo_state->reserved_size = 0;

        if (fifo_state->using_bounce_buffer) {
                if (reserveable)
                        vmw_fifo_res_copy(fifo_state, fifo_mem,
                                          next_cmd, max, min, bytes);
                else
                        vmw_fifo_slow_copy(fifo_state, fifo_mem,
                                           next_cmd, max, min, bytes);

                if (fifo_state->dynamic_buffer) {
                        vfree(fifo_state->dynamic_buffer);
                        fifo_state->dynamic_buffer = NULL;
                }

        }

        down_write(&fifo_state->rwsem);
        if (fifo_state->using_bounce_buffer || reserveable) {
                next_cmd += bytes;
                if (next_cmd >= max)
                        next_cmd -= max - min;
                mb();
                vmw_mmio_write(next_cmd, fifo_mem + SVGA_FIFO_NEXT_CMD);
        }

        if (reserveable)
                vmw_mmio_write(0, fifo_mem + SVGA_FIFO_RESERVED);
        mb();
        up_write(&fifo_state->rwsem);
        vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
        mutex_unlock(&fifo_state->fifo_mutex);
}

void vmw_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes)
{
        if (dev_priv->cman)
                vmw_cmdbuf_commit(dev_priv->cman, bytes, NULL, false);
        else
                vmw_local_fifo_commit(dev_priv, bytes);
}


/**
 * vmw_fifo_commit_flush - Commit fifo space and flush any buffered commands.
 *
 * @dev_priv: Pointer to device private structure.
 * @bytes: Number of bytes to commit.
 */
void vmw_fifo_commit_flush(struct vmw_private *dev_priv, uint32_t bytes)
{
        if (dev_priv->cman)
                vmw_cmdbuf_commit(dev_priv->cman, bytes, NULL, true);
        else
                vmw_local_fifo_commit(dev_priv, bytes);
}

/**
 * vmw_fifo_flush - Flush any buffered commands and make sure command processing
 * starts.
 *
 * @dev_priv: Pointer to device private structure.
 * @interruptible: Whether to wait interruptible if function needs to sleep.
 */
int vmw_fifo_flush(struct vmw_private *dev_priv, bool interruptible)
{
        might_sleep();

        if (dev_priv->cman)
                return vmw_cmdbuf_cur_flush(dev_priv->cman, interruptible);
        else
                return 0;
}

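/**
 * vmw_fifo_send_fence - Emit a fence command and return its sequence number.
 *
 * @dev_priv: Pointer to device private structure.
 * @seqno: Outputs the sequence number assigned to the fence.
 *
 * If the FIFO lacks SVGA_FIFO_CAP_FENCE, nothing is emitted and the
 * waiting code in vmwgfx_irq.c emulates fence completion instead.
 * Returns 0 on success, or -ENOMEM if FIFO space could not be reserved.
 */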
int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
{
        struct vmw_fifo_state *fifo_state = &dev_priv->fifo;
        struct svga_fifo_cmd_fence *cmd_fence;
        u32 *fm;
        int ret = 0;
        uint32_t bytes = sizeof(u32) + sizeof(*cmd_fence);

        fm = vmw_fifo_reserve(dev_priv, bytes);
        if (unlikely(fm == NULL)) {
                *seqno = atomic_read(&dev_priv->marker_seq);
                ret = -ENOMEM;
                (void) vmw_fallback_wait(dev_priv, false, true, *seqno,
                                         false, 3 * HZ);
                goto out_err;
        }

        do {
                *seqno = atomic_add_return(1, &dev_priv->marker_seq);
        } while (*seqno == 0);

        if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE)) {

                /*
                 * Don't request hardware to send a fence. The
                 * waiting code in vmwgfx_irq.c will emulate this.
                 */

                vmw_fifo_commit(dev_priv, 0);
                return 0;
        }

        *fm++ = SVGA_CMD_FENCE;
        cmd_fence = (struct svga_fifo_cmd_fence *) fm;
        cmd_fence->fence = *seqno;
        vmw_fifo_commit_flush(dev_priv, bytes);
        (void) vmw_marker_push(&fifo_state->marker_queue, *seqno);
        vmw_update_seqno(dev_priv, fifo_state);

out_err:
        return ret;
}

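/*
 * A minimal sketch of synchronizing on an emitted fence, assuming the
 * vmw_wait_seqno() helper from vmwgfx_irq.c:
 *
 *      uint32_t seqno;
 *      int ret;
 *
 *      ret = vmw_fifo_send_fence(dev_priv, &seqno);
 *      if (likely(ret == 0))
 *              ret = vmw_wait_seqno(dev_priv, false, seqno, true, 3 * HZ);
 */
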
/**
 * vmw_fifo_emit_dummy_legacy_query - emits a dummy query to the fifo using
 * legacy query commands.
 *
 * @dev_priv: The device private structure.
 * @cid: The hardware context id used for the query.
 *
 * See the vmw_fifo_emit_dummy_query documentation.
 */
static int vmw_fifo_emit_dummy_legacy_query(struct vmw_private *dev_priv,
                                            uint32_t cid)
{
        /*
         * A query wait without a preceding query end will
         * actually finish all queries for this cid
         * without writing to the query result structure.
         */

        struct ttm_buffer_object *bo = &dev_priv->dummy_query_bo->base;
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdWaitForQuery body;
        } *cmd;

        cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));

        if (unlikely(cmd == NULL)) {
                DRM_ERROR("Out of fifo space for dummy query.\n");
                return -ENOMEM;
        }

        cmd->header.id = SVGA_3D_CMD_WAIT_FOR_QUERY;
        cmd->header.size = sizeof(cmd->body);
        cmd->body.cid = cid;
        cmd->body.type = SVGA3D_QUERYTYPE_OCCLUSION;

        if (bo->mem.mem_type == TTM_PL_VRAM) {
                cmd->body.guestResult.gmrId = SVGA_GMR_FRAMEBUFFER;
                cmd->body.guestResult.offset = bo->offset;
        } else {
                cmd->body.guestResult.gmrId = bo->mem.start;
                cmd->body.guestResult.offset = 0;
        }

        vmw_fifo_commit(dev_priv, sizeof(*cmd));

        return 0;
}

/**
 * vmw_fifo_emit_dummy_gb_query - emits a dummy query to the fifo using
 * guest-backed resource query commands.
 *
 * @dev_priv: The device private structure.
 * @cid: The hardware context id used for the query.
 *
 * See the vmw_fifo_emit_dummy_query documentation.
 */
static int vmw_fifo_emit_dummy_gb_query(struct vmw_private *dev_priv,
                                        uint32_t cid)
{
        /*
         * A query wait without a preceding query end will
         * actually finish all queries for this cid
         * without writing to the query result structure.
         */

        struct ttm_buffer_object *bo = &dev_priv->dummy_query_bo->base;
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdWaitForGBQuery body;
        } *cmd;

        cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));

        if (unlikely(cmd == NULL)) {
                DRM_ERROR("Out of fifo space for dummy query.\n");
                return -ENOMEM;
        }

        cmd->header.id = SVGA_3D_CMD_WAIT_FOR_GB_QUERY;
        cmd->header.size = sizeof(cmd->body);
        cmd->body.cid = cid;
        cmd->body.type = SVGA3D_QUERYTYPE_OCCLUSION;
        BUG_ON(bo->mem.mem_type != VMW_PL_MOB);
        cmd->body.mobid = bo->mem.start;
        cmd->body.offset = 0;

        vmw_fifo_commit(dev_priv, sizeof(*cmd));

        return 0;
}


/**
 * vmw_fifo_emit_dummy_query - emits a dummy query to the fifo using
 * the appropriate resource query commands.
 *
 * @dev_priv: The device private structure.
 * @cid: The hardware context id used for the query.
 *
 * This function is used to emit a dummy occlusion query with
 * no primitives rendered between query begin and query end.
 * It's used to provide a query barrier, in order to know that when
 * this query is finished, all preceding queries are also finished.
 *
 * A query result structure should have been initialized at the start
 * of the dev_priv->dummy_query_bo buffer object, and that buffer object
 * must also be either reserved or pinned when this function is called.
 *
 * Returns -ENOMEM on failure to reserve fifo space.
 */
int vmw_fifo_emit_dummy_query(struct vmw_private *dev_priv,
                              uint32_t cid)
{
        if (dev_priv->has_mob)
                return vmw_fifo_emit_dummy_gb_query(dev_priv, cid);

        return vmw_fifo_emit_dummy_legacy_query(dev_priv, cid);
}

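/* Convenience wrapper that reserves FIFO space outside a DX context. */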
void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes)
{
        return vmw_fifo_reserve_dx(dev_priv, bytes, SVGA3D_INVALID_ID);
}