linux/drivers/gpu/drm/vmwgfx/vmwgfx_cmd.c
// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright 2009-2020 VMware, Inc., Palo Alto, CA., USA
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include <linux/sched/signal.h>

#include <drm/ttm/ttm_placement.h>

#include "vmwgfx_drv.h"
#include "vmwgfx_devcaps.h"
bool vmw_supports_3d(struct vmw_private *dev_priv)
{
        uint32_t fifo_min, hwversion;
        const struct vmw_fifo_state *fifo = dev_priv->fifo;

        if (!(dev_priv->capabilities & SVGA_CAP_3D))
                return false;

        if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) {
                uint32_t result;

                if (!dev_priv->has_mob)
                        return false;

                result = vmw_devcap_get(dev_priv, SVGA3D_DEVCAP_3D);

                return (result != 0);
        }

        if (!(dev_priv->capabilities & SVGA_CAP_EXTENDED_FIFO))
                return false;

        BUG_ON(vmw_is_svga_v3(dev_priv));

        fifo_min = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_MIN);
        if (fifo_min <= SVGA_FIFO_3D_HWVERSION * sizeof(unsigned int))
                return false;

        hwversion = vmw_fifo_mem_read(dev_priv,
                                      ((fifo->capabilities &
                                        SVGA_FIFO_CAP_3D_HWVERSION_REVISED) ?
                                               SVGA_FIFO_3D_HWVERSION_REVISED :
                                               SVGA_FIFO_3D_HWVERSION));

        if (hwversion == 0)
                return false;

        if (hwversion < SVGA3D_HWVERSION_WS8_B1)
                return false;

        /* Legacy Display Unit does not support surfaces */
        if (dev_priv->active_display_unit == vmw_du_legacy)
                return false;

        return true;
}

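/**
 * vmw_fifo_have_pitchlock - Check for the FIFO pitchlock capability.
 *
 * @dev_priv: Pointer to the device private structure.
 *
 * Returns true if the extended FIFO advertises SVGA_FIFO_CAP_PITCHLOCK.
 */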
bool vmw_fifo_have_pitchlock(struct vmw_private *dev_priv)
{
        uint32_t caps;

        if (!(dev_priv->capabilities & SVGA_CAP_EXTENDED_FIFO))
                return false;

        caps = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_CAPABILITIES);
        if (caps & SVGA_FIFO_CAP_PITCHLOCK)
                return true;

        return false;
}

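/**
 * vmw_fifo_create - Allocate and initialize the FIFO command ring state.
 *
 * @dev_priv: Pointer to the device private structure.
 *
 * Allocates the bookkeeping structure and the static bounce buffer,
 * programs the FIFO MIN/MAX/NEXT_CMD/STOP offsets, and signals the
 * device that configuration is done.
 *
 * Returns a pointer to the new FIFO state, NULL if the device has no
 * FIFO memory, or an ERR_PTR() on allocation failure.
 */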
struct vmw_fifo_state *vmw_fifo_create(struct vmw_private *dev_priv)
{
        struct vmw_fifo_state *fifo;
        uint32_t max;
        uint32_t min;

        if (!dev_priv->fifo_mem)
                return NULL;

        fifo = kzalloc(sizeof(*fifo), GFP_KERNEL);
        if (!fifo)
                return ERR_PTR(-ENOMEM);
        fifo->static_buffer_size = VMWGFX_FIFO_STATIC_SIZE;
        fifo->static_buffer = vmalloc(fifo->static_buffer_size);
        if (unlikely(fifo->static_buffer == NULL)) {
                kfree(fifo);
                return ERR_PTR(-ENOMEM);
        }

        fifo->dynamic_buffer = NULL;
        fifo->reserved_size = 0;
        fifo->using_bounce_buffer = false;

        mutex_init(&fifo->fifo_mutex);
        init_rwsem(&fifo->rwsem);
        min = 4;
        if (dev_priv->capabilities & SVGA_CAP_EXTENDED_FIFO)
                min = vmw_read(dev_priv, SVGA_REG_MEM_REGS);
        min <<= 2;

        if (min < PAGE_SIZE)
                min = PAGE_SIZE;

        vmw_fifo_mem_write(dev_priv, SVGA_FIFO_MIN, min);
        vmw_fifo_mem_write(dev_priv, SVGA_FIFO_MAX, dev_priv->fifo_mem_size);
        wmb();
        vmw_fifo_mem_write(dev_priv, SVGA_FIFO_NEXT_CMD, min);
        vmw_fifo_mem_write(dev_priv, SVGA_FIFO_STOP, min);
        vmw_fifo_mem_write(dev_priv, SVGA_FIFO_BUSY, 0);
        mb();

        vmw_write(dev_priv, SVGA_REG_CONFIG_DONE, 1);

        max = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_MAX);
        min = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_MIN);
        fifo->capabilities = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_CAPABILITIES);

        drm_info(&dev_priv->drm,
                 "Fifo max 0x%08x min 0x%08x cap 0x%08x\n",
                 (unsigned int) max,
                 (unsigned int) min,
                 (unsigned int) fifo->capabilities);
        return fifo;
}

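/**
 * vmw_fifo_ping_host - Wake up the device command processor.
 *
 * @dev_priv: Pointer to the device private structure.
 * @reason: SVGA_SYNC_* reason code written to the sync register.
 *
 * The cmpxchg() on SVGA_FIFO_BUSY ensures the relatively expensive
 * register write is issued only once while the device is idle.
 */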
void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason)
{
        u32 *fifo_mem = dev_priv->fifo_mem;

        if (fifo_mem && cmpxchg(fifo_mem + SVGA_FIFO_BUSY, 0, 1) == 0)
                vmw_write(dev_priv, SVGA_REG_SYNC, reason);
}

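/**
 * vmw_fifo_destroy - Tear down the FIFO state and free its bounce buffers.
 *
 * @dev_priv: Pointer to the device private structure.
 */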
void vmw_fifo_destroy(struct vmw_private *dev_priv)
{
        struct vmw_fifo_state *fifo = dev_priv->fifo;

        if (!fifo)
                return;

        if (likely(fifo->static_buffer != NULL)) {
                vfree(fifo->static_buffer);
                fifo->static_buffer = NULL;
        }

        if (likely(fifo->dynamic_buffer != NULL)) {
                vfree(fifo->dynamic_buffer);
                fifo->dynamic_buffer = NULL;
        }
        kfree(fifo);
        dev_priv->fifo = NULL;
}

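/**
 * vmw_fifo_is_full - Check whether the FIFO has no room for @bytes of commands.
 *
 * @dev_priv: Pointer to the device private structure.
 * @bytes: Requested number of bytes.
 *
 * The FIFO is a ring between the MIN and MAX offsets, with NEXT_CMD as
 * the driver's write pointer and STOP as the device's read pointer.
 * With the write pointer ahead of the read pointer, the free bytes are
 * those from NEXT_CMD up to MAX plus those from MIN up to STOP:
 * e.g. min = 0x1000, max = 0x5000, next_cmd = 0x4000, stop = 0x2000
 * leaves 0x1000 + 0x1000 = 0x2000 bytes free.
 */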
static bool vmw_fifo_is_full(struct vmw_private *dev_priv, uint32_t bytes)
{
        uint32_t max = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_MAX);
        uint32_t next_cmd = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_NEXT_CMD);
        uint32_t min = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_MIN);
        uint32_t stop = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_STOP);

        return ((max - next_cmd) + (stop - min) <= bytes);
}

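/**
 * vmw_fifo_wait_noirq - Poll for FIFO space without relying on interrupts.
 *
 * @dev_priv: Pointer to the device private structure.
 * @bytes: Number of bytes to wait for.
 * @interruptible: Whether the sleep may be interrupted by a signal.
 * @timeout: Timeout in jiffies before giving up.
 *
 * Used when the device lacks SVGA_CAP_IRQMASK. Sleeps one jiffy at a
 * time and rechecks; returns -EBUSY on timeout (likely device lockup)
 * or -ERESTARTSYS when interrupted by a signal.
 */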
static int vmw_fifo_wait_noirq(struct vmw_private *dev_priv,
                               uint32_t bytes, bool interruptible,
                               unsigned long timeout)
{
        int ret = 0;
        unsigned long end_jiffies = jiffies + timeout;
        DEFINE_WAIT(__wait);

        DRM_INFO("Fifo wait noirq.\n");

        for (;;) {
                prepare_to_wait(&dev_priv->fifo_queue, &__wait,
                                (interruptible) ?
                                TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE);
                if (!vmw_fifo_is_full(dev_priv, bytes))
                        break;
                if (time_after_eq(jiffies, end_jiffies)) {
                        ret = -EBUSY;
                        DRM_ERROR("SVGA device lockup.\n");
                        break;
                }
                schedule_timeout(1);
                if (interruptible && signal_pending(current)) {
                        ret = -ERESTARTSYS;
                        break;
                }
        }
        finish_wait(&dev_priv->fifo_queue, &__wait);
        wake_up_all(&dev_priv->fifo_queue);
        DRM_INFO("Fifo noirq exit.\n");
        return ret;
}

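/**
 * vmw_fifo_wait - Wait until at least @bytes of FIFO space are available.
 *
 * @dev_priv: Pointer to the device private structure.
 * @bytes: Number of bytes to wait for.
 * @interruptible: Whether the sleep may be interrupted by a signal.
 * @timeout: Timeout in jiffies before giving up.
 *
 * Pings the host with SVGA_SYNC_FIFOFULL, then either polls (no IRQ
 * support) or sleeps on the FIFO progress interrupt. Returns zero on
 * success, -EBUSY on timeout or -ERESTARTSYS when interrupted.
 */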
static int vmw_fifo_wait(struct vmw_private *dev_priv,
                         uint32_t bytes, bool interruptible,
                         unsigned long timeout)
{
        long ret = 1L;

        if (likely(!vmw_fifo_is_full(dev_priv, bytes)))
                return 0;

        vmw_fifo_ping_host(dev_priv, SVGA_SYNC_FIFOFULL);
        if (!(dev_priv->capabilities & SVGA_CAP_IRQMASK))
                return vmw_fifo_wait_noirq(dev_priv, bytes,
                                           interruptible, timeout);

        vmw_generic_waiter_add(dev_priv, SVGA_IRQFLAG_FIFO_PROGRESS,
                               &dev_priv->fifo_queue_waiters);

        if (interruptible)
                ret = wait_event_interruptible_timeout
                    (dev_priv->fifo_queue,
                     !vmw_fifo_is_full(dev_priv, bytes), timeout);
        else
                ret = wait_event_timeout
                    (dev_priv->fifo_queue,
                     !vmw_fifo_is_full(dev_priv, bytes), timeout);

        if (unlikely(ret == 0))
                ret = -EBUSY;
        else if (likely(ret > 0))
                ret = 0;

        vmw_generic_waiter_remove(dev_priv, SVGA_IRQFLAG_FIFO_PROGRESS,
                                  &dev_priv->fifo_queue_waiters);

        return ret;
}

/*
 * Reserve @bytes number of bytes in the fifo.
 *
 * This function returns NULL (error) under two conditions:
 * if it times out waiting for fifo space, or if @bytes is larger than
 * the available fifo space.
 *
 * Returns:
 *   Pointer to the fifo, or NULL on error (possible hardware hang).
 */
static void *vmw_local_fifo_reserve(struct vmw_private *dev_priv,
                                    uint32_t bytes)
{
        struct vmw_fifo_state *fifo_state = dev_priv->fifo;
        u32  *fifo_mem = dev_priv->fifo_mem;
        uint32_t max;
        uint32_t min;
        uint32_t next_cmd;
        uint32_t reserveable = fifo_state->capabilities & SVGA_FIFO_CAP_RESERVE;
        int ret;

        mutex_lock(&fifo_state->fifo_mutex);
        max = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_MAX);
        min = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_MIN);
        next_cmd = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_NEXT_CMD);

        if (unlikely(bytes >= (max - min)))
                goto out_err;

        BUG_ON(fifo_state->reserved_size != 0);
        BUG_ON(fifo_state->dynamic_buffer != NULL);

        fifo_state->reserved_size = bytes;

        while (1) {
                uint32_t stop = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_STOP);
                bool need_bounce = false;
                bool reserve_in_place = false;

                if (next_cmd >= stop) {
                        if (likely((next_cmd + bytes < max ||
                                    (next_cmd + bytes == max && stop > min))))
                                reserve_in_place = true;

                        else if (vmw_fifo_is_full(dev_priv, bytes)) {
                                ret = vmw_fifo_wait(dev_priv, bytes,
                                                    false, 3 * HZ);
                                if (unlikely(ret != 0))
                                        goto out_err;
                        } else
                                need_bounce = true;

                } else {

                        if (likely((next_cmd + bytes < stop)))
                                reserve_in_place = true;
                        else {
                                ret = vmw_fifo_wait(dev_priv, bytes,
                                                    false, 3 * HZ);
                                if (unlikely(ret != 0))
                                        goto out_err;
                        }
                }

                if (reserve_in_place) {
                        if (reserveable || bytes <= sizeof(uint32_t)) {
                                fifo_state->using_bounce_buffer = false;

                                if (reserveable)
                                        vmw_fifo_mem_write(dev_priv,
                                                           SVGA_FIFO_RESERVED,
                                                           bytes);
                                return (void __force *) (fifo_mem +
                                                         (next_cmd >> 2));
                        } else {
                                need_bounce = true;
                        }
                }

                if (need_bounce) {
                        fifo_state->using_bounce_buffer = true;
                        if (bytes < fifo_state->static_buffer_size)
                                return fifo_state->static_buffer;
                        else {
                                fifo_state->dynamic_buffer = vmalloc(bytes);
                                if (!fifo_state->dynamic_buffer)
                                        goto out_err;
                                return fifo_state->dynamic_buffer;
                        }
                }
        }
out_err:
        fifo_state->reserved_size = 0;
        mutex_unlock(&fifo_state->fifo_mutex);

        return NULL;
}

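/**
 * vmw_cmd_ctx_reserve - Reserve @bytes of command space for a context.
 *
 * @dev_priv: Pointer to the device private structure.
 * @bytes: Number of bytes to reserve.
 * @ctx_id: Hardware context id, or SVGA3D_INVALID_ID for none.
 *
 * Dispatches to the command buffer manager when one exists, and falls
 * back to the legacy FIFO otherwise. A typical caller pairs this with
 * vmw_cmd_commit(), as in vmw_cmd_send_fence() below:
 *
 *     cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
 *     if (unlikely(cmd == NULL))
 *             return -ENOMEM;
 *     ... fill in *cmd ...
 *     vmw_cmd_commit(dev_priv, sizeof(*cmd));
 *
 * Returns a pointer to the reserved space, or NULL on failure.
 */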
void *vmw_cmd_ctx_reserve(struct vmw_private *dev_priv, uint32_t bytes,
                          int ctx_id)
{
        void *ret;

        if (dev_priv->cman)
                ret = vmw_cmdbuf_reserve(dev_priv->cman, bytes,
                                         ctx_id, false, NULL);
        else if (ctx_id == SVGA3D_INVALID_ID)
                ret = vmw_local_fifo_reserve(dev_priv, bytes);
        else {
                WARN(1, "Command buffer has not been allocated.\n");
                ret = NULL;
        }
        if (IS_ERR_OR_NULL(ret))
                return NULL;

        return ret;
}

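/**
 * vmw_fifo_res_copy - Copy a bounce buffer into reservable FIFO memory.
 *
 * @fifo_state: Pointer to the FIFO state.
 * @vmw: Pointer to the device private structure.
 * @next_cmd: Current FIFO write offset.
 * @max: FIFO maximum offset.
 * @min: FIFO minimum offset.
 * @bytes: Number of bytes to copy.
 *
 * Publishes the reservation size first, then memcpy()s the buffered
 * commands, splitting the copy in two when it wraps past @max.
 */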
static void vmw_fifo_res_copy(struct vmw_fifo_state *fifo_state,
                              struct vmw_private *vmw,
                              uint32_t next_cmd,
                              uint32_t max, uint32_t min, uint32_t bytes)
{
        u32 *fifo_mem = vmw->fifo_mem;
        uint32_t chunk_size = max - next_cmd;
        uint32_t rest;
        uint32_t *buffer = (fifo_state->dynamic_buffer != NULL) ?
            fifo_state->dynamic_buffer : fifo_state->static_buffer;

        if (bytes < chunk_size)
                chunk_size = bytes;

        vmw_fifo_mem_write(vmw, SVGA_FIFO_RESERVED, bytes);
        mb();
        memcpy(fifo_mem + (next_cmd >> 2), buffer, chunk_size);
        rest = bytes - chunk_size;
        if (rest)
                memcpy(fifo_mem + (min >> 2), buffer + (chunk_size >> 2), rest);
}

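/**
 * vmw_fifo_slow_copy - Copy a bounce buffer one word at a time.
 *
 * @fifo_state: Pointer to the FIFO state.
 * @vmw: Pointer to the device private structure.
 * @next_cmd: Current FIFO write offset.
 * @max: FIFO maximum offset.
 * @min: FIFO minimum offset.
 * @bytes: Number of bytes to copy.
 *
 * Fallback for FIFOs without SVGA_FIFO_CAP_RESERVE: each 32-bit word
 * is written and made visible by bumping SVGA_FIFO_NEXT_CMD before the
 * next word is copied.
 */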
static void vmw_fifo_slow_copy(struct vmw_fifo_state *fifo_state,
                               struct vmw_private *vmw,
                               uint32_t next_cmd,
                               uint32_t max, uint32_t min, uint32_t bytes)
{
        uint32_t *buffer = (fifo_state->dynamic_buffer != NULL) ?
            fifo_state->dynamic_buffer : fifo_state->static_buffer;

        while (bytes > 0) {
                vmw_fifo_mem_write(vmw, (next_cmd >> 2), *buffer++);
                next_cmd += sizeof(uint32_t);
                if (unlikely(next_cmd == max))
                        next_cmd = min;
                mb();
                vmw_fifo_mem_write(vmw, SVGA_FIFO_NEXT_CMD, next_cmd);
                mb();
                bytes -= sizeof(uint32_t);
        }
}

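/**
 * vmw_local_fifo_commit - Commit previously reserved FIFO space.
 *
 * @dev_priv: Pointer to the device private structure.
 * @bytes: Number of bytes to commit.
 *
 * Flushes any bounce buffer into the FIFO, advances SVGA_FIFO_NEXT_CMD
 * (wrapping at max), clears the reservation, and pings the host so the
 * device starts processing. Releases the mutex taken in
 * vmw_local_fifo_reserve().
 */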
static void vmw_local_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes)
{
        struct vmw_fifo_state *fifo_state = dev_priv->fifo;
        uint32_t next_cmd = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_NEXT_CMD);
        uint32_t max = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_MAX);
        uint32_t min = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_MIN);
        bool reserveable = fifo_state->capabilities & SVGA_FIFO_CAP_RESERVE;

        BUG_ON((bytes & 3) != 0);
        BUG_ON(bytes > fifo_state->reserved_size);

        fifo_state->reserved_size = 0;

        if (fifo_state->using_bounce_buffer) {
                if (reserveable)
                        vmw_fifo_res_copy(fifo_state, dev_priv,
                                          next_cmd, max, min, bytes);
                else
                        vmw_fifo_slow_copy(fifo_state, dev_priv,
                                           next_cmd, max, min, bytes);

                if (fifo_state->dynamic_buffer) {
                        vfree(fifo_state->dynamic_buffer);
                        fifo_state->dynamic_buffer = NULL;
                }

        }

        down_write(&fifo_state->rwsem);
        if (fifo_state->using_bounce_buffer || reserveable) {
                next_cmd += bytes;
                if (next_cmd >= max)
                        next_cmd -= max - min;
                mb();
                vmw_fifo_mem_write(dev_priv, SVGA_FIFO_NEXT_CMD, next_cmd);
        }

        if (reserveable)
                vmw_fifo_mem_write(dev_priv, SVGA_FIFO_RESERVED, 0);
        mb();
        up_write(&fifo_state->rwsem);
        vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
        mutex_unlock(&fifo_state->fifo_mutex);
}

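/**
 * vmw_cmd_commit - Commit previously reserved command space.
 *
 * @dev_priv: Pointer to device private structure.
 * @bytes: Number of bytes to commit.
 *
 * Dispatches to the command buffer manager when one exists, and to the
 * legacy FIFO otherwise.
 */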
void vmw_cmd_commit(struct vmw_private *dev_priv, uint32_t bytes)
{
        if (dev_priv->cman)
                vmw_cmdbuf_commit(dev_priv->cman, bytes, NULL, false);
        else
                vmw_local_fifo_commit(dev_priv, bytes);
}


/**
 * vmw_cmd_commit_flush - Commit fifo space and flush any buffered commands.
 *
 * @dev_priv: Pointer to device private structure.
 * @bytes: Number of bytes to commit.
 */
void vmw_cmd_commit_flush(struct vmw_private *dev_priv, uint32_t bytes)
{
        if (dev_priv->cman)
                vmw_cmdbuf_commit(dev_priv->cman, bytes, NULL, true);
        else
                vmw_local_fifo_commit(dev_priv, bytes);
}

/**
 * vmw_cmd_flush - Flush any buffered commands and make sure command processing
 * starts.
 *
 * @dev_priv: Pointer to device private structure.
 * @interruptible: Whether to wait interruptible if function needs to sleep.
 *
 * Returns zero on success or a negative error code on failure.
 */
int vmw_cmd_flush(struct vmw_private *dev_priv, bool interruptible)
{
        might_sleep();

        if (dev_priv->cman)
                return vmw_cmdbuf_cur_flush(dev_priv->cman, interruptible);
        else
                return 0;
}

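/**
 * vmw_cmd_send_fence - Emit a fence command and return its sequence number.
 *
 * @dev_priv: Pointer to device private structure.
 * @seqno: Out parameter receiving the new fence sequence number.
 *
 * If command space cannot be reserved, falls back to waiting for the
 * last known sequence number and returns -ENOMEM. On devices without
 * SVGA_FIFO_CAP_FENCE the fence is not sent; the interrupt code in
 * vmwgfx_irq.c emulates fence completion instead.
 */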
int vmw_cmd_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
{
        struct svga_fifo_cmd_fence *cmd_fence;
        u32 *fm;
        int ret = 0;
        uint32_t bytes = sizeof(u32) + sizeof(*cmd_fence);

        fm = VMW_CMD_RESERVE(dev_priv, bytes);
        if (unlikely(fm == NULL)) {
                *seqno = atomic_read(&dev_priv->marker_seq);
                ret = -ENOMEM;
                (void)vmw_fallback_wait(dev_priv, false, true, *seqno,
                                        false, 3*HZ);
                goto out_err;
        }

        do {
                *seqno = atomic_add_return(1, &dev_priv->marker_seq);
        } while (*seqno == 0);

        if (!(vmw_fifo_caps(dev_priv) & SVGA_FIFO_CAP_FENCE)) {

                /*
                 * Don't request hardware to send a fence. The
                 * waiting code in vmwgfx_irq.c will emulate this.
                 */

                vmw_cmd_commit(dev_priv, 0);
                return 0;
        }

        *fm++ = SVGA_CMD_FENCE;
        cmd_fence = (struct svga_fifo_cmd_fence *) fm;
        cmd_fence->fence = *seqno;
        vmw_cmd_commit_flush(dev_priv, bytes);
        vmw_update_seqno(dev_priv);

out_err:
        return ret;
}

/**
 * vmw_cmd_emit_dummy_legacy_query - emits a dummy query to the fifo using
 * legacy query commands.
 *
 * @dev_priv: The device private structure.
 * @cid: The hardware context id used for the query.
 *
 * See the vmw_cmd_emit_dummy_query documentation.
 */
static int vmw_cmd_emit_dummy_legacy_query(struct vmw_private *dev_priv,
                                            uint32_t cid)
{
        /*
         * A query wait without a preceding query end will
         * actually finish all queries for this cid
         * without writing to the query result structure.
         */

        struct ttm_buffer_object *bo = &dev_priv->dummy_query_bo->base;
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdWaitForQuery body;
        } *cmd;

        cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
        if (unlikely(cmd == NULL))
                return -ENOMEM;

        cmd->header.id = SVGA_3D_CMD_WAIT_FOR_QUERY;
        cmd->header.size = sizeof(cmd->body);
        cmd->body.cid = cid;
        cmd->body.type = SVGA3D_QUERYTYPE_OCCLUSION;

        if (bo->resource->mem_type == TTM_PL_VRAM) {
                cmd->body.guestResult.gmrId = SVGA_GMR_FRAMEBUFFER;
                cmd->body.guestResult.offset = bo->resource->start << PAGE_SHIFT;
        } else {
                cmd->body.guestResult.gmrId = bo->resource->start;
                cmd->body.guestResult.offset = 0;
        }

        vmw_cmd_commit(dev_priv, sizeof(*cmd));

        return 0;
}

/**
 * vmw_cmd_emit_dummy_gb_query - emits a dummy query to the fifo using
 * guest-backed resource query commands.
 *
 * @dev_priv: The device private structure.
 * @cid: The hardware context id used for the query.
 *
 * See the vmw_cmd_emit_dummy_query documentation.
 */
static int vmw_cmd_emit_dummy_gb_query(struct vmw_private *dev_priv,
                                       uint32_t cid)
{
        /*
         * A query wait without a preceding query end will
         * actually finish all queries for this cid
         * without writing to the query result structure.
         */

        struct ttm_buffer_object *bo = &dev_priv->dummy_query_bo->base;
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdWaitForGBQuery body;
        } *cmd;

        cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
        if (unlikely(cmd == NULL))
                return -ENOMEM;

        cmd->header.id = SVGA_3D_CMD_WAIT_FOR_GB_QUERY;
        cmd->header.size = sizeof(cmd->body);
        cmd->body.cid = cid;
        cmd->body.type = SVGA3D_QUERYTYPE_OCCLUSION;
        BUG_ON(bo->resource->mem_type != VMW_PL_MOB);
        cmd->body.mobid = bo->resource->start;
        cmd->body.offset = 0;

        vmw_cmd_commit(dev_priv, sizeof(*cmd));

        return 0;
}


/**
 * vmw_cmd_emit_dummy_query - emits a dummy query to the fifo using
 * appropriate resource query commands.
 *
 * @dev_priv: The device private structure.
 * @cid: The hardware context id used for the query.
 *
 * This function is used to emit a dummy occlusion query with
 * no primitives rendered between query begin and query end.
 * It's used to provide a query barrier, in order to know that when
 * this query is finished, all preceding queries are also finished.
 *
 * A query result structure must have been initialized at the start
 * of the dev_priv->dummy_query_bo buffer object, and that buffer object
 * must be either reserved or pinned when this function is called.
 *
 * Returns -ENOMEM on failure to reserve fifo space.
 */
int vmw_cmd_emit_dummy_query(struct vmw_private *dev_priv,
                              uint32_t cid)
{
        if (dev_priv->has_mob)
                return vmw_cmd_emit_dummy_gb_query(dev_priv, cid);

        return vmw_cmd_emit_dummy_legacy_query(dev_priv, cid);
}


/**
 * vmw_cmd_supported - returns true if the given device supports
 * command queues.
 *
 * @vmw: The device private structure.
 *
 * Returns true if we can issue commands.
 */
bool vmw_cmd_supported(struct vmw_private *vmw)
{
        if ((vmw->capabilities & (SVGA_CAP_COMMAND_BUFFERS |
                                  SVGA_CAP_CMD_BUFFERS_2)) != 0)
                return true;
        /*
         * We have FIFO commands.
         */
        return vmw->fifo_mem != NULL;
}