linux/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright 2015 VMware, Inc., Palo Alto, CA., USA
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include <linux/dmapool.h>
#include <linux/pci.h>

#include <drm/ttm/ttm_bo_api.h>

#include "vmwgfx_drv.h"

/*
 * Size of inline command buffers. Try to make sure that a page size is a
 * multiple of the DMA pool allocation size.
 */
#define VMW_CMDBUF_INLINE_ALIGN 64
#define VMW_CMDBUF_INLINE_SIZE \
        (1024 - ALIGN(sizeof(SVGACBHeader), VMW_CMDBUF_INLINE_ALIGN))
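
/*
 * Illustrative arithmetic (assuming sizeof(SVGACBHeader) is at most 64
 * bytes): ALIGN(sizeof(SVGACBHeader), 64) is then 64, so
 * VMW_CMDBUF_INLINE_SIZE works out to 1024 - 64 = 960 bytes, each
 * struct vmw_cmdbuf_dheader occupies exactly 1024 bytes, and four of
 * them fit a 4096-byte page with no slack.
 */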

/**
 * struct vmw_cmdbuf_context - Command buffer context queues
 *
 * @submitted: List of command buffers that have been submitted to the
 * manager but not yet submitted to hardware.
 * @hw_submitted: List of command buffers submitted to hardware.
 * @preempted: List of preempted command buffers.
 * @num_hw_submitted: Number of buffers currently being processed by hardware
 * @block_submission: Whether command buffer submission from this context is
 * currently blocked.
 */
struct vmw_cmdbuf_context {
        struct list_head submitted;
        struct list_head hw_submitted;
        struct list_head preempted;
        unsigned num_hw_submitted;
        bool block_submission;
};

/**
 * struct vmw_cmdbuf_man - Command buffer manager
 *
 * @cur_mutex: Mutex protecting the command buffer used for incremental small
 * kernel command submissions, @cur.
 * @space_mutex: Mutex to protect against starvation when we allocate
 * main pool buffer space.
 * @error_mutex: Mutex to serialize the work queue error handling.
 * Note this is not needed if the same workqueue handler
 * can't race with itself...
 * @work: A struct work_struct implementing command buffer error handling.
 * Immutable.
 * @dev_priv: Pointer to the device private struct. Immutable.
 * @ctx: Array of command buffer context queues. The queues and the context
 * data are protected by @lock.
 * @error: List of command buffers that have caused device errors.
 * Protected by @lock.
 * @mm: Range manager for the command buffer space. Manager allocations and
 * frees are protected by @lock.
 * @cmd_space: Buffer object for the command buffer space, unless we were
 * able to make a contiguous coherent DMA memory allocation (see @handle).
 * Immutable.
 * @map_obj: Mapping state for @cmd_space. Immutable.
 * @map: Pointer to command buffer space. May be a mapped buffer object or
 * a contiguous coherent DMA memory allocation. Immutable.
 * @cur: Command buffer for small kernel command submissions. Protected by
 * the @cur_mutex.
 * @cur_pos: Space already used in @cur. Protected by @cur_mutex.
 * @default_size: Default size for the @cur command buffer. Immutable.
 * @max_hw_submitted: Max number of in-flight command buffers the device can
 * handle. Immutable.
 * @lock: Spinlock protecting command submission queues.
 * @headers: Pool of DMA memory for device command buffer headers.
 * Internal protection.
 * @dheaders: Pool of DMA memory for device command buffer headers with trailing
 * space for inline data. Internal protection.
 * @alloc_queue: Wait queue for processes waiting to allocate command buffer
 * space.
 * @idle_queue: Wait queue for processes waiting for command buffer idle.
 * @irq_on: Whether the process function has requested irq to be turned on.
 * Protected by @lock.
 * @using_mob: Whether the command buffer space is a MOB or a contiguous DMA
 * allocation. Immutable.
 * @has_pool: Has a large pool of DMA memory which allows larger allocations.
 * Typically this is false only during bootstrap.
 * @handle: DMA address handle for the command buffer space if @using_mob is
 * false. Immutable.
 * @size: The size of the command buffer space. Immutable.
 * @num_contexts: Number of contexts actually enabled.
 */
struct vmw_cmdbuf_man {
        struct mutex cur_mutex;
        struct mutex space_mutex;
        struct mutex error_mutex;
        struct work_struct work;
        struct vmw_private *dev_priv;
        struct vmw_cmdbuf_context ctx[SVGA_CB_CONTEXT_MAX];
        struct list_head error;
        struct drm_mm mm;
        struct ttm_buffer_object *cmd_space;
        struct ttm_bo_kmap_obj map_obj;
        u8 *map;
        struct vmw_cmdbuf_header *cur;
        size_t cur_pos;
        size_t default_size;
        unsigned max_hw_submitted;
        spinlock_t lock;
        struct dma_pool *headers;
        struct dma_pool *dheaders;
        wait_queue_head_t alloc_queue;
        wait_queue_head_t idle_queue;
        bool irq_on;
        bool using_mob;
        bool has_pool;
        dma_addr_t handle;
        size_t size;
        u32 num_contexts;
};

/**
 * struct vmw_cmdbuf_header - Command buffer metadata
 *
 * @man: The command buffer manager.
 * @cb_header: Device command buffer header, allocated from a DMA pool.
 * @cb_context: The device command buffer context.
 * @list: List head for attaching to the manager lists.
 * @node: The range manager node.
 * @handle: The DMA address of @cb_header. Handed to the device on command
 * buffer submission.
 * @cmd: Pointer to the command buffer space of this buffer.
 * @size: Size of the command buffer space of this buffer.
 * @reserved: Reserved space of this buffer.
 * @inline_space: Whether inline command buffer space is used.
 */
struct vmw_cmdbuf_header {
        struct vmw_cmdbuf_man *man;
        SVGACBHeader *cb_header;
        SVGACBContext cb_context;
        struct list_head list;
        struct drm_mm_node node;
        dma_addr_t handle;
        u8 *cmd;
        size_t size;
        size_t reserved;
        bool inline_space;
};

/**
 * struct vmw_cmdbuf_dheader - Device command buffer header with inline
 * command buffer space.
 *
 * @cb_header: Device command buffer header.
 * @cmd: Inline command buffer space.
 */
struct vmw_cmdbuf_dheader {
        SVGACBHeader cb_header;
        u8 cmd[VMW_CMDBUF_INLINE_SIZE] __aligned(VMW_CMDBUF_INLINE_ALIGN);
};
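
/*
 * Note: the header and its inline command space come from a single DMA
 * pool allocation, so the device-visible address of @cmd is simply the
 * pool handle plus offsetof(struct vmw_cmdbuf_dheader, cmd); see
 * vmw_cmdbuf_space_inline() below.
 */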

/**
 * struct vmw_cmdbuf_alloc_info - Command buffer space allocation metadata
 *
 * @page_size: Size of requested command buffer space in pages.
 * @node: Pointer to the range manager node.
 * @done: True if this allocation has succeeded.
 */
struct vmw_cmdbuf_alloc_info {
        size_t page_size;
        struct drm_mm_node *node;
        bool done;
};

/* Loop over each context in the command buffer manager. */
#define for_each_cmdbuf_ctx(_man, _i, _ctx) \
        for (_i = 0, _ctx = &(_man)->ctx[0]; (_i) < (_man)->num_contexts; \
             ++(_i), ++(_ctx))

static int vmw_cmdbuf_startstop(struct vmw_cmdbuf_man *man, u32 context,
                                bool enable);
static int vmw_cmdbuf_preempt(struct vmw_cmdbuf_man *man, u32 context);

/**
 * vmw_cmdbuf_cur_lock - Helper to lock the cur_mutex.
 *
 * @man: The command buffer manager.
 * @interruptible: Whether to wait interruptible when locking.
 */
static int vmw_cmdbuf_cur_lock(struct vmw_cmdbuf_man *man, bool interruptible)
{
        if (interruptible) {
                if (mutex_lock_interruptible(&man->cur_mutex))
                        return -ERESTARTSYS;
        } else {
                mutex_lock(&man->cur_mutex);
        }

        return 0;
}

/**
 * vmw_cmdbuf_cur_unlock - Helper to unlock the cur_mutex.
 *
 * @man: The command buffer manager.
 */
static void vmw_cmdbuf_cur_unlock(struct vmw_cmdbuf_man *man)
{
        mutex_unlock(&man->cur_mutex);
}

/**
 * vmw_cmdbuf_header_inline_free - Free a struct vmw_cmdbuf_header that has
 * been used for the device context with inline command buffers.
 * Need not be called locked.
 *
 * @header: Pointer to the header to free.
 */
static void vmw_cmdbuf_header_inline_free(struct vmw_cmdbuf_header *header)
{
        struct vmw_cmdbuf_dheader *dheader;

        if (WARN_ON_ONCE(!header->inline_space))
                return;

        dheader = container_of(header->cb_header, struct vmw_cmdbuf_dheader,
                               cb_header);
        dma_pool_free(header->man->dheaders, dheader, header->handle);
        kfree(header);
}

/**
 * __vmw_cmdbuf_header_free - Free a struct vmw_cmdbuf_header and its
 * associated structures.
 *
 * @header: Pointer to the header to free.
 *
 * For internal use. Must be called with man::lock held.
 */
static void __vmw_cmdbuf_header_free(struct vmw_cmdbuf_header *header)
{
        struct vmw_cmdbuf_man *man = header->man;

        lockdep_assert_held_once(&man->lock);

        if (header->inline_space) {
                vmw_cmdbuf_header_inline_free(header);
                return;
        }

        drm_mm_remove_node(&header->node);
        wake_up_all(&man->alloc_queue);
        if (header->cb_header)
                dma_pool_free(man->headers, header->cb_header,
                              header->handle);
        kfree(header);
}

/**
 * vmw_cmdbuf_header_free - Free a struct vmw_cmdbuf_header and its
 * associated structures.
 *
 * @header: Pointer to the header to free.
 */
void vmw_cmdbuf_header_free(struct vmw_cmdbuf_header *header)
{
        struct vmw_cmdbuf_man *man = header->man;

        /* Avoid locking if inline_space */
        if (header->inline_space) {
                vmw_cmdbuf_header_inline_free(header);
                return;
        }
        spin_lock(&man->lock);
        __vmw_cmdbuf_header_free(header);
        spin_unlock(&man->lock);
}

/**
 * vmw_cmdbuf_header_submit - Submit a command buffer to hardware.
 *
 * @header: The header of the buffer to submit.
 */
static int vmw_cmdbuf_header_submit(struct vmw_cmdbuf_header *header)
{
        struct vmw_cmdbuf_man *man = header->man;
        u32 val;

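        /*
         * The device acts on the SVGA_REG_COMMAND_LOW write, so the high
         * half of the header address must be latched first.
         */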
        val = upper_32_bits(header->handle);
        vmw_write(man->dev_priv, SVGA_REG_COMMAND_HIGH, val);

        val = lower_32_bits(header->handle);
        val |= header->cb_context & SVGA_CB_CONTEXT_MASK;
        vmw_write(man->dev_priv, SVGA_REG_COMMAND_LOW, val);

        return header->cb_header->status;
}

/**
 * vmw_cmdbuf_ctx_init - Initialize a command buffer context.
 *
 * @ctx: The command buffer context to initialize
 */
static void vmw_cmdbuf_ctx_init(struct vmw_cmdbuf_context *ctx)
{
        INIT_LIST_HEAD(&ctx->hw_submitted);
        INIT_LIST_HEAD(&ctx->submitted);
        INIT_LIST_HEAD(&ctx->preempted);
        ctx->num_hw_submitted = 0;
}

/**
 * vmw_cmdbuf_ctx_submit - Submit command buffers from a command buffer
 * context.
 *
 * @man: The command buffer manager.
 * @ctx: The command buffer context.
 *
 * Submits command buffers to hardware until there are no more command
 * buffers to submit or the hardware can't handle more command buffers.
 */
static void vmw_cmdbuf_ctx_submit(struct vmw_cmdbuf_man *man,
                                  struct vmw_cmdbuf_context *ctx)
{
        while (ctx->num_hw_submitted < man->max_hw_submitted &&
               !list_empty(&ctx->submitted) &&
               !ctx->block_submission) {
                struct vmw_cmdbuf_header *entry;
                SVGACBStatus status;

                entry = list_first_entry(&ctx->submitted,
                                         struct vmw_cmdbuf_header,
                                         list);

                status = vmw_cmdbuf_header_submit(entry);

                /* This should never happen */
                if (WARN_ON_ONCE(status == SVGA_CB_STATUS_QUEUE_FULL)) {
                        entry->cb_header->status = SVGA_CB_STATUS_NONE;
                        break;
                }

                list_move_tail(&entry->list, &ctx->hw_submitted);
                ctx->num_hw_submitted++;
        }
}

/**
 * vmw_cmdbuf_ctx_process - Process a command buffer context.
 *
 * @man: The command buffer manager.
 * @ctx: The command buffer context.
 * @notempty: Incremented if the context's submitted list is non-empty
 * after processing.
 *
 * Submit command buffers to hardware if possible, and process finished
 * buffers, typically freeing them, but taking appropriate action on
 * preemption or error. Wake up waiters if appropriate.
 */
static void vmw_cmdbuf_ctx_process(struct vmw_cmdbuf_man *man,
                                   struct vmw_cmdbuf_context *ctx,
                                   int *notempty)
{
        struct vmw_cmdbuf_header *entry, *next;

        vmw_cmdbuf_ctx_submit(man, ctx);

        list_for_each_entry_safe(entry, next, &ctx->hw_submitted, list) {
                SVGACBStatus status = entry->cb_header->status;

                if (status == SVGA_CB_STATUS_NONE)
                        break;

                list_del(&entry->list);
                wake_up_all(&man->idle_queue);
                ctx->num_hw_submitted--;
                switch (status) {
                case SVGA_CB_STATUS_COMPLETED:
                        __vmw_cmdbuf_header_free(entry);
                        break;
                case SVGA_CB_STATUS_COMMAND_ERROR:
                        WARN_ONCE(true, "Command buffer error.\n");
                        entry->cb_header->status = SVGA_CB_STATUS_NONE;
                        list_add_tail(&entry->list, &man->error);
                        schedule_work(&man->work);
                        break;
                case SVGA_CB_STATUS_PREEMPTED:
                        entry->cb_header->status = SVGA_CB_STATUS_NONE;
                        list_add_tail(&entry->list, &ctx->preempted);
                        break;
                case SVGA_CB_STATUS_CB_HEADER_ERROR:
                        WARN_ONCE(true, "Command buffer header error.\n");
                        __vmw_cmdbuf_header_free(entry);
                        break;
                default:
                        WARN_ONCE(true, "Undefined command buffer status.\n");
                        __vmw_cmdbuf_header_free(entry);
                        break;
                }
        }

        vmw_cmdbuf_ctx_submit(man, ctx);
        if (!list_empty(&ctx->submitted))
                (*notempty)++;
}

/**
 * vmw_cmdbuf_man_process - Process all command buffer contexts and
 * switch on and off irqs as appropriate.
 *
 * @man: The command buffer manager.
 *
 * Calls vmw_cmdbuf_ctx_process() on all contexts. If any context has
 * command buffers left that are not submitted to hardware, make sure
 * IRQ handling is turned on. Otherwise, make sure it's turned off.
 */
static void vmw_cmdbuf_man_process(struct vmw_cmdbuf_man *man)
{
        int notempty;
        struct vmw_cmdbuf_context *ctx;
        int i;

retry:
        notempty = 0;
        for_each_cmdbuf_ctx(man, i, ctx)
                vmw_cmdbuf_ctx_process(man, ctx, &notempty);

        if (man->irq_on && !notempty) {
                vmw_generic_waiter_remove(man->dev_priv,
                                          SVGA_IRQFLAG_COMMAND_BUFFER,
                                          &man->dev_priv->cmdbuf_waiters);
                man->irq_on = false;
        } else if (!man->irq_on && notempty) {
                vmw_generic_waiter_add(man->dev_priv,
                                       SVGA_IRQFLAG_COMMAND_BUFFER,
                                       &man->dev_priv->cmdbuf_waiters);
                man->irq_on = true;

                /* Rerun in case we just missed an irq. */
                goto retry;
        }
}

/**
 * vmw_cmdbuf_ctx_add - Schedule a command buffer for submission on a
 * command buffer context
 *
 * @man: The command buffer manager.
 * @header: The header of the buffer to submit.
 * @cb_context: The command buffer context to use.
 *
 * This function adds @header to the "submitted" queue of the command
 * buffer context identified by @cb_context. It then calls the command buffer
 * manager processing to potentially submit the buffer to hardware.
 * @man->lock needs to be held when calling this function.
 */
static void vmw_cmdbuf_ctx_add(struct vmw_cmdbuf_man *man,
                               struct vmw_cmdbuf_header *header,
                               SVGACBContext cb_context)
{
        if (!(header->cb_header->flags & SVGA_CB_FLAG_DX_CONTEXT))
                header->cb_header->dxContext = 0;
        header->cb_context = cb_context;
        list_add_tail(&header->list, &man->ctx[cb_context].submitted);

        vmw_cmdbuf_man_process(man);
}

/**
 * vmw_cmdbuf_irqthread - The main part of the command buffer interrupt
 * handler implemented as a threaded irq task.
 *
 * @man: Pointer to the command buffer manager.
 *
 * The bottom half of the interrupt handler simply calls into the
 * command buffer processor to free finished buffers and submit any
 * queued buffers to hardware.
 */
void vmw_cmdbuf_irqthread(struct vmw_cmdbuf_man *man)
{
        spin_lock(&man->lock);
        vmw_cmdbuf_man_process(man);
        spin_unlock(&man->lock);
}

/**
 * vmw_cmdbuf_work_func - The deferred work function that handles
 * command buffer errors.
 *
 * @work: The work func closure argument.
 *
 * Restarting the command buffer context after an error requires process
 * context, so it is deferred to this work function.
 */
static void vmw_cmdbuf_work_func(struct work_struct *work)
{
        struct vmw_cmdbuf_man *man =
                container_of(work, struct vmw_cmdbuf_man, work);
        struct vmw_cmdbuf_header *entry, *next;
        uint32_t dummy = 0;
        bool send_fence = false;
        struct list_head restart_head[SVGA_CB_CONTEXT_MAX];
        int i;
        struct vmw_cmdbuf_context *ctx;
        bool global_block = false;

        for_each_cmdbuf_ctx(man, i, ctx)
                INIT_LIST_HEAD(&restart_head[i]);

        mutex_lock(&man->error_mutex);
        spin_lock(&man->lock);
        list_for_each_entry_safe(entry, next, &man->error, list) {
                SVGACBHeader *cb_hdr = entry->cb_header;
                SVGA3dCmdHeader *header = (SVGA3dCmdHeader *)
                        (entry->cmd + cb_hdr->errorOffset);
                u32 error_cmd_size, new_start_offset;
                const char *cmd_name;

                list_del_init(&entry->list);
                global_block = true;

                if (!vmw_cmd_describe(header, &error_cmd_size, &cmd_name)) {
                        VMW_DEBUG_USER("Unknown command causing device error.\n");
                        VMW_DEBUG_USER("Command buffer offset is %lu\n",
                                       (unsigned long) cb_hdr->errorOffset);
                        __vmw_cmdbuf_header_free(entry);
                        send_fence = true;
                        continue;
                }

                VMW_DEBUG_USER("Command \"%s\" causing device error.\n",
                               cmd_name);
                VMW_DEBUG_USER("Command buffer offset is %lu\n",
                               (unsigned long) cb_hdr->errorOffset);
                VMW_DEBUG_USER("Command size is %lu\n",
                               (unsigned long) error_cmd_size);

                new_start_offset = cb_hdr->errorOffset + error_cmd_size;

                if (new_start_offset >= cb_hdr->length) {
                        __vmw_cmdbuf_header_free(entry);
                        send_fence = true;
                        continue;
                }

                if (man->using_mob)
                        cb_hdr->ptr.mob.mobOffset += new_start_offset;
                else
                        cb_hdr->ptr.pa += (u64) new_start_offset;

                entry->cmd += new_start_offset;
                cb_hdr->length -= new_start_offset;
                cb_hdr->errorOffset = 0;
                cb_hdr->offset = 0;

                list_add_tail(&entry->list, &restart_head[entry->cb_context]);
        }

        for_each_cmdbuf_ctx(man, i, ctx)
                man->ctx[i].block_submission = true;

        spin_unlock(&man->lock);

        /* Preempt all contexts */
        if (global_block && vmw_cmdbuf_preempt(man, 0))
                DRM_ERROR("Failed preempting command buffer contexts\n");

        spin_lock(&man->lock);
        for_each_cmdbuf_ctx(man, i, ctx) {
                /* Move preempted command buffers to the preempted queue. */
                vmw_cmdbuf_ctx_process(man, ctx, &dummy);

                /*
                 * Add the preempted queue after the command buffer
                 * that caused an error.
                 */
                list_splice_init(&ctx->preempted, restart_head[i].prev);

                /*
                 * Finally, add all command buffers to the head of the
                 * submitted queue, to rerun them.
                 */

                ctx->block_submission = false;
                list_splice_init(&restart_head[i], &ctx->submitted);
        }

        vmw_cmdbuf_man_process(man);
        spin_unlock(&man->lock);

        if (global_block && vmw_cmdbuf_startstop(man, 0, true))
                DRM_ERROR("Failed restarting command buffer contexts\n");

        /* Send a new fence in case one was removed */
        if (send_fence) {
                vmw_cmd_send_fence(man->dev_priv, &dummy);
                wake_up_all(&man->idle_queue);
        }

        mutex_unlock(&man->error_mutex);
}

/**
 * vmw_cmdbuf_man_idle - Check whether the command buffer manager is idle.
 *
 * @man: The command buffer manager.
 * @check_preempted: Check also the preempted queue for pending command buffers.
 */
static bool vmw_cmdbuf_man_idle(struct vmw_cmdbuf_man *man,
                                bool check_preempted)
{
        struct vmw_cmdbuf_context *ctx;
        bool idle = false;
        int i;

        spin_lock(&man->lock);
        vmw_cmdbuf_man_process(man);
        for_each_cmdbuf_ctx(man, i, ctx) {
                if (!list_empty(&ctx->submitted) ||
                    !list_empty(&ctx->hw_submitted) ||
                    (check_preempted && !list_empty(&ctx->preempted)))
                        goto out_unlock;
        }

        idle = list_empty(&man->error);

out_unlock:
        spin_unlock(&man->lock);

        return idle;
}

/**
 * __vmw_cmdbuf_cur_flush - Flush the current command buffer for small kernel
 * command submissions
 *
 * @man: The command buffer manager.
 *
 * Flushes the current command buffer without allocating a new one. A new one
 * is automatically allocated when needed. Call with @man->cur_mutex held.
 */
static void __vmw_cmdbuf_cur_flush(struct vmw_cmdbuf_man *man)
{
        struct vmw_cmdbuf_header *cur = man->cur;

        lockdep_assert_held_once(&man->cur_mutex);

        if (!cur)
                return;

        spin_lock(&man->lock);
        if (man->cur_pos == 0) {
                __vmw_cmdbuf_header_free(cur);
                goto out_unlock;
        }

        man->cur->cb_header->length = man->cur_pos;
        vmw_cmdbuf_ctx_add(man, man->cur, SVGA_CB_CONTEXT_0);
out_unlock:
        spin_unlock(&man->lock);
        man->cur = NULL;
        man->cur_pos = 0;
}

/**
 * vmw_cmdbuf_cur_flush - Flush the current command buffer for small kernel
 * command submissions
 *
 * @man: The command buffer manager.
 * @interruptible: Whether to sleep interruptibly when waiting.
 *
 * Flushes the current command buffer without allocating a new one. A new one
 * is automatically allocated when needed.
 */
int vmw_cmdbuf_cur_flush(struct vmw_cmdbuf_man *man,
                         bool interruptible)
{
        int ret = vmw_cmdbuf_cur_lock(man, interruptible);

        if (ret)
                return ret;

        __vmw_cmdbuf_cur_flush(man);
        vmw_cmdbuf_cur_unlock(man);

        return 0;
}

/**
 * vmw_cmdbuf_idle - Wait for command buffer manager idle.
 *
 * @man: The command buffer manager.
 * @interruptible: Sleep interruptible while waiting.
 * @timeout: Time out after this many ticks.
 *
 * Wait until the command buffer manager has processed all command buffers,
 * or until a timeout occurs. If a timeout occurs, the function will return
 * -EBUSY.
 */
int vmw_cmdbuf_idle(struct vmw_cmdbuf_man *man, bool interruptible,
                    unsigned long timeout)
{
        int ret;

        ret = vmw_cmdbuf_cur_flush(man, interruptible);
        vmw_generic_waiter_add(man->dev_priv,
                               SVGA_IRQFLAG_COMMAND_BUFFER,
                               &man->dev_priv->cmdbuf_waiters);

        if (interruptible) {
                ret = wait_event_interruptible_timeout
                        (man->idle_queue, vmw_cmdbuf_man_idle(man, true),
                         timeout);
        } else {
                ret = wait_event_timeout
                        (man->idle_queue, vmw_cmdbuf_man_idle(man, true),
                         timeout);
        }
        vmw_generic_waiter_remove(man->dev_priv,
                                  SVGA_IRQFLAG_COMMAND_BUFFER,
                                  &man->dev_priv->cmdbuf_waiters);
        if (ret == 0) {
                if (!vmw_cmdbuf_man_idle(man, true))
                        ret = -EBUSY;
                else
                        ret = 0;
        }
        if (ret > 0)
                ret = 0;

        return ret;
}

/**
 * vmw_cmdbuf_try_alloc - Try to allocate buffer space from the main pool.
 *
 * @man: The command buffer manager.
 * @info: Allocation info. Holds the requested size in pages on entry;
 * on success the allocated range-manager node is returned in @info->node
 * and @info->done is set to true.
 *
 * Try to allocate buffer space from the main pool. Returns true if it
 * succeeded.
 */
static bool vmw_cmdbuf_try_alloc(struct vmw_cmdbuf_man *man,
                                 struct vmw_cmdbuf_alloc_info *info)
{
        int ret;

        if (info->done)
                return true;

        memset(info->node, 0, sizeof(*info->node));
        spin_lock(&man->lock);
        ret = drm_mm_insert_node(&man->mm, info->node, info->page_size);
        if (ret) {
                vmw_cmdbuf_man_process(man);
                ret = drm_mm_insert_node(&man->mm, info->node, info->page_size);
        }

        spin_unlock(&man->lock);
        info->done = !ret;

        return info->done;
}
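
/*
 * Note: this function also serves as the wait_event() condition in
 * vmw_cmdbuf_alloc_space() below; the early return on @info->done makes
 * repeated evaluation of the condition harmless.
 */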

/**
 * vmw_cmdbuf_alloc_space - Allocate buffer space from the main pool.
 *
 * @man: The command buffer manager.
 * @node: Pointer to pre-allocated range-manager node.
 * @size: The size of the allocation.
 * @interruptible: Whether to sleep interruptible while waiting for space.
 *
 * This function allocates buffer space from the main pool, and if there is
 * no space currently available, it turns on IRQ handling and sleeps
 * waiting for it to become available.
 */
static int vmw_cmdbuf_alloc_space(struct vmw_cmdbuf_man *man,
                                  struct drm_mm_node *node,
                                  size_t size,
                                  bool interruptible)
{
        struct vmw_cmdbuf_alloc_info info;

        info.page_size = PFN_UP(size);
        info.node = node;
        info.done = false;

        /*
         * To prevent starvation of large requests, only one allocating call
         * at a time waiting for space.
         */
        if (interruptible) {
                if (mutex_lock_interruptible(&man->space_mutex))
                        return -ERESTARTSYS;
        } else {
                mutex_lock(&man->space_mutex);
        }

        /* Try to allocate space without waiting. */
        if (vmw_cmdbuf_try_alloc(man, &info))
                goto out_unlock;

        vmw_generic_waiter_add(man->dev_priv,
                               SVGA_IRQFLAG_COMMAND_BUFFER,
                               &man->dev_priv->cmdbuf_waiters);

        if (interruptible) {
                int ret;

                ret = wait_event_interruptible
                        (man->alloc_queue, vmw_cmdbuf_try_alloc(man, &info));
                if (ret) {
                        vmw_generic_waiter_remove
                                (man->dev_priv, SVGA_IRQFLAG_COMMAND_BUFFER,
                                 &man->dev_priv->cmdbuf_waiters);
                        mutex_unlock(&man->space_mutex);
                        return ret;
                }
        } else {
                wait_event(man->alloc_queue, vmw_cmdbuf_try_alloc(man, &info));
        }
        vmw_generic_waiter_remove(man->dev_priv,
                                  SVGA_IRQFLAG_COMMAND_BUFFER,
                                  &man->dev_priv->cmdbuf_waiters);

out_unlock:
        mutex_unlock(&man->space_mutex);

        return 0;
}

/**
 * vmw_cmdbuf_space_pool - Set up a command buffer header with command buffer
 * space from the main pool.
 *
 * @man: The command buffer manager.
 * @header: Pointer to the header to set up.
 * @size: The requested size of the buffer space.
 * @interruptible: Whether to sleep interruptible while waiting for space.
 */
static int vmw_cmdbuf_space_pool(struct vmw_cmdbuf_man *man,
                                 struct vmw_cmdbuf_header *header,
                                 size_t size,
                                 bool interruptible)
{
        SVGACBHeader *cb_hdr;
        size_t offset;
        int ret;

        if (!man->has_pool)
                return -ENOMEM;

        ret = vmw_cmdbuf_alloc_space(man, &header->node, size, interruptible);

        if (ret)
                return ret;

        header->cb_header = dma_pool_zalloc(man->headers, GFP_KERNEL,
                                            &header->handle);
        if (!header->cb_header) {
                ret = -ENOMEM;
                goto out_no_cb_header;
        }

        header->size = header->node.size << PAGE_SHIFT;
        cb_hdr = header->cb_header;
        offset = header->node.start << PAGE_SHIFT;
        header->cmd = man->map + offset;
        if (man->using_mob) {
                cb_hdr->flags = SVGA_CB_FLAG_MOB;
                cb_hdr->ptr.mob.mobid = man->cmd_space->resource->start;
                cb_hdr->ptr.mob.mobOffset = offset;
        } else {
                cb_hdr->ptr.pa = (u64)man->handle + (u64)offset;
        }

        return 0;

out_no_cb_header:
        spin_lock(&man->lock);
        drm_mm_remove_node(&header->node);
        spin_unlock(&man->lock);

        return ret;
}

/**
 * vmw_cmdbuf_space_inline - Set up a command buffer header with
 * inline command buffer space.
 *
 * @man: The command buffer manager.
 * @header: Pointer to the header to set up.
 * @size: The requested size of the buffer space.
 */
static int vmw_cmdbuf_space_inline(struct vmw_cmdbuf_man *man,
                                   struct vmw_cmdbuf_header *header,
                                   int size)
{
        struct vmw_cmdbuf_dheader *dheader;
        SVGACBHeader *cb_hdr;

        if (WARN_ON_ONCE(size > VMW_CMDBUF_INLINE_SIZE))
                return -ENOMEM;

        dheader = dma_pool_zalloc(man->dheaders, GFP_KERNEL,
                                  &header->handle);
        if (!dheader)
                return -ENOMEM;

        header->inline_space = true;
        header->size = VMW_CMDBUF_INLINE_SIZE;
        cb_hdr = &dheader->cb_header;
        header->cb_header = cb_hdr;
        header->cmd = dheader->cmd;
        cb_hdr->status = SVGA_CB_STATUS_NONE;
        cb_hdr->flags = SVGA_CB_FLAG_NONE;
        cb_hdr->ptr.pa = (u64)header->handle +
                (u64)offsetof(struct vmw_cmdbuf_dheader, cmd);

        return 0;
}

/**
 * vmw_cmdbuf_alloc - Allocate a command buffer header complete with
 * command buffer space.
 *
 * @man: The command buffer manager.
 * @size: The requested size of the buffer space.
 * @interruptible: Whether to sleep interruptible while waiting for space.
 * @p_header: points to a header pointer to populate on successful return.
 *
 * Returns a pointer to command buffer space if successful. Otherwise
 * returns an error pointer. The header pointer returned in @p_header should
 * be used for upcoming calls to vmw_cmdbuf_reserve() and vmw_cmdbuf_commit().
 */
void *vmw_cmdbuf_alloc(struct vmw_cmdbuf_man *man,
                       size_t size, bool interruptible,
                       struct vmw_cmdbuf_header **p_header)
{
        struct vmw_cmdbuf_header *header;
        int ret = 0;

        *p_header = NULL;

        header = kzalloc(sizeof(*header), GFP_KERNEL);
        if (!header)
                return ERR_PTR(-ENOMEM);

        if (size <= VMW_CMDBUF_INLINE_SIZE)
                ret = vmw_cmdbuf_space_inline(man, header, size);
        else
                ret = vmw_cmdbuf_space_pool(man, header, size, interruptible);

        if (ret) {
                kfree(header);
                return ERR_PTR(ret);
        }

        header->man = man;
        INIT_LIST_HEAD(&header->list);
        header->cb_header->status = SVGA_CB_STATUS_NONE;
        *p_header = header;

        return header->cmd;
}
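
/*
 * Usage sketch (illustrative only, not a call site in this file): a
 * caller that needs a dedicated command buffer would typically do
 *
 *	struct vmw_cmdbuf_header *header;
 *	void *buf = vmw_cmdbuf_alloc(man, size, true, &header);
 *
 *	if (IS_ERR(buf))
 *		return PTR_ERR(buf);
 *	cmd = vmw_cmdbuf_reserve(man, size, SVGA3D_INVALID_ID, true, header);
 *	... write the commands to cmd ...
 *	vmw_cmdbuf_commit(man, size, header, true);
 */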

/**
 * vmw_cmdbuf_reserve_cur - Reserve space for commands in the current
 * command buffer.
 *
 * @man: The command buffer manager.
 * @size: The requested size of the commands.
 * @ctx_id: The context id if any. Otherwise set to SVGA3D_INVALID_ID.
 * @interruptible: Whether to sleep interruptible while waiting for space.
 *
 * Returns a pointer to command buffer space if successful. Otherwise
 * returns an error pointer.
 */
static void *vmw_cmdbuf_reserve_cur(struct vmw_cmdbuf_man *man,
                                    size_t size,
                                    int ctx_id,
                                    bool interruptible)
{
        struct vmw_cmdbuf_header *cur;
        void *ret;

        if (vmw_cmdbuf_cur_lock(man, interruptible))
                return ERR_PTR(-ERESTARTSYS);

        cur = man->cur;
        if (cur && (size + man->cur_pos > cur->size ||
                    ((cur->cb_header->flags & SVGA_CB_FLAG_DX_CONTEXT) &&
                     ctx_id != cur->cb_header->dxContext)))
                __vmw_cmdbuf_cur_flush(man);

        if (!man->cur) {
                ret = vmw_cmdbuf_alloc(man,
                                       max_t(size_t, size, man->default_size),
                                       interruptible, &man->cur);
                if (IS_ERR(ret)) {
                        vmw_cmdbuf_cur_unlock(man);
                        return ret;
                }

                cur = man->cur;
        }

        if (ctx_id != SVGA3D_INVALID_ID) {
                cur->cb_header->flags |= SVGA_CB_FLAG_DX_CONTEXT;
                cur->cb_header->dxContext = ctx_id;
        }

        cur->reserved = size;

        return (void *) (man->cur->cmd + man->cur_pos);
}

/**
 * vmw_cmdbuf_commit_cur - Commit commands in the current command buffer.
 *
 * @man: The command buffer manager.
 * @size: The size of the commands actually written.
 * @flush: Whether to flush the command buffer immediately.
 */
static void vmw_cmdbuf_commit_cur(struct vmw_cmdbuf_man *man,
                                  size_t size, bool flush)
{
        struct vmw_cmdbuf_header *cur = man->cur;

        lockdep_assert_held_once(&man->cur_mutex);

        WARN_ON(size > cur->reserved);
        man->cur_pos += size;
        if (!size)
                cur->cb_header->flags &= ~SVGA_CB_FLAG_DX_CONTEXT;
        if (flush)
                __vmw_cmdbuf_cur_flush(man);
        vmw_cmdbuf_cur_unlock(man);
}

/**
 * vmw_cmdbuf_reserve - Reserve space for commands in a command buffer.
 *
 * @man: The command buffer manager.
 * @size: The requested size of the commands.
 * @ctx_id: The context id if any. Otherwise set to SVGA3D_INVALID_ID.
 * @interruptible: Whether to sleep interruptible while waiting for space.
 * @header: Header of the command buffer. NULL if the current command buffer
 * should be used.
 *
 * Returns a pointer to command buffer space if successful. Otherwise
 * returns an error pointer.
 */
void *vmw_cmdbuf_reserve(struct vmw_cmdbuf_man *man, size_t size,
                         int ctx_id, bool interruptible,
                         struct vmw_cmdbuf_header *header)
{
        if (!header)
                return vmw_cmdbuf_reserve_cur(man, size, ctx_id, interruptible);

        if (size > header->size)
                return ERR_PTR(-EINVAL);

        if (ctx_id != SVGA3D_INVALID_ID) {
                header->cb_header->flags |= SVGA_CB_FLAG_DX_CONTEXT;
                header->cb_header->dxContext = ctx_id;
        }

        header->reserved = size;
        return header->cmd;
}

/**
 * vmw_cmdbuf_commit - Commit commands in a command buffer.
 *
 * @man: The command buffer manager.
 * @size: The size of the commands actually written.
 * @header: Header of the command buffer. NULL if the current command buffer
 * should be used.
 * @flush: Whether to flush the command buffer immediately.
 */
void vmw_cmdbuf_commit(struct vmw_cmdbuf_man *man, size_t size,
                       struct vmw_cmdbuf_header *header, bool flush)
{
        if (!header) {
                vmw_cmdbuf_commit_cur(man, size, flush);
                return;
        }

        (void) vmw_cmdbuf_cur_lock(man, false);
        __vmw_cmdbuf_cur_flush(man);
        WARN_ON(size > header->reserved);
        man->cur = header;
        man->cur_pos = size;
        if (!size)
                header->cb_header->flags &= ~SVGA_CB_FLAG_DX_CONTEXT;
        if (flush)
                __vmw_cmdbuf_cur_flush(man);
        vmw_cmdbuf_cur_unlock(man);
}
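
/*
 * Usage sketch (illustrative only): piggybacking a small kernel command
 * on the manager's current buffer goes through the NULL-header path:
 *
 *	cmd = vmw_cmdbuf_reserve(man, size, SVGA3D_INVALID_ID, true, NULL);
 *	if (IS_ERR(cmd))
 *		return PTR_ERR(cmd);
 *	... write size bytes of commands to cmd ...
 *	vmw_cmdbuf_commit(man, size, NULL, false);
 */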

/**
 * vmw_cmdbuf_send_device_command - Send a command through the device context.
 *
 * @man: The command buffer manager.
 * @command: Pointer to the command to send.
 * @size: Size of the command.
 *
 * Synchronously sends a device context command.
 */
static int vmw_cmdbuf_send_device_command(struct vmw_cmdbuf_man *man,
                                          const void *command,
                                          size_t size)
{
        struct vmw_cmdbuf_header *header;
        int status;
        void *cmd = vmw_cmdbuf_alloc(man, size, false, &header);

        if (IS_ERR(cmd))
                return PTR_ERR(cmd);

        memcpy(cmd, command, size);
        header->cb_header->length = size;
        header->cb_context = SVGA_CB_CONTEXT_DEVICE;
        spin_lock(&man->lock);
        status = vmw_cmdbuf_header_submit(header);
        spin_unlock(&man->lock);
        vmw_cmdbuf_header_free(header);

        if (status != SVGA_CB_STATUS_COMPLETED) {
                DRM_ERROR("Device context command failed with status %d\n",
                          status);
                return -EINVAL;
        }

        return 0;
}
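
/*
 * Note: this relies on the device processing SVGA_CB_CONTEXT_DEVICE
 * buffers synchronously with the submitting register write, so the
 * status read back by vmw_cmdbuf_header_submit() is already final.
 */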

/**
 * vmw_cmdbuf_preempt - Send a preempt command through the device
 * context.
 *
 * @man: The command buffer manager.
 * @context: Device context to pass command through.
 *
 * Synchronously sends a preempt command.
 */
static int vmw_cmdbuf_preempt(struct vmw_cmdbuf_man *man, u32 context)
{
        struct {
                uint32 id;
                SVGADCCmdPreempt body;
        } __packed cmd;

        cmd.id = SVGA_DC_CMD_PREEMPT;
        cmd.body.context = SVGA_CB_CONTEXT_0 + context;
        cmd.body.ignoreIDZero = 0;

        return vmw_cmdbuf_send_device_command(man, &cmd, sizeof(cmd));
}

/**
 * vmw_cmdbuf_startstop - Send a start / stop command through the device
 * context.
 *
 * @man: The command buffer manager.
 * @context: Device context to start/stop.
 * @enable: Whether to enable or disable the context.
 *
 * Synchronously sends a device start / stop context command.
 */
static int vmw_cmdbuf_startstop(struct vmw_cmdbuf_man *man, u32 context,
                                bool enable)
{
        struct {
                uint32 id;
                SVGADCCmdStartStop body;
        } __packed cmd;

        cmd.id = SVGA_DC_CMD_START_STOP_CONTEXT;
        cmd.body.enable = (enable) ? 1 : 0;
        cmd.body.context = SVGA_CB_CONTEXT_0 + context;

        return vmw_cmdbuf_send_device_command(man, &cmd, sizeof(cmd));
}

/**
 * vmw_cmdbuf_set_pool_size - Set command buffer manager sizes
 *
 * @man: The command buffer manager.
 * @size: The size of the main space pool.
 *
 * Set the size and allocate the main command buffer space pool.
 * If successful, this enables large command submissions.
 * Note that this function requires that rudimentary command
 * submission is already available and that the MOB memory manager is alive.
 * Returns 0 on success. Negative error code on failure.
 */
int vmw_cmdbuf_set_pool_size(struct vmw_cmdbuf_man *man, size_t size)
{
        struct vmw_private *dev_priv = man->dev_priv;
        bool dummy;
        int ret;

        if (man->has_pool)
                return -EINVAL;

        /* First, try to allocate a huge chunk of DMA memory */
        size = PAGE_ALIGN(size);
        man->map = dma_alloc_coherent(dev_priv->drm.dev, size,
                                      &man->handle, GFP_KERNEL);
        if (man->map) {
                man->using_mob = false;
        } else {
                /*
                 * DMA memory failed. If we can have command buffers in a
                 * MOB, try to use that instead. Note that this will
                 * actually call into the already enabled manager, when
                 * binding the MOB.
                 */
                if (!(dev_priv->capabilities & SVGA_CAP_DX) ||
                    !dev_priv->has_mob)
                        return -ENOMEM;

                ret = vmw_bo_create_kernel(dev_priv, size,
                                           &vmw_mob_placement,
                                           &man->cmd_space);
                if (ret)
                        return ret;

                man->using_mob = true;
                ret = ttm_bo_kmap(man->cmd_space, 0, size >> PAGE_SHIFT,
                                  &man->map_obj);
                if (ret)
                        goto out_no_map;

                man->map = ttm_kmap_obj_virtual(&man->map_obj, &dummy);
        }

        man->size = size;
        drm_mm_init(&man->mm, 0, size >> PAGE_SHIFT);

        man->has_pool = true;

        /*
         * For now, set the default size to VMW_CMDBUF_INLINE_SIZE to
         * prevent deadlocks from happening when vmw_cmdbuf_space_pool()
         * needs to wait for space and we block on further command
         * submissions to be able to free up space.
         */
        man->default_size = VMW_CMDBUF_INLINE_SIZE;
        drm_info(&dev_priv->drm,
                 "Using command buffers with %s pool.\n",
                 (man->using_mob) ? "MOB" : "DMA");

        return 0;

out_no_map:
        if (man->using_mob) {
                ttm_bo_put(man->cmd_space);
                man->cmd_space = NULL;
        }

        return ret;
}

/**
 * vmw_cmdbuf_man_create - Create a command buffer manager and enable it for
 * inline command buffer submissions only.
 *
 * @dev_priv: Pointer to device private structure.
 *
 * Returns a pointer to a command buffer manager on success, or an error
 * pointer on failure. The command buffer manager will be enabled for
 * submissions of size VMW_CMDBUF_INLINE_SIZE only.
 */
struct vmw_cmdbuf_man *vmw_cmdbuf_man_create(struct vmw_private *dev_priv)
{
        struct vmw_cmdbuf_man *man;
        struct vmw_cmdbuf_context *ctx;
        unsigned int i;
        int ret;

        if (!(dev_priv->capabilities & SVGA_CAP_COMMAND_BUFFERS))
                return ERR_PTR(-ENOSYS);

        man = kzalloc(sizeof(*man), GFP_KERNEL);
        if (!man)
                return ERR_PTR(-ENOMEM);

        man->num_contexts = (dev_priv->capabilities & SVGA_CAP_HP_CMD_QUEUE) ?
                2 : 1;
        man->headers = dma_pool_create("vmwgfx cmdbuf",
                                       dev_priv->drm.dev,
                                       sizeof(SVGACBHeader),
                                       64, PAGE_SIZE);
        if (!man->headers) {
                ret = -ENOMEM;
                goto out_no_pool;
        }

        man->dheaders = dma_pool_create("vmwgfx inline cmdbuf",
                                        dev_priv->drm.dev,
                                        sizeof(struct vmw_cmdbuf_dheader),
                                        64, PAGE_SIZE);
        if (!man->dheaders) {
                ret = -ENOMEM;
                goto out_no_dpool;
        }

        for_each_cmdbuf_ctx(man, i, ctx)
                vmw_cmdbuf_ctx_init(ctx);

        INIT_LIST_HEAD(&man->error);
        spin_lock_init(&man->lock);
        mutex_init(&man->cur_mutex);
        mutex_init(&man->space_mutex);
        mutex_init(&man->error_mutex);
        man->default_size = VMW_CMDBUF_INLINE_SIZE;
        init_waitqueue_head(&man->alloc_queue);
        init_waitqueue_head(&man->idle_queue);
        man->dev_priv = dev_priv;
        man->max_hw_submitted = SVGA_CB_MAX_QUEUED_PER_CONTEXT - 1;
        INIT_WORK(&man->work, &vmw_cmdbuf_work_func);
        vmw_generic_waiter_add(dev_priv, SVGA_IRQFLAG_ERROR,
                               &dev_priv->error_waiters);
        ret = vmw_cmdbuf_startstop(man, 0, true);
        if (ret) {
                DRM_ERROR("Failed starting command buffer contexts\n");
                vmw_cmdbuf_man_destroy(man);
                return ERR_PTR(ret);
        }

        return man;

out_no_dpool:
        dma_pool_destroy(man->headers);
out_no_pool:
        kfree(man);

        return ERR_PTR(ret);
}
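
/*
 * Typical lifecycle (sketch, inferred from the kernel-doc above and
 * below; the actual call sites live elsewhere in the driver):
 *
 *	man = vmw_cmdbuf_man_create(dev_priv);	(inline submissions only)
 *	vmw_cmdbuf_set_pool_size(man, size);	(enable large submissions)
 *	...
 *	vmw_cmdbuf_remove_pool(man);		(before MOB teardown)
 *	vmw_cmdbuf_man_destroy(man);
 */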

/**
 * vmw_cmdbuf_remove_pool - Take down the main buffer space pool.
 *
 * @man: Pointer to a command buffer manager.
 *
 * This function removes the main buffer space pool, and should be called
 * before MOB memory management is removed. When this function has been called,
 * only small command buffer submissions of size VMW_CMDBUF_INLINE_SIZE or
 * less are allowed, and the default size of the command buffer for small kernel
 * submissions is also set to this size.
 */
void vmw_cmdbuf_remove_pool(struct vmw_cmdbuf_man *man)
{
        if (!man->has_pool)
                return;

        man->has_pool = false;
        man->default_size = VMW_CMDBUF_INLINE_SIZE;
        (void) vmw_cmdbuf_idle(man, false, 10*HZ);
        if (man->using_mob) {
                (void) ttm_bo_kunmap(&man->map_obj);
                ttm_bo_put(man->cmd_space);
                man->cmd_space = NULL;
        } else {
                dma_free_coherent(man->dev_priv->drm.dev,
                                  man->size, man->map, man->handle);
        }
}

/**
 * vmw_cmdbuf_man_destroy - Take down a command buffer manager.
 *
 * @man: Pointer to a command buffer manager.
 *
 * This function idles and then destroys a command buffer manager.
 */
void vmw_cmdbuf_man_destroy(struct vmw_cmdbuf_man *man)
{
        WARN_ON_ONCE(man->has_pool);
        (void) vmw_cmdbuf_idle(man, false, 10*HZ);

        if (vmw_cmdbuf_startstop(man, 0, false))
                DRM_ERROR("Failed stopping command buffer contexts.\n");

        vmw_generic_waiter_remove(man->dev_priv, SVGA_IRQFLAG_ERROR,
                                  &man->dev_priv->error_waiters);
        (void) cancel_work_sync(&man->work);
        dma_pool_destroy(man->dheaders);
        dma_pool_destroy(man->headers);
        mutex_destroy(&man->cur_mutex);
        mutex_destroy(&man->space_mutex);
        mutex_destroy(&man->error_mutex);
        kfree(man);
}