linux/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
   1// SPDX-License-Identifier: GPL-2.0 OR MIT
   2/**************************************************************************
   3 *
   4 * Copyright © 2011-2018 VMware, Inc., Palo Alto, CA., USA
   5 * All Rights Reserved.
   6 *
   7 * Permission is hereby granted, free of charge, to any person obtaining a
   8 * copy of this software and associated documentation files (the
   9 * "Software"), to deal in the Software without restriction, including
  10 * without limitation the rights to use, copy, modify, merge, publish,
  11 * distribute, sub license, and/or sell copies of the Software, and to
  12 * permit persons to whom the Software is furnished to do so, subject to
  13 * the following conditions:
  14 *
  15 * The above copyright notice and this permission notice (including the
  16 * next paragraph) shall be included in all copies or substantial portions
  17 * of the Software.
  18 *
  19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  21 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
  22 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
  23 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
  24 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
  25 * USE OR OTHER DEALINGS IN THE SOFTWARE.
  26 *
  27 **************************************************************************/
  28
  29#include <drm/ttm/ttm_placement.h>
  30
  31#include "vmwgfx_drv.h"
  32#include "ttm_object.h"
  33
  34
  35/**
  36 * struct vmw_user_buffer_object - User-space-visible buffer object
  37 *
  38 * @prime: The prime object providing user visibility.
  39 * @vbo: The struct vmw_buffer_object
  40 */
  41struct vmw_user_buffer_object {
  42        struct ttm_prime_object prime;
  43        struct vmw_buffer_object vbo;
  44};
  45
  46
  47/**
  48 * vmw_buffer_object - Convert a struct ttm_buffer_object to a struct
  49 * vmw_buffer_object.
  50 *
  51 * @bo: Pointer to the TTM buffer object.
  52 * Return: Pointer to the struct vmw_buffer_object embedding the
  53 * TTM buffer object.
  54 */
  55static struct vmw_buffer_object *
  56vmw_buffer_object(struct ttm_buffer_object *bo)
  57{
  58        return container_of(bo, struct vmw_buffer_object, base);
  59}
  60
  61
  62/**
  63 * vmw_user_buffer_object - Convert a struct ttm_buffer_object to a struct
  64 * vmw_user_buffer_object.
  65 *
  66 * @bo: Pointer to the TTM buffer object.
   67 * Return: Pointer to the struct vmw_user_buffer_object embedding the TTM
   68 * buffer object.
  69 */
  70static struct vmw_user_buffer_object *
  71vmw_user_buffer_object(struct ttm_buffer_object *bo)
  72{
  73        struct vmw_buffer_object *vmw_bo = vmw_buffer_object(bo);
  74
  75        return container_of(vmw_bo, struct vmw_user_buffer_object, vbo);
  76}
  77
  78
  79/**
   80 * vmw_bo_pin_in_placement - Validate a buffer to a placement and pin it.
  81 *
  82 * @dev_priv:  Driver private.
  83 * @buf:  DMA buffer to move.
  84 * @placement:  The placement to pin it.
  85 * @interruptible:  Use interruptible wait.
  86 * Return: Zero on success, Negative error code on failure. In particular
  87 * -ERESTARTSYS if interrupted by a signal
  88 */
  89int vmw_bo_pin_in_placement(struct vmw_private *dev_priv,
  90                            struct vmw_buffer_object *buf,
  91                            struct ttm_placement *placement,
  92                            bool interruptible)
  93{
  94        struct ttm_operation_ctx ctx = {interruptible, false };
  95        struct ttm_buffer_object *bo = &buf->base;
  96        int ret;
  97        uint32_t new_flags;
  98
  99        ret = ttm_write_lock(&dev_priv->reservation_sem, interruptible);
 100        if (unlikely(ret != 0))
 101                return ret;
 102
 103        vmw_execbuf_release_pinned_bo(dev_priv);
 104
 105        ret = ttm_bo_reserve(bo, interruptible, false, NULL);
 106        if (unlikely(ret != 0))
 107                goto err;
 108
 109        if (buf->base.pin_count > 0)
 110                ret = ttm_bo_mem_compat(placement, &bo->mem,
 111                                        &new_flags) == true ? 0 : -EINVAL;
 112        else
 113                ret = ttm_bo_validate(bo, placement, &ctx);
 114
 115        if (!ret)
 116                vmw_bo_pin_reserved(buf, true);
 117
 118        ttm_bo_unreserve(bo);
 119
 120err:
 121        ttm_write_unlock(&dev_priv->reservation_sem);
 122        return ret;
 123}
 124
 125
 126/**
 127 * vmw_bo_pin_in_vram_or_gmr - Move a buffer to vram or gmr.
 128 *
 129 * This function takes the reservation_sem in write mode.
 130 * Flushes and unpins the query bo to avoid failures.
 131 *
 132 * @dev_priv:  Driver private.
 133 * @buf:  DMA buffer to move.
 135 * @interruptible:  Use interruptible wait.
 136 * Return: Zero on success, Negative error code on failure. In particular
 137 * -ERESTARTSYS if interrupted by a signal
 138 */
 139int vmw_bo_pin_in_vram_or_gmr(struct vmw_private *dev_priv,
 140                              struct vmw_buffer_object *buf,
 141                              bool interruptible)
 142{
 143        struct ttm_operation_ctx ctx = {interruptible, false };
 144        struct ttm_buffer_object *bo = &buf->base;
 145        int ret;
 146        uint32_t new_flags;
 147
 148        ret = ttm_write_lock(&dev_priv->reservation_sem, interruptible);
 149        if (unlikely(ret != 0))
 150                return ret;
 151
 152        vmw_execbuf_release_pinned_bo(dev_priv);
 153
 154        ret = ttm_bo_reserve(bo, interruptible, false, NULL);
 155        if (unlikely(ret != 0))
 156                goto err;
 157
 158        if (buf->base.pin_count > 0) {
 159                ret = ttm_bo_mem_compat(&vmw_vram_gmr_placement, &bo->mem,
 160                                        &new_flags) == true ? 0 : -EINVAL;
 161                goto out_unreserve;
 162        }
 163
 164        ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, &ctx);
 165        if (likely(ret == 0) || ret == -ERESTARTSYS)
 166                goto out_unreserve;
 167
 168        ret = ttm_bo_validate(bo, &vmw_vram_placement, &ctx);
 169
 170out_unreserve:
 171        if (!ret)
 172                vmw_bo_pin_reserved(buf, true);
 173
 174        ttm_bo_unreserve(bo);
 175err:
 176        ttm_write_unlock(&dev_priv->reservation_sem);
 177        return ret;
 178}
 179
 180
 181/**
 182 * vmw_bo_pin_in_vram - Move a buffer to vram.
 183 *
 184 * This function takes the reservation_sem in write mode.
 185 * Flushes and unpins the query bo to avoid failures.
 186 *
 187 * @dev_priv:  Driver private.
 188 * @buf:  DMA buffer to move.
 189 * @interruptible:  Use interruptible wait.
 190 * Return: Zero on success, Negative error code on failure. In particular
 191 * -ERESTARTSYS if interrupted by a signal
 192 */
 193int vmw_bo_pin_in_vram(struct vmw_private *dev_priv,
 194                       struct vmw_buffer_object *buf,
 195                       bool interruptible)
 196{
 197        return vmw_bo_pin_in_placement(dev_priv, buf, &vmw_vram_placement,
 198                                       interruptible);
 199}
 200
 201
 202/**
 203 * vmw_bo_pin_in_start_of_vram - Move a buffer to start of vram.
 204 *
 205 * This function takes the reservation_sem in write mode.
 206 * Flushes and unpins the query bo to avoid failures.
 207 *
 208 * @dev_priv:  Driver private.
 209 * @buf:  DMA buffer to pin.
 210 * @interruptible:  Use interruptible wait.
 211 * Return: Zero on success, Negative error code on failure. In particular
 212 * -ERESTARTSYS if interrupted by a signal
 213 */
 214int vmw_bo_pin_in_start_of_vram(struct vmw_private *dev_priv,
 215                                struct vmw_buffer_object *buf,
 216                                bool interruptible)
 217{
 218        struct ttm_operation_ctx ctx = {interruptible, false };
 219        struct ttm_buffer_object *bo = &buf->base;
 220        struct ttm_placement placement;
 221        struct ttm_place place;
 222        int ret = 0;
 223        uint32_t new_flags;
 224
 225        place = vmw_vram_placement.placement[0];
 226        place.lpfn = bo->num_pages;
 227        placement.num_placement = 1;
 228        placement.placement = &place;
 229        placement.num_busy_placement = 1;
 230        placement.busy_placement = &place;
 231
 232        ret = ttm_write_lock(&dev_priv->reservation_sem, interruptible);
 233        if (unlikely(ret != 0))
 234                return ret;
 235
 236        vmw_execbuf_release_pinned_bo(dev_priv);
 237        ret = ttm_bo_reserve(bo, interruptible, false, NULL);
 238        if (unlikely(ret != 0))
 239                goto err_unlock;
 240
 241        /*
 242         * Is this buffer already in vram but not at the start of it?
 243         * In that case, evict it first because TTM isn't good at handling
 244         * that situation.
 245         */
 246        if (bo->mem.mem_type == TTM_PL_VRAM &&
 247            bo->mem.start < bo->num_pages &&
 248            bo->mem.start > 0 &&
 249            buf->base.pin_count == 0) {
 250                ctx.interruptible = false;
 251                (void) ttm_bo_validate(bo, &vmw_sys_placement, &ctx);
 252        }
 253
 254        if (buf->base.pin_count > 0)
 255                ret = ttm_bo_mem_compat(&placement, &bo->mem,
 256                                        &new_flags) == true ? 0 : -EINVAL;
 257        else
 258                ret = ttm_bo_validate(bo, &placement, &ctx);
 259
 260        /* For some reason we didn't end up at the start of vram */
 261        WARN_ON(ret == 0 && bo->mem.start != 0);
 262        if (!ret)
 263                vmw_bo_pin_reserved(buf, true);
 264
 265        ttm_bo_unreserve(bo);
 266err_unlock:
 267        ttm_write_unlock(&dev_priv->reservation_sem);
 268
 269        return ret;
 270}
 271
 272
 273/**
  274 * vmw_bo_unpin - Unpin the given buffer without moving it.
 275 *
  276 * This function takes the reservation_sem in read mode.
 277 *
 278 * @dev_priv:  Driver private.
 279 * @buf:  DMA buffer to unpin.
 280 * @interruptible:  Use interruptible wait.
 281 * Return: Zero on success, Negative error code on failure. In particular
 282 * -ERESTARTSYS if interrupted by a signal
 283 */
 284int vmw_bo_unpin(struct vmw_private *dev_priv,
 285                 struct vmw_buffer_object *buf,
 286                 bool interruptible)
 287{
 288        struct ttm_buffer_object *bo = &buf->base;
 289        int ret;
 290
 291        ret = ttm_read_lock(&dev_priv->reservation_sem, interruptible);
 292        if (unlikely(ret != 0))
 293                return ret;
 294
 295        ret = ttm_bo_reserve(bo, interruptible, false, NULL);
 296        if (unlikely(ret != 0))
 297                goto err;
 298
 299        vmw_bo_pin_reserved(buf, false);
 300
 301        ttm_bo_unreserve(bo);
 302
 303err:
 304        ttm_read_unlock(&dev_priv->reservation_sem);
 305        return ret;
 306}
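
/*
 * Usage sketch (illustrative only, not part of the driver): how a caller
 * might pair the pin and unpin helpers above. The function name is made up
 * and error handling is minimal; dev_priv and buf are assumed valid.
 *
 *	static int example_pin_cycle(struct vmw_private *dev_priv,
 *				     struct vmw_buffer_object *buf)
 *	{
 *		int ret;
 *
 *		// Move the buffer into VRAM and leave it pinned there.
 *		ret = vmw_bo_pin_in_vram(dev_priv, buf, true);
 *		if (ret)
 *			return ret;
 *
 *		// ... use the buffer while it is guaranteed not to move ...
 *
 *		// Drop the pin again; the buffer itself is not moved.
 *		return vmw_bo_unpin(dev_priv, buf, true);
 *	}
 */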
 307
 308/**
 309 * vmw_bo_get_guest_ptr - Get the guest ptr representing the current placement
 310 * of a buffer.
 311 *
 312 * @bo: Pointer to a struct ttm_buffer_object. Must be pinned or reserved.
 313 * @ptr: SVGAGuestPtr returning the result.
 314 */
 315void vmw_bo_get_guest_ptr(const struct ttm_buffer_object *bo,
 316                          SVGAGuestPtr *ptr)
 317{
 318        if (bo->mem.mem_type == TTM_PL_VRAM) {
 319                ptr->gmrId = SVGA_GMR_FRAMEBUFFER;
 320                ptr->offset = bo->mem.start << PAGE_SHIFT;
 321        } else {
 322                ptr->gmrId = bo->mem.start;
 323                ptr->offset = 0;
 324        }
 325}
 326
 327
 328/**
 329 * vmw_bo_pin_reserved - Pin or unpin a buffer object without moving it.
 330 *
 331 * @vbo: The buffer object. Must be reserved.
 332 * @pin: Whether to pin or unpin.
 333 *
 334 */
 335void vmw_bo_pin_reserved(struct vmw_buffer_object *vbo, bool pin)
 336{
 337        struct ttm_operation_ctx ctx = { false, true };
 338        struct ttm_place pl;
 339        struct ttm_placement placement;
 340        struct ttm_buffer_object *bo = &vbo->base;
 341        uint32_t old_mem_type = bo->mem.mem_type;
 342        int ret;
 343
 344        dma_resv_assert_held(bo->base.resv);
 345
 346        if (pin == !!bo->pin_count)
 347                return;
 348
 349        pl.fpfn = 0;
 350        pl.lpfn = 0;
 351        pl.mem_type = bo->mem.mem_type;
 352        pl.flags = bo->mem.placement;
 353
 354        memset(&placement, 0, sizeof(placement));
 355        placement.num_placement = 1;
 356        placement.placement = &pl;
 357
 358        ret = ttm_bo_validate(bo, &placement, &ctx);
 359
 360        BUG_ON(ret != 0 || bo->mem.mem_type != old_mem_type);
 361
 362        if (pin)
 363                ttm_bo_pin(bo);
 364        else
 365                ttm_bo_unpin(bo);
 366}
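
/*
 * Usage sketch (illustrative only): vmw_bo_pin_reserved() requires the
 * caller to hold the buffer reservation. A minimal, made-up calling
 * sequence could look like this:
 *
 *	ret = ttm_bo_reserve(&vbo->base, true, false, NULL);
 *	if (ret)
 *		return ret;
 *
 *	vmw_bo_pin_reserved(vbo, true);		// pin without moving
 *	// ... do work that relies on the buffer staying put ...
 *	vmw_bo_pin_reserved(vbo, false);	// balance the pin
 *
 *	ttm_bo_unreserve(&vbo->base);
 */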
 367
 368/**
 369 * vmw_bo_map_and_cache - Map a buffer object and cache the map
 370 *
 371 * @vbo: The buffer object to map
 372 * Return: A kernel virtual address or NULL if mapping failed.
 373 *
 374 * This function maps a buffer object into the kernel address space, or
 375 * returns the virtual kernel address of an already existing map. The virtual
 376 * address remains valid as long as the buffer object is pinned or reserved.
 377 * The cached map is torn down on either
 378 * 1) Buffer object move
 379 * 2) Buffer object swapout
 380 * 3) Buffer object destruction
 381 *
 382 */
 383void *vmw_bo_map_and_cache(struct vmw_buffer_object *vbo)
 384{
 385        struct ttm_buffer_object *bo = &vbo->base;
 386        bool not_used;
 387        void *virtual;
 388        int ret;
 389
 390        virtual = ttm_kmap_obj_virtual(&vbo->map, &not_used);
 391        if (virtual)
 392                return virtual;
 393
 394        ret = ttm_bo_kmap(bo, 0, bo->num_pages, &vbo->map);
 395        if (ret)
 396                DRM_ERROR("Buffer object map failed: %d.\n", ret);
 397
 398        return ttm_kmap_obj_virtual(&vbo->map, &not_used);
 399}
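
/*
 * Usage sketch (illustrative only): the cached map is meant to be used while
 * the buffer is pinned or reserved; it is torn down automatically on move,
 * swapout or destruction. Assuming "vbo" is pinned and "data"/"size" exist:
 *
 *	void *virtual = vmw_bo_map_and_cache(vbo);
 *
 *	if (!virtual)
 *		return -ENOMEM;		// mapping failed
 *
 *	memcpy(virtual, data, size);	// CPU access through the cached map
 *
 *	// No explicit unmap is required here; vmw_bo_unmap() can be used to
 *	// tear the cached map down early if that is desired.
 */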
 400
 401
 402/**
 403 * vmw_bo_unmap - Tear down a cached buffer object map.
 404 *
 405 * @vbo: The buffer object whose map we are tearing down.
 406 *
 407 * This function tears down a cached map set up using
  408 * vmw_bo_map_and_cache().
 409 */
 410void vmw_bo_unmap(struct vmw_buffer_object *vbo)
 411{
 412        if (vbo->map.bo == NULL)
 413                return;
 414
 415        ttm_bo_kunmap(&vbo->map);
 416}
 417
 418
 419/**
  420 * vmw_bo_acc_size - Calculate the TTM memory accounting size of a buffer
 421 *
 422 * @dev_priv: Pointer to a struct vmw_private identifying the device.
 423 * @size: The requested buffer size.
 424 * @user: Whether this is an ordinary dma buffer or a user dma buffer.
 425 */
 426static size_t vmw_bo_acc_size(struct vmw_private *dev_priv, size_t size,
 427                              bool user)
 428{
 429        static size_t struct_size, user_struct_size;
 430        size_t num_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
 431        size_t page_array_size = ttm_round_pot(num_pages * sizeof(void *));
 432
 433        if (unlikely(struct_size == 0)) {
 434                size_t backend_size = ttm_round_pot(vmw_tt_size);
 435
 436                struct_size = backend_size +
 437                        ttm_round_pot(sizeof(struct vmw_buffer_object));
  438                user_struct_size = backend_size +
  439                        ttm_round_pot(sizeof(struct vmw_user_buffer_object)) +
  440                        TTM_OBJ_EXTRA_SIZE;
 441        }
 442
 443        if (dev_priv->map_mode == vmw_dma_alloc_coherent)
 444                page_array_size +=
 445                        ttm_round_pot(num_pages * sizeof(dma_addr_t));
 446
 447        return ((user) ? user_struct_size : struct_size) +
 448                page_array_size;
 449}
 450
 451
 452/**
 453 * vmw_bo_bo_free - vmw buffer object destructor
 454 *
 455 * @bo: Pointer to the embedded struct ttm_buffer_object
 456 */
 457void vmw_bo_bo_free(struct ttm_buffer_object *bo)
 458{
 459        struct vmw_buffer_object *vmw_bo = vmw_buffer_object(bo);
 460
 461        WARN_ON(vmw_bo->dirty);
 462        WARN_ON(!RB_EMPTY_ROOT(&vmw_bo->res_tree));
 463        vmw_bo_unmap(vmw_bo);
 464        kfree(vmw_bo);
 465}
 466
 467
 468/**
 469 * vmw_user_bo_destroy - vmw buffer object destructor
 470 *
 471 * @bo: Pointer to the embedded struct ttm_buffer_object
 472 */
 473static void vmw_user_bo_destroy(struct ttm_buffer_object *bo)
 474{
 475        struct vmw_user_buffer_object *vmw_user_bo = vmw_user_buffer_object(bo);
 476        struct vmw_buffer_object *vbo = &vmw_user_bo->vbo;
 477
 478        WARN_ON(vbo->dirty);
 479        WARN_ON(!RB_EMPTY_ROOT(&vbo->res_tree));
 480        vmw_bo_unmap(vbo);
 481        ttm_prime_object_kfree(vmw_user_bo, prime);
 482}
 483
 484/**
 485 * vmw_bo_create_kernel - Create a pinned BO for internal kernel use.
 486 *
 487 * @dev_priv: Pointer to the device private struct
 488 * @size: size of the BO we need
 489 * @placement: where to put it
 490 * @p_bo: resulting BO
 491 *
  492 * Creates and pins a simple BO for in-kernel use.
 493 */
 494int vmw_bo_create_kernel(struct vmw_private *dev_priv, unsigned long size,
 495                         struct ttm_placement *placement,
 496                         struct ttm_buffer_object **p_bo)
 497{
 498        unsigned npages = PAGE_ALIGN(size) >> PAGE_SHIFT;
 499        struct ttm_operation_ctx ctx = { false, false };
 500        struct ttm_buffer_object *bo;
 501        size_t acc_size;
 502        int ret;
 503
 504        bo = kzalloc(sizeof(*bo), GFP_KERNEL);
 505        if (unlikely(!bo))
 506                return -ENOMEM;
 507
 508        acc_size = ttm_round_pot(sizeof(*bo));
 509        acc_size += ttm_round_pot(npages * sizeof(void *));
 510        acc_size += ttm_round_pot(sizeof(struct ttm_tt));
 511        ret = ttm_bo_init_reserved(&dev_priv->bdev, bo, size,
 512                                   ttm_bo_type_device, placement, 0,
 513                                   &ctx, acc_size, NULL, NULL, NULL);
 514        if (unlikely(ret))
 515                goto error_free;
 516
 517        ttm_bo_pin(bo);
 518        ttm_bo_unreserve(bo);
 519        *p_bo = bo;
 520
 521        return 0;
 522
 523error_free:
 524        kfree(bo);
 525        return ret;
 526}
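
/*
 * Usage sketch (illustrative only): creating a small pinned BO for internal
 * kernel use in system memory, and tearing it down again. Error handling of
 * the teardown reserve is omitted for brevity.
 *
 *	struct ttm_buffer_object *bo;
 *	int ret;
 *
 *	ret = vmw_bo_create_kernel(dev_priv, PAGE_SIZE, &vmw_sys_placement,
 *				   &bo);
 *	if (ret)
 *		return ret;
 *
 *	// ... use the pinned buffer object ...
 *
 *	ttm_bo_reserve(bo, false, false, NULL);
 *	ttm_bo_unpin(bo);		// drop the pin under reservation
 *	ttm_bo_unreserve(bo);
 *	ttm_bo_put(bo);			// drop the last reference
 */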
 527
 528/**
 529 * vmw_bo_init - Initialize a vmw buffer object
 530 *
 531 * @dev_priv: Pointer to the device private struct
 532 * @vmw_bo: Pointer to the struct vmw_buffer_object to initialize.
 533 * @size: Buffer object size in bytes.
 534 * @placement: Initial placement.
 535 * @interruptible: Whether waits should be performed interruptible.
 536 * @pin: If the BO should be created pinned at a fixed location.
 537 * @bo_free: The buffer object destructor.
 538 * Returns: Zero on success, negative error code on error.
 539 *
 540 * Note that on error, the code will free the buffer object.
 541 */
 542int vmw_bo_init(struct vmw_private *dev_priv,
 543                struct vmw_buffer_object *vmw_bo,
 544                size_t size, struct ttm_placement *placement,
 545                bool interruptible, bool pin,
 546                void (*bo_free)(struct ttm_buffer_object *bo))
 547{
 548        struct ttm_operation_ctx ctx = { interruptible, false };
 549        struct ttm_bo_device *bdev = &dev_priv->bdev;
 550        size_t acc_size;
 551        int ret;
 552        bool user = (bo_free == &vmw_user_bo_destroy);
 553
 554        WARN_ON_ONCE(!bo_free && (!user && (bo_free != vmw_bo_bo_free)));
 555
 556        acc_size = vmw_bo_acc_size(dev_priv, size, user);
 557        memset(vmw_bo, 0, sizeof(*vmw_bo));
 558        BUILD_BUG_ON(TTM_MAX_BO_PRIORITY <= 3);
 559        vmw_bo->base.priority = 3;
 560        vmw_bo->res_tree = RB_ROOT;
 561
 562        ret = ttm_bo_init_reserved(bdev, &vmw_bo->base, size,
 563                                   ttm_bo_type_device, placement,
 564                                   0, &ctx, acc_size, NULL, NULL, bo_free);
 565        if (unlikely(ret))
 566                return ret;
 567
 568        if (pin)
 569                ttm_bo_pin(&vmw_bo->base);
 570        ttm_bo_unreserve(&vmw_bo->base);
 571        return 0;
 572}
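
/*
 * Usage sketch (illustrative only): allocating and initializing a plain,
 * non user-visible vmw buffer object. Since vmw_bo_init() frees the object
 * through @bo_free on failure, the caller must not free it again.
 *
 *	struct vmw_buffer_object *vbo;
 *	int ret;
 *
 *	vbo = kzalloc(sizeof(*vbo), GFP_KERNEL);
 *	if (!vbo)
 *		return -ENOMEM;
 *
 *	ret = vmw_bo_init(dev_priv, vbo, size, &vmw_sys_placement,
 *			  true, false, &vmw_bo_bo_free);
 *	if (ret)
 *		return ret;	// vbo has already been freed
 *
 *	// ... use vbo, then drop it with vmw_bo_unreference(&vbo) ...
 */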
 573
 574
 575/**
 576 * vmw_user_bo_release - TTM reference base object release callback for
 577 * vmw user buffer objects
 578 *
 579 * @p_base: The TTM base object pointer about to be unreferenced.
 580 *
 581 * Clears the TTM base object pointer and drops the reference the
 582 * base object has on the underlying struct vmw_buffer_object.
 583 */
 584static void vmw_user_bo_release(struct ttm_base_object **p_base)
 585{
 586        struct vmw_user_buffer_object *vmw_user_bo;
 587        struct ttm_base_object *base = *p_base;
 588
 589        *p_base = NULL;
 590
 591        if (unlikely(base == NULL))
 592                return;
 593
 594        vmw_user_bo = container_of(base, struct vmw_user_buffer_object,
 595                                   prime.base);
 596        ttm_bo_put(&vmw_user_bo->vbo.base);
 597}
 598
 599
 600/**
  601 * vmw_user_bo_ref_obj_release - TTM synccpu reference object release callback
 602 * for vmw user buffer objects
 603 *
 604 * @base: Pointer to the TTM base object
 605 * @ref_type: Reference type of the reference reaching zero.
 606 *
 607 * Called when user-space drops its last synccpu reference on the buffer
  608 * object, either explicitly or as part of a cleanup file close.
 609 */
 610static void vmw_user_bo_ref_obj_release(struct ttm_base_object *base,
 611                                        enum ttm_ref_type ref_type)
 612{
 613        struct vmw_user_buffer_object *user_bo;
 614
 615        user_bo = container_of(base, struct vmw_user_buffer_object, prime.base);
 616
 617        switch (ref_type) {
 618        case TTM_REF_SYNCCPU_WRITE:
 619                atomic_dec(&user_bo->vbo.cpu_writers);
 620                break;
 621        default:
 622                WARN_ONCE(true, "Undefined buffer object reference release.\n");
 623        }
 624}
 625
 626
 627/**
 628 * vmw_user_bo_alloc - Allocate a user buffer object
 629 *
 630 * @dev_priv: Pointer to a struct device private.
 631 * @tfile: Pointer to a struct ttm_object_file on which to register the user
 632 * object.
 633 * @size: Size of the buffer object.
 634 * @shareable: Boolean whether the buffer is shareable with other open files.
 635 * @handle: Pointer to where the handle value should be assigned.
 636 * @p_vbo: Pointer to where the refcounted struct vmw_buffer_object pointer
 637 * should be assigned.
 638 * Return: Zero on success, negative error code on error.
 639 */
 640int vmw_user_bo_alloc(struct vmw_private *dev_priv,
 641                      struct ttm_object_file *tfile,
 642                      uint32_t size,
 643                      bool shareable,
 644                      uint32_t *handle,
 645                      struct vmw_buffer_object **p_vbo,
 646                      struct ttm_base_object **p_base)
 647{
 648        struct vmw_user_buffer_object *user_bo;
 649        int ret;
 650
 651        user_bo = kzalloc(sizeof(*user_bo), GFP_KERNEL);
 652        if (unlikely(!user_bo)) {
 653                DRM_ERROR("Failed to allocate a buffer.\n");
 654                return -ENOMEM;
 655        }
 656
 657        ret = vmw_bo_init(dev_priv, &user_bo->vbo, size,
 658                          (dev_priv->has_mob) ?
 659                          &vmw_sys_placement :
 660                          &vmw_vram_sys_placement, true, false,
 661                          &vmw_user_bo_destroy);
 662        if (unlikely(ret != 0))
 663                return ret;
 664
 665        ttm_bo_get(&user_bo->vbo.base);
 666        ret = ttm_prime_object_init(tfile,
 667                                    size,
 668                                    &user_bo->prime,
 669                                    shareable,
 670                                    ttm_buffer_type,
 671                                    &vmw_user_bo_release,
 672                                    &vmw_user_bo_ref_obj_release);
 673        if (unlikely(ret != 0)) {
 674                ttm_bo_put(&user_bo->vbo.base);
 675                goto out_no_base_object;
 676        }
 677
 678        *p_vbo = &user_bo->vbo;
 679        if (p_base) {
 680                *p_base = &user_bo->prime.base;
 681                kref_get(&(*p_base)->refcount);
 682        }
 683        *handle = user_bo->prime.base.handle;
 684
 685out_no_base_object:
 686        return ret;
 687}
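
/*
 * Usage sketch (illustrative only): allocating a user-visible buffer object
 * and returning the handle to user space, roughly as vmw_bo_alloc_ioctl()
 * below does. Error handling is abbreviated.
 *
 *	struct vmw_buffer_object *vbo;
 *	uint32_t handle;
 *	int ret;
 *
 *	// Not shareable, and no TTM base object pointer is needed here.
 *	ret = vmw_user_bo_alloc(dev_priv, tfile, size, false,
 *				&handle, &vbo, NULL);
 *	if (ret)
 *		return ret;
 *
 *	// ... copy "handle" back to user space ...
 *
 *	vmw_bo_unreference(&vbo);	// drop the local reference
 */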
 688
 689
 690/**
 691 * vmw_user_bo_verify_access - verify access permissions on this
 692 * buffer object.
 693 *
 694 * @bo: Pointer to the buffer object being accessed
 695 * @tfile: Identifying the caller.
 696 */
 697int vmw_user_bo_verify_access(struct ttm_buffer_object *bo,
 698                              struct ttm_object_file *tfile)
 699{
 700        struct vmw_user_buffer_object *vmw_user_bo;
 701
 702        if (unlikely(bo->destroy != vmw_user_bo_destroy))
 703                return -EPERM;
 704
 705        vmw_user_bo = vmw_user_buffer_object(bo);
 706
 707        /* Check that the caller has opened the object. */
 708        if (likely(ttm_ref_object_exists(tfile, &vmw_user_bo->prime.base)))
 709                return 0;
 710
 711        DRM_ERROR("Could not grant buffer access.\n");
 712        return -EPERM;
 713}
 714
 715
 716/**
 717 * vmw_user_bo_synccpu_grab - Grab a struct vmw_user_buffer_object for cpu
 718 * access, idling previous GPU operations on the buffer and optionally
 719 * blocking it for further command submissions.
 720 *
 721 * @user_bo: Pointer to the buffer object being grabbed for CPU access
 722 * @tfile: Identifying the caller.
 723 * @flags: Flags indicating how the grab should be performed.
 724 * Return: Zero on success, Negative error code on error. In particular,
 725 * -EBUSY will be returned if a dontblock operation is requested and the
 726 * buffer object is busy, and -ERESTARTSYS will be returned if a wait is
 727 * interrupted by a signal.
 728 *
 729 * A blocking grab will be automatically released when @tfile is closed.
 730 */
 731static int vmw_user_bo_synccpu_grab(struct vmw_user_buffer_object *user_bo,
 732                                    struct ttm_object_file *tfile,
 733                                    uint32_t flags)
 734{
 735        bool nonblock = !!(flags & drm_vmw_synccpu_dontblock);
 736        struct ttm_buffer_object *bo = &user_bo->vbo.base;
 737        bool existed;
 738        int ret;
 739
 740        if (flags & drm_vmw_synccpu_allow_cs) {
 741                long lret;
 742
 743                lret = dma_resv_wait_timeout_rcu
 744                        (bo->base.resv, true, true,
 745                         nonblock ? 0 : MAX_SCHEDULE_TIMEOUT);
 746                if (!lret)
 747                        return -EBUSY;
 748                else if (lret < 0)
 749                        return lret;
 750                return 0;
 751        }
 752
 753        ret = ttm_bo_reserve(bo, true, nonblock, NULL);
 754        if (unlikely(ret != 0))
 755                return ret;
 756
 757        ret = ttm_bo_wait(bo, true, nonblock);
 758        if (likely(ret == 0))
 759                atomic_inc(&user_bo->vbo.cpu_writers);
 760
 761        ttm_bo_unreserve(bo);
 762        if (unlikely(ret != 0))
 763                return ret;
 764
 765        ret = ttm_ref_object_add(tfile, &user_bo->prime.base,
 766                                 TTM_REF_SYNCCPU_WRITE, &existed, false);
 767        if (ret != 0 || existed)
 768                atomic_dec(&user_bo->vbo.cpu_writers);
 769
 770        return ret;
 771}
 772
 773/**
 774 * vmw_user_bo_synccpu_release - Release a previous grab for CPU access,
 775 * and unblock command submission on the buffer if blocked.
 776 *
 777 * @handle: Handle identifying the buffer object.
 778 * @tfile: Identifying the caller.
 779 * @flags: Flags indicating the type of release.
 780 */
 781static int vmw_user_bo_synccpu_release(uint32_t handle,
 782                                           struct ttm_object_file *tfile,
 783                                           uint32_t flags)
 784{
 785        if (!(flags & drm_vmw_synccpu_allow_cs))
 786                return ttm_ref_object_base_unref(tfile, handle,
 787                                                 TTM_REF_SYNCCPU_WRITE);
 788
 789        return 0;
 790}
 791
 792
 793/**
 794 * vmw_user_bo_synccpu_ioctl - ioctl function implementing the synccpu
 795 * functionality.
 796 *
 797 * @dev: Identifies the drm device.
 798 * @data: Pointer to the ioctl argument.
 799 * @file_priv: Identifies the caller.
 800 * Return: Zero on success, negative error code on error.
 801 *
 802 * This function checks the ioctl arguments for validity and calls the
 803 * relevant synccpu functions.
 804 */
 805int vmw_user_bo_synccpu_ioctl(struct drm_device *dev, void *data,
 806                              struct drm_file *file_priv)
 807{
 808        struct drm_vmw_synccpu_arg *arg =
 809                (struct drm_vmw_synccpu_arg *) data;
 810        struct vmw_buffer_object *vbo;
 811        struct vmw_user_buffer_object *user_bo;
 812        struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
 813        struct ttm_base_object *buffer_base;
 814        int ret;
 815
 816        if ((arg->flags & (drm_vmw_synccpu_read | drm_vmw_synccpu_write)) == 0
 817            || (arg->flags & ~(drm_vmw_synccpu_read | drm_vmw_synccpu_write |
 818                               drm_vmw_synccpu_dontblock |
 819                               drm_vmw_synccpu_allow_cs)) != 0) {
 820                DRM_ERROR("Illegal synccpu flags.\n");
 821                return -EINVAL;
 822        }
 823
 824        switch (arg->op) {
 825        case drm_vmw_synccpu_grab:
 826                ret = vmw_user_bo_lookup(tfile, arg->handle, &vbo,
 827                                             &buffer_base);
 828                if (unlikely(ret != 0))
 829                        return ret;
 830
 831                user_bo = container_of(vbo, struct vmw_user_buffer_object,
 832                                       vbo);
 833                ret = vmw_user_bo_synccpu_grab(user_bo, tfile, arg->flags);
 834                vmw_bo_unreference(&vbo);
 835                ttm_base_object_unref(&buffer_base);
 836                if (unlikely(ret != 0 && ret != -ERESTARTSYS &&
 837                             ret != -EBUSY)) {
 838                        DRM_ERROR("Failed synccpu grab on handle 0x%08x.\n",
 839                                  (unsigned int) arg->handle);
 840                        return ret;
 841                }
 842                break;
 843        case drm_vmw_synccpu_release:
 844                ret = vmw_user_bo_synccpu_release(arg->handle, tfile,
 845                                                  arg->flags);
 846                if (unlikely(ret != 0)) {
 847                        DRM_ERROR("Failed synccpu release on handle 0x%08x.\n",
 848                                  (unsigned int) arg->handle);
 849                        return ret;
 850                }
 851                break;
 852        default:
 853                DRM_ERROR("Invalid synccpu operation.\n");
 854                return -EINVAL;
 855        }
 856
 857        return 0;
 858}
 859
 860
 861/**
 862 * vmw_bo_alloc_ioctl - ioctl function implementing the buffer object
 863 * allocation functionality.
 864 *
 865 * @dev: Identifies the drm device.
 866 * @data: Pointer to the ioctl argument.
 867 * @file_priv: Identifies the caller.
 868 * Return: Zero on success, negative error code on error.
 869 *
 870 * This function checks the ioctl arguments for validity and allocates a
 871 * struct vmw_user_buffer_object bo.
 872 */
 873int vmw_bo_alloc_ioctl(struct drm_device *dev, void *data,
 874                       struct drm_file *file_priv)
 875{
 876        struct vmw_private *dev_priv = vmw_priv(dev);
 877        union drm_vmw_alloc_dmabuf_arg *arg =
 878            (union drm_vmw_alloc_dmabuf_arg *)data;
 879        struct drm_vmw_alloc_dmabuf_req *req = &arg->req;
 880        struct drm_vmw_dmabuf_rep *rep = &arg->rep;
 881        struct vmw_buffer_object *vbo;
 882        uint32_t handle;
 883        int ret;
 884
 885        ret = ttm_read_lock(&dev_priv->reservation_sem, true);
 886        if (unlikely(ret != 0))
 887                return ret;
 888
 889        ret = vmw_user_bo_alloc(dev_priv, vmw_fpriv(file_priv)->tfile,
 890                                req->size, false, &handle, &vbo,
 891                                NULL);
 892        if (unlikely(ret != 0))
 893                goto out_no_bo;
 894
 895        rep->handle = handle;
 896        rep->map_handle = drm_vma_node_offset_addr(&vbo->base.base.vma_node);
 897        rep->cur_gmr_id = handle;
 898        rep->cur_gmr_offset = 0;
 899
 900        vmw_bo_unreference(&vbo);
 901
 902out_no_bo:
 903        ttm_read_unlock(&dev_priv->reservation_sem);
 904
 905        return ret;
 906}
 907
 908
 909/**
 910 * vmw_bo_unref_ioctl - Generic handle close ioctl.
 911 *
 912 * @dev: Identifies the drm device.
 913 * @data: Pointer to the ioctl argument.
 914 * @file_priv: Identifies the caller.
 915 * Return: Zero on success, negative error code on error.
 916 *
 917 * This function checks the ioctl arguments for validity and closes a
 918 * handle to a TTM base object, optionally freeing the object.
 919 */
 920int vmw_bo_unref_ioctl(struct drm_device *dev, void *data,
 921                       struct drm_file *file_priv)
 922{
 923        struct drm_vmw_unref_dmabuf_arg *arg =
 924            (struct drm_vmw_unref_dmabuf_arg *)data;
 925
 926        return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
 927                                         arg->handle,
 928                                         TTM_REF_USAGE);
 929}
 930
 931
 932/**
 933 * vmw_user_bo_lookup - Look up a vmw user buffer object from a handle.
 934 *
 935 * @tfile: The TTM object file the handle is registered with.
 936 * @handle: The user buffer object handle
  937 * @out: Pointer to where a pointer to the embedded
 938 * struct vmw_buffer_object should be placed.
 939 * @p_base: Pointer to where a pointer to the TTM base object should be
 940 * placed, or NULL if no such pointer is required.
 941 * Return: Zero on success, Negative error code on error.
 942 *
 943 * Both the output base object pointer and the vmw buffer object pointer
 944 * will be refcounted.
 945 */
 946int vmw_user_bo_lookup(struct ttm_object_file *tfile,
 947                       uint32_t handle, struct vmw_buffer_object **out,
 948                       struct ttm_base_object **p_base)
 949{
 950        struct vmw_user_buffer_object *vmw_user_bo;
 951        struct ttm_base_object *base;
 952
 953        base = ttm_base_object_lookup(tfile, handle);
 954        if (unlikely(base == NULL)) {
 955                DRM_ERROR("Invalid buffer object handle 0x%08lx.\n",
 956                          (unsigned long)handle);
 957                return -ESRCH;
 958        }
 959
 960        if (unlikely(ttm_base_object_type(base) != ttm_buffer_type)) {
 961                ttm_base_object_unref(&base);
 962                DRM_ERROR("Invalid buffer object handle 0x%08lx.\n",
 963                          (unsigned long)handle);
 964                return -EINVAL;
 965        }
 966
 967        vmw_user_bo = container_of(base, struct vmw_user_buffer_object,
 968                                   prime.base);
 969        ttm_bo_get(&vmw_user_bo->vbo.base);
 970        if (p_base)
 971                *p_base = base;
 972        else
 973                ttm_base_object_unref(&base);
 974        *out = &vmw_user_bo->vbo;
 975
 976        return 0;
 977}
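
/*
 * Usage sketch (illustrative only): resolving a user-space handle to the
 * underlying buffer object, using it, and dropping both references again,
 * similar to the grab path in vmw_user_bo_synccpu_ioctl() above.
 *
 *	struct vmw_buffer_object *vbo;
 *	struct ttm_base_object *base;
 *	int ret;
 *
 *	ret = vmw_user_bo_lookup(tfile, handle, &vbo, &base);
 *	if (ret)
 *		return ret;
 *
 *	// ... both vbo and base are refcounted at this point ...
 *
 *	vmw_bo_unreference(&vbo);
 *	ttm_base_object_unref(&base);
 */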
 978
 979/**
 980 * vmw_user_bo_noref_lookup - Look up a vmw user buffer object without reference
 981 * @tfile: The TTM object file the handle is registered with.
 982 * @handle: The user buffer object handle.
 983 *
  984 * This function looks up a struct vmw_user_buffer_object and returns a
  985 * pointer to its embedded struct vmw_buffer_object without refcounting it.
 986 * The returned pointer is only valid until vmw_user_bo_noref_release() is
 987 * called, and the object pointed to by the returned pointer may be doomed.
 988 * Any persistent usage of the object requires a refcount to be taken using
  989 * ttm_bo_reference_unless_doomed(). If this function returns successfully, it
  990 * needs to be paired with vmw_user_bo_noref_release(), and no sleeping or
  991 * scheduling functions may be called in between these function calls.
 992 *
 993 * Return: A struct vmw_buffer_object pointer if successful or negative
 994 * error pointer on failure.
 995 */
 996struct vmw_buffer_object *
 997vmw_user_bo_noref_lookup(struct ttm_object_file *tfile, u32 handle)
 998{
 999        struct vmw_user_buffer_object *vmw_user_bo;
1000        struct ttm_base_object *base;
1001
1002        base = ttm_base_object_noref_lookup(tfile, handle);
1003        if (!base) {
1004                DRM_ERROR("Invalid buffer object handle 0x%08lx.\n",
1005                          (unsigned long)handle);
1006                return ERR_PTR(-ESRCH);
1007        }
1008
1009        if (unlikely(ttm_base_object_type(base) != ttm_buffer_type)) {
1010                ttm_base_object_noref_release();
1011                DRM_ERROR("Invalid buffer object handle 0x%08lx.\n",
1012                          (unsigned long)handle);
1013                return ERR_PTR(-EINVAL);
1014        }
1015
1016        vmw_user_bo = container_of(base, struct vmw_user_buffer_object,
1017                                   prime.base);
1018        return &vmw_user_bo->vbo;
1019}
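
/*
 * Usage sketch (illustrative only): the noref lookup must be paired with
 * vmw_user_bo_noref_release() (declared in vmwgfx_drv.h), with nothing that
 * may sleep or schedule in between, since no reference is held.
 *
 *	struct vmw_buffer_object *vbo;
 *
 *	vbo = vmw_user_bo_noref_lookup(tfile, handle);
 *	if (IS_ERR(vbo))
 *		return PTR_ERR(vbo);
 *
 *	// ... short, non-sleeping use of vbo, or take a real reference ...
 *
 *	vmw_user_bo_noref_release();
 */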
1020
1021/**
1022 * vmw_user_bo_reference - Open a handle to a vmw user buffer object.
1023 *
1024 * @tfile: The TTM object file to register the handle with.
1025 * @vbo: The embedded vmw buffer object.
1026 * @handle: Pointer to where the new handle should be placed.
1027 * Return: Zero on success, Negative error code on error.
1028 */
1029int vmw_user_bo_reference(struct ttm_object_file *tfile,
1030                          struct vmw_buffer_object *vbo,
1031                          uint32_t *handle)
1032{
1033        struct vmw_user_buffer_object *user_bo;
1034
1035        if (vbo->base.destroy != vmw_user_bo_destroy)
1036                return -EINVAL;
1037
1038        user_bo = container_of(vbo, struct vmw_user_buffer_object, vbo);
1039
1040        *handle = user_bo->prime.base.handle;
1041        return ttm_ref_object_add(tfile, &user_bo->prime.base,
1042                                  TTM_REF_USAGE, NULL, false);
1043}
1044
1045
1046/**
1047 * vmw_bo_fence_single - Utility function to fence a single TTM buffer
1048 *                       object without unreserving it.
1049 *
1050 * @bo:             Pointer to the struct ttm_buffer_object to fence.
1051 * @fence:          Pointer to the fence. If NULL, this function will
 1052 *                  insert a fence into the command stream.
1053 *
1054 * Contrary to the ttm_eu version of this function, it takes only
1055 * a single buffer object instead of a list, and it also doesn't
1056 * unreserve the buffer object, which needs to be done separately.
1057 */
1058void vmw_bo_fence_single(struct ttm_buffer_object *bo,
1059                         struct vmw_fence_obj *fence)
1060{
1061        struct ttm_bo_device *bdev = bo->bdev;
1062
1063        struct vmw_private *dev_priv =
1064                container_of(bdev, struct vmw_private, bdev);
1065
1066        if (fence == NULL) {
1067                vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
1068                dma_resv_add_excl_fence(bo->base.resv, &fence->base);
1069                dma_fence_put(&fence->base);
1070        } else
1071                dma_resv_add_excl_fence(bo->base.resv, &fence->base);
1072}
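
/*
 * Usage sketch (illustrative only): fencing a single buffer after submitting
 * a command that uses it. The caller reserves the buffer, and unreserving is
 * a separate step, unlike the ttm_eu helpers.
 *
 *	ret = ttm_bo_reserve(bo, true, false, NULL);
 *	if (ret)
 *		return ret;
 *
 *	// ... submit a device command that reads from or writes to bo ...
 *
 *	vmw_bo_fence_single(bo, NULL);	// NULL: create and insert a new fence
 *	ttm_bo_unreserve(bo);
 */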
1073
1074
1075/**
1076 * vmw_dumb_create - Create a dumb kms buffer
1077 *
1078 * @file_priv: Pointer to a struct drm_file identifying the caller.
1079 * @dev: Pointer to the drm device.
1080 * @args: Pointer to a struct drm_mode_create_dumb structure
1081 * Return: Zero on success, negative error code on failure.
1082 *
1083 * This is a driver callback for the core drm create_dumb functionality.
1084 * Note that this is very similar to the vmw_bo_alloc ioctl, except
1085 * that the arguments have a different format.
1086 */
1087int vmw_dumb_create(struct drm_file *file_priv,
1088                    struct drm_device *dev,
1089                    struct drm_mode_create_dumb *args)
1090{
1091        struct vmw_private *dev_priv = vmw_priv(dev);
1092        struct vmw_buffer_object *vbo;
1093        int ret;
1094
1095        args->pitch = args->width * ((args->bpp + 7) / 8);
1096        args->size = args->pitch * args->height;
1097
1098        ret = ttm_read_lock(&dev_priv->reservation_sem, true);
1099        if (unlikely(ret != 0))
1100                return ret;
1101
1102        ret = vmw_user_bo_alloc(dev_priv, vmw_fpriv(file_priv)->tfile,
1103                                    args->size, false, &args->handle,
1104                                    &vbo, NULL);
1105        if (unlikely(ret != 0))
1106                goto out_no_bo;
1107
1108        vmw_bo_unreference(&vbo);
1109out_no_bo:
1110        ttm_read_unlock(&dev_priv->reservation_sem);
1111        return ret;
1112}
1113
1114
1115/**
1116 * vmw_dumb_map_offset - Return the address space offset of a dumb buffer
1117 *
1118 * @file_priv: Pointer to a struct drm_file identifying the caller.
1119 * @dev: Pointer to the drm device.
1120 * @handle: Handle identifying the dumb buffer.
1121 * @offset: The address space offset returned.
1122 * Return: Zero on success, negative error code on failure.
1123 *
1124 * This is a driver callback for the core drm dumb_map_offset functionality.
1125 */
1126int vmw_dumb_map_offset(struct drm_file *file_priv,
1127                        struct drm_device *dev, uint32_t handle,
1128                        uint64_t *offset)
1129{
1130        struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
1131        struct vmw_buffer_object *out_buf;
1132        int ret;
1133
1134        ret = vmw_user_bo_lookup(tfile, handle, &out_buf, NULL);
1135        if (ret != 0)
1136                return -EINVAL;
1137
1138        *offset = drm_vma_node_offset_addr(&out_buf->base.base.vma_node);
1139        vmw_bo_unreference(&out_buf);
1140        return 0;
1141}
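
/*
 * Usage sketch (illustrative only, user-space side): the dumb-buffer
 * callbacks in this file back the generic DRM dumb-buffer ioctls. A client
 * would typically create a buffer, query its map offset and then mmap() it
 * through the DRM file descriptor (error handling omitted):
 *
 *	struct drm_mode_create_dumb create = {
 *		.width = 640, .height = 480, .bpp = 32,
 *	};
 *	struct drm_mode_map_dumb map = { 0 };
 *	void *ptr;
 *
 *	ioctl(fd, DRM_IOCTL_MODE_CREATE_DUMB, &create);
 *	map.handle = create.handle;
 *	ioctl(fd, DRM_IOCTL_MODE_MAP_DUMB, &map);
 *	ptr = mmap(NULL, create.size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		   fd, map.offset);
 */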
1142
1143
1144/**
 1145 * vmw_dumb_destroy - Destroy a dumb buffer
1146 *
1147 * @file_priv: Pointer to a struct drm_file identifying the caller.
1148 * @dev: Pointer to the drm device.
1149 * @handle: Handle identifying the dumb buffer.
1150 * Return: Zero on success, negative error code on failure.
1151 *
1152 * This is a driver callback for the core drm dumb_destroy functionality.
1153 */
1154int vmw_dumb_destroy(struct drm_file *file_priv,
1155                     struct drm_device *dev,
1156                     uint32_t handle)
1157{
1158        return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
1159                                         handle, TTM_REF_USAGE);
1160}
1161
1162
1163/**
1164 * vmw_bo_swap_notify - swapout notify callback.
1165 *
1166 * @bo: The buffer object to be swapped out.
1167 */
1168void vmw_bo_swap_notify(struct ttm_buffer_object *bo)
1169{
1170        /* Is @bo embedded in a struct vmw_buffer_object? */
1171        if (bo->destroy != vmw_bo_bo_free &&
1172            bo->destroy != vmw_user_bo_destroy)
1173                return;
1174
1175        /* Kill any cached kernel maps before swapout */
1176        vmw_bo_unmap(vmw_buffer_object(bo));
1177}
1178
1179
1180/**
1181 * vmw_bo_move_notify - TTM move_notify_callback
1182 *
1183 * @bo: The TTM buffer object about to move.
1184 * @mem: The struct ttm_resource indicating to what memory
1185 *       region the move is taking place.
1186 *
1187 * Detaches cached maps and device bindings that require that the
1188 * buffer doesn't move.
1189 */
1190void vmw_bo_move_notify(struct ttm_buffer_object *bo,
1191                        struct ttm_resource *mem)
1192{
1193        struct vmw_buffer_object *vbo;
1194
 1195        /* Make sure @bo is embedded in a struct vmw_buffer_object. */
1196        if (bo->destroy != vmw_bo_bo_free &&
1197            bo->destroy != vmw_user_bo_destroy)
1198                return;
1199
1200        vbo = container_of(bo, struct vmw_buffer_object, base);
1201
1202        /*
1203         * Kill any cached kernel maps before move to or from VRAM.
1204         * With other types of moves, the underlying pages stay the same,
1205         * and the map can be kept.
1206         */
1207        if (mem->mem_type == TTM_PL_VRAM || bo->mem.mem_type == TTM_PL_VRAM)
1208                vmw_bo_unmap(vbo);
1209
1210        /*
1211         * If we're moving a backup MOB out of MOB placement, then make sure we
1212         * read back all resource content first, and unbind the MOB from
1213         * the resource.
1214         */
1215        if (mem->mem_type != VMW_PL_MOB && bo->mem.mem_type == VMW_PL_MOB)
1216                vmw_resource_unbind_list(vbo);
1217}
1218