linux/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright © 2011-2018 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include <drm/ttm/ttm_placement.h>

#include <drm/drmP.h>
#include "vmwgfx_drv.h"
#include "ttm_object.h"


/**
 * struct vmw_user_buffer_object - User-space-visible buffer object
 *
 * @prime: The prime object providing user visibility.
 * @vbo: The embedded struct vmw_buffer_object.
 */
struct vmw_user_buffer_object {
        struct ttm_prime_object prime;
        struct vmw_buffer_object vbo;
};


/**
 * vmw_buffer_object - Convert a struct ttm_buffer_object to a struct
 * vmw_buffer_object.
 *
 * @bo: Pointer to the TTM buffer object.
 * Return: Pointer to the struct vmw_buffer_object embedding the
 * TTM buffer object.
 */
static struct vmw_buffer_object *
vmw_buffer_object(struct ttm_buffer_object *bo)
{
        return container_of(bo, struct vmw_buffer_object, base);
}


/**
 * vmw_user_buffer_object - Convert a struct ttm_buffer_object to a struct
 * vmw_user_buffer_object.
 *
 * @bo: Pointer to the TTM buffer object.
 * Return: Pointer to the struct vmw_user_buffer_object embedding the TTM
 * buffer object.
 */
static struct vmw_user_buffer_object *
vmw_user_buffer_object(struct ttm_buffer_object *bo)
{
        struct vmw_buffer_object *vmw_bo = vmw_buffer_object(bo);

        return container_of(vmw_bo, struct vmw_user_buffer_object, vbo);
}


/**
 * vmw_bo_pin_in_placement - Validate a buffer to placement.
 *
 * This function takes the reservation_sem in write mode.
 * Flushes and unpins the query bo to avoid failures.
 *
 * @dev_priv:  Driver private.
 * @buf:  DMA buffer to move.
 * @placement:  The placement to pin it in.
 * @interruptible:  Use interruptible wait.
 * Return: Zero on success, negative error code on failure. In particular,
 * -ERESTARTSYS if interrupted by a signal.
 */
int vmw_bo_pin_in_placement(struct vmw_private *dev_priv,
                            struct vmw_buffer_object *buf,
                            struct ttm_placement *placement,
                            bool interruptible)
{
        struct ttm_operation_ctx ctx = { interruptible, false };
        struct ttm_buffer_object *bo = &buf->base;
        int ret;
        uint32_t new_flags;

        ret = ttm_write_lock(&dev_priv->reservation_sem, interruptible);
        if (unlikely(ret != 0))
                return ret;

        vmw_execbuf_release_pinned_bo(dev_priv);

        ret = ttm_bo_reserve(bo, interruptible, false, NULL);
        if (unlikely(ret != 0))
                goto err;

        if (buf->pin_count > 0)
                ret = ttm_bo_mem_compat(placement, &bo->mem,
                                        &new_flags) ? 0 : -EINVAL;
        else
                ret = ttm_bo_validate(bo, placement, &ctx);

        if (!ret)
                vmw_bo_pin_reserved(buf, true);

        ttm_bo_unreserve(bo);

err:
        ttm_write_unlock(&dev_priv->reservation_sem);
        return ret;
}
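
/*
 * Illustrative sketch (not part of the driver): a typical pin/use/unpin
 * cycle built on the helpers in this file. The helper name and the work
 * done while pinned are hypothetical.
 *
 *      int example_pin_cycle(struct vmw_private *dev_priv,
 *                            struct vmw_buffer_object *buf)
 *      {
 *              int ret = vmw_bo_pin_in_vram(dev_priv, buf, true);
 *
 *              if (ret)
 *                      return ret;     // e.g. -ERESTARTSYS on a signal
 *              // ... access the buffer while it is pinned ...
 *              return vmw_bo_unpin(dev_priv, buf, true);
 *      }
 */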


/**
 * vmw_bo_pin_in_vram_or_gmr - Move a buffer to vram or gmr.
 *
 * This function takes the reservation_sem in write mode.
 * Flushes and unpins the query bo to avoid failures.
 *
 * @dev_priv:  Driver private.
 * @buf:  DMA buffer to move.
 * @interruptible:  Use interruptible wait.
 * Return: Zero on success, negative error code on failure. In particular,
 * -ERESTARTSYS if interrupted by a signal.
 */
int vmw_bo_pin_in_vram_or_gmr(struct vmw_private *dev_priv,
                              struct vmw_buffer_object *buf,
                              bool interruptible)
{
        struct ttm_operation_ctx ctx = { interruptible, false };
        struct ttm_buffer_object *bo = &buf->base;
        int ret;
        uint32_t new_flags;

        ret = ttm_write_lock(&dev_priv->reservation_sem, interruptible);
        if (unlikely(ret != 0))
                return ret;

        vmw_execbuf_release_pinned_bo(dev_priv);

        ret = ttm_bo_reserve(bo, interruptible, false, NULL);
        if (unlikely(ret != 0))
                goto err;

        if (buf->pin_count > 0) {
                ret = ttm_bo_mem_compat(&vmw_vram_gmr_placement, &bo->mem,
                                        &new_flags) ? 0 : -EINVAL;
                goto out_unreserve;
        }

        ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, &ctx);
        if (likely(ret == 0) || ret == -ERESTARTSYS)
                goto out_unreserve;

        ret = ttm_bo_validate(bo, &vmw_vram_placement, &ctx);

out_unreserve:
        if (!ret)
                vmw_bo_pin_reserved(buf, true);

        ttm_bo_unreserve(bo);
err:
        ttm_write_unlock(&dev_priv->reservation_sem);
        return ret;
}


/**
 * vmw_bo_pin_in_vram - Move a buffer to vram.
 *
 * This function takes the reservation_sem in write mode.
 * Flushes and unpins the query bo to avoid failures.
 *
 * @dev_priv:  Driver private.
 * @buf:  DMA buffer to move.
 * @interruptible:  Use interruptible wait.
 * Return: Zero on success, negative error code on failure. In particular,
 * -ERESTARTSYS if interrupted by a signal.
 */
int vmw_bo_pin_in_vram(struct vmw_private *dev_priv,
                       struct vmw_buffer_object *buf,
                       bool interruptible)
{
        return vmw_bo_pin_in_placement(dev_priv, buf, &vmw_vram_placement,
                                       interruptible);
}


/**
 * vmw_bo_pin_in_start_of_vram - Move a buffer to start of vram.
 *
 * This function takes the reservation_sem in write mode.
 * Flushes and unpins the query bo to avoid failures.
 *
 * @dev_priv:  Driver private.
 * @buf:  DMA buffer to pin.
 * @interruptible:  Use interruptible wait.
 * Return: Zero on success, negative error code on failure. In particular,
 * -ERESTARTSYS if interrupted by a signal.
 */
int vmw_bo_pin_in_start_of_vram(struct vmw_private *dev_priv,
                                struct vmw_buffer_object *buf,
                                bool interruptible)
{
        struct ttm_operation_ctx ctx = { interruptible, false };
        struct ttm_buffer_object *bo = &buf->base;
        struct ttm_placement placement;
        struct ttm_place place;
        int ret = 0;
        uint32_t new_flags;

        place = vmw_vram_placement.placement[0];
        place.lpfn = bo->num_pages;
        placement.num_placement = 1;
        placement.placement = &place;
        placement.num_busy_placement = 1;
        placement.busy_placement = &place;

        ret = ttm_write_lock(&dev_priv->reservation_sem, interruptible);
        if (unlikely(ret != 0))
                return ret;

        vmw_execbuf_release_pinned_bo(dev_priv);
        ret = ttm_bo_reserve(bo, interruptible, false, NULL);
        if (unlikely(ret != 0))
                goto err_unlock;

        /*
         * Is this buffer already in vram but not at the start of it?
         * In that case, evict it first because TTM isn't good at handling
         * that situation.
         */
        if (bo->mem.mem_type == TTM_PL_VRAM &&
            bo->mem.start < bo->num_pages &&
            bo->mem.start > 0 &&
            buf->pin_count == 0) {
                ctx.interruptible = false;
                (void) ttm_bo_validate(bo, &vmw_sys_placement, &ctx);
        }

        if (buf->pin_count > 0)
                ret = ttm_bo_mem_compat(&placement, &bo->mem,
                                        &new_flags) ? 0 : -EINVAL;
        else
                ret = ttm_bo_validate(bo, &placement, &ctx);

        /* For some reason we didn't end up at the start of vram */
        WARN_ON(ret == 0 && bo->offset != 0);
        if (!ret)
                vmw_bo_pin_reserved(buf, true);

        ttm_bo_unreserve(bo);
err_unlock:
        ttm_write_unlock(&dev_priv->reservation_sem);

        return ret;
}


/**
 * vmw_bo_unpin - Unpin the given buffer without moving it.
 *
 * This function takes the reservation_sem in read mode.
 *
 * @dev_priv:  Driver private.
 * @buf:  DMA buffer to unpin.
 * @interruptible:  Use interruptible wait.
 * Return: Zero on success, negative error code on failure. In particular,
 * -ERESTARTSYS if interrupted by a signal.
 */
int vmw_bo_unpin(struct vmw_private *dev_priv,
                 struct vmw_buffer_object *buf,
                 bool interruptible)
{
        struct ttm_buffer_object *bo = &buf->base;
        int ret;

        ret = ttm_read_lock(&dev_priv->reservation_sem, interruptible);
        if (unlikely(ret != 0))
                return ret;

        ret = ttm_bo_reserve(bo, interruptible, false, NULL);
        if (unlikely(ret != 0))
                goto err;

        vmw_bo_pin_reserved(buf, false);

        ttm_bo_unreserve(bo);

err:
        ttm_read_unlock(&dev_priv->reservation_sem);
        return ret;
}

/**
 * vmw_bo_get_guest_ptr - Get the guest ptr representing the current placement
 * of a buffer.
 *
 * @bo: Pointer to a struct ttm_buffer_object. Must be pinned or reserved.
 * @ptr: SVGAGuestPtr returning the result.
 */
void vmw_bo_get_guest_ptr(const struct ttm_buffer_object *bo,
                          SVGAGuestPtr *ptr)
{
        if (bo->mem.mem_type == TTM_PL_VRAM) {
                ptr->gmrId = SVGA_GMR_FRAMEBUFFER;
                ptr->offset = bo->offset;
        } else {
                ptr->gmrId = bo->mem.start;
                ptr->offset = 0;
        }
}
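
/*
 * Illustrative sketch (not part of the driver): reading back the guest
 * address of a buffer. @buf is a hypothetical pinned
 * struct vmw_buffer_object.
 *
 *      SVGAGuestPtr ptr;
 *
 *      vmw_bo_get_guest_ptr(&buf->base, &ptr);
 *      // In VRAM: ptr.gmrId == SVGA_GMR_FRAMEBUFFER and ptr.offset is
 *      // the buffer's offset into VRAM; otherwise ptr.gmrId is the GMR
 *      // id and ptr.offset is 0.
 */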


/**
 * vmw_bo_pin_reserved - Pin or unpin a buffer object without moving it.
 *
 * @vbo: The buffer object. Must be reserved.
 * @pin: Whether to pin or unpin.
 */
void vmw_bo_pin_reserved(struct vmw_buffer_object *vbo, bool pin)
{
        struct ttm_operation_ctx ctx = { false, true };
        struct ttm_place pl;
        struct ttm_placement placement;
        struct ttm_buffer_object *bo = &vbo->base;
        uint32_t old_mem_type = bo->mem.mem_type;
        int ret;

        lockdep_assert_held(&bo->resv->lock.base);

        if (pin) {
                if (vbo->pin_count++ > 0)
                        return;
        } else {
                WARN_ON(vbo->pin_count <= 0);
                if (--vbo->pin_count > 0)
                        return;
        }

        pl.fpfn = 0;
        pl.lpfn = 0;
        pl.flags = TTM_PL_FLAG_VRAM | VMW_PL_FLAG_GMR | VMW_PL_FLAG_MOB
                | TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED;
        if (pin)
                pl.flags |= TTM_PL_FLAG_NO_EVICT;

        memset(&placement, 0, sizeof(placement));
        placement.num_placement = 1;
        placement.placement = &pl;

        ret = ttm_bo_validate(bo, &placement, &ctx);

        BUG_ON(ret != 0 || bo->mem.mem_type != old_mem_type);
}
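
/*
 * Illustrative sketch (not part of the driver): vmw_bo_pin_reserved()
 * requires the caller to hold the reservation, so a stand-alone pin
 * looks like this hypothetical helper:
 *
 *      int example_pin_no_move(struct vmw_buffer_object *vbo)
 *      {
 *              int ret = ttm_bo_reserve(&vbo->base, true, false, NULL);
 *
 *              if (unlikely(ret != 0))
 *                      return ret;
 *              vmw_bo_pin_reserved(vbo, true); // pin_count 0 -> 1
 *              ttm_bo_unreserve(&vbo->base);
 *              return 0;
 *      }
 */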


/**
 * vmw_bo_map_and_cache - Map a buffer object and cache the map
 *
 * @vbo: The buffer object to map
 * Return: A kernel virtual address or NULL if mapping failed.
 *
 * This function maps a buffer object into the kernel address space, or
 * returns the virtual kernel address of an already existing map. The virtual
 * address remains valid as long as the buffer object is pinned or reserved.
 * The cached map is torn down on either
 * 1) Buffer object move
 * 2) Buffer object swapout
 * 3) Buffer object destruction
 */
void *vmw_bo_map_and_cache(struct vmw_buffer_object *vbo)
{
        struct ttm_buffer_object *bo = &vbo->base;
        bool not_used;
        void *virtual;
        int ret;

        virtual = ttm_kmap_obj_virtual(&vbo->map, &not_used);
        if (virtual)
                return virtual;

        ret = ttm_bo_kmap(bo, 0, bo->num_pages, &vbo->map);
        if (ret)
                DRM_ERROR("Buffer object map failed: %d.\n", ret);

        return ttm_kmap_obj_virtual(&vbo->map, &not_used);
}
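
/*
 * Illustrative sketch (not part of the driver): using the cached map on
 * a pinned or reserved buffer. The memset() size is hypothetical.
 *
 *      void *virt = vmw_bo_map_and_cache(vbo);
 *
 *      if (virt)
 *              memset(virt, 0, vbo->base.num_pages << PAGE_SHIFT);
 *      // No vmw_bo_unmap() is needed here; the cached map is reused on
 *      // the next call and torn down on move, swapout or destruction.
 */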


/**
 * vmw_bo_unmap - Tear down a cached buffer object map.
 *
 * @vbo: The buffer object whose map we are tearing down.
 *
 * This function tears down a cached map set up using
 * vmw_bo_map_and_cache().
 */
void vmw_bo_unmap(struct vmw_buffer_object *vbo)
{
        if (vbo->map.bo == NULL)
                return;

        ttm_bo_kunmap(&vbo->map);
}


/**
 * vmw_bo_acc_size - Calculate the pinned memory usage of buffers
 *
 * @dev_priv: Pointer to a struct vmw_private identifying the device.
 * @size: The requested buffer size.
 * @user: Whether this is an ordinary dma buffer or a user dma buffer.
 * Return: The size to account for this buffer, including struct and
 * page-array overhead.
 */
static size_t vmw_bo_acc_size(struct vmw_private *dev_priv, size_t size,
                              bool user)
{
        static size_t struct_size, user_struct_size;
        size_t num_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
        size_t page_array_size = ttm_round_pot(num_pages * sizeof(void *));

        if (unlikely(struct_size == 0)) {
                size_t backend_size = ttm_round_pot(vmw_tt_size);

                struct_size = backend_size +
                        ttm_round_pot(sizeof(struct vmw_buffer_object));
                user_struct_size = backend_size +
                        ttm_round_pot(sizeof(struct vmw_user_buffer_object)) +
                        TTM_OBJ_EXTRA_SIZE;
        }

        if (dev_priv->map_mode == vmw_dma_alloc_coherent)
                page_array_size +=
                        ttm_round_pot(num_pages * sizeof(dma_addr_t));

        return ((user) ? user_struct_size : struct_size) +
                page_array_size;
}


/**
 * vmw_bo_bo_free - vmw buffer object destructor
 *
 * @bo: Pointer to the embedded struct ttm_buffer_object
 */
void vmw_bo_bo_free(struct ttm_buffer_object *bo)
{
        struct vmw_buffer_object *vmw_bo = vmw_buffer_object(bo);

        vmw_bo_unmap(vmw_bo);
        kfree(vmw_bo);
}


/**
 * vmw_user_bo_destroy - vmw user buffer object destructor
 *
 * @bo: Pointer to the embedded struct ttm_buffer_object
 */
static void vmw_user_bo_destroy(struct ttm_buffer_object *bo)
{
        struct vmw_user_buffer_object *vmw_user_bo = vmw_user_buffer_object(bo);

        vmw_bo_unmap(&vmw_user_bo->vbo);
        ttm_prime_object_kfree(vmw_user_bo, prime);
}


/**
 * vmw_bo_init - Initialize a vmw buffer object
 *
 * @dev_priv: Pointer to the device private struct
 * @vmw_bo: Pointer to the struct vmw_buffer_object to initialize.
 * @size: Buffer object size in bytes.
 * @placement: Initial placement.
 * @interruptible: Whether waits should be performed interruptibly.
 * @bo_free: The buffer object destructor.
 * Return: Zero on success, negative error code on error.
 *
 * Note that on error, the code will free the buffer object.
 */
int vmw_bo_init(struct vmw_private *dev_priv,
                struct vmw_buffer_object *vmw_bo,
                size_t size, struct ttm_placement *placement,
                bool interruptible,
                void (*bo_free)(struct ttm_buffer_object *bo))
{
        struct ttm_bo_device *bdev = &dev_priv->bdev;
        size_t acc_size;
        int ret;
        bool user = (bo_free == &vmw_user_bo_destroy);

        /* Warn on a missing or unknown destructor. */
        WARN_ON_ONCE(!bo_free || (!user && bo_free != vmw_bo_bo_free));

        acc_size = vmw_bo_acc_size(dev_priv, size, user);
        memset(vmw_bo, 0, sizeof(*vmw_bo));

        INIT_LIST_HEAD(&vmw_bo->res_list);

        ret = ttm_bo_init(bdev, &vmw_bo->base, size,
                          ttm_bo_type_device, placement,
                          0, interruptible, acc_size,
                          NULL, NULL, bo_free);
        return ret;
}
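
/*
 * Illustrative sketch (not part of the driver): creating a
 * kernel-internal buffer object. The size and placement are
 * hypothetical; on failure vmw_bo_init() already frees the object
 * through the destructor passed in, so there is no kfree() on the
 * error path.
 *
 *      struct vmw_buffer_object *vbo = kzalloc(sizeof(*vbo), GFP_KERNEL);
 *      int ret;
 *
 *      if (!vbo)
 *              return -ENOMEM;
 *      ret = vmw_bo_init(dev_priv, vbo, PAGE_SIZE,
 *                        &vmw_sys_placement, true, vmw_bo_bo_free);
 *      if (ret)
 *              return ret;     // vbo was freed by vmw_bo_bo_free
 */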


/**
 * vmw_user_bo_release - TTM reference base object release callback for
 * vmw user buffer objects
 *
 * @p_base: The TTM base object pointer about to be unreferenced.
 *
 * Clears the TTM base object pointer and drops the reference the
 * base object has on the underlying struct vmw_buffer_object.
 */
static void vmw_user_bo_release(struct ttm_base_object **p_base)
{
        struct vmw_user_buffer_object *vmw_user_bo;
        struct ttm_base_object *base = *p_base;
        struct ttm_buffer_object *bo;

        *p_base = NULL;

        if (unlikely(base == NULL))
                return;

        vmw_user_bo = container_of(base, struct vmw_user_buffer_object,
                                   prime.base);
        bo = &vmw_user_bo->vbo.base;
        ttm_bo_unref(&bo);
}


/**
 * vmw_user_bo_ref_obj_release - TTM synccpu reference object release callback
 * for vmw user buffer objects
 *
 * @base: Pointer to the TTM base object
 * @ref_type: Reference type of the reference reaching zero.
 *
 * Called when user-space drops its last synccpu reference on the buffer
 * object, either explicitly or as part of a cleanup file close.
 */
static void vmw_user_bo_ref_obj_release(struct ttm_base_object *base,
                                        enum ttm_ref_type ref_type)
{
        struct vmw_user_buffer_object *user_bo;

        user_bo = container_of(base, struct vmw_user_buffer_object, prime.base);

        switch (ref_type) {
        case TTM_REF_SYNCCPU_WRITE:
                ttm_bo_synccpu_write_release(&user_bo->vbo.base);
                break;
        default:
                WARN_ONCE(true, "Undefined buffer object reference release.\n");
        }
}


/**
 * vmw_user_bo_alloc - Allocate a user buffer object
 *
 * @dev_priv: Pointer to a struct vmw_private.
 * @tfile: Pointer to a struct ttm_object_file on which to register the user
 * object.
 * @size: Size of the buffer object.
 * @shareable: Boolean whether the buffer is shareable with other open files.
 * @handle: Pointer to where the handle value should be assigned.
 * @p_vbo: Pointer to where the refcounted struct vmw_buffer_object pointer
 * should be assigned.
 * @p_base: Pointer to where a pointer to the refcounted TTM base object
 * should be placed, or NULL if no such pointer is required.
 * Return: Zero on success, negative error code on error.
 */
int vmw_user_bo_alloc(struct vmw_private *dev_priv,
                      struct ttm_object_file *tfile,
                      uint32_t size,
                      bool shareable,
                      uint32_t *handle,
                      struct vmw_buffer_object **p_vbo,
                      struct ttm_base_object **p_base)
{
        struct vmw_user_buffer_object *user_bo;
        struct ttm_buffer_object *tmp;
        int ret;

        user_bo = kzalloc(sizeof(*user_bo), GFP_KERNEL);
        if (unlikely(!user_bo)) {
                DRM_ERROR("Failed to allocate a buffer.\n");
                return -ENOMEM;
        }

        ret = vmw_bo_init(dev_priv, &user_bo->vbo, size,
                          (dev_priv->has_mob) ?
                          &vmw_sys_placement :
                          &vmw_vram_sys_placement, true,
                          &vmw_user_bo_destroy);
        if (unlikely(ret != 0))
                return ret;

        tmp = ttm_bo_reference(&user_bo->vbo.base);
        ret = ttm_prime_object_init(tfile,
                                    size,
                                    &user_bo->prime,
                                    shareable,
                                    ttm_buffer_type,
                                    &vmw_user_bo_release,
                                    &vmw_user_bo_ref_obj_release);
        if (unlikely(ret != 0)) {
                ttm_bo_unref(&tmp);
                goto out_no_base_object;
        }

        *p_vbo = &user_bo->vbo;
        if (p_base) {
                *p_base = &user_bo->prime.base;
                kref_get(&(*p_base)->refcount);
        }
        *handle = user_bo->prime.base.handle;

out_no_base_object:
        return ret;
}


/**
 * vmw_user_bo_verify_access - verify access permissions on this
 * buffer object.
 *
 * @bo: Pointer to the buffer object being accessed
 * @tfile: Identifying the caller.
 * Return: Zero if access is granted, -EPERM otherwise.
 */
int vmw_user_bo_verify_access(struct ttm_buffer_object *bo,
                              struct ttm_object_file *tfile)
{
        struct vmw_user_buffer_object *vmw_user_bo;

        if (unlikely(bo->destroy != vmw_user_bo_destroy))
                return -EPERM;

        vmw_user_bo = vmw_user_buffer_object(bo);

        /* Check that the caller has opened the object. */
        if (likely(ttm_ref_object_exists(tfile, &vmw_user_bo->prime.base)))
                return 0;

        DRM_ERROR("Could not grant buffer access.\n");
        return -EPERM;
}


/**
 * vmw_user_bo_synccpu_grab - Grab a struct vmw_user_buffer_object for cpu
 * access, idling previous GPU operations on the buffer and optionally
 * blocking it for further command submissions.
 *
 * @user_bo: Pointer to the buffer object being grabbed for CPU access
 * @tfile: Identifying the caller.
 * @flags: Flags indicating how the grab should be performed.
 * Return: Zero on success, negative error code on error. In particular,
 * -EBUSY will be returned if a dontblock operation is requested and the
 * buffer object is busy, and -ERESTARTSYS will be returned if a wait is
 * interrupted by a signal.
 *
 * A blocking grab will be automatically released when @tfile is closed.
 */
static int vmw_user_bo_synccpu_grab(struct vmw_user_buffer_object *user_bo,
                                    struct ttm_object_file *tfile,
                                    uint32_t flags)
{
        struct ttm_buffer_object *bo = &user_bo->vbo.base;
        bool existed;
        int ret;

        if (flags & drm_vmw_synccpu_allow_cs) {
                bool nonblock = !!(flags & drm_vmw_synccpu_dontblock);
                long lret;

                lret = reservation_object_wait_timeout_rcu
                        (bo->resv, true, true,
                         nonblock ? 0 : MAX_SCHEDULE_TIMEOUT);
                if (!lret)
                        return -EBUSY;
                else if (lret < 0)
                        return lret;
                return 0;
        }

        ret = ttm_bo_synccpu_write_grab
                (bo, !!(flags & drm_vmw_synccpu_dontblock));
        if (unlikely(ret != 0))
                return ret;

        ret = ttm_ref_object_add(tfile, &user_bo->prime.base,
                                 TTM_REF_SYNCCPU_WRITE, &existed, false);
        if (ret != 0 || existed)
                ttm_bo_synccpu_write_release(&user_bo->vbo.base);

        return ret;
}

/**
 * vmw_user_bo_synccpu_release - Release a previous grab for CPU access,
 * and unblock command submission on the buffer if blocked.
 *
 * @handle: Handle identifying the buffer object.
 * @tfile: Identifying the caller.
 * @flags: Flags indicating the type of release.
 * Return: Zero on success, negative error code on error.
 */
static int vmw_user_bo_synccpu_release(uint32_t handle,
                                       struct ttm_object_file *tfile,
                                       uint32_t flags)
{
        if (!(flags & drm_vmw_synccpu_allow_cs))
                return ttm_ref_object_base_unref(tfile, handle,
                                                 TTM_REF_SYNCCPU_WRITE);

        return 0;
}


/**
 * vmw_user_bo_synccpu_ioctl - ioctl function implementing the synccpu
 * functionality.
 *
 * @dev: Identifies the drm device.
 * @data: Pointer to the ioctl argument.
 * @file_priv: Identifies the caller.
 * Return: Zero on success, negative error code on error.
 *
 * This function checks the ioctl arguments for validity and calls the
 * relevant synccpu functions.
 */
int vmw_user_bo_synccpu_ioctl(struct drm_device *dev, void *data,
                              struct drm_file *file_priv)
{
        struct drm_vmw_synccpu_arg *arg =
                (struct drm_vmw_synccpu_arg *) data;
        struct vmw_buffer_object *vbo;
        struct vmw_user_buffer_object *user_bo;
        struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
        struct ttm_base_object *buffer_base;
        int ret;

        if ((arg->flags & (drm_vmw_synccpu_read | drm_vmw_synccpu_write)) == 0
            || (arg->flags & ~(drm_vmw_synccpu_read | drm_vmw_synccpu_write |
                               drm_vmw_synccpu_dontblock |
                               drm_vmw_synccpu_allow_cs)) != 0) {
                DRM_ERROR("Illegal synccpu flags.\n");
                return -EINVAL;
        }

        switch (arg->op) {
        case drm_vmw_synccpu_grab:
                ret = vmw_user_bo_lookup(tfile, arg->handle, &vbo,
                                         &buffer_base);
                if (unlikely(ret != 0))
                        return ret;

                user_bo = container_of(vbo, struct vmw_user_buffer_object,
                                       vbo);
                ret = vmw_user_bo_synccpu_grab(user_bo, tfile, arg->flags);
                vmw_bo_unreference(&vbo);
                ttm_base_object_unref(&buffer_base);
                if (unlikely(ret != 0 && ret != -ERESTARTSYS &&
                             ret != -EBUSY)) {
                        DRM_ERROR("Failed synccpu grab on handle 0x%08x.\n",
                                  (unsigned int) arg->handle);
                        return ret;
                }
                break;
        case drm_vmw_synccpu_release:
                ret = vmw_user_bo_synccpu_release(arg->handle, tfile,
                                                  arg->flags);
                if (unlikely(ret != 0)) {
                        DRM_ERROR("Failed synccpu release on handle 0x%08x.\n",
                                  (unsigned int) arg->handle);
                        return ret;
                }
                break;
        default:
                DRM_ERROR("Invalid synccpu operation.\n");
                return -EINVAL;
        }

        return 0;
}


/**
 * vmw_bo_alloc_ioctl - ioctl function implementing the buffer object
 * allocation functionality.
 *
 * @dev: Identifies the drm device.
 * @data: Pointer to the ioctl argument.
 * @file_priv: Identifies the caller.
 * Return: Zero on success, negative error code on error.
 *
 * This function checks the ioctl arguments for validity and allocates a
 * struct vmw_user_buffer_object bo.
 */
int vmw_bo_alloc_ioctl(struct drm_device *dev, void *data,
                       struct drm_file *file_priv)
{
        struct vmw_private *dev_priv = vmw_priv(dev);
        union drm_vmw_alloc_dmabuf_arg *arg =
            (union drm_vmw_alloc_dmabuf_arg *)data;
        struct drm_vmw_alloc_dmabuf_req *req = &arg->req;
        struct drm_vmw_dmabuf_rep *rep = &arg->rep;
        struct vmw_buffer_object *vbo;
        uint32_t handle;
        int ret;

        ret = ttm_read_lock(&dev_priv->reservation_sem, true);
        if (unlikely(ret != 0))
                return ret;

        ret = vmw_user_bo_alloc(dev_priv, vmw_fpriv(file_priv)->tfile,
                                req->size, false, &handle, &vbo,
                                NULL);
        if (unlikely(ret != 0))
                goto out_no_bo;

        rep->handle = handle;
        rep->map_handle = drm_vma_node_offset_addr(&vbo->base.vma_node);
        rep->cur_gmr_id = handle;
        rep->cur_gmr_offset = 0;

        vmw_bo_unreference(&vbo);

out_no_bo:
        ttm_read_unlock(&dev_priv->reservation_sem);

        return ret;
}


/**
 * vmw_bo_unref_ioctl - Generic handle close ioctl.
 *
 * @dev: Identifies the drm device.
 * @data: Pointer to the ioctl argument.
 * @file_priv: Identifies the caller.
 * Return: Zero on success, negative error code on error.
 *
 * This function checks the ioctl arguments for validity and closes a
 * handle to a TTM base object, optionally freeing the object.
 */
int vmw_bo_unref_ioctl(struct drm_device *dev, void *data,
                       struct drm_file *file_priv)
{
        struct drm_vmw_unref_dmabuf_arg *arg =
            (struct drm_vmw_unref_dmabuf_arg *)data;

        return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
                                         arg->handle,
                                         TTM_REF_USAGE);
}


/**
 * vmw_user_bo_lookup - Look up a vmw user buffer object from a handle.
 *
 * @tfile: The TTM object file the handle is registered with.
 * @handle: The user buffer object handle.
 * @out: Pointer to where a pointer to the embedded
 * struct vmw_buffer_object should be placed.
 * @p_base: Pointer to where a pointer to the TTM base object should be
 * placed, or NULL if no such pointer is required.
 * Return: Zero on success, negative error code on error.
 *
 * Both the output base object pointer and the vmw buffer object pointer
 * will be refcounted.
 */
int vmw_user_bo_lookup(struct ttm_object_file *tfile,
                       uint32_t handle, struct vmw_buffer_object **out,
                       struct ttm_base_object **p_base)
{
        struct vmw_user_buffer_object *vmw_user_bo;
        struct ttm_base_object *base;

        base = ttm_base_object_lookup(tfile, handle);
        if (unlikely(base == NULL)) {
                DRM_ERROR("Invalid buffer object handle 0x%08lx.\n",
                          (unsigned long)handle);
                return -ESRCH;
        }

        if (unlikely(ttm_base_object_type(base) != ttm_buffer_type)) {
                ttm_base_object_unref(&base);
                DRM_ERROR("Invalid buffer object handle 0x%08lx.\n",
                          (unsigned long)handle);
                return -EINVAL;
        }

        vmw_user_bo = container_of(base, struct vmw_user_buffer_object,
                                   prime.base);
        (void)ttm_bo_reference(&vmw_user_bo->vbo.base);
        if (p_base)
                *p_base = base;
        else
                ttm_base_object_unref(&base);
        *out = &vmw_user_bo->vbo;

        return 0;
}
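
/*
 * Illustrative sketch (not part of the driver): resolving a user handle
 * and dropping both references when done, mirroring the pattern used in
 * vmw_user_bo_synccpu_ioctl() above.
 *
 *      struct vmw_buffer_object *vbo;
 *      struct ttm_base_object *base;
 *      int ret = vmw_user_bo_lookup(tfile, handle, &vbo, &base);
 *
 *      if (ret)
 *              return ret;
 *      // ... use vbo ...
 *      vmw_bo_unreference(&vbo);
 *      ttm_base_object_unref(&base);
 */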

/**
 * vmw_user_bo_noref_lookup - Look up a vmw user buffer object without reference
 * @tfile: The TTM object file the handle is registered with.
 * @handle: The user buffer object handle.
 *
 * This function looks up a struct vmw_user_bo and returns a pointer to the
 * struct vmw_buffer_object it derives from without refcounting the pointer.
 * The returned pointer is only valid until vmw_user_bo_noref_release() is
 * called, and the object pointed to by the returned pointer may be doomed.
 * Any persistent usage of the object requires a refcount to be taken using
 * ttm_bo_reference_unless_doomed(). If this function returns successfully it
 * needs to be paired with vmw_user_bo_noref_release(), and no sleeping or
 * scheduling functions may be called in between these function calls.
 *
 * Return: A struct vmw_buffer_object pointer if successful or negative
 * error pointer on failure.
 */
struct vmw_buffer_object *
vmw_user_bo_noref_lookup(struct ttm_object_file *tfile, u32 handle)
{
        struct vmw_user_buffer_object *vmw_user_bo;
        struct ttm_base_object *base;

        base = ttm_base_object_noref_lookup(tfile, handle);
        if (!base) {
                DRM_ERROR("Invalid buffer object handle 0x%08lx.\n",
                          (unsigned long)handle);
                return ERR_PTR(-ESRCH);
        }

        if (unlikely(ttm_base_object_type(base) != ttm_buffer_type)) {
                ttm_base_object_noref_release();
                DRM_ERROR("Invalid buffer object handle 0x%08lx.\n",
                          (unsigned long)handle);
                return ERR_PTR(-EINVAL);
        }

        vmw_user_bo = container_of(base, struct vmw_user_buffer_object,
                                   prime.base);
        return &vmw_user_bo->vbo;
}
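
/*
 * Illustrative sketch (not part of the driver): the non-sleeping
 * lookup/release pairing described above, using the
 * vmw_user_bo_noref_release() counterpart mentioned in the kerneldoc.
 *
 *      struct vmw_buffer_object *vbo =
 *              vmw_user_bo_noref_lookup(tfile, handle);
 *
 *      if (IS_ERR(vbo))
 *              return PTR_ERR(vbo);
 *      // ... short, non-sleeping use of vbo only ...
 *      vmw_user_bo_noref_release();
 */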

/**
 * vmw_user_bo_reference - Open a handle to a vmw user buffer object.
 *
 * @tfile: The TTM object file to register the handle with.
 * @vbo: The embedded vmw buffer object.
 * @handle: Pointer to where the new handle should be placed.
 * Return: Zero on success, negative error code on error.
 */
int vmw_user_bo_reference(struct ttm_object_file *tfile,
                          struct vmw_buffer_object *vbo,
                          uint32_t *handle)
{
        struct vmw_user_buffer_object *user_bo;

        if (vbo->base.destroy != vmw_user_bo_destroy)
                return -EINVAL;

        user_bo = container_of(vbo, struct vmw_user_buffer_object, vbo);

        *handle = user_bo->prime.base.handle;
        return ttm_ref_object_add(tfile, &user_bo->prime.base,
                                  TTM_REF_USAGE, NULL, false);
}


/**
 * vmw_bo_fence_single - Utility function to fence a single TTM buffer
 *                       object without unreserving it.
 *
 * @bo:             Pointer to the struct ttm_buffer_object to fence.
 * @fence:          Pointer to the fence. If NULL, this function will
 *                  insert a fence into the command stream.
 *
 * Contrary to the ttm_eu version of this function, it takes only
 * a single buffer object instead of a list, and it also doesn't
 * unreserve the buffer object, which needs to be done separately.
 */
void vmw_bo_fence_single(struct ttm_buffer_object *bo,
                         struct vmw_fence_obj *fence)
{
        struct ttm_bo_device *bdev = bo->bdev;

        struct vmw_private *dev_priv =
                container_of(bdev, struct vmw_private, bdev);

        if (fence == NULL) {
                vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
                reservation_object_add_excl_fence(bo->resv, &fence->base);
                dma_fence_put(&fence->base);
        } else {
                reservation_object_add_excl_fence(bo->resv, &fence->base);
        }
}
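
/*
 * Illustrative sketch (not part of the driver): fencing a reserved
 * buffer after command submission and then unreserving it.
 *
 *      // bo is reserved; commands referencing it were just submitted
 *      vmw_bo_fence_single(bo, NULL);  // NULL: insert a new fence
 *      ttm_bo_unreserve(bo);
 */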


/**
 * vmw_dumb_create - Create a dumb kms buffer
 *
 * @file_priv: Pointer to a struct drm_file identifying the caller.
 * @dev: Pointer to the drm device.
 * @args: Pointer to a struct drm_mode_create_dumb structure
 * Return: Zero on success, negative error code on failure.
 *
 * This is a driver callback for the core drm create_dumb functionality.
 * Note that this is very similar to the vmw_bo_alloc ioctl, except
 * that the arguments have a different format.
 */
int vmw_dumb_create(struct drm_file *file_priv,
                    struct drm_device *dev,
                    struct drm_mode_create_dumb *args)
{
        struct vmw_private *dev_priv = vmw_priv(dev);
        struct vmw_buffer_object *vbo;
        int ret;

        args->pitch = args->width * ((args->bpp + 7) / 8);
        args->size = args->pitch * args->height;

        ret = ttm_read_lock(&dev_priv->reservation_sem, true);
        if (unlikely(ret != 0))
                return ret;

        ret = vmw_user_bo_alloc(dev_priv, vmw_fpriv(file_priv)->tfile,
                                args->size, false, &args->handle,
                                &vbo, NULL);
        if (unlikely(ret != 0))
                goto out_no_bo;

        vmw_bo_unreference(&vbo);
out_no_bo:
        ttm_read_unlock(&dev_priv->reservation_sem);
        return ret;
}
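
/*
 * Worked example for the pitch/size computation above (illustrative
 * numbers): a 1024x768 dumb buffer at 32 bpp gives
 *      pitch = 1024 * ((32 + 7) / 8) = 4096 bytes,
 *      size  = 4096 * 768 = 3145728 bytes.
 */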


/**
 * vmw_dumb_map_offset - Return the address space offset of a dumb buffer
 *
 * @file_priv: Pointer to a struct drm_file identifying the caller.
 * @dev: Pointer to the drm device.
 * @handle: Handle identifying the dumb buffer.
 * @offset: The address space offset returned.
 * Return: Zero on success, negative error code on failure.
 *
 * This is a driver callback for the core drm dumb_map_offset functionality.
 */
int vmw_dumb_map_offset(struct drm_file *file_priv,
                        struct drm_device *dev, uint32_t handle,
                        uint64_t *offset)
{
        struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
        struct vmw_buffer_object *out_buf;
        int ret;

        ret = vmw_user_bo_lookup(tfile, handle, &out_buf, NULL);
        if (ret != 0)
                return -EINVAL;

        *offset = drm_vma_node_offset_addr(&out_buf->base.vma_node);
        vmw_bo_unreference(&out_buf);
        return 0;
}


/**
 * vmw_dumb_destroy - Destroy a dumb buffer
 *
 * @file_priv: Pointer to a struct drm_file identifying the caller.
 * @dev: Pointer to the drm device.
 * @handle: Handle identifying the dumb buffer.
 * Return: Zero on success, negative error code on failure.
 *
 * This is a driver callback for the core drm dumb_destroy functionality.
 */
int vmw_dumb_destroy(struct drm_file *file_priv,
                     struct drm_device *dev,
                     uint32_t handle)
{
        return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
                                         handle, TTM_REF_USAGE);
}


/**
 * vmw_bo_swap_notify - swapout notify callback.
 *
 * @bo: The buffer object to be swapped out.
 */
void vmw_bo_swap_notify(struct ttm_buffer_object *bo)
{
        /* Is @bo embedded in a struct vmw_buffer_object? */
        if (bo->destroy != vmw_bo_bo_free &&
            bo->destroy != vmw_user_bo_destroy)
                return;

        /* Kill any cached kernel maps before swapout */
        vmw_bo_unmap(vmw_buffer_object(bo));
}


/**
 * vmw_bo_move_notify - TTM move_notify_callback
 *
 * @bo: The TTM buffer object about to move.
 * @mem: The struct ttm_mem_reg indicating to what memory
 *       region the move is taking place.
 *
 * Detaches cached maps and device bindings that require that the
 * buffer doesn't move.
 */
void vmw_bo_move_notify(struct ttm_buffer_object *bo,
                        struct ttm_mem_reg *mem)
{
        struct vmw_buffer_object *vbo;

        if (mem == NULL)
                return;

        /* Make sure @bo is embedded in a struct vmw_buffer_object. */
        if (bo->destroy != vmw_bo_bo_free &&
            bo->destroy != vmw_user_bo_destroy)
                return;

        vbo = container_of(bo, struct vmw_buffer_object, base);

        /*
         * Kill any cached kernel maps before move to or from VRAM.
         * With other types of moves, the underlying pages stay the same,
         * and the map can be kept.
         */
        if (mem->mem_type == TTM_PL_VRAM || bo->mem.mem_type == TTM_PL_VRAM)
                vmw_bo_unmap(vbo);

        /*
         * If we're moving a backup MOB out of MOB placement, then make sure we
         * read back all resource content first, and unbind the MOB from
         * the resource.
         */
        if (mem->mem_type != VMW_PL_MOB && bo->mem.mem_type == VMW_PL_MOB)
                vmw_resource_unbind_list(vbo);
}