linux/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
/**************************************************************************
 *
 * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include <drm/ttm/ttm_placement.h>

#include "vmwgfx_drv.h"
#include "vmwgfx_resource_priv.h"
#include "vmwgfx_so.h"
#include "vmwgfx_binding.h"
#include "device_include/svga3d_surfacedefs.h"

/**
 * struct vmw_user_surface - User-space visible surface resource
 *
 * @prime:          The TTM prime object handling user-space visibility
 *                  and sharing.
 * @srf:            The surface metadata.
 * @size:           TTM accounting size for the surface.
 * @master:         Master of the creating client. Used for security checks.
 * @backup_base:    The TTM base object of the backup buffer.
 */
struct vmw_user_surface {
        struct ttm_prime_object prime;
        struct vmw_surface srf;
        uint32_t size;
        struct drm_master *master;
        struct ttm_base_object *backup_base;
};

/**
 * struct vmw_surface_offset - Backing store mip level offset info
 *
 * @face:           Surface face.
 * @mip:            Mip level.
 * @bo_offset:      Offset into backing store of this mip level.
 */
struct vmw_surface_offset {
        uint32_t face;
        uint32_t mip;
        uint32_t bo_offset;
};

static void vmw_user_surface_free(struct vmw_resource *res);
static struct vmw_resource *
vmw_user_surface_base_to_res(struct ttm_base_object *base);
static int vmw_legacy_srf_bind(struct vmw_resource *res,
                               struct ttm_validate_buffer *val_buf);
static int vmw_legacy_srf_unbind(struct vmw_resource *res,
                                 bool readback,
                                 struct ttm_validate_buffer *val_buf);
static int vmw_legacy_srf_create(struct vmw_resource *res);
static int vmw_legacy_srf_destroy(struct vmw_resource *res);
static int vmw_gb_surface_create(struct vmw_resource *res);
static int vmw_gb_surface_bind(struct vmw_resource *res,
                               struct ttm_validate_buffer *val_buf);
static int vmw_gb_surface_unbind(struct vmw_resource *res,
                                 bool readback,
                                 struct ttm_validate_buffer *val_buf);
static int vmw_gb_surface_destroy(struct vmw_resource *res);

static const struct vmw_user_resource_conv user_surface_conv = {
        .object_type = VMW_RES_SURFACE,
        .base_obj_to_res = vmw_user_surface_base_to_res,
        .res_free = vmw_user_surface_free
};

const struct vmw_user_resource_conv *user_surface_converter =
        &user_surface_conv;

static uint64_t vmw_user_surface_size;

static const struct vmw_res_func vmw_legacy_surface_func = {
        .res_type = vmw_res_surface,
        .needs_backup = false,
        .may_evict = true,
        .type_name = "legacy surfaces",
        .backup_placement = &vmw_srf_placement,
        .create = &vmw_legacy_srf_create,
        .destroy = &vmw_legacy_srf_destroy,
        .bind = &vmw_legacy_srf_bind,
        .unbind = &vmw_legacy_srf_unbind
};

static const struct vmw_res_func vmw_gb_surface_func = {
        .res_type = vmw_res_surface,
        .needs_backup = true,
        .may_evict = true,
        .type_name = "guest backed surfaces",
        .backup_placement = &vmw_mob_placement,
        .create = vmw_gb_surface_create,
        .destroy = vmw_gb_surface_destroy,
        .bind = vmw_gb_surface_bind,
        .unbind = vmw_gb_surface_unbind
};

/**
 * struct vmw_surface_dma - SVGA3D DMA command
 */
struct vmw_surface_dma {
        SVGA3dCmdHeader header;
        SVGA3dCmdSurfaceDMA body;
        SVGA3dCopyBox cb;
        SVGA3dCmdSurfaceDMASuffix suffix;
};

/**
 * struct vmw_surface_define - SVGA3D Surface Define command
 */
struct vmw_surface_define {
        SVGA3dCmdHeader header;
        SVGA3dCmdDefineSurface body;
};

/**
 * struct vmw_surface_destroy - SVGA3D Surface Destroy command
 */
struct vmw_surface_destroy {
        SVGA3dCmdHeader header;
        SVGA3dCmdDestroySurface body;
};

/**
 * vmw_surface_dma_size - Compute fifo size for a dma command.
 *
 * @srf: Pointer to a struct vmw_surface
 *
 * Computes the required size for a surface dma command for backup or
 * restoration of the surface represented by @srf.
 */
static inline uint32_t vmw_surface_dma_size(const struct vmw_surface *srf)
{
        return srf->num_sizes * sizeof(struct vmw_surface_dma);
}

/**
 * vmw_surface_define_size - Compute fifo size for a surface define command.
 *
 * @srf: Pointer to a struct vmw_surface
 *
 * Computes the required size for a surface define command for the definition
 * of the surface represented by @srf.
 */
static inline uint32_t vmw_surface_define_size(const struct vmw_surface *srf)
{
        return sizeof(struct vmw_surface_define) + srf->num_sizes *
                sizeof(SVGA3dSize);
}

/**
 * vmw_surface_destroy_size - Compute fifo size for a surface destroy command.
 *
 * Computes the required size for a surface destroy command for the destruction
 * of a hw surface.
 */
static inline uint32_t vmw_surface_destroy_size(void)
{
        return sizeof(struct vmw_surface_destroy);
}

/**
 * vmw_surface_destroy_encode - Encode a surface_destroy command.
 *
 * @id: The surface id
 * @cmd_space: Pointer to memory area in which the commands should be encoded.
 */
static void vmw_surface_destroy_encode(uint32_t id,
                                       void *cmd_space)
{
        struct vmw_surface_destroy *cmd = (struct vmw_surface_destroy *)
                cmd_space;

        cmd->header.id = SVGA_3D_CMD_SURFACE_DESTROY;
        cmd->header.size = sizeof(cmd->body);
        cmd->body.sid = id;
}

/**
 * vmw_surface_define_encode - Encode a surface_define command.
 *
 * @srf: Pointer to a struct vmw_surface object.
 * @cmd_space: Pointer to memory area in which the commands should be encoded.
 */
static void vmw_surface_define_encode(const struct vmw_surface *srf,
                                      void *cmd_space)
{
        struct vmw_surface_define *cmd = (struct vmw_surface_define *)
                cmd_space;
        struct drm_vmw_size *src_size;
        SVGA3dSize *cmd_size;
        uint32_t cmd_len;
        int i;

        cmd_len = sizeof(cmd->body) + srf->num_sizes * sizeof(SVGA3dSize);

        cmd->header.id = SVGA_3D_CMD_SURFACE_DEFINE;
        cmd->header.size = cmd_len;
        cmd->body.sid = srf->res.id;
        cmd->body.surfaceFlags = srf->flags;
        cmd->body.format = srf->format;
        for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i)
                cmd->body.face[i].numMipLevels = srf->mip_levels[i];

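        /*
         * The mip level sizes are encoded as an array of SVGA3dSize
         * entries immediately following the fixed-size command body.
         */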
        cmd += 1;
        cmd_size = (SVGA3dSize *) cmd;
        src_size = srf->sizes;

        for (i = 0; i < srf->num_sizes; ++i, cmd_size++, src_size++) {
                cmd_size->width = src_size->width;
                cmd_size->height = src_size->height;
                cmd_size->depth = src_size->depth;
        }
}

/**
 * vmw_surface_dma_encode - Encode a surface_dma command.
 *
 * @srf: Pointer to a struct vmw_surface object.
 * @cmd_space: Pointer to memory area in which the commands should be encoded.
 * @ptr: Pointer to an SVGAGuestPtr indicating where the surface contents
 * should be placed or read from.
 * @to_surface: Boolean whether to DMA to the surface or from the surface.
 */
static void vmw_surface_dma_encode(struct vmw_surface *srf,
                                   void *cmd_space,
                                   const SVGAGuestPtr *ptr,
                                   bool to_surface)
{
        uint32_t i;
        struct vmw_surface_dma *cmd = (struct vmw_surface_dma *)cmd_space;
        const struct svga3d_surface_desc *desc =
                svga3dsurface_get_desc(srf->format);

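        /*
         * Encode one DMA command per image (face / mip level combination),
         * using the per-image offsets computed when the surface was defined.
         */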
        for (i = 0; i < srf->num_sizes; ++i) {
                SVGA3dCmdHeader *header = &cmd->header;
                SVGA3dCmdSurfaceDMA *body = &cmd->body;
                SVGA3dCopyBox *cb = &cmd->cb;
                SVGA3dCmdSurfaceDMASuffix *suffix = &cmd->suffix;
                const struct vmw_surface_offset *cur_offset = &srf->offsets[i];
                const struct drm_vmw_size *cur_size = &srf->sizes[i];

                header->id = SVGA_3D_CMD_SURFACE_DMA;
                header->size = sizeof(*body) + sizeof(*cb) + sizeof(*suffix);

                body->guest.ptr = *ptr;
                body->guest.ptr.offset += cur_offset->bo_offset;
                body->guest.pitch = svga3dsurface_calculate_pitch(desc,
                                                                  cur_size);
                body->host.sid = srf->res.id;
                body->host.face = cur_offset->face;
                body->host.mipmap = cur_offset->mip;
                body->transfer = (to_surface ? SVGA3D_WRITE_HOST_VRAM :
                                  SVGA3D_READ_HOST_VRAM);
                cb->x = 0;
                cb->y = 0;
                cb->z = 0;
                cb->srcx = 0;
                cb->srcy = 0;
                cb->srcz = 0;
                cb->w = cur_size->width;
                cb->h = cur_size->height;
                cb->d = cur_size->depth;

                suffix->suffixSize = sizeof(*suffix);
                suffix->maximumOffset =
                        svga3dsurface_get_image_buffer_size(desc, cur_size,
                                                            body->guest.pitch);
                suffix->flags.discard = 0;
                suffix->flags.unsynchronized = 0;
                suffix->flags.reserved = 0;
                ++cmd;
        }
}

/**
 * vmw_hw_surface_destroy - destroy a Device surface
 *
 * @res:        Pointer to a struct vmw_resource embedded in a struct
 *              vmw_surface.
 *
 * Destroys the device surface associated with a struct vmw_surface, if
 * any, and adjusts accounting and resource count accordingly.
 */
static void vmw_hw_surface_destroy(struct vmw_resource *res)
{
        struct vmw_private *dev_priv = res->dev_priv;
        struct vmw_surface *srf;
        void *cmd;

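        /* Guest-backed surfaces take a separate destroy path. */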
        if (res->func->destroy == vmw_gb_surface_destroy) {
                (void) vmw_gb_surface_destroy(res);
                return;
        }

        if (res->id != -1) {
                cmd = vmw_fifo_reserve(dev_priv, vmw_surface_destroy_size());
                if (unlikely(!cmd)) {
                        DRM_ERROR("Failed reserving FIFO space for surface "
                                  "destruction.\n");
                        return;
                }

                vmw_surface_destroy_encode(res->id, cmd);
                vmw_fifo_commit(dev_priv, vmw_surface_destroy_size());

                /*
                 * TODO: Use used_memory_size_atomic, or a separate lock,
                 * to avoid taking dev_priv::cmdbuf_mutex in the destroy
                 * path.
                 */

                mutex_lock(&dev_priv->cmdbuf_mutex);
                srf = vmw_res_to_srf(res);
                dev_priv->used_memory_size -= res->backup_size;
                mutex_unlock(&dev_priv->cmdbuf_mutex);
        }
        vmw_fifo_resource_dec(dev_priv);
}

/**
 * vmw_legacy_srf_create - Create a device surface as part of the
 * resource validation process.
 *
 * @res: Pointer to a struct vmw_surface.
 *
 * If the surface doesn't have a hw id, allocate one, and encode a surface
 * define command in the FIFO.
 *
 * Returns -EBUSY if there wasn't sufficient device resources to
 * complete the validation. Retry after freeing up resources.
 *
 * May return other errors if the kernel is out of guest resources.
 */
static int vmw_legacy_srf_create(struct vmw_resource *res)
{
        struct vmw_private *dev_priv = res->dev_priv;
        struct vmw_surface *srf;
        uint32_t submit_size;
        uint8_t *cmd;
        int ret;

        if (likely(res->id != -1))
                return 0;

        srf = vmw_res_to_srf(res);
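        /*
         * Legacy surfaces are backed by device surface memory; returning
         * -EBUSY here makes the caller retry after freeing up resources.
         */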
        if (unlikely(dev_priv->used_memory_size + res->backup_size >=
                     dev_priv->memory_size))
                return -EBUSY;

        /*
         * Alloc id for the resource.
         */

        ret = vmw_resource_alloc_id(res);
        if (unlikely(ret != 0)) {
                DRM_ERROR("Failed to allocate a surface id.\n");
                goto out_no_id;
        }

        if (unlikely(res->id >= SVGA3D_MAX_SURFACE_IDS)) {
                ret = -EBUSY;
                goto out_no_fifo;
        }

        /*
         * Encode the surface define command.
         */

        submit_size = vmw_surface_define_size(srf);
        cmd = vmw_fifo_reserve(dev_priv, submit_size);
        if (unlikely(!cmd)) {
                DRM_ERROR("Failed reserving FIFO space for surface "
                          "creation.\n");
                ret = -ENOMEM;
                goto out_no_fifo;
        }

        vmw_surface_define_encode(srf, cmd);
        vmw_fifo_commit(dev_priv, submit_size);

        /*
         * Surface memory usage accounting.
         */

        dev_priv->used_memory_size += res->backup_size;
        return 0;

out_no_fifo:
        vmw_resource_release_id(res);
out_no_id:
        return ret;
}

/**
 * vmw_legacy_srf_dma - Copy backup data to or from a legacy surface.
 *
 * @res:            Pointer to a struct vmw_res embedded in a struct
 *                  vmw_surface.
 * @val_buf:        Pointer to a struct ttm_validate_buffer containing
 *                  information about the backup buffer.
 * @bind:           Boolean whether to DMA to the surface.
 *
 * Transfer backup data to or from a legacy surface as part of the
 * validation process.
 * May return other errors if the kernel is out of guest resources.
 * The backup buffer will be fenced or idle upon successful completion,
 * and if the surface needs persistent backup storage, the backup buffer
 * will also be returned reserved iff @bind is true.
 */
static int vmw_legacy_srf_dma(struct vmw_resource *res,
                              struct ttm_validate_buffer *val_buf,
                              bool bind)
{
        SVGAGuestPtr ptr;
        struct vmw_fence_obj *fence;
        uint32_t submit_size;
        struct vmw_surface *srf = vmw_res_to_srf(res);
        uint8_t *cmd;
        struct vmw_private *dev_priv = res->dev_priv;

        BUG_ON(!val_buf->bo);
        submit_size = vmw_surface_dma_size(srf);
        cmd = vmw_fifo_reserve(dev_priv, submit_size);
        if (unlikely(!cmd)) {
                DRM_ERROR("Failed reserving FIFO space for surface "
                          "DMA.\n");
                return -ENOMEM;
        }
        vmw_bo_get_guest_ptr(val_buf->bo, &ptr);
        vmw_surface_dma_encode(srf, cmd, &ptr, bind);

        vmw_fifo_commit(dev_priv, submit_size);

        /*
         * Create a fence object and fence the backup buffer.
         */

        (void) vmw_execbuf_fence_commands(NULL, dev_priv,
                                          &fence, NULL);

        vmw_fence_single_bo(val_buf->bo, fence);

        if (likely(fence != NULL))
                vmw_fence_obj_unreference(&fence);

        return 0;
}

/**
 * vmw_legacy_srf_bind - Perform a legacy surface bind as part of the
 *                       surface validation process.
 *
 * @res:            Pointer to a struct vmw_res embedded in a struct
 *                  vmw_surface.
 * @val_buf:        Pointer to a struct ttm_validate_buffer containing
 *                  information about the backup buffer.
 *
 * This function will copy backup data to the surface if the
 * backup buffer is dirty.
 */
static int vmw_legacy_srf_bind(struct vmw_resource *res,
                               struct ttm_validate_buffer *val_buf)
{
        if (!res->backup_dirty)
                return 0;

        return vmw_legacy_srf_dma(res, val_buf, true);
}

/**
 * vmw_legacy_srf_unbind - Perform a legacy surface unbind as part of the
 *                         surface eviction process.
 *
 * @res:            Pointer to a struct vmw_res embedded in a struct
 *                  vmw_surface.
 * @readback:       Boolean whether to copy the surface contents back
 *                  to the backup buffer.
 * @val_buf:        Pointer to a struct ttm_validate_buffer containing
 *                  information about the backup buffer.
 *
 * This function will copy backup data from the surface.
 */
static int vmw_legacy_srf_unbind(struct vmw_resource *res,
                                 bool readback,
                                 struct ttm_validate_buffer *val_buf)
{
        if (unlikely(readback))
                return vmw_legacy_srf_dma(res, val_buf, false);
        return 0;
}

/**
 * vmw_legacy_srf_destroy - Destroy a device surface as part of a
 *                          resource eviction process.
 *
 * @res:            Pointer to a struct vmw_res embedded in a struct
 *                  vmw_surface.
 */
static int vmw_legacy_srf_destroy(struct vmw_resource *res)
{
        struct vmw_private *dev_priv = res->dev_priv;
        uint32_t submit_size;
        uint8_t *cmd;

        BUG_ON(res->id == -1);

        /*
         * Encode the surface destroy command.
         */

        submit_size = vmw_surface_destroy_size();
        cmd = vmw_fifo_reserve(dev_priv, submit_size);
        if (unlikely(!cmd)) {
                DRM_ERROR("Failed reserving FIFO space for surface "
                          "eviction.\n");
                return -ENOMEM;
        }

        vmw_surface_destroy_encode(res->id, cmd);
        vmw_fifo_commit(dev_priv, submit_size);

        /*
         * Surface memory usage accounting.
         */

        dev_priv->used_memory_size -= res->backup_size;

        /*
         * Release the surface ID.
         */

        vmw_resource_release_id(res);

        return 0;
}

/**
 * vmw_surface_init - initialize a struct vmw_surface
 *
 * @dev_priv:       Pointer to a device private struct.
 * @srf:            Pointer to the struct vmw_surface to initialize.
 * @res_free:       Pointer to a resource destructor used to free
 *                  the object.
 */
static int vmw_surface_init(struct vmw_private *dev_priv,
                            struct vmw_surface *srf,
                            void (*res_free) (struct vmw_resource *res))
{
        int ret;
        struct vmw_resource *res = &srf->res;

        BUG_ON(!res_free);
        if (!dev_priv->has_mob)
                vmw_fifo_resource_inc(dev_priv);
        ret = vmw_resource_init(dev_priv, res, true, res_free,
                                (dev_priv->has_mob) ? &vmw_gb_surface_func :
                                &vmw_legacy_surface_func);

        if (unlikely(ret != 0)) {
                if (!dev_priv->has_mob)
                        vmw_fifo_resource_dec(dev_priv);
                res_free(res);
                return ret;
        }

        /*
         * The surface won't be visible to hardware until a
         * surface validate.
         */

        INIT_LIST_HEAD(&srf->view_list);
        vmw_resource_activate(res, vmw_hw_surface_destroy);
        return ret;
}

/**
 * vmw_user_surface_base_to_res - TTM base object to resource converter for
 *                                user visible surfaces
 *
 * @base:           Pointer to a TTM base object
 *
 * Returns the struct vmw_resource embedded in a struct vmw_surface
 * for the user-visible object identified by the TTM base object @base.
 */
static struct vmw_resource *
vmw_user_surface_base_to_res(struct ttm_base_object *base)
{
        return &(container_of(base, struct vmw_user_surface,
                              prime.base)->srf.res);
}

/**
 * vmw_user_surface_free - User visible surface resource destructor
 *
 * @res:            A struct vmw_resource embedded in a struct vmw_surface.
 */
static void vmw_user_surface_free(struct vmw_resource *res)
{
        struct vmw_surface *srf = vmw_res_to_srf(res);
        struct vmw_user_surface *user_srf =
            container_of(srf, struct vmw_user_surface, srf);
        struct vmw_private *dev_priv = srf->res.dev_priv;
        uint32_t size = user_srf->size;

        if (user_srf->master)
                drm_master_put(&user_srf->master);
        kfree(srf->offsets);
        kfree(srf->sizes);
        kfree(srf->snooper.image);
        ttm_prime_object_kfree(user_srf, prime);
        ttm_mem_global_free(vmw_mem_glob(dev_priv), size);
}

/**
 * vmw_user_surface_base_release - User visible surface TTM base object
 *                                 destructor
 *
 * @p_base:         Pointer to a pointer to a TTM base object
 *                  embedded in a struct vmw_user_surface.
 *
 * Drops the base object's reference on its resource, and the
 * pointer pointed to by *p_base is set to NULL.
 */
static void vmw_user_surface_base_release(struct ttm_base_object **p_base)
{
        struct ttm_base_object *base = *p_base;
        struct vmw_user_surface *user_srf =
            container_of(base, struct vmw_user_surface, prime.base);
        struct vmw_resource *res = &user_srf->srf.res;

        *p_base = NULL;
        if (user_srf->backup_base)
                ttm_base_object_unref(&user_srf->backup_base);
        vmw_resource_unreference(&res);
}

/**
 * vmw_surface_destroy_ioctl - Ioctl function implementing
 *                             the user surface destroy functionality.
 *
 * @dev:            Pointer to a struct drm_device.
 * @data:           Pointer to data copied from / to user-space.
 * @file_priv:      Pointer to a drm file private structure.
 */
int vmw_surface_destroy_ioctl(struct drm_device *dev, void *data,
                              struct drm_file *file_priv)
{
        struct drm_vmw_surface_arg *arg = (struct drm_vmw_surface_arg *)data;
        struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;

        return ttm_ref_object_base_unref(tfile, arg->sid, TTM_REF_USAGE);
}

/**
 * vmw_surface_define_ioctl - Ioctl function implementing
 *                            the user surface define functionality.
 *
 * @dev:            Pointer to a struct drm_device.
 * @data:           Pointer to data copied from / to user-space.
 * @file_priv:      Pointer to a drm file private structure.
 */
int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
                             struct drm_file *file_priv)
{
        struct vmw_private *dev_priv = vmw_priv(dev);
        struct vmw_user_surface *user_srf;
        struct vmw_surface *srf;
        struct vmw_resource *res;
        struct vmw_resource *tmp;
        union drm_vmw_surface_create_arg *arg =
            (union drm_vmw_surface_create_arg *)data;
        struct drm_vmw_surface_create_req *req = &arg->req;
        struct drm_vmw_surface_arg *rep = &arg->rep;
        struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
        int ret;
        int i, j;
        uint32_t cur_bo_offset;
        struct drm_vmw_size *cur_size;
        struct vmw_surface_offset *cur_offset;
        uint32_t num_sizes;
        uint32_t size;
        const struct svga3d_surface_desc *desc;

        if (unlikely(vmw_user_surface_size == 0))
                vmw_user_surface_size = ttm_round_pot(sizeof(*user_srf)) +
                        128;

        num_sizes = 0;
        for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i) {
                if (req->mip_levels[i] > DRM_VMW_MAX_MIP_LEVELS)
                        return -EINVAL;
                num_sizes += req->mip_levels[i];
        }

        if (num_sizes > DRM_VMW_MAX_SURFACE_FACES * DRM_VMW_MAX_MIP_LEVELS ||
            num_sizes == 0)
                return -EINVAL;

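        /*
         * Estimate of the kernel memory pinned on behalf of this surface;
         * the extra 128 bytes appear to be a rough allowance for
         * base-object bookkeeping.
         */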
        size = vmw_user_surface_size + 128 +
                ttm_round_pot(num_sizes * sizeof(struct drm_vmw_size)) +
                ttm_round_pot(num_sizes * sizeof(struct vmw_surface_offset));

        desc = svga3dsurface_get_desc(req->format);
        if (unlikely(desc->block_desc == SVGA3DBLOCKDESC_NONE)) {
                DRM_ERROR("Invalid surface format %d for surface creation.\n",
                          req->format);
                return -EINVAL;
        }

        ret = ttm_read_lock(&dev_priv->reservation_sem, true);
        if (unlikely(ret != 0))
                return ret;

        ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
                                   size, false, true);
        if (unlikely(ret != 0)) {
                if (ret != -ERESTARTSYS)
                        DRM_ERROR("Out of graphics memory for surface"
                                  " creation.\n");
                goto out_unlock;
        }

        user_srf = kzalloc(sizeof(*user_srf), GFP_KERNEL);
        if (unlikely(!user_srf)) {
                ret = -ENOMEM;
                goto out_no_user_srf;
        }

        srf = &user_srf->srf;
        res = &srf->res;

        srf->flags = req->flags;
        srf->format = req->format;
        srf->scanout = req->scanout;

        memcpy(srf->mip_levels, req->mip_levels, sizeof(srf->mip_levels));
        srf->num_sizes = num_sizes;
        user_srf->size = size;
        srf->sizes = memdup_user((struct drm_vmw_size __user *)(unsigned long)
                                 req->size_addr,
                                 sizeof(*srf->sizes) * srf->num_sizes);
        if (IS_ERR(srf->sizes)) {
                ret = PTR_ERR(srf->sizes);
                goto out_no_sizes;
        }
        srf->offsets = kmalloc_array(srf->num_sizes,
                                     sizeof(*srf->offsets),
                                     GFP_KERNEL);
        if (unlikely(!srf->offsets)) {
                ret = -ENOMEM;
                goto out_no_offsets;
        }

        srf->base_size = *srf->sizes;
        srf->autogen_filter = SVGA3D_TEX_FILTER_NONE;
        srf->multisample_count = 0;

        cur_bo_offset = 0;
        cur_offset = srf->offsets;
        cur_size = srf->sizes;

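        /*
         * Lay out the backing store: record each face's mip level offsets
         * in the order the DMA encoding code will traverse them.
         */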
        for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i) {
                for (j = 0; j < srf->mip_levels[i]; ++j) {
                        uint32_t stride = svga3dsurface_calculate_pitch
                                (desc, cur_size);

                        cur_offset->face = i;
                        cur_offset->mip = j;
                        cur_offset->bo_offset = cur_bo_offset;
                        cur_bo_offset += svga3dsurface_get_image_buffer_size
                                (desc, cur_size, stride);
                        ++cur_offset;
                        ++cur_size;
                }
        }
        res->backup_size = cur_bo_offset;
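        /*
         * A 64x64 A8R8G8B8 scanout surface is assumed to be a cursor, so
         * allocate an image buffer for cursor command snooping.
         */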
        if (srf->scanout &&
            srf->num_sizes == 1 &&
            srf->sizes[0].width == 64 &&
            srf->sizes[0].height == 64 &&
            srf->format == SVGA3D_A8R8G8B8) {

                srf->snooper.image = kzalloc(64 * 64 * 4, GFP_KERNEL);
                if (!srf->snooper.image) {
                        DRM_ERROR("Failed to allocate cursor_image\n");
                        ret = -ENOMEM;
                        goto out_no_copy;
                }
        } else {
                srf->snooper.image = NULL;
        }

        user_srf->prime.base.shareable = false;
        user_srf->prime.base.tfile = NULL;
        if (drm_is_primary_client(file_priv))
                user_srf->master = drm_master_get(file_priv->master);

        /*
         * From this point, the generic resource management functions
         * destroy the object on failure.
         */

        ret = vmw_surface_init(dev_priv, srf, vmw_user_surface_free);
        if (unlikely(ret != 0))
                goto out_unlock;

        /*
         * A gb-aware client referencing a shared surface will
         * expect a backup buffer to be present.
         */
        if (dev_priv->has_mob && req->shareable) {
                uint32_t backup_handle;

                ret = vmw_user_dmabuf_alloc(dev_priv, tfile,
                                            res->backup_size,
                                            true,
                                            &backup_handle,
                                            &res->backup,
                                            &user_srf->backup_base);
                if (unlikely(ret != 0)) {
                        vmw_resource_unreference(&res);
                        goto out_unlock;
                }
        }

        tmp = vmw_resource_reference(&srf->res);
        ret = ttm_prime_object_init(tfile, res->backup_size, &user_srf->prime,
                                    req->shareable, VMW_RES_SURFACE,
                                    &vmw_user_surface_base_release, NULL);

        if (unlikely(ret != 0)) {
                vmw_resource_unreference(&tmp);
                vmw_resource_unreference(&res);
                goto out_unlock;
        }

        rep->sid = user_srf->prime.base.hash.key;
        vmw_resource_unreference(&res);

        ttm_read_unlock(&dev_priv->reservation_sem);
        return 0;
out_no_copy:
        kfree(srf->offsets);
out_no_offsets:
        kfree(srf->sizes);
out_no_sizes:
        ttm_prime_object_kfree(user_srf, prime);
out_no_user_srf:
        ttm_mem_global_free(vmw_mem_glob(dev_priv), size);
out_unlock:
        ttm_read_unlock(&dev_priv->reservation_sem);
        return ret;
}

static int
vmw_surface_handle_reference(struct vmw_private *dev_priv,
                             struct drm_file *file_priv,
                             uint32_t u_handle,
                             enum drm_vmw_handle_type handle_type,
                             struct ttm_base_object **base_p)
{
        struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
        struct vmw_user_surface *user_srf;
        uint32_t handle;
        struct ttm_base_object *base;
        int ret;
        bool require_exist = false;

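        /*
         * Prime handles are converted to local TTM handles first. Render
         * clients are not master-authenticated, so they may only reference
         * surfaces already known to this file.
         */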
        if (handle_type == DRM_VMW_HANDLE_PRIME) {
                ret = ttm_prime_fd_to_handle(tfile, u_handle, &handle);
                if (unlikely(ret != 0))
                        return ret;
        } else {
                if (unlikely(drm_is_render_client(file_priv)))
                        require_exist = true;

                if (ACCESS_ONCE(vmw_fpriv(file_priv)->locked_master)) {
                        DRM_ERROR("Locked master refused legacy "
                                  "surface reference.\n");
                        return -EACCES;
                }

                handle = u_handle;
        }

        ret = -EINVAL;
        base = ttm_base_object_lookup_for_ref(dev_priv->tdev, handle);
        if (unlikely(!base)) {
                DRM_ERROR("Could not find surface to reference.\n");
                goto out_no_lookup;
        }

        if (unlikely(ttm_base_object_type(base) != VMW_RES_SURFACE)) {
                DRM_ERROR("Referenced object is not a surface.\n");
                goto out_bad_resource;
        }

        if (handle_type != DRM_VMW_HANDLE_PRIME) {
                user_srf = container_of(base, struct vmw_user_surface,
                                        prime.base);

                /*
                 * Make sure the surface creator has the same
                 * authenticating master, or is already registered with us.
                 */
                if (drm_is_primary_client(file_priv) &&
                    user_srf->master != file_priv->master)
                        require_exist = true;

                ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL,
                                         require_exist);
                if (unlikely(ret != 0)) {
                        DRM_ERROR("Could not add a reference to a surface.\n");
                        goto out_bad_resource;
                }
        }

        *base_p = base;
        return 0;

out_bad_resource:
        ttm_base_object_unref(&base);
out_no_lookup:
        if (handle_type == DRM_VMW_HANDLE_PRIME)
                (void) ttm_ref_object_base_unref(tfile, handle, TTM_REF_USAGE);

        return ret;
}

/**
 * vmw_surface_reference_ioctl - Ioctl function implementing
 *                               the user surface reference functionality.
 *
 * @dev:            Pointer to a struct drm_device.
 * @data:           Pointer to data copied from / to user-space.
 * @file_priv:      Pointer to a drm file private structure.
 */
int vmw_surface_reference_ioctl(struct drm_device *dev, void *data,
                                struct drm_file *file_priv)
{
        struct vmw_private *dev_priv = vmw_priv(dev);
        union drm_vmw_surface_reference_arg *arg =
            (union drm_vmw_surface_reference_arg *)data;
        struct drm_vmw_surface_arg *req = &arg->req;
        struct drm_vmw_surface_create_req *rep = &arg->rep;
        struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
        struct vmw_surface *srf;
        struct vmw_user_surface *user_srf;
        struct drm_vmw_size __user *user_sizes;
        struct ttm_base_object *base;
        int ret;

        ret = vmw_surface_handle_reference(dev_priv, file_priv, req->sid,
                                           req->handle_type, &base);
        if (unlikely(ret != 0))
                return ret;

        user_srf = container_of(base, struct vmw_user_surface, prime.base);
        srf = &user_srf->srf;

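        /* Only the base (first mip level) size is copied back to user-space. */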
        rep->flags = srf->flags;
        rep->format = srf->format;
        memcpy(rep->mip_levels, srf->mip_levels, sizeof(srf->mip_levels));
        user_sizes = (struct drm_vmw_size __user *)(unsigned long)
            rep->size_addr;

        if (user_sizes)
                ret = copy_to_user(user_sizes, &srf->base_size,
                                   sizeof(srf->base_size));
        if (unlikely(ret != 0)) {
                DRM_ERROR("copy_to_user failed %p %u\n",
                          user_sizes, srf->num_sizes);
                ttm_ref_object_base_unref(tfile, base->hash.key, TTM_REF_USAGE);
                ret = -EFAULT;
        }

        ttm_base_object_unref(&base);

        return ret;
}

/**
 * vmw_gb_surface_create - Create a guest-backed device surface as part
 * of the resource validation process.
 *
 * @res: Pointer to a struct vmw_resource embedded in a struct vmw_surface.
 */
static int vmw_gb_surface_create(struct vmw_resource *res)
{
        struct vmw_private *dev_priv = res->dev_priv;
        struct vmw_surface *srf = vmw_res_to_srf(res);
        uint32_t cmd_len, cmd_id, submit_len;
        int ret;
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdDefineGBSurface body;
        } *cmd;
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdDefineGBSurface_v2 body;
        } *cmd2;

        if (likely(res->id != -1))
                return 0;

        vmw_fifo_resource_inc(dev_priv);
        ret = vmw_resource_alloc_id(res);
        if (unlikely(ret != 0)) {
                DRM_ERROR("Failed to allocate a surface id.\n");
                goto out_no_id;
        }

        if (unlikely(res->id >= VMWGFX_NUM_GB_SURFACE)) {
                ret = -EBUSY;
                goto out_no_fifo;
        }

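        /*
         * Surface arrays need the v2 define command, which adds an
         * arraySize field; plain surfaces use the original command.
         */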
        if (srf->array_size > 0) {
                /* has_dx was checked at surface creation time. */
                cmd_id = SVGA_3D_CMD_DEFINE_GB_SURFACE_V2;
                cmd_len = sizeof(cmd2->body);
                submit_len = sizeof(*cmd2);
        } else {
                cmd_id = SVGA_3D_CMD_DEFINE_GB_SURFACE;
                cmd_len = sizeof(cmd->body);
                submit_len = sizeof(*cmd);
        }

        cmd = vmw_fifo_reserve(dev_priv, submit_len);
        cmd2 = (typeof(cmd2))cmd;
        if (unlikely(!cmd)) {
                DRM_ERROR("Failed reserving FIFO space for surface "
                          "creation.\n");
                ret = -ENOMEM;
                goto out_no_fifo;
        }

        if (srf->array_size > 0) {
                cmd2->header.id = cmd_id;
                cmd2->header.size = cmd_len;
                cmd2->body.sid = srf->res.id;
                cmd2->body.surfaceFlags = srf->flags;
                cmd2->body.format = cpu_to_le32(srf->format);
                cmd2->body.numMipLevels = srf->mip_levels[0];
                cmd2->body.multisampleCount = srf->multisample_count;
                cmd2->body.autogenFilter = srf->autogen_filter;
                cmd2->body.size.width = srf->base_size.width;
                cmd2->body.size.height = srf->base_size.height;
                cmd2->body.size.depth = srf->base_size.depth;
                cmd2->body.arraySize = srf->array_size;
        } else {
                cmd->header.id = cmd_id;
                cmd->header.size = cmd_len;
                cmd->body.sid = srf->res.id;
                cmd->body.surfaceFlags = srf->flags;
                cmd->body.format = cpu_to_le32(srf->format);
                cmd->body.numMipLevels = srf->mip_levels[0];
                cmd->body.multisampleCount = srf->multisample_count;
                cmd->body.autogenFilter = srf->autogen_filter;
                cmd->body.size.width = srf->base_size.width;
                cmd->body.size.height = srf->base_size.height;
                cmd->body.size.depth = srf->base_size.depth;
        }

        vmw_fifo_commit(dev_priv, submit_len);

        return 0;

out_no_fifo:
        vmw_resource_release_id(res);
out_no_id:
        vmw_fifo_resource_dec(dev_priv);
        return ret;
}

static int vmw_gb_surface_bind(struct vmw_resource *res,
                               struct ttm_validate_buffer *val_buf)
{
        struct vmw_private *dev_priv = res->dev_priv;
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdBindGBSurface body;
        } *cmd1;
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdUpdateGBSurface body;
        } *cmd2;
        uint32_t submit_size;
        struct ttm_buffer_object *bo = val_buf->bo;

        BUG_ON(bo->mem.mem_type != VMW_PL_MOB);

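        /*
         * If the backup is dirty, follow the bind with an update command
         * so the device picks up the new buffer contents.
         */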
        submit_size = sizeof(*cmd1) + (res->backup_dirty ? sizeof(*cmd2) : 0);

        cmd1 = vmw_fifo_reserve(dev_priv, submit_size);
        if (unlikely(!cmd1)) {
                DRM_ERROR("Failed reserving FIFO space for surface "
                          "binding.\n");
                return -ENOMEM;
        }

        cmd1->header.id = SVGA_3D_CMD_BIND_GB_SURFACE;
        cmd1->header.size = sizeof(cmd1->body);
        cmd1->body.sid = res->id;
        cmd1->body.mobid = bo->mem.start;
        if (res->backup_dirty) {
                cmd2 = (void *) &cmd1[1];
                cmd2->header.id = SVGA_3D_CMD_UPDATE_GB_SURFACE;
                cmd2->header.size = sizeof(cmd2->body);
                cmd2->body.sid = res->id;
                res->backup_dirty = false;
        }
        vmw_fifo_commit(dev_priv, submit_size);

        return 0;
}

static int vmw_gb_surface_unbind(struct vmw_resource *res,
                                 bool readback,
                                 struct ttm_validate_buffer *val_buf)
{
        struct vmw_private *dev_priv = res->dev_priv;
        struct ttm_buffer_object *bo = val_buf->bo;
        struct vmw_fence_obj *fence;

        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdReadbackGBSurface body;
        } *cmd1;
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdInvalidateGBSurface body;
        } *cmd2;
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdBindGBSurface body;
        } *cmd3;
        uint32_t submit_size;
        uint8_t *cmd;

        BUG_ON(bo->mem.mem_type != VMW_PL_MOB);

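        /*
         * With readback, the surface contents are transferred back to the
         * MOB before unbinding; otherwise they are simply invalidated. In
         * both cases the surface is then bound to SVGA3D_INVALID_ID.
         */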
        submit_size = sizeof(*cmd3) + (readback ? sizeof(*cmd1) : sizeof(*cmd2));
        cmd = vmw_fifo_reserve(dev_priv, submit_size);
        if (unlikely(!cmd)) {
                DRM_ERROR("Failed reserving FIFO space for surface "
                          "unbinding.\n");
                return -ENOMEM;
        }

        if (readback) {
                cmd1 = (void *) cmd;
                cmd1->header.id = SVGA_3D_CMD_READBACK_GB_SURFACE;
                cmd1->header.size = sizeof(cmd1->body);
                cmd1->body.sid = res->id;
                cmd3 = (void *) &cmd1[1];
        } else {
                cmd2 = (void *) cmd;
                cmd2->header.id = SVGA_3D_CMD_INVALIDATE_GB_SURFACE;
                cmd2->header.size = sizeof(cmd2->body);
                cmd2->body.sid = res->id;
                cmd3 = (void *) &cmd2[1];
        }

        cmd3->header.id = SVGA_3D_CMD_BIND_GB_SURFACE;
        cmd3->header.size = sizeof(cmd3->body);
        cmd3->body.sid = res->id;
        cmd3->body.mobid = SVGA3D_INVALID_ID;

        vmw_fifo_commit(dev_priv, submit_size);

        /*
         * Create a fence object and fence the backup buffer.
         */

        (void) vmw_execbuf_fence_commands(NULL, dev_priv,
                                          &fence, NULL);

        vmw_fence_single_bo(val_buf->bo, fence);

        if (likely(fence != NULL))
                vmw_fence_obj_unreference(&fence);

        return 0;
}

static int vmw_gb_surface_destroy(struct vmw_resource *res)
{
        struct vmw_private *dev_priv = res->dev_priv;
        struct vmw_surface *srf = vmw_res_to_srf(res);
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdDestroyGBSurface body;
        } *cmd;

        if (likely(res->id == -1))
                return 0;

        mutex_lock(&dev_priv->binding_mutex);
        vmw_view_surface_list_destroy(dev_priv, &srf->view_list);
        vmw_binding_res_list_scrub(&res->binding_head);

        cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
        if (unlikely(!cmd)) {
                DRM_ERROR("Failed reserving FIFO space for surface "
                          "destruction.\n");
                mutex_unlock(&dev_priv->binding_mutex);
                return -ENOMEM;
        }

        cmd->header.id = SVGA_3D_CMD_DESTROY_GB_SURFACE;
        cmd->header.size = sizeof(cmd->body);
        cmd->body.sid = res->id;
        vmw_fifo_commit(dev_priv, sizeof(*cmd));
        mutex_unlock(&dev_priv->binding_mutex);
        vmw_resource_release_id(res);
        vmw_fifo_resource_dec(dev_priv);

        return 0;
}

/**
 * vmw_gb_surface_define_ioctl - Ioctl function implementing
 *                               the user surface define functionality.
 *
 * @dev:            Pointer to a struct drm_device.
 * @data:           Pointer to data copied from / to user-space.
 * @file_priv:      Pointer to a drm file private structure.
 */
int vmw_gb_surface_define_ioctl(struct drm_device *dev, void *data,
                                struct drm_file *file_priv)
{
        struct vmw_private *dev_priv = vmw_priv(dev);
        struct vmw_user_surface *user_srf;
        struct vmw_surface *srf;
        struct vmw_resource *res;
        struct vmw_resource *tmp;
        union drm_vmw_gb_surface_create_arg *arg =
            (union drm_vmw_gb_surface_create_arg *)data;
        struct drm_vmw_gb_surface_create_req *req = &arg->req;
        struct drm_vmw_gb_surface_create_rep *rep = &arg->rep;
        struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
        int ret;
        uint32_t size;
        uint32_t backup_handle = 0;

        if (req->multisample_count != 0)
                return -EINVAL;

        if (req->mip_levels > DRM_VMW_MAX_MIP_LEVELS)
                return -EINVAL;

        if (unlikely(vmw_user_surface_size == 0))
                vmw_user_surface_size = ttm_round_pot(sizeof(*user_srf)) +
                        128;

        size = vmw_user_surface_size + 128;

        /* Define a surface based on the parameters. */
        ret = vmw_surface_gb_priv_define(dev,
                        size,
                        req->svga3d_flags,
                        req->format,
                        req->drm_surface_flags & drm_vmw_surface_flag_scanout,
                        req->mip_levels,
                        req->multisample_count,
                        req->array_size,
                        req->base_size,
                        &srf);
        if (unlikely(ret != 0))
                return ret;

        user_srf = container_of(srf, struct vmw_user_surface, srf);
        if (drm_is_primary_client(file_priv))
                user_srf->master = drm_master_get(file_priv->master);

        ret = ttm_read_lock(&dev_priv->reservation_sem, true);
        if (unlikely(ret != 0))
                return ret;

        res = &user_srf->srf.res;

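        /*
         * Either reference a user-supplied backup buffer, validating that
         * it is large enough, or allocate a new one when requested.
         */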
        if (req->buffer_handle != SVGA3D_INVALID_ID) {
                ret = vmw_user_dmabuf_lookup(tfile, req->buffer_handle,
                                             &res->backup,
                                             &user_srf->backup_base);
                if (ret == 0) {
                        if (res->backup->base.num_pages * PAGE_SIZE <
                            res->backup_size) {
                                DRM_ERROR("Surface backup buffer is too small.\n");
                                vmw_dmabuf_unreference(&res->backup);
                                ret = -EINVAL;
                                goto out_unlock;
                        } else {
                                backup_handle = req->buffer_handle;
                        }
                }
        } else if (req->drm_surface_flags & drm_vmw_surface_flag_create_buffer)
                ret = vmw_user_dmabuf_alloc(dev_priv, tfile,
                                            res->backup_size,
                                            req->drm_surface_flags &
                                            drm_vmw_surface_flag_shareable,
                                            &backup_handle,
                                            &res->backup,
                                            &user_srf->backup_base);

        if (unlikely(ret != 0)) {
                vmw_resource_unreference(&res);
                goto out_unlock;
        }

        tmp = vmw_resource_reference(res);
        ret = ttm_prime_object_init(tfile, res->backup_size, &user_srf->prime,
                                    req->drm_surface_flags &
                                    drm_vmw_surface_flag_shareable,
                                    VMW_RES_SURFACE,
                                    &vmw_user_surface_base_release, NULL);

        if (unlikely(ret != 0)) {
                vmw_resource_unreference(&tmp);
                vmw_resource_unreference(&res);
                goto out_unlock;
        }

        rep->handle      = user_srf->prime.base.hash.key;
        rep->backup_size = res->backup_size;
        if (res->backup) {
                rep->buffer_map_handle =
                        drm_vma_node_offset_addr(&res->backup->base.vma_node);
                rep->buffer_size = res->backup->base.num_pages * PAGE_SIZE;
                rep->buffer_handle = backup_handle;
        } else {
                rep->buffer_map_handle = 0;
                rep->buffer_size = 0;
                rep->buffer_handle = SVGA3D_INVALID_ID;
        }

        vmw_resource_unreference(&res);

out_unlock:
        ttm_read_unlock(&dev_priv->reservation_sem);
        return ret;
}

/**
 * vmw_gb_surface_reference_ioctl - Ioctl function implementing
 *                                  the user surface reference functionality.
 *
 * @dev:            Pointer to a struct drm_device.
 * @data:           Pointer to data copied from / to user-space.
 * @file_priv:      Pointer to a drm file private structure.
 */
int vmw_gb_surface_reference_ioctl(struct drm_device *dev, void *data,
                                   struct drm_file *file_priv)
{
        struct vmw_private *dev_priv = vmw_priv(dev);
        union drm_vmw_gb_surface_reference_arg *arg =
            (union drm_vmw_gb_surface_reference_arg *)data;
        struct drm_vmw_surface_arg *req = &arg->req;
        struct drm_vmw_gb_surface_ref_rep *rep = &arg->rep;
        struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
        struct vmw_surface *srf;
        struct vmw_user_surface *user_srf;
        struct ttm_base_object *base;
        uint32_t backup_handle;
        int ret;

        ret = vmw_surface_handle_reference(dev_priv, file_priv, req->sid,
                                           req->handle_type, &base);
        if (unlikely(ret != 0))
                return ret;

        user_srf = container_of(base, struct vmw_user_surface, prime.base);
        srf = &user_srf->srf;
        if (!srf->res.backup) {
                DRM_ERROR("Shared GB surface is missing a backup buffer.\n");
                ret = -EINVAL;
                goto out_bad_resource;
        }

        mutex_lock(&dev_priv->cmdbuf_mutex); /* Protect res->backup */
        ret = vmw_user_dmabuf_reference(tfile, srf->res.backup,
                                        &backup_handle);
        mutex_unlock(&dev_priv->cmdbuf_mutex);

        if (unlikely(ret != 0)) {
                DRM_ERROR("Could not add a reference to a GB surface "
                          "backup buffer.\n");
                (void) ttm_ref_object_base_unref(tfile, base->hash.key,
                                                 TTM_REF_USAGE);
                goto out_bad_resource;
        }

        rep->creq.svga3d_flags = srf->flags;
        rep->creq.format = srf->format;
        rep->creq.mip_levels = srf->mip_levels[0];
        rep->creq.drm_surface_flags = 0;
        rep->creq.multisample_count = srf->multisample_count;
        rep->creq.autogen_filter = srf->autogen_filter;
        rep->creq.array_size = srf->array_size;
        rep->creq.buffer_handle = backup_handle;
        rep->creq.base_size = srf->base_size;
        rep->crep.handle = user_srf->prime.base.hash.key;
        rep->crep.backup_size = srf->res.backup_size;
        rep->crep.buffer_handle = backup_handle;
        rep->crep.buffer_map_handle =
                drm_vma_node_offset_addr(&srf->res.backup->base.vma_node);
        rep->crep.buffer_size = srf->res.backup->base.num_pages * PAGE_SIZE;

out_bad_resource:
        ttm_base_object_unref(&base);

        return ret;
}

/**
 * vmw_surface_gb_priv_define - Define a private GB surface
 *
 * @dev:  Pointer to a struct drm_device
 * @user_accounting_size:  Used to track user-space memory usage, set
 *                         to 0 for kernel mode only memory
 * @svga3d_flags: SVGA3d surface flags for the device
 * @format: requested surface format
 * @for_scanout: true if intended to be used for scanout buffer
 * @num_mip_levels:  number of MIP levels
 * @multisample_count: Surface multisample count
 * @array_size: Surface array size.
 * @size: width, height, depth of the surface requested
 * @srf_out: allocated user_srf. Set to NULL on failure.
 *
 * GB surfaces allocated by this function will not have a user mode handle, and
 * thus will only be visible to vmwgfx.  For optimization reasons the
 * surface may later be given a user mode handle by another function to make
 * it available to user mode drivers.
 */
int vmw_surface_gb_priv_define(struct drm_device *dev,
                               uint32_t user_accounting_size,
                               uint32_t svga3d_flags,
                               SVGA3dSurfaceFormat format,
                               bool for_scanout,
                               uint32_t num_mip_levels,
                               uint32_t multisample_count,
                               uint32_t array_size,
                               struct drm_vmw_size size,
                               struct vmw_surface **srf_out)
{
        struct vmw_private *dev_priv = vmw_priv(dev);
        struct vmw_user_surface *user_srf;
        struct vmw_surface *srf;
        int ret;
        u32 num_layers;

        *srf_out = NULL;

        if (for_scanout) {
                uint32_t max_width, max_height;

                if (!svga3dsurface_is_screen_target_format(format)) {
                        DRM_ERROR("Invalid Screen Target surface format.\n");
                        return -EINVAL;
                }

                max_width = min(dev_priv->texture_max_width,
                                dev_priv->stdu_max_width);
                max_height = min(dev_priv->texture_max_height,
                                 dev_priv->stdu_max_height);

                if (size.width > max_width || size.height > max_height) {
                        DRM_ERROR("%ux%u, exceeds max surface size %ux%u\n",
                                  size.width, size.height,
                                  max_width, max_height);
                        return -EINVAL;
                }
        } else {
                const struct svga3d_surface_desc *desc;

                desc = svga3dsurface_get_desc(format);
                if (unlikely(desc->block_desc == SVGA3DBLOCKDESC_NONE)) {
                        DRM_ERROR("Invalid surface format.\n");
                        return -EINVAL;
                }
        }

        /* array_size must be zero for non-DX hosts. */
        if (array_size > 0 && !dev_priv->has_dx) {
                DRM_ERROR("Tried to create DX surface on non-DX host.\n");
                return -EINVAL;
        }

        ret = ttm_read_lock(&dev_priv->reservation_sem, true);
        if (unlikely(ret != 0))
                return ret;

        ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
                                   user_accounting_size, false, true);
        if (unlikely(ret != 0)) {
                if (ret != -ERESTARTSYS)
                        DRM_ERROR("Out of graphics memory for surface"
                                  " creation.\n");
                goto out_unlock;
        }

        user_srf = kzalloc(sizeof(*user_srf), GFP_KERNEL);
        if (unlikely(!user_srf)) {
                ret = -ENOMEM;
                goto out_no_user_srf;
        }

        *srf_out = &user_srf->srf;
        user_srf->size = user_accounting_size;
        user_srf->prime.base.shareable = false;
        user_srf->prime.base.tfile     = NULL;

        srf = &user_srf->srf;
        srf->flags             = svga3d_flags;
        srf->format            = format;
        srf->scanout           = for_scanout;
        srf->mip_levels[0]     = num_mip_levels;
        srf->num_sizes         = 1;
        srf->sizes             = NULL;
        srf->offsets           = NULL;
        srf->base_size         = size;
        srf->autogen_filter    = SVGA3D_TEX_FILTER_NONE;
        srf->array_size        = array_size;
        srf->multisample_count = multisample_count;

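        /*
         * The serialized size depends on the layer count: an explicit
         * array size, six faces for a cube map, or a single layer.
         */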
        if (array_size)
                num_layers = array_size;
        else if (svga3d_flags & SVGA3D_SURFACE_CUBEMAP)
                num_layers = SVGA3D_MAX_SURFACE_FACES;
        else
                num_layers = 1;

        srf->res.backup_size   =
                svga3dsurface_get_serialized_size(srf->format,
                                                  srf->base_size,
                                                  srf->mip_levels[0],
                                                  num_layers);

        if (srf->flags & SVGA3D_SURFACE_BIND_STREAM_OUTPUT)
                srf->res.backup_size += sizeof(SVGA3dDXSOState);

        if (dev_priv->active_display_unit == vmw_du_screen_target &&
            for_scanout)
                srf->flags |= SVGA3D_SURFACE_SCREENTARGET;

        /*
         * From this point, the generic resource management functions
         * destroy the object on failure.
         */
        ret = vmw_surface_init(dev_priv, srf, vmw_user_surface_free);

        ttm_read_unlock(&dev_priv->reservation_sem);
        return ret;

out_no_user_srf:
        ttm_mem_global_free(vmw_mem_glob(dev_priv), user_accounting_size);

out_unlock:
        ttm_read_unlock(&dev_priv->reservation_sem);
        return ret;
}