/**************************************************************************
 *
 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_drv.h"
#include "vmwgfx_drm.h"
#include "ttm/ttm_object.h"
#include "ttm/ttm_placement.h"
#include "drmP.h"

#define VMW_RES_CONTEXT ttm_driver_type0
#define VMW_RES_SURFACE ttm_driver_type1
#define VMW_RES_STREAM ttm_driver_type2

struct vmw_user_context {
        struct ttm_base_object base;
        struct vmw_resource res;
};

struct vmw_user_surface {
        struct ttm_base_object base;
        struct vmw_surface srf;
};

struct vmw_user_dma_buffer {
        struct ttm_base_object base;
        struct vmw_dma_buffer dma;
};

struct vmw_bo_user_rep {
        uint32_t handle;
        uint64_t map_handle;
};

struct vmw_stream {
        struct vmw_resource res;
        uint32_t stream_id;
};

struct vmw_user_stream {
        struct ttm_base_object base;
        struct vmw_stream stream;
};

static inline struct vmw_dma_buffer *
vmw_dma_buffer(struct ttm_buffer_object *bo)
{
        return container_of(bo, struct vmw_dma_buffer, base);
}

static inline struct vmw_user_dma_buffer *
vmw_user_dma_buffer(struct ttm_buffer_object *bo)
{
        struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
        return container_of(vmw_bo, struct vmw_user_dma_buffer, dma);
}

struct vmw_resource *vmw_resource_reference(struct vmw_resource *res)
{
        kref_get(&res->kref);
        return res;
}

static void vmw_resource_release(struct kref *kref)
{
        struct vmw_resource *res =
            container_of(kref, struct vmw_resource, kref);
        struct vmw_private *dev_priv = res->dev_priv;

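        /*
         * This release function is invoked via kref_put() from
         * vmw_resource_unreference(), which holds resource_lock in write
         * mode. The lock is dropped around destruction so that hw_destroy()
         * and res_free() may sleep, and retaken before returning.
         */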
        idr_remove(res->idr, res->id);
        write_unlock(&dev_priv->resource_lock);

        if (likely(res->hw_destroy != NULL))
                res->hw_destroy(res);

        if (res->res_free != NULL)
                res->res_free(res);
        else
                kfree(res);

        write_lock(&dev_priv->resource_lock);
}

void vmw_resource_unreference(struct vmw_resource **p_res)
{
        struct vmw_resource *res = *p_res;
        struct vmw_private *dev_priv = res->dev_priv;

        *p_res = NULL;
        write_lock(&dev_priv->resource_lock);
        kref_put(&res->kref, vmw_resource_release);
        write_unlock(&dev_priv->resource_lock);
}

static int vmw_resource_init(struct vmw_private *dev_priv,
                             struct vmw_resource *res,
                             struct idr *idr,
                             enum ttm_object_type obj_type,
                             void (*res_free) (struct vmw_resource *res))
{
        int ret;

        kref_init(&res->kref);
        res->hw_destroy = NULL;
        res->res_free = res_free;
        res->res_type = obj_type;
        res->idr = idr;
        res->avail = false;
        res->dev_priv = dev_priv;

        do {
                if (unlikely(idr_pre_get(idr, GFP_KERNEL) == 0))
                        return -ENOMEM;

                write_lock(&dev_priv->resource_lock);
                ret = idr_get_new_above(idr, res, 1, &res->id);
                write_unlock(&dev_priv->resource_lock);

        } while (ret == -EAGAIN);

        return ret;
}

/**
 * vmw_resource_activate
 *
 * @res:        Pointer to the newly created resource
 * @hw_destroy: Destroy function. NULL if none.
 *
 * Activate a resource after the hardware has been made aware of it.
 * Set the destroy function to @hw_destroy. Typically this frees the
 * resource and destroys the hardware resources associated with it.
 * Activation means that from this point on, vmw_resource_lookup will
 * be able to find the resource.
 */
static void vmw_resource_activate(struct vmw_resource *res,
                                  void (*hw_destroy) (struct vmw_resource *))
{
        struct vmw_private *dev_priv = res->dev_priv;

        write_lock(&dev_priv->resource_lock);
        res->avail = true;
        res->hw_destroy = hw_destroy;
        write_unlock(&dev_priv->resource_lock);
}
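
/*
 * Illustrative sketch (editor's note, not driver code): the typical
 * lifecycle of a resource, assuming a hypothetical my_hw_destroy()
 * callback that emits the corresponding hardware destroy command:
 *
 *      ret = vmw_resource_init(dev_priv, res, &dev_priv->context_idr,
 *                              VMW_RES_CONTEXT, NULL);
 *      if (unlikely(ret != 0))
 *              return ret;
 *      (reserve FIFO space and emit the hardware create command)
 *      vmw_resource_activate(res, my_hw_destroy);
 */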

struct vmw_resource *vmw_resource_lookup(struct vmw_private *dev_priv,
                                         struct idr *idr, int id)
{
        struct vmw_resource *res;

        read_lock(&dev_priv->resource_lock);
        res = idr_find(idr, id);
        if (res && res->avail)
                kref_get(&res->kref);
        else
                res = NULL;
        read_unlock(&dev_priv->resource_lock);

        if (unlikely(res == NULL))
                return NULL;

        return res;
}
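
/*
 * Editor's usage note: a successful lookup returns the resource with an
 * extra reference held; callers are expected to drop it again, e.g.:
 *
 *      res = vmw_resource_lookup(dev_priv, &dev_priv->context_idr, id);
 *      if (unlikely(res == NULL))
 *              return -EINVAL;
 *      (use the resource)
 *      vmw_resource_unreference(&res);
 */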

/**
 * Context management:
 */

static void vmw_hw_context_destroy(struct vmw_resource *res)
{
        struct vmw_private *dev_priv = res->dev_priv;
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdDestroyContext body;
        } *cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));

        if (unlikely(cmd == NULL)) {
                DRM_ERROR("Failed reserving FIFO space for context "
                          "destruction.\n");
                return;
        }

        cmd->header.id = cpu_to_le32(SVGA_3D_CMD_CONTEXT_DESTROY);
        cmd->header.size = cpu_to_le32(sizeof(cmd->body));
        cmd->body.cid = cpu_to_le32(res->id);

        vmw_fifo_commit(dev_priv, sizeof(*cmd));
        vmw_3d_resource_dec(dev_priv);
}

static int vmw_context_init(struct vmw_private *dev_priv,
                            struct vmw_resource *res,
                            void (*res_free) (struct vmw_resource *res))
{
        int ret;

        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdDefineContext body;
        } *cmd;

        ret = vmw_resource_init(dev_priv, res, &dev_priv->context_idr,
                                VMW_RES_CONTEXT, res_free);

        if (unlikely(ret != 0)) {
                if (res_free == NULL)
                        kfree(res);
                else
                        res_free(res);
                return ret;
        }

        cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
        if (unlikely(cmd == NULL)) {
                DRM_ERROR("Fifo reserve failed.\n");
                vmw_resource_unreference(&res);
                return -ENOMEM;
        }

        cmd->header.id = cpu_to_le32(SVGA_3D_CMD_CONTEXT_DEFINE);
        cmd->header.size = cpu_to_le32(sizeof(cmd->body));
        cmd->body.cid = cpu_to_le32(res->id);

        vmw_fifo_commit(dev_priv, sizeof(*cmd));
        (void) vmw_3d_resource_inc(dev_priv);
        vmw_resource_activate(res, vmw_hw_context_destroy);
        return 0;
}

struct vmw_resource *vmw_context_alloc(struct vmw_private *dev_priv)
{
        struct vmw_resource *res = kmalloc(sizeof(*res), GFP_KERNEL);
        int ret;

        if (unlikely(res == NULL))
                return NULL;

        ret = vmw_context_init(dev_priv, res, NULL);
        return (ret == 0) ? res : NULL;
}

/**
 * User-space context management:
 */

static void vmw_user_context_free(struct vmw_resource *res)
{
        struct vmw_user_context *ctx =
            container_of(res, struct vmw_user_context, res);

        kfree(ctx);
}

/**
 * This function is called when user space has no more references on the
 * base object. It releases the base-object's reference on the resource object.
 */

static void vmw_user_context_base_release(struct ttm_base_object **p_base)
{
        struct ttm_base_object *base = *p_base;
        struct vmw_user_context *ctx =
            container_of(base, struct vmw_user_context, base);
        struct vmw_resource *res = &ctx->res;

        *p_base = NULL;
        vmw_resource_unreference(&res);
}

int vmw_context_destroy_ioctl(struct drm_device *dev, void *data,
                              struct drm_file *file_priv)
{
        struct vmw_private *dev_priv = vmw_priv(dev);
        struct vmw_resource *res;
        struct vmw_user_context *ctx;
        struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data;
        struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
        int ret = 0;

        res = vmw_resource_lookup(dev_priv, &dev_priv->context_idr, arg->cid);
        if (unlikely(res == NULL))
                return -EINVAL;

        if (res->res_free != &vmw_user_context_free) {
                ret = -EINVAL;
                goto out;
        }

        ctx = container_of(res, struct vmw_user_context, res);
        if (ctx->base.tfile != tfile && !ctx->base.shareable) {
                ret = -EPERM;
                goto out;
        }

        ttm_ref_object_base_unref(tfile, ctx->base.hash.key, TTM_REF_USAGE);
out:
        vmw_resource_unreference(&res);
        return ret;
}

int vmw_context_define_ioctl(struct drm_device *dev, void *data,
                             struct drm_file *file_priv)
{
        struct vmw_private *dev_priv = vmw_priv(dev);
        struct vmw_user_context *ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
        struct vmw_resource *res;
        struct vmw_resource *tmp;
        struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data;
        struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
        int ret;

        if (unlikely(ctx == NULL))
                return -ENOMEM;

        res = &ctx->res;
        ctx->base.shareable = false;
        ctx->base.tfile = NULL;

        ret = vmw_context_init(dev_priv, res, vmw_user_context_free);
        if (unlikely(ret != 0))
                return ret;

        tmp = vmw_resource_reference(&ctx->res);
        ret = ttm_base_object_init(tfile, &ctx->base, false, VMW_RES_CONTEXT,
                                   &vmw_user_context_base_release, NULL);

        if (unlikely(ret != 0)) {
                vmw_resource_unreference(&tmp);
                goto out_err;
        }

        arg->cid = res->id;
out_err:
        vmw_resource_unreference(&res);
        return ret;
}

int vmw_context_check(struct vmw_private *dev_priv,
                      struct ttm_object_file *tfile,
                      int id)
{
        struct vmw_resource *res;
        int ret = 0;

        read_lock(&dev_priv->resource_lock);
        res = idr_find(&dev_priv->context_idr, id);
        if (res && res->avail) {
                struct vmw_user_context *ctx =
                        container_of(res, struct vmw_user_context, res);
                if (ctx->base.tfile != tfile && !ctx->base.shareable)
                        ret = -EPERM;
        } else
                ret = -EINVAL;
        read_unlock(&dev_priv->resource_lock);

        return ret;
}

/**
 * Surface management.
 */

static void vmw_hw_surface_destroy(struct vmw_resource *res)
{
        struct vmw_private *dev_priv = res->dev_priv;
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdDestroySurface body;
        } *cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));

        if (unlikely(cmd == NULL)) {
                DRM_ERROR("Failed reserving FIFO space for surface "
                          "destruction.\n");
                return;
        }

        cmd->header.id = cpu_to_le32(SVGA_3D_CMD_SURFACE_DESTROY);
        cmd->header.size = cpu_to_le32(sizeof(cmd->body));
        cmd->body.sid = cpu_to_le32(res->id);

        vmw_fifo_commit(dev_priv, sizeof(*cmd));
        vmw_3d_resource_dec(dev_priv);
}

void vmw_surface_res_free(struct vmw_resource *res)
{
        struct vmw_surface *srf = container_of(res, struct vmw_surface, res);

        kfree(srf->sizes);
        kfree(srf->snooper.image);
        kfree(srf);
}

int vmw_surface_init(struct vmw_private *dev_priv,
                     struct vmw_surface *srf,
                     void (*res_free) (struct vmw_resource *res))
{
        int ret;
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdDefineSurface body;
        } *cmd;
        SVGA3dSize *cmd_size;
        struct vmw_resource *res = &srf->res;
        struct drm_vmw_size *src_size;
        size_t submit_size;
        uint32_t cmd_len;
        int i;

        BUG_ON(res_free == NULL);
        ret = vmw_resource_init(dev_priv, res, &dev_priv->surface_idr,
                                VMW_RES_SURFACE, res_free);

        if (unlikely(ret != 0)) {
                res_free(res);
                return ret;
        }

        submit_size = sizeof(*cmd) + srf->num_sizes * sizeof(SVGA3dSize);
        cmd_len = sizeof(cmd->body) + srf->num_sizes * sizeof(SVGA3dSize);

        cmd = vmw_fifo_reserve(dev_priv, submit_size);
        if (unlikely(cmd == NULL)) {
                DRM_ERROR("Fifo reserve failed for create surface.\n");
                vmw_resource_unreference(&res);
                return -ENOMEM;
        }

        cmd->header.id = cpu_to_le32(SVGA_3D_CMD_SURFACE_DEFINE);
        cmd->header.size = cpu_to_le32(cmd_len);
        cmd->body.sid = cpu_to_le32(res->id);
        cmd->body.surfaceFlags = cpu_to_le32(srf->flags);
        cmd->body.format = cpu_to_le32(srf->format);
        for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i) {
                cmd->body.face[i].numMipLevels =
                    cpu_to_le32(srf->mip_levels[i]);
        }

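        /*
         * The per-mipmap SVGA3dSize array follows the fixed-size command
         * in the same FIFO reservation, so stepping past the command
         * structure yields the location of the first size entry.
         */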
        cmd += 1;
        cmd_size = (SVGA3dSize *) cmd;
        src_size = srf->sizes;

        for (i = 0; i < srf->num_sizes; ++i, cmd_size++, src_size++) {
                cmd_size->width = cpu_to_le32(src_size->width);
                cmd_size->height = cpu_to_le32(src_size->height);
                cmd_size->depth = cpu_to_le32(src_size->depth);
        }

        vmw_fifo_commit(dev_priv, submit_size);
        (void) vmw_3d_resource_inc(dev_priv);
        vmw_resource_activate(res, vmw_hw_surface_destroy);
        return 0;
}
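
/*
 * Editor's sketch (not driver code): vmw_surface_init() expects the caller
 * to have filled in the surface description first. Assuming a single-face,
 * single-mip surface and a caller-chosen format value, a kernel-internal
 * user might do:
 *
 *      srf->flags = 0;
 *      srf->format = format;
 *      srf->mip_levels[0] = 1;
 *      srf->num_sizes = 1;
 *      srf->sizes = kzalloc(sizeof(*srf->sizes), GFP_KERNEL);
 *      (fill in srf->sizes[0].width/height/depth)
 *      ret = vmw_surface_init(dev_priv, srf, vmw_surface_res_free);
 */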

static void vmw_user_surface_free(struct vmw_resource *res)
{
        struct vmw_surface *srf = container_of(res, struct vmw_surface, res);
        struct vmw_user_surface *user_srf =
            container_of(srf, struct vmw_user_surface, srf);

        kfree(srf->sizes);
        kfree(srf->snooper.image);
        kfree(user_srf);
}

int vmw_user_surface_lookup_handle(struct vmw_private *dev_priv,
                                   struct ttm_object_file *tfile,
                                   uint32_t handle, struct vmw_surface **out)
{
        struct vmw_resource *res;
        struct vmw_surface *srf;
        struct vmw_user_surface *user_srf;
        struct ttm_base_object *base;
        int ret = -EINVAL;

        base = ttm_base_object_lookup(tfile, handle);
        if (unlikely(base == NULL))
                return -EINVAL;

        if (unlikely(base->object_type != VMW_RES_SURFACE))
                goto out_bad_resource;

        user_srf = container_of(base, struct vmw_user_surface, base);
        srf = &user_srf->srf;
        res = &srf->res;

        read_lock(&dev_priv->resource_lock);

        if (!res->avail || res->res_free != &vmw_user_surface_free) {
                read_unlock(&dev_priv->resource_lock);
                goto out_bad_resource;
        }

        kref_get(&res->kref);
        read_unlock(&dev_priv->resource_lock);

        *out = srf;
        ret = 0;

out_bad_resource:
        ttm_base_object_unref(&base);

        return ret;
}
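
/*
 * Editor's usage note: on success, *out carries a reference on the
 * underlying resource; the caller releases it through the resource
 * pointer, e.g.:
 *
 *      struct vmw_resource *res = &srf->res;
 *      vmw_resource_unreference(&res);
 */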

static void vmw_user_surface_base_release(struct ttm_base_object **p_base)
{
        struct ttm_base_object *base = *p_base;
        struct vmw_user_surface *user_srf =
            container_of(base, struct vmw_user_surface, base);
        struct vmw_resource *res = &user_srf->srf.res;

        *p_base = NULL;
        vmw_resource_unreference(&res);
}

int vmw_surface_destroy_ioctl(struct drm_device *dev, void *data,
                              struct drm_file *file_priv)
{
        struct drm_vmw_surface_arg *arg = (struct drm_vmw_surface_arg *)data;
        struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;

        return ttm_ref_object_base_unref(tfile, arg->sid, TTM_REF_USAGE);
}

int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
                             struct drm_file *file_priv)
{
        struct vmw_private *dev_priv = vmw_priv(dev);
        struct vmw_user_surface *user_srf =
            kmalloc(sizeof(*user_srf), GFP_KERNEL);
        struct vmw_surface *srf;
        struct vmw_resource *res;
        struct vmw_resource *tmp;
        union drm_vmw_surface_create_arg *arg =
            (union drm_vmw_surface_create_arg *)data;
        struct drm_vmw_surface_create_req *req = &arg->req;
        struct drm_vmw_surface_arg *rep = &arg->rep;
        struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
        struct drm_vmw_size __user *user_sizes;
        int ret;
        int i;

        if (unlikely(user_srf == NULL))
                return -ENOMEM;

        srf = &user_srf->srf;
        res = &srf->res;

        srf->flags = req->flags;
        srf->format = req->format;
        srf->scanout = req->scanout;
        memcpy(srf->mip_levels, req->mip_levels, sizeof(srf->mip_levels));
        srf->num_sizes = 0;
        for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i)
                srf->num_sizes += srf->mip_levels[i];

        /* Reject surfaces with no sizes as well as oversized requests. */
        if (srf->num_sizes == 0 ||
            srf->num_sizes > DRM_VMW_MAX_SURFACE_FACES *
            DRM_VMW_MAX_MIP_LEVELS) {
                ret = -EINVAL;
                goto out_err0;
        }

        srf->sizes = kmalloc(srf->num_sizes * sizeof(*srf->sizes), GFP_KERNEL);
        if (unlikely(srf->sizes == NULL)) {
                ret = -ENOMEM;
                goto out_err0;
        }

        user_sizes = (struct drm_vmw_size __user *)(unsigned long)
            req->size_addr;

        ret = copy_from_user(srf->sizes, user_sizes,
                             srf->num_sizes * sizeof(*srf->sizes));
        if (unlikely(ret != 0)) {
                ret = -EFAULT;
                goto out_err1;
        }

        if (srf->scanout &&
            srf->num_sizes == 1 &&
            srf->sizes[0].width == 64 &&
            srf->sizes[0].height == 64 &&
            srf->format == SVGA3D_A8R8G8B8) {

                /* allocate image area and clear it */
                srf->snooper.image = kzalloc(64 * 64 * 4, GFP_KERNEL);
                if (srf->snooper.image == NULL) {
                        DRM_ERROR("Failed to allocate cursor image\n");
                        ret = -ENOMEM;
                        goto out_err1;
                }
        } else {
                srf->snooper.image = NULL;
        }
        srf->snooper.crtc = NULL;

        user_srf->base.shareable = false;
        user_srf->base.tfile = NULL;

        /**
         * From this point, the generic resource management functions
         * destroy the object on failure.
         */

        ret = vmw_surface_init(dev_priv, srf, vmw_user_surface_free);
        if (unlikely(ret != 0))
                return ret;

        tmp = vmw_resource_reference(&srf->res);
        ret = ttm_base_object_init(tfile, &user_srf->base,
                                   req->shareable, VMW_RES_SURFACE,
                                   &vmw_user_surface_base_release, NULL);

        if (unlikely(ret != 0)) {
                vmw_resource_unreference(&tmp);
                vmw_resource_unreference(&res);
                return ret;
        }

        rep->sid = user_srf->base.hash.key;
        if (rep->sid == SVGA3D_INVALID_ID)
                DRM_ERROR("Created bad Surface ID.\n");

        vmw_resource_unreference(&res);
        return 0;
out_err1:
        kfree(srf->sizes);
out_err0:
        kfree(user_srf);
        return ret;
}

int vmw_surface_reference_ioctl(struct drm_device *dev, void *data,
                                struct drm_file *file_priv)
{
        union drm_vmw_surface_reference_arg *arg =
            (union drm_vmw_surface_reference_arg *)data;
        struct drm_vmw_surface_arg *req = &arg->req;
        struct drm_vmw_surface_create_req *rep = &arg->rep;
        struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
        struct vmw_surface *srf;
        struct vmw_user_surface *user_srf;
        struct drm_vmw_size __user *user_sizes;
        struct ttm_base_object *base;
        int ret = -EINVAL;

        base = ttm_base_object_lookup(tfile, req->sid);
        if (unlikely(base == NULL)) {
                DRM_ERROR("Could not find surface to reference.\n");
                return -EINVAL;
        }

        if (unlikely(base->object_type != VMW_RES_SURFACE))
                goto out_bad_resource;

        user_srf = container_of(base, struct vmw_user_surface, base);
        srf = &user_srf->srf;

        ret = ttm_ref_object_add(tfile, &user_srf->base, TTM_REF_USAGE, NULL);
        if (unlikely(ret != 0)) {
                DRM_ERROR("Could not add a reference to a surface.\n");
                goto out_no_reference;
        }

        rep->flags = srf->flags;
        rep->format = srf->format;
        memcpy(rep->mip_levels, srf->mip_levels, sizeof(srf->mip_levels));
        user_sizes = (struct drm_vmw_size __user *)(unsigned long)
            rep->size_addr;

        if (user_sizes)
                ret = copy_to_user(user_sizes, srf->sizes,
                                   srf->num_sizes * sizeof(*srf->sizes));
        if (unlikely(ret != 0)) {
                DRM_ERROR("copy_to_user failed %p %u\n",
                          user_sizes, srf->num_sizes);
                ret = -EFAULT;
        }
out_bad_resource:
out_no_reference:
        ttm_base_object_unref(&base);

        return ret;
}

int vmw_surface_check(struct vmw_private *dev_priv,
                      struct ttm_object_file *tfile,
                      uint32_t handle, int *id)
{
        struct ttm_base_object *base;
        struct vmw_user_surface *user_srf;

        int ret = -EPERM;

        base = ttm_base_object_lookup(tfile, handle);
        if (unlikely(base == NULL))
                return -EINVAL;

        if (unlikely(base->object_type != VMW_RES_SURFACE))
                goto out_bad_surface;

        user_srf = container_of(base, struct vmw_user_surface, base);
        *id = user_srf->srf.res.id;
        ret = 0;

out_bad_surface:
        /**
         * FIXME: May deadlock here when called from the
         * command parsing code.
         */

        ttm_base_object_unref(&base);
        return ret;
}

/**
 * Buffer management.
 */

static size_t vmw_dmabuf_acc_size(struct ttm_bo_global *glob,
                                  unsigned long num_pages)
{
        static size_t bo_user_size = ~0;

        size_t page_array_size =
            (num_pages * sizeof(void *) + PAGE_SIZE - 1) & PAGE_MASK;

        if (unlikely(bo_user_size == ~0)) {
                bo_user_size = glob->ttm_bo_extra_size +
                    ttm_round_pot(sizeof(struct vmw_dma_buffer));
        }

        return bo_user_size + page_array_size;
}

void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo)
{
        struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
        struct ttm_bo_global *glob = bo->glob;

        ttm_mem_global_free(glob->mem_glob, bo->acc_size);
        kfree(vmw_bo);
}

int vmw_dmabuf_init(struct vmw_private *dev_priv,
                    struct vmw_dma_buffer *vmw_bo,
                    size_t size, struct ttm_placement *placement,
                    bool interruptible,
                    void (*bo_free) (struct ttm_buffer_object *bo))
{
        struct ttm_bo_device *bdev = &dev_priv->bdev;
        struct ttm_mem_global *mem_glob = bdev->glob->mem_glob;
        size_t acc_size;
        int ret;

        BUG_ON(!bo_free);

        acc_size =
            vmw_dmabuf_acc_size(bdev->glob,
                                (size + PAGE_SIZE - 1) >> PAGE_SHIFT);

        ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false);
        if (unlikely(ret != 0)) {
                /*
                 * Free the buffer here ourselves; ttm_bo_init() likewise
                 * frees the object on its own failure paths, so the caller
                 * never needs to.
                 */
                bo_free(&vmw_bo->base);
                return ret;
        }

        memset(vmw_bo, 0, sizeof(*vmw_bo));

        INIT_LIST_HEAD(&vmw_bo->validate_list);

        ret = ttm_bo_init(bdev, &vmw_bo->base, size,
                          ttm_bo_type_device, placement,
                          0, 0, interruptible,
                          NULL, acc_size, bo_free);
        return ret;
}
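
/*
 * Editor's sketch (not driver code): allocating a kernel-internal buffer
 * with the generic free function, assuming VRAM-or-system placement is
 * acceptable:
 *
 *      struct vmw_dma_buffer *vbo = kmalloc(sizeof(*vbo), GFP_KERNEL);
 *
 *      if (vbo != NULL)
 *              ret = vmw_dmabuf_init(dev_priv, vbo, size,
 *                                    &vmw_vram_sys_placement, true,
 *                                    &vmw_dmabuf_bo_free);
 */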

static void vmw_user_dmabuf_destroy(struct ttm_buffer_object *bo)
{
        struct vmw_user_dma_buffer *vmw_user_bo = vmw_user_dma_buffer(bo);
        struct ttm_bo_global *glob = bo->glob;

        ttm_mem_global_free(glob->mem_glob, bo->acc_size);
        kfree(vmw_user_bo);
}

static void vmw_user_dmabuf_release(struct ttm_base_object **p_base)
{
        struct vmw_user_dma_buffer *vmw_user_bo;
        struct ttm_base_object *base = *p_base;
        struct ttm_buffer_object *bo;

        *p_base = NULL;

        if (unlikely(base == NULL))
                return;

        vmw_user_bo = container_of(base, struct vmw_user_dma_buffer, base);
        bo = &vmw_user_bo->dma.base;
        ttm_bo_unref(&bo);
}

int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data,
                           struct drm_file *file_priv)
{
        struct vmw_private *dev_priv = vmw_priv(dev);
        union drm_vmw_alloc_dmabuf_arg *arg =
            (union drm_vmw_alloc_dmabuf_arg *)data;
        struct drm_vmw_alloc_dmabuf_req *req = &arg->req;
        struct drm_vmw_dmabuf_rep *rep = &arg->rep;
        struct vmw_user_dma_buffer *vmw_user_bo;
        struct ttm_buffer_object *tmp;
        struct vmw_master *vmaster = vmw_master(file_priv->master);
        int ret;

        vmw_user_bo = kzalloc(sizeof(*vmw_user_bo), GFP_KERNEL);
        if (unlikely(vmw_user_bo == NULL))
                return -ENOMEM;

        ret = ttm_read_lock(&vmaster->lock, true);
        if (unlikely(ret != 0)) {
                kfree(vmw_user_bo);
                return ret;
        }

        ret = vmw_dmabuf_init(dev_priv, &vmw_user_bo->dma, req->size,
                              &vmw_vram_sys_placement, true,
                              &vmw_user_dmabuf_destroy);
        if (unlikely(ret != 0))
                goto out_no_dmabuf;

        tmp = ttm_bo_reference(&vmw_user_bo->dma.base);
        ret = ttm_base_object_init(vmw_fpriv(file_priv)->tfile,
                                   &vmw_user_bo->base,
                                   false,
                                   ttm_buffer_type,
                                   &vmw_user_dmabuf_release, NULL);
        if (unlikely(ret != 0))
                goto out_no_base_object;

        rep->handle = vmw_user_bo->base.hash.key;
        rep->map_handle = vmw_user_bo->dma.base.addr_space_offset;
        rep->cur_gmr_id = vmw_user_bo->base.hash.key;
        rep->cur_gmr_offset = 0;

out_no_base_object:
        ttm_bo_unref(&tmp);
out_no_dmabuf:
        ttm_read_unlock(&vmaster->lock);

        return ret;
}

int vmw_dmabuf_unref_ioctl(struct drm_device *dev, void *data,
                           struct drm_file *file_priv)
{
        struct drm_vmw_unref_dmabuf_arg *arg =
            (struct drm_vmw_unref_dmabuf_arg *)data;

        return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
                                         arg->handle,
                                         TTM_REF_USAGE);
}

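/*
 * Each buffer object appears at most once on a command-submission
 * validate list: the first call records the list slot handed in by the
 * caller, and subsequent calls for the same buffer return the slot
 * already assigned.
 */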
uint32_t vmw_dmabuf_validate_node(struct ttm_buffer_object *bo,
                                  uint32_t cur_validate_node)
{
        struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);

        if (likely(vmw_bo->on_validate_list))
                return vmw_bo->cur_validate_node;

        vmw_bo->cur_validate_node = cur_validate_node;
        vmw_bo->on_validate_list = true;

        return cur_validate_node;
}

void vmw_dmabuf_validate_clear(struct ttm_buffer_object *bo)
{
        struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);

        vmw_bo->on_validate_list = false;
}

int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile,
                           uint32_t handle, struct vmw_dma_buffer **out)
{
        struct vmw_user_dma_buffer *vmw_user_bo;
        struct ttm_base_object *base;

        base = ttm_base_object_lookup(tfile, handle);
        if (unlikely(base == NULL)) {
                printk(KERN_ERR "Invalid buffer object handle 0x%08lx.\n",
                       (unsigned long)handle);
                return -ESRCH;
        }

        if (unlikely(base->object_type != ttm_buffer_type)) {
                ttm_base_object_unref(&base);
                printk(KERN_ERR "Invalid buffer object handle 0x%08lx.\n",
                       (unsigned long)handle);
                return -EINVAL;
        }

        vmw_user_bo = container_of(base, struct vmw_user_dma_buffer, base);
        (void)ttm_bo_reference(&vmw_user_bo->dma.base);
        ttm_base_object_unref(&base);
        *out = &vmw_user_bo->dma;

        return 0;
}
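
/*
 * Editor's usage note: the lookup takes a reference on the underlying
 * ttm buffer object, which the caller must drop when done:
 *
 *      struct ttm_buffer_object *bo = &buf->base;
 *      ttm_bo_unref(&bo);
 */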

/*
 * Stream management
 */

static void vmw_stream_destroy(struct vmw_resource *res)
{
        struct vmw_private *dev_priv = res->dev_priv;
        struct vmw_stream *stream;
        int ret;

        DRM_INFO("%s: unref\n", __func__);
        stream = container_of(res, struct vmw_stream, res);

        ret = vmw_overlay_unref(dev_priv, stream->stream_id);
        WARN_ON(ret != 0);
}

static int vmw_stream_init(struct vmw_private *dev_priv,
                           struct vmw_stream *stream,
                           void (*res_free) (struct vmw_resource *res))
{
        struct vmw_resource *res = &stream->res;
        int ret;

        ret = vmw_resource_init(dev_priv, res, &dev_priv->stream_idr,
                                VMW_RES_STREAM, res_free);

        if (unlikely(ret != 0)) {
                if (res_free == NULL)
                        kfree(stream);
                else
                        res_free(&stream->res);
                return ret;
        }

        ret = vmw_overlay_claim(dev_priv, &stream->stream_id);
        if (ret) {
                vmw_resource_unreference(&res);
                return ret;
        }

        DRM_INFO("%s: claimed\n", __func__);

        vmw_resource_activate(&stream->res, vmw_stream_destroy);
        return 0;
}

/**
 * User-space stream management:
 */

static void vmw_user_stream_free(struct vmw_resource *res)
{
        struct vmw_user_stream *stream =
            container_of(res, struct vmw_user_stream, stream.res);

        kfree(stream);
}

/**
 * This function is called when user space has no more references on the
 * base object. It releases the base-object's reference on the resource object.
 */

static void vmw_user_stream_base_release(struct ttm_base_object **p_base)
{
        struct ttm_base_object *base = *p_base;
        struct vmw_user_stream *stream =
            container_of(base, struct vmw_user_stream, base);
        struct vmw_resource *res = &stream->stream.res;

        *p_base = NULL;
        vmw_resource_unreference(&res);
}

int vmw_stream_unref_ioctl(struct drm_device *dev, void *data,
                           struct drm_file *file_priv)
{
        struct vmw_private *dev_priv = vmw_priv(dev);
        struct vmw_resource *res;
        struct vmw_user_stream *stream;
        struct drm_vmw_stream_arg *arg = (struct drm_vmw_stream_arg *)data;
        struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
        int ret = 0;

        res = vmw_resource_lookup(dev_priv, &dev_priv->stream_idr,
                                  arg->stream_id);
        if (unlikely(res == NULL))
                return -EINVAL;

        if (res->res_free != &vmw_user_stream_free) {
                ret = -EINVAL;
                goto out;
        }

        stream = container_of(res, struct vmw_user_stream, stream.res);
        if (stream->base.tfile != tfile) {
                ret = -EINVAL;
                goto out;
        }

        ttm_ref_object_base_unref(tfile, stream->base.hash.key, TTM_REF_USAGE);
out:
        vmw_resource_unreference(&res);
        return ret;
}

int vmw_stream_claim_ioctl(struct drm_device *dev, void *data,
                           struct drm_file *file_priv)
{
        struct vmw_private *dev_priv = vmw_priv(dev);
        struct vmw_user_stream *stream = kmalloc(sizeof(*stream), GFP_KERNEL);
        struct vmw_resource *res;
        struct vmw_resource *tmp;
        struct drm_vmw_stream_arg *arg = (struct drm_vmw_stream_arg *)data;
        struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
        int ret;

        if (unlikely(stream == NULL))
                return -ENOMEM;

        res = &stream->stream.res;
        stream->base.shareable = false;
        stream->base.tfile = NULL;

        ret = vmw_stream_init(dev_priv, &stream->stream, vmw_user_stream_free);
        if (unlikely(ret != 0))
                return ret;

        tmp = vmw_resource_reference(res);
        ret = ttm_base_object_init(tfile, &stream->base, false, VMW_RES_STREAM,
                                   &vmw_user_stream_base_release, NULL);

        if (unlikely(ret != 0)) {
                vmw_resource_unreference(&tmp);
                goto out_err;
        }

        arg->stream_id = res->id;
out_err:
        vmw_resource_unreference(&res);
        return ret;
}

int vmw_user_stream_lookup(struct vmw_private *dev_priv,
                           struct ttm_object_file *tfile,
                           uint32_t *inout_id, struct vmw_resource **out)
{
        struct vmw_user_stream *stream;
        struct vmw_resource *res;
        int ret;

        res = vmw_resource_lookup(dev_priv, &dev_priv->stream_idr, *inout_id);
        if (unlikely(res == NULL))
                return -EINVAL;

        if (res->res_free != &vmw_user_stream_free) {
                ret = -EINVAL;
                goto err_ref;
        }

        stream = container_of(res, struct vmw_user_stream, stream.res);
        if (stream->base.tfile != tfile) {
                ret = -EPERM;
                goto err_ref;
        }

        *inout_id = stream->stream.stream_id;
        *out = res;
        return 0;
err_ref:
        vmw_resource_unreference(&res);
        return ret;
}
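
/*
 * Editor's usage note: on success, *inout_id holds the overlay stream id
 * and *out holds a referenced resource; the caller drops the reference
 * when done:
 *
 *      vmw_resource_unreference(&res);
 */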