/* linux/drivers/gpu/drm/virtio/virtgpu_object.c */
   1/*
   2 * Copyright (C) 2015 Red Hat, Inc.
   3 * All Rights Reserved.
   4 *
   5 * Permission is hereby granted, free of charge, to any person obtaining
   6 * a copy of this software and associated documentation files (the
   7 * "Software"), to deal in the Software without restriction, including
   8 * without limitation the rights to use, copy, modify, merge, publish,
   9 * distribute, sublicense, and/or sell copies of the Software, and to
  10 * permit persons to whom the Software is furnished to do so, subject to
  11 * the following conditions:
  12 *
  13 * The above copyright notice and this permission notice (including the
  14 * next paragraph) shall be included in all copies or substantial
  15 * portions of the Software.
  16 *
  17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  18 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  19 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
  20 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
  21 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
  22 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
  23 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
  24 */
  25
#include <linux/atomic.h>

#include "virtgpu_drv.h"
  27
  28static int virtio_gpu_resource_id_get(struct virtio_gpu_device *vgdev,
  29                                       uint32_t *resid)
  30{
  31#if 0
  32        int handle = ida_alloc(&vgdev->resource_ida, GFP_KERNEL);
  33
  34        if (handle < 0)
  35                return handle;
  36#else
  37        static int handle;
  38
  39        /*
  40         * FIXME: dirty hack to avoid re-using IDs, virglrenderer
  41         * can't deal with that.  Needs fixing in virglrenderer, also
  42         * should figure a better way to handle that in the guest.
  43         */
  44        handle++;
  45#endif
  46
  47        *resid = handle + 1;
  48        return 0;
  49}
  50
/*
 * virtio_gpu_resource_id_put - release a host resource id
 *
 * Currently a no-op: the ida-based allocator in
 * virtio_gpu_resource_id_get() is disabled, so ids are never recycled.
 */
static void virtio_gpu_resource_id_put(struct virtio_gpu_device *vgdev, uint32_t id)
{
#if 0
	/* Re-enable together with the ida_alloc() path above. */
	ida_free(&vgdev->resource_ida, id - 1);
#endif
}
  57
  58static void virtio_gpu_ttm_bo_destroy(struct ttm_buffer_object *tbo)
  59{
  60        struct virtio_gpu_object *bo;
  61        struct virtio_gpu_device *vgdev;
  62
  63        bo = container_of(tbo, struct virtio_gpu_object, tbo);
  64        vgdev = (struct virtio_gpu_device *)bo->gem_base.dev->dev_private;
  65
  66        if (bo->created)
  67                virtio_gpu_cmd_unref_resource(vgdev, bo->hw_res_handle);
  68        if (bo->pages)
  69                virtio_gpu_object_free_sg_table(bo);
  70        if (bo->vmap)
  71                virtio_gpu_object_kunmap(bo);
  72        drm_gem_object_release(&bo->gem_base);
  73        virtio_gpu_resource_id_put(vgdev, bo->hw_res_handle);
  74        kfree(bo);
  75}
  76
  77static void virtio_gpu_init_ttm_placement(struct virtio_gpu_object *vgbo,
  78                                          bool pinned)
  79{
  80        u32 c = 1;
  81        u32 pflag = pinned ? TTM_PL_FLAG_NO_EVICT : 0;
  82
  83        vgbo->placement.placement = &vgbo->placement_code;
  84        vgbo->placement.busy_placement = &vgbo->placement_code;
  85        vgbo->placement_code.fpfn = 0;
  86        vgbo->placement_code.lpfn = 0;
  87        vgbo->placement_code.flags =
  88                TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT | pflag;
  89        vgbo->placement.num_placement = c;
  90        vgbo->placement.num_busy_placement = c;
  91
  92}
  93
/*
 * virtio_gpu_object_create - allocate and initialize a virtio-gpu BO
 * @vgdev:  virtio-gpu device
 * @size:   requested size in bytes (rounded up to PAGE_SIZE below)
 * @kernel: true for a kernel-internal BO (ttm_bo_type_kernel),
 *          false for one mappable by userspace (ttm_bo_type_device)
 * @pinned: true to make the BO non-evictable
 * @bo_ptr: on success, set to the newly created object
 *
 * Returns 0 on success or a negative errno.  Note the asymmetric
 * cleanup: before ttm_bo_init() this function unwinds manually, but a
 * ttm_bo_init() failure invokes virtio_gpu_ttm_bo_destroy() itself,
 * so no cleanup is done here in that case.
 */
int virtio_gpu_object_create(struct virtio_gpu_device *vgdev,
			     unsigned long size, bool kernel, bool pinned,
			     struct virtio_gpu_object **bo_ptr)
{
	struct virtio_gpu_object *bo;
	enum ttm_bo_type type;
	size_t acc_size;
	int ret;

	if (kernel)
		type = ttm_bo_type_kernel;
	else
		type = ttm_bo_type_device;
	*bo_ptr = NULL;

	/* Accounting size TTM charges for this BO and its metadata. */
	acc_size = ttm_bo_dma_acc_size(&vgdev->mman.bdev, size,
				       sizeof(struct virtio_gpu_object));

	bo = kzalloc(sizeof(struct virtio_gpu_object), GFP_KERNEL);
	if (bo == NULL)
		return -ENOMEM;
	ret = virtio_gpu_resource_id_get(vgdev, &bo->hw_res_handle);
	if (ret < 0) {
		kfree(bo);
		return ret;
	}
	size = roundup(size, PAGE_SIZE);
	ret = drm_gem_object_init(vgdev->ddev, &bo->gem_base, size);
	if (ret != 0) {
		virtio_gpu_resource_id_put(vgdev, bo->hw_res_handle);
		kfree(bo);
		return ret;
	}
	/* Callers that create dumb buffers set this themselves. */
	bo->dumb = false;
	virtio_gpu_init_ttm_placement(bo, pinned);

	ret = ttm_bo_init(&vgdev->mman.bdev, &bo->tbo, size, type,
			  &bo->placement, 0, !kernel, acc_size,
			  NULL, NULL, &virtio_gpu_ttm_bo_destroy);
	/* ttm_bo_init failure will call the destroy */
	if (ret != 0)
		return ret;

	*bo_ptr = bo;
	return 0;
}
 140
/* Drop the kernel virtual mapping set up by virtio_gpu_object_kmap(). */
void virtio_gpu_object_kunmap(struct virtio_gpu_object *bo)
{
	/* Clear the cached pointer before the mapping goes away. */
	bo->vmap = NULL;
	ttm_bo_kunmap(&bo->kmap);
}
 146
 147int virtio_gpu_object_kmap(struct virtio_gpu_object *bo)
 148{
 149        bool is_iomem;
 150        int r;
 151
 152        WARN_ON(bo->vmap);
 153
 154        r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages, &bo->kmap);
 155        if (r)
 156                return r;
 157        bo->vmap = ttm_kmap_obj_virtual(&bo->kmap, &is_iomem);
 158        return 0;
 159}
 160
 161int virtio_gpu_object_get_sg_table(struct virtio_gpu_device *qdev,
 162                                   struct virtio_gpu_object *bo)
 163{
 164        int ret;
 165        struct page **pages = bo->tbo.ttm->pages;
 166        int nr_pages = bo->tbo.num_pages;
 167        struct ttm_operation_ctx ctx = {
 168                .interruptible = false,
 169                .no_wait_gpu = false
 170        };
 171
 172        /* wtf swapping */
 173        if (bo->pages)
 174                return 0;
 175
 176        if (bo->tbo.ttm->state == tt_unpopulated)
 177                bo->tbo.ttm->bdev->driver->ttm_tt_populate(bo->tbo.ttm, &ctx);
 178        bo->pages = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
 179        if (!bo->pages)
 180                goto out;
 181
 182        ret = sg_alloc_table_from_pages(bo->pages, pages, nr_pages, 0,
 183                                        nr_pages << PAGE_SHIFT, GFP_KERNEL);
 184        if (ret)
 185                goto out;
 186        return 0;
 187out:
 188        kfree(bo->pages);
 189        bo->pages = NULL;
 190        return -ENOMEM;
 191}
 192
 193void virtio_gpu_object_free_sg_table(struct virtio_gpu_object *bo)
 194{
 195        sg_free_table(bo->pages);
 196        kfree(bo->pages);
 197        bo->pages = NULL;
 198}
 199
 200int virtio_gpu_object_wait(struct virtio_gpu_object *bo, bool no_wait)
 201{
 202        int r;
 203
 204        r = ttm_bo_reserve(&bo->tbo, true, no_wait, NULL);
 205        if (unlikely(r != 0))
 206                return r;
 207        r = ttm_bo_wait(&bo->tbo, true, no_wait);
 208        ttm_bo_unreserve(&bo->tbo);
 209        return r;
 210}
 211
 212