linux/drivers/gpu/drm/virtio/virtgpu_vram.c
// SPDX-License-Identifier: GPL-2.0
#include "virtgpu_drv.h"

#include <linux/dma-mapping.h>

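/*
 * Called when the last reference to the GEM object is dropped.  If the
 * blob was mapped into the host-visible region, tear down the host
 * mapping first, then ask the host to drop the resource.  The guest-side
 * memory itself is released later, from the unref response handling
 * (the virtio_gpu_cleanup_object() path), once the host is done with it.
 */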
static void virtio_gpu_vram_free(struct drm_gem_object *obj)
{
        struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);
        struct virtio_gpu_device *vgdev = obj->dev->dev_private;
        struct virtio_gpu_object_vram *vram = to_virtio_gpu_vram(bo);
        bool unmap;

        if (bo->created) {
                spin_lock(&vgdev->host_visible_lock);
                unmap = drm_mm_node_allocated(&vram->vram_node);
                spin_unlock(&vgdev->host_visible_lock);

                if (unmap)
                        virtio_gpu_cmd_unmap(vgdev, bo);

                virtio_gpu_cmd_unref_resource(vgdev, bo);
                virtio_gpu_notify(vgdev);
                return;
        }
}

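/* No special fault handling; open/close only maintain the GEM refcount. */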
static const struct vm_operations_struct virtio_gpu_vram_vm_ops = {
        .open = drm_gem_vm_open,
        .close = drm_gem_vm_close,
};

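/*
 * mmap a mappable blob: expose its window in the host-visible region to
 * userspace.  A rough userspace sketch (illustrative only, not taken
 * from this file; error handling omitted):
 *
 *	struct drm_virtgpu_resource_create_blob cblob = {
 *		.blob_mem = VIRTGPU_BLOB_MEM_HOST3D,
 *		.blob_flags = VIRTGPU_BLOB_FLAG_USE_MAPPABLE,
 *		.size = bo_size,
 *		.blob_id = blob_id,
 *	};
 *	struct drm_virtgpu_map vmap = { 0 };
 *
 *	drmIoctl(fd, DRM_IOCTL_VIRTGPU_RESOURCE_CREATE_BLOB, &cblob);
 *	vmap.handle = cblob.bo_handle;
 *	drmIoctl(fd, DRM_IOCTL_VIRTGPU_MAP, &vmap);
 *	ptr = mmap(NULL, bo_size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		   fd, vmap.offset);
 */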
static int virtio_gpu_vram_mmap(struct drm_gem_object *obj,
                                struct vm_area_struct *vma)
{
        int ret;
        struct virtio_gpu_device *vgdev = obj->dev->dev_private;
        struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);
        struct virtio_gpu_object_vram *vram = to_virtio_gpu_vram(bo);
        unsigned long vm_size = vma->vm_end - vma->vm_start;

        if (!(bo->blob_flags & VIRTGPU_BLOB_FLAG_USE_MAPPABLE))
                return -EINVAL;

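        /*
         * The map request is sent during object creation; wait here until
         * the host's reply has moved map_state past STATE_INITIALIZING.
         */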
        wait_event(vgdev->resp_wq, vram->map_state != STATE_INITIALIZING);
        if (vram->map_state != STATE_OK)
                return -EINVAL;

        vma->vm_pgoff -= drm_vma_node_start(&obj->vma_node);
        vma->vm_flags |= VM_MIXEDMAP | VM_DONTEXPAND;
        vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
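        /*
         * Device memory is shared with the host, so force a decrypted
         * mapping on memory-encrypted (e.g. AMD SEV) guests.
         */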
        vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
        vma->vm_ops = &virtio_gpu_vram_vm_ops;

        if (vram->map_info == VIRTIO_GPU_MAP_CACHE_WC)
                vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
        else if (vram->map_info == VIRTIO_GPU_MAP_CACHE_UNCACHED)
                vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

        /* Partial mappings of GEM buffers don't happen much in practice. */
        if (vm_size != vram->vram_node.size)
                return -EINVAL;

        ret = io_remap_pfn_range(vma, vma->vm_start,
                                 vram->vram_node.start >> PAGE_SHIFT,
                                 vm_size, vma->vm_page_prot);
        return ret;
}

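/*
 * dma-buf export path.  A mappable blob is described by a single-entry
 * sg_table covering its window in the host-visible region.  A
 * non-mappable blob can only be shared with another virtio device, which
 * finds it by UUID, so an empty stub table is enough there.
 */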
struct sg_table *virtio_gpu_vram_map_dma_buf(struct virtio_gpu_object *bo,
                                             struct device *dev,
                                             enum dma_data_direction dir)
{
        struct virtio_gpu_device *vgdev = bo->base.base.dev->dev_private;
        struct virtio_gpu_object_vram *vram = to_virtio_gpu_vram(bo);
        struct sg_table *sgt;
        dma_addr_t addr;
        int ret;

        sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
        if (!sgt)
                return ERR_PTR(-ENOMEM);

        if (!(bo->blob_flags & VIRTGPU_BLOB_FLAG_USE_MAPPABLE)) {
                /*
                 * Virtio devices can access the dma-buf via its UUID. Return
                 * a stub sg_table so the dma-buf API still works.
                 */
                if (!is_virtio_device(dev) || !vgdev->has_resource_assign_uuid) {
                        ret = -EIO;
                        goto out;
                }
                return sgt;
        }

        ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
        if (ret)
                goto out;

        addr = dma_map_resource(dev, vram->vram_node.start,
                                vram->vram_node.size, dir,
                                DMA_ATTR_SKIP_CPU_SYNC);
        ret = dma_mapping_error(dev, addr);
        if (ret)
                goto out;

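        /*
         * VRAM is not backed by struct pages, so the entry carries only
         * the DMA address and length; the page pointer stays NULL.
         */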
        sg_set_page(sgt->sgl, NULL, vram->vram_node.size, 0);
        sg_dma_address(sgt->sgl) = addr;
        sg_dma_len(sgt->sgl) = vram->vram_node.size;

        return sgt;
out:
        sg_free_table(sgt);
        kfree(sgt);
        return ERR_PTR(ret);
}

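/*
 * Undo virtio_gpu_vram_map_dma_buf().  Stub tables from the UUID path
 * have nents == 0 and no DMA mapping to release.
 */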
void virtio_gpu_vram_unmap_dma_buf(struct device *dev,
                                   struct sg_table *sgt,
                                   enum dma_data_direction dir)
{
        if (sgt->nents) {
                dma_unmap_resource(dev, sg_dma_address(sgt->sgl),
                                   sg_dma_len(sgt->sgl), dir,
                                   DMA_ATTR_SKIP_CPU_SYNC);
        }
        sg_free_table(sgt);
        kfree(sgt);
}

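/* GEM callbacks for host-visible (VRAM) blob objects. */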
static const struct drm_gem_object_funcs virtio_gpu_vram_funcs = {
        .open = virtio_gpu_gem_object_open,
        .close = virtio_gpu_gem_object_close,
        .free = virtio_gpu_vram_free,
        .mmap = virtio_gpu_vram_mmap,
        .export = virtgpu_gem_prime_export,
};

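/* VRAM objects are identified purely by their funcs pointer. */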
bool virtio_gpu_is_vram(struct virtio_gpu_object *bo)
{
        return bo->base.base.funcs == &virtio_gpu_vram_funcs;
}

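/*
 * Carve a window for the blob out of the device's host-visible region
 * and ask the host to map the blob's backing memory into it.
 */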
static int virtio_gpu_vram_map(struct virtio_gpu_object *bo)
{
        int ret;
        uint64_t offset;
        struct virtio_gpu_object_array *objs;
        struct virtio_gpu_device *vgdev = bo->base.base.dev->dev_private;
        struct virtio_gpu_object_vram *vram = to_virtio_gpu_vram(bo);

        if (!vgdev->has_host_visible)
                return -EINVAL;

        spin_lock(&vgdev->host_visible_lock);
        ret = drm_mm_insert_node(&vgdev->host_visible_mm, &vram->vram_node,
                                 bo->base.base.size);
        spin_unlock(&vgdev->host_visible_lock);

        if (ret)
                return ret;

        objs = virtio_gpu_array_alloc(1);
        if (!objs) {
                ret = -ENOMEM;
                goto err_remove_node;
        }

        virtio_gpu_array_add_obj(objs, &bo->base.base);
        /* TODO: Add an error checking helper function in drm_mm.h */
        offset = vram->vram_node.start - vgdev->host_visible_region.addr;

        ret = virtio_gpu_cmd_map(vgdev, objs, offset);
        if (ret) {
                virtio_gpu_array_put_free(objs);
                goto err_remove_node;
        }

        return 0;

err_remove_node:
        spin_lock(&vgdev->host_visible_lock);
        drm_mm_remove_node(&vram->vram_node);
        spin_unlock(&vgdev->host_visible_lock);
        return ret;
}

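/*
 * Create a VRAM blob object: initialize the GEM object and its fake mmap
 * offset, reserve a resource id, issue RESOURCE_CREATE_BLOB and, for
 * mappable blobs, kick off the host-side mapping.
 */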
int virtio_gpu_vram_create(struct virtio_gpu_device *vgdev,
                           struct virtio_gpu_object_params *params,
                           struct virtio_gpu_object **bo_ptr)
{
        struct drm_gem_object *obj;
        struct virtio_gpu_object_vram *vram;
        int ret;

        vram = kzalloc(sizeof(*vram), GFP_KERNEL);
        if (!vram)
                return -ENOMEM;

        obj = &vram->base.base.base;
        obj->funcs = &virtio_gpu_vram_funcs;

        params->size = PAGE_ALIGN(params->size);
        drm_gem_private_object_init(vgdev->ddev, obj, params->size);

        /* Create fake offset */
        ret = drm_gem_create_mmap_offset(obj);
        if (ret) {
                kfree(vram);
                return ret;
        }

        ret = virtio_gpu_resource_id_get(vgdev, &vram->base.hw_res_handle);
        if (ret) {
                kfree(vram);
                return ret;
        }

        virtio_gpu_cmd_resource_create_blob(vgdev, &vram->base, params, NULL,
                                            0);
        if (params->blob_flags & VIRTGPU_BLOB_FLAG_USE_MAPPABLE) {
                ret = virtio_gpu_vram_map(&vram->base);
                if (ret) {
                        virtio_gpu_vram_free(obj);
                        return ret;
                }
        }

        *bo_ptr = &vram->base;
        return 0;
}