/* linux/drivers/gpu/drm/vkms/vkms_gem.c */
// SPDX-License-Identifier: GPL-2.0+

#include <linux/shmem_fs.h>

#include "vkms_drv.h"

   7static struct vkms_gem_object *__vkms_gem_create(struct drm_device *dev,
   8                                                 u64 size)
   9{
  10        struct vkms_gem_object *obj;
  11        int ret;
  12
  13        obj = kzalloc(sizeof(*obj), GFP_KERNEL);
  14        if (!obj)
  15                return ERR_PTR(-ENOMEM);
  16
  17        size = roundup(size, PAGE_SIZE);
  18        ret = drm_gem_object_init(dev, &obj->gem, size);
  19        if (ret) {
  20                kfree(obj);
  21                return ERR_PTR(ret);
  22        }
  23
  24        mutex_init(&obj->pages_lock);
  25
  26        return obj;
  27}
  28
  29void vkms_gem_free_object(struct drm_gem_object *obj)
  30{
  31        struct vkms_gem_object *gem = container_of(obj, struct vkms_gem_object,
  32                                                   gem);
  33
  34        WARN_ON(gem->pages);
  35        WARN_ON(gem->vaddr);
  36
  37        mutex_destroy(&gem->pages_lock);
  38        drm_gem_object_release(obj);
  39        kfree(gem);
  40}
  41
  42vm_fault_t vkms_gem_fault(struct vm_fault *vmf)
  43{
  44        struct vm_area_struct *vma = vmf->vma;
  45        struct vkms_gem_object *obj = vma->vm_private_data;
  46        unsigned long vaddr = vmf->address;
  47        pgoff_t page_offset;
  48        loff_t num_pages;
  49        vm_fault_t ret = VM_FAULT_SIGBUS;
  50
  51        page_offset = (vaddr - vma->vm_start) >> PAGE_SHIFT;
  52        num_pages = DIV_ROUND_UP(obj->gem.size, PAGE_SIZE);
  53
  54        if (page_offset > num_pages)
  55                return VM_FAULT_SIGBUS;
  56
  57        mutex_lock(&obj->pages_lock);
  58        if (obj->pages) {
  59                get_page(obj->pages[page_offset]);
  60                vmf->page = obj->pages[page_offset];
  61                ret = 0;
  62        }
  63        mutex_unlock(&obj->pages_lock);
  64        if (ret) {
  65                struct page *page;
  66                struct address_space *mapping;
  67
  68                mapping = file_inode(obj->gem.filp)->i_mapping;
  69                page = shmem_read_mapping_page(mapping, page_offset);
  70
  71                if (!IS_ERR(page)) {
  72                        vmf->page = page;
  73                        ret = 0;
  74                } else {
  75                        switch (PTR_ERR(page)) {
  76                        case -ENOSPC:
  77                        case -ENOMEM:
  78                                ret = VM_FAULT_OOM;
  79                                break;
  80                        case -EBUSY:
  81                                ret = VM_FAULT_RETRY;
  82                                break;
  83                        case -EFAULT:
  84                        case -EINVAL:
  85                                ret = VM_FAULT_SIGBUS;
  86                                break;
  87                        default:
  88                                WARN_ON(PTR_ERR(page));
  89                                ret = VM_FAULT_SIGBUS;
  90                                break;
  91                        }
  92                }
  93        }
  94        return ret;
  95}
  96
  97struct drm_gem_object *vkms_gem_create(struct drm_device *dev,
  98                                       struct drm_file *file,
  99                                       u32 *handle,
 100                                       u64 size)
 101{
 102        struct vkms_gem_object *obj;
 103        int ret;
 104
 105        if (!file || !dev || !handle)
 106                return ERR_PTR(-EINVAL);
 107
 108        obj = __vkms_gem_create(dev, size);
 109        if (IS_ERR(obj))
 110                return ERR_CAST(obj);
 111
 112        ret = drm_gem_handle_create(file, &obj->gem, handle);
 113        drm_gem_object_put_unlocked(&obj->gem);
 114        if (ret)
 115                return ERR_PTR(ret);
 116
 117        return &obj->gem;
 118}
 119
 120int vkms_dumb_create(struct drm_file *file, struct drm_device *dev,
 121                     struct drm_mode_create_dumb *args)
 122{
 123        struct drm_gem_object *gem_obj;
 124        u64 pitch, size;
 125
 126        if (!args || !dev || !file)
 127                return -EINVAL;
 128
 129        pitch = args->width * DIV_ROUND_UP(args->bpp, 8);
 130        size = pitch * args->height;
 131
 132        if (!size)
 133                return -EINVAL;
 134
 135        gem_obj = vkms_gem_create(dev, file, &args->handle, size);
 136        if (IS_ERR(gem_obj))
 137                return PTR_ERR(gem_obj);
 138
 139        args->size = gem_obj->size;
 140        args->pitch = pitch;
 141
 142        DRM_DEBUG_DRIVER("Created object of size %lld\n", size);
 143
 144        return 0;
 145}
 146
 147static struct page **_get_pages(struct vkms_gem_object *vkms_obj)
 148{
 149        struct drm_gem_object *gem_obj = &vkms_obj->gem;
 150
 151        if (!vkms_obj->pages) {
 152                struct page **pages = drm_gem_get_pages(gem_obj);
 153
 154                if (IS_ERR(pages))
 155                        return pages;
 156
 157                if (cmpxchg(&vkms_obj->pages, NULL, pages))
 158                        drm_gem_put_pages(gem_obj, pages, false, true);
 159        }
 160
 161        return vkms_obj->pages;
 162}
 163
 164void vkms_gem_vunmap(struct drm_gem_object *obj)
 165{
 166        struct vkms_gem_object *vkms_obj = drm_gem_to_vkms_gem(obj);
 167
 168        mutex_lock(&vkms_obj->pages_lock);
 169        if (vkms_obj->vmap_count < 1) {
 170                WARN_ON(vkms_obj->vaddr);
 171                WARN_ON(vkms_obj->pages);
 172                mutex_unlock(&vkms_obj->pages_lock);
 173                return;
 174        }
 175
 176        vkms_obj->vmap_count--;
 177
 178        if (vkms_obj->vmap_count == 0) {
 179                vunmap(vkms_obj->vaddr);
 180                vkms_obj->vaddr = NULL;
 181                drm_gem_put_pages(obj, vkms_obj->pages, false, true);
 182                vkms_obj->pages = NULL;
 183        }
 184
 185        mutex_unlock(&vkms_obj->pages_lock);
 186}
 187
 188int vkms_gem_vmap(struct drm_gem_object *obj)
 189{
 190        struct vkms_gem_object *vkms_obj = drm_gem_to_vkms_gem(obj);
 191        int ret = 0;
 192
 193        mutex_lock(&vkms_obj->pages_lock);
 194
 195        if (!vkms_obj->vaddr) {
 196                unsigned int n_pages = obj->size >> PAGE_SHIFT;
 197                struct page **pages = _get_pages(vkms_obj);
 198
 199                if (IS_ERR(pages)) {
 200                        ret = PTR_ERR(pages);
 201                        goto out;
 202                }
 203
 204                vkms_obj->vaddr = vmap(pages, n_pages, VM_MAP, PAGE_KERNEL);
 205                if (!vkms_obj->vaddr)
 206                        goto err_vmap;
 207        }
 208
 209        vkms_obj->vmap_count++;
 210        goto out;
 211
 212err_vmap:
 213        ret = -ENOMEM;
 214        drm_gem_put_pages(obj, vkms_obj->pages, false, true);
 215        vkms_obj->pages = NULL;
 216out:
 217        mutex_unlock(&vkms_obj->pages_lock);
 218        return ret;
 219}
 220