linux/drivers/gpu/drm/xen/xen_drm_front_gem.c
// SPDX-License-Identifier: GPL-2.0 OR MIT

/*
 *  Xen para-virtual DRM device
 *
 * Copyright (C) 2016-2018 EPAM Systems Inc.
 *
 * Author: Oleksandr Andrushchenko <oleksandr_andrushchenko@epam.com>
 */

#include <linux/dma-buf.h>
#include <linux/scatterlist.h>
#include <linux/shmem_fs.h>

#include <drm/drm_fb_helper.h>
#include <drm/drm_gem.h>
#include <drm/drm_prime.h>
#include <drm/drm_probe_helper.h>

#include <xen/balloon.h>
#include <xen/xen.h>

#include "xen_drm_front.h"
#include "xen_drm_front_gem.h"

struct xen_gem_object {
	struct drm_gem_object base;

	size_t num_pages;
	struct page **pages;

	/* set for buffers allocated by the backend */
	bool be_alloc;

	/* set for an imported PRIME buffer */
	struct sg_table *sgt_imported;
};

static inline struct xen_gem_object *
to_xen_gem_obj(struct drm_gem_object *gem_obj)
{
	return container_of(gem_obj, struct xen_gem_object, base);
}

static int gem_alloc_pages_array(struct xen_gem_object *xen_obj,
				 size_t buf_size)
{
	xen_obj->num_pages = DIV_ROUND_UP(buf_size, PAGE_SIZE);
	xen_obj->pages = kvmalloc_array(xen_obj->num_pages,
					sizeof(struct page *), GFP_KERNEL);
	return !xen_obj->pages ? -ENOMEM : 0;
}
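
/*
 * Worked example (editorial note, not upstream code): with 4 KiB pages,
 * a 1 MiB buffer needs DIV_ROUND_UP(0x100000, 0x1000) = 256 page
 * pointers, i.e. a 2 KiB pointer array on a 64-bit kernel, while a
 * 4097-byte request rounds up to 2 pages. kvmalloc_array() transparently
 * falls back to vmalloc() when the array is too large for kmalloc().
 */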

static void gem_free_pages_array(struct xen_gem_object *xen_obj)
{
	kvfree(xen_obj->pages);
	xen_obj->pages = NULL;
}

static const struct vm_operations_struct xen_drm_drv_vm_ops = {
	.open           = drm_gem_vm_open,
	.close          = drm_gem_vm_close,
};

static const struct drm_gem_object_funcs xen_drm_front_gem_object_funcs = {
	.free = xen_drm_front_gem_object_free,
	.get_sg_table = xen_drm_front_gem_get_sg_table,
	.vmap = xen_drm_front_gem_prime_vmap,
	.vunmap = xen_drm_front_gem_prime_vunmap,
	.vm_ops = &xen_drm_drv_vm_ops,
};
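
/*
 * Editorial sketch (not upstream code): the DRM core reaches these hooks
 * through gem_obj->funcs, so a PRIME vmap request, for instance, boils
 * down to roughly
 *
 *	struct dma_buf_map map;
 *	int ret = gem_obj->funcs->vmap(gem_obj, &map);
 *
 * and lands in xen_drm_front_gem_prime_vmap() below. The .free hook,
 * xen_drm_front_gem_object_free(), is not defined in this file: it comes
 * from xen_drm_front.c via xen_drm_front.h, where it destroys the
 * backend's display buffer and then calls
 * xen_drm_front_gem_free_object_unlocked() from this file.
 */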

static struct xen_gem_object *gem_create_obj(struct drm_device *dev,
					     size_t size)
{
	struct xen_gem_object *xen_obj;
	int ret;

	xen_obj = kzalloc(sizeof(*xen_obj), GFP_KERNEL);
	if (!xen_obj)
		return ERR_PTR(-ENOMEM);

	xen_obj->base.funcs = &xen_drm_front_gem_object_funcs;

	ret = drm_gem_object_init(dev, &xen_obj->base, size);
	if (ret < 0) {
		kfree(xen_obj);
		return ERR_PTR(ret);
	}

	return xen_obj;
}

static struct xen_gem_object *gem_create(struct drm_device *dev, size_t size)
{
	struct xen_drm_front_drm_info *drm_info = dev->dev_private;
	struct xen_gem_object *xen_obj;
	int ret;

	size = round_up(size, PAGE_SIZE);
	xen_obj = gem_create_obj(dev, size);
	if (IS_ERR(xen_obj))
		return xen_obj;

	if (drm_info->front_info->cfg.be_alloc) {
		/*
		 * backend will allocate space for this buffer, so
		 * only allocate array of pointers to pages
		 */
		ret = gem_alloc_pages_array(xen_obj, size);
		if (ret < 0)
			goto fail;

		/*
		 * allocate ballooned pages which will be used to map
		 * grant references provided by the backend
		 */
		ret = xen_alloc_unpopulated_pages(xen_obj->num_pages,
						  xen_obj->pages);
		if (ret < 0) {
			DRM_ERROR("Cannot allocate %zu ballooned pages: %d\n",
				  xen_obj->num_pages, ret);
			gem_free_pages_array(xen_obj);
			goto fail;
		}

		xen_obj->be_alloc = true;
		return xen_obj;
	}
	/*
	 * need to allocate backing pages now, so we can share those
	 * with the backend
	 */
	xen_obj->num_pages = DIV_ROUND_UP(size, PAGE_SIZE);
	xen_obj->pages = drm_gem_get_pages(&xen_obj->base);
	if (IS_ERR(xen_obj->pages)) {
		ret = PTR_ERR(xen_obj->pages);
		xen_obj->pages = NULL;
		goto fail;
	}

	return xen_obj;

fail:
	DRM_ERROR("Failed to allocate buffer with size %zu\n", size);
	return ERR_PTR(ret);
}

struct drm_gem_object *xen_drm_front_gem_create(struct drm_device *dev,
						size_t size)
{
	struct xen_gem_object *xen_obj;

	xen_obj = gem_create(dev, size);
	if (IS_ERR(xen_obj))
		return ERR_CAST(xen_obj);

	return &xen_obj->base;
}
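
/*
 * Usage sketch (editorial illustration, hypothetical caller): a DUMB-buffer
 * ioctl implementation in the frontend would create the object roughly as
 *
 *	args->pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
 *	args->size = args->pitch * args->height;
 *	gem_obj = xen_drm_front_gem_create(dev, args->size);
 *	if (IS_ERR(gem_obj))
 *		return PTR_ERR(gem_obj);
 *
 * and then publish a userspace handle with drm_gem_handle_create().
 */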

void xen_drm_front_gem_free_object_unlocked(struct drm_gem_object *gem_obj)
{
	struct xen_gem_object *xen_obj = to_xen_gem_obj(gem_obj);

	if (xen_obj->base.import_attach) {
		drm_prime_gem_destroy(&xen_obj->base, xen_obj->sgt_imported);
		gem_free_pages_array(xen_obj);
	} else {
		if (xen_obj->pages) {
			if (xen_obj->be_alloc) {
				xen_free_unpopulated_pages(xen_obj->num_pages,
							   xen_obj->pages);
				gem_free_pages_array(xen_obj);
			} else {
				drm_gem_put_pages(&xen_obj->base,
						  xen_obj->pages, true, false);
			}
		}
	}
	drm_gem_object_release(gem_obj);
	kfree(xen_obj);
}
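
/*
 * Editorial note: nothing calls this directly; it is reached through the
 * driver's .free hook once the last reference to the object is dropped,
 * e.g.
 *
 *	drm_gem_object_put(gem_obj);	// refcount 1 -> 0, .free fires
 *
 * For imported buffers, drm_prime_gem_destroy() detaches from the
 * exporter's dma-buf; for be_alloc buffers, the ballooned pages go back
 * via xen_free_unpopulated_pages().
 */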

struct page **xen_drm_front_gem_get_pages(struct drm_gem_object *gem_obj)
{
	struct xen_gem_object *xen_obj = to_xen_gem_obj(gem_obj);

	return xen_obj->pages;
}

struct sg_table *xen_drm_front_gem_get_sg_table(struct drm_gem_object *gem_obj)
{
	struct xen_gem_object *xen_obj = to_xen_gem_obj(gem_obj);

	if (!xen_obj->pages)
		return ERR_PTR(-ENOMEM);

	return drm_prime_pages_to_sg(gem_obj->dev,
				     xen_obj->pages, xen_obj->num_pages);
}
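
/*
 * Editorial sketch: a consumer of the returned table can walk it with the
 * usual scatterlist helpers, e.g.
 *
 *	struct sg_table *sgt = xen_drm_front_gem_get_sg_table(gem_obj);
 *	struct scatterlist *sg;
 *	int i;
 *
 *	if (!IS_ERR(sgt))
 *		for_each_sgtable_sg(sgt, sg, i)
 *			pr_debug("chunk %d: %u bytes\n", i, sg->length);
 *
 * drm_prime_pages_to_sg() merges physically contiguous pages, so the
 * table may hold far fewer entries than num_pages.
 */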

struct drm_gem_object *
xen_drm_front_gem_import_sg_table(struct drm_device *dev,
				  struct dma_buf_attachment *attach,
				  struct sg_table *sgt)
{
	struct xen_drm_front_drm_info *drm_info = dev->dev_private;
	struct xen_gem_object *xen_obj;
	size_t size;
	int ret;

	size = attach->dmabuf->size;
	xen_obj = gem_create_obj(dev, size);
	if (IS_ERR(xen_obj))
		return ERR_CAST(xen_obj);

	ret = gem_alloc_pages_array(xen_obj, size);
	if (ret < 0)
		return ERR_PTR(ret);

	xen_obj->sgt_imported = sgt;

	ret = drm_prime_sg_to_page_array(sgt, xen_obj->pages,
					 xen_obj->num_pages);
	if (ret < 0)
		return ERR_PTR(ret);

	ret = xen_drm_front_dbuf_create(drm_info->front_info,
					xen_drm_front_dbuf_to_cookie(&xen_obj->base),
					0, 0, 0, size, sgt->sgl->offset,
					xen_obj->pages);
	if (ret < 0)
		return ERR_PTR(ret);

	DRM_DEBUG("Imported buffer of size %zu with nents %u\n",
		  size, sgt->orig_nents);

	return &xen_obj->base;
}
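
/*
 * Editorial note: the DRM core calls this hook from the PRIME import path,
 * roughly drm_gem_prime_import() -> dma_buf_attach() ->
 * dma_buf_map_attachment() -> drm_driver.gem_prime_import_sg_table, so by
 * the time it runs @sgt already describes the exporter's backing pages.
 * Note that the error paths above return without releasing the partially
 * initialized object.
 */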

static int gem_mmap_obj(struct xen_gem_object *xen_obj,
			struct vm_area_struct *vma)
{
	int ret;

	/*
	 * clear the VM_PFNMAP flag that was set by drm_gem_mmap(), and set the
	 * vm_pgoff (used as a fake buffer offset by DRM) to 0 as we want to map
	 * the whole buffer.
	 */
	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_flags |= VM_MIXEDMAP;
	vma->vm_pgoff = 0;
	/*
	 * According to Xen on ARM ABI (xen/include/public/arch-arm.h):
	 * all memory which is shared with other entities in the system
	 * (including the hypervisor and other guests) must reside in memory
	 * which is mapped as Normal Inner Write-Back Outer Write-Back
	 * Inner-Shareable.
	 */
	vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);

	/*
	 * The vm_operations_struct.fault handler would normally populate the
	 * VMA on first CPU access. GPU accesses never fault, because the CPU
	 * does not touch that memory on the GPU's behalf. Insert all pages up
	 * front instead, so both CPU and GPU accesses work.
	 * FIXME: as all the pages are inserted here, the .fault handler must
	 * never be called, so don't provide one.
	 */
	ret = vm_map_pages(vma, xen_obj->pages, xen_obj->num_pages);
	if (ret < 0)
		DRM_ERROR("Failed to map pages into vma: %d\n", ret);

	return ret;
}
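
/*
 * Worked example (editorial): for a 3-page object, vm_map_pages() inserts
 * pages[0..2] back to back starting at vma->vm_start, so after
 *
 *	mmap(NULL, 3 * PAGE_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED,
 *	     fd, mmap_offset);
 *
 * userspace sees the whole buffer linearly and no further faults occur.
 * vm_map_pages() also verifies the VMA is large enough for num_pages.
 */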

int xen_drm_front_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct xen_gem_object *xen_obj;
	struct drm_gem_object *gem_obj;
	int ret;

	ret = drm_gem_mmap(filp, vma);
	if (ret < 0)
		return ret;

	gem_obj = vma->vm_private_data;
	xen_obj = to_xen_gem_obj(gem_obj);
	return gem_mmap_obj(xen_obj, vma);
}
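
/*
 * Editorial sketch (assumed wiring, see xen_drm_front.c): this is meant to
 * sit in the DRM device node's file_operations, along the lines of
 *
 *	static const struct file_operations xen_drm_dev_fops = {
 *		.owner = THIS_MODULE,
 *		.mmap  = xen_drm_front_gem_mmap,
 *		...
 *	};
 *
 * drm_gem_mmap() looks the object up by the fake offset in vm_pgoff and
 * stores it in vma->vm_private_data before gem_mmap_obj() takes over.
 */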

int xen_drm_front_gem_prime_vmap(struct drm_gem_object *gem_obj,
				 struct dma_buf_map *map)
{
	struct xen_gem_object *xen_obj = to_xen_gem_obj(gem_obj);
	void *vaddr;

	if (!xen_obj->pages)
		return -ENOMEM;

	/* Please see comment in gem_mmap_obj on mapping and attributes. */
	vaddr = vmap(xen_obj->pages, xen_obj->num_pages,
		     VM_MAP, PAGE_KERNEL);
	if (!vaddr)
		return -ENOMEM;
	dma_buf_map_set_vaddr(map, vaddr);

	return 0;
}
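
/*
 * Editorial sketch: an importer typically reaches this through dma-buf,
 * where struct dma_buf_map distinguishes system from I/O memory:
 *
 *	struct dma_buf_map map;
 *	int ret = dma_buf_vmap(dma_buf, &map);
 *
 *	if (!ret) {
 *		if (!map.is_iomem)
 *			memset(map.vaddr, 0, dma_buf->size);
 *		dma_buf_vunmap(dma_buf, &map);
 *	}
 *
 * Here the mapping always comes from vmap(), so is_iomem stays false.
 */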

void xen_drm_front_gem_prime_vunmap(struct drm_gem_object *gem_obj,
				    struct dma_buf_map *map)
{
	vunmap(map->vaddr);
}

int xen_drm_front_gem_prime_mmap(struct drm_gem_object *gem_obj,
				 struct vm_area_struct *vma)
{
	struct xen_gem_object *xen_obj;
	int ret;

	ret = drm_gem_mmap_obj(gem_obj, gem_obj->size, vma);
	if (ret < 0)
		return ret;

	xen_obj = to_xen_gem_obj(gem_obj);
	return gem_mmap_obj(xen_obj, vma);
}
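
/*
 * Editorial sketch (assumed wiring): the PRIME mmap variant above would be
 * hooked up through this frontend's drm_driver, roughly
 *
 *	static struct drm_driver xen_drm_driver = {
 *		...
 *		.gem_prime_mmap = xen_drm_front_gem_prime_mmap,
 *	};
 *
 * Unlike xen_drm_front_gem_mmap(), it starts from the GEM object itself,
 * so it uses drm_gem_mmap_obj() instead of the offset-based drm_gem_mmap().
 */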