linux/drivers/gpu/drm/xen/xen_drm_front_gem.c
// SPDX-License-Identifier: GPL-2.0 OR MIT

/*
 *  Xen para-virtual DRM device
 *
 * Copyright (C) 2016-2018 EPAM Systems Inc.
 *
 * Author: Oleksandr Andrushchenko <oleksandr_andrushchenko@epam.com>
 */

#include "xen_drm_front_gem.h"

#include <drm/drmP.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_gem.h>
#include <drm/drm_probe_helper.h>

#include <linux/dma-buf.h>
#include <linux/scatterlist.h>
#include <linux/shmem_fs.h>

#include <xen/balloon.h>

#include "xen_drm_front.h"

struct xen_gem_object {
        struct drm_gem_object base;

        size_t num_pages;
        struct page **pages;

        /* set for buffers allocated by the backend */
        bool be_alloc;

        /* set for imported PRIME buffers */
        struct sg_table *sgt_imported;
};

static inline struct xen_gem_object *
to_xen_gem_obj(struct drm_gem_object *gem_obj)
{
        return container_of(gem_obj, struct xen_gem_object, base);
}

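/*
 * Allocate only the array of struct page pointers covering buf_size;
 * the backing pages themselves are obtained separately by the callers.
 */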
static int gem_alloc_pages_array(struct xen_gem_object *xen_obj,
                                 size_t buf_size)
{
        xen_obj->num_pages = DIV_ROUND_UP(buf_size, PAGE_SIZE);
        xen_obj->pages = kvmalloc_array(xen_obj->num_pages,
                                        sizeof(struct page *), GFP_KERNEL);
        return !xen_obj->pages ? -ENOMEM : 0;
}

static void gem_free_pages_array(struct xen_gem_object *xen_obj)
{
        kvfree(xen_obj->pages);
        xen_obj->pages = NULL;
}

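/*
 * Allocate a xen_gem_object and initialize the embedded GEM object for
 * size bytes; the backing pages are set up by the callers afterwards.
 */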
static struct xen_gem_object *gem_create_obj(struct drm_device *dev,
                                             size_t size)
{
        struct xen_gem_object *xen_obj;
        int ret;

        xen_obj = kzalloc(sizeof(*xen_obj), GFP_KERNEL);
        if (!xen_obj)
                return ERR_PTR(-ENOMEM);

        ret = drm_gem_object_init(dev, &xen_obj->base, size);
        if (ret < 0) {
                kfree(xen_obj);
                return ERR_PTR(ret);
        }

        return xen_obj;
}

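/*
 * Create a buffer of the given size. Depending on the be_alloc
 * configuration option the backing pages are either allocated locally
 * (shmem pages which will be shared with the backend) or ballooned out,
 * so that grant references provided by the backend can be mapped into
 * them.
 */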
static struct xen_gem_object *gem_create(struct drm_device *dev, size_t size)
{
        struct xen_drm_front_drm_info *drm_info = dev->dev_private;
        struct xen_gem_object *xen_obj;
        int ret;

        size = round_up(size, PAGE_SIZE);
        xen_obj = gem_create_obj(dev, size);
        /* gem_create_obj() never returns NULL, so IS_ERR is sufficient */
        if (IS_ERR(xen_obj))
                return xen_obj;

        if (drm_info->front_info->cfg.be_alloc) {
                /*
                 * backend will allocate space for this buffer, so
                 * only allocate array of pointers to pages
                 */
                ret = gem_alloc_pages_array(xen_obj, size);
                if (ret < 0)
                        goto fail;

                /*
                 * allocate ballooned pages which will be used to map
                 * grant references provided by the backend
                 */
                ret = alloc_xenballooned_pages(xen_obj->num_pages,
                                               xen_obj->pages);
                if (ret < 0) {
                        DRM_ERROR("Cannot allocate %zu ballooned pages: %d\n",
                                  xen_obj->num_pages, ret);
                        gem_free_pages_array(xen_obj);
                        goto fail;
                }

                xen_obj->be_alloc = true;
                return xen_obj;
        }
        /*
         * need to allocate backing pages now, so we can share those
         * with the backend
         */
        xen_obj->num_pages = DIV_ROUND_UP(size, PAGE_SIZE);
        xen_obj->pages = drm_gem_get_pages(&xen_obj->base);
        /* drm_gem_get_pages() returns ERR_PTR on failure, never NULL */
        if (IS_ERR(xen_obj->pages)) {
                ret = PTR_ERR(xen_obj->pages);
                xen_obj->pages = NULL;
                goto fail;
        }

        return xen_obj;

fail:
        DRM_ERROR("Failed to allocate buffer with size %zu\n", size);
        return ERR_PTR(ret);
}

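/*
 * Entry point for buffer creation, presumably called from the driver's
 * dumb_create implementation in xen_drm_front.c (not shown here).
 */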
struct drm_gem_object *xen_drm_front_gem_create(struct drm_device *dev,
                                                size_t size)
{
        struct xen_gem_object *xen_obj;

        xen_obj = gem_create(dev, size);
        if (IS_ERR(xen_obj))
                return ERR_CAST(xen_obj);

        return &xen_obj->base;
}

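/*
 * Release a buffer, undoing whichever of the three allocation paths
 * created it: PRIME import, backend-allocated (ballooned) pages or
 * locally allocated shmem pages.
 */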
void xen_drm_front_gem_free_object_unlocked(struct drm_gem_object *gem_obj)
{
        struct xen_gem_object *xen_obj = to_xen_gem_obj(gem_obj);

        if (xen_obj->base.import_attach) {
                drm_prime_gem_destroy(&xen_obj->base, xen_obj->sgt_imported);
                gem_free_pages_array(xen_obj);
        } else {
                if (xen_obj->pages) {
                        if (xen_obj->be_alloc) {
                                free_xenballooned_pages(xen_obj->num_pages,
                                                        xen_obj->pages);
                                gem_free_pages_array(xen_obj);
                        } else {
                                drm_gem_put_pages(&xen_obj->base,
                                                  xen_obj->pages, true, false);
                        }
                }
        }
        drm_gem_object_release(gem_obj);
        kfree(xen_obj);
}

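/*
 * Accessor for the backing pages, presumably used by other parts of the
 * driver (not shown here) to grant the pages to the backend.
 */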
struct page **xen_drm_front_gem_get_pages(struct drm_gem_object *gem_obj)
{
        struct xen_gem_object *xen_obj = to_xen_gem_obj(gem_obj);

        return xen_obj->pages;
}

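/*
 * PRIME export helper, presumably wired as the driver's
 * .gem_prime_get_sg_table hook.
 */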
struct sg_table *xen_drm_front_gem_get_sg_table(struct drm_gem_object *gem_obj)
{
        struct xen_gem_object *xen_obj = to_xen_gem_obj(gem_obj);

        if (!xen_obj->pages)
                return ERR_PTR(-ENOMEM);

        return drm_prime_pages_to_sg(xen_obj->pages, xen_obj->num_pages);
}

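/*
 * PRIME import helper, presumably the driver's .gem_prime_import_sg_table
 * hook: wraps the imported scatter-gather table in a GEM object and
 * registers the buffer with the backend via xen_drm_front_dbuf_create().
 */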
struct drm_gem_object *
xen_drm_front_gem_import_sg_table(struct drm_device *dev,
                                  struct dma_buf_attachment *attach,
                                  struct sg_table *sgt)
{
        struct xen_drm_front_drm_info *drm_info = dev->dev_private;
        struct xen_gem_object *xen_obj;
        size_t size;
        int ret;

        size = attach->dmabuf->size;
        xen_obj = gem_create_obj(dev, size);
        if (IS_ERR(xen_obj))
                return ERR_CAST(xen_obj);

        ret = gem_alloc_pages_array(xen_obj, size);
        if (ret < 0)
                return ERR_PTR(ret);

        xen_obj->sgt_imported = sgt;

        ret = drm_prime_sg_to_page_addr_arrays(sgt, xen_obj->pages,
                                               NULL, xen_obj->num_pages);
        if (ret < 0)
                return ERR_PTR(ret);

        ret = xen_drm_front_dbuf_create(drm_info->front_info,
                                        xen_drm_front_dbuf_to_cookie(&xen_obj->base),
                                        0, 0, 0, size, xen_obj->pages);
        if (ret < 0)
                return ERR_PTR(ret);

        DRM_DEBUG("Imported buffer of size %zu with nents %u\n",
                  size, sgt->nents);

        return &xen_obj->base;
}

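/*
 * Common tail of both mmap paths (file mmap and PRIME mmap): map all
 * backing pages into the vma up front.
 */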
static int gem_mmap_obj(struct xen_gem_object *xen_obj,
                        struct vm_area_struct *vma)
{
        int ret;

        /*
         * clear the VM_PFNMAP flag that was set by drm_gem_mmap(), and set the
         * vm_pgoff (used as a fake buffer offset by DRM) to 0 as we want to map
         * the whole buffer.
         */
        vma->vm_flags &= ~VM_PFNMAP;
        vma->vm_flags |= VM_MIXEDMAP;
        vma->vm_pgoff = 0;
        /*
         * According to Xen on ARM ABI (xen/include/public/arch-arm.h):
         * all memory which is shared with other entities in the system
         * (including the hypervisor and other guests) must reside in memory
         * which is mapped as Normal Inner Write-Back Outer Write-Back
         * Inner-Shareable.
         */
        vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);

        /*
         * The vm_operations_struct.fault handler would normally be called on
         * first CPU access to the mapping, but a GPU accessing the buffer
         * never faults. Insert all pages now, so both CPU and GPU accesses
         * work.
         * FIXME: as all the pages are inserted here, no .fault handler should
         * ever be called, so none is provided.
         */
        ret = vm_map_pages(vma, xen_obj->pages, xen_obj->num_pages);
        if (ret < 0)
                DRM_ERROR("Failed to map pages into vma: %d\n", ret);

        return ret;
}

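/*
 * mmap() file operation, presumably installed in the driver's
 * file_operations in xen_drm_front.c (not shown here).
 */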
int xen_drm_front_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
        struct xen_gem_object *xen_obj;
        struct drm_gem_object *gem_obj;
        int ret;

        ret = drm_gem_mmap(filp, vma);
        if (ret < 0)
                return ret;

        gem_obj = vma->vm_private_data;
        xen_obj = to_xen_gem_obj(gem_obj);
        return gem_mmap_obj(xen_obj, vma);
}

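/*
 * Kernel virtual mapping for PRIME, presumably the driver's
 * .gem_prime_vmap hook.
 */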
void *xen_drm_front_gem_prime_vmap(struct drm_gem_object *gem_obj)
{
        struct xen_gem_object *xen_obj = to_xen_gem_obj(gem_obj);

        if (!xen_obj->pages)
                return NULL;

        /* Please see comment in gem_mmap_obj on mapping and attributes. */
        return vmap(xen_obj->pages, xen_obj->num_pages,
                    VM_MAP, PAGE_KERNEL);
}

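/* Counterpart of xen_drm_front_gem_prime_vmap(). */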
void xen_drm_front_gem_prime_vunmap(struct drm_gem_object *gem_obj,
                                    void *vaddr)
{
        vunmap(vaddr);
}

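/*
 * mmap for PRIME-exported buffers, presumably the driver's
 * .gem_prime_mmap hook.
 */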
int xen_drm_front_gem_prime_mmap(struct drm_gem_object *gem_obj,
                                 struct vm_area_struct *vma)
{
        struct xen_gem_object *xen_obj;
        int ret;

        ret = drm_gem_mmap_obj(gem_obj, gem_obj->size, vma);
        if (ret < 0)
                return ret;

        xen_obj = to_xen_gem_obj(gem_obj);
        return gem_mmap_obj(xen_obj, vma);
}