linux/drivers/gpu/drm/xen/xen_drm_front_gem.c
// SPDX-License-Identifier: GPL-2.0 OR MIT

/*
 *  Xen para-virtual DRM device
 *
 * Copyright (C) 2016-2018 EPAM Systems Inc.
 *
 * Author: Oleksandr Andrushchenko <oleksandr_andrushchenko@epam.com>
 */

#include "xen_drm_front_gem.h"

#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_gem.h>

#include <linux/dma-buf.h>
#include <linux/scatterlist.h>
#include <linux/shmem_fs.h>

#include <xen/balloon.h>

#include "xen_drm_front.h"
#include "xen_drm_front_shbuf.h"

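/*
 * Driver-private GEM object: wraps the base drm_gem_object together with
 * the array of backing pages, so that the same representation can serve
 * locally allocated, backend-allocated (ballooned) and PRIME-imported
 * buffers.
 */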
struct xen_gem_object {
        struct drm_gem_object base;

        size_t num_pages;
        struct page **pages;

        /* set for buffers allocated by the backend */
        bool be_alloc;

        /* this is for imported PRIME buffer */
        struct sg_table *sgt_imported;
};

static inline struct xen_gem_object *
to_xen_gem_obj(struct drm_gem_object *gem_obj)
{
        return container_of(gem_obj, struct xen_gem_object, base);
}

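/*
 * Allocate only the pages[] pointer array for a buffer of buf_size bytes;
 * the backing pages themselves are allocated (or ballooned) by the callers.
 */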
static int gem_alloc_pages_array(struct xen_gem_object *xen_obj,
                                 size_t buf_size)
{
        xen_obj->num_pages = DIV_ROUND_UP(buf_size, PAGE_SIZE);
        xen_obj->pages = kvmalloc_array(xen_obj->num_pages,
                                        sizeof(struct page *), GFP_KERNEL);
        return !xen_obj->pages ? -ENOMEM : 0;
}

static void gem_free_pages_array(struct xen_gem_object *xen_obj)
{
        kvfree(xen_obj->pages);
        xen_obj->pages = NULL;
}

static struct xen_gem_object *gem_create_obj(struct drm_device *dev,
                                             size_t size)
{
        struct xen_gem_object *xen_obj;
        int ret;

        xen_obj = kzalloc(sizeof(*xen_obj), GFP_KERNEL);
        if (!xen_obj)
                return ERR_PTR(-ENOMEM);

        ret = drm_gem_object_init(dev, &xen_obj->base, size);
        if (ret < 0) {
                kfree(xen_obj);
                return ERR_PTR(ret);
        }

        return xen_obj;
}

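/*
 * Allocate a GEM object of the given size. Depending on the be_alloc
 * configuration option the buffer is either backed by ballooned pages
 * (the backend allocates the buffer and provides grant references for it)
 * or by shmem pages allocated here, which are then shared with the
 * backend.
 */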
static struct xen_gem_object *gem_create(struct drm_device *dev, size_t size)
{
        struct xen_drm_front_drm_info *drm_info = dev->dev_private;
        struct xen_gem_object *xen_obj;
        int ret;

        size = round_up(size, PAGE_SIZE);
        xen_obj = gem_create_obj(dev, size);
        if (IS_ERR_OR_NULL(xen_obj))
                return xen_obj;

        if (drm_info->front_info->cfg.be_alloc) {
                /*
                 * The backend will allocate space for this buffer, so
                 * only allocate the array of pointers to pages here.
                 */
                ret = gem_alloc_pages_array(xen_obj, size);
                if (ret < 0)
                        goto fail;

                /*
                 * Allocate ballooned pages which will be used to map
                 * grant references provided by the backend.
                 */
                ret = alloc_xenballooned_pages(xen_obj->num_pages,
                                               xen_obj->pages);
                if (ret < 0) {
                        DRM_ERROR("Cannot allocate %zu ballooned pages: %d\n",
                                  xen_obj->num_pages, ret);
                        gem_free_pages_array(xen_obj);
                        goto fail;
                }

                xen_obj->be_alloc = true;
                return xen_obj;
        }
        /*
         * Need to allocate the backing pages now, so that they can be
         * shared with the backend.
         */
        xen_obj->num_pages = DIV_ROUND_UP(size, PAGE_SIZE);
        xen_obj->pages = drm_gem_get_pages(&xen_obj->base);
        if (IS_ERR_OR_NULL(xen_obj->pages)) {
                ret = PTR_ERR(xen_obj->pages);
                xen_obj->pages = NULL;
                goto fail;
        }

        return xen_obj;

fail:
        DRM_ERROR("Failed to allocate buffer with size %zu\n", size);
        return ERR_PTR(ret);
}

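/*
 * Driver entry point for GEM object creation: a thin wrapper around
 * gem_create() which returns the embedded drm_gem_object.
 */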
struct drm_gem_object *xen_drm_front_gem_create(struct drm_device *dev,
                                                size_t size)
{
        struct xen_gem_object *xen_obj;

        xen_obj = gem_create(dev, size);
        if (IS_ERR_OR_NULL(xen_obj))
                return ERR_CAST(xen_obj);

        return &xen_obj->base;
}

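/*
 * Tear down a GEM object, undoing whichever allocation path was taken:
 * PRIME import, backend (ballooned) allocation or local shmem pages.
 */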
void xen_drm_front_gem_free_object_unlocked(struct drm_gem_object *gem_obj)
{
        struct xen_gem_object *xen_obj = to_xen_gem_obj(gem_obj);

        if (xen_obj->base.import_attach) {
                drm_prime_gem_destroy(&xen_obj->base, xen_obj->sgt_imported);
                gem_free_pages_array(xen_obj);
        } else {
                if (xen_obj->pages) {
                        if (xen_obj->be_alloc) {
                                free_xenballooned_pages(xen_obj->num_pages,
                                                        xen_obj->pages);
                                gem_free_pages_array(xen_obj);
                        } else {
                                drm_gem_put_pages(&xen_obj->base,
                                                  xen_obj->pages, true, false);
                        }
                }
        }
        drm_gem_object_release(gem_obj);
        kfree(xen_obj);
}

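/* Plain accessor for the object's backing pages array. */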
struct page **xen_drm_front_gem_get_pages(struct drm_gem_object *gem_obj)
{
        struct xen_gem_object *xen_obj = to_xen_gem_obj(gem_obj);

        return xen_obj->pages;
}

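/*
 * Export the backing pages as a scatter-gather table for PRIME; returns
 * NULL if the object has no backing pages.
 */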
struct sg_table *xen_drm_front_gem_get_sg_table(struct drm_gem_object *gem_obj)
{
        struct xen_gem_object *xen_obj = to_xen_gem_obj(gem_obj);

        if (!xen_obj->pages)
                return NULL;

        return drm_prime_pages_to_sg(xen_obj->pages, xen_obj->num_pages);
}

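/*
 * Import a PRIME buffer: extract the page array from the scatter-gather
 * table and tell the backend about the new buffer via
 * xen_drm_front_dbuf_create().
 */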
struct drm_gem_object *
xen_drm_front_gem_import_sg_table(struct drm_device *dev,
                                  struct dma_buf_attachment *attach,
                                  struct sg_table *sgt)
{
        struct xen_drm_front_drm_info *drm_info = dev->dev_private;
        struct xen_gem_object *xen_obj;
        size_t size;
        int ret;

        size = attach->dmabuf->size;
        xen_obj = gem_create_obj(dev, size);
        if (IS_ERR_OR_NULL(xen_obj))
                return ERR_CAST(xen_obj);

        ret = gem_alloc_pages_array(xen_obj, size);
        if (ret < 0)
                return ERR_PTR(ret);

        xen_obj->sgt_imported = sgt;

        ret = drm_prime_sg_to_page_addr_arrays(sgt, xen_obj->pages,
                                               NULL, xen_obj->num_pages);
        if (ret < 0)
                return ERR_PTR(ret);

        ret = xen_drm_front_dbuf_create(drm_info->front_info,
                                        xen_drm_front_dbuf_to_cookie(&xen_obj->base),
                                        0, 0, 0, size, xen_obj->pages);
        if (ret < 0)
                return ERR_PTR(ret);

        DRM_DEBUG("Imported buffer of size %zu with nents %u\n",
                  size, sgt->nents);

        return &xen_obj->base;
}

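/*
 * Map all of the object's pages into the given VMA: the pages are
 * inserted one by one up front, so no .fault handler is needed for
 * this mapping.
 */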
static int gem_mmap_obj(struct xen_gem_object *xen_obj,
                        struct vm_area_struct *vma)
{
        unsigned long addr = vma->vm_start;
        int i;

        /*
         * Clear the VM_PFNMAP flag that was set by drm_gem_mmap(), and set
         * the vm_pgoff (used as a fake buffer offset by DRM) to 0, as we
         * want to map the whole buffer.
         */
        vma->vm_flags &= ~VM_PFNMAP;
        vma->vm_flags |= VM_MIXEDMAP;
        vma->vm_pgoff = 0;
        vma->vm_page_prot =
                        pgprot_writecombine(vm_get_page_prot(vma->vm_flags));

        /*
         * Normally the vm_operations_struct.fault handler would be called
         * on the first CPU access to the mapping, but a GPU may access the
         * buffer without the CPU ever touching the memory. Insert all the
         * pages now, so that both CPU and GPU accesses work.
         * FIXME: as all the pages are inserted here, the .fault handler
         * must never be called, so none is provided.
         */
        for (i = 0; i < xen_obj->num_pages; i++) {
                int ret;

                ret = vm_insert_page(vma, addr, xen_obj->pages[i]);
                if (ret < 0) {
                        DRM_ERROR("Failed to insert pages into vma: %d\n", ret);
                        return ret;
                }

                addr += PAGE_SIZE;
        }
        return 0;
}

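/*
 * A minimal sketch of how this mmap entry point is expected to be wired
 * up (illustrative only; the fops structure and its name live in the
 * main driver file, not here):
 *
 *	static const struct file_operations xen_drm_dev_fops = {
 *		.owner = THIS_MODULE,
 *		.open = drm_open,
 *		.release = drm_release,
 *		.mmap = xen_drm_front_gem_mmap,
 *	};
 */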
int xen_drm_front_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
        struct xen_gem_object *xen_obj;
        struct drm_gem_object *gem_obj;
        int ret;

        ret = drm_gem_mmap(filp, vma);
        if (ret < 0)
                return ret;

        gem_obj = vma->vm_private_data;
        xen_obj = to_xen_gem_obj(gem_obj);
        return gem_mmap_obj(xen_obj, vma);
}

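/*
 * Create a contiguous kernel virtual mapping of the object's pages for
 * PRIME vmap; returns NULL if the object has no backing pages.
 */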
void *xen_drm_front_gem_prime_vmap(struct drm_gem_object *gem_obj)
{
        struct xen_gem_object *xen_obj = to_xen_gem_obj(gem_obj);

        if (!xen_obj->pages)
                return NULL;

        return vmap(xen_obj->pages, xen_obj->num_pages,
                    VM_MAP, pgprot_writecombine(PAGE_KERNEL));
}

void xen_drm_front_gem_prime_vunmap(struct drm_gem_object *gem_obj,
                                    void *vaddr)
{
        vunmap(vaddr);
}

int xen_drm_front_gem_prime_mmap(struct drm_gem_object *gem_obj,
                                 struct vm_area_struct *vma)
{
        struct xen_gem_object *xen_obj;
        int ret;

        ret = drm_gem_mmap_obj(gem_obj, gem_obj->size, vma);
        if (ret < 0)
                return ret;

        xen_obj = to_xen_gem_obj(gem_obj);
        return gem_mmap_obj(xen_obj, vma);
}
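
/*
 * A sketch of how the GEM and PRIME helpers above are expected to be
 * plugged into struct drm_driver by the main driver file (illustrative
 * only; exact field names depend on the kernel version this driver was
 * written against):
 *
 *	.gem_free_object_unlocked  = xen_drm_front_gem_free_object_unlocked,
 *	.gem_prime_get_sg_table    = xen_drm_front_gem_get_sg_table,
 *	.gem_prime_import_sg_table = xen_drm_front_gem_import_sg_table,
 *	.gem_prime_vmap            = xen_drm_front_gem_prime_vmap,
 *	.gem_prime_vunmap          = xen_drm_front_gem_prime_vunmap,
 *	.gem_prime_mmap            = xen_drm_front_gem_prime_mmap,
 */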