linux/drivers/gpu/drm/virtio/virtgpu_ttm.c
/*
 * Copyright (C) 2015 Red Hat, Inc.
 * All Rights Reserved.
 *
 * Authors:
 *    Dave Airlie
 *    Alon Levy
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_page_alloc.h>
#include <drm/ttm/ttm_module.h>
#include <drm/drmP.h>
#include <drm/drm.h>
#include <drm/virtgpu_drm.h>
#include "virtgpu_drv.h"

#include <linux/delay.h>

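/* Walk from the TTM device back to the virtio_gpu_device that embeds it. */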
static struct
virtio_gpu_device *virtio_gpu_get_vgdev(struct ttm_bo_device *bdev)
{
        struct virtio_gpu_mman *mman;
        struct virtio_gpu_device *vgdev;

        mman = container_of(bdev, struct virtio_gpu_mman, bdev);
        vgdev = container_of(mman, struct virtio_gpu_device, mman);
        return vgdev;
}

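/* CPU mappings of buffer objects are handled entirely by TTM. */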
int virtio_gpu_mmap(struct file *filp, struct vm_area_struct *vma)
{
        struct drm_file *file_priv;
        struct virtio_gpu_device *vgdev;
        int r;

        file_priv = filp->private_data;
        vgdev = file_priv->minor->dev->dev_private;
        if (vgdev == NULL) {
                DRM_ERROR(
                 "filp->private_data->minor->dev->dev_private == NULL\n");
                return -EINVAL;
        }
        r = ttm_bo_mmap(filp, vma, &vgdev->mman.bdev);

        return r;
}

static int virtio_gpu_invalidate_caches(struct ttm_bo_device *bdev,
                                        uint32_t flags)
{
        return 0;
}

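/*
 * Dummy manager for the TTM_PL_TT domain.  virtio-gpu has no GPU address
 * space to manage: objects are identified to the host by resource id, so
 * "allocating" a node only needs to mark mm_node as non-NULL.
 */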
static int ttm_bo_man_get_node(struct ttm_mem_type_manager *man,
                               struct ttm_buffer_object *bo,
                               const struct ttm_place *place,
                               struct ttm_mem_reg *mem)
{
        mem->mm_node = (void *)1;
        return 0;
}

static void ttm_bo_man_put_node(struct ttm_mem_type_manager *man,
                                struct ttm_mem_reg *mem)
{
        mem->mm_node = (void *)NULL;
}

static int ttm_bo_man_init(struct ttm_mem_type_manager *man,
                           unsigned long p_size)
{
        return 0;
}

static int ttm_bo_man_takedown(struct ttm_mem_type_manager *man)
{
        return 0;
}

static void ttm_bo_man_debug(struct ttm_mem_type_manager *man,
                             struct drm_printer *printer)
{
}

static const struct ttm_mem_type_manager_func virtio_gpu_bo_manager_func = {
        .init = ttm_bo_man_init,
        .takedown = ttm_bo_man_takedown,
        .get_node = ttm_bo_man_get_node,
        .put_node = ttm_bo_man_put_node,
        .debug = ttm_bo_man_debug
};

static int virtio_gpu_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
                                    struct ttm_mem_type_manager *man)
{
        switch (type) {
        case TTM_PL_SYSTEM:
                /* System memory */
                man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
                man->available_caching = TTM_PL_MASK_CACHING;
                man->default_caching = TTM_PL_FLAG_CACHED;
                break;
        case TTM_PL_TT:
                man->func = &virtio_gpu_bo_manager_func;
                man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
                man->available_caching = TTM_PL_MASK_CACHING;
                man->default_caching = TTM_PL_FLAG_CACHED;
                break;
        default:
                DRM_ERROR("Unsupported memory type %u\n", (unsigned int)type);
                return -EINVAL;
        }
        return 0;
}

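/* Evicted buffers go back to system memory, with any caching attribute. */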
static void virtio_gpu_evict_flags(struct ttm_buffer_object *bo,
                                struct ttm_placement *placement)
{
        static const struct ttm_place placements = {
                .fpfn  = 0,
                .lpfn  = 0,
                .flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM,
        };

        placement->placement = &placements;
        placement->busy_placement = &placements;
        placement->num_placement = 1;
        placement->num_busy_placement = 1;
}

static int virtio_gpu_verify_access(struct ttm_buffer_object *bo,
                                    struct file *filp)
{
        return 0;
}

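/* All placements are ordinary system RAM, so there is no I/O memory to set up. */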
static int virtio_gpu_ttm_io_mem_reserve(struct ttm_bo_device *bdev,
                                         struct ttm_mem_reg *mem)
{
        struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];

        mem->bus.addr = NULL;
        mem->bus.offset = 0;
        mem->bus.size = mem->num_pages << PAGE_SHIFT;
        mem->bus.base = 0;
        mem->bus.is_iomem = false;
        if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
                return -EINVAL;
        switch (mem->mem_type) {
        case TTM_PL_SYSTEM:
        case TTM_PL_TT:
                /* system memory */
                return 0;
        default:
                return -EINVAL;
        }
        return 0;
}

static void virtio_gpu_ttm_io_mem_free(struct ttm_bo_device *bdev,
                                       struct ttm_mem_reg *mem)
{
}

/*
 * TTM backend functions.
 */
struct virtio_gpu_ttm_tt {
        struct ttm_dma_tt               ttm;
        struct virtio_gpu_object        *obj;
};

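/*
 * Bind/unbind attach and detach the object's backing pages to/from its
 * host-side resource via virtio_gpu_object_attach()/_detach().
 */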
static int virtio_gpu_ttm_tt_bind(struct ttm_tt *ttm,
                                  struct ttm_mem_reg *bo_mem)
{
        struct virtio_gpu_ttm_tt *gtt =
                container_of(ttm, struct virtio_gpu_ttm_tt, ttm.ttm);
        struct virtio_gpu_device *vgdev =
                virtio_gpu_get_vgdev(gtt->obj->tbo.bdev);

        virtio_gpu_object_attach(vgdev, gtt->obj, NULL);
        return 0;
}

static int virtio_gpu_ttm_tt_unbind(struct ttm_tt *ttm)
{
        struct virtio_gpu_ttm_tt *gtt =
                container_of(ttm, struct virtio_gpu_ttm_tt, ttm.ttm);
        struct virtio_gpu_device *vgdev =
                virtio_gpu_get_vgdev(gtt->obj->tbo.bdev);

        virtio_gpu_object_detach(vgdev, gtt->obj);
        return 0;
}

static void virtio_gpu_ttm_tt_destroy(struct ttm_tt *ttm)
{
        struct virtio_gpu_ttm_tt *gtt =
                container_of(ttm, struct virtio_gpu_ttm_tt, ttm.ttm);

        ttm_dma_tt_fini(&gtt->ttm);
        kfree(gtt);
}

static struct ttm_backend_func virtio_gpu_tt_func = {
        .bind = &virtio_gpu_ttm_tt_bind,
        .unbind = &virtio_gpu_ttm_tt_unbind,
        .destroy = &virtio_gpu_ttm_tt_destroy,
};

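/*
 * Allocate a DMA-aware TTM for a buffer object and wire up the
 * virtio-gpu backend functions.
 */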
static struct ttm_tt *virtio_gpu_ttm_tt_create(struct ttm_buffer_object *bo,
                                               uint32_t page_flags)
{
        struct virtio_gpu_device *vgdev;
        struct virtio_gpu_ttm_tt *gtt;

        vgdev = virtio_gpu_get_vgdev(bo->bdev);
        gtt = kzalloc(sizeof(struct virtio_gpu_ttm_tt), GFP_KERNEL);
        if (gtt == NULL)
                return NULL;
        gtt->ttm.ttm.func = &virtio_gpu_tt_func;
        gtt->obj = container_of(bo, struct virtio_gpu_object, tbo);
        if (ttm_dma_tt_init(&gtt->ttm, bo, page_flags)) {
                kfree(gtt);
                return NULL;
        }
        return &gtt->ttm.ttm;
}

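/* Called before TTM swaps out the object's pages; drop the cached sg table. */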
static void virtio_gpu_bo_swap_notify(struct ttm_buffer_object *tbo)
{
        struct virtio_gpu_object *bo;

        bo = container_of(tbo, struct virtio_gpu_object, tbo);

        if (bo->pages)
                virtio_gpu_object_free_sg_table(bo);
}

static struct ttm_bo_driver virtio_gpu_bo_driver = {
        .ttm_tt_create = &virtio_gpu_ttm_tt_create,
        .invalidate_caches = &virtio_gpu_invalidate_caches,
        .init_mem_type = &virtio_gpu_init_mem_type,
        .eviction_valuable = ttm_bo_eviction_valuable,
        .evict_flags = &virtio_gpu_evict_flags,
        .verify_access = &virtio_gpu_verify_access,
        .io_mem_reserve = &virtio_gpu_ttm_io_mem_reserve,
        .io_mem_free = &virtio_gpu_ttm_io_mem_free,
        .swap_notify = &virtio_gpu_bo_swap_notify,
};

int virtio_gpu_ttm_init(struct virtio_gpu_device *vgdev)
{
        int r;

        /* We are the only user of the address space. */
        r = ttm_bo_device_init(&vgdev->mman.bdev,
                               &virtio_gpu_bo_driver,
                               vgdev->ddev->anon_inode->i_mapping,
                               false);
        if (r) {
                DRM_ERROR("failed initializing buffer object driver(%d).\n", r);
                goto err_dev_init;
        }

        r = ttm_bo_init_mm(&vgdev->mman.bdev, TTM_PL_TT, 0);
        if (r) {
                DRM_ERROR("Failed initializing GTT heap.\n");
                goto err_mm_init;
        }
        return 0;

err_mm_init:
        ttm_bo_device_release(&vgdev->mman.bdev);
err_dev_init:
        return r;
}

void virtio_gpu_ttm_fini(struct virtio_gpu_device *vgdev)
{
        ttm_bo_device_release(&vgdev->mman.bdev);
        DRM_INFO("virtio_gpu: ttm finalized\n");
}