/* linux/drivers/gpu/drm/qxl/qxl_ttm.c */
/*
 * Copyright 2013 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alon Levy
 */
  25
#include <linux/delay.h>

#include <drm/drm.h>
#include <drm/drm_file.h>
#include <drm/drm_debugfs.h>
#include <drm/qxl_drm.h>
#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_range_manager.h>

#include "qxl_drv.h"
#include "qxl_object.h"
  40static struct qxl_device *qxl_get_qdev(struct ttm_device *bdev)
  41{
  42        struct qxl_mman *mman;
  43        struct qxl_device *qdev;
  44
  45        mman = container_of(bdev, struct qxl_mman, bdev);
  46        qdev = container_of(mman, struct qxl_device, mman);
  47        return qdev;
  48}
  49
  50static void qxl_evict_flags(struct ttm_buffer_object *bo,
  51                                struct ttm_placement *placement)
  52{
  53        struct qxl_bo *qbo;
  54        static const struct ttm_place placements = {
  55                .fpfn = 0,
  56                .lpfn = 0,
  57                .mem_type = TTM_PL_SYSTEM,
  58                .flags = 0
  59        };
  60
  61        if (!qxl_ttm_bo_is_qxl_bo(bo)) {
  62                placement->placement = &placements;
  63                placement->busy_placement = &placements;
  64                placement->num_placement = 1;
  65                placement->num_busy_placement = 1;
  66                return;
  67        }
  68        qbo = to_qxl_bo(bo);
  69        qxl_ttm_placement_from_domain(qbo, QXL_GEM_DOMAIN_CPU);
  70        *placement = qbo->placement;
  71}
  72
  73int qxl_ttm_io_mem_reserve(struct ttm_device *bdev,
  74                           struct ttm_resource *mem)
  75{
  76        struct qxl_device *qdev = qxl_get_qdev(bdev);
  77
  78        switch (mem->mem_type) {
  79        case TTM_PL_SYSTEM:
  80                /* system memory */
  81                return 0;
  82        case TTM_PL_VRAM:
  83                mem->bus.is_iomem = true;
  84                mem->bus.offset = (mem->start << PAGE_SHIFT) + qdev->vram_base;
  85                mem->bus.caching = ttm_cached;
  86                break;
  87        case TTM_PL_PRIV:
  88                mem->bus.is_iomem = true;
  89                mem->bus.offset = (mem->start << PAGE_SHIFT) +
  90                        qdev->surfaceram_base;
  91                mem->bus.caching = ttm_cached;
  92                break;
  93        default:
  94                return -EINVAL;
  95        }
  96        return 0;
  97}
  98
  99/*
 100 * TTM backend functions.
 101 */
/*
 * Destroy a ttm_tt allocated by qxl_ttm_tt_create().
 * Teardown order matters: common TTM state first, then the tt's own
 * resources, and only then the allocation itself.
 */
static void qxl_ttm_backend_destroy(struct ttm_device *bdev, struct ttm_tt *ttm)
{
	ttm_tt_destroy_common(bdev, ttm);
	ttm_tt_fini(ttm);
	kfree(ttm);
}
 108
 109static struct ttm_tt *qxl_ttm_tt_create(struct ttm_buffer_object *bo,
 110                                        uint32_t page_flags)
 111{
 112        struct ttm_tt *ttm;
 113
 114        ttm = kzalloc(sizeof(struct ttm_tt), GFP_KERNEL);
 115        if (ttm == NULL)
 116                return NULL;
 117        if (ttm_tt_init(ttm, bo, page_flags, ttm_cached)) {
 118                kfree(ttm);
 119                return NULL;
 120        }
 121        return ttm;
 122}
 123
 124static void qxl_bo_move_notify(struct ttm_buffer_object *bo,
 125                               struct ttm_resource *new_mem)
 126{
 127        struct qxl_bo *qbo;
 128        struct qxl_device *qdev;
 129
 130        if (!qxl_ttm_bo_is_qxl_bo(bo) || !bo->resource)
 131                return;
 132        qbo = to_qxl_bo(bo);
 133        qdev = to_qxl(qbo->tbo.base.dev);
 134
 135        if (bo->resource->mem_type == TTM_PL_PRIV && qbo->surface_id)
 136                qxl_surface_evict(qdev, qbo, new_mem ? true : false);
 137}
 138
 139static int qxl_bo_move(struct ttm_buffer_object *bo, bool evict,
 140                       struct ttm_operation_ctx *ctx,
 141                       struct ttm_resource *new_mem,
 142                       struct ttm_place *hop)
 143{
 144        struct ttm_resource *old_mem = bo->resource;
 145        int ret;
 146
 147        qxl_bo_move_notify(bo, new_mem);
 148
 149        ret = ttm_bo_wait_ctx(bo, ctx);
 150        if (ret)
 151                return ret;
 152
 153        if (old_mem->mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
 154                ttm_bo_move_null(bo, new_mem);
 155                return 0;
 156        }
 157        return ttm_bo_move_memcpy(bo, ctx, new_mem);
 158}
 159
/*
 * Backing memory is being deleted; reuse the move-notify path with a
 * NULL destination so qxl_surface_evict() sees "no new placement".
 */
static void qxl_bo_delete_mem_notify(struct ttm_buffer_object *bo)
{
	qxl_bo_move_notify(bo, NULL);
}
 164
/* Dispatch table hooking the qxl driver into the TTM core. */
static struct ttm_device_funcs qxl_bo_driver = {
	.ttm_tt_create = &qxl_ttm_tt_create,
	.ttm_tt_destroy = &qxl_ttm_backend_destroy,
	.eviction_valuable = ttm_bo_eviction_valuable, /* stock TTM policy */
	.evict_flags = &qxl_evict_flags,
	.move = &qxl_bo_move,
	.io_mem_reserve = &qxl_ttm_io_mem_reserve,
	.delete_mem_notify = &qxl_bo_delete_mem_notify,
};
 174
/*
 * Register a range-managed heap of @size pages for memory type @type.
 * use_tt is false: both qxl heaps are iomem (see qxl_ttm_io_mem_reserve).
 */
static int qxl_ttm_init_mem_type(struct qxl_device *qdev,
				 unsigned int type,
				 uint64_t size)
{
	return ttm_range_man_init(&qdev->mman.bdev, type, false, size);
}
 181
 182int qxl_ttm_init(struct qxl_device *qdev)
 183{
 184        int r;
 185        int num_io_pages; /* != rom->num_io_pages, we include surface0 */
 186
 187        /* No others user of address space so set it to 0 */
 188        r = ttm_device_init(&qdev->mman.bdev, &qxl_bo_driver, NULL,
 189                            qdev->ddev.anon_inode->i_mapping,
 190                            qdev->ddev.vma_offset_manager,
 191                            false, false);
 192        if (r) {
 193                DRM_ERROR("failed initializing buffer object driver(%d).\n", r);
 194                return r;
 195        }
 196        /* NOTE: this includes the framebuffer (aka surface 0) */
 197        num_io_pages = qdev->rom->ram_header_offset / PAGE_SIZE;
 198        r = qxl_ttm_init_mem_type(qdev, TTM_PL_VRAM, num_io_pages);
 199        if (r) {
 200                DRM_ERROR("Failed initializing VRAM heap.\n");
 201                return r;
 202        }
 203        r = qxl_ttm_init_mem_type(qdev, TTM_PL_PRIV,
 204                                  qdev->surfaceram_size / PAGE_SIZE);
 205        if (r) {
 206                DRM_ERROR("Failed initializing Surfaces heap.\n");
 207                return r;
 208        }
 209        DRM_INFO("qxl: %uM of VRAM memory size\n",
 210                 (unsigned int)qdev->vram_size / (1024 * 1024));
 211        DRM_INFO("qxl: %luM of IO pages memory ready (VRAM domain)\n",
 212                 ((unsigned int)num_io_pages * PAGE_SIZE) / (1024 * 1024));
 213        DRM_INFO("qxl: %uM of Surface memory size\n",
 214                 (unsigned int)qdev->surfaceram_size / (1024 * 1024));
 215        return 0;
 216}
 217
/*
 * Tear down everything qxl_ttm_init() set up. Order matters: both
 * range managers must be finalized before the ttm_device itself.
 */
void qxl_ttm_fini(struct qxl_device *qdev)
{
	ttm_range_man_fini(&qdev->mman.bdev, TTM_PL_VRAM);
	ttm_range_man_fini(&qdev->mman.bdev, TTM_PL_PRIV);
	ttm_device_fini(&qdev->mman.bdev);
	DRM_INFO("qxl: ttm finalized\n");
}
 225
 226#define QXL_DEBUGFS_MEM_TYPES 2
 227
 228#if defined(CONFIG_DEBUG_FS)
 229static int qxl_mm_dump_table(struct seq_file *m, void *data)
 230{
 231        struct drm_info_node *node = (struct drm_info_node *)m->private;
 232        struct ttm_resource_manager *man = (struct ttm_resource_manager *)node->info_ent->data;
 233        struct drm_printer p = drm_seq_file_printer(m);
 234
 235        ttm_resource_manager_debug(man, &p);
 236        return 0;
 237}
 238#endif
 239
 240void qxl_ttm_debugfs_init(struct qxl_device *qdev)
 241{
 242#if defined(CONFIG_DEBUG_FS)
 243        static struct drm_info_list qxl_mem_types_list[QXL_DEBUGFS_MEM_TYPES];
 244        static char qxl_mem_types_names[QXL_DEBUGFS_MEM_TYPES][32];
 245        unsigned int i;
 246
 247        for (i = 0; i < QXL_DEBUGFS_MEM_TYPES; i++) {
 248                if (i == 0)
 249                        sprintf(qxl_mem_types_names[i], "qxl_mem_mm");
 250                else
 251                        sprintf(qxl_mem_types_names[i], "qxl_surf_mm");
 252                qxl_mem_types_list[i].name = qxl_mem_types_names[i];
 253                qxl_mem_types_list[i].show = &qxl_mm_dump_table;
 254                qxl_mem_types_list[i].driver_features = 0;
 255                if (i == 0)
 256                        qxl_mem_types_list[i].data = ttm_manager_type(&qdev->mman.bdev, TTM_PL_VRAM);
 257                else
 258                        qxl_mem_types_list[i].data = ttm_manager_type(&qdev->mman.bdev, TTM_PL_PRIV);
 259
 260        }
 261        qxl_debugfs_add_files(qdev, qxl_mem_types_list, i);
 262#endif
 263}
 264