linux/drivers/gpu/drm/i915/intel_region_ttm.c
// SPDX-License-Identifier: MIT
/*
 * Copyright © 2021 Intel Corporation
 */
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_device.h>
#include <drm/ttm/ttm_range_manager.h>

#include "i915_drv.h"
#include "i915_scatterlist.h"

#include "intel_region_ttm.h"

/**
 * DOC: TTM support structure
 *
 * The code in this file deals with setting up memory managers for TTM
 * LMEM and MOCK regions, and with converting the output from the managers
 * to struct sg_table. Basically it provides the mapping from i915 GEM
 * regions to TTM memory types and resource managers.
 */

/* A zero-initialized driver for now. We don't have a TTM backend yet. */
static struct ttm_device_funcs i915_ttm_bo_driver;

/**
 * intel_region_ttm_device_init - Initialize a TTM device
 * @dev_priv: Pointer to an i915 device private structure.
 *
 * Return: 0 on success, negative error code on failure.
 */
int intel_region_ttm_device_init(struct drm_i915_private *dev_priv)
{
        struct drm_device *drm = &dev_priv->drm;

        return ttm_device_init(&dev_priv->bdev, &i915_ttm_bo_driver,
                               drm->dev, drm->anon_inode->i_mapping,
                               drm->vma_offset_manager, false, false);
}

/**
 * intel_region_ttm_device_fini - Finalize a TTM device
 * @dev_priv: Pointer to an i915 device private structure.
 */
void intel_region_ttm_device_fini(struct drm_i915_private *dev_priv)
{
        ttm_device_fini(&dev_priv->bdev);
}

/*
 * Map the i915 memory regions to TTM memory types. We use the
 * driver-private types for now, reserving TTM_PL_VRAM for stolen
 * memory and TTM_PL_TT for GGTT use, should we decide to implement that.
 */
static int intel_region_to_ttm_type(struct intel_memory_region *mem)
{
        int type;

        GEM_BUG_ON(mem->type != INTEL_MEMORY_LOCAL &&
                   mem->type != INTEL_MEMORY_MOCK);

        type = mem->instance + TTM_PL_PRIV;
        GEM_BUG_ON(type >= TTM_NUM_MEM_TYPES);

        return type;
}

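/**
 * intel_region_ttm_node_reserve - Reserve a part of the region's address space
 * @mem: The memory region.
 * @offset: The offset, in bytes, of the area to reserve.
 * @size: The size, in bytes, of the area to reserve.
 *
 * Return: An opaque struct ttm_resource representing the reserved area on
 * success, an error pointer on failure.
 */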
static struct ttm_resource *
intel_region_ttm_node_reserve(struct intel_memory_region *mem,
                              resource_size_t offset,
                              resource_size_t size)
{
        struct ttm_resource_manager *man = mem->region_private;
        struct ttm_place place = {};
        struct ttm_buffer_object mock_bo = {};
        struct ttm_resource *res;
        int ret;

        /*
         * Having to use a mock_bo is unfortunate but stems from some
         * drivers having private managers that insist on knowing what the
         * allocated memory is intended for, using it to send private
         * data to the manager. Also, the bo has recently been used to send
         * alignment info to the manager. Assume that, apart from the latter,
         * none of the managers we use will ever access the buffer object
         * members, and hope that we can pass the alignment info via
         * struct ttm_place in the future.
         */

        place.fpfn = offset >> PAGE_SHIFT;
        place.lpfn = place.fpfn + (size >> PAGE_SHIFT);
        mock_bo.base.size = size;
        ret = man->func->alloc(man, &mock_bo, &place, &res);
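        /* TTM signals lack of space with -ENOSPC; i915 uses -ENXIO for that. */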
        if (ret == -ENOSPC)
                ret = -ENXIO;

        return ret ? ERR_PTR(ret) : res;
}

/**
 * intel_region_ttm_node_free - Free a node allocated from a resource manager
 * @mem: The region the node was allocated from.
 * @res: The opaque resource representing the allocation.
 */
void intel_region_ttm_node_free(struct intel_memory_region *mem,
                                struct ttm_resource *res)
{
        struct ttm_resource_manager *man = mem->region_private;

        man->func->free(man, res);
}

static const struct intel_memory_region_private_ops priv_ops = {
        .reserve = intel_region_ttm_node_reserve,
        .free = intel_region_ttm_node_free,
};

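/**
 * intel_region_ttm_init - Initialize a TTM region.
 * @mem: The memory region.
 *
 * This function sets up a TTM range manager for the region and initializes
 * the region fields (chunk size, max order, private ops) that the rest of
 * the memory region code relies on.
 *
 * Return: 0 on success, negative error code on failure.
 */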
int intel_region_ttm_init(struct intel_memory_region *mem)
{
        struct ttm_device *bdev = &mem->i915->bdev;
        int mem_type = intel_region_to_ttm_type(mem);
        int ret;

        ret = ttm_range_man_init(bdev, mem_type, false,
                                 resource_size(&mem->region) >> PAGE_SHIFT);
        if (ret)
                return ret;

        mem->chunk_size = PAGE_SIZE;
        mem->max_order =
                get_order(rounddown_pow_of_two(resource_size(&mem->region)));
        mem->is_range_manager = true;
        mem->priv_ops = &priv_ops;
        mem->region_private = ttm_manager_type(bdev, mem_type);

        return 0;
}

/**
 * intel_region_ttm_fini - Finalize a TTM region.
 * @mem: The memory region
 *
 * This function takes down the TTM resource manager associated with the
 * memory region, and if it was registered with the TTM device,
 * removes that registration.
 */
void intel_region_ttm_fini(struct intel_memory_region *mem)
{
        int ret;

        ret = ttm_range_man_fini(&mem->i915->bdev,
                                 intel_region_to_ttm_type(mem));
        GEM_WARN_ON(ret);
        mem->region_private = NULL;
}

/**
 * intel_region_ttm_node_to_st - Convert an opaque TTM resource manager node
 * to an sg_table.
 * @mem: The memory region.
 * @res: The resource manager node obtained from the TTM resource manager.
 *
 * The GEM backends typically use sg-tables for operations on the underlying
 * io_memory. So provide a way for the backends to translate the
 * nodes they are handed from TTM to sg-tables.
 *
 * Return: A malloced sg_table on success, an error pointer on failure.
 */
struct sg_table *intel_region_ttm_node_to_st(struct intel_memory_region *mem,
                                             struct ttm_resource *res)
{
        struct ttm_range_mgr_node *range_node =
                container_of(res, typeof(*range_node), base);

        GEM_WARN_ON(!mem->is_range_manager);
        return i915_sg_from_mm_node(&range_node->mm_nodes[0],
                                    mem->region.start);
}

/**
 * intel_region_ttm_node_alloc - Allocate memory resources from a region
 * @mem: The memory region.
 * @size: The requested size in bytes.
 * @flags: Allocation flags.
 *
 * This functionality is provided only for callers that need to allocate
 * memory from standalone TTM range managers, without the TTM eviction
 * functionality. Don't use it if you are not completely sure that's the
 * case. The returned opaque node can be converted to an sg_table using
 * intel_region_ttm_node_to_st(), and can be freed using
 * intel_region_ttm_node_free().
 *
 * Return: A valid pointer on success, an error pointer on failure.
 */
struct ttm_resource *
intel_region_ttm_node_alloc(struct intel_memory_region *mem,
                            resource_size_t size,
                            unsigned int flags)
{
        struct ttm_resource_manager *man = mem->region_private;
        struct ttm_place place = {};
        struct ttm_buffer_object mock_bo = {};
        struct ttm_resource *res;
        int ret;

        /*
         * We ignore the flags for now, since we're using the range
         * manager and contiguity and min page size are fulfilled
         * by default if the size is min page size aligned.
         */
        mock_bo.base.size = size;

        if (mem->is_range_manager) {
                if (size >= SZ_1G)
                        mock_bo.page_alignment = SZ_1G >> PAGE_SHIFT;
                else if (size >= SZ_2M)
                        mock_bo.page_alignment = SZ_2M >> PAGE_SHIFT;
                else if (size >= SZ_64K)
                        mock_bo.page_alignment = SZ_64K >> PAGE_SHIFT;
        }

        ret = man->func->alloc(man, &mock_bo, &place, &res);
        if (ret == -ENOSPC)
                ret = -ENXIO;
        return ret ? ERR_PTR(ret) : res;
}
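
/*
 * Illustrative sketch only, not part of the driver: how a caller is expected
 * to combine the helpers above, assuming @mem is an initialized
 * struct intel_memory_region and SZ_2M is just an example size. Error
 * handling is abbreviated and the sg_table teardown is up to the caller.
 *
 *      struct ttm_resource *res;
 *      struct sg_table *st;
 *
 *      res = intel_region_ttm_node_alloc(mem, SZ_2M, 0);
 *      if (IS_ERR(res))
 *              return PTR_ERR(res);
 *
 *      st = intel_region_ttm_node_to_st(mem, res);
 *      if (IS_ERR(st)) {
 *              intel_region_ttm_node_free(mem, res);
 *              return PTR_ERR(st);
 *      }
 *
 *      ... use st ...
 *
 *      intel_region_ttm_node_free(mem, res);
 */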