linux/drivers/gpu/drm/vmwgfx/vmwgfx_thp.c
// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
 * Huge page-table-entry support for IO memory.
 *
 * Copyright (C) 2007-2019 VMware, Inc. All rights reserved.
 */
#include "vmwgfx_drv.h"
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_range_manager.h>

/**
 * struct vmw_thp_manager - Range manager implementing huge page alignment
 *
 * @manager: TTM resource manager.
 * @mm: The underlying range manager. Protected by @lock.
 * @lock: Manager lock.
 */
struct vmw_thp_manager {
	struct ttm_resource_manager manager;
	struct drm_mm mm;
	spinlock_t lock;
};

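/* Resolve a generic resource manager pointer back to its vmw_thp_manager. */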
static struct vmw_thp_manager *to_thp_manager(struct ttm_resource_manager *man)
{
	return container_of(man, struct vmw_thp_manager, manager);
}

static const struct ttm_resource_manager_func vmw_thp_func;

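/**
 * vmw_thp_insert_aligned - Attempt a huge-page-aligned node insertion
 * @bo: The buffer object being allocated for.
 * @mm: The underlying range manager.
 * @node: The node to insert.
 * @align_pages: The alignment to attempt, in pages.
 * @place: The placement restrictions.
 * @mem: The resource being allocated.
 * @lpfn: The highest page frame number to allocate below.
 * @mode: The drm_mm insertion mode.
 *
 * Insert @node with @align_pages alignment, but only if that alignment
 * is compatible with the buffer object's own page alignment. Otherwise
 * return -ENOSPC so the caller can retry with a smaller alignment.
 */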
static int vmw_thp_insert_aligned(struct ttm_buffer_object *bo,
				  struct drm_mm *mm, struct drm_mm_node *node,
				  unsigned long align_pages,
				  const struct ttm_place *place,
				  struct ttm_resource *mem,
				  unsigned long lpfn,
				  enum drm_mm_insert_mode mode)
{
	if (align_pages >= bo->page_alignment &&
	    (!bo->page_alignment || align_pages % bo->page_alignment == 0)) {
		return drm_mm_insert_node_in_range(mm, node,
						   mem->num_pages,
						   align_pages, 0,
						   place->fpfn, lpfn, mode);
	}

	return -ENOSPC;
}

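/**
 * vmw_thp_get_node - Allocate a range, preferring huge-page alignment
 * @man: The resource manager.
 * @bo: The buffer object being allocated for.
 * @place: The placement restrictions.
 * @res: Assigned the newly allocated resource on success.
 *
 * Try a PUD-aligned placement first where the architecture supports
 * huge PUD entries, then a PMD-aligned one, and finally fall back to
 * the buffer object's own page alignment.
 */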
static int vmw_thp_get_node(struct ttm_resource_manager *man,
			    struct ttm_buffer_object *bo,
			    const struct ttm_place *place,
			    struct ttm_resource **res)
{
	struct vmw_thp_manager *rman = to_thp_manager(man);
	struct drm_mm *mm = &rman->mm;
	struct ttm_range_mgr_node *node;
	unsigned long align_pages;
	unsigned long lpfn;
	enum drm_mm_insert_mode mode = DRM_MM_INSERT_BEST;
	int ret;

	node = kzalloc(struct_size(node, mm_nodes, 1), GFP_KERNEL);
	if (!node)
		return -ENOMEM;

	ttm_resource_init(bo, place, &node->base);

	lpfn = place->lpfn;
	if (!lpfn)
		lpfn = man->size;

	mode = DRM_MM_INSERT_BEST;
	if (place->flags & TTM_PL_FLAG_TOPDOWN)
		mode = DRM_MM_INSERT_HIGH;

	spin_lock(&rman->lock);
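	/*
	 * Try the largest alignment first: a PUD-sized huge page is,
	 * e.g., 1 GiB on x86-64 with 4 KiB base pages, so align_pages
	 * works out to 262144 pages there.
	 */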
	if (IS_ENABLED(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)) {
		align_pages = (HPAGE_PUD_SIZE >> PAGE_SHIFT);
		if (node->base.num_pages >= align_pages) {
			ret = vmw_thp_insert_aligned(bo, mm, &node->mm_nodes[0],
						     align_pages, place,
						     &node->base, lpfn, mode);
			if (!ret)
				goto found_unlock;
		}
	}

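	/*
	 * Fall back to PMD alignment: e.g. 2 MiB, i.e. 512 4-KiB pages,
	 * on x86-64.
	 */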
	align_pages = (HPAGE_PMD_SIZE >> PAGE_SHIFT);
	if (node->base.num_pages >= align_pages) {
		ret = vmw_thp_insert_aligned(bo, mm, &node->mm_nodes[0],
					     align_pages, place, &node->base,
					     lpfn, mode);
		if (!ret)
			goto found_unlock;
	}

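	/* No huge-page-aligned fit; use the plain BO page alignment. */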
	ret = drm_mm_insert_node_in_range(mm, &node->mm_nodes[0],
					  node->base.num_pages,
					  bo->page_alignment, 0,
					  place->fpfn, lpfn, mode);
found_unlock:
	spin_unlock(&rman->lock);

	if (unlikely(ret)) {
		kfree(node);
	} else {
		node->base.start = node->mm_nodes[0].start;
		*res = &node->base;
	}

	return ret;
}

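/**
 * vmw_thp_put_node - Free a range allocated by vmw_thp_get_node
 * @man: The resource manager.
 * @res: The resource to free.
 */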
static void vmw_thp_put_node(struct ttm_resource_manager *man,
			     struct ttm_resource *res)
{
	struct ttm_range_mgr_node *node = to_ttm_range_mgr_node(res);
	struct vmw_thp_manager *rman = to_thp_manager(man);

	spin_lock(&rman->lock);
	drm_mm_remove_node(&node->mm_nodes[0]);
	spin_unlock(&rman->lock);

	kfree(node);
}

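/**
 * vmw_thp_init - Set up the huge-page-aware VRAM range manager
 * @dev_priv: Pointer to the device private structure.
 *
 * Initialize the manager to span all of VRAM in pages and register
 * it as the TTM_PL_VRAM resource manager.
 *
 * Return: 0 on success, negative error code otherwise.
 */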
int vmw_thp_init(struct vmw_private *dev_priv)
{
	struct vmw_thp_manager *rman;

	rman = kzalloc(sizeof(*rman), GFP_KERNEL);
	if (!rman)
		return -ENOMEM;

	ttm_resource_manager_init(&rman->manager,
				  dev_priv->vram_size >> PAGE_SHIFT);

	rman->manager.func = &vmw_thp_func;
	drm_mm_init(&rman->mm, 0, rman->manager.size);
	spin_lock_init(&rman->lock);

	ttm_set_driver_manager(&dev_priv->bdev, TTM_PL_VRAM, &rman->manager);
	ttm_resource_manager_set_used(&rman->manager, true);
	return 0;
}

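/**
 * vmw_thp_fini - Tear down the huge-page-aware VRAM range manager
 * @dev_priv: Pointer to the device private structure.
 *
 * Evict all buffers from VRAM before taking down the underlying
 * range manager. If eviction fails, bail out early and leave the
 * manager registered.
 */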
void vmw_thp_fini(struct vmw_private *dev_priv)
{
	struct ttm_resource_manager *man = ttm_manager_type(&dev_priv->bdev, TTM_PL_VRAM);
	struct vmw_thp_manager *rman = to_thp_manager(man);
	struct drm_mm *mm = &rman->mm;
	int ret;

	ttm_resource_manager_set_used(man, false);

	ret = ttm_resource_manager_evict_all(&dev_priv->bdev, man);
	if (ret)
		return;
	spin_lock(&rman->lock);
	drm_mm_clean(mm);
	drm_mm_takedown(mm);
	spin_unlock(&rman->lock);
	ttm_resource_manager_cleanup(man);
	ttm_set_driver_manager(&dev_priv->bdev, TTM_PL_VRAM, NULL);
	kfree(rman);
}

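/**
 * vmw_thp_debug - Dump the range manager state to a printer
 * @man: The resource manager.
 * @printer: The drm printer to dump to.
 */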
static void vmw_thp_debug(struct ttm_resource_manager *man,
			  struct drm_printer *printer)
{
	struct vmw_thp_manager *rman = to_thp_manager(man);

	spin_lock(&rman->lock);
	drm_mm_print(&rman->mm, printer);
	spin_unlock(&rman->lock);
}

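/* TTM resource manager callbacks wiring up the functions above. */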
static const struct ttm_resource_manager_func vmw_thp_func = {
	.alloc = vmw_thp_get_node,
	.free = vmw_thp_put_node,
	.debug = vmw_thp_debug
};