linux/drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.c
// SPDX-License-Identifier: MIT
/*
 * Copyright © 2014-2018 Intel Corporation
 */

#include "gem/i915_gem_object.h"

#include "i915_drv.h"
#include "intel_engine_pm.h"
#include "intel_gt_buffer_pool.h"

static struct intel_gt *to_gt(struct intel_gt_buffer_pool *pool)
{
        return container_of(pool, struct intel_gt, buffer_pool);
}

static struct list_head *
bucket_for_size(struct intel_gt_buffer_pool *pool, size_t sz)
{
        int n;

        /*
         * Compute a power-of-two bucket, but throw everything greater than
         * 16KiB into the same bucket: i.e. the buckets hold objects of
         * (1 page, 2 pages, 4 pages, 8+ pages).
         */
        n = fls(sz >> PAGE_SHIFT) - 1;
        if (n >= ARRAY_SIZE(pool->cache_list))
                n = ARRAY_SIZE(pool->cache_list) - 1;

        return &pool->cache_list[n];
}

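/* Release the node's backing object and RCU-defer freeing of the node itself */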
static void node_free(struct intel_gt_buffer_pool_node *node)
{
        i915_gem_object_put(node->obj);
        i915_active_fini(&node->active);
        kfree_rcu(node, rcu);
}

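/*
 * Walk each bucket from the tail (oldest) and reap nodes that have sat
 * idle for longer than @keep jiffies. Zeroing node->age with xchg()
 * claims a node against a concurrent intel_gt_get_buffer_pool(); the
 * claimed run is then unlinked in one go and freed outside the lock.
 * Returns true while any bucket still holds nodes.
 */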
static bool pool_free_older_than(struct intel_gt_buffer_pool *pool, long keep)
{
        struct intel_gt_buffer_pool_node *node, *stale = NULL;
        bool active = false;
        int n;

        /* Free buffers that have not been used in the past second */
        for (n = 0; n < ARRAY_SIZE(pool->cache_list); n++) {
                struct list_head *list = &pool->cache_list[n];

                if (list_empty(list))
                        continue;

                if (spin_trylock_irq(&pool->lock)) {
                        struct list_head *pos;

                        /* Most recent at head; oldest at tail */
                        list_for_each_prev(pos, list) {
                                unsigned long age;

                                node = list_entry(pos, typeof(*node), link);

                                age = READ_ONCE(node->age);
                                if (!age || jiffies - age < keep)
                                        break;

                                /* Check we are the first to claim this node */
                                if (!xchg(&node->age, 0))
                                        break;

                                node->free = stale;
                                stale = node;
                        }
                        if (!list_is_last(pos, list))
                                __list_del_many(pos, list);

                        spin_unlock_irq(&pool->lock);
                }

                active |= !list_empty(list);
        }

        while ((node = stale)) {
                stale = stale->free;
                node_free(node);
        }

        return active;
}

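/*
 * Delayed worker that frees any node unused for at least a second,
 * rescheduling itself while the buckets remain populated.
 */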
static void pool_free_work(struct work_struct *wrk)
{
        struct intel_gt_buffer_pool *pool =
                container_of(wrk, typeof(*pool), work.work);

        if (pool_free_older_than(pool, HZ))
                schedule_delayed_work(&pool->work,
                                      round_jiffies_up_relative(HZ));
}

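/*
 * i915_active retirement callback: unpin the pages and let the shrinker
 * purge the object again, then put the node back on its size bucket
 * with a fresh timestamp and kick the delayed reaper.
 */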
static void pool_retire(struct i915_active *ref)
{
        struct intel_gt_buffer_pool_node *node =
                container_of(ref, typeof(*node), active);
        struct intel_gt_buffer_pool *pool = node->pool;
        struct list_head *list = bucket_for_size(pool, node->obj->base.size);
        unsigned long flags;

        if (node->pinned) {
                i915_gem_object_unpin_pages(node->obj);

                /* Return this object to the shrinker pool */
                i915_gem_object_make_purgeable(node->obj);
                node->pinned = false;
        }

        GEM_BUG_ON(node->age);
        spin_lock_irqsave(&pool->lock, flags);
        list_add_rcu(&node->link, list);
        WRITE_ONCE(node->age, jiffies ?: 1); /* 0 reserved for active nodes */
        spin_unlock_irqrestore(&pool->lock, flags);

        schedule_delayed_work(&pool->work,
                              round_jiffies_up_relative(HZ));
}

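/*
 * Pin the object's pages for the lifetime of the request and hide it
 * from the shrinker; pool_retire() reverses both once the work is done.
 */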
void intel_gt_buffer_pool_mark_used(struct intel_gt_buffer_pool_node *node)
{
        assert_object_held(node->obj);

        if (node->pinned)
                return;

        __i915_gem_object_pin_pages(node->obj);
        /* Hide this pinned object from the shrinker until retired */
        i915_gem_object_make_unshrinkable(node->obj);
        node->pinned = true;
}

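/* Allocate a new node backed by an internal object of @sz bytes */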
static struct intel_gt_buffer_pool_node *
node_create(struct intel_gt_buffer_pool *pool, size_t sz,
            enum i915_map_type type)
{
        struct intel_gt *gt = to_gt(pool);
        struct intel_gt_buffer_pool_node *node;
        struct drm_i915_gem_object *obj;

        node = kmalloc(sizeof(*node),
                       GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
        if (!node)
                return ERR_PTR(-ENOMEM);

        node->age = 0;
        node->pool = pool;
        node->pinned = false;
        i915_active_init(&node->active, NULL, pool_retire, 0);

        obj = i915_gem_object_create_internal(gt->i915, sz);
        if (IS_ERR(obj)) {
                i915_active_fini(&node->active);
                kfree(node);
                return ERR_CAST(obj);
        }

        i915_gem_object_set_readonly(obj);

        node->type = type;
        node->obj = obj;
        return node;
}

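/*
 * Search the size bucket under the RCU read lock for an idle node of a
 * matching type, claiming it by flipping node->age to zero with
 * cmpxchg(); fall back to allocating a fresh node if none is found.
 * The returned node carries an active reference for the caller.
 */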
struct intel_gt_buffer_pool_node *
intel_gt_get_buffer_pool(struct intel_gt *gt, size_t size,
                         enum i915_map_type type)
{
        struct intel_gt_buffer_pool *pool = &gt->buffer_pool;
        struct intel_gt_buffer_pool_node *node;
        struct list_head *list;
        int ret;

        size = PAGE_ALIGN(size);
        list = bucket_for_size(pool, size);

        rcu_read_lock();
        list_for_each_entry_rcu(node, list, link) {
                unsigned long age;

                if (node->obj->base.size < size)
                        continue;

                if (node->type != type)
                        continue;

                age = READ_ONCE(node->age);
                if (!age)
                        continue;

                if (cmpxchg(&node->age, age, 0) == age) {
                        spin_lock_irq(&pool->lock);
                        list_del_rcu(&node->link);
                        spin_unlock_irq(&pool->lock);
                        break;
                }
        }
        rcu_read_unlock();

        if (&node->link == list) {
                node = node_create(pool, size, type);
                if (IS_ERR(node))
                        return node;
        }

        ret = i915_active_acquire(&node->active);
        if (ret) {
                node_free(node);
                return ERR_PTR(ret);
        }

        return node;
}

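/* Prepare the empty buckets and the delayed reaper worker */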
void intel_gt_init_buffer_pool(struct intel_gt *gt)
{
        struct intel_gt_buffer_pool *pool = &gt->buffer_pool;
        int n;

        spin_lock_init(&pool->lock);
        for (n = 0; n < ARRAY_SIZE(pool->cache_list); n++)
                INIT_LIST_HEAD(&pool->cache_list[n]);
        INIT_DELAYED_WORK(&pool->work, pool_free_work);
}

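/*
 * Drain the pool: free every idle node (keep == 0) and cancel the
 * reaper, looping because the worker may requeue itself while nodes
 * are still being retired back onto the buckets.
 */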
void intel_gt_flush_buffer_pool(struct intel_gt *gt)
{
        struct intel_gt_buffer_pool *pool = &gt->buffer_pool;

        do {
                while (pool_free_older_than(pool, 0))
                        ;
        } while (cancel_delayed_work_sync(&pool->work));
}

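/*
 * Final teardown: flush the pool and assert that every bucket is now
 * empty, i.e. no node is still in flight with an active reference.
 */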
void intel_gt_fini_buffer_pool(struct intel_gt *gt)
{
        struct intel_gt_buffer_pool *pool = &gt->buffer_pool;
        int n;

        intel_gt_flush_buffer_pool(gt);

        for (n = 0; n < ARRAY_SIZE(pool->cache_list); n++)
                GEM_BUG_ON(!list_empty(&pool->cache_list[n]));
}