linux/drivers/gpu/drm/msm/msm_gem_shrinker.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2016 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 */

#include "msm_drv.h"
#include "msm_gem.h"

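/*
 * Try to take struct_mutex on behalf of a shrinker callback.  Returns
 * false if the lock is contended (the caller should back off); on
 * success, *unlock tells the caller whether it acquired the lock itself
 * (and so must drop it) or is running in a context that already holds it.
 */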
static bool msm_gem_shrinker_lock(struct drm_device *dev, bool *unlock)
{
        /* NOTE: we are *closer* to being able to get rid of
         * mutex_trylock_recursive().  The msm_gem code itself does
         * not need struct_mutex, although code paths that can trigger
         * the shrinker are still called while holding struct_mutex.
         *
         * Also, msm_obj->madv is protected by struct_mutex.
         *
         * The next step is probably to split out a separate lock for
         * protecting inactive_list, so that the shrinker does not need
         * struct_mutex.
         */
        switch (mutex_trylock_recursive(&dev->struct_mutex)) {
        case MUTEX_TRYLOCK_FAILED:
                return false;

        case MUTEX_TRYLOCK_SUCCESS:
                *unlock = true;
                return true;

        case MUTEX_TRYLOCK_RECURSIVE:
                *unlock = false;
                return true;
        }

        BUG();
}

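/*
 * Shrinker "count" callback: report the number of pages that could be
 * reclaimed by purging the purgeable objects on the inactive list.
 */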
static unsigned long
msm_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
{
        struct msm_drm_private *priv =
                container_of(shrinker, struct msm_drm_private, shrinker);
        struct drm_device *dev = priv->dev;
        struct msm_gem_object *msm_obj;
        unsigned long count = 0;
        bool unlock;

        if (!msm_gem_shrinker_lock(dev, &unlock))
                return 0;

        list_for_each_entry(msm_obj, &priv->inactive_list, mm_list) {
                if (is_purgeable(msm_obj))
                        count += msm_obj->base.size >> PAGE_SHIFT;
        }

        if (unlock)
                mutex_unlock(&dev->struct_mutex);

        return count;
}

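/*
 * Shrinker "scan" callback: purge purgeable objects from the inactive
 * list until at least sc->nr_to_scan pages have been freed, and return
 * the number of pages actually reclaimed.  Returning SHRINK_STOP when
 * the lock cannot be taken tells the core to stop scanning this
 * shrinker for now rather than retrying.
 */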
static unsigned long
msm_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
{
        struct msm_drm_private *priv =
                container_of(shrinker, struct msm_drm_private, shrinker);
        struct drm_device *dev = priv->dev;
        struct msm_gem_object *msm_obj;
        unsigned long freed = 0;
        bool unlock;

        if (!msm_gem_shrinker_lock(dev, &unlock))
                return SHRINK_STOP;

        list_for_each_entry(msm_obj, &priv->inactive_list, mm_list) {
                if (freed >= sc->nr_to_scan)
                        break;
                if (is_purgeable(msm_obj)) {
                        msm_gem_purge(&msm_obj->base, OBJ_LOCK_SHRINKER);
                        freed += msm_obj->base.size >> PAGE_SHIFT;
                }
        }

        if (unlock)
                mutex_unlock(&dev->struct_mutex);

        if (freed > 0)
                pr_info_ratelimited("Purging %lu bytes\n", freed << PAGE_SHIFT);

        return freed;
}

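/*
 * vmap purge notifier: called when the kernel is running low on vmalloc
 * space.  Vunmaps a handful of inactive objects and reports the count
 * back through the notifier's data pointer so the caller can tell
 * whether progress was made.
 */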
static int
msm_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr)
{
        struct msm_drm_private *priv =
                container_of(nb, struct msm_drm_private, vmap_notifier);
        struct drm_device *dev = priv->dev;
        struct msm_gem_object *msm_obj;
        unsigned unmapped = 0;
        bool unlock;

        if (!msm_gem_shrinker_lock(dev, &unlock))
                return NOTIFY_DONE;

        list_for_each_entry(msm_obj, &priv->inactive_list, mm_list) {
                if (is_vunmapable(msm_obj)) {
                        msm_gem_vunmap(&msm_obj->base, OBJ_LOCK_SHRINKER);
                        /* since we don't know any better, let's bail after
                         * a few and if necessary the shrinker will be
                         * invoked again.  Seems better than unmapping
                         * *everything*.
                         */
                        if (++unmapped >= 15)
                                break;
                }
        }

        if (unlock)
                mutex_unlock(&dev->struct_mutex);

        *(unsigned long *)ptr += unmapped;

        if (unmapped > 0)
                pr_info_ratelimited("Purging %u vmaps\n", unmapped);

        return NOTIFY_DONE;
}

/**
 * msm_gem_shrinker_init - Initialize msm shrinker
 * @dev: drm device
 *
 * This function registers and sets up the msm shrinker.
 */
void msm_gem_shrinker_init(struct drm_device *dev)
{
        struct msm_drm_private *priv = dev->dev_private;

        priv->shrinker.count_objects = msm_gem_shrinker_count;
        priv->shrinker.scan_objects = msm_gem_shrinker_scan;
        priv->shrinker.seeks = DEFAULT_SEEKS;
        WARN_ON(register_shrinker(&priv->shrinker));

        priv->vmap_notifier.notifier_call = msm_gem_shrinker_vmap;
        WARN_ON(register_vmap_purge_notifier(&priv->vmap_notifier));
}
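
/*
 * Usage sketch (not part of this file): these entry points are expected
 * to be called once from the driver's load/unload paths, e.g. from
 * msm_drv.c:
 *
 *        msm_gem_shrinker_init(ddev);    // after priv->inactive_list is set up
 *        ...
 *        msm_gem_shrinker_cleanup(ddev); // before the device is torn down
 */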

/**
 * msm_gem_shrinker_cleanup - Clean up msm shrinker
 * @dev: drm device
 *
 * This function unregisters the msm shrinker.
 */
void msm_gem_shrinker_cleanup(struct drm_device *dev)
{
        struct msm_drm_private *priv = dev->dev_private;

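        /*
         * register_shrinker() allocates ->nr_deferred; if it is NULL the
         * shrinker was never successfully registered, so there is nothing
         * to unregister.
         */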
        if (priv->shrinker.nr_deferred) {
                WARN_ON(unregister_vmap_purge_notifier(&priv->vmap_notifier));
                unregister_shrinker(&priv->shrinker);
        }
}
 164