linux/mm/swap_slots.c
/*
 * Manage cache of swap slots to be used for and returned from
 * swap.
 *
 * Copyright(c) 2016 Intel Corporation.
 *
 * Author: Tim Chen <tim.c.chen@linux.intel.com>
 *
 * We allocate the swap slots from the global pool and put
 * them into local per cpu caches.  This has the advantage
 * of not needing to acquire the swap_info lock every time
 * we need a new slot.
 *
 * There is also the opportunity to simply return a slot
 * to the local cache without needing to acquire the
 * swap_info lock.  We do not reuse the returned slots directly
 * but move them back to the global pool in a batch.  This
 * allows the slots to coalesce and reduces fragmentation.
 *
 * The allocated swap entry is marked with the SWAP_HAS_CACHE
 * flag in its swap_map count, which prevents it from being
 * allocated again from the global pool.
 *
 * The swap slots cache is protected by a mutex instead of
 * a spin lock: when we search for slots with scan_swap_map,
 * we can possibly sleep.
 */

#include <linux/swap_slots.h>
#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/vmalloc.h>
#include <linux/mutex.h>
#include <linux/mm.h>

#ifdef CONFIG_SWAP

static DEFINE_PER_CPU(struct swap_slots_cache, swp_slots);
static bool	swap_slot_cache_active;
bool	swap_slot_cache_enabled;
static bool	swap_slot_cache_initialized;
DEFINE_MUTEX(swap_slots_cache_mutex);
/* Serialize swap slots cache enable/disable operations */
DEFINE_MUTEX(swap_slots_cache_enable_mutex);

static void __drain_swap_slots_cache(unsigned int type);
static void deactivate_swap_slots_cache(void);
static void reactivate_swap_slots_cache(void);

#define use_swap_slot_cache (swap_slot_cache_active && \
		swap_slot_cache_enabled && swap_slot_cache_initialized)
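/*
 * Flags passed to __drain_swap_slots_cache(): drain the per-cpu
 * allocation cache (slots), the per-cpu return cache (slots_ret),
 * or both.
 */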
#define SLOTS_CACHE 0x1
#define SLOTS_CACHE_RET 0x2

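/*
 * Mark the cache inactive and flush every per-cpu cache back to the
 * global pool, so no slots are held locally while the cache is off.
 */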
static void deactivate_swap_slots_cache(void)
{
	mutex_lock(&swap_slots_cache_mutex);
	swap_slot_cache_active = false;
	__drain_swap_slots_cache(SLOTS_CACHE|SLOTS_CACHE_RET);
	mutex_unlock(&swap_slots_cache_mutex);
}

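/*
 * Mark the cache active again.  The per-cpu arrays are refilled
 * lazily, by the next get_swap_page() that finds the cache empty.
 */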
static void reactivate_swap_slots_cache(void)
{
	mutex_lock(&swap_slots_cache_mutex);
	swap_slot_cache_active = true;
	mutex_unlock(&swap_slots_cache_mutex);
}

/*
 * Must not be called with the cpu hot plug lock held.
 *
 * Takes swap_slots_cache_enable_mutex and returns with it still held;
 * the matching release is reenable_swap_slots_cache_unlock().
 */
void disable_swap_slots_cache_lock(void)
{
	mutex_lock(&swap_slots_cache_enable_mutex);
	swap_slot_cache_enabled = false;
	if (swap_slot_cache_initialized) {
		/* serialize with cpu hotplug operations */
		get_online_cpus();
		__drain_swap_slots_cache(SLOTS_CACHE|SLOTS_CACHE_RET);
		put_online_cpus();
	}
}

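/*
 * The cache may be re-enabled only while there is usable swap;
 * with no swap device left, has_usable_swap() keeps it disabled.
 */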
static void __reenable_swap_slots_cache(void)
{
	swap_slot_cache_enabled = has_usable_swap();
}

void reenable_swap_slots_cache_unlock(void)
{
	__reenable_swap_slots_cache();
	mutex_unlock(&swap_slots_cache_enable_mutex);
}

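/*
 * Activate the cache only when free swap is plentiful (more than
 * THRESHOLD_ACTIVATE_SWAP_SLOTS_CACHE pages per online cpu), and
 * deactivate it once free swap drops below the lower
 * THRESHOLD_DEACTIVATE_SWAP_SLOTS_CACHE watermark.  The gap between
 * the two thresholds provides hysteresis, so the cache does not
 * flip-flop when free swap hovers around a single cutoff.
 */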
static bool check_cache_active(void)
{
	long pages;

	if (!swap_slot_cache_enabled || !swap_slot_cache_initialized)
		return false;

	pages = get_nr_swap_pages();
	if (!swap_slot_cache_active) {
		if (pages > num_online_cpus() *
		    THRESHOLD_ACTIVATE_SWAP_SLOTS_CACHE)
			reactivate_swap_slots_cache();
		goto out;
	}

	/* if the global pool of free swap slots is too low, deactivate the cache */
	if (pages < num_online_cpus() * THRESHOLD_DEACTIVATE_SWAP_SLOTS_CACHE)
		deactivate_swap_slots_cache();
out:
	return swap_slot_cache_active;
}

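/*
 * cpu hotplug "online" callback, registered in enable_swap_slots_cache():
 * allocate both per-cpu slot arrays for @cpu, then publish them under
 * swap_slots_cache_mutex.
 */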
static int alloc_swap_slot_cache(unsigned int cpu)
{
	struct swap_slots_cache *cache;
	swp_entry_t *slots, *slots_ret;

	/*
	 * Do allocation outside swap_slots_cache_mutex
	 * as kvzalloc could trigger reclaim and get_swap_page,
	 * which can lock swap_slots_cache_mutex.
	 */
	slots = kvzalloc(sizeof(swp_entry_t) * SWAP_SLOTS_CACHE_SIZE,
			 GFP_KERNEL);
	if (!slots)
		return -ENOMEM;

	slots_ret = kvzalloc(sizeof(swp_entry_t) * SWAP_SLOTS_CACHE_SIZE,
			     GFP_KERNEL);
	if (!slots_ret) {
		kvfree(slots);
		return -ENOMEM;
	}

	mutex_lock(&swap_slots_cache_mutex);
	cache = &per_cpu(swp_slots, cpu);
	if (cache->slots || cache->slots_ret)
		/* cache already allocated */
		goto out;
	if (!cache->lock_initialized) {
		mutex_init(&cache->alloc_lock);
		spin_lock_init(&cache->free_lock);
		cache->lock_initialized = true;
	}
	cache->nr = 0;
	cache->cur = 0;
	cache->n_ret = 0;
	cache->slots = slots;
	slots = NULL;
	cache->slots_ret = slots_ret;
	slots_ret = NULL;
out:
	mutex_unlock(&swap_slots_cache_mutex);
	if (slots)
		kvfree(slots);
	if (slots_ret)
		kvfree(slots_ret);
	return 0;
}

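/*
 * Flush one cpu's caches back to the global pool and, if @free_slots
 * is set, free the arrays themselves.  The allocation side is taken
 * under the sleepable alloc_lock mutex, the return side under the
 * irq-disabling free_lock spinlock, matching their respective users.
 */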
static void drain_slots_cache_cpu(unsigned int cpu, unsigned int type,
				  bool free_slots)
{
	struct swap_slots_cache *cache;
	swp_entry_t *slots = NULL;

	cache = &per_cpu(swp_slots, cpu);
	if ((type & SLOTS_CACHE) && cache->slots) {
		mutex_lock(&cache->alloc_lock);
		swapcache_free_entries(cache->slots + cache->cur, cache->nr);
		cache->cur = 0;
		cache->nr = 0;
		if (free_slots && cache->slots) {
			kvfree(cache->slots);
			cache->slots = NULL;
		}
		mutex_unlock(&cache->alloc_lock);
	}
	if ((type & SLOTS_CACHE_RET) && cache->slots_ret) {
		spin_lock_irq(&cache->free_lock);
		swapcache_free_entries(cache->slots_ret, cache->n_ret);
		cache->n_ret = 0;
		if (free_slots && cache->slots_ret) {
			slots = cache->slots_ret;
			cache->slots_ret = NULL;
		}
		spin_unlock_irq(&cache->free_lock);
		if (slots)
			kvfree(slots);
	}
}

static void __drain_swap_slots_cache(unsigned int type)
{
	unsigned int cpu;

	/*
	 * This function is called during
	 *	1) swapoff, when we have to make sure no
	 *	   left over slots are in cache when we remove
	 *	   a swap device;
	 *	2) disabling of swap slot cache, when we run low
	 *	   on swap slots when allocating memory and need
	 *	   to return swap slots to global pool.
	 *
	 * We cannot acquire the cpu hot plug lock here, as
	 * this function can be invoked in the cpu
	 * hot plug path:
	 * cpu_up -> lock cpu_hotplug -> cpu hotplug state callback
	 *   -> memory allocation -> direct reclaim -> get_swap_page
	 *   -> drain_swap_slots_cache
	 *
	 * Hence the loop over the currently online cpus below could
	 * miss a cpu that is being brought online but is not yet marked
	 * as online.  That is okay, as we do not schedule and run
	 * anything on a cpu before it has been marked online.  Hence,
	 * we will not fill any swap slots in the slots cache of such a
	 * cpu.  There are no slots on such a cpu that need to be drained.
	 */
	for_each_online_cpu(cpu)
		drain_slots_cache_cpu(cpu, type, false);
}

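/*
 * cpu hotplug teardown callback: drain and free both per-cpu arrays
 * when a cpu goes offline.
 */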
static int free_slot_cache(unsigned int cpu)
{
	mutex_lock(&swap_slots_cache_mutex);
	drain_slots_cache_cpu(cpu, SLOTS_CACHE | SLOTS_CACHE_RET, true);
	mutex_unlock(&swap_slots_cache_mutex);
	return 0;
}

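/*
 * Called from the swapon path.  The first call registers the cpu
 * hotplug callbacks that allocate a cache for each online cpu;
 * subsequent calls only re-evaluate whether the cache should be
 * enabled.
 */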
int enable_swap_slots_cache(void)
{
	int ret = 0;

	mutex_lock(&swap_slots_cache_enable_mutex);
	if (swap_slot_cache_initialized) {
		__reenable_swap_slots_cache();
		goto out_unlock;
	}

	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "swap_slots_cache",
				alloc_swap_slot_cache, free_slot_cache);
	if (WARN_ONCE(ret < 0, "Cache allocation failed (%s), operating "
			       "without swap slots cache.\n", __func__))
		goto out_unlock;

	swap_slot_cache_initialized = true;
	__reenable_swap_slots_cache();
out_unlock:
	mutex_unlock(&swap_slots_cache_enable_mutex);
	return 0;
}

/*
 * Refill this cpu's allocation cache from the global pool in one batch.
 * Returns the number of slots obtained, or 0 if the cache is unusable
 * or still holds entries.  Called with the cache's alloc_lock held.
 */
static int refill_swap_slots_cache(struct swap_slots_cache *cache)
{
	if (!use_swap_slot_cache || cache->nr)
		return 0;

	cache->cur = 0;
	if (swap_slot_cache_active)
		cache->nr = get_swap_pages(SWAP_SLOTS_CACHE_SIZE, cache->slots);

	return cache->nr;
}

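/*
 * Return one swap entry to the cache.  Entries accumulate in this
 * cpu's slots_ret array and are flushed to the global pool in one
 * batch once SWAP_SLOTS_CACHE_SIZE of them have been collected; if
 * the cache is unusable, the entry is freed directly.
 */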
int free_swap_slot(swp_entry_t entry)
{
	struct swap_slots_cache *cache;

	cache = &get_cpu_var(swp_slots);
	if (use_swap_slot_cache && cache->slots_ret) {
		spin_lock_irq(&cache->free_lock);
		/* Swap slots cache may be deactivated before acquiring lock */
		if (!use_swap_slot_cache) {
			spin_unlock_irq(&cache->free_lock);
			goto direct_free;
		}
		if (cache->n_ret >= SWAP_SLOTS_CACHE_SIZE) {
			/*
			 * Return slots to global pool.
			 * The current swap_map value is SWAP_HAS_CACHE.
			 * Set it to 0 to indicate it is available for
			 * allocation in global pool
			 */
			swapcache_free_entries(cache->slots_ret, cache->n_ret);
			cache->n_ret = 0;
		}
		cache->slots_ret[cache->n_ret++] = entry;
		spin_unlock_irq(&cache->free_lock);
	} else {
direct_free:
		swapcache_free_entries(&entry, 1);
	}
	put_cpu_var(swp_slots);

	return 0;
}

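/*
 * Allocate one swap entry, preferring this cpu's slot cache and
 * refilling it in batch when empty; fall back to a direct single-slot
 * allocation from the global pool when the cache is unusable.
 */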
swp_entry_t get_swap_page(void)
{
	swp_entry_t entry, *pentry;
	struct swap_slots_cache *cache;

	/*
	 * Preemption is allowed here, because we may sleep
	 * in refill_swap_slots_cache().  But it is safe, because
	 * accesses to the per-CPU data structure are protected by the
	 * mutex cache->alloc_lock.
	 *
	 * The alloc path here does not touch cache->slots_ret
	 * so cache->free_lock is not taken.
	 */
	cache = raw_cpu_ptr(&swp_slots);

	entry.val = 0;
	if (check_cache_active()) {
		mutex_lock(&cache->alloc_lock);
		if (cache->slots) {
repeat:
			if (cache->nr) {
				pentry = &cache->slots[cache->cur++];
				entry = *pentry;
				pentry->val = 0;
				cache->nr--;
			} else {
				if (refill_swap_slots_cache(cache))
					goto repeat;
			}
		}
		mutex_unlock(&cache->alloc_lock);
		if (entry.val)
			return entry;
	}

	get_swap_pages(1, &entry);

	return entry;
}

#endif /* CONFIG_SWAP */