linux/mm/kasan/quarantine.c
/*
 * KASAN quarantine.
 *
 * Author: Alexander Potapenko <glider@google.com>
 * Copyright (C) 2016 Google, Inc.
 *
 * Based on code by Dmitry Chernenkov.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 */

#include <linux/gfp.h>
#include <linux/hash.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/percpu.h>
#include <linux/printk.h>
#include <linux/shrinker.h>
#include <linux/slab.h>
#include <linux/srcu.h>
#include <linux/string.h>
#include <linux/types.h>

#include "../slab.h"
#include "kasan.h"

/* Data structure and operations for quarantine queues. */

/*
 * Each queue is a singly linked list, which also stores the total size of
 * the objects it contains.
 */
struct qlist_head {
        struct qlist_node *head;
        struct qlist_node *tail;
        size_t bytes;
};

#define QLIST_INIT { NULL, NULL, 0 }

static bool qlist_empty(struct qlist_head *q)
{
        return !q->head;
}

static void qlist_init(struct qlist_head *q)
{
        q->head = q->tail = NULL;
        q->bytes = 0;
}

static void qlist_put(struct qlist_head *q, struct qlist_node *qlink,
                size_t size)
{
        if (unlikely(qlist_empty(q)))
                q->head = qlink;
        else
                q->tail->next = qlink;
        q->tail = qlink;
        qlink->next = NULL;
        q->bytes += size;
}

static void qlist_move_all(struct qlist_head *from, struct qlist_head *to)
{
        if (unlikely(qlist_empty(from)))
                return;

        if (qlist_empty(to)) {
                *to = *from;
                qlist_init(from);
                return;
        }

        to->tail->next = from->head;
        to->tail = from->tail;
        to->bytes += from->bytes;

        qlist_init(from);
}

#define QUARANTINE_PERCPU_SIZE (1 << 20)
#define QUARANTINE_BATCHES \
        (1024 > 4 * CONFIG_NR_CPUS ? 1024 : 4 * CONFIG_NR_CPUS)
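
/*
 * Sizing sketch (derived from the two macros above): each per-cpu queue
 * is drained into the global quarantine once it exceeds 1 << 20 bytes
 * (1 MB), and the global quarantine is split into
 * max(1024, 4 * CONFIG_NR_CPUS) batches, i.e. 1024 batches for any
 * configuration with CONFIG_NR_CPUS <= 256.
 */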

/*
 * The object quarantine consists of per-cpu queues and a global queue,
 * guarded by quarantine_lock.
 */
static DEFINE_PER_CPU(struct qlist_head, cpu_quarantine);

/* Round-robin FIFO array of batches. */
static struct qlist_head global_quarantine[QUARANTINE_BATCHES];
static int quarantine_head;
static int quarantine_tail;
/* Total size of all objects in global_quarantine across all batches. */
static unsigned long quarantine_size;
static DEFINE_SPINLOCK(quarantine_lock);
DEFINE_STATIC_SRCU(remove_cache_srcu);

/* Maximum size of the global queue. */
static unsigned long quarantine_max_size;

/*
 * Target size of a batch in global_quarantine.
 * Usually equal to QUARANTINE_PERCPU_SIZE unless we have too much RAM.
 */
static unsigned long quarantine_batch_size;

/*
 * The fraction of physical memory the quarantine is allowed to occupy.
 * The quarantine doesn't support the memory shrinker with the SLAB
 * allocator, so we keep the ratio low to avoid OOM.
 */
#define QUARANTINE_FRACTION 32
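
/*
 * Worked example (an illustration, not an additional tunable): on a
 * machine with 4 GB of RAM the quarantine may occupy at most
 * 4 GB / 32 = 128 MB, further reduced by the per-cpu queue limits in
 * quarantine_reduce().
 */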

static struct kmem_cache *qlink_to_cache(struct qlist_node *qlink)
{
        return virt_to_head_page(qlink)->slab_cache;
}

static void *qlink_to_object(struct qlist_node *qlink, struct kmem_cache *cache)
{
        struct kasan_free_meta *free_info =
                container_of(qlink, struct kasan_free_meta,
                             quarantine_link);

        return ((void *)free_info) - cache->kasan_info.free_meta_offset;
}
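
/*
 * Sketch of what qlink_to_object() undoes: the qlink passed in is the
 * quarantine_link field of the object's struct kasan_free_meta, which is
 * stored free_meta_offset bytes from the start of the object (the offset
 * may be 0, with the metadata overlapping the freed payload), so
 * container_of() plus the subtraction above walks back from the list
 * node to the object itself.
 */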

static void qlink_free(struct qlist_node *qlink, struct kmem_cache *cache)
{
        void *object = qlink_to_object(qlink, cache);
        unsigned long flags;

        if (IS_ENABLED(CONFIG_SLAB))
                local_irq_save(flags);

        ___cache_free(cache, object, _THIS_IP_);

        if (IS_ENABLED(CONFIG_SLAB))
                local_irq_restore(flags);
}

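/*
 * Free every object on the list. A NULL cache means the list may mix
 * objects from different caches; each object's cache is then looked up
 * from its page via qlink_to_cache().
 */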
static void qlist_free_all(struct qlist_head *q, struct kmem_cache *cache)
{
        struct qlist_node *qlink;

        if (unlikely(qlist_empty(q)))
                return;

        qlink = q->head;
        while (qlink) {
                struct kmem_cache *obj_cache =
                        cache ? cache : qlink_to_cache(qlink);
                struct qlist_node *next = qlink->next;

                qlink_free(qlink, obj_cache);
                qlink = next;
        }
        qlist_init(q);
}

void quarantine_put(struct kasan_free_meta *info, struct kmem_cache *cache)
{
        unsigned long flags;
        struct qlist_head *q;
        struct qlist_head temp = QLIST_INIT;

        /*
         * Note: irq must be disabled until after we move the batch to the
         * global quarantine. Otherwise quarantine_remove_cache() can miss
         * some objects belonging to the cache if they are in our local temp
         * list. quarantine_remove_cache() executes on_each_cpu() at the
         * beginning which ensures that it either sees the objects in per-cpu
         * lists or in the global quarantine.
         */
        local_irq_save(flags);

        q = this_cpu_ptr(&cpu_quarantine);
        qlist_put(q, &info->quarantine_link, cache->size);
        if (unlikely(q->bytes > QUARANTINE_PERCPU_SIZE)) {
                qlist_move_all(q, &temp);

                spin_lock(&quarantine_lock);
                WRITE_ONCE(quarantine_size, quarantine_size + temp.bytes);
                qlist_move_all(&temp, &global_quarantine[quarantine_tail]);
                if (global_quarantine[quarantine_tail].bytes >=
                                READ_ONCE(quarantine_batch_size)) {
                        int new_tail;

                        new_tail = quarantine_tail + 1;
                        if (new_tail == QUARANTINE_BATCHES)
                                new_tail = 0;
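                        /*
                         * If the ring is full (advancing the tail would
                         * collide with the head), keep appending to the
                         * current batch instead of overwriting the oldest
                         * one.
                         */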
                        if (new_tail != quarantine_head)
                                quarantine_tail = new_tail;
                }
                spin_unlock(&quarantine_lock);
        }

        local_irq_restore(flags);
}
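
/*
 * Call-site note: quarantine_put() runs from the KASAN slab-free path, so
 * a freed object is parked here instead of being returned to the
 * allocator right away; it is actually freed later, from
 * quarantine_reduce() or quarantine_remove_cache(). This delay is what
 * lets KASAN detect use-after-free accesses to recently freed memory.
 */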

void quarantine_reduce(void)
{
        size_t total_size, new_quarantine_size, percpu_quarantines;
        unsigned long flags;
        int srcu_idx;
        struct qlist_head to_free = QLIST_INIT;

        if (likely(READ_ONCE(quarantine_size) <=
                   READ_ONCE(quarantine_max_size)))
                return;

        /*
         * srcu critical section ensures that quarantine_remove_cache()
         * will not miss objects belonging to the cache while they are in our
         * local to_free list. srcu is chosen because (1) it gives us private
         * grace period domain that does not interfere with anything else,
         * and (2) it allows synchronize_srcu() to return without waiting
         * if there are no pending read critical sections (which is the
         * expected case).
         */
        srcu_idx = srcu_read_lock(&remove_cache_srcu);
        spin_lock_irqsave(&quarantine_lock, flags);

        /*
         * Update quarantine size in case of hotplug. Allocate a fraction of
         * the installed memory to quarantine minus per-cpu queue limits.
         */
        total_size = (READ_ONCE(totalram_pages) << PAGE_SHIFT) /
                QUARANTINE_FRACTION;
        percpu_quarantines = QUARANTINE_PERCPU_SIZE * num_online_cpus();
        new_quarantine_size = (total_size < percpu_quarantines) ?
                0 : total_size - percpu_quarantines;
        WRITE_ONCE(quarantine_max_size, new_quarantine_size);
        /* Aim at consuming at most 1/2 of slots in quarantine. */
        WRITE_ONCE(quarantine_batch_size, max((size_t)QUARANTINE_PERCPU_SIZE,
                2 * total_size / QUARANTINE_BATCHES));
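
        /*
         * Plugging in the 4 GB example from above: total_size = 128 MB
         * and, with QUARANTINE_BATCHES = 1024, 2 * 128 MB / 1024 = 256 KB,
         * which is below QUARANTINE_PERCPU_SIZE, so the batch size stays
         * at 1 MB; the 2 * total_size / QUARANTINE_BATCHES term only
         * starts to dominate with roughly 16 GB of RAM or more.
         */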

        if (likely(quarantine_size > quarantine_max_size)) {
                qlist_move_all(&global_quarantine[quarantine_head], &to_free);
                WRITE_ONCE(quarantine_size, quarantine_size - to_free.bytes);
                quarantine_head++;
                if (quarantine_head == QUARANTINE_BATCHES)
                        quarantine_head = 0;
        }

        spin_unlock_irqrestore(&quarantine_lock, flags);

        qlist_free_all(&to_free, NULL);
        srcu_read_unlock(&remove_cache_srcu, srcu_idx);
}

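/*
 * Selectively move the objects belonging to cache from "from" to "to";
 * everything else is relinked back onto "from" in its original order.
 */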
static void qlist_move_cache(struct qlist_head *from,
                             struct qlist_head *to,
                             struct kmem_cache *cache)
{
        struct qlist_node *curr;

        if (unlikely(qlist_empty(from)))
                return;

        curr = from->head;
        qlist_init(from);
        while (curr) {
                struct qlist_node *next = curr->next;
                struct kmem_cache *obj_cache = qlink_to_cache(curr);

                if (obj_cache == cache)
                        qlist_put(to, curr, obj_cache->size);
                else
                        qlist_put(from, curr, obj_cache->size);

                curr = next;
        }
}

static void per_cpu_remove_cache(void *arg)
{
        struct kmem_cache *cache = arg;
        struct qlist_head to_free = QLIST_INIT;
        struct qlist_head *q;

        q = this_cpu_ptr(&cpu_quarantine);
        qlist_move_cache(q, &to_free, cache);
        qlist_free_all(&to_free, cache);
}

/* Free all quarantined objects belonging to cache. */
void quarantine_remove_cache(struct kmem_cache *cache)
{
        unsigned long flags, i;
        struct qlist_head to_free = QLIST_INIT;

        /*
         * Must be careful to not miss any objects that are being moved from
         * per-cpu list to the global quarantine in quarantine_put(),
         * nor objects being freed in quarantine_reduce(). on_each_cpu()
         * achieves the first goal, while synchronize_srcu() achieves the
         * second.
         */
        on_each_cpu(per_cpu_remove_cache, cache, 1);

        spin_lock_irqsave(&quarantine_lock, flags);
        for (i = 0; i < QUARANTINE_BATCHES; i++) {
                if (qlist_empty(&global_quarantine[i]))
                        continue;
                qlist_move_cache(&global_quarantine[i], &to_free, cache);
                /* Scanning whole quarantine can take a while. */
                spin_unlock_irqrestore(&quarantine_lock, flags);
                cond_resched();
                spin_lock_irqsave(&quarantine_lock, flags);
        }
        spin_unlock_irqrestore(&quarantine_lock, flags);

        qlist_free_all(&to_free, cache);

        synchronize_srcu(&remove_cache_srcu);
}