linux/lib/stackdepot.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Generic stack depot for storing stack traces.
 *
 * Some debugging tools need to save stack traces of certain events, which
 * can later be presented to the user. For example, KASAN needs to save the
 * alloc and free stacks for each object, but storing two stack traces per
 * object requires too much memory (e.g. SLUB_DEBUG needs 256 bytes per
 * object for that).
 *
 * Instead, the stack depot maintains a hashtable of unique stacktraces.
 * Since alloc and free stacks repeat a lot, this saves about 100x the
 * space. Stacks are never removed from the depot; they are stored one
 * after another within contiguous slab allocations.
 *
 * Author: Alexander Potapenko <glider@google.com>
 * Copyright (C) 2016 Google, Inc.
 *
 * Based on code by Dmitry Chernenkov.
 */

#include <linux/gfp.h>
#include <linux/jhash.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/percpu.h>
#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/stacktrace.h>
#include <linux/stackdepot.h>
#include <linux/string.h>
#include <linux/types.h>

#define DEPOT_STACK_BITS (sizeof(depot_stack_handle_t) * 8)

#define STACK_ALLOC_NULL_PROTECTION_BITS 1
#define STACK_ALLOC_ORDER 2 /* 'Slab' size order for stack depot, 4 pages */
#define STACK_ALLOC_SIZE (1LL << (PAGE_SHIFT + STACK_ALLOC_ORDER))
#define STACK_ALLOC_ALIGN 4
#define STACK_ALLOC_OFFSET_BITS (STACK_ALLOC_ORDER + PAGE_SHIFT - \
                                        STACK_ALLOC_ALIGN)
#define STACK_ALLOC_INDEX_BITS (DEPOT_STACK_BITS - \
                STACK_ALLOC_NULL_PROTECTION_BITS - STACK_ALLOC_OFFSET_BITS)
#define STACK_ALLOC_SLABS_CAP 8192
#define STACK_ALLOC_MAX_SLABS \
        (((1LL << (STACK_ALLOC_INDEX_BITS)) < STACK_ALLOC_SLABS_CAP) ? \
         (1LL << (STACK_ALLOC_INDEX_BITS)) : STACK_ALLOC_SLABS_CAP)

/* The compact structure that stores a reference to a stack. */
union handle_parts {
        depot_stack_handle_t handle;
        struct {
                u32 slabindex : STACK_ALLOC_INDEX_BITS;
                u32 offset : STACK_ALLOC_OFFSET_BITS;
                u32 valid : STACK_ALLOC_NULL_PROTECTION_BITS;
        };
};
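
/*
 * Worked bit budget (illustrative, assuming PAGE_SHIFT == 12, i.e. 4 KiB
 * pages, and a 32-bit depot_stack_handle_t): the offset field needs
 * STACK_ALLOC_ORDER + PAGE_SHIFT - STACK_ALLOC_ALIGN = 2 + 12 - 4 = 10 bits
 * to address every 16-byte-aligned record in a 16 KiB slab. That leaves
 * 32 - 1 - 10 = 21 bits for the slab index, well above the 13 bits that the
 * STACK_ALLOC_SLABS_CAP of 8192 slabs requires. The single valid bit also
 * guarantees that a live handle is never zero.
 */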

struct stack_record {
        struct stack_record *next;      /* Link in the hashtable */
        u32 hash;                       /* Hash in the hashtable */
        u32 size;                       /* Number of frames in the stack */
        union handle_parts handle;
        unsigned long entries[1];       /* Variable-sized array of entries. */
};

static void *stack_slabs[STACK_ALLOC_MAX_SLABS];

static int depot_index;
static int next_slab_inited;
static size_t depot_offset;
static DEFINE_SPINLOCK(depot_lock);

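/*
 * Install a page preallocated by the caller as backing for the current or
 * the next slab. Returns false only when *prealloc is NULL, i.e. the caller
 * still needs to allocate; returns true otherwise. If the next slab was
 * already initialized, *prealloc is left untouched for the caller to free.
 */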
static bool init_stack_slab(void **prealloc)
{
        if (!*prealloc)
                return false;
        /*
         * This smp_load_acquire() pairs with smp_store_release() to
         * |next_slab_inited| below and in depot_alloc_stack().
         */
        if (smp_load_acquire(&next_slab_inited))
                return true;
        if (stack_slabs[depot_index] == NULL) {
                stack_slabs[depot_index] = *prealloc;
        } else {
                stack_slabs[depot_index + 1] = *prealloc;
                /*
                 * This smp_store_release() pairs with smp_load_acquire()
                 * from |next_slab_inited| above and in stack_depot_save().
                 */
                smp_store_release(&next_slab_inited, 1);
        }
        *prealloc = NULL;
        return true;
}

/* Allocation of a new stack in raw storage */
static struct stack_record *depot_alloc_stack(unsigned long *entries, int size,
                u32 hash, void **prealloc, gfp_t alloc_flags)
{
        int required_size = offsetof(struct stack_record, entries) +
                sizeof(unsigned long) * size;
        struct stack_record *stack;

        required_size = ALIGN(required_size, 1 << STACK_ALLOC_ALIGN);
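
        /*
         * Illustrative sizing (assuming a 64-bit build): a 16-frame trace
         * needs offsetof(struct stack_record, entries) + 16 * 8 =
         * 24 + 128 = 152 bytes, which ALIGN() rounds up to 160 so that
         * depot_offset, and hence handle.offset, always stays a multiple
         * of 1 << STACK_ALLOC_ALIGN (16 bytes).
         */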

        if (unlikely(depot_offset + required_size > STACK_ALLOC_SIZE)) {
                if (unlikely(depot_index + 1 >= STACK_ALLOC_MAX_SLABS)) {
                        WARN_ONCE(1, "Stack depot reached limit capacity");
                        return NULL;
                }
                depot_index++;
                depot_offset = 0;
                /*
                 * smp_store_release() here pairs with smp_load_acquire()
                 * from |next_slab_inited| in stack_depot_save() and
                 * init_stack_slab().
                 */
                if (depot_index + 1 < STACK_ALLOC_MAX_SLABS)
                        smp_store_release(&next_slab_inited, 0);
        }
        init_stack_slab(prealloc);
        if (stack_slabs[depot_index] == NULL)
                return NULL;

        stack = stack_slabs[depot_index] + depot_offset;

        stack->hash = hash;
        stack->size = size;
        stack->handle.slabindex = depot_index;
        stack->handle.offset = depot_offset >> STACK_ALLOC_ALIGN;
        stack->handle.valid = 1;
        memcpy(stack->entries, entries, size * sizeof(unsigned long));
        depot_offset += required_size;

        return stack;
}

#define STACK_HASH_ORDER 20
#define STACK_HASH_SIZE (1L << STACK_HASH_ORDER)
#define STACK_HASH_MASK (STACK_HASH_SIZE - 1)
#define STACK_HASH_SEED 0x9747b28c

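/*
 * Sizing note (illustrative, assuming 64-bit pointers): STACK_HASH_ORDER
 * of 20 gives 1 << 20 buckets, so the table below occupies a fixed 8 MiB
 * of zero-initialized pointer storage in exchange for O(1) average lookups.
 */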
static struct stack_record *stack_table[STACK_HASH_SIZE] = {
        [0 ... STACK_HASH_SIZE - 1] = NULL
};

/* Calculate hash for a stack */
static inline u32 hash_stack(unsigned long *entries, unsigned int size)
{
        return jhash2((u32 *)entries,
                      size * sizeof(unsigned long) / sizeof(u32),
                      STACK_HASH_SEED);
}
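
/*
 * Note (illustrative): jhash2() consumes u32 words, which is why the length
 * above is size * sizeof(unsigned long) / sizeof(u32), e.g. 2 * size words
 * on a 64-bit kernel, where each frame address spans two u32s.
 */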

/*
 * Use our own, non-instrumented version of memcmp(): we only care about
 * equality, not ordering.
 */
static inline
int stackdepot_memcmp(const unsigned long *u1, const unsigned long *u2,
                        unsigned int n)
{
        for ( ; n-- ; u1++, u2++) {
                if (*u1 != *u2)
                        return 1;
        }
        return 0;
}

/* Find a stack that is equal to the one stored in entries in the hash table */
static inline struct stack_record *find_stack(struct stack_record *bucket,
                                             unsigned long *entries, int size,
                                             u32 hash)
{
        struct stack_record *found;

        for (found = bucket; found; found = found->next) {
                if (found->hash == hash &&
                    found->size == size &&
                    !stackdepot_memcmp(entries, found->entries, size))
                        return found;
        }
        return NULL;
}

/**
 * stack_depot_fetch - Fetch stack entries from a depot
 *
 * @handle:             Stack depot handle which was returned from
 *                      stack_depot_save().
 * @entries:            Pointer to store the entries address
 *
 * Return: The number of trace entries for this stack.
 */
unsigned int stack_depot_fetch(depot_stack_handle_t handle,
                               unsigned long **entries)
{
        union handle_parts parts = { .handle = handle };
        void *slab = stack_slabs[parts.slabindex];
        size_t offset = parts.offset << STACK_ALLOC_ALIGN;
        struct stack_record *stack = slab + offset;

        *entries = stack->entries;
        return stack->size;
}
EXPORT_SYMBOL_GPL(stack_depot_fetch);
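
/*
 * A minimal usage sketch (illustrative only: report_handle() is a
 * hypothetical helper, not part of this file). A consumer turns a saved
 * handle back into an array of frames and prints it with
 * stack_trace_print() from <linux/stacktrace.h>.
 */
static __maybe_unused void report_handle(depot_stack_handle_t handle)
{
        unsigned long *entries;
        unsigned int nr_entries;

        if (!handle)            /* a zero handle means nothing was stored */
                return;
        nr_entries = stack_depot_fetch(handle, &entries);
        stack_trace_print(entries, nr_entries, 0);      /* 0 = no indent */
}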

/**
 * stack_depot_save - Save a stack trace from an array
 *
 * @entries:            Pointer to storage array
 * @nr_entries:         Size of the storage array
 * @alloc_flags:        Allocation gfp flags
 *
 * Return: The handle of the stack struct stored in the depot
 */
depot_stack_handle_t stack_depot_save(unsigned long *entries,
                                      unsigned int nr_entries,
                                      gfp_t alloc_flags)
{
        struct stack_record *found = NULL, **bucket;
        depot_stack_handle_t retval = 0;
        struct page *page = NULL;
        void *prealloc = NULL;
        unsigned long flags;
        u32 hash;

        if (unlikely(nr_entries == 0))
                goto fast_exit;

        hash = hash_stack(entries, nr_entries);
        bucket = &stack_table[hash & STACK_HASH_MASK];

        /*
         * Fast path: look the stack trace up without locking.
         * The smp_load_acquire() here pairs with smp_store_release() to
         * |bucket| below.
         */
        found = find_stack(smp_load_acquire(bucket), entries,
                           nr_entries, hash);
        if (found)
                goto exit;

        /*
         * Check if the current or the next stack slab needs to be
         * initialized. If so, allocate the memory now - we won't be able
         * to do that under the lock.
         *
         * The smp_load_acquire() here pairs with smp_store_release() to
         * |next_slab_inited| in depot_alloc_stack() and init_stack_slab().
         */
        if (unlikely(!smp_load_acquire(&next_slab_inited))) {
                /*
                 * Zero out zone modifiers, as we don't have specific zone
                 * requirements. Keep the flags related to allocation in
                 * atomic contexts and I/O.
                 */
                alloc_flags &= ~GFP_ZONEMASK;
                alloc_flags &= (GFP_ATOMIC | GFP_KERNEL);
                alloc_flags |= __GFP_NOWARN;
                page = alloc_pages(alloc_flags, STACK_ALLOC_ORDER);
                if (page)
                        prealloc = page_address(page);
        }

        spin_lock_irqsave(&depot_lock, flags);

        found = find_stack(*bucket, entries, nr_entries, hash);
        if (!found) {
                struct stack_record *new =
                        depot_alloc_stack(entries, nr_entries,
                                          hash, &prealloc, alloc_flags);
                if (new) {
                        new->next = *bucket;
                        /*
                         * This smp_store_release() pairs with
                         * smp_load_acquire() from |bucket| above.
                         */
                        smp_store_release(bucket, new);
                        found = new;
                }
        } else if (prealloc) {
                /*
                 * We didn't need to store this stack trace, but let's keep
                 * the preallocated memory for the future.
                 */
                WARN_ON(!init_stack_slab(&prealloc));
        }

        spin_unlock_irqrestore(&depot_lock, flags);
exit:
        if (prealloc) {
                /* Nobody used this memory, ok to free it. */
                free_pages((unsigned long)prealloc, STACK_ALLOC_ORDER);
        }
        if (found)
                retval = found->handle.handle;
fast_exit:
        return retval;
}
EXPORT_SYMBOL_GPL(stack_depot_save);
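
/*
 * A minimal round-trip sketch (illustrative only: record_current_stack()
 * and STACK_DEPTH are hypothetical names, not part of this file). It
 * captures the current call chain with stack_trace_save() from
 * <linux/stacktrace.h>, deduplicates it in the depot, and keeps only the
 * 32-bit handle; GFP_NOWAIT is one reasonable choice for callers that may
 * run in atomic context.
 */
#define STACK_DEPTH 64

static __maybe_unused depot_stack_handle_t record_current_stack(void)
{
        unsigned long entries[STACK_DEPTH];
        unsigned int nr_entries;

        /* skipnr == 0: do not skip any leading frames */
        nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 0);
        return stack_depot_save(entries, nr_entries, GFP_NOWAIT);
}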