linux/lib/stackdepot.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Generic stack depot for storing stack traces.
 *
 * Some debugging tools need to save stack traces of certain events which can
 * be later presented to the user. For example, KASAN needs to save alloc and
 * free stacks for each object, but storing two stack traces per object
 * requires too much memory (e.g. SLUB_DEBUG needs 256 bytes per object for
 * that).
 *
 * Instead, stack depot maintains a hashtable of unique stacktraces. Since alloc
 * and free stacks repeat a lot, we save about 100x space.
 * Stacks are never removed from the depot, so we store them contiguously,
 * one after another, in a contiguous memory allocation.
 *
 * Author: Alexander Potapenko <glider@google.com>
 * Copyright (C) 2016 Google, Inc.
 *
 * Based on code by Dmitry Chernenkov.
 */
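/*
 * A rough usage sketch (illustrative only, not taken from this file): a
 * client typically captures a trace with stack_trace_save() and hands it to
 * stack_depot_save(), keeping only the returned handle. The local names
 * below are made up.
 *
 *	unsigned long entries[64];
 *	unsigned int nr;
 *	depot_stack_handle_t handle;
 *
 *	nr = stack_trace_save(entries, ARRAY_SIZE(entries), 0);
 *	handle = stack_depot_save(entries, nr, GFP_NOWAIT);
 */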

#include <linux/gfp.h>
#include <linux/interrupt.h>
#include <linux/jhash.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/percpu.h>
#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/stacktrace.h>
#include <linux/stackdepot.h>
#include <linux/string.h>
#include <linux/types.h>

#define DEPOT_STACK_BITS (sizeof(depot_stack_handle_t) * 8)

#define STACK_ALLOC_NULL_PROTECTION_BITS 1
#define STACK_ALLOC_ORDER 2 /* 'Slab' size order for stack depot, 4 pages */
#define STACK_ALLOC_SIZE (1LL << (PAGE_SHIFT + STACK_ALLOC_ORDER))
#define STACK_ALLOC_ALIGN 4
#define STACK_ALLOC_OFFSET_BITS (STACK_ALLOC_ORDER + PAGE_SHIFT - \
					STACK_ALLOC_ALIGN)
#define STACK_ALLOC_INDEX_BITS (DEPOT_STACK_BITS - \
		STACK_ALLOC_NULL_PROTECTION_BITS - STACK_ALLOC_OFFSET_BITS)
#define STACK_ALLOC_SLABS_CAP 8192
#define STACK_ALLOC_MAX_SLABS \
	(((1LL << (STACK_ALLOC_INDEX_BITS)) < STACK_ALLOC_SLABS_CAP) ? \
	 (1LL << (STACK_ALLOC_INDEX_BITS)) : STACK_ALLOC_SLABS_CAP)

/* The compact structure to store the reference to stacks. */
union handle_parts {
	depot_stack_handle_t handle;
	struct {
		u32 slabindex : STACK_ALLOC_INDEX_BITS;
		u32 offset : STACK_ALLOC_OFFSET_BITS;
		u32 valid : STACK_ALLOC_NULL_PROTECTION_BITS;
	};
};
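/*
 * Worked example of the layout above (an illustration assuming a 32-bit
 * depot_stack_handle_t and PAGE_SHIFT == 12, i.e. 4 KiB pages):
 *
 *	STACK_ALLOC_SIZE        = 1 << (12 + 2)      = 16 KiB per slab
 *	STACK_ALLOC_OFFSET_BITS = 2 + 12 - 4         = 10
 *	STACK_ALLOC_INDEX_BITS  = 32 - 1 - 10        = 21
 *	STACK_ALLOC_MAX_SLABS   = min(1 << 21, 8192) = 8192
 *
 * The offset field counts 16-byte (1 << STACK_ALLOC_ALIGN) units, so 10 bits
 * address a whole 16 KiB slab; with 8192 slabs the depot tops out at
 * 8192 * 16 KiB = 128 MiB of stack records under these assumptions.
 */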

struct stack_record {
	struct stack_record *next;	/* Link in the hashtable */
	u32 hash;			/* Hash in the hashtable */
	u32 size;			/* Number of frames in the stack */
	union handle_parts handle;
	unsigned long entries[1];	/* Variable-sized array of entries. */
};

static void *stack_slabs[STACK_ALLOC_MAX_SLABS];

static int depot_index;
static int next_slab_inited;
static size_t depot_offset;
static DEFINE_SPINLOCK(depot_lock);

static bool init_stack_slab(void **prealloc)
{
	if (!*prealloc)
		return false;
	/*
	 * This smp_load_acquire() pairs with smp_store_release() to
	 * |next_slab_inited| below and in depot_alloc_stack().
	 */
	if (smp_load_acquire(&next_slab_inited))
		return true;
	if (stack_slabs[depot_index] == NULL) {
		stack_slabs[depot_index] = *prealloc;
		*prealloc = NULL;
	} else {
		/* If this is the last depot slab, do not touch the next one. */
		if (depot_index + 1 < STACK_ALLOC_MAX_SLABS) {
			stack_slabs[depot_index + 1] = *prealloc;
			*prealloc = NULL;
		}
		/*
		 * This smp_store_release pairs with smp_load_acquire() from
		 * |next_slab_inited| above and in stack_depot_save().
		 */
		smp_store_release(&next_slab_inited, 1);
	}
	return true;
}

/* Allocation of a new stack in raw storage */
static struct stack_record *depot_alloc_stack(unsigned long *entries, int size,
		u32 hash, void **prealloc, gfp_t alloc_flags)
{
	int required_size = offsetof(struct stack_record, entries) +
		sizeof(unsigned long) * size;
	struct stack_record *stack;

	required_size = ALIGN(required_size, 1 << STACK_ALLOC_ALIGN);

	if (unlikely(depot_offset + required_size > STACK_ALLOC_SIZE)) {
		if (unlikely(depot_index + 1 >= STACK_ALLOC_MAX_SLABS)) {
			WARN_ONCE(1, "Stack depot reached limit capacity");
			return NULL;
		}
		depot_index++;
		depot_offset = 0;
		/*
		 * smp_store_release() here pairs with smp_load_acquire() from
		 * |next_slab_inited| in stack_depot_save() and
		 * init_stack_slab().
		 */
		if (depot_index + 1 < STACK_ALLOC_MAX_SLABS)
			smp_store_release(&next_slab_inited, 0);
	}
	init_stack_slab(prealloc);
	if (stack_slabs[depot_index] == NULL)
		return NULL;

	stack = stack_slabs[depot_index] + depot_offset;

	stack->hash = hash;
	stack->size = size;
	stack->handle.slabindex = depot_index;
	stack->handle.offset = depot_offset >> STACK_ALLOC_ALIGN;
	stack->handle.valid = 1;
	memcpy(stack->entries, entries, size * sizeof(unsigned long));
	depot_offset += required_size;

	return stack;
}

#define STACK_HASH_ORDER 20
#define STACK_HASH_SIZE (1L << STACK_HASH_ORDER)
#define STACK_HASH_MASK (STACK_HASH_SIZE - 1)
#define STACK_HASH_SEED 0x9747b28c

static struct stack_record *stack_table[STACK_HASH_SIZE] = {
	[0 ... STACK_HASH_SIZE - 1] = NULL
};
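/*
 * A sizing note (assuming 64-bit pointers): the table above has 1 << 20
 * buckets of one pointer each, i.e. about 8 MiB of statically allocated
 * memory, regardless of how many stacks are ever saved.
 */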

/* Calculate hash for a stack */
static inline u32 hash_stack(unsigned long *entries, unsigned int size)
{
	return jhash2((u32 *)entries,
		      size * sizeof(unsigned long) / sizeof(u32),
		      STACK_HASH_SEED);
}

/* Use our own, non-instrumented version of memcmp().
 *
 * We actually don't care about the order, just the equality.
 */
static inline
int stackdepot_memcmp(const unsigned long *u1, const unsigned long *u2,
			unsigned int n)
{
	for ( ; n-- ; u1++, u2++) {
		if (*u1 != *u2)
			return 1;
	}
	return 0;
}

/* Find a stack that is equal to the one stored in entries in the hash table */
static inline struct stack_record *find_stack(struct stack_record *bucket,
					      unsigned long *entries, int size,
					      u32 hash)
{
	struct stack_record *found;

	for (found = bucket; found; found = found->next) {
		if (found->hash == hash &&
		    found->size == size &&
		    !stackdepot_memcmp(entries, found->entries, size))
			return found;
	}
	return NULL;
}

/**
 * stack_depot_fetch - Fetch stack entries from a depot
 *
 * @handle:		Stack depot handle which was returned from
 *			stack_depot_save().
 * @entries:		Pointer to store the entries address
 *
 * Return: The number of trace entries for this stack.
 */
unsigned int stack_depot_fetch(depot_stack_handle_t handle,
			       unsigned long **entries)
{
	union handle_parts parts = { .handle = handle };
	void *slab;
	size_t offset = parts.offset << STACK_ALLOC_ALIGN;
	struct stack_record *stack;

	*entries = NULL;
	if (parts.slabindex > depot_index) {
		WARN(1, "slab index %d out of bounds (%d) for stack id %08x\n",
			parts.slabindex, depot_index, handle);
		return 0;
	}
	slab = stack_slabs[parts.slabindex];
	if (!slab)
		return 0;
	stack = slab + offset;

	*entries = stack->entries;
	return stack->size;
}
EXPORT_SYMBOL_GPL(stack_depot_fetch);
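/*
 * A defensive-use sketch (illustrative): stack_depot_save() returns 0 when it
 * could not store a trace, so callers are expected to test the handle before
 * fetching:
 *
 *	if (handle) {
 *		unsigned long *e;
 *		unsigned int n = stack_depot_fetch(handle, &e);
 *
 *		stack_trace_print(e, n, 0);
 *	}
 */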

/**
 * stack_depot_save - Save a stack trace from an array
 *
 * @entries:		Pointer to storage array
 * @nr_entries:		Size of the storage array
 * @alloc_flags:	Allocation gfp flags
 *
 * Return: The handle of the stack struct stored in the depot
 */
depot_stack_handle_t stack_depot_save(unsigned long *entries,
				      unsigned int nr_entries,
				      gfp_t alloc_flags)
{
	struct stack_record *found = NULL, **bucket;
	depot_stack_handle_t retval = 0;
	struct page *page = NULL;
	void *prealloc = NULL;
	unsigned long flags;
	u32 hash;

	if (unlikely(nr_entries == 0))
		goto fast_exit;

	hash = hash_stack(entries, nr_entries);
	bucket = &stack_table[hash & STACK_HASH_MASK];

	/*
	 * Fast path: look the stack trace up without locking.
	 * The smp_load_acquire() here pairs with smp_store_release() to
	 * |bucket| below.
	 */
	found = find_stack(smp_load_acquire(bucket), entries,
			   nr_entries, hash);
	if (found)
		goto exit;

	/*
	 * Check if the current or the next stack slab needs to be
	 * initialized. If so, allocate the memory - we won't be able to do
	 * that under the lock.
	 *
	 * The smp_load_acquire() here pairs with smp_store_release() to
	 * |next_slab_inited| in depot_alloc_stack() and init_stack_slab().
	 */
	if (unlikely(!smp_load_acquire(&next_slab_inited))) {
		/*
		 * Zero out zone modifiers, as we don't have specific zone
		 * requirements. Keep the flags related to allocation in atomic
		 * contexts and I/O.
		 */
		alloc_flags &= ~GFP_ZONEMASK;
		alloc_flags &= (GFP_ATOMIC | GFP_KERNEL);
		alloc_flags |= __GFP_NOWARN;
		page = alloc_pages(alloc_flags, STACK_ALLOC_ORDER);
		if (page)
			prealloc = page_address(page);
	}

	spin_lock_irqsave(&depot_lock, flags);

	found = find_stack(*bucket, entries, nr_entries, hash);
	if (!found) {
		struct stack_record *new =
			depot_alloc_stack(entries, nr_entries,
					  hash, &prealloc, alloc_flags);
		if (new) {
			new->next = *bucket;
			/*
			 * This smp_store_release() pairs with
			 * smp_load_acquire() from |bucket| above.
			 */
			smp_store_release(bucket, new);
			found = new;
		}
	} else if (prealloc) {
		/*
		 * We didn't need to store this stack trace, but let's keep
		 * the preallocated memory for the future.
		 */
		WARN_ON(!init_stack_slab(&prealloc));
	}

	spin_unlock_irqrestore(&depot_lock, flags);
exit:
	if (prealloc) {
		/* Nobody used this memory, ok to free it. */
		free_pages((unsigned long)prealloc, STACK_ALLOC_ORDER);
	}
	if (found)
		retval = found->handle.handle;
fast_exit:
	return retval;
}
EXPORT_SYMBOL_GPL(stack_depot_save);
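/*
 * Deduplication sketch (illustrative): identical traces hash to the same
 * bucket and are found by find_stack(), so saving the same entries twice
 * yields the same handle and handles can be compared directly:
 *
 *	depot_stack_handle_t h1 = stack_depot_save(entries, nr, GFP_NOWAIT);
 *	depot_stack_handle_t h2 = stack_depot_save(entries, nr, GFP_NOWAIT);
 *
 * Here h1 == h2 whenever the first call succeeded (returned non-zero).
 */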

static inline int in_irqentry_text(unsigned long ptr)
{
	return (ptr >= (unsigned long)&__irqentry_text_start &&
		ptr < (unsigned long)&__irqentry_text_end) ||
		(ptr >= (unsigned long)&__softirqentry_text_start &&
		 ptr < (unsigned long)&__softirqentry_text_end);
}

unsigned int filter_irq_stacks(unsigned long *entries,
			       unsigned int nr_entries)
{
	unsigned int i;

	for (i = 0; i < nr_entries; i++) {
		if (in_irqentry_text(entries[i])) {
			/* Include the irqentry function into the stack. */
			return i + 1;
		}
	}
	return nr_entries;
}
EXPORT_SYMBOL_GPL(filter_irq_stacks);
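/*
 * A trimming sketch (illustrative; callers such as KASAN do this before
 * saving): cutting everything below the IRQ entry point lets interrupt
 * traces taken in different tasks collapse into one depot record:
 *
 *	nr_entries = filter_irq_stacks(entries, nr_entries);
 *	handle = stack_depot_save(entries, nr_entries, GFP_NOWAIT);
 */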