linux/lib/stackdepot.c
/*
 * Generic stack depot for storing stack traces.
 *
 * Some debugging tools need to save stack traces of certain events which can
 * later be presented to the user. For example, KASAN needs to save alloc and
 * free stacks for each object, but storing two stack traces per object
 * requires too much memory (e.g. SLUB_DEBUG needs 256 bytes per object for
 * that).
 *
 * Instead, stack depot maintains a hashtable of unique stacktraces. Since alloc
 * and free stacks repeat a lot, we save about 100x space.
 * Stacks are never removed from depot, so we store them one after another in a
 * contiguous memory allocation.
 *
 * Author: Alexander Potapenko <glider@google.com>
 * Copyright (C) 2016 Google, Inc.
 *
 * Based on code by Dmitry Chernenkov.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 */

#include <linux/gfp.h>
#include <linux/jhash.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/percpu.h>
#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/stacktrace.h>
#include <linux/stackdepot.h>
#include <linux/string.h>
#include <linux/types.h>

#define DEPOT_STACK_BITS (sizeof(depot_stack_handle_t) * 8)

#define STACK_ALLOC_NULL_PROTECTION_BITS 1
#define STACK_ALLOC_ORDER 2 /* 'Slab' size order for stack depot, 4 pages */
#define STACK_ALLOC_SIZE (1LL << (PAGE_SHIFT + STACK_ALLOC_ORDER))
#define STACK_ALLOC_ALIGN 4
#define STACK_ALLOC_OFFSET_BITS (STACK_ALLOC_ORDER + PAGE_SHIFT - \
                                        STACK_ALLOC_ALIGN)
#define STACK_ALLOC_INDEX_BITS (DEPOT_STACK_BITS - \
                STACK_ALLOC_NULL_PROTECTION_BITS - STACK_ALLOC_OFFSET_BITS)
#define STACK_ALLOC_SLABS_CAP 8192
#define STACK_ALLOC_MAX_SLABS \
        (((1LL << (STACK_ALLOC_INDEX_BITS)) < STACK_ALLOC_SLABS_CAP) ? \
         (1LL << (STACK_ALLOC_INDEX_BITS)) : STACK_ALLOC_SLABS_CAP)

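/*
 * For example, assuming 4 KB pages (PAGE_SHIFT == 12) and a 32-bit
 * depot_stack_handle_t, the handle bit budget works out as follows:
 *
 *   DEPOT_STACK_BITS        = 32
 *   STACK_ALLOC_OFFSET_BITS = 2 + 12 - 4 = 10   (record offset in 16-byte
 *                                                units within a 16 KB slab)
 *   STACK_ALLOC_INDEX_BITS  = 32 - 1 - 10 = 21
 *   STACK_ALLOC_MAX_SLABS   = min(2^21, 8192) = 8192
 *
 * i.e. one 32-bit handle can address up to 8192 slabs of 4 pages each,
 * roughly 128 MB of stack storage.
 */
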
/* Compact structure that stores a reference to a stack. */
union handle_parts {
        depot_stack_handle_t handle;
        struct {
                u32 slabindex : STACK_ALLOC_INDEX_BITS;
                u32 offset : STACK_ALLOC_OFFSET_BITS;
                u32 valid : STACK_ALLOC_NULL_PROTECTION_BITS;
        };
};

struct stack_record {
        struct stack_record *next;      /* Link in the hashtable */
        u32 hash;                       /* Hash in the hashtable */
        u32 size;                       /* Number of frames in the stack */
        union handle_parts handle;
        unsigned long entries[1];       /* Variable-sized array of entries. */
};

static void *stack_slabs[STACK_ALLOC_MAX_SLABS];

static int depot_index;
static int next_slab_inited;
static size_t depot_offset;
static DEFINE_SPINLOCK(depot_lock);

static bool init_stack_slab(void **prealloc)
{
        if (!*prealloc)
                return false;
        /*
         * This smp_load_acquire() pairs with smp_store_release() to
         * |next_slab_inited| below and in depot_alloc_stack().
         */
        if (smp_load_acquire(&next_slab_inited))
                return true;
        if (stack_slabs[depot_index] == NULL) {
                stack_slabs[depot_index] = *prealloc;
        } else {
                stack_slabs[depot_index + 1] = *prealloc;
                /*
                 * This smp_store_release pairs with smp_load_acquire() from
                 * |next_slab_inited| above and in depot_save_stack().
                 */
                smp_store_release(&next_slab_inited, 1);
        }
        *prealloc = NULL;
        return true;
}

/* Allocation of a new stack in raw storage */
static struct stack_record *depot_alloc_stack(unsigned long *entries, int size,
                u32 hash, void **prealloc, gfp_t alloc_flags)
{
        int required_size = offsetof(struct stack_record, entries) +
                sizeof(unsigned long) * size;
        struct stack_record *stack;

        required_size = ALIGN(required_size, 1 << STACK_ALLOC_ALIGN);

        if (unlikely(depot_offset + required_size > STACK_ALLOC_SIZE)) {
                if (unlikely(depot_index + 1 >= STACK_ALLOC_MAX_SLABS)) {
                        WARN_ONCE(1, "Stack depot reached limit capacity");
                        return NULL;
                }
                depot_index++;
                depot_offset = 0;
                /*
                 * smp_store_release() here pairs with smp_load_acquire() from
                 * |next_slab_inited| in depot_save_stack() and
                 * init_stack_slab().
                 */
                if (depot_index + 1 < STACK_ALLOC_MAX_SLABS)
                        smp_store_release(&next_slab_inited, 0);
        }
        init_stack_slab(prealloc);
        if (stack_slabs[depot_index] == NULL)
                return NULL;

        stack = stack_slabs[depot_index] + depot_offset;

        stack->hash = hash;
        stack->size = size;
        stack->handle.slabindex = depot_index;
        stack->handle.offset = depot_offset >> STACK_ALLOC_ALIGN;
        stack->handle.valid = 1;
        memcpy(stack->entries, entries, size * sizeof(unsigned long));
        depot_offset += required_size;

        return stack;
}

#define STACK_HASH_ORDER 20
#define STACK_HASH_SIZE (1L << STACK_HASH_ORDER)
#define STACK_HASH_MASK (STACK_HASH_SIZE - 1)
#define STACK_HASH_SEED 0x9747b28c

static struct stack_record *stack_table[STACK_HASH_SIZE] = {
        [0 ...  STACK_HASH_SIZE - 1] = NULL
};

/* Calculate hash for a stack */
static inline u32 hash_stack(unsigned long *entries, unsigned int size)
{
        return jhash2((u32 *)entries,
                               size * sizeof(unsigned long) / sizeof(u32),
                               STACK_HASH_SEED);
}

/* Use our own, non-instrumented version of memcmp().
 *
 * We actually don't care about the order, just the equality.
 */
static inline
int stackdepot_memcmp(const unsigned long *u1, const unsigned long *u2,
                        unsigned int n)
{
        for ( ; n-- ; u1++, u2++) {
                if (*u1 != *u2)
                        return 1;
        }
        return 0;
}

/* Find a stack in the given hash bucket that matches the trace in |entries| */
static inline struct stack_record *find_stack(struct stack_record *bucket,
                                             unsigned long *entries, int size,
                                             u32 hash)
{
        struct stack_record *found;

        for (found = bucket; found; found = found->next) {
                if (found->hash == hash &&
                    found->size == size &&
                    !stackdepot_memcmp(entries, found->entries, size))
                        return found;
        }
        return NULL;
}

void depot_fetch_stack(depot_stack_handle_t handle, struct stack_trace *trace)
{
        union handle_parts parts = { .handle = handle };
        void *slab = stack_slabs[parts.slabindex];
        size_t offset = parts.offset << STACK_ALLOC_ALIGN;
        struct stack_record *stack = slab + offset;

        trace->nr_entries = trace->max_entries = stack->size;
        trace->entries = stack->entries;
        trace->skip = 0;
}
EXPORT_SYMBOL_GPL(depot_fetch_stack);

/**
 * depot_save_stack - save stack in a stack depot.
 * @trace: the stacktrace to save.
 * @alloc_flags: flags for allocating additional memory if required.
 *
 * Returns the handle of the stack struct stored in depot.
 */
depot_stack_handle_t depot_save_stack(struct stack_trace *trace,
                                    gfp_t alloc_flags)
{
        u32 hash;
        depot_stack_handle_t retval = 0;
        struct stack_record *found = NULL, **bucket;
        unsigned long flags;
        struct page *page = NULL;
        void *prealloc = NULL;

        if (unlikely(trace->nr_entries == 0))
                goto fast_exit;

        hash = hash_stack(trace->entries, trace->nr_entries);
        bucket = &stack_table[hash & STACK_HASH_MASK];

        /*
         * Fast path: look the stack trace up without locking.
         * The smp_load_acquire() here pairs with smp_store_release() to
         * |bucket| below.
         */
        found = find_stack(smp_load_acquire(bucket), trace->entries,
                           trace->nr_entries, hash);
        if (found)
                goto exit;

        /*
         * Check if the current or the next stack slab needs to be
         * initialized. If so, allocate the memory now - we won't be able to
         * do that under the lock.
         *
         * The smp_load_acquire() here pairs with smp_store_release() to
         * |next_slab_inited| in depot_alloc_stack() and init_stack_slab().
         */
        if (unlikely(!smp_load_acquire(&next_slab_inited))) {
                /*
                 * Zero out zone modifiers, as we don't have specific zone
                 * requirements. Keep the flags related to allocation in atomic
                 * contexts and I/O.
                 */
                alloc_flags &= ~GFP_ZONEMASK;
                alloc_flags &= (GFP_ATOMIC | GFP_KERNEL);
                alloc_flags |= __GFP_NOWARN;
                page = alloc_pages(alloc_flags, STACK_ALLOC_ORDER);
                if (page)
                        prealloc = page_address(page);
        }

        spin_lock_irqsave(&depot_lock, flags);

        found = find_stack(*bucket, trace->entries, trace->nr_entries, hash);
        if (!found) {
                struct stack_record *new =
                        depot_alloc_stack(trace->entries, trace->nr_entries,
                                          hash, &prealloc, alloc_flags);
                if (new) {
                        new->next = *bucket;
                        /*
                         * This smp_store_release() pairs with
                         * smp_load_acquire() from |bucket| above.
                         */
                        smp_store_release(bucket, new);
                        found = new;
                }
        } else if (prealloc) {
                /*
                 * We didn't need to store this stack trace, but let's keep
                 * the preallocated memory for the future.
                 */
                WARN_ON(!init_stack_slab(&prealloc));
        }

        spin_unlock_irqrestore(&depot_lock, flags);
exit:
        if (prealloc) {
                /* Nobody used this memory, ok to free it. */
                free_pages((unsigned long)prealloc, STACK_ALLOC_ORDER);
        }
        if (found)
                retval = found->handle.handle;
fast_exit:
        return retval;
}
EXPORT_SYMBOL_GPL(depot_save_stack);
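
/*
 * A minimal usage sketch (illustration only - the helper below is a
 * hypothetical caller, not part of this file): capture a trace with the
 * stacktrace API, deduplicate it via depot_save_stack(), keep only the
 * 32-bit handle, and expand it again with depot_fetch_stack() when needed.
 *
 *	static depot_stack_handle_t record_current_stack(gfp_t flags)
 *	{
 *		unsigned long entries[16];
 *		struct stack_trace trace = {
 *			.nr_entries	= 0,
 *			.max_entries	= ARRAY_SIZE(entries),
 *			.entries	= entries,
 *			.skip		= 0,
 *		};
 *
 *		save_stack_trace(&trace);
 *		return depot_save_stack(&trace, flags);
 *	}
 *
 * A saved handle can later be turned back into a stack_trace whose entries
 * point directly into the depot's storage:
 *
 *	struct stack_trace trace;
 *
 *	depot_fetch_stack(handle, &trace);
 *	print_stack_trace(&trace, 0);
 */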