linux/arch/sh/kernel/unwinder.c
/*
 * Copyright (C) 2009  Matt Fleming
 *
 * Based, in part, on kernel/time/clocksource.c.
 *
 * This file provides arbitration code for stack unwinders.
 *
 * Multiple stack unwinders can be available on a system, usually with
 * the most accurate unwinder being the currently active one.
 */
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/module.h>
#include <asm/unwinder.h>
#include <asm/atomic.h>

/*
 * This is the most basic stack unwinder an architecture can
 * provide. For architectures without reliable frame pointers, e.g.
 * RISC CPUs, it can be implemented by looking through the stack for
 * addresses that lie within the kernel text section.
 *
 * Other CPUs, e.g. x86, can use their frame pointer register to
 * construct more accurate stack traces.
 */
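/*
 * stack_reader_dump() is referenced by the "stack-reader" unwinder
 * below but is not part of this excerpt. The following is a minimal
 * sketch of such a fallback dumper, assuming kstack_end() and
 * __kernel_text_address() are available through the includes above,
 * and that ops->address() takes a (data, addr, reliable) triple: scan
 * each word on the stack and report anything that looks like a kernel
 * text address.
 */
static void stack_reader_dump(struct task_struct *task, struct pt_regs *regs,
			      unsigned long *sp,
			      const struct stacktrace_ops *ops, void *data)
{
	while (!kstack_end(sp)) {
		unsigned long addr = *sp++;

		/* Guessed return addresses are inherently unreliable. */
		if (__kernel_text_address(addr))
			ops->address(data, addr, 0);
	}
}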
static struct list_head unwinder_list;
static struct unwinder stack_reader = {
	.name = "stack-reader",
	.dump = stack_reader_dump,
	.rating = 50,
	.list = {
		.next = &unwinder_list,
		.prev = &unwinder_list,
	},
};

/*
 * "curr_unwinder" points to the stack unwinder currently in use. This
 * is the unwinder with the highest rating.
 *
 * "unwinder_list" is a linked list of all available unwinders, sorted
 * by descending rating.
 *
 * All modifications of "curr_unwinder" and "unwinder_list" must be
 * performed whilst holding "unwinder_lock".
 */
static struct unwinder *curr_unwinder = &stack_reader;

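/*
 * Note that the static initializers here and in "stack_reader" above
 * hand-build a valid circular list: "unwinder_list" and
 * "stack_reader.list" point at each other, so the basic stack reader
 * is already enqueued before any unwinder_register() call.
 */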
static struct list_head unwinder_list = {
	.next = &stack_reader.list,
	.prev = &stack_reader.list,
};

static DEFINE_SPINLOCK(unwinder_lock);

/**
 * select_unwinder - Select the best registered stack unwinder.
 *
 * Private function. Must hold unwinder_lock when called.
 *
 * Select the stack unwinder with the best rating. This is useful for
 * setting up curr_unwinder. Returns NULL if the list is empty or if
 * the best-rated unwinder is already curr_unwinder, i.e. when no
 * switch is required.
 */
static struct unwinder *select_unwinder(void)
{
	struct unwinder *best;

	if (list_empty(&unwinder_list))
		return NULL;

	/* The list is sorted by descending rating, so the head is best. */
	best = list_entry(unwinder_list.next, struct unwinder, list);
	if (best == curr_unwinder)
		return NULL;

	return best;
}

/*
 * Enqueue the stack unwinder sorted by rating, highest first.
 */
static int unwinder_enqueue(struct unwinder *ops)
{
	struct list_head *tmp, *entry = &unwinder_list;

	list_for_each(tmp, &unwinder_list) {
		struct unwinder *o;

		o = list_entry(tmp, struct unwinder, list);
		if (o == ops)
			return -EBUSY;
		/* Remember the last entry rated at least as highly as ops. */
		if (o->rating >= ops->rating)
			entry = tmp;
	}
	/* list_add() inserts after "entry", preserving the sort order. */
	list_add(&ops->list, entry);

	return 0;
}

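/*
 * Worked example (illustrative ratings): with unwinders rated 150 and
 * 50 queued as 150 -> 50, enqueueing a rating-100 unwinder leaves
 * "entry" pointing at the 150 node (the last one with rating >= 100),
 * so list_add() yields 150 -> 100 -> 50.
 */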
/**
 * unwinder_register - Used to install a new stack unwinder
 * @u: unwinder to be registered
 *
 * Install the new stack unwinder on the unwinder list, which is sorted
 * by rating.
 *
 * Returns -EBUSY if the unwinder was already registered, zero
 * otherwise.
 */
int unwinder_register(struct unwinder *u)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&unwinder_lock, flags);
	ret = unwinder_enqueue(u);
	if (!ret) {
		struct unwinder *best = select_unwinder();

		/*
		 * select_unwinder() returns NULL when the best-rated
		 * unwinder is already current; don't clobber
		 * curr_unwinder in that case.
		 */
		if (best)
			curr_unwinder = best;
	}
	spin_unlock_irqrestore(&unwinder_lock, flags);

	return ret;
}
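
/*
 * Example (hypothetical): a more accurate unwinder registering itself.
 * A DWARF-based unwinder might rate itself higher than the basic stack
 * reader's 50 so that select_unwinder() promotes it once registered.
 * "dwarf_unwinder_dump" is illustrative and not defined in this file.
 */
#if 0	/* illustration only */
static struct unwinder dwarf_unwinder = {
	.name	= "dwarf-unwinder",
	.dump	= dwarf_unwinder_dump,
	.rating	= 150,
};

static int __init register_dwarf_unwinder(void)
{
	return unwinder_register(&dwarf_unwinder);
}
early_initcall(register_dwarf_unwinder);
#endif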

/*
 * Set (e.g. from the trap handler behind UNWINDER_BUG() in
 * <asm/unwinder.h>) when the current unwinder faults while producing
 * a trace; unwind_stack() then drops that unwinder and falls back to
 * the next-best one.
 */
int unwinder_faulted = 0;

/*
 * Unwind the call stack and pass information to the stacktrace_ops
 * functions. Also handle the case where we need to switch to a new
 * stack dumper because the current one faulted unexpectedly.
 */
void unwind_stack(struct task_struct *task, struct pt_regs *regs,
		  unsigned long *sp, const struct stacktrace_ops *ops,
		  void *data)
{
	unsigned long flags;

	/*
	 * The problem with unwinders with high ratings is that they are
	 * inherently more complicated than the simple ones with lower
	 * ratings. We are therefore more likely to fault in the
	 * complicated ones, e.g. hitting BUG()s. If we fault in the
	 * code for the current stack unwinder we try to downgrade to
	 * one with a lower rating.
	 *
	 * Hopefully this will give us a semi-reliable stacktrace so we
	 * can diagnose why curr_unwinder->dump() faulted.
	 */
	if (unwinder_faulted) {
		spin_lock_irqsave(&unwinder_lock, flags);

		/* Make sure no one beat us to changing the unwinder */
		if (unwinder_faulted && !list_is_singular(&unwinder_list)) {
			list_del(&curr_unwinder->list);

			/*
			 * The faulting unwinder has just been removed,
			 * so the list is still non-empty and its head
			 * cannot be curr_unwinder: select_unwinder()
			 * returns a valid replacement here.
			 */
			curr_unwinder = select_unwinder();

			unwinder_faulted = 0;
		}

		spin_unlock_irqrestore(&unwinder_lock, flags);
	}

	curr_unwinder->dump(task, regs, sp, ops, data);
}
EXPORT_SYMBOL_GPL(unwind_stack);
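
/*
 * Example (hypothetical): how a client such as save_stack_trace()
 * might drive unwind_stack() through a stacktrace_ops callback. The
 * callback signature and the stack_trace bookkeeping are assumptions
 * modelled on stacktrace code of the era, not part of this file.
 */
#if 0	/* illustration only */
static void example_save_address(void *data, unsigned long addr, int reliable)
{
	struct stack_trace *trace = data;

	if (trace->nr_entries < trace->max_entries)
		trace->entries[trace->nr_entries++] = addr;
}

static const struct stacktrace_ops example_save_ops = {
	.address = example_save_address,
};

void example_save_stack_trace(struct stack_trace *trace)
{
	unsigned long *sp = (unsigned long *)current_stack_pointer;

	unwind_stack(current, NULL, sp, &example_save_ops, trace);
}
#endif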