linux/lib/bug.c
// SPDX-License-Identifier: GPL-2.0
/*
  Generic support for BUG()

  This respects the following config options:

  CONFIG_BUG - emit BUG traps.  Nothing happens without this.
  CONFIG_GENERIC_BUG - enable this code.
  CONFIG_GENERIC_BUG_RELATIVE_POINTERS - use 32-bit pointers relative to
        the containing struct bug_entry for bug_addr and file.
  CONFIG_DEBUG_BUGVERBOSE - emit full file+line information for each BUG

  CONFIG_BUG and CONFIG_DEBUG_BUGVERBOSE are potentially user-settable
  (though they're generally always on).

  CONFIG_GENERIC_BUG is set by each architecture using this code.

  To use this, your architecture must:

  1. Set up the config options:
     - Enable CONFIG_GENERIC_BUG if CONFIG_BUG

  2. Implement BUG (and optionally BUG_ON, WARN, WARN_ON)
     - Define HAVE_ARCH_BUG
     - Implement BUG() to generate a faulting instruction
     - NOTE: struct bug_entry does not have "file" or "line" entries
       when CONFIG_DEBUG_BUGVERBOSE is not enabled, so you must generate
       the values accordingly.

  3. Implement the trap
     - In the illegal instruction trap handler (typically), verify
       that the fault was in kernel mode, and call report_bug()
     - report_bug() will return whether it was a false alarm, a warning,
       or an actual bug.
     - You must implement the is_valid_bugaddr(bugaddr) callback, which
       returns true if bugaddr is a real kernel address that points to
       the expected BUG trap instruction.

     (A rough sketch of what these pieces can look like follows this
     comment.)

    Jeremy Fitzhardinge <jeremy@goop.org> 2006
 */
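/*
 * Illustration only, not part of this file: a rough sketch of the
 * architecture-side pieces described above.  arch_emit_bug_entry(),
 * die() and insn_len are placeholders; real ports (see, e.g.,
 * arch/x86/include/asm/bug.h) emit the trapping instruction and its
 * struct bug_entry together with inline asm and .pushsection/.popsection.
 *
 *      #define HAVE_ARCH_BUG
 *      #define BUG() do {                                              \
 *              arch_emit_bug_entry();  // trap insn + __bug_table entry \
 *              unreachable();                                          \
 *      } while (0)
 *
 *      // lib/bug.c expects this from the architecture:
 *      int is_valid_bugaddr(unsigned long addr)
 *      {
 *              // e.g. kernel address and/or the expected opcode at addr
 *              return addr >= PAGE_OFFSET;
 *      }
 *
 *      // In the (typically illegal-instruction) trap handler, kernel mode only:
 *      switch (report_bug(instruction_pointer(regs), regs)) {
 *      case BUG_TRAP_TYPE_WARN:
 *              // warning printed; step over the trap insn and resume
 *              instruction_pointer_set(regs, instruction_pointer(regs) + insn_len);
 *              return;
 *      case BUG_TRAP_TYPE_BUG:
 *              die("kernel BUG", regs);        // arch-specific, does not return
 *      default:
 *              break;          // not ours: deliver SIGILL / oops as usual
 *      }
 */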

#define pr_fmt(fmt) fmt

#include <linux/list.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/bug.h>
#include <linux/sched.h>
#include <linux/rculist.h>
#include <linux/ftrace.h>

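/* Bounds of vmlinux's own __bug_table section, provided by the linker script
 * (see BUG_TABLE in include/asm-generic/vmlinux.lds.h). */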
extern struct bug_entry __start___bug_table[], __stop___bug_table[];

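/*
 * Recover the address of the trapping instruction recorded in a bug_entry.
 * With CONFIG_GENERIC_BUG_RELATIVE_POINTERS the entry stores a signed 32-bit
 * displacement from the entry itself rather than an absolute pointer, which
 * keeps struct bug_entry smaller on 64-bit kernels.
 */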
static inline unsigned long bug_addr(const struct bug_entry *bug)
{
#ifndef CONFIG_GENERIC_BUG_RELATIVE_POINTERS
        return bug->bug_addr;
#else
        return (unsigned long)bug + bug->bug_addr_disp;
#endif
}

#ifdef CONFIG_MODULES
/* Updates are protected by module mutex */
static LIST_HEAD(module_bug_list);

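/*
 * Search every loaded module's bug table for the entry matching bugaddr.
 * Called from the trap path via find_bug(); rcu_read_lock_sched() pairs
 * with the RCU list manipulation in module_bug_finalize()/_cleanup().
 */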
static struct bug_entry *module_find_bug(unsigned long bugaddr)
{
        struct module *mod;
        struct bug_entry *bug = NULL;

        rcu_read_lock_sched();
        list_for_each_entry_rcu(mod, &module_bug_list, bug_list) {
                unsigned i;

                bug = mod->bug_table;
                for (i = 0; i < mod->num_bugs; ++i, ++bug)
                        if (bugaddr == bug_addr(bug))
                                goto out;
        }
        bug = NULL;
out:
        rcu_read_unlock_sched();

        return bug;
}

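/*
 * Called, under module_mutex, while a module is being loaded: locate its
 * __bug_table ELF section, if any, and publish it on module_bug_list so
 * that report_bug() can resolve BUG()s inside the module.
 */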
void module_bug_finalize(const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs,
                         struct module *mod)
{
        char *secstrings;
        unsigned int i;

        lockdep_assert_held(&module_mutex);

        mod->bug_table = NULL;
        mod->num_bugs = 0;

        /* Find the __bug_table section, if present */
        secstrings = (char *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
        for (i = 1; i < hdr->e_shnum; i++) {
                if (strcmp(secstrings+sechdrs[i].sh_name, "__bug_table"))
                        continue;
                mod->bug_table = (void *) sechdrs[i].sh_addr;
                mod->num_bugs = sechdrs[i].sh_size / sizeof(struct bug_entry);
                break;
        }

        /*
         * Strictly speaking this would need a spinlock to protect against
         * concurrent traversals, but the only traversals happen while
         * handling a BUG(), where taking a spinlock could itself deadlock
         * and be counter-productive.  Instead, additions and removals use
         * RCU list primitives; the BUG() path cannot sleep and walks the
         * list under rcu_read_lock_sched().
         */
        list_add_rcu(&mod->bug_list, &module_bug_list);
}

void module_bug_cleanup(struct module *mod)
{
        lockdep_assert_held(&module_mutex);
        list_del_rcu(&mod->bug_list);
}

#else

static inline struct bug_entry *module_find_bug(unsigned long bugaddr)
{
        return NULL;
}
#endif

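/*
 * Map a trapping address back to its bug_entry: search vmlinux's own
 * table first, then fall back to the tables of loaded modules.
 */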
struct bug_entry *find_bug(unsigned long bugaddr)
{
        struct bug_entry *bug;

        for (bug = __start___bug_table; bug < __stop___bug_table; ++bug)
                if (bugaddr == bug_addr(bug))
                        return bug;

        return module_find_bug(bugaddr);
}

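/*
 * Called by the architecture's trap handler for a kernel-mode BUG/WARN trap.
 * Returns BUG_TRAP_TYPE_NONE if bugaddr is not one of ours, BUG_TRAP_TYPE_WARN
 * if it was a WARN (the caller should step over the trapping instruction and
 * continue), or BUG_TRAP_TYPE_BUG for a real BUG (the caller is expected to
 * oops/die).
 */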
enum bug_trap_type report_bug(unsigned long bugaddr, struct pt_regs *regs)
{
        struct bug_entry *bug;
        const char *file;
        unsigned line, warning, once, done;

        if (!is_valid_bugaddr(bugaddr))
                return BUG_TRAP_TYPE_NONE;

        bug = find_bug(bugaddr);
        if (!bug)
                return BUG_TRAP_TYPE_NONE;

        disable_trace_on_warning();

        file = NULL;
        line = 0;
        warning = 0;

        if (bug) {
#ifdef CONFIG_DEBUG_BUGVERBOSE
#ifndef CONFIG_GENERIC_BUG_RELATIVE_POINTERS
                file = bug->file;
#else
                file = (const char *)bug + bug->file_disp;
#endif
                line = bug->line;
#endif
                warning = (bug->flags & BUGFLAG_WARNING) != 0;
                once = (bug->flags & BUGFLAG_ONCE) != 0;
                done = (bug->flags & BUGFLAG_DONE) != 0;

                if (warning && once) {
                        if (done)
                                return BUG_TRAP_TYPE_WARN;

                        /*
                         * Since this is the only store, concurrency is not an issue.
                         */
                        bug->flags |= BUGFLAG_DONE;
                }
        }

        /*
         * BUG() and WARN_ON() families don't print a custom debug message
         * before triggering the exception handler, so we must add the
         * "cut here" line now. WARN() issues its own "cut here" before the
         * extra debugging message it writes before triggering the handler.
         */
        if ((bug->flags & BUGFLAG_NO_CUT_HERE) == 0)
                printk(KERN_DEFAULT CUT_HERE);

        if (warning) {
                /* this is a WARN_ON rather than BUG/BUG_ON */
                __warn(file, line, (void *)bugaddr, BUG_GET_TAINT(bug), regs,
                       NULL);
                return BUG_TRAP_TYPE_WARN;
        }

        if (file)
                pr_crit("kernel BUG at %s:%u!\n", file, line);
        else
                pr_crit("Kernel BUG at %pB [verbose debug info unavailable]\n",
                        (void *)bugaddr);

        return BUG_TRAP_TYPE_BUG;
}

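/*
 * Reset the BUGFLAG_DONE bit in every bug_entry so that WARN_ONCE() and
 * friends will fire again; generic_bug_clear_once() is wired up to the
 * clear_warn_once debugfs file.
 */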
static void clear_once_table(struct bug_entry *start, struct bug_entry *end)
{
        struct bug_entry *bug;

        for (bug = start; bug < end; bug++)
                bug->flags &= ~BUGFLAG_DONE;
}

void generic_bug_clear_once(void)
{
#ifdef CONFIG_MODULES
        struct module *mod;

        rcu_read_lock_sched();
        list_for_each_entry_rcu(mod, &module_bug_list, bug_list)
                clear_once_table(mod->bug_table,
                                 mod->bug_table + mod->num_bugs);
        rcu_read_unlock_sched();
#endif

        clear_once_table(__start___bug_table, __stop___bug_table);
}